Merge branch 'master' of https://github.com/apple/foundationdb into jfu-snapshot-record-version
commit 69580593dd
@@ -596,7 +596,7 @@ fdb_error_t fdb_transaction_set_option_impl( FDBTransaction* tr,
 void fdb_transaction_set_option_v13( FDBTransaction* tr,
                                      FDBTransactionOption option )
 {
-    fdb_transaction_set_option_impl( tr, option, NULL, 0 );
+    fdb_transaction_set_option_impl( tr, option, nullptr, 0 );
 }
 
 extern "C" DLLEXPORT
@@ -157,14 +157,14 @@ namespace FDB {
     void cancel() override;
     void reset() override;
 
-    TransactionImpl() : tr(NULL) {}
+    TransactionImpl() : tr(nullptr) {}
     TransactionImpl(TransactionImpl&& r) noexcept {
         tr = r.tr;
-        r.tr = NULL;
+        r.tr = nullptr;
     }
     TransactionImpl& operator=(TransactionImpl&& r) noexcept {
         tr = r.tr;
-        r.tr = NULL;
+        r.tr = nullptr;
         return *this;
     }
 
@@ -207,10 +207,10 @@ namespace FDB {
     if ( value.present() )
         throw_on_error( fdb_network_set_option( option, value.get().begin(), value.get().size() ) );
     else
-        throw_on_error( fdb_network_set_option( option, NULL, 0 ) );
+        throw_on_error( fdb_network_set_option( option, nullptr, 0 ) );
 }
 
-API* API::instance = NULL;
+API* API::instance = nullptr;
 API::API(int version) : version(version) {}
 
 API* API::selectAPIVersion(int apiVersion) {
@@ -234,11 +234,11 @@ namespace FDB {
 }
 
 bool API::isAPIVersionSelected() {
-    return API::instance != NULL;
+    return API::instance != nullptr;
 }
 
 API* API::getInstance() {
-    if(API::instance == NULL) {
+    if(API::instance == nullptr) {
         throw api_version_unset();
     }
     else {
@@ -280,7 +280,7 @@ namespace FDB {
     if (value.present())
         throw_on_error(fdb_database_set_option(db, option, value.get().begin(), value.get().size()));
     else
-        throw_on_error(fdb_database_set_option(db, option, NULL, 0));
+        throw_on_error(fdb_database_set_option(db, option, nullptr, 0));
 }
 
 TransactionImpl::TransactionImpl(FDBDatabase* db) {
@@ -417,7 +417,7 @@ namespace FDB {
     if ( value.present() ) {
         throw_on_error( fdb_transaction_set_option( tr, option, value.get().begin(), value.get().size() ) );
     } else {
-        throw_on_error( fdb_transaction_set_option( tr, option, NULL, 0 ) );
+        throw_on_error( fdb_transaction_set_option( tr, option, nullptr, 0 ) );
     }
 }
 
@@ -31,7 +31,7 @@
 
 namespace FDB {
     struct CFuture : NonCopyable, ReferenceCounted<CFuture>, FastAllocated<CFuture> {
-        CFuture() : f(NULL) {}
+        CFuture() : f(nullptr) {}
         explicit CFuture(FDBFuture* f) : f(f) {}
         ~CFuture() {
             if (f) {
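A note on the NULL-to-nullptr sweep above: in C++, NULL is an integral constant while nullptr has its own type, std::nullptr_t, so nullptr can never be mistaken for an integer in overload resolution or template deduction. A minimal sketch of the difference, not part of this commit:

    #include <cstddef>
    #include <iostream>
    #include <type_traits>

    template <typename T>
    void probe(T) {
        // With NULL the deduced T is an integer type on common
        // implementations; with nullptr it is std::nullptr_t.
        std::cout << std::boolalpha << std::is_integral<T>::value << "\n";
    }

    int main() {
        probe(NULL);     // prints "true" where NULL is an integer constant
        probe(nullptr);  // prints "false": std::nullptr_t is not integral
    }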
@@ -1089,13 +1089,13 @@ void JNI_OnUnload(JavaVM *vm, void *reserved) {
         return;
     } else {
         // delete global references so the GC can collect them
-        if (range_result_summary_class != NULL) {
+        if (range_result_summary_class != JNI_NULL) {
             env->DeleteGlobalRef(range_result_summary_class);
         }
-        if (range_result_class != NULL) {
+        if (range_result_class != JNI_NULL) {
             env->DeleteGlobalRef(range_result_class);
         }
-        if (string_class != NULL) {
+        if (string_class != JNI_NULL) {
             env->DeleteGlobalRef(string_class);
         }
     }
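Context for the JNI hunk: classes cached as global references stay pinned until explicitly released, so JNI_OnUnload must call DeleteGlobalRef or the GC can never collect them (JNI_NULL is assumed here to be the binding's own alias for a null reference). A minimal sketch of the pattern, not part of this commit:

    #include <jni.h>

    static jclass string_class = nullptr;

    void cacheClasses(JNIEnv* env) {
        jclass local = env->FindClass("java/lang/String");
        // Promote the local reference so it outlives this native frame.
        string_class = (jclass)env->NewGlobalRef(local);
        env->DeleteLocalRef(local);
    }

    void releaseClasses(JNIEnv* env) {
        if (string_class != nullptr) {
            env->DeleteGlobalRef(string_class); // lets the GC reclaim the class
            string_class = nullptr;
        }
    }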
@@ -59,11 +59,14 @@ else()
   set(ROCKSDB_LIBRARIES
     ${BINARY_DIR}/librocksdb.a)
 
+  ExternalProject_Get_Property(rocksdb SOURCE_DIR)
+  set (ROCKSDB_INCLUDE_DIR "${SOURCE_DIR}/include")
 
   set(ROCKSDB_FOUND TRUE)
 endif()
 
 message(STATUS "Found RocksDB library: ${ROCKSDB_LIBRARIES}")
-message(STATUS "Found RocksDB includes: ${ROCKSDB_INCLUDE_DIRS}")
+message(STATUS "Found RocksDB includes: ${ROCKSDB_INCLUDE_DIR}")
 
 mark_as_advanced(
   ROCKSDB_LIBRARIES
@@ -107,7 +107,9 @@ endif()
 ################################################################################
 
 set(SSD_ROCKSDB_EXPERIMENTAL OFF CACHE BOOL "Build with experimental RocksDB support")
-if (SSD_ROCKSDB_EXPERIMENTAL)
+# RocksDB is currently enabled by default for GCC but does not build with the latest
+# Clang.
+if (SSD_ROCKSDB_EXPERIMENTAL OR GCC)
   set(WITH_ROCKSDB_EXPERIMENTAL ON)
 else()
   set(WITH_ROCKSDB_EXPERIMENTAL OFF)
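With ROCKSDB_INCLUDE_DIR now exported from the ExternalProject, targets built under WITH_ROCKSDB_EXPERIMENTAL can resolve the library's headers. A minimal C++ sketch of what that enables (the path and options below are illustrative, not from this commit):

    #include <rocksdb/db.h> // found via ${ROCKSDB_INCLUDE_DIR}

    #include <cassert>

    int main() {
        rocksdb::DB* db = nullptr;
        rocksdb::Options options;
        options.create_if_missing = true;
        // "/tmp/rocksdb-demo" is an illustrative path, not one used by FDB.
        rocksdb::Status s = rocksdb::DB::Open(options, "/tmp/rocksdb-demo", &db);
        assert(s.ok());
        delete db; // closes the database
    }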
@@ -1,6 +1,5 @@
 #!/bin/bash
 SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
-pkill fdbserver
 ulimit -S -c unlimited
 
 unset FDB_NETWORK_OPTION_EXTERNAL_CLIENT_DIRECTORY
@@ -8,4 +7,4 @@ WORKDIR="$(pwd)/tmp/$$"
 if [ ! -d "${WORKDIR}" ] ; then
     mkdir -p "${WORKDIR}"
 fi
-DEBUGLEVEL=0 DISPLAYERROR=1 RANDOMTEST=1 WORKDIR="${WORKDIR}" FDBSERVERPORT="${PORT_FDBSERVER:-4500}" ${SCRIPTDIR}/bindingTestScript.sh 1
+DEBUGLEVEL=0 DISPLAYERROR=1 RANDOMTEST=1 WORKDIR="${WORKDIR}" ${SCRIPTDIR}/bindingTestScript.sh 1
@@ -7,7 +7,7 @@ SCRIPTID="${$}"
 SAVEONERROR="${SAVEONERROR:-1}"
 PYTHONDIR="${BINDIR}/tests/python"
 testScript="${BINDIR}/tests/bindingtester/run_binding_tester.sh"
-VERSION="1.6"
+VERSION="1.8"
 
 source ${SCRIPTDIR}/localClusterStart.sh
 
@@ -28,7 +28,7 @@ then
     echo "Log dir: ${LOGDIR}"
     echo "Python path: ${PYTHONDIR}"
     echo "Lib dir: ${LIBDIR}"
-    echo "Server port: ${FDBSERVERPORT}"
+    echo "Cluster String: ${CLUSTERSTRING}"
     echo "Script Id: ${SCRIPTID}"
     echo "Version: ${VERSION}"
 fi
@@ -36,6 +36,9 @@ fi
 # Begin the cluster using the logic in localClusterStart.sh.
 startCluster
 
+# Stop the cluster on exit
+trap "stopCluster" EXIT
+
 # Display user message
 if [ "${status}" -ne 0 ]; then
     :
@@ -58,8 +61,8 @@ fi
 # Display directory and log information, if an error occurred
 if [ "${status}" -ne 0 ]
 then
-    ls "${WORKDIR}" > "${LOGDIR}/dir.log"
-    ps -eafw > "${LOGDIR}/process-preclean.log"
+    ls "${WORKDIR}" &> "${LOGDIR}/dir.log"
+    ps -eafwH &> "${LOGDIR}/process-preclean.log"
     if [ -f "${FDBCONF}" ]; then
         cp -f "${FDBCONF}" "${LOGDIR}/"
     fi
@@ -71,10 +74,15 @@ fi
 
 # Save debug information files, environment, and log information, if an error occurred
 if [ "${status}" -ne 0 ] && [ "${SAVEONERROR}" -gt 0 ]; then
-    ps -eafw > "${LOGDIR}/process-exit.log"
-    netstat -na > "${LOGDIR}/netstat.log"
-    df -h > "${LOGDIR}/disk.log"
-    env > "${LOGDIR}/env.log"
+    ps -eafwH &> "${LOGDIR}/process-exit.log"
+    netstat -na &> "${LOGDIR}/netstat.log"
+    df -h &> "${LOGDIR}/disk.log"
+    env &> "${LOGDIR}/env.log"
+fi
 
+# Stop the cluster
+if stopCluster; then
+    unset FDBSERVERID
 fi
 
 exit "${status}"
@@ -5,15 +5,32 @@ WORKDIR="${WORKDIR:-${SCRIPTDIR}/tmp/fdb.work}"
 LOGDIR="${WORKDIR}/log"
 ETCDIR="${WORKDIR}/etc"
 BINDIR="${BINDIR:-${SCRIPTDIR}}"
-FDBSERVERPORT="${FDBSERVERPORT:-4500}"
+FDBPORTSTART="${FDBPORTSTART:-4000}"
+SERVERCHECKS="${SERVERCHECKS:-10}"
+CONFIGUREWAIT="${CONFIGUREWAIT:-240}"
 FDBCONF="${ETCDIR}/fdb.cluster"
 LOGFILE="${LOGFILE:-${LOGDIR}/startcluster.log}"
+AUDITCLUSTER="${AUDITCLUSTER:-0}"
+AUDITLOG="${AUDITLOG:-/tmp/audit-cluster.log}"
 
 # Initialize the variables
 status=0
 messagetime=0
 messagecount=0
 
+# Define a random ip address and port on localhost
+if [ -z ${IPADDRESS} ]; then
+    let index2="${RANDOM} % 256"
+    let index3="${RANDOM} % 256"
+    let index4="(${RANDOM} % 255) + 1"
+    IPADDRESS="127.${index2}.${index3}.${index4}"
+fi
+if [ -z ${FDBPORT} ]; then
+    let FDBPORT="(${RANDOM} % 1000) + ${FDBPORTSTART}"
+fi
+CLUSTERSTRING="${IPADDRESS}:${FDBPORT}"
 
 
 function log
 {
     local status=0
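The new variable block randomizes both the loopback address and the port so concurrent test clusters do not collide: every address in 127.0.0.0/8 is loopback, so any 127.x.y.z works, and the port lands in [FDBPORTSTART, FDBPORTSTART + 1000). A C++ sketch of the same derivation, for illustration only:

    #include <cstdio>
    #include <random>

    int main() {
        std::mt19937 gen{std::random_device{}()};
        std::uniform_int_distribution<int> octet(0, 255);
        std::uniform_int_distribution<int> last(1, 255);  // avoid a .0 host part
        std::uniform_int_distribution<int> offset(0, 999);

        const int portStart = 4000; // mirrors FDBPORTSTART's default
        std::printf("127.%d.%d.%d:%d\n", octet(gen), octet(gen), last(gen),
                    portStart + offset(gen));
    }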
@@ -92,7 +109,10 @@ function displayMessage
 }
 
 # Create the directories used by the server.
-function createDirectories {
+function createDirectories
+{
+    local status=0
 
     # Display user message
     if ! displayMessage "Creating directories"
     then
@@ -137,7 +157,10 @@ function createDirectories {
 }
 
 # Create a cluster file for the local cluster.
-function createClusterFile {
+function createClusterFile
+{
+    local status=0
 
     if [ "${status}" -ne 0 ]; then
         :
     # Display user message
@@ -148,7 +171,7 @@ function createClusterFile {
     else
         description=$(LC_CTYPE=C tr -dc A-Za-z0-9 < /dev/urandom 2> /dev/null | head -c 8)
         random_str=$(LC_CTYPE=C tr -dc A-Za-z0-9 < /dev/urandom 2> /dev/null | head -c 8)
-        echo "$description:$random_str@127.0.0.1:${FDBSERVERPORT}" > "${FDBCONF}"
+        echo "${description}:${random_str}@${CLUSTERSTRING}" > "${FDBCONF}"
     fi
 
     if [ "${status}" -ne 0 ]; then
@@ -161,8 +184,51 @@ function createClusterFile {
     return ${status}
 }
 
+# Stop the Cluster from running.
+function stopCluster
+{
+    local status=0
+
+    # Add an audit entry, if enabled
+    if [ "${AUDITCLUSTER}" -gt 0 ]; then
+        printf '%-15s (%6s) Stopping cluster %-20s (%6s): %s\n' "$(date +'%Y-%m-%d %H:%M:%S')" "${$}" "${CLUSTERSTRING}" "${FDBSERVERID}" >> "${AUDITLOG}"
+    fi
+    if [ -z "${FDBSERVERID}" ]; then
+        log 'FDB Server process is not defined'
+        let status="${status} + 1"
+    elif ! kill -0 "${FDBSERVERID}"; then
+        log "Failed to locate FDB Server process (${FDBSERVERID})"
+        let status="${status} + 1"
+    elif "${BINDIR}/fdbcli" -C "${FDBCONF}" --exec "kill; kill ${CLUSTERSTRING}; sleep 3" --timeout 120 &>> "${LOGDIR}/fdbcli-kill.log"
+    then
+        # Ensure that process is dead
+        if ! kill -0 "${FDBSERVERID}" 2> /dev/null; then
+            log "Killed cluster (${FDBSERVERID}) via cli"
+        elif ! kill -9 "${FDBSERVERID}"; then
+            log "Failed to kill FDB Server process (${FDBSERVERID}) via cli or kill command"
+            let status="${status} + 1"
+        else
+            log "Forcibly killed FDB Server process (${FDBSERVERID}) since cli failed"
+        fi
+    elif ! kill -9 "${FDBSERVERID}"; then
+        log "Failed to forcibly kill FDB Server process (${FDBSERVERID})"
+        let status="${status} + 1"
+    else
+        log "Forcibly killed FDB Server process (${FDBSERVERID})"
+    fi
+    return "${status}"
+}
+
 # Start the server running.
-function startFdbServer {
+function startFdbServer
+{
+    local status=0
+
+    # Add an audit entry, if enabled
+    if [ "${AUDITCLUSTER}" -gt 0 ]; then
+        printf '%-15s (%6s) Starting cluster %-20s\n' "$(date +'%Y-%m-%d %H:%M:%S')" "${$}" "${CLUSTERSTRING}" >> "${AUDITLOG}"
+    fi
+
     if [ "${status}" -ne 0 ]; then
         :
     elif ! displayMessage "Starting Fdb Server"
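stopCluster leans on `kill -0`, which sends no signal but reports whether the target process still exists and may be signaled. The same probe is available from C++ via kill(2); a minimal sketch, not part of this commit:

    #include <cerrno>
    #include <cstdio>
    #include <cstdlib>
    #include <signal.h>
    #include <unistd.h>

    // True if `pid` currently exists and we are allowed to signal it.
    bool processAlive(pid_t pid) {
        if (kill(pid, 0) == 0) return true;
        // ESRCH: no such process; EPERM: it exists but belongs to another user.
        return errno == EPERM;
    }

    int main(int argc, char** argv) {
        pid_t pid = argc > 1 ? (pid_t)std::atoi(argv[1]) : getpid();
        std::printf("%d %s\n", (int)pid, processAlive(pid) ? "alive" : "gone");
    }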
@@ -170,25 +236,34 @@ function startFdbServer {
         log 'Failed to display user message'
         let status="${status} + 1"
 
-    elif ! "${BINDIR}/fdbserver" -C "${FDBCONF}" -p "auto:${FDBSERVERPORT}" -L "${LOGDIR}" -d "${WORKDIR}/fdb/$$" &> "${LOGDIR}/fdbserver.log" &
+    else
+        "${BINDIR}/fdbserver" --knob_disable_posix_kernel_aio=1 -C "${FDBCONF}" -p "${CLUSTERSTRING}" -L "${LOGDIR}" -d "${WORKDIR}/fdb/${$}" &> "${LOGDIR}/fdbserver.log" &
+        fdbpid=$!
+        fdbrc=$?
+        if [ $fdbrc -ne 0 ]
         then
             log "Failed to start FDB Server"
-            # Maybe the server is already running
-            FDBSERVERID="$(pidof fdbserver)"
             let status="${status} + 1"
         else
-            FDBSERVERID="${!}"
+            FDBSERVERID="${fdbpid}"
+        fi
     fi
 
-    if ! kill -0 ${FDBSERVERID} ; then
-        log "FDB Server start failed."
+    if [ -z "${FDBSERVERID}" ]; then
+        log "FDB Server start failed because no process"
+        let status="${status} + 1"
+    elif ! kill -0 "${FDBSERVERID}" ; then
+        log "FDB Server start failed because process terminated unexpectedly"
         let status="${status} + 1"
     fi
 
     return ${status}
 }
 
-function getStatus {
+function getStatus
+{
+    local status=0
 
     if [ "${status}" -ne 0 ]; then
         :
     elif ! date &>> "${LOGDIR}/fdbclient.log"
@@ -209,35 +284,41 @@ function getStatus {
 }
 
 # Verify that the cluster is available.
-function verifyAvailable {
+function verifyAvailable
+{
+    local status=0
+
+    if [ -z "${FDBSERVERID}" ]; then
+        log "FDB Server process is not defined."
+        let status="${status} + 1"
     # Verify that the server is running.
-    if ! kill -0 "${FDBSERVERID}"
+    elif ! kill -0 "${FDBSERVERID}"
     then
         log "FDB server process (${FDBSERVERID}) is not running"
         let status="${status} + 1"
-        return 1
 
     # Display user message.
     elif ! displayMessage "Checking cluster availability"
     then
         log 'Failed to display user message'
         let status="${status} + 1"
-        return 1
 
     # Determine if status json says the database is available.
     else
-        avail=`"${BINDIR}/fdbcli" -C "${FDBCONF}" --exec 'status json' --timeout 10 2> /dev/null | grep -E '"database_available"|"available"' | grep 'true'`
+        avail=`"${BINDIR}/fdbcli" -C "${FDBCONF}" --exec 'status json' --timeout "${SERVERCHECKS}" 2> /dev/null | grep -E '"database_available"|"available"' | grep 'true'`
         log "Avail value: ${avail}" "${DEBUGLEVEL}"
         if [[ -n "${avail}" ]] ; then
-            return 0
+            :
         else
-            return 1
+            let status="${status} + 1"
         fi
     fi
+    return "${status}"
 }
 
 # Configure the database on the server.
-function createDatabase {
+function createDatabase
+{
+    local status=0
 
     if [ "${status}" -ne 0 ]; then
         :
     # Ensure that the server is running
|
||||||
|
|
||||||
# Configure the database.
|
# Configure the database.
|
||||||
else
|
else
|
||||||
"${BINDIR}/fdbcli" -C "${FDBCONF}" --exec 'configure new single memory; status' --timeout 240 --log --log-dir "${LOGDIR}" &>> "${LOGDIR}/fdbclient.log"
|
"${BINDIR}/fdbcli" -C "${FDBCONF}" --exec 'configure new single memory; status' --timeout "${CONFIGUREWAIT}" --log --log-dir "${LOGDIR}" &>> "${LOGDIR}/fdbclient.log"
|
||||||
|
|
||||||
if ! displayMessage "Checking if config succeeded"
|
if ! displayMessage "Checking if config succeeded"
|
||||||
then
|
then
|
||||||
|
@@ -270,7 +351,7 @@ function createDatabase {
         fi
 
         iteration=0
-        while [[ "${iteration}" -lt 10 ]] && ! verifyAvailable
+        while [[ "${iteration}" -lt "${SERVERCHECKS}" ]] && ! verifyAvailable
         do
             log "Database not created (iteration ${iteration})."
             let iteration="${iteration} + 1"
@@ -290,7 +371,10 @@ function createDatabase {
 }
 
 # Begin the local cluster from scratch.
-function startCluster {
+function startCluster
+{
+    local status=0
 
     if [ "${status}" -ne 0 ]; then
         :
     elif ! createDirectories
@@ -24,22 +24,22 @@ def parse_args():
 # (e)nd of a span with a better given name
 locationToPhase = {
     "NativeAPI.commit.Before": [],
-    "MasterProxyServer.batcher": [("b", "Commit")],
-    "MasterProxyServer.commitBatch.Before": [],
-    "MasterProxyServer.commitBatch.GettingCommitVersion": [("b", "CommitVersion")],
-    "MasterProxyServer.commitBatch.GotCommitVersion": [("e", "CommitVersion")],
+    "CommitProxyServer.batcher": [("b", "Commit")],
+    "CommitProxyServer.commitBatch.Before": [],
+    "CommitProxyServer.commitBatch.GettingCommitVersion": [("b", "CommitVersion")],
+    "CommitProxyServer.commitBatch.GotCommitVersion": [("e", "CommitVersion")],
     "Resolver.resolveBatch.Before": [("b", "Resolver.PipelineWait")],
     "Resolver.resolveBatch.AfterQueueSizeCheck": [],
     "Resolver.resolveBatch.AfterOrderer": [("e", "Resolver.PipelineWait"), ("b", "Resolver.Conflicts")],
     "Resolver.resolveBatch.After": [("e", "Resolver.Conflicts")],
-    "MasterProxyServer.commitBatch.AfterResolution": [("b", "Proxy.Processing")],
-    "MasterProxyServer.commitBatch.ProcessingMutations": [],
-    "MasterProxyServer.commitBatch.AfterStoreCommits": [("e", "Proxy.Processing")],
+    "CommitProxyServer.commitBatch.AfterResolution": [("b", "Proxy.Processing")],
+    "CommitProxyServer.commitBatch.ProcessingMutations": [],
+    "CommitProxyServer.commitBatch.AfterStoreCommits": [("e", "Proxy.Processing")],
     "TLog.tLogCommit.BeforeWaitForVersion": [("b", "TLog.PipelineWait")],
     "TLog.tLogCommit.Before": [("e", "TLog.PipelineWait")],
     "TLog.tLogCommit.AfterTLogCommit": [("b", "TLog.FSync")],
     "TLog.tLogCommit.After": [("e", "TLog.FSync")],
-    "MasterProxyServer.commitBatch.AfterLogPush": [("e", "Commit")],
+    "CommitProxyServer.commitBatch.AfterLogPush": [("e", "Commit")],
     "NativeAPI.commit.After": [],
 }
 
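The renamed locations keep the tool's convention: each trace location maps to zero or more ("b", name) / ("e", name) markers, and a matching begin/end pair bounds one latency span. A minimal C++ sketch of pairing such markers into durations (the event data is invented for illustration):

    #include <cstdio>
    #include <map>
    #include <string>
    #include <tuple>
    #include <vector>

    int main() {
        // (kind, span name, timestamp) events in commit order.
        std::vector<std::tuple<char, std::string, double>> events = {
            {'b', "Commit", 0.001}, {'b', "CommitVersion", 0.002},
            {'e', "CommitVersion", 0.004}, {'e', "Commit", 0.009},
        };

        std::map<std::string, double> open; // span name -> begin timestamp
        for (auto& [kind, name, ts] : events) {
            if (kind == 'b') {
                open[name] = ts;
            } else {
                std::printf("%s took %.3fs\n", name.c_str(), ts - open[name]);
                open.erase(name);
            }
        }
    }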
@@ -16,7 +16,7 @@ As an essential component of a database system, backup and restore is commonly u
 
 ## Background
 
-FDB backup system continuously scan the database’s key-value space, save key-value pairs and mutations at versions into range files and log files in blob storage. Specifically, mutation logs are generated at Proxy, and are written to transaction logs along with regular mutations. In production clusters like CK clusters, backup system is always on, which means each mutation is written twice to transaction logs, consuming about half of write bandwidth and about 40% of Proxy CPU time.
+FDB backup system continuously scan the database’s key-value space, save key-value pairs and mutations at versions into range files and log files in blob storage. Specifically, mutation logs are generated at CommitProxy, and are written to transaction logs along with regular mutations. In production clusters like CK clusters, backup system is always on, which means each mutation is written twice to transaction logs, consuming about half of write bandwidth and about 40% of CommitProxy CPU time.
 
 The design of old backup system is [here](https://github.com/apple/foundationdb/blob/master/design/backup.md), and the data format of range files and mutations files is [here](https://github.com/apple/foundationdb/blob/master/design/backup-dataFormat.md). The technical overview of FDB is [here](https://github.com/apple/foundationdb/wiki/Technical-Overview-of-the-Database). The FDB recovery is described in this [doc](https://github.com/apple/foundationdb/blob/master/design/recovery-internals.md).
 
@@ -37,7 +37,7 @@ The design of old backup system is [here](https://github.com/apple/foundationdb/
 
 Feature priorities: Feature 1, 2, 3, 4, 5 are must-have; Feature 6 is better to have.
 
-1. **Write bandwidth reduction by half**: removes the requirement to generate backup mutations at the Proxy, thus reduce TLog write bandwidth usage by half and significantly improve Proxy CPU usage;
+1. **Write bandwidth reduction by half**: removes the requirement to generate backup mutations at the CommitProxy, thus reduce TLog write bandwidth usage by half and significantly improve CommitProxy CPU usage;
 2. **Correctness**: The restored database must be consistent: each *restored* state (i.e., key-value pair) at a version `v` must match the original state at version `v`.
 3. **Performance**: The backup system should be performant, mostly measured as a small CPU overhead on transaction logs and backup workers. The version lag on backup workers is an indicator of performance.
 4. **Fault-tolerant**: The backup system should be fault-tolerant to node failures in the FDB cluster.
@@ -153,9 +153,9 @@ The requirement of the new backup system raises several design challenges:
 
 **Master**: The master is responsible for coordinating the transition of the FDB transaction sub-system from one generation to the next. In particular, the master recruits backup workers during the recovery.
 
-**Transaction Logs (TLogs)**: The transaction logs make mutations durable to disk for fast commit latencies. The logs receive commits from the proxy in version order, and only respond to the proxy once the data has been written and fsync'ed to an append only mutation log on disk. Storage servers retrieve mutations from TLogs. Once the storage servers have persisted mutations, storage servers then pop the mutations from the TLogs.
+**Transaction Logs (TLogs)**: The transaction logs make mutations durable to disk for fast commit latencies. The logs receive commits from the commit proxy in version order, and only respond to the commit proxy once the data has been written and fsync'ed to an append only mutation log on disk. Storage servers retrieve mutations from TLogs. Once the storage servers have persisted mutations, storage servers then pop the mutations from the TLogs.
 
-**Proxy**: The proxies are responsible for committing transactions, and tracking the storage servers responsible for each range of keys. In the old backup system, Proxies are responsible to group mutations into backup mutations and write them to the database.
+**CommitProxy**: The commit proxies are responsible for committing transactions, and tracking the storage servers responsible for each range of keys. In the old backup system, Proxies are responsible to group mutations into backup mutations and write them to the database.
 
+**GrvProxy**: The GRV proxies are responsible for providing read versions.
 ## System overview
@@ -40,7 +40,7 @@ FoundationDB may return the following error codes from API functions. If you nee
 +-----------------------------------------------+-----+--------------------------------------------------------------------------------+
 | external_client_already_loaded                | 1040| External client has already been loaded                                        |
 +-----------------------------------------------+-----+--------------------------------------------------------------------------------+
-| proxy_memory_limit_exceeded                   | 1042| Proxy commit memory limit exceeded                                             |
+| proxy_memory_limit_exceeded                   | 1042| CommitProxy commit memory limit exceeded                                       |
 +-----------------------------------------------+-----+--------------------------------------------------------------------------------+
 | batch_transaction_throttled                   | 1051| Batch GRV request rate limit exceeded                                          |
 +-----------------------------------------------+-----+--------------------------------------------------------------------------------+
@@ -104,7 +104,7 @@ Field Name                       Description
 ``Name for the snapshot file``   recommended name for the disk snapshot                   cluster-name:ip-addr:port:UID
 ================================ ======================================================== ========================================================
 
-``snapshot create binary`` will not be invoked on processes which does not have any persistent data (for example, Cluster Controller or Master or MasterProxy). Since these processes are stateless, there is no need for a snapshot. Any specialized configuration knobs used for one of these stateless processes need to be copied and restored externally.
+``snapshot create binary`` will not be invoked on processes which does not have any persistent data (for example, Cluster Controller or Master or CommitProxy). Since these processes are stateless, there is no need for a snapshot. Any specialized configuration knobs used for one of these stateless processes need to be copied and restored externally.
 
 Management of disk snapshots
 ----------------------------
@@ -27,7 +27,7 @@
          "storage",
          "transaction",
          "resolution",
-         "proxy",
+         "commit_proxy",
          "grv_proxy",
          "master",
          "test",
@@ -61,7 +61,7 @@
       "role":{
          "$enum":[
             "master",
-            "proxy",
+            "commit_proxy",
             "grv_proxy",
             "log",
             "storage",
@@ -447,7 +447,7 @@
    ],
    "recovery_state":{
       "required_resolvers":1,
-      "required_proxies":1,
+      "required_commit_proxies":1,
       "required_grv_proxies":1,
       "name":{ // "fully_recovered" is the healthy state; other states are normal to transition through but not to persist in
          "$enum":[
@@ -633,11 +633,11 @@
             "address":"10.0.4.1"
          }
       ],
-      "auto_proxies":3,
+      "auto_commit_proxies":3,
       "auto_resolvers":1,
       "auto_logs":3,
       "backup_worker_enabled":1,
-      "proxies":5 // this field will be absent if a value has not been explicitly set
+      "commit_proxies":5 // this field will be absent if a value has not been explicitly set
    },
    "data":{
       "least_operating_space_bytes_log_server":0,
@@ -5,6 +5,8 @@ Release Notes
 6.3.5
 =====
 
+* Report missing old tlogs information when in recovery before storage servers are fully recovered. `(PR #3706) <https://github.com/apple/foundationdb/pull/3706>`_
+
 Features
 --------
 
@@ -117,7 +117,7 @@ LineNoise::LineNoise(
         Hint h = onMainThread( [line]() -> Future<Hint> {
             return hint_callback(line);
         }).getBlocking();
-        if (!h.valid) return NULL;
+        if (!h.valid) return nullptr;
         *color = h.color;
         *bold = h.bold;
         return strdup( h.text.c_str() );
@@ -20,6 +20,7 @@
 
 #include "boost/lexical_cast.hpp"
 #include "fdbclient/NativeAPI.actor.h"
+#include "fdbclient/FDBTypes.h"
 #include "fdbclient/Status.h"
 #include "fdbclient/StatusClient.h"
 #include "fdbclient/DatabaseContext.h"
@@ -102,7 +103,7 @@ CSimpleOpt::SOption g_rgOptions[] = { { OPT_CONNFILE, "-C", SO_REQ_SEP },
 void printAtCol(const char* text, int col) {
     const char* iter = text;
     const char* start = text;
-    const char* space = NULL;
+    const char* space = nullptr;
 
     do {
         iter++;
@@ -112,7 +113,7 @@ void printAtCol(const char* text, int col) {
             printf("%.*s\n", (int)(space - start), start);
             start = space;
             if (*start == ' ' || *start == '\n') start++;
-            space = NULL;
+            space = nullptr;
         }
     } while (*iter);
 }
@@ -120,7 +121,7 @@ void printAtCol(const char* text, int col) {
 std::string lineWrap(const char* text, int col) {
     const char* iter = text;
     const char* start = text;
-    const char* space = NULL;
+    const char* space = nullptr;
     std::string out = "";
     do {
         iter++;
@@ -130,7 +131,7 @@ std::string lineWrap(const char* text, int col) {
             out += format("%.*s\n", (int)(space - start), start);
             start = space;
             if (*start == ' '/* || *start == '\n'*/) start++;
-            space = NULL;
+            space = nullptr;
         }
     } while (*iter);
     return out;
@@ -470,8 +471,8 @@ void initHelp() {
     "All keys between BEGINKEY (inclusive) and ENDKEY (exclusive) are cleared from the database. This command will succeed even if the specified range is empty, but may fail because of conflicts." ESCAPINGK);
 helpMap["configure"] = CommandHelp(
     "configure [new] "
-    "<single|double|triple|three_data_hall|three_datacenter|ssd|memory|memory-radixtree-beta|proxies=<PROXIES>|grv_"
-    "proxies=<GRV_PROXIES>|logs=<LOGS>|resolvers=<RESOLVERS>>*",
+    "<single|double|triple|three_data_hall|three_datacenter|ssd|memory|memory-radixtree-beta|commit_proxies=<"
+    "COMMIT_PROXIES>|grv_proxies=<GRV_PROXIES>|logs=<LOGS>|resolvers=<RESOLVERS>>*",
     "change the database configuration",
     "The `new' option, if present, initializes a new database with the given configuration rather than changing "
     "the configuration of an existing one. When used, both a redundancy mode and a storage engine must be "
@@ -479,13 +480,14 @@ void initHelp() {
     "of data (survive one failure).\n triple - three copies of data (survive two failures).\n three_data_hall - "
     "See the Admin Guide.\n three_datacenter - See the Admin Guide.\n\nStorage engine:\n ssd - B-Tree storage "
     "engine optimized for solid state disks.\n memory - Durable in-memory storage engine for small "
-    "datasets.\n\nproxies=<PROXIES>: Sets the desired number of proxies in the cluster. Must be at least 1, or set "
-    "to -1 which restores the number of proxies to the default value.\n\ngrv_proxies=<GRV_PROXIES>: Sets the "
-    "desired number of GRV proxies in the cluster. Must be at least 1, or set to -1 which restores the number of "
-    "proxies to the default value.\n\nlogs=<LOGS>: Sets the desired number of log servers in the cluster. Must be "
-    "at least 1, or set to -1 which restores the number of logs to the default value.\n\nresolvers=<RESOLVERS>: "
-    "Sets the desired number of resolvers in the cluster. Must be at least 1, or set to -1 which restores the "
-    "number of resolvers to the default value.\n\nSee the FoundationDB Administration Guide for more information.");
+    "datasets.\n\ncommit_proxies=<COMMIT_PROXIES>: Sets the desired number of commit proxies in the cluster. Must "
+    "be at least 1, or set to -1 which restores the number of commit proxies to the default "
+    "value.\n\ngrv_proxies=<GRV_PROXIES>: Sets the desired number of GRV proxies in the cluster. Must be at least "
+    "1, or set to -1 which restores the number of GRV proxies to the default value.\n\nlogs=<LOGS>: Sets the "
+    "desired number of log servers in the cluster. Must be at least 1, or set to -1 which restores the number of "
+    "logs to the default value.\n\nresolvers=<RESOLVERS>: Sets the desired number of resolvers in the cluster. "
+    "Must be at least 1, or set to -1 which restores the number of resolvers to the default value.\n\nSee the "
+    "FoundationDB Administration Guide for more information.");
 helpMap["fileconfigure"] = CommandHelp(
     "fileconfigure [new] <FILENAME>",
     "change the database configuration from a file",
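For reference, the renamed knob surfaces at the fdbcli prompt as, for example, `configure commit_proxies=5 grv_proxies=2 logs=8 resolvers=2` (an illustrative combination, not taken from this commit), and `configure commit_proxies=-1` restores the default count, matching the help text above.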
@@ -871,10 +873,11 @@ void printStatus(StatusObjectReader statusObj, StatusClient::StatusLevel level,
         fatalRecoveryState = true;
 
         if (name == "recruiting_transaction_servers") {
-            description += format("\nNeed at least %d log servers across unique zones, %d proxies, "
+            description +=
+                format("\nNeed at least %d log servers across unique zones, %d commit proxies, "
                        "%d GRV proxies and %d resolvers.",
                        recoveryState["required_logs"].get_int(),
-                       recoveryState["required_proxies"].get_int(),
+                       recoveryState["required_commit_proxies"].get_int(),
                        recoveryState["required_grv_proxies"].get_int(),
                        recoveryState["required_resolvers"].get_int());
             if (statusObjCluster.has("machines") && statusObjCluster.has("processes")) {
@@ -1026,8 +1029,8 @@ void printStatus(StatusObjectReader statusObj, StatusClient::StatusLevel level,
         outputString += format("\n Exclusions - %d (type `exclude' for details)", excludedServersArr.size());
     }
 
-    if (statusObjConfig.get("proxies", intVal))
-        outputString += format("\n Desired Proxies - %d", intVal);
+    if (statusObjConfig.get("commit_proxies", intVal))
+        outputString += format("\n Desired Commit Proxies - %d", intVal);
 
     if (statusObjConfig.get("grv_proxies", intVal))
         outputString += format("\n Desired GRV Proxies - %d", intVal);
@@ -1233,14 +1236,54 @@ void printStatus(StatusObjectReader statusObj, StatusClient::StatusLevel level,
 
     int minLoss = std::min(availLoss, dataLoss);
     const char *faultDomain = machinesAreZones ? "machine" : "zone";
-    if (minLoss == 1)
-        outputString += format("1 %s", faultDomain);
-    else
-        outputString += format("%d %ss", minLoss, faultDomain);
+    outputString += format("%d %ss", minLoss, faultDomain);
 
     if (dataLoss > availLoss){
         outputString += format(" (%d without data loss)", dataLoss);
     }
 
+    if (dataLoss == -1) {
+        ASSERT_WE_THINK(availLoss == -1);
+        outputString += format(
+            "\n\n Warning: the database may have data loss and availability loss. Please restart "
+            "following tlog interfaces, otherwise storage servers may never be able to catch "
+            "up.\n");
+        StatusObjectReader logs;
+        if (statusObjCluster.has("logs")) {
+            for (StatusObjectReader logEpoch : statusObjCluster.last().get_array()) {
+                bool possiblyLosingData;
+                if (logEpoch.get("possibly_losing_data", possiblyLosingData) &&
+                    !possiblyLosingData) {
+                    continue;
+                }
+                // Current epoch doesn't have an end version.
+                int64_t epoch, beginVersion, endVersion = invalidVersion;
+                bool current;
+                logEpoch.get("epoch", epoch);
+                logEpoch.get("begin_version", beginVersion);
+                logEpoch.get("end_version", endVersion);
+                logEpoch.get("current", current);
+                std::string missing_log_interfaces;
+                if (logEpoch.has("log_interfaces")) {
+                    for (StatusObjectReader logInterface : logEpoch.last().get_array()) {
+                        bool healthy;
+                        std::string address, id;
+                        if (logInterface.get("healthy", healthy) && !healthy) {
+                            logInterface.get("id", id);
+                            logInterface.get("address", address);
+                            missing_log_interfaces += format("%s,%s ", id.c_str(), address.c_str());
+                        }
+                    }
+                }
+                outputString += format(
+                    " %s log epoch: %ld begin: %ld end: %s, missing "
+                    "log interfaces(id,address): %s\n",
+                    current ? "Current" : "Old", epoch, beginVersion,
+                    endVersion == invalidVersion ? "(unknown)" : format("%ld", endVersion).c_str(),
+                    missing_log_interfaces.c_str());
+            }
+        }
+    }
 }
 }
 
@@ -1790,14 +1833,14 @@ ACTOR Future<bool> configure( Database db, std::vector<StringRef> tokens, Refere
 
     bool noChanges = conf.get().old_replication == conf.get().auto_replication &&
                      conf.get().old_logs == conf.get().auto_logs &&
-                     conf.get().old_proxies == conf.get().auto_proxies &&
+                     conf.get().old_commit_proxies == conf.get().auto_commit_proxies &&
                      conf.get().old_grv_proxies == conf.get().auto_grv_proxies &&
                      conf.get().old_resolvers == conf.get().auto_resolvers &&
                      conf.get().old_processes_with_transaction == conf.get().auto_processes_with_transaction &&
                      conf.get().old_machines_with_transaction == conf.get().auto_machines_with_transaction;
 
     bool noDesiredChanges = noChanges && conf.get().old_logs == conf.get().desired_logs &&
-                            conf.get().old_proxies == conf.get().desired_proxies &&
+                            conf.get().old_commit_proxies == conf.get().desired_commit_proxies &&
                             conf.get().old_grv_proxies == conf.get().desired_grv_proxies &&
                             conf.get().old_resolvers == conf.get().desired_resolvers;
 
@@ -1816,8 +1859,11 @@ ACTOR Future<bool> configure( Database db, std::vector<StringRef> tokens, Refere
     outputString += format("| replication | %16s | %16s |\n", conf.get().old_replication.c_str(), conf.get().auto_replication.c_str());
     outputString += format("| logs | %16d | %16d |", conf.get().old_logs, conf.get().auto_logs);
     outputString += conf.get().auto_logs != conf.get().desired_logs ? format(" (manually set; would be %d)\n", conf.get().desired_logs) : "\n";
-    outputString += format("| proxies | %16d | %16d |", conf.get().old_proxies, conf.get().auto_proxies);
-    outputString += conf.get().auto_proxies != conf.get().desired_proxies ? format(" (manually set; would be %d)\n", conf.get().desired_proxies) : "\n";
+    outputString += format("| commit_proxies | %16d | %16d |", conf.get().old_commit_proxies,
+                           conf.get().auto_commit_proxies);
+    outputString += conf.get().auto_commit_proxies != conf.get().desired_commit_proxies
+                        ? format(" (manually set; would be %d)\n", conf.get().desired_commit_proxies)
+                        : "\n";
     outputString += format("| grv_proxies | %16d | %16d |", conf.get().old_grv_proxies,
                            conf.get().auto_grv_proxies);
     outputString += conf.get().auto_grv_proxies != conf.get().desired_grv_proxies
@@ -2472,7 +2518,7 @@ void compGenerator(const char* text, bool help, std::vector<std::string>& lc) {
     std::map<std::string, CommandHelp>::const_iterator iter;
     int len = strlen(text);
 
-    const char* helpExtra[] = {"escaping", "options", NULL};
+    const char* helpExtra[] = {"escaping", "options", nullptr};
 
     const char** he = helpExtra;
 
@@ -2531,11 +2577,24 @@ void onOffGenerator(const char* text, const char *line, std::vector<std::string>
 }
 
 void configureGenerator(const char* text, const char *line, std::vector<std::string>& lc) {
-    const char* opts[] = {
-        "new", "single", "double", "triple", "three_data_hall", "three_datacenter", "ssd",
-        "ssd-1", "ssd-2", "memory", "memory-1", "memory-2", "memory-radixtree-beta", "proxies=",
-        "grv_proxies=", "logs=", "resolvers=", nullptr
-    };
+    const char* opts[] = { "new",
+                           "single",
+                           "double",
+                           "triple",
+                           "three_data_hall",
+                           "three_datacenter",
+                           "ssd",
+                           "ssd-1",
+                           "ssd-2",
+                           "memory",
+                           "memory-1",
+                           "memory-2",
+                           "memory-radixtree-beta",
+                           "commit_proxies=",
+                           "grv_proxies=",
+                           "logs=",
+                           "resolvers=",
+                           nullptr };
     arrayGenerator(text, line, opts, lc);
 }
 
@@ -2973,7 +3032,7 @@ ACTOR Future<int> cli(CLIOptions opt, LineNoise* plinenoise) {
     .detail("SourceVersion", getSourceVersion())
     .detail("Version", FDB_VT_VERSION)
     .detail("PackageName", FDB_VT_PACKAGE_NAME)
-    .detailf("ActualTime", "%lld", DEBUG_DETERMINISM ? 0 : time(NULL))
+    .detailf("ActualTime", "%lld", DEBUG_DETERMINISM ? 0 : time(nullptr))
     .detail("ClusterFile", ccf->getFilename().c_str())
     .detail("ConnectionString", ccf->getConnectionString().toString())
     .setMaxFieldLength(10000)
@@ -4548,7 +4607,7 @@ int main(int argc, char **argv) {
     sigemptyset( &act.sa_mask );
     act.sa_flags = 0;
     act.sa_handler = SIG_IGN;
-    sigaction(SIGINT, &act, NULL);
+    sigaction(SIGINT, &act, nullptr);
 #endif
 
     CLIOptions opt(argc, argv);
@@ -59,7 +59,7 @@ public:
     virtual void delref() { ReferenceCounted<AsyncFileBlobStoreWrite>::delref(); }
 
     struct Part : ReferenceCounted<Part> {
-        Part(int n, int minSize) : number(n), writer(content.getWriteBuffer(minSize), NULL, Unversioned()), length(0) {
+        Part(int n, int minSize) : number(n), writer(content.getWriteBuffer(minSize), nullptr, Unversioned()), length(0) {
             etag = std::string();
             ::MD5_Init(&content_md5_buf);
         }
@@ -958,5 +958,7 @@ Value makePadding(int size);
 ACTOR Future<Void> transformRestoredDatabase(Database cx, Standalone<VectorRef<KeyRangeRef>> backupRanges,
                                              Key addPrefix, Key removePrefix);
 
+void simulateBlobFailure();
+
 #include "flow/unactorcompiler.h"
 #endif
@@ -1343,20 +1343,45 @@ public:
 
 	ACTOR static Future<KeyRange> getSnapshotFileKeyRange_impl(Reference<BackupContainerFileSystem> bc,
 	                                                           RangeFile file) {
-		state Reference<IAsyncFile> inFile = wait(bc->readFile(file.fileName));
+		state int readFileRetries = 0;
 		state bool beginKeySet = false;
 		state Key beginKey;
 		state Key endKey;
+		loop {
+			try {
+				state Reference<IAsyncFile> inFile = wait(bc->readFile(file.fileName));
+				beginKeySet = false;
 				state int64_t j = 0;
 				for (; j < file.fileSize; j += file.blockSize) {
 					int64_t len = std::min<int64_t>(file.blockSize, file.fileSize - j);
-					Standalone<VectorRef<KeyValueRef>> blockData = wait(fileBackup::decodeRangeFileBlock(inFile, j, len));
+					Standalone<VectorRef<KeyValueRef>> blockData =
+					    wait(fileBackup::decodeRangeFileBlock(inFile, j, len));
 					if (!beginKeySet) {
 						beginKey = blockData.front().key;
 						beginKeySet = true;
 					}
 					endKey = blockData.back().key;
 				}
+				break;
+			} catch (Error& e) {
+				if (e.code() == error_code_restore_bad_read ||
+				    e.code() == error_code_restore_unsupported_file_version ||
+				    e.code() == error_code_restore_corrupted_data_padding) { // non-retriable errors
+					TraceEvent(SevError, "BackupContainerGetSnapshotFileKeyRange").error(e);
+					throw;
+				} else if (e.code() == error_code_http_request_failed || e.code() == error_code_connection_failed ||
+				           e.code() == error_code_timed_out || e.code() == error_code_lookup_failed) {
+					// blob http request failure, retry
+					TraceEvent(SevWarnAlways, "BackupContainerGetSnapshotFileKeyRangeConnectionFailure")
+					    .detail("Retries", ++readFileRetries)
+					    .error(e);
+					wait(delayJittered(0.1));
+				} else {
+					TraceEvent(SevError, "BackupContainerGetSnapshotFileKeyRangeUnexpectedError").error(e);
+					throw;
+				}
+			}
+		}
 		return KeyRange(KeyRangeRef(beginKey, endKey));
 	}
 
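The hunk above turns a single-shot read into a retry loop: decode failures (restore_bad_read, restore_unsupported_file_version, restore_corrupted_data_padding) and unexpected errors are rethrown, while transient blob/network errors are retried after a short jittered delay. A minimal, self-contained sketch of that policy in plain C++ follows; TransientError, retryWithJitter, and the maxRetries cap are hypothetical stand-ins (the actor above retries indefinitely):

    #include <chrono>
    #include <random>
    #include <stdexcept>
    #include <thread>

    // Stand-in for the retriable error codes (http_request_failed, etc.).
    struct TransientError : std::runtime_error {
        using std::runtime_error::runtime_error;
    };

    // Retry fn() on TransientError after a jittered pause; rethrow anything else.
    template <typename F>
    auto retryWithJitter(F fn, int maxRetries = 10) -> decltype(fn()) {
        std::mt19937 rng{std::random_device{}()};
        std::uniform_real_distribution<double> jitter(0.05, 0.15); // ~0.1s, like delayJittered(0.1)
        for (int retries = 0;; ++retries) {
            try {
                return fn(); // success exits the loop, like the `break` above
            } catch (const TransientError&) {
                if (retries >= maxRetries) throw; // sketch-only cap
                std::this_thread::sleep_for(std::chrono::duration<double>(jitter(rng)));
            }
            // Any other exception escapes immediately, mirroring the
            // non-retriable and unexpected-error branches.
        }
    }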
@@ -277,7 +277,7 @@ ACTOR Future<bool> bucketExists_impl(Reference<BlobStoreEndpoint> b, std::string
 	std::string resource = std::string("/") + bucket;
 	HTTP::Headers headers;
 
-	Reference<HTTP::Response> r = wait(b->doRequest("HEAD", resource, headers, NULL, 0, {200, 404}));
+	Reference<HTTP::Response> r = wait(b->doRequest("HEAD", resource, headers, nullptr, 0, {200, 404}));
 	return r->code == 200;
 }
 
@@ -291,7 +291,7 @@ ACTOR Future<bool> objectExists_impl(Reference<BlobStoreEndpoint> b, std::string
 	std::string resource = std::string("/") + bucket + "/" + object;
 	HTTP::Headers headers;
 
-	Reference<HTTP::Response> r = wait(b->doRequest("HEAD", resource, headers, NULL, 0, {200, 404}));
+	Reference<HTTP::Response> r = wait(b->doRequest("HEAD", resource, headers, nullptr, 0, {200, 404}));
 	return r->code == 200;
 }
 
@@ -305,7 +305,7 @@ ACTOR Future<Void> deleteObject_impl(Reference<BlobStoreEndpoint> b, std::string
 	std::string resource = std::string("/") + bucket + "/" + object;
 	HTTP::Headers headers;
 	// 200 or 204 means object successfully deleted, 404 means it already doesn't exist, so any of those are considered successful
-	Reference<HTTP::Response> r = wait(b->doRequest("DELETE", resource, headers, NULL, 0, {200, 204, 404}));
+	Reference<HTTP::Response> r = wait(b->doRequest("DELETE", resource, headers, nullptr, 0, {200, 204, 404}));
 
 	// But if the object already did not exist then the 'delete' is assumed to be successful but a warning is logged.
 	if(r->code == 404) {
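The accepted-code set {200, 204, 404} above is what makes deletion idempotent: a retried DELETE that races an earlier success sees 404 and is still treated as success. A one-line sketch of that convention (a hedged illustration, not the library's API):

    // 200/204: deleted now; 404: already gone (e.g. a prior retry won).
    bool deleteSucceeded(int httpCode) {
        return httpCode == 200 || httpCode == 204 || httpCode == 404;
    }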
@@ -386,7 +386,7 @@ ACTOR Future<Void> createBucket_impl(Reference<BlobStoreEndpoint> b, std::string
 	if(!exists) {
 		std::string resource = std::string("/") + bucket;
 		HTTP::Headers headers;
-		Reference<HTTP::Response> r = wait(b->doRequest("PUT", resource, headers, NULL, 0, {200, 409}));
+		Reference<HTTP::Response> r = wait(b->doRequest("PUT", resource, headers, nullptr, 0, {200, 409}));
 	}
 	return Void();
 }
 
@@ -401,7 +401,7 @@ ACTOR Future<int64_t> objectSize_impl(Reference<BlobStoreEndpoint> b, std::strin
 	std::string resource = std::string("/") + bucket + "/" + object;
 	HTTP::Headers headers;
 
-	Reference<HTTP::Response> r = wait(b->doRequest("HEAD", resource, headers, NULL, 0, {200, 404}));
+	Reference<HTTP::Response> r = wait(b->doRequest("HEAD", resource, headers, nullptr, 0, {200, 404}));
 	if(r->code == 404)
 		throw file_not_found();
 	return r->contentLen;
@@ -737,7 +737,7 @@ ACTOR Future<Void> listObjectsStream_impl(Reference<BlobStoreEndpoint> bstore, s
 			HTTP::Headers headers;
 			state std::string fullResource = resource + HTTP::urlEncode(lastFile);
 			lastFile.clear();
-			Reference<HTTP::Response> r = wait(bstore->doRequest("GET", fullResource, headers, NULL, 0, {200}));
+			Reference<HTTP::Response> r = wait(bstore->doRequest("GET", fullResource, headers, nullptr, 0, {200}));
 			listReleaser.release();
 
 			try {
 
@@ -782,7 +782,7 @@ ACTOR Future<Void> listObjectsStream_impl(Reference<BlobStoreEndpoint> bstore, s
 					if(size == nullptr) {
 						throw http_bad_response();
 					}
-					object.size = strtoull(size->value(), NULL, 10);
+					object.size = strtoull(size->value(), nullptr, 10);
 
 					listResult.objects.push_back(object);
 				}
@@ -893,7 +893,7 @@ ACTOR Future<std::vector<std::string>> listBuckets_impl(Reference<BlobStoreEndpo
 
 			HTTP::Headers headers;
 			state std::string fullResource = resource + HTTP::urlEncode(lastName);
-			Reference<HTTP::Response> r = wait(bstore->doRequest("GET", fullResource, headers, NULL, 0, {200}));
+			Reference<HTTP::Response> r = wait(bstore->doRequest("GET", fullResource, headers, nullptr, 0, {200}));
 			listReleaser.release();
 
 			try {
 
@@ -1024,7 +1024,7 @@ ACTOR Future<std::string> readEntireFile_impl(Reference<BlobStoreEndpoint> bstor
 
 	std::string resource = std::string("/") + bucket + "/" + object;
 	HTTP::Headers headers;
-	Reference<HTTP::Response> r = wait(bstore->doRequest("GET", resource, headers, NULL, 0, {200, 404}));
+	Reference<HTTP::Response> r = wait(bstore->doRequest("GET", resource, headers, nullptr, 0, {200, 404}));
 	if(r->code == 404)
 		throw file_not_found();
 	return r->content;
@@ -1057,7 +1057,7 @@ ACTOR Future<Void> writeEntireFileFromBuffer_impl(Reference<BlobStoreEndpoint> b
 
 ACTOR Future<Void> writeEntireFile_impl(Reference<BlobStoreEndpoint> bstore, std::string bucket, std::string object, std::string content) {
 	state UnsentPacketQueue packets;
-	PacketWriter pw(packets.getWriteBuffer(content.size()), NULL, Unversioned());
+	PacketWriter pw(packets.getWriteBuffer(content.size()), nullptr, Unversioned());
 	pw.serializeBytes(content);
 	if(content.size() > bstore->knobs.multipart_max_part_size)
 		throw file_too_large();
 
@@ -1095,7 +1095,7 @@ ACTOR Future<int> readObject_impl(Reference<BlobStoreEndpoint> bstore, std::stri
 	std::string resource = std::string("/") + bucket + "/" + object;
 	HTTP::Headers headers;
 	headers["Range"] = format("bytes=%lld-%lld", offset, offset + length - 1);
-	Reference<HTTP::Response> r = wait(bstore->doRequest("GET", resource, headers, NULL, 0, {200, 206, 404}));
+	Reference<HTTP::Response> r = wait(bstore->doRequest("GET", resource, headers, nullptr, 0, {200, 206, 404}));
 	if(r->code == 404)
 		throw file_not_found();
 	if(r->contentLen != r->content.size()) // Double check that this wasn't a header-only response, probably unnecessary
@@ -1114,7 +1114,7 @@ ACTOR static Future<std::string> beginMultiPartUpload_impl(Reference<BlobStoreEn
 
 	std::string resource = std::string("/") + bucket + "/" + object + "?uploads";
 	HTTP::Headers headers;
-	Reference<HTTP::Response> r = wait(bstore->doRequest("POST", resource, headers, NULL, 0, {200}));
+	Reference<HTTP::Response> r = wait(bstore->doRequest("POST", resource, headers, nullptr, 0, {200}));
 
 	try {
 		xml_document<> doc;
 
@@ -1180,7 +1180,7 @@ ACTOR Future<Void> finishMultiPartUpload_impl(Reference<BlobStoreEndpoint> bstor
 
 	std::string resource = format("/%s/%s?uploadId=%s", bucket.c_str(), object.c_str(), uploadID.c_str());
 	HTTP::Headers headers;
-	PacketWriter pw(part_list.getWriteBuffer(manifest.size()), NULL, Unversioned());
+	PacketWriter pw(part_list.getWriteBuffer(manifest.size()), nullptr, Unversioned());
 	pw.serializeBytes(manifest);
 	Reference<HTTP::Response> r = wait(bstore->doRequest("POST", resource, headers, &part_list, manifest.size(), {200}));
 	// TODO: In the event that the client times out just before the request completes (so the client is unaware) then the next retry
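These two hunks touch the multipart-upload protocol: beginMultiPartUpload POSTs to "?uploads" and parses the upload id out of the XML response, and finishMultiPartUpload POSTs a manifest of the uploaded parts to "?uploadId=...". As a hedged sketch, assuming the S3-style CompleteMultipartUpload format (the manifest's construction is not shown in this diff), each part number is paired with the ETag returned when that part was uploaded:

    #include <map>
    #include <sstream>
    #include <string>

    // Hypothetical helper: build an S3-style completion manifest from the
    // etags collected while uploading parts. The exact format is an assumption.
    std::string buildManifest(const std::map<int, std::string>& etagByPart) {
        std::ostringstream xml;
        xml << "<CompleteMultipartUpload>";
        for (const auto& [part, etag] : etagByPart)
            xml << "<Part><PartNumber>" << part << "</PartNumber><ETag>" << etag << "</ETag></Part>";
        xml << "</CompleteMultipartUpload>";
        return xml.str();
    }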
@@ -33,7 +33,7 @@ set(FDBCLIENT_SRCS
   Knobs.h
   ManagementAPI.actor.cpp
   ManagementAPI.actor.h
-  MasterProxyInterface.h
+  CommitProxyInterface.h
   MetricLogger.actor.cpp
   MetricLogger.h
   MonitorLeader.actor.cpp
@@ -25,7 +25,7 @@
 #include "fdbclient/FDBTypes.h"
 #include "fdbrpc/FailureMonitor.h"
 #include "fdbclient/Status.h"
-#include "fdbclient/MasterProxyInterface.h"
+#include "fdbclient/CommitProxyInterface.h"
 
 // Streams from WorkerInterface that are safe and useful to call from a client.
 // A ClientWorkerInterface is embedded as the first element of a WorkerInterface.
 
@@ -25,7 +25,7 @@
 #include "fdbclient/FDBTypes.h"
 #include "fdbrpc/FailureMonitor.h"
 #include "fdbclient/Status.h"
-#include "fdbclient/MasterProxyInterface.h"
+#include "fdbclient/CommitProxyInterface.h"
 #include "fdbclient/ClientWorkerInterface.h"
 
 struct ClusterInterface {
@@ -1,6 +1,6 @@
 
 /*
- * MasterProxyInterface.h
+ * CommitProxyInterface.h
  *
  * This source file is part of the FoundationDB open source project
  *
 
@@ -19,8 +19,8 @@
  * limitations under the License.
  */
 
-#ifndef FDBCLIENT_MASTERPROXYINTERFACE_H
-#define FDBCLIENT_MASTERPROXYINTERFACE_H
+#ifndef FDBCLIENT_COMMITPROXYINTERFACE_H
+#define FDBCLIENT_COMMITPROXYINTERFACE_H
 #pragma once
 
 #include <utility>
@@ -36,7 +36,7 @@
 #include "fdbrpc/TimedRequest.h"
 #include "GrvProxyInterface.h"
 
-struct MasterProxyInterface {
+struct CommitProxyInterface {
 	constexpr static FileIdentifier file_identifier = 8954922;
 	enum { LocationAwareLoadBalance = 1 };
 	enum { AlwaysFresh = 1 };
 
@@ -59,8 +59,8 @@ struct MasterProxyInterface {
 
 	UID id() const { return commit.getEndpoint().token; }
 	std::string toString() const { return id().shortString(); }
-	bool operator == (MasterProxyInterface const& r) const { return id() == r.id(); }
-	bool operator != (MasterProxyInterface const& r) const { return id() != r.id(); }
+	bool operator==(CommitProxyInterface const& r) const { return id() == r.id(); }
+	bool operator!=(CommitProxyInterface const& r) const { return id() != r.id(); }
 	NetworkAddress address() const { return commit.getEndpoint().getPrimaryAddress(); }
 
 	template <class Archive>
@@ -100,9 +100,10 @@ struct MasterProxyInterface {
 struct ClientDBInfo {
 	constexpr static FileIdentifier file_identifier = 5355080;
 	UID id; // Changes each time anything else changes
-	vector< GrvProxyInterface > grvProxies;
-	vector< MasterProxyInterface > masterProxies;
-	Optional<MasterProxyInterface> firstProxy; //not serialized, used for commitOnFirstProxy when the proxies vector has been shrunk
+	vector<GrvProxyInterface> grvProxies;
+	vector<CommitProxyInterface> commitProxies;
+	Optional<CommitProxyInterface>
+	    firstCommitProxy; // not serialized, used for commitOnFirstProxy when the commit proxies vector has been shrunk
 	double clientTxnInfoSampleRate;
 	int64_t clientTxnInfoSizeLimit;
 	Optional<Value> forward;
 
@@ -122,7 +123,7 @@ struct ClientDBInfo {
 		if constexpr (!is_fb_function<Archive>) {
 			ASSERT(ar.protocolVersion().isValid());
 		}
-		serializer(ar, grvProxies, masterProxies, id, clientTxnInfoSampleRate, clientTxnInfoSizeLimit, forward,
+		serializer(ar, grvProxies, commitProxies, id, clientTxnInfoSampleRate, clientTxnInfoSizeLimit, forward,
 		           transactionTagSampleRate, transactionTagSampleCost);
 	}
 };
@@ -25,7 +25,7 @@
 #include "fdbclient/FDBTypes.h"
 #include "fdbrpc/fdbrpc.h"
 #include "fdbrpc/Locality.h"
-#include "fdbclient/MasterProxyInterface.h"
+#include "fdbclient/CommitProxyInterface.h"
 #include "fdbclient/ClusterInterface.h"
 
 const int MAX_CLUSTER_FILE_BYTES = 60000;
@@ -29,12 +29,12 @@ DatabaseConfiguration::DatabaseConfiguration()
 void DatabaseConfiguration::resetInternal() {
 	// does NOT reset rawConfiguration
 	initialized = false;
-	proxyCount = grvProxyCount = resolverCount = desiredTLogCount = tLogWriteAntiQuorum = tLogReplicationFactor =
+	commitProxyCount = grvProxyCount = resolverCount = desiredTLogCount = tLogWriteAntiQuorum = tLogReplicationFactor =
 	    storageTeamSize = desiredLogRouterCount = -1;
 	tLogVersion = TLogVersion::DEFAULT;
 	tLogDataStoreType = storageServerStoreType = KeyValueStoreType::END;
 	tLogSpillType = TLogSpillType::DEFAULT;
-	autoProxyCount = CLIENT_KNOBS->DEFAULT_AUTO_PROXIES;
+	autoCommitProxyCount = CLIENT_KNOBS->DEFAULT_AUTO_COMMIT_PROXIES;
 	autoGrvProxyCount = CLIENT_KNOBS->DEFAULT_AUTO_GRV_PROXIES;
 	autoResolverCount = CLIENT_KNOBS->DEFAULT_AUTO_RESOLVERS;
 	autoDesiredTLogCount = CLIENT_KNOBS->DEFAULT_AUTO_LOGS;
@@ -169,7 +169,7 @@ bool DatabaseConfiguration::isValid() const {
 	    tLogWriteAntiQuorum <= tLogReplicationFactor/2 &&
 	    tLogReplicationFactor >= 1 &&
 	    storageTeamSize >= 1 &&
-	    getDesiredProxies() >= 1 &&
+	    getDesiredCommitProxies() >= 1 &&
 	    getDesiredGrvProxies() >= 1 &&
 	    getDesiredLogs() >= 1 &&
 	    getDesiredResolvers() >= 1 &&
 
@@ -180,7 +180,7 @@ bool DatabaseConfiguration::isValid() const {
 	    tLogSpillType != TLogSpillType::UNSET &&
 	    !(tLogSpillType == TLogSpillType::REFERENCE && tLogVersion < TLogVersion::V3) &&
 	    storageServerStoreType != KeyValueStoreType::END &&
-	    autoProxyCount >= 1 &&
+	    autoCommitProxyCount >= 1 &&
 	    autoGrvProxyCount >= 1 &&
 	    autoResolverCount >= 1 &&
 	    autoDesiredTLogCount >= 1 &&
@@ -198,7 +198,6 @@ bool DatabaseConfiguration::isValid() const {
 	    ( regions.size() == 0 || tLogPolicy->info() != "dcid^2 x zoneid^2 x 1") ) ) { //We cannot specify regions with three_datacenter replication
 		return false;
 	}
-
 	std::set<Key> dcIds;
 	dcIds.insert(Key());
 	for(auto& r : regions) {
@@ -318,11 +317,11 @@ StatusObject DatabaseConfiguration::toJSON(bool noPolicies) const {
 	if (desiredTLogCount != -1 || isOverridden("logs")) {
 		result["logs"] = desiredTLogCount;
 	}
-	if (proxyCount != -1 || isOverridden("proxies")) {
-		result["proxies"] = proxyCount;
+	if (commitProxyCount != -1 || isOverridden("commit_proxies")) {
+		result["commit_proxies"] = commitProxyCount;
 	}
 	if (grvProxyCount != -1 || isOverridden("grv_proxies")) {
-		result["grv_proxies"] = proxyCount;
+		result["grv_proxies"] = commitProxyCount;
 	}
 	if (resolverCount != -1 || isOverridden("resolvers")) {
 		result["resolvers"] = resolverCount;
 
@@ -336,8 +335,8 @@ StatusObject DatabaseConfiguration::toJSON(bool noPolicies) const {
 	if (repopulateRegionAntiQuorum != 0 || isOverridden("repopulate_anti_quorum")) {
 		result["repopulate_anti_quorum"] = repopulateRegionAntiQuorum;
 	}
-	if (autoProxyCount != CLIENT_KNOBS->DEFAULT_AUTO_PROXIES || isOverridden("auto_proxies")) {
-		result["auto_proxies"] = autoProxyCount;
+	if (autoCommitProxyCount != CLIENT_KNOBS->DEFAULT_AUTO_COMMIT_PROXIES || isOverridden("auto_commit_proxies")) {
+		result["auto_commit_proxies"] = autoCommitProxyCount;
 	}
 	if (autoGrvProxyCount != CLIENT_KNOBS->DEFAULT_AUTO_GRV_PROXIES || isOverridden("auto_grv_proxies")) {
 		result["auto_grv_proxies"] = autoGrvProxyCount;
@@ -419,8 +418,8 @@ bool DatabaseConfiguration::setInternal(KeyRef key, ValueRef value) {
 
 	if (ck == LiteralStringRef("initialized")) {
 		initialized = true;
-	} else if (ck == LiteralStringRef("proxies")) {
-		parse(&proxyCount, value);
+	} else if (ck == LiteralStringRef("commit_proxies")) {
+		parse(&commitProxyCount, value);
 	} else if (ck == LiteralStringRef("grv_proxies")) {
 		parse(&grvProxyCount, value);
 	} else if (ck == LiteralStringRef("resolvers")) {
 
@@ -459,8 +458,8 @@ bool DatabaseConfiguration::setInternal(KeyRef key, ValueRef value) {
 	} else if (ck == LiteralStringRef("storage_engine")) {
 		parse((&type), value);
 		storageServerStoreType = (KeyValueStoreType::StoreType)type;
-	} else if (ck == LiteralStringRef("auto_proxies")) {
-		parse(&autoProxyCount, value);
+	} else if (ck == LiteralStringRef("auto_commit_proxies")) {
+		parse(&autoCommitProxyCount, value);
 	} else if (ck == LiteralStringRef("auto_grv_proxies")) {
 		parse(&autoGrvProxyCount, value);
 	} else if (ck == LiteralStringRef("auto_resolvers")) {
@@ -149,9 +149,9 @@ struct DatabaseConfiguration {
 		return std::min(tLogReplicationFactor - 1 - tLogWriteAntiQuorum, storageTeamSize - 1);
 	}
 
-	// Proxy Servers
-	int32_t proxyCount;
-	int32_t autoProxyCount;
+	// CommitProxy Servers
+	int32_t commitProxyCount;
+	int32_t autoCommitProxyCount;
 	int32_t grvProxyCount;
 	int32_t autoGrvProxyCount;
 
@@ -192,7 +192,10 @@ struct DatabaseConfiguration {
 	bool isExcludedServer( NetworkAddressList ) const;
 	std::set<AddressExclusion> getExcludedServers() const;
 
-	int32_t getDesiredProxies() const { if(proxyCount == -1) return autoProxyCount; return proxyCount; }
+	int32_t getDesiredCommitProxies() const {
+		if (commitProxyCount == -1) return autoCommitProxyCount;
+		return commitProxyCount;
+	}
 	int32_t getDesiredGrvProxies() const {
 		if (grvProxyCount == -1) return autoGrvProxyCount;
 		return grvProxyCount;
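Both the explicit commit-proxy count and its auto-configured fallback follow the same sentinel convention: -1 means "not configured", so the getter falls back to the auto value. A tiny hedged sketch of the pattern (the type and field names here are illustrative, not from the source):

    // Minimal sketch of the -1 sentinel pattern used throughout
    // DatabaseConfiguration: an explicit setting wins, otherwise fall
    // back to the auto-computed value.
    struct CountWithAutoDefault {
        int32_t explicitCount = -1; // -1 means "not configured"
        int32_t autoCount = 3;      // knob-derived default (assumed value)

        int32_t desired() const {
            return explicitCount == -1 ? autoCount : explicitCount;
        }
    };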
@@ -29,7 +29,7 @@
 
 #include "fdbclient/NativeAPI.actor.h"
 #include "fdbclient/KeyRangeMap.h"
-#include "fdbclient/MasterProxyInterface.h"
+#include "fdbclient/CommitProxyInterface.h"
 #include "fdbclient/SpecialKeySpace.actor.h"
 #include "fdbrpc/QueueModel.h"
 #include "fdbrpc/MultiInterface.h"
 
@@ -68,7 +68,7 @@ struct LocationInfo : MultiInterface<ReferencedInterface<StorageServerInterface>
 	}
 };
 
-using ProxyInfo = ModelInterface<MasterProxyInterface>;
+using CommitProxyInfo = ModelInterface<CommitProxyInterface>;
 using GrvProxyInfo = ModelInterface<GrvProxyInterface>;
 
 class ClientTagThrottleData : NonCopyable {
@@ -165,8 +165,8 @@ public:
 	bool sampleOnCost(uint64_t cost) const;
 
 	void updateProxies();
-	Reference<ProxyInfo> getMasterProxies(bool useProvisionalProxies);
-	Future<Reference<ProxyInfo>> getMasterProxiesFuture(bool useProvisionalProxies);
+	Reference<CommitProxyInfo> getCommitProxies(bool useProvisionalProxies);
+	Future<Reference<CommitProxyInfo>> getCommitProxiesFuture(bool useProvisionalProxies);
 	Reference<GrvProxyInfo> getGrvProxies(bool useProvisionalProxies);
 	Future<Void> onProxiesChanged();
 	Future<HealthMetrics> getHealthMetrics(bool detailed);
 
@@ -219,9 +219,9 @@ public:
 	Reference<AsyncVar<Reference<ClusterConnectionFile>>> connectionFile;
 	AsyncTrigger proxiesChangeTrigger;
 	Future<Void> monitorProxiesInfoChange;
-	Reference<ProxyInfo> masterProxies;
+	Reference<CommitProxyInfo> commitProxies;
 	Reference<GrvProxyInfo> grvProxies;
-	bool proxyProvisional;
+	bool proxyProvisional; // Provisional commit proxy and grv proxy are used at the same time.
 	UID proxiesLastChange;
 	LocalityData clientLocality;
 	QueueModel queueModel;
@@ -563,6 +563,8 @@ namespace fileBackup {
 		if(rLen != len)
 			throw restore_bad_read();
 
+		simulateBlobFailure();
+
 		Standalone<VectorRef<KeyValueRef>> results({}, buf.arena());
 		state StringRefReader reader(buf, restore_corrupted_data());
 
 
@@ -606,7 +608,7 @@ namespace fileBackup {
 			return results;
 
 		} catch(Error &e) {
-			TraceEvent(SevWarn, "FileRestoreCorruptRangeFileBlock")
+			TraceEvent(SevWarn, "FileRestoreDecodeRangeFileBlockFailed")
 				.error(e)
 				.detail("Filename", file->getFilename())
 				.detail("BlockOffset", offset)
@@ -5021,3 +5023,18 @@ ACTOR Future<Void> transformRestoredDatabase(Database cx, Standalone<VectorRef<K
 
 	return Void();
 }
+
+void simulateBlobFailure() {
+	if (BUGGIFY && deterministicRandom()->random01() < 0.01) { // Simulate blob failures
+		double i = deterministicRandom()->random01();
+		if (i < 0.5) {
+			throw http_request_failed();
+		} else if (i < 0.7) {
+			throw connection_failed();
+		} else if (i < 0.8) {
+			throw timed_out();
+		} else if (i < 0.9) {
+			throw lookup_failed();
+		}
+	}
+}
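simulateBlobFailure is simulation-only fault injection: under BUGGIFY it throws one of four transient errors about 1% of the time, exercising exactly the retry branch added to getSnapshotFileKeyRange_impl above. A self-contained sketch of the probability-bucket technique, with std exceptions standing in for FDB's Error codes:

    #include <random>
    #include <stdexcept>

    // Sketch of the probability-bucket fault injection above. The 1% gate
    // and the 0.5/0.7/0.8/0.9 buckets mirror simulateBlobFailure().
    void maybeInjectBlobFault(std::mt19937& rng) {
        std::uniform_real_distribution<double> u(0.0, 1.0);
        if (u(rng) < 0.01) {   // inject a fault ~1% of the time
            double i = u(rng); // then pick which fault
            if (i < 0.5) throw std::runtime_error("http_request_failed");    // 50%
            else if (i < 0.7) throw std::runtime_error("connection_failed"); // 20%
            else if (i < 0.8) throw std::runtime_error("timed_out");         // 10%
            else if (i < 0.9) throw std::runtime_error("lookup_failed");     // 10%
            // remaining 10%: the gate fires but no failure is thrown
        }
    }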
@@ -27,6 +27,8 @@
 // with RateKeeper to gather health information of the cluster.
 struct GrvProxyInterface {
 	constexpr static FileIdentifier file_identifier = 8743216;
+	enum { LocationAwareLoadBalance = 1 };
+	enum { AlwaysFresh = 1 };
 
 	Optional<Key> processId;
 	bool provisional;
@@ -72,7 +72,7 @@ namespace HTTP {
 	}
 
 	PacketBuffer * writeRequestHeader(std::string const &verb, std::string const &resource, HTTP::Headers const &headers, PacketBuffer *dest) {
-		PacketWriter writer(dest, NULL, Unversioned());
+		PacketWriter writer(dest, nullptr, Unversioned());
 		writer.serializeBytes(verb);
 		writer.serializeBytes(" ", 1);
 		writer.serializeBytes(resource);
 
@@ -238,7 +238,7 @@ namespace HTTP {
 			{
 				// Read the line that contains the chunk length as text in hex
 				size_t lineLen = wait(read_delimited_into_string(conn, "\r\n", &r->content, pos));
-				state int chunkLen = strtol(r->content.substr(pos, lineLen).c_str(), NULL, 16);
+				state int chunkLen = strtol(r->content.substr(pos, lineLen).c_str(), nullptr, 16);
 
 				// Instead of advancing pos, erase the chunk length header line (line length + delimiter size) from the content buffer
 				r->content.erase(pos, lineLen + 2);
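The chunked-transfer step above reads a size line such as "1a3f\r\n", parses it as hex with strtol base 16, and erases the line plus its CRLF so the buffer accumulates only chunk payload. A hedged, self-contained sketch of that step (the function name is illustrative):

    #include <cstdlib>
    #include <string>

    // Parse one chunked-encoding size header in place: the size text is hex,
    // and the header line (length + 2 for "\r\n") is removed from the buffer.
    int parseChunkHeader(std::string& content, size_t pos, size_t lineLen) {
        int chunkLen = strtol(content.substr(pos, lineLen).c_str(), nullptr, 16);
        content.erase(pos, lineLen + 2); // drop "<hex>\r\n" in place
        return chunkLen;                 // 0 marks the final chunk
    }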
@@ -301,7 +301,7 @@ namespace HTTP {
 		state TraceEvent event(SevDebug, "HTTPRequest");
 
 		state UnsentPacketQueue empty;
-		if(pContent == NULL)
+		if(pContent == nullptr)
 			pContent = &empty;
 
 		// There is no standard http request id header field, so either a global default can be set via a knob
@@ -67,11 +67,11 @@
 // // The following would throw if a.b.c did not exist, or if it was not an int.
 // int x = r["a.b.c"].get_int();
 struct JSONDoc {
-	JSONDoc() : pObj(NULL) {}
+	JSONDoc() : pObj(nullptr) {}
 
 	// Construction from const json_spirit::mObject, trivial and will never throw.
 	// Resulting JSONDoc will not allow modifications.
-	JSONDoc(const json_spirit::mObject &o) : pObj(&o), wpObj(NULL) {}
+	JSONDoc(const json_spirit::mObject &o) : pObj(&o), wpObj(nullptr) {}
 
 	// Construction from json_spirit::mObject. Allows modifications.
 	JSONDoc(json_spirit::mObject &o) : pObj(&o), wpObj(&o) {}
 
@@ -79,7 +79,7 @@ struct JSONDoc {
 	// Construction from const json_spirit::mValue (which is a Variant type) which will try to
 	// convert it to an mObject. This will throw if that fails, just as it would
 	// if the caller called get_obj() itself and used the previous constructor instead.
-	JSONDoc(const json_spirit::mValue &v) : pObj(&v.get_obj()), wpObj(NULL) {}
+	JSONDoc(const json_spirit::mValue &v) : pObj(&v.get_obj()), wpObj(nullptr) {}
 
 	// Construction from non-const json_spirit::mValue - will convert the mValue to
 	// an object if it isn't already and then attach to it.
@@ -98,13 +98,13 @@ struct JSONDoc {
 	// path into on the "dot" character.
 	// When a path is found, pLast is updated.
 	bool has(std::string path, bool split=true) {
-		if (pObj == NULL)
+		if (pObj == nullptr)
 			return false;
 
 		if (path.empty())
 			return false;
 		size_t start = 0;
-		const json_spirit::mValue *curVal = NULL;
+		const json_spirit::mValue *curVal = nullptr;
 		while (start < path.size())
 		{
 			// If a path segment is found then curVal must be an object
 
@@ -140,7 +140,7 @@ struct JSONDoc {
 	// Creates the given path (forcing Objects to exist along its depth, replacing whatever else might have been there)
 	// and returns a reference to the Value at that location.
 	json_spirit::mValue & create(std::string path, bool split=true) {
-		if (wpObj == NULL || path.empty())
+		if (wpObj == nullptr || path.empty())
 			throw std::runtime_error("JSON Object not writable or bad JSON path");
 
 		size_t start = 0;
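Both has() and create() walk a dotted path such as "a.b.c" one segment at a time, descending into nested objects. A self-contained sketch of that traversal over a toy nested map (json_spirit types and the split flag are deliberately omitted; Node and has are illustrative names):

    #include <map>
    #include <string>

    // Toy stand-in for a JSON object tree.
    struct Node {
        std::map<std::string, Node> children;
    };

    // Walk "a.b.c" segment by segment, as JSONDoc::has() does.
    bool has(const Node* doc, const std::string& path) {
        if (doc == nullptr || path.empty()) return false;
        const Node* cur = doc;
        size_t start = 0;
        while (start < path.size()) {
            size_t dot = path.find('.', start);
            if (dot == std::string::npos) dot = path.size();
            auto it = cur->children.find(path.substr(start, dot - start));
            if (it == cur->children.end()) return false; // segment missing
            cur = &it->second;
            start = dot + 1; // skip past the '.'
        }
        return true;
    }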
@@ -280,7 +280,7 @@ struct JSONDoc {
 	}
 
 	const json_spirit::mValue & last() const { return *pLast; }
-	bool valid() const { return pObj != NULL; }
+	bool valid() const { return pObj != nullptr; }
 
 	const json_spirit::mObject & obj() {
 		// This dummy object is necessary to make working with obj() easier when this does not currently
 
@@ -304,7 +304,7 @@ struct JSONDoc {
 	static uint64_t expires_reference_version;
 private:
 	const json_spirit::mObject *pObj;
-	// Writeable pointer to the same object. Will be NULL if initialized from a const object.
+	// Writeable pointer to the same object. Will be nullptr if initialized from a const object.
 	json_spirit::mObject *wpObj;
 	const json_spirit::mValue *pLast;
 };
@@ -52,7 +52,7 @@ void ClientKnobs::initialize(bool randomize) {
 	init( COORDINATOR_RECONNECTION_DELAY, 1.0 );
 	init( CLIENT_EXAMPLE_AMOUNT, 20 );
 	init( MAX_CLIENT_STATUS_AGE, 1.0 );
-	init( MAX_MASTER_PROXY_CONNECTIONS, 5 ); if( randomize && BUGGIFY ) MAX_MASTER_PROXY_CONNECTIONS = 1;
+	init( MAX_COMMIT_PROXY_CONNECTIONS, 5 ); if( randomize && BUGGIFY ) MAX_COMMIT_PROXY_CONNECTIONS = 1;
 	init( MAX_GRV_PROXY_CONNECTIONS, 3 ); if( randomize && BUGGIFY ) MAX_GRV_PROXY_CONNECTIONS = 1;
 	init( STATUS_IDLE_TIMEOUT, 120.0 );
 
@@ -171,7 +171,7 @@ void ClientKnobs::initialize(bool randomize) {
 	init( MIN_CLEANUP_SECONDS, 3600.0 );
 
 	// Configuration
-	init( DEFAULT_AUTO_PROXIES, 3 );
+	init( DEFAULT_AUTO_COMMIT_PROXIES, 3 );
 	init( DEFAULT_AUTO_GRV_PROXIES, 1 );
 	init( DEFAULT_AUTO_RESOLVERS, 1 );
 	init( DEFAULT_AUTO_LOGS, 3 );
@@ -46,7 +46,7 @@ public:
 	double COORDINATOR_RECONNECTION_DELAY;
 	int CLIENT_EXAMPLE_AMOUNT;
 	double MAX_CLIENT_STATUS_AGE;
-	int MAX_MASTER_PROXY_CONNECTIONS;
+	int MAX_COMMIT_PROXY_CONNECTIONS;
 	int MAX_GRV_PROXY_CONNECTIONS;
 	double STATUS_IDLE_TIMEOUT;
 
@@ -167,7 +167,7 @@ public:
 	double MIN_CLEANUP_SECONDS;
 
 	// Configuration
-	int32_t DEFAULT_AUTO_PROXIES;
+	int32_t DEFAULT_AUTO_COMMIT_PROXIES;
 	int32_t DEFAULT_AUTO_GRV_PROXIES;
 	int32_t DEFAULT_AUTO_RESOLVERS;
 	int32_t DEFAULT_AUTO_LOGS;
@@ -33,6 +33,7 @@
 #include "fdbclient/DatabaseContext.h"
 #include "fdbrpc/simulator.h"
 #include "fdbclient/StatusClient.h"
+#include "flow/Trace.h"
 #include "flow/UnitTest.h"
 #include "fdbrpc/ReplicationPolicy.h"
 #include "fdbrpc/Replication.h"
@@ -78,8 +79,9 @@ std::map<std::string, std::string> configForToken( std::string const& mode ) {
 		std::string key = mode.substr(0, pos);
 		std::string value = mode.substr(pos+1);
 
-		if ((key == "logs" || key == "proxies" || key == "grv_proxies" || key == "resolvers" || key == "remote_logs" ||
-		     key == "log_routers" || key == "usable_regions" || key == "repopulate_anti_quorum") &&
+		if ((key == "logs" || key == "commit_proxies" || key == "grv_proxies" || key == "resolvers" ||
+		     key == "remote_logs" || key == "log_routers" || key == "usable_regions" ||
+		     key == "repopulate_anti_quorum") &&
 		    isInteger(value)) {
 			out[p+key] = value;
 		}
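configForToken splits each configure token on '=' and accepts only known integer-valued keys whose value passes isInteger. A hedged, self-contained sketch of that token handling (the key list is abbreviated and "conf/" stands in for the real prefix p):

    #include <cctype>
    #include <map>
    #include <string>

    bool isInteger(const std::string& s) {
        if (s.empty()) return false;
        for (char c : s)
            if (!std::isdigit(static_cast<unsigned char>(c))) return false;
        return true;
    }

    // Accept "key=value" only for known integer-valued configuration keys.
    void addToken(std::map<std::string, std::string>& out, const std::string& mode) {
        size_t pos = mode.find('=');
        if (pos == std::string::npos) return;
        std::string key = mode.substr(0, pos);
        std::string value = mode.substr(pos + 1);
        if ((key == "logs" || key == "commit_proxies" || key == "grv_proxies") && isInteger(value))
            out["conf/" + key] = value; // "conf/" is a stand-in for the real prefix
    }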
@@ -656,7 +658,7 @@ ConfigureAutoResult parseConfig( StatusObject const& status ) {
 		}
 
 		if (processClass.classType() == ProcessClass::TransactionClass ||
-		    processClass.classType() == ProcessClass::ProxyClass ||
+		    processClass.classType() == ProcessClass::CommitProxyClass ||
 		    processClass.classType() == ProcessClass::GrvProxyClass ||
 		    processClass.classType() == ProcessClass::ResolutionClass ||
 		    processClass.classType() == ProcessClass::StatelessClass ||
 
@@ -701,7 +703,7 @@ ConfigureAutoResult parseConfig( StatusObject const& status ) {
 			if (proc.second == ProcessClass::StatelessClass) {
 				existingStatelessCount++;
 			}
-			if(proc.second == ProcessClass::ProxyClass) {
+			if (proc.second == ProcessClass::CommitProxyClass) {
 				existingProxyCount++;
 			}
 			if (proc.second == ProcessClass::GrvProxyClass) {
@@ -734,19 +736,18 @@ ConfigureAutoResult parseConfig( StatusObject const& status ) {
 			resolverCount = result.old_resolvers;
 		}
 
-		result.desired_proxies = std::max(std::min(12, processCount / 15), 1);
+		result.desired_commit_proxies = std::max(std::min(12, processCount / 15), 1);
 		int proxyCount;
-		if (!statusObjConfig.get("proxies", result.old_proxies)) {
-			result.old_proxies = CLIENT_KNOBS->DEFAULT_AUTO_PROXIES;
-			statusObjConfig.get("auto_proxies", result.old_proxies);
-			result.auto_proxies = result.desired_proxies;
-			proxyCount = result.auto_proxies;
+		if (!statusObjConfig.get("commit_proxies", result.old_commit_proxies)) {
+			result.old_commit_proxies = CLIENT_KNOBS->DEFAULT_AUTO_COMMIT_PROXIES;
+			statusObjConfig.get("auto_commit_proxies", result.old_commit_proxies);
+			result.auto_commit_proxies = result.desired_commit_proxies;
+			proxyCount = result.auto_commit_proxies;
 		} else {
-			result.auto_proxies = result.old_proxies;
-			proxyCount = result.old_proxies;
+			result.auto_commit_proxies = result.old_commit_proxies;
+			proxyCount = result.old_commit_proxies;
 		}
 
-		// Need to configure a good number.
 		result.desired_grv_proxies = std::max(std::min(4, processCount / 20), 1);
 		int grvProxyCount;
 		if (!statusObjConfig.get("grv_proxies", result.old_grv_proxies)) {
@@ -857,8 +858,8 @@ ACTOR Future<ConfigurationResult::Type> autoConfig( Database cx, ConfigureAutoRe
 		if (conf.auto_logs != conf.old_logs)
 			tr.set(configKeysPrefix.toString() + "auto_logs", format("%d", conf.auto_logs));
 
-		if(conf.auto_proxies != conf.old_proxies)
-			tr.set(configKeysPrefix.toString() + "auto_proxies", format("%d", conf.auto_proxies));
+		if (conf.auto_commit_proxies != conf.old_commit_proxies)
+			tr.set(configKeysPrefix.toString() + "auto_commit_proxies", format("%d", conf.auto_commit_proxies));
 
 		if (conf.auto_grv_proxies != conf.old_grv_proxies)
 			tr.set(configKeysPrefix.toString() + "auto_grv_proxies", format("%d", conf.auto_grv_proxies));
@@ -86,7 +86,7 @@ struct ConfigureAutoResult {
 	int32_t machines;
 
 	std::string old_replication;
-	int32_t old_proxies;
+	int32_t old_commit_proxies;
 	int32_t old_grv_proxies;
 	int32_t old_resolvers;
 	int32_t old_logs;
 
@@ -94,23 +94,24 @@ struct ConfigureAutoResult {
 	int32_t old_machines_with_transaction;
 
 	std::string auto_replication;
-	int32_t auto_proxies;
+	int32_t auto_commit_proxies;
 	int32_t auto_grv_proxies;
 	int32_t auto_resolvers;
 	int32_t auto_logs;
 	int32_t auto_processes_with_transaction;
 	int32_t auto_machines_with_transaction;
 
-	int32_t desired_proxies;
+	int32_t desired_commit_proxies;
 	int32_t desired_grv_proxies;
 	int32_t desired_resolvers;
 	int32_t desired_logs;
 
 	ConfigureAutoResult()
-	  : processes(-1), machines(-1), old_proxies(-1), old_grv_proxies(-1), old_resolvers(-1), old_logs(-1),
-	    old_processes_with_transaction(-1), old_machines_with_transaction(-1), auto_proxies(-1), auto_grv_proxies(-1),
-	    auto_resolvers(-1), auto_logs(-1), auto_processes_with_transaction(-1), auto_machines_with_transaction(-1),
-	    desired_proxies(-1), desired_grv_proxies(-1), desired_resolvers(-1), desired_logs(-1) {}
+	  : processes(-1), machines(-1), old_commit_proxies(-1), old_grv_proxies(-1), old_resolvers(-1), old_logs(-1),
+	    old_processes_with_transaction(-1), old_machines_with_transaction(-1), auto_commit_proxies(-1),
+	    auto_grv_proxies(-1), auto_resolvers(-1), auto_logs(-1), auto_processes_with_transaction(-1),
+	    auto_machines_with_transaction(-1), desired_commit_proxies(-1), desired_grv_proxies(-1), desired_resolvers(-1),
+	    desired_logs(-1) {}
 
 	bool isValid() const { return processes != -1; }
 };
@@ -171,7 +171,7 @@ ACTOR Future<Void> metricRuleUpdater(Database cx, MetricsConfig *config, TDMetri
 // Implementation of IMetricDB
 class MetricDB : public IMetricDB {
 public:
-	MetricDB(ReadYourWritesTransaction *tr = NULL) : tr(tr) {}
+	MetricDB(ReadYourWritesTransaction *tr = nullptr) : tr(tr) {}
 	~MetricDB() {}
 
 	// levelKey is the prefix for the entire level, no timestamp at the end
@@ -624,7 +624,7 @@ ACTOR Future<Void> getClientInfoFromLeader( Reference<AsyncVar<Optional<ClusterC
 		choose {
 			when( ClientDBInfo ni = wait( brokenPromiseToNever( knownLeader->get().get().clientInterface.openDatabase.getReply( req ) ) ) ) {
 				TraceEvent("MonitorLeaderForProxiesGotClientInfo", knownLeader->get().get().clientInterface.id())
-				    .detail("MasterProxy0", ni.masterProxies.size() ? ni.masterProxies[0].id() : UID())
+				    .detail("CommitProxy0", ni.commitProxies.size() ? ni.commitProxies[0].id() : UID())
 				    .detail("GrvProxy0", ni.grvProxies.size() ? ni.grvProxies[0].id() : UID())
 				    .detail("ClientID", ni.id);
 				clientData->clientInfo->set(CachedSerialization<ClientDBInfo>(ni));
@@ -681,24 +681,25 @@ ACTOR Future<Void> monitorLeaderForProxies( Key clusterKey, vector<NetworkAddres
 	}
 }
 
-void shrinkProxyList( ClientDBInfo& ni, std::vector<UID>& lastMasterProxyUIDs, std::vector<MasterProxyInterface>& lastMasterProxies,
-		std::vector<UID>& lastGrvProxyUIDs, std::vector<GrvProxyInterface>& lastGrvProxies) {
-	if(ni.masterProxies.size() > CLIENT_KNOBS->MAX_MASTER_PROXY_CONNECTIONS) {
-		std::vector<UID> masterProxyUIDs;
-		for(auto& masterProxy : ni.masterProxies) {
-			masterProxyUIDs.push_back(masterProxy.id());
+void shrinkProxyList(ClientDBInfo& ni, std::vector<UID>& lastCommitProxyUIDs,
+                     std::vector<CommitProxyInterface>& lastCommitProxies, std::vector<UID>& lastGrvProxyUIDs,
+                     std::vector<GrvProxyInterface>& lastGrvProxies) {
+	if (ni.commitProxies.size() > CLIENT_KNOBS->MAX_COMMIT_PROXY_CONNECTIONS) {
+		std::vector<UID> commitProxyUIDs;
+		for (auto& commitProxy : ni.commitProxies) {
+			commitProxyUIDs.push_back(commitProxy.id());
 		}
-		if(masterProxyUIDs != lastMasterProxyUIDs) {
-			lastMasterProxyUIDs.swap(masterProxyUIDs);
-			lastMasterProxies = ni.masterProxies;
-			deterministicRandom()->randomShuffle(lastMasterProxies);
-			lastMasterProxies.resize(CLIENT_KNOBS->MAX_MASTER_PROXY_CONNECTIONS);
-			for(int i = 0; i < lastMasterProxies.size(); i++) {
-				TraceEvent("ConnectedMasterProxy").detail("MasterProxy", lastMasterProxies[i].id());
+		if (commitProxyUIDs != lastCommitProxyUIDs) {
+			lastCommitProxyUIDs.swap(commitProxyUIDs);
+			lastCommitProxies = ni.commitProxies;
+			deterministicRandom()->randomShuffle(lastCommitProxies);
+			lastCommitProxies.resize(CLIENT_KNOBS->MAX_COMMIT_PROXY_CONNECTIONS);
+			for (int i = 0; i < lastCommitProxies.size(); i++) {
+				TraceEvent("ConnectedCommitProxy").detail("CommitProxy", lastCommitProxies[i].id());
 			}
 		}
-		ni.firstProxy = ni.masterProxies[0];
-		ni.masterProxies = lastMasterProxies;
+		ni.firstCommitProxy = ni.commitProxies[0];
+		ni.commitProxies = lastCommitProxies;
 	}
 	if(ni.grvProxies.size() > CLIENT_KNOBS->MAX_GRV_PROXY_CONNECTIONS) {
 		std::vector<UID> grvProxyUIDs;
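The shrink step above caps how many proxies a single client connects to: when the cluster advertises more than MAX_COMMIT_PROXY_CONNECTIONS, the list is shuffled once per membership change and truncated, so each client keeps a stable random subset (while firstCommitProxy preserves the full list's head for commitOnFirstProxy). A self-contained sketch of the subset selection (shrinkList and the string element type are illustrative):

    #include <algorithm>
    #include <random>
    #include <string>
    #include <vector>

    // Pick a per-client random subset of at most maxConnections proxies.
    // Reshuffling only when membership changes keeps a given client pinned
    // to the same subset across repeated cluster-info updates.
    std::vector<std::string> shrinkList(std::vector<std::string> proxies,
                                        size_t maxConnections, std::mt19937& rng) {
        if (proxies.size() > maxConnections) {
            std::shuffle(proxies.begin(), proxies.end(), rng); // random subset per client
            proxies.resize(maxConnections);                    // keep only the cap
        }
        return proxies;
    }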
@@ -719,14 +720,16 @@ void shrinkProxyList( ClientDBInfo& ni, std::vector<UID>& lastMasterProxyUIDs, s
 }
 
 // Leader is the process that will be elected by coordinators as the cluster controller
-ACTOR Future<MonitorLeaderInfo> monitorProxiesOneGeneration( Reference<ClusterConnectionFile> connFile, Reference<AsyncVar<ClientDBInfo>> clientInfo, MonitorLeaderInfo info, Reference<ReferencedObject<Standalone<VectorRef<ClientVersionRef>>>> supportedVersions, Key traceLogGroup) {
+ACTOR Future<MonitorLeaderInfo> monitorProxiesOneGeneration(
+    Reference<ClusterConnectionFile> connFile, Reference<AsyncVar<ClientDBInfo>> clientInfo, MonitorLeaderInfo info,
+    Reference<ReferencedObject<Standalone<VectorRef<ClientVersionRef>>>> supportedVersions, Key traceLogGroup) {
 	state ClusterConnectionString cs = info.intermediateConnFile->getConnectionString();
 	state vector<NetworkAddress> addrs = cs.coordinators();
 	state int idx = 0;
 	state int successIdx = 0;
 	state Optional<double> incorrectTime;
-	state std::vector<UID> lastProxyUIDs;
-	state std::vector<MasterProxyInterface> lastProxies;
+	state std::vector<UID> lastCommitProxyUIDs;
+	state std::vector<CommitProxyInterface> lastCommitProxies;
 	state std::vector<UID> lastGrvProxyUIDs;
 	state std::vector<GrvProxyInterface> lastGrvProxies;
 
@@ -780,7 +783,7 @@ ACTOR Future<MonitorLeaderInfo> monitorProxiesOneGeneration( Reference<ClusterCo
 			connFile->notifyConnected();
 
 			auto& ni = rep.get().mutate();
-			shrinkProxyList(ni, lastProxyUIDs, lastProxies, lastGrvProxyUIDs, lastGrvProxies);
+			shrinkProxyList(ni, lastCommitProxyUIDs, lastCommitProxies, lastGrvProxyUIDs, lastGrvProxies);
 			clientInfo->set( ni );
 			successIdx = idx;
 		} else {
@@ -25,7 +25,7 @@
 #include "fdbclient/FDBTypes.h"
 #include "fdbclient/CoordinationInterface.h"
 #include "fdbclient/ClusterInterface.h"
-#include "fdbclient/MasterProxyInterface.h"
+#include "fdbclient/CommitProxyInterface.h"
 
 #define CLUSTER_FILE_ENV_VAR_NAME "FDB_CLUSTER_FILE"
 
 
@@ -67,8 +67,9 @@ Future<Void> monitorLeaderForProxies( Value const& key, vector<NetworkAddress> c
 
 Future<Void> monitorProxies( Reference<AsyncVar<Reference<ClusterConnectionFile>>> const& connFile, Reference<AsyncVar<ClientDBInfo>> const& clientInfo, Reference<ReferencedObject<Standalone<VectorRef<ClientVersionRef>>>> const& supportedVersions, Key const& traceLogGroup );
 
-void shrinkProxyList( ClientDBInfo& ni, std::vector<UID>& lastMasterProxyUIDs, std::vector<MasterProxyInterface>& lastMasterProxies,
-		std::vector<UID>& lastGrvProxyUIDs, std::vector<GrvProxyInterface>& lastGrvProxies);
+void shrinkProxyList(ClientDBInfo& ni, std::vector<UID>& lastCommitProxyUIDs,
+                     std::vector<CommitProxyInterface>& lastCommitProxies, std::vector<UID>& lastGrvProxyUIDs,
+                     std::vector<GrvProxyInterface>& lastGrvProxies);
 
 #ifndef __INTEL_COMPILER
 #pragma region Implementation
@@ -163,7 +163,7 @@ public:
 
 		if(destroyNow) {
 			api->futureDestroy(f);
-			f = NULL;
+			f = nullptr;
 		}
 
 		return destroyNow;
 
@@ -202,7 +202,7 @@ public:
 		auto sav = (DLThreadSingleAssignmentVar<T>*)param;
 
 		if(MultiVersionApi::api->callbackOnMainThread) {
-			onMainThreadVoid([sav](){ sav->apply(); }, NULL);
+			onMainThreadVoid([sav](){ sav->apply(); }, nullptr);
 		}
 		else {
 			sav->apply();
@ -224,7 +224,7 @@ ThreadFuture<int64_t> DLTransaction::getApproximateSize() {
|
||||||
}
|
}
|
||||||
|
|
||||||
void DLTransaction::setOption(FDBTransactionOptions::Option option, Optional<StringRef> value) {
|
void DLTransaction::setOption(FDBTransactionOptions::Option option, Optional<StringRef> value) {
|
||||||
throwIfError(api->transactionSetOption(tr, option, value.present() ? value.get().begin() : NULL, value.present() ? value.get().size() : 0));
|
throwIfError(api->transactionSetOption(tr, option, value.present() ? value.get().begin() : nullptr, value.present() ? value.get().size() : 0));
|
||||||
}
|
}
|
||||||
|
|
||||||
ThreadFuture<Void> DLTransaction::onError(Error const& e) {
|
ThreadFuture<Void> DLTransaction::onError(Error const& e) {
|
||||||
|
@ -262,14 +262,14 @@ Reference<ITransaction> DLDatabase::createTransaction() {
|
||||||
}
|
}
|
||||||
|
|
||||||
void DLDatabase::setOption(FDBDatabaseOptions::Option option, Optional<StringRef> value) {
|
void DLDatabase::setOption(FDBDatabaseOptions::Option option, Optional<StringRef> value) {
|
||||||
throwIfError(api->databaseSetOption(db, option, value.present() ? value.get().begin() : NULL, value.present() ? value.get().size() : 0));
|
throwIfError(api->databaseSetOption(db, option, value.present() ? value.get().begin() : nullptr, value.present() ? value.get().size() : 0));
|
||||||
}
|
}
|
||||||
|
|
||||||
// DLApi
|
// DLApi
|
||||||
template<class T>
|
template<class T>
|
||||||
void loadClientFunction(T *fp, void *lib, std::string libPath, const char *functionName, bool requireFunction = true) {
|
void loadClientFunction(T *fp, void *lib, std::string libPath, const char *functionName, bool requireFunction = true) {
|
||||||
*(void**)(fp) = loadFunction(lib, functionName);
|
*(void**)(fp) = loadFunction(lib, functionName);
|
||||||
if(*fp == NULL && requireFunction) {
|
if(*fp == nullptr && requireFunction) {
|
||||||
TraceEvent(SevError, "ErrorLoadingFunction").detail("LibraryPath", libPath).detail("Function", functionName);
|
TraceEvent(SevError, "ErrorLoadingFunction").detail("LibraryPath", libPath).detail("Function", functionName);
|
||||||
throw platform_error();
|
throw platform_error();
|
||||||
}
|
}
|
||||||
|
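The NULL-to-nullptr sweep in these hunks is more than cosmetic: NULL is an integer constant and can steer overload resolution the wrong way, while nullptr has its own distinct type (std::nullptr_t). A self-contained illustration:

#include <cstdio>

static void handle(int)         { std::puts("int overload"); }
static void handle(const char*) { std::puts("pointer overload"); }

int main() {
    handle(0);       // the literal 0 picks handle(int), even if a null pointer was meant
    // handle(NULL); // depending on how NULL is defined, this is handle(int) or a compile error
    handle(nullptr); // unambiguous: always the pointer overload
}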
@@ -283,7 +283,7 @@ void DLApi::init() {
 }

 void* lib = loadLibrary(fdbCPath.c_str());
-if(lib == NULL) {
+if(lib == nullptr) {
 TraceEvent(SevError, "ErrorLoadingExternalClientLibrary").detail("LibraryPath", fdbCPath);
 throw platform_error();
 }

@@ -347,7 +347,7 @@ void DLApi::selectApiVersion(int apiVersion) {

 init();
 throwIfError(api->selectApiVersion(apiVersion, headerVersion));
-throwIfError(api->setNetworkOption(FDBNetworkOptions::EXTERNAL_CLIENT, NULL, 0));
+throwIfError(api->setNetworkOption(FDBNetworkOptions::EXTERNAL_CLIENT, nullptr, 0));
 }

 const char* DLApi::getClientVersion() {

@@ -359,7 +359,7 @@ const char* DLApi::getClientVersion() {
 }

 void DLApi::setNetworkOption(FDBNetworkOptions::Option option, Optional<StringRef> value) {
-throwIfError(api->setNetworkOption(option, value.present() ? value.get().begin() : NULL, value.present() ? value.get().size() : 0));
+throwIfError(api->setNetworkOption(option, value.present() ? value.get().begin() : nullptr, value.present() ? value.get().size() : 0));
 }

 void DLApi::setupNetwork() {

@@ -786,7 +786,7 @@ void MultiVersionDatabase::Connector::connect() {
 else {
 delref();
 }
-}, NULL);
+}, nullptr);
 }

 // Only called from main thread

@@ -805,7 +805,7 @@ void MultiVersionDatabase::Connector::fire(const Void &unused, int& userParam) {
 dbState->stateChanged();
 }
 delref();
-}, NULL);
+}, nullptr);
 }

 void MultiVersionDatabase::Connector::error(const Error& e, int& userParam) {

@@ -820,7 +820,7 @@ void MultiVersionDatabase::Connector::error(const Error& e, int& userParam) {
 }

 MultiVersionDatabase::DatabaseState::DatabaseState()
-: dbVar(new ThreadSafeAsyncVar<Reference<IDatabase>>(Reference<IDatabase>(NULL))), currentClientIndex(-1) {}
+: dbVar(new ThreadSafeAsyncVar<Reference<IDatabase>>(Reference<IDatabase>(nullptr))), currentClientIndex(-1) {}

 // Only called from main thread
 void MultiVersionDatabase::DatabaseState::stateChanged() {

@@ -898,7 +898,7 @@ void MultiVersionDatabase::DatabaseState::cancelConnections() {
 connectionAttempts.clear();
 clients.clear();
 delref();
-}, NULL);
+}, nullptr);
 }

 // MultiVersionApi

@@ -1043,7 +1043,7 @@ void MultiVersionApi::setSupportedClientVersions(Standalone<StringRef> versions)
 // This option must be set on the main thread because it modifies structures that can be used concurrently by the main thread
 onMainThreadVoid([this, versions](){
 localClient->api->setNetworkOption(FDBNetworkOptions::SUPPORTED_CLIENT_VERSIONS, versions);
-}, NULL);
+}, nullptr);

 if(!bypassMultiClientApi) {
 runOnExternalClients([versions](Reference<ClientInfo> client) {

@@ -1654,7 +1654,7 @@ THREAD_FUNC runSingleAssignmentVarTest(void *arg) {

 onMainThreadVoid([done](){
 *done = true;
-}, NULL);
+}, nullptr);
 }
 catch(Error &e) {
 printf("Caught error in test: %s\n", e.name());

@@ -286,7 +286,7 @@ struct ClientInfo : ThreadSafeReferenceCounted<ClientInfo> {
 bool failed;
 std::vector<std::pair<void (*)(void*), void*>> threadCompletionHooks;

-ClientInfo() : protocolVersion(0), api(NULL), external(false), failed(true) {}
+ClientInfo() : protocolVersion(0), api(nullptr), external(false), failed(true) {}
 ClientInfo(IClientApi *api) : protocolVersion(0), api(api), libPath("internal"), external(false), failed(false) {}
 ClientInfo(IClientApi *api, std::string libPath) : protocolVersion(0), api(api), libPath(libPath), external(true), failed(false) {}
@@ -62,7 +62,7 @@ public:
 auto e = ptr->end(); // e points to the end of the current blob
 if (e == blob->data.end()) { // the condition sanity checks e is at the end of current blob
 blob = blob->next;
-e = blob ? blob->data.begin() : NULL;
+e = blob ? blob->data.begin() : nullptr;
 }
 ptr = (Header*)e;
 decode();

@@ -70,7 +70,7 @@ public:

 bool operator == ( Iterator const& i ) const { return ptr == i.ptr; }
 bool operator != ( Iterator const& i) const { return ptr != i.ptr; }
-explicit operator bool() const { return blob!=NULL; }
+explicit operator bool() const { return blob!=nullptr; }

 typedef std::forward_iterator_tag iterator_category;
 typedef const MutationRef value_type;

@@ -79,7 +79,7 @@ public:
 typedef const MutationRef& reference;

 Iterator( Blob* blob, const Header* ptr ) : blob(blob), ptr(ptr) { decode(); }
-Iterator() : blob(NULL), ptr(NULL) { }
+Iterator() : blob(nullptr), ptr(nullptr) { }
 private:
 friend struct MutationListRef;
 const Blob* blob; // The blob containing the indicated mutation

@@ -95,16 +95,16 @@ public:
 }
 };

-MutationListRef() : blob_begin(NULL), blob_end(NULL), totalBytes(0) {
+MutationListRef() : blob_begin(nullptr), blob_end(nullptr), totalBytes(0) {
 }
-MutationListRef( Arena& ar, MutationListRef const& r ) : blob_begin(NULL), blob_end(NULL), totalBytes(0) {
+MutationListRef( Arena& ar, MutationListRef const& r ) : blob_begin(nullptr), blob_end(nullptr), totalBytes(0) {
 append_deep(ar, r.begin(), r.end());
 }
 Iterator begin() const {
 if (blob_begin) return Iterator(blob_begin, (Header*)blob_begin->data.begin());
-return Iterator(NULL, NULL);
+return Iterator(nullptr, nullptr);
 }
-Iterator end() const { return Iterator(NULL, NULL); }
+Iterator end() const { return Iterator(nullptr, nullptr); }
 size_t expectedSize() const { return sizeof(Blob) + totalBytes; }
 int totalSize() const { return totalBytes; }

@@ -146,12 +146,13 @@ public:

 if(totalBytes > 0) {
 blob_begin = blob_end = new (ar.arena()) Blob;
-blob_begin->next = NULL;
+blob_begin->next = nullptr;
 blob_begin->data = StringRef((const uint8_t*)ar.arenaRead(totalBytes), totalBytes); // Zero-copy read when deserializing from an ArenaReader
 }
 }

-//FIXME: this is re-implemented on the master proxy to include a yield, any changes to this function should also done there
+// FIXME: this is re-implemented on the commit proxy to include a yield, any changes to this function should also
+// done there
 template <class Ar>
 void serialize_save( Ar& ar ) const {
 serializer(ar, totalBytes);

@@ -180,7 +181,7 @@ private:
 }

 blob_end->data = StringRef(b, bytes);
-blob_end->next = NULL;
+blob_end->next = nullptr;
 return b;
 }
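MutationListRef packs mutations into a chain of arena-allocated blobs, and its Iterator hops to blob->next when one blob is exhausted; the nullptr now marks the end of that chain. A simplified, self-contained analogue of the traversal (Blob and visitAll here are illustrative, not the FDB types):

#include <cstdio>
#include <vector>

struct Blob {
    std::vector<int> data;
    Blob* next = nullptr;
};

// Walk every record across the chain; when one blob is exhausted we hop to
// blob->next, mirroring the `e = blob ? blob->data.begin() : nullptr` step.
void visitAll(const Blob* blob) {
    while (blob) {
        for (int v : blob->data) std::printf("%d\n", v);
        blob = blob->next;
    }
}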
@@ -40,7 +40,7 @@
 #include "fdbclient/KeyRangeMap.h"
 #include "fdbclient/Knobs.h"
 #include "fdbclient/ManagementAPI.actor.h"
-#include "fdbclient/MasterProxyInterface.h"
+#include "fdbclient/CommitProxyInterface.h"
 #include "fdbclient/MonitorLeader.h"
 #include "fdbclient/MutationList.h"
 #include "fdbclient/ReadYourWrites.h"

@@ -95,7 +95,7 @@ Future<REPLY_TYPE(Request)> loadBalance(
 DatabaseContext* ctx, const Reference<LocationInfo> alternatives, RequestStream<Request> Interface::*channel,
 const Request& request = Request(), TaskPriority taskID = TaskPriority::DefaultPromiseEndpoint,
 bool atMostOnce = false, // if true, throws request_maybe_delivered() instead of retrying automatically
-QueueModel* model = NULL) {
+QueueModel* model = nullptr) {
 if (alternatives->hasCaches) {
 return loadBalance(alternatives->locations(), channel, request, taskID, atMostOnce, model);
 }

@@ -147,7 +147,7 @@ Reference<StorageServerInfo> StorageServerInfo::getInterface( DatabaseContext *c
 }

 void StorageServerInfo::notifyContextDestroyed() {
-cx = NULL;
+cx = nullptr;
 }

 StorageServerInfo::~StorageServerInfo() {

@@ -155,7 +155,7 @@ StorageServerInfo::~StorageServerInfo() {
 auto it = cx->server_interf.find( interf.id() );
 if( it != cx->server_interf.end() )
 cx->server_interf.erase( it );
-cx = NULL;
+cx = nullptr;
 }
 }
|
@ -484,15 +484,15 @@ ACTOR static Future<Void> clientStatusUpdateActor(DatabaseContext *cx) {
|
||||||
}
|
}
|
||||||
|
|
||||||
ACTOR static Future<Void> monitorProxiesChange(Reference<AsyncVar<ClientDBInfo>> clientDBInfo, AsyncTrigger *triggerVar) {
|
ACTOR static Future<Void> monitorProxiesChange(Reference<AsyncVar<ClientDBInfo>> clientDBInfo, AsyncTrigger *triggerVar) {
|
||||||
state vector< MasterProxyInterface > curProxies;
|
state vector<CommitProxyInterface> curCommitProxies;
|
||||||
state vector< GrvProxyInterface > curGrvProxies;
|
state vector< GrvProxyInterface > curGrvProxies;
|
||||||
curProxies = clientDBInfo->get().masterProxies;
|
curCommitProxies = clientDBInfo->get().commitProxies;
|
||||||
curGrvProxies = clientDBInfo->get().grvProxies;
|
curGrvProxies = clientDBInfo->get().grvProxies;
|
||||||
|
|
||||||
loop{
|
loop{
|
||||||
wait(clientDBInfo->onChange());
|
wait(clientDBInfo->onChange());
|
||||||
if (clientDBInfo->get().masterProxies != curProxies || clientDBInfo->get().grvProxies != curGrvProxies) {
|
if (clientDBInfo->get().commitProxies != curCommitProxies || clientDBInfo->get().grvProxies != curGrvProxies) {
|
||||||
curProxies = clientDBInfo->get().masterProxies;
|
curCommitProxies = clientDBInfo->get().commitProxies;
|
||||||
curGrvProxies = clientDBInfo->get().grvProxies;
|
curGrvProxies = clientDBInfo->get().grvProxies;
|
||||||
triggerVar->trigger();
|
triggerVar->trigger();
|
||||||
}
|
}
|
||||||
|
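monitorProxiesChange above caches the last seen proxy membership, wakes on every ClientDBInfo change, and fires the trigger only when either list actually differs. A hedged, thread-based analogue of the same pattern (FDB expresses this with ACTOR futures, not threads; the types below are stand-ins):

#include <condition_variable>
#include <functional>
#include <mutex>
#include <string>
#include <vector>

struct ClientDBInfoLite {
    std::vector<std::string> commitProxies;
    std::vector<std::string> grvProxies;
};

void monitorProxies(std::mutex& m, std::condition_variable& changed,
                    const ClientDBInfoLite& live, std::function<void()> trigger) {
    std::unique_lock<std::mutex> lk(m);
    ClientDBInfoLite cur = live; // snapshot of the last seen membership
    for (;;) {
        changed.wait(lk); // analogous to wait(clientDBInfo->onChange())
        if (live.commitProxies != cur.commitProxies || live.grvProxies != cur.grvProxies) {
            cur = live;
            trigger(); // fire only on a real membership change
        }
    }
}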
@@ -881,7 +881,7 @@ DatabaseContext::DatabaseContext(Reference<AsyncVar<Reference<ClusterConnectionF
 transactionsExpensiveClearCostEstCount("ExpensiveClearCostEstCount", cc),
 specialKeySpace(std::make_unique<SpecialKeySpace>(specialKeys.begin, specialKeys.end, /* test */ false)) {
 dbId = deterministicRandom()->randomUniqueID();
-connected = (clientInfo->get().masterProxies.size() && clientInfo->get().grvProxies.size())
+connected = (clientInfo->get().commitProxies.size() && clientInfo->get().grvProxies.size())
 ? Void()
 : clientInfo->onChange();

@@ -930,6 +930,16 @@ DatabaseContext::DatabaseContext(Reference<AsyncVar<Reference<ClusterConnectionF
 std::make_unique<ExclusionInProgressRangeImpl>(
 KeyRangeRef(LiteralStringRef("inProgressExclusion/"), LiteralStringRef("inProgressExclusion0"))
 .withPrefix(SpecialKeySpace::getModuleRange(SpecialKeySpace::MODULE::MANAGEMENT).begin)));
+registerSpecialKeySpaceModule(
+SpecialKeySpace::MODULE::CONFIGURATION, SpecialKeySpace::IMPLTYPE::READWRITE,
+std::make_unique<ProcessClassRangeImpl>(
+KeyRangeRef(LiteralStringRef("process/class_type/"), LiteralStringRef("process/class_type0"))
+.withPrefix(SpecialKeySpace::getModuleRange(SpecialKeySpace::MODULE::CONFIGURATION).begin)));
+registerSpecialKeySpaceModule(
+SpecialKeySpace::MODULE::CONFIGURATION, SpecialKeySpace::IMPLTYPE::READONLY,
+std::make_unique<ProcessClassSourceRangeImpl>(
+KeyRangeRef(LiteralStringRef("process/class_source/"), LiteralStringRef("process/class_source0"))
+.withPrefix(SpecialKeySpace::getModuleRange(SpecialKeySpace::MODULE::CONFIGURATION).begin)));
 }
 if (apiVersionAtLeast(630)) {
 registerSpecialKeySpaceModule(SpecialKeySpace::MODULE::TRANSACTION, SpecialKeySpace::IMPLTYPE::READONLY,
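The two new registrations expose process class configuration through the special key space: a read-write range under process/class_type/ and a read-only provenance range under process/class_source/, both inside the CONFIGURATION module that this diff later anchors at \xff\xff/configuration/. A small sketch of the resulting key shape, assuming exactly the prefixes shown in the diff:

#include <string>

// Hypothetical helper for illustration only; the prefixes come from the
// KeyRangeRef registrations above.
std::string classTypeKey(const std::string& ipPort) {
    return std::string("\xff\xff") + "/configuration/process/class_type/" + ipPort;
}

// e.g. classTypeKey("10.0.0.1:4500")
//   -> "\xff\xff/configuration/process/class_type/10.0.0.1:4500"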
@@ -1164,8 +1174,8 @@ void DatabaseContext::setOption( FDBDatabaseOptions::Option option, Optional<Str
 break;
 case FDBDatabaseOptions::MACHINE_ID:
 clientLocality = LocalityData( clientLocality.processId(), value.present() ? Standalone<StringRef>(value.get()) : Optional<Standalone<StringRef>>(), clientLocality.machineId(), clientLocality.dcId() );
-if( clientInfo->get().masterProxies.size() )
-masterProxies = Reference<ProxyInfo>( new ProxyInfo( clientInfo->get().masterProxies) );
+if (clientInfo->get().commitProxies.size())
+commitProxies = Reference<CommitProxyInfo>(new CommitProxyInfo(clientInfo->get().commitProxies));
 if( clientInfo->get().grvProxies.size() )
 grvProxies = Reference<GrvProxyInfo>( new GrvProxyInfo( clientInfo->get().grvProxies ) );
 server_interf.clear();

@@ -1176,8 +1186,8 @@ void DatabaseContext::setOption( FDBDatabaseOptions::Option option, Optional<Str
 break;
 case FDBDatabaseOptions::DATACENTER_ID:
 clientLocality = LocalityData(clientLocality.processId(), clientLocality.zoneId(), clientLocality.machineId(), value.present() ? Standalone<StringRef>(value.get()) : Optional<Standalone<StringRef>>());
-if( clientInfo->get().masterProxies.size() )
-masterProxies = Reference<ProxyInfo>( new ProxyInfo( clientInfo->get().masterProxies));
+if (clientInfo->get().commitProxies.size())
+commitProxies = Reference<CommitProxyInfo>(new CommitProxyInfo(clientInfo->get().commitProxies));
 if( clientInfo->get().grvProxies.size() )
 grvProxies = Reference<GrvProxyInfo>( new GrvProxyInfo( clientInfo->get().grvProxies ));
 server_interf.clear();

@@ -1220,13 +1230,13 @@ ACTOR static Future<Void> switchConnectionFileImpl(Reference<ClusterConnectionFi
 .detail("ConnectionString", connFile->getConnectionString().toString());

 // Reset state from former cluster.
-self->masterProxies.clear();
+self->commitProxies.clear();
 self->grvProxies.clear();
 self->minAcceptableReadVersion = std::numeric_limits<Version>::max();
 self->invalidateCache(allKeys);

 auto clearedClientInfo = self->clientInfo->get();
-clearedClientInfo.masterProxies.clear();
+clearedClientInfo.commitProxies.clear();
 clearedClientInfo.grvProxies.clear();
 clearedClientInfo.id = deterministicRandom()->randomUniqueID();
 self->clientInfo->set(clearedClientInfo);
@@ -1307,7 +1317,7 @@ Database Database::createDatabase( Reference<ClusterConnectionFile> connFile, in
 .detail("PackageName", FDB_VT_PACKAGE_NAME)
 .detail("ClusterFile", connFile->getFilename().c_str())
 .detail("ConnectionString", connFile->getConnectionString().toString())
-.detailf("ActualTime", "%lld", DEBUG_DETERMINISM ? 0 : time(NULL))
+.detailf("ActualTime", "%lld", DEBUG_DETERMINISM ? 0 : time(nullptr))
 .detail("ApiVersion", apiVersion)
 .detailf("ImageOffset", "%p", platform::getImageOffset())
 .trackLatest("ClientStart");
@@ -1561,29 +1571,29 @@ void stopNetwork() {

 void DatabaseContext::updateProxies() {
 if (proxiesLastChange == clientInfo->get().id) return;
 proxiesLastChange = clientInfo->get().id;
-masterProxies.clear();
+commitProxies.clear();
 grvProxies.clear();
-bool masterProxyProvisional = false, grvProxyProvisional = false;
-if (clientInfo->get().masterProxies.size()) {
-masterProxies = Reference<ProxyInfo>(new ProxyInfo(clientInfo->get().masterProxies));
-masterProxyProvisional = clientInfo->get().masterProxies[0].provisional;
+bool commitProxyProvisional = false, grvProxyProvisional = false;
+if (clientInfo->get().commitProxies.size()) {
+commitProxies = Reference<CommitProxyInfo>(new CommitProxyInfo(clientInfo->get().commitProxies));
+commitProxyProvisional = clientInfo->get().commitProxies[0].provisional;
 }
 if (clientInfo->get().grvProxies.size()) {
 grvProxies = Reference<GrvProxyInfo>(new GrvProxyInfo(clientInfo->get().grvProxies));
 grvProxyProvisional = clientInfo->get().grvProxies[0].provisional;
 }
-if (clientInfo->get().masterProxies.size() && clientInfo->get().grvProxies.size()) {
-ASSERT(masterProxyProvisional == grvProxyProvisional);
-proxyProvisional = masterProxyProvisional;
+if (clientInfo->get().commitProxies.size() && clientInfo->get().grvProxies.size()) {
+ASSERT(commitProxyProvisional == grvProxyProvisional);
+proxyProvisional = commitProxyProvisional;
 }
 }

-Reference<ProxyInfo> DatabaseContext::getMasterProxies(bool useProvisionalProxies) {
+Reference<CommitProxyInfo> DatabaseContext::getCommitProxies(bool useProvisionalProxies) {
 updateProxies();
 if (proxyProvisional && !useProvisionalProxies) {
-return Reference<ProxyInfo>();
+return Reference<CommitProxyInfo>();
 }
-return masterProxies;
+return commitProxies;
 }

 Reference<GrvProxyInfo> DatabaseContext::getGrvProxies(bool useProvisionalProxies) {

@@ -1594,19 +1604,19 @@ Reference<GrvProxyInfo> DatabaseContext::getGrvProxies(bool useProvisionalProxie
 return grvProxies;
 }

-//Actor which will wait until the MultiInterface<MasterProxyInterface> returned by the DatabaseContext cx is not NULL
-ACTOR Future<Reference<ProxyInfo>> getMasterProxiesFuture(DatabaseContext *cx, bool useProvisionalProxies) {
+// Actor which will wait until the MultiInterface<CommitProxyInterface> returned by the DatabaseContext cx is not nullptr
+ACTOR Future<Reference<CommitProxyInfo>> getCommitProxiesFuture(DatabaseContext* cx, bool useProvisionalProxies) {
 loop{
-Reference<ProxyInfo> proxies = cx->getMasterProxies(useProvisionalProxies);
-if (proxies)
-return proxies;
+Reference<CommitProxyInfo> commitProxies = cx->getCommitProxies(useProvisionalProxies);
+if (commitProxies)
+return commitProxies;
 wait( cx->onProxiesChanged() );
 }
 }

-//Returns a future which will not be set until the ProxyInfo of this DatabaseContext is not NULL
-Future<Reference<ProxyInfo>> DatabaseContext::getMasterProxiesFuture(bool useProvisionalProxies) {
-return ::getMasterProxiesFuture(this, useProvisionalProxies);
+// Returns a future which will not be set until the CommitProxyInfo of this DatabaseContext is not nullptr
+Future<Reference<CommitProxyInfo>> DatabaseContext::getCommitProxiesFuture(bool useProvisionalProxies) {
+return ::getCommitProxiesFuture(this, useProvisionalProxies);
 }

 void GetRangeLimits::decrement( VectorRef<KeyValueRef> const& data ) {
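getCommitProxiesFuture above is a retry loop: poll a getter that may legitimately return an empty reference (provisional proxies are withheld unless explicitly requested), and block on a change notification between attempts. The same pattern, reduced to a hedged standalone template (names are illustrative, not FDB API):

#include <memory>

// Loop until get() yields a non-null handle; waitForChange() blocks until
// the underlying state may have changed, like wait(cx->onProxiesChanged()).
template <class T, class Get, class WaitForChange>
std::shared_ptr<T> waitUntilAvailable(Get get, WaitForChange waitForChange) {
    for (;;) {
        if (std::shared_ptr<T> p = get()) return p;
        waitForChange();
    }
}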
@@ -1733,8 +1743,8 @@ ACTOR Future<pair<KeyRange, Reference<LocationInfo>>> getKeyLocation_internal(Da
 ++cx->transactionKeyServerLocationRequests;
 choose {
 when (wait(cx->onProxiesChanged())) {}
-when (GetKeyServerLocationsReply rep = wait(basicLoadBalance(
-cx->getMasterProxies(info.useProvisionalProxies), &MasterProxyInterface::getKeyServersLocations,
+when(GetKeyServerLocationsReply rep = wait(basicLoadBalance(
+cx->getCommitProxies(info.useProvisionalProxies), &CommitProxyInterface::getKeyServersLocations,
 GetKeyServerLocationsRequest(span.context, key, Optional<KeyRef>(), 100, isBackward, key.arena()),
 TaskPriority::DefaultPromiseEndpoint))) {
 ++cx->transactionKeyServerLocationRequestsCompleted;

@@ -1782,8 +1792,8 @@ ACTOR Future<vector<pair<KeyRange, Reference<LocationInfo>>>> getKeyRangeLocatio
 ++cx->transactionKeyServerLocationRequests;
 choose {
 when ( wait( cx->onProxiesChanged() ) ) {}
-when ( GetKeyServerLocationsReply _rep = wait(basicLoadBalance(
-cx->getMasterProxies(info.useProvisionalProxies), &MasterProxyInterface::getKeyServersLocations,
+when(GetKeyServerLocationsReply _rep = wait(basicLoadBalance(
+cx->getCommitProxies(info.useProvisionalProxies), &CommitProxyInterface::getKeyServersLocations,
 GetKeyServerLocationsRequest(span.context, keys.begin, keys.end, limit, reverse, keys.arena()),
 TaskPriority::DefaultPromiseEndpoint))) {
 ++cx->transactionKeyServerLocationRequestsCompleted;

@@ -2512,7 +2522,7 @@ ACTOR Future<Standalone<RangeResultRef>> getRange( Database cx, Reference<Transa
 GetKeyValuesReply _rep =
 wait(loadBalance(cx.getPtr(), beginServer.second, &StorageServerInterface::getKeyValues, req,
 TaskPriority::DefaultPromiseEndpoint, false,
-cx->enableLocalityLoadBalance ? &cx->queueModel : NULL));
+cx->enableLocalityLoadBalance ? &cx->queueModel : nullptr));
 rep = _rep;
 ++cx->transactionPhysicalReadsCompleted;
 } catch(Error&) {
@@ -3450,14 +3460,16 @@ ACTOR static Future<Void> tryCommit( Database cx, Reference<TransactionLogInfo>
 req.debugID = commitID;
 state Future<CommitID> reply;
 if (options.commitOnFirstProxy) {
-if(cx->clientInfo->get().firstProxy.present()) {
-reply = throwErrorOr ( brokenPromiseToMaybeDelivered ( cx->clientInfo->get().firstProxy.get().commit.tryGetReply(req) ) );
+if (cx->clientInfo->get().firstCommitProxy.present()) {
+reply = throwErrorOr(brokenPromiseToMaybeDelivered(
+cx->clientInfo->get().firstCommitProxy.get().commit.tryGetReply(req)));
 } else {
-const std::vector<MasterProxyInterface>& proxies = cx->clientInfo->get().masterProxies;
+const std::vector<CommitProxyInterface>& proxies = cx->clientInfo->get().commitProxies;
 reply = proxies.size() ? throwErrorOr ( brokenPromiseToMaybeDelivered ( proxies[0].commit.tryGetReply(req) ) ) : Never();
 }
 } else {
-reply = basicLoadBalance( cx->getMasterProxies(info.useProvisionalProxies), &MasterProxyInterface::commit, req, TaskPriority::DefaultPromiseEndpoint, true );
+reply = basicLoadBalance(cx->getCommitProxies(info.useProvisionalProxies), &CommitProxyInterface::commit,
+req, TaskPriority::DefaultPromiseEndpoint, true);
 }

 choose {
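tryCommit's endpoint selection above prefers a designated first commit proxy when the cluster advertises one, falls back to the head of the commit proxy list, and otherwise parks on Never() until membership changes. A hedged distillation of just that choice (std::string stands in for the proxy interface; not the FDB code):

#include <optional>
#include <string>
#include <vector>

std::optional<std::string> pickCommitTarget(const std::optional<std::string>& firstCommitProxy,
                                            const std::vector<std::string>& commitProxies) {
    if (firstCommitProxy) return *firstCommitProxy;            // designated proxy wins
    if (!commitProxies.empty()) return commitProxies.front();  // otherwise head of list
    return std::nullopt;                                       // caller waits, like reply = Never()
}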
@@ -3531,8 +3543,9 @@ ACTOR static Future<Void> tryCommit( Database cx, Reference<TransactionLogInfo>
 // We don't know if the commit happened, and it might even still be in flight.

 if (!options.causalWriteRisky) {
-// Make sure it's not still in flight, either by ensuring the master we submitted to is dead, or the version we submitted with is dead, or by committing a conflicting transaction successfully
-//if ( cx->getMasterProxies()->masterGeneration <= originalMasterGeneration )
+// Make sure it's not still in flight, either by ensuring the master we submitted to is dead, or the
+// version we submitted with is dead, or by committing a conflicting transaction successfully
+// if ( cx->getCommitProxies()->masterGeneration <= originalMasterGeneration )

 // To ensure the original request is not in flight, we need a key range which intersects its read conflict ranges
 // We pick a key range which also intersects its write conflict ranges, since that avoids potentially creating conflicts where there otherwise would be none
@@ -4433,7 +4446,7 @@ ACTOR Future<Standalone<VectorRef<DDMetricsRef>>> waitDataDistributionMetricsLis
 choose {
 when(wait(cx->onProxiesChanged())) {}
 when(ErrorOr<GetDDMetricsReply> rep =
-wait(errorOr(basicLoadBalance(cx->getMasterProxies(false), &MasterProxyInterface::getDDMetrics,
+wait(errorOr(basicLoadBalance(cx->getCommitProxies(false), &CommitProxyInterface::getDDMetrics,
 GetDDMetricsRequest(keys, shardLimit))))) {
 if (rep.isError()) {
 throw rep.getError();

@@ -4539,7 +4552,9 @@ ACTOR Future<Void> snapCreate(Database cx, Standalone<StringRef> snapCmd, UID sn
 loop {
 choose {
 when(wait(cx->onProxiesChanged())) {}
-when(wait(basicLoadBalance(cx->getMasterProxies(false), &MasterProxyInterface::proxySnapReq, ProxySnapRequest(snapCmd, snapUID, snapUID), cx->taskID, true /*atmostOnce*/ ))) {
+when(wait(basicLoadBalance(cx->getCommitProxies(false), &CommitProxyInterface::proxySnapReq,
+ProxySnapRequest(snapCmd, snapUID, snapUID), cx->taskID,
+true /*atmostOnce*/))) {
 TraceEvent("SnapCreateExit")
 .detail("SnapCmd", snapCmd.toString())
 .detail("UID", snapUID);

@@ -4567,8 +4582,8 @@ ACTOR Future<bool> checkSafeExclusions(Database cx, vector<AddressExclusion> exc
 choose {
 when(wait(cx->onProxiesChanged())) {}
 when(ExclusionSafetyCheckReply _ddCheck =
-wait(basicLoadBalance(cx->getMasterProxies(false), &MasterProxyInterface::exclusionSafetyCheckReq,
-req, cx->taskID))) {
+wait(basicLoadBalance(cx->getCommitProxies(false),
+&CommitProxyInterface::exclusionSafetyCheckReq, req, cx->taskID))) {
 ddCheck = _ddCheck.safe;
 break;
 }
@@ -30,7 +30,7 @@
 #include "flow/flow.h"
 #include "flow/TDMetric.actor.h"
 #include "fdbclient/FDBTypes.h"
-#include "fdbclient/MasterProxyInterface.h"
+#include "fdbclient/CommitProxyInterface.h"
 #include "fdbclient/FDBOptions.g.h"
 #include "fdbclient/CoordinationInterface.h"
 #include "fdbclient/ClusterInterface.h"

@@ -1338,7 +1338,7 @@ Future< Standalone<RangeResultRef> > ReadYourWritesTransaction::getRange(
 if(begin.getKey() > maxKey || end.getKey() > maxKey)
 return key_outside_legal_range();

-//This optimization prevents NULL operations from being added to the conflict range
+//This optimization prevents nullptr operations from being added to the conflict range
 if( limits.isReached() ) {
 TEST(true); // RYW range read limit 0
 return Standalone<RangeResultRef>();

@@ -2053,9 +2053,6 @@ void ReadYourWritesTransaction::setOptionImpl( FDBTransactionOptions::Option opt
 case FDBTransactionOptions::SPECIAL_KEY_SPACE_ENABLE_WRITES:
 validateOptionValue(value, false);
 options.specialKeySpaceChangeConfiguration = true;
-// By default, it allows to read system keys
-// More options will be implicitly enabled if needed when doing set or clear
-options.readSystemKeys = true;
 break;
 default:
 break;
@@ -47,7 +47,7 @@ const KeyRef JSONSchemas::statusSchema = LiteralStringRef(R"statusSchema(
 "storage",
 "transaction",
 "resolution",
-"proxy",
+"commit_proxy",
 "grv_proxy",
 "master",
 "test",

@@ -84,7 +84,7 @@ const KeyRef JSONSchemas::statusSchema = LiteralStringRef(R"statusSchema(
 "role":{
 "$enum":[
 "master",
-"proxy",
+"commit_proxy",
 "grv_proxy",
 "log",
 "storage",

@@ -278,15 +278,20 @@ const KeyRef JSONSchemas::statusSchema = LiteralStringRef(R"statusSchema(
 "run_loop_busy":0.2
 }
 },
-"old_logs":[
-{
 "logs":[
+{
+"log_interfaces":[
 {
 "id":"7f8d623d0cb9966e",
 "healthy":true,
 "address":"1.2.3.4:1234"
 }
 ],
+"epoch":1,
+"current":false,
+"begin_version":23,
+"end_version":112315141,
+"possibly_losing_data":true,
 "log_replication_factor":3,
 "log_write_anti_quorum":0,
 "log_fault_tolerance":2,

@@ -486,7 +491,7 @@ const KeyRef JSONSchemas::statusSchema = LiteralStringRef(R"statusSchema(
 R"statusSchema(
 "recovery_state":{
 "required_resolvers":1,
-"required_proxies":1,
+"required_commit_proxies":1,
 "required_grv_proxies":1,
 "name":{
 "$enum":[

@@ -675,11 +680,11 @@ const KeyRef JSONSchemas::statusSchema = LiteralStringRef(R"statusSchema(
 "address":"10.0.4.1"
 }
 ],
-"auto_proxies":3,
+"auto_commit_proxies":3,
 "auto_grv_proxies":1,
 "auto_resolvers":1,
 "auto_logs":3,
-"proxies":5,
+"commit_proxies":5,
 "grv_proxies":1,
 "backup_worker_enabled":1
 },

@@ -879,11 +884,11 @@ const KeyRef JSONSchemas::clusterConfigurationSchema = LiteralStringRef(R"config
 "ssd-2",
 "memory"
 ]},
-"auto_proxies":3,
+"auto_commit_proxies":3,
 "auto_grv_proxies":1,
 "auto_resolvers":1,
 "auto_logs":3,
-"proxies":5
+"commit_proxies":5
 "grv_proxies":1
 })configSchema");
@@ -36,7 +36,9 @@ std::unordered_map<SpecialKeySpace::MODULE, KeyRange> SpecialKeySpace::moduleToB
 KeyRangeRef(LiteralStringRef("\xff\xff/metrics/"), LiteralStringRef("\xff\xff/metrics0")) },
 { SpecialKeySpace::MODULE::MANAGEMENT,
 KeyRangeRef(LiteralStringRef("\xff\xff/management/"), LiteralStringRef("\xff\xff/management0")) },
-{ SpecialKeySpace::MODULE::ERRORMSG, singleKeyRange(LiteralStringRef("\xff\xff/error_message")) }
+{ SpecialKeySpace::MODULE::ERRORMSG, singleKeyRange(LiteralStringRef("\xff\xff/error_message")) },
+{ SpecialKeySpace::MODULE::CONFIGURATION,
+KeyRangeRef(LiteralStringRef("\xff\xff/configuration/"), LiteralStringRef("\xff\xff/configuration0")) }
 };

 std::unordered_map<std::string, KeyRange> SpecialKeySpace::managementApiCommandToRange = {

@@ -48,6 +50,9 @@ std::unordered_map<std::string, KeyRange> SpecialKeySpace::managementApiCommandT

 std::set<std::string> SpecialKeySpace::options = { "excluded/force", "failed/force" };

+Standalone<RangeResultRef> rywGetRange(ReadYourWritesTransaction* ryw, const KeyRangeRef& kr,
+const Standalone<RangeResultRef>& res);
+
 // This function will move the given KeySelector as far as possible to the standard form:
 // orEqual == false && offset == 1 (Standard form)
 // If the corresponding key is not in the underlying key range, it will move over the range
|
@ -456,6 +461,24 @@ Future<Void> SpecialKeySpace::commit(ReadYourWritesTransaction* ryw) {
|
||||||
return commitActor(this, ryw);
|
return commitActor(this, ryw);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
SKSCTestImpl::SKSCTestImpl(KeyRangeRef kr) : SpecialKeyRangeRWImpl(kr) {}
|
||||||
|
|
||||||
|
Future<Standalone<RangeResultRef>> SKSCTestImpl::getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const {
|
||||||
|
ASSERT(range.contains(kr));
|
||||||
|
auto resultFuture = ryw->getRange(kr, CLIENT_KNOBS->TOO_MANY);
|
||||||
|
// all keys are written to RYW, since GRV is set, the read should happen locally
|
||||||
|
ASSERT(resultFuture.isReady());
|
||||||
|
auto result = resultFuture.getValue();
|
||||||
|
ASSERT(!result.more && result.size() < CLIENT_KNOBS->TOO_MANY);
|
||||||
|
auto kvs = resultFuture.getValue();
|
||||||
|
return rywGetRange(ryw, kr, kvs);
|
||||||
|
}
|
||||||
|
|
||||||
|
Future<Optional<std::string>> SKSCTestImpl::commit(ReadYourWritesTransaction* ryw) {
|
||||||
|
ASSERT(false);
|
||||||
|
return Optional<std::string>();
|
||||||
|
}
|
||||||
|
|
||||||
ReadConflictRangeImpl::ReadConflictRangeImpl(KeyRangeRef kr) : SpecialKeyRangeReadImpl(kr) {}
|
ReadConflictRangeImpl::ReadConflictRangeImpl(KeyRangeRef kr) : SpecialKeyRangeReadImpl(kr) {}
|
||||||
|
|
||||||
ACTOR static Future<Standalone<RangeResultRef>> getReadConflictRangeImpl(ReadYourWritesTransaction* ryw, KeyRange kr) {
|
ACTOR static Future<Standalone<RangeResultRef>> getReadConflictRangeImpl(ReadYourWritesTransaction* ryw, KeyRange kr) {
|
||||||
|
@ -570,86 +593,82 @@ void ManagementCommandsOptionsImpl::clear(ReadYourWritesTransaction* ryw, const
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
Key ManagementCommandsOptionsImpl::decode(const KeyRef& key) const {
|
|
||||||
// Should never be used
|
|
||||||
ASSERT(false);
|
|
||||||
return key;
|
|
||||||
}
|
|
||||||
|
|
||||||
Key ManagementCommandsOptionsImpl::encode(const KeyRef& key) const {
|
|
||||||
// Should never be used
|
|
||||||
ASSERT(false);
|
|
||||||
return key;
|
|
||||||
}
|
|
||||||
|
|
||||||
Future<Optional<std::string>> ManagementCommandsOptionsImpl::commit(ReadYourWritesTransaction* ryw) {
|
Future<Optional<std::string>> ManagementCommandsOptionsImpl::commit(ReadYourWritesTransaction* ryw) {
|
||||||
// Nothing to do, keys should be used by other impls' commit callback
|
// Nothing to do, keys should be used by other impls' commit callback
|
||||||
return Optional<std::string>();
|
return Optional<std::string>();
|
||||||
}
|
}
|
||||||
|
|
||||||
// read from rwModule
|
Standalone<RangeResultRef> rywGetRange(ReadYourWritesTransaction* ryw, const KeyRangeRef& kr,
|
||||||
ACTOR Future<Standalone<RangeResultRef>> rwModuleGetRangeActor(ReadYourWritesTransaction* ryw,
|
const Standalone<RangeResultRef>& res) {
|
||||||
const SpecialKeyRangeRWImpl* impl, KeyRangeRef kr) {
|
// "res" is the read result regardless of your writes, if ryw disabled, return immediately
|
||||||
state KeyRangeRef range = impl->getKeyRange();
|
if (ryw->readYourWritesDisabled()) return res;
|
||||||
Standalone<RangeResultRef> resultWithoutPrefix =
|
// If ryw enabled, we update it with writes from the transaction
|
||||||
wait(ryw->getRange(ryw->getDatabase()->specialKeySpace->decode(kr), CLIENT_KNOBS->TOO_MANY));
|
|
||||||
ASSERT(!resultWithoutPrefix.more && resultWithoutPrefix.size() < CLIENT_KNOBS->TOO_MANY);
|
|
||||||
Standalone<RangeResultRef> result;
|
Standalone<RangeResultRef> result;
|
||||||
if (ryw->readYourWritesDisabled()) {
|
|
||||||
for (const KeyValueRef& kv : resultWithoutPrefix)
|
|
||||||
result.push_back_deep(result.arena(), KeyValueRef(impl->encode(kv.key), kv.value));
|
|
||||||
} else {
|
|
||||||
RangeMap<Key, std::pair<bool, Optional<Value>>, KeyRangeRef>::Ranges ranges =
|
RangeMap<Key, std::pair<bool, Optional<Value>>, KeyRangeRef>::Ranges ranges =
|
||||||
ryw->getSpecialKeySpaceWriteMap().containedRanges(range);
|
ryw->getSpecialKeySpaceWriteMap().containedRanges(kr);
|
||||||
RangeMap<Key, std::pair<bool, Optional<Value>>, KeyRangeRef>::iterator iter = ranges.begin();
|
RangeMap<Key, std::pair<bool, Optional<Value>>, KeyRangeRef>::iterator iter = ranges.begin();
|
||||||
int index = 0;
|
auto iter2 = res.begin();
|
||||||
while (iter != ranges.end()) {
|
result.arena().dependsOn(res.arena());
|
||||||
// add all previous entries into result
|
while (iter != ranges.end() || iter2 != res.end()) {
|
||||||
Key rk = impl->encode(resultWithoutPrefix[index].key);
|
if (iter == ranges.end()) {
|
||||||
while (index < resultWithoutPrefix.size() && rk < iter->begin()) {
|
result.push_back(result.arena(), KeyValueRef(iter2->key, iter2->value));
|
||||||
result.push_back_deep(result.arena(), KeyValueRef(rk, resultWithoutPrefix[index].value));
|
++iter2;
|
||||||
++index;
|
} else if (iter2 == res.end()) {
|
||||||
}
|
// insert if it is a set entry
|
||||||
std::pair<bool, Optional<Value>> entry = iter->value();
|
std::pair<bool, Optional<Value>> entry = iter->value();
|
||||||
if (entry.first) {
|
if (entry.first && entry.second.present()) {
|
||||||
// add the writen entries if exists
|
|
||||||
if (entry.second.present()) {
|
|
||||||
result.push_back_deep(result.arena(), KeyValueRef(iter->begin(), entry.second.get()));
|
result.push_back_deep(result.arena(), KeyValueRef(iter->begin(), entry.second.get()));
|
||||||
}
|
}
|
||||||
// move index to skip all entries in the iter->range
|
++iter;
|
||||||
while (index < resultWithoutPrefix.size() &&
|
} else if (iter->range().contains(iter2->key)) {
|
||||||
iter->range().contains(impl->encode(resultWithoutPrefix[index].key)))
|
std::pair<bool, Optional<Value>> entry = iter->value();
|
||||||
++index;
|
// if this is a valid range either for set or clear, move iter2 outside the range
|
||||||
|
if (entry.first) {
|
||||||
|
// insert if this is a set entry
|
||||||
|
if (entry.second.present())
|
||||||
|
result.push_back_deep(result.arena(), KeyValueRef(iter->begin(), entry.second.get()));
|
||||||
|
// move iter2 outside the range
|
||||||
|
while (iter2 != res.end() && iter->range().contains(iter2->key)) ++iter2;
|
||||||
|
}
|
||||||
|
++iter;
|
||||||
|
} else if (iter->begin() > iter2->key) {
|
||||||
|
result.push_back(result.arena(), KeyValueRef(iter2->key, iter2->value));
|
||||||
|
++iter2;
|
||||||
|
} else if (iter->end() <= iter2->key) {
|
||||||
|
// insert if it is a set entry
|
||||||
|
std::pair<bool, Optional<Value>> entry = iter->value();
|
||||||
|
if (entry.first && entry.second.present()) {
|
||||||
|
result.push_back_deep(result.arena(), KeyValueRef(iter->begin(), entry.second.get()));
|
||||||
}
|
}
|
||||||
++iter;
|
++iter;
|
||||||
}
|
}
|
||||||
// add all remaining entries into result
|
|
||||||
while (index < resultWithoutPrefix.size()) {
|
|
||||||
const KeyValueRef& kv = resultWithoutPrefix[index];
|
|
||||||
result.push_back_deep(result.arena(), KeyValueRef(impl->encode(kv.key), kv.value));
|
|
||||||
++index;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
return result;
|
return result;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// read from those readwrite modules in which special keys have one-to-one mapping with real persisted keys
|
||||||
|
ACTOR Future<Standalone<RangeResultRef>> rwModuleWithMappingGetRangeActor(ReadYourWritesTransaction* ryw,
|
||||||
|
const SpecialKeyRangeRWImpl* impl,
|
||||||
|
KeyRangeRef kr) {
|
||||||
|
Standalone<RangeResultRef> resultWithoutPrefix =
|
||||||
|
wait(ryw->getTransaction().getRange(ryw->getDatabase()->specialKeySpace->decode(kr), CLIENT_KNOBS->TOO_MANY));
|
||||||
|
ASSERT(!resultWithoutPrefix.more && resultWithoutPrefix.size() < CLIENT_KNOBS->TOO_MANY);
|
||||||
|
Standalone<RangeResultRef> result;
|
||||||
|
for (const KeyValueRef& kv : resultWithoutPrefix)
|
||||||
|
result.push_back_deep(result.arena(), KeyValueRef(impl->encode(kv.key), kv.value));
|
||||||
|
return rywGetRange(ryw, kr, result);
|
||||||
|
}
|
||||||
|
|
||||||
ExcludeServersRangeImpl::ExcludeServersRangeImpl(KeyRangeRef kr) : SpecialKeyRangeRWImpl(kr) {}
|
ExcludeServersRangeImpl::ExcludeServersRangeImpl(KeyRangeRef kr) : SpecialKeyRangeRWImpl(kr) {}
|
||||||
|
|
||||||
Future<Standalone<RangeResultRef>> ExcludeServersRangeImpl::getRange(ReadYourWritesTransaction* ryw,
|
Future<Standalone<RangeResultRef>> ExcludeServersRangeImpl::getRange(ReadYourWritesTransaction* ryw,
|
||||||
KeyRangeRef kr) const {
|
KeyRangeRef kr) const {
|
||||||
return rwModuleGetRangeActor(ryw, this, kr);
|
return rwModuleWithMappingGetRangeActor(ryw, this, kr);
|
||||||
}
|
}
|
||||||
|
|
||||||
void ExcludeServersRangeImpl::set(ReadYourWritesTransaction* ryw, const KeyRef& key, const ValueRef& value) {
|
void ExcludeServersRangeImpl::set(ReadYourWritesTransaction* ryw, const KeyRef& key, const ValueRef& value) {
|
||||||
ryw->getSpecialKeySpaceWriteMap().insert(key, std::make_pair(true, Optional<Value>(value)));
|
// ignore value
|
||||||
}
|
ryw->getSpecialKeySpaceWriteMap().insert(key, std::make_pair(true, Optional<Value>(ValueRef())));
|
||||||
|
|
||||||
void ExcludeServersRangeImpl::clear(ReadYourWritesTransaction* ryw, const KeyRef& key) {
|
|
||||||
ryw->getSpecialKeySpaceWriteMap().insert(key, std::make_pair(true, Optional<Value>()));
|
|
||||||
}
|
|
||||||
|
|
||||||
void ExcludeServersRangeImpl::clear(ReadYourWritesTransaction* ryw, const KeyRangeRef& range) {
|
|
||||||
ryw->getSpecialKeySpaceWriteMap().insert(range, std::make_pair(true, Optional<Value>()));
|
|
||||||
}
|
}
|
||||||
|
|
||||||
Key ExcludeServersRangeImpl::decode(const KeyRef& key) const {
|
Key ExcludeServersRangeImpl::decode(const KeyRef& key) const {
|
||||||
|
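rywGetRange above is a two-cursor merge in key order: iter walks the transaction's uncommitted write ranges while iter2 walks the committed read result; a set range contributes its written value and swallows any committed keys it covers, a clear range just swallows them, and untouched committed keys pass through. A simplified model of the same merge (a std::map of ranges stands in for the RangeMap; this is an illustrative sketch, not the FDB code):

#include <map>
#include <optional>
#include <string>
#include <vector>

using KV = std::pair<std::string, std::string>;
// write map: begin key -> (end key, value; nullopt means the range was cleared)
using Writes = std::map<std::string, std::pair<std::string, std::optional<std::string>>>;

std::vector<KV> mergeReads(const std::vector<KV>& committed, const Writes& writes) {
    std::vector<KV> out;
    auto w = writes.begin();
    auto c = committed.begin();
    while (w != writes.end() || c != committed.end()) {
        if (w == writes.end()) { out.push_back(*c++); continue; } // only committed keys left
        const auto& [begin, rest] = *w;
        const auto& [end, val] = rest;
        if (c != committed.end() && c->first < begin) { out.push_back(*c++); continue; } // pass-through
        if (val) out.emplace_back(begin, *val);             // set: emit the written value
        while (c != committed.end() && c->first < end) ++c; // swallow covered committed keys
        ++w;
    }
    return out;
}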
@@ -671,7 +690,7 @@ bool parseNetWorkAddrFromKeys(ReadYourWritesTransaction* ryw, bool failed, std::
 while (iter != ranges.end()) {
 auto entry = iter->value();
 // only check for exclude(set) operation, include(clear) are not checked
-TraceEvent(SevInfo, "ParseNetworkAddress")
+TraceEvent(SevDebug, "ParseNetworkAddress")
 .detail("Valid", entry.first)
 .detail("Set", entry.second.present())
 .detail("Key", iter->begin().toString());

@@ -810,7 +829,6 @@ ACTOR Future<bool> checkExclusion(Database db, std::vector<AddressExclusion>* ad
 }

 void includeServers(ReadYourWritesTransaction* ryw) {
-ryw->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
 ryw->setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);
 ryw->setOption(FDBTransactionOptions::LOCK_AWARE);
 ryw->setOption(FDBTransactionOptions::USE_PROVISIONAL_PROXIES);

@@ -874,19 +892,12 @@ FailedServersRangeImpl::FailedServersRangeImpl(KeyRangeRef kr) : SpecialKeyRange

 Future<Standalone<RangeResultRef>> FailedServersRangeImpl::getRange(ReadYourWritesTransaction* ryw,
 KeyRangeRef kr) const {
-return rwModuleGetRangeActor(ryw, this, kr);
+return rwModuleWithMappingGetRangeActor(ryw, this, kr);
 }

 void FailedServersRangeImpl::set(ReadYourWritesTransaction* ryw, const KeyRef& key, const ValueRef& value) {
-ryw->getSpecialKeySpaceWriteMap().insert(key, std::make_pair(true, Optional<Value>(value)));
-}
-
-void FailedServersRangeImpl::clear(ReadYourWritesTransaction* ryw, const KeyRef& key) {
-ryw->getSpecialKeySpaceWriteMap().insert(key, std::make_pair(true, Optional<Value>()));
-}
-
-void FailedServersRangeImpl::clear(ReadYourWritesTransaction* ryw, const KeyRangeRef& range) {
-ryw->getSpecialKeySpaceWriteMap().insert(range, std::make_pair(true, Optional<Value>()));
+// ignore value
+ryw->getSpecialKeySpaceWriteMap().insert(key, std::make_pair(true, Optional<Value>(ValueRef())));
 }

 Key FailedServersRangeImpl::decode(const KeyRef& key) const {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// sort and remove :tls
|
||||||
|
std::set<std::string> inProgressAddresses;
|
||||||
for (auto const& address : inProgressExclusion) {
|
for (auto const& address : inProgressExclusion) {
|
||||||
Key addrKey = prefix.withSuffix(address.toString());
|
inProgressAddresses.insert(formatIpPort(address.ip, address.port));
|
||||||
|
}
|
||||||
|
|
||||||
|
for (auto const& address : inProgressAddresses) {
|
||||||
|
Key addrKey = prefix.withSuffix(address);
|
||||||
if (kr.contains(addrKey)) {
|
if (kr.contains(addrKey)) {
|
||||||
result.push_back(result.arena(), KeyValueRef(addrKey, ValueRef()));
|
result.push_back(result.arena(), KeyValueRef(addrKey, ValueRef()));
|
||||||
result.arena().dependsOn(addrKey.arena());
|
result.arena().dependsOn(addrKey.arena());
|
||||||
|
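The rewrite above is a dedup-and-sort pass: each in-progress exclusion is first canonicalized through formatIpPort (which never emits a `:tls` suffix) into a std::set, and only then turned into keys, so duplicates collapse and results come back in std::set's lexicographic order (note that string order is not numeric port order: "1.1.1.1:11" sorts before "1.1.1.1:5"). A minimal standalone sketch of the same two-pass shape; formatAddr here is a hypothetical stand-in for formatIpPort:

#include <cstdio>
#include <set>
#include <string>
#include <utility>
#include <vector>

// Hypothetical stand-in for formatIpPort: canonical "ip:port", no ":tls" suffix.
static std::string formatAddr(const std::string& ip, int port) {
    return ip + ":" + std::to_string(port);
}

int main() {
    std::vector<std::pair<std::string, int>> raw = {
        { "10.0.0.2", 4500 }, { "10.0.0.1", 4500 }, { "10.0.0.2", 4500 } // duplicate entry
    };
    std::set<std::string> unique; // sorted and deduplicated, like inProgressAddresses
    for (const auto& a : raw)
        unique.insert(formatAddr(a.first, a.second));
    for (const auto& s : unique)
        std::printf("%s\n", s.c_str()); // 10.0.0.1:4500, then 10.0.0.2:4500, each once
    return 0;
}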
@ -959,3 +976,148 @@ Future<Standalone<RangeResultRef>> ExclusionInProgressRangeImpl::getRange(ReadYo
                                                                          KeyRangeRef kr) const {
 	return ExclusionInProgressActor(ryw, getKeyRange().begin, kr);
 }
+
+ACTOR Future<Standalone<RangeResultRef>> getProcessClassActor(ReadYourWritesTransaction* ryw, KeyRef prefix,
+                                                              KeyRangeRef kr) {
+	vector<ProcessData> _workers = wait(getWorkers(&ryw->getTransaction()));
+	auto workers = _workers; // strip const
+	// Note : the sort by string is anti intuition, ex. 1.1.1.1:11 < 1.1.1.1:5
+	std::sort(workers.begin(), workers.end(), [](const ProcessData& lhs, const ProcessData& rhs) {
+		return formatIpPort(lhs.address.ip, lhs.address.port) < formatIpPort(rhs.address.ip, rhs.address.port);
+	});
+	Standalone<RangeResultRef> result;
+	for (auto& w : workers) {
+		// exclude :tls in keys even the network addresss is TLS
+		KeyRef k(prefix.withSuffix(formatIpPort(w.address.ip, w.address.port), result.arena()));
+		if (kr.contains(k)) {
+			ValueRef v(result.arena(), w.processClass.toString());
+			result.push_back(result.arena(), KeyValueRef(k, v));
+		}
+	}
+	return rywGetRange(ryw, kr, result);
+}
+
+ACTOR Future<Optional<std::string>> processClassCommitActor(ReadYourWritesTransaction* ryw, KeyRangeRef range) {
+	// enable related options
+	ryw->setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);
+	ryw->setOption(FDBTransactionOptions::LOCK_AWARE);
+	ryw->setOption(FDBTransactionOptions::USE_PROVISIONAL_PROXIES);
+	vector<ProcessData> workers = wait(
+	    getWorkers(&ryw->getTransaction())); // make sure we use the Transaction object to avoid used_during_commit()
+
+	auto ranges = ryw->getSpecialKeySpaceWriteMap().containedRanges(range);
+	auto iter = ranges.begin();
+	while (iter != ranges.end()) {
+		auto entry = iter->value();
+		// only loop through (set) operation, (clear) not exist
+		if (entry.first && entry.second.present()) {
+			// parse network address
+			Key address = iter->begin().removePrefix(range.begin);
+			AddressExclusion addr = AddressExclusion::parse(address);
+			// parse class type
+			ValueRef processClassType = entry.second.get();
+			ProcessClass processClass(processClassType.toString(), ProcessClass::DBSource);
+			// make sure we use the underlying Transaction object to avoid used_during_commit()
+			bool foundChange = false;
+			for (int i = 0; i < workers.size(); i++) {
+				if (addr.excludes(workers[i].address)) {
+					if (processClass.classType() != ProcessClass::InvalidClass)
+						ryw->getTransaction().set(processClassKeyFor(workers[i].locality.processId().get()),
+						                          processClassValue(processClass));
+					else
+						ryw->getTransaction().clear(processClassKeyFor(workers[i].locality.processId().get()));
+					foundChange = true;
+				}
+			}
+			if (foundChange)
+				ryw->getTransaction().set(processClassChangeKey, deterministicRandom()->randomUniqueID().toString());
+		}
+		++iter;
+	}
+	return Optional<std::string>();
+}
+
+ProcessClassRangeImpl::ProcessClassRangeImpl(KeyRangeRef kr) : SpecialKeyRangeRWImpl(kr) {}
+
+Future<Standalone<RangeResultRef>> ProcessClassRangeImpl::getRange(ReadYourWritesTransaction* ryw,
+                                                                   KeyRangeRef kr) const {
+	return getProcessClassActor(ryw, getKeyRange().begin, kr);
+}
+
+Future<Optional<std::string>> ProcessClassRangeImpl::commit(ReadYourWritesTransaction* ryw) {
+	// Validate network address and process class type
+	Optional<std::string> errorMsg;
+	auto ranges = ryw->getSpecialKeySpaceWriteMap().containedRanges(getKeyRange());
+	auto iter = ranges.begin();
+	while (iter != ranges.end()) {
+		auto entry = iter->value();
+		// only check for setclass(set) operation, (clear) are forbidden thus not exist
+		if (entry.first && entry.second.present()) {
+			// validate network address
+			Key address = iter->begin().removePrefix(range.begin);
+			AddressExclusion addr = AddressExclusion::parse(address);
+			if (!addr.isValid()) {
+				std::string error = "ERROR: \'" + address.toString() + "\' is not a valid network endpoint address\n";
+				if (address.toString().find(":tls") != std::string::npos)
+					error += " Do not include the `:tls' suffix when naming a process\n";
+				errorMsg = ManagementAPIError::toJsonString(false, "setclass", error);
+				return errorMsg;
+			}
+			// validate class type
+			ValueRef processClassType = entry.second.get();
+			ProcessClass processClass(processClassType.toString(), ProcessClass::DBSource);
+			if (processClass.classType() == ProcessClass::InvalidClass &&
+			    processClassType != LiteralStringRef("default")) {
+				std::string error = "ERROR: \'" + processClassType.toString() + "\' is not a valid process class\n";
+				errorMsg = ManagementAPIError::toJsonString(false, "setclass", error);
+				return errorMsg;
+			}
+		}
+		++iter;
+	}
+	return processClassCommitActor(ryw, getKeyRange());
+}
+
+void throwNotAllowedError(ReadYourWritesTransaction* ryw) {
+	auto msg = ManagementAPIError::toJsonString(false, "setclass",
+	                                            "Clear operation is meaningless thus forbidden for setclass");
+	ryw->setSpecialKeySpaceErrorMsg(msg);
+	throw special_keys_api_failure();
+}
+
+void ProcessClassRangeImpl::clear(ReadYourWritesTransaction* ryw, const KeyRangeRef& range) {
+	return throwNotAllowedError(ryw);
+}
+
+void ProcessClassRangeImpl::clear(ReadYourWritesTransaction* ryw, const KeyRef& key) {
+	return throwNotAllowedError(ryw);
+}
+
+ACTOR Future<Standalone<RangeResultRef>> getProcessClassSourceActor(ReadYourWritesTransaction* ryw, KeyRef prefix,
+                                                                    KeyRangeRef kr) {
+	vector<ProcessData> _workers = wait(getWorkers(&ryw->getTransaction()));
+	auto workers = _workers; // strip const
+	// Note : the sort by string is anti intuition, ex. 1.1.1.1:11 < 1.1.1.1:5
+	std::sort(workers.begin(), workers.end(), [](const ProcessData& lhs, const ProcessData& rhs) {
+		return formatIpPort(lhs.address.ip, lhs.address.port) < formatIpPort(rhs.address.ip, rhs.address.port);
+	});
+	Standalone<RangeResultRef> result;
+	for (auto& w : workers) {
+		// exclude :tls in keys even the network addresss is TLS
+		Key k(prefix.withSuffix(formatIpPort(w.address.ip, w.address.port)));
+		if (kr.contains(k)) {
+			Value v(w.processClass.sourceString());
+			result.push_back(result.arena(), KeyValueRef(k, v));
+			result.arena().dependsOn(k.arena());
+			result.arena().dependsOn(v.arena());
+		}
+	}
+	return result;
+}
+
+ProcessClassSourceRangeImpl::ProcessClassSourceRangeImpl(KeyRangeRef kr) : SpecialKeyRangeReadImpl(kr) {}
+
+Future<Standalone<RangeResultRef>> ProcessClassSourceRangeImpl::getRange(ReadYourWritesTransaction* ryw,
+                                                                         KeyRangeRef kr) const {
+	return getProcessClassSourceActor(ryw, getKeyRange().begin, kr);
+}
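ProcessClassRangeImpl::commit above validates every buffered write before anything touches the system keyspace: the endpoint must parse, and a stray `:tls` suffix earns its own hint. A rough standalone sketch of that validation shape; parseEndpoint is a deliberately simplified stand-in for AddressExclusion::parse, which also accepts bare IPs:

#include <cctype>
#include <cstdio>
#include <string>

// Hypothetical simplification of AddressExclusion::parse: require "ip:port".
static bool parseEndpoint(const std::string& s) {
    auto colon = s.rfind(':');
    if (colon == std::string::npos || colon == 0 || colon + 1 == s.size())
        return false;
    for (size_t i = colon + 1; i < s.size(); ++i)
        if (!std::isdigit(static_cast<unsigned char>(s[i])))
            return false;
    return true;
}

static std::string validate(const std::string& address) {
    if (!parseEndpoint(address)) {
        std::string error = "ERROR: '" + address + "' is not a valid network endpoint address\n";
        if (address.find(":tls") != std::string::npos)
            error += "Do not include the `:tls' suffix when naming a process\n";
        return error;
    }
    return ""; // empty string means the address is acceptable
}

int main() {
    std::printf("%s", validate("10.0.0.1:4500:tls").c_str()); // rejected, with the :tls hint
    std::printf("[%s]\n", validate("10.0.0.1:4500").c_str()); // accepted: prints []
    return 0;
}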
@ -67,15 +67,29 @@ private:

 class SpecialKeyRangeRWImpl : public SpecialKeyRangeReadImpl {
 public:
-	virtual void set(ReadYourWritesTransaction* ryw, const KeyRef& key, const ValueRef& value) = 0;
-	virtual void clear(ReadYourWritesTransaction* ryw, const KeyRangeRef& range) = 0;
-	virtual void clear(ReadYourWritesTransaction* ryw, const KeyRef& key) = 0;
+	virtual void set(ReadYourWritesTransaction* ryw, const KeyRef& key, const ValueRef& value) {
+		ryw->getSpecialKeySpaceWriteMap().insert(key, std::make_pair(true, Optional<Value>(value)));
+	}
+	virtual void clear(ReadYourWritesTransaction* ryw, const KeyRangeRef& range) {
+		ryw->getSpecialKeySpaceWriteMap().insert(range, std::make_pair(true, Optional<Value>()));
+	}
+	virtual void clear(ReadYourWritesTransaction* ryw, const KeyRef& key) {
+		ryw->getSpecialKeySpaceWriteMap().insert(key, std::make_pair(true, Optional<Value>()));
+	}
 	virtual Future<Optional<std::string>> commit(
 	    ReadYourWritesTransaction* ryw) = 0; // all delayed async operations of writes in special-key-space
 	// Given the special key to write, return the real key that needs to be modified
-	virtual Key decode(const KeyRef& key) const = 0;
+	virtual Key decode(const KeyRef& key) const {
+		// Default implementation should never be used
+		ASSERT(false);
+		return key;
+	}
 	// Given the read key, return the corresponding special key
-	virtual Key encode(const KeyRef& key) const = 0;
+	virtual Key encode(const KeyRef& key) const {
+		// Default implementation should never be used
+		ASSERT(false);
+		return key;
+	};

 	explicit SpecialKeyRangeRWImpl(KeyRangeRef kr) : SpecialKeyRangeReadImpl(kr) {}
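The effect of the header change above: set, clear, decode, and encode are no longer pure virtual, so a writable special-key range only has to override getRange and commit, and the common buffer-into-the-write-map behavior comes for free (decode/encode keep an ASSERT(false) default for ranges that never need key mapping). A compilable toy mock of that override surface; every type here is a simplified stand-in, not the real FDB class:

#include <cstdio>
#include <string>

// Simplified stand-ins for ReadYourWritesTransaction and its write map.
struct WriteMap {
    void insert(const std::string& k, bool isSet) {
        std::printf("buffered %s (%s)\n", k.c_str(), isSet ? "set" : "clear");
    }
};
struct Txn {
    WriteMap wm;
    WriteMap& writeMap() { return wm; }
};

class RWImpl {
public:
    // Defaults mirror SpecialKeyRangeRWImpl: just buffer the mutation.
    virtual void set(Txn* t, const std::string& k) { t->writeMap().insert(k, true); }
    virtual void clear(Txn* t, const std::string& k) { t->writeMap().insert(k, false); }
    virtual bool commit(Txn* t) = 0; // the only override a subclass must provide
    virtual ~RWImpl() = default;
};

// Needs nothing beyond commit(), mirroring the SKSCTestImpl class added below.
class TestImpl : public RWImpl {
public:
    bool commit(Txn*) override {
        std::printf("committed\n");
        return true;
    }
};

int main() {
    Txn t;
    TestImpl impl;
    impl.set(&t, "\\xff\\xff/test/a"); // inherited default implementation
    impl.commit(&t);
    return 0;
}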
@ -125,6 +139,7 @@ class SpecialKeySpace {
 public:
 	enum class MODULE {
 		CLUSTERFILEPATH,
+		CONFIGURATION, // Configuration of the cluster
 		CONNECTIONSTRING,
 		ERRORMSG, // A single key space contains a json string which describes the last error in special-key-space
 		MANAGEMENT, // Management-API

@ -201,6 +216,14 @@ private:
 	void modulesBoundaryInit();
 };

+// Used for SpecialKeySpaceCorrectnessWorkload
+class SKSCTestImpl : public SpecialKeyRangeRWImpl {
+public:
+	explicit SKSCTestImpl(KeyRangeRef kr);
+	Future<Standalone<RangeResultRef>> getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const override;
+	Future<Optional<std::string>> commit(ReadYourWritesTransaction* ryw) override;
+};
+
 // Use special key prefix "\xff\xff/transaction/conflicting_keys/<some_key>",
 // to retrieve keys which caused latest not_committed(conflicting with another transaction) error.
 // The returned key value pairs are interpretted as :

@ -238,8 +261,6 @@ public:
 	void set(ReadYourWritesTransaction* ryw, const KeyRef& key, const ValueRef& value) override;
 	void clear(ReadYourWritesTransaction* ryw, const KeyRangeRef& range) override;
 	void clear(ReadYourWritesTransaction* ryw, const KeyRef& key) override;
-	Key decode(const KeyRef& key) const override;
-	Key encode(const KeyRef& key) const override;
 	Future<Optional<std::string>> commit(ReadYourWritesTransaction* ryw) override;
 };

@ -248,8 +269,6 @@ public:
 	explicit ExcludeServersRangeImpl(KeyRangeRef kr);
 	Future<Standalone<RangeResultRef>> getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const override;
 	void set(ReadYourWritesTransaction* ryw, const KeyRef& key, const ValueRef& value) override;
-	void clear(ReadYourWritesTransaction* ryw, const KeyRangeRef& range) override;
-	void clear(ReadYourWritesTransaction* ryw, const KeyRef& key) override;
 	Key decode(const KeyRef& key) const override;
 	Key encode(const KeyRef& key) const override;
 	Future<Optional<std::string>> commit(ReadYourWritesTransaction* ryw) override;

@ -260,8 +279,6 @@ public:
 	explicit FailedServersRangeImpl(KeyRangeRef kr);
 	Future<Standalone<RangeResultRef>> getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const override;
 	void set(ReadYourWritesTransaction* ryw, const KeyRef& key, const ValueRef& value) override;
-	void clear(ReadYourWritesTransaction* ryw, const KeyRangeRef& range) override;
-	void clear(ReadYourWritesTransaction* ryw, const KeyRef& key) override;
 	Key decode(const KeyRef& key) const override;
 	Key encode(const KeyRef& key) const override;
 	Future<Optional<std::string>> commit(ReadYourWritesTransaction* ryw) override;

@ -273,5 +290,20 @@ public:
 	Future<Standalone<RangeResultRef>> getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const override;
 };

+class ProcessClassRangeImpl : public SpecialKeyRangeRWImpl {
+public:
+	explicit ProcessClassRangeImpl(KeyRangeRef kr);
+	Future<Standalone<RangeResultRef>> getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const override;
+	Future<Optional<std::string>> commit(ReadYourWritesTransaction* ryw) override;
+	void clear(ReadYourWritesTransaction* ryw, const KeyRangeRef& range) override;
+	void clear(ReadYourWritesTransaction* ryw, const KeyRef& key) override;
+};
+
+class ProcessClassSourceRangeImpl : public SpecialKeyRangeReadImpl {
+public:
+	explicit ProcessClassSourceRangeImpl(KeyRangeRef kr);
+	Future<Standalone<RangeResultRef>> getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const override;
+};
+
 #include "flow/unactorcompiler.h"
 #endif

@ -260,10 +260,10 @@ extern const KeyRangeRef logRangesRange;
 Key logRangesEncodeKey(KeyRef keyBegin, UID logUid);

 // Returns the start key and optionally the logRange Uid
-KeyRef logRangesDecodeKey(KeyRef key, UID* logUid = NULL);
+KeyRef logRangesDecodeKey(KeyRef key, UID* logUid = nullptr);

 // Returns the end key and optionally the key prefix
-Key logRangesDecodeValue(KeyRef keyValue, Key* destKeyPrefix = NULL);
+Key logRangesDecodeValue(KeyRef keyValue, Key* destKeyPrefix = nullptr);

 // Returns the encoded key value comprised of the end key and destination prefix
 Key logRangesEncodeValue(KeyRef keyEnd, KeyRef destPath);
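The NULL-to-nullptr sweep that runs through the rest of this commit (including the two default arguments above) is more than style: NULL is an integer constant, so it participates in integer overload resolution, while nullptr has its own type, std::nullptr_t, and converts only to pointers. A minimal demonstration:

#include <cstddef>
#include <cstdio>

static void f(int)   { std::puts("f(int)"); }
static void f(char*) { std::puts("f(char*)"); }

int main() {
    f(0);       // f(int)
    // f(NULL); // f(int) or an ambiguity error, depending on how NULL is defined
    f(nullptr); // always f(char*)
    return 0;
}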
@ -19,7 +19,7 @@
  */

 #include "fdbclient/TagThrottle.h"
-#include "fdbclient/MasterProxyInterface.h"
+#include "fdbclient/CommitProxyInterface.h"
 #include "fdbclient/DatabaseContext.h"

 #include "flow/actorcompiler.h" // has to be last include

@ -104,7 +104,7 @@ TagThrottleKey TagThrottleKey::fromKey(const KeyRef& key) {

 TagThrottleValue TagThrottleValue::fromValue(const ValueRef& value) {
 	TagThrottleValue throttleValue;
-	BinaryReader reader(value, IncludeVersion());
+	BinaryReader reader(value, IncludeVersion(ProtocolVersion::withTagThrottleValueReason()));
 	reader >> throttleValue;
 	return throttleValue;
 }

@ -228,7 +228,7 @@ namespace ThrottleApi {
 		}
 		TagThrottleValue throttle(tpsRate, expirationTime.present() ? expirationTime.get() : 0, initialDuration,
 		                          reason.present() ? reason.get() : TagThrottledReason::UNSET);
-		BinaryWriter wr(IncludeVersion(ProtocolVersion::withTagThrottleValue()));
+		BinaryWriter wr(IncludeVersion(ProtocolVersion::withTagThrottleValueReason()));
 		wr << throttle;
 		state Value value = wr.toValue();
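With this pair of hunks the writer and reader of a tag-throttle value name the same protocol feature, withTagThrottleValueReason(), so the version stamped in front of the payload matches what the decoder expects now that TagThrottleValue carries a reason field. A standalone sketch of the version-prefix idea; this illustrates the concept only, it is not FDB's actual BinaryWriter/BinaryReader, and the feature-version constant is made up:

#include <cstdint>
#include <cstdio>
#include <cstring>
#include <string>

// Hypothetical feature version: payloads at/after this version carry a reason byte.
static const uint64_t kWithReason = 0x0FDB00B070000000ULL;

static std::string encode(double tpsRate, uint8_t reason) {
    std::string out(sizeof(uint64_t), '\0');
    std::memcpy(&out[0], &kWithReason, sizeof(kWithReason)); // stamp the version first
    out.append(reinterpret_cast<const char*>(&tpsRate), sizeof(tpsRate));
    out.push_back(static_cast<char>(reason)); // field introduced at kWithReason
    return out;
}

static void decode(const std::string& in) {
    uint64_t v;
    std::memcpy(&v, in.data(), sizeof(v));
    double tpsRate;
    std::memcpy(&tpsRate, in.data() + sizeof(v), sizeof(tpsRate));
    // Only read the reason byte if the stamped version says it was written.
    uint8_t reason = (v >= kWithReason) ? static_cast<uint8_t>(in[sizeof(v) + sizeof(tpsRate)]) : 0;
    std::printf("tpsRate=%g reason=%u\n", tpsRate, reason);
}

int main() {
    decode(encode(1000.0, 2));
    return 0;
}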
@ -347,6 +347,7 @@ namespace ThrottleApi {
 				removed = true;
 				tr.clear(tag.key);
+				unthrottledTags ++;
 			}

 			if(manualUnthrottledTags > 0) {

@ -1249,6 +1249,6 @@ ACTOR Future<Key> getCompletionKey(TaskCompletionKey *self, Future<Reference<Tas
 }

 Future<Key> TaskCompletionKey::get(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> taskBucket) {
-	ASSERT(key.present() == (joinFuture.getPtr() == NULL));
+	ASSERT(key.present() == (joinFuture.getPtr() == nullptr));
 	return key.present() ? key.get() : getCompletionKey(this, joinFuture->joinedFuture(tr, taskBucket));
 }

@ -84,12 +84,12 @@ ThreadSafeDatabase::ThreadSafeDatabase(std::string connFilename, int apiVersion)
 		catch(...) {
 			new (db) DatabaseContext(unknown_error());
 		}
-	}, NULL);
+	}, nullptr);
 }

 ThreadSafeDatabase::~ThreadSafeDatabase() {
 	DatabaseContext *db = this->db;
-	onMainThreadVoid( [db](){ db->delref(); }, NULL );
+	onMainThreadVoid( [db](){ db->delref(); }, nullptr );
 }

 ThreadSafeTransaction::ThreadSafeTransaction(DatabaseContext* cx) {

@ -107,18 +107,18 @@ ThreadSafeTransaction::ThreadSafeTransaction(DatabaseContext* cx) {
 		cx->addref();
 		new (tr) ReadYourWritesTransaction(Database(cx));
 	},
-	NULL);
+	nullptr);
 }

 ThreadSafeTransaction::~ThreadSafeTransaction() {
 	ReadYourWritesTransaction *tr = this->tr;
 	if (tr)
-		onMainThreadVoid( [tr](){ tr->delref(); }, NULL );
+		onMainThreadVoid( [tr](){ tr->delref(); }, nullptr );
 }

 void ThreadSafeTransaction::cancel() {
 	ReadYourWritesTransaction *tr = this->tr;
-	onMainThreadVoid( [tr](){ tr->cancel(); }, NULL );
+	onMainThreadVoid( [tr](){ tr->cancel(); }, nullptr );
 }

 void ThreadSafeTransaction::setVersion( Version v ) {
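Every ThreadSafeTransaction method in these hunks follows one pattern: capture the raw pointer, then hop to the network thread with onMainThreadVoid so the underlying ReadYourWritesTransaction is only ever touched from that one thread; passing nullptr for the second argument just opts out of error reporting. A simplified single-queue sketch of the pattern (an illustration, not the real onMainThreadVoid):

#include <cstdio>
#include <deque>
#include <functional>
#include <mutex>
#include <thread>

// Toy "main thread" work queue; the real flow code uses the network event loop.
static std::mutex mtx;
static std::deque<std::function<void()>> tasks;
static bool done = false;

static void postToMainThread(std::function<void()> f) {
    std::lock_guard<std::mutex> g(mtx);
    tasks.push_back(std::move(f));
}

int main() {
    std::thread client([] {
        int* resource = new int(42);
        // Like cancel()/reset(): capture the pointer, act on the main thread.
        postToMainThread([resource] { std::printf("used %d on main thread\n", *resource); });
        // Like the destructor's delref(): destruction also happens over there.
        postToMainThread([resource] { delete resource; });
        postToMainThread([] { std::lock_guard<std::mutex> g(mtx); done = true; });
    });
    while (true) { // busy-drains for brevity
        std::function<void()> f;
        {
            std::lock_guard<std::mutex> g(mtx);
            if (tasks.empty()) {
                if (done) break;
                continue;
            }
            f = std::move(tasks.front());
            tasks.pop_front();
        }
        f();
    }
    client.join();
    return 0;
}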
@ -328,17 +328,17 @@ ThreadFuture<Void> ThreadSafeTransaction::onError( Error const& e ) {

 void ThreadSafeTransaction::operator=(ThreadSafeTransaction&& r) noexcept {
 	tr = r.tr;
-	r.tr = NULL;
+	r.tr = nullptr;
 }

 ThreadSafeTransaction::ThreadSafeTransaction(ThreadSafeTransaction&& r) noexcept {
 	tr = r.tr;
-	r.tr = NULL;
+	r.tr = nullptr;
 }

 void ThreadSafeTransaction::reset() {
 	ReadYourWritesTransaction *tr = this->tr;
-	onMainThreadVoid( [tr](){ tr->reset(); }, NULL );
+	onMainThreadVoid( [tr](){ tr->reset(); }, nullptr );
 }

 extern const char* getSourceVersion();

@ -96,7 +96,7 @@ public:
 	ThreadFuture<Void> onError( Error const& e ) override;

 	// These are to permit use as state variables in actors:
-	ThreadSafeTransaction() : tr(NULL) {}
+	ThreadSafeTransaction() : tr(nullptr) {}
 	void operator=(ThreadSafeTransaction&& r) noexcept;
 	ThreadSafeTransaction(ThreadSafeTransaction&& r) noexcept;

@ -802,7 +802,7 @@ public:

 	void validate() {
 		int count=0, height=0;
-		PTreeImpl::validate<MapPair<K,std::pair<T,Version>>>( root, at, NULL, NULL, count, height );
+		PTreeImpl::validate<MapPair<K,std::pair<T,Version>>>( root, at, nullptr, nullptr, count, height );
 		if ( height > 100 )
 			TraceEvent(SevWarnAlways, "DiabolicalPTreeSize").detail("Size", count).detail("Height", height);
 	}

@ -195,7 +195,7 @@ description is not currently required but encouraged.
 	<Option name="next_write_no_write_conflict_range" code="30"
 		description="The next write performed on this transaction will not generate a write conflict range. As a result, other transactions which read the key(s) being modified by the next write will not conflict with this transaction. Care needs to be taken when using this option on a transaction that is shared between multiple threads. When setting this option, write conflict ranges will be disabled on the next write operation, regardless of what thread it is on." />
 	<Option name="commit_on_first_proxy" code="40"
-		description="Committing this transaction will bypass the normal load balancing across proxies and go directly to the specifically nominated 'first proxy'."
+		description="Committing this transaction will bypass the normal load balancing across commit proxies and go directly to the specifically nominated 'first commit proxy'."
 		hidden="true" />
 	<Option name="check_writes_enable" code="50"
 		hidden="true" />

@ -96,7 +96,7 @@ void monitor_fd( fdb_fd_set list, int fd, int* maxfd, void* cmd ) {
 	/* ignore maxfd */
 	struct kevent ev;
 	EV_SET( &ev, fd, EVFILT_READ, EV_ADD, 0, 0, cmd );
-	kevent( list, &ev, 1, NULL, 0, NULL ); // FIXME: check?
+	kevent( list, &ev, 1, nullptr, 0, nullptr ); // FIXME: check?
 #endif
 }

@ -105,15 +105,15 @@ void unmonitor_fd( fdb_fd_set list, int fd ) {
 	FD_CLR( fd, list );
 #elif defined(__APPLE__) || defined(__FreeBSD__)
 	struct kevent ev;
-	EV_SET( &ev, fd, EVFILT_READ, EV_DELETE, 0, 0, NULL );
-	kevent( list, &ev, 1, NULL, 0, NULL ); // FIXME: check?
+	EV_SET( &ev, fd, EVFILT_READ, EV_DELETE, 0, 0, nullptr );
+	kevent( list, &ev, 1, nullptr, 0, nullptr ); // FIXME: check?
 #endif
 }

 double get_cur_timestamp() {
 	struct tm tm_info;
 	struct timeval tv;
-	gettimeofday(&tv, NULL);
+	gettimeofday(&tv, nullptr);
 	localtime_r(&tv.tv_sec, &tm_info);

 	return tv.tv_sec + 1e-6*tv.tv_usec;

@ -182,14 +182,14 @@ void log_err(const char* func, int err, const char* format, ...) {
 }

 const char* get_value_multi(const CSimpleIni& ini, const char* key, ...) {
-	const char* ret = NULL;
-	const char* section = NULL;
+	const char* ret = nullptr;
+	const char* section = nullptr;

 	va_list ap;
 	va_start(ap, key);

 	while (!ret && (section = va_arg(ap, const char *)))
-		ret = ini.GetValue(section, key, NULL);
+		ret = ini.GetValue(section, key, nullptr);

 	va_end(ap);
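get_value_multi scans a sentinel-terminated va_list of section names and returns the first match; after this hunk its locals start as nullptr and every caller (seen below) terminates the list with nullptr, which still reaches varargs as a null pointer. A compilable miniature of the same lookup cascade:

#include <cstdarg>
#include <cstdio>
#include <cstring>

// First matching section wins; the argument list must end with nullptr.
static const char* first_matching(const char* key, ...) {
    const char* ret = nullptr;
    const char* section = nullptr;
    va_list ap;
    va_start(ap, key);
    while (!ret && (section = va_arg(ap, const char*)))
        if (std::strcmp(section, key) == 0) // stand-in for ini.GetValue(section, key, nullptr)
            ret = section;
    va_end(ap);
    return ret;
}

int main() {
    const char* hit = first_matching("general", "fdbserver.1", "fdbserver", "general", nullptr);
    std::printf("%s\n", hit ? hit : "(none)");
    return 0;
}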
|
@ -378,8 +378,8 @@ public:
|
||||||
// one pair for each of stdout and stderr
|
// one pair for each of stdout and stderr
|
||||||
int pipes[2][2];
|
int pipes[2][2];
|
||||||
|
|
||||||
Command() : argv(NULL) { }
|
Command() : argv(nullptr) { }
|
||||||
Command(const CSimpleIni& ini, std::string _section, uint64_t id, fdb_fd_set fds, int* maxfd) : section(_section), argv(NULL), fork_retry_time(-1), quiet(false), delete_envvars(NULL), fds(fds), deconfigured(false), kill_on_configuration_change(true) {
|
Command(const CSimpleIni& ini, std::string _section, uint64_t id, fdb_fd_set fds, int* maxfd) : section(_section), argv(nullptr), fork_retry_time(-1), quiet(false), delete_envvars(nullptr), fds(fds), deconfigured(false), kill_on_configuration_change(true) {
|
||||||
char _ssection[strlen(section.c_str()) + 22];
|
char _ssection[strlen(section.c_str()) + 22];
|
||||||
snprintf(_ssection, strlen(section.c_str()) + 22, "%s.%" PRIu64, section.c_str(), id);
|
snprintf(_ssection, strlen(section.c_str()) + 22, "%s.%" PRIu64, section.c_str(), id);
|
||||||
ssection = _ssection;
|
ssection = _ssection;
|
||||||
|
@ -410,7 +410,7 @@ public:
|
||||||
last_start = 0;
|
last_start = 0;
|
||||||
|
|
||||||
char* endptr;
|
char* endptr;
|
||||||
const char* rd = get_value_multi(ini, "restart_delay", ssection.c_str(), section.c_str(), "general", "fdbmonitor", NULL);
|
const char* rd = get_value_multi(ini, "restart_delay", ssection.c_str(), section.c_str(), "general", "fdbmonitor", nullptr);
|
||||||
if (!rd) {
|
if (!rd) {
|
||||||
log_msg(SevError, "Unable to resolve restart delay for %s\n", ssection.c_str());
|
log_msg(SevError, "Unable to resolve restart delay for %s\n", ssection.c_str());
|
||||||
return;
|
return;
|
||||||
|
@ -423,7 +423,7 @@ public:
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
const char* mrd = get_value_multi(ini, "initial_restart_delay", ssection.c_str(), section.c_str(), "general", "fdbmonitor", NULL);
|
const char* mrd = get_value_multi(ini, "initial_restart_delay", ssection.c_str(), section.c_str(), "general", "fdbmonitor", nullptr);
|
||||||
if (!mrd) {
|
if (!mrd) {
|
||||||
initial_restart_delay = 0;
|
initial_restart_delay = 0;
|
||||||
}
|
}
|
||||||
|
@ -437,7 +437,7 @@ public:
|
||||||
|
|
||||||
current_restart_delay = initial_restart_delay;
|
current_restart_delay = initial_restart_delay;
|
||||||
|
|
||||||
const char* rbo = get_value_multi(ini, "restart_backoff", ssection.c_str(), section.c_str(), "general", "fdbmonitor", NULL);
|
const char* rbo = get_value_multi(ini, "restart_backoff", ssection.c_str(), section.c_str(), "general", "fdbmonitor", nullptr);
|
||||||
if(!rbo) {
|
if(!rbo) {
|
||||||
restart_backoff = max_restart_delay;
|
restart_backoff = max_restart_delay;
|
||||||
}
|
}
|
||||||
|
@ -453,7 +453,7 @@ public:
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
const char* rdri = get_value_multi(ini, "restart_delay_reset_interval", ssection.c_str(), section.c_str(), "general", "fdbmonitor", NULL);
|
const char* rdri = get_value_multi(ini, "restart_delay_reset_interval", ssection.c_str(), section.c_str(), "general", "fdbmonitor", nullptr);
|
||||||
if (!rdri) {
|
if (!rdri) {
|
||||||
restart_delay_reset_interval = max_restart_delay;
|
restart_delay_reset_interval = max_restart_delay;
|
||||||
}
|
}
|
||||||
|
@ -465,19 +465,19 @@ public:
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
const char* q = get_value_multi(ini, "disable_lifecycle_logging", ssection.c_str(), section.c_str(), "general", NULL);
|
const char* q = get_value_multi(ini, "disable_lifecycle_logging", ssection.c_str(), section.c_str(), "general", nullptr);
|
||||||
if (q && !strcmp(q, "true"))
|
if (q && !strcmp(q, "true"))
|
||||||
quiet = true;
|
quiet = true;
|
||||||
|
|
||||||
const char* del_env = get_value_multi(ini, "delete_envvars", ssection.c_str(), section.c_str(), "general", NULL);
|
const char* del_env = get_value_multi(ini, "delete_envvars", ssection.c_str(), section.c_str(), "general", nullptr);
|
||||||
delete_envvars = del_env;
|
delete_envvars = del_env;
|
||||||
|
|
||||||
const char* kocc = get_value_multi(ini, "kill_on_configuration_change", ssection.c_str(), section.c_str(), "general", NULL);
|
const char* kocc = get_value_multi(ini, "kill_on_configuration_change", ssection.c_str(), section.c_str(), "general", nullptr);
|
||||||
if(kocc && strcmp(kocc, "true")) {
|
if(kocc && strcmp(kocc, "true")) {
|
||||||
kill_on_configuration_change = false;
|
kill_on_configuration_change = false;
|
||||||
}
|
}
|
||||||
|
|
||||||
const char* binary = get_value_multi(ini, "command", ssection.c_str(), section.c_str(), "general", NULL);
|
const char* binary = get_value_multi(ini, "command", ssection.c_str(), section.c_str(), "general", nullptr);
|
||||||
if (!binary) {
|
if (!binary) {
|
||||||
log_msg(SevError, "Unable to resolve command for %s\n", ssection.c_str());
|
log_msg(SevError, "Unable to resolve command for %s\n", ssection.c_str());
|
||||||
return;
|
return;
|
||||||
|
@ -495,7 +495,7 @@ public:
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
std::string opt = get_value_multi(ini, i.pItem, ssection.c_str(), section.c_str(), "general", NULL);
|
std::string opt = get_value_multi(ini, i.pItem, ssection.c_str(), section.c_str(), "general", nullptr);
|
||||||
|
|
||||||
std::size_t pos = 0;
|
std::size_t pos = 0;
|
||||||
|
|
||||||
|
@ -520,7 +520,7 @@ public:
|
||||||
for (auto itr : commands) {
|
for (auto itr : commands) {
|
||||||
argv[i++] = strdup(itr.c_str());
|
argv[i++] = strdup(itr.c_str());
|
||||||
}
|
}
|
||||||
argv[i] = NULL;
|
argv[i] = nullptr;
|
||||||
}
|
}
|
||||||
~Command() {
|
~Command() {
|
||||||
delete[] argv;
|
delete[] argv;
|
||||||
|
@ -609,7 +609,7 @@ void start_process(Command* cmd, uint64_t id, uid_t uid, gid_t gid, int delay, s
|
||||||
dup2( cmd->pipes[0][1], fileno(stdout) );
|
dup2( cmd->pipes[0][1], fileno(stdout) );
|
||||||
dup2( cmd->pipes[1][1], fileno(stderr) );
|
dup2( cmd->pipes[1][1], fileno(stderr) );
|
||||||
|
|
||||||
if(cmd->delete_envvars != NULL && std::strlen(cmd->delete_envvars) > 0) {
|
if(cmd->delete_envvars != nullptr && std::strlen(cmd->delete_envvars) > 0) {
|
||||||
std::string vars(cmd->delete_envvars);
|
std::string vars(cmd->delete_envvars);
|
||||||
size_t start = 0;
|
size_t start = 0;
|
||||||
do {
|
do {
|
||||||
|
@ -630,7 +630,7 @@ void start_process(Command* cmd, uint64_t id, uid_t uid, gid_t gid, int delay, s
|
||||||
#ifdef __linux__
|
#ifdef __linux__
|
||||||
signal(SIGCHLD, SIG_DFL);
|
signal(SIGCHLD, SIG_DFL);
|
||||||
|
|
||||||
sigprocmask(SIG_SETMASK, mask, NULL);
|
sigprocmask(SIG_SETMASK, mask, nullptr);
|
||||||
|
|
||||||
/* death of our parent raises SIGHUP */
|
/* death of our parent raises SIGHUP */
|
||||||
prctl(PR_SET_PDEATHSIG, SIGHUP);
|
prctl(PR_SET_PDEATHSIG, SIGHUP);
|
||||||
|
@ -722,7 +722,7 @@ bool argv_equal(const char** a1, const char** a2)
|
||||||
i++;
|
i++;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (a1[i] != NULL || a2[i] != NULL)
|
if (a1[i] != nullptr || a2[i] != nullptr)
|
||||||
return false;
|
return false;
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
@ -734,7 +734,7 @@ void kill_process(uint64_t id, bool wait = true) {
|
||||||
|
|
||||||
kill(pid, SIGTERM);
|
kill(pid, SIGTERM);
|
||||||
if(wait) {
|
if(wait) {
|
||||||
waitpid(pid, NULL, 0);
|
waitpid(pid, nullptr, 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
pid_id.erase(pid);
|
pid_id.erase(pid);
|
||||||
|
@ -758,8 +758,8 @@ void load_conf(const char* confpath, uid_t &uid, gid_t &gid, sigset_t* mask, fdb
|
||||||
uid_t _uid;
|
uid_t _uid;
|
||||||
gid_t _gid;
|
gid_t _gid;
|
||||||
|
|
||||||
const char* user = ini.GetValue("fdbmonitor", "user", NULL);
|
const char* user = ini.GetValue("fdbmonitor", "user", nullptr);
|
||||||
const char* group = ini.GetValue("fdbmonitor", "group", NULL);
|
const char* group = ini.GetValue("fdbmonitor", "group", nullptr);
|
||||||
|
|
||||||
if (user) {
|
if (user) {
|
||||||
errno = 0;
|
errno = 0;
|
||||||
|
@ -924,8 +924,8 @@ void watch_conf_dir( int kq, int* confd_fd, std::string confdir ) {
|
||||||
while(true) {
|
while(true) {
|
||||||
/* If already watching, drop it and close */
|
/* If already watching, drop it and close */
|
||||||
if ( *confd_fd >= 0 ) {
|
if ( *confd_fd >= 0 ) {
|
||||||
EV_SET( &ev, *confd_fd, EVFILT_VNODE, EV_DELETE, NOTE_WRITE, 0, NULL );
|
EV_SET( &ev, *confd_fd, EVFILT_VNODE, EV_DELETE, NOTE_WRITE, 0, nullptr );
|
||||||
kevent( kq, &ev, 1, NULL, 0, NULL );
|
kevent( kq, &ev, 1, nullptr, 0, nullptr );
|
||||||
close( *confd_fd );
|
close( *confd_fd );
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -939,8 +939,8 @@ void watch_conf_dir( int kq, int* confd_fd, std::string confdir ) {
|
||||||
}
|
}
|
||||||
|
|
||||||
if ( *confd_fd >= 0 ) {
|
if ( *confd_fd >= 0 ) {
|
||||||
EV_SET( &ev, *confd_fd, EVFILT_VNODE, EV_ADD | EV_CLEAR, NOTE_WRITE, 0, NULL );
|
EV_SET( &ev, *confd_fd, EVFILT_VNODE, EV_ADD | EV_CLEAR, NOTE_WRITE, 0, nullptr );
|
||||||
kevent( kq, &ev, 1, NULL, 0, NULL );
|
kevent( kq, &ev, 1, nullptr, 0, nullptr );
|
||||||
|
|
||||||
/* If our child appeared since we last tested it, start over from the beginning */
|
/* If our child appeared since we last tested it, start over from the beginning */
|
||||||
if ( confdir != child && (access(child.c_str(), F_OK) == 0 || errno != ENOENT) ) {
|
if ( confdir != child && (access(child.c_str(), F_OK) == 0 || errno != ENOENT) ) {
|
||||||
|
@ -964,16 +964,16 @@ void watch_conf_file( int kq, int* conff_fd, const char* confpath ) {
|
||||||
|
|
||||||
/* If already watching, drop it and close */
|
/* If already watching, drop it and close */
|
||||||
if ( *conff_fd >= 0 ) {
|
if ( *conff_fd >= 0 ) {
|
||||||
EV_SET( &ev, *conff_fd, EVFILT_VNODE, EV_DELETE, NOTE_WRITE | NOTE_ATTRIB, 0, NULL );
|
EV_SET( &ev, *conff_fd, EVFILT_VNODE, EV_DELETE, NOTE_WRITE | NOTE_ATTRIB, 0, nullptr );
|
||||||
kevent( kq, &ev, 1, NULL, 0, NULL );
|
kevent( kq, &ev, 1, nullptr, 0, nullptr );
|
||||||
close( *conff_fd );
|
close( *conff_fd );
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Open and watch */
|
/* Open and watch */
|
||||||
*conff_fd = open( confpath, O_EVTONLY );
|
*conff_fd = open( confpath, O_EVTONLY );
|
||||||
if ( *conff_fd >= 0 ) {
|
if ( *conff_fd >= 0 ) {
|
||||||
EV_SET( &ev, *conff_fd, EVFILT_VNODE, EV_ADD | EV_CLEAR, NOTE_WRITE | NOTE_ATTRIB, 0, NULL );
|
EV_SET( &ev, *conff_fd, EVFILT_VNODE, EV_ADD | EV_CLEAR, NOTE_WRITE | NOTE_ATTRIB, 0, nullptr );
|
||||||
kevent( kq, &ev, 1, NULL, 0, NULL );
|
kevent( kq, &ev, 1, nullptr, 0, nullptr );
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
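All of the fdbmonitor watch helpers drive the same kqueue API: EV_SET fills in a change record (its last udata argument is now nullptr) and kevent both submits changes and fetches events, with nullptr standing in for whichever list or timeout is unused. A minimal macOS-only sketch of watching one file for writes; error handling is elided, and O_EVTONLY does not exist on Linux:

#include <cstdio>
#include <fcntl.h>
#include <sys/event.h>
#include <unistd.h>

int main(int argc, char** argv) {
    if (argc != 2) {
        std::fprintf(stderr, "usage: %s <file>\n", argv[0]);
        return 1;
    }
    int kq = kqueue();
    int fd = open(argv[1], O_EVTONLY); // macOS: open for event notification only
    struct kevent ev;
    // Register one change; no event buffer, no timeout -> nullptr slots.
    EV_SET(&ev, fd, EVFILT_VNODE, EV_ADD | EV_CLEAR, NOTE_WRITE | NOTE_ATTRIB, 0, nullptr);
    kevent(kq, &ev, 1, nullptr, 0, nullptr);
    // Wait: no changes this time, one event slot, block indefinitely.
    kevent(kq, nullptr, 0, &ev, 1, nullptr);
    std::printf("file changed (fflags=%#x)\n", ev.fflags);
    close(fd);
    close(kq);
    return 0;
}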
@ -1194,7 +1194,7 @@ int main(int argc, char** argv) {
 				lockfile = args.OptionArg();
 				break;
 			case OPT_LOGGROUP:
-				if(strchr(args.OptionArg(), '"') != NULL) {
+				if(strchr(args.OptionArg(), '"') != nullptr) {
 					log_msg(SevError, "Invalid log group '%s', cannot contain '\"'\n", args.OptionArg());
 					exit(1);
 				}

@ -1226,9 +1226,9 @@ int main(int argc, char** argv) {
 		_confpath = joinPath(buf, _confpath);
 	}

-	// Guaranteed (if non-NULL) to be an absolute path with no
+	// Guaranteed (if non-nullptr) to be an absolute path with no
 	// symbolic link, /./ or /../ components
-	const char *p = realpath(_confpath.c_str(), NULL);
+	const char *p = realpath(_confpath.c_str(), nullptr);
 	if (!p) {
 		log_msg(SevError, "No configuration file at %s\n", _confpath.c_str());
 		exit(1);

@ -1351,14 +1351,14 @@ int main(int argc, char** argv) {

 	struct kevent ev;

-	EV_SET( &ev, SIGHUP, EVFILT_SIGNAL, EV_ADD, 0, 0, NULL);
-	kevent( kq, &ev, 1, NULL, 0, NULL );
-	EV_SET( &ev, SIGINT, EVFILT_SIGNAL, EV_ADD, 0, 0, NULL);
-	kevent( kq, &ev, 1, NULL, 0, NULL );
-	EV_SET( &ev, SIGTERM, EVFILT_SIGNAL, EV_ADD, 0, 0, NULL);
-	kevent( kq, &ev, 1, NULL, 0, NULL );
-	EV_SET( &ev, SIGCHLD, EVFILT_SIGNAL, EV_ADD, 0, 0, NULL);
-	kevent( kq, &ev, 1, NULL, 0, NULL );
+	EV_SET( &ev, SIGHUP, EVFILT_SIGNAL, EV_ADD, 0, 0, nullptr);
+	kevent( kq, &ev, 1, nullptr, 0, nullptr );
+	EV_SET( &ev, SIGINT, EVFILT_SIGNAL, EV_ADD, 0, 0, nullptr);
+	kevent( kq, &ev, 1, nullptr, 0, nullptr );
+	EV_SET( &ev, SIGTERM, EVFILT_SIGNAL, EV_ADD, 0, 0, nullptr);
+	kevent( kq, &ev, 1, nullptr, 0, nullptr );
+	EV_SET( &ev, SIGCHLD, EVFILT_SIGNAL, EV_ADD, 0, 0, nullptr);
+	kevent( kq, &ev, 1, nullptr, 0, nullptr );

 	int confd_fd = -1;
 	int conff_fd = -1;

@ -1383,7 +1383,7 @@ int main(int argc, char** argv) {
 	   pselect, but none blocks all signals while processing events */
 	sigprocmask(SIG_SETMASK, &full_mask, &normal_mask);
 #elif defined(__APPLE__) || defined(__FreeBSD__)
-	sigprocmask(0, NULL, &normal_mask);
+	sigprocmask(0, nullptr, &normal_mask);
 #endif

 #if defined(__APPLE__) || defined(__FreeBSD__)

@ -1474,10 +1474,10 @@ int main(int argc, char** argv) {
 		srfds = rfds;
 		nfds = 0;
 		if(timeout < 0) {
-			nfds = pselect(maxfd+1, &srfds, NULL, NULL, NULL, &normal_mask);
+			nfds = pselect(maxfd+1, &srfds, nullptr, nullptr, nullptr, &normal_mask);
 		}
 		else if(timeout > 0) {
-			nfds = pselect(maxfd+1, &srfds, NULL, NULL, &tv, &normal_mask);
+			nfds = pselect(maxfd+1, &srfds, nullptr, nullptr, &tv, &normal_mask);
 		}

 		if(nfds == 0) {

@ -1486,10 +1486,10 @@ int main(int argc, char** argv) {
 #elif defined(__APPLE__) || defined(__FreeBSD__)
 		int nev = 0;
 		if(timeout < 0) {
-			nev = kevent( kq, NULL, 0, &ev, 1, NULL );
+			nev = kevent( kq, nullptr, 0, &ev, 1, nullptr );
 		}
 		else if(timeout > 0) {
-			nev = kevent( kq, NULL, 0, &ev, 1, &tv );
+			nev = kevent( kq, nullptr, 0, &ev, 1, &tv );
 		}

 		if(nev == 0) {

@ -1503,8 +1503,8 @@ int main(int argc, char** argv) {
 			// This could be the conf dir or conf file
 			if ( ev.ident == confd_fd ) {
 				/* Changes in the directory holding the conf file; schedule a future timeout to reset watches and reload the conf */
-				EV_SET( &timeout, 1, EVFILT_TIMER, EV_ADD | EV_ONESHOT, 0, 200, NULL );
-				kevent( kq, &timeout, 1, NULL, 0, NULL );
+				EV_SET( &timeout, 1, EVFILT_TIMER, EV_ADD | EV_ONESHOT, 0, 200, nullptr );
+				kevent( kq, &timeout, 1, nullptr, 0, nullptr );
 			} else {
 				/* Direct writes to the conf file; reload! */
 				reload = true;

@ -1559,7 +1559,7 @@ int main(int argc, char** argv) {

 			/* Unblock signals */
 			signal(SIGCHLD, SIG_IGN);
-			sigprocmask(SIG_SETMASK, &normal_mask, NULL);
+			sigprocmask(SIG_SETMASK, &normal_mask, nullptr);

 			/* If daemonized, setsid() was called earlier so we can just kill our entire new process group */
 			if(daemonize) {

@ -1578,7 +1578,7 @@ int main(int argc, char** argv) {
 			/* Wait for all child processes (says POSIX.1-2001) */
 			/* POSIX.1-2001 specifies that if the disposition of SIGCHLD is set to SIG_IGN, then children that terminate do not become zombies and a call to wait()
 			   will block until all children have terminated, and then fail with errno set to ECHILD */
-			wait(NULL);
+			wait(nullptr);

 			unlink(lockfile.c_str());
 			exit(0);

@ -1617,7 +1617,7 @@ int main(int argc, char** argv) {
 				if(search != additional_watch_wds.end() && event->len && search->second.count(event->name)) {
 					log_msg(SevInfo, "Changes detected on watched symlink `%s': (%d, %#010x)\n", event->name, event->wd, event->mask);

-					char *redone_confpath = realpath(_confpath.c_str(), NULL);
+					char *redone_confpath = realpath(_confpath.c_str(), nullptr);
 					if(!redone_confpath) {
 						log_msg(SevInfo, "Error calling realpath on `%s', continuing...\n", _confpath.c_str());
 						// exit(1);
@ -46,7 +46,7 @@ class AsyncFileEIO : public IAsyncFile, public ReferenceCounted<AsyncFileEIO> {
 public:
 	static void init() {
 		eio_set_max_parallel(FLOW_KNOBS->EIO_MAX_PARALLELISM);
-		if (eio_init( &eio_want_poll, NULL )) {
+		if (eio_init( &eio_want_poll, nullptr )) {
 			TraceEvent("EioInitError").detail("ErrorNo", errno);
 			throw platform_error();
 		}

@ -423,8 +423,8 @@ private:

 	static void eio_want_poll() {
 		want_poll = 1;
-		// SOMEDAY: NULL for deferred error, no analysis of correctness (itp)
-		onMainThreadVoid([](){ poll_eio(); }, NULL, TaskPriority::PollEIO);
+		// SOMEDAY: nullptr for deferred error, no analysis of correctness (itp)
+		onMainThreadVoid([](){ poll_eio(); }, nullptr, TaskPriority::PollEIO);
 	}

 	static int eio_callback( eio_req* req ) {

@ -55,12 +55,12 @@ public:

 	HANDLE h = CreateFile( open_filename.c_str(),
 	                       GENERIC_READ | ((flags&OPEN_READWRITE) ? GENERIC_WRITE : 0),
-	                       FILE_SHARE_READ|FILE_SHARE_WRITE|FILE_SHARE_DELETE, NULL,
+	                       FILE_SHARE_READ|FILE_SHARE_WRITE|FILE_SHARE_DELETE, nullptr,
 	                       (flags&OPEN_EXCLUSIVE) ? CREATE_NEW :
 	                       (flags&OPEN_CREATE) ? OPEN_ALWAYS :
 	                       OPEN_EXISTING,
 	                       FILE_ATTRIBUTE_NORMAL | FILE_FLAG_OVERLAPPED | FILE_FLAG_NO_BUFFERING,
-	                       NULL );
+	                       nullptr );
 	if (h == INVALID_HANDLE_VALUE) {
 		bool notFound = GetLastError() == ERROR_FILE_NOT_FOUND;
 		Error e = notFound ? file_not_found() : io_error();

@ -141,7 +141,7 @@ public:
 	}
 	Future<Void> truncate(int64_t size) override {
 		// FIXME: Possibly use SetFileInformationByHandle( file.native_handle(), FileEndOfFileInfo, ... ) instead
-		if (!SetFilePointerEx( file.native_handle(), *(LARGE_INTEGER*)&size, NULL, FILE_BEGIN ))
+		if (!SetFilePointerEx( file.native_handle(), *(LARGE_INTEGER*)&size, nullptr, FILE_BEGIN ))
 			throw io_error();
 		if (!SetEndOfFile(file.native_handle()))
 			throw io_error();
@ -177,7 +177,7 @@ Future< REPLY_TYPE(Request) > loadBalance(
 	Request request = Request(),
 	TaskPriority taskID = TaskPriority::DefaultPromiseEndpoint,
 	bool atMostOnce = false, // if true, throws request_maybe_delivered() instead of retrying automatically
-	QueueModel* model = NULL)
+	QueueModel* model = nullptr)
 {
 	state Future<Optional<REPLY_TYPE(Request)>> firstRequest;
 	state Optional<uint64_t> firstRequestEndpoint;

@ -296,7 +296,7 @@ Future< REPLY_TYPE(Request) > loadBalance(
 		}

 		// Find an alternative, if any, that is not failed, starting with nextAlt
-		state RequestStream<Request> const* stream = NULL;
+		state RequestStream<Request> const* stream = nullptr;
 		for(int alternativeNum=0; alternativeNum<alternatives->size(); alternativeNum++) {
 			int useAlt = nextAlt;
 			if( nextAlt == startAlt )

@ -309,7 +309,7 @@ Future< REPLY_TYPE(Request) > loadBalance(
 				break;
 			nextAlt = (nextAlt+1) % alternatives->size();
 			if(nextAlt == startAlt) triedAllOptions = true;
-			stream=NULL;
+			stream=nullptr;
 		}

 		if(!stream && !firstRequest.isValid() ) {

@ -493,7 +493,7 @@ Future< REPLY_TYPE(Request) > basicLoadBalance(
 	state int useAlt;
 	loop {
 		// Find an alternative, if any, that is not failed, starting with nextAlt
-		state RequestStream<Request> const* stream = NULL;
+		state RequestStream<Request> const* stream = nullptr;
 		for(int alternativeNum=0; alternativeNum<alternatives->size(); alternativeNum++) {
 			useAlt = nextAlt;
 			if( nextAlt == startAlt )

@ -505,7 +505,7 @@ Future< REPLY_TYPE(Request) > basicLoadBalance(
 			if (!IFailureMonitor::failureMonitor().getState( stream->getEndpoint() ).failed)
 				break;
 			nextAlt = (nextAlt+1) % alternatives->size();
-			stream=NULL;
+			stream=nullptr;
 		}

 		if(!stream) {
@ -28,11 +28,11 @@

 template <class T>
 Reference<T> loadPlugin( std::string const& plugin_name ) {
-	void *(*get_plugin)(const char*) = NULL;
+	void *(*get_plugin)(const char*) = nullptr;
 	void* plugin = loadLibrary( plugin_name.c_str() );
 	if (plugin)
 		get_plugin = (void*(*)(const char*))loadFunction( plugin, "get_plugin" );
-	return (get_plugin) ? Reference<T>( (T*)get_plugin( T::get_plugin_type_name_and_version() ) ) : Reference<T>( NULL );
+	return (get_plugin) ? Reference<T>( (T*)get_plugin( T::get_plugin_type_name_and_version() ) ) : Reference<T>( nullptr );
 }

 #endif
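loadPlugin resolves one exported factory symbol, get_plugin, and nullptr-checks each step before constructing the Reference; on POSIX platforms the loadLibrary/loadFunction helpers bottom out in dlopen/dlsym. A sketch of the same cascade expressed directly against the POSIX API (the library path and type-and-version string are hypothetical):

#include <cstdio>
#include <dlfcn.h>

int main() {
    void* (*get_plugin)(const char*) = nullptr;
    void* plugin = dlopen("./libfdb_plugin.so", RTLD_LAZY); // hypothetical plugin path
    if (plugin)
        get_plugin = reinterpret_cast<void* (*)(const char*)>(dlsym(plugin, "get_plugin"));
    if (!get_plugin) {
        std::fprintf(stderr, "no plugin: %s\n", dlerror());
        return 1;
    }
    void* instance = get_plugin("ITLSPlugin,2"); // hypothetical type-and-version string
    std::printf("plugin instance at %p\n", instance);
    return 0;
}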
@@ -63,9 +63,9 @@ ProcessClass::Fitness ProcessClass::machineClassFitness( ClusterRole role ) cons
 default:
 return ProcessClass::NeverAssign;
 }
-case ProcessClass::Proxy:
+case ProcessClass::CommitProxy:
 switch( _class ) {
-case ProcessClass::ProxyClass:
+case ProcessClass::CommitProxyClass:
 return ProcessClass::BestFit;
 case ProcessClass::StatelessClass:
 return ProcessClass::GoodFit;
@@ -92,7 +92,7 @@ ProcessClass::Fitness ProcessClass::machineClassFitness( ClusterRole role ) cons
 return ProcessClass::GoodFit;
 case ProcessClass::UnsetClass:
 return ProcessClass::UnsetFit;
-case ProcessClass::ProxyClass:
+case ProcessClass::CommitProxyClass:
 return ProcessClass::OkayFit;
 case ProcessClass::ResolutionClass:
 return ProcessClass::OkayFit;
@@ -192,7 +192,7 @@ ProcessClass::Fitness ProcessClass::machineClassFitness( ClusterRole role ) cons
 return ProcessClass::OkayFit;
 case ProcessClass::TransactionClass:
 return ProcessClass::OkayFit;
-case ProcessClass::ProxyClass:
+case ProcessClass::CommitProxyClass:
 return ProcessClass::OkayFit;
 case ProcessClass::GrvProxyClass:
 return ProcessClass::OkayFit;
@@ -33,7 +33,7 @@ struct ProcessClass {
 TransactionClass,
 ResolutionClass,
 TesterClass,
-ProxyClass, // Process class of CommitProxy
+CommitProxyClass,
 GrvProxyClass,
 MasterClass,
 StatelessClass,
@@ -53,7 +53,7 @@ struct ProcessClass {
 enum ClusterRole {
 Storage,
 TLog,
-Proxy,
+CommitProxy,
 GrvProxy,
 Master,
 Resolver,
@@ -77,7 +77,7 @@ public:
 if (s=="storage") _class = StorageClass;
 else if (s=="transaction") _class = TransactionClass;
 else if (s=="resolution") _class = ResolutionClass;
-else if (s=="proxy") _class = ProxyClass;
+else if (s=="commit_proxy") _class = CommitProxyClass;
 else if (s=="grv_proxy") _class = GrvProxyClass;
 else if (s=="master") _class = MasterClass;
 else if (s=="test") _class = TesterClass;
@@ -99,7 +99,7 @@ public:
 if (classStr=="storage") _class = StorageClass;
 else if (classStr=="transaction") _class = TransactionClass;
 else if (classStr=="resolution") _class = ResolutionClass;
-else if (classStr=="proxy") _class = ProxyClass;
+else if (classStr=="commit_proxy") _class = CommitProxyClass;
 else if (classStr=="grv_proxy") _class = GrvProxyClass;
 else if (classStr=="master") _class = MasterClass;
 else if (classStr=="test") _class = TesterClass;
@@ -137,7 +137,7 @@ public:
 case StorageClass: return "storage";
 case TransactionClass: return "transaction";
 case ResolutionClass: return "resolution";
-case ProxyClass: return "proxy";
+case CommitProxyClass: return "commit_proxy";
 case GrvProxyClass: return "grv_proxy";
 case MasterClass: return "master";
 case TesterClass: return "test";
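
These ProcessClass hunks keep the textual form ("proxy" -> "commit_proxy") and the enumerator (ProxyClass -> CommitProxyClass) renamed in lock step, in both the parsing constructors and toString. A compact sketch of why the round trip matters, using a hypothetical standalone enum rather than the real ProcessClass:

#include <cassert>
#include <string>

enum class Role { Storage, Transaction, CommitProxy, GrvProxy, Unset };

Role roleFromString(const std::string& s) {
    if (s == "storage") return Role::Storage;
    if (s == "transaction") return Role::Transaction;
    if (s == "commit_proxy") return Role::CommitProxy;  // was "proxy" before the rename
    if (s == "grv_proxy") return Role::GrvProxy;
    return Role::Unset;
}

std::string roleToString(Role r) {
    switch (r) {
    case Role::Storage: return "storage";
    case Role::Transaction: return "transaction";
    case Role::CommitProxy: return "commit_proxy";
    case Role::GrvProxy: return "grv_proxy";
    default: return "unset";
    }
}

int main() {
    // If parser and printer disagree, stored configuration strings stop resolving.
    assert(roleFromString(roleToString(Role::CommitProxy)) == Role::CommitProxy);
    return 0;
}
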
@@ -65,7 +65,7 @@ Future< Reference<class IAsyncFile> > Net2FileSystem::open( std::string filename
 // EIO.
 if ((flags & IAsyncFile::OPEN_UNBUFFERED) && !(flags & IAsyncFile::OPEN_NO_AIO) &&
 !FLOW_KNOBS->DISABLE_POSIX_KERNEL_AIO)
-f = AsyncFileKAIO::open(filename, flags, mode, NULL);
+f = AsyncFileKAIO::open(filename, flags, mode, nullptr);
 else
 #endif
 f = Net2AsyncFile::open(filename, flags, mode, static_cast<boost::asio::io_service*> ((void*) g_network->global(INetwork::enASIOService)));
@@ -1057,8 +1057,8 @@ void sleeptest() {
 timespec ts;
 ts.tv_sec = times[j] / 1000000;
 ts.tv_nsec = (times[j] % 1000000)*1000;
-clock_nanosleep(CLOCK_MONOTONIC, 0, &ts, NULL);
-//nanosleep(&ts, NULL);
+clock_nanosleep(CLOCK_MONOTONIC, 0, &ts, nullptr);
+//nanosleep(&ts, nullptr);
 }
 double t = timer() - b;
 printf("Sleep test (%dus x %d): %0.1f\n", times[j], n, double(t)/n*1e6);
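
The sleeptest hunk times how much longer a short clock_nanosleep actually takes than requested. A self-contained sketch of the same measurement for POSIX systems; flow's timer() is replaced here by clock_gettime, and the request size is an arbitrary example:

#include <cstdio>
#include <ctime>

int main() {
    const long requestedUs = 100;  // requested sleep per iteration, in microseconds
    const int n = 1000;
    timespec start, end, ts;
    clock_gettime(CLOCK_MONOTONIC, &start);
    for (int i = 0; i < n; i++) {
        ts.tv_sec = requestedUs / 1000000;
        ts.tv_nsec = (requestedUs % 1000000) * 1000;
        clock_nanosleep(CLOCK_MONOTONIC, 0, &ts, nullptr);
    }
    clock_gettime(CLOCK_MONOTONIC, &end);
    double elapsedUs = (end.tv_sec - start.tv_sec) * 1e6 + (end.tv_nsec - start.tv_nsec) * 1e-3;
    std::printf("requested %ldus, observed %0.1fus per sleep\n", requestedUs, elapsedUs / n);
    return 0;
}
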
@@ -115,7 +115,7 @@ public:

 Future<T> getFuture() const { sav->addFutureRef(); return Future<T>(sav); }
 bool isSet() { return sav->isSet(); }
-bool isValid() const { return sav != NULL; }
+bool isValid() const { return sav != nullptr; }
 ReplyPromise() : sav(new NetSAV<T>(0, 1)) {}
 ReplyPromise(const ReplyPromise& rhs) : sav(rhs.sav) { sav->addPromiseRef(); }
 ReplyPromise(ReplyPromise&& rhs) noexcept : sav(rhs.sav) { rhs.sav = 0; }
@@ -144,7 +144,7 @@ public:
 }

 // Beware, these operations are very unsafe
-SAV<T>* extractRawPointer() { auto ptr = sav; sav = NULL; return ptr; }
+SAV<T>* extractRawPointer() { auto ptr = sav; sav = nullptr; return ptr; }
 explicit ReplyPromise<T>(SAV<T>* ptr) : sav(ptr) {}

 int getFutureReferenceCount() const { return sav->getFutureReferenceCount(); }
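
Both ReplyPromise hunks touch the same ownership idiom: the promise holds a raw SAV<T>*, and every operation that transfers ownership (move construction, extractRawPointer) must null the source so a destructor cannot release the state twice. A stripped-down sketch of the idiom, with ReplyPromise's reference counting elided and plain delete standing in for it:

// Minimal owning handle: moving transfers the pointer and nulls the source.
template <class T>
struct Handle {
    T* sav = nullptr;
    explicit Handle(T* p) : sav(p) {}
    Handle(Handle&& r) noexcept : sav(r.sav) { r.sav = nullptr; }
    Handle& operator=(Handle&& r) noexcept {
        if (this != &r) { delete sav; sav = r.sav; r.sav = nullptr; }
        return *this;
    }
    Handle(const Handle&) = delete;             // single owner: no copies
    Handle& operator=(const Handle&) = delete;
    bool isValid() const { return sav != nullptr; }
    T* extractRawPointer() { T* p = sav; sav = nullptr; return p; }  // caller now owns it
    ~Handle() { delete sav; }
};
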
@@ -1062,7 +1062,7 @@ public:
 }
 }
 }
-return canKillProcesses(processesLeft, processesDead, KillInstantly, NULL);
+return canKillProcesses(processesLeft, processesDead, KillInstantly, nullptr);
 }

 virtual bool datacenterDead(Optional<Standalone<StringRef>> dcId) const
@@ -1622,7 +1622,7 @@ public:
 }

 Sim2() : time(0.0), timerTime(0.0), taskCount(0), yielded(false), yield_limit(0), currentTaskID(TaskPriority::Zero) {
-// Not letting currentProcess be NULL eliminates some annoying special cases
+// Not letting currentProcess be nullptr eliminates some annoying special cases
 currentProcess = new ProcessInfo("NoMachine", LocalityData(Optional<Standalone<StringRef>>(), StringRef(), StringRef(), StringRef()), ProcessClass(), {NetworkAddress()}, this, "", "");
 g_network = net2 = newNet2(TLSConfig(), false, true);
 g_network->addStopCallback( Net2FileSystem::stop );
@@ -1813,12 +1813,12 @@ Future<Void> waitUntilDiskReady( Reference<DiskParameters> diskParameters, int64

 int sf_open( const char* filename, int flags, int convFlags, int mode ) {
 HANDLE wh = CreateFile( filename, GENERIC_READ | ((flags&IAsyncFile::OPEN_READWRITE) ? GENERIC_WRITE : 0),
-FILE_SHARE_READ|FILE_SHARE_WRITE|FILE_SHARE_DELETE, NULL,
+FILE_SHARE_READ|FILE_SHARE_WRITE|FILE_SHARE_DELETE, nullptr,
 (flags&IAsyncFile::OPEN_EXCLUSIVE) ? CREATE_NEW :
 (flags&IAsyncFile::OPEN_CREATE) ? OPEN_ALWAYS :
 OPEN_EXISTING,
 FILE_ATTRIBUTE_NORMAL,
-NULL );
+nullptr );
 int h = -1;
 if (wh != INVALID_HANDLE_VALUE) h = _open_osfhandle( (intptr_t)wh, convFlags );
 else errno = GetLastError() == ERROR_FILE_NOT_FOUND ? ENOENT : EFAULT;
@@ -34,7 +34,7 @@ enum ClogMode { ClogDefault, ClogAll, ClogSend, ClogReceive };

 class ISimulator : public INetwork {
 public:
-ISimulator() : desiredCoordinators(1), physicalDatacenters(1), processesPerMachine(0), listenersPerProcess(1), isStopped(false), lastConnectionFailure(0), connectionFailuresDisableDuration(0), speedUpSimulation(false), allSwapsDisabled(false), backupAgents(WaitForType), drAgents(WaitForType), extraDB(NULL), allowLogSetKills(true), usableRegions(1) {}
+ISimulator() : desiredCoordinators(1), physicalDatacenters(1), processesPerMachine(0), listenersPerProcess(1), isStopped(false), lastConnectionFailure(0), connectionFailuresDisableDuration(0), speedUpSimulation(false), allSwapsDisabled(false), backupAgents(WaitForType), drAgents(WaitForType), extraDB(nullptr), allowLogSetKills(true), usableRegions(1) {}

 // Order matters!
 enum KillType { KillInstantly, InjectFaults, RebootAndDelete, RebootProcessAndDelete, Reboot, RebootProcess, None };
@@ -97,7 +97,8 @@ public:
 case ProcessClass::StorageClass: return true;
 case ProcessClass::TransactionClass: return true;
 case ProcessClass::ResolutionClass: return false;
-case ProcessClass::ProxyClass: return false;
+case ProcessClass::CommitProxyClass:
+return false;
 case ProcessClass::GrvProxyClass:
 return false;
 case ProcessClass::MasterClass:
@@ -163,9 +164,9 @@ public:
 virtual void rebootProcess(Optional<Standalone<StringRef>> zoneId, bool allProcesses ) = 0;
 virtual void rebootProcess( ProcessInfo* process, KillType kt ) = 0;
 virtual void killInterface( NetworkAddress address, KillType ) = 0;
-virtual bool killMachine(Optional<Standalone<StringRef>> machineId, KillType kt, bool forceKill = false, KillType* ktFinal = NULL) = 0;
-virtual bool killZone(Optional<Standalone<StringRef>> zoneId, KillType kt, bool forceKill = false, KillType* ktFinal = NULL) = 0;
-virtual bool killDataCenter(Optional<Standalone<StringRef>> dcId, KillType kt, bool forceKill = false, KillType* ktFinal = NULL) = 0;
+virtual bool killMachine(Optional<Standalone<StringRef>> machineId, KillType kt, bool forceKill = false, KillType* ktFinal = nullptr) = 0;
+virtual bool killZone(Optional<Standalone<StringRef>> zoneId, KillType kt, bool forceKill = false, KillType* ktFinal = nullptr) = 0;
+virtual bool killDataCenter(Optional<Standalone<StringRef>> dcId, KillType kt, bool forceKill = false, KillType* ktFinal = nullptr) = 0;
 //virtual KillType getMachineKillState( UID zoneID ) = 0;
 virtual bool canKillProcesses(std::vector<ProcessInfo*> const& availableProcesses, std::vector<ProcessInfo*> const& deadProcesses, KillType kt, KillType* newKillType) const = 0;
 virtual bool isAvailable() const = 0;
@@ -43,8 +43,8 @@ Reference<StorageInfo> getStorageInfo(UID id, std::map<UID, Reference<StorageInf
 }

 // It is incredibly important that any modifications to txnStateStore are done in such a way that
-// the same operations will be done on all proxies at the same time. Otherwise, the data stored in
-// txnStateStore will become corrupted.
+// the same operations will be done on all commit proxies at the same time. Otherwise, the data
+// stored in txnStateStore will become corrupted.
 void applyMetadataMutations(UID const& dbgid, Arena& arena, VectorRef<MutationRef> const& mutations,
 IKeyValueStore* txnStateStore, LogPushData* toCommit, bool& confChange,
 Reference<ILogSystem> logSystem, Version popVersion,
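
The reworded comment above states the invariant behind this file: every commit proxy applies the same metadata mutations in the same order to its own copy of txnStateStore, so all copies stay identical. A toy sketch of that property, with std::map standing in for the key-value store:

#include <cassert>
#include <map>
#include <string>
#include <vector>

using Store = std::map<std::string, std::string>;
struct Mutation { std::string key, value; };

// Every replica applies the identical mutation stream in the identical order...
void apply(Store& store, const std::vector<Mutation>& mutations) {
    for (const auto& m : mutations) store[m.key] = m.value;
}

int main() {
    std::vector<Mutation> stream = { {"k1", "a"}, {"k2", "b"}, {"k1", "c"} };
    Store proxy1, proxy2;
    apply(proxy1, stream);
    apply(proxy2, stream);
    assert(proxy1 == proxy2);  // ...so the replicated state cannot diverge
    return 0;
}
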
@@ -21,7 +21,7 @@
 #include "fdbclient/BackupAgent.actor.h"
 #include "fdbclient/BackupContainer.h"
 #include "fdbclient/DatabaseContext.h"
-#include "fdbclient/MasterProxyInterface.h"
+#include "fdbclient/CommitProxyInterface.h"
 #include "fdbclient/SystemData.h"
 #include "fdbserver/BackupInterface.h"
 #include "fdbserver/BackupProgress.actor.h"
@@ -46,7 +46,7 @@ set(FDBSERVER_SRCS
 LogSystemDiskQueueAdapter.h
 LogSystemPeekCursor.actor.cpp
 MasterInterface.h
-MasterProxyServer.actor.cpp
+CommitProxyServer.actor.cpp
 masterserver.actor.cpp
 MutationTracking.h
 MutationTracking.cpp
@@ -753,20 +753,21 @@ public:
 }
 }

-auto first_proxy = getWorkerForRoleInDatacenter(dcId, ProcessClass::Proxy, ProcessClass::ExcludeFit,
-req.configuration, id_used);
+auto first_commit_proxy = getWorkerForRoleInDatacenter(dcId, ProcessClass::CommitProxy,
+ProcessClass::ExcludeFit, req.configuration, id_used);
 auto first_grv_proxy = getWorkerForRoleInDatacenter(dcId, ProcessClass::GrvProxy, ProcessClass::ExcludeFit,
 req.configuration, id_used);
 auto first_resolver = getWorkerForRoleInDatacenter(dcId, ProcessClass::Resolver, ProcessClass::ExcludeFit,
 req.configuration, id_used);

-auto proxies = getWorkersForRoleInDatacenter(dcId, ProcessClass::Proxy, req.configuration.getDesiredProxies(),
-req.configuration, id_used, first_proxy);
+auto commit_proxies =
+getWorkersForRoleInDatacenter(dcId, ProcessClass::CommitProxy, req.configuration.getDesiredCommitProxies(),
+req.configuration, id_used, first_commit_proxy);
 auto grv_proxies =
 getWorkersForRoleInDatacenter(dcId, ProcessClass::GrvProxy, req.configuration.getDesiredGrvProxies(),
 req.configuration, id_used, first_grv_proxy);
 auto resolvers = getWorkersForRoleInDatacenter( dcId, ProcessClass::Resolver, req.configuration.getDesiredResolvers(), req.configuration, id_used, first_resolver );
-for (int i = 0; i < proxies.size(); i++) result.masterProxies.push_back(proxies[i].interf);
+for (int i = 0; i < commit_proxies.size(); i++) result.commitProxies.push_back(commit_proxies[i].interf);
 for (int i = 0; i < grv_proxies.size(); i++) result.grvProxies.push_back(grv_proxies[i].interf);
 for(int i = 0; i < resolvers.size(); i++)
 result.resolvers.push_back(resolvers[i].interf);
@@ -800,9 +801,9 @@ public:
 RoleFitness(SERVER_KNOBS->EXPECTED_TLOG_FITNESS, req.configuration.getDesiredSatelliteLogs(dcId),
 ProcessClass::TLog)
 .betterCount(RoleFitness(satelliteLogs, ProcessClass::TLog))) ||
-RoleFitness(SERVER_KNOBS->EXPECTED_PROXY_FITNESS, req.configuration.getDesiredProxies(),
-ProcessClass::Proxy)
-.betterCount(RoleFitness(proxies, ProcessClass::Proxy)) ||
+RoleFitness(SERVER_KNOBS->EXPECTED_COMMIT_PROXY_FITNESS, req.configuration.getDesiredCommitProxies(),
+ProcessClass::CommitProxy)
+.betterCount(RoleFitness(commit_proxies, ProcessClass::CommitProxy)) ||
 RoleFitness(SERVER_KNOBS->EXPECTED_GRV_PROXY_FITNESS, req.configuration.getDesiredGrvProxies(),
 ProcessClass::GrvProxy)
 .betterCount(RoleFitness(grv_proxies, ProcessClass::GrvProxy)) ||
@@ -911,22 +912,22 @@ public:
 try {
 //SOMEDAY: recruitment in other DCs besides the clusterControllerDcID will not account for the processes used by the master and cluster controller properly.
 auto used = id_used;
-auto first_proxy = getWorkerForRoleInDatacenter(dcId, ProcessClass::Proxy, ProcessClass::ExcludeFit,
-req.configuration, used);
+auto first_commit_proxy = getWorkerForRoleInDatacenter(
+dcId, ProcessClass::CommitProxy, ProcessClass::ExcludeFit, req.configuration, used);
 auto first_grv_proxy = getWorkerForRoleInDatacenter(
 dcId, ProcessClass::GrvProxy, ProcessClass::ExcludeFit, req.configuration, used);
 auto first_resolver = getWorkerForRoleInDatacenter(
 dcId, ProcessClass::Resolver, ProcessClass::ExcludeFit, req.configuration, used);

-auto proxies =
-getWorkersForRoleInDatacenter(dcId, ProcessClass::Proxy, req.configuration.getDesiredProxies(),
-req.configuration, used, first_proxy);
+auto commit_proxies = getWorkersForRoleInDatacenter(dcId, ProcessClass::CommitProxy,
+req.configuration.getDesiredCommitProxies(),
+req.configuration, used, first_commit_proxy);
 auto grv_proxies = getWorkersForRoleInDatacenter(dcId, ProcessClass::GrvProxy,
 req.configuration.getDesiredGrvProxies(),
 req.configuration, used, first_grv_proxy);
 auto resolvers = getWorkersForRoleInDatacenter( dcId, ProcessClass::Resolver, req.configuration.getDesiredResolvers(), req.configuration, used, first_resolver );

-RoleFitnessPair fitness(RoleFitness(proxies, ProcessClass::Proxy),
+RoleFitnessPair fitness(RoleFitness(commit_proxies, ProcessClass::CommitProxy),
 RoleFitness(grv_proxies, ProcessClass::GrvProxy),
 RoleFitness(resolvers, ProcessClass::Resolver));

@@ -936,8 +937,8 @@ public:
 for (int i = 0; i < resolvers.size(); i++) {
 result.resolvers.push_back(resolvers[i].interf);
 }
-for (int i = 0; i < proxies.size(); i++) {
-result.masterProxies.push_back(proxies[i].interf);
+for (int i = 0; i < commit_proxies.size(); i++) {
+result.commitProxies.push_back(commit_proxies[i].interf);
 }
 for (int i = 0; i < grv_proxies.size(); i++) {
 result.grvProxies.push_back(grv_proxies[i].interf);
@@ -982,8 +983,8 @@ public:
 .detail("Replication", req.configuration.tLogReplicationFactor)
 .detail("DesiredLogs", req.configuration.getDesiredLogs())
 .detail("ActualLogs", result.tLogs.size())
-.detail("DesiredProxies", req.configuration.getDesiredProxies())
-.detail("ActualProxies", result.masterProxies.size())
+.detail("DesiredCommitProxies", req.configuration.getDesiredCommitProxies())
+.detail("ActualCommitProxies", result.commitProxies.size())
 .detail("DesiredGrvProxies", req.configuration.getDesiredGrvProxies())
 .detail("ActualGrvProxies", result.grvProxies.size())
 .detail("DesiredResolvers", req.configuration.getDesiredResolvers())
@@ -993,8 +994,8 @@ public:
 (RoleFitness(SERVER_KNOBS->EXPECTED_TLOG_FITNESS, req.configuration.getDesiredLogs(),
 ProcessClass::TLog)
 .betterCount(RoleFitness(tlogs, ProcessClass::TLog)) ||
-RoleFitness(SERVER_KNOBS->EXPECTED_PROXY_FITNESS, req.configuration.getDesiredProxies(),
-ProcessClass::Proxy)
+RoleFitness(SERVER_KNOBS->EXPECTED_COMMIT_PROXY_FITNESS, req.configuration.getDesiredCommitProxies(),
+ProcessClass::CommitProxy)
 .betterCount(bestFitness.proxy) ||
 RoleFitness(SERVER_KNOBS->EXPECTED_GRV_PROXY_FITNESS, req.configuration.getDesiredGrvProxies(),
 ProcessClass::GrvProxy)
@@ -1028,7 +1029,8 @@ public:
 }

 getWorkerForRoleInDatacenter( regions[0].dcId, ProcessClass::Resolver, ProcessClass::ExcludeFit, db.config, id_used, true );
-getWorkerForRoleInDatacenter( regions[0].dcId, ProcessClass::Proxy, ProcessClass::ExcludeFit, db.config, id_used, true );
+getWorkerForRoleInDatacenter(regions[0].dcId, ProcessClass::CommitProxy, ProcessClass::ExcludeFit,
+db.config, id_used, true);
 getWorkerForRoleInDatacenter(regions[0].dcId, ProcessClass::GrvProxy, ProcessClass::ExcludeFit, db.config,
 id_used, true);

@@ -1129,15 +1131,13 @@ public:
 }
 }

-// Get proxy classes
-std::vector<WorkerDetails> proxyClasses;
-for(auto& it : dbi.client.masterProxies) {
-auto masterProxyWorker = id_worker.find(it.processId);
-if ( masterProxyWorker == id_worker.end() )
-return false;
-if ( masterProxyWorker->second.priorityInfo.isExcluded )
-return true;
-proxyClasses.push_back(masterProxyWorker->second.details);
+// Get commit proxy classes
+std::vector<WorkerDetails> commitProxyClasses;
+for (auto& it : dbi.client.commitProxies) {
+auto commitProxyWorker = id_worker.find(it.processId);
+if (commitProxyWorker == id_worker.end()) return false;
+if (commitProxyWorker->second.priorityInfo.isExcluded) return true;
+commitProxyClasses.push_back(commitProxyWorker->second.details);
 }

 // Get grv proxy classes
@@ -1285,25 +1285,25 @@ public:
 if(oldLogRoutersFit < newLogRoutersFit) return false;

 // Check proxy/grvProxy/resolver fitness
-RoleFitnessPair oldInFit(RoleFitness(proxyClasses, ProcessClass::Proxy),
+RoleFitnessPair oldInFit(RoleFitness(commitProxyClasses, ProcessClass::CommitProxy),
 RoleFitness(grvProxyClasses, ProcessClass::GrvProxy),
 RoleFitness(resolverClasses, ProcessClass::Resolver));

-auto first_proxy = getWorkerForRoleInDatacenter(clusterControllerDcId, ProcessClass::Proxy,
+auto first_commit_proxy = getWorkerForRoleInDatacenter(clusterControllerDcId, ProcessClass::CommitProxy,
 ProcessClass::ExcludeFit, db.config, id_used, true);
 auto first_grv_proxy = getWorkerForRoleInDatacenter(clusterControllerDcId, ProcessClass::GrvProxy,
 ProcessClass::ExcludeFit, db.config, id_used, true);
 auto first_resolver = getWorkerForRoleInDatacenter(clusterControllerDcId, ProcessClass::Resolver,
 ProcessClass::ExcludeFit, db.config, id_used, true);
-auto proxies =
-getWorkersForRoleInDatacenter(clusterControllerDcId, ProcessClass::Proxy, db.config.getDesiredProxies(),
-db.config, id_used, first_proxy, true);
+auto commit_proxies = getWorkersForRoleInDatacenter(clusterControllerDcId, ProcessClass::CommitProxy,
+db.config.getDesiredCommitProxies(), db.config, id_used,
+first_commit_proxy, true);
 auto grv_proxies =
 getWorkersForRoleInDatacenter(clusterControllerDcId, ProcessClass::GrvProxy,
 db.config.getDesiredGrvProxies(), db.config, id_used, first_grv_proxy, true);
 auto resolvers = getWorkersForRoleInDatacenter( clusterControllerDcId, ProcessClass::Resolver, db.config.getDesiredResolvers(), db.config, id_used, first_resolver, true );

-RoleFitnessPair newInFit(RoleFitness(proxies, ProcessClass::Proxy),
+RoleFitnessPair newInFit(RoleFitness(commit_proxies, ProcessClass::CommitProxy),
 RoleFitness(grv_proxies, ProcessClass::GrvProxy),
 RoleFitness(resolvers, ProcessClass::Resolver));
 if (oldInFit.proxy.betterFitness(newInFit.proxy) || oldInFit.grvProxy.betterFitness(newInFit.grvProxy) ||
@@ -1358,7 +1358,7 @@ public:
 if (tlog.present() && tlog.interf().filteredLocality.processId() == processId) return true;
 }
 }
-for (const MasterProxyInterface& interf : dbInfo.client.masterProxies) {
+for (const CommitProxyInterface& interf : dbInfo.client.commitProxies) {
 if (interf.processId == processId) return true;
 }
 for (const GrvProxyInterface& interf : dbInfo.client.grvProxies) {
@@ -1393,7 +1393,7 @@ public:
 }
 }
 }
-for (const MasterProxyInterface& interf : dbInfo.client.masterProxies) {
+for (const CommitProxyInterface& interf : dbInfo.client.commitProxies) {
 ASSERT(interf.processId.present());
 idUsed[interf.processId]++;
 }
@@ -1967,7 +1967,7 @@ void clusterRegisterMaster( ClusterControllerData* self, RegisterMasterRequest c
 .detail("Resolvers", req.resolvers.size())
 .detail("RecoveryState", (int)req.recoveryState)
 .detail("RegistrationCount", req.registrationCount)
-.detail("MasterProxies", req.masterProxies.size())
+.detail("CommitProxies", req.commitProxies.size())
 .detail("GrvProxies", req.grvProxies.size())
 .detail("RecoveryCount", req.recoveryCount)
 .detail("Stalled", req.recoveryStalled)
@@ -2022,11 +2022,12 @@ void clusterRegisterMaster( ClusterControllerData* self, RegisterMasterRequest c
 }

 // Construct the client information
-if (db->clientInfo->get().masterProxies != req.masterProxies || db->clientInfo->get().grvProxies != req.grvProxies) {
+if (db->clientInfo->get().commitProxies != req.commitProxies ||
+db->clientInfo->get().grvProxies != req.grvProxies) {
 isChanged = true;
 ClientDBInfo clientInfo;
 clientInfo.id = deterministicRandom()->randomUniqueID();
-clientInfo.masterProxies = req.masterProxies;
+clientInfo.commitProxies = req.commitProxies;
 clientInfo.grvProxies = req.grvProxies;
 clientInfo.clientTxnInfoSampleRate = db->clientInfo->get().clientTxnInfoSampleRate;
 clientInfo.clientTxnInfoSizeLimit = db->clientInfo->get().clientTxnInfoSizeLimit;
@@ -1,5 +1,5 @@
 /*
- * MasterProxyServer.actor.cpp
+ * CommitProxyServer.actor.cpp
 *
 * This source file is part of the FoundationDB open source project
 *
@@ -25,7 +25,7 @@
 #include "fdbclient/Atomic.h"
 #include "fdbclient/FDBTypes.h"
 #include "fdbclient/Knobs.h"
-#include "fdbclient/MasterProxyInterface.h"
+#include "fdbclient/CommitProxyInterface.h"
 #include "fdbclient/NativeAPI.actor.h"
 #include "fdbclient/SystemData.h"
 #include "fdbrpc/sim_validation.h"
@@ -42,7 +42,6 @@
 #include "fdbserver/ProxyCommitData.actor.h"
 #include "fdbserver/RatekeeperInterface.h"
 #include "fdbserver/RecoveryState.h"
-#include "fdbserver/ServerDBInfo.h"
 #include "fdbserver/WaitFailure.h"
 #include "fdbserver/WorkerInterface.actor.h"
 #include "flow/ActorCollection.h"
@@ -119,7 +118,7 @@ struct ResolutionRequestBuilder {
 void addTransaction(CommitTransactionRequest& trRequest, int transactionNumberInBatch) {
 auto& trIn = trRequest.transaction;
 // SOMEDAY: There are a couple of unnecessary O( # resolvers ) steps here
-outTr.assign(requests.size(), NULL);
+outTr.assign(requests.size(), nullptr);
 ASSERT( transactionNumberInBatch >= 0 && transactionNumberInBatch < 32768 );

 bool isTXNStateTransaction = false;
@@ -229,7 +228,7 @@ ACTOR Future<Void> commitBatcher(ProxyCommitData *commitData, PromiseStream<std:
 ++commitData->stats.txnCommitIn;

 if(req.debugID.present()) {
-g_traceBatch.addEvent("CommitDebug", req.debugID.get().first(), "MasterProxyServer.batcher");
+g_traceBatch.addEvent("CommitDebug", req.debugID.get().first(), "CommitProxyServer.batcher");
 }

 if(!batch.size()) {
@@ -331,7 +330,7 @@ ACTOR Future<Void> addBackupMutations(ProxyCommitData* self, std::map<Key, Mutat

 MutationRef backupMutation;
 backupMutation.type = MutationRef::SetValue;
-uint32_t* partBuffer = NULL;
+uint32_t* partBuffer = nullptr;

 for (int part = 0; part * CLIENT_KNOBS->MUTATION_BLOCK_SIZE < val.size(); part++) {

@@ -512,11 +511,7 @@ void CommitBatchContext::setupTraceBatch() {
 }

 if (debugID.present()) {
-g_traceBatch.addEvent(
-"CommitDebug",
-debugID.get().first(),
-"MasterProxyServer.commitBatch.Before"
-);
+g_traceBatch.addEvent("CommitDebug", debugID.get().first(), "CommitProxyServer.commitBatch.Before");
 }
 }

@@ -546,10 +541,8 @@ ACTOR Future<Void> preresolutionProcessing(CommitBatchContext* self) {
 );

 if (debugID.present()) {
-g_traceBatch.addEvent(
-"CommitDebug", debugID.get().first(),
-"MasterProxyServer.commitBatch.GettingCommitVersion"
-);
+g_traceBatch.addEvent("CommitDebug", debugID.get().first(),
+"CommitProxyServer.commitBatch.GettingCommitVersion");
 }

 GetCommitVersionRequest req(self->span.context, pProxyCommitData->commitVersionRequestNumber++,
@@ -577,10 +570,7 @@ ACTOR Future<Void> preresolutionProcessing(CommitBatchContext* self) {
 //TraceEvent("ProxyGotVer", pProxyContext->dbgid).detail("Commit", commitVersion).detail("Prev", prevVersion);

 if (debugID.present()) {
-g_traceBatch.addEvent(
-"CommitDebug", debugID.get().first(),
-"MasterProxyServer.commitBatch.GotCommitVersion"
-);
+g_traceBatch.addEvent("CommitDebug", debugID.get().first(), "CommitProxyServer.commitBatch.GotCommitVersion");
 }

 return Void();
@@ -639,10 +629,8 @@ ACTOR Future<Void> getResolution(CommitBatchContext* self) {
 self->resolution.swap(*const_cast<std::vector<ResolveTransactionBatchReply>*>(&resolutionResp));

 if (self->debugID.present()) {
-g_traceBatch.addEvent(
-"CommitDebug", self->debugID.get().first(),
-"MasterProxyServer.commitBatch.AfterResolution"
-);
+g_traceBatch.addEvent("CommitDebug", self->debugID.get().first(),
+"CommitProxyServer.commitBatch.AfterResolution");
 }

 return Void();
@@ -972,10 +960,8 @@ ACTOR Future<Void> postResolution(CommitBatchContext* self) {
 pProxyCommitData->stats.txnCommitResolved += trs.size();

 if (debugID.present()) {
-g_traceBatch.addEvent(
-"CommitDebug", debugID.get().first(),
-"MasterProxyServer.commitBatch.ProcessingMutations"
-);
+g_traceBatch.addEvent("CommitDebug", debugID.get().first(),
+"CommitProxyServer.commitBatch.ProcessingMutations");
 }

 self->isMyFirstBatch = !pProxyCommitData->version;
@@ -1041,7 +1027,8 @@ ACTOR Future<Void> postResolution(CommitBatchContext* self) {
 self->msg = self->storeCommits.back().first.get();

 if (self->debugID.present())
-g_traceBatch.addEvent("CommitDebug", self->debugID.get().first(), "MasterProxyServer.commitBatch.AfterStoreCommits");
+g_traceBatch.addEvent("CommitDebug", self->debugID.get().first(),
+"CommitProxyServer.commitBatch.AfterStoreCommits");

 // txnState (transaction subsystem state) tag: message extracted from log adapter
 bool firstMessage = true;
@@ -1129,7 +1116,7 @@ ACTOR Future<Void> reply(CommitBatchContext* self) {

 //TraceEvent("ProxyPushed", pProxyCommitData->dbgid).detail("PrevVersion", prevVersion).detail("Version", commitVersion);
 if (debugID.present())
-g_traceBatch.addEvent("CommitDebug", debugID.get().first(), "MasterProxyServer.commitBatch.AfterLogPush");
+g_traceBatch.addEvent("CommitDebug", debugID.get().first(), "CommitProxyServer.commitBatch.AfterLogPush");

 for (auto &p : self->storeCommits) {
 ASSERT(!p.second.isReady());
@@ -1328,7 +1315,8 @@ ACTOR static Future<Void> doKeyServerLocationRequest( GetKeyServerLocationsReque
 return Void();
 }

-ACTOR static Future<Void> readRequestServer( MasterProxyInterface proxy, PromiseStream<Future<Void>> addActor, ProxyCommitData* commitData ) {
+ACTOR static Future<Void> readRequestServer(CommitProxyInterface proxy, PromiseStream<Future<Void>> addActor,
+ProxyCommitData* commitData) {
 loop {
 GetKeyServerLocationsRequest req = waitNext(proxy.getKeyServersLocations.getFuture());
 //WARNING: this code is run at a high priority, so it needs to do as little work as possible
@@ -1344,7 +1332,7 @@ ACTOR static Future<Void> readRequestServer( MasterProxyInterface proxy, Promise
 }
 }

-ACTOR static Future<Void> rejoinServer( MasterProxyInterface proxy, ProxyCommitData* commitData ) {
+ACTOR static Future<Void> rejoinServer(CommitProxyInterface proxy, ProxyCommitData* commitData) {
 // We can't respond to these requests until we have valid txnStateStore
 wait(commitData->validState.getFuture());

@@ -1413,8 +1401,7 @@ ACTOR static Future<Void> rejoinServer( MasterProxyInterface proxy, ProxyCommitD
 }
 }

-ACTOR Future<Void> ddMetricsRequestServer(MasterProxyInterface proxy, Reference<AsyncVar<ServerDBInfo>> db)
-{
+ACTOR Future<Void> ddMetricsRequestServer(CommitProxyInterface proxy, Reference<AsyncVar<ServerDBInfo>> db) {
 loop {
 choose {
 when(state GetDDMetricsRequest req = waitNext(proxy.getDDMetrics.getFuture()))
@@ -1496,7 +1483,7 @@ ACTOR Future<Void> monitorRemoteCommitted(ProxyCommitData* self) {
 }

 ACTOR Future<Void> proxySnapCreate(ProxySnapRequest snapReq, ProxyCommitData* commitData) {
-TraceEvent("SnapMasterProxy_SnapReqEnter")
+TraceEvent("SnapCommitProxy_SnapReqEnter")
 .detail("SnapPayload", snapReq.snapPayload)
 .detail("SnapUID", snapReq.snapUID);
 try {
@@ -1504,7 +1491,7 @@ ACTOR Future<Void> proxySnapCreate(ProxySnapRequest snapReq, ProxyCommitData* co
 ExecCmdValueString execArg(snapReq.snapPayload);
 StringRef binPath = execArg.getBinaryPath();
 if (!isWhitelisted(commitData->whitelistedBinPathVec, binPath)) {
-TraceEvent("SnapMasterProxy_WhiteListCheckFailed")
+TraceEvent("SnapCommitProxy_WhiteListCheckFailed")
 .detail("SnapPayload", snapReq.snapPayload)
 .detail("SnapUID", snapReq.snapUID);
 throw snap_path_not_whitelisted();
@@ -1516,7 +1503,7 @@ ACTOR Future<Void> proxySnapCreate(ProxySnapRequest snapReq, ProxyCommitData* co
 // Currently, snapshot of old tlog generation is not
 // supported and hence failing the snapshot request until
 // cluster is fully_recovered.
-TraceEvent("SnapMasterProxy_ClusterNotFullyRecovered")
+TraceEvent("SnapCommitProxy_ClusterNotFullyRecovered")
 .detail("SnapPayload", snapReq.snapPayload)
 .detail("SnapUID", snapReq.snapUID);
 throw snap_not_fully_recovered_unsupported();
@@ -1531,7 +1518,7 @@ ACTOR Future<Void> proxySnapCreate(ProxySnapRequest snapReq, ProxyCommitData* co
 // FIXME: logAntiQuorum not supported, remove it later,
 // In version2, we probably don't need this limtiation, but this needs to be tested.
 if (logAntiQuorum > 0) {
-TraceEvent("SnapMasterProxy_LogAnitQuorumNotSupported")
+TraceEvent("SnapCommitProxy_LogAnitQuorumNotSupported")
 .detail("SnapPayload", snapReq.snapPayload)
 .detail("SnapUID", snapReq.snapUID);
 throw snap_log_anti_quorum_unsupported();
@@ -1547,15 +1534,15 @@ ACTOR Future<Void> proxySnapCreate(ProxySnapRequest snapReq, ProxyCommitData* co
 try {
 wait(throwErrorOr(ddSnapReq));
 } catch (Error& e) {
-TraceEvent("SnapMasterProxy_DDSnapResponseError")
+TraceEvent("SnapCommitProxy_DDSnapResponseError")
 .detail("SnapPayload", snapReq.snapPayload)
 .detail("SnapUID", snapReq.snapUID)
-.error(e, true /*includeCancelled*/ );
+.error(e, true /*includeCancelled*/);
 throw e;
 }
 snapReq.reply.send(Void());
 } catch (Error& e) {
-TraceEvent("SnapMasterProxy_SnapReqError")
+TraceEvent("SnapCommitProxy_SnapReqError")
 .detail("SnapPayload", snapReq.snapPayload)
 .detail("SnapUID", snapReq.snapUID)
 .error(e, true /*includeCancelled*/);
@@ -1565,14 +1552,14 @@ ACTOR Future<Void> proxySnapCreate(ProxySnapRequest snapReq, ProxyCommitData* co
 throw e;
 }
 }
-TraceEvent("SnapMasterProxy_SnapReqExit")
+TraceEvent("SnapCommitProxy_SnapReqExit")
 .detail("SnapPayload", snapReq.snapPayload)
 .detail("SnapUID", snapReq.snapUID);
 return Void();
 }

 ACTOR Future<Void> proxyCheckSafeExclusion(Reference<AsyncVar<ServerDBInfo>> db, ExclusionSafetyCheckRequest req) {
-TraceEvent("SafetyCheckMasterProxyBegin");
+TraceEvent("SafetyCheckCommitProxyBegin");
 state ExclusionSafetyCheckReply reply(false);
 if (!db->get().distributor.present()) {
 TraceEvent(SevWarnAlways, "DataDistributorNotPresent").detail("Operation", "ExclusionSafetyCheck");
@@ -1586,7 +1573,7 @@ ACTOR Future<Void> proxyCheckSafeExclusion(Reference<AsyncVar<ServerDBInfo>> db,
 DistributorExclusionSafetyCheckReply _reply = wait(throwErrorOr(safeFuture));
 reply.safe = _reply.safe;
 } catch (Error& e) {
-TraceEvent("SafetyCheckMasterProxyResponseError").error(e);
+TraceEvent("SafetyCheckCommitProxyResponseError").error(e);
 if (e.code() != error_code_operation_cancelled) {
 req.reply.sendError(e);
 return Void();
@@ -1594,7 +1581,7 @@ ACTOR Future<Void> proxyCheckSafeExclusion(Reference<AsyncVar<ServerDBInfo>> db,
 throw e;
 }
 }
-TraceEvent("SafetyCheckMasterProxyFinish");
+TraceEvent("SafetyCheckCommitProxyFinish");
 req.reply.send(reply);
 return Void();
 }
@ -1631,15 +1618,10 @@ ACTOR Future<Void> reportTxnTagCommitCost(UID myID, Reference<AsyncVar<ServerDBI
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
ACTOR Future<Void> masterProxyServerCore(
|
ACTOR Future<Void> commitProxyServerCore(CommitProxyInterface proxy, MasterInterface master,
|
||||||
MasterProxyInterface proxy,
|
Reference<AsyncVar<ServerDBInfo>> db, LogEpoch epoch,
|
||||||
MasterInterface master,
|
Version recoveryTransactionVersion, bool firstProxy,
|
||||||
Reference<AsyncVar<ServerDBInfo>> db,
|
std::string whitelistBinPaths) {
|
||||||
LogEpoch epoch,
|
|
||||||
Version recoveryTransactionVersion,
|
|
||||||
bool firstProxy,
|
|
||||||
std::string whitelistBinPaths)
|
|
||||||
{
|
|
||||||
state ProxyCommitData commitData(proxy.id(), master, proxy.getConsistentReadVersion, recoveryTransactionVersion, proxy.commit, db, firstProxy);
|
state ProxyCommitData commitData(proxy.id(), master, proxy.getConsistentReadVersion, recoveryTransactionVersion, proxy.commit, db, firstProxy);
|
||||||
|
|
||||||
state Future<Sequence> sequenceFuture = (Sequence)0;
|
state Future<Sequence> sequenceFuture = (Sequence)0;
|
||||||
|
@ -1657,9 +1639,9 @@ ACTOR Future<Void> masterProxyServerCore(
|
||||||
state GetHealthMetricsReply detailedHealthMetricsReply;
|
state GetHealthMetricsReply detailedHealthMetricsReply;
|
||||||
|
|
||||||
addActor.send( waitFailureServer(proxy.waitFailure.getFuture()) );
|
addActor.send( waitFailureServer(proxy.waitFailure.getFuture()) );
|
||||||
addActor.send( traceRole(Role::MASTER_PROXY, proxy.id()) );
|
addActor.send(traceRole(Role::COMMIT_PROXY, proxy.id()));
|
||||||
|
|
||||||
//TraceEvent("ProxyInit1", proxy.id());
|
//TraceEvent("CommitProxyInit1", proxy.id());
|
||||||
|
|
||||||
// Wait until we can load the "real" logsystem, since we don't support switching them currently
|
// Wait until we can load the "real" logsystem, since we don't support switching them currently
|
||||||
while (!(commitData.db->get().master.id() == master.id() && commitData.db->get().recoveryState >= RecoveryState::RECOVERY_TRANSACTION)) {
|
while (!(commitData.db->get().master.id() == master.id() && commitData.db->get().recoveryState >= RecoveryState::RECOVERY_TRANSACTION)) {
|
||||||
|
@ -1701,7 +1683,7 @@ ACTOR Future<Void> masterProxyServerCore(
|
||||||
(int)std::min<double>(SERVER_KNOBS->COMMIT_TRANSACTION_BATCH_BYTES_MAX,
|
(int)std::min<double>(SERVER_KNOBS->COMMIT_TRANSACTION_BATCH_BYTES_MAX,
|
||||||
std::max<double>(SERVER_KNOBS->COMMIT_TRANSACTION_BATCH_BYTES_MIN,
|
std::max<double>(SERVER_KNOBS->COMMIT_TRANSACTION_BATCH_BYTES_MIN,
|
||||||
SERVER_KNOBS->COMMIT_TRANSACTION_BATCH_BYTES_SCALE_BASE *
|
SERVER_KNOBS->COMMIT_TRANSACTION_BATCH_BYTES_SCALE_BASE *
|
||||||
pow(commitData.db->get().client.masterProxies.size(),
|
pow(commitData.db->get().client.commitProxies.size(),
|
||||||
SERVER_KNOBS->COMMIT_TRANSACTION_BATCH_BYTES_SCALE_POWER)));
|
SERVER_KNOBS->COMMIT_TRANSACTION_BATCH_BYTES_SCALE_POWER)));
|
||||||
|
|
||||||
commitBatcherActor = commitBatcher(&commitData, batchedCommits, proxy.commit.getFuture(), commitBatchByteLimit, commitBatchesMemoryLimit);
|
commitBatcherActor = commitBatcher(&commitData, batchedCommits, proxy.commit.getFuture(), commitBatchByteLimit, commitBatchesMemoryLimit);
|
||||||
|
@ -1723,7 +1705,7 @@ ACTOR Future<Void> masterProxyServerCore(
|
||||||
//WARNING: this code is run at a high priority, so it needs to do as little work as possible
|
//WARNING: this code is run at a high priority, so it needs to do as little work as possible
|
||||||
const vector<CommitTransactionRequest> &trs = batchedRequests.first;
|
const vector<CommitTransactionRequest> &trs = batchedRequests.first;
|
||||||
int batchBytes = batchedRequests.second;
|
int batchBytes = batchedRequests.second;
|
||||||
//TraceEvent("MasterProxyCTR", proxy.id()).detail("CommitTransactions", trs.size()).detail("TransactionRate", transactionRate).detail("TransactionQueue", transactionQueue.size()).detail("ReleasedTransactionCount", transactionCount);
|
//TraceEvent("CommitProxyCTR", proxy.id()).detail("CommitTransactions", trs.size()).detail("TransactionRate", transactionRate).detail("TransactionQueue", transactionQueue.size()).detail("ReleasedTransactionCount", transactionCount);
|
||||||
if (trs.size() || (commitData.db->get().recoveryState >= RecoveryState::ACCEPTING_COMMITS && now() - lastCommit >= SERVER_KNOBS->MAX_COMMIT_BATCH_INTERVAL)) {
|
if (trs.size() || (commitData.db->get().recoveryState >= RecoveryState::ACCEPTING_COMMITS && now() - lastCommit >= SERVER_KNOBS->MAX_COMMIT_BATCH_INTERVAL)) {
|
||||||
lastCommit = now();
|
lastCommit = now();
|
||||||
|
|
||||||
|
@@ -1824,27 +1806,27 @@ ACTOR Future<Void> masterProxyServerCore(
     }
 }

-ACTOR Future<Void> checkRemoved(Reference<AsyncVar<ServerDBInfo>> db, uint64_t recoveryCount, MasterProxyInterface myInterface) {
+ACTOR Future<Void> checkRemoved(Reference<AsyncVar<ServerDBInfo>> db, uint64_t recoveryCount,
+                                CommitProxyInterface myInterface) {
     loop{
-        if (db->get().recoveryCount >= recoveryCount && !std::count(db->get().client.masterProxies.begin(), db->get().client.masterProxies.end(), myInterface)) {
+        if (db->get().recoveryCount >= recoveryCount &&
+            !std::count(db->get().client.commitProxies.begin(), db->get().client.commitProxies.end(), myInterface)) {
             throw worker_removed();
         }
         wait(db->onChange());
     }
 }

-ACTOR Future<Void> masterProxyServer(
-    MasterProxyInterface proxy,
-    InitializeMasterProxyRequest req,
-    Reference<AsyncVar<ServerDBInfo>> db,
-    std::string whitelistBinPaths)
-{
+ACTOR Future<Void> commitProxyServer(CommitProxyInterface proxy, InitializeCommitProxyRequest req,
+                                     Reference<AsyncVar<ServerDBInfo>> db, std::string whitelistBinPaths) {
     try {
-        state Future<Void> core = masterProxyServerCore(proxy, req.master, db, req.recoveryCount, req.recoveryTransactionVersion, req.firstProxy, whitelistBinPaths);
+        state Future<Void> core =
+            commitProxyServerCore(proxy, req.master, db, req.recoveryCount, req.recoveryTransactionVersion,
+                                  req.firstProxy, whitelistBinPaths);
         wait(core || checkRemoved(db, req.recoveryCount, proxy));
     }
     catch (Error& e) {
-        TraceEvent("MasterProxyTerminated", proxy.id()).error(e, true);
+        TraceEvent("CommitProxyTerminated", proxy.id()).error(e, true);

         if (e.code() != error_code_worker_removed && e.code() != error_code_tlog_stopped &&
             e.code() != error_code_master_tlog_failed && e.code() != error_code_coordinators_changed &&
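checkRemoved() above is the standard removal-detection pattern in this codebase: an actor watches the ServerDBInfo broadcast and throws worker_removed() once the cluster has recovered past its generation and its interface no longer appears in the registered proxy list. The membership test itself is plain C++; a minimal self-contained sketch, where ProxyId and shouldRemove are hypothetical stand-ins for CommitProxyInterface (which compares by UID) and the actor's condition:

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

// Hypothetical stand-in for CommitProxyInterface, which compares by UID.
struct ProxyId {
    uint64_t id;
    bool operator==(const ProxyId& other) const { return id == other.id; }
};

// Mirrors the actor's condition: removal is signalled once the cluster has
// recovered past our generation and our interface is no longer registered.
bool shouldRemove(uint64_t dbRecoveryCount, uint64_t myRecoveryCount,
                  const std::vector<ProxyId>& commitProxies, ProxyId myInterface) {
    return dbRecoveryCount >= myRecoveryCount &&
           std::count(commitProxies.begin(), commitProxies.end(), myInterface) == 0;
}

int main() {
    std::vector<ProxyId> proxies{ {1}, {2} };
    std::cout << shouldRemove(7, 7, proxies, ProxyId{3}) << "\n"; // 1: worker_removed
    std::cout << shouldRemove(7, 7, proxies, ProxyId{2}) << "\n"; // 0: still registered
}

In the actor itself this test re-runs after every wait(db->onChange()), so removal is detected as soon as a new ServerDBInfo is broadcast.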
@@ -268,7 +268,7 @@ struct CompactPreOrderTree {

     Deque< BuildInfo > queue;
     Deque< BuildInfo > deferred;
-    queue.push_back(BuildInfo(NULL, false, prefix, &input[0], &input[0] + input.size()));
+    queue.push_back(BuildInfo(nullptr, false, prefix, &input[0], &input[0] + input.size()));

     Node* node = &root;
     uint8_t* cacheLineEnd = (uint8_t*)node + 64;
@@ -96,7 +96,7 @@ ServerCoordinators::ServerCoordinators( Reference<ClusterConnectionFile> cf )
 // The coordination server wants to create its key value store only if it is actually used
 struct OnDemandStore {
 public:
-    OnDemandStore( std::string folder, UID myID ) : folder(folder), store(NULL), myID(myID) {}
+    OnDemandStore( std::string folder, UID myID ) : folder(folder), store(nullptr), myID(myID) {}
     ~OnDemandStore() { if (store) store->close(); }

     IKeyValueStore* get() {
@@ -48,7 +48,7 @@ protected:
 struct Coroutine /*: IThreadlike*/ {
     Coroutine() {
         coro = Coro_new();
-        if (coro == NULL)
+        if (coro == nullptr)
             platform::outOfMemory();
     }

@@ -294,7 +294,7 @@ void CoroThreadPool::init()
 {
     if (!current_coro) {
         current_coro = main_coro = Coro_new();
-        if (main_coro == NULL)
+        if (main_coro == nullptr)
             platform::outOfMemory();

         Coro_initializeMainCoro(main_coro);
@@ -5188,7 +5188,7 @@ TEST_CASE("DataDistribution/AddTeamsBestOf/NotUseMachineID") {
     Reference<IReplicationPolicy> policy = Reference<IReplicationPolicy>(new PolicyAcross(teamSize, "zoneid", Reference<IReplicationPolicy>(new PolicyOne())));
     state DDTeamCollection* collection = testMachineTeamCollection(teamSize, policy, processSize);

-    if (collection == NULL) {
+    if (collection == nullptr) {
         fprintf(stderr, "collection is null\n");
         return Void();
     }
@@ -785,7 +785,7 @@ public:
     // FIXME: Is setting lastCommittedSeq to -1 instead of 0 necessary?
     DiskQueue( std::string basename, std::string fileExtension, UID dbgid, DiskQueueVersion diskQueueVersion, int64_t fileSizeWarningLimit )
         : rawQueue( new RawDiskQueue_TwoFiles(basename, fileExtension, dbgid, fileSizeWarningLimit) ), dbgid(dbgid), diskQueueVersion(diskQueueVersion), anyPopped(false), nextPageSeq(0), poppedSeq(0), lastPoppedSeq(0),
-          nextReadLocation(-1), readBufPage(NULL), readBufPos(0), pushed_page_buffer(NULL), recovered(false), initialized(false), lastCommittedSeq(-1), warnAlwaysForMemory(true)
+          nextReadLocation(-1), readBufPage(nullptr), readBufPos(0), pushed_page_buffer(nullptr), recovered(false), initialized(false), lastCommittedSeq(-1), warnAlwaysForMemory(true)
     {
     }

@@ -21,7 +21,7 @@
 #include "fdbclient/Notified.h"
 #include "fdbserver/LogSystem.h"
 #include "fdbserver/LogSystemDiskQueueAdapter.h"
-#include "fdbclient/MasterProxyInterface.h"
+#include "fdbclient/CommitProxyInterface.h"
 #include "fdbclient/GrvProxyInterface.h"
 #include "fdbserver/WaitFailure.h"
 #include "fdbserver/WorkerInterface.actor.h"
@@ -443,13 +443,13 @@ ACTOR Future<Void> sendGrvReplies(Future<GetReadVersionReply> replyFuture, std::
                 TEST(true); // Auto TPS rate is unlimited
             }
             else {
-                TEST(true); // Proxy returning tag throttle
+                TEST(true); // GRV proxy returning tag throttle
                 reply.tagThrottleInfo[tag.first] = tagItr->second;
             }
         }
         else {
             // This isn't required, but we might as well
-            TEST(true); // Proxy expiring tag throttle
+            TEST(true); // GRV proxy expiring tag throttle
             priorityThrottledTags.erase(tagItr);
         }
     }
@@ -299,13 +299,13 @@ private:

     void rollback() { clear(); }

-    void set(KeyValueRef keyValue, const Arena* arena = NULL) {
+    void set(KeyValueRef keyValue, const Arena* arena = nullptr) {
         queue_op(OpSet, keyValue.key, keyValue.value, arena);
     }

-    void clear(KeyRangeRef range, const Arena* arena = NULL) { queue_op(OpClear, range.begin, range.end, arena); }
+    void clear(KeyRangeRef range, const Arena* arena = nullptr) { queue_op(OpClear, range.begin, range.end, arena); }

-    void clear_to_end(StringRef fromKey, const Arena* arena = NULL) {
+    void clear_to_end(StringRef fromKey, const Arena* arena = nullptr) {
         queue_op(OpClearToEnd, fromKey, StringRef(), arena);
     }

@@ -316,7 +316,7 @@ private:
         r.op = op;
         r.p1 = p1;
         r.p2 = p2;
-        if (arena == NULL) {
+        if (arena == nullptr) {
             operations.push_back_deep(operations.arena(), r);
         } else {
             operations.push_back(operations.arena(), r);
@@ -46,7 +46,7 @@ void hexdump(FILE *fout, StringRef val);
 #include <Windows.h>*/

 /*uint64_t getFileSize( const char* filename ) {
-    HANDLE f = CreateFile( filename, GENERIC_READ, FILE_SHARE_READ|FILE_SHARE_WRITE|FILE_SHARE_DELETE, NULL, OPEN_EXISTING, 0, NULL);
+    HANDLE f = CreateFile( filename, GENERIC_READ, FILE_SHARE_READ|FILE_SHARE_WRITE|FILE_SHARE_DELETE, nullptr, OPEN_EXISTING, 0, nullptr);
     if (f == INVALID_HANDLE_VALUE) return 0;
     DWORD hi,lo;
     lo = GetFileSize(f, &hi);
@@ -165,12 +165,12 @@ struct PageChecksumCodec {
                 .detail("Filename", self->filename)
                 .detail("PageNumber", pageNumber);

-            return NULL;
+            return nullptr;
             }
         }

         if(!self->checksum(pageNumber, data, self->pageSize, write))
-            return NULL;
+            return nullptr;

         return data;
     }
@@ -211,7 +211,7 @@ struct SQLiteDB : NonCopyable {
     void open(bool writable);
     void createFromScratch();

-    SQLiteDB( std::string filename, bool page_checksums, bool fragment_values): filename(filename), db(NULL), btree(NULL), table(-1), freetable(-1), haveMutex(false), page_checksums(page_checksums), fragment_values(fragment_values) {}
+    SQLiteDB( std::string filename, bool page_checksums, bool fragment_values): filename(filename), db(nullptr), btree(nullptr), table(-1), freetable(-1), haveMutex(false), page_checksums(page_checksums), fragment_values(fragment_values) {}

     ~SQLiteDB() {
         if (db) {
@@ -315,9 +315,9 @@ class Statement : NonCopyable {

 public:
     Statement( SQLiteDB& db, const char* sql )
-        : db(db), stmt(NULL)
+        : db(db), stmt(nullptr)
     {
-        db.checkError("prepare", sqlite3_prepare_v2( db.db, sql, -1, &stmt, NULL));
+        db.checkError("prepare", sqlite3_prepare_v2( db.db, sql, -1, &stmt, nullptr));
     }
     ~Statement() {
         try {
@@ -520,7 +520,7 @@ int getEncodedKVFragmentSize( int keySize, int valuePrefixSize ) {
 // the full key and index were in the encoded buffer. The value returned will be 0 or
 // more value bytes, however many were available.
 // Note that a short encoded buffer must at *least* contain the header length varint.
-Optional<KeyValueRef> decodeKVFragment( StringRef encoded, uint32_t *index = NULL, bool partial = false) {
+Optional<KeyValueRef> decodeKVFragment( StringRef encoded, uint32_t *index = nullptr, bool partial = false) {
     uint8_t const* d = encoded.begin();
     uint64_t h, len1, len2;
     d += sqlite3GetVarint( d, (u64*)&h );
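decodeKVFragment() relies on sqlite3GetVarint to read the header length noted in the comment above. SQLite varints are big-endian: each of the first eight bytes carries 7 payload bits plus a high continuation bit, and a ninth byte, if reached, carries a full 8 bits. A simplified illustration of that decoding (getVarint here is an assumed reimplementation for exposition, not SQLite's tuned original):

#include <cstdint>
#include <cstdio>

// Big-endian varint in the SQLite style: 7 bits per byte with a continuation
// flag for the first eight bytes, then a full 8-bit ninth byte. Returns the
// number of bytes consumed.
int getVarint(const uint8_t* p, uint64_t* out) {
    uint64_t v = 0;
    for (int i = 0; i < 8; ++i) {
        v = (v << 7) | (p[i] & 0x7f);
        if ((p[i] & 0x80) == 0) {
            *out = v;
            return i + 1;
        }
    }
    v = (v << 8) | p[8];
    *out = v;
    return 9;
}

int main() {
    const uint8_t encoded[] = { 0x81, 0x00 }; // one continuation byte, then 0
    uint64_t h;
    int n = getVarint(encoded, &h);
    printf("decoded %llu from %d bytes\n", (unsigned long long)h, n); // 128 from 2 bytes
}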
@@ -634,7 +634,7 @@ struct IntKeyCursor {
     IntKeyCursor( SQLiteDB& db, int table, bool write ) : cursor(0), db(db) {
         cursor = (BtCursor*)new char[sqlite3BtreeCursorSize()];
         sqlite3BtreeCursorZero(cursor);
-        db.checkError("BtreeCursor", sqlite3BtreeCursor(db.btree, table, write, NULL, cursor));
+        db.checkError("BtreeCursor", sqlite3BtreeCursor(db.btree, table, write, nullptr, cursor));
     }
     ~IntKeyCursor() {
         if (cursor) {
@@ -726,7 +726,7 @@ struct RawCursor {
     }
     void insertFragment( KeyValueRef kv, uint32_t index, int seekResult ) {
         Value v = encodeKVFragment(kv, index);
-        db.checkError("BtreeInsert", sqlite3BtreeInsert(cursor, v.begin(), v.size(), NULL, 0, 0, 0, seekResult));
+        db.checkError("BtreeInsert", sqlite3BtreeInsert(cursor, v.begin(), v.size(), nullptr, 0, 0, 0, seekResult));
     }
     void remove() {
         db.checkError("BtreeDelete", sqlite3BtreeDelete(cursor));
@@ -823,7 +823,7 @@ struct RawCursor {
             int r = moveTo( kv.key );
             if (!r) remove();
             Value v = encode(kv);
-            db.checkError("BTreeInsert", sqlite3BtreeInsert(cursor, v.begin(), v.size(), NULL, 0, 0, 0, r));
+            db.checkError("BTreeInsert", sqlite3BtreeInsert(cursor, v.begin(), v.size(), nullptr, 0, 0, 0, r));
         }
     }
     void clearOne( KeyRangeRef keys ) {
@@ -1158,7 +1158,7 @@ struct RawCursor {
         // Set field 1 of tuple to key, which is a string type with typecode 12 + 2*len
         tupleValues[0].db = keyInfo.db;
         tupleValues[0].enc = keyInfo.enc;
-        tupleValues[0].zMalloc = NULL;
+        tupleValues[0].zMalloc = nullptr;
         ASSERT(sqlite3VdbeSerialGet(key.begin(), 12 + (2 * key.size()), &tupleValues[0]) == key.size());

         // In fragmenting mode, seek is to (k, 0, ), otherwise just (k, ).
@@ -1168,8 +1168,8 @@ struct RawCursor {
             // Set field 2 of tuple to the null type which is typecode 0
             tupleValues[1].db = keyInfo.db;
             tupleValues[1].enc = keyInfo.enc;
-            tupleValues[1].zMalloc = NULL;
-            ASSERT(sqlite3VdbeSerialGet(NULL, 0, &tupleValues[1]) == 0);
+            tupleValues[1].zMalloc = nullptr;
+            ASSERT(sqlite3VdbeSerialGet(nullptr, 0, &tupleValues[1]) == 0);

             r.nField = 2;
         }
@@ -1231,7 +1231,7 @@ int SQLiteDB::checkAllPageChecksums() {
     // Now that the file itself is open and locked, let sqlite open the database
     // Note that VFSAsync will also call g_network->open (including for the WAL), so its flags are important, too
     // TODO: If better performance is needed, make AsyncFileReadAheadCache work and be enabled by SQLITE_OPEN_READAHEAD which was added for that purpose.
-    int result = sqlite3_open_v2(apath.c_str(), &db, SQLITE_OPEN_READONLY, NULL);
+    int result = sqlite3_open_v2(apath.c_str(), &db, SQLITE_OPEN_READONLY, nullptr);
     checkError("open", result);

     // This check has the useful side effect of actually opening/reading the database. If we were not doing this,
@@ -1350,7 +1350,7 @@ void SQLiteDB::open(bool writable) {

     // Now that the file itself is open and locked, let sqlite open the database
     // Note that VFSAsync will also call g_network->open (including for the WAL), so its flags are important, too
-    int result = sqlite3_open_v2(apath.c_str(), &db, (writable ? SQLITE_OPEN_READWRITE : SQLITE_OPEN_READONLY), NULL);
+    int result = sqlite3_open_v2(apath.c_str(), &db, (writable ? SQLITE_OPEN_READWRITE : SQLITE_OPEN_READONLY), nullptr);
     checkError("open", result);

     int chunkSize;
@@ -1400,7 +1400,7 @@ void SQLiteDB::open(bool writable) {

 void SQLiteDB::createFromScratch() {
     int sqliteFlags = SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE;
-    checkError("open", sqlite3_open_v2(filename.c_str(), &db, sqliteFlags, NULL));
+    checkError("open", sqlite3_open_v2(filename.c_str(), &db, sqliteFlags, nullptr));

     Statement(*this, "PRAGMA page_size = 4096").nextRow(); //fast
     btree = db->aDb[0].pBt;
@@ -1593,7 +1593,7 @@ private:
       springCleaningStats(springCleaningStats),
       diskBytesUsed(diskBytesUsed),
       freeListPages(freeListPages),
-      cursor(NULL),
+      cursor(nullptr),
       dbgid(dbgid),
       readThreads(*pReadThreads),
       checkAllChecksumsOnOpen(checkAllChecksumsOnOpen),
@@ -1684,7 +1684,7 @@ private:
         double t1 = now();
         cursor->commit();
         delete cursor;
-        cursor = NULL;
+        cursor = nullptr;

         double t2 = now();

@@ -1713,7 +1713,7 @@ private:
     //Checkpoints the database and resets the wal file back to the beginning
     void fullCheckpoint() {
         //A checkpoint cannot succeed while there is an outstanding transaction
-        ASSERT(cursor == NULL);
+        ASSERT(cursor == nullptr);

         resetReaders();
         conn.checkpoint(false);
@@ -38,7 +38,7 @@ void ServerKnobs::initialize(bool randomize, ClientKnobs* clientKnobs, bool isSi
     init( MAX_VERSIONS_IN_FLIGHT_FORCED, 6e5 * VERSIONS_PER_SECOND ); //one week of versions
     init( MAX_READ_TRANSACTION_LIFE_VERSIONS, 5 * VERSIONS_PER_SECOND ); if (randomize && BUGGIFY) MAX_READ_TRANSACTION_LIFE_VERSIONS = VERSIONS_PER_SECOND; else if (randomize && BUGGIFY) MAX_READ_TRANSACTION_LIFE_VERSIONS = std::max<int>(1, 0.1 * VERSIONS_PER_SECOND); else if( randomize && BUGGIFY ) MAX_READ_TRANSACTION_LIFE_VERSIONS = 10 * VERSIONS_PER_SECOND;
     init( MAX_WRITE_TRANSACTION_LIFE_VERSIONS, 5 * VERSIONS_PER_SECOND ); if (randomize && BUGGIFY) MAX_WRITE_TRANSACTION_LIFE_VERSIONS=std::max<int>(1, 1 * VERSIONS_PER_SECOND);
-    init( MAX_COMMIT_BATCH_INTERVAL, 2.0 ); if( randomize && BUGGIFY ) MAX_COMMIT_BATCH_INTERVAL = 0.5; // Each master proxy generates a CommitTransactionBatchRequest at least this often, so that versions always advance smoothly
+    init( MAX_COMMIT_BATCH_INTERVAL, 2.0 ); if( randomize && BUGGIFY ) MAX_COMMIT_BATCH_INTERVAL = 0.5; // Each commit proxy generates a CommitTransactionBatchRequest at least this often, so that versions always advance smoothly
     MAX_COMMIT_BATCH_INTERVAL = std::min(MAX_COMMIT_BATCH_INTERVAL, MAX_READ_TRANSACTION_LIFE_VERSIONS/double(2*VERSIONS_PER_SECOND)); // Ensure that the proxy commits 2 times every MAX_READ_TRANSACTION_LIFE_VERSIONS, otherwise the master will not give out versions fast enough

     // TLogs
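The clamp on MAX_COMMIT_BATCH_INTERVAL above guarantees that a commit proxy batches at least twice per MAX_READ_TRANSACTION_LIFE_VERSIONS, so the master can keep handing out versions. With the defaults in this hunk the clamp is not binding; a worked check (assuming VERSIONS_PER_SECOND is 1e6, its long-standing default):

#include <algorithm>
#include <cstdio>

int main() {
    // Defaults from this hunk; VERSIONS_PER_SECOND assumed to be 1e6.
    const double VERSIONS_PER_SECOND = 1e6;
    const double MAX_READ_TRANSACTION_LIFE_VERSIONS = 5 * VERSIONS_PER_SECOND;
    double maxCommitBatchInterval = 2.0;

    // The proxy must commit at least twice per read-transaction lifetime.
    maxCommitBatchInterval = std::min(
        maxCommitBatchInterval,
        MAX_READ_TRANSACTION_LIFE_VERSIONS / (2 * VERSIONS_PER_SECOND));

    printf("%g\n", maxCommitBatchInterval); // 2.0; the 2.5s bound is not binding
}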
@@ -328,7 +328,7 @@ void ServerKnobs::initialize(bool randomize, ClientKnobs* clientKnobs, bool isSi
     init( POLLING_FREQUENCY, 2.0 ); if( longLeaderElection ) POLLING_FREQUENCY = 8.0;
     init( HEARTBEAT_FREQUENCY, 0.5 ); if( longLeaderElection ) HEARTBEAT_FREQUENCY = 1.0;

-    // Master Proxy and GRV Proxy
+    // Commit CommitProxy and GRV CommitProxy
     init( START_TRANSACTION_BATCH_INTERVAL_MIN, 1e-6 );
     init( START_TRANSACTION_BATCH_INTERVAL_MAX, 0.010 );
     init( START_TRANSACTION_BATCH_INTERVAL_LATENCY_FRACTION, 0.5 );
@@ -438,7 +438,7 @@ void ServerKnobs::initialize(bool randomize, ClientKnobs* clientKnobs, bool isSi
     init( EXPECTED_MASTER_FITNESS, ProcessClass::UnsetFit );
     init( EXPECTED_TLOG_FITNESS, ProcessClass::UnsetFit );
     init( EXPECTED_LOG_ROUTER_FITNESS, ProcessClass::UnsetFit );
-    init( EXPECTED_PROXY_FITNESS, ProcessClass::UnsetFit );
+    init( EXPECTED_COMMIT_PROXY_FITNESS, ProcessClass::UnsetFit );
     init( EXPECTED_GRV_PROXY_FITNESS, ProcessClass::UnsetFit );
     init( EXPECTED_RESOLVER_FITNESS, ProcessClass::UnsetFit );
     init( RECRUITMENT_TIMEOUT, 600 ); if( randomize && BUGGIFY ) RECRUITMENT_TIMEOUT = deterministicRandom()->coinflip() ? 60.0 : 1.0;
@@ -566,7 +566,8 @@ void ServerKnobs::initialize(bool randomize, ClientKnobs* clientKnobs, bool isSi
     init( BEHIND_CHECK_COUNT, 2 );
     init( BEHIND_CHECK_VERSIONS, 5 * VERSIONS_PER_SECOND );
     init( WAIT_METRICS_WRONG_SHARD_CHANCE, isSimulated ? 1.0 : 0.1 );
-    init( MIN_TAG_PAGES_RATE, 1.0e4 ); if( randomize && BUGGIFY ) MIN_TAG_PAGES_RATE = 0;
+    init( MIN_TAG_READ_PAGES_RATE, 1.0e4 ); if( randomize && BUGGIFY ) MIN_TAG_READ_PAGES_RATE = 0;
+    init( MIN_TAG_WRITE_PAGES_RATE, 3200 ); if( randomize && BUGGIFY ) MIN_TAG_WRITE_PAGES_RATE = 0;
     init( TAG_MEASUREMENT_INTERVAL, 30.0 ); if( randomize && BUGGIFY ) TAG_MEASUREMENT_INTERVAL = 1.0;
     init( READ_COST_BYTE_FACTOR, 16384 ); if( randomize && BUGGIFY ) READ_COST_BYTE_FACTOR = 4096;
     init( PREFIX_COMPRESS_KVS_MEM_SNAPSHOTS, true ); if( randomize && BUGGIFY ) PREFIX_COMPRESS_KVS_MEM_SNAPSHOTS = false;
@@ -37,10 +37,11 @@ public:
     int64_t MAX_VERSIONS_IN_FLIGHT_FORCED;
     int64_t MAX_READ_TRANSACTION_LIFE_VERSIONS;
     int64_t MAX_WRITE_TRANSACTION_LIFE_VERSIONS;
-    double MAX_COMMIT_BATCH_INTERVAL; // Each master proxy generates a CommitTransactionBatchRequest at least this often, so that versions always advance smoothly
+    double MAX_COMMIT_BATCH_INTERVAL; // Each commit proxy generates a CommitTransactionBatchRequest at least this
+                                      // often, so that versions always advance smoothly

     // TLogs
-    double TLOG_TIMEOUT; // tlog OR master proxy failure - master's reaction time
+    double TLOG_TIMEOUT; // tlog OR commit proxy failure - master's reaction time
     double RECOVERY_TLOG_SMART_QUORUM_DELAY; // smaller might be better for bug amplification
     double TLOG_STORAGE_MIN_UPDATE_INTERVAL;
     double BUGGIFY_TLOG_STORAGE_MIN_UPDATE_INTERVAL;
@@ -262,7 +263,7 @@ public:
     double POLLING_FREQUENCY;
     double HEARTBEAT_FREQUENCY;

-    // Master Proxy
+    // Commit CommitProxy
     double START_TRANSACTION_BATCH_INTERVAL_MIN;
     double START_TRANSACTION_BATCH_INTERVAL_MAX;
     double START_TRANSACTION_BATCH_INTERVAL_LATENCY_FRACTION;
@@ -368,7 +369,7 @@ public:
     int EXPECTED_MASTER_FITNESS;
     int EXPECTED_TLOG_FITNESS;
     int EXPECTED_LOG_ROUTER_FITNESS;
-    int EXPECTED_PROXY_FITNESS;
+    int EXPECTED_COMMIT_PROXY_FITNESS;
     int EXPECTED_GRV_PROXY_FITNESS;
     int EXPECTED_RESOLVER_FITNESS;
     double RECRUITMENT_TIMEOUT;
@@ -495,7 +496,8 @@ public:
     int BEHIND_CHECK_COUNT;
     int64_t BEHIND_CHECK_VERSIONS;
     double WAIT_METRICS_WRONG_SHARD_CHANCE;
-    int64_t MIN_TAG_PAGES_RATE;
+    int64_t MIN_TAG_READ_PAGES_RATE;
+    int64_t MIN_TAG_WRITE_PAGES_RATE;
     double TAG_MEASUREMENT_INTERVAL;
     int64_t READ_COST_BYTE_FACTOR;
     bool PREFIX_COMPRESS_KVS_MEM_SNAPSHOTS;
@@ -609,7 +611,7 @@ public:
     double LATENCY_METRICS_LOGGING_INTERVAL;

     ServerKnobs();
-    void initialize(bool randomize = false, ClientKnobs* clientKnobs = NULL, bool isSimulated = false);
+    void initialize(bool randomize = false, ClientKnobs* clientKnobs = nullptr, bool isSimulated = false);
 };

 extern ServerKnobs const* SERVER_KNOBS;
@@ -817,7 +817,7 @@ struct LengthPrefixedStringRef {
     int expectedSize() const { ASSERT(length); return *length; }
     uint32_t* getLengthPtr() const { return length; }

-    LengthPrefixedStringRef() : length(NULL) {}
+    LengthPrefixedStringRef() : length(nullptr) {}
     LengthPrefixedStringRef(uint32_t* length) : length(length) {}
 };

@@ -38,7 +38,7 @@ struct MasterInterface {
     RequestStream< struct ChangeCoordinatorsRequest > changeCoordinators;
     RequestStream< struct GetCommitVersionRequest > getCommitVersion;
     RequestStream<struct BackupWorkerDoneRequest> notifyBackupWorkerDone;
-    // Get the centralized live committed version reported by proxies.
+    // Get the centralized live committed version reported by commit proxies.
     RequestStream< struct GetRawCommittedVersionRequest > getLiveCommittedVersion;
     // Report a proxy's committed version.
     RequestStream< struct ReportRawCommittedVersionRequest> reportLiveCommittedVersion;
@@ -75,7 +75,7 @@ struct AlternativeTLogQueueEntryRef {
     Version knownCommittedVersion;
     std::vector<TagsAndMessage>* alternativeMessages;

-    AlternativeTLogQueueEntryRef() : version(0), knownCommittedVersion(0), alternativeMessages(NULL) {}
+    AlternativeTLogQueueEntryRef() : version(0), knownCommittedVersion(0), alternativeMessages(nullptr) {}

     template <class Ar>
     void serialize(Ar& ar) {
@@ -76,7 +76,7 @@ struct AlternativeTLogQueueEntryRef {
     Version knownCommittedVersion;
     std::vector<TagsAndMessage>* alternativeMessages;

-    AlternativeTLogQueueEntryRef() : version(0), knownCommittedVersion(0), alternativeMessages(NULL) {}
+    AlternativeTLogQueueEntryRef() : version(0), knownCommittedVersion(0), alternativeMessages(nullptr) {}

     template <class Ar>
     void serialize(Ar& ar) {
@@ -198,7 +198,7 @@ struct ProxyCommitData {
                     Version recoveryTransactionVersion, RequestStream<CommitTransactionRequest> commit,
                     Reference<AsyncVar<ServerDBInfo>> db, bool firstProxy)
       : dbgid(dbgid), stats(dbgid, &version, &committedVersion, &commitBatchesMemBytesCount), master(master),
-        logAdapter(NULL), txnStateStore(NULL), popRemoteTxs(false), committedVersion(recoveryTransactionVersion),
+        logAdapter(nullptr), txnStateStore(nullptr), popRemoteTxs(false), committedVersion(recoveryTransactionVersion),
         version(0), minKnownCommittedVersion(0), lastVersionTime(0), commitVersionRequestNumber(1),
         mostRecentProcessedRequestNumber(0), getConsistentReadVersion(getConsistentReadVersion), commit(commit),
         lastCoalesceTime(0), localCommitBatchesStarted(0), locked(false),
@@ -527,7 +527,7 @@ struct RatekeeperLimits {
     {}
 };

-struct ProxyInfo {
+struct GrvProxyInfo {
     int64_t totalTransactions;
     int64_t batchTransactions;
     uint64_t lastThrottledTagChangeId;
@@ -535,7 +535,9 @@ struct ProxyInfo {
     double lastUpdateTime;
     double lastTagPushTime;

-    ProxyInfo() : totalTransactions(0), batchTransactions(0), lastUpdateTime(0), lastThrottledTagChangeId(0), lastTagPushTime(0) {}
+    GrvProxyInfo()
+      : totalTransactions(0), batchTransactions(0), lastUpdateTime(0), lastThrottledTagChangeId(0), lastTagPushTime(0) {
+    }
 };

 struct RatekeeperData {
@@ -545,7 +547,7 @@ struct RatekeeperData {
     Map<UID, StorageQueueInfo> storageQueueInfo;
     Map<UID, TLogQueueInfo> tlogQueueInfo;

-    std::map<UID, ProxyInfo> proxyInfo;
+    std::map<UID, GrvProxyInfo> grvProxyInfo;
     Smoother smoothReleasedTransactions, smoothBatchReleasedTransactions, smoothTotalDurableBytes;
     HealthMetrics healthMetrics;
     DatabaseConfiguration configuration;
@@ -812,7 +814,7 @@ ACTOR Future<Void> monitorThrottlingChanges(RatekeeperData *self) {
     if(tagValue.expirationTime == 0 || tagValue.expirationTime > now() + tagValue.initialDuration) {
         TEST(true); // Converting tag throttle duration to absolute time
         tagValue.expirationTime = now() + tagValue.initialDuration;
-        BinaryWriter wr(IncludeVersion(ProtocolVersion::withTagThrottleValue()));
+        BinaryWriter wr(IncludeVersion(ProtocolVersion::withTagThrottleValueReason()));
         wr << tagValue;
         state Value value = wr.toValue();

@@ -877,7 +879,7 @@ Future<Void> refreshStorageServerCommitCost(RatekeeperData* self) {
             maxCost = cost;
         }
     }
-    if (maxRate > SERVER_KNOBS->MIN_TAG_PAGES_RATE) {
+    if (maxRate > SERVER_KNOBS->MIN_TAG_WRITE_PAGES_RATE) {
         it->value.busiestWriteTag = busiestTag;
         // TraceEvent("RefreshSSCommitCost").detail("TotalWriteCost", it->value.totalWriteCost).detail("TotalWriteOps",it->value.totalWriteOps);
         ASSERT(it->value.totalWriteCosts > 0);
@@ -906,6 +908,8 @@ Future<Void> refreshStorageServerCommitCost(RatekeeperData* self) {

 void tryAutoThrottleTag(RatekeeperData* self, TransactionTag tag, double rate, double busyness,
                         TagThrottledReason reason) {
+    // NOTE: before the comparison with MIN_TAG_COST, the busiest tag rate also compares with MIN_TAG_PAGES_RATE
+    // currently MIN_TAG_PAGES_RATE > MIN_TAG_COST in our default knobs.
     if (busyness > SERVER_KNOBS->AUTO_THROTTLE_TARGET_TAG_BUSYNESS && rate > SERVER_KNOBS->MIN_TAG_COST) {
         TEST(true); // Transaction tag auto-throttled
         Optional<double> clientRate = self->throttledTags.autoThrottleTag(self->id, tag, busyness);
@@ -922,18 +926,17 @@ void tryAutoThrottleTag(RatekeeperData* self, TransactionTag tag, double rate, d

 void tryAutoThrottleTag(RatekeeperData* self, StorageQueueInfo& ss, int64_t storageQueue,
                         int64_t storageDurabilityLag) {
-    // TODO: reasonable criteria for write satuation should be investigated in experiment
-    // if (ss.busiestWriteTag.present() && storageQueue > SERVER_KNOBS->AUTO_TAG_THROTTLE_STORAGE_QUEUE_BYTES &&
-    // storageDurabilityLag > SERVER_KNOBS->AUTO_TAG_THROTTLE_DURABILITY_LAG_VERSIONS) {
-    // // write-saturated
-    // tryAutoThrottleTag(self, ss.busiestWriteTag.get(), ss.busiestWriteTagRate,
-    //ss.busiestWriteTagFractionalBusyness); } else
-    if (ss.busiestReadTag.present() &&
-        (storageQueue > SERVER_KNOBS->AUTO_TAG_THROTTLE_STORAGE_QUEUE_BYTES ||
-         storageDurabilityLag > SERVER_KNOBS->AUTO_TAG_THROTTLE_DURABILITY_LAG_VERSIONS)) {
-        // read saturated
-        tryAutoThrottleTag(self, ss.busiestReadTag.get(), ss.busiestReadTagRate, ss.busiestReadTagFractionalBusyness,
-                           TagThrottledReason::BUSY_READ);
+    // NOTE: we just keep it simple and don't differentiate write-saturation and read-saturation at the moment. In most of situation, this works.
+    // More indicators besides queue size and durability lag could be investigated in the future
+    if (storageQueue > SERVER_KNOBS->AUTO_TAG_THROTTLE_STORAGE_QUEUE_BYTES || storageDurabilityLag > SERVER_KNOBS->AUTO_TAG_THROTTLE_DURABILITY_LAG_VERSIONS) {
+        if(ss.busiestWriteTag.present()) {
+            tryAutoThrottleTag(self, ss.busiestWriteTag.get(), ss.busiestWriteTagRate,
+                               ss.busiestWriteTagFractionalBusyness, TagThrottledReason::BUSY_WRITE);
+        }
+        if(ss.busiestReadTag.present()) {
+            tryAutoThrottleTag(self, ss.busiestReadTag.get(), ss.busiestReadTagRate,
+                               ss.busiestReadTagFractionalBusyness, TagThrottledReason::BUSY_READ);
+        }
     }
 }
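The rewrite above replaces the commented-out write-saturation branch with a single saturation gate followed by independent write-tag and read-tag throttling. A minimal sketch of the new control flow, with stand-in types and illustrative knob values (the real defaults may differ):

#include <cstdio>
#include <optional>
#include <string>

// Illustrative stand-ins for the knob values named in the hunk.
constexpr long long AUTO_TAG_THROTTLE_STORAGE_QUEUE_BYTES = 800'000'000;
constexpr long long AUTO_TAG_THROTTLE_DURABILITY_LAG_VERSIONS = 250'000'000;

struct StorageInfo {
    std::optional<std::string> busiestWriteTag, busiestReadTag;
};

void throttleTag(const std::string& tag, const char* reason) {
    printf("throttle %s (%s)\n", tag.c_str(), reason);
}

// Mirrors the new control flow: one saturation gate, then both the write and
// the read candidate are considered independently.
void tryAutoThrottle(const StorageInfo& ss, long long storageQueue, long long durabilityLag) {
    if (storageQueue > AUTO_TAG_THROTTLE_STORAGE_QUEUE_BYTES ||
        durabilityLag > AUTO_TAG_THROTTLE_DURABILITY_LAG_VERSIONS) {
        if (ss.busiestWriteTag) throttleTag(*ss.busiestWriteTag, "BUSY_WRITE");
        if (ss.busiestReadTag) throttleTag(*ss.busiestReadTag, "BUSY_READ");
    }
}

int main() {
    tryAutoThrottle({ std::string("hot_write"), std::string("hot_read") }, 900'000'000, 0);
}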
@@ -1262,12 +1265,12 @@ void updateRate(RatekeeperData* self, RatekeeperLimits* limits) {
     TraceEvent(name.c_str(), self->id)
         .detail("TPSLimit", limits->tpsLimit)
         .detail("Reason", limitReason)
-        .detail("ReasonServerID", reasonID==UID() ? std::string() : Traceable<UID>::toString(reasonID))
+        .detail("ReasonServerID", reasonID == UID() ? std::string() : Traceable<UID>::toString(reasonID))
         .detail("ReleasedTPS", self->smoothReleasedTransactions.smoothRate())
         .detail("ReleasedBatchTPS", self->smoothBatchReleasedTransactions.smoothRate())
         .detail("TPSBasis", actualTps)
         .detail("StorageServers", sscount)
-        .detail("GrvProxies", self->proxyInfo.size())
+        .detail("GrvProxies", self->grvProxyInfo.size())
         .detail("TLogs", tlcount)
         .detail("WorstFreeSpaceStorageServer", worstFreeSpaceStorageServer)
         .detail("WorstFreeSpaceTLog", worstFreeSpaceTLog)
@@ -1369,9 +1372,9 @@ ACTOR Future<Void> ratekeeper(RatekeeperInterface rkInterf, Reference<AsyncVar<S

     lastLimited = self.smoothReleasedTransactions.smoothRate() > SERVER_KNOBS->LAST_LIMITED_RATIO * self.batchLimits.tpsLimit;
     double tooOld = now() - 1.0;
-    for(auto p=self.proxyInfo.begin(); p!=self.proxyInfo.end(); ) {
+    for (auto p = self.grvProxyInfo.begin(); p != self.grvProxyInfo.end();) {
         if (p->second.lastUpdateTime < tooOld)
-            p = self.proxyInfo.erase(p);
+            p = self.grvProxyInfo.erase(p);
         else
             ++p;
     }
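The pruning loop above uses the erase-returns-next-iterator idiom, the only safe way to delete from a std::map while walking it: the loop advances either through erase() or through ++p, never both. In isolation (lastUpdate stands in for grvProxyInfo):

#include <cstdio>
#include <map>

int main() {
    std::map<int, double> lastUpdate{ {1, 0.2}, {2, 5.0}, {3, 9.9} };
    double tooOld = 1.0;
    for (auto p = lastUpdate.begin(); p != lastUpdate.end();) {
        if (p->second < tooOld)
            p = lastUpdate.erase(p); // erase() returns the next valid iterator
        else
            ++p;
    }
    printf("%zu proxies remain\n", lastUpdate.size()); // 2
}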
@@ -1380,7 +1383,7 @@ ACTOR Future<Void> ratekeeper(RatekeeperInterface rkInterf, Reference<AsyncVar<S
     when (GetRateInfoRequest req = waitNext(rkInterf.getRateInfo.getFuture())) {
         GetRateInfoReply reply;

-        auto& p = self.proxyInfo[ req.requesterID ];
+        auto& p = self.grvProxyInfo[req.requesterID];
         //TraceEvent("RKMPU", req.requesterID).detail("TRT", req.totalReleasedTransactions).detail("Last", p.totalTransactions).detail("Delta", req.totalReleasedTransactions - p.totalTransactions);
         if (p.totalTransactions > 0) {
             self.smoothReleasedTransactions.addDelta( req.totalReleasedTransactions - p.totalTransactions );
@@ -1397,8 +1400,8 @@ ACTOR Future<Void> ratekeeper(RatekeeperInterface rkInterf, Reference<AsyncVar<S
         p.batchTransactions = req.batchReleasedTransactions;
         p.lastUpdateTime = now();

-        reply.transactionRate = self.normalLimits.tpsLimit / self.proxyInfo.size();
-        reply.batchTransactionRate = self.batchLimits.tpsLimit / self.proxyInfo.size();
+        reply.transactionRate = self.normalLimits.tpsLimit / self.grvProxyInfo.size();
+        reply.batchTransactionRate = self.batchLimits.tpsLimit / self.grvProxyInfo.size();
         reply.leaseDuration = SERVER_KNOBS->METRIC_UPDATE_RATE;

         if(p.lastThrottledTagChangeId != self.throttledTagChangeId || now() > p.lastTagPushTime + SERVER_KNOBS->TAG_THROTTLE_PUSH_INTERVAL) {
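The reply computation above splits the cluster-wide TPS budget evenly across the GRV proxies currently known to the ratekeeper, and the lease duration bounds how long a proxy may spend that budget before it must refresh. A toy calculation (all numbers are illustrative, not real knob values):

#include <cstdio>

int main() {
    double tpsLimit = 120000.0;  // cluster-wide normal-priority limit
    int grvProxies = 4;          // grvProxyInfo.size()
    double leaseDuration = 0.1;  // seconds; stands in for METRIC_UPDATE_RATE
    double perProxyRate = tpsLimit / grvProxies;
    printf("per-proxy: %g tps, ~%g transactions per lease\n",
           perProxyRate, perProxyRate * leaseDuration); // 30000 tps, ~3000 per lease
}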
@@ -44,7 +44,7 @@ struct ProxyRequestsInfo {
 namespace{
 struct Resolver : ReferenceCounted<Resolver> {
     UID dbgid;
-    int proxyCount, resolverCount;
+    int commitProxyCount, resolverCount;
     NotifiedVersion version;
     AsyncVar<Version> neededVersion;

@@ -77,8 +77,8 @@ struct Resolver : ReferenceCounted<Resolver> {

     Future<Void> logger;

-    Resolver( UID dbgid, int proxyCount, int resolverCount )
-      : dbgid(dbgid), proxyCount(proxyCount), resolverCount(resolverCount), version(-1), conflictSet( newConflictSet() ), iopsSample( SERVER_KNOBS->KEY_BYTES_PER_SAMPLE ), debugMinRecentStateVersion(0),
+    Resolver( UID dbgid, int commitProxyCount, int resolverCount )
+      : dbgid(dbgid), commitProxyCount(commitProxyCount), resolverCount(resolverCount), version(-1), conflictSet( newConflictSet() ), iopsSample( SERVER_KNOBS->KEY_BYTES_PER_SAMPLE ), debugMinRecentStateVersion(0),
         cc("Resolver", dbgid.toString()),
         resolveBatchIn("ResolveBatchIn", cc), resolveBatchStart("ResolveBatchStart", cc), resolvedTransactions("ResolvedTransactions", cc), resolvedBytes("ResolvedBytes", cc),
         resolvedReadConflictRanges("ResolvedReadConflictRanges", cc), resolvedWriteConflictRanges("ResolvedWriteConflictRanges", cc), transactionsAccepted("TransactionsAccepted", cc),
@@ -238,12 +238,12 @@ ACTOR Future<Void> resolveBatch(
     //TraceEvent("ResolveBatch", self->dbgid).detail("PrevVersion", req.prevVersion).detail("Version", req.version).detail("StateTransactionVersions", self->recentStateTransactionSizes.size()).detail("StateBytes", stateBytes).detail("FirstVersion", self->recentStateTransactionSizes.empty() ? -1 : self->recentStateTransactionSizes.front().first).detail("StateMutationsIn", req.txnStateTransactions.size()).detail("StateMutationsOut", reply.stateMutations.size()).detail("From", proxyAddress);

     ASSERT(!proxyInfo.outstandingBatches.empty());
-    ASSERT(self->proxyInfoMap.size() <= self->proxyCount+1);
+    ASSERT(self->proxyInfoMap.size() <= self->commitProxyCount+1);

     // SOMEDAY: This is O(n) in number of proxies. O(log n) solution using appropriate data structure?
     Version oldestProxyVersion = req.version;
     for(auto itr = self->proxyInfoMap.begin(); itr != self->proxyInfoMap.end(); ++itr) {
-        //TraceEvent("ResolveBatchProxyVersion", self->dbgid).detail("Proxy", itr->first).detail("Version", itr->second.lastVersion);
+        //TraceEvent("ResolveBatchProxyVersion", self->dbgid).detail("CommitProxy", itr->first).detail("Version", itr->second.lastVersion);
         if(itr->first.isValid()) { // Don't consider the first master request
             oldestProxyVersion = std::min(itr->second.lastVersion, oldestProxyVersion);
         }
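The min-scan above determines how much recent state the resolver may garbage-collect: state transactions are kept until every commit proxy has seen them, so the oldest lastVersion across proxies is the bound (this is the O(n) pass flagged by the SOMEDAY comment). The core of it in isolation:

#include <algorithm>
#include <cstdio>
#include <map>

using Version = long long;

int main() {
    // proxyLastVersion stands in for proxyInfoMap: proxy id -> lastVersion.
    std::map<int, Version> proxyLastVersion{ {1, 500}, {2, 480}, {3, 520} };
    Version reqVersion = 530;
    Version oldest = reqVersion;
    for (auto& kv : proxyLastVersion)
        oldest = std::min(kv.second, oldest);
    printf("state up to version %lld can be discarded\n", oldest); // 480
}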
@@ -257,7 +257,7 @@ ACTOR Future<Void> resolveBatch(
     TEST(oldestProxyVersion != req.version); // The proxy that sent this request does not have the oldest current version

     bool anyPopped = false;
-    if(firstUnseenVersion <= oldestProxyVersion && self->proxyInfoMap.size() == self->proxyCount+1) {
+    if(firstUnseenVersion <= oldestProxyVersion && self->proxyInfoMap.size() == self->commitProxyCount+1) {
         TEST(true); // Deleting old state transactions
         self->recentStateTransactions.erase( self->recentStateTransactions.begin(), self->recentStateTransactions.upper_bound( oldestProxyVersion ) );
         self->debugMinRecentStateVersion = oldestProxyVersion + 1;
@@ -311,7 +311,7 @@ ACTOR Future<Void> resolverCore(
     ResolverInterface resolver,
     InitializeResolverRequest initReq)
 {
-    state Reference<Resolver> self( new Resolver(resolver.id(), initReq.proxyCount, initReq.resolverCount) );
+    state Reference<Resolver> self(new Resolver(resolver.id(), initReq.commitProxyCount, initReq.resolverCount));
     state ActorCollection actors(false);
     state Future<Void> doPollMetrics = self->resolverCount > 1 ? Void() : Future<Void>(Never());
     actors.add( waitFailureServer(resolver.waitFailure.getFuture()) );
@@ -89,9 +89,10 @@ ACTOR Future<Void> restoreApplierCore(RestoreApplierInterface applierInterf, int
                 break;
             }
         }
-        TraceEvent("RestoreApplierCore", self->id()).detail("Request", requestTypeStr); // For debug only
+        //TraceEvent("RestoreApplierCore", self->id()).detail("Request", requestTypeStr); // For debug only
     } catch (Error& e) {
-        TraceEvent(SevWarn, "FastRestoreApplierError", self->id())
+        bool isError = e.code() != error_code_operation_cancelled;
+        TraceEvent(isError ? SevError : SevWarnAlways, "FastRestoreApplierError", self->id())
             .detail("RequestType", requestTypeStr)
             .error(e, true);
         actors.clear(false);
@@ -477,7 +478,7 @@ ACTOR static Future<Void> applyStagingKeysBatch(std::map<Key, StagingKey>::itera
     state Reference<ReadYourWritesTransaction> tr(new ReadYourWritesTransaction(cx));
     state int sets = 0;
     state int clears = 0;
-    state Key endKey = begin->second.key;
+    state Key endKey = begin->first;
     TraceEvent(SevFRDebugInfo, "FastRestoreApplierPhaseApplyStagingKeysBatch", applierID).detail("Begin", begin->first);
     loop {
         try {
@@ -507,7 +508,7 @@ ACTOR static Future<Void> applyStagingKeysBatch(std::map<Key, StagingKey>::itera
             } else {
                 ASSERT(false);
             }
-            endKey = iter != end ? iter->second.key : endKey;
+            endKey = iter != end ? iter->first : endKey;
             iter++;
             if (sets > 10000000 || clears > 10000000) {
                 TraceEvent(SevError, "FastRestoreApplierPhaseApplyStagingKeysBatchInfiniteLoop", applierID)
@@ -521,6 +522,7 @@ ACTOR static Future<Void> applyStagingKeysBatch(std::map<Key, StagingKey>::itera
                 .detail("End", endKey)
                 .detail("Sets", sets)
                 .detail("Clears", clears);
+            tr->addWriteConflictRange(KeyRangeRef(begin->first, keyAfter(endKey))); // Reduce resolver load
             wait(tr->commit());
             cc->appliedTxns += 1;
             break;
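The added addWriteConflictRange declares one covering range [begin, keyAfter(end)) for the whole batch instead of a conflict range per key, so the resolver checks a single range. keyAfter(k) in FoundationDB is k with a single 0x00 byte appended, the smallest key strictly greater than k. A sketch with std::string standing in for the real StringRef/KeyRangeRef API:

#include <cstdio>
#include <string>

// Sketch of FoundationDB's keyAfter: append one zero byte.
std::string keyAfter(const std::string& k) { return k + '\0'; }

int main() {
    std::string beginKey = "apple", endKey = "banana";
    std::string rangeEnd = keyAfter(endKey); // "banana\x00"
    // One covering write-conflict range for every key the batch touched.
    printf("conflict range: [%s, %s\\x00)  (%zu bytes end key)\n",
           beginKey.c_str(), endKey.c_str(), rangeEnd.size());
}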
@@ -55,7 +55,7 @@ struct StagingKey {
     LogMessageVersion version; // largest version of set or clear for the key
     std::map<LogMessageVersion, Standalone<MutationRef>> pendingMutations; // mutations not set or clear type

-    explicit StagingKey() : version(0), type(MutationRef::MAX_ATOMIC_OP) {}
+    explicit StagingKey(Key key) : key(key), version(0), type(MutationRef::MAX_ATOMIC_OP) {}

     // Add mutation m at newVersion to stagingKey
     // Assume: SetVersionstampedKey and SetVersionstampedValue have been converted to set
@@ -148,7 +148,7 @@ struct StagingKey {
         }
         for (; lb != pendingMutations.end(); lb++) {
             MutationRef mutation = lb->second;
-            if (type == MutationRef::CompareAndClear) { // Special atomicOp
+            if (mutation.type == MutationRef::CompareAndClear) { // Special atomicOp
                 Arena arena;
                 Optional<StringRef> inputVal;
                 if (hasBaseValue()) {
@@ -167,14 +167,14 @@ struct StagingKey {
                 val = applyAtomicOp(inputVal, mutation.param2, (MutationRef::Type)mutation.type);
                 type = MutationRef::SetValue; // Precomputed result should be set to DB.
             } else if (mutation.type == MutationRef::SetValue || mutation.type == MutationRef::ClearRange) {
-                type = MutationRef::SetValue; // Precomputed result should be set to DB.
+                type = MutationRef::SetValue;
                 TraceEvent(SevError, "FastRestoreApplierPrecomputeResultUnexpectedSet", applierID)
                     .detail("BatchIndex", batchIndex)
                     .detail("Context", context)
                     .detail("MutationType", getTypeString(mutation.type))
                     .detail("Version", lb->first.toString());
             } else {
-                TraceEvent(SevWarnAlways, "FastRestoreApplierPrecomputeResultSkipUnexpectedBackupMutation", applierID)
+                TraceEvent(SevError, "FastRestoreApplierPrecomputeResultSkipUnexpectedBackupMutation", applierID)
                     .detail("BatchIndex", batchIndex)
                     .detail("Context", context)
                     .detail("MutationType", getTypeString(mutation.type))
@@ -291,7 +291,7 @@ struct ApplierBatchData : public ReferenceCounted<ApplierBatchData> {

     void addMutation(MutationRef m, LogMessageVersion ver) {
         if (!isRangeMutation(m)) {
-            auto item = stagingKeys.emplace(m.param1, StagingKey());
+            auto item = stagingKeys.emplace(m.param1, StagingKey(m.param1));
             item.first->second.add(m, ver);
         } else {
             stagingKeyRanges.insert(StagingKeyRange(m, ver));
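The change above pairs with the new StagingKey(Key) constructor: a default-constructed StagingKey never learned which key it staged, so it is now built with the key it represents. A sketch of the emplace pattern (StagingKey here is a stripped-down illustration of the real struct):

#include <cstdio>
#include <map>
#include <string>

struct StagingKey {
    std::string key;
    explicit StagingKey(std::string k) : key(std::move(k)) {}
};

int main() {
    std::map<std::string, StagingKey> stagingKeys;
    std::string param1 = "k1";
    // emplace returns {iterator, inserted}; on a repeated key the existing
    // entry is kept, and only add() (elided here) would be called on it.
    auto item = stagingKeys.emplace(param1, StagingKey(param1));
    printf("inserted=%d key=%s\n", (int)item.second, item.first->second.key.c_str());
}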
|
||||||
|
|
@@ -312,6 +312,8 @@ ACTOR Future<Standalone<VectorRef<KeyValueRef>>> decodeLogFileBlock(Reference<IA
 	int rLen = wait(file->read(mutateString(buf), len, offset));
 	if (rLen != len) throw restore_bad_read();
 
+	simulateBlobFailure();
+
 	Standalone<VectorRef<KeyValueRef>> results({}, buf.arena());
 	state StringRefReader reader(buf, restore_corrupted_data());
 
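The new simulateBlobFailure() call is a simulation-only fault-injection point placed right after the blob read, so the retry logic added later in this commit is actually exercised by simulation tests. Its definition is outside this excerpt; a hedged sketch of the usual shape of such a hook (illustrative name and probability, not the committed code):

    void simulateBlobFailureSketch() {
    	// Only under simulation: randomly fail as if the blob store misbehaved.
    	if (g_network->isSimulated() && deterministicRandom()->random01() < 0.01) {
    		throw http_request_failed(); // drives the retry loops added below
    	}
    }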
@@ -307,6 +307,12 @@ Future<Void> getBatchReplies(RequestStream<Request> Interface::*channel, std::ma
 				if (ongoingReplies[j].isReady()) {
 					std::get<2>(replyDurations[ongoingRepliesIndex[j]]) = now();
 					--oustandingReplies;
+				} else if (ongoingReplies[j].isError()) {
+					// When this happens,
+					// the above assertion ASSERT(ongoingReplies.size() == oustandingReplies) will fail
+					TraceEvent(SevError, "FastRestoreGetBatchRepliesReplyError")
+					    .detail("OngoingReplyIndex", j)
+					    .detail("FutureError", ongoingReplies[j].getError().what());
 				}
 			}
 		}
 
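Before this hunk, a reply Future that failed was invisible: it never became ready, so the outstanding count (`oustandingReplies`, spelled that way in the source) never drained and nothing was logged. The new branch makes the failure loud. A sketch mirroring the Future states the loop distinguishes:

    template <class T>
    void classifyReply(const Future<T>& f) {
    	if (f.isReady()) {
    		// completed with a value: f.get() is safe; count it as done
    	} else if (f.isError()) {
    		// failed: getError() is valid only in this state
    		TraceEvent(SevError, "ReplyError").detail("FutureError", f.getError().what());
    	} // otherwise: still in flight
    }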
@@ -58,6 +58,9 @@ ACTOR Future<Void> sendMutationsToApplier(
 ACTOR static Future<Void> _parseLogFileToMutationsOnLoader(NotifiedVersion* pProcessedFileOffset,
                                                            SerializedMutationListMap* mutationMap,
                                                            Reference<IBackupContainer> bc, RestoreAsset asset);
+ACTOR static Future<Void> parseLogFileToMutationsOnLoader(NotifiedVersion* pProcessedFileOffset,
+                                                          SerializedMutationListMap* mutationMap,
+                                                          Reference<IBackupContainer> bc, RestoreAsset asset);
 ACTOR static Future<Void> _parseRangeFileToMutationsOnLoader(
     std::map<LoadingParam, VersionedMutationsMap>::iterator kvOpsIter,
     std::map<LoadingParam, SampledMutationsVec>::iterator samplesIter, LoaderCounters* cc,
@@ -280,8 +283,8 @@ ACTOR Future<Void> restoreLoaderCore(RestoreLoaderInterface loaderInterf, int no
 			when(wait(error)) { TraceEvent("FastRestoreLoaderActorCollectionError", self->id()); }
 		}
 	} catch (Error& e) {
-		TraceEvent(e.code() == error_code_broken_promise ? SevError : SevWarnAlways, "FastRestoreLoaderError",
-		           self->id())
+		bool isError = e.code() != error_code_operation_cancelled; // == error_code_broken_promise
+		TraceEvent(isError ? SevError : SevWarnAlways, "FastRestoreLoaderError", self->id())
 		    .detail("RequestType", requestTypeStr)
 		    .error(e, true);
 		actors.clear(false);
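The severity rule flips here: previously only broken_promise escalated to SevError; now everything except operation_cancelled does, since cancellation is the one error that is routine while the loader shuts down. Restated as a standalone helper (hypothetical, for clarity only):

    Severity severityFor(const Error& e) {
    	// Cancellation is expected during teardown; anything else is a real fault.
    	return e.code() == error_code_operation_cancelled ? SevWarnAlways : SevError;
    }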
@@ -354,6 +357,8 @@ ACTOR static Future<Void> _parsePartitionedLogFileOnLoader(
 	int rLen = wait(file->read(mutateString(buf), asset.len, asset.offset));
 	if (rLen != asset.len) throw restore_bad_read();
 
+	simulateBlobFailure();
+
 	TraceEvent("FastRestoreLoaderDecodingLogFile")
 	    .detail("BatchIndex", asset.batchIndex)
 	    .detail("Filename", asset.filename)
@@ -460,6 +465,39 @@ ACTOR static Future<Void> _parsePartitionedLogFileOnLoader(
 	return Void();
 }
 
+// wrapper of _parsePartitionedLogFileOnLoader to retry on blob error
+ACTOR static Future<Void> parsePartitionedLogFileOnLoader(
+    KeyRangeMap<Version>* pRangeVersions, NotifiedVersion* processedFileOffset,
+    std::map<LoadingParam, VersionedMutationsMap>::iterator kvOpsIter,
+    std::map<LoadingParam, SampledMutationsVec>::iterator samplesIter, LoaderCounters* cc,
+    Reference<IBackupContainer> bc, RestoreAsset asset) {
+	state int readFileRetries = 0;
+	loop {
+		try {
+			wait(_parsePartitionedLogFileOnLoader(pRangeVersions, processedFileOffset, kvOpsIter, samplesIter, cc, bc,
+			                                      asset));
+			break;
+		} catch (Error& e) {
+			if (e.code() == error_code_restore_bad_read || e.code() == error_code_restore_unsupported_file_version ||
+			    e.code() == error_code_restore_corrupted_data_padding) { // no retriable error
+				TraceEvent(SevError, "FastRestoreFileRestoreCorruptedPartitionedLogFileBlock").error(e);
+				throw;
+			} else if (e.code() == error_code_http_request_failed || e.code() == error_code_connection_failed ||
+			           e.code() == error_code_timed_out || e.code() == error_code_lookup_failed) {
+				// blob http request failure, retry
+				TraceEvent(SevWarnAlways, "FastRestoreDecodedPartitionedLogFileConnectionFailure")
+				    .detail("Retries", ++readFileRetries)
+				    .error(e);
+				wait(delayJittered(0.1));
+			} else {
+				TraceEvent(SevError, "FastRestoreParsePartitionedLogFileOnLoaderUnexpectedError").error(e);
+				throw;
+			}
+		}
+	}
+	return Void();
+}
+
 ACTOR Future<Void> _processLoadingParam(KeyRangeMap<Version>* pRangeVersions, LoadingParam param,
                                         Reference<LoaderBatchData> batchData, UID loaderID,
                                         Reference<IBackupContainer> bc) {
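This wrapper is one of three readers in the commit that gain the same three-way error split: corruption errors (restore_bad_read, restore_unsupported_file_version, restore_corrupted_data_padding) are fatal and rethrown; transient blob-store errors (http_request_failed, connection_failed, timed_out, lookup_failed) are retried indefinitely after a jittered delay, with readFileRetries counted only for tracing; anything else is unexpected and rethrown. The same classification appears inlined in _parseRangeFileToMutationsOnLoader further down, where the decoded block must stay in scope across retries. Distilled into one generic sketch in flow's ACTOR dialect (hypothetical helper, not part of the commit):

    ACTOR Future<Void> retryOnTransientBlobError(std::function<Future<Void>()> body) {
    	state int retries = 0;
    	loop {
    		try {
    			wait(body());
    			return Void();
    		} catch (Error& e) {
    			bool transient = e.code() == error_code_http_request_failed ||
    			                 e.code() == error_code_connection_failed ||
    			                 e.code() == error_code_timed_out || e.code() == error_code_lookup_failed;
    			if (!transient) throw; // corruption and unexpected errors stay fatal
    			TraceEvent(SevWarnAlways, "BlobReadRetry").detail("Retries", ++retries).error(e);
    			wait(delayJittered(0.1)); // jitter avoids synchronized retry storms
    		}
    	}
    }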
@@ -496,12 +534,12 @@ ACTOR Future<Void> _processLoadingParam(KeyRangeMap<Version>* pRangeVersions, Lo
 		} else {
 			// TODO: Sanity check the log file's range is overlapped with the restored version range
 			if (param.isPartitionedLog()) {
-				fileParserFutures.push_back(_parsePartitionedLogFileOnLoader(pRangeVersions, &processedFileOffset,
+				fileParserFutures.push_back(parsePartitionedLogFileOnLoader(pRangeVersions, &processedFileOffset,
 				                                                             kvOpsPerLPIter, samplesIter,
 				                                                             &batchData->counters, bc, subAsset));
 			} else {
 				fileParserFutures.push_back(
-				    _parseLogFileToMutationsOnLoader(&processedFileOffset, &mutationMap, bc, subAsset));
+				    parseLogFileToMutationsOnLoader(&processedFileOffset, &mutationMap, bc, subAsset));
 			}
 		}
 	}
@@ -586,9 +624,10 @@ ACTOR Future<Void> handleLoadFileRequest(RestoreLoadFileRequest req, Reference<R
 		state int samplesMessages = fSendSamples.size();
 		wait(waitForAll(fSendSamples));
 	} catch (Error& e) { // In case ci.samples throws broken_promise due to unstable network
-		if (e.code() == error_code_broken_promise) {
+		if (e.code() == error_code_broken_promise || e.code() == error_code_operation_cancelled) {
 			TraceEvent(SevWarnAlways, "FastRestoreLoaderPhaseLoadFileSendSamples")
-			    .detail("SamplesMessages", samplesMessages);
+			    .detail("SamplesMessages", samplesMessages)
+			    .error(e, true);
 		} else {
 			TraceEvent(SevError, "FastRestoreLoaderPhaseLoadFileSendSamplesUnexpectedError").error(e, true);
 		}
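Since the tolerated branch now also catches operation_cancelled, the event attaches the error with the two-argument form of TraceEvent::error; the second parameter (includeCancelled, stated here as an assumption about this vintage of the flow trace API) opts in to recording cancellation errors that the one-argument form skips:

    // Assumed signature, paraphrased from usage in this commit:
    //   TraceEvent& TraceEvent::error(const Error& e, bool includeCancelled = false);
    TraceEvent(SevWarnAlways, "FastRestoreLoaderPhaseLoadFileSendSamples")
        .detail("SamplesMessages", samplesMessages)
        .error(e, true); // record the error even when it is a cancellation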
@@ -1107,10 +1146,14 @@ ACTOR static Future<Void> _parseRangeFileToMutationsOnLoader(
 	// Sanity check the range file is within the restored version range
 	ASSERT_WE_THINK(asset.isInVersionRange(version));
 
-	// The set of key value version is rangeFile.version. the key-value set in the same range file has the same version
-	Reference<IAsyncFile> inFile = wait(bc->readFile(asset.filename));
 	state Standalone<VectorRef<KeyValueRef>> blockData;
+	// should retry here
+	state int readFileRetries = 0;
+	loop {
 		try {
+			// The set of key value version is rangeFile.version. the key-value set in the same range file has the same
+			// version
+			Reference<IAsyncFile> inFile = wait(bc->readFile(asset.filename));
 			Standalone<VectorRef<KeyValueRef>> kvs =
 			    wait(fileBackup::decodeRangeFileBlock(inFile, asset.offset, asset.len));
 			TraceEvent("FastRestoreLoaderDecodedRangeFile")

@@ -1118,9 +1161,24 @@ ACTOR static Future<Void> _parseRangeFileToMutationsOnLoader(
 			    .detail("Filename", asset.filename)
 			    .detail("DataSize", kvs.contents().size());
 			blockData = kvs;
+			break;
 		} catch (Error& e) {
-			TraceEvent(SevError, "FileRestoreCorruptRangeFileBlock").error(e);
+			if (e.code() == error_code_restore_bad_read || e.code() == error_code_restore_unsupported_file_version ||
+			    e.code() == error_code_restore_corrupted_data_padding) { // no retriable error
+				TraceEvent(SevError, "FastRestoreFileRestoreCorruptedRangeFileBlock").error(e);
 				throw;
+			} else if (e.code() == error_code_http_request_failed || e.code() == error_code_connection_failed ||
+			           e.code() == error_code_timed_out || e.code() == error_code_lookup_failed) {
+				// blob http request failure, retry
+				TraceEvent(SevWarnAlways, "FastRestoreDecodedRangeFileConnectionFailure")
+				    .detail("Retries", ++readFileRetries)
+				    .error(e);
+				wait(delayJittered(0.1));
+			} else {
+				TraceEvent(SevError, "FastRestoreParseRangeFileOnLoaderUnexpectedError").error(e);
+				throw;
+			}
+		}
 	}
 
 	// First and last key are the range for this file
@@ -1218,6 +1276,36 @@ ACTOR static Future<Void> _parseLogFileToMutationsOnLoader(NotifiedVersion* pPro
 	return Void();
 }
 
+// retry on _parseLogFileToMutationsOnLoader
+ACTOR static Future<Void> parseLogFileToMutationsOnLoader(NotifiedVersion* pProcessedFileOffset,
+                                                          SerializedMutationListMap* pMutationMap,
+                                                          Reference<IBackupContainer> bc, RestoreAsset asset) {
+	state int readFileRetries = 0;
+	loop {
+		try {
+			wait(_parseLogFileToMutationsOnLoader(pProcessedFileOffset, pMutationMap, bc, asset));
+			break;
+		} catch (Error& e) {
+			if (e.code() == error_code_restore_bad_read || e.code() == error_code_restore_unsupported_file_version ||
+			    e.code() == error_code_restore_corrupted_data_padding) { // non retriable error
+				TraceEvent(SevError, "FastRestoreFileRestoreCorruptedLogFileBlock").error(e);
+				throw;
+			} else if (e.code() == error_code_http_request_failed || e.code() == error_code_connection_failed ||
+			           e.code() == error_code_timed_out || e.code() == error_code_lookup_failed) {
+				// blob http request failure, retry
+				TraceEvent(SevWarnAlways, "FastRestoreDecodedLogFileConnectionFailure")
+				    .detail("Retries", ++readFileRetries)
+				    .error(e);
+				wait(delayJittered(0.1));
+			} else {
+				TraceEvent(SevError, "FastRestoreParseLogFileToMutationsOnLoaderUnexpectedError").error(e);
+				throw;
+			}
+		}
+	}
+	return Void();
+}
+
 // Return applier IDs that are used to apply key-values
 std::vector<UID> getApplierIDs(std::map<Key, UID>& rangeToApplier) {
 	std::vector<UID> applierIDs;
@@ -172,7 +172,7 @@ ACTOR Future<ISimulator::KillType> simulatedFDBDRebooter(Reference<ClusterConnec
 		    .detail("PackageName", FDB_VT_PACKAGE_NAME)
 		    .detail("DataFolder", *dataFolder)
 		    .detail("ConnectionString", connFile ? connFile->getConnectionString().toString() : "")
-		    .detailf("ActualTime", "%lld", DEBUG_DETERMINISM ? 0 : time(NULL))
+		    .detailf("ActualTime", "%lld", DEBUG_DETERMINISM ? 0 : time(nullptr))
 		    .detail("CommandLine", "fdbserver -r simulation")
 		    .detail("BuggifyEnabled", isBuggifyEnabled(BuggifyType::General))
 		    .detail("Simulated", true)

@@ -559,7 +559,7 @@ ACTOR Future<Void> restartSimulatedSystem(vector<Future<Void>>* systemActors, st
 	int processesPerMachine = atoi(ini.GetValue("META", "processesPerMachine"));
 	int listenersPerProcess = 1;
 	auto listenersPerProcessStr = ini.GetValue("META", "listenersPerProcess");
-	if(listenersPerProcessStr != NULL) {
+	if(listenersPerProcessStr != nullptr) {
 		listenersPerProcess = atoi(listenersPerProcessStr);
 	}
 	int desiredCoordinators = atoi(ini.GetValue("META", "desiredCoordinators"));

@@ -586,7 +586,7 @@ ACTOR Future<Void> restartSimulatedSystem(vector<Future<Void>>* systemActors, st
 			}
 
 			auto zoneIDini = ini.GetValue(machineIdString.c_str(), "zoneId");
-			if( zoneIDini == NULL ) {
+			if( zoneIDini == nullptr ) {
 				zoneId = machineId;
 			} else {
 				zoneId = StringRef(zoneIDini);

@@ -610,11 +610,11 @@ ACTOR Future<Void> restartSimulatedSystem(vector<Future<Void>>* systemActors, st
 				if (parsedIp.present()) {
 					return parsedIp.get();
 				} else {
-					return IPAddress(strtoul(ipStr, NULL, 10));
+					return IPAddress(strtoul(ipStr, nullptr, 10));
 				}
 			};
 
-			if( ip == NULL ) {
+			if( ip == nullptr ) {
 				for (int i = 0; i < processes; i++) {
 					const char* val =
 					    ini.GetValue(machineIdString.c_str(), format("ipAddr%d", i * listenersPerProcess).c_str());
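These hunks belong to a tree-wide NULL-to-nullptr sweep that also covers the SkipList and StorageCache hunks below. The rationale is the standard C++11 one: NULL is an integer constant and can silently pick a non-pointer overload, while nullptr has its own type that converts only to pointers. A self-contained illustration:

    void f(int);   // (1)
    void f(char*); // (2)
    void demo() {
    	// f(NULL);  // calls (1) or is ambiguous, depending on how NULL is defined
    	f(nullptr);  // unambiguous: nullptr converts only to pointer types, so (2)
    }
    void f(int) {}
    void f(char*) {}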
@@ -735,7 +735,7 @@ void SimulationConfig::generateNormalConfig(int minimumReplication, int minimumR
 	bool generateFearless = simple ? false : (minimumRegions > 1 || deterministicRandom()->random01() < 0.5);
 	datacenters = simple ? 1 : ( generateFearless ? ( minimumReplication > 0 || deterministicRandom()->random01() < 0.5 ? 4 : 6 ) : deterministicRandom()->randomInt( 1, 4 ) );
 	if (deterministicRandom()->random01() < 0.25) db.desiredTLogCount = deterministicRandom()->randomInt(1,7);
-	if (deterministicRandom()->random01() < 0.25) db.proxyCount = deterministicRandom()->randomInt(1, 7);
+	if (deterministicRandom()->random01() < 0.25) db.commitProxyCount = deterministicRandom()->randomInt(1, 7);
 	if (deterministicRandom()->random01() < 0.25) db.grvProxyCount = deterministicRandom()->randomInt(1, 4);
 	if (deterministicRandom()->random01() < 0.25) db.resolverCount = deterministicRandom()->randomInt(1,7);
 	int storage_engine_type = deterministicRandom()->randomInt(0, 4);

@@ -772,7 +772,7 @@ void SimulationConfig::generateNormalConfig(int minimumReplication, int minimumR
 	// set_config("memory-radixtree-beta");
 	if(simple) {
 		db.desiredTLogCount = 1;
-		db.proxyCount = 1;
+		db.commitProxyCount = 1;
 		db.grvProxyCount = 1;
 		db.resolverCount = 1;
 	}
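The proxyCount-to-commitProxyCount rename is one corner of the larger rename running through the rest of this commit: with GRV proxies already split into their own role, the remaining "master proxies" are now consistently called commit proxies (MasterProxyInterface becomes CommitProxyInterface, masterProxies becomes commitProxies, and so on in the status code below). The simulated configuration sizes the two roles independently; illustrative fragment, field names as in the hunks above:

    db.commitProxyCount = 3; // commit path (formerly just "proxies")
    db.grvProxyCount = 1;    // read-version (GRV) path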
@@ -318,14 +318,14 @@ public:
 		Node* alreadyChecked;
 		StringRef value;
 
-		Finger() : level(MaxLevels), x(NULL), alreadyChecked(NULL) {}
+		Finger() : level(MaxLevels), x(nullptr), alreadyChecked(nullptr) {}
 
-		Finger(Node* header, const StringRef& ptr) : value(ptr), level(MaxLevels), alreadyChecked(NULL), x(header) {}
+		Finger(Node* header, const StringRef& ptr) : value(ptr), level(MaxLevels), alreadyChecked(nullptr), x(header) {}
 
 		void init(const StringRef& value, Node* header) {
 			this->value = value;
 			x = header;
-			alreadyChecked = NULL;
+			alreadyChecked = nullptr;
 			level = MaxLevels;
 		}
 

@@ -366,7 +366,7 @@ public:
 			if (n && n->length() == value.size() && !memcmp(n->value(), value.begin(), value.size()))
 				return n;
 			else
-				return NULL;
+				return nullptr;
 		}
 
 		StringRef getValue() const {

@@ -388,16 +388,16 @@ public:
 	explicit SkipList(Version version = 0) {
 		header = Node::create(StringRef(), MaxLevels - 1);
 		for (int l = 0; l < MaxLevels; l++) {
-			header->setNext(l, NULL);
+			header->setNext(l, nullptr);
 			header->setMaxVersion(l, version);
 		}
 	}
 	~SkipList() { destroy(); }
-	SkipList(SkipList&& other) noexcept : header(other.header) { other.header = NULL; }
+	SkipList(SkipList&& other) noexcept : header(other.header) { other.header = nullptr; }
 	void operator=(SkipList&& other) noexcept {
 		destroy();
 		header = other.header;
-		other.header = NULL;
+		other.header = nullptr;
 	}
 	void swap(SkipList& other) { std::swap(header, other.header); }
 

@@ -406,7 +406,7 @@ public:
 		const Finger& startF = fingers[r * 2];
 		const Finger& endF = fingers[r * 2 + 1];
 
-		if (endF.found() == NULL) insert(endF, endF.finger[0]->getMaxVersion(0));
+		if (endF.found() == nullptr) insert(endF, endF.finger[0]->getMaxVersion(0));
 
 		remove(startF, endF);
 		insert(startF, version);

@@ -470,7 +470,7 @@ public:
 			for (int i = ends.size() - 1; i >= 0; i--) {
 				ends[i].finger[l]->setNext(l, input[i + 1].header->getNext(l));
 				if (l && (!i || ends[i].finger[l] != input[i].header)) ends[i].finger[l]->calcVersionForLevel(l);
-				input[i + 1].header->setNext(l, NULL);
+				input[i + 1].header->setNext(l, nullptr);
 			}
 		}
 		swap(input[0]);

@@ -499,7 +499,7 @@ public:
 		for (int i = 1; i < count; i++) {
 			results[i].level = startLevel;
 			results[i].x = x;
-			results[i].alreadyChecked = NULL;
+			results[i].alreadyChecked = nullptr;
 			results[i].value = values[i];
 			for (int j = startLevel; j < MaxLevels; j++) results[i].finger[j] = results[0].finger[j];
 		}

@@ -697,7 +697,7 @@ private:
 		right.header->setMaxVersion(0, f.finger[0]->getMaxVersion(0));
 		for (int l = 0; l < MaxLevels; l++) {
 			right.header->setNext(l, f.finger[l]->getNext(l));
-			f.finger[l]->setNext(l, NULL);
+			f.finger[l]->setNext(l, nullptr);
 		}
 	}
 

@@ -705,7 +705,7 @@ private:
 		Node* node = header;
 		for (int l = MaxLevels - 1; l >= 0; l--) {
 			Node* next;
-			while ((next = node->getNext(l)) != NULL) node = next;
+			while ((next = node->getNext(l)) != nullptr) node = next;
 			end.finger[l] = node;
 		}
 		end.level = 0;
@@ -574,7 +574,7 @@ struct RolesInfo {
 		*pMetricVersion = metricVersion;
 		return roles.insert( std::make_pair(iface.address(), obj ))->second;
 	}
-	JsonBuilderObject& addRole(std::string const& role, MasterProxyInterface& iface, EventMap const& metrics) {
+	JsonBuilderObject& addRole(std::string const& role, CommitProxyInterface& iface, EventMap const& metrics) {
 		JsonBuilderObject obj;
 		obj["id"] = iface.id().shortString();
 		obj["role"] = role;

@@ -646,11 +646,10 @@ ACTOR static Future<JsonBuilderObject> processStatusFetcher(
     WorkerEvents mMetrics, WorkerEvents nMetrics, WorkerEvents errors, WorkerEvents traceFileOpenErrors,
     WorkerEvents programStarts, std::map<std::string, std::vector<JsonBuilderObject>> processIssues,
     vector<std::pair<StorageServerInterface, EventMap>> storageServers,
-    vector<std::pair<TLogInterface, EventMap>> tLogs,
-    vector<std::pair<MasterProxyInterface, EventMap>> proxies,
-    vector<std::pair<GrvProxyInterface, EventMap>> grvProxies,
-    ServerCoordinators coordinators, Database cx, Optional<DatabaseConfiguration> configuration,
-    Optional<Key> healthyZone, std::set<std::string>* incomplete_reasons) {
+    vector<std::pair<TLogInterface, EventMap>> tLogs, vector<std::pair<CommitProxyInterface, EventMap>> commitProxies,
+    vector<std::pair<GrvProxyInterface, EventMap>> grvProxies, ServerCoordinators coordinators, Database cx,
+    Optional<DatabaseConfiguration> configuration, Optional<Key> healthyZone,
+    std::set<std::string>* incomplete_reasons) {
 
 	state JsonBuilderObject processMap;
 

@@ -736,9 +735,9 @@ ACTOR static Future<JsonBuilderObject> processStatusFetcher(
 		roles.addCoordinatorRole(coordinator);
 	}
 
-	state std::vector<std::pair<MasterProxyInterface, EventMap>>::iterator proxy;
-	for(proxy = proxies.begin(); proxy != proxies.end(); ++proxy) {
-		roles.addRole( "proxy", proxy->first, proxy->second );
+	state std::vector<std::pair<CommitProxyInterface, EventMap>>::iterator commit_proxy;
+	for (commit_proxy = commitProxies.begin(); commit_proxy != commitProxies.end(); ++commit_proxy) {
+		roles.addRole("commit_proxy", commit_proxy->first, commit_proxy->second);
 		wait(yield());
 	}
 
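For status consumers the visible effect of this rename is in the JSON: each commit proxy's entry in a process's roles list now reports role commit_proxy instead of proxy. Note that the trace event the proxies publish is still named ProxyMetrics (fetched unchanged below); only interface names and status fields move. Illustrative fragment with made-up values:

    "roles": [
      { "id": "1a2b3c4d5e6f7a8b", "role": "commit_proxy" }
    ]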
@@ -1064,14 +1063,14 @@ ACTOR static Future<JsonBuilderObject> recoveryStateStatusFetcher(WorkerDetails
 	// Add additional metadata for certain statuses
 	if (mStatusCode == RecoveryStatus::recruiting_transaction_servers) {
 		int requiredLogs = atoi( md.getValue("RequiredTLogs").c_str() );
-		int requiredProxies = atoi( md.getValue("RequiredProxies").c_str() );
+		int requiredCommitProxies = atoi(md.getValue("RequiredCommitProxies").c_str());
 		int requiredGrvProxies = atoi(md.getValue("RequiredGrvProxies").c_str());
 		int requiredResolvers = atoi( md.getValue("RequiredResolvers").c_str() );
-		//int requiredProcesses = std::max(requiredLogs, std::max(requiredResolvers, requiredProxies));
+		//int requiredProcesses = std::max(requiredLogs, std::max(requiredResolvers, requiredCommitProxies));
 		//int requiredMachines = std::max(requiredLogs, 1);
 
 		message["required_logs"] = requiredLogs;
-		message["required_proxies"] = requiredProxies;
+		message["required_commit_proxies"] = requiredCommitProxies;
 		message["required_grv_proxies"] = requiredGrvProxies;
 		message["required_resolvers"] = requiredResolvers;
 	} else if (mStatusCode == RecoveryStatus::locking_old_transaction_servers) {
|
||||||
return results;
|
return results;
|
||||||
}
|
}
|
||||||
|
|
||||||
ACTOR static Future<vector<std::pair<MasterProxyInterface, EventMap>>> getProxiesAndMetrics(Reference<AsyncVar<ServerDBInfo>> db, std::unordered_map<NetworkAddress, WorkerInterface> address_workers) {
|
ACTOR static Future<vector<std::pair<CommitProxyInterface, EventMap>>> getCommitProxiesAndMetrics(
|
||||||
vector<std::pair<MasterProxyInterface, EventMap>> results = wait(getServerMetrics(
|
Reference<AsyncVar<ServerDBInfo>> db, std::unordered_map<NetworkAddress, WorkerInterface> address_workers) {
|
||||||
db->get().client.masterProxies, address_workers, std::vector<std::string>{ "CommitLatencyMetrics", "CommitLatencyBands" }));
|
vector<std::pair<CommitProxyInterface, EventMap>> results =
|
||||||
|
wait(getServerMetrics(db->get().client.commitProxies, address_workers,
|
||||||
|
std::vector<std::string>{ "CommitLatencyMetrics", "CommitLatencyBands" }));
|
||||||
|
|
||||||
return results;
|
return results;
|
||||||
}
|
}
|
||||||
|
@ -1755,16 +1756,18 @@ ACTOR static Future<JsonBuilderObject> workloadStatusFetcher(Reference<AsyncVar<
|
||||||
|
|
||||||
// Writes and conflicts
|
// Writes and conflicts
|
||||||
try {
|
try {
|
||||||
state vector<Future<TraceEventFields>> proxyStatFutures;
|
state vector<Future<TraceEventFields>> commitProxyStatFutures;
|
||||||
state vector<Future<TraceEventFields>> grvProxyStatFutures;
|
state vector<Future<TraceEventFields>> grvProxyStatFutures;
|
||||||
std::map<NetworkAddress, WorkerDetails> workersMap;
|
std::map<NetworkAddress, WorkerDetails> workersMap;
|
||||||
for (auto const& w : workers) {
|
for (auto const& w : workers) {
|
||||||
workersMap[w.interf.address()] = w;
|
workersMap[w.interf.address()] = w;
|
||||||
}
|
}
|
||||||
for (auto &p : db->get().client.masterProxies) {
|
for (auto& p : db->get().client.commitProxies) {
|
||||||
auto worker = getWorker(workersMap, p.address());
|
auto worker = getWorker(workersMap, p.address());
|
||||||
if (worker.present())
|
if (worker.present())
|
||||||
proxyStatFutures.push_back(timeoutError(worker.get().interf.eventLogRequest.getReply(EventLogRequest(LiteralStringRef("ProxyMetrics"))), 1.0));
|
commitProxyStatFutures.push_back(timeoutError(
|
||||||
|
worker.get().interf.eventLogRequest.getReply(EventLogRequest(LiteralStringRef("ProxyMetrics"))),
|
||||||
|
1.0));
|
||||||
else
|
else
|
||||||
throw all_alternatives_failed(); // We need data from all proxies for this result to be trustworthy
|
throw all_alternatives_failed(); // We need data from all proxies for this result to be trustworthy
|
||||||
}
|
}
|
||||||
|
@ -1775,7 +1778,7 @@ ACTOR static Future<JsonBuilderObject> workloadStatusFetcher(Reference<AsyncVar<
|
||||||
else
|
else
|
||||||
throw all_alternatives_failed(); // We need data from all proxies for this result to be trustworthy
|
throw all_alternatives_failed(); // We need data from all proxies for this result to be trustworthy
|
||||||
}
|
}
|
||||||
state vector<TraceEventFields> proxyStats = wait(getAll(proxyStatFutures));
|
state vector<TraceEventFields> commitProxyStats = wait(getAll(commitProxyStatFutures));
|
||||||
state vector<TraceEventFields> grvProxyStats = wait(getAll(grvProxyStatFutures));
|
state vector<TraceEventFields> grvProxyStats = wait(getAll(grvProxyStatFutures));
|
||||||
|
|
||||||
StatusCounter txnStartOut;
|
StatusCounter txnStartOut;
|
||||||
|
@ -1798,14 +1801,14 @@ ACTOR static Future<JsonBuilderObject> workloadStatusFetcher(Reference<AsyncVar<
|
||||||
txnMemoryErrors.updateValues(StatusCounter(gps.getValue("TxnRequestErrors")));
|
txnMemoryErrors.updateValues(StatusCounter(gps.getValue("TxnRequestErrors")));
|
||||||
}
|
}
|
||||||
|
|
||||||
for (auto &ps : proxyStats) {
|
for (auto& cps : commitProxyStats) {
|
||||||
mutations.updateValues( StatusCounter(ps.getValue("Mutations")) );
|
mutations.updateValues(StatusCounter(cps.getValue("Mutations")));
|
||||||
mutationBytes.updateValues( StatusCounter(ps.getValue("MutationBytes")) );
|
mutationBytes.updateValues(StatusCounter(cps.getValue("MutationBytes")));
|
||||||
txnConflicts.updateValues( StatusCounter(ps.getValue("TxnConflicts")) );
|
txnConflicts.updateValues(StatusCounter(cps.getValue("TxnConflicts")));
|
||||||
txnCommitOutSuccess.updateValues( StatusCounter(ps.getValue("TxnCommitOutSuccess")) );
|
txnCommitOutSuccess.updateValues(StatusCounter(cps.getValue("TxnCommitOutSuccess")));
|
||||||
txnKeyLocationOut.updateValues( StatusCounter(ps.getValue("KeyServerLocationOut")) );
|
txnKeyLocationOut.updateValues(StatusCounter(cps.getValue("KeyServerLocationOut")));
|
||||||
txnMemoryErrors.updateValues( StatusCounter(ps.getValue("KeyServerLocationErrors")) );
|
txnMemoryErrors.updateValues(StatusCounter(cps.getValue("KeyServerLocationErrors")));
|
||||||
txnMemoryErrors.updateValues( StatusCounter(ps.getValue("TxnCommitErrors")) );
|
txnMemoryErrors.updateValues(StatusCounter(cps.getValue("TxnCommitErrors")));
|
||||||
}
|
}
|
||||||
|
|
||||||
operationsObj["writes"] = mutations.getStatus();
|
operationsObj["writes"] = mutations.getStatus();
|
||||||
|
@@ -2009,78 +2012,98 @@ ACTOR static Future<JsonBuilderObject> clusterSummaryStatisticsFetcher(WorkerEve
 	return statusObj;
 }
 
-static JsonBuilderArray oldTlogFetcher(int* oldLogFaultTolerance, Reference<AsyncVar<ServerDBInfo>> db, std::unordered_map<NetworkAddress, WorkerInterface> const& address_workers) {
-	JsonBuilderArray oldTlogsArray;
-
-	if(db->get().recoveryState >= RecoveryState::ACCEPTING_COMMITS) {
-		for(auto it : db->get().logSystemConfig.oldTLogs) {
-			JsonBuilderObject statusObj;
-			JsonBuilderArray logsObj;
-			Optional<int32_t> sat_log_replication_factor, sat_log_write_anti_quorum, sat_log_fault_tolerance, log_replication_factor, log_write_anti_quorum, log_fault_tolerance, remote_log_replication_factor, remote_log_fault_tolerance;
-
-			int maxFaultTolerance = 0;
-
-			for(int i = 0; i < it.tLogs.size(); i++) {
-				int failedLogs = 0;
-				for(auto& log : it.tLogs[i].tLogs) {
-					JsonBuilderObject logObj;
-					bool failed = !log.present() || !address_workers.count(log.interf().address());
-					logObj["id"] = log.id().shortString();
-					logObj["healthy"] = !failed;
-					if(log.present()) {
-						logObj["address"] = log.interf().address().toString();
-					}
-					logsObj.push_back(logObj);
-					if(failed) {
-						failedLogs++;
-					}
-				}
-				maxFaultTolerance = std::max(maxFaultTolerance, it.tLogs[i].tLogReplicationFactor - 1 - it.tLogs[i].tLogWriteAntiQuorum - failedLogs);
-				if(it.tLogs[i].isLocal && it.tLogs[i].locality == tagLocalitySatellite) {
-					sat_log_replication_factor = it.tLogs[i].tLogReplicationFactor;
-					sat_log_write_anti_quorum = it.tLogs[i].tLogWriteAntiQuorum;
-					sat_log_fault_tolerance = it.tLogs[i].tLogReplicationFactor - 1 - it.tLogs[i].tLogWriteAntiQuorum - failedLogs;
-				}
-				else if(it.tLogs[i].isLocal) {
-					log_replication_factor = it.tLogs[i].tLogReplicationFactor;
-					log_write_anti_quorum = it.tLogs[i].tLogWriteAntiQuorum;
-					log_fault_tolerance = it.tLogs[i].tLogReplicationFactor - 1 - it.tLogs[i].tLogWriteAntiQuorum - failedLogs;
-				}
-				else {
-					remote_log_replication_factor = it.tLogs[i].tLogReplicationFactor;
-					remote_log_fault_tolerance = it.tLogs[i].tLogReplicationFactor - 1 - failedLogs;
-				}
-			}
-			*oldLogFaultTolerance = std::min(*oldLogFaultTolerance, maxFaultTolerance);
-			statusObj["logs"] = logsObj;
+static JsonBuilderObject tlogFetcher(int* logFaultTolerance, const std::vector<TLogSet>& tLogs,
+                                     std::unordered_map<NetworkAddress, WorkerInterface> const& address_workers) {
+	JsonBuilderObject statusObj;
+	JsonBuilderArray logsObj;
+	Optional<int32_t> sat_log_replication_factor, sat_log_write_anti_quorum, sat_log_fault_tolerance,
+	    log_replication_factor, log_write_anti_quorum, log_fault_tolerance, remote_log_replication_factor,
+	    remote_log_fault_tolerance;
+
+	int maxFaultTolerance = 0;
+
+	for (int i = 0; i < tLogs.size(); i++) {
+		int failedLogs = 0;
+		for (auto& log : tLogs[i].tLogs) {
+			JsonBuilderObject logObj;
+			bool failed = !log.present() || !address_workers.count(log.interf().address());
+			logObj["id"] = log.id().shortString();
+			logObj["healthy"] = !failed;
+			if (log.present()) {
+				logObj["address"] = log.interf().address().toString();
+			}
+			logsObj.push_back(logObj);
+			if (failed) {
+				failedLogs++;
+			}
+		}
+		// The log generation's fault tolerance is the maximum tlog fault tolerance of each region.
+		maxFaultTolerance =
+		    std::max(maxFaultTolerance, tLogs[i].tLogReplicationFactor - 1 - tLogs[i].tLogWriteAntiQuorum - failedLogs);
+		if (tLogs[i].isLocal && tLogs[i].locality == tagLocalitySatellite) {
+			sat_log_replication_factor = tLogs[i].tLogReplicationFactor;
+			sat_log_write_anti_quorum = tLogs[i].tLogWriteAntiQuorum;
+			sat_log_fault_tolerance = tLogs[i].tLogReplicationFactor - 1 - tLogs[i].tLogWriteAntiQuorum - failedLogs;
+		} else if (tLogs[i].isLocal) {
+			log_replication_factor = tLogs[i].tLogReplicationFactor;
+			log_write_anti_quorum = tLogs[i].tLogWriteAntiQuorum;
+			log_fault_tolerance = tLogs[i].tLogReplicationFactor - 1 - tLogs[i].tLogWriteAntiQuorum - failedLogs;
+		} else {
+			remote_log_replication_factor = tLogs[i].tLogReplicationFactor;
+			remote_log_fault_tolerance = tLogs[i].tLogReplicationFactor - 1 - failedLogs;
+		}
+	}
+	*logFaultTolerance = std::min(*logFaultTolerance, maxFaultTolerance);
+	statusObj["log_interfaces"] = logsObj;
+	// We may lose logs in this log generation, storage servers may never be able to catch up this log
+	// generation.
+	statusObj["possibly_losing_data"] = maxFaultTolerance < 0;
 
-			if (sat_log_replication_factor.present())
-				statusObj["satellite_log_replication_factor"] = sat_log_replication_factor.get();
-			if (sat_log_write_anti_quorum.present())
-				statusObj["satellite_log_write_anti_quorum"] = sat_log_write_anti_quorum.get();
-			if (sat_log_fault_tolerance.present())
-				statusObj["satellite_log_fault_tolerance"] = sat_log_fault_tolerance.get();
+	if (sat_log_replication_factor.present())
+		statusObj["satellite_log_replication_factor"] = sat_log_replication_factor.get();
+	if (sat_log_write_anti_quorum.present())
+		statusObj["satellite_log_write_anti_quorum"] = sat_log_write_anti_quorum.get();
+	if (sat_log_fault_tolerance.present()) statusObj["satellite_log_fault_tolerance"] = sat_log_fault_tolerance.get();
 
-			if (log_replication_factor.present())
-				statusObj["log_replication_factor"] = log_replication_factor.get();
-			if (log_write_anti_quorum.present())
-				statusObj["log_write_anti_quorum"] = log_write_anti_quorum.get();
-			if (log_fault_tolerance.present())
-				statusObj["log_fault_tolerance"] = log_fault_tolerance.get();
+	if (log_replication_factor.present()) statusObj["log_replication_factor"] = log_replication_factor.get();
+	if (log_write_anti_quorum.present()) statusObj["log_write_anti_quorum"] = log_write_anti_quorum.get();
+	if (log_fault_tolerance.present()) statusObj["log_fault_tolerance"] = log_fault_tolerance.get();
 
-			if (remote_log_replication_factor.present())
-				statusObj["remote_log_replication_factor"] = remote_log_replication_factor.get();
-			if (remote_log_fault_tolerance.present())
-				statusObj["remote_log_fault_tolerance"] = remote_log_fault_tolerance.get();
+	if (remote_log_replication_factor.present())
+		statusObj["remote_log_replication_factor"] = remote_log_replication_factor.get();
+	if (remote_log_fault_tolerance.present())
+		statusObj["remote_log_fault_tolerance"] = remote_log_fault_tolerance.get();
 
-			oldTlogsArray.push_back(statusObj);
-		}
-	}
-
-	return oldTlogsArray;
+	return statusObj;
 }
 
-static JsonBuilderObject faultToleranceStatusFetcher(DatabaseConfiguration configuration, ServerCoordinators coordinators, std::vector<WorkerDetails>& workers, int extraTlogEligibleZones, int minReplicasRemaining, bool underMaintenance) {
+static JsonBuilderArray tlogFetcher(int* logFaultTolerance, Reference<AsyncVar<ServerDBInfo>> db,
+                                    std::unordered_map<NetworkAddress, WorkerInterface> const& address_workers) {
+	JsonBuilderArray tlogsArray;
+	JsonBuilderObject tlogsStatus;
+	tlogsStatus = tlogFetcher(logFaultTolerance, db->get().logSystemConfig.tLogs, address_workers);
+	tlogsStatus["epoch"] = db->get().logSystemConfig.epoch;
+	tlogsStatus["current"] = true;
+	if (db->get().logSystemConfig.recoveredAt.present()) {
+		tlogsStatus["begin_version"] = db->get().logSystemConfig.recoveredAt.get();
+	}
+	tlogsArray.push_back(tlogsStatus);
+	for (auto it : db->get().logSystemConfig.oldTLogs) {
+		JsonBuilderObject oldTlogsStatus = tlogFetcher(logFaultTolerance, it.tLogs, address_workers);
+		oldTlogsStatus["epoch"] = it.epoch;
+		oldTlogsStatus["current"] = false;
+		oldTlogsStatus["begin_version"] = it.epochBegin;
+		oldTlogsStatus["end_version"] = it.epochEnd;
+		tlogsArray.push_back(oldTlogsStatus);
+	}
+	return tlogsArray;
+}
+
+static JsonBuilderObject faultToleranceStatusFetcher(DatabaseConfiguration configuration,
+                                                     ServerCoordinators coordinators,
+                                                     std::vector<WorkerDetails>& workers, int extraTlogEligibleZones,
+                                                     int minReplicasRemaining, int oldLogFaultTolerance,
+                                                     bool underMaintenance) {
 	JsonBuilderObject statusObj;
 
 	// without losing data
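The refactor splits the old per-generation loop into a helper that scores one list of TLogSets and a driver (the array-returning overload above) that applies it to the current log generation and every old one, tagging each entry with epoch, current, and version bounds. A generation's fault tolerance is replicationFactor - 1 - writeAntiQuorum - failedLogs, maximized across regions; a negative value means a write quorum may already be gone, which the new possibly_losing_data flag surfaces. A worked instance of the arithmetic:

    #include <cstdio>
    int main() {
    	int tLogReplicationFactor = 3; // each mutation stored on 3 tlogs
    	int tLogWriteAntiQuorum = 0;
    	int failedLogs = 1; // one tlog currently unreachable
    	int faultTolerance = tLogReplicationFactor - 1 - tLogWriteAntiQuorum - failedLogs;
    	std::printf("fault tolerance: %d\n", faultTolerance); // prints 1
    	// With failedLogs == 3 this would be -1, and the generation reports
    	// "possibly_losing_data": true.
    	return 0;
    }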
@@ -2112,17 +2135,18 @@ static JsonBuilderObject faultToleranceStatusFetcher(DatabaseConfiguration confi
 		}
 		maxCoordinatorZoneFailures += 1;
 	}
+	// max zone failures that we can tolerate to not lose data
 	int zoneFailuresWithoutLosingData = std::min(maxZoneFailures, maxCoordinatorZoneFailures);
 
 	if (minReplicasRemaining >= 0){
 		zoneFailuresWithoutLosingData = std::min(zoneFailuresWithoutLosingData, minReplicasRemaining - 1);
 	}
 
-	statusObj["max_zone_failures_without_losing_data"] = std::max(zoneFailuresWithoutLosingData, 0);
-
-	// without losing availablity
-	statusObj["max_zone_failures_without_losing_availability"] = std::max(std::min(extraTlogEligibleZones, zoneFailuresWithoutLosingData), 0);
+	// oldLogFaultTolerance means max failures we can tolerate to lose logs data. -1 means we lose data or availability.
+	zoneFailuresWithoutLosingData = std::max(std::min(zoneFailuresWithoutLosingData, oldLogFaultTolerance), -1);
+	statusObj["max_zone_failures_without_losing_data"] = zoneFailuresWithoutLosingData;
+	statusObj["max_zone_failures_without_losing_availability"] =
+	    std::max(std::min(extraTlogEligibleZones, zoneFailuresWithoutLosingData), -1);
 	return statusObj;
 }
 
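Two reporting changes land in this hunk: max_zone_failures_without_losing_data is now additionally capped by the log-generation fault tolerance computed above, and both figures may report -1 (data or availability may already be lost) instead of being floored at 0. A self-contained worked example, assuming triple redundancy with one storage replica already down:

    #include <algorithm>
    #include <cstdio>
    int main() {
    	int maxZoneFailures = 2;            // storage: replication factor 3, minus 1
    	int maxCoordinatorZoneFailures = 2; // e.g. 5 coordinators tolerate 2 zones
    	int minReplicasRemaining = 2;       // one replica of some shard already failed
    	int logFaultTolerance = 1;          // from tlogFetcher above
    	int withoutLosingData = std::min(maxZoneFailures, maxCoordinatorZoneFailures);    // 2
    	withoutLosingData = std::min(withoutLosingData, minReplicasRemaining - 1);        // 1
    	withoutLosingData = std::max(std::min(withoutLosingData, logFaultTolerance), -1); // 1
    	std::printf("max_zone_failures_without_losing_data = %d\n", withoutLosingData);
    	return 0;
    }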
@@ -2440,7 +2464,7 @@ ACTOR Future<StatusReply> clusterGetStatus(
 	    getProcessIssuesAsMessages(workerIssues);
 	state vector<std::pair<StorageServerInterface, EventMap>> storageServers;
 	state vector<std::pair<TLogInterface, EventMap>> tLogs;
-	state vector<std::pair<MasterProxyInterface, EventMap>> proxies;
+	state vector<std::pair<CommitProxyInterface, EventMap>> commitProxies;
 	state vector<std::pair<GrvProxyInterface, EventMap>> grvProxies;
 	state JsonBuilderObject qos;
 	state JsonBuilderObject data_overlay;

@@ -2504,7 +2528,8 @@ ACTOR Future<StatusReply> clusterGetStatus(
 
 			state Future<ErrorOr<vector<std::pair<StorageServerInterface, EventMap>>>> storageServerFuture = errorOr(getStorageServersAndMetrics(cx, address_workers, rkWorker));
 			state Future<ErrorOr<vector<std::pair<TLogInterface, EventMap>>>> tLogFuture = errorOr(getTLogsAndMetrics(db, address_workers));
-			state Future<ErrorOr<vector<std::pair<MasterProxyInterface, EventMap>>>> proxyFuture = errorOr(getProxiesAndMetrics(db, address_workers));
+			state Future<ErrorOr<vector<std::pair<CommitProxyInterface, EventMap>>>> commitProxyFuture =
+			    errorOr(getCommitProxiesAndMetrics(db, address_workers));
 			state Future<ErrorOr<vector<std::pair<GrvProxyInterface, EventMap>>>> grvProxyFuture = errorOr(getGrvProxiesAndMetrics(db, address_workers));
 
 			state int minReplicasRemaining = -1;

@@ -2517,14 +2542,16 @@ ACTOR Future<StatusReply> clusterGetStatus(
 			futures2.push_back(clusterSummaryStatisticsFetcher(pMetrics, storageServerFuture, tLogFuture, &status_incomplete_reasons));
 			state std::vector<JsonBuilderObject> workerStatuses = wait(getAll(futures2));
 
-			int oldLogFaultTolerance = 100;
-			if(db->get().recoveryState >= RecoveryState::ACCEPTING_COMMITS && db->get().logSystemConfig.oldTLogs.size() > 0) {
-				statusObj["old_logs"] = oldTlogFetcher(&oldLogFaultTolerance, db, address_workers);
+			int logFaultTolerance = 100;
+			if (db->get().recoveryState >= RecoveryState::ACCEPTING_COMMITS) {
+				statusObj["logs"] = tlogFetcher(&logFaultTolerance, db, address_workers);
 			}
 
 			if(configuration.present()) {
 				int extraTlogEligibleZones = getExtraTLogEligibleZones(workers, configuration.get());
-				statusObj["fault_tolerance"] = faultToleranceStatusFetcher(configuration.get(), coordinators, workers, extraTlogEligibleZones, minReplicasRemaining, loadResult.present() && loadResult.get().healthyZone.present());
+				statusObj["fault_tolerance"] = faultToleranceStatusFetcher(
+				    configuration.get(), coordinators, workers, extraTlogEligibleZones, minReplicasRemaining,
+				    logFaultTolerance, loadResult.present() && loadResult.get().healthyZone.present());
 			}
 
 			state JsonBuilderObject configObj =

@@ -2587,13 +2614,13 @@ ACTOR Future<StatusReply> clusterGetStatus(
 				messages.push_back(JsonBuilder::makeMessage("log_servers_error", "Timed out trying to retrieve log servers."));
 			}
 
-			// ...also proxies
-			ErrorOr<vector<std::pair<MasterProxyInterface, EventMap>>> _proxies = wait(proxyFuture);
-			if (_proxies.present()) {
-				proxies = _proxies.get();
-			}
-			else {
-				messages.push_back(JsonBuilder::makeMessage("proxies_error", "Timed out trying to retrieve proxies."));
+			// ...also commit proxies
+			ErrorOr<vector<std::pair<CommitProxyInterface, EventMap>>> _commitProxies = wait(commitProxyFuture);
+			if (_commitProxies.present()) {
+				commitProxies = _commitProxies.get();
+			} else {
+				messages.push_back(
+				    JsonBuilder::makeMessage("commit_proxies_error", "Timed out trying to retrieve commit proxies."));
 			}
 
 			// ...also grv proxies

@@ -2614,12 +2641,10 @@ ACTOR Future<StatusReply> clusterGetStatus(
 				statusObj["layers"] = layers;
 			}
 
-			JsonBuilderObject processStatus = wait(processStatusFetcher(db, workers, pMetrics, mMetrics, networkMetrics,
-			                                                            latestError, traceFileOpenErrors, programStarts,
-			                                                            processIssues, storageServers, tLogs, proxies,
-			                                                            grvProxies, coordinators, cx, configuration,
-			                                                            loadResult.present() ? loadResult.get().healthyZone : Optional<Key>(),
-			                                                            &status_incomplete_reasons));
+			JsonBuilderObject processStatus = wait(processStatusFetcher(
+			    db, workers, pMetrics, mMetrics, networkMetrics, latestError, traceFileOpenErrors, programStarts,
+			    processIssues, storageServers, tLogs, commitProxies, grvProxies, coordinators, cx, configuration,
+			    loadResult.present() ? loadResult.get().healthyZone : Optional<Key>(), &status_incomplete_reasons));
 			statusObj["processes"] = processStatus;
 			statusObj["clients"] = clientStatusFetcher(clientStatus);
 
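Net schema effect for anything parsing status: the top-level old_logs array is replaced by logs, which now always leads with the current log generation (current: true) followed by the old generations, each carrying epoch and version bounds; within each entry the tlog list moves from logs to log_interfaces and gains possibly_losing_data, and the timeout message proxies_error becomes commit_proxies_error. Illustrative fragment with made-up values:

    "logs": [
      {
        "epoch": 7,
        "current": true,
        "log_interfaces": [ { "id": "0fc1a2b3", "healthy": true, "address": "10.0.0.5:4500" } ],
        "log_replication_factor": 3,
        "possibly_losing_data": false
      }
    ]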
@@ -103,12 +103,12 @@ struct CacheRangeInfo : ReferenceCounted<CacheRangeInfo>, NonCopyable {
 			delete adding;
 	}
 
-	static CacheRangeInfo* newNotAssigned(KeyRange keys) { return new CacheRangeInfo(keys, NULL, NULL); }
-	static CacheRangeInfo* newReadWrite(KeyRange keys, StorageCacheData* data) { return new CacheRangeInfo(keys, NULL, data); }
-	static CacheRangeInfo* newAdding(StorageCacheData* data, KeyRange keys) { return new CacheRangeInfo(keys, new AddingCacheRange(data, keys), NULL); }
+	static CacheRangeInfo* newNotAssigned(KeyRange keys) { return new CacheRangeInfo(keys, nullptr, nullptr); }
+	static CacheRangeInfo* newReadWrite(KeyRange keys, StorageCacheData* data) { return new CacheRangeInfo(keys, nullptr, data); }
+	static CacheRangeInfo* newAdding(StorageCacheData* data, KeyRange keys) { return new CacheRangeInfo(keys, new AddingCacheRange(data, keys), nullptr); }
 
-	bool isReadable() const { return readWrite!=NULL; }
-	bool isAdding() const { return adding!=NULL; }
+	bool isReadable() const { return readWrite!=nullptr; }
+	bool isAdding() const { return adding!=nullptr; }
 	bool notAssigned() const { return !readWrite && !adding; }
 	bool assigned() const { return readWrite || adding; }
 	bool isInVersionedData() const { return readWrite || (adding && adding->isTransferred()); }