Merge branch 'master' of https://github.com/apple/foundationdb into jfu-snapshot-record-version

Jon Fu 2020-09-23 15:35:05 -04:00
commit 69580593dd
170 changed files with 2242 additions and 1495 deletions


@ -596,7 +596,7 @@ fdb_error_t fdb_transaction_set_option_impl( FDBTransaction* tr,
void fdb_transaction_set_option_v13( FDBTransaction* tr,
FDBTransactionOption option )
{
fdb_transaction_set_option_impl( tr, option, NULL, 0 );
fdb_transaction_set_option_impl( tr, option, nullptr, 0 );
}
extern "C" DLLEXPORT


@ -157,14 +157,14 @@ namespace FDB {
void cancel() override;
void reset() override;
TransactionImpl() : tr(NULL) {}
TransactionImpl() : tr(nullptr) {}
TransactionImpl(TransactionImpl&& r) noexcept {
tr = r.tr;
r.tr = NULL;
r.tr = nullptr;
}
TransactionImpl& operator=(TransactionImpl&& r) noexcept {
tr = r.tr;
r.tr = NULL;
r.tr = nullptr;
return *this;
}
@ -207,10 +207,10 @@ namespace FDB {
if ( value.present() )
throw_on_error( fdb_network_set_option( option, value.get().begin(), value.get().size() ) );
else
throw_on_error( fdb_network_set_option( option, NULL, 0 ) );
throw_on_error( fdb_network_set_option( option, nullptr, 0 ) );
}
API* API::instance = NULL;
API* API::instance = nullptr;
API::API(int version) : version(version) {}
API* API::selectAPIVersion(int apiVersion) {
@ -234,11 +234,11 @@ namespace FDB {
}
bool API::isAPIVersionSelected() {
return API::instance != NULL;
return API::instance != nullptr;
}
API* API::getInstance() {
if(API::instance == NULL) {
if(API::instance == nullptr) {
throw api_version_unset();
}
else {
@ -280,7 +280,7 @@ namespace FDB {
if (value.present())
throw_on_error(fdb_database_set_option(db, option, value.get().begin(), value.get().size()));
else
throw_on_error(fdb_database_set_option(db, option, NULL, 0));
throw_on_error(fdb_database_set_option(db, option, nullptr, 0));
}
TransactionImpl::TransactionImpl(FDBDatabase* db) {
@ -417,7 +417,7 @@ namespace FDB {
if ( value.present() ) {
throw_on_error( fdb_transaction_set_option( tr, option, value.get().begin(), value.get().size() ) );
} else {
throw_on_error( fdb_transaction_set_option( tr, option, NULL, 0 ) );
throw_on_error( fdb_transaction_set_option( tr, option, nullptr, 0 ) );
}
}


@ -31,7 +31,7 @@
namespace FDB {
struct CFuture : NonCopyable, ReferenceCounted<CFuture>, FastAllocated<CFuture> {
CFuture() : f(NULL) {}
CFuture() : f(nullptr) {}
explicit CFuture(FDBFuture* f) : f(f) {}
~CFuture() {
if (f) {


@ -1089,13 +1089,13 @@ void JNI_OnUnload(JavaVM *vm, void *reserved) {
return;
} else {
// delete global references so the GC can collect them
if (range_result_summary_class != NULL) {
if (range_result_summary_class != JNI_NULL) {
env->DeleteGlobalRef(range_result_summary_class);
}
if (range_result_class != NULL) {
if (range_result_class != JNI_NULL) {
env->DeleteGlobalRef(range_result_class);
}
if (string_class != NULL) {
if (string_class != JNI_NULL) {
env->DeleteGlobalRef(string_class);
}
}


@ -59,11 +59,14 @@ else()
set(ROCKSDB_LIBRARIES
${BINARY_DIR}/librocksdb.a)
ExternalProject_Get_Property(rocksdb SOURCE_DIR)
set (ROCKSDB_INCLUDE_DIR "${SOURCE_DIR}/include")
set(ROCKSDB_FOUND TRUE)
endif()
message(STATUS "Found RocksDB library: ${ROCKSDB_LIBRARIES}")
message(STATUS "Found RocksDB includes: ${ROCKSDB_INCLUDE_DIRS}")
message(STATUS "Found RocksDB includes: ${ROCKSDB_INCLUDE_DIR}")
mark_as_advanced(
ROCKSDB_LIBRARIES


@ -107,7 +107,9 @@ endif()
################################################################################
set(SSD_ROCKSDB_EXPERIMENTAL OFF CACHE BOOL "Build with experimental RocksDB support")
if (SSD_ROCKSDB_EXPERIMENTAL)
# RocksDB is currently enabled by default for GCC but does not build with the latest
# Clang.
if (SSD_ROCKSDB_EXPERIMENTAL OR GCC)
set(WITH_ROCKSDB_EXPERIMENTAL ON)
else()
set(WITH_ROCKSDB_EXPERIMENTAL OFF)
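As an editorial aside, the cache option above means the experimental RocksDB engine can still be requested explicitly at configure time regardless of compiler; a minimal sketch (the source path is a placeholder, not part of this commit):

cmake -DSSD_ROCKSDB_EXPERIMENTAL=ON /path/to/foundationdb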


@ -1,6 +1,5 @@
#!/bin/bash
SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
pkill fdbserver
ulimit -S -c unlimited
unset FDB_NETWORK_OPTION_EXTERNAL_CLIENT_DIRECTORY
@ -8,4 +7,4 @@ WORKDIR="$(pwd)/tmp/$$"
if [ ! -d "${WORKDIR}" ] ; then
mkdir -p "${WORKDIR}"
fi
DEBUGLEVEL=0 DISPLAYERROR=1 RANDOMTEST=1 WORKDIR="${WORKDIR}" FDBSERVERPORT="${PORT_FDBSERVER:-4500}" ${SCRIPTDIR}/bindingTestScript.sh 1
DEBUGLEVEL=0 DISPLAYERROR=1 RANDOMTEST=1 WORKDIR="${WORKDIR}" ${SCRIPTDIR}/bindingTestScript.sh 1


@ -7,7 +7,7 @@ SCRIPTID="${$}"
SAVEONERROR="${SAVEONERROR:-1}"
PYTHONDIR="${BINDIR}/tests/python"
testScript="${BINDIR}/tests/bindingtester/run_binding_tester.sh"
VERSION="1.6"
VERSION="1.8"
source ${SCRIPTDIR}/localClusterStart.sh
@ -28,7 +28,7 @@ then
echo "Log dir: ${LOGDIR}"
echo "Python path: ${PYTHONDIR}"
echo "Lib dir: ${LIBDIR}"
echo "Server port: ${FDBSERVERPORT}"
echo "Cluster String: ${CLUSTERSTRING}"
echo "Script Id: ${SCRIPTID}"
echo "Version: ${VERSION}"
fi
@ -36,6 +36,9 @@ fi
# Begin the cluster using the logic in localClusterStart.sh.
startCluster
# Stop the cluster on exit
trap "stopCluster" EXIT
# Display user message
if [ "${status}" -ne 0 ]; then
:
@ -58,8 +61,8 @@ fi
# Display directory and log information, if an error occurred
if [ "${status}" -ne 0 ]
then
ls "${WORKDIR}" > "${LOGDIR}/dir.log"
ps -eafw > "${LOGDIR}/process-preclean.log"
ls "${WORKDIR}" &> "${LOGDIR}/dir.log"
ps -eafwH &> "${LOGDIR}/process-preclean.log"
if [ -f "${FDBCONF}" ]; then
cp -f "${FDBCONF}" "${LOGDIR}/"
fi
@ -71,10 +74,15 @@ fi
# Save debug information files, environment, and log information, if an error occurred
if [ "${status}" -ne 0 ] && [ "${SAVEONERROR}" -gt 0 ]; then
ps -eafw > "${LOGDIR}/process-exit.log"
netstat -na > "${LOGDIR}/netstat.log"
df -h > "${LOGDIR}/disk.log"
env > "${LOGDIR}/env.log"
ps -eafwH &> "${LOGDIR}/process-exit.log"
netstat -na &> "${LOGDIR}/netstat.log"
df -h &> "${LOGDIR}/disk.log"
env &> "${LOGDIR}/env.log"
fi
# Stop the cluster
if stopCluster; then
unset FDBSERVERID
fi
exit "${status}"


@ -5,15 +5,32 @@ WORKDIR="${WORKDIR:-${SCRIPTDIR}/tmp/fdb.work}"
LOGDIR="${WORKDIR}/log"
ETCDIR="${WORKDIR}/etc"
BINDIR="${BINDIR:-${SCRIPTDIR}}"
FDBSERVERPORT="${FDBSERVERPORT:-4500}"
FDBPORTSTART="${FDBPORTSTART:-4000}"
SERVERCHECKS="${SERVERCHECKS:-10}"
CONFIGUREWAIT="${CONFIGUREWAIT:-240}"
FDBCONF="${ETCDIR}/fdb.cluster"
LOGFILE="${LOGFILE:-${LOGDIR}/startcluster.log}"
AUDITCLUSTER="${AUDITCLUSTER:-0}"
AUDITLOG="${AUDITLOG:-/tmp/audit-cluster.log}"
# Initialize the variables
status=0
messagetime=0
messagecount=0
# Define a random ip address and port on localhost
if [ -z ${IPADDRESS} ]; then
let index2="${RANDOM} % 256"
let index3="${RANDOM} % 256"
let index4="(${RANDOM} % 255) + 1"
IPADDRESS="127.${index2}.${index3}.${index4}"
fi
if [ -z ${FDBPORT} ]; then
let FDBPORT="(${RANDOM} % 1000) + ${FDBPORTSTART}"
fi
CLUSTERSTRING="${IPADDRESS}:${FDBPORT}"
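# Editorial note (not part of the original script): IPADDRESS and FDBPORT are only
# assigned here when unset, so a caller can pin the cluster address, e.g.
#   IPADDRESS=127.0.0.1 FDBPORT=4500 ${SCRIPTDIR}/bindingTestScript.sh 1
# Otherwise a random 127.x.y.z loopback address and a port in
# [FDBPORTSTART, FDBPORTSTART+999] are chosen.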
function log
{
local status=0
@ -92,7 +109,10 @@ function displayMessage
}
# Create the directories used by the server.
function createDirectories {
function createDirectories
{
local status=0
# Display user message
if ! displayMessage "Creating directories"
then
@ -137,7 +157,10 @@ function createDirectories {
}
# Create a cluster file for the local cluster.
function createClusterFile {
function createClusterFile
{
local status=0
if [ "${status}" -ne 0 ]; then
:
# Display user message
@ -148,7 +171,7 @@ function createClusterFile {
else
description=$(LC_CTYPE=C tr -dc A-Za-z0-9 < /dev/urandom 2> /dev/null | head -c 8)
random_str=$(LC_CTYPE=C tr -dc A-Za-z0-9 < /dev/urandom 2> /dev/null | head -c 8)
echo "$description:$random_str@127.0.0.1:${FDBSERVERPORT}" > "${FDBCONF}"
echo "${description}:${random_str}@${CLUSTERSTRING}" > "${FDBCONF}"
fi
if [ "${status}" -ne 0 ]; then
@ -161,8 +184,51 @@ function createClusterFile {
return ${status}
}
# Stop the Cluster from running.
function stopCluster
{
local status=0
# Add an audit entry, if enabled
if [ "${AUDITCLUSTER}" -gt 0 ]; then
printf '%-15s (%6s) Stopping cluster %-20s (%6s): %s\n' "$(date +'%Y-%m-%d %H:%M:%S')" "${$}" "${CLUSTERSTRING}" "${FDBSERVERID}" >> "${AUDITLOG}"
fi
if [ -z "${FDBSERVERID}" ]; then
log 'FDB Server process is not defined'
let status="${status} + 1"
elif ! kill -0 "${FDBSERVERID}"; then
log "Failed to locate FDB Server process (${FDBSERVERID})"
let status="${status} + 1"
elif "${BINDIR}/fdbcli" -C "${FDBCONF}" --exec "kill; kill ${CLUSTERSTRING}; sleep 3" --timeout 120 &>> "${LOGDIR}/fdbcli-kill.log"
then
# Ensure that process is dead
if ! kill -0 "${FDBSERVERID}" 2> /dev/null; then
log "Killed cluster (${FDBSERVERID}) via cli"
elif ! kill -9 "${FDBSERVERID}"; then
log "Failed to kill FDB Server process (${FDBSERVERID}) via cli or kill command"
let status="${status} + 1"
else
log "Forcibly killed FDB Server process (${FDBSERVERID}) since cli failed"
fi
elif ! kill -9 "${FDBSERVERID}"; then
log "Failed to forcibly kill FDB Server process (${FDBSERVERID})"
let status="${status} + 1"
else
log "Forcibly killed FDB Server process (${FDBSERVERID})"
fi
return "${status}"
}
# Start the server running.
function startFdbServer {
function startFdbServer
{
local status=0
# Add an audit entry, if enabled
if [ "${AUDITCLUSTER}" -gt 0 ]; then
printf '%-15s (%6s) Starting cluster %-20s\n' "$(date +'%Y-%m-%d %H:%M:%S')" "${$}" "${CLUSTERSTRING}" >> "${AUDITLOG}"
fi
if [ "${status}" -ne 0 ]; then
:
elif ! displayMessage "Starting Fdb Server"
@ -170,25 +236,34 @@ function startFdbServer {
log 'Failed to display user message'
let status="${status} + 1"
elif ! "${BINDIR}/fdbserver" -C "${FDBCONF}" -p "auto:${FDBSERVERPORT}" -L "${LOGDIR}" -d "${WORKDIR}/fdb/$$" &> "${LOGDIR}/fdbserver.log" &
else
"${BINDIR}/fdbserver" --knob_disable_posix_kernel_aio=1 -C "${FDBCONF}" -p "${CLUSTERSTRING}" -L "${LOGDIR}" -d "${WORKDIR}/fdb/${$}" &> "${LOGDIR}/fdbserver.log" &
fdbpid=$!
fdbrc=$?
if [ $fdbrc -ne 0 ]
then
log "Failed to start FDB Server"
# Maybe the server is already running
FDBSERVERID="$(pidof fdbserver)"
let status="${status} + 1"
else
FDBSERVERID="${!}"
FDBSERVERID="${fdbpid}"
fi
fi
if ! kill -0 ${FDBSERVERID} ; then
log "FDB Server start failed."
if [ -z "${FDBSERVERID}" ]; then
log "FDB Server start failed because no process"
let status="${status} + 1"
elif ! kill -0 "${FDBSERVERID}" ; then
log "FDB Server start failed because process terminated unexpectedly"
let status="${status} + 1"
fi
return ${status}
}
function getStatus {
function getStatus
{
local status=0
if [ "${status}" -ne 0 ]; then
:
elif ! date &>> "${LOGDIR}/fdbclient.log"
@ -209,35 +284,41 @@ function getStatus {
}
# Verify that the cluster is available.
function verifyAvailable {
function verifyAvailable
{
local status=0
if [ -z "${FDBSERVERID}" ]; then
log "FDB Server process is not defined."
let status="${status} + 1"
# Verify that the server is running.
if ! kill -0 "${FDBSERVERID}"
elif ! kill -0 "${FDBSERVERID}"
then
log "FDB server process (${FDBSERVERID}) is not running"
let status="${status} + 1"
return 1
# Display user message.
elif ! displayMessage "Checking cluster availability"
then
log 'Failed to display user message'
let status="${status} + 1"
return 1
# Determine if status json says the database is available.
else
avail=`"${BINDIR}/fdbcli" -C "${FDBCONF}" --exec 'status json' --timeout 10 2> /dev/null | grep -E '"database_available"|"available"' | grep 'true'`
avail=`"${BINDIR}/fdbcli" -C "${FDBCONF}" --exec 'status json' --timeout "${SERVERCHECKS}" 2> /dev/null | grep -E '"database_available"|"available"' | grep 'true'`
log "Avail value: ${avail}" "${DEBUGLEVEL}"
if [[ -n "${avail}" ]] ; then
return 0
:
else
return 1
let status="${status} + 1"
fi
fi
return "${status}"
}
# Configure the database on the server.
function createDatabase {
function createDatabase
{
local status=0
if [ "${status}" -ne 0 ]; then
:
# Ensure that the server is running
@ -262,7 +343,7 @@ function createDatabase {
# Configure the database.
else
"${BINDIR}/fdbcli" -C "${FDBCONF}" --exec 'configure new single memory; status' --timeout 240 --log --log-dir "${LOGDIR}" &>> "${LOGDIR}/fdbclient.log"
"${BINDIR}/fdbcli" -C "${FDBCONF}" --exec 'configure new single memory; status' --timeout "${CONFIGUREWAIT}" --log --log-dir "${LOGDIR}" &>> "${LOGDIR}/fdbclient.log"
if ! displayMessage "Checking if config succeeded"
then
@ -270,7 +351,7 @@ function createDatabase {
fi
iteration=0
while [[ "${iteration}" -lt 10 ]] && ! verifyAvailable
while [[ "${iteration}" -lt "${SERVERCHECKS}" ]] && ! verifyAvailable
do
log "Database not created (iteration ${iteration})."
let iteration="${iteration} + 1"
@ -290,7 +371,10 @@ function createDatabase {
}
# Begin the local cluster from scratch.
function startCluster {
function startCluster
{
local status=0
if [ "${status}" -ne 0 ]; then
:
elif ! createDirectories


@ -24,22 +24,22 @@ def parse_args():
# (e)nd of a span with a better given name
locationToPhase = {
"NativeAPI.commit.Before": [],
"MasterProxyServer.batcher": [("b", "Commit")],
"MasterProxyServer.commitBatch.Before": [],
"MasterProxyServer.commitBatch.GettingCommitVersion": [("b", "CommitVersion")],
"MasterProxyServer.commitBatch.GotCommitVersion": [("e", "CommitVersion")],
"CommitProxyServer.batcher": [("b", "Commit")],
"CommitProxyServer.commitBatch.Before": [],
"CommitProxyServer.commitBatch.GettingCommitVersion": [("b", "CommitVersion")],
"CommitProxyServer.commitBatch.GotCommitVersion": [("e", "CommitVersion")],
"Resolver.resolveBatch.Before": [("b", "Resolver.PipelineWait")],
"Resolver.resolveBatch.AfterQueueSizeCheck": [],
"Resolver.resolveBatch.AfterOrderer": [("e", "Resolver.PipelineWait"), ("b", "Resolver.Conflicts")],
"Resolver.resolveBatch.After": [("e", "Resolver.Conflicts")],
"MasterProxyServer.commitBatch.AfterResolution": [("b", "Proxy.Processing")],
"MasterProxyServer.commitBatch.ProcessingMutations": [],
"MasterProxyServer.commitBatch.AfterStoreCommits": [("e", "Proxy.Processing")],
"CommitProxyServer.commitBatch.AfterResolution": [("b", "Proxy.Processing")],
"CommitProxyServer.commitBatch.ProcessingMutations": [],
"CommitProxyServer.commitBatch.AfterStoreCommits": [("e", "Proxy.Processing")],
"TLog.tLogCommit.BeforeWaitForVersion": [("b", "TLog.PipelineWait")],
"TLog.tLogCommit.Before": [("e", "TLog.PipelineWait")],
"TLog.tLogCommit.AfterTLogCommit": [("b", "TLog.FSync")],
"TLog.tLogCommit.After": [("e", "TLog.FSync")],
"MasterProxyServer.commitBatch.AfterLogPush": [("e", "Commit")],
"CommitProxyServer.commitBatch.AfterLogPush": [("e", "Commit")],
"NativeAPI.commit.After": [],
}


@ -16,7 +16,7 @@ As an essential component of a database system, backup and restore is commonly u
## Background
FDB backup system continuously scan the databases key-value space, save key-value pairs and mutations at versions into range files and log files in blob storage. Specifically, mutation logs are generated at Proxy, and are written to transaction logs along with regular mutations. In production clusters like CK clusters, backup system is always on, which means each mutation is written twice to transaction logs, consuming about half of write bandwidth and about 40% of Proxy CPU time.
FDB backup system continuously scan the databases key-value space, save key-value pairs and mutations at versions into range files and log files in blob storage. Specifically, mutation logs are generated at CommitProxy, and are written to transaction logs along with regular mutations. In production clusters like CK clusters, backup system is always on, which means each mutation is written twice to transaction logs, consuming about half of write bandwidth and about 40% of CommitProxy CPU time.
The design of old backup system is [here](https://github.com/apple/foundationdb/blob/master/design/backup.md), and the data format of range files and mutations files is [here](https://github.com/apple/foundationdb/blob/master/design/backup-dataFormat.md). The technical overview of FDB is [here](https://github.com/apple/foundationdb/wiki/Technical-Overview-of-the-Database). The FDB recovery is described in this [doc](https://github.com/apple/foundationdb/blob/master/design/recovery-internals.md).
@ -37,7 +37,7 @@ The design of old backup system is [here](https://github.com/apple/foundationdb/
Feature priorities: Feature 1, 2, 3, 4, 5 are must-have; Feature 6 is better to have.
1. **Write bandwidth reduction by half**: removes the requirement to generate backup mutations at the Proxy, thus reduce TLog write bandwidth usage by half and significantly improve Proxy CPU usage;
1. **Write bandwidth reduction by half**: removes the requirement to generate backup mutations at the CommitProxy, thus reduce TLog write bandwidth usage by half and significantly improve CommitProxy CPU usage;
2. **Correctness**: The restored database must be consistent: each *restored* state (i.e., key-value pair) at a version `v` must match the original state at version `v`.
3. **Performance**: The backup system should be performant, mostly measured as a small CPU overhead on transaction logs and backup workers. The version lag on backup workers is an indicator of performance.
4. **Fault-tolerant**: The backup system should be fault-tolerant to node failures in the FDB cluster.
@ -153,9 +153,9 @@ The requirement of the new backup system raises several design challenges:
**Master**: The master is responsible for coordinating the transition of the FDB transaction sub-system from one generation to the next. In particular, the master recruits backup workers during the recovery.
**Transaction Logs (TLogs)**: The transaction logs make mutations durable to disk for fast commit latencies. The logs receive commits from the proxy in version order, and only respond to the proxy once the data has been written and fsync'ed to an append only mutation log on disk. Storage servers retrieve mutations from TLogs. Once the storage servers have persisted mutations, storage servers then pop the mutations from the TLogs.
**Transaction Logs (TLogs)**: The transaction logs make mutations durable to disk for fast commit latencies. The logs receive commits from the commit proxy in version order, and only respond to the commit proxy once the data has been written and fsync'ed to an append only mutation log on disk. Storage servers retrieve mutations from TLogs. Once the storage servers have persisted mutations, storage servers then pop the mutations from the TLogs.
**Proxy**: The proxies are responsible for committing transactions, and tracking the storage servers responsible for each range of keys. In the old backup system, Proxies are responsible to group mutations into backup mutations and write them to the database.
**CommitProxy**: The commit proxies are responsible for committing transactions, and tracking the storage servers responsible for each range of keys. In the old backup system, Proxies are responsible to group mutations into backup mutations and write them to the database.
**GrvProxy**: The GRV proxies are responsible for providing read versions.
## System overview


@ -40,7 +40,7 @@ FoundationDB may return the following error codes from API functions. If you nee
+-----------------------------------------------+-----+--------------------------------------------------------------------------------+
| external_client_already_loaded | 1040| External client has already been loaded |
+-----------------------------------------------+-----+--------------------------------------------------------------------------------+
| proxy_memory_limit_exceeded | 1042| Proxy commit memory limit exceeded |
| proxy_memory_limit_exceeded | 1042| CommitProxy commit memory limit exceeded |
+-----------------------------------------------+-----+--------------------------------------------------------------------------------+
| batch_transaction_throttled | 1051| Batch GRV request rate limit exceeded |
+-----------------------------------------------+-----+--------------------------------------------------------------------------------+


@ -104,7 +104,7 @@ Field Name Description
``Name for the snapshot file`` recommended name for the disk snapshot cluster-name:ip-addr:port:UID
================================ ======================================================== ========================================================
``snapshot create binary`` will not be invoked on processes which does not have any persistent data (for example, Cluster Controller or Master or MasterProxy). Since these processes are stateless, there is no need for a snapshot. Any specialized configuration knobs used for one of these stateless processes need to be copied and restored externally.
``snapshot create binary`` will not be invoked on processes which does not have any persistent data (for example, Cluster Controller or Master or CommitProxy). Since these processes are stateless, there is no need for a snapshot. Any specialized configuration knobs used for one of these stateless processes need to be copied and restored externally.
Management of disk snapshots
----------------------------


@ -27,7 +27,7 @@
"storage",
"transaction",
"resolution",
"proxy",
"commit_proxy",
"grv_proxy",
"master",
"test",
@ -61,7 +61,7 @@
"role":{
"$enum":[
"master",
"proxy",
"commit_proxy",
"grv_proxy",
"log",
"storage",
@ -447,7 +447,7 @@
],
"recovery_state":{
"required_resolvers":1,
"required_proxies":1,
"required_commit_proxies":1,
"required_grv_proxies":1,
"name":{ // "fully_recovered" is the healthy state; other states are normal to transition through but not to persist in
"$enum":[
@ -633,11 +633,11 @@
"address":"10.0.4.1"
}
],
"auto_proxies":3,
"auto_commit_proxies":3,
"auto_resolvers":1,
"auto_logs":3,
"backup_worker_enabled":1,
"proxies":5 // this field will be absent if a value has not been explicitly set
"commit_proxies":5 // this field will be absent if a value has not been explicitly set
},
"data":{
"least_operating_space_bytes_log_server":0,


@ -5,6 +5,8 @@ Release Notes
6.3.5
=====
* Report missing old tlogs information when in recovery before storage servers are fully recovered. `(PR #3706) <https://github.com/apple/foundationdb/pull/3706>`_
Features
--------


@ -117,7 +117,7 @@ LineNoise::LineNoise(
Hint h = onMainThread( [line]() -> Future<Hint> {
return hint_callback(line);
}).getBlocking();
if (!h.valid) return NULL;
if (!h.valid) return nullptr;
*color = h.color;
*bold = h.bold;
return strdup( h.text.c_str() );


@ -20,6 +20,7 @@
#include "boost/lexical_cast.hpp"
#include "fdbclient/NativeAPI.actor.h"
#include "fdbclient/FDBTypes.h"
#include "fdbclient/Status.h"
#include "fdbclient/StatusClient.h"
#include "fdbclient/DatabaseContext.h"
@ -102,7 +103,7 @@ CSimpleOpt::SOption g_rgOptions[] = { { OPT_CONNFILE, "-C", SO_REQ_SEP },
void printAtCol(const char* text, int col) {
const char* iter = text;
const char* start = text;
const char* space = NULL;
const char* space = nullptr;
do {
iter++;
@ -112,7 +113,7 @@ void printAtCol(const char* text, int col) {
printf("%.*s\n", (int)(space - start), start);
start = space;
if (*start == ' ' || *start == '\n') start++;
space = NULL;
space = nullptr;
}
} while (*iter);
}
@ -120,7 +121,7 @@ void printAtCol(const char* text, int col) {
std::string lineWrap(const char* text, int col) {
const char* iter = text;
const char* start = text;
const char* space = NULL;
const char* space = nullptr;
std::string out = "";
do {
iter++;
@ -130,7 +131,7 @@ std::string lineWrap(const char* text, int col) {
out += format("%.*s\n", (int)(space - start), start);
start = space;
if (*start == ' '/* || *start == '\n'*/) start++;
space = NULL;
space = nullptr;
}
} while (*iter);
return out;
@ -470,8 +471,8 @@ void initHelp() {
"All keys between BEGINKEY (inclusive) and ENDKEY (exclusive) are cleared from the database. This command will succeed even if the specified range is empty, but may fail because of conflicts." ESCAPINGK);
helpMap["configure"] = CommandHelp(
"configure [new] "
"<single|double|triple|three_data_hall|three_datacenter|ssd|memory|memory-radixtree-beta|proxies=<PROXIES>|grv_"
"proxies=<GRV_PROXIES>|logs=<LOGS>|resolvers=<RESOLVERS>>*",
"<single|double|triple|three_data_hall|three_datacenter|ssd|memory|memory-radixtree-beta|commit_proxies=<"
"COMMIT_PROXIES>|grv_proxies=<GRV_PROXIES>|logs=<LOGS>|resolvers=<RESOLVERS>>*",
"change the database configuration",
"The `new' option, if present, initializes a new database with the given configuration rather than changing "
"the configuration of an existing one. When used, both a redundancy mode and a storage engine must be "
@ -479,13 +480,14 @@ void initHelp() {
"of data (survive one failure).\n triple - three copies of data (survive two failures).\n three_data_hall - "
"See the Admin Guide.\n three_datacenter - See the Admin Guide.\n\nStorage engine:\n ssd - B-Tree storage "
"engine optimized for solid state disks.\n memory - Durable in-memory storage engine for small "
"datasets.\n\nproxies=<PROXIES>: Sets the desired number of proxies in the cluster. Must be at least 1, or set "
"to -1 which restores the number of proxies to the default value.\n\ngrv_proxies=<GRV_PROXIES>: Sets the "
"desired number of GRV proxies in the cluster. Must be at least 1, or set to -1 which restores the number of "
"proxies to the default value.\n\nlogs=<LOGS>: Sets the desired number of log servers in the cluster. Must be "
"at least 1, or set to -1 which restores the number of logs to the default value.\n\nresolvers=<RESOLVERS>: "
"Sets the desired number of resolvers in the cluster. Must be at least 1, or set to -1 which restores the "
"number of resolvers to the default value.\n\nSee the FoundationDB Administration Guide for more information.");
"datasets.\n\ncommit_proxies=<COMMIT_PROXIES>: Sets the desired number of commit proxies in the cluster. Must "
"be at least 1, or set to -1 which restores the number of commit proxies to the default "
"value.\n\ngrv_proxies=<GRV_PROXIES>: Sets the desired number of GRV proxies in the cluster. Must be at least "
"1, or set to -1 which restores the number of GRV proxies to the default value.\n\nlogs=<LOGS>: Sets the "
"desired number of log servers in the cluster. Must be at least 1, or set to -1 which restores the number of "
"logs to the default value.\n\nresolvers=<RESOLVERS>: Sets the desired number of resolvers in the cluster. "
"Must be at least 1, or set to -1 which restores the number of resolvers to the default value.\n\nSee the "
"FoundationDB Administration Guide for more information.");
helpMap["fileconfigure"] = CommandHelp(
"fileconfigure [new] <FILENAME>",
"change the database configuration from a file",
@ -871,10 +873,11 @@ void printStatus(StatusObjectReader statusObj, StatusClient::StatusLevel level,
fatalRecoveryState = true;
if (name == "recruiting_transaction_servers") {
description += format("\nNeed at least %d log servers across unique zones, %d proxies, "
description +=
format("\nNeed at least %d log servers across unique zones, %d commit proxies, "
"%d GRV proxies and %d resolvers.",
recoveryState["required_logs"].get_int(),
recoveryState["required_proxies"].get_int(),
recoveryState["required_commit_proxies"].get_int(),
recoveryState["required_grv_proxies"].get_int(),
recoveryState["required_resolvers"].get_int());
if (statusObjCluster.has("machines") && statusObjCluster.has("processes")) {
@ -1026,8 +1029,8 @@ void printStatus(StatusObjectReader statusObj, StatusClient::StatusLevel level,
outputString += format("\n Exclusions - %d (type `exclude' for details)", excludedServersArr.size());
}
if (statusObjConfig.get("proxies", intVal))
outputString += format("\n Desired Proxies - %d", intVal);
if (statusObjConfig.get("commit_proxies", intVal))
outputString += format("\n Desired Commit Proxies - %d", intVal);
if (statusObjConfig.get("grv_proxies", intVal))
outputString += format("\n Desired GRV Proxies - %d", intVal);
@ -1233,14 +1236,54 @@ void printStatus(StatusObjectReader statusObj, StatusClient::StatusLevel level,
int minLoss = std::min(availLoss, dataLoss);
const char *faultDomain = machinesAreZones ? "machine" : "zone";
if (minLoss == 1)
outputString += format("1 %s", faultDomain);
else
outputString += format("%d %ss", minLoss, faultDomain);
if (dataLoss > availLoss){
outputString += format(" (%d without data loss)", dataLoss);
}
if (dataLoss == -1) {
ASSERT_WE_THINK(availLoss == -1);
outputString += format(
"\n\n Warning: the database may have data loss and availability loss. Please restart "
"following tlog interfaces, otherwise storage servers may never be able to catch "
"up.\n");
StatusObjectReader logs;
if (statusObjCluster.has("logs")) {
for (StatusObjectReader logEpoch : statusObjCluster.last().get_array()) {
bool possiblyLosingData;
if (logEpoch.get("possibly_losing_data", possiblyLosingData) &&
!possiblyLosingData) {
continue;
}
// Current epoch doesn't have an end version.
int64_t epoch, beginVersion, endVersion = invalidVersion;
bool current;
logEpoch.get("epoch", epoch);
logEpoch.get("begin_version", beginVersion);
logEpoch.get("end_version", endVersion);
logEpoch.get("current", current);
std::string missing_log_interfaces;
if (logEpoch.has("log_interfaces")) {
for (StatusObjectReader logInterface : logEpoch.last().get_array()) {
bool healthy;
std::string address, id;
if (logInterface.get("healthy", healthy) && !healthy) {
logInterface.get("id", id);
logInterface.get("address", address);
missing_log_interfaces += format("%s,%s ", id.c_str(), address.c_str());
}
}
}
outputString += format(
" %s log epoch: %ld begin: %ld end: %s, missing "
"log interfaces(id,address): %s\n",
current ? "Current" : "Old", epoch, beginVersion,
endVersion == invalidVersion ? "(unknown)" : format("%ld", endVersion).c_str(),
missing_log_interfaces.c_str());
}
}
}
}
}
@ -1790,14 +1833,14 @@ ACTOR Future<bool> configure( Database db, std::vector<StringRef> tokens, Refere
bool noChanges = conf.get().old_replication == conf.get().auto_replication &&
conf.get().old_logs == conf.get().auto_logs &&
conf.get().old_proxies == conf.get().auto_proxies &&
conf.get().old_commit_proxies == conf.get().auto_commit_proxies &&
conf.get().old_grv_proxies == conf.get().auto_grv_proxies &&
conf.get().old_resolvers == conf.get().auto_resolvers &&
conf.get().old_processes_with_transaction == conf.get().auto_processes_with_transaction &&
conf.get().old_machines_with_transaction == conf.get().auto_machines_with_transaction;
bool noDesiredChanges = noChanges && conf.get().old_logs == conf.get().desired_logs &&
conf.get().old_proxies == conf.get().desired_proxies &&
conf.get().old_commit_proxies == conf.get().desired_commit_proxies &&
conf.get().old_grv_proxies == conf.get().desired_grv_proxies &&
conf.get().old_resolvers == conf.get().desired_resolvers;
@ -1816,8 +1859,11 @@ ACTOR Future<bool> configure( Database db, std::vector<StringRef> tokens, Refere
outputString += format("| replication | %16s | %16s |\n", conf.get().old_replication.c_str(), conf.get().auto_replication.c_str());
outputString += format("| logs | %16d | %16d |", conf.get().old_logs, conf.get().auto_logs);
outputString += conf.get().auto_logs != conf.get().desired_logs ? format(" (manually set; would be %d)\n", conf.get().desired_logs) : "\n";
outputString += format("| proxies | %16d | %16d |", conf.get().old_proxies, conf.get().auto_proxies);
outputString += conf.get().auto_proxies != conf.get().desired_proxies ? format(" (manually set; would be %d)\n", conf.get().desired_proxies) : "\n";
outputString += format("| commit_proxies | %16d | %16d |", conf.get().old_commit_proxies,
conf.get().auto_commit_proxies);
outputString += conf.get().auto_commit_proxies != conf.get().desired_commit_proxies
? format(" (manually set; would be %d)\n", conf.get().desired_commit_proxies)
: "\n";
outputString += format("| grv_proxies | %16d | %16d |", conf.get().old_grv_proxies,
conf.get().auto_grv_proxies);
outputString += conf.get().auto_grv_proxies != conf.get().desired_grv_proxies
@ -2472,7 +2518,7 @@ void compGenerator(const char* text, bool help, std::vector<std::string>& lc) {
std::map<std::string, CommandHelp>::const_iterator iter;
int len = strlen(text);
const char* helpExtra[] = {"escaping", "options", NULL};
const char* helpExtra[] = {"escaping", "options", nullptr};
const char** he = helpExtra;
@ -2531,11 +2577,24 @@ void onOffGenerator(const char* text, const char *line, std::vector<std::string>
}
void configureGenerator(const char* text, const char *line, std::vector<std::string>& lc) {
const char* opts[] = {
"new", "single", "double", "triple", "three_data_hall", "three_datacenter", "ssd",
"ssd-1", "ssd-2", "memory", "memory-1", "memory-2", "memory-radixtree-beta", "proxies=",
"grv_proxies=", "logs=", "resolvers=", nullptr
};
const char* opts[] = { "new",
"single",
"double",
"triple",
"three_data_hall",
"three_datacenter",
"ssd",
"ssd-1",
"ssd-2",
"memory",
"memory-1",
"memory-2",
"memory-radixtree-beta",
"commit_proxies=",
"grv_proxies=",
"logs=",
"resolvers=",
nullptr };
arrayGenerator(text, line, opts, lc);
}
@ -2973,7 +3032,7 @@ ACTOR Future<int> cli(CLIOptions opt, LineNoise* plinenoise) {
.detail("SourceVersion", getSourceVersion())
.detail("Version", FDB_VT_VERSION)
.detail("PackageName", FDB_VT_PACKAGE_NAME)
.detailf("ActualTime", "%lld", DEBUG_DETERMINISM ? 0 : time(NULL))
.detailf("ActualTime", "%lld", DEBUG_DETERMINISM ? 0 : time(nullptr))
.detail("ClusterFile", ccf->getFilename().c_str())
.detail("ConnectionString", ccf->getConnectionString().toString())
.setMaxFieldLength(10000)
@ -4548,7 +4607,7 @@ int main(int argc, char **argv) {
sigemptyset( &act.sa_mask );
act.sa_flags = 0;
act.sa_handler = SIG_IGN;
sigaction(SIGINT, &act, NULL);
sigaction(SIGINT, &act, nullptr);
#endif
CLIOptions opt(argc, argv);


@ -59,7 +59,7 @@ public:
virtual void delref() { ReferenceCounted<AsyncFileBlobStoreWrite>::delref(); }
struct Part : ReferenceCounted<Part> {
Part(int n, int minSize) : number(n), writer(content.getWriteBuffer(minSize), NULL, Unversioned()), length(0) {
Part(int n, int minSize) : number(n), writer(content.getWriteBuffer(minSize), nullptr, Unversioned()), length(0) {
etag = std::string();
::MD5_Init(&content_md5_buf);
}


@ -958,5 +958,7 @@ Value makePadding(int size);
ACTOR Future<Void> transformRestoredDatabase(Database cx, Standalone<VectorRef<KeyRangeRef>> backupRanges,
Key addPrefix, Key removePrefix);
void simulateBlobFailure();
#include "flow/unactorcompiler.h"
#endif


@ -1343,20 +1343,45 @@ public:
ACTOR static Future<KeyRange> getSnapshotFileKeyRange_impl(Reference<BackupContainerFileSystem> bc,
RangeFile file) {
state Reference<IAsyncFile> inFile = wait(bc->readFile(file.fileName));
state int readFileRetries = 0;
state bool beginKeySet = false;
state Key beginKey;
state Key endKey;
loop {
try {
state Reference<IAsyncFile> inFile = wait(bc->readFile(file.fileName));
beginKeySet = false;
state int64_t j = 0;
for (; j < file.fileSize; j += file.blockSize) {
int64_t len = std::min<int64_t>(file.blockSize, file.fileSize - j);
Standalone<VectorRef<KeyValueRef>> blockData = wait(fileBackup::decodeRangeFileBlock(inFile, j, len));
Standalone<VectorRef<KeyValueRef>> blockData =
wait(fileBackup::decodeRangeFileBlock(inFile, j, len));
if (!beginKeySet) {
beginKey = blockData.front().key;
beginKeySet = true;
}
endKey = blockData.back().key;
}
break;
} catch (Error& e) {
if (e.code() == error_code_restore_bad_read ||
e.code() == error_code_restore_unsupported_file_version ||
e.code() == error_code_restore_corrupted_data_padding) { // non-retryable errors
TraceEvent(SevError, "BackupContainerGetSnapshotFileKeyRange").error(e);
throw;
} else if (e.code() == error_code_http_request_failed || e.code() == error_code_connection_failed ||
e.code() == error_code_timed_out || e.code() == error_code_lookup_failed) {
// blob http request failure, retry
TraceEvent(SevWarnAlways, "BackupContainerGetSnapshotFileKeyRangeConnectionFailure")
.detail("Retries", ++readFileRetries)
.error(e);
wait(delayJittered(0.1));
} else {
TraceEvent(SevError, "BackupContainerGetSnapshotFileKeyRangeUnexpectedError").error(e);
throw;
}
}
}
return KeyRange(KeyRangeRef(beginKey, endKey));
}


@ -277,7 +277,7 @@ ACTOR Future<bool> bucketExists_impl(Reference<BlobStoreEndpoint> b, std::string
std::string resource = std::string("/") + bucket;
HTTP::Headers headers;
Reference<HTTP::Response> r = wait(b->doRequest("HEAD", resource, headers, NULL, 0, {200, 404}));
Reference<HTTP::Response> r = wait(b->doRequest("HEAD", resource, headers, nullptr, 0, {200, 404}));
return r->code == 200;
}
@ -291,7 +291,7 @@ ACTOR Future<bool> objectExists_impl(Reference<BlobStoreEndpoint> b, std::string
std::string resource = std::string("/") + bucket + "/" + object;
HTTP::Headers headers;
Reference<HTTP::Response> r = wait(b->doRequest("HEAD", resource, headers, NULL, 0, {200, 404}));
Reference<HTTP::Response> r = wait(b->doRequest("HEAD", resource, headers, nullptr, 0, {200, 404}));
return r->code == 200;
}
@ -305,7 +305,7 @@ ACTOR Future<Void> deleteObject_impl(Reference<BlobStoreEndpoint> b, std::string
std::string resource = std::string("/") + bucket + "/" + object;
HTTP::Headers headers;
// 200 or 204 means object successfully deleted, 404 means it already doesn't exist, so any of those are considered successful
Reference<HTTP::Response> r = wait(b->doRequest("DELETE", resource, headers, NULL, 0, {200, 204, 404}));
Reference<HTTP::Response> r = wait(b->doRequest("DELETE", resource, headers, nullptr, 0, {200, 204, 404}));
// But if the object already did not exist then the 'delete' is assumed to be successful but a warning is logged.
if(r->code == 404) {
@ -386,7 +386,7 @@ ACTOR Future<Void> createBucket_impl(Reference<BlobStoreEndpoint> b, std::string
if(!exists) {
std::string resource = std::string("/") + bucket;
HTTP::Headers headers;
Reference<HTTP::Response> r = wait(b->doRequest("PUT", resource, headers, NULL, 0, {200, 409}));
Reference<HTTP::Response> r = wait(b->doRequest("PUT", resource, headers, nullptr, 0, {200, 409}));
}
return Void();
}
@ -401,7 +401,7 @@ ACTOR Future<int64_t> objectSize_impl(Reference<BlobStoreEndpoint> b, std::strin
std::string resource = std::string("/") + bucket + "/" + object;
HTTP::Headers headers;
Reference<HTTP::Response> r = wait(b->doRequest("HEAD", resource, headers, NULL, 0, {200, 404}));
Reference<HTTP::Response> r = wait(b->doRequest("HEAD", resource, headers, nullptr, 0, {200, 404}));
if(r->code == 404)
throw file_not_found();
return r->contentLen;
@ -737,7 +737,7 @@ ACTOR Future<Void> listObjectsStream_impl(Reference<BlobStoreEndpoint> bstore, s
HTTP::Headers headers;
state std::string fullResource = resource + HTTP::urlEncode(lastFile);
lastFile.clear();
Reference<HTTP::Response> r = wait(bstore->doRequest("GET", fullResource, headers, NULL, 0, {200}));
Reference<HTTP::Response> r = wait(bstore->doRequest("GET", fullResource, headers, nullptr, 0, {200}));
listReleaser.release();
try {
@ -782,7 +782,7 @@ ACTOR Future<Void> listObjectsStream_impl(Reference<BlobStoreEndpoint> bstore, s
if(size == nullptr) {
throw http_bad_response();
}
object.size = strtoull(size->value(), NULL, 10);
object.size = strtoull(size->value(), nullptr, 10);
listResult.objects.push_back(object);
}
@ -893,7 +893,7 @@ ACTOR Future<std::vector<std::string>> listBuckets_impl(Reference<BlobStoreEndpo
HTTP::Headers headers;
state std::string fullResource = resource + HTTP::urlEncode(lastName);
Reference<HTTP::Response> r = wait(bstore->doRequest("GET", fullResource, headers, NULL, 0, {200}));
Reference<HTTP::Response> r = wait(bstore->doRequest("GET", fullResource, headers, nullptr, 0, {200}));
listReleaser.release();
try {
@ -1024,7 +1024,7 @@ ACTOR Future<std::string> readEntireFile_impl(Reference<BlobStoreEndpoint> bstor
std::string resource = std::string("/") + bucket + "/" + object;
HTTP::Headers headers;
Reference<HTTP::Response> r = wait(bstore->doRequest("GET", resource, headers, NULL, 0, {200, 404}));
Reference<HTTP::Response> r = wait(bstore->doRequest("GET", resource, headers, nullptr, 0, {200, 404}));
if(r->code == 404)
throw file_not_found();
return r->content;
@ -1057,7 +1057,7 @@ ACTOR Future<Void> writeEntireFileFromBuffer_impl(Reference<BlobStoreEndpoint> b
ACTOR Future<Void> writeEntireFile_impl(Reference<BlobStoreEndpoint> bstore, std::string bucket, std::string object, std::string content) {
state UnsentPacketQueue packets;
PacketWriter pw(packets.getWriteBuffer(content.size()), NULL, Unversioned());
PacketWriter pw(packets.getWriteBuffer(content.size()), nullptr, Unversioned());
pw.serializeBytes(content);
if(content.size() > bstore->knobs.multipart_max_part_size)
throw file_too_large();
@ -1095,7 +1095,7 @@ ACTOR Future<int> readObject_impl(Reference<BlobStoreEndpoint> bstore, std::stri
std::string resource = std::string("/") + bucket + "/" + object;
HTTP::Headers headers;
headers["Range"] = format("bytes=%lld-%lld", offset, offset + length - 1);
Reference<HTTP::Response> r = wait(bstore->doRequest("GET", resource, headers, NULL, 0, {200, 206, 404}));
Reference<HTTP::Response> r = wait(bstore->doRequest("GET", resource, headers, nullptr, 0, {200, 206, 404}));
if(r->code == 404)
throw file_not_found();
if(r->contentLen != r->content.size()) // Double check that this wasn't a header-only response, probably unnecessary
@ -1114,7 +1114,7 @@ ACTOR static Future<std::string> beginMultiPartUpload_impl(Reference<BlobStoreEn
std::string resource = std::string("/") + bucket + "/" + object + "?uploads";
HTTP::Headers headers;
Reference<HTTP::Response> r = wait(bstore->doRequest("POST", resource, headers, NULL, 0, {200}));
Reference<HTTP::Response> r = wait(bstore->doRequest("POST", resource, headers, nullptr, 0, {200}));
try {
xml_document<> doc;
@ -1180,7 +1180,7 @@ ACTOR Future<Void> finishMultiPartUpload_impl(Reference<BlobStoreEndpoint> bstor
std::string resource = format("/%s/%s?uploadId=%s", bucket.c_str(), object.c_str(), uploadID.c_str());
HTTP::Headers headers;
PacketWriter pw(part_list.getWriteBuffer(manifest.size()), NULL, Unversioned());
PacketWriter pw(part_list.getWriteBuffer(manifest.size()), nullptr, Unversioned());
pw.serializeBytes(manifest);
Reference<HTTP::Response> r = wait(bstore->doRequest("POST", resource, headers, &part_list, manifest.size(), {200}));
// TODO: In the event that the client times out just before the request completes (so the client is unaware) then the next retry


@ -33,7 +33,7 @@ set(FDBCLIENT_SRCS
Knobs.h
ManagementAPI.actor.cpp
ManagementAPI.actor.h
MasterProxyInterface.h
CommitProxyInterface.h
MetricLogger.actor.cpp
MetricLogger.h
MonitorLeader.actor.cpp


@ -25,7 +25,7 @@
#include "fdbclient/FDBTypes.h"
#include "fdbrpc/FailureMonitor.h"
#include "fdbclient/Status.h"
#include "fdbclient/MasterProxyInterface.h"
#include "fdbclient/CommitProxyInterface.h"
// Streams from WorkerInterface that are safe and useful to call from a client.
// A ClientWorkerInterface is embedded as the first element of a WorkerInterface.


@ -25,7 +25,7 @@
#include "fdbclient/FDBTypes.h"
#include "fdbrpc/FailureMonitor.h"
#include "fdbclient/Status.h"
#include "fdbclient/MasterProxyInterface.h"
#include "fdbclient/CommitProxyInterface.h"
#include "fdbclient/ClientWorkerInterface.h"
struct ClusterInterface {


@ -1,6 +1,6 @@
/*
* MasterProxyInterface.h
* CommitProxyInterface.h
*
* This source file is part of the FoundationDB open source project
*
@ -19,8 +19,8 @@
* limitations under the License.
*/
#ifndef FDBCLIENT_MASTERPROXYINTERFACE_H
#define FDBCLIENT_MASTERPROXYINTERFACE_H
#ifndef FDBCLIENT_COMMITPROXYINTERFACE_H
#define FDBCLIENT_COMMITPROXYINTERFACE_H
#pragma once
#include <utility>
@ -36,7 +36,7 @@
#include "fdbrpc/TimedRequest.h"
#include "GrvProxyInterface.h"
struct MasterProxyInterface {
struct CommitProxyInterface {
constexpr static FileIdentifier file_identifier = 8954922;
enum { LocationAwareLoadBalance = 1 };
enum { AlwaysFresh = 1 };
@ -59,8 +59,8 @@ struct MasterProxyInterface {
UID id() const { return commit.getEndpoint().token; }
std::string toString() const { return id().shortString(); }
bool operator == (MasterProxyInterface const& r) const { return id() == r.id(); }
bool operator != (MasterProxyInterface const& r) const { return id() != r.id(); }
bool operator==(CommitProxyInterface const& r) const { return id() == r.id(); }
bool operator!=(CommitProxyInterface const& r) const { return id() != r.id(); }
NetworkAddress address() const { return commit.getEndpoint().getPrimaryAddress(); }
template <class Archive>
@ -100,9 +100,10 @@ struct MasterProxyInterface {
struct ClientDBInfo {
constexpr static FileIdentifier file_identifier = 5355080;
UID id; // Changes each time anything else changes
vector< GrvProxyInterface > grvProxies;
vector< MasterProxyInterface > masterProxies;
Optional<MasterProxyInterface> firstProxy; //not serialized, used for commitOnFirstProxy when the proxies vector has been shrunk
vector<GrvProxyInterface> grvProxies;
vector<CommitProxyInterface> commitProxies;
Optional<CommitProxyInterface>
firstCommitProxy; // not serialized, used for commitOnFirstProxy when the commit proxies vector has been shrunk
double clientTxnInfoSampleRate;
int64_t clientTxnInfoSizeLimit;
Optional<Value> forward;
@ -122,7 +123,7 @@ struct ClientDBInfo {
if constexpr (!is_fb_function<Archive>) {
ASSERT(ar.protocolVersion().isValid());
}
serializer(ar, grvProxies, masterProxies, id, clientTxnInfoSampleRate, clientTxnInfoSizeLimit, forward,
serializer(ar, grvProxies, commitProxies, id, clientTxnInfoSampleRate, clientTxnInfoSizeLimit, forward,
transactionTagSampleRate, transactionTagSampleCost);
}
};


@ -25,7 +25,7 @@
#include "fdbclient/FDBTypes.h"
#include "fdbrpc/fdbrpc.h"
#include "fdbrpc/Locality.h"
#include "fdbclient/MasterProxyInterface.h"
#include "fdbclient/CommitProxyInterface.h"
#include "fdbclient/ClusterInterface.h"
const int MAX_CLUSTER_FILE_BYTES = 60000;


@ -29,12 +29,12 @@ DatabaseConfiguration::DatabaseConfiguration()
void DatabaseConfiguration::resetInternal() {
// does NOT reset rawConfiguration
initialized = false;
proxyCount = grvProxyCount = resolverCount = desiredTLogCount = tLogWriteAntiQuorum = tLogReplicationFactor =
commitProxyCount = grvProxyCount = resolverCount = desiredTLogCount = tLogWriteAntiQuorum = tLogReplicationFactor =
storageTeamSize = desiredLogRouterCount = -1;
tLogVersion = TLogVersion::DEFAULT;
tLogDataStoreType = storageServerStoreType = KeyValueStoreType::END;
tLogSpillType = TLogSpillType::DEFAULT;
autoProxyCount = CLIENT_KNOBS->DEFAULT_AUTO_PROXIES;
autoCommitProxyCount = CLIENT_KNOBS->DEFAULT_AUTO_COMMIT_PROXIES;
autoGrvProxyCount = CLIENT_KNOBS->DEFAULT_AUTO_GRV_PROXIES;
autoResolverCount = CLIENT_KNOBS->DEFAULT_AUTO_RESOLVERS;
autoDesiredTLogCount = CLIENT_KNOBS->DEFAULT_AUTO_LOGS;
@ -169,7 +169,7 @@ bool DatabaseConfiguration::isValid() const {
tLogWriteAntiQuorum <= tLogReplicationFactor/2 &&
tLogReplicationFactor >= 1 &&
storageTeamSize >= 1 &&
getDesiredProxies() >= 1 &&
getDesiredCommitProxies() >= 1 &&
getDesiredGrvProxies() >= 1 &&
getDesiredLogs() >= 1 &&
getDesiredResolvers() >= 1 &&
@ -180,7 +180,7 @@ bool DatabaseConfiguration::isValid() const {
tLogSpillType != TLogSpillType::UNSET &&
!(tLogSpillType == TLogSpillType::REFERENCE && tLogVersion < TLogVersion::V3) &&
storageServerStoreType != KeyValueStoreType::END &&
autoProxyCount >= 1 &&
autoCommitProxyCount >= 1 &&
autoGrvProxyCount >= 1 &&
autoResolverCount >= 1 &&
autoDesiredTLogCount >= 1 &&
@ -198,7 +198,6 @@ bool DatabaseConfiguration::isValid() const {
( regions.size() == 0 || tLogPolicy->info() != "dcid^2 x zoneid^2 x 1") ) ) { //We cannot specify regions with three_datacenter replication
return false;
}
std::set<Key> dcIds;
dcIds.insert(Key());
for(auto& r : regions) {
@ -318,11 +317,11 @@ StatusObject DatabaseConfiguration::toJSON(bool noPolicies) const {
if (desiredTLogCount != -1 || isOverridden("logs")) {
result["logs"] = desiredTLogCount;
}
if (proxyCount != -1 || isOverridden("proxies")) {
result["proxies"] = proxyCount;
if (commitProxyCount != -1 || isOverridden("commit_proxies")) {
result["commit_proxies"] = commitProxyCount;
}
if (grvProxyCount != -1 || isOverridden("grv_proxies")) {
result["grv_proxies"] = proxyCount;
result["grv_proxies"] = commitProxyCount;
}
if (resolverCount != -1 || isOverridden("resolvers")) {
result["resolvers"] = resolverCount;
@ -336,8 +335,8 @@ StatusObject DatabaseConfiguration::toJSON(bool noPolicies) const {
if (repopulateRegionAntiQuorum != 0 || isOverridden("repopulate_anti_quorum")) {
result["repopulate_anti_quorum"] = repopulateRegionAntiQuorum;
}
if (autoProxyCount != CLIENT_KNOBS->DEFAULT_AUTO_PROXIES || isOverridden("auto_proxies")) {
result["auto_proxies"] = autoProxyCount;
if (autoCommitProxyCount != CLIENT_KNOBS->DEFAULT_AUTO_COMMIT_PROXIES || isOverridden("auto_commit_proxies")) {
result["auto_commit_proxies"] = autoCommitProxyCount;
}
if (autoGrvProxyCount != CLIENT_KNOBS->DEFAULT_AUTO_GRV_PROXIES || isOverridden("auto_grv_proxies")) {
result["auto_grv_proxies"] = autoGrvProxyCount;
@ -419,8 +418,8 @@ bool DatabaseConfiguration::setInternal(KeyRef key, ValueRef value) {
if (ck == LiteralStringRef("initialized")) {
initialized = true;
} else if (ck == LiteralStringRef("proxies")) {
parse(&proxyCount, value);
} else if (ck == LiteralStringRef("commit_proxies")) {
parse(&commitProxyCount, value);
} else if (ck == LiteralStringRef("grv_proxies")) {
parse(&grvProxyCount, value);
} else if (ck == LiteralStringRef("resolvers")) {
@ -459,8 +458,8 @@ bool DatabaseConfiguration::setInternal(KeyRef key, ValueRef value) {
} else if (ck == LiteralStringRef("storage_engine")) {
parse((&type), value);
storageServerStoreType = (KeyValueStoreType::StoreType)type;
} else if (ck == LiteralStringRef("auto_proxies")) {
parse(&autoProxyCount, value);
} else if (ck == LiteralStringRef("auto_commit_proxies")) {
parse(&autoCommitProxyCount, value);
} else if (ck == LiteralStringRef("auto_grv_proxies")) {
parse(&autoGrvProxyCount, value);
} else if (ck == LiteralStringRef("auto_resolvers")) {


@ -149,9 +149,9 @@ struct DatabaseConfiguration {
return std::min(tLogReplicationFactor - 1 - tLogWriteAntiQuorum, storageTeamSize - 1);
}
// Proxy Servers
int32_t proxyCount;
int32_t autoProxyCount;
// CommitProxy Servers
int32_t commitProxyCount;
int32_t autoCommitProxyCount;
int32_t grvProxyCount;
int32_t autoGrvProxyCount;
@ -192,7 +192,10 @@ struct DatabaseConfiguration {
bool isExcludedServer( NetworkAddressList ) const;
std::set<AddressExclusion> getExcludedServers() const;
int32_t getDesiredProxies() const { if(proxyCount == -1) return autoProxyCount; return proxyCount; }
int32_t getDesiredCommitProxies() const {
if (commitProxyCount == -1) return autoCommitProxyCount;
return commitProxyCount;
}
int32_t getDesiredGrvProxies() const {
if (grvProxyCount == -1) return autoGrvProxyCount;
return grvProxyCount;


@ -29,7 +29,7 @@
#include "fdbclient/NativeAPI.actor.h"
#include "fdbclient/KeyRangeMap.h"
#include "fdbclient/MasterProxyInterface.h"
#include "fdbclient/CommitProxyInterface.h"
#include "fdbclient/SpecialKeySpace.actor.h"
#include "fdbrpc/QueueModel.h"
#include "fdbrpc/MultiInterface.h"
@ -68,7 +68,7 @@ struct LocationInfo : MultiInterface<ReferencedInterface<StorageServerInterface>
}
};
using ProxyInfo = ModelInterface<MasterProxyInterface>;
using CommitProxyInfo = ModelInterface<CommitProxyInterface>;
using GrvProxyInfo = ModelInterface<GrvProxyInterface>;
class ClientTagThrottleData : NonCopyable {
@ -165,8 +165,8 @@ public:
bool sampleOnCost(uint64_t cost) const;
void updateProxies();
Reference<ProxyInfo> getMasterProxies(bool useProvisionalProxies);
Future<Reference<ProxyInfo>> getMasterProxiesFuture(bool useProvisionalProxies);
Reference<CommitProxyInfo> getCommitProxies(bool useProvisionalProxies);
Future<Reference<CommitProxyInfo>> getCommitProxiesFuture(bool useProvisionalProxies);
Reference<GrvProxyInfo> getGrvProxies(bool useProvisionalProxies);
Future<Void> onProxiesChanged();
Future<HealthMetrics> getHealthMetrics(bool detailed);
@ -219,9 +219,9 @@ public:
Reference<AsyncVar<Reference<ClusterConnectionFile>>> connectionFile;
AsyncTrigger proxiesChangeTrigger;
Future<Void> monitorProxiesInfoChange;
Reference<ProxyInfo> masterProxies;
Reference<CommitProxyInfo> commitProxies;
Reference<GrvProxyInfo> grvProxies;
bool proxyProvisional;
bool proxyProvisional; // Provisional commit proxy and grv proxy are used at the same time.
UID proxiesLastChange;
LocalityData clientLocality;
QueueModel queueModel;


@ -563,6 +563,8 @@ namespace fileBackup {
if(rLen != len)
throw restore_bad_read();
simulateBlobFailure();
Standalone<VectorRef<KeyValueRef>> results({}, buf.arena());
state StringRefReader reader(buf, restore_corrupted_data());
@ -606,7 +608,7 @@ namespace fileBackup {
return results;
} catch(Error &e) {
TraceEvent(SevWarn, "FileRestoreCorruptRangeFileBlock")
TraceEvent(SevWarn, "FileRestoreDecodeRangeFileBlockFailed")
.error(e)
.detail("Filename", file->getFilename())
.detail("BlockOffset", offset)
@ -5021,3 +5023,18 @@ ACTOR Future<Void> transformRestoredDatabase(Database cx, Standalone<VectorRef<K
return Void();
}
void simulateBlobFailure() {
if (BUGGIFY && deterministicRandom()->random01() < 0.01) { // Simulate blob failures
double i = deterministicRandom()->random01();
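// (Editorial note) Given the draw above, the simulated failure splits roughly as:
// 50% http_request_failed, 20% connection_failed, 10% timed_out, 10% lookup_failed,
// and the remaining 10% falls through with no error thrown.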
if (i < 0.5) {
throw http_request_failed();
} else if (i < 0.7) {
throw connection_failed();
} else if (i < 0.8) {
throw timed_out();
} else if (i < 0.9) {
throw lookup_failed();
}
}
}


@ -27,6 +27,8 @@
// with RateKeeper to gather health information of the cluster.
struct GrvProxyInterface {
constexpr static FileIdentifier file_identifier = 8743216;
enum { LocationAwareLoadBalance = 1 };
enum { AlwaysFresh = 1 };
Optional<Key> processId;
bool provisional;


@ -72,7 +72,7 @@ namespace HTTP {
}
PacketBuffer * writeRequestHeader(std::string const &verb, std::string const &resource, HTTP::Headers const &headers, PacketBuffer *dest) {
PacketWriter writer(dest, NULL, Unversioned());
PacketWriter writer(dest, nullptr, Unversioned());
writer.serializeBytes(verb);
writer.serializeBytes(" ", 1);
writer.serializeBytes(resource);
@ -238,7 +238,7 @@ namespace HTTP {
{
// Read the line that contains the chunk length as text in hex
size_t lineLen = wait(read_delimited_into_string(conn, "\r\n", &r->content, pos));
state int chunkLen = strtol(r->content.substr(pos, lineLen).c_str(), NULL, 16);
state int chunkLen = strtol(r->content.substr(pos, lineLen).c_str(), nullptr, 16);
// Instead of advancing pos, erase the chunk length header line (line length + delimiter size) from the content buffer
r->content.erase(pos, lineLen + 2);
@ -301,7 +301,7 @@ namespace HTTP {
state TraceEvent event(SevDebug, "HTTPRequest");
state UnsentPacketQueue empty;
if(pContent == NULL)
if(pContent == nullptr)
pContent = &empty;
// There is no standard http request id header field, so either a global default can be set via a knob


@ -67,11 +67,11 @@
// // The following would throw if a.b.c did not exist, or if it was not an int.
// int x = r["a.b.c"].get_int();
struct JSONDoc {
JSONDoc() : pObj(NULL) {}
JSONDoc() : pObj(nullptr) {}
// Construction from const json_spirit::mObject, trivial and will never throw.
// Resulting JSONDoc will not allow modifications.
JSONDoc(const json_spirit::mObject &o) : pObj(&o), wpObj(NULL) {}
JSONDoc(const json_spirit::mObject &o) : pObj(&o), wpObj(nullptr) {}
// Construction from json_spirit::mObject. Allows modifications.
JSONDoc(json_spirit::mObject &o) : pObj(&o), wpObj(&o) {}
@ -79,7 +79,7 @@ struct JSONDoc {
// Construction from const json_spirit::mValue (which is a Variant type) which will try to
// convert it to an mObject. This will throw if that fails, just as it would
// if the caller called get_obj() itself and used the previous constructor instead.
JSONDoc(const json_spirit::mValue &v) : pObj(&v.get_obj()), wpObj(NULL) {}
JSONDoc(const json_spirit::mValue &v) : pObj(&v.get_obj()), wpObj(nullptr) {}
// Construction from non-const json_spirit::mValue - will convert the mValue to
// an object if it isn't already and then attach to it.
@ -98,13 +98,13 @@ struct JSONDoc {
// path into on the "dot" character.
// When a path is found, pLast is updated.
bool has(std::string path, bool split=true) {
if (pObj == NULL)
if (pObj == nullptr)
return false;
if (path.empty())
return false;
size_t start = 0;
const json_spirit::mValue *curVal = NULL;
const json_spirit::mValue *curVal = nullptr;
while (start < path.size())
{
// If a path segment is found then curVal must be an object
@ -140,7 +140,7 @@ struct JSONDoc {
// Creates the given path (forcing Objects to exist along its depth, replacing whatever else might have been there)
// and returns a reference to the Value at that location.
json_spirit::mValue & create(std::string path, bool split=true) {
if (wpObj == NULL || path.empty())
if (wpObj == nullptr || path.empty())
throw std::runtime_error("JSON Object not writable or bad JSON path");
size_t start = 0;
@ -280,7 +280,7 @@ struct JSONDoc {
}
const json_spirit::mValue & last() const { return *pLast; }
bool valid() const { return pObj != NULL; }
bool valid() const { return pObj != nullptr; }
const json_spirit::mObject & obj() {
// This dummy object is necessary to make working with obj() easier when this does not currently
@ -304,7 +304,7 @@ struct JSONDoc {
static uint64_t expires_reference_version;
private:
const json_spirit::mObject *pObj;
// Writeable pointer to the same object. Will be NULL if initialized from a const object.
// Writeable pointer to the same object. Will be nullptr if initialized from a const object.
json_spirit::mObject *wpObj;
const json_spirit::mValue *pLast;
};
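
A brief sketch of the dotted-path access described in the comments above, assuming fdbclient/JSONDoc.h and the json_spirit reader are available (the JSON literal and the path "a.b.c" are illustrative only):

#include "fdbclient/JSONDoc.h"

int readNestedInt() {
	json_spirit::mValue parsed;
	json_spirit::read_string(std::string(R"({"a":{"b":{"c":7}}})"), parsed);
	JSONDoc doc(parsed.get_obj());   // attach to the parsed object
	if (doc.has("a.b.c"))            // splits the path on '.' and walks nested objects
		return doc.last().get_int(); // has() leaves pLast on the found value, so this returns 7
	return -1;                       // path missing
}
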

View File

@ -52,7 +52,7 @@ void ClientKnobs::initialize(bool randomize) {
init( COORDINATOR_RECONNECTION_DELAY, 1.0 );
init( CLIENT_EXAMPLE_AMOUNT, 20 );
init( MAX_CLIENT_STATUS_AGE, 1.0 );
init( MAX_MASTER_PROXY_CONNECTIONS, 5 ); if( randomize && BUGGIFY ) MAX_MASTER_PROXY_CONNECTIONS = 1;
init( MAX_COMMIT_PROXY_CONNECTIONS, 5 ); if( randomize && BUGGIFY ) MAX_COMMIT_PROXY_CONNECTIONS = 1;
init( MAX_GRV_PROXY_CONNECTIONS, 3 ); if( randomize && BUGGIFY ) MAX_GRV_PROXY_CONNECTIONS = 1;
init( STATUS_IDLE_TIMEOUT, 120.0 );
@ -171,7 +171,7 @@ void ClientKnobs::initialize(bool randomize) {
init( MIN_CLEANUP_SECONDS, 3600.0 );
// Configuration
init( DEFAULT_AUTO_PROXIES, 3 );
init( DEFAULT_AUTO_COMMIT_PROXIES, 3 );
init( DEFAULT_AUTO_GRV_PROXIES, 1 );
init( DEFAULT_AUTO_RESOLVERS, 1 );
init( DEFAULT_AUTO_LOGS, 3 );

View File

@ -46,7 +46,7 @@ public:
double COORDINATOR_RECONNECTION_DELAY;
int CLIENT_EXAMPLE_AMOUNT;
double MAX_CLIENT_STATUS_AGE;
int MAX_MASTER_PROXY_CONNECTIONS;
int MAX_COMMIT_PROXY_CONNECTIONS;
int MAX_GRV_PROXY_CONNECTIONS;
double STATUS_IDLE_TIMEOUT;
@ -167,7 +167,7 @@ public:
double MIN_CLEANUP_SECONDS;
// Configuration
int32_t DEFAULT_AUTO_PROXIES;
int32_t DEFAULT_AUTO_COMMIT_PROXIES;
int32_t DEFAULT_AUTO_GRV_PROXIES;
int32_t DEFAULT_AUTO_RESOLVERS;
int32_t DEFAULT_AUTO_LOGS;

View File

@ -33,6 +33,7 @@
#include "fdbclient/DatabaseContext.h"
#include "fdbrpc/simulator.h"
#include "fdbclient/StatusClient.h"
#include "flow/Trace.h"
#include "flow/UnitTest.h"
#include "fdbrpc/ReplicationPolicy.h"
#include "fdbrpc/Replication.h"
@ -78,8 +79,9 @@ std::map<std::string, std::string> configForToken( std::string const& mode ) {
std::string key = mode.substr(0, pos);
std::string value = mode.substr(pos+1);
if ((key == "logs" || key == "proxies" || key == "grv_proxies" || key == "resolvers" || key == "remote_logs" ||
key == "log_routers" || key == "usable_regions" || key == "repopulate_anti_quorum") &&
if ((key == "logs" || key == "commit_proxies" || key == "grv_proxies" || key == "resolvers" ||
key == "remote_logs" || key == "log_routers" || key == "usable_regions" ||
key == "repopulate_anti_quorum") &&
isInteger(value)) {
out[p+key] = value;
}
@ -656,7 +658,7 @@ ConfigureAutoResult parseConfig( StatusObject const& status ) {
}
if (processClass.classType() == ProcessClass::TransactionClass ||
processClass.classType() == ProcessClass::ProxyClass ||
processClass.classType() == ProcessClass::CommitProxyClass ||
processClass.classType() == ProcessClass::GrvProxyClass ||
processClass.classType() == ProcessClass::ResolutionClass ||
processClass.classType() == ProcessClass::StatelessClass ||
@ -701,7 +703,7 @@ ConfigureAutoResult parseConfig( StatusObject const& status ) {
if (proc.second == ProcessClass::StatelessClass) {
existingStatelessCount++;
}
if(proc.second == ProcessClass::ProxyClass) {
if (proc.second == ProcessClass::CommitProxyClass) {
existingProxyCount++;
}
if (proc.second == ProcessClass::GrvProxyClass) {
@ -734,19 +736,18 @@ ConfigureAutoResult parseConfig( StatusObject const& status ) {
resolverCount = result.old_resolvers;
}
result.desired_proxies = std::max(std::min(12, processCount / 15), 1);
result.desired_commit_proxies = std::max(std::min(12, processCount / 15), 1);
int proxyCount;
if (!statusObjConfig.get("proxies", result.old_proxies)) {
result.old_proxies = CLIENT_KNOBS->DEFAULT_AUTO_PROXIES;
statusObjConfig.get("auto_proxies", result.old_proxies);
result.auto_proxies = result.desired_proxies;
proxyCount = result.auto_proxies;
if (!statusObjConfig.get("commit_proxies", result.old_commit_proxies)) {
result.old_commit_proxies = CLIENT_KNOBS->DEFAULT_AUTO_COMMIT_PROXIES;
statusObjConfig.get("auto_commit_proxies", result.old_commit_proxies);
result.auto_commit_proxies = result.desired_commit_proxies;
proxyCount = result.auto_commit_proxies;
} else {
result.auto_proxies = result.old_proxies;
proxyCount = result.old_proxies;
result.auto_commit_proxies = result.old_commit_proxies;
proxyCount = result.old_commit_proxies;
}
// Need to configure a good number.
result.desired_grv_proxies = std::max(std::min(4, processCount / 20), 1);
int grvProxyCount;
if (!statusObjConfig.get("grv_proxies", result.old_grv_proxies)) {
@ -857,8 +858,8 @@ ACTOR Future<ConfigurationResult::Type> autoConfig( Database cx, ConfigureAutoRe
if (conf.auto_logs != conf.old_logs)
tr.set(configKeysPrefix.toString() + "auto_logs", format("%d", conf.auto_logs));
if(conf.auto_proxies != conf.old_proxies)
tr.set(configKeysPrefix.toString() + "auto_proxies", format("%d", conf.auto_proxies));
if (conf.auto_commit_proxies != conf.old_commit_proxies)
tr.set(configKeysPrefix.toString() + "auto_commit_proxies", format("%d", conf.auto_commit_proxies));
if (conf.auto_grv_proxies != conf.old_grv_proxies)
tr.set(configKeysPrefix.toString() + "auto_grv_proxies", format("%d", conf.auto_grv_proxies));

View File

@ -86,7 +86,7 @@ struct ConfigureAutoResult {
int32_t machines;
std::string old_replication;
int32_t old_proxies;
int32_t old_commit_proxies;
int32_t old_grv_proxies;
int32_t old_resolvers;
int32_t old_logs;
@ -94,23 +94,24 @@ struct ConfigureAutoResult {
int32_t old_machines_with_transaction;
std::string auto_replication;
int32_t auto_proxies;
int32_t auto_commit_proxies;
int32_t auto_grv_proxies;
int32_t auto_resolvers;
int32_t auto_logs;
int32_t auto_processes_with_transaction;
int32_t auto_machines_with_transaction;
int32_t desired_proxies;
int32_t desired_commit_proxies;
int32_t desired_grv_proxies;
int32_t desired_resolvers;
int32_t desired_logs;
ConfigureAutoResult()
: processes(-1), machines(-1), old_proxies(-1), old_grv_proxies(-1), old_resolvers(-1), old_logs(-1),
old_processes_with_transaction(-1), old_machines_with_transaction(-1), auto_proxies(-1), auto_grv_proxies(-1),
auto_resolvers(-1), auto_logs(-1), auto_processes_with_transaction(-1), auto_machines_with_transaction(-1),
desired_proxies(-1), desired_grv_proxies(-1), desired_resolvers(-1), desired_logs(-1) {}
: processes(-1), machines(-1), old_commit_proxies(-1), old_grv_proxies(-1), old_resolvers(-1), old_logs(-1),
old_processes_with_transaction(-1), old_machines_with_transaction(-1), auto_commit_proxies(-1),
auto_grv_proxies(-1), auto_resolvers(-1), auto_logs(-1), auto_processes_with_transaction(-1),
auto_machines_with_transaction(-1), desired_commit_proxies(-1), desired_grv_proxies(-1), desired_resolvers(-1),
desired_logs(-1) {}
bool isValid() const { return processes != -1; }
};

View File

@ -171,7 +171,7 @@ ACTOR Future<Void> metricRuleUpdater(Database cx, MetricsConfig *config, TDMetri
// Implementation of IMetricDB
class MetricDB : public IMetricDB {
public:
MetricDB(ReadYourWritesTransaction *tr = NULL) : tr(tr) {}
MetricDB(ReadYourWritesTransaction *tr = nullptr) : tr(tr) {}
~MetricDB() {}
// levelKey is the prefix for the entire level, no timestamp at the end

View File

@ -624,7 +624,7 @@ ACTOR Future<Void> getClientInfoFromLeader( Reference<AsyncVar<Optional<ClusterC
choose {
when( ClientDBInfo ni = wait( brokenPromiseToNever( knownLeader->get().get().clientInterface.openDatabase.getReply( req ) ) ) ) {
TraceEvent("MonitorLeaderForProxiesGotClientInfo", knownLeader->get().get().clientInterface.id())
.detail("MasterProxy0", ni.masterProxies.size() ? ni.masterProxies[0].id() : UID())
.detail("CommitProxy0", ni.commitProxies.size() ? ni.commitProxies[0].id() : UID())
.detail("GrvProxy0", ni.grvProxies.size() ? ni.grvProxies[0].id() : UID())
.detail("ClientID", ni.id);
clientData->clientInfo->set(CachedSerialization<ClientDBInfo>(ni));
@ -681,24 +681,25 @@ ACTOR Future<Void> monitorLeaderForProxies( Key clusterKey, vector<NetworkAddres
}
}
void shrinkProxyList( ClientDBInfo& ni, std::vector<UID>& lastMasterProxyUIDs, std::vector<MasterProxyInterface>& lastMasterProxies,
std::vector<UID>& lastGrvProxyUIDs, std::vector<GrvProxyInterface>& lastGrvProxies) {
if(ni.masterProxies.size() > CLIENT_KNOBS->MAX_MASTER_PROXY_CONNECTIONS) {
std::vector<UID> masterProxyUIDs;
for(auto& masterProxy : ni.masterProxies) {
masterProxyUIDs.push_back(masterProxy.id());
void shrinkProxyList(ClientDBInfo& ni, std::vector<UID>& lastCommitProxyUIDs,
std::vector<CommitProxyInterface>& lastCommitProxies, std::vector<UID>& lastGrvProxyUIDs,
std::vector<GrvProxyInterface>& lastGrvProxies) {
if (ni.commitProxies.size() > CLIENT_KNOBS->MAX_COMMIT_PROXY_CONNECTIONS) {
std::vector<UID> commitProxyUIDs;
for (auto& commitProxy : ni.commitProxies) {
commitProxyUIDs.push_back(commitProxy.id());
}
if(masterProxyUIDs != lastMasterProxyUIDs) {
lastMasterProxyUIDs.swap(masterProxyUIDs);
lastMasterProxies = ni.masterProxies;
deterministicRandom()->randomShuffle(lastMasterProxies);
lastMasterProxies.resize(CLIENT_KNOBS->MAX_MASTER_PROXY_CONNECTIONS);
for(int i = 0; i < lastMasterProxies.size(); i++) {
TraceEvent("ConnectedMasterProxy").detail("MasterProxy", lastMasterProxies[i].id());
if (commitProxyUIDs != lastCommitProxyUIDs) {
lastCommitProxyUIDs.swap(commitProxyUIDs);
lastCommitProxies = ni.commitProxies;
deterministicRandom()->randomShuffle(lastCommitProxies);
lastCommitProxies.resize(CLIENT_KNOBS->MAX_COMMIT_PROXY_CONNECTIONS);
for (int i = 0; i < lastCommitProxies.size(); i++) {
TraceEvent("ConnectedCommitProxy").detail("CommitProxy", lastCommitProxies[i].id());
}
}
ni.firstProxy = ni.masterProxies[0];
ni.masterProxies = lastMasterProxies;
ni.firstCommitProxy = ni.commitProxies[0];
ni.commitProxies = lastCommitProxies;
}
if(ni.grvProxies.size() > CLIENT_KNOBS->MAX_GRV_PROXY_CONNECTIONS) {
std::vector<UID> grvProxyUIDs;
@ -719,14 +720,16 @@ void shrinkProxyList( ClientDBInfo& ni, std::vector<UID>& lastMasterProxyUIDs, s
}
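
shrinkProxyList above caps how many commit proxies (and GRV proxies) a single client connects to by shuffling the advertised list and truncating it to the connection knob, reshuffling only when the advertised UID list actually changes. A standalone sketch of the shuffle-and-truncate pattern (the std::string elements, the RNG, and the limit are stand-ins for the real FDB interface types, deterministic RNG, and MAX_COMMIT_PROXY_CONNECTIONS knob):

#include <algorithm>
#include <random>
#include <string>
#include <vector>

// Pick a bounded, per-client subset of the advertised proxies so load spreads across clients.
std::vector<std::string> pickProxySubset(std::vector<std::string> advertised, size_t maxConnections) {
	if (advertised.size() <= maxConnections) return advertised;
	std::mt19937 rng(std::random_device{}());
	std::shuffle(advertised.begin(), advertised.end(), rng); // each client keeps a different random subset
	advertised.resize(maxConnections);                       // truncate to the connection limit
	return advertised;
}
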
// Leader is the process that will be elected by coordinators as the cluster controller
ACTOR Future<MonitorLeaderInfo> monitorProxiesOneGeneration( Reference<ClusterConnectionFile> connFile, Reference<AsyncVar<ClientDBInfo>> clientInfo, MonitorLeaderInfo info, Reference<ReferencedObject<Standalone<VectorRef<ClientVersionRef>>>> supportedVersions, Key traceLogGroup) {
ACTOR Future<MonitorLeaderInfo> monitorProxiesOneGeneration(
Reference<ClusterConnectionFile> connFile, Reference<AsyncVar<ClientDBInfo>> clientInfo, MonitorLeaderInfo info,
Reference<ReferencedObject<Standalone<VectorRef<ClientVersionRef>>>> supportedVersions, Key traceLogGroup) {
state ClusterConnectionString cs = info.intermediateConnFile->getConnectionString();
state vector<NetworkAddress> addrs = cs.coordinators();
state int idx = 0;
state int successIdx = 0;
state Optional<double> incorrectTime;
state std::vector<UID> lastProxyUIDs;
state std::vector<MasterProxyInterface> lastProxies;
state std::vector<UID> lastCommitProxyUIDs;
state std::vector<CommitProxyInterface> lastCommitProxies;
state std::vector<UID> lastGrvProxyUIDs;
state std::vector<GrvProxyInterface> lastGrvProxies;
@ -780,7 +783,7 @@ ACTOR Future<MonitorLeaderInfo> monitorProxiesOneGeneration( Reference<ClusterCo
connFile->notifyConnected();
auto& ni = rep.get().mutate();
shrinkProxyList(ni, lastProxyUIDs, lastProxies, lastGrvProxyUIDs, lastGrvProxies);
shrinkProxyList(ni, lastCommitProxyUIDs, lastCommitProxies, lastGrvProxyUIDs, lastGrvProxies);
clientInfo->set( ni );
successIdx = idx;
} else {

View File

@ -25,7 +25,7 @@
#include "fdbclient/FDBTypes.h"
#include "fdbclient/CoordinationInterface.h"
#include "fdbclient/ClusterInterface.h"
#include "fdbclient/MasterProxyInterface.h"
#include "fdbclient/CommitProxyInterface.h"
#define CLUSTER_FILE_ENV_VAR_NAME "FDB_CLUSTER_FILE"
@ -67,8 +67,9 @@ Future<Void> monitorLeaderForProxies( Value const& key, vector<NetworkAddress> c
Future<Void> monitorProxies( Reference<AsyncVar<Reference<ClusterConnectionFile>>> const& connFile, Reference<AsyncVar<ClientDBInfo>> const& clientInfo, Reference<ReferencedObject<Standalone<VectorRef<ClientVersionRef>>>> const& supportedVersions, Key const& traceLogGroup );
void shrinkProxyList( ClientDBInfo& ni, std::vector<UID>& lastMasterProxyUIDs, std::vector<MasterProxyInterface>& lastMasterProxies,
std::vector<UID>& lastGrvProxyUIDs, std::vector<GrvProxyInterface>& lastGrvProxies);
void shrinkProxyList(ClientDBInfo& ni, std::vector<UID>& lastCommitProxyUIDs,
std::vector<CommitProxyInterface>& lastCommitProxies, std::vector<UID>& lastGrvProxyUIDs,
std::vector<GrvProxyInterface>& lastGrvProxies);
#ifndef __INTEL_COMPILER
#pragma region Implementation

View File

@ -163,7 +163,7 @@ public:
if(destroyNow) {
api->futureDestroy(f);
f = NULL;
f = nullptr;
}
return destroyNow;
@ -202,7 +202,7 @@ public:
auto sav = (DLThreadSingleAssignmentVar<T>*)param;
if(MultiVersionApi::api->callbackOnMainThread) {
onMainThreadVoid([sav](){ sav->apply(); }, NULL);
onMainThreadVoid([sav](){ sav->apply(); }, nullptr);
}
else {
sav->apply();

View File

@ -224,7 +224,7 @@ ThreadFuture<int64_t> DLTransaction::getApproximateSize() {
}
void DLTransaction::setOption(FDBTransactionOptions::Option option, Optional<StringRef> value) {
throwIfError(api->transactionSetOption(tr, option, value.present() ? value.get().begin() : NULL, value.present() ? value.get().size() : 0));
throwIfError(api->transactionSetOption(tr, option, value.present() ? value.get().begin() : nullptr, value.present() ? value.get().size() : 0));
}
ThreadFuture<Void> DLTransaction::onError(Error const& e) {
@ -262,14 +262,14 @@ Reference<ITransaction> DLDatabase::createTransaction() {
}
void DLDatabase::setOption(FDBDatabaseOptions::Option option, Optional<StringRef> value) {
throwIfError(api->databaseSetOption(db, option, value.present() ? value.get().begin() : NULL, value.present() ? value.get().size() : 0));
throwIfError(api->databaseSetOption(db, option, value.present() ? value.get().begin() : nullptr, value.present() ? value.get().size() : 0));
}
// DLApi
template<class T>
void loadClientFunction(T *fp, void *lib, std::string libPath, const char *functionName, bool requireFunction = true) {
*(void**)(fp) = loadFunction(lib, functionName);
if(*fp == NULL && requireFunction) {
if(*fp == nullptr && requireFunction) {
TraceEvent(SevError, "ErrorLoadingFunction").detail("LibraryPath", libPath).detail("Function", functionName);
throw platform_error();
}
@ -283,7 +283,7 @@ void DLApi::init() {
}
void* lib = loadLibrary(fdbCPath.c_str());
if(lib == NULL) {
if(lib == nullptr) {
TraceEvent(SevError, "ErrorLoadingExternalClientLibrary").detail("LibraryPath", fdbCPath);
throw platform_error();
}
@ -347,7 +347,7 @@ void DLApi::selectApiVersion(int apiVersion) {
init();
throwIfError(api->selectApiVersion(apiVersion, headerVersion));
throwIfError(api->setNetworkOption(FDBNetworkOptions::EXTERNAL_CLIENT, NULL, 0));
throwIfError(api->setNetworkOption(FDBNetworkOptions::EXTERNAL_CLIENT, nullptr, 0));
}
const char* DLApi::getClientVersion() {
@ -359,7 +359,7 @@ const char* DLApi::getClientVersion() {
}
void DLApi::setNetworkOption(FDBNetworkOptions::Option option, Optional<StringRef> value) {
throwIfError(api->setNetworkOption(option, value.present() ? value.get().begin() : NULL, value.present() ? value.get().size() : 0));
throwIfError(api->setNetworkOption(option, value.present() ? value.get().begin() : nullptr, value.present() ? value.get().size() : 0));
}
void DLApi::setupNetwork() {
@ -786,7 +786,7 @@ void MultiVersionDatabase::Connector::connect() {
else {
delref();
}
}, NULL);
}, nullptr);
}
// Only called from main thread
@ -805,7 +805,7 @@ void MultiVersionDatabase::Connector::fire(const Void &unused, int& userParam) {
dbState->stateChanged();
}
delref();
}, NULL);
}, nullptr);
}
void MultiVersionDatabase::Connector::error(const Error& e, int& userParam) {
@ -820,7 +820,7 @@ void MultiVersionDatabase::Connector::error(const Error& e, int& userParam) {
}
MultiVersionDatabase::DatabaseState::DatabaseState()
: dbVar(new ThreadSafeAsyncVar<Reference<IDatabase>>(Reference<IDatabase>(NULL))), currentClientIndex(-1) {}
: dbVar(new ThreadSafeAsyncVar<Reference<IDatabase>>(Reference<IDatabase>(nullptr))), currentClientIndex(-1) {}
// Only called from main thread
void MultiVersionDatabase::DatabaseState::stateChanged() {
@ -898,7 +898,7 @@ void MultiVersionDatabase::DatabaseState::cancelConnections() {
connectionAttempts.clear();
clients.clear();
delref();
}, NULL);
}, nullptr);
}
// MultiVersionApi
@ -1043,7 +1043,7 @@ void MultiVersionApi::setSupportedClientVersions(Standalone<StringRef> versions)
	// This option must be set on the main thread because it modifies structures that can be used concurrently by the main thread
onMainThreadVoid([this, versions](){
localClient->api->setNetworkOption(FDBNetworkOptions::SUPPORTED_CLIENT_VERSIONS, versions);
}, NULL);
}, nullptr);
if(!bypassMultiClientApi) {
runOnExternalClients([versions](Reference<ClientInfo> client) {
@ -1654,7 +1654,7 @@ THREAD_FUNC runSingleAssignmentVarTest(void *arg) {
onMainThreadVoid([done](){
*done = true;
}, NULL);
}, nullptr);
}
catch(Error &e) {
printf("Caught error in test: %s\n", e.name());

View File

@ -286,7 +286,7 @@ struct ClientInfo : ThreadSafeReferenceCounted<ClientInfo> {
bool failed;
std::vector<std::pair<void (*)(void*), void*>> threadCompletionHooks;
ClientInfo() : protocolVersion(0), api(NULL), external(false), failed(true) {}
ClientInfo() : protocolVersion(0), api(nullptr), external(false), failed(true) {}
ClientInfo(IClientApi *api) : protocolVersion(0), api(api), libPath("internal"), external(false), failed(false) {}
ClientInfo(IClientApi *api, std::string libPath) : protocolVersion(0), api(api), libPath(libPath), external(true), failed(false) {}

View File

@ -62,7 +62,7 @@ public:
auto e = ptr->end(); // e points to the end of the current blob
			if (e == blob->data.end()) { // check whether e has reached the end of the current blob
blob = blob->next;
e = blob ? blob->data.begin() : NULL;
e = blob ? blob->data.begin() : nullptr;
}
ptr = (Header*)e;
decode();
@ -70,7 +70,7 @@ public:
bool operator == ( Iterator const& i ) const { return ptr == i.ptr; }
bool operator != ( Iterator const& i) const { return ptr != i.ptr; }
explicit operator bool() const { return blob!=NULL; }
explicit operator bool() const { return blob!=nullptr; }
typedef std::forward_iterator_tag iterator_category;
typedef const MutationRef value_type;
@ -79,7 +79,7 @@ public:
typedef const MutationRef& reference;
Iterator( Blob* blob, const Header* ptr ) : blob(blob), ptr(ptr) { decode(); }
Iterator() : blob(NULL), ptr(NULL) { }
Iterator() : blob(nullptr), ptr(nullptr) { }
private:
friend struct MutationListRef;
const Blob* blob; // The blob containing the indicated mutation
@ -95,16 +95,16 @@ public:
}
};
MutationListRef() : blob_begin(NULL), blob_end(NULL), totalBytes(0) {
MutationListRef() : blob_begin(nullptr), blob_end(nullptr), totalBytes(0) {
}
MutationListRef( Arena& ar, MutationListRef const& r ) : blob_begin(NULL), blob_end(NULL), totalBytes(0) {
MutationListRef( Arena& ar, MutationListRef const& r ) : blob_begin(nullptr), blob_end(nullptr), totalBytes(0) {
append_deep(ar, r.begin(), r.end());
}
Iterator begin() const {
if (blob_begin) return Iterator(blob_begin, (Header*)blob_begin->data.begin());
return Iterator(NULL, NULL);
return Iterator(nullptr, nullptr);
}
Iterator end() const { return Iterator(NULL, NULL); }
Iterator end() const { return Iterator(nullptr, nullptr); }
size_t expectedSize() const { return sizeof(Blob) + totalBytes; }
int totalSize() const { return totalBytes; }
@ -146,12 +146,13 @@ public:
if(totalBytes > 0) {
blob_begin = blob_end = new (ar.arena()) Blob;
blob_begin->next = NULL;
blob_begin->next = nullptr;
blob_begin->data = StringRef((const uint8_t*)ar.arenaRead(totalBytes), totalBytes); // Zero-copy read when deserializing from an ArenaReader
}
}
//FIXME: this is re-implemented on the master proxy to include a yield, any changes to this function should also done there
	// FIXME: this is re-implemented on the commit proxy to include a yield; any changes to this function should also
	// be done there
template <class Ar>
void serialize_save( Ar& ar ) const {
serializer(ar, totalBytes);
@ -180,7 +181,7 @@ private:
}
blob_end->data = StringRef(b, bytes);
blob_end->next = NULL;
blob_end->next = nullptr;
return b;
}
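
Given the blob-chained layout above, consuming a list is a plain forward-iterator walk. A sketch assuming fdbclient/MutationList.h and that the iterator dereferences to a MutationRef, as its value_type/reference typedefs suggest (the function name and the use of SetValue are illustrative):

// Count SetValue mutations in an already-populated list.
int countSetMutations(MutationListRef const& mutations) {
	int sets = 0;
	for (auto it = mutations.begin(); it != mutations.end(); ++it) {
		MutationRef const& m = *it;                  // decoded from the current blob's byte range
		if (m.type == MutationRef::SetValue) ++sets; // SetValue is one of the standard mutation types
	}
	return sets;
}
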

View File

@ -40,7 +40,7 @@
#include "fdbclient/KeyRangeMap.h"
#include "fdbclient/Knobs.h"
#include "fdbclient/ManagementAPI.actor.h"
#include "fdbclient/MasterProxyInterface.h"
#include "fdbclient/CommitProxyInterface.h"
#include "fdbclient/MonitorLeader.h"
#include "fdbclient/MutationList.h"
#include "fdbclient/ReadYourWrites.h"
@ -95,7 +95,7 @@ Future<REPLY_TYPE(Request)> loadBalance(
DatabaseContext* ctx, const Reference<LocationInfo> alternatives, RequestStream<Request> Interface::*channel,
const Request& request = Request(), TaskPriority taskID = TaskPriority::DefaultPromiseEndpoint,
bool atMostOnce = false, // if true, throws request_maybe_delivered() instead of retrying automatically
QueueModel* model = NULL) {
QueueModel* model = nullptr) {
if (alternatives->hasCaches) {
return loadBalance(alternatives->locations(), channel, request, taskID, atMostOnce, model);
}
@ -147,7 +147,7 @@ Reference<StorageServerInfo> StorageServerInfo::getInterface( DatabaseContext *c
}
void StorageServerInfo::notifyContextDestroyed() {
cx = NULL;
cx = nullptr;
}
StorageServerInfo::~StorageServerInfo() {
@ -155,7 +155,7 @@ StorageServerInfo::~StorageServerInfo() {
auto it = cx->server_interf.find( interf.id() );
if( it != cx->server_interf.end() )
cx->server_interf.erase( it );
cx = NULL;
cx = nullptr;
}
}
@ -484,15 +484,15 @@ ACTOR static Future<Void> clientStatusUpdateActor(DatabaseContext *cx) {
}
ACTOR static Future<Void> monitorProxiesChange(Reference<AsyncVar<ClientDBInfo>> clientDBInfo, AsyncTrigger *triggerVar) {
state vector< MasterProxyInterface > curProxies;
state vector<CommitProxyInterface> curCommitProxies;
state vector< GrvProxyInterface > curGrvProxies;
curProxies = clientDBInfo->get().masterProxies;
curCommitProxies = clientDBInfo->get().commitProxies;
curGrvProxies = clientDBInfo->get().grvProxies;
loop{
wait(clientDBInfo->onChange());
if (clientDBInfo->get().masterProxies != curProxies || clientDBInfo->get().grvProxies != curGrvProxies) {
curProxies = clientDBInfo->get().masterProxies;
if (clientDBInfo->get().commitProxies != curCommitProxies || clientDBInfo->get().grvProxies != curGrvProxies) {
curCommitProxies = clientDBInfo->get().commitProxies;
curGrvProxies = clientDBInfo->get().grvProxies;
triggerVar->trigger();
}
@ -881,7 +881,7 @@ DatabaseContext::DatabaseContext(Reference<AsyncVar<Reference<ClusterConnectionF
transactionsExpensiveClearCostEstCount("ExpensiveClearCostEstCount", cc),
specialKeySpace(std::make_unique<SpecialKeySpace>(specialKeys.begin, specialKeys.end, /* test */ false)) {
dbId = deterministicRandom()->randomUniqueID();
connected = (clientInfo->get().masterProxies.size() && clientInfo->get().grvProxies.size())
connected = (clientInfo->get().commitProxies.size() && clientInfo->get().grvProxies.size())
? Void()
: clientInfo->onChange();
@ -930,6 +930,16 @@ DatabaseContext::DatabaseContext(Reference<AsyncVar<Reference<ClusterConnectionF
std::make_unique<ExclusionInProgressRangeImpl>(
KeyRangeRef(LiteralStringRef("inProgressExclusion/"), LiteralStringRef("inProgressExclusion0"))
.withPrefix(SpecialKeySpace::getModuleRange(SpecialKeySpace::MODULE::MANAGEMENT).begin)));
registerSpecialKeySpaceModule(
SpecialKeySpace::MODULE::CONFIGURATION, SpecialKeySpace::IMPLTYPE::READWRITE,
std::make_unique<ProcessClassRangeImpl>(
KeyRangeRef(LiteralStringRef("process/class_type/"), LiteralStringRef("process/class_type0"))
.withPrefix(SpecialKeySpace::getModuleRange(SpecialKeySpace::MODULE::CONFIGURATION).begin)));
registerSpecialKeySpaceModule(
SpecialKeySpace::MODULE::CONFIGURATION, SpecialKeySpace::IMPLTYPE::READONLY,
std::make_unique<ProcessClassSourceRangeImpl>(
KeyRangeRef(LiteralStringRef("process/class_source/"), LiteralStringRef("process/class_source0"))
.withPrefix(SpecialKeySpace::getModuleRange(SpecialKeySpace::MODULE::CONFIGURATION).begin)));
}
if (apiVersionAtLeast(630)) {
registerSpecialKeySpaceModule(SpecialKeySpace::MODULE::TRANSACTION, SpecialKeySpace::IMPLTYPE::READONLY,
@ -1164,8 +1174,8 @@ void DatabaseContext::setOption( FDBDatabaseOptions::Option option, Optional<Str
break;
case FDBDatabaseOptions::MACHINE_ID:
clientLocality = LocalityData( clientLocality.processId(), value.present() ? Standalone<StringRef>(value.get()) : Optional<Standalone<StringRef>>(), clientLocality.machineId(), clientLocality.dcId() );
if( clientInfo->get().masterProxies.size() )
masterProxies = Reference<ProxyInfo>( new ProxyInfo( clientInfo->get().masterProxies) );
if (clientInfo->get().commitProxies.size())
commitProxies = Reference<CommitProxyInfo>(new CommitProxyInfo(clientInfo->get().commitProxies));
if( clientInfo->get().grvProxies.size() )
grvProxies = Reference<GrvProxyInfo>( new GrvProxyInfo( clientInfo->get().grvProxies ) );
server_interf.clear();
@ -1176,8 +1186,8 @@ void DatabaseContext::setOption( FDBDatabaseOptions::Option option, Optional<Str
break;
case FDBDatabaseOptions::DATACENTER_ID:
clientLocality = LocalityData(clientLocality.processId(), clientLocality.zoneId(), clientLocality.machineId(), value.present() ? Standalone<StringRef>(value.get()) : Optional<Standalone<StringRef>>());
if( clientInfo->get().masterProxies.size() )
masterProxies = Reference<ProxyInfo>( new ProxyInfo( clientInfo->get().masterProxies));
if (clientInfo->get().commitProxies.size())
commitProxies = Reference<CommitProxyInfo>(new CommitProxyInfo(clientInfo->get().commitProxies));
if( clientInfo->get().grvProxies.size() )
grvProxies = Reference<GrvProxyInfo>( new GrvProxyInfo( clientInfo->get().grvProxies ));
server_interf.clear();
@ -1220,13 +1230,13 @@ ACTOR static Future<Void> switchConnectionFileImpl(Reference<ClusterConnectionFi
.detail("ConnectionString", connFile->getConnectionString().toString());
// Reset state from former cluster.
self->masterProxies.clear();
self->commitProxies.clear();
self->grvProxies.clear();
self->minAcceptableReadVersion = std::numeric_limits<Version>::max();
self->invalidateCache(allKeys);
auto clearedClientInfo = self->clientInfo->get();
clearedClientInfo.masterProxies.clear();
clearedClientInfo.commitProxies.clear();
clearedClientInfo.grvProxies.clear();
clearedClientInfo.id = deterministicRandom()->randomUniqueID();
self->clientInfo->set(clearedClientInfo);
@ -1307,7 +1317,7 @@ Database Database::createDatabase( Reference<ClusterConnectionFile> connFile, in
.detail("PackageName", FDB_VT_PACKAGE_NAME)
.detail("ClusterFile", connFile->getFilename().c_str())
.detail("ConnectionString", connFile->getConnectionString().toString())
.detailf("ActualTime", "%lld", DEBUG_DETERMINISM ? 0 : time(NULL))
.detailf("ActualTime", "%lld", DEBUG_DETERMINISM ? 0 : time(nullptr))
.detail("ApiVersion", apiVersion)
.detailf("ImageOffset", "%p", platform::getImageOffset())
.trackLatest("ClientStart");
@ -1561,29 +1571,29 @@ void stopNetwork() {
void DatabaseContext::updateProxies() {
if (proxiesLastChange == clientInfo->get().id) return;
proxiesLastChange = clientInfo->get().id;
masterProxies.clear();
commitProxies.clear();
grvProxies.clear();
bool masterProxyProvisional = false, grvProxyProvisional = false;
if (clientInfo->get().masterProxies.size()) {
masterProxies = Reference<ProxyInfo>(new ProxyInfo(clientInfo->get().masterProxies));
masterProxyProvisional = clientInfo->get().masterProxies[0].provisional;
bool commitProxyProvisional = false, grvProxyProvisional = false;
if (clientInfo->get().commitProxies.size()) {
commitProxies = Reference<CommitProxyInfo>(new CommitProxyInfo(clientInfo->get().commitProxies));
commitProxyProvisional = clientInfo->get().commitProxies[0].provisional;
}
if (clientInfo->get().grvProxies.size()) {
grvProxies = Reference<GrvProxyInfo>(new GrvProxyInfo(clientInfo->get().grvProxies));
grvProxyProvisional = clientInfo->get().grvProxies[0].provisional;
}
if (clientInfo->get().masterProxies.size() && clientInfo->get().grvProxies.size()) {
ASSERT(masterProxyProvisional == grvProxyProvisional);
proxyProvisional = masterProxyProvisional;
if (clientInfo->get().commitProxies.size() && clientInfo->get().grvProxies.size()) {
ASSERT(commitProxyProvisional == grvProxyProvisional);
proxyProvisional = commitProxyProvisional;
}
}
Reference<ProxyInfo> DatabaseContext::getMasterProxies(bool useProvisionalProxies) {
Reference<CommitProxyInfo> DatabaseContext::getCommitProxies(bool useProvisionalProxies) {
updateProxies();
if (proxyProvisional && !useProvisionalProxies) {
return Reference<ProxyInfo>();
return Reference<CommitProxyInfo>();
}
return masterProxies;
return commitProxies;
}
Reference<GrvProxyInfo> DatabaseContext::getGrvProxies(bool useProvisionalProxies) {
@ -1594,19 +1604,19 @@ Reference<GrvProxyInfo> DatabaseContext::getGrvProxies(bool useProvisionalProxie
return grvProxies;
}
//Actor which will wait until the MultiInterface<MasterProxyInterface> returned by the DatabaseContext cx is not NULL
ACTOR Future<Reference<ProxyInfo>> getMasterProxiesFuture(DatabaseContext *cx, bool useProvisionalProxies) {
// Actor which will wait until the MultiInterface<CommitProxyInterface> returned by the DatabaseContext cx is not nullptr
ACTOR Future<Reference<CommitProxyInfo>> getCommitProxiesFuture(DatabaseContext* cx, bool useProvisionalProxies) {
loop{
Reference<ProxyInfo> proxies = cx->getMasterProxies(useProvisionalProxies);
if (proxies)
return proxies;
Reference<CommitProxyInfo> commitProxies = cx->getCommitProxies(useProvisionalProxies);
if (commitProxies)
return commitProxies;
wait( cx->onProxiesChanged() );
}
}
//Returns a future which will not be set until the ProxyInfo of this DatabaseContext is not NULL
Future<Reference<ProxyInfo>> DatabaseContext::getMasterProxiesFuture(bool useProvisionalProxies) {
return ::getMasterProxiesFuture(this, useProvisionalProxies);
// Returns a future which will not be set until the CommitProxyInfo of this DatabaseContext is not nullptr
Future<Reference<CommitProxyInfo>> DatabaseContext::getCommitProxiesFuture(bool useProvisionalProxies) {
return ::getCommitProxiesFuture(this, useProvisionalProxies);
}
void GetRangeLimits::decrement( VectorRef<KeyValueRef> const& data ) {
@ -1733,8 +1743,8 @@ ACTOR Future<pair<KeyRange, Reference<LocationInfo>>> getKeyLocation_internal(Da
++cx->transactionKeyServerLocationRequests;
choose {
when (wait(cx->onProxiesChanged())) {}
when (GetKeyServerLocationsReply rep = wait(basicLoadBalance(
cx->getMasterProxies(info.useProvisionalProxies), &MasterProxyInterface::getKeyServersLocations,
when(GetKeyServerLocationsReply rep = wait(basicLoadBalance(
cx->getCommitProxies(info.useProvisionalProxies), &CommitProxyInterface::getKeyServersLocations,
GetKeyServerLocationsRequest(span.context, key, Optional<KeyRef>(), 100, isBackward, key.arena()),
TaskPriority::DefaultPromiseEndpoint))) {
++cx->transactionKeyServerLocationRequestsCompleted;
@ -1782,8 +1792,8 @@ ACTOR Future<vector<pair<KeyRange, Reference<LocationInfo>>>> getKeyRangeLocatio
++cx->transactionKeyServerLocationRequests;
choose {
when ( wait( cx->onProxiesChanged() ) ) {}
when ( GetKeyServerLocationsReply _rep = wait(basicLoadBalance(
cx->getMasterProxies(info.useProvisionalProxies), &MasterProxyInterface::getKeyServersLocations,
when(GetKeyServerLocationsReply _rep = wait(basicLoadBalance(
cx->getCommitProxies(info.useProvisionalProxies), &CommitProxyInterface::getKeyServersLocations,
GetKeyServerLocationsRequest(span.context, keys.begin, keys.end, limit, reverse, keys.arena()),
TaskPriority::DefaultPromiseEndpoint))) {
++cx->transactionKeyServerLocationRequestsCompleted;
@ -2512,7 +2522,7 @@ ACTOR Future<Standalone<RangeResultRef>> getRange( Database cx, Reference<Transa
GetKeyValuesReply _rep =
wait(loadBalance(cx.getPtr(), beginServer.second, &StorageServerInterface::getKeyValues, req,
TaskPriority::DefaultPromiseEndpoint, false,
cx->enableLocalityLoadBalance ? &cx->queueModel : NULL));
cx->enableLocalityLoadBalance ? &cx->queueModel : nullptr));
rep = _rep;
++cx->transactionPhysicalReadsCompleted;
} catch(Error&) {
@ -3450,14 +3460,16 @@ ACTOR static Future<Void> tryCommit( Database cx, Reference<TransactionLogInfo>
req.debugID = commitID;
state Future<CommitID> reply;
if (options.commitOnFirstProxy) {
if(cx->clientInfo->get().firstProxy.present()) {
reply = throwErrorOr ( brokenPromiseToMaybeDelivered ( cx->clientInfo->get().firstProxy.get().commit.tryGetReply(req) ) );
if (cx->clientInfo->get().firstCommitProxy.present()) {
reply = throwErrorOr(brokenPromiseToMaybeDelivered(
cx->clientInfo->get().firstCommitProxy.get().commit.tryGetReply(req)));
} else {
const std::vector<MasterProxyInterface>& proxies = cx->clientInfo->get().masterProxies;
const std::vector<CommitProxyInterface>& proxies = cx->clientInfo->get().commitProxies;
reply = proxies.size() ? throwErrorOr ( brokenPromiseToMaybeDelivered ( proxies[0].commit.tryGetReply(req) ) ) : Never();
}
} else {
reply = basicLoadBalance( cx->getMasterProxies(info.useProvisionalProxies), &MasterProxyInterface::commit, req, TaskPriority::DefaultPromiseEndpoint, true );
reply = basicLoadBalance(cx->getCommitProxies(info.useProvisionalProxies), &CommitProxyInterface::commit,
req, TaskPriority::DefaultPromiseEndpoint, true);
}
choose {
@ -3531,8 +3543,9 @@ ACTOR static Future<Void> tryCommit( Database cx, Reference<TransactionLogInfo>
// We don't know if the commit happened, and it might even still be in flight.
if (!options.causalWriteRisky) {
// Make sure it's not still in flight, either by ensuring the master we submitted to is dead, or the version we submitted with is dead, or by committing a conflicting transaction successfully
//if ( cx->getMasterProxies()->masterGeneration <= originalMasterGeneration )
// Make sure it's not still in flight, either by ensuring the master we submitted to is dead, or the
// version we submitted with is dead, or by committing a conflicting transaction successfully
// if ( cx->getCommitProxies()->masterGeneration <= originalMasterGeneration )
// To ensure the original request is not in flight, we need a key range which intersects its read conflict ranges
// We pick a key range which also intersects its write conflict ranges, since that avoids potentially creating conflicts where there otherwise would be none
@ -4433,7 +4446,7 @@ ACTOR Future<Standalone<VectorRef<DDMetricsRef>>> waitDataDistributionMetricsLis
choose {
when(wait(cx->onProxiesChanged())) {}
when(ErrorOr<GetDDMetricsReply> rep =
wait(errorOr(basicLoadBalance(cx->getMasterProxies(false), &MasterProxyInterface::getDDMetrics,
wait(errorOr(basicLoadBalance(cx->getCommitProxies(false), &CommitProxyInterface::getDDMetrics,
GetDDMetricsRequest(keys, shardLimit))))) {
if (rep.isError()) {
throw rep.getError();
@ -4539,7 +4552,9 @@ ACTOR Future<Void> snapCreate(Database cx, Standalone<StringRef> snapCmd, UID sn
loop {
choose {
when(wait(cx->onProxiesChanged())) {}
when(wait(basicLoadBalance(cx->getMasterProxies(false), &MasterProxyInterface::proxySnapReq, ProxySnapRequest(snapCmd, snapUID, snapUID), cx->taskID, true /*atmostOnce*/ ))) {
when(wait(basicLoadBalance(cx->getCommitProxies(false), &CommitProxyInterface::proxySnapReq,
ProxySnapRequest(snapCmd, snapUID, snapUID), cx->taskID,
true /*atmostOnce*/))) {
TraceEvent("SnapCreateExit")
.detail("SnapCmd", snapCmd.toString())
.detail("UID", snapUID);
@ -4567,8 +4582,8 @@ ACTOR Future<bool> checkSafeExclusions(Database cx, vector<AddressExclusion> exc
choose {
when(wait(cx->onProxiesChanged())) {}
when(ExclusionSafetyCheckReply _ddCheck =
wait(basicLoadBalance(cx->getMasterProxies(false), &MasterProxyInterface::exclusionSafetyCheckReq,
req, cx->taskID))) {
wait(basicLoadBalance(cx->getCommitProxies(false),
&CommitProxyInterface::exclusionSafetyCheckReq, req, cx->taskID))) {
ddCheck = _ddCheck.safe;
break;
}

View File

@ -30,7 +30,7 @@
#include "flow/flow.h"
#include "flow/TDMetric.actor.h"
#include "fdbclient/FDBTypes.h"
#include "fdbclient/MasterProxyInterface.h"
#include "fdbclient/CommitProxyInterface.h"
#include "fdbclient/FDBOptions.g.h"
#include "fdbclient/CoordinationInterface.h"
#include "fdbclient/ClusterInterface.h"

View File

@ -1338,7 +1338,7 @@ Future< Standalone<RangeResultRef> > ReadYourWritesTransaction::getRange(
if(begin.getKey() > maxKey || end.getKey() > maxKey)
return key_outside_legal_range();
//This optimization prevents NULL operations from being added to the conflict range
//This optimization prevents nullptr operations from being added to the conflict range
if( limits.isReached() ) {
TEST(true); // RYW range read limit 0
return Standalone<RangeResultRef>();
@ -2053,9 +2053,6 @@ void ReadYourWritesTransaction::setOptionImpl( FDBTransactionOptions::Option opt
case FDBTransactionOptions::SPECIAL_KEY_SPACE_ENABLE_WRITES:
validateOptionValue(value, false);
options.specialKeySpaceChangeConfiguration = true;
// By default, it allows to read system keys
// More options will be implicitly enabled if needed when doing set or clear
options.readSystemKeys = true;
break;
default:
break;

View File

@ -47,7 +47,7 @@ const KeyRef JSONSchemas::statusSchema = LiteralStringRef(R"statusSchema(
"storage",
"transaction",
"resolution",
"proxy",
"commit_proxy",
"grv_proxy",
"master",
"test",
@ -84,7 +84,7 @@ const KeyRef JSONSchemas::statusSchema = LiteralStringRef(R"statusSchema(
"role":{
"$enum":[
"master",
"proxy",
"commit_proxy",
"grv_proxy",
"log",
"storage",
@ -278,15 +278,20 @@ const KeyRef JSONSchemas::statusSchema = LiteralStringRef(R"statusSchema(
"run_loop_busy":0.2
}
},
"old_logs":[
{
"logs":[
{
"log_interfaces":[
{
"id":"7f8d623d0cb9966e",
"healthy":true,
"address":"1.2.3.4:1234"
}
],
"epoch":1,
"current":false,
"begin_version":23,
"end_version":112315141,
"possibly_losing_data":true,
"log_replication_factor":3,
"log_write_anti_quorum":0,
"log_fault_tolerance":2,
@ -486,7 +491,7 @@ const KeyRef JSONSchemas::statusSchema = LiteralStringRef(R"statusSchema(
R"statusSchema(
"recovery_state":{
"required_resolvers":1,
"required_proxies":1,
"required_commit_proxies":1,
"required_grv_proxies":1,
"name":{
"$enum":[
@ -675,11 +680,11 @@ const KeyRef JSONSchemas::statusSchema = LiteralStringRef(R"statusSchema(
"address":"10.0.4.1"
}
],
"auto_proxies":3,
"auto_commit_proxies":3,
"auto_grv_proxies":1,
"auto_resolvers":1,
"auto_logs":3,
"proxies":5,
"commit_proxies":5,
"grv_proxies":1,
"backup_worker_enabled":1
},
@ -879,11 +884,11 @@ const KeyRef JSONSchemas::clusterConfigurationSchema = LiteralStringRef(R"config
"ssd-2",
"memory"
]},
"auto_proxies":3,
"auto_commit_proxies":3,
"auto_grv_proxies":1,
"auto_resolvers":1,
"auto_logs":3,
"proxies":5
"commit_proxies":5
"grv_proxies":1
})configSchema");

View File

@ -36,7 +36,9 @@ std::unordered_map<SpecialKeySpace::MODULE, KeyRange> SpecialKeySpace::moduleToB
KeyRangeRef(LiteralStringRef("\xff\xff/metrics/"), LiteralStringRef("\xff\xff/metrics0")) },
{ SpecialKeySpace::MODULE::MANAGEMENT,
KeyRangeRef(LiteralStringRef("\xff\xff/management/"), LiteralStringRef("\xff\xff/management0")) },
{ SpecialKeySpace::MODULE::ERRORMSG, singleKeyRange(LiteralStringRef("\xff\xff/error_message")) }
{ SpecialKeySpace::MODULE::ERRORMSG, singleKeyRange(LiteralStringRef("\xff\xff/error_message")) },
{ SpecialKeySpace::MODULE::CONFIGURATION,
KeyRangeRef(LiteralStringRef("\xff\xff/configuration/"), LiteralStringRef("\xff\xff/configuration0")) }
};
std::unordered_map<std::string, KeyRange> SpecialKeySpace::managementApiCommandToRange = {
@ -48,6 +50,9 @@ std::unordered_map<std::string, KeyRange> SpecialKeySpace::managementApiCommandT
std::set<std::string> SpecialKeySpace::options = { "excluded/force", "failed/force" };
Standalone<RangeResultRef> rywGetRange(ReadYourWritesTransaction* ryw, const KeyRangeRef& kr,
const Standalone<RangeResultRef>& res);
// This function will move the given KeySelector as far as possible to the standard form:
// orEqual == false && offset == 1 (Standard form)
// If the corresponding key is not in the underlying key range, it will move over the range
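
For context on the comment above: a KeySelector in standard form has orEqual == false and offset == 1, i.e. "the first key greater than or equal to k"; any other selector is walked toward that form against the keys that actually exist. A short illustration using the FDBTypes.h helpers (the key "foo" is arbitrary):

// Already in standard form: (foo, orEqual = false, offset = 1).
KeySelector standard = firstGreaterOrEqual(LiteralStringRef("foo"));
// Not in standard form: (foo, orEqual = false, offset = 0), i.e. "the last key strictly before foo";
// the normalization described above resolves it against real keys until it reaches the standard shape.
KeySelector nonStandard = lastLessThan(LiteralStringRef("foo"));
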
@ -456,6 +461,24 @@ Future<Void> SpecialKeySpace::commit(ReadYourWritesTransaction* ryw) {
return commitActor(this, ryw);
}
SKSCTestImpl::SKSCTestImpl(KeyRangeRef kr) : SpecialKeyRangeRWImpl(kr) {}
Future<Standalone<RangeResultRef>> SKSCTestImpl::getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const {
ASSERT(range.contains(kr));
auto resultFuture = ryw->getRange(kr, CLIENT_KNOBS->TOO_MANY);
    // all keys are written to RYW; since GRV is set, the read should happen locally
ASSERT(resultFuture.isReady());
auto result = resultFuture.getValue();
ASSERT(!result.more && result.size() < CLIENT_KNOBS->TOO_MANY);
auto kvs = resultFuture.getValue();
return rywGetRange(ryw, kr, kvs);
}
Future<Optional<std::string>> SKSCTestImpl::commit(ReadYourWritesTransaction* ryw) {
ASSERT(false);
return Optional<std::string>();
}
ReadConflictRangeImpl::ReadConflictRangeImpl(KeyRangeRef kr) : SpecialKeyRangeReadImpl(kr) {}
ACTOR static Future<Standalone<RangeResultRef>> getReadConflictRangeImpl(ReadYourWritesTransaction* ryw, KeyRange kr) {
@ -570,86 +593,82 @@ void ManagementCommandsOptionsImpl::clear(ReadYourWritesTransaction* ryw, const
}
}
Key ManagementCommandsOptionsImpl::decode(const KeyRef& key) const {
// Should never be used
ASSERT(false);
return key;
}
Key ManagementCommandsOptionsImpl::encode(const KeyRef& key) const {
// Should never be used
ASSERT(false);
return key;
}
Future<Optional<std::string>> ManagementCommandsOptionsImpl::commit(ReadYourWritesTransaction* ryw) {
// Nothing to do, keys should be used by other impls' commit callback
return Optional<std::string>();
}
// read from rwModule
ACTOR Future<Standalone<RangeResultRef>> rwModuleGetRangeActor(ReadYourWritesTransaction* ryw,
const SpecialKeyRangeRWImpl* impl, KeyRangeRef kr) {
state KeyRangeRef range = impl->getKeyRange();
Standalone<RangeResultRef> resultWithoutPrefix =
wait(ryw->getRange(ryw->getDatabase()->specialKeySpace->decode(kr), CLIENT_KNOBS->TOO_MANY));
ASSERT(!resultWithoutPrefix.more && resultWithoutPrefix.size() < CLIENT_KNOBS->TOO_MANY);
Standalone<RangeResultRef> rywGetRange(ReadYourWritesTransaction* ryw, const KeyRangeRef& kr,
const Standalone<RangeResultRef>& res) {
// "res" is the read result regardless of your writes, if ryw disabled, return immediately
if (ryw->readYourWritesDisabled()) return res;
// If ryw enabled, we update it with writes from the transaction
Standalone<RangeResultRef> result;
if (ryw->readYourWritesDisabled()) {
for (const KeyValueRef& kv : resultWithoutPrefix)
result.push_back_deep(result.arena(), KeyValueRef(impl->encode(kv.key), kv.value));
} else {
RangeMap<Key, std::pair<bool, Optional<Value>>, KeyRangeRef>::Ranges ranges =
ryw->getSpecialKeySpaceWriteMap().containedRanges(range);
ryw->getSpecialKeySpaceWriteMap().containedRanges(kr);
RangeMap<Key, std::pair<bool, Optional<Value>>, KeyRangeRef>::iterator iter = ranges.begin();
int index = 0;
while (iter != ranges.end()) {
// add all previous entries into result
Key rk = impl->encode(resultWithoutPrefix[index].key);
while (index < resultWithoutPrefix.size() && rk < iter->begin()) {
result.push_back_deep(result.arena(), KeyValueRef(rk, resultWithoutPrefix[index].value));
++index;
}
auto iter2 = res.begin();
result.arena().dependsOn(res.arena());
while (iter != ranges.end() || iter2 != res.end()) {
if (iter == ranges.end()) {
result.push_back(result.arena(), KeyValueRef(iter2->key, iter2->value));
++iter2;
} else if (iter2 == res.end()) {
// insert if it is a set entry
std::pair<bool, Optional<Value>> entry = iter->value();
if (entry.first) {
                // add the written entries if they exist
if (entry.second.present()) {
if (entry.first && entry.second.present()) {
result.push_back_deep(result.arena(), KeyValueRef(iter->begin(), entry.second.get()));
}
// move index to skip all entries in the iter->range
while (index < resultWithoutPrefix.size() &&
iter->range().contains(impl->encode(resultWithoutPrefix[index].key)))
++index;
++iter;
} else if (iter->range().contains(iter2->key)) {
std::pair<bool, Optional<Value>> entry = iter->value();
// if this is a valid range either for set or clear, move iter2 outside the range
if (entry.first) {
// insert if this is a set entry
if (entry.second.present())
result.push_back_deep(result.arena(), KeyValueRef(iter->begin(), entry.second.get()));
// move iter2 outside the range
while (iter2 != res.end() && iter->range().contains(iter2->key)) ++iter2;
}
++iter;
} else if (iter->begin() > iter2->key) {
result.push_back(result.arena(), KeyValueRef(iter2->key, iter2->value));
++iter2;
} else if (iter->end() <= iter2->key) {
// insert if it is a set entry
std::pair<bool, Optional<Value>> entry = iter->value();
if (entry.first && entry.second.present()) {
result.push_back_deep(result.arena(), KeyValueRef(iter->begin(), entry.second.get()));
}
++iter;
}
// add all remaining entries into result
while (index < resultWithoutPrefix.size()) {
const KeyValueRef& kv = resultWithoutPrefix[index];
result.push_back_deep(result.arena(), KeyValueRef(impl->encode(kv.key), kv.value));
++index;
}
}
return result;
}
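
rywGetRange above is a two-cursor merge: iter walks the transaction's special-key-space write map while iter2 walks the already-read underlying result, emitting set entries from the write map, suppressing underlying pairs covered by a write, and passing untouched pairs through in key order. A simplified standalone sketch of the same shape, reduced to point writes over std::map/std::vector (names and types are illustrative, not the FDB ones):

#include <map>
#include <optional>
#include <string>
#include <vector>

using KV = std::pair<std::string, std::string>;

// 'writes': key -> set value, or nullopt for a clear. 'underlying': what a plain read returned, sorted by key.
std::vector<KV> mergeReadWithWrites(const std::map<std::string, std::optional<std::string>>& writes,
                                    const std::vector<KV>& underlying) {
	std::vector<KV> result;
	auto w = writes.begin();
	auto u = underlying.begin();
	while (w != writes.end() || u != underlying.end()) {
		if (w != writes.end() && (u == underlying.end() || w->first <= u->first)) {
			if (w->second) result.push_back({ w->first, *w->second }); // a set shadows the underlying value
			if (u != underlying.end() && u->first == w->first) ++u;    // skip the shadowed underlying pair
			++w;
		} else {
			result.push_back(*u);                                      // untouched underlying pair
			++u;
		}
	}
	return result;
}
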
// read from those readwrite modules in which special keys have one-to-one mapping with real persisted keys
ACTOR Future<Standalone<RangeResultRef>> rwModuleWithMappingGetRangeActor(ReadYourWritesTransaction* ryw,
const SpecialKeyRangeRWImpl* impl,
KeyRangeRef kr) {
Standalone<RangeResultRef> resultWithoutPrefix =
wait(ryw->getTransaction().getRange(ryw->getDatabase()->specialKeySpace->decode(kr), CLIENT_KNOBS->TOO_MANY));
ASSERT(!resultWithoutPrefix.more && resultWithoutPrefix.size() < CLIENT_KNOBS->TOO_MANY);
Standalone<RangeResultRef> result;
for (const KeyValueRef& kv : resultWithoutPrefix)
result.push_back_deep(result.arena(), KeyValueRef(impl->encode(kv.key), kv.value));
return rywGetRange(ryw, kr, result);
}
ExcludeServersRangeImpl::ExcludeServersRangeImpl(KeyRangeRef kr) : SpecialKeyRangeRWImpl(kr) {}
Future<Standalone<RangeResultRef>> ExcludeServersRangeImpl::getRange(ReadYourWritesTransaction* ryw,
KeyRangeRef kr) const {
return rwModuleGetRangeActor(ryw, this, kr);
return rwModuleWithMappingGetRangeActor(ryw, this, kr);
}
void ExcludeServersRangeImpl::set(ReadYourWritesTransaction* ryw, const KeyRef& key, const ValueRef& value) {
ryw->getSpecialKeySpaceWriteMap().insert(key, std::make_pair(true, Optional<Value>(value)));
}
void ExcludeServersRangeImpl::clear(ReadYourWritesTransaction* ryw, const KeyRef& key) {
ryw->getSpecialKeySpaceWriteMap().insert(key, std::make_pair(true, Optional<Value>()));
}
void ExcludeServersRangeImpl::clear(ReadYourWritesTransaction* ryw, const KeyRangeRef& range) {
ryw->getSpecialKeySpaceWriteMap().insert(range, std::make_pair(true, Optional<Value>()));
// ignore value
ryw->getSpecialKeySpaceWriteMap().insert(key, std::make_pair(true, Optional<Value>(ValueRef())));
}
Key ExcludeServersRangeImpl::decode(const KeyRef& key) const {
@ -671,7 +690,7 @@ bool parseNetWorkAddrFromKeys(ReadYourWritesTransaction* ryw, bool failed, std::
while (iter != ranges.end()) {
auto entry = iter->value();
        // only check exclude (set) operations; include (clear) operations are not checked
TraceEvent(SevInfo, "ParseNetworkAddress")
TraceEvent(SevDebug, "ParseNetworkAddress")
.detail("Valid", entry.first)
.detail("Set", entry.second.present())
.detail("Key", iter->begin().toString());
@ -810,7 +829,6 @@ ACTOR Future<bool> checkExclusion(Database db, std::vector<AddressExclusion>* ad
}
void includeServers(ReadYourWritesTransaction* ryw) {
ryw->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
ryw->setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);
ryw->setOption(FDBTransactionOptions::LOCK_AWARE);
ryw->setOption(FDBTransactionOptions::USE_PROVISIONAL_PROXIES);
@ -874,19 +892,12 @@ FailedServersRangeImpl::FailedServersRangeImpl(KeyRangeRef kr) : SpecialKeyRange
Future<Standalone<RangeResultRef>> FailedServersRangeImpl::getRange(ReadYourWritesTransaction* ryw,
KeyRangeRef kr) const {
return rwModuleGetRangeActor(ryw, this, kr);
return rwModuleWithMappingGetRangeActor(ryw, this, kr);
}
void FailedServersRangeImpl::set(ReadYourWritesTransaction* ryw, const KeyRef& key, const ValueRef& value) {
ryw->getSpecialKeySpaceWriteMap().insert(key, std::make_pair(true, Optional<Value>(value)));
}
void FailedServersRangeImpl::clear(ReadYourWritesTransaction* ryw, const KeyRef& key) {
ryw->getSpecialKeySpaceWriteMap().insert(key, std::make_pair(true, Optional<Value>()));
}
void FailedServersRangeImpl::clear(ReadYourWritesTransaction* ryw, const KeyRangeRef& range) {
ryw->getSpecialKeySpaceWriteMap().insert(range, std::make_pair(true, Optional<Value>()));
// ignore value
ryw->getSpecialKeySpaceWriteMap().insert(key, std::make_pair(true, Optional<Value>(ValueRef())));
}
Key FailedServersRangeImpl::decode(const KeyRef& key) const {
@ -943,8 +954,14 @@ ACTOR Future<Standalone<RangeResultRef>> ExclusionInProgressActor(ReadYourWrites
}
}
// sort and remove :tls
std::set<std::string> inProgressAddresses;
for (auto const& address : inProgressExclusion) {
Key addrKey = prefix.withSuffix(address.toString());
inProgressAddresses.insert(formatIpPort(address.ip, address.port));
}
for (auto const& address : inProgressAddresses) {
Key addrKey = prefix.withSuffix(address);
if (kr.contains(addrKey)) {
result.push_back(result.arena(), KeyValueRef(addrKey, ValueRef()));
result.arena().dependsOn(addrKey.arena());
@ -959,3 +976,148 @@ Future<Standalone<RangeResultRef>> ExclusionInProgressRangeImpl::getRange(ReadYo
KeyRangeRef kr) const {
return ExclusionInProgressActor(ryw, getKeyRange().begin, kr);
}
ACTOR Future<Standalone<RangeResultRef>> getProcessClassActor(ReadYourWritesTransaction* ryw, KeyRef prefix,
KeyRangeRef kr) {
vector<ProcessData> _workers = wait(getWorkers(&ryw->getTransaction()));
auto workers = _workers; // strip const
    // Note: sorting by the formatted string is counter-intuitive, e.g. 1.1.1.1:11 < 1.1.1.1:5
std::sort(workers.begin(), workers.end(), [](const ProcessData& lhs, const ProcessData& rhs) {
return formatIpPort(lhs.address.ip, lhs.address.port) < formatIpPort(rhs.address.ip, rhs.address.port);
});
Standalone<RangeResultRef> result;
for (auto& w : workers) {
        // exclude :tls in keys even if the network address is TLS
KeyRef k(prefix.withSuffix(formatIpPort(w.address.ip, w.address.port), result.arena()));
if (kr.contains(k)) {
ValueRef v(result.arena(), w.processClass.toString());
result.push_back(result.arena(), KeyValueRef(k, v));
}
}
return rywGetRange(ryw, kr, result);
}
ACTOR Future<Optional<std::string>> processClassCommitActor(ReadYourWritesTransaction* ryw, KeyRangeRef range) {
// enable related options
ryw->setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);
ryw->setOption(FDBTransactionOptions::LOCK_AWARE);
ryw->setOption(FDBTransactionOptions::USE_PROVISIONAL_PROXIES);
vector<ProcessData> workers = wait(
getWorkers(&ryw->getTransaction())); // make sure we use the Transaction object to avoid used_during_commit()
auto ranges = ryw->getSpecialKeySpaceWriteMap().containedRanges(range);
auto iter = ranges.begin();
while (iter != ranges.end()) {
auto entry = iter->value();
        // only loop through (set) operations; (clear) entries cannot appear here
if (entry.first && entry.second.present()) {
// parse network address
Key address = iter->begin().removePrefix(range.begin);
AddressExclusion addr = AddressExclusion::parse(address);
// parse class type
ValueRef processClassType = entry.second.get();
ProcessClass processClass(processClassType.toString(), ProcessClass::DBSource);
// make sure we use the underlying Transaction object to avoid used_during_commit()
bool foundChange = false;
for (int i = 0; i < workers.size(); i++) {
if (addr.excludes(workers[i].address)) {
if (processClass.classType() != ProcessClass::InvalidClass)
ryw->getTransaction().set(processClassKeyFor(workers[i].locality.processId().get()),
processClassValue(processClass));
else
ryw->getTransaction().clear(processClassKeyFor(workers[i].locality.processId().get()));
foundChange = true;
}
}
if (foundChange)
ryw->getTransaction().set(processClassChangeKey, deterministicRandom()->randomUniqueID().toString());
}
++iter;
}
return Optional<std::string>();
}
ProcessClassRangeImpl::ProcessClassRangeImpl(KeyRangeRef kr) : SpecialKeyRangeRWImpl(kr) {}
Future<Standalone<RangeResultRef>> ProcessClassRangeImpl::getRange(ReadYourWritesTransaction* ryw,
KeyRangeRef kr) const {
return getProcessClassActor(ryw, getKeyRange().begin, kr);
}
Future<Optional<std::string>> ProcessClassRangeImpl::commit(ReadYourWritesTransaction* ryw) {
// Validate network address and process class type
Optional<std::string> errorMsg;
auto ranges = ryw->getSpecialKeySpaceWriteMap().containedRanges(getKeyRange());
auto iter = ranges.begin();
while (iter != ranges.end()) {
auto entry = iter->value();
        // only check setclass (set) operations; (clear) is forbidden and thus cannot appear
if (entry.first && entry.second.present()) {
// validate network address
Key address = iter->begin().removePrefix(range.begin);
AddressExclusion addr = AddressExclusion::parse(address);
if (!addr.isValid()) {
std::string error = "ERROR: \'" + address.toString() + "\' is not a valid network endpoint address\n";
if (address.toString().find(":tls") != std::string::npos)
error += " Do not include the `:tls' suffix when naming a process\n";
errorMsg = ManagementAPIError::toJsonString(false, "setclass", error);
return errorMsg;
}
// validate class type
ValueRef processClassType = entry.second.get();
ProcessClass processClass(processClassType.toString(), ProcessClass::DBSource);
if (processClass.classType() == ProcessClass::InvalidClass &&
processClassType != LiteralStringRef("default")) {
std::string error = "ERROR: \'" + processClassType.toString() + "\' is not a valid process class\n";
errorMsg = ManagementAPIError::toJsonString(false, "setclass", error);
return errorMsg;
}
}
++iter;
}
return processClassCommitActor(ryw, getKeyRange());
}
void throwNotAllowedError(ReadYourWritesTransaction* ryw) {
auto msg = ManagementAPIError::toJsonString(false, "setclass",
"Clear operation is meaningless thus forbidden for setclass");
ryw->setSpecialKeySpaceErrorMsg(msg);
throw special_keys_api_failure();
}
void ProcessClassRangeImpl::clear(ReadYourWritesTransaction* ryw, const KeyRangeRef& range) {
return throwNotAllowedError(ryw);
}
void ProcessClassRangeImpl::clear(ReadYourWritesTransaction* ryw, const KeyRef& key) {
return throwNotAllowedError(ryw);
}
ACTOR Future<Standalone<RangeResultRef>> getProcessClassSourceActor(ReadYourWritesTransaction* ryw, KeyRef prefix,
KeyRangeRef kr) {
vector<ProcessData> _workers = wait(getWorkers(&ryw->getTransaction()));
auto workers = _workers; // strip const
    // Note: sorting by the formatted string is counter-intuitive, e.g. 1.1.1.1:11 < 1.1.1.1:5
std::sort(workers.begin(), workers.end(), [](const ProcessData& lhs, const ProcessData& rhs) {
return formatIpPort(lhs.address.ip, lhs.address.port) < formatIpPort(rhs.address.ip, rhs.address.port);
});
Standalone<RangeResultRef> result;
for (auto& w : workers) {
        // exclude :tls in keys even if the network address is TLS
Key k(prefix.withSuffix(formatIpPort(w.address.ip, w.address.port)));
if (kr.contains(k)) {
Value v(w.processClass.sourceString());
result.push_back(result.arena(), KeyValueRef(k, v));
result.arena().dependsOn(k.arena());
result.arena().dependsOn(v.arena());
}
}
return result;
}
ProcessClassSourceRangeImpl::ProcessClassSourceRangeImpl(KeyRangeRef kr) : SpecialKeyRangeReadImpl(kr) {}
Future<Standalone<RangeResultRef>> ProcessClassSourceRangeImpl::getRange(ReadYourWritesTransaction* ryw,
KeyRangeRef kr) const {
return getProcessClassSourceActor(ryw, getKeyRange().begin, kr);
}
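
Putting the new CONFIGURATION module together: a client can read process classes and request class changes through the special key space instead of touching the \xff system keys directly. A usage sketch against the client API, assuming a ReadYourWritesTransaction tr inside an ACTOR (the address 127.0.0.1:4500 and the class "storage" are arbitrary examples):

tr.setOption(FDBTransactionOptions::SPECIAL_KEY_SPACE_ENABLE_WRITES);
// Read the class of every process; class_type keys are the module prefix plus "ip:port".
Standalone<RangeResultRef> classes =
    wait(tr.getRange(KeyRangeRef(LiteralStringRef("\xff\xff/configuration/process/class_type/"),
                                 LiteralStringRef("\xff\xff/configuration/process/class_type0")),
                     CLIENT_KNOBS->TOO_MANY));
// Ask the worker at 127.0.0.1:4500 to become a storage process.
tr.set(LiteralStringRef("\xff\xff/configuration/process/class_type/127.0.0.1:4500"), LiteralStringRef("storage"));
wait(tr.commit()); // ProcessClassRangeImpl::commit validates the address and class, then writes
                   // processClassKeyFor(...) and processClassChangeKey underneath.
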

View File

@ -67,15 +67,29 @@ private:
class SpecialKeyRangeRWImpl : public SpecialKeyRangeReadImpl {
public:
virtual void set(ReadYourWritesTransaction* ryw, const KeyRef& key, const ValueRef& value) = 0;
virtual void clear(ReadYourWritesTransaction* ryw, const KeyRangeRef& range) = 0;
virtual void clear(ReadYourWritesTransaction* ryw, const KeyRef& key) = 0;
virtual void set(ReadYourWritesTransaction* ryw, const KeyRef& key, const ValueRef& value) {
ryw->getSpecialKeySpaceWriteMap().insert(key, std::make_pair(true, Optional<Value>(value)));
}
virtual void clear(ReadYourWritesTransaction* ryw, const KeyRangeRef& range) {
ryw->getSpecialKeySpaceWriteMap().insert(range, std::make_pair(true, Optional<Value>()));
}
virtual void clear(ReadYourWritesTransaction* ryw, const KeyRef& key) {
ryw->getSpecialKeySpaceWriteMap().insert(key, std::make_pair(true, Optional<Value>()));
}
virtual Future<Optional<std::string>> commit(
ReadYourWritesTransaction* ryw) = 0; // all delayed async operations of writes in special-key-space
// Given the special key to write, return the real key that needs to be modified
virtual Key decode(const KeyRef& key) const = 0;
virtual Key decode(const KeyRef& key) const {
// Default implementation should never be used
ASSERT(false);
return key;
}
// Given the read key, return the corresponding special key
virtual Key encode(const KeyRef& key) const = 0;
virtual Key encode(const KeyRef& key) const {
// Default implementation should never be used
ASSERT(false);
return key;
}
explicit SpecialKeyRangeRWImpl(KeyRangeRef kr) : SpecialKeyRangeReadImpl(kr) {}
@ -125,6 +139,7 @@ class SpecialKeySpace {
public:
enum class MODULE {
CLUSTERFILEPATH,
CONFIGURATION, // Configuration of the cluster
CONNECTIONSTRING,
ERRORMSG, // A single key whose value is a json string describing the last error in special-key-space
MANAGEMENT, // Management-API
@ -201,6 +216,14 @@ private:
void modulesBoundaryInit();
};
// Used for SpecialKeySpaceCorrectnessWorkload
class SKSCTestImpl : public SpecialKeyRangeRWImpl {
public:
explicit SKSCTestImpl(KeyRangeRef kr);
Future<Standalone<RangeResultRef>> getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const override;
Future<Optional<std::string>> commit(ReadYourWritesTransaction* ryw) override;
};
// Use special key prefix "\xff\xff/transaction/conflicting_keys/<some_key>",
// to retrieve keys which caused the latest not_committed (conflicting with another transaction) error.
// The returned key-value pairs are interpreted as:
@ -238,8 +261,6 @@ public:
void set(ReadYourWritesTransaction* ryw, const KeyRef& key, const ValueRef& value) override;
void clear(ReadYourWritesTransaction* ryw, const KeyRangeRef& range) override;
void clear(ReadYourWritesTransaction* ryw, const KeyRef& key) override;
Key decode(const KeyRef& key) const override;
Key encode(const KeyRef& key) const override;
Future<Optional<std::string>> commit(ReadYourWritesTransaction* ryw) override;
};
@ -248,8 +269,6 @@ public:
explicit ExcludeServersRangeImpl(KeyRangeRef kr);
Future<Standalone<RangeResultRef>> getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const override;
void set(ReadYourWritesTransaction* ryw, const KeyRef& key, const ValueRef& value) override;
void clear(ReadYourWritesTransaction* ryw, const KeyRangeRef& range) override;
void clear(ReadYourWritesTransaction* ryw, const KeyRef& key) override;
Key decode(const KeyRef& key) const override;
Key encode(const KeyRef& key) const override;
Future<Optional<std::string>> commit(ReadYourWritesTransaction* ryw) override;
@ -260,8 +279,6 @@ public:
explicit FailedServersRangeImpl(KeyRangeRef kr);
Future<Standalone<RangeResultRef>> getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const override;
void set(ReadYourWritesTransaction* ryw, const KeyRef& key, const ValueRef& value) override;
void clear(ReadYourWritesTransaction* ryw, const KeyRangeRef& range) override;
void clear(ReadYourWritesTransaction* ryw, const KeyRef& key) override;
Key decode(const KeyRef& key) const override;
Key encode(const KeyRef& key) const override;
Future<Optional<std::string>> commit(ReadYourWritesTransaction* ryw) override;
@ -273,5 +290,20 @@ public:
Future<Standalone<RangeResultRef>> getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const override;
};
class ProcessClassRangeImpl : public SpecialKeyRangeRWImpl {
public:
explicit ProcessClassRangeImpl(KeyRangeRef kr);
Future<Standalone<RangeResultRef>> getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const override;
Future<Optional<std::string>> commit(ReadYourWritesTransaction* ryw) override;
void clear(ReadYourWritesTransaction* ryw, const KeyRangeRef& range) override;
void clear(ReadYourWritesTransaction* ryw, const KeyRef& key) override;
};
class ProcessClassSourceRangeImpl : public SpecialKeyRangeReadImpl {
public:
explicit ProcessClassSourceRangeImpl(KeyRangeRef kr);
Future<Standalone<RangeResultRef>> getRange(ReadYourWritesTransaction* ryw, KeyRangeRef kr) const override;
};
#include "flow/unactorcompiler.h"
#endif

View File

@ -260,10 +260,10 @@ extern const KeyRangeRef logRangesRange;
Key logRangesEncodeKey(KeyRef keyBegin, UID logUid);
// Returns the start key and optionally the logRange Uid
KeyRef logRangesDecodeKey(KeyRef key, UID* logUid = NULL);
KeyRef logRangesDecodeKey(KeyRef key, UID* logUid = nullptr);
// Returns the end key and optionally the key prefix
Key logRangesDecodeValue(KeyRef keyValue, Key* destKeyPrefix = NULL);
Key logRangesDecodeValue(KeyRef keyValue, Key* destKeyPrefix = nullptr);
// Returns the encoded key value comprised of the end key and destination prefix
Key logRangesEncodeValue(KeyRef keyEnd, KeyRef destPath);

View File

@ -19,7 +19,7 @@
*/
#include "fdbclient/TagThrottle.h"
#include "fdbclient/MasterProxyInterface.h"
#include "fdbclient/CommitProxyInterface.h"
#include "fdbclient/DatabaseContext.h"
#include "flow/actorcompiler.h" // has to be last include
@ -104,7 +104,7 @@ TagThrottleKey TagThrottleKey::fromKey(const KeyRef& key) {
TagThrottleValue TagThrottleValue::fromValue(const ValueRef& value) {
TagThrottleValue throttleValue;
BinaryReader reader(value, IncludeVersion());
BinaryReader reader(value, IncludeVersion(ProtocolVersion::withTagThrottleValueReason()));
reader >> throttleValue;
return throttleValue;
}
@ -228,7 +228,7 @@ namespace ThrottleApi {
}
TagThrottleValue throttle(tpsRate, expirationTime.present() ? expirationTime.get() : 0, initialDuration,
reason.present() ? reason.get() : TagThrottledReason::UNSET);
BinaryWriter wr(IncludeVersion(ProtocolVersion::withTagThrottleValue()));
BinaryWriter wr(IncludeVersion(ProtocolVersion::withTagThrottleValueReason()));
wr << throttle;
state Value value = wr.toValue();
@ -347,6 +347,7 @@ namespace ThrottleApi {
removed = true;
tr.clear(tag.key);
unthrottledTags ++;
}
if(manualUnthrottledTags > 0) {

View File

@ -1249,6 +1249,6 @@ ACTOR Future<Key> getCompletionKey(TaskCompletionKey *self, Future<Reference<Tas
}
Future<Key> TaskCompletionKey::get(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> taskBucket) {
ASSERT(key.present() == (joinFuture.getPtr() == NULL));
ASSERT(key.present() == (joinFuture.getPtr() == nullptr));
return key.present() ? key.get() : getCompletionKey(this, joinFuture->joinedFuture(tr, taskBucket));
}

View File

@ -84,12 +84,12 @@ ThreadSafeDatabase::ThreadSafeDatabase(std::string connFilename, int apiVersion)
catch(...) {
new (db) DatabaseContext(unknown_error());
}
}, NULL);
}, nullptr);
}
ThreadSafeDatabase::~ThreadSafeDatabase() {
DatabaseContext *db = this->db;
onMainThreadVoid( [db](){ db->delref(); }, NULL );
onMainThreadVoid( [db](){ db->delref(); }, nullptr );
}
ThreadSafeTransaction::ThreadSafeTransaction(DatabaseContext* cx) {
@ -107,18 +107,18 @@ ThreadSafeTransaction::ThreadSafeTransaction(DatabaseContext* cx) {
cx->addref();
new (tr) ReadYourWritesTransaction(Database(cx));
},
NULL);
nullptr);
}
ThreadSafeTransaction::~ThreadSafeTransaction() {
ReadYourWritesTransaction *tr = this->tr;
if (tr)
onMainThreadVoid( [tr](){ tr->delref(); }, NULL );
onMainThreadVoid( [tr](){ tr->delref(); }, nullptr );
}
void ThreadSafeTransaction::cancel() {
ReadYourWritesTransaction *tr = this->tr;
onMainThreadVoid( [tr](){ tr->cancel(); }, NULL );
onMainThreadVoid( [tr](){ tr->cancel(); }, nullptr );
}
void ThreadSafeTransaction::setVersion( Version v ) {
@ -328,17 +328,17 @@ ThreadFuture<Void> ThreadSafeTransaction::onError( Error const& e ) {
void ThreadSafeTransaction::operator=(ThreadSafeTransaction&& r) noexcept {
tr = r.tr;
r.tr = NULL;
r.tr = nullptr;
}
ThreadSafeTransaction::ThreadSafeTransaction(ThreadSafeTransaction&& r) noexcept {
tr = r.tr;
r.tr = NULL;
r.tr = nullptr;
}
void ThreadSafeTransaction::reset() {
ReadYourWritesTransaction *tr = this->tr;
onMainThreadVoid( [tr](){ tr->reset(); }, NULL );
onMainThreadVoid( [tr](){ tr->reset(); }, nullptr );
}
extern const char* getSourceVersion();

View File

@ -96,7 +96,7 @@ public:
ThreadFuture<Void> onError( Error const& e ) override;
// These are to permit use as state variables in actors:
ThreadSafeTransaction() : tr(NULL) {}
ThreadSafeTransaction() : tr(nullptr) {}
void operator=(ThreadSafeTransaction&& r) noexcept;
ThreadSafeTransaction(ThreadSafeTransaction&& r) noexcept;
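
Aside: the move operations above (and the TransactionImpl ones earlier in this commit) follow the same pattern: copy the raw pointer, then null out the source so only one owner ever releases it. A minimal standalone sketch of that pattern (hypothetical, not part of this commit):

#include <cstdio>
#include <utility>

struct Handle { const char* name; };

struct Owner {
	Handle* h;
	Owner() : h(nullptr) {}
	explicit Owner(Handle* h) : h(h) {}
	Owner(Owner&& r) noexcept : h(r.h) { r.h = nullptr; }          // steal the handle, then null the source
	Owner& operator=(Owner&& r) noexcept { h = r.h; r.h = nullptr; return *this; }
	~Owner() { if (h) std::printf("releasing %s\n", h->name); }    // only the surviving owner releases
};

int main() {
	Handle real{ "tr" };
	Owner a(&real);
	Owner b(std::move(a)); // a.h is now nullptr, so 'a' releases nothing on destruction
	return 0;
}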

View File

@ -802,7 +802,7 @@ public:
void validate() {
int count=0, height=0;
PTreeImpl::validate<MapPair<K,std::pair<T,Version>>>( root, at, NULL, NULL, count, height );
PTreeImpl::validate<MapPair<K,std::pair<T,Version>>>( root, at, nullptr, nullptr, count, height );
if ( height > 100 )
TraceEvent(SevWarnAlways, "DiabolicalPTreeSize").detail("Size", count).detail("Height", height);
}
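
Aside: most hunks in this commit mechanically replace NULL with nullptr. A small standalone example (hypothetical, not part of this commit) of why nullptr is the safer spelling during overload resolution:

#include <cstdio>

void f(int)         { std::puts("f(int)"); }
void f(const char*) { std::puts("f(const char*)"); }

int main() {
	// f(NULL) would pick f(int) or be ambiguous, because NULL is an integral constant;
	// nullptr has its own type (std::nullptr_t), so the pointer overload is chosen.
	f(nullptr);
	f(0); // still resolves to f(int)
	return 0;
}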

View File

@ -195,7 +195,7 @@ description is not currently required but encouraged.
<Option name="next_write_no_write_conflict_range" code="30"
description="The next write performed on this transaction will not generate a write conflict range. As a result, other transactions which read the key(s) being modified by the next write will not conflict with this transaction. Care needs to be taken when using this option on a transaction that is shared between multiple threads. When setting this option, write conflict ranges will be disabled on the next write operation, regardless of what thread it is on." />
<Option name="commit_on_first_proxy" code="40"
description="Committing this transaction will bypass the normal load balancing across proxies and go directly to the specifically nominated 'first proxy'."
description="Committing this transaction will bypass the normal load balancing across commit proxies and go directly to the specifically nominated 'first commit proxy'."
hidden="true" />
<Option name="check_writes_enable" code="50"
hidden="true" />

View File

@ -96,7 +96,7 @@ void monitor_fd( fdb_fd_set list, int fd, int* maxfd, void* cmd ) {
/* ignore maxfd */
struct kevent ev;
EV_SET( &ev, fd, EVFILT_READ, EV_ADD, 0, 0, cmd );
kevent( list, &ev, 1, NULL, 0, NULL ); // FIXME: check?
kevent( list, &ev, 1, nullptr, 0, nullptr ); // FIXME: check?
#endif
}
@ -105,15 +105,15 @@ void unmonitor_fd( fdb_fd_set list, int fd ) {
FD_CLR( fd, list );
#elif defined(__APPLE__) || defined(__FreeBSD__)
struct kevent ev;
EV_SET( &ev, fd, EVFILT_READ, EV_DELETE, 0, 0, NULL );
kevent( list, &ev, 1, NULL, 0, NULL ); // FIXME: check?
EV_SET( &ev, fd, EVFILT_READ, EV_DELETE, 0, 0, nullptr );
kevent( list, &ev, 1, nullptr, 0, nullptr ); // FIXME: check?
#endif
}
double get_cur_timestamp() {
struct tm tm_info;
struct timeval tv;
gettimeofday(&tv, NULL);
gettimeofday(&tv, nullptr);
localtime_r(&tv.tv_sec, &tm_info);
return tv.tv_sec + 1e-6*tv.tv_usec;
@ -182,14 +182,14 @@ void log_err(const char* func, int err, const char* format, ...) {
}
const char* get_value_multi(const CSimpleIni& ini, const char* key, ...) {
const char* ret = NULL;
const char* section = NULL;
const char* ret = nullptr;
const char* section = nullptr;
va_list ap;
va_start(ap, key);
while (!ret && (section = va_arg(ap, const char *)))
ret = ini.GetValue(section, key, NULL);
ret = ini.GetValue(section, key, nullptr);
va_end(ap);
@ -378,8 +378,8 @@ public:
// one pair for each of stdout and stderr
int pipes[2][2];
Command() : argv(NULL) { }
Command(const CSimpleIni& ini, std::string _section, uint64_t id, fdb_fd_set fds, int* maxfd) : section(_section), argv(NULL), fork_retry_time(-1), quiet(false), delete_envvars(NULL), fds(fds), deconfigured(false), kill_on_configuration_change(true) {
Command() : argv(nullptr) { }
Command(const CSimpleIni& ini, std::string _section, uint64_t id, fdb_fd_set fds, int* maxfd) : section(_section), argv(nullptr), fork_retry_time(-1), quiet(false), delete_envvars(nullptr), fds(fds), deconfigured(false), kill_on_configuration_change(true) {
char _ssection[strlen(section.c_str()) + 22];
snprintf(_ssection, strlen(section.c_str()) + 22, "%s.%" PRIu64, section.c_str(), id);
ssection = _ssection;
@ -410,7 +410,7 @@ public:
last_start = 0;
char* endptr;
const char* rd = get_value_multi(ini, "restart_delay", ssection.c_str(), section.c_str(), "general", "fdbmonitor", NULL);
const char* rd = get_value_multi(ini, "restart_delay", ssection.c_str(), section.c_str(), "general", "fdbmonitor", nullptr);
if (!rd) {
log_msg(SevError, "Unable to resolve restart delay for %s\n", ssection.c_str());
return;
@ -423,7 +423,7 @@ public:
}
}
const char* mrd = get_value_multi(ini, "initial_restart_delay", ssection.c_str(), section.c_str(), "general", "fdbmonitor", NULL);
const char* mrd = get_value_multi(ini, "initial_restart_delay", ssection.c_str(), section.c_str(), "general", "fdbmonitor", nullptr);
if (!mrd) {
initial_restart_delay = 0;
}
@ -437,7 +437,7 @@ public:
current_restart_delay = initial_restart_delay;
const char* rbo = get_value_multi(ini, "restart_backoff", ssection.c_str(), section.c_str(), "general", "fdbmonitor", NULL);
const char* rbo = get_value_multi(ini, "restart_backoff", ssection.c_str(), section.c_str(), "general", "fdbmonitor", nullptr);
if(!rbo) {
restart_backoff = max_restart_delay;
}
@ -453,7 +453,7 @@ public:
}
}
const char* rdri = get_value_multi(ini, "restart_delay_reset_interval", ssection.c_str(), section.c_str(), "general", "fdbmonitor", NULL);
const char* rdri = get_value_multi(ini, "restart_delay_reset_interval", ssection.c_str(), section.c_str(), "general", "fdbmonitor", nullptr);
if (!rdri) {
restart_delay_reset_interval = max_restart_delay;
}
@ -465,19 +465,19 @@ public:
}
}
const char* q = get_value_multi(ini, "disable_lifecycle_logging", ssection.c_str(), section.c_str(), "general", NULL);
const char* q = get_value_multi(ini, "disable_lifecycle_logging", ssection.c_str(), section.c_str(), "general", nullptr);
if (q && !strcmp(q, "true"))
quiet = true;
const char* del_env = get_value_multi(ini, "delete_envvars", ssection.c_str(), section.c_str(), "general", NULL);
const char* del_env = get_value_multi(ini, "delete_envvars", ssection.c_str(), section.c_str(), "general", nullptr);
delete_envvars = del_env;
const char* kocc = get_value_multi(ini, "kill_on_configuration_change", ssection.c_str(), section.c_str(), "general", NULL);
const char* kocc = get_value_multi(ini, "kill_on_configuration_change", ssection.c_str(), section.c_str(), "general", nullptr);
if(kocc && strcmp(kocc, "true")) {
kill_on_configuration_change = false;
}
const char* binary = get_value_multi(ini, "command", ssection.c_str(), section.c_str(), "general", NULL);
const char* binary = get_value_multi(ini, "command", ssection.c_str(), section.c_str(), "general", nullptr);
if (!binary) {
log_msg(SevError, "Unable to resolve command for %s\n", ssection.c_str());
return;
@ -495,7 +495,7 @@ public:
continue;
}
std::string opt = get_value_multi(ini, i.pItem, ssection.c_str(), section.c_str(), "general", NULL);
std::string opt = get_value_multi(ini, i.pItem, ssection.c_str(), section.c_str(), "general", nullptr);
std::size_t pos = 0;
@ -520,7 +520,7 @@ public:
for (auto itr : commands) {
argv[i++] = strdup(itr.c_str());
}
argv[i] = NULL;
argv[i] = nullptr;
}
~Command() {
delete[] argv;
@ -609,7 +609,7 @@ void start_process(Command* cmd, uint64_t id, uid_t uid, gid_t gid, int delay, s
dup2( cmd->pipes[0][1], fileno(stdout) );
dup2( cmd->pipes[1][1], fileno(stderr) );
if(cmd->delete_envvars != NULL && std::strlen(cmd->delete_envvars) > 0) {
if(cmd->delete_envvars != nullptr && std::strlen(cmd->delete_envvars) > 0) {
std::string vars(cmd->delete_envvars);
size_t start = 0;
do {
@ -630,7 +630,7 @@ void start_process(Command* cmd, uint64_t id, uid_t uid, gid_t gid, int delay, s
#ifdef __linux__
signal(SIGCHLD, SIG_DFL);
sigprocmask(SIG_SETMASK, mask, NULL);
sigprocmask(SIG_SETMASK, mask, nullptr);
/* death of our parent raises SIGHUP */
prctl(PR_SET_PDEATHSIG, SIGHUP);
@ -722,7 +722,7 @@ bool argv_equal(const char** a1, const char** a2)
i++;
}
if (a1[i] != NULL || a2[i] != NULL)
if (a1[i] != nullptr || a2[i] != nullptr)
return false;
return true;
}
@ -734,7 +734,7 @@ void kill_process(uint64_t id, bool wait = true) {
kill(pid, SIGTERM);
if(wait) {
waitpid(pid, NULL, 0);
waitpid(pid, nullptr, 0);
}
pid_id.erase(pid);
@ -758,8 +758,8 @@ void load_conf(const char* confpath, uid_t &uid, gid_t &gid, sigset_t* mask, fdb
uid_t _uid;
gid_t _gid;
const char* user = ini.GetValue("fdbmonitor", "user", NULL);
const char* group = ini.GetValue("fdbmonitor", "group", NULL);
const char* user = ini.GetValue("fdbmonitor", "user", nullptr);
const char* group = ini.GetValue("fdbmonitor", "group", nullptr);
if (user) {
errno = 0;
@ -924,8 +924,8 @@ void watch_conf_dir( int kq, int* confd_fd, std::string confdir ) {
while(true) {
/* If already watching, drop it and close */
if ( *confd_fd >= 0 ) {
EV_SET( &ev, *confd_fd, EVFILT_VNODE, EV_DELETE, NOTE_WRITE, 0, NULL );
kevent( kq, &ev, 1, NULL, 0, NULL );
EV_SET( &ev, *confd_fd, EVFILT_VNODE, EV_DELETE, NOTE_WRITE, 0, nullptr );
kevent( kq, &ev, 1, nullptr, 0, nullptr );
close( *confd_fd );
}
@ -939,8 +939,8 @@ void watch_conf_dir( int kq, int* confd_fd, std::string confdir ) {
}
if ( *confd_fd >= 0 ) {
EV_SET( &ev, *confd_fd, EVFILT_VNODE, EV_ADD | EV_CLEAR, NOTE_WRITE, 0, NULL );
kevent( kq, &ev, 1, NULL, 0, NULL );
EV_SET( &ev, *confd_fd, EVFILT_VNODE, EV_ADD | EV_CLEAR, NOTE_WRITE, 0, nullptr );
kevent( kq, &ev, 1, nullptr, 0, nullptr );
/* If our child appeared since we last tested it, start over from the beginning */
if ( confdir != child && (access(child.c_str(), F_OK) == 0 || errno != ENOENT) ) {
@ -964,16 +964,16 @@ void watch_conf_file( int kq, int* conff_fd, const char* confpath ) {
/* If already watching, drop it and close */
if ( *conff_fd >= 0 ) {
EV_SET( &ev, *conff_fd, EVFILT_VNODE, EV_DELETE, NOTE_WRITE | NOTE_ATTRIB, 0, NULL );
kevent( kq, &ev, 1, NULL, 0, NULL );
EV_SET( &ev, *conff_fd, EVFILT_VNODE, EV_DELETE, NOTE_WRITE | NOTE_ATTRIB, 0, nullptr );
kevent( kq, &ev, 1, nullptr, 0, nullptr );
close( *conff_fd );
}
/* Open and watch */
*conff_fd = open( confpath, O_EVTONLY );
if ( *conff_fd >= 0 ) {
EV_SET( &ev, *conff_fd, EVFILT_VNODE, EV_ADD | EV_CLEAR, NOTE_WRITE | NOTE_ATTRIB, 0, NULL );
kevent( kq, &ev, 1, NULL, 0, NULL );
EV_SET( &ev, *conff_fd, EVFILT_VNODE, EV_ADD | EV_CLEAR, NOTE_WRITE | NOTE_ATTRIB, 0, nullptr );
kevent( kq, &ev, 1, nullptr, 0, nullptr );
}
}
#endif
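
Aside: for readers unfamiliar with the kqueue calls being converted above, a minimal standalone sketch (hypothetical, macOS/FreeBSD only, not part of this commit; the conf path is made up) of the EV_SET/kevent registration pattern used by the config-file watcher:

#include <sys/event.h>
#include <fcntl.h>
#include <unistd.h>
#include <cstdio>

int main() {
	int kq = kqueue();
	int fd = open("/tmp/example.conf", O_RDONLY); // hypothetical path
	if (kq < 0 || fd < 0) { std::perror("setup"); return 1; }

	struct kevent ev;
	// Watch the file for writes and attribute changes, as fdbmonitor does for its conf file.
	EV_SET(&ev, fd, EVFILT_VNODE, EV_ADD | EV_CLEAR, NOTE_WRITE | NOTE_ATTRIB, 0, nullptr);
	kevent(kq, &ev, 1, nullptr, 0, nullptr);

	// Block until one event is delivered, then report it.
	struct kevent out;
	if (kevent(kq, nullptr, 0, &out, 1, nullptr) > 0)
		std::printf("conf file changed (fflags=%#x)\n", out.fflags);

	close(fd);
	close(kq);
	return 0;
}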
@ -1194,7 +1194,7 @@ int main(int argc, char** argv) {
lockfile = args.OptionArg();
break;
case OPT_LOGGROUP:
if(strchr(args.OptionArg(), '"') != NULL) {
if(strchr(args.OptionArg(), '"') != nullptr) {
log_msg(SevError, "Invalid log group '%s', cannot contain '\"'\n", args.OptionArg());
exit(1);
}
@ -1226,9 +1226,9 @@ int main(int argc, char** argv) {
_confpath = joinPath(buf, _confpath);
}
// Guaranteed (if non-NULL) to be an absolute path with no
// Guaranteed (if non-nullptr) to be an absolute path with no
// symbolic link, /./ or /../ components
const char *p = realpath(_confpath.c_str(), NULL);
const char *p = realpath(_confpath.c_str(), nullptr);
if (!p) {
log_msg(SevError, "No configuration file at %s\n", _confpath.c_str());
exit(1);
@ -1351,14 +1351,14 @@ int main(int argc, char** argv) {
struct kevent ev;
EV_SET( &ev, SIGHUP, EVFILT_SIGNAL, EV_ADD, 0, 0, NULL);
kevent( kq, &ev, 1, NULL, 0, NULL );
EV_SET( &ev, SIGINT, EVFILT_SIGNAL, EV_ADD, 0, 0, NULL);
kevent( kq, &ev, 1, NULL, 0, NULL );
EV_SET( &ev, SIGTERM, EVFILT_SIGNAL, EV_ADD, 0, 0, NULL);
kevent( kq, &ev, 1, NULL, 0, NULL );
EV_SET( &ev, SIGCHLD, EVFILT_SIGNAL, EV_ADD, 0, 0, NULL);
kevent( kq, &ev, 1, NULL, 0, NULL );
EV_SET( &ev, SIGHUP, EVFILT_SIGNAL, EV_ADD, 0, 0, nullptr);
kevent( kq, &ev, 1, nullptr, 0, nullptr );
EV_SET( &ev, SIGINT, EVFILT_SIGNAL, EV_ADD, 0, 0, nullptr);
kevent( kq, &ev, 1, nullptr, 0, nullptr );
EV_SET( &ev, SIGTERM, EVFILT_SIGNAL, EV_ADD, 0, 0, nullptr);
kevent( kq, &ev, 1, nullptr, 0, nullptr );
EV_SET( &ev, SIGCHLD, EVFILT_SIGNAL, EV_ADD, 0, 0, nullptr);
kevent( kq, &ev, 1, nullptr, 0, nullptr );
int confd_fd = -1;
int conff_fd = -1;
@ -1383,7 +1383,7 @@ int main(int argc, char** argv) {
pselect, but none blocks all signals while processing events */
sigprocmask(SIG_SETMASK, &full_mask, &normal_mask);
#elif defined(__APPLE__) || defined(__FreeBSD__)
sigprocmask(0, NULL, &normal_mask);
sigprocmask(0, nullptr, &normal_mask);
#endif
#if defined(__APPLE__) || defined(__FreeBSD__)
@ -1474,10 +1474,10 @@ int main(int argc, char** argv) {
srfds = rfds;
nfds = 0;
if(timeout < 0) {
nfds = pselect(maxfd+1, &srfds, NULL, NULL, NULL, &normal_mask);
nfds = pselect(maxfd+1, &srfds, nullptr, nullptr, nullptr, &normal_mask);
}
else if(timeout > 0) {
nfds = pselect(maxfd+1, &srfds, NULL, NULL, &tv, &normal_mask);
nfds = pselect(maxfd+1, &srfds, nullptr, nullptr, &tv, &normal_mask);
}
if(nfds == 0) {
@ -1486,10 +1486,10 @@ int main(int argc, char** argv) {
#elif defined(__APPLE__) || defined(__FreeBSD__)
int nev = 0;
if(timeout < 0) {
nev = kevent( kq, NULL, 0, &ev, 1, NULL );
nev = kevent( kq, nullptr, 0, &ev, 1, nullptr );
}
else if(timeout > 0) {
nev = kevent( kq, NULL, 0, &ev, 1, &tv );
nev = kevent( kq, nullptr, 0, &ev, 1, &tv );
}
if(nev == 0) {
@ -1503,8 +1503,8 @@ int main(int argc, char** argv) {
// This could be the conf dir or conf file
if ( ev.ident == confd_fd ) {
/* Changes in the directory holding the conf file; schedule a future timeout to reset watches and reload the conf */
EV_SET( &timeout, 1, EVFILT_TIMER, EV_ADD | EV_ONESHOT, 0, 200, NULL );
kevent( kq, &timeout, 1, NULL, 0, NULL );
EV_SET( &timeout, 1, EVFILT_TIMER, EV_ADD | EV_ONESHOT, 0, 200, nullptr );
kevent( kq, &timeout, 1, nullptr, 0, nullptr );
} else {
/* Direct writes to the conf file; reload! */
reload = true;
@ -1559,7 +1559,7 @@ int main(int argc, char** argv) {
/* Unblock signals */
signal(SIGCHLD, SIG_IGN);
sigprocmask(SIG_SETMASK, &normal_mask, NULL);
sigprocmask(SIG_SETMASK, &normal_mask, nullptr);
/* If daemonized, setsid() was called earlier so we can just kill our entire new process group */
if(daemonize) {
@ -1578,7 +1578,7 @@ int main(int argc, char** argv) {
/* Wait for all child processes (says POSIX.1-2001) */
/* POSIX.1-2001 specifies that if the disposition of SIGCHLD is set to SIG_IGN, then children that terminate do not become zombies and a call to wait()
will block until all children have terminated, and then fail with errno set to ECHILD */
wait(NULL);
wait(nullptr);
unlink(lockfile.c_str());
exit(0);
@ -1617,7 +1617,7 @@ int main(int argc, char** argv) {
if(search != additional_watch_wds.end() && event->len && search->second.count(event->name)) {
log_msg(SevInfo, "Changes detected on watched symlink `%s': (%d, %#010x)\n", event->name, event->wd, event->mask);
char *redone_confpath = realpath(_confpath.c_str(), NULL);
char *redone_confpath = realpath(_confpath.c_str(), nullptr);
if(!redone_confpath) {
log_msg(SevInfo, "Error calling realpath on `%s', continuing...\n", _confpath.c_str());
// exit(1);

View File

@ -46,7 +46,7 @@ class AsyncFileEIO : public IAsyncFile, public ReferenceCounted<AsyncFileEIO> {
public:
static void init() {
eio_set_max_parallel(FLOW_KNOBS->EIO_MAX_PARALLELISM);
if (eio_init( &eio_want_poll, NULL )) {
if (eio_init( &eio_want_poll, nullptr )) {
TraceEvent("EioInitError").detail("ErrorNo", errno);
throw platform_error();
}
@ -423,8 +423,8 @@ private:
static void eio_want_poll() {
want_poll = 1;
// SOMEDAY: NULL for deferred error, no analysis of correctness (itp)
onMainThreadVoid([](){ poll_eio(); }, NULL, TaskPriority::PollEIO);
// SOMEDAY: nullptr for deferred error, no analysis of correctness (itp)
onMainThreadVoid([](){ poll_eio(); }, nullptr, TaskPriority::PollEIO);
}
static int eio_callback( eio_req* req ) {

View File

@ -55,12 +55,12 @@ public:
HANDLE h = CreateFile( open_filename.c_str(),
GENERIC_READ | ((flags&OPEN_READWRITE) ? GENERIC_WRITE : 0),
FILE_SHARE_READ|FILE_SHARE_WRITE|FILE_SHARE_DELETE, NULL,
FILE_SHARE_READ|FILE_SHARE_WRITE|FILE_SHARE_DELETE, nullptr,
(flags&OPEN_EXCLUSIVE) ? CREATE_NEW :
(flags&OPEN_CREATE) ? OPEN_ALWAYS :
OPEN_EXISTING,
FILE_ATTRIBUTE_NORMAL | FILE_FLAG_OVERLAPPED | FILE_FLAG_NO_BUFFERING,
NULL );
nullptr );
if (h == INVALID_HANDLE_VALUE) {
bool notFound = GetLastError() == ERROR_FILE_NOT_FOUND;
Error e = notFound ? file_not_found() : io_error();
@ -141,7 +141,7 @@ public:
}
Future<Void> truncate(int64_t size) override {
// FIXME: Possibly use SetFileInformationByHandle( file.native_handle(), FileEndOfFileInfo, ... ) instead
if (!SetFilePointerEx( file.native_handle(), *(LARGE_INTEGER*)&size, NULL, FILE_BEGIN ))
if (!SetFilePointerEx( file.native_handle(), *(LARGE_INTEGER*)&size, nullptr, FILE_BEGIN ))
throw io_error();
if (!SetEndOfFile(file.native_handle()))
throw io_error();

View File

@ -177,7 +177,7 @@ Future< REPLY_TYPE(Request) > loadBalance(
Request request = Request(),
TaskPriority taskID = TaskPriority::DefaultPromiseEndpoint,
bool atMostOnce = false, // if true, throws request_maybe_delivered() instead of retrying automatically
QueueModel* model = NULL)
QueueModel* model = nullptr)
{
state Future<Optional<REPLY_TYPE(Request)>> firstRequest;
state Optional<uint64_t> firstRequestEndpoint;
@ -296,7 +296,7 @@ Future< REPLY_TYPE(Request) > loadBalance(
}
// Find an alternative, if any, that is not failed, starting with nextAlt
state RequestStream<Request> const* stream = NULL;
state RequestStream<Request> const* stream = nullptr;
for(int alternativeNum=0; alternativeNum<alternatives->size(); alternativeNum++) {
int useAlt = nextAlt;
if( nextAlt == startAlt )
@ -309,7 +309,7 @@ Future< REPLY_TYPE(Request) > loadBalance(
break;
nextAlt = (nextAlt+1) % alternatives->size();
if(nextAlt == startAlt) triedAllOptions = true;
stream=NULL;
stream=nullptr;
}
if(!stream && !firstRequest.isValid() ) {
@ -493,7 +493,7 @@ Future< REPLY_TYPE(Request) > basicLoadBalance(
state int useAlt;
loop {
// Find an alternative, if any, that is not failed, starting with nextAlt
state RequestStream<Request> const* stream = NULL;
state RequestStream<Request> const* stream = nullptr;
for(int alternativeNum=0; alternativeNum<alternatives->size(); alternativeNum++) {
useAlt = nextAlt;
if( nextAlt == startAlt )
@ -505,7 +505,7 @@ Future< REPLY_TYPE(Request) > basicLoadBalance(
if (!IFailureMonitor::failureMonitor().getState( stream->getEndpoint() ).failed)
break;
nextAlt = (nextAlt+1) % alternatives->size();
stream=NULL;
stream=nullptr;
}
if(!stream) {

View File

@ -28,11 +28,11 @@
template <class T>
Reference<T> loadPlugin( std::string const& plugin_name ) {
void *(*get_plugin)(const char*) = NULL;
void *(*get_plugin)(const char*) = nullptr;
void* plugin = loadLibrary( plugin_name.c_str() );
if (plugin)
get_plugin = (void*(*)(const char*))loadFunction( plugin, "get_plugin" );
return (get_plugin) ? Reference<T>( (T*)get_plugin( T::get_plugin_type_name_and_version() ) ) : Reference<T>( NULL );
return (get_plugin) ? Reference<T>( (T*)get_plugin( T::get_plugin_type_name_and_version() ) ) : Reference<T>( nullptr );
}
#endif

View File

@ -63,9 +63,9 @@ ProcessClass::Fitness ProcessClass::machineClassFitness( ClusterRole role ) cons
default:
return ProcessClass::NeverAssign;
}
case ProcessClass::Proxy:
case ProcessClass::CommitProxy:
switch( _class ) {
case ProcessClass::ProxyClass:
case ProcessClass::CommitProxyClass:
return ProcessClass::BestFit;
case ProcessClass::StatelessClass:
return ProcessClass::GoodFit;
@ -92,7 +92,7 @@ ProcessClass::Fitness ProcessClass::machineClassFitness( ClusterRole role ) cons
return ProcessClass::GoodFit;
case ProcessClass::UnsetClass:
return ProcessClass::UnsetFit;
case ProcessClass::ProxyClass:
case ProcessClass::CommitProxyClass:
return ProcessClass::OkayFit;
case ProcessClass::ResolutionClass:
return ProcessClass::OkayFit;
@ -192,7 +192,7 @@ ProcessClass::Fitness ProcessClass::machineClassFitness( ClusterRole role ) cons
return ProcessClass::OkayFit;
case ProcessClass::TransactionClass:
return ProcessClass::OkayFit;
case ProcessClass::ProxyClass:
case ProcessClass::CommitProxyClass:
return ProcessClass::OkayFit;
case ProcessClass::GrvProxyClass:
return ProcessClass::OkayFit;

View File

@ -33,7 +33,7 @@ struct ProcessClass {
TransactionClass,
ResolutionClass,
TesterClass,
ProxyClass, // Process class of CommitProxy
CommitProxyClass,
GrvProxyClass,
MasterClass,
StatelessClass,
@ -53,7 +53,7 @@ struct ProcessClass {
enum ClusterRole {
Storage,
TLog,
Proxy,
CommitProxy,
GrvProxy,
Master,
Resolver,
@ -77,7 +77,7 @@ public:
if (s=="storage") _class = StorageClass;
else if (s=="transaction") _class = TransactionClass;
else if (s=="resolution") _class = ResolutionClass;
else if (s=="proxy") _class = ProxyClass;
else if (s=="commit_proxy") _class = CommitProxyClass;
else if (s=="grv_proxy") _class = GrvProxyClass;
else if (s=="master") _class = MasterClass;
else if (s=="test") _class = TesterClass;
@ -99,7 +99,7 @@ public:
if (classStr=="storage") _class = StorageClass;
else if (classStr=="transaction") _class = TransactionClass;
else if (classStr=="resolution") _class = ResolutionClass;
else if (classStr=="proxy") _class = ProxyClass;
else if (classStr=="commit_proxy") _class = CommitProxyClass;
else if (classStr=="grv_proxy") _class = GrvProxyClass;
else if (classStr=="master") _class = MasterClass;
else if (classStr=="test") _class = TesterClass;
@ -137,7 +137,7 @@ public:
case StorageClass: return "storage";
case TransactionClass: return "transaction";
case ResolutionClass: return "resolution";
case ProxyClass: return "proxy";
case CommitProxyClass: return "commit_proxy";
case GrvProxyClass: return "grv_proxy";
case MasterClass: return "master";
case TesterClass: return "test";

View File

@ -65,7 +65,7 @@ Future< Reference<class IAsyncFile> > Net2FileSystem::open( std::string filename
// EIO.
if ((flags & IAsyncFile::OPEN_UNBUFFERED) && !(flags & IAsyncFile::OPEN_NO_AIO) &&
!FLOW_KNOBS->DISABLE_POSIX_KERNEL_AIO)
f = AsyncFileKAIO::open(filename, flags, mode, NULL);
f = AsyncFileKAIO::open(filename, flags, mode, nullptr);
else
#endif
f = Net2AsyncFile::open(filename, flags, mode, static_cast<boost::asio::io_service*> ((void*) g_network->global(INetwork::enASIOService)));

View File

@ -1057,8 +1057,8 @@ void sleeptest() {
timespec ts;
ts.tv_sec = times[j] / 1000000;
ts.tv_nsec = (times[j] % 1000000)*1000;
clock_nanosleep(CLOCK_MONOTONIC, 0, &ts, NULL);
//nanosleep(&ts, NULL);
clock_nanosleep(CLOCK_MONOTONIC, 0, &ts, nullptr);
//nanosleep(&ts, nullptr);
}
double t = timer() - b;
printf("Sleep test (%dus x %d): %0.1f\n", times[j], n, double(t)/n*1e6);

View File

@ -115,7 +115,7 @@ public:
Future<T> getFuture() const { sav->addFutureRef(); return Future<T>(sav); }
bool isSet() { return sav->isSet(); }
bool isValid() const { return sav != NULL; }
bool isValid() const { return sav != nullptr; }
ReplyPromise() : sav(new NetSAV<T>(0, 1)) {}
ReplyPromise(const ReplyPromise& rhs) : sav(rhs.sav) { sav->addPromiseRef(); }
ReplyPromise(ReplyPromise&& rhs) noexcept : sav(rhs.sav) { rhs.sav = 0; }
@ -144,7 +144,7 @@ public:
}
// Beware, these operations are very unsafe
SAV<T>* extractRawPointer() { auto ptr = sav; sav = NULL; return ptr; }
SAV<T>* extractRawPointer() { auto ptr = sav; sav = nullptr; return ptr; }
explicit ReplyPromise<T>(SAV<T>* ptr) : sav(ptr) {}
int getFutureReferenceCount() const { return sav->getFutureReferenceCount(); }

View File

@ -1062,7 +1062,7 @@ public:
}
}
}
return canKillProcesses(processesLeft, processesDead, KillInstantly, NULL);
return canKillProcesses(processesLeft, processesDead, KillInstantly, nullptr);
}
virtual bool datacenterDead(Optional<Standalone<StringRef>> dcId) const
@ -1622,7 +1622,7 @@ public:
}
Sim2() : time(0.0), timerTime(0.0), taskCount(0), yielded(false), yield_limit(0), currentTaskID(TaskPriority::Zero) {
// Not letting currentProcess be NULL eliminates some annoying special cases
// Not letting currentProcess be nullptr eliminates some annoying special cases
currentProcess = new ProcessInfo("NoMachine", LocalityData(Optional<Standalone<StringRef>>(), StringRef(), StringRef(), StringRef()), ProcessClass(), {NetworkAddress()}, this, "", "");
g_network = net2 = newNet2(TLSConfig(), false, true);
g_network->addStopCallback( Net2FileSystem::stop );
@ -1813,12 +1813,12 @@ Future<Void> waitUntilDiskReady( Reference<DiskParameters> diskParameters, int64
int sf_open( const char* filename, int flags, int convFlags, int mode ) {
HANDLE wh = CreateFile( filename, GENERIC_READ | ((flags&IAsyncFile::OPEN_READWRITE) ? GENERIC_WRITE : 0),
FILE_SHARE_READ|FILE_SHARE_WRITE|FILE_SHARE_DELETE, NULL,
FILE_SHARE_READ|FILE_SHARE_WRITE|FILE_SHARE_DELETE, nullptr,
(flags&IAsyncFile::OPEN_EXCLUSIVE) ? CREATE_NEW :
(flags&IAsyncFile::OPEN_CREATE) ? OPEN_ALWAYS :
OPEN_EXISTING,
FILE_ATTRIBUTE_NORMAL,
NULL );
nullptr );
int h = -1;
if (wh != INVALID_HANDLE_VALUE) h = _open_osfhandle( (intptr_t)wh, convFlags );
else errno = GetLastError() == ERROR_FILE_NOT_FOUND ? ENOENT : EFAULT;

View File

@ -34,7 +34,7 @@ enum ClogMode { ClogDefault, ClogAll, ClogSend, ClogReceive };
class ISimulator : public INetwork {
public:
ISimulator() : desiredCoordinators(1), physicalDatacenters(1), processesPerMachine(0), listenersPerProcess(1), isStopped(false), lastConnectionFailure(0), connectionFailuresDisableDuration(0), speedUpSimulation(false), allSwapsDisabled(false), backupAgents(WaitForType), drAgents(WaitForType), extraDB(NULL), allowLogSetKills(true), usableRegions(1) {}
ISimulator() : desiredCoordinators(1), physicalDatacenters(1), processesPerMachine(0), listenersPerProcess(1), isStopped(false), lastConnectionFailure(0), connectionFailuresDisableDuration(0), speedUpSimulation(false), allSwapsDisabled(false), backupAgents(WaitForType), drAgents(WaitForType), extraDB(nullptr), allowLogSetKills(true), usableRegions(1) {}
// Order matters!
enum KillType { KillInstantly, InjectFaults, RebootAndDelete, RebootProcessAndDelete, Reboot, RebootProcess, None };
@ -97,7 +97,8 @@ public:
case ProcessClass::StorageClass: return true;
case ProcessClass::TransactionClass: return true;
case ProcessClass::ResolutionClass: return false;
case ProcessClass::ProxyClass: return false;
case ProcessClass::CommitProxyClass:
return false;
case ProcessClass::GrvProxyClass:
return false;
case ProcessClass::MasterClass:
@ -163,9 +164,9 @@ public:
virtual void rebootProcess(Optional<Standalone<StringRef>> zoneId, bool allProcesses ) = 0;
virtual void rebootProcess( ProcessInfo* process, KillType kt ) = 0;
virtual void killInterface( NetworkAddress address, KillType ) = 0;
virtual bool killMachine(Optional<Standalone<StringRef>> machineId, KillType kt, bool forceKill = false, KillType* ktFinal = NULL) = 0;
virtual bool killZone(Optional<Standalone<StringRef>> zoneId, KillType kt, bool forceKill = false, KillType* ktFinal = NULL) = 0;
virtual bool killDataCenter(Optional<Standalone<StringRef>> dcId, KillType kt, bool forceKill = false, KillType* ktFinal = NULL) = 0;
virtual bool killMachine(Optional<Standalone<StringRef>> machineId, KillType kt, bool forceKill = false, KillType* ktFinal = nullptr) = 0;
virtual bool killZone(Optional<Standalone<StringRef>> zoneId, KillType kt, bool forceKill = false, KillType* ktFinal = nullptr) = 0;
virtual bool killDataCenter(Optional<Standalone<StringRef>> dcId, KillType kt, bool forceKill = false, KillType* ktFinal = nullptr) = 0;
//virtual KillType getMachineKillState( UID zoneID ) = 0;
virtual bool canKillProcesses(std::vector<ProcessInfo*> const& availableProcesses, std::vector<ProcessInfo*> const& deadProcesses, KillType kt, KillType* newKillType) const = 0;
virtual bool isAvailable() const = 0;

View File

@ -43,8 +43,8 @@ Reference<StorageInfo> getStorageInfo(UID id, std::map<UID, Reference<StorageInf
}
// It is incredibly important that any modifications to txnStateStore are done in such a way that
// the same operations will be done on all proxies at the same time. Otherwise, the data stored in
// txnStateStore will become corrupted.
// the same operations will be done on all commit proxies at the same time. Otherwise, the data
// stored in txnStateStore will become corrupted.
void applyMetadataMutations(UID const& dbgid, Arena& arena, VectorRef<MutationRef> const& mutations,
IKeyValueStore* txnStateStore, LogPushData* toCommit, bool& confChange,
Reference<ILogSystem> logSystem, Version popVersion,

View File

@ -21,7 +21,7 @@
#include "fdbclient/BackupAgent.actor.h"
#include "fdbclient/BackupContainer.h"
#include "fdbclient/DatabaseContext.h"
#include "fdbclient/MasterProxyInterface.h"
#include "fdbclient/CommitProxyInterface.h"
#include "fdbclient/SystemData.h"
#include "fdbserver/BackupInterface.h"
#include "fdbserver/BackupProgress.actor.h"

View File

@ -46,7 +46,7 @@ set(FDBSERVER_SRCS
LogSystemDiskQueueAdapter.h
LogSystemPeekCursor.actor.cpp
MasterInterface.h
MasterProxyServer.actor.cpp
CommitProxyServer.actor.cpp
masterserver.actor.cpp
MutationTracking.h
MutationTracking.cpp

View File

@ -753,20 +753,21 @@ public:
}
}
auto first_proxy = getWorkerForRoleInDatacenter(dcId, ProcessClass::Proxy, ProcessClass::ExcludeFit,
req.configuration, id_used);
auto first_commit_proxy = getWorkerForRoleInDatacenter(dcId, ProcessClass::CommitProxy,
ProcessClass::ExcludeFit, req.configuration, id_used);
auto first_grv_proxy = getWorkerForRoleInDatacenter(dcId, ProcessClass::GrvProxy, ProcessClass::ExcludeFit,
req.configuration, id_used);
auto first_resolver = getWorkerForRoleInDatacenter(dcId, ProcessClass::Resolver, ProcessClass::ExcludeFit,
req.configuration, id_used);
auto proxies = getWorkersForRoleInDatacenter(dcId, ProcessClass::Proxy, req.configuration.getDesiredProxies(),
req.configuration, id_used, first_proxy);
auto commit_proxies =
getWorkersForRoleInDatacenter(dcId, ProcessClass::CommitProxy, req.configuration.getDesiredCommitProxies(),
req.configuration, id_used, first_commit_proxy);
auto grv_proxies =
getWorkersForRoleInDatacenter(dcId, ProcessClass::GrvProxy, req.configuration.getDesiredGrvProxies(),
req.configuration, id_used, first_grv_proxy);
auto resolvers = getWorkersForRoleInDatacenter( dcId, ProcessClass::Resolver, req.configuration.getDesiredResolvers(), req.configuration, id_used, first_resolver );
for (int i = 0; i < proxies.size(); i++) result.masterProxies.push_back(proxies[i].interf);
for (int i = 0; i < commit_proxies.size(); i++) result.commitProxies.push_back(commit_proxies[i].interf);
for (int i = 0; i < grv_proxies.size(); i++) result.grvProxies.push_back(grv_proxies[i].interf);
for(int i = 0; i < resolvers.size(); i++)
result.resolvers.push_back(resolvers[i].interf);
@ -800,9 +801,9 @@ public:
RoleFitness(SERVER_KNOBS->EXPECTED_TLOG_FITNESS, req.configuration.getDesiredSatelliteLogs(dcId),
ProcessClass::TLog)
.betterCount(RoleFitness(satelliteLogs, ProcessClass::TLog))) ||
RoleFitness(SERVER_KNOBS->EXPECTED_PROXY_FITNESS, req.configuration.getDesiredProxies(),
ProcessClass::Proxy)
.betterCount(RoleFitness(proxies, ProcessClass::Proxy)) ||
RoleFitness(SERVER_KNOBS->EXPECTED_COMMIT_PROXY_FITNESS, req.configuration.getDesiredCommitProxies(),
ProcessClass::CommitProxy)
.betterCount(RoleFitness(commit_proxies, ProcessClass::CommitProxy)) ||
RoleFitness(SERVER_KNOBS->EXPECTED_GRV_PROXY_FITNESS, req.configuration.getDesiredGrvProxies(),
ProcessClass::GrvProxy)
.betterCount(RoleFitness(grv_proxies, ProcessClass::GrvProxy)) ||
@ -911,22 +912,22 @@ public:
try {
//SOMEDAY: recruitment in other DCs besides the clusterControllerDcID will not account for the processes used by the master and cluster controller properly.
auto used = id_used;
auto first_proxy = getWorkerForRoleInDatacenter(dcId, ProcessClass::Proxy, ProcessClass::ExcludeFit,
req.configuration, used);
auto first_commit_proxy = getWorkerForRoleInDatacenter(
dcId, ProcessClass::CommitProxy, ProcessClass::ExcludeFit, req.configuration, used);
auto first_grv_proxy = getWorkerForRoleInDatacenter(
dcId, ProcessClass::GrvProxy, ProcessClass::ExcludeFit, req.configuration, used);
auto first_resolver = getWorkerForRoleInDatacenter(
dcId, ProcessClass::Resolver, ProcessClass::ExcludeFit, req.configuration, used);
auto proxies =
getWorkersForRoleInDatacenter(dcId, ProcessClass::Proxy, req.configuration.getDesiredProxies(),
req.configuration, used, first_proxy);
auto commit_proxies = getWorkersForRoleInDatacenter(dcId, ProcessClass::CommitProxy,
req.configuration.getDesiredCommitProxies(),
req.configuration, used, first_commit_proxy);
auto grv_proxies = getWorkersForRoleInDatacenter(dcId, ProcessClass::GrvProxy,
req.configuration.getDesiredGrvProxies(),
req.configuration, used, first_grv_proxy);
auto resolvers = getWorkersForRoleInDatacenter( dcId, ProcessClass::Resolver, req.configuration.getDesiredResolvers(), req.configuration, used, first_resolver );
RoleFitnessPair fitness(RoleFitness(proxies, ProcessClass::Proxy),
RoleFitnessPair fitness(RoleFitness(commit_proxies, ProcessClass::CommitProxy),
RoleFitness(grv_proxies, ProcessClass::GrvProxy),
RoleFitness(resolvers, ProcessClass::Resolver));
@ -936,8 +937,8 @@ public:
for (int i = 0; i < resolvers.size(); i++) {
result.resolvers.push_back(resolvers[i].interf);
}
for (int i = 0; i < proxies.size(); i++) {
result.masterProxies.push_back(proxies[i].interf);
for (int i = 0; i < commit_proxies.size(); i++) {
result.commitProxies.push_back(commit_proxies[i].interf);
}
for (int i = 0; i < grv_proxies.size(); i++) {
result.grvProxies.push_back(grv_proxies[i].interf);
@ -982,8 +983,8 @@ public:
.detail("Replication", req.configuration.tLogReplicationFactor)
.detail("DesiredLogs", req.configuration.getDesiredLogs())
.detail("ActualLogs", result.tLogs.size())
.detail("DesiredProxies", req.configuration.getDesiredProxies())
.detail("ActualProxies", result.masterProxies.size())
.detail("DesiredCommitProxies", req.configuration.getDesiredCommitProxies())
.detail("ActualCommitProxies", result.commitProxies.size())
.detail("DesiredGrvProxies", req.configuration.getDesiredGrvProxies())
.detail("ActualGrvProxies", result.grvProxies.size())
.detail("DesiredResolvers", req.configuration.getDesiredResolvers())
@ -993,8 +994,8 @@ public:
(RoleFitness(SERVER_KNOBS->EXPECTED_TLOG_FITNESS, req.configuration.getDesiredLogs(),
ProcessClass::TLog)
.betterCount(RoleFitness(tlogs, ProcessClass::TLog)) ||
RoleFitness(SERVER_KNOBS->EXPECTED_PROXY_FITNESS, req.configuration.getDesiredProxies(),
ProcessClass::Proxy)
RoleFitness(SERVER_KNOBS->EXPECTED_COMMIT_PROXY_FITNESS, req.configuration.getDesiredCommitProxies(),
ProcessClass::CommitProxy)
.betterCount(bestFitness.proxy) ||
RoleFitness(SERVER_KNOBS->EXPECTED_GRV_PROXY_FITNESS, req.configuration.getDesiredGrvProxies(),
ProcessClass::GrvProxy)
@ -1028,7 +1029,8 @@ public:
}
getWorkerForRoleInDatacenter( regions[0].dcId, ProcessClass::Resolver, ProcessClass::ExcludeFit, db.config, id_used, true );
getWorkerForRoleInDatacenter( regions[0].dcId, ProcessClass::Proxy, ProcessClass::ExcludeFit, db.config, id_used, true );
getWorkerForRoleInDatacenter(regions[0].dcId, ProcessClass::CommitProxy, ProcessClass::ExcludeFit,
db.config, id_used, true);
getWorkerForRoleInDatacenter(regions[0].dcId, ProcessClass::GrvProxy, ProcessClass::ExcludeFit, db.config,
id_used, true);
@ -1129,15 +1131,13 @@ public:
}
}
// Get proxy classes
std::vector<WorkerDetails> proxyClasses;
for(auto& it : dbi.client.masterProxies) {
auto masterProxyWorker = id_worker.find(it.processId);
if ( masterProxyWorker == id_worker.end() )
return false;
if ( masterProxyWorker->second.priorityInfo.isExcluded )
return true;
proxyClasses.push_back(masterProxyWorker->second.details);
// Get commit proxy classes
std::vector<WorkerDetails> commitProxyClasses;
for (auto& it : dbi.client.commitProxies) {
auto commitProxyWorker = id_worker.find(it.processId);
if (commitProxyWorker == id_worker.end()) return false;
if (commitProxyWorker->second.priorityInfo.isExcluded) return true;
commitProxyClasses.push_back(commitProxyWorker->second.details);
}
// Get grv proxy classes
@ -1285,25 +1285,25 @@ public:
if(oldLogRoutersFit < newLogRoutersFit) return false;
// Check proxy/grvProxy/resolver fitness
RoleFitnessPair oldInFit(RoleFitness(proxyClasses, ProcessClass::Proxy),
RoleFitnessPair oldInFit(RoleFitness(commitProxyClasses, ProcessClass::CommitProxy),
RoleFitness(grvProxyClasses, ProcessClass::GrvProxy),
RoleFitness(resolverClasses, ProcessClass::Resolver));
auto first_proxy = getWorkerForRoleInDatacenter(clusterControllerDcId, ProcessClass::Proxy,
auto first_commit_proxy = getWorkerForRoleInDatacenter(clusterControllerDcId, ProcessClass::CommitProxy,
ProcessClass::ExcludeFit, db.config, id_used, true);
auto first_grv_proxy = getWorkerForRoleInDatacenter(clusterControllerDcId, ProcessClass::GrvProxy,
ProcessClass::ExcludeFit, db.config, id_used, true);
auto first_resolver = getWorkerForRoleInDatacenter(clusterControllerDcId, ProcessClass::Resolver,
ProcessClass::ExcludeFit, db.config, id_used, true);
auto proxies =
getWorkersForRoleInDatacenter(clusterControllerDcId, ProcessClass::Proxy, db.config.getDesiredProxies(),
db.config, id_used, first_proxy, true);
auto commit_proxies = getWorkersForRoleInDatacenter(clusterControllerDcId, ProcessClass::CommitProxy,
db.config.getDesiredCommitProxies(), db.config, id_used,
first_commit_proxy, true);
auto grv_proxies =
getWorkersForRoleInDatacenter(clusterControllerDcId, ProcessClass::GrvProxy,
db.config.getDesiredGrvProxies(), db.config, id_used, first_grv_proxy, true);
auto resolvers = getWorkersForRoleInDatacenter( clusterControllerDcId, ProcessClass::Resolver, db.config.getDesiredResolvers(), db.config, id_used, first_resolver, true );
RoleFitnessPair newInFit(RoleFitness(proxies, ProcessClass::Proxy),
RoleFitnessPair newInFit(RoleFitness(commit_proxies, ProcessClass::CommitProxy),
RoleFitness(grv_proxies, ProcessClass::GrvProxy),
RoleFitness(resolvers, ProcessClass::Resolver));
if (oldInFit.proxy.betterFitness(newInFit.proxy) || oldInFit.grvProxy.betterFitness(newInFit.grvProxy) ||
@ -1358,7 +1358,7 @@ public:
if (tlog.present() && tlog.interf().filteredLocality.processId() == processId) return true;
}
}
for (const MasterProxyInterface& interf : dbInfo.client.masterProxies) {
for (const CommitProxyInterface& interf : dbInfo.client.commitProxies) {
if (interf.processId == processId) return true;
}
for (const GrvProxyInterface& interf : dbInfo.client.grvProxies) {
@ -1393,7 +1393,7 @@ public:
}
}
}
for (const MasterProxyInterface& interf : dbInfo.client.masterProxies) {
for (const CommitProxyInterface& interf : dbInfo.client.commitProxies) {
ASSERT(interf.processId.present());
idUsed[interf.processId]++;
}
@ -1967,7 +1967,7 @@ void clusterRegisterMaster( ClusterControllerData* self, RegisterMasterRequest c
.detail("Resolvers", req.resolvers.size())
.detail("RecoveryState", (int)req.recoveryState)
.detail("RegistrationCount", req.registrationCount)
.detail("MasterProxies", req.masterProxies.size())
.detail("CommitProxies", req.commitProxies.size())
.detail("GrvProxies", req.grvProxies.size())
.detail("RecoveryCount", req.recoveryCount)
.detail("Stalled", req.recoveryStalled)
@ -2022,11 +2022,12 @@ void clusterRegisterMaster( ClusterControllerData* self, RegisterMasterRequest c
}
// Construct the client information
if (db->clientInfo->get().masterProxies != req.masterProxies || db->clientInfo->get().grvProxies != req.grvProxies) {
if (db->clientInfo->get().commitProxies != req.commitProxies ||
db->clientInfo->get().grvProxies != req.grvProxies) {
isChanged = true;
ClientDBInfo clientInfo;
clientInfo.id = deterministicRandom()->randomUniqueID();
clientInfo.masterProxies = req.masterProxies;
clientInfo.commitProxies = req.commitProxies;
clientInfo.grvProxies = req.grvProxies;
clientInfo.clientTxnInfoSampleRate = db->clientInfo->get().clientTxnInfoSampleRate;
clientInfo.clientTxnInfoSizeLimit = db->clientInfo->get().clientTxnInfoSizeLimit;

View File

@ -1,5 +1,5 @@
/*
* MasterProxyServer.actor.cpp
* CommitProxyServer.actor.cpp
*
* This source file is part of the FoundationDB open source project
*
@ -25,7 +25,7 @@
#include "fdbclient/Atomic.h"
#include "fdbclient/FDBTypes.h"
#include "fdbclient/Knobs.h"
#include "fdbclient/MasterProxyInterface.h"
#include "fdbclient/CommitProxyInterface.h"
#include "fdbclient/NativeAPI.actor.h"
#include "fdbclient/SystemData.h"
#include "fdbrpc/sim_validation.h"
@ -42,7 +42,6 @@
#include "fdbserver/ProxyCommitData.actor.h"
#include "fdbserver/RatekeeperInterface.h"
#include "fdbserver/RecoveryState.h"
#include "fdbserver/ServerDBInfo.h"
#include "fdbserver/WaitFailure.h"
#include "fdbserver/WorkerInterface.actor.h"
#include "flow/ActorCollection.h"
@ -119,7 +118,7 @@ struct ResolutionRequestBuilder {
void addTransaction(CommitTransactionRequest& trRequest, int transactionNumberInBatch) {
auto& trIn = trRequest.transaction;
// SOMEDAY: There are a couple of unnecessary O( # resolvers ) steps here
outTr.assign(requests.size(), NULL);
outTr.assign(requests.size(), nullptr);
ASSERT( transactionNumberInBatch >= 0 && transactionNumberInBatch < 32768 );
bool isTXNStateTransaction = false;
@ -229,7 +228,7 @@ ACTOR Future<Void> commitBatcher(ProxyCommitData *commitData, PromiseStream<std:
++commitData->stats.txnCommitIn;
if(req.debugID.present()) {
g_traceBatch.addEvent("CommitDebug", req.debugID.get().first(), "MasterProxyServer.batcher");
g_traceBatch.addEvent("CommitDebug", req.debugID.get().first(), "CommitProxyServer.batcher");
}
if(!batch.size()) {
@ -331,7 +330,7 @@ ACTOR Future<Void> addBackupMutations(ProxyCommitData* self, std::map<Key, Mutat
MutationRef backupMutation;
backupMutation.type = MutationRef::SetValue;
uint32_t* partBuffer = NULL;
uint32_t* partBuffer = nullptr;
for (int part = 0; part * CLIENT_KNOBS->MUTATION_BLOCK_SIZE < val.size(); part++) {
@ -512,11 +511,7 @@ void CommitBatchContext::setupTraceBatch() {
}
if (debugID.present()) {
g_traceBatch.addEvent(
"CommitDebug",
debugID.get().first(),
"MasterProxyServer.commitBatch.Before"
);
g_traceBatch.addEvent("CommitDebug", debugID.get().first(), "CommitProxyServer.commitBatch.Before");
}
}
@ -546,10 +541,8 @@ ACTOR Future<Void> preresolutionProcessing(CommitBatchContext* self) {
);
if (debugID.present()) {
g_traceBatch.addEvent(
"CommitDebug", debugID.get().first(),
"MasterProxyServer.commitBatch.GettingCommitVersion"
);
g_traceBatch.addEvent("CommitDebug", debugID.get().first(),
"CommitProxyServer.commitBatch.GettingCommitVersion");
}
GetCommitVersionRequest req(self->span.context, pProxyCommitData->commitVersionRequestNumber++,
@ -577,10 +570,7 @@ ACTOR Future<Void> preresolutionProcessing(CommitBatchContext* self) {
//TraceEvent("ProxyGotVer", pProxyContext->dbgid).detail("Commit", commitVersion).detail("Prev", prevVersion);
if (debugID.present()) {
g_traceBatch.addEvent(
"CommitDebug", debugID.get().first(),
"MasterProxyServer.commitBatch.GotCommitVersion"
);
g_traceBatch.addEvent("CommitDebug", debugID.get().first(), "CommitProxyServer.commitBatch.GotCommitVersion");
}
return Void();
@ -639,10 +629,8 @@ ACTOR Future<Void> getResolution(CommitBatchContext* self) {
self->resolution.swap(*const_cast<std::vector<ResolveTransactionBatchReply>*>(&resolutionResp));
if (self->debugID.present()) {
g_traceBatch.addEvent(
"CommitDebug", self->debugID.get().first(),
"MasterProxyServer.commitBatch.AfterResolution"
);
g_traceBatch.addEvent("CommitDebug", self->debugID.get().first(),
"CommitProxyServer.commitBatch.AfterResolution");
}
return Void();
@ -972,10 +960,8 @@ ACTOR Future<Void> postResolution(CommitBatchContext* self) {
pProxyCommitData->stats.txnCommitResolved += trs.size();
if (debugID.present()) {
g_traceBatch.addEvent(
"CommitDebug", debugID.get().first(),
"MasterProxyServer.commitBatch.ProcessingMutations"
);
g_traceBatch.addEvent("CommitDebug", debugID.get().first(),
"CommitProxyServer.commitBatch.ProcessingMutations");
}
self->isMyFirstBatch = !pProxyCommitData->version;
@ -1041,7 +1027,8 @@ ACTOR Future<Void> postResolution(CommitBatchContext* self) {
self->msg = self->storeCommits.back().first.get();
if (self->debugID.present())
g_traceBatch.addEvent("CommitDebug", self->debugID.get().first(), "MasterProxyServer.commitBatch.AfterStoreCommits");
g_traceBatch.addEvent("CommitDebug", self->debugID.get().first(),
"CommitProxyServer.commitBatch.AfterStoreCommits");
// txnState (transaction subsystem state) tag: message extracted from log adapter
bool firstMessage = true;
@ -1129,7 +1116,7 @@ ACTOR Future<Void> reply(CommitBatchContext* self) {
//TraceEvent("ProxyPushed", pProxyCommitData->dbgid).detail("PrevVersion", prevVersion).detail("Version", commitVersion);
if (debugID.present())
g_traceBatch.addEvent("CommitDebug", debugID.get().first(), "MasterProxyServer.commitBatch.AfterLogPush");
g_traceBatch.addEvent("CommitDebug", debugID.get().first(), "CommitProxyServer.commitBatch.AfterLogPush");
for (auto &p : self->storeCommits) {
ASSERT(!p.second.isReady());
@ -1328,7 +1315,8 @@ ACTOR static Future<Void> doKeyServerLocationRequest( GetKeyServerLocationsReque
return Void();
}
ACTOR static Future<Void> readRequestServer( MasterProxyInterface proxy, PromiseStream<Future<Void>> addActor, ProxyCommitData* commitData ) {
ACTOR static Future<Void> readRequestServer(CommitProxyInterface proxy, PromiseStream<Future<Void>> addActor,
ProxyCommitData* commitData) {
loop {
GetKeyServerLocationsRequest req = waitNext(proxy.getKeyServersLocations.getFuture());
//WARNING: this code is run at a high priority, so it needs to do as little work as possible
@ -1344,7 +1332,7 @@ ACTOR static Future<Void> readRequestServer( MasterProxyInterface proxy, Promise
}
}
ACTOR static Future<Void> rejoinServer( MasterProxyInterface proxy, ProxyCommitData* commitData ) {
ACTOR static Future<Void> rejoinServer(CommitProxyInterface proxy, ProxyCommitData* commitData) {
// We can't respond to these requests until we have valid txnStateStore
wait(commitData->validState.getFuture());
@ -1413,8 +1401,7 @@ ACTOR static Future<Void> rejoinServer( MasterProxyInterface proxy, ProxyCommitD
}
}
ACTOR Future<Void> ddMetricsRequestServer(MasterProxyInterface proxy, Reference<AsyncVar<ServerDBInfo>> db)
{
ACTOR Future<Void> ddMetricsRequestServer(CommitProxyInterface proxy, Reference<AsyncVar<ServerDBInfo>> db) {
loop {
choose {
when(state GetDDMetricsRequest req = waitNext(proxy.getDDMetrics.getFuture()))
@ -1496,7 +1483,7 @@ ACTOR Future<Void> monitorRemoteCommitted(ProxyCommitData* self) {
}
ACTOR Future<Void> proxySnapCreate(ProxySnapRequest snapReq, ProxyCommitData* commitData) {
TraceEvent("SnapMasterProxy_SnapReqEnter")
TraceEvent("SnapCommitProxy_SnapReqEnter")
.detail("SnapPayload", snapReq.snapPayload)
.detail("SnapUID", snapReq.snapUID);
try {
@ -1504,7 +1491,7 @@ ACTOR Future<Void> proxySnapCreate(ProxySnapRequest snapReq, ProxyCommitData* co
ExecCmdValueString execArg(snapReq.snapPayload);
StringRef binPath = execArg.getBinaryPath();
if (!isWhitelisted(commitData->whitelistedBinPathVec, binPath)) {
TraceEvent("SnapMasterProxy_WhiteListCheckFailed")
TraceEvent("SnapCommitProxy_WhiteListCheckFailed")
.detail("SnapPayload", snapReq.snapPayload)
.detail("SnapUID", snapReq.snapUID);
throw snap_path_not_whitelisted();
@ -1516,7 +1503,7 @@ ACTOR Future<Void> proxySnapCreate(ProxySnapRequest snapReq, ProxyCommitData* co
// Currently, snapshot of old tlog generation is not
// supported and hence failing the snapshot request until
// cluster is fully_recovered.
TraceEvent("SnapMasterProxy_ClusterNotFullyRecovered")
TraceEvent("SnapCommitProxy_ClusterNotFullyRecovered")
.detail("SnapPayload", snapReq.snapPayload)
.detail("SnapUID", snapReq.snapUID);
throw snap_not_fully_recovered_unsupported();
@ -1531,7 +1518,7 @@ ACTOR Future<Void> proxySnapCreate(ProxySnapRequest snapReq, ProxyCommitData* co
// FIXME: logAntiQuorum not supported, remove it later,
// In version2, we probably don't need this limitation, but this needs to be tested.
if (logAntiQuorum > 0) {
TraceEvent("SnapMasterProxy_LogAnitQuorumNotSupported")
TraceEvent("SnapCommitProxy_LogAnitQuorumNotSupported")
.detail("SnapPayload", snapReq.snapPayload)
.detail("SnapUID", snapReq.snapUID);
throw snap_log_anti_quorum_unsupported();
@ -1547,15 +1534,15 @@ ACTOR Future<Void> proxySnapCreate(ProxySnapRequest snapReq, ProxyCommitData* co
try {
wait(throwErrorOr(ddSnapReq));
} catch (Error& e) {
TraceEvent("SnapMasterProxy_DDSnapResponseError")
TraceEvent("SnapCommitProxy_DDSnapResponseError")
.detail("SnapPayload", snapReq.snapPayload)
.detail("SnapUID", snapReq.snapUID)
.error(e, true /*includeCancelled*/ );
.error(e, true /*includeCancelled*/);
throw e;
}
snapReq.reply.send(Void());
} catch (Error& e) {
TraceEvent("SnapMasterProxy_SnapReqError")
TraceEvent("SnapCommitProxy_SnapReqError")
.detail("SnapPayload", snapReq.snapPayload)
.detail("SnapUID", snapReq.snapUID)
.error(e, true /*includeCancelled*/);
@ -1565,14 +1552,14 @@ ACTOR Future<Void> proxySnapCreate(ProxySnapRequest snapReq, ProxyCommitData* co
throw e;
}
}
TraceEvent("SnapMasterProxy_SnapReqExit")
TraceEvent("SnapCommitProxy_SnapReqExit")
.detail("SnapPayload", snapReq.snapPayload)
.detail("SnapUID", snapReq.snapUID);
return Void();
}
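For readers tracing the snapshot path above: a ProxySnapRequest is rejected before it ever reaches the data distributor if any of three guards fails. A minimal sketch of that guard ordering, using the error constructors from the diff; the helper name and its boolean parameters are illustrative placeholders, not part of this change:

// Sketch only: condenses the guard checks in proxySnapCreate above.
// whitelisted / fullyRecovered / logAntiQuorum stand in for the state the actor actually consults.
void validateSnapshotRequest(bool whitelisted, bool fullyRecovered, int logAntiQuorum) {
	if (!whitelisted) throw snap_path_not_whitelisted();                 // binary not in the whitelist
	if (!fullyRecovered) throw snap_not_fully_recovered_unsupported();   // old tlog generations still live
	if (logAntiQuorum > 0) throw snap_log_anti_quorum_unsupported();     // anti-quorum configs unsupported
}

Only when all three guards pass does the proxy forward the request to the data distributor and relay its reply.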
ACTOR Future<Void> proxyCheckSafeExclusion(Reference<AsyncVar<ServerDBInfo>> db, ExclusionSafetyCheckRequest req) {
TraceEvent("SafetyCheckMasterProxyBegin");
TraceEvent("SafetyCheckCommitProxyBegin");
state ExclusionSafetyCheckReply reply(false);
if (!db->get().distributor.present()) {
TraceEvent(SevWarnAlways, "DataDistributorNotPresent").detail("Operation", "ExclusionSafetyCheck");
@ -1586,7 +1573,7 @@ ACTOR Future<Void> proxyCheckSafeExclusion(Reference<AsyncVar<ServerDBInfo>> db,
DistributorExclusionSafetyCheckReply _reply = wait(throwErrorOr(safeFuture));
reply.safe = _reply.safe;
} catch (Error& e) {
TraceEvent("SafetyCheckMasterProxyResponseError").error(e);
TraceEvent("SafetyCheckCommitProxyResponseError").error(e);
if (e.code() != error_code_operation_cancelled) {
req.reply.sendError(e);
return Void();
@ -1594,7 +1581,7 @@ ACTOR Future<Void> proxyCheckSafeExclusion(Reference<AsyncVar<ServerDBInfo>> db,
throw e;
}
}
TraceEvent("SafetyCheckMasterProxyFinish");
TraceEvent("SafetyCheckCommitProxyFinish");
req.reply.send(reply);
return Void();
}
@ -1631,15 +1618,10 @@ ACTOR Future<Void> reportTxnTagCommitCost(UID myID, Reference<AsyncVar<ServerDBI
}
}
ACTOR Future<Void> masterProxyServerCore(
MasterProxyInterface proxy,
MasterInterface master,
Reference<AsyncVar<ServerDBInfo>> db,
LogEpoch epoch,
Version recoveryTransactionVersion,
bool firstProxy,
std::string whitelistBinPaths)
{
ACTOR Future<Void> commitProxyServerCore(CommitProxyInterface proxy, MasterInterface master,
Reference<AsyncVar<ServerDBInfo>> db, LogEpoch epoch,
Version recoveryTransactionVersion, bool firstProxy,
std::string whitelistBinPaths) {
state ProxyCommitData commitData(proxy.id(), master, proxy.getConsistentReadVersion, recoveryTransactionVersion, proxy.commit, db, firstProxy);
state Future<Sequence> sequenceFuture = (Sequence)0;
@ -1657,9 +1639,9 @@ ACTOR Future<Void> masterProxyServerCore(
state GetHealthMetricsReply detailedHealthMetricsReply;
addActor.send( waitFailureServer(proxy.waitFailure.getFuture()) );
addActor.send( traceRole(Role::MASTER_PROXY, proxy.id()) );
addActor.send(traceRole(Role::COMMIT_PROXY, proxy.id()));
//TraceEvent("ProxyInit1", proxy.id());
//TraceEvent("CommitProxyInit1", proxy.id());
// Wait until we can load the "real" logsystem, since we don't support switching them currently
while (!(commitData.db->get().master.id() == master.id() && commitData.db->get().recoveryState >= RecoveryState::RECOVERY_TRANSACTION)) {
@ -1701,7 +1683,7 @@ ACTOR Future<Void> masterProxyServerCore(
(int)std::min<double>(SERVER_KNOBS->COMMIT_TRANSACTION_BATCH_BYTES_MAX,
std::max<double>(SERVER_KNOBS->COMMIT_TRANSACTION_BATCH_BYTES_MIN,
SERVER_KNOBS->COMMIT_TRANSACTION_BATCH_BYTES_SCALE_BASE *
pow(commitData.db->get().client.masterProxies.size(),
pow(commitData.db->get().client.commitProxies.size(),
SERVER_KNOBS->COMMIT_TRANSACTION_BATCH_BYTES_SCALE_POWER)));
commitBatcherActor = commitBatcher(&commitData, batchedCommits, proxy.commit.getFuture(), commitBatchByteLimit, commitBatchesMemoryLimit);
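The byte limit fed into commitBatcher above grows sublinearly with the number of commit proxies and is clamped between the MIN and MAX knobs. A hedged illustration with made-up knob values (the real defaults are not shown in this diff):

// Sketch only: how commitBatchByteLimit scales with the commit proxy count.
// The knob values below are hypothetical placeholders.
#include <algorithm>
#include <cmath>
#include <cstdio>
int main() {
	const double BYTES_MIN = 100000, BYTES_MAX = 10000000;   // hypothetical
	const double SCALE_BASE = 6000000, SCALE_POWER = 0.7;    // hypothetical
	for (int commitProxies = 1; commitProxies <= 8; commitProxies *= 2) {
		int limit = (int)std::min(BYTES_MAX,
		                          std::max(BYTES_MIN, SCALE_BASE * std::pow((double)commitProxies, SCALE_POWER)));
		std::printf("%d commit proxies -> batch byte limit %d\n", commitProxies, limit);
	}
	return 0;
}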
@ -1723,7 +1705,7 @@ ACTOR Future<Void> masterProxyServerCore(
//WARNING: this code is run at a high priority, so it needs to do as little work as possible
const vector<CommitTransactionRequest> &trs = batchedRequests.first;
int batchBytes = batchedRequests.second;
//TraceEvent("MasterProxyCTR", proxy.id()).detail("CommitTransactions", trs.size()).detail("TransactionRate", transactionRate).detail("TransactionQueue", transactionQueue.size()).detail("ReleasedTransactionCount", transactionCount);
//TraceEvent("CommitProxyCTR", proxy.id()).detail("CommitTransactions", trs.size()).detail("TransactionRate", transactionRate).detail("TransactionQueue", transactionQueue.size()).detail("ReleasedTransactionCount", transactionCount);
if (trs.size() || (commitData.db->get().recoveryState >= RecoveryState::ACCEPTING_COMMITS && now() - lastCommit >= SERVER_KNOBS->MAX_COMMIT_BATCH_INTERVAL)) {
lastCommit = now();
@ -1824,27 +1806,27 @@ ACTOR Future<Void> masterProxyServerCore(
}
}
ACTOR Future<Void> checkRemoved(Reference<AsyncVar<ServerDBInfo>> db, uint64_t recoveryCount, MasterProxyInterface myInterface) {
ACTOR Future<Void> checkRemoved(Reference<AsyncVar<ServerDBInfo>> db, uint64_t recoveryCount,
CommitProxyInterface myInterface) {
loop{
if (db->get().recoveryCount >= recoveryCount && !std::count(db->get().client.masterProxies.begin(), db->get().client.masterProxies.end(), myInterface)) {
if (db->get().recoveryCount >= recoveryCount &&
!std::count(db->get().client.commitProxies.begin(), db->get().client.commitProxies.end(), myInterface)) {
throw worker_removed();
}
wait(db->onChange());
}
}
ACTOR Future<Void> masterProxyServer(
MasterProxyInterface proxy,
InitializeMasterProxyRequest req,
Reference<AsyncVar<ServerDBInfo>> db,
std::string whitelistBinPaths)
{
ACTOR Future<Void> commitProxyServer(CommitProxyInterface proxy, InitializeCommitProxyRequest req,
Reference<AsyncVar<ServerDBInfo>> db, std::string whitelistBinPaths) {
try {
state Future<Void> core = masterProxyServerCore(proxy, req.master, db, req.recoveryCount, req.recoveryTransactionVersion, req.firstProxy, whitelistBinPaths);
state Future<Void> core =
commitProxyServerCore(proxy, req.master, db, req.recoveryCount, req.recoveryTransactionVersion,
req.firstProxy, whitelistBinPaths);
wait(core || checkRemoved(db, req.recoveryCount, proxy));
}
catch (Error& e) {
TraceEvent("MasterProxyTerminated", proxy.id()).error(e, true);
TraceEvent("CommitProxyTerminated", proxy.id()).error(e, true);
if (e.code() != error_code_worker_removed && e.code() != error_code_tlog_stopped &&
e.code() != error_code_master_tlog_failed && e.code() != error_code_coordinators_changed &&

View File

@ -268,7 +268,7 @@ struct CompactPreOrderTree {
Deque< BuildInfo > queue;
Deque< BuildInfo > deferred;
queue.push_back(BuildInfo(NULL, false, prefix, &input[0], &input[0] + input.size()));
queue.push_back(BuildInfo(nullptr, false, prefix, &input[0], &input[0] + input.size()));
Node* node = &root;
uint8_t* cacheLineEnd = (uint8_t*)node + 64;

View File

@ -96,7 +96,7 @@ ServerCoordinators::ServerCoordinators( Reference<ClusterConnectionFile> cf )
// The coordination server wants to create its key value store only if it is actually used
struct OnDemandStore {
public:
OnDemandStore( std::string folder, UID myID ) : folder(folder), store(NULL), myID(myID) {}
OnDemandStore( std::string folder, UID myID ) : folder(folder), store(nullptr), myID(myID) {}
~OnDemandStore() { if (store) store->close(); }
IKeyValueStore* get() {

View File

@ -48,7 +48,7 @@ protected:
struct Coroutine /*: IThreadlike*/ {
Coroutine() {
coro = Coro_new();
if (coro == NULL)
if (coro == nullptr)
platform::outOfMemory();
}
@ -294,7 +294,7 @@ void CoroThreadPool::init()
{
if (!current_coro) {
current_coro = main_coro = Coro_new();
if (main_coro == NULL)
if (main_coro == nullptr)
platform::outOfMemory();
Coro_initializeMainCoro(main_coro);

View File

@ -5188,7 +5188,7 @@ TEST_CASE("DataDistribution/AddTeamsBestOf/NotUseMachineID") {
Reference<IReplicationPolicy> policy = Reference<IReplicationPolicy>(new PolicyAcross(teamSize, "zoneid", Reference<IReplicationPolicy>(new PolicyOne())));
state DDTeamCollection* collection = testMachineTeamCollection(teamSize, policy, processSize);
if (collection == NULL) {
if (collection == nullptr) {
fprintf(stderr, "collection is null\n");
return Void();
}

View File

@ -785,7 +785,7 @@ public:
// FIXME: Is setting lastCommittedSeq to -1 instead of 0 necessary?
DiskQueue( std::string basename, std::string fileExtension, UID dbgid, DiskQueueVersion diskQueueVersion, int64_t fileSizeWarningLimit )
: rawQueue( new RawDiskQueue_TwoFiles(basename, fileExtension, dbgid, fileSizeWarningLimit) ), dbgid(dbgid), diskQueueVersion(diskQueueVersion), anyPopped(false), nextPageSeq(0), poppedSeq(0), lastPoppedSeq(0),
nextReadLocation(-1), readBufPage(NULL), readBufPos(0), pushed_page_buffer(NULL), recovered(false), initialized(false), lastCommittedSeq(-1), warnAlwaysForMemory(true)
nextReadLocation(-1), readBufPage(nullptr), readBufPos(0), pushed_page_buffer(nullptr), recovered(false), initialized(false), lastCommittedSeq(-1), warnAlwaysForMemory(true)
{
}

View File

@ -21,7 +21,7 @@
#include "fdbclient/Notified.h"
#include "fdbserver/LogSystem.h"
#include "fdbserver/LogSystemDiskQueueAdapter.h"
#include "fdbclient/MasterProxyInterface.h"
#include "fdbclient/CommitProxyInterface.h"
#include "fdbclient/GrvProxyInterface.h"
#include "fdbserver/WaitFailure.h"
#include "fdbserver/WorkerInterface.actor.h"
@ -443,13 +443,13 @@ ACTOR Future<Void> sendGrvReplies(Future<GetReadVersionReply> replyFuture, std::
TEST(true); // Auto TPS rate is unlimited
}
else {
TEST(true); // Proxy returning tag throttle
TEST(true); // GRV proxy returning tag throttle
reply.tagThrottleInfo[tag.first] = tagItr->second;
}
}
else {
// This isn't required, but we might as well
TEST(true); // Proxy expiring tag throttle
TEST(true); // GRV proxy expiring tag throttle
priorityThrottledTags.erase(tagItr);
}
}

View File

@ -299,13 +299,13 @@ private:
void rollback() { clear(); }
void set(KeyValueRef keyValue, const Arena* arena = NULL) {
void set(KeyValueRef keyValue, const Arena* arena = nullptr) {
queue_op(OpSet, keyValue.key, keyValue.value, arena);
}
void clear(KeyRangeRef range, const Arena* arena = NULL) { queue_op(OpClear, range.begin, range.end, arena); }
void clear(KeyRangeRef range, const Arena* arena = nullptr) { queue_op(OpClear, range.begin, range.end, arena); }
void clear_to_end(StringRef fromKey, const Arena* arena = NULL) {
void clear_to_end(StringRef fromKey, const Arena* arena = nullptr) {
queue_op(OpClearToEnd, fromKey, StringRef(), arena);
}
@ -316,7 +316,7 @@ private:
r.op = op;
r.p1 = p1;
r.p2 = p2;
if (arena == NULL) {
if (arena == nullptr) {
operations.push_back_deep(operations.arena(), r);
} else {
operations.push_back(operations.arena(), r);

View File

@ -46,7 +46,7 @@ void hexdump(FILE *fout, StringRef val);
#include <Windows.h>*/
/*uint64_t getFileSize( const char* filename ) {
HANDLE f = CreateFile( filename, GENERIC_READ, FILE_SHARE_READ|FILE_SHARE_WRITE|FILE_SHARE_DELETE, NULL, OPEN_EXISTING, 0, NULL);
HANDLE f = CreateFile( filename, GENERIC_READ, FILE_SHARE_READ|FILE_SHARE_WRITE|FILE_SHARE_DELETE, nullptr, OPEN_EXISTING, 0, nullptr);
if (f == INVALID_HANDLE_VALUE) return 0;
DWORD hi,lo;
lo = GetFileSize(f, &hi);
@ -165,12 +165,12 @@ struct PageChecksumCodec {
.detail("Filename", self->filename)
.detail("PageNumber", pageNumber);
return NULL;
return nullptr;
}
}
if(!self->checksum(pageNumber, data, self->pageSize, write))
return NULL;
return nullptr;
return data;
}
@ -211,7 +211,7 @@ struct SQLiteDB : NonCopyable {
void open(bool writable);
void createFromScratch();
SQLiteDB( std::string filename, bool page_checksums, bool fragment_values): filename(filename), db(NULL), btree(NULL), table(-1), freetable(-1), haveMutex(false), page_checksums(page_checksums), fragment_values(fragment_values) {}
SQLiteDB( std::string filename, bool page_checksums, bool fragment_values): filename(filename), db(nullptr), btree(nullptr), table(-1), freetable(-1), haveMutex(false), page_checksums(page_checksums), fragment_values(fragment_values) {}
~SQLiteDB() {
if (db) {
@ -315,9 +315,9 @@ class Statement : NonCopyable {
public:
Statement( SQLiteDB& db, const char* sql )
: db(db), stmt(NULL)
: db(db), stmt(nullptr)
{
db.checkError("prepare", sqlite3_prepare_v2( db.db, sql, -1, &stmt, NULL));
db.checkError("prepare", sqlite3_prepare_v2( db.db, sql, -1, &stmt, nullptr));
}
~Statement() {
try {
@ -520,7 +520,7 @@ int getEncodedKVFragmentSize( int keySize, int valuePrefixSize ) {
// the full key and index were in the encoded buffer. The value returned will be 0 or
// more value bytes, however many were available.
// Note that a short encoded buffer must at *least* contain the header length varint.
Optional<KeyValueRef> decodeKVFragment( StringRef encoded, uint32_t *index = NULL, bool partial = false) {
Optional<KeyValueRef> decodeKVFragment( StringRef encoded, uint32_t *index = nullptr, bool partial = false) {
uint8_t const* d = encoded.begin();
uint64_t h, len1, len2;
d += sqlite3GetVarint( d, (u64*)&h );
@ -634,7 +634,7 @@ struct IntKeyCursor {
IntKeyCursor( SQLiteDB& db, int table, bool write ) : cursor(0), db(db) {
cursor = (BtCursor*)new char[sqlite3BtreeCursorSize()];
sqlite3BtreeCursorZero(cursor);
db.checkError("BtreeCursor", sqlite3BtreeCursor(db.btree, table, write, NULL, cursor));
db.checkError("BtreeCursor", sqlite3BtreeCursor(db.btree, table, write, nullptr, cursor));
}
~IntKeyCursor() {
if (cursor) {
@ -726,7 +726,7 @@ struct RawCursor {
}
void insertFragment( KeyValueRef kv, uint32_t index, int seekResult ) {
Value v = encodeKVFragment(kv, index);
db.checkError("BtreeInsert", sqlite3BtreeInsert(cursor, v.begin(), v.size(), NULL, 0, 0, 0, seekResult));
db.checkError("BtreeInsert", sqlite3BtreeInsert(cursor, v.begin(), v.size(), nullptr, 0, 0, 0, seekResult));
}
void remove() {
db.checkError("BtreeDelete", sqlite3BtreeDelete(cursor));
@ -823,7 +823,7 @@ struct RawCursor {
int r = moveTo( kv.key );
if (!r) remove();
Value v = encode(kv);
db.checkError("BTreeInsert", sqlite3BtreeInsert(cursor, v.begin(), v.size(), NULL, 0, 0, 0, r));
db.checkError("BTreeInsert", sqlite3BtreeInsert(cursor, v.begin(), v.size(), nullptr, 0, 0, 0, r));
}
}
void clearOne( KeyRangeRef keys ) {
@ -1158,7 +1158,7 @@ struct RawCursor {
// Set field 1 of tuple to key, which is a string type with typecode 12 + 2*len
tupleValues[0].db = keyInfo.db;
tupleValues[0].enc = keyInfo.enc;
tupleValues[0].zMalloc = NULL;
tupleValues[0].zMalloc = nullptr;
ASSERT(sqlite3VdbeSerialGet(key.begin(), 12 + (2 * key.size()), &tupleValues[0]) == key.size());
// In fragmenting mode, seek is to (k, 0, ), otherwise just (k, ).
@ -1168,8 +1168,8 @@ struct RawCursor {
// Set field 2 of tuple to the null type which is typecode 0
tupleValues[1].db = keyInfo.db;
tupleValues[1].enc = keyInfo.enc;
tupleValues[1].zMalloc = NULL;
ASSERT(sqlite3VdbeSerialGet(NULL, 0, &tupleValues[1]) == 0);
tupleValues[1].zMalloc = nullptr;
ASSERT(sqlite3VdbeSerialGet(nullptr, 0, &tupleValues[1]) == 0);
r.nField = 2;
}
@ -1231,7 +1231,7 @@ int SQLiteDB::checkAllPageChecksums() {
// Now that the file itself is open and locked, let sqlite open the database
// Note that VFSAsync will also call g_network->open (including for the WAL), so its flags are important, too
// TODO: If better performance is needed, make AsyncFileReadAheadCache work and be enabled by SQLITE_OPEN_READAHEAD which was added for that purpose.
int result = sqlite3_open_v2(apath.c_str(), &db, SQLITE_OPEN_READONLY, NULL);
int result = sqlite3_open_v2(apath.c_str(), &db, SQLITE_OPEN_READONLY, nullptr);
checkError("open", result);
// This check has the useful side effect of actually opening/reading the database. If we were not doing this,
@ -1350,7 +1350,7 @@ void SQLiteDB::open(bool writable) {
// Now that the file itself is open and locked, let sqlite open the database
// Note that VFSAsync will also call g_network->open (including for the WAL), so its flags are important, too
int result = sqlite3_open_v2(apath.c_str(), &db, (writable ? SQLITE_OPEN_READWRITE : SQLITE_OPEN_READONLY), NULL);
int result = sqlite3_open_v2(apath.c_str(), &db, (writable ? SQLITE_OPEN_READWRITE : SQLITE_OPEN_READONLY), nullptr);
checkError("open", result);
int chunkSize;
@ -1400,7 +1400,7 @@ void SQLiteDB::open(bool writable) {
void SQLiteDB::createFromScratch() {
int sqliteFlags = SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE;
checkError("open", sqlite3_open_v2(filename.c_str(), &db, sqliteFlags, NULL));
checkError("open", sqlite3_open_v2(filename.c_str(), &db, sqliteFlags, nullptr));
Statement(*this, "PRAGMA page_size = 4096").nextRow(); //fast
btree = db->aDb[0].pBt;
@ -1593,7 +1593,7 @@ private:
springCleaningStats(springCleaningStats),
diskBytesUsed(diskBytesUsed),
freeListPages(freeListPages),
cursor(NULL),
cursor(nullptr),
dbgid(dbgid),
readThreads(*pReadThreads),
checkAllChecksumsOnOpen(checkAllChecksumsOnOpen),
@ -1684,7 +1684,7 @@ private:
double t1 = now();
cursor->commit();
delete cursor;
cursor = NULL;
cursor = nullptr;
double t2 = now();
@ -1713,7 +1713,7 @@ private:
//Checkpoints the database and resets the wal file back to the beginning
void fullCheckpoint() {
//A checkpoint cannot succeed while there is an outstanding transaction
ASSERT(cursor == NULL);
ASSERT(cursor == nullptr);
resetReaders();
conn.checkpoint(false);

View File

@ -38,7 +38,7 @@ void ServerKnobs::initialize(bool randomize, ClientKnobs* clientKnobs, bool isSi
init( MAX_VERSIONS_IN_FLIGHT_FORCED, 6e5 * VERSIONS_PER_SECOND ); //one week of versions
init( MAX_READ_TRANSACTION_LIFE_VERSIONS, 5 * VERSIONS_PER_SECOND ); if (randomize && BUGGIFY) MAX_READ_TRANSACTION_LIFE_VERSIONS = VERSIONS_PER_SECOND; else if (randomize && BUGGIFY) MAX_READ_TRANSACTION_LIFE_VERSIONS = std::max<int>(1, 0.1 * VERSIONS_PER_SECOND); else if( randomize && BUGGIFY ) MAX_READ_TRANSACTION_LIFE_VERSIONS = 10 * VERSIONS_PER_SECOND;
init( MAX_WRITE_TRANSACTION_LIFE_VERSIONS, 5 * VERSIONS_PER_SECOND ); if (randomize && BUGGIFY) MAX_WRITE_TRANSACTION_LIFE_VERSIONS=std::max<int>(1, 1 * VERSIONS_PER_SECOND);
init( MAX_COMMIT_BATCH_INTERVAL, 2.0 ); if( randomize && BUGGIFY ) MAX_COMMIT_BATCH_INTERVAL = 0.5; // Each master proxy generates a CommitTransactionBatchRequest at least this often, so that versions always advance smoothly
init( MAX_COMMIT_BATCH_INTERVAL, 2.0 ); if( randomize && BUGGIFY ) MAX_COMMIT_BATCH_INTERVAL = 0.5; // Each commit proxy generates a CommitTransactionBatchRequest at least this often, so that versions always advance smoothly
MAX_COMMIT_BATCH_INTERVAL = std::min(MAX_COMMIT_BATCH_INTERVAL, MAX_READ_TRANSACTION_LIFE_VERSIONS/double(2*VERSIONS_PER_SECOND)); // Ensure that the proxy commits 2 times every MAX_READ_TRANSACTION_LIFE_VERSIONS, otherwise the master will not give out versions fast enough
// TLogs
@ -328,7 +328,7 @@ void ServerKnobs::initialize(bool randomize, ClientKnobs* clientKnobs, bool isSi
init( POLLING_FREQUENCY, 2.0 ); if( longLeaderElection ) POLLING_FREQUENCY = 8.0;
init( HEARTBEAT_FREQUENCY, 0.5 ); if( longLeaderElection ) HEARTBEAT_FREQUENCY = 1.0;
// Master Proxy and GRV Proxy
// Commit Proxy and GRV Proxy
init( START_TRANSACTION_BATCH_INTERVAL_MIN, 1e-6 );
init( START_TRANSACTION_BATCH_INTERVAL_MAX, 0.010 );
init( START_TRANSACTION_BATCH_INTERVAL_LATENCY_FRACTION, 0.5 );
@ -438,7 +438,7 @@ void ServerKnobs::initialize(bool randomize, ClientKnobs* clientKnobs, bool isSi
init( EXPECTED_MASTER_FITNESS, ProcessClass::UnsetFit );
init( EXPECTED_TLOG_FITNESS, ProcessClass::UnsetFit );
init( EXPECTED_LOG_ROUTER_FITNESS, ProcessClass::UnsetFit );
init( EXPECTED_PROXY_FITNESS, ProcessClass::UnsetFit );
init( EXPECTED_COMMIT_PROXY_FITNESS, ProcessClass::UnsetFit );
init( EXPECTED_GRV_PROXY_FITNESS, ProcessClass::UnsetFit );
init( EXPECTED_RESOLVER_FITNESS, ProcessClass::UnsetFit );
init( RECRUITMENT_TIMEOUT, 600 ); if( randomize && BUGGIFY ) RECRUITMENT_TIMEOUT = deterministicRandom()->coinflip() ? 60.0 : 1.0;
@ -566,7 +566,8 @@ void ServerKnobs::initialize(bool randomize, ClientKnobs* clientKnobs, bool isSi
init( BEHIND_CHECK_COUNT, 2 );
init( BEHIND_CHECK_VERSIONS, 5 * VERSIONS_PER_SECOND );
init( WAIT_METRICS_WRONG_SHARD_CHANCE, isSimulated ? 1.0 : 0.1 );
init( MIN_TAG_PAGES_RATE, 1.0e4 ); if( randomize && BUGGIFY ) MIN_TAG_PAGES_RATE = 0;
init( MIN_TAG_READ_PAGES_RATE, 1.0e4 ); if( randomize && BUGGIFY ) MIN_TAG_READ_PAGES_RATE = 0;
init( MIN_TAG_WRITE_PAGES_RATE, 3200 ); if( randomize && BUGGIFY ) MIN_TAG_WRITE_PAGES_RATE = 0;
init( TAG_MEASUREMENT_INTERVAL, 30.0 ); if( randomize && BUGGIFY ) TAG_MEASUREMENT_INTERVAL = 1.0;
init( READ_COST_BYTE_FACTOR, 16384 ); if( randomize && BUGGIFY ) READ_COST_BYTE_FACTOR = 4096;
init( PREFIX_COMPRESS_KVS_MEM_SNAPSHOTS, true ); if( randomize && BUGGIFY ) PREFIX_COMPRESS_KVS_MEM_SNAPSHOTS = false;

View File

@ -37,10 +37,11 @@ public:
int64_t MAX_VERSIONS_IN_FLIGHT_FORCED;
int64_t MAX_READ_TRANSACTION_LIFE_VERSIONS;
int64_t MAX_WRITE_TRANSACTION_LIFE_VERSIONS;
double MAX_COMMIT_BATCH_INTERVAL; // Each master proxy generates a CommitTransactionBatchRequest at least this often, so that versions always advance smoothly
double MAX_COMMIT_BATCH_INTERVAL; // Each commit proxy generates a CommitTransactionBatchRequest at least this
// often, so that versions always advance smoothly
// TLogs
double TLOG_TIMEOUT; // tlog OR master proxy failure - master's reaction time
double TLOG_TIMEOUT; // tlog OR commit proxy failure - master's reaction time
double RECOVERY_TLOG_SMART_QUORUM_DELAY; // smaller might be better for bug amplification
double TLOG_STORAGE_MIN_UPDATE_INTERVAL;
double BUGGIFY_TLOG_STORAGE_MIN_UPDATE_INTERVAL;
@ -262,7 +263,7 @@ public:
double POLLING_FREQUENCY;
double HEARTBEAT_FREQUENCY;
// Master Proxy
// Commit Proxy
double START_TRANSACTION_BATCH_INTERVAL_MIN;
double START_TRANSACTION_BATCH_INTERVAL_MAX;
double START_TRANSACTION_BATCH_INTERVAL_LATENCY_FRACTION;
@ -368,7 +369,7 @@ public:
int EXPECTED_MASTER_FITNESS;
int EXPECTED_TLOG_FITNESS;
int EXPECTED_LOG_ROUTER_FITNESS;
int EXPECTED_PROXY_FITNESS;
int EXPECTED_COMMIT_PROXY_FITNESS;
int EXPECTED_GRV_PROXY_FITNESS;
int EXPECTED_RESOLVER_FITNESS;
double RECRUITMENT_TIMEOUT;
@ -495,7 +496,8 @@ public:
int BEHIND_CHECK_COUNT;
int64_t BEHIND_CHECK_VERSIONS;
double WAIT_METRICS_WRONG_SHARD_CHANCE;
int64_t MIN_TAG_PAGES_RATE;
int64_t MIN_TAG_READ_PAGES_RATE;
int64_t MIN_TAG_WRITE_PAGES_RATE;
double TAG_MEASUREMENT_INTERVAL;
int64_t READ_COST_BYTE_FACTOR;
bool PREFIX_COMPRESS_KVS_MEM_SNAPSHOTS;
@ -609,7 +611,7 @@ public:
double LATENCY_METRICS_LOGGING_INTERVAL;
ServerKnobs();
void initialize(bool randomize = false, ClientKnobs* clientKnobs = NULL, bool isSimulated = false);
void initialize(bool randomize = false, ClientKnobs* clientKnobs = nullptr, bool isSimulated = false);
};
extern ServerKnobs const* SERVER_KNOBS;

View File

@ -817,7 +817,7 @@ struct LengthPrefixedStringRef {
int expectedSize() const { ASSERT(length); return *length; }
uint32_t* getLengthPtr() const { return length; }
LengthPrefixedStringRef() : length(NULL) {}
LengthPrefixedStringRef() : length(nullptr) {}
LengthPrefixedStringRef(uint32_t* length) : length(length) {}
};

View File

@ -38,7 +38,7 @@ struct MasterInterface {
RequestStream< struct ChangeCoordinatorsRequest > changeCoordinators;
RequestStream< struct GetCommitVersionRequest > getCommitVersion;
RequestStream<struct BackupWorkerDoneRequest> notifyBackupWorkerDone;
// Get the centralized live committed version reported by proxies.
// Get the centralized live committed version reported by commit proxies.
RequestStream< struct GetRawCommittedVersionRequest > getLiveCommittedVersion;
// Report a proxy's committed version.
RequestStream< struct ReportRawCommittedVersionRequest> reportLiveCommittedVersion;

View File

@ -75,7 +75,7 @@ struct AlternativeTLogQueueEntryRef {
Version knownCommittedVersion;
std::vector<TagsAndMessage>* alternativeMessages;
AlternativeTLogQueueEntryRef() : version(0), knownCommittedVersion(0), alternativeMessages(NULL) {}
AlternativeTLogQueueEntryRef() : version(0), knownCommittedVersion(0), alternativeMessages(nullptr) {}
template <class Ar>
void serialize(Ar& ar) {

View File

@ -76,7 +76,7 @@ struct AlternativeTLogQueueEntryRef {
Version knownCommittedVersion;
std::vector<TagsAndMessage>* alternativeMessages;
AlternativeTLogQueueEntryRef() : version(0), knownCommittedVersion(0), alternativeMessages(NULL) {}
AlternativeTLogQueueEntryRef() : version(0), knownCommittedVersion(0), alternativeMessages(nullptr) {}
template <class Ar>
void serialize(Ar& ar) {

View File

@ -198,7 +198,7 @@ struct ProxyCommitData {
Version recoveryTransactionVersion, RequestStream<CommitTransactionRequest> commit,
Reference<AsyncVar<ServerDBInfo>> db, bool firstProxy)
: dbgid(dbgid), stats(dbgid, &version, &committedVersion, &commitBatchesMemBytesCount), master(master),
logAdapter(NULL), txnStateStore(NULL), popRemoteTxs(false), committedVersion(recoveryTransactionVersion),
logAdapter(nullptr), txnStateStore(nullptr), popRemoteTxs(false), committedVersion(recoveryTransactionVersion),
version(0), minKnownCommittedVersion(0), lastVersionTime(0), commitVersionRequestNumber(1),
mostRecentProcessedRequestNumber(0), getConsistentReadVersion(getConsistentReadVersion), commit(commit),
lastCoalesceTime(0), localCommitBatchesStarted(0), locked(false),

View File

@ -527,7 +527,7 @@ struct RatekeeperLimits {
{}
};
struct ProxyInfo {
struct GrvProxyInfo {
int64_t totalTransactions;
int64_t batchTransactions;
uint64_t lastThrottledTagChangeId;
@ -535,7 +535,9 @@ struct ProxyInfo {
double lastUpdateTime;
double lastTagPushTime;
ProxyInfo() : totalTransactions(0), batchTransactions(0), lastUpdateTime(0), lastThrottledTagChangeId(0), lastTagPushTime(0) {}
GrvProxyInfo()
: totalTransactions(0), batchTransactions(0), lastUpdateTime(0), lastThrottledTagChangeId(0), lastTagPushTime(0) {
}
};
struct RatekeeperData {
@ -545,7 +547,7 @@ struct RatekeeperData {
Map<UID, StorageQueueInfo> storageQueueInfo;
Map<UID, TLogQueueInfo> tlogQueueInfo;
std::map<UID, ProxyInfo> proxyInfo;
std::map<UID, GrvProxyInfo> grvProxyInfo;
Smoother smoothReleasedTransactions, smoothBatchReleasedTransactions, smoothTotalDurableBytes;
HealthMetrics healthMetrics;
DatabaseConfiguration configuration;
@ -812,7 +814,7 @@ ACTOR Future<Void> monitorThrottlingChanges(RatekeeperData *self) {
if(tagValue.expirationTime == 0 || tagValue.expirationTime > now() + tagValue.initialDuration) {
TEST(true); // Converting tag throttle duration to absolute time
tagValue.expirationTime = now() + tagValue.initialDuration;
BinaryWriter wr(IncludeVersion(ProtocolVersion::withTagThrottleValue()));
BinaryWriter wr(IncludeVersion(ProtocolVersion::withTagThrottleValueReason()));
wr << tagValue;
state Value value = wr.toValue();
@ -877,7 +879,7 @@ Future<Void> refreshStorageServerCommitCost(RatekeeperData* self) {
maxCost = cost;
}
}
if (maxRate > SERVER_KNOBS->MIN_TAG_PAGES_RATE) {
if (maxRate > SERVER_KNOBS->MIN_TAG_WRITE_PAGES_RATE) {
it->value.busiestWriteTag = busiestTag;
// TraceEvent("RefreshSSCommitCost").detail("TotalWriteCost", it->value.totalWriteCost).detail("TotalWriteOps",it->value.totalWriteOps);
ASSERT(it->value.totalWriteCosts > 0);
@ -906,6 +908,8 @@ Future<Void> refreshStorageServerCommitCost(RatekeeperData* self) {
void tryAutoThrottleTag(RatekeeperData* self, TransactionTag tag, double rate, double busyness,
TagThrottledReason reason) {
// NOTE: before the comparison with MIN_TAG_COST, the busiest tag rate is also compared against MIN_TAG_PAGES_RATE;
// currently MIN_TAG_PAGES_RATE > MIN_TAG_COST in our default knobs.
if (busyness > SERVER_KNOBS->AUTO_THROTTLE_TARGET_TAG_BUSYNESS && rate > SERVER_KNOBS->MIN_TAG_COST) {
TEST(true); // Transaction tag auto-throttled
Optional<double> clientRate = self->throttledTags.autoThrottleTag(self->id, tag, busyness);
@ -922,18 +926,17 @@ void tryAutoThrottleTag(RatekeeperData* self, TransactionTag tag, double rate, d
void tryAutoThrottleTag(RatekeeperData* self, StorageQueueInfo& ss, int64_t storageQueue,
int64_t storageDurabilityLag) {
// TODO: reasonable criteria for write saturation should be investigated experimentally
// if (ss.busiestWriteTag.present() && storageQueue > SERVER_KNOBS->AUTO_TAG_THROTTLE_STORAGE_QUEUE_BYTES &&
// storageDurabilityLag > SERVER_KNOBS->AUTO_TAG_THROTTLE_DURABILITY_LAG_VERSIONS) {
// // write-saturated
// tryAutoThrottleTag(self, ss.busiestWriteTag.get(), ss.busiestWriteTagRate,
//ss.busiestWriteTagFractionalBusyness); } else
if (ss.busiestReadTag.present() &&
(storageQueue > SERVER_KNOBS->AUTO_TAG_THROTTLE_STORAGE_QUEUE_BYTES ||
storageDurabilityLag > SERVER_KNOBS->AUTO_TAG_THROTTLE_DURABILITY_LAG_VERSIONS)) {
// read saturated
tryAutoThrottleTag(self, ss.busiestReadTag.get(), ss.busiestReadTagRate, ss.busiestReadTagFractionalBusyness,
TagThrottledReason::BUSY_READ);
// NOTE: we keep it simple and don't differentiate between write saturation and read saturation at the moment. In most situations, this works.
// More indicators besides queue size and durability lag could be investigated in the future.
if (storageQueue > SERVER_KNOBS->AUTO_TAG_THROTTLE_STORAGE_QUEUE_BYTES || storageDurabilityLag > SERVER_KNOBS->AUTO_TAG_THROTTLE_DURABILITY_LAG_VERSIONS) {
if(ss.busiestWriteTag.present()) {
tryAutoThrottleTag(self, ss.busiestWriteTag.get(), ss.busiestWriteTagRate,
ss.busiestWriteTagFractionalBusyness, TagThrottledReason::BUSY_WRITE);
}
if(ss.busiestReadTag.present()) {
tryAutoThrottleTag(self, ss.busiestReadTag.get(), ss.busiestReadTagRate,
ss.busiestReadTagFractionalBusyness, TagThrottledReason::BUSY_READ);
}
}
}
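The rewritten block above throttles both the busiest write tag and the busiest read tag as soon as either the storage queue or the durability lag crosses its knob. A minimal standalone sketch of that decision, with hypothetical thresholds and a plain struct standing in for StorageQueueInfo:

// Sketch only: the saturation test used above, outside the actor framework.
// Thresholds and struct layout are illustrative, not the real knobs or types.
#include <cstdint>
#include <optional>
#include <string>

struct TagSample { std::string tag; double rate; double fractionalBusyness; };

struct StorageSample {
	int64_t storageQueueBytes;
	int64_t storageDurabilityLagVersions;
	std::optional<TagSample> busiestWriteTag;
	std::optional<TagSample> busiestReadTag;
};

constexpr int64_t QUEUE_BYTES_LIMIT = 800000000;    // hypothetical stand-in for AUTO_TAG_THROTTLE_STORAGE_QUEUE_BYTES
constexpr int64_t DURABILITY_LAG_LIMIT = 250000000; // hypothetical stand-in for AUTO_TAG_THROTTLE_DURABILITY_LAG_VERSIONS

void maybeAutoThrottle(const StorageSample& ss) {
	if (ss.storageQueueBytes <= QUEUE_BYTES_LIMIT && ss.storageDurabilityLagVersions <= DURABILITY_LAG_LIMIT)
		return; // not saturated
	if (ss.busiestWriteTag) { /* the real code calls tryAutoThrottleTag(..., BUSY_WRITE) here */ }
	if (ss.busiestReadTag)  { /* the real code calls tryAutoThrottleTag(..., BUSY_READ) here */ }
}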
@ -1262,12 +1265,12 @@ void updateRate(RatekeeperData* self, RatekeeperLimits* limits) {
TraceEvent(name.c_str(), self->id)
.detail("TPSLimit", limits->tpsLimit)
.detail("Reason", limitReason)
.detail("ReasonServerID", reasonID==UID() ? std::string() : Traceable<UID>::toString(reasonID))
.detail("ReasonServerID", reasonID == UID() ? std::string() : Traceable<UID>::toString(reasonID))
.detail("ReleasedTPS", self->smoothReleasedTransactions.smoothRate())
.detail("ReleasedBatchTPS", self->smoothBatchReleasedTransactions.smoothRate())
.detail("TPSBasis", actualTps)
.detail("StorageServers", sscount)
.detail("GrvProxies", self->proxyInfo.size())
.detail("GrvProxies", self->grvProxyInfo.size())
.detail("TLogs", tlcount)
.detail("WorstFreeSpaceStorageServer", worstFreeSpaceStorageServer)
.detail("WorstFreeSpaceTLog", worstFreeSpaceTLog)
@ -1369,9 +1372,9 @@ ACTOR Future<Void> ratekeeper(RatekeeperInterface rkInterf, Reference<AsyncVar<S
lastLimited = self.smoothReleasedTransactions.smoothRate() > SERVER_KNOBS->LAST_LIMITED_RATIO * self.batchLimits.tpsLimit;
double tooOld = now() - 1.0;
for(auto p=self.proxyInfo.begin(); p!=self.proxyInfo.end(); ) {
for (auto p = self.grvProxyInfo.begin(); p != self.grvProxyInfo.end();) {
if (p->second.lastUpdateTime < tooOld)
p = self.proxyInfo.erase(p);
p = self.grvProxyInfo.erase(p);
else
++p;
}
@ -1380,7 +1383,7 @@ ACTOR Future<Void> ratekeeper(RatekeeperInterface rkInterf, Reference<AsyncVar<S
when (GetRateInfoRequest req = waitNext(rkInterf.getRateInfo.getFuture())) {
GetRateInfoReply reply;
auto& p = self.proxyInfo[ req.requesterID ];
auto& p = self.grvProxyInfo[req.requesterID];
//TraceEvent("RKMPU", req.requesterID).detail("TRT", req.totalReleasedTransactions).detail("Last", p.totalTransactions).detail("Delta", req.totalReleasedTransactions - p.totalTransactions);
if (p.totalTransactions > 0) {
self.smoothReleasedTransactions.addDelta( req.totalReleasedTransactions - p.totalTransactions );
@ -1397,8 +1400,8 @@ ACTOR Future<Void> ratekeeper(RatekeeperInterface rkInterf, Reference<AsyncVar<S
p.batchTransactions = req.batchReleasedTransactions;
p.lastUpdateTime = now();
reply.transactionRate = self.normalLimits.tpsLimit / self.proxyInfo.size();
reply.batchTransactionRate = self.batchLimits.tpsLimit / self.proxyInfo.size();
reply.transactionRate = self.normalLimits.tpsLimit / self.grvProxyInfo.size();
reply.batchTransactionRate = self.batchLimits.tpsLimit / self.grvProxyInfo.size();
reply.leaseDuration = SERVER_KNOBS->METRIC_UPDATE_RATE;
if(p.lastThrottledTagChangeId != self.throttledTagChangeId || now() > p.lastTagPushTime + SERVER_KNOBS->TAG_THROTTLE_PUSH_INTERVAL) {
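Because the ratekeeper hands each GRV proxy an equal slice of the cluster-wide limit, the per-proxy rates sent in the reply above are simply the limits divided by the number of GRV proxies it has recently heard from. A small worked example with hypothetical limits:

// Sketch only: the per-proxy rate split performed above, with made-up limits.
#include <cstdio>
int main() {
	const double normalTpsLimit = 100000.0; // hypothetical cluster-wide normal-priority limit
	const double batchTpsLimit = 20000.0;   // hypothetical cluster-wide batch-priority limit
	const int grvProxies = 4;
	std::printf("transactionRate per proxy:      %.0f\n", normalTpsLimit / grvProxies); // 25000
	std::printf("batchTransactionRate per proxy: %.0f\n", batchTpsLimit / grvProxies);  // 5000
	return 0;
}

Stale entries are swept out first (the tooOld loop above), so a proxy that stops reporting does not keep shrinking the share handed to the live ones.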

View File

@ -44,7 +44,7 @@ struct ProxyRequestsInfo {
namespace{
struct Resolver : ReferenceCounted<Resolver> {
UID dbgid;
int proxyCount, resolverCount;
int commitProxyCount, resolverCount;
NotifiedVersion version;
AsyncVar<Version> neededVersion;
@ -77,8 +77,8 @@ struct Resolver : ReferenceCounted<Resolver> {
Future<Void> logger;
Resolver( UID dbgid, int proxyCount, int resolverCount )
: dbgid(dbgid), proxyCount(proxyCount), resolverCount(resolverCount), version(-1), conflictSet( newConflictSet() ), iopsSample( SERVER_KNOBS->KEY_BYTES_PER_SAMPLE ), debugMinRecentStateVersion(0),
Resolver( UID dbgid, int commitProxyCount, int resolverCount )
: dbgid(dbgid), commitProxyCount(commitProxyCount), resolverCount(resolverCount), version(-1), conflictSet( newConflictSet() ), iopsSample( SERVER_KNOBS->KEY_BYTES_PER_SAMPLE ), debugMinRecentStateVersion(0),
cc("Resolver", dbgid.toString()),
resolveBatchIn("ResolveBatchIn", cc), resolveBatchStart("ResolveBatchStart", cc), resolvedTransactions("ResolvedTransactions", cc), resolvedBytes("ResolvedBytes", cc),
resolvedReadConflictRanges("ResolvedReadConflictRanges", cc), resolvedWriteConflictRanges("ResolvedWriteConflictRanges", cc), transactionsAccepted("TransactionsAccepted", cc),
@ -238,12 +238,12 @@ ACTOR Future<Void> resolveBatch(
//TraceEvent("ResolveBatch", self->dbgid).detail("PrevVersion", req.prevVersion).detail("Version", req.version).detail("StateTransactionVersions", self->recentStateTransactionSizes.size()).detail("StateBytes", stateBytes).detail("FirstVersion", self->recentStateTransactionSizes.empty() ? -1 : self->recentStateTransactionSizes.front().first).detail("StateMutationsIn", req.txnStateTransactions.size()).detail("StateMutationsOut", reply.stateMutations.size()).detail("From", proxyAddress);
ASSERT(!proxyInfo.outstandingBatches.empty());
ASSERT(self->proxyInfoMap.size() <= self->proxyCount+1);
ASSERT(self->proxyInfoMap.size() <= self->commitProxyCount+1);
// SOMEDAY: This is O(n) in number of proxies. O(log n) solution using appropriate data structure?
Version oldestProxyVersion = req.version;
for(auto itr = self->proxyInfoMap.begin(); itr != self->proxyInfoMap.end(); ++itr) {
//TraceEvent("ResolveBatchProxyVersion", self->dbgid).detail("Proxy", itr->first).detail("Version", itr->second.lastVersion);
//TraceEvent("ResolveBatchProxyVersion", self->dbgid).detail("CommitProxy", itr->first).detail("Version", itr->second.lastVersion);
if(itr->first.isValid()) { // Don't consider the first master request
oldestProxyVersion = std::min(itr->second.lastVersion, oldestProxyVersion);
}
@ -257,7 +257,7 @@ ACTOR Future<Void> resolveBatch(
TEST(oldestProxyVersion != req.version); // The proxy that sent this request does not have the oldest current version
bool anyPopped = false;
if(firstUnseenVersion <= oldestProxyVersion && self->proxyInfoMap.size() == self->proxyCount+1) {
if(firstUnseenVersion <= oldestProxyVersion && self->proxyInfoMap.size() == self->commitProxyCount+1) {
TEST(true); // Deleting old state transactions
self->recentStateTransactions.erase( self->recentStateTransactions.begin(), self->recentStateTransactions.upper_bound( oldestProxyVersion ) );
self->debugMinRecentStateVersion = oldestProxyVersion + 1;
@ -311,7 +311,7 @@ ACTOR Future<Void> resolverCore(
ResolverInterface resolver,
InitializeResolverRequest initReq)
{
state Reference<Resolver> self( new Resolver(resolver.id(), initReq.proxyCount, initReq.resolverCount) );
state Reference<Resolver> self(new Resolver(resolver.id(), initReq.commitProxyCount, initReq.resolverCount));
state ActorCollection actors(false);
state Future<Void> doPollMetrics = self->resolverCount > 1 ? Void() : Future<Void>(Never());
actors.add( waitFailureServer(resolver.waitFailure.getFuture()) );

View File

@ -89,9 +89,10 @@ ACTOR Future<Void> restoreApplierCore(RestoreApplierInterface applierInterf, int
break;
}
}
TraceEvent("RestoreApplierCore", self->id()).detail("Request", requestTypeStr); // For debug only
//TraceEvent("RestoreApplierCore", self->id()).detail("Request", requestTypeStr); // For debug only
} catch (Error& e) {
TraceEvent(SevWarn, "FastRestoreApplierError", self->id())
bool isError = e.code() != error_code_operation_cancelled;
TraceEvent(isError ? SevError : SevWarnAlways, "FastRestoreApplierError", self->id())
.detail("RequestType", requestTypeStr)
.error(e, true);
actors.clear(false);
@ -477,7 +478,7 @@ ACTOR static Future<Void> applyStagingKeysBatch(std::map<Key, StagingKey>::itera
state Reference<ReadYourWritesTransaction> tr(new ReadYourWritesTransaction(cx));
state int sets = 0;
state int clears = 0;
state Key endKey = begin->second.key;
state Key endKey = begin->first;
TraceEvent(SevFRDebugInfo, "FastRestoreApplierPhaseApplyStagingKeysBatch", applierID).detail("Begin", begin->first);
loop {
try {
@ -507,7 +508,7 @@ ACTOR static Future<Void> applyStagingKeysBatch(std::map<Key, StagingKey>::itera
} else {
ASSERT(false);
}
endKey = iter != end ? iter->second.key : endKey;
endKey = iter != end ? iter->first : endKey;
iter++;
if (sets > 10000000 || clears > 10000000) {
TraceEvent(SevError, "FastRestoreApplierPhaseApplyStagingKeysBatchInfiniteLoop", applierID)
@ -521,6 +522,7 @@ ACTOR static Future<Void> applyStagingKeysBatch(std::map<Key, StagingKey>::itera
.detail("End", endKey)
.detail("Sets", sets)
.detail("Clears", clears);
tr->addWriteConflictRange(KeyRangeRef(begin->first, keyAfter(endKey))); // Reduce resolver load
wait(tr->commit());
cc->appliedTxns += 1;
break;
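The addWriteConflictRange call added above contributes one range covering everything from begin->first up to keyAfter(endKey); since overlapping conflict ranges are merged before the commit is sent, the resolver presumably ends up checking something close to one range for the whole batch instead of one per written key, which is what the "Reduce resolver load" comment refers to. A toy illustration of the difference, with illustrative keys:

// Sketch only: per-key conflict ranges vs. one covering range.
#include <cstdio>
#include <string>
#include <vector>
struct Range { std::string begin, end; };
int main() {
	std::vector<std::string> writtenKeys = { "k0001", "k0002", "k0003" }; // a real batch has thousands
	std::vector<Range> perKey; // default behavior: one single-key range per write
	for (const auto& k : writtenKeys) perKey.push_back({ k, k + '\x00' }); // [k, keyAfter(k))
	Range covering{ writtenKeys.front(), writtenKeys.back() + '\x00' };    // one explicit covering range
	std::printf("per-key ranges: %zu, covering ranges: 1 (begin=%s)\n", perKey.size(), covering.begin.c_str());
	return 0;
}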

View File

@ -55,7 +55,7 @@ struct StagingKey {
LogMessageVersion version; // largest version of set or clear for the key
std::map<LogMessageVersion, Standalone<MutationRef>> pendingMutations; // mutations not set or clear type
explicit StagingKey() : version(0), type(MutationRef::MAX_ATOMIC_OP) {}
explicit StagingKey(Key key) : key(key), version(0), type(MutationRef::MAX_ATOMIC_OP) {}
// Add mutation m at newVersion to stagingKey
// Assume: SetVersionstampedKey and SetVersionstampedValue have been converted to set
@ -148,7 +148,7 @@ struct StagingKey {
}
for (; lb != pendingMutations.end(); lb++) {
MutationRef mutation = lb->second;
if (type == MutationRef::CompareAndClear) { // Special atomicOp
if (mutation.type == MutationRef::CompareAndClear) { // Special atomicOp
Arena arena;
Optional<StringRef> inputVal;
if (hasBaseValue()) {
@ -167,14 +167,14 @@ struct StagingKey {
val = applyAtomicOp(inputVal, mutation.param2, (MutationRef::Type)mutation.type);
type = MutationRef::SetValue; // Precomputed result should be set to DB.
} else if (mutation.type == MutationRef::SetValue || mutation.type == MutationRef::ClearRange) {
type = MutationRef::SetValue; // Precomputed result should be set to DB.
type = MutationRef::SetValue;
TraceEvent(SevError, "FastRestoreApplierPrecomputeResultUnexpectedSet", applierID)
.detail("BatchIndex", batchIndex)
.detail("Context", context)
.detail("MutationType", getTypeString(mutation.type))
.detail("Version", lb->first.toString());
} else {
TraceEvent(SevWarnAlways, "FastRestoreApplierPrecomputeResultSkipUnexpectedBackupMutation", applierID)
TraceEvent(SevError, "FastRestoreApplierPrecomputeResultSkipUnexpectedBackupMutation", applierID)
.detail("BatchIndex", batchIndex)
.detail("Context", context)
.detail("MutationType", getTypeString(mutation.type))
@ -291,7 +291,7 @@ struct ApplierBatchData : public ReferenceCounted<ApplierBatchData> {
void addMutation(MutationRef m, LogMessageVersion ver) {
if (!isRangeMutation(m)) {
auto item = stagingKeys.emplace(m.param1, StagingKey());
auto item = stagingKeys.emplace(m.param1, StagingKey(m.param1));
item.first->second.add(m, ver);
} else {
stagingKeyRanges.insert(StagingKeyRange(m, ver));

View File

@ -312,6 +312,8 @@ ACTOR Future<Standalone<VectorRef<KeyValueRef>>> decodeLogFileBlock(Reference<IA
int rLen = wait(file->read(mutateString(buf), len, offset));
if (rLen != len) throw restore_bad_read();
simulateBlobFailure();
Standalone<VectorRef<KeyValueRef>> results({}, buf.arena());
state StringRefReader reader(buf, restore_corrupted_data());

View File

@ -307,6 +307,12 @@ Future<Void> getBatchReplies(RequestStream<Request> Interface::*channel, std::ma
if (ongoingReplies[j].isReady()) {
std::get<2>(replyDurations[ongoingRepliesIndex[j]]) = now();
--oustandingReplies;
} else if (ongoingReplies[j].isError()) {
// When this happens,
// the above assertion ASSERT(ongoingReplies.size() == oustandingReplies) will fail
TraceEvent(SevError, "FastRestoreGetBatchRepliesReplyError")
.detail("OngoingReplyIndex", j)
.detail("FutureError", ongoingReplies[j].getError().what());
}
}
}

View File

@ -58,6 +58,9 @@ ACTOR Future<Void> sendMutationsToApplier(
ACTOR static Future<Void> _parseLogFileToMutationsOnLoader(NotifiedVersion* pProcessedFileOffset,
SerializedMutationListMap* mutationMap,
Reference<IBackupContainer> bc, RestoreAsset asset);
ACTOR static Future<Void> parseLogFileToMutationsOnLoader(NotifiedVersion* pProcessedFileOffset,
SerializedMutationListMap* mutationMap,
Reference<IBackupContainer> bc, RestoreAsset asset);
ACTOR static Future<Void> _parseRangeFileToMutationsOnLoader(
std::map<LoadingParam, VersionedMutationsMap>::iterator kvOpsIter,
std::map<LoadingParam, SampledMutationsVec>::iterator samplesIter, LoaderCounters* cc,
@ -280,8 +283,8 @@ ACTOR Future<Void> restoreLoaderCore(RestoreLoaderInterface loaderInterf, int no
when(wait(error)) { TraceEvent("FastRestoreLoaderActorCollectionError", self->id()); }
}
} catch (Error& e) {
TraceEvent(e.code() == error_code_broken_promise ? SevError : SevWarnAlways, "FastRestoreLoaderError",
self->id())
bool isError = e.code() != error_code_operation_cancelled; // == error_code_broken_promise
TraceEvent(isError ? SevError : SevWarnAlways, "FastRestoreLoaderError", self->id())
.detail("RequestType", requestTypeStr)
.error(e, true);
actors.clear(false);
@ -354,6 +357,8 @@ ACTOR static Future<Void> _parsePartitionedLogFileOnLoader(
int rLen = wait(file->read(mutateString(buf), asset.len, asset.offset));
if (rLen != asset.len) throw restore_bad_read();
simulateBlobFailure();
TraceEvent("FastRestoreLoaderDecodingLogFile")
.detail("BatchIndex", asset.batchIndex)
.detail("Filename", asset.filename)
@ -460,6 +465,39 @@ ACTOR static Future<Void> _parsePartitionedLogFileOnLoader(
return Void();
}
// Wrapper of _parsePartitionedLogFileOnLoader that retries on blob errors
ACTOR static Future<Void> parsePartitionedLogFileOnLoader(
KeyRangeMap<Version>* pRangeVersions, NotifiedVersion* processedFileOffset,
std::map<LoadingParam, VersionedMutationsMap>::iterator kvOpsIter,
std::map<LoadingParam, SampledMutationsVec>::iterator samplesIter, LoaderCounters* cc,
Reference<IBackupContainer> bc, RestoreAsset asset) {
state int readFileRetries = 0;
loop {
try {
wait(_parsePartitionedLogFileOnLoader(pRangeVersions, processedFileOffset, kvOpsIter, samplesIter, cc, bc,
asset));
break;
} catch (Error& e) {
if (e.code() == error_code_restore_bad_read || e.code() == error_code_restore_unsupported_file_version ||
e.code() == error_code_restore_corrupted_data_padding) { // non-retriable error
TraceEvent(SevError, "FastRestoreFileRestoreCorruptedPartitionedLogFileBlock").error(e);
throw;
} else if (e.code() == error_code_http_request_failed || e.code() == error_code_connection_failed ||
e.code() == error_code_timed_out || e.code() == error_code_lookup_failed) {
// blob http request failure, retry
TraceEvent(SevWarnAlways, "FastRestoreDecodedPartitionedLogFileConnectionFailure")
.detail("Retries", ++readFileRetries)
.error(e);
wait(delayJittered(0.1));
} else {
TraceEvent(SevError, "FastRestoreParsePartitionedLogFileOnLoaderUnexpectedError").error(e);
throw;
}
}
}
return Void();
}
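This is the first of three near-identical retry wrappers added in this diff (partitioned log files here, range files and plain mutation log files further down), all following the same classify-and-retry shape. A hedged sketch of that shared pattern as ordinary C++; the helper name, the std::function-based signature, and the enum are illustrative stand-ins for the ACTOR functions and Flow error codes used above:

// Sketch only: the retry-on-transient-blob-error pattern shared by the wrappers in this diff.
#include <cstdio>
#include <functional>
#include <stdexcept>

enum class ReadFailure { Corrupted, TransientNetwork, Other };

void retryOnTransientBlobError(const std::function<void()>& parse,
                               const std::function<ReadFailure(const std::exception&)>& classify) {
	int retries = 0;
	for (;;) {
		try {
			parse(); // stands in for the _parse*OnLoader call
			return;
		} catch (const std::exception& e) {
			switch (classify(e)) {
			case ReadFailure::Corrupted:
				throw; // non-retriable: corrupted data or unsupported file version
			case ReadFailure::TransientNetwork:
				std::fprintf(stderr, "transient blob failure, retry %d\n", ++retries);
				break; // the real code waits delayJittered(0.1) before retrying
			default:
				throw; // anything unexpected is rethrown after logging
			}
		}
	}
}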
ACTOR Future<Void> _processLoadingParam(KeyRangeMap<Version>* pRangeVersions, LoadingParam param,
Reference<LoaderBatchData> batchData, UID loaderID,
Reference<IBackupContainer> bc) {
@ -496,12 +534,12 @@ ACTOR Future<Void> _processLoadingParam(KeyRangeMap<Version>* pRangeVersions, Lo
} else {
// TODO: Sanity check that the log file's range overlaps the restored version range
if (param.isPartitionedLog()) {
fileParserFutures.push_back(_parsePartitionedLogFileOnLoader(pRangeVersions, &processedFileOffset,
fileParserFutures.push_back(parsePartitionedLogFileOnLoader(pRangeVersions, &processedFileOffset,
kvOpsPerLPIter, samplesIter,
&batchData->counters, bc, subAsset));
} else {
fileParserFutures.push_back(
_parseLogFileToMutationsOnLoader(&processedFileOffset, &mutationMap, bc, subAsset));
parseLogFileToMutationsOnLoader(&processedFileOffset, &mutationMap, bc, subAsset));
}
}
}
@ -586,9 +624,10 @@ ACTOR Future<Void> handleLoadFileRequest(RestoreLoadFileRequest req, Reference<R
state int samplesMessages = fSendSamples.size();
wait(waitForAll(fSendSamples));
} catch (Error& e) { // In case ci.samples throws broken_promise due to unstable network
if (e.code() == error_code_broken_promise) {
if (e.code() == error_code_broken_promise || e.code() == error_code_operation_cancelled) {
TraceEvent(SevWarnAlways, "FastRestoreLoaderPhaseLoadFileSendSamples")
.detail("SamplesMessages", samplesMessages);
.detail("SamplesMessages", samplesMessages)
.error(e, true);
} else {
TraceEvent(SevError, "FastRestoreLoaderPhaseLoadFileSendSamplesUnexpectedError").error(e, true);
}
@ -1107,10 +1146,14 @@ ACTOR static Future<Void> _parseRangeFileToMutationsOnLoader(
// Sanity check the range file is within the restored version range
ASSERT_WE_THINK(asset.isInVersionRange(version));
// The set of key value version is rangeFile.version. the key-value set in the same range file has the same version
Reference<IAsyncFile> inFile = wait(bc->readFile(asset.filename));
state Standalone<VectorRef<KeyValueRef>> blockData;
// should retry here
state int readFileRetries = 0;
loop {
try {
// The set of key value version is rangeFile.version. the key-value set in the same range file has the same
// version
Reference<IAsyncFile> inFile = wait(bc->readFile(asset.filename));
Standalone<VectorRef<KeyValueRef>> kvs =
wait(fileBackup::decodeRangeFileBlock(inFile, asset.offset, asset.len));
TraceEvent("FastRestoreLoaderDecodedRangeFile")
@ -1118,9 +1161,24 @@ ACTOR static Future<Void> _parseRangeFileToMutationsOnLoader(
.detail("Filename", asset.filename)
.detail("DataSize", kvs.contents().size());
blockData = kvs;
break;
} catch (Error& e) {
TraceEvent(SevError, "FileRestoreCorruptRangeFileBlock").error(e);
if (e.code() == error_code_restore_bad_read || e.code() == error_code_restore_unsupported_file_version ||
e.code() == error_code_restore_corrupted_data_padding) { // non-retriable error
TraceEvent(SevError, "FastRestoreFileRestoreCorruptedRangeFileBlock").error(e);
throw;
} else if (e.code() == error_code_http_request_failed || e.code() == error_code_connection_failed ||
e.code() == error_code_timed_out || e.code() == error_code_lookup_failed) {
// blob http request failure, retry
TraceEvent(SevWarnAlways, "FastRestoreDecodedRangeFileConnectionFailure")
.detail("Retries", ++readFileRetries)
.error(e);
wait(delayJittered(0.1));
} else {
TraceEvent(SevError, "FastRestoreParseRangeFileOnLoaderUnexpectedError").error(e);
throw;
}
}
}
// First and last key are the range for this file
@ -1218,6 +1276,36 @@ ACTOR static Future<Void> _parseLogFileToMutationsOnLoader(NotifiedVersion* pPro
return Void();
}
// Retry wrapper for _parseLogFileToMutationsOnLoader
ACTOR static Future<Void> parseLogFileToMutationsOnLoader(NotifiedVersion* pProcessedFileOffset,
SerializedMutationListMap* pMutationMap,
Reference<IBackupContainer> bc, RestoreAsset asset) {
state int readFileRetries = 0;
loop {
try {
wait(_parseLogFileToMutationsOnLoader(pProcessedFileOffset, pMutationMap, bc, asset));
break;
} catch (Error& e) {
if (e.code() == error_code_restore_bad_read || e.code() == error_code_restore_unsupported_file_version ||
e.code() == error_code_restore_corrupted_data_padding) { // non-retriable error
TraceEvent(SevError, "FastRestoreFileRestoreCorruptedLogFileBlock").error(e);
throw;
} else if (e.code() == error_code_http_request_failed || e.code() == error_code_connection_failed ||
e.code() == error_code_timed_out || e.code() == error_code_lookup_failed) {
// blob http request failure, retry
TraceEvent(SevWarnAlways, "FastRestoreDecodedLogFileConnectionFailure")
.detail("Retries", ++readFileRetries)
.error(e);
wait(delayJittered(0.1));
} else {
TraceEvent(SevError, "FastRestoreParseLogFileToMutationsOnLoaderUnexpectedError").error(e);
throw;
}
}
}
return Void();
}
// Return applier IDs that are used to apply key-values
std::vector<UID> getApplierIDs(std::map<Key, UID>& rangeToApplier) {
std::vector<UID> applierIDs;

View File

@ -172,7 +172,7 @@ ACTOR Future<ISimulator::KillType> simulatedFDBDRebooter(Reference<ClusterConnec
.detail("PackageName", FDB_VT_PACKAGE_NAME)
.detail("DataFolder", *dataFolder)
.detail("ConnectionString", connFile ? connFile->getConnectionString().toString() : "")
.detailf("ActualTime", "%lld", DEBUG_DETERMINISM ? 0 : time(NULL))
.detailf("ActualTime", "%lld", DEBUG_DETERMINISM ? 0 : time(nullptr))
.detail("CommandLine", "fdbserver -r simulation")
.detail("BuggifyEnabled", isBuggifyEnabled(BuggifyType::General))
.detail("Simulated", true)
@ -559,7 +559,7 @@ ACTOR Future<Void> restartSimulatedSystem(vector<Future<Void>>* systemActors, st
int processesPerMachine = atoi(ini.GetValue("META", "processesPerMachine"));
int listenersPerProcess = 1;
auto listenersPerProcessStr = ini.GetValue("META", "listenersPerProcess");
if(listenersPerProcessStr != NULL) {
if(listenersPerProcessStr != nullptr) {
listenersPerProcess = atoi(listenersPerProcessStr);
}
int desiredCoordinators = atoi(ini.GetValue("META", "desiredCoordinators"));
@ -586,7 +586,7 @@ ACTOR Future<Void> restartSimulatedSystem(vector<Future<Void>>* systemActors, st
}
auto zoneIDini = ini.GetValue(machineIdString.c_str(), "zoneId");
if( zoneIDini == NULL ) {
if( zoneIDini == nullptr ) {
zoneId = machineId;
} else {
zoneId = StringRef(zoneIDini);
@ -610,11 +610,11 @@ ACTOR Future<Void> restartSimulatedSystem(vector<Future<Void>>* systemActors, st
if (parsedIp.present()) {
return parsedIp.get();
} else {
return IPAddress(strtoul(ipStr, NULL, 10));
return IPAddress(strtoul(ipStr, nullptr, 10));
}
};
if( ip == NULL ) {
if( ip == nullptr ) {
for (int i = 0; i < processes; i++) {
const char* val =
ini.GetValue(machineIdString.c_str(), format("ipAddr%d", i * listenersPerProcess).c_str());
@ -735,7 +735,7 @@ void SimulationConfig::generateNormalConfig(int minimumReplication, int minimumR
bool generateFearless = simple ? false : (minimumRegions > 1 || deterministicRandom()->random01() < 0.5);
datacenters = simple ? 1 : ( generateFearless ? ( minimumReplication > 0 || deterministicRandom()->random01() < 0.5 ? 4 : 6 ) : deterministicRandom()->randomInt( 1, 4 ) );
if (deterministicRandom()->random01() < 0.25) db.desiredTLogCount = deterministicRandom()->randomInt(1,7);
if (deterministicRandom()->random01() < 0.25) db.proxyCount = deterministicRandom()->randomInt(1, 7);
if (deterministicRandom()->random01() < 0.25) db.commitProxyCount = deterministicRandom()->randomInt(1, 7);
if (deterministicRandom()->random01() < 0.25) db.grvProxyCount = deterministicRandom()->randomInt(1, 4);
if (deterministicRandom()->random01() < 0.25) db.resolverCount = deterministicRandom()->randomInt(1,7);
int storage_engine_type = deterministicRandom()->randomInt(0, 4);
@ -772,7 +772,7 @@ void SimulationConfig::generateNormalConfig(int minimumReplication, int minimumR
// set_config("memory-radixtree-beta");
if(simple) {
db.desiredTLogCount = 1;
db.proxyCount = 1;
db.commitProxyCount = 1;
db.grvProxyCount = 1;
db.resolverCount = 1;
}

View File

@ -318,14 +318,14 @@ public:
Node* alreadyChecked;
StringRef value;
Finger() : level(MaxLevels), x(NULL), alreadyChecked(NULL) {}
Finger() : level(MaxLevels), x(nullptr), alreadyChecked(nullptr) {}
Finger(Node* header, const StringRef& ptr) : value(ptr), level(MaxLevels), alreadyChecked(NULL), x(header) {}
Finger(Node* header, const StringRef& ptr) : value(ptr), level(MaxLevels), alreadyChecked(nullptr), x(header) {}
void init(const StringRef& value, Node* header) {
this->value = value;
x = header;
alreadyChecked = NULL;
alreadyChecked = nullptr;
level = MaxLevels;
}
@ -366,7 +366,7 @@ public:
if (n && n->length() == value.size() && !memcmp(n->value(), value.begin(), value.size()))
return n;
else
return NULL;
return nullptr;
}
StringRef getValue() const {
@ -388,16 +388,16 @@ public:
explicit SkipList(Version version = 0) {
header = Node::create(StringRef(), MaxLevels - 1);
for (int l = 0; l < MaxLevels; l++) {
header->setNext(l, NULL);
header->setNext(l, nullptr);
header->setMaxVersion(l, version);
}
}
~SkipList() { destroy(); }
SkipList(SkipList&& other) noexcept : header(other.header) { other.header = NULL; }
SkipList(SkipList&& other) noexcept : header(other.header) { other.header = nullptr; }
void operator=(SkipList&& other) noexcept {
destroy();
header = other.header;
other.header = NULL;
other.header = nullptr;
}
void swap(SkipList& other) { std::swap(header, other.header); }
@ -406,7 +406,7 @@ public:
const Finger& startF = fingers[r * 2];
const Finger& endF = fingers[r * 2 + 1];
if (endF.found() == NULL) insert(endF, endF.finger[0]->getMaxVersion(0));
if (endF.found() == nullptr) insert(endF, endF.finger[0]->getMaxVersion(0));
remove(startF, endF);
insert(startF, version);
@ -470,7 +470,7 @@ public:
for (int i = ends.size() - 1; i >= 0; i--) {
ends[i].finger[l]->setNext(l, input[i + 1].header->getNext(l));
if (l && (!i || ends[i].finger[l] != input[i].header)) ends[i].finger[l]->calcVersionForLevel(l);
input[i + 1].header->setNext(l, NULL);
input[i + 1].header->setNext(l, nullptr);
}
}
swap(input[0]);
@ -499,7 +499,7 @@ public:
for (int i = 1; i < count; i++) {
results[i].level = startLevel;
results[i].x = x;
results[i].alreadyChecked = NULL;
results[i].alreadyChecked = nullptr;
results[i].value = values[i];
for (int j = startLevel; j < MaxLevels; j++) results[i].finger[j] = results[0].finger[j];
}
@ -697,7 +697,7 @@ private:
right.header->setMaxVersion(0, f.finger[0]->getMaxVersion(0));
for (int l = 0; l < MaxLevels; l++) {
right.header->setNext(l, f.finger[l]->getNext(l));
f.finger[l]->setNext(l, NULL);
f.finger[l]->setNext(l, nullptr);
}
}
@ -705,7 +705,7 @@ private:
Node* node = header;
for (int l = MaxLevels - 1; l >= 0; l--) {
Node* next;
while ((next = node->getNext(l)) != NULL) node = next;
while ((next = node->getNext(l)) != nullptr) node = next;
end.finger[l] = node;
}
end.level = 0;

View File

@ -574,7 +574,7 @@ struct RolesInfo {
*pMetricVersion = metricVersion;
return roles.insert( std::make_pair(iface.address(), obj ))->second;
}
JsonBuilderObject& addRole(std::string const& role, MasterProxyInterface& iface, EventMap const& metrics) {
JsonBuilderObject& addRole(std::string const& role, CommitProxyInterface& iface, EventMap const& metrics) {
JsonBuilderObject obj;
obj["id"] = iface.id().shortString();
obj["role"] = role;
@ -646,11 +646,10 @@ ACTOR static Future<JsonBuilderObject> processStatusFetcher(
WorkerEvents mMetrics, WorkerEvents nMetrics, WorkerEvents errors, WorkerEvents traceFileOpenErrors,
WorkerEvents programStarts, std::map<std::string, std::vector<JsonBuilderObject>> processIssues,
vector<std::pair<StorageServerInterface, EventMap>> storageServers,
vector<std::pair<TLogInterface, EventMap>> tLogs,
vector<std::pair<MasterProxyInterface, EventMap>> proxies,
vector<std::pair<GrvProxyInterface, EventMap>> grvProxies,
ServerCoordinators coordinators, Database cx, Optional<DatabaseConfiguration> configuration,
Optional<Key> healthyZone, std::set<std::string>* incomplete_reasons) {
vector<std::pair<TLogInterface, EventMap>> tLogs, vector<std::pair<CommitProxyInterface, EventMap>> commitProxies,
vector<std::pair<GrvProxyInterface, EventMap>> grvProxies, ServerCoordinators coordinators, Database cx,
Optional<DatabaseConfiguration> configuration, Optional<Key> healthyZone,
std::set<std::string>* incomplete_reasons) {
state JsonBuilderObject processMap;
@ -736,9 +735,9 @@ ACTOR static Future<JsonBuilderObject> processStatusFetcher(
roles.addCoordinatorRole(coordinator);
}
state std::vector<std::pair<MasterProxyInterface, EventMap>>::iterator proxy;
for(proxy = proxies.begin(); proxy != proxies.end(); ++proxy) {
roles.addRole( "proxy", proxy->first, proxy->second );
state std::vector<std::pair<CommitProxyInterface, EventMap>>::iterator commit_proxy;
for (commit_proxy = commitProxies.begin(); commit_proxy != commitProxies.end(); ++commit_proxy) {
roles.addRole("commit_proxy", commit_proxy->first, commit_proxy->second);
wait(yield());
}
@ -1064,14 +1063,14 @@ ACTOR static Future<JsonBuilderObject> recoveryStateStatusFetcher(WorkerDetails
// Add additional metadata for certain statuses
if (mStatusCode == RecoveryStatus::recruiting_transaction_servers) {
int requiredLogs = atoi( md.getValue("RequiredTLogs").c_str() );
int requiredProxies = atoi( md.getValue("RequiredProxies").c_str() );
int requiredCommitProxies = atoi(md.getValue("RequiredCommitProxies").c_str());
int requiredGrvProxies = atoi(md.getValue("RequiredGrvProxies").c_str());
int requiredResolvers = atoi( md.getValue("RequiredResolvers").c_str() );
//int requiredProcesses = std::max(requiredLogs, std::max(requiredResolvers, requiredProxies));
//int requiredProcesses = std::max(requiredLogs, std::max(requiredResolvers, requiredCommitProxies));
//int requiredMachines = std::max(requiredLogs, 1);
message["required_logs"] = requiredLogs;
message["required_proxies"] = requiredProxies;
message["required_commit_proxies"] = requiredCommitProxies;
message["required_grv_proxies"] = requiredGrvProxies;
message["required_resolvers"] = requiredResolvers;
} else if (mStatusCode == RecoveryStatus::locking_old_transaction_servers) {
@ -1669,9 +1668,11 @@ ACTOR static Future<vector<std::pair<TLogInterface, EventMap>>> getTLogsAndMetri
return results;
}
ACTOR static Future<vector<std::pair<MasterProxyInterface, EventMap>>> getProxiesAndMetrics(Reference<AsyncVar<ServerDBInfo>> db, std::unordered_map<NetworkAddress, WorkerInterface> address_workers) {
vector<std::pair<MasterProxyInterface, EventMap>> results = wait(getServerMetrics(
db->get().client.masterProxies, address_workers, std::vector<std::string>{ "CommitLatencyMetrics", "CommitLatencyBands" }));
ACTOR static Future<vector<std::pair<CommitProxyInterface, EventMap>>> getCommitProxiesAndMetrics(
Reference<AsyncVar<ServerDBInfo>> db, std::unordered_map<NetworkAddress, WorkerInterface> address_workers) {
vector<std::pair<CommitProxyInterface, EventMap>> results =
wait(getServerMetrics(db->get().client.commitProxies, address_workers,
std::vector<std::string>{ "CommitLatencyMetrics", "CommitLatencyBands" }));
return results;
}
@ -1755,16 +1756,18 @@ ACTOR static Future<JsonBuilderObject> workloadStatusFetcher(Reference<AsyncVar<
// Writes and conflicts
try {
state vector<Future<TraceEventFields>> proxyStatFutures;
state vector<Future<TraceEventFields>> commitProxyStatFutures;
state vector<Future<TraceEventFields>> grvProxyStatFutures;
std::map<NetworkAddress, WorkerDetails> workersMap;
for (auto const& w : workers) {
workersMap[w.interf.address()] = w;
}
for (auto &p : db->get().client.masterProxies) {
for (auto& p : db->get().client.commitProxies) {
auto worker = getWorker(workersMap, p.address());
if (worker.present())
proxyStatFutures.push_back(timeoutError(worker.get().interf.eventLogRequest.getReply(EventLogRequest(LiteralStringRef("ProxyMetrics"))), 1.0));
commitProxyStatFutures.push_back(timeoutError(
worker.get().interf.eventLogRequest.getReply(EventLogRequest(LiteralStringRef("ProxyMetrics"))),
1.0));
else
throw all_alternatives_failed(); // We need data from all proxies for this result to be trustworthy
}
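
The loop above only reports commit-proxy workload figures when every commit proxy has a reachable worker; a missing one aborts the whole fetch rather than producing partial totals. A rough sketch of that all-or-nothing shape in plain C++ (std::future stand-ins, not flow/actor code; the names below are illustrative):

#include <future>
#include <optional>
#include <stdexcept>
#include <string>
#include <vector>

// One slot per commit proxy; an empty optional models "no reachable worker".
// If any slot is empty, abandon the whole section rather than report totals
// that silently undercount part of the cluster.
std::vector<std::string> fetchFromAllCommitProxies(
        std::vector<std::optional<std::future<std::string>>>& replies) {
    std::vector<std::string> stats;
    for (auto& reply : replies) {
        if (!reply) throw std::runtime_error("all_alternatives_failed");
        stats.push_back(reply->get()); // each reply is assumed to carry its own timeout
    }
    return stats;
}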
@ -1775,7 +1778,7 @@ ACTOR static Future<JsonBuilderObject> workloadStatusFetcher(Reference<AsyncVar<
else
throw all_alternatives_failed(); // We need data from all proxies for this result to be trustworthy
}
state vector<TraceEventFields> proxyStats = wait(getAll(proxyStatFutures));
state vector<TraceEventFields> commitProxyStats = wait(getAll(commitProxyStatFutures));
state vector<TraceEventFields> grvProxyStats = wait(getAll(grvProxyStatFutures));
StatusCounter txnStartOut;
@ -1798,14 +1801,14 @@ ACTOR static Future<JsonBuilderObject> workloadStatusFetcher(Reference<AsyncVar<
txnMemoryErrors.updateValues(StatusCounter(gps.getValue("TxnRequestErrors")));
}
for (auto &ps : proxyStats) {
mutations.updateValues( StatusCounter(ps.getValue("Mutations")) );
mutationBytes.updateValues( StatusCounter(ps.getValue("MutationBytes")) );
txnConflicts.updateValues( StatusCounter(ps.getValue("TxnConflicts")) );
txnCommitOutSuccess.updateValues( StatusCounter(ps.getValue("TxnCommitOutSuccess")) );
txnKeyLocationOut.updateValues( StatusCounter(ps.getValue("KeyServerLocationOut")) );
txnMemoryErrors.updateValues( StatusCounter(ps.getValue("KeyServerLocationErrors")) );
txnMemoryErrors.updateValues( StatusCounter(ps.getValue("TxnCommitErrors")) );
for (auto& cps : commitProxyStats) {
mutations.updateValues(StatusCounter(cps.getValue("Mutations")));
mutationBytes.updateValues(StatusCounter(cps.getValue("MutationBytes")));
txnConflicts.updateValues(StatusCounter(cps.getValue("TxnConflicts")));
txnCommitOutSuccess.updateValues(StatusCounter(cps.getValue("TxnCommitOutSuccess")));
txnKeyLocationOut.updateValues(StatusCounter(cps.getValue("KeyServerLocationOut")));
txnMemoryErrors.updateValues(StatusCounter(cps.getValue("KeyServerLocationErrors")));
txnMemoryErrors.updateValues(StatusCounter(cps.getValue("TxnCommitErrors")));
}
operationsObj["writes"] = mutations.getStatus();
@ -2009,78 +2012,98 @@ ACTOR static Future<JsonBuilderObject> clusterSummaryStatisticsFetcher(WorkerEve
return statusObj;
}
static JsonBuilderArray oldTlogFetcher(int* oldLogFaultTolerance, Reference<AsyncVar<ServerDBInfo>> db, std::unordered_map<NetworkAddress, WorkerInterface> const& address_workers) {
JsonBuilderArray oldTlogsArray;
if(db->get().recoveryState >= RecoveryState::ACCEPTING_COMMITS) {
for(auto it : db->get().logSystemConfig.oldTLogs) {
static JsonBuilderObject tlogFetcher(int* logFaultTolerance, const std::vector<TLogSet>& tLogs,
std::unordered_map<NetworkAddress, WorkerInterface> const& address_workers) {
JsonBuilderObject statusObj;
JsonBuilderArray logsObj;
Optional<int32_t> sat_log_replication_factor, sat_log_write_anti_quorum, sat_log_fault_tolerance, log_replication_factor, log_write_anti_quorum, log_fault_tolerance, remote_log_replication_factor, remote_log_fault_tolerance;
Optional<int32_t> sat_log_replication_factor, sat_log_write_anti_quorum, sat_log_fault_tolerance,
log_replication_factor, log_write_anti_quorum, log_fault_tolerance, remote_log_replication_factor,
remote_log_fault_tolerance;
int maxFaultTolerance = 0;
for(int i = 0; i < it.tLogs.size(); i++) {
for (int i = 0; i < tLogs.size(); i++) {
int failedLogs = 0;
for(auto& log : it.tLogs[i].tLogs) {
for (auto& log : tLogs[i].tLogs) {
JsonBuilderObject logObj;
bool failed = !log.present() || !address_workers.count(log.interf().address());
logObj["id"] = log.id().shortString();
logObj["healthy"] = !failed;
if(log.present()) {
if (log.present()) {
logObj["address"] = log.interf().address().toString();
}
logsObj.push_back(logObj);
if(failed) {
if (failed) {
failedLogs++;
}
}
maxFaultTolerance = std::max(maxFaultTolerance, it.tLogs[i].tLogReplicationFactor - 1 - it.tLogs[i].tLogWriteAntiQuorum - failedLogs);
if(it.tLogs[i].isLocal && it.tLogs[i].locality == tagLocalitySatellite) {
sat_log_replication_factor = it.tLogs[i].tLogReplicationFactor;
sat_log_write_anti_quorum = it.tLogs[i].tLogWriteAntiQuorum;
sat_log_fault_tolerance = it.tLogs[i].tLogReplicationFactor - 1 - it.tLogs[i].tLogWriteAntiQuorum - failedLogs;
}
else if(it.tLogs[i].isLocal) {
log_replication_factor = it.tLogs[i].tLogReplicationFactor;
log_write_anti_quorum = it.tLogs[i].tLogWriteAntiQuorum;
log_fault_tolerance = it.tLogs[i].tLogReplicationFactor - 1 - it.tLogs[i].tLogWriteAntiQuorum - failedLogs;
}
else {
remote_log_replication_factor = it.tLogs[i].tLogReplicationFactor;
remote_log_fault_tolerance = it.tLogs[i].tLogReplicationFactor - 1 - failedLogs;
// The log generation's fault tolerance is the maximum tlog fault tolerance across its regions.
maxFaultTolerance =
std::max(maxFaultTolerance, tLogs[i].tLogReplicationFactor - 1 - tLogs[i].tLogWriteAntiQuorum - failedLogs);
if (tLogs[i].isLocal && tLogs[i].locality == tagLocalitySatellite) {
sat_log_replication_factor = tLogs[i].tLogReplicationFactor;
sat_log_write_anti_quorum = tLogs[i].tLogWriteAntiQuorum;
sat_log_fault_tolerance = tLogs[i].tLogReplicationFactor - 1 - tLogs[i].tLogWriteAntiQuorum - failedLogs;
} else if (tLogs[i].isLocal) {
log_replication_factor = tLogs[i].tLogReplicationFactor;
log_write_anti_quorum = tLogs[i].tLogWriteAntiQuorum;
log_fault_tolerance = tLogs[i].tLogReplicationFactor - 1 - tLogs[i].tLogWriteAntiQuorum - failedLogs;
} else {
remote_log_replication_factor = tLogs[i].tLogReplicationFactor;
remote_log_fault_tolerance = tLogs[i].tLogReplicationFactor - 1 - failedLogs;
}
}
*oldLogFaultTolerance = std::min(*oldLogFaultTolerance, maxFaultTolerance);
statusObj["logs"] = logsObj;
*logFaultTolerance = std::min(*logFaultTolerance, maxFaultTolerance);
statusObj["log_interfaces"] = logsObj;
// We may lose logs in this log generation; storage servers may never be able to catch up with this log
// generation.
statusObj["possibly_losing_data"] = maxFaultTolerance < 0;
if (sat_log_replication_factor.present())
statusObj["satellite_log_replication_factor"] = sat_log_replication_factor.get();
if (sat_log_write_anti_quorum.present())
statusObj["satellite_log_write_anti_quorum"] = sat_log_write_anti_quorum.get();
if (sat_log_fault_tolerance.present())
statusObj["satellite_log_fault_tolerance"] = sat_log_fault_tolerance.get();
if (sat_log_fault_tolerance.present()) statusObj["satellite_log_fault_tolerance"] = sat_log_fault_tolerance.get();
if (log_replication_factor.present())
statusObj["log_replication_factor"] = log_replication_factor.get();
if (log_write_anti_quorum.present())
statusObj["log_write_anti_quorum"] = log_write_anti_quorum.get();
if (log_fault_tolerance.present())
statusObj["log_fault_tolerance"] = log_fault_tolerance.get();
if (log_replication_factor.present()) statusObj["log_replication_factor"] = log_replication_factor.get();
if (log_write_anti_quorum.present()) statusObj["log_write_anti_quorum"] = log_write_anti_quorum.get();
if (log_fault_tolerance.present()) statusObj["log_fault_tolerance"] = log_fault_tolerance.get();
if (remote_log_replication_factor.present())
statusObj["remote_log_replication_factor"] = remote_log_replication_factor.get();
if (remote_log_fault_tolerance.present())
statusObj["remote_log_fault_tolerance"] = remote_log_fault_tolerance.get();
oldTlogsArray.push_back(statusObj);
}
}
return oldTlogsArray;
return statusObj;
}
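
The arithmetic in this function is compact but easy to misread: a tlog set can lose replicationFactor - 1 - writeAntiQuorum - failedLogs more machines, a generation takes the best (maximum) of its regions, and the caller keeps the worst (minimum) across generations. A small standalone sketch of just that arithmetic, with assumed field names rather than FDB's TLogSet:

#include <algorithm>
#include <vector>

// One entry per tlog set in a generation (field names are illustrative).
struct TLogSetInfo {
    int replicationFactor = 0;
    int writeAntiQuorum = 0;
    int failedLogs = 0;
};

// A generation tolerates as many further failures as its most resilient region.
int generationFaultTolerance(const std::vector<TLogSetInfo>& sets) {
    int maxTolerance = 0;
    for (const auto& s : sets)
        maxTolerance = std::max(maxTolerance,
                                s.replicationFactor - 1 - s.writeAntiQuorum - s.failedLogs);
    return maxTolerance; // negative means this generation may already be losing data
}

// The cluster-wide figure is the worst generation, starting from the same
// sentinel (100) the status code uses.
int clusterLogFaultTolerance(const std::vector<std::vector<TLogSetInfo>>& generations) {
    int tolerance = 100;
    for (const auto& gen : generations)
        tolerance = std::min(tolerance, generationFaultTolerance(gen));
    return tolerance;
}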
static JsonBuilderObject faultToleranceStatusFetcher(DatabaseConfiguration configuration, ServerCoordinators coordinators, std::vector<WorkerDetails>& workers, int extraTlogEligibleZones, int minReplicasRemaining, bool underMaintenance) {
static JsonBuilderArray tlogFetcher(int* logFaultTolerance, Reference<AsyncVar<ServerDBInfo>> db,
std::unordered_map<NetworkAddress, WorkerInterface> const& address_workers) {
JsonBuilderArray tlogsArray;
JsonBuilderObject tlogsStatus;
tlogsStatus = tlogFetcher(logFaultTolerance, db->get().logSystemConfig.tLogs, address_workers);
tlogsStatus["epoch"] = db->get().logSystemConfig.epoch;
tlogsStatus["current"] = true;
if (db->get().logSystemConfig.recoveredAt.present()) {
tlogsStatus["begin_version"] = db->get().logSystemConfig.recoveredAt.get();
}
tlogsArray.push_back(tlogsStatus);
for (auto it : db->get().logSystemConfig.oldTLogs) {
JsonBuilderObject oldTlogsStatus = tlogFetcher(logFaultTolerance, it.tLogs, address_workers);
oldTlogsStatus["epoch"] = it.epoch;
oldTlogsStatus["current"] = false;
oldTlogsStatus["begin_version"] = it.epochBegin;
oldTlogsStatus["end_version"] = it.epochEnd;
tlogsArray.push_back(oldTlogsStatus);
}
return tlogsArray;
}
static JsonBuilderObject faultToleranceStatusFetcher(DatabaseConfiguration configuration,
ServerCoordinators coordinators,
std::vector<WorkerDetails>& workers, int extraTlogEligibleZones,
int minReplicasRemaining, int oldLogFaultTolerance,
bool underMaintenance) {
JsonBuilderObject statusObj;
// without losing data
@ -2112,17 +2135,18 @@ static JsonBuilderObject faultToleranceStatusFetcher(DatabaseConfiguration confi
}
maxCoordinatorZoneFailures += 1;
}
// Maximum number of zone failures we can tolerate without losing data
int zoneFailuresWithoutLosingData = std::min(maxZoneFailures, maxCoordinatorZoneFailures);
if (minReplicasRemaining >= 0){
zoneFailuresWithoutLosingData = std::min(zoneFailuresWithoutLosingData, minReplicasRemaining - 1);
}
statusObj["max_zone_failures_without_losing_data"] = std::max(zoneFailuresWithoutLosingData, 0);
// without losing availability
statusObj["max_zone_failures_without_losing_availability"] = std::max(std::min(extraTlogEligibleZones, zoneFailuresWithoutLosingData), 0);
// oldLogFaultTolerance is the maximum number of zone failures we can tolerate without losing log data; -1 means we may lose data or availability.
zoneFailuresWithoutLosingData = std::max(std::min(zoneFailuresWithoutLosingData, oldLogFaultTolerance), -1);
statusObj["max_zone_failures_without_losing_data"] = zoneFailuresWithoutLosingData;
statusObj["max_zone_failures_without_losing_availability"] =
std::max(std::min(extraTlogEligibleZones, zoneFailuresWithoutLosingData), -1);
return statusObj;
}
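
Putting the new logFaultTolerance parameter into context: the two reported limits combine three independent bounds (storage and coordinator zone failures, remaining storage replicas, and log fault tolerance), with -1 meaning data or availability may already be lost. A hedged sketch of that combination, with a simplified signature rather than the fetcher's actual one:

#include <algorithm>

struct ZoneFaultTolerance {
    int withoutLosingData;
    int withoutLosingAvailability; // -1 means availability (or data) may already be lost
};

ZoneFaultTolerance combineFaultTolerances(int maxZoneFailures, int maxCoordinatorZoneFailures,
                                          int minReplicasRemaining, int extraTlogEligibleZones,
                                          int logFaultTolerance) {
    // Data survives only as long as storage, coordinators, and logs all survive.
    int data = std::min(maxZoneFailures, maxCoordinatorZoneFailures);
    if (minReplicasRemaining >= 0) data = std::min(data, minReplicasRemaining - 1);
    data = std::max(std::min(data, logFaultTolerance), -1);

    // Availability additionally needs spare zones eligible to host replacement tlogs.
    int availability = std::max(std::min(extraTlogEligibleZones, data), -1);
    return { data, availability };
}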
@ -2440,7 +2464,7 @@ ACTOR Future<StatusReply> clusterGetStatus(
getProcessIssuesAsMessages(workerIssues);
state vector<std::pair<StorageServerInterface, EventMap>> storageServers;
state vector<std::pair<TLogInterface, EventMap>> tLogs;
state vector<std::pair<MasterProxyInterface, EventMap>> proxies;
state vector<std::pair<CommitProxyInterface, EventMap>> commitProxies;
state vector<std::pair<GrvProxyInterface, EventMap>> grvProxies;
state JsonBuilderObject qos;
state JsonBuilderObject data_overlay;
@ -2504,7 +2528,8 @@ ACTOR Future<StatusReply> clusterGetStatus(
state Future<ErrorOr<vector<std::pair<StorageServerInterface, EventMap>>>> storageServerFuture = errorOr(getStorageServersAndMetrics(cx, address_workers, rkWorker));
state Future<ErrorOr<vector<std::pair<TLogInterface, EventMap>>>> tLogFuture = errorOr(getTLogsAndMetrics(db, address_workers));
state Future<ErrorOr<vector<std::pair<MasterProxyInterface, EventMap>>>> proxyFuture = errorOr(getProxiesAndMetrics(db, address_workers));
state Future<ErrorOr<vector<std::pair<CommitProxyInterface, EventMap>>>> commitProxyFuture =
errorOr(getCommitProxiesAndMetrics(db, address_workers));
state Future<ErrorOr<vector<std::pair<GrvProxyInterface, EventMap>>>> grvProxyFuture = errorOr(getGrvProxiesAndMetrics(db, address_workers));
state int minReplicasRemaining = -1;
@ -2517,14 +2542,16 @@ ACTOR Future<StatusReply> clusterGetStatus(
futures2.push_back(clusterSummaryStatisticsFetcher(pMetrics, storageServerFuture, tLogFuture, &status_incomplete_reasons));
state std::vector<JsonBuilderObject> workerStatuses = wait(getAll(futures2));
int oldLogFaultTolerance = 100;
if(db->get().recoveryState >= RecoveryState::ACCEPTING_COMMITS && db->get().logSystemConfig.oldTLogs.size() > 0) {
statusObj["old_logs"] = oldTlogFetcher(&oldLogFaultTolerance, db, address_workers);
int logFaultTolerance = 100;
if (db->get().recoveryState >= RecoveryState::ACCEPTING_COMMITS) {
statusObj["logs"] = tlogFetcher(&logFaultTolerance, db, address_workers);
}
if(configuration.present()) {
int extraTlogEligibleZones = getExtraTLogEligibleZones(workers, configuration.get());
statusObj["fault_tolerance"] = faultToleranceStatusFetcher(configuration.get(), coordinators, workers, extraTlogEligibleZones, minReplicasRemaining, loadResult.present() && loadResult.get().healthyZone.present());
statusObj["fault_tolerance"] = faultToleranceStatusFetcher(
configuration.get(), coordinators, workers, extraTlogEligibleZones, minReplicasRemaining,
logFaultTolerance, loadResult.present() && loadResult.get().healthyZone.present());
}
state JsonBuilderObject configObj =
@ -2587,13 +2614,13 @@ ACTOR Future<StatusReply> clusterGetStatus(
messages.push_back(JsonBuilder::makeMessage("log_servers_error", "Timed out trying to retrieve log servers."));
}
// ...also proxies
ErrorOr<vector<std::pair<MasterProxyInterface, EventMap>>> _proxies = wait(proxyFuture);
if (_proxies.present()) {
proxies = _proxies.get();
}
else {
messages.push_back(JsonBuilder::makeMessage("proxies_error", "Timed out trying to retrieve proxies."));
// ...also commit proxies
ErrorOr<vector<std::pair<CommitProxyInterface, EventMap>>> _commitProxies = wait(commitProxyFuture);
if (_commitProxies.present()) {
commitProxies = _commitProxies.get();
} else {
messages.push_back(
JsonBuilder::makeMessage("commit_proxies_error", "Timed out trying to retrieve commit proxies."));
}
// ...also grv proxies
@ -2614,12 +2641,10 @@ ACTOR Future<StatusReply> clusterGetStatus(
statusObj["layers"] = layers;
}
JsonBuilderObject processStatus = wait(processStatusFetcher(db, workers, pMetrics, mMetrics, networkMetrics,
latestError, traceFileOpenErrors, programStarts,
processIssues, storageServers, tLogs, proxies,
grvProxies, coordinators, cx, configuration,
loadResult.present() ? loadResult.get().healthyZone : Optional<Key>(),
&status_incomplete_reasons));
JsonBuilderObject processStatus = wait(processStatusFetcher(
db, workers, pMetrics, mMetrics, networkMetrics, latestError, traceFileOpenErrors, programStarts,
processIssues, storageServers, tLogs, commitProxies, grvProxies, coordinators, cx, configuration,
loadResult.present() ? loadResult.get().healthyZone : Optional<Key>(), &status_incomplete_reasons));
statusObj["processes"] = processStatus;
statusObj["clients"] = clientStatusFetcher(clientStatus);

View File

@ -103,12 +103,12 @@ struct CacheRangeInfo : ReferenceCounted<CacheRangeInfo>, NonCopyable {
delete adding;
}
static CacheRangeInfo* newNotAssigned(KeyRange keys) { return new CacheRangeInfo(keys, NULL, NULL); }
static CacheRangeInfo* newReadWrite(KeyRange keys, StorageCacheData* data) { return new CacheRangeInfo(keys, NULL, data); }
static CacheRangeInfo* newAdding(StorageCacheData* data, KeyRange keys) { return new CacheRangeInfo(keys, new AddingCacheRange(data, keys), NULL); }
static CacheRangeInfo* newNotAssigned(KeyRange keys) { return new CacheRangeInfo(keys, nullptr, nullptr); }
static CacheRangeInfo* newReadWrite(KeyRange keys, StorageCacheData* data) { return new CacheRangeInfo(keys, nullptr, data); }
static CacheRangeInfo* newAdding(StorageCacheData* data, KeyRange keys) { return new CacheRangeInfo(keys, new AddingCacheRange(data, keys), nullptr); }
bool isReadable() const { return readWrite!=NULL; }
bool isAdding() const { return adding!=NULL; }
bool isReadable() const { return readWrite!=nullptr; }
bool isAdding() const { return adding!=nullptr; }
bool notAssigned() const { return !readWrite && !adding; }
bool assigned() const { return readWrite || adding; }
bool isInVersionedData() const { return readWrite || (adding && adding->isTransferred()); }

Some files were not shown because too many files have changed in this diff.