Merge branch 'master' of https://github.com/apple/foundationdb into redwood-header-changes

This commit is contained in:
Steve Atherton 2022-01-06 04:43:00 -08:00
commit dd90b7661d
64 changed files with 1274 additions and 607 deletions

View File

@ -63,7 +63,7 @@ if(APPLE)
target_link_options(fdb_c PRIVATE "LINKER:-no_weak_exports,-exported_symbols_list,${symbols}")
elseif(WIN32)
else()
target_link_options(fdb_c PRIVATE "LINKER:--version-script=${CMAKE_CURRENT_SOURCE_DIR}/fdb_c.map,-z,nodelete")
target_link_options(fdb_c PRIVATE "LINKER:--version-script=${CMAKE_CURRENT_SOURCE_DIR}/fdb_c.map,-z,nodelete,-z,noexecstack")
endif()
target_include_directories(fdb_c PUBLIC
$<BUILD_INTERFACE:${CMAKE_CURRENT_BINARY_DIR}>

View File

@ -30,7 +30,7 @@
namespace {
struct SimpleWorkload : FDBWorkload {
struct SimpleWorkload final : FDBWorkload {
static const std::string name;
static const std::string KEY_PREFIX;
std::mt19937 random;

View File

@ -85,7 +85,6 @@ if (NOT WIN32 AND NOT OPEN_FOR_IDE)
add_fdbclient_test(
NAME multi_process_fdbcli_tests
PROCESS_NUMBER 5
TEST_TIMEOUT 120 # The test can take near to 1 minutes sometime, set timeout to 2 minutes to be safe
COMMAND ${CMAKE_SOURCE_DIR}/bindings/python/tests/fdbcli_tests.py
${CMAKE_BINARY_DIR}
@CLUSTER_FILE@
@ -102,7 +101,6 @@ if (NOT WIN32 AND NOT OPEN_FOR_IDE)
add_fdbclient_test(
NAME multi_process_external_client_fdbcli_tests
PROCESS_NUMBER 5
TEST_TIMEOUT 120 # The test can take near to 1 minutes sometime, set timeout to 2 minutes to be safe
COMMAND ${CMAKE_SOURCE_DIR}/bindings/python/tests/fdbcli_tests.py
${CMAKE_BINARY_DIR}
@CLUSTER_FILE@

View File

@ -503,6 +503,18 @@ def profile(logger):
assert run_fdbcli_command('profile', 'client', 'get') == default_profile_client_get_output
@enable_logging()
def test_available(logger):
duration = 0 # seconds we already wait
while not get_value_from_status_json(False, 'client', 'database_status', 'available') and duration < 10:
logger.debug("Sleep for 1 second to wait cluster recovery")
time.sleep(1)
duration += 1
if duration >= 10:
logger.debug(run_fdbcli_command('status', 'json'))
assert False
@enable_logging()
def triggerddteaminfolog(logger):
# this command is straightforward and only has one code path
@ -538,6 +550,7 @@ if __name__ == '__main__':
command_template = [args.build_dir + '/bin/fdbcli', '-C', args.cluster_file, '--exec']
# tests for fdbcli commands
# assertions will fail if fdbcli does not work as expected
test_available()
if args.process_number == 1:
# TODO: disable for now, the change can cause the database unavailable
# advanceversion()

View File

@ -438,7 +438,7 @@ function(add_fdbclient_test)
set_tests_properties("${T_NAME}" PROPERTIES TIMEOUT ${T_TEST_TIMEOUT})
else()
# default timeout
set_tests_properties("${T_NAME}" PROPERTIES TIMEOUT 60)
set_tests_properties("${T_NAME}" PROPERTIES TIMEOUT 300)
endif()
set_tests_properties("${T_NAME}" PROPERTIES ENVIRONMENT UBSAN_OPTIONS=print_stacktrace=1:halt_on_error=1)
endfunction()

View File

@ -283,7 +283,6 @@ else()
-Woverloaded-virtual
-Wshift-sign-overflow
# Here's the current set of warnings we need to explicitly disable to compile warning-free with clang 11
-Wno-delete-non-virtual-dtor
-Wno-sign-compare
-Wno-undefined-var-template
-Wno-unknown-warning-option
@ -340,9 +339,19 @@ else()
set(DTRACE_PROBES 1)
endif()
if(CMAKE_COMPILER_IS_GNUCXX)
set(USE_LTO OFF CACHE BOOL "Do link time optimization")
if (USE_LTO)
set(USE_LTO OFF CACHE BOOL "Do link time optimization")
if (USE_LTO)
if (CLANG)
set(CLANG_LTO_STRATEGY "Thin" CACHE STRING "LLVM LTO strategy (Thin, or Full)")
if (CLANG_LTO_STRATEGY STREQUAL "Full")
add_compile_options($<$<CONFIG:Release>:-flto=full>)
else()
add_compile_options($<$<CONFIG:Release>:-flto=thin>)
endif()
set(CMAKE_RANLIB "llvm-ranlib")
set(CMAKE_AR "llvm-ar")
endif()
if(CMAKE_COMPILER_IS_GNUCXX)
add_compile_options($<$<CONFIG:Release>:-flto>)
set(CMAKE_AR "gcc-ar")
set(CMAKE_C_ARCHIVE_CREATE "<CMAKE_AR> qcs <TARGET> <LINK_FLAGS> <OBJECTS>")

View File

@ -176,6 +176,12 @@ install(DIRECTORY "${script_dir}/clients/usr/lib/cmake"
DESTINATION usr/lib
COMPONENT clients-versioned)
################################################################################
# Move Docker Setup
################################################################################
file(COPY "${PROJECT_SOURCE_DIR}/packaging/docker" DESTINATION "${PROJECT_BINARY_DIR}/packages/")
################################################################################
# General CPack configuration
################################################################################

View File

@ -7,32 +7,32 @@
[fdbmonitor]
[general]
restart_delay = 10
## by default, restart_backoff = restart_delay_reset_interval = restart_delay
# initial_restart_delay = 0
# restart_backoff = 60
# restart_delay_reset_interval = 60
cluster_file = ${CMAKE_BINARY_DIR}/fdb.cluster
# delete_envvars =
# kill_on_configuration_change = true
restart-delay = 10
## by default, restart-backoff = restart-delay-reset-interval = restart-delay
# initial-restart-delay = 0
# restart-backoff = 60
# restart-delay-reset-interval = 60
cluster-file = ${CMAKE_BINARY_DIR}/fdb.cluster
# delete-envvars =
# kill-on-configuration-change = true
## Default parameters for individual fdbserver processes
[fdbserver]
command = ${CMAKE_BINARY_DIR}/bin/fdbserver
public_address = auto:$ID
listen_address = public
public-address = auto:$ID
listen-address = public
datadir = ${CMAKE_BINARY_DIR}/sandbox/data/$ID
logdir = ${CMAKE_BINARY_DIR}/sandbox/logs
# logsize = 10MiB
# maxlogssize = 100MiB
# machine_id =
# datacenter_id =
# machine-id =
# datacenter-id =
# class =
# memory = 8GiB
# storage_memory = 1GiB
# cache_memory = 2GiB
# metrics_cluster =
# metrics_prefix =
# storage-memory = 1GiB
# cache-memory = 2GiB
# metrics-cluster =
# metrics-prefix =
## An individual fdbserver process with id 4000
## Parameters set here override defaults from the [fdbserver] section

View File

@ -242,7 +242,7 @@ function startFdbServer
let status="${status} + 1"
else
"${BINDIR}/fdbserver" --knob_disable_posix_kernel_aio=1 -C "${FDBCONF}" -p "${FDBCLUSTERTEXT}" -L "${LOGDIR}" -d "${WORKDIR}/fdb/${$}" &> "${LOGDIR}/fdbserver.log" &
"${BINDIR}/fdbserver" --knob-disable-posix-kernel-aio=1 -C "${FDBCONF}" -p "${FDBCLUSTERTEXT}" -L "${LOGDIR}" -d "${WORKDIR}/fdb/${$}" &> "${LOGDIR}/fdbserver.log" &
if [ "${?}" -ne 0 ]
then
log "Failed to start FDB Server"

View File

@ -440,6 +440,7 @@ namespace SummarizeTest
string tlsPluginArg = "";
if (tlsPluginFile.Length > 0) {
process.StartInfo.EnvironmentVariables["FDB_TLS_PLUGIN"] = tlsPluginFile;
// Use the old-style option with underscores because old binaries do not support hyphens
tlsPluginArg = "--tls_plugin=" + tlsPluginFile;
}
process.StartInfo.RedirectStandardOutput = true;

View File

@ -46,6 +46,34 @@
'
---
# name: test_execstack_permissions_libfdb_c[centos-versioned]
'
GNU_STACK 0x0000000000000000 0x0000000000000000 0x0000000000000000
0x0000000000000000 0x0000000000000000 RW 0x0
'
---
# name: test_execstack_permissions_libfdb_c[centos]
'
GNU_STACK 0x0000000000000000 0x0000000000000000 0x0000000000000000
0x0000000000000000 0x0000000000000000 RW 0x0
'
---
# name: test_execstack_permissions_libfdb_c[ubuntu-versioned]
'
GNU_STACK 0x0000000000000000 0x0000000000000000 0x0000000000000000
0x0000000000000000 0x0000000000000000 RW 0x0
'
---
# name: test_execstack_permissions_libfdb_c[ubuntu]
'
GNU_STACK 0x0000000000000000 0x0000000000000000 0x0000000000000000
0x0000000000000000 0x0000000000000000 RW 0x0
'
---
# name: test_fdbcli_help_text[centos-versioned]
'
FoundationDB CLI 7.1 (v7.1.0)
@ -59,7 +87,7 @@
--log-dir PATH Specifes the output directory for trace files. If
unspecified, defaults to the current directory. Has
no effect unless --log is specified.
--trace_format FORMAT
--trace-format FORMAT
Select the format of the log files. xml (the default) and json
are supported. Has no effect unless --log is specified.
--exec CMDS Immediately executes the semicolon separated CLI commands
@ -68,24 +96,24 @@
the CLI.
--api-version APIVERSION
Specifies the version of the API for the CLI to use.
--tls_certificate_file CERTFILE
--tls-certificate-file CERTFILE
The path of a file containing the TLS certificate and CA
chain.
--tls_ca_file CERTAUTHFILE
--tls-ca-file CERTAUTHFILE
The path of a file containing the CA certificates chain.
--tls_key_file KEYFILE
--tls-key-file KEYFILE
The path of a file containing the private key corresponding
to the TLS certificate.
--tls_password PASSCODE
--tls-password PASSCODE
The passphrase of encrypted private key
--tls_verify_peers CONSTRAINTS
--tls-verify-peers CONSTRAINTS
The constraints by which to validate TLS peers. The contents
and format of CONSTRAINTS are plugin-specific.
--knob_KNOBNAME KNOBVALUE
--knob-KNOBNAME KNOBVALUE
Changes a knob option. KNOBNAME should be lowercase.
--debug-tls Prints the TLS configuration and certificate chain, then exits.
Useful in reporting and diagnosing TLS issues.
--build_flags Print build information and exit.
--build-flags Print build information and exit.
-v, --version Print FoundationDB CLI version information and exit.
-h, --help Display this help and exit.
@ -104,7 +132,7 @@
--log-dir PATH Specifes the output directory for trace files. If
unspecified, defaults to the current directory. Has
no effect unless --log is specified.
--trace_format FORMAT
--trace-format FORMAT
Select the format of the log files. xml (the default) and json
are supported. Has no effect unless --log is specified.
--exec CMDS Immediately executes the semicolon separated CLI commands
@ -113,24 +141,24 @@
the CLI.
--api-version APIVERSION
Specifies the version of the API for the CLI to use.
--tls_certificate_file CERTFILE
--tls-certificate-file CERTFILE
The path of a file containing the TLS certificate and CA
chain.
--tls_ca_file CERTAUTHFILE
--tls-ca-file CERTAUTHFILE
The path of a file containing the CA certificates chain.
--tls_key_file KEYFILE
--tls-key-file KEYFILE
The path of a file containing the private key corresponding
to the TLS certificate.
--tls_password PASSCODE
--tls-password PASSCODE
The passphrase of encrypted private key
--tls_verify_peers CONSTRAINTS
--tls-verify-peers CONSTRAINTS
The constraints by which to validate TLS peers. The contents
and format of CONSTRAINTS are plugin-specific.
--knob_KNOBNAME KNOBVALUE
--knob-KNOBNAME KNOBVALUE
Changes a knob option. KNOBNAME should be lowercase.
--debug-tls Prints the TLS configuration and certificate chain, then exits.
Useful in reporting and diagnosing TLS issues.
--build_flags Print build information and exit.
--build-flags Print build information and exit.
-v, --version Print FoundationDB CLI version information and exit.
-h, --help Display this help and exit.
@ -149,7 +177,7 @@
--log-dir PATH Specifes the output directory for trace files. If
unspecified, defaults to the current directory. Has
no effect unless --log is specified.
--trace_format FORMAT
--trace-format FORMAT
Select the format of the log files. xml (the default) and json
are supported. Has no effect unless --log is specified.
--exec CMDS Immediately executes the semicolon separated CLI commands
@ -158,24 +186,24 @@
the CLI.
--api-version APIVERSION
Specifies the version of the API for the CLI to use.
--tls_certificate_file CERTFILE
--tls-certificate-file CERTFILE
The path of a file containing the TLS certificate and CA
chain.
--tls_ca_file CERTAUTHFILE
--tls-ca-file CERTAUTHFILE
The path of a file containing the CA certificates chain.
--tls_key_file KEYFILE
--tls-key-file KEYFILE
The path of a file containing the private key corresponding
to the TLS certificate.
--tls_password PASSCODE
--tls-password PASSCODE
The passphrase of encrypted private key
--tls_verify_peers CONSTRAINTS
--tls-verify-peers CONSTRAINTS
The constraints by which to validate TLS peers. The contents
and format of CONSTRAINTS are plugin-specific.
--knob_KNOBNAME KNOBVALUE
--knob-KNOBNAME KNOBVALUE
Changes a knob option. KNOBNAME should be lowercase.
--debug-tls Prints the TLS configuration and certificate chain, then exits.
Useful in reporting and diagnosing TLS issues.
--build_flags Print build information and exit.
--build-flags Print build information and exit.
-v, --version Print FoundationDB CLI version information and exit.
-h, --help Display this help and exit.
@ -194,7 +222,7 @@
--log-dir PATH Specifes the output directory for trace files. If
unspecified, defaults to the current directory. Has
no effect unless --log is specified.
--trace_format FORMAT
--trace-format FORMAT
Select the format of the log files. xml (the default) and json
are supported. Has no effect unless --log is specified.
--exec CMDS Immediately executes the semicolon separated CLI commands
@ -203,24 +231,24 @@
the CLI.
--api-version APIVERSION
Specifies the version of the API for the CLI to use.
--tls_certificate_file CERTFILE
--tls-certificate-file CERTFILE
The path of a file containing the TLS certificate and CA
chain.
--tls_ca_file CERTAUTHFILE
--tls-ca-file CERTAUTHFILE
The path of a file containing the CA certificates chain.
--tls_key_file KEYFILE
--tls-key-file KEYFILE
The path of a file containing the private key corresponding
to the TLS certificate.
--tls_password PASSCODE
--tls-password PASSCODE
The passphrase of encrypted private key
--tls_verify_peers CONSTRAINTS
--tls-verify-peers CONSTRAINTS
The constraints by which to validate TLS peers. The contents
and format of CONSTRAINTS are plugin-specific.
--knob_KNOBNAME KNOBVALUE
--knob-KNOBNAME KNOBVALUE
Changes a knob option. KNOBNAME should be lowercase.
--debug-tls Prints the TLS configuration and certificate chain, then exits.
Useful in reporting and diagnosing TLS issues.
--build_flags Print build information and exit.
--build-flags Print build information and exit.
-v, --version Print FoundationDB CLI version information and exit.
-h, --help Display this help and exit.

View File

@ -54,7 +54,9 @@ class Container:
# https://developers.redhat.com/blog/2016/09/13/running-systemd-in-a-non-privileged-container#the_quest
extra_initd_args = []
if initd:
extra_initd_args = "--tmpfs /tmp --tmpfs /run -v /sys/fs/cgroup:/sys/fs/cgroup:ro".split()
extra_initd_args = (
"--tmpfs /tmp --tmpfs /run -v /sys/fs/cgroup:/sys/fs/cgroup:ro".split()
)
self.uid = str(uuid.uuid4())
@ -103,6 +105,8 @@ def ubuntu_image_with_fdb_helper(versioned: bool) -> Iterator[Optional[Image]]:
container = Container("ubuntu")
for deb in debs:
container.copy_to(deb, "/opt")
container.run(["bash", "-c", "apt-get update"])
container.run(["bash", "-c", "apt-get install --yes binutils"]) # this is for testing libfdb_c execstack permissions
container.run(["bash", "-c", "dpkg -i /opt/*.deb"])
container.run(["bash", "-c", "rm /opt/*.deb"])
image = container.commit()
@ -146,6 +150,8 @@ def centos_image_with_fdb_helper(versioned: bool) -> Iterator[Optional[Image]]:
container = Container("centos", initd=True)
for rpm in rpms:
container.copy_to(rpm, "/opt")
container.run(["bash", "-c", "yum update -y"])
container.run(["bash", "-c", "yum install -y binutils"]) # this is for testing libfdb_c execstack permissions
container.run(["bash", "-c", "yum install -y /opt/*.rpm"])
container.run(["bash", "-c", "rm /opt/*.rpm"])
image = container.commit()
@ -235,6 +241,17 @@ def test_fdbcli_help_text(linux_container: Container, snapshot):
assert snapshot == linux_container.run(["fdbcli", "--help"])
def test_execstack_permissions_libfdb_c(linux_container: Container, snapshot):
linux_container.run(["ldconfig"])
assert snapshot == linux_container.run(
[
"bash",
"-c",
"readelf -l $(ldconfig -p | grep libfdb_c | awk '{print $(NF)}') | grep -A1 GNU_STACK",
]
)
def test_backup_restore(linux_container: Container, snapshot, tmp_path: pathlib.Path):
linux_container.run(["fdbcli", "--exec", "writemode on; set x y"])
assert snapshot == linux_container.run(
@ -245,7 +262,7 @@ def test_backup_restore(linux_container: Container, snapshot, tmp_path: pathlib.
[
"bash",
"-c",
"fdbrestore start -r file://$(echo /tmp/fdb_backup/*) -w --dest_cluster_file /etc/foundationdb/fdb.cluster",
"fdbrestore start -r file://$(echo /tmp/fdb_backup/*) -w --dest-cluster-file /etc/foundationdb/fdb.cluster",
]
)
assert snapshot == linux_container.run(["fdbcli", "--exec", "get x"])

View File

@ -223,7 +223,7 @@ We strive to keep the operational interface the same as the old backup system. T
By default, backup workers are not enabled in the system. When operators submit a new backup request for the first time, the database performs a configuration change (`backup_worker_enabled:=1`) that enables backup workers.
The operators backup request can indicate if an old backup or a new backup is used. This is a command line option (i.e., `-p` or `--partitioned_log`) in the `fdbbackup` command. A backup request of the new type is started in the following steps:
The operator's backup request can indicate whether an old backup or a new backup is used. This is a command line option (i.e., `-p` or `--partitioned-log`) in the `fdbbackup` command. A backup request of the new type is started in the following steps:
1. Operators use `fdbbackup` tool to write the backup range to a system key, i.e., `\xff\x02/backupStarted`.
2. All backup workers monitor the key `\xff\x02/backupStarted`, see the change, and start logging mutations.

View File

@ -613,7 +613,7 @@ minor impacts on recovery times:
: How many bytes of memory can be allocated to hold the results of reads from disk to respond to peek requests.<br>
Increasing it will increase the number of parallel peek requests a TLog can handle at once.<br>
Decreasing it will reduce TLog memory usage.<br>
If increased, `--max_memory` should be increased by the same amount.<br>
If increased, `--memory` should be increased by the same amount.<br>
`TLOG_DISK_QUEUE_EXTENSION_BYTES`
: When a DiskQueue needs to extend a file, by how many bytes should it extend the file.<br>

View File

@ -668,7 +668,7 @@ Datacenters
FoundationDB is datacenter aware and supports operation across datacenters. In a multiple-datacenter configuration, it is recommended that you set the :ref:`redundancy mode <configuration-choosing-redundancy-mode>` to ``three_datacenter`` and that you set the ``locality_dcid`` parameter for all FoundationDB processes in :ref:`foundationdb.conf <foundationdb-conf>`.
If you specify the ``--datacenter_id`` option to any FoundationDB process in your cluster, you should specify it to all such processes. Processes which do not have a specified datacenter ID on the command line are considered part of a default "unset" datacenter. FoundationDB will incorrectly believe that these processes are failure-isolated from other datacenters, which can reduce performance and fault tolerance.
If you specify the ``--datacenter-id`` option to any FoundationDB process in your cluster, you should specify it to all such processes. Processes which do not have a specified datacenter ID on the command line are considered part of a default "unset" datacenter. FoundationDB will incorrectly believe that these processes are failure-isolated from other datacenters, which can reduce performance and fault tolerance.
(Re)creating a database
-----------------------

View File

@ -161,7 +161,7 @@ Blob Credential Files
In order to help safeguard blob store credentials, the <SECRET> can optionally be omitted from blobstore:// URLs on the command line. Omitted secrets will be resolved at connect time using 1 or more Blob Credential files.
Blob Credential files can be specified on the command line (via --blob_credentials <FILE>) or via the environment variable FDB_BLOB_CREDENTIALS which can be set to a colon-separated list of files. The command line takes priority over the environment variable however all files from both sources will be used.
Blob Credential files can be specified on the command line (via --blob-credentials <FILE>) or via the environment variable FDB_BLOB_CREDENTIALS which can be set to a colon-separated list of files. The command line takes priority over the environment variable however all files from both sources will be used.
At connect time, the specified files are read in order and the first matching account specification (user@host)
will be used to obtain the secret key.
@ -222,7 +222,7 @@ The following options apply to most subcommands:
``-t <TAG>``
A "tag" is a named slot in which a backup task executes. Backups on different named tags make progress and are controlled independently, though their executions are handled by the same set of backup agent processes. Any number of unique backup tags can be active at once. If the tag is not specified, the default tag name "default" is used.
``--blob_credentials <FILE>``
``--blob-credentials <FILE>``
Use FILE as a :ref:`Blob Credential File<blob-credential-files>`. Can be used multiple times.
.. _backup-start:
@ -236,18 +236,18 @@ The ``start`` subcommand is used to start a backup. If there is already a backu
::
user@host$ fdbbackup start [-t <TAG>] -d <BACKUP_URL> [-z] [-s <DURATION>] [--partitioned_log_experimental] [-w] [-k '<BEGIN>[ <END>]']...
user@host$ fdbbackup start [-t <TAG>] -d <BACKUP_URL> [-z] [-s <DURATION>] [--partitioned-log-experimental] [-w] [-k '<BEGIN>[ <END>]']...
``-z``
Perform the backup continuously rather than terminating once a restorable backup is achieved. Database mutations within the backup's target key ranges will be continuously written to the backup as well as repeated inconsistent snapshots at the configured snapshot rate.
``-s <DURATION>`` or ``--snapshot_interval <DURATION>``
``-s <DURATION>`` or ``--snapshot-interval <DURATION>``
Specifies the duration, in seconds, of the inconsistent snapshots written to the backup in continuous mode. The default is 864000 which is 10 days.
``--initial_snapshot_interval <DURATION>``
``--initial-snapshot-interval <DURATION>``
Specifies the duration, in seconds, of the first inconsistent snapshot written to the backup. The default is 0, which means as fast as possible.
``--partitioned_log_experimental``
``--partitioned-log-experimental``
Specifies the backup uses the partitioned mutation logs generated by backup workers. Since FDB version 6.3, this option is experimental and requires using fast restore for restoring the database from the generated files. The default is to use non-partitioned mutation logs generated by backup agents.
``-w``
@ -270,18 +270,18 @@ The ``modify`` subcommand is used to modify parameters of a running backup. All
::
user@host$ fdbbackup modify [-t <TAG>] [-d <BACKUP_URL>] [-s <DURATION>] [--active_snapshot_interval <DURATION>] [--verify_uid <UID>]
user@host$ fdbbackup modify [-t <TAG>] [-d <BACKUP_URL>] [-s <DURATION>] [--active-snapshot-interval <DURATION>] [--verify-uid <UID>]
``-d <BACKUP_URL>``
Sets a new Backup URL for the backup to write to. This is most likely to be used to change only URL parameters or account information. However, it can also be used to start writing to a new destination mid-backup. The old location will cease gaining any additional restorability, while the new location will not be restorable until a new snapshot begins and completes. Full restorability would be regained, however, if the contents of the two destinations were to be combined by the user.
``-s <DURATION>`` or ``--snapshot_interval <DURATION>``
``-s <DURATION>`` or ``--snapshot-interval <DURATION>``
Sets a new duration for backup snapshots, in seconds.
``--active_snapshot_interval <DURATION>``
``--active-snapshot-interval <DURATION>``
Sets new duration for the backup's currently active snapshot, in seconds, relative to the start of the snapshot.
``--verify_uid <UID>``
``--verify-uid <UID>``
Specifies a UID to verify against the BackupUID of the running backup. If provided, the UID is verified in the same transaction which sets the new backup parameters (if the UID matches).
.. program:: fdbbackup abort
@ -359,18 +359,18 @@ The ``expire`` subcommand will remove data from a backup prior to some point in
The expiration CUTOFF must be specified by one of the two following arguments:
``--expire_before_timestamp <DATETIME>``
``--expire-before-timestamp <DATETIME>``
Specifies the expiration cutoff to DATETIME. Requires a cluster file and will use version/timestamp metadata in the database to convert DATETIME to a database commit version. DATETIME must be in the form "YYYY/MM/DD.HH:MI:SS+hhmm", for example "2018/12/31.23:59:59-0800".
``--expire_before_version <VERSION>``
``--expire-before-version <VERSION>``
Specifies the cutoff by a database commit version.
Optionally, the user can specify a minimum RESTORABILITY guarantee with one of the following options.
``--restorable_after_timestamp <DATETIME>``
``--restorable-after-timestamp <DATETIME>``
Specifies that the backup must be restorable to DATETIME and later. Requires a cluster file and will use version/timestamp metadata in the database to convert DATETIME to a database commit version. DATETIME must be in the form "YYYY/MM/DD.HH:MI:SS+hhmm", for example "2018/12/31.23:59:59-0800".
``--restorable_after_version <VERSION>``
``--restorable-after-version <VERSION>``
Specifies that the backup must be restorable as of VERSION and later.
``-f`` or ``--force``
@ -385,9 +385,9 @@ The ``describe`` subcommand will analyze the given backup and print a summary of
::
user@host$ fdbbackup describe -d <BACKUP_URL> [--version_timestamps] [-C <CLUSTER_FILE>]
user@host$ fdbbackup describe -d <BACKUP_URL> [--version-timestamps] [-C <CLUSTER_FILE>]
``--version_timestamps``
``--version-timestamps``
If the originating cluster is still available and is passed on the command line, this option can be specified in order for all versions in the output to also be converted to timestamps for better human readability.
@ -402,7 +402,7 @@ The ``list`` subcommand will list the backups at a given 'base' or shortened Bac
user@host$ fdbbackup list -b <BASE_URL>
``-b <BASE_URL>`` or ``--base_url <BASE_URL>``
``-b <BASE_URL>`` or ``--base-url <BASE_URL>``
This a shortened Backup URL which looks just like a Backup URL but without the backup <name> so that the list command will discover and list all of the backups in the bucket.
@ -415,12 +415,12 @@ The ``cleanup`` subcommand will list orphaned backups and DRs and optionally rem
::
user@host$ fdbbackup cleanup [--delete_data] [--min_cleanup_seconds] [-C <CLUSTER_FILE>]
user@host$ fdbbackup cleanup [--delete-data] [--min-cleanup-seconds] [-C <CLUSTER_FILE>]
``--delete_data``
``--delete-data``
This flag will cause ``cleanup`` to remove mutations for the most stale backup or DR.
``--min_cleanup_seconds``
``--min-cleanup-seconds``
Specifies the amount of time a backup or DR needs to be stale before ``cleanup`` will remove mutations for it. By default this is set to one hour.
@ -447,10 +447,10 @@ The following options apply to all commands:
.. warning:: If multiple restore tasks are in progress they should be restoring to different prefixes or the result is undefined.
``--blob_credentials <FILE>``
``--blob-credentials <FILE>``
Use FILE as a :ref:`Blob Credential File<blob-credential-files>`. Can be used multiple times.
``--dest_cluster_file <CONNFILE>``
``--dest-cluster-file <CONNFILE>``
Required. Path to the cluster file that should be used to connect to the FoundationDB cluster you are restoring to.
.. _restore-start:
@ -472,10 +472,10 @@ The ``start`` command will start a new restore on the specified (or default) tag
``-k <KEYS>``
Specify list of key ranges from the backup to restore to the database
``--remove_prefix <PREFIX>``
``--remove-prefix <PREFIX>``
Remove PREFIX from the keys read from the backup
``--add_prefix <PREFIX>``
``--add-prefix <PREFIX>``
Add PREFIX to restored keys before writing them to the database
``-n``
@ -487,10 +487,10 @@ The ``start`` command will start a new restore on the specified (or default) tag
``--timestamp <DATETIME>``
Instead of the latest version the backup can be restored to, restore to a version from approximately the given timestamp. Requires orig_cluster_file to be specified. DATETIME must be in the form "YYYY/MM/DD.HH:MI:SS+hhmm", for example "2018/12/31.23:59:59-0800".
``--orig_cluster_file <CONNFILE>``
``--orig-cluster-file <CONNFILE>``
The cluster file for the original database from which the backup was created. The original database is only needed to convert a --timestamp argument to a database version.
``--inconsistent_snapshot_only``
``--inconsistent-snapshot-only``
Ignore mutation log files during the restore to speedup the process. Because only range files are restored, this option gives an inconsistent snapshot in most cases and is not recommended to use.
.. program:: fdbrestore abort
@ -532,7 +532,7 @@ The ``status`` command will print a detailed status report on restore job progre
If not specified, a :ref:`default cluster file <default-cluster-file>` will be used.
``--blob_credentials <FILE>``
``--blob-credentials <FILE>``
Use FILE as a :ref:`Blob Credential File<blob-credential-files>`. Can be used multiple times.
.. _fdbdr-intro:

View File

@ -341,14 +341,14 @@ example this file would create a server with 8 processes of which 4 would act as
group = foundationdb
[general]
restart_delay = 60
cluster_file = /etc/foundationdb/fdb.cluster
restart-delay = 60
cluster-file = /etc/foundationdb/fdb.cluster
## Default parameters for individual fdbserver processes
[fdbserver]
command = /usr/sbin/fdbserver
public_address = auto:$ID
listen_address = public
public-address = auto:$ID
listen-address = public
datadir = /var/lib/foundationdb/data/$ID
logdir = /var/log/foundationdb

View File

@ -212,46 +212,46 @@ Contains basic configuration parameters of the ``fdbmonitor`` process. ``user``
.. code-block:: ini
[general]
cluster_file = /etc/foundationdb/fdb.cluster
restart_delay = 60
## restart_backoff and restart_delay_reset_interval default to the value that is used for restart_delay
# initial_restart_delay = 0
# restart_backoff = 60.0
# restart_delay_reset_interval = 60
# delete_envvars =
# kill_on_configuration_change = true
# disable_lifecycle_logging = false
cluster-file = /etc/foundationdb/fdb.cluster
restart-delay = 60
## restart-backoff and restart-delay-reset-interval default to the value that is used for restart-delay
# initial-restart-delay = 0
# restart-backoff = 60.0
# restart-delay-reset-interval = 60
# delete-envvars =
# kill-on-configuration-change = true
# disable-lifecycle-logging = false
Contains settings applicable to all processes (e.g. fdbserver, backup_agent).
* ``cluster_file``: Specifies the location of the cluster file. This file and the directory that contains it must be writable by all processes (i.e. by the user or group set in the ``[fdbmonitor]`` section).
* ``delete_envvars``: A space separated list of environment variables to remove from the environments of child processes. This can be used if the ``fdbmonitor`` process needs to be run with environment variables that are undesired in its children.
* ``kill_on_configuration_change``: If ``true``, affected processes will be restarted whenever the configuration file changes. Defaults to ``true``.
* ``disable_lifecycle_logging``: If ``true``, ``fdbmonitor`` will not write log events when processes start or terminate. Defaults to ``false``.
* ``cluster-file``: Specifies the location of the cluster file. This file and the directory that contains it must be writable by all processes (i.e. by the user or group set in the ``[fdbmonitor]`` section).
* ``delete-envvars``: A space separated list of environment variables to remove from the environments of child processes. This can be used if the ``fdbmonitor`` process needs to be run with environment variables that are undesired in its children.
* ``kill-on-configuration-change``: If ``true``, affected processes will be restarted whenever the configuration file changes. Defaults to ``true``.
* ``disable-lifecycle-logging``: If ``true``, ``fdbmonitor`` will not write log events when processes start or terminate. Defaults to ``false``.
.. _configuration-restarting:
The ``[general]`` section also contains some parameters to control how processes are restarted when they die. ``fdbmonitor`` uses backoff logic to prevent a process that dies repeatedly from cycling too quickly, and it also introduces up to +/-10% random jitter into the delay to avoid multiple processes all restarting simultaneously. ``fdbmonitor`` tracks separate backoff state for each process, so the restarting of one process will have no effect on the backoff behavior of another.
* ``restart_delay``: The maximum number of seconds (subject to jitter) that fdbmonitor will delay before restarting a failed process.
* ``initial_restart_delay``: The number of seconds ``fdbmonitor`` waits to restart a process the first time it dies. Defaults to 0 (i.e. the process gets restarted immediately).
* ``restart_backoff``: Controls how quickly ``fdbmonitor`` backs off when a process dies repeatedly. The previous delay (or 1, if the previous delay is 0) is multiplied by ``restart_backoff`` to get the next delay, maxing out at the value of ``restart_delay``. Defaults to the value of ``restart_delay``, meaning that the second and subsequent failures will all delay ``restart_delay`` between restarts.
* ``restart_delay_reset_interval``: The number of seconds a process must be running before resetting the backoff back to the value of ``initial_restart_delay``. Defaults to the value of ``restart_delay``.
* ``restart-delay``: The maximum number of seconds (subject to jitter) that fdbmonitor will delay before restarting a failed process.
* ``initial-restart-delay``: The number of seconds ``fdbmonitor`` waits to restart a process the first time it dies. Defaults to 0 (i.e. the process gets restarted immediately).
* ``restart-backoff``: Controls how quickly ``fdbmonitor`` backs off when a process dies repeatedly. The previous delay (or 1, if the previous delay is 0) is multiplied by ``restart-backoff`` to get the next delay, maxing out at the value of ``restart-delay``. Defaults to the value of ``restart-delay``, meaning that the second and subsequent failures will all delay ``restart-delay`` between restarts.
* ``restart-delay-reset-interval``: The number of seconds a process must be running before resetting the backoff back to the value of ``initial-restart-delay``. Defaults to the value of ``restart-delay``.
These ``restart_`` parameters are not applicable to the ``fdbmonitor`` process itself. See :ref:`Configuring autorestart of fdbmonitor <configuration-restart-fdbmonitor>` for details.
These ``restart-`` parameters are not applicable to the ``fdbmonitor`` process itself. See :ref:`Configuring autorestart of fdbmonitor <configuration-restart-fdbmonitor>` for details.
As an example, let's say the following parameters have been set:
.. code-block:: ini
restart_delay = 60
initial_restart_delay = 0
restart_backoff = 2.0
restart_delay_reset_interval = 180
restart-delay = 60
initial-restart-delay = 0
restart-backoff = 2.0
restart-delay-reset-interval = 180
The progression of delays for a process that fails repeatedly would be ``0, 2, 4, 8, 16, 32, 60, 60, ...``, each subject to a 10% random jitter. After the process stays alive for 180 seconds, the backoff would reset and the next failure would restart the process immediately.
Using the default parameters, a process will restart immediately if it fails and then delay ``restart_delay`` seconds if it fails again within ``restart_delay`` seconds.
Using the default parameters, a process will restart immediately if it fails and then delay ``restart-delay`` seconds if it fails again within ``restart-delay`` seconds.
.. _foundationdb-conf-fdbserver:
@ -263,43 +263,43 @@ Using the default parameters, a process will restart immediately if it fails and
## Default parameters for individual fdbserver processes
[fdbserver]
command = /usr/sbin/fdbserver
public_address = auto:$ID
listen_address = public
public-address = auto:$ID
listen-address = public
datadir = /var/lib/foundationdb/data/$ID
logdir = /var/log/foundationdb
# logsize = 10MiB
# maxlogssize = 100MiB
# class =
# memory = 8GiB
# storage_memory = 1GiB
# cache_memory = 2GiB
# locality_machineid =
# locality_zoneid =
# locality_data_hall =
# locality_dcid =
# io_trust_seconds = 20
# storage-memory = 1GiB
# cache-memory = 2GiB
# locality-machineid =
# locality-zoneid =
# locality-data-hall =
# locality-dcid =
# io-trust-seconds = 20
Contains default parameters for all fdbserver processes on this machine. These same options can be overridden for individual processes in their respective ``[fdbserver.<ID>]`` sections. In this section, the ID of the individual fdbserver can be substituted by using the ``$ID`` variable in the value. For example, ``public_address = auto:$ID`` makes each fdbserver listen on a port equal to its ID.
Contains default parameters for all fdbserver processes on this machine. These same options can be overridden for individual processes in their respective ``[fdbserver.<ID>]`` sections. In this section, the ID of the individual fdbserver can be substituted by using the ``$ID`` variable in the value. For example, ``public-address = auto:$ID`` makes each fdbserver listen on a port equal to its ID.
.. note:: |multiplicative-suffixes|
.. note:: In general locality id's are used to specify the location of processes which in turn is used to determine fault and replication domains.
* ``command``: The location of the ``fdbserver`` binary.
* ``public_address``: The publicly visible IP:Port of the process. If ``auto``, the address will be the one used to communicate with the coordination servers.
* ``listen_address``: The IP:Port that the server socket should bind to. If ``public``, it will be the same as the public_address.
* ``public-address``: The publicly visible IP:Port of the process. If ``auto``, the address will be the one used to communicate with the coordination servers.
* ``listen-address``: The IP:Port that the server socket should bind to. If ``public``, it will be the same as the public-address.
* ``datadir``: A writable directory (by root or by the user set in the [fdbmonitor] section) where persistent data files will be stored.
* ``logdir``: A writable directory (by root or by the user set in the [fdbmonitor] section) where FoundationDB will store log files.
* ``logsize``: Roll over to a new log file after the current log file reaches the specified size. The default value is 10MiB.
* ``maxlogssize``: Delete the oldest log file when the total size of all log files exceeds the specified size. If set to 0B, old log files will not be deleted. The default value is 100MiB.
* ``class``: Process class specifying the roles that will be taken in the cluster. Recommended options are ``storage``, ``transaction``, ``stateless``. See :ref:`guidelines-process-class-config` for process class config recommendations.
* ``memory``: Maximum memory used by the process. The default value is 8GiB. When specified without a unit, MiB is assumed. This parameter does not change the memory allocation of the program. Rather, it sets a hard limit beyond which the process will kill itself and be restarted. The default value of 8GiB is double the intended memory usage in the default configuration (providing an emergency buffer to deal with memory leaks or similar problems). It is *not* recommended to decrease the value of this parameter below its default value. It may be *increased* if you wish to allocate a very large amount of storage engine memory or cache. In particular, when the ``storage_memory`` or ``cache_memory`` parameters are increased, the ``memory`` parameter should be increased by an equal amount.
* ``storage_memory``: Maximum memory used for data storage. This parameter is used *only* with memory storage engine, not the ssd storage engine. The default value is 1GiB. When specified without a unit, MB is assumed. Clusters will be restricted to using this amount of memory per process for purposes of data storage. Memory overhead associated with storing the data is counted against this total. If you increase the ``storage_memory`` parameter, you should also increase the ``memory`` parameter by the same amount.
* ``cache_memory``: Maximum memory used for caching pages from disk. The default value is 2GiB. When specified without a unit, MiB is assumed. If you increase the ``cache_memory`` parameter, you should also increase the ``memory`` parameter by the same amount.
* ``locality_machineid``: Machine identifier key. All processes on a machine should share a unique id. By default, processes on a machine determine a unique id to share. This does not generally need to be set.
* ``locality_zoneid``: Zone identifier key. Processes that share a zone id are considered non-unique for the purposes of data replication. If unset, defaults to machine id.
* ``locality_dcid``: Datacenter identifier key. All processes physically located in a datacenter should share the id. No default value. If you are depending on datacenter based replication this must be set on all processes.
* ``locality_data_hall``: Data hall identifier key. All processes physically located in a data hall should share the id. No default value. If you are depending on data hall based replication this must be set on all processes.
* ``io_trust_seconds``: Time in seconds that a read or write operation is allowed to take before timing out with an error. If an operation times out, all future operations on that file will fail with an error as well. Only has an effect when using AsyncFileKAIO in Linux. If unset, defaults to 0 which means timeout is disabled.
* ``memory``: Maximum memory used by the process. The default value is 8GiB. When specified without a unit, MiB is assumed. This parameter does not change the memory allocation of the program. Rather, it sets a hard limit beyond which the process will kill itself and be restarted. The default value of 8GiB is double the intended memory usage in the default configuration (providing an emergency buffer to deal with memory leaks or similar problems). It is *not* recommended to decrease the value of this parameter below its default value. It may be *increased* if you wish to allocate a very large amount of storage engine memory or cache. In particular, when the ``storage-memory`` or ``cache-memory`` parameters are increased, the ``memory`` parameter should be increased by an equal amount.
* ``storage-memory``: Maximum memory used for data storage. This parameter is used *only* with memory storage engine, not the ssd storage engine. The default value is 1GiB. When specified without a unit, MB is assumed. Clusters will be restricted to using this amount of memory per process for purposes of data storage. Memory overhead associated with storing the data is counted against this total. If you increase the ``storage-memory`` parameter, you should also increase the ``memory`` parameter by the same amount.
* ``cache-memory``: Maximum memory used for caching pages from disk. The default value is 2GiB. When specified without a unit, MiB is assumed. If you increase the ``cache-memory`` parameter, you should also increase the ``memory`` parameter by the same amount.
* ``locality-machineid``: Machine identifier key. All processes on a machine should share a unique id. By default, processes on a machine determine a unique id to share. This does not generally need to be set.
* ``locality-zoneid``: Zone identifier key. Processes that share a zone id are considered non-unique for the purposes of data replication. If unset, defaults to machine id.
* ``locality-dcid``: Datacenter identifier key. All processes physically located in a datacenter should share the id. No default value. If you are depending on datacenter based replication this must be set on all processes.
* ``locality-data-hall``: Data hall identifier key. All processes physically located in a data hall should share the id. No default value. If you are depending on data hall based replication this must be set on all processes.
* ``io-trust-seconds``: Time in seconds that a read or write operation is allowed to take before timing out with an error. If an operation times out, all future operations on that file will fail with an error as well. Only has an effect when using AsyncFileKAIO in Linux. If unset, defaults to 0 which means timeout is disabled.
.. note:: In addition to the options above, TLS settings as described for the :ref:`TLS plugin <configuring-tls>` can be specified in the [fdbserver] section.
@ -598,9 +598,9 @@ If a region failover occurs, clients will generally only see a latency spike of
Specifying datacenters
----------------------
To use region configurations all processes in the cluster need to specify in which datacenter they are located. This can be done on the command line with either ``--locality_dcid`` or ``--datacenter_id``. This datacenter identifier is case sensitive.
To use region configurations all processes in the cluster need to specify in which datacenter they are located. This can be done on the command line with either ``--locality-dcid`` or ``--datacenter-id``. This datacenter identifier is case sensitive.
Clients should also specify their datacenter with the database option ``datacenter_id``. If a client does not specify their datacenter, they will use latency estimates to balance traffic between the two regions. This will result in about 5% of requests being served by the remote regions, so reads will suffer from high tail latencies.
Clients should also specify their datacenter with the database option ``datacenter-id``. If a client does not specify their datacenter, they will use latency estimates to balance traffic between the two regions. This will result in about 5% of requests being served by the remote regions, so reads will suffer from high tail latencies.
Changing the region configuration
---------------------------------

View File

@ -156,7 +156,7 @@ The TLS certificate will be automatically refreshed on a configurable cadence. T
* They are valid certificates.
* The key file matches the certificate file.
The refresh rate is controlled by ``--knob_tls_cert_refresh_delay_seconds``. Setting it to 0 will disable the refresh.
The refresh rate is controlled by ``--knob-tls-cert-refresh-delay-seconds``. Setting it to 0 will disable the refresh.
The default LibreSSL-based implementation
=========================================

View File

@ -49,10 +49,10 @@ void printConvertUsage() {
<< " --loggroup LOG_GROUP\n"
<< " Sets the LogGroup field with the specified value for all\n"
<< " events in the trace output (defaults to `default').\n"
<< " --trace_format FORMAT\n"
<< " --trace-format FORMAT\n"
<< " Select the format of the trace files. xml (the default) and json are supported.\n"
<< " Has no effect unless --log is specified.\n"
<< " --build_flags Print build information and exit.\n"
<< " --build-flags Print build information and exit.\n"
<< " -h, --help Display this help and exit.\n"
<< "\n";
@ -571,7 +571,8 @@ int parseCommandLine(ConvertParams* param, CSimpleOpt* args) {
int main(int argc, char** argv) {
try {
CSimpleOpt* args = new CSimpleOpt(argc, argv, file_converter::gConverterOptions, SO_O_EXACT);
CSimpleOpt* args =
new CSimpleOpt(argc, argv, file_converter::gConverterOptions, SO_O_EXACT | SO_O_HYPHEN_TO_UNDERSCORE);
file_converter::ConvertParams param;
int status = file_converter::parseCommandLine(&param, args);
std::cout << "Params: " << param.toString() << "\n";

View File

@ -58,20 +58,20 @@ CSimpleOpt::SOption gConverterOptions[] = { { OPT_CONTAINER, "-r", SO_REQ_SEP },
{ OPT_END_VERSION, "--end", SO_REQ_SEP },
{ OPT_TRACE, "--log", SO_NONE },
{ OPT_TRACE_DIR, "--logdir", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace_format", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace-format", SO_REQ_SEP },
{ OPT_TRACE_LOG_GROUP, "--loggroup", SO_REQ_SEP },
{ OPT_INPUT_FILE, "-i", SO_REQ_SEP },
{ OPT_INPUT_FILE, "--input", SO_REQ_SEP },
{ OPT_BLOB_CREDENTIALS, "--blob_credentials", SO_REQ_SEP },
{ OPT_BLOB_CREDENTIALS, "--blob-credentials", SO_REQ_SEP },
#ifndef TLS_DISABLED
TLS_OPTION_FLAGS
#endif
{ OPT_BUILD_FLAGS, "--build_flags", SO_NONE },
{ OPT_LIST_ONLY, "--list_only", SO_NONE },
{ OPT_BUILD_FLAGS, "--build-flags", SO_NONE },
{ OPT_LIST_ONLY, "--list-only", SO_NONE },
{ OPT_KEY_PREFIX, "-k", SO_REQ_SEP },
{ OPT_HEX_KEY_PREFIX, "--hex_prefix", SO_REQ_SEP },
{ OPT_BEGIN_VERSION_FILTER, "--begin_version_filter", SO_REQ_SEP },
{ OPT_END_VERSION_FILTER, "--end_version_filter", SO_REQ_SEP },
{ OPT_HEX_KEY_PREFIX, "--hex-prefix", SO_REQ_SEP },
{ OPT_BEGIN_VERSION_FILTER, "--begin-version-filter", SO_REQ_SEP },
{ OPT_END_VERSION_FILTER, "--end-version-filter", SO_REQ_SEP },
{ OPT_HELP, "-?", SO_NONE },
{ OPT_HELP, "-h", SO_NONE },
{ OPT_HELP, "--help", SO_NONE },

View File

@ -59,24 +59,24 @@ void printDecodeUsage() {
" --loggroup LOG_GROUP\n"
" Sets the LogGroup field with the specified value for all\n"
" events in the trace output (defaults to `default').\n"
" --trace_format FORMAT\n"
" --trace-format FORMAT\n"
" Select the format of the trace files, xml (the default) or json.\n"
" Has no effect unless --log is specified.\n"
" --crash Crash on serious error.\n"
" --blob_credentials FILE\n"
" --blob-credentials FILE\n"
" File containing blob credentials in JSON format.\n"
" The same credential format/file fdbbackup uses.\n"
#ifndef TLS_DISABLED
TLS_HELP
#endif
" --build_flags Print build information and exit.\n"
" --list_only Print file list and exit.\n"
" --build-flags Print build information and exit.\n"
" --list-only Print file list and exit.\n"
" -k KEY_PREFIX Use the prefix for filtering mutations\n"
" --hex_prefix HEX_PREFIX\n"
" --hex-prefix HEX_PREFIX\n"
" The prefix specified in HEX format, e.g., \\x05\\x01.\n"
" --begin_version_filter BEGIN_VERSION\n"
" --begin-version-filter BEGIN_VERSION\n"
" The version range's begin version (inclusive) for filtering.\n"
" --end_version_filter END_VERSION\n"
" --end-version-filter END_VERSION\n"
" The version range's end version (exclusive) for filtering.\n"
"\n";
return;
@ -514,7 +514,8 @@ ACTOR Future<Void> decode_logs(DecodeParams params) {
int main(int argc, char** argv) {
try {
CSimpleOpt* args = new CSimpleOpt(argc, argv, file_converter::gConverterOptions, SO_O_EXACT);
CSimpleOpt* args =
new CSimpleOpt(argc, argv, file_converter::gConverterOptions, SO_O_EXACT | SO_O_HYPHEN_TO_UNDERSCORE);
file_converter::DecodeParams param;
int status = file_converter::parseDecodeCommandLine(&param, args);
std::cout << "Params: " << param.toString() << "\n";

View File

@ -22,6 +22,7 @@
#include "fdbbackup/BackupTLSConfig.h"
#include "fdbclient/JsonBuilder.h"
#include "flow/Arena.h"
#include "flow/ArgParseUtil.h"
#include "flow/Error.h"
#include "flow/Trace.h"
#define BOOST_DATE_TIME_NO_LIB
@ -182,7 +183,7 @@ enum {
// Top level binary commands.
CSimpleOpt::SOption g_rgOptions[] = { { OPT_VERSION, "-v", SO_NONE },
{ OPT_VERSION, "--version", SO_NONE },
{ OPT_BUILD_FLAGS, "--build_flags", SO_NONE },
{ OPT_BUILD_FLAGS, "--build-flags", SO_NONE },
{ OPT_HELP, "-?", SO_NONE },
{ OPT_HELP, "-h", SO_NONE },
{ OPT_HELP, "--help", SO_NONE },
@ -194,26 +195,26 @@ CSimpleOpt::SOption g_rgAgentOptions[] = {
{ OPT_PARENTPID, "--parentpid", SO_REQ_SEP },
#endif
{ OPT_CLUSTERFILE, "-C", SO_REQ_SEP },
{ OPT_CLUSTERFILE, "--cluster_file", SO_REQ_SEP },
{ OPT_KNOB, "--knob_", SO_REQ_SEP },
{ OPT_CLUSTERFILE, "--cluster-file", SO_REQ_SEP },
{ OPT_KNOB, "--knob-", SO_REQ_SEP },
{ OPT_VERSION, "--version", SO_NONE },
{ OPT_VERSION, "-v", SO_NONE },
{ OPT_BUILD_FLAGS, "--build_flags", SO_NONE },
{ OPT_BUILD_FLAGS, "--build-flags", SO_NONE },
{ OPT_QUIET, "-q", SO_NONE },
{ OPT_QUIET, "--quiet", SO_NONE },
{ OPT_TRACE, "--log", SO_NONE },
{ OPT_TRACE_DIR, "--logdir", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace_format", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace-format", SO_REQ_SEP },
{ OPT_TRACE_LOG_GROUP, "--loggroup", SO_REQ_SEP },
{ OPT_CRASHONERROR, "--crash", SO_NONE },
{ OPT_LOCALITY, "--locality_", SO_REQ_SEP },
{ OPT_LOCALITY, "--locality-", SO_REQ_SEP },
{ OPT_MEMLIMIT, "-m", SO_REQ_SEP },
{ OPT_MEMLIMIT, "--memory", SO_REQ_SEP },
{ OPT_HELP, "-?", SO_NONE },
{ OPT_HELP, "-h", SO_NONE },
{ OPT_HELP, "--help", SO_NONE },
{ OPT_DEVHELP, "--dev-help", SO_NONE },
{ OPT_BLOB_CREDENTIALS, "--blob_credentials", SO_REQ_SEP },
{ OPT_BLOB_CREDENTIALS, "--blob-credentials", SO_REQ_SEP },
#ifndef TLS_DISABLED
TLS_OPTION_FLAGS
#endif
@ -225,7 +226,7 @@ CSimpleOpt::SOption g_rgBackupStartOptions[] = {
{ OPT_PARENTPID, "--parentpid", SO_REQ_SEP },
#endif
{ OPT_CLUSTERFILE, "-C", SO_REQ_SEP },
{ OPT_CLUSTERFILE, "--cluster_file", SO_REQ_SEP },
{ OPT_CLUSTERFILE, "--cluster-file", SO_REQ_SEP },
{ OPT_WAITFORDONE, "-w", SO_NONE },
{ OPT_WAITFORDONE, "--waitfordone", SO_NONE },
{ OPT_NOSTOPWHENDONE, "-z", SO_NONE },
@ -234,10 +235,10 @@ CSimpleOpt::SOption g_rgBackupStartOptions[] = {
{ OPT_DESTCONTAINER, "--destcontainer", SO_REQ_SEP },
// Enable "-p" option after GA
// { OPT_USE_PARTITIONED_LOG, "-p", SO_NONE },
{ OPT_USE_PARTITIONED_LOG, "--partitioned_log_experimental", SO_NONE },
{ OPT_USE_PARTITIONED_LOG, "--partitioned-log-experimental", SO_NONE },
{ OPT_SNAPSHOTINTERVAL, "-s", SO_REQ_SEP },
{ OPT_SNAPSHOTINTERVAL, "--snapshot_interval", SO_REQ_SEP },
{ OPT_INITIAL_SNAPSHOT_INTERVAL, "--initial_snapshot_interval", SO_REQ_SEP },
{ OPT_SNAPSHOTINTERVAL, "--snapshot-interval", SO_REQ_SEP },
{ OPT_INITIAL_SNAPSHOT_INTERVAL, "--initial-snapshot-interval", SO_REQ_SEP },
{ OPT_TAGNAME, "-t", SO_REQ_SEP },
{ OPT_TAGNAME, "--tagname", SO_REQ_SEP },
{ OPT_BACKUPKEYS, "-k", SO_REQ_SEP },
@ -246,7 +247,7 @@ CSimpleOpt::SOption g_rgBackupStartOptions[] = {
{ OPT_DRYRUN, "--dryrun", SO_NONE },
{ OPT_TRACE, "--log", SO_NONE },
{ OPT_TRACE_DIR, "--logdir", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace_format", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace-format", SO_REQ_SEP },
{ OPT_TRACE_LOG_GROUP, "--loggroup", SO_REQ_SEP },
{ OPT_QUIET, "-q", SO_NONE },
{ OPT_QUIET, "--quiet", SO_NONE },
@ -257,10 +258,10 @@ CSimpleOpt::SOption g_rgBackupStartOptions[] = {
{ OPT_HELP, "-h", SO_NONE },
{ OPT_HELP, "--help", SO_NONE },
{ OPT_DEVHELP, "--dev-help", SO_NONE },
{ OPT_KNOB, "--knob_", SO_REQ_SEP },
{ OPT_BLOB_CREDENTIALS, "--blob_credentials", SO_REQ_SEP },
{ OPT_KNOB, "--knob-", SO_REQ_SEP },
{ OPT_BLOB_CREDENTIALS, "--blob-credentials", SO_REQ_SEP },
{ OPT_INCREMENTALONLY, "--incremental", SO_NONE },
{ OPT_ENCRYPTION_KEY_FILE, "--encryption_key_file", SO_REQ_SEP },
{ OPT_ENCRYPTION_KEY_FILE, "--encryption-key-file", SO_REQ_SEP },
#ifndef TLS_DISABLED
TLS_OPTION_FLAGS
#endif
@ -283,18 +284,18 @@ CSimpleOpt::SOption g_rgBackupModifyOptions[] = {
{ OPT_HELP, "-h", SO_NONE },
{ OPT_HELP, "--help", SO_NONE },
{ OPT_DEVHELP, "--dev-help", SO_NONE },
{ OPT_BLOB_CREDENTIALS, "--blob_credentials", SO_REQ_SEP },
{ OPT_KNOB, "--knob_", SO_REQ_SEP },
{ OPT_BLOB_CREDENTIALS, "--blob-credentials", SO_REQ_SEP },
{ OPT_KNOB, "--knob-", SO_REQ_SEP },
{ OPT_CLUSTERFILE, "-C", SO_REQ_SEP },
{ OPT_CLUSTERFILE, "--cluster_file", SO_REQ_SEP },
{ OPT_CLUSTERFILE, "--cluster-file", SO_REQ_SEP },
{ OPT_TAGNAME, "-t", SO_REQ_SEP },
{ OPT_TAGNAME, "--tagname", SO_REQ_SEP },
{ OPT_MOD_VERIFY_UID, "--verify_uid", SO_REQ_SEP },
{ OPT_MOD_VERIFY_UID, "--verify-uid", SO_REQ_SEP },
{ OPT_DESTCONTAINER, "-d", SO_REQ_SEP },
{ OPT_DESTCONTAINER, "--destcontainer", SO_REQ_SEP },
{ OPT_SNAPSHOTINTERVAL, "-s", SO_REQ_SEP },
{ OPT_SNAPSHOTINTERVAL, "--snapshot_interval", SO_REQ_SEP },
{ OPT_MOD_ACTIVE_INTERVAL, "--active_snapshot_interval", SO_REQ_SEP },
{ OPT_SNAPSHOTINTERVAL, "--snapshot-interval", SO_REQ_SEP },
{ OPT_MOD_ACTIVE_INTERVAL, "--active-snapshot-interval", SO_REQ_SEP },
SO_END_OF_OPTIONS
};
@ -304,14 +305,14 @@ CSimpleOpt::SOption g_rgBackupStatusOptions[] = {
{ OPT_PARENTPID, "--parentpid", SO_REQ_SEP },
#endif
{ OPT_CLUSTERFILE, "-C", SO_REQ_SEP },
{ OPT_CLUSTERFILE, "--cluster_file", SO_REQ_SEP },
{ OPT_CLUSTERFILE, "--cluster-file", SO_REQ_SEP },
{ OPT_ERRORLIMIT, "-e", SO_REQ_SEP },
{ OPT_ERRORLIMIT, "--errorlimit", SO_REQ_SEP },
{ OPT_TAGNAME, "-t", SO_REQ_SEP },
{ OPT_TAGNAME, "--tagname", SO_REQ_SEP },
{ OPT_TRACE, "--log", SO_NONE },
{ OPT_TRACE_DIR, "--logdir", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace_format", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace-format", SO_REQ_SEP },
{ OPT_TRACE_LOG_GROUP, "--loggroup", SO_REQ_SEP },
{ OPT_QUIET, "-q", SO_NONE },
{ OPT_QUIET, "--quiet", SO_NONE },
@ -323,7 +324,7 @@ CSimpleOpt::SOption g_rgBackupStatusOptions[] = {
{ OPT_HELP, "--help", SO_NONE },
{ OPT_DEVHELP, "--dev-help", SO_NONE },
{ OPT_JSON, "--json", SO_NONE },
{ OPT_KNOB, "--knob_", SO_REQ_SEP },
{ OPT_KNOB, "--knob-", SO_REQ_SEP },
#ifndef TLS_DISABLED
TLS_OPTION_FLAGS
#endif
@ -335,12 +336,12 @@ CSimpleOpt::SOption g_rgBackupAbortOptions[] = {
{ OPT_PARENTPID, "--parentpid", SO_REQ_SEP },
#endif
{ OPT_CLUSTERFILE, "-C", SO_REQ_SEP },
{ OPT_CLUSTERFILE, "--cluster_file", SO_REQ_SEP },
{ OPT_CLUSTERFILE, "--cluster-file", SO_REQ_SEP },
{ OPT_TAGNAME, "-t", SO_REQ_SEP },
{ OPT_TAGNAME, "--tagname", SO_REQ_SEP },
{ OPT_TRACE, "--log", SO_NONE },
{ OPT_TRACE_DIR, "--logdir", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace_format", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace-format", SO_REQ_SEP },
{ OPT_TRACE_LOG_GROUP, "--loggroup", SO_REQ_SEP },
{ OPT_QUIET, "-q", SO_NONE },
{ OPT_QUIET, "--quiet", SO_NONE },
@ -351,7 +352,7 @@ CSimpleOpt::SOption g_rgBackupAbortOptions[] = {
{ OPT_HELP, "-h", SO_NONE },
{ OPT_HELP, "--help", SO_NONE },
{ OPT_DEVHELP, "--dev-help", SO_NONE },
{ OPT_KNOB, "--knob_", SO_REQ_SEP },
{ OPT_KNOB, "--knob-", SO_REQ_SEP },
#ifndef TLS_DISABLED
TLS_OPTION_FLAGS
#endif
@ -363,10 +364,10 @@ CSimpleOpt::SOption g_rgBackupCleanupOptions[] = {
{ OPT_PARENTPID, "--parentpid", SO_REQ_SEP },
#endif
{ OPT_CLUSTERFILE, "-C", SO_REQ_SEP },
{ OPT_CLUSTERFILE, "--cluster_file", SO_REQ_SEP },
{ OPT_CLUSTERFILE, "--cluster-file", SO_REQ_SEP },
{ OPT_TRACE, "--log", SO_NONE },
{ OPT_TRACE_DIR, "--logdir", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace_format", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace-format", SO_REQ_SEP },
{ OPT_TRACE_LOG_GROUP, "--loggroup", SO_REQ_SEP },
{ OPT_QUIET, "-q", SO_NONE },
{ OPT_QUIET, "--quiet", SO_NONE },
@ -377,9 +378,9 @@ CSimpleOpt::SOption g_rgBackupCleanupOptions[] = {
{ OPT_HELP, "-h", SO_NONE },
{ OPT_HELP, "--help", SO_NONE },
{ OPT_DEVHELP, "--dev-help", SO_NONE },
{ OPT_KNOB, "--knob_", SO_REQ_SEP },
{ OPT_DELETE_DATA, "--delete_data", SO_NONE },
{ OPT_MIN_CLEANUP_SECONDS, "--min_cleanup_seconds", SO_REQ_SEP },
{ OPT_KNOB, "--knob-", SO_REQ_SEP },
{ OPT_DELETE_DATA, "--delete-data", SO_NONE },
{ OPT_MIN_CLEANUP_SECONDS, "--min-cleanup-seconds", SO_REQ_SEP },
#ifndef TLS_DISABLED
TLS_OPTION_FLAGS
#endif
@ -391,14 +392,14 @@ CSimpleOpt::SOption g_rgBackupDiscontinueOptions[] = {
{ OPT_PARENTPID, "--parentpid", SO_REQ_SEP },
#endif
{ OPT_CLUSTERFILE, "-C", SO_REQ_SEP },
{ OPT_CLUSTERFILE, "--cluster_file", SO_REQ_SEP },
{ OPT_CLUSTERFILE, "--cluster-file", SO_REQ_SEP },
{ OPT_TAGNAME, "-t", SO_REQ_SEP },
{ OPT_TAGNAME, "--tagname", SO_REQ_SEP },
{ OPT_WAITFORDONE, "-w", SO_NONE },
{ OPT_WAITFORDONE, "--waitfordone", SO_NONE },
{ OPT_TRACE, "--log", SO_NONE },
{ OPT_TRACE_DIR, "--logdir", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace_format", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace-format", SO_REQ_SEP },
{ OPT_TRACE_LOG_GROUP, "--loggroup", SO_REQ_SEP },
{ OPT_QUIET, "-q", SO_NONE },
{ OPT_QUIET, "--quiet", SO_NONE },
@ -409,7 +410,7 @@ CSimpleOpt::SOption g_rgBackupDiscontinueOptions[] = {
{ OPT_HELP, "-h", SO_NONE },
{ OPT_HELP, "--help", SO_NONE },
{ OPT_DEVHELP, "--dev-help", SO_NONE },
{ OPT_KNOB, "--knob_", SO_REQ_SEP },
{ OPT_KNOB, "--knob-", SO_REQ_SEP },
#ifndef TLS_DISABLED
TLS_OPTION_FLAGS
#endif
@ -421,14 +422,14 @@ CSimpleOpt::SOption g_rgBackupWaitOptions[] = {
{ OPT_PARENTPID, "--parentpid", SO_REQ_SEP },
#endif
{ OPT_CLUSTERFILE, "-C", SO_REQ_SEP },
{ OPT_CLUSTERFILE, "--cluster_file", SO_REQ_SEP },
{ OPT_CLUSTERFILE, "--cluster-file", SO_REQ_SEP },
{ OPT_TAGNAME, "-t", SO_REQ_SEP },
{ OPT_TAGNAME, "--tagname", SO_REQ_SEP },
{ OPT_NOSTOPWHENDONE, "-z", SO_NONE },
{ OPT_NOSTOPWHENDONE, "--no-stop-when-done", SO_NONE },
{ OPT_TRACE, "--log", SO_NONE },
{ OPT_TRACE_DIR, "--logdir", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace_format", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace-format", SO_REQ_SEP },
{ OPT_TRACE_LOG_GROUP, "--loggroup", SO_REQ_SEP },
{ OPT_QUIET, "-q", SO_NONE },
{ OPT_QUIET, "--quiet", SO_NONE },
@ -439,7 +440,7 @@ CSimpleOpt::SOption g_rgBackupWaitOptions[] = {
{ OPT_HELP, "-h", SO_NONE },
{ OPT_HELP, "--help", SO_NONE },
{ OPT_DEVHELP, "--dev-help", SO_NONE },
{ OPT_KNOB, "--knob_", SO_REQ_SEP },
{ OPT_KNOB, "--knob-", SO_REQ_SEP },
#ifndef TLS_DISABLED
TLS_OPTION_FLAGS
#endif
@ -451,10 +452,10 @@ CSimpleOpt::SOption g_rgBackupPauseOptions[] = {
{ OPT_PARENTPID, "--parentpid", SO_REQ_SEP },
#endif
{ OPT_CLUSTERFILE, "-C", SO_REQ_SEP },
{ OPT_CLUSTERFILE, "--cluster_file", SO_REQ_SEP },
{ OPT_CLUSTERFILE, "--cluster-file", SO_REQ_SEP },
{ OPT_TRACE, "--log", SO_NONE },
{ OPT_TRACE_DIR, "--logdir", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace_format", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace-format", SO_REQ_SEP },
{ OPT_TRACE_LOG_GROUP, "--loggroup", SO_REQ_SEP },
{ OPT_QUIET, "-q", SO_NONE },
{ OPT_QUIET, "--quiet", SO_NONE },
@ -465,7 +466,7 @@ CSimpleOpt::SOption g_rgBackupPauseOptions[] = {
{ OPT_HELP, "-h", SO_NONE },
{ OPT_HELP, "--help", SO_NONE },
{ OPT_DEVHELP, "--dev-help", SO_NONE },
{ OPT_KNOB, "--knob_", SO_REQ_SEP },
{ OPT_KNOB, "--knob-", SO_REQ_SEP },
#ifndef TLS_DISABLED
TLS_OPTION_FLAGS
#endif
@ -477,12 +478,12 @@ CSimpleOpt::SOption g_rgBackupExpireOptions[] = {
{ OPT_PARENTPID, "--parentpid", SO_REQ_SEP },
#endif
{ OPT_CLUSTERFILE, "-C", SO_REQ_SEP },
{ OPT_CLUSTERFILE, "--cluster_file", SO_REQ_SEP },
{ OPT_CLUSTERFILE, "--cluster-file", SO_REQ_SEP },
{ OPT_DESTCONTAINER, "-d", SO_REQ_SEP },
{ OPT_DESTCONTAINER, "--destcontainer", SO_REQ_SEP },
{ OPT_TRACE, "--log", SO_NONE },
{ OPT_TRACE_DIR, "--logdir", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace_format", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace-format", SO_REQ_SEP },
{ OPT_TRACE_LOG_GROUP, "--loggroup", SO_REQ_SEP },
{ OPT_QUIET, "-q", SO_NONE },
{ OPT_QUIET, "--quiet", SO_NONE },
@ -493,16 +494,16 @@ CSimpleOpt::SOption g_rgBackupExpireOptions[] = {
{ OPT_HELP, "-h", SO_NONE },
{ OPT_HELP, "--help", SO_NONE },
{ OPT_DEVHELP, "--dev-help", SO_NONE },
{ OPT_BLOB_CREDENTIALS, "--blob_credentials", SO_REQ_SEP },
{ OPT_KNOB, "--knob_", SO_REQ_SEP },
{ OPT_BLOB_CREDENTIALS, "--blob-credentials", SO_REQ_SEP },
{ OPT_KNOB, "--knob-", SO_REQ_SEP },
{ OPT_FORCE, "-f", SO_NONE },
{ OPT_FORCE, "--force", SO_NONE },
{ OPT_EXPIRE_RESTORABLE_AFTER_VERSION, "--restorable_after_version", SO_REQ_SEP },
{ OPT_EXPIRE_RESTORABLE_AFTER_DATETIME, "--restorable_after_timestamp", SO_REQ_SEP },
{ OPT_EXPIRE_BEFORE_VERSION, "--expire_before_version", SO_REQ_SEP },
{ OPT_EXPIRE_BEFORE_DATETIME, "--expire_before_timestamp", SO_REQ_SEP },
{ OPT_EXPIRE_MIN_RESTORABLE_DAYS, "--min_restorable_days", SO_REQ_SEP },
{ OPT_EXPIRE_DELETE_BEFORE_DAYS, "--delete_before_days", SO_REQ_SEP },
{ OPT_EXPIRE_RESTORABLE_AFTER_VERSION, "--restorable-after-version", SO_REQ_SEP },
{ OPT_EXPIRE_RESTORABLE_AFTER_DATETIME, "--restorable-after-timestamp", SO_REQ_SEP },
{ OPT_EXPIRE_BEFORE_VERSION, "--expire-before-version", SO_REQ_SEP },
{ OPT_EXPIRE_BEFORE_DATETIME, "--expire-before-timestamp", SO_REQ_SEP },
{ OPT_EXPIRE_MIN_RESTORABLE_DAYS, "--min-restorable-days", SO_REQ_SEP },
{ OPT_EXPIRE_DELETE_BEFORE_DAYS, "--delete-before-days", SO_REQ_SEP },
#ifndef TLS_DISABLED
TLS_OPTION_FLAGS
#endif
@ -517,7 +518,7 @@ CSimpleOpt::SOption g_rgBackupDeleteOptions[] = {
{ OPT_DESTCONTAINER, "--destcontainer", SO_REQ_SEP },
{ OPT_TRACE, "--log", SO_NONE },
{ OPT_TRACE_DIR, "--logdir", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace_format", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace-format", SO_REQ_SEP },
{ OPT_TRACE_LOG_GROUP, "--loggroup", SO_REQ_SEP },
{ OPT_QUIET, "-q", SO_NONE },
{ OPT_QUIET, "--quiet", SO_NONE },
@ -528,8 +529,8 @@ CSimpleOpt::SOption g_rgBackupDeleteOptions[] = {
{ OPT_HELP, "-h", SO_NONE },
{ OPT_HELP, "--help", SO_NONE },
{ OPT_DEVHELP, "--dev-help", SO_NONE },
{ OPT_BLOB_CREDENTIALS, "--blob_credentials", SO_REQ_SEP },
{ OPT_KNOB, "--knob_", SO_REQ_SEP },
{ OPT_BLOB_CREDENTIALS, "--blob-credentials", SO_REQ_SEP },
{ OPT_KNOB, "--knob-", SO_REQ_SEP },
#ifndef TLS_DISABLED
TLS_OPTION_FLAGS
#endif
@ -541,12 +542,12 @@ CSimpleOpt::SOption g_rgBackupDescribeOptions[] = {
{ OPT_PARENTPID, "--parentpid", SO_REQ_SEP },
#endif
{ OPT_CLUSTERFILE, "-C", SO_REQ_SEP },
{ OPT_CLUSTERFILE, "--cluster_file", SO_REQ_SEP },
{ OPT_CLUSTERFILE, "--cluster-file", SO_REQ_SEP },
{ OPT_DESTCONTAINER, "-d", SO_REQ_SEP },
{ OPT_DESTCONTAINER, "--destcontainer", SO_REQ_SEP },
{ OPT_TRACE, "--log", SO_NONE },
{ OPT_TRACE_DIR, "--logdir", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace_format", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace-format", SO_REQ_SEP },
{ OPT_TRACE_LOG_GROUP, "--loggroup", SO_REQ_SEP },
{ OPT_QUIET, "-q", SO_NONE },
{ OPT_QUIET, "--quiet", SO_NONE },
@ -557,10 +558,10 @@ CSimpleOpt::SOption g_rgBackupDescribeOptions[] = {
{ OPT_HELP, "-h", SO_NONE },
{ OPT_HELP, "--help", SO_NONE },
{ OPT_DEVHELP, "--dev-help", SO_NONE },
{ OPT_BLOB_CREDENTIALS, "--blob_credentials", SO_REQ_SEP },
{ OPT_KNOB, "--knob_", SO_REQ_SEP },
{ OPT_BLOB_CREDENTIALS, "--blob-credentials", SO_REQ_SEP },
{ OPT_KNOB, "--knob-", SO_REQ_SEP },
{ OPT_DESCRIBE_DEEP, "--deep", SO_NONE },
{ OPT_DESCRIBE_TIMESTAMPS, "--version_timestamps", SO_NONE },
{ OPT_DESCRIBE_TIMESTAMPS, "--version-timestamps", SO_NONE },
{ OPT_JSON, "--json", SO_NONE },
#ifndef TLS_DISABLED
TLS_OPTION_FLAGS
@ -573,7 +574,7 @@ CSimpleOpt::SOption g_rgBackupDumpOptions[] = {
{ OPT_PARENTPID, "--parentpid", SO_REQ_SEP },
#endif
{ OPT_CLUSTERFILE, "-C", SO_REQ_SEP },
{ OPT_CLUSTERFILE, "--cluster_file", SO_REQ_SEP },
{ OPT_CLUSTERFILE, "--cluster-file", SO_REQ_SEP },
{ OPT_DESTCONTAINER, "-d", SO_REQ_SEP },
{ OPT_DESTCONTAINER, "--destcontainer", SO_REQ_SEP },
{ OPT_TRACE, "--log", SO_NONE },
@ -588,8 +589,8 @@ CSimpleOpt::SOption g_rgBackupDumpOptions[] = {
{ OPT_HELP, "-h", SO_NONE },
{ OPT_HELP, "--help", SO_NONE },
{ OPT_DEVHELP, "--dev-help", SO_NONE },
{ OPT_BLOB_CREDENTIALS, "--blob_credentials", SO_REQ_SEP },
{ OPT_KNOB, "--knob_", SO_REQ_SEP },
{ OPT_BLOB_CREDENTIALS, "--blob-credentials", SO_REQ_SEP },
{ OPT_KNOB, "--knob-", SO_REQ_SEP },
{ OPT_DUMP_BEGIN, "--begin", SO_REQ_SEP },
{ OPT_DUMP_END, "--end", SO_REQ_SEP },
#ifndef TLS_DISABLED
@ -603,10 +604,10 @@ CSimpleOpt::SOption g_rgBackupListOptions[] = {
{ OPT_PARENTPID, "--parentpid", SO_REQ_SEP },
#endif
{ OPT_BASEURL, "-b", SO_REQ_SEP },
{ OPT_BASEURL, "--base_url", SO_REQ_SEP },
{ OPT_BASEURL, "--base-url", SO_REQ_SEP },
{ OPT_TRACE, "--log", SO_NONE },
{ OPT_TRACE_DIR, "--logdir", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace_format", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace-format", SO_REQ_SEP },
{ OPT_TRACE_LOG_GROUP, "--loggroup", SO_REQ_SEP },
{ OPT_QUIET, "-q", SO_NONE },
{ OPT_QUIET, "--quiet", SO_NONE },
@ -617,8 +618,8 @@ CSimpleOpt::SOption g_rgBackupListOptions[] = {
{ OPT_HELP, "-h", SO_NONE },
{ OPT_HELP, "--help", SO_NONE },
{ OPT_DEVHELP, "--dev-help", SO_NONE },
{ OPT_BLOB_CREDENTIALS, "--blob_credentials", SO_REQ_SEP },
{ OPT_KNOB, "--knob_", SO_REQ_SEP },
{ OPT_BLOB_CREDENTIALS, "--blob-credentials", SO_REQ_SEP },
{ OPT_KNOB, "--knob-", SO_REQ_SEP },
#ifndef TLS_DISABLED
TLS_OPTION_FLAGS
#endif
@ -629,16 +630,16 @@ CSimpleOpt::SOption g_rgBackupQueryOptions[] = {
#ifdef _WIN32
{ OPT_PARENTPID, "--parentpid", SO_REQ_SEP },
#endif
{ OPT_RESTORE_TIMESTAMP, "--query_restore_timestamp", SO_REQ_SEP },
{ OPT_RESTORE_TIMESTAMP, "--query-restore-timestamp", SO_REQ_SEP },
{ OPT_DESTCONTAINER, "-d", SO_REQ_SEP },
{ OPT_DESTCONTAINER, "--destcontainer", SO_REQ_SEP },
{ OPT_RESTORE_VERSION, "-qrv", SO_REQ_SEP },
{ OPT_RESTORE_VERSION, "--query_restore_version", SO_REQ_SEP },
{ OPT_RESTORE_VERSION, "--query-restore-version", SO_REQ_SEP },
{ OPT_BACKUPKEYS_FILTER, "-k", SO_REQ_SEP },
{ OPT_BACKUPKEYS_FILTER, "--keys", SO_REQ_SEP },
{ OPT_TRACE, "--log", SO_NONE },
{ OPT_TRACE_DIR, "--logdir", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace_format", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace-format", SO_REQ_SEP },
{ OPT_TRACE_LOG_GROUP, "--loggroup", SO_REQ_SEP },
{ OPT_QUIET, "-q", SO_NONE },
{ OPT_QUIET, "--quiet", SO_NONE },
@ -651,8 +652,8 @@ CSimpleOpt::SOption g_rgBackupQueryOptions[] = {
{ OPT_HELP, "-h", SO_NONE },
{ OPT_HELP, "--help", SO_NONE },
{ OPT_DEVHELP, "--dev-help", SO_NONE },
{ OPT_BLOB_CREDENTIALS, "--blob_credentials", SO_REQ_SEP },
{ OPT_KNOB, "--knob_", SO_REQ_SEP },
{ OPT_BLOB_CREDENTIALS, "--blob-credentials", SO_REQ_SEP },
{ OPT_KNOB, "--knob-", SO_REQ_SEP },
#ifndef TLS_DISABLED
TLS_OPTION_FLAGS
#endif
@ -664,13 +665,13 @@ CSimpleOpt::SOption g_rgRestoreOptions[] = {
#ifdef _WIN32
{ OPT_PARENTPID, "--parentpid", SO_REQ_SEP },
#endif
{ OPT_RESTORE_CLUSTERFILE_DEST, "--dest_cluster_file", SO_REQ_SEP },
{ OPT_RESTORE_CLUSTERFILE_ORIG, "--orig_cluster_file", SO_REQ_SEP },
{ OPT_RESTORE_CLUSTERFILE_DEST, "--dest-cluster-file", SO_REQ_SEP },
{ OPT_RESTORE_CLUSTERFILE_ORIG, "--orig-cluster-file", SO_REQ_SEP },
{ OPT_RESTORE_TIMESTAMP, "--timestamp", SO_REQ_SEP },
{ OPT_KNOB, "--knob_", SO_REQ_SEP },
{ OPT_KNOB, "--knob-", SO_REQ_SEP },
{ OPT_RESTORECONTAINER, "-r", SO_REQ_SEP },
{ OPT_PREFIX_ADD, "--add_prefix", SO_REQ_SEP },
{ OPT_PREFIX_REMOVE, "--remove_prefix", SO_REQ_SEP },
{ OPT_PREFIX_ADD, "--add-prefix", SO_REQ_SEP },
{ OPT_PREFIX_REMOVE, "--remove-prefix", SO_REQ_SEP },
{ OPT_TAGNAME, "-t", SO_REQ_SEP },
{ OPT_TAGNAME, "--tagname", SO_REQ_SEP },
{ OPT_BACKUPKEYS, "-k", SO_REQ_SEP },
@ -681,7 +682,7 @@ CSimpleOpt::SOption g_rgRestoreOptions[] = {
{ OPT_RESTORE_VERSION, "-v", SO_REQ_SEP },
{ OPT_TRACE, "--log", SO_NONE },
{ OPT_TRACE_DIR, "--logdir", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace_format", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace-format", SO_REQ_SEP },
{ OPT_TRACE_LOG_GROUP, "--loggroup", SO_REQ_SEP },
{ OPT_QUIET, "-q", SO_NONE },
{ OPT_QUIET, "--quiet", SO_NONE },
@ -695,11 +696,11 @@ CSimpleOpt::SOption g_rgRestoreOptions[] = {
{ OPT_HELP, "-h", SO_NONE },
{ OPT_HELP, "--help", SO_NONE },
{ OPT_DEVHELP, "--dev-help", SO_NONE },
{ OPT_BLOB_CREDENTIALS, "--blob_credentials", SO_REQ_SEP },
{ OPT_BLOB_CREDENTIALS, "--blob-credentials", SO_REQ_SEP },
{ OPT_INCREMENTALONLY, "--incremental", SO_NONE },
{ OPT_RESTORE_BEGIN_VERSION, "--begin_version", SO_REQ_SEP },
{ OPT_RESTORE_INCONSISTENT_SNAPSHOT_ONLY, "--inconsistent_snapshot_only", SO_NONE },
{ OPT_ENCRYPTION_KEY_FILE, "--encryption_key_file", SO_REQ_SEP },
{ OPT_RESTORE_BEGIN_VERSION, "--begin-version", SO_REQ_SEP },
{ OPT_RESTORE_INCONSISTENT_SNAPSHOT_ONLY, "--inconsistent-snapshot-only", SO_NONE },
{ OPT_ENCRYPTION_KEY_FILE, "--encryption-key-file", SO_REQ_SEP },
#ifndef TLS_DISABLED
TLS_OPTION_FLAGS
#endif
@ -714,18 +715,18 @@ CSimpleOpt::SOption g_rgDBAgentOptions[] = {
{ OPT_SOURCE_CLUSTER, "--source", SO_REQ_SEP },
{ OPT_DEST_CLUSTER, "-d", SO_REQ_SEP },
{ OPT_DEST_CLUSTER, "--destination", SO_REQ_SEP },
{ OPT_KNOB, "--knob_", SO_REQ_SEP },
{ OPT_KNOB, "--knob-", SO_REQ_SEP },
{ OPT_VERSION, "--version", SO_NONE },
{ OPT_VERSION, "-v", SO_NONE },
{ OPT_BUILD_FLAGS, "--build_flags", SO_NONE },
{ OPT_BUILD_FLAGS, "--build-flags", SO_NONE },
{ OPT_QUIET, "-q", SO_NONE },
{ OPT_QUIET, "--quiet", SO_NONE },
{ OPT_TRACE, "--log", SO_NONE },
{ OPT_TRACE_DIR, "--logdir", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace_format", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace-format", SO_REQ_SEP },
{ OPT_TRACE_LOG_GROUP, "--loggroup", SO_REQ_SEP },
{ OPT_CRASHONERROR, "--crash", SO_NONE },
{ OPT_LOCALITY, "--locality_", SO_REQ_SEP },
{ OPT_LOCALITY, "--locality-", SO_REQ_SEP },
{ OPT_MEMLIMIT, "-m", SO_REQ_SEP },
{ OPT_MEMLIMIT, "--memory", SO_REQ_SEP },
{ OPT_HELP, "-?", SO_NONE },
@ -752,7 +753,7 @@ CSimpleOpt::SOption g_rgDBStartOptions[] = {
{ OPT_BACKUPKEYS, "--keys", SO_REQ_SEP },
{ OPT_TRACE, "--log", SO_NONE },
{ OPT_TRACE_DIR, "--logdir", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace_format", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace-format", SO_REQ_SEP },
{ OPT_TRACE_LOG_GROUP, "--loggroup", SO_REQ_SEP },
{ OPT_QUIET, "-q", SO_NONE },
{ OPT_QUIET, "--quiet", SO_NONE },
@ -763,7 +764,7 @@ CSimpleOpt::SOption g_rgDBStartOptions[] = {
{ OPT_HELP, "-h", SO_NONE },
{ OPT_HELP, "--help", SO_NONE },
{ OPT_DEVHELP, "--dev-help", SO_NONE },
{ OPT_KNOB, "--knob_", SO_REQ_SEP },
{ OPT_KNOB, "--knob-", SO_REQ_SEP },
#ifndef TLS_DISABLED
TLS_OPTION_FLAGS
#endif
@ -784,7 +785,7 @@ CSimpleOpt::SOption g_rgDBStatusOptions[] = {
{ OPT_TAGNAME, "--tagname", SO_REQ_SEP },
{ OPT_TRACE, "--log", SO_NONE },
{ OPT_TRACE_DIR, "--logdir", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace_format", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace-format", SO_REQ_SEP },
{ OPT_TRACE_LOG_GROUP, "--loggroup", SO_REQ_SEP },
{ OPT_QUIET, "-q", SO_NONE },
{ OPT_QUIET, "--quiet", SO_NONE },
@ -795,7 +796,7 @@ CSimpleOpt::SOption g_rgDBStatusOptions[] = {
{ OPT_HELP, "-h", SO_NONE },
{ OPT_HELP, "--help", SO_NONE },
{ OPT_DEVHELP, "--dev-help", SO_NONE },
{ OPT_KNOB, "--knob_", SO_REQ_SEP },
{ OPT_KNOB, "--knob-", SO_REQ_SEP },
#ifndef TLS_DISABLED
TLS_OPTION_FLAGS
#endif
@ -814,7 +815,7 @@ CSimpleOpt::SOption g_rgDBSwitchOptions[] = {
{ OPT_TAGNAME, "--tagname", SO_REQ_SEP },
{ OPT_TRACE, "--log", SO_NONE },
{ OPT_TRACE_DIR, "--logdir", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace_format", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace-format", SO_REQ_SEP },
{ OPT_TRACE_LOG_GROUP, "--loggroup", SO_REQ_SEP },
{ OPT_QUIET, "-q", SO_NONE },
{ OPT_QUIET, "--quiet", SO_NONE },
@ -826,7 +827,7 @@ CSimpleOpt::SOption g_rgDBSwitchOptions[] = {
{ OPT_HELP, "-h", SO_NONE },
{ OPT_HELP, "--help", SO_NONE },
{ OPT_DEVHELP, "--dev-help", SO_NONE },
{ OPT_KNOB, "--knob_", SO_REQ_SEP },
{ OPT_KNOB, "--knob-", SO_REQ_SEP },
#ifndef TLS_DISABLED
TLS_OPTION_FLAGS
#endif
@ -847,7 +848,7 @@ CSimpleOpt::SOption g_rgDBAbortOptions[] = {
{ OPT_TAGNAME, "--tagname", SO_REQ_SEP },
{ OPT_TRACE, "--log", SO_NONE },
{ OPT_TRACE_DIR, "--logdir", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace_format", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace-format", SO_REQ_SEP },
{ OPT_TRACE_LOG_GROUP, "--loggroup", SO_REQ_SEP },
{ OPT_QUIET, "-q", SO_NONE },
{ OPT_QUIET, "--quiet", SO_NONE },
@ -858,7 +859,7 @@ CSimpleOpt::SOption g_rgDBAbortOptions[] = {
{ OPT_HELP, "-h", SO_NONE },
{ OPT_HELP, "--help", SO_NONE },
{ OPT_DEVHELP, "--dev-help", SO_NONE },
{ OPT_KNOB, "--knob_", SO_REQ_SEP },
{ OPT_KNOB, "--knob-", SO_REQ_SEP },
#ifndef TLS_DISABLED
TLS_OPTION_FLAGS
#endif
@ -875,7 +876,7 @@ CSimpleOpt::SOption g_rgDBPauseOptions[] = {
{ OPT_DEST_CLUSTER, "--destination", SO_REQ_SEP },
{ OPT_TRACE, "--log", SO_NONE },
{ OPT_TRACE_DIR, "--logdir", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace_format", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace-format", SO_REQ_SEP },
{ OPT_TRACE_LOG_GROUP, "--loggroup", SO_REQ_SEP },
{ OPT_QUIET, "-q", SO_NONE },
{ OPT_QUIET, "--quiet", SO_NONE },
@ -886,7 +887,7 @@ CSimpleOpt::SOption g_rgDBPauseOptions[] = {
{ OPT_HELP, "-h", SO_NONE },
{ OPT_HELP, "--help", SO_NONE },
{ OPT_DEVHELP, "--dev-help", SO_NONE },
{ OPT_KNOB, "--knob_", SO_REQ_SEP },
{ OPT_KNOB, "--knob-", SO_REQ_SEP },
#ifndef TLS_DISABLED
TLS_OPTION_FLAGS
#endif
@ -928,7 +929,7 @@ const char* BlobCredentialInfo =
" BLOB CREDENTIALS\n"
" Blob account secret keys can optionally be omitted from blobstore:// URLs, in which case they will be\n"
" loaded, if possible, from 1 or more blob credentials definition files.\n\n"
" These files can be specified with the --blob_credentials argument described above or via the environment "
" These files can be specified with the --blob-credentials argument described above or via the environment "
"variable\n"
" FDB_BLOB_CREDENTIALS, whose value is a colon-separated list of files. The command line takes priority over\n"
" over the environment but all files from both sources are used.\n\n"
@ -958,7 +959,7 @@ static void printAgentUsage(bool devhelp) {
printf(" --loggroup LOG_GROUP\n"
" Sets the LogGroup field with the specified value for all\n"
" events in the trace output (defaults to `default').\n");
printf(" --trace_format FORMAT\n"
printf(" --trace-format FORMAT\n"
" Select the format of the trace files. xml (the default) and json are supported.\n"
" Has no effect unless --log is specified.\n");
printf(" -m SIZE, --memory SIZE\n"
@ -967,7 +968,7 @@ static void printAgentUsage(bool devhelp) {
#ifndef TLS_DISABLED
printf(TLS_HELP);
#endif
printf(" --build_flags Print build information and exit.\n");
printf(" --build-flags Print build information and exit.\n");
printf(" -v, --version Print version information and exit.\n");
printf(" -h, --help Display this help and exit.\n");
@ -1000,7 +1001,7 @@ static void printBackupUsage(bool devhelp) {
"delete | describe | list | query | cleanup) [ACTION_OPTIONS]\n\n",
exeBackup.toString().c_str());
printf(" TOP LEVEL OPTIONS:\n");
printf(" --build_flags Print build information and exit.\n");
printf(" --build-flags Print build information and exit.\n");
printf(" -v, --version Print version information and exit.\n");
printf(" -h, --help Display this help and exit.\n");
printf("\n");
@ -1015,63 +1016,63 @@ static void printBackupUsage(bool devhelp) {
" The Backup container URL for start, modify, describe, query, expire, and delete "
"operations.\n");
printBackupContainerInfo();
printf(" -b, --base_url BASEURL\n"
printf(" -b, --base-url BASEURL\n"
" Base backup URL for list operations. This looks like a Backup URL but without a backup "
"name.\n");
printf(" --blob_credentials FILE\n"
printf(" --blob-credentials FILE\n"
" File containing blob credentials in JSON format. Can be specified multiple times for "
"multiple files. See below for more details.\n");
printf(" --expire_before_timestamp DATETIME\n"
printf(" --expire-before-timestamp DATETIME\n"
" Datetime cutoff for expire operations. Requires a cluster file and will use "
"version/timestamp metadata\n"
" in the database to obtain a cutoff version very close to the timestamp given in %s.\n",
BackupAgentBase::timeFormat().c_str());
printf(" --expire_before_version VERSION\n"
printf(" --expire-before-version VERSION\n"
" Version cutoff for expire operations. Deletes data files containing no data at or after "
"VERSION.\n");
printf(" --delete_before_days NUM_DAYS\n"
printf(" --delete-before-days NUM_DAYS\n"
" Another way to specify version cutoff for expire operations. Deletes data files "
"containing no data at or after a\n"
" version approximately NUM_DAYS days worth of versions prior to the latest log version in "
"the backup.\n");
printf(" -qrv --query_restore_version VERSION\n"
printf(" -qrv --query-restore-version VERSION\n"
" For query operations, set target version for restoring a backup. Set -1 for maximum\n"
" restorable version (default) and -2 for minimum restorable version.\n");
printf(
" --query_restore_timestamp DATETIME\n"
" --query-restore-timestamp DATETIME\n"
" For query operations, instead of a numeric version, use this to specify a timestamp in %s\n",
BackupAgentBase::timeFormat().c_str());
printf(
" and it will be converted to a version from that time using metadata in the cluster file.\n");
printf(" --restorable_after_timestamp DATETIME\n"
printf(" --restorable-after-timestamp DATETIME\n"
" For expire operations, set minimum acceptable restorability to the version equivalent of "
"DATETIME and later.\n");
printf(" --restorable_after_version VERSION\n"
printf(" --restorable-after-version VERSION\n"
" For expire operations, set minimum acceptable restorability to the VERSION and later.\n");
printf(" --min_restorable_days NUM_DAYS\n"
printf(" --min-restorable-days NUM-DAYS\n"
" For expire operations, set minimum acceptable restorability to approximately NUM_DAYS "
"days worth of versions\n"
" prior to the latest log version in the backup.\n");
printf(" --version_timestamps\n");
printf(" --version-timestamps\n");
printf(" For describe operations, lookup versions in the database to obtain timestamps. A cluster "
"file is required.\n");
printf(
" -f, --force For expire operations, force expiration even if minimum restorability would be violated.\n");
printf(" -s, --snapshot_interval DURATION\n"
printf(" -s, --snapshot-interval DURATION\n"
" For start or modify operations, specifies the backup's default target snapshot interval "
"as DURATION seconds. Defaults to %d for start operations.\n",
CLIENT_KNOBS->BACKUP_DEFAULT_SNAPSHOT_INTERVAL_SEC);
printf(" --active_snapshot_interval DURATION\n"
printf(" --active-snapshot-interval DURATION\n"
" For modify operations, sets the desired interval for the backup's currently active "
"snapshot, relative to the start of the snapshot.\n");
printf(" --verify_uid UID\n"
printf(" --verify-uid UID\n"
" Specifies a UID to verify against the BackupUID of the running backup. If provided, the "
"UID is verified in the same transaction\n"
" which sets the new backup parameters (if the UID matches).\n");
printf(" -e ERRORLIMIT The maximum number of errors printed by status (default is 10).\n");
printf(" -k KEYS List of key ranges to backup or to filter the backup in query operations.\n"
" If not specified, the entire database will be backed up or no filter will be applied.\n");
printf(" --partitioned_log_experimental Starts with new type of backup system using partitioned logs.\n");
printf(" --partitioned-log-experimental Starts with new type of backup system using partitioned logs.\n");
printf(" -n, --dryrun For backup start or restore start, performs a trial run with no actual changes made.\n");
printf(" --log Enables trace file logging for the CLI session.\n"
" --logdir PATH Specifes the output directory for trace files. If\n"
@ -1080,19 +1081,19 @@ static void printBackupUsage(bool devhelp) {
printf(" --loggroup LOG_GROUP\n"
" Sets the LogGroup field with the specified value for all\n"
" events in the trace output (defaults to `default').\n");
printf(" --trace_format FORMAT\n"
printf(" --trace-format FORMAT\n"
" Select the format of the trace files. xml (the default) and json are supported.\n"
" Has no effect unless --log is specified.\n");
printf(" --max_cleanup_seconds SECONDS\n"
printf(" --max-cleanup-seconds SECONDS\n"
" Specifies the amount of time a backup or DR needs to be stale before cleanup will\n"
" remove mutations for it. By default this is set to one hour.\n");
printf(" --delete_data\n"
printf(" --delete-data\n"
" This flag will cause cleanup to remove mutations for the most stale backup or DR.\n");
printf(" --incremental\n"
" Performs incremental backup without the base backup.\n"
" This option indicates to the backup agent that it will only need to record the log files, "
"and ignore the range files.\n");
printf(" --encryption_key_file"
printf(" --encryption-key-file"
" The AES-128-GCM key in the provided file is used for encrypting backup files.\n");
#ifndef TLS_DISABLED
printf(TLS_HELP);
@ -1125,7 +1126,7 @@ static void printRestoreUsage(bool devhelp) {
exeRestore.toString().c_str());
printf(" TOP LEVEL OPTIONS:\n");
printf(" --build_flags Print build information and exit.\n");
printf(" --build-flags Print build information and exit.\n");
printf(" -v, --version Print version information and exit.\n");
printf(" -h, --help Display this help and exit.\n");
printf("\n");
@ -1133,7 +1134,7 @@ static void printRestoreUsage(bool devhelp) {
printf(" ACTION OPTIONS:\n");
// printf(" FOLDERS Paths to folders containing the backup files.\n");
printf(" Options for all commands:\n\n");
printf(" --dest_cluster_file CONNFILE\n");
printf(" --dest-cluster-file CONNFILE\n");
printf(" The cluster file to restore data into.\n");
printf(" -t, --tagname TAGNAME\n");
printf(" The restore tag to act on. Default is 'default'\n");
@ -1144,9 +1145,9 @@ static void printRestoreUsage(bool devhelp) {
printf(" -w, --waitfordone\n");
printf(" Wait for the restore to complete before exiting. Prints progress updates.\n");
printf(" -k KEYS List of key ranges from the backup to restore.\n");
printf(" --remove_prefix PREFIX\n");
printf(" --remove-prefix PREFIX\n");
printf(" Prefix to remove from the restored keys.\n");
printf(" --add_prefix PREFIX\n");
printf(" --add-prefix PREFIX\n");
printf(" Prefix to add to the restored keys\n");
printf(" -n, --dryrun Perform a trial run with no changes made.\n");
printf(" --log Enables trace file logging for the CLI session.\n"
@ -1156,18 +1157,18 @@ static void printRestoreUsage(bool devhelp) {
printf(" --loggroup LOG_GROUP\n"
" Sets the LogGroup field with the specified value for all\n"
" events in the trace output (defaults to `default').\n");
printf(" --trace_format FORMAT\n"
printf(" --trace-format FORMAT\n"
" Select the format of the trace files. xml (the default) and json are supported.\n"
" Has no effect unless --log is specified.\n");
printf(" --incremental\n"
" Performs incremental restore without the base backup.\n"
" This tells the backup agent to only replay the log files from the backup source.\n"
" This also allows a restore to be performed into a non-empty destination database.\n");
printf(" --begin_version\n"
printf(" --begin-version\n"
" To be used in conjunction with incremental restore.\n"
" Indicates to the backup agent to only begin replaying log files from a certain version, "
"instead of the entire set.\n");
printf(" --encryption_key_file"
printf(" --encryption-key-file"
" The AES-128-GCM key in the provided file is used for decrypting backup files.\n");
#ifndef TLS_DISABLED
printf(TLS_HELP);
@ -1177,7 +1178,7 @@ static void printRestoreUsage(bool devhelp) {
BackupAgentBase::timeFormat().c_str());
printf(
" and it will be converted to a version from that time using metadata in orig_cluster_file.\n");
printf(" --orig_cluster_file CONNFILE\n");
printf(" --orig-cluster-file CONNFILE\n");
printf(" The cluster file for the original database from which the backup was created. The "
"original database\n");
printf(" is only needed to convert a --timestamp argument to a database version.\n");
@ -1201,7 +1202,7 @@ static void printRestoreUsage(bool devhelp) {
static void printFastRestoreUsage(bool devhelp) {
printf(" NOTE: Fast restore aims to support the same fdbrestore option list.\n");
printf(" But fast restore is still under development. The options may not be fully supported.\n");
printf(" Supported options are: --dest_cluster_file, -r, --waitfordone, --logdir\n");
printf(" Supported options are: --dest-cluster-file, -r, --waitfordone, --logdir\n");
printRestoreUsage(devhelp);
return;
}
@ -1222,7 +1223,7 @@ static void printDBAgentUsage(bool devhelp) {
printf(" --loggroup LOG_GROUP\n"
" Sets the LogGroup field with the specified value for all\n"
" events in the trace output (defaults to `default').\n");
printf(" --trace_format FORMAT\n"
printf(" --trace-format FORMAT\n"
" Select the format of the trace files. xml (the default) and json are supported.\n"
" Has no effect unless --log is specified.\n");
printf(" -m, --memory SIZE\n"
@ -1231,7 +1232,7 @@ static void printDBAgentUsage(bool devhelp) {
#ifndef TLS_DISABLED
printf(TLS_HELP);
#endif
printf(" --build_flags Print build information and exit.\n");
printf(" --build-flags Print build information and exit.\n");
printf(" -v, --version Print version information and exit.\n");
printf(" -h, --help Display this help and exit.\n");
if (devhelp) {
@ -1252,7 +1253,7 @@ static void printDBBackupUsage(bool devhelp) {
exeDatabaseBackup.toString().c_str());
printf(" TOP LEVEL OPTIONS:\n");
printf(" --build_flags Print build information and exit.\n");
printf(" --build-flags Print build information and exit.\n");
printf(" -v, --version Print version information and exit.\n");
printf(" -h, --help Display this help and exit.\n");
printf("\n");
@ -1279,7 +1280,7 @@ static void printDBBackupUsage(bool devhelp) {
printf(" --loggroup LOG_GROUP\n"
" Sets the LogGroup field with the specified value for all\n"
" events in the trace output (defaults to `default').\n");
printf(" --trace_format FORMAT\n"
printf(" --trace-format FORMAT\n"
" Select the format of the trace files. xml (the default) and json are supported.\n"
" Has no effect unless --log is specified.\n");
printf(" -h, --help Display this help and exit.\n");
@ -3137,10 +3138,10 @@ int main(int argc, char* argv[]) {
switch (programExe) {
case ProgramExe::AGENT:
args = std::make_unique<CSimpleOpt>(argc, argv, g_rgAgentOptions, SO_O_EXACT);
args = std::make_unique<CSimpleOpt>(argc, argv, g_rgAgentOptions, SO_O_EXACT | SO_O_HYPHEN_TO_UNDERSCORE);
break;
case ProgramExe::DR_AGENT:
args = std::make_unique<CSimpleOpt>(argc, argv, g_rgDBAgentOptions, SO_O_EXACT);
args = std::make_unique<CSimpleOpt>(argc, argv, g_rgDBAgentOptions, SO_O_EXACT | SO_O_HYPHEN_TO_UNDERSCORE);
break;
case ProgramExe::BACKUP:
// Display backup help, if no arguments
@ -3154,53 +3155,69 @@ int main(int argc, char* argv[]) {
// Create the appropriate simple opt
switch (backupType) {
case BackupType::START:
args = std::make_unique<CSimpleOpt>(argc - 1, &argv[1], g_rgBackupStartOptions, SO_O_EXACT);
args = std::make_unique<CSimpleOpt>(
argc - 1, &argv[1], g_rgBackupStartOptions, SO_O_EXACT | SO_O_HYPHEN_TO_UNDERSCORE);
break;
case BackupType::STATUS:
args = std::make_unique<CSimpleOpt>(argc - 1, &argv[1], g_rgBackupStatusOptions, SO_O_EXACT);
args = std::make_unique<CSimpleOpt>(
argc - 1, &argv[1], g_rgBackupStatusOptions, SO_O_EXACT | SO_O_HYPHEN_TO_UNDERSCORE);
break;
case BackupType::ABORT:
args = std::make_unique<CSimpleOpt>(argc - 1, &argv[1], g_rgBackupAbortOptions, SO_O_EXACT);
args = std::make_unique<CSimpleOpt>(
argc - 1, &argv[1], g_rgBackupAbortOptions, SO_O_EXACT | SO_O_HYPHEN_TO_UNDERSCORE);
break;
case BackupType::CLEANUP:
args = std::make_unique<CSimpleOpt>(argc - 1, &argv[1], g_rgBackupCleanupOptions, SO_O_EXACT);
args = std::make_unique<CSimpleOpt>(
argc - 1, &argv[1], g_rgBackupCleanupOptions, SO_O_EXACT | SO_O_HYPHEN_TO_UNDERSCORE);
break;
case BackupType::WAIT:
args = std::make_unique<CSimpleOpt>(argc - 1, &argv[1], g_rgBackupWaitOptions, SO_O_EXACT);
args = std::make_unique<CSimpleOpt>(
argc - 1, &argv[1], g_rgBackupWaitOptions, SO_O_EXACT | SO_O_HYPHEN_TO_UNDERSCORE);
break;
case BackupType::DISCONTINUE:
args = std::make_unique<CSimpleOpt>(argc - 1, &argv[1], g_rgBackupDiscontinueOptions, SO_O_EXACT);
args = std::make_unique<CSimpleOpt>(
argc - 1, &argv[1], g_rgBackupDiscontinueOptions, SO_O_EXACT | SO_O_HYPHEN_TO_UNDERSCORE);
break;
case BackupType::PAUSE:
args = std::make_unique<CSimpleOpt>(argc - 1, &argv[1], g_rgBackupPauseOptions, SO_O_EXACT);
args = std::make_unique<CSimpleOpt>(
argc - 1, &argv[1], g_rgBackupPauseOptions, SO_O_EXACT | SO_O_HYPHEN_TO_UNDERSCORE);
break;
case BackupType::RESUME:
args = std::make_unique<CSimpleOpt>(argc - 1, &argv[1], g_rgBackupPauseOptions, SO_O_EXACT);
args = std::make_unique<CSimpleOpt>(
argc - 1, &argv[1], g_rgBackupPauseOptions, SO_O_EXACT | SO_O_HYPHEN_TO_UNDERSCORE);
break;
case BackupType::EXPIRE:
args = std::make_unique<CSimpleOpt>(argc - 1, &argv[1], g_rgBackupExpireOptions, SO_O_EXACT);
args = std::make_unique<CSimpleOpt>(
argc - 1, &argv[1], g_rgBackupExpireOptions, SO_O_EXACT | SO_O_HYPHEN_TO_UNDERSCORE);
break;
case BackupType::DELETE_BACKUP:
args = std::make_unique<CSimpleOpt>(argc - 1, &argv[1], g_rgBackupDeleteOptions, SO_O_EXACT);
args = std::make_unique<CSimpleOpt>(
argc - 1, &argv[1], g_rgBackupDeleteOptions, SO_O_EXACT | SO_O_HYPHEN_TO_UNDERSCORE);
break;
case BackupType::DESCRIBE:
args = std::make_unique<CSimpleOpt>(argc - 1, &argv[1], g_rgBackupDescribeOptions, SO_O_EXACT);
args = std::make_unique<CSimpleOpt>(
argc - 1, &argv[1], g_rgBackupDescribeOptions, SO_O_EXACT | SO_O_HYPHEN_TO_UNDERSCORE);
break;
case BackupType::DUMP:
args = std::make_unique<CSimpleOpt>(argc - 1, &argv[1], g_rgBackupDumpOptions, SO_O_EXACT);
args = std::make_unique<CSimpleOpt>(
argc - 1, &argv[1], g_rgBackupDumpOptions, SO_O_EXACT | SO_O_HYPHEN_TO_UNDERSCORE);
break;
case BackupType::LIST:
args = std::make_unique<CSimpleOpt>(argc - 1, &argv[1], g_rgBackupListOptions, SO_O_EXACT);
args = std::make_unique<CSimpleOpt>(
argc - 1, &argv[1], g_rgBackupListOptions, SO_O_EXACT | SO_O_HYPHEN_TO_UNDERSCORE);
break;
case BackupType::QUERY:
args = std::make_unique<CSimpleOpt>(argc - 1, &argv[1], g_rgBackupQueryOptions, SO_O_EXACT);
args = std::make_unique<CSimpleOpt>(
argc - 1, &argv[1], g_rgBackupQueryOptions, SO_O_EXACT | SO_O_HYPHEN_TO_UNDERSCORE);
break;
case BackupType::MODIFY:
args = std::make_unique<CSimpleOpt>(argc - 1, &argv[1], g_rgBackupModifyOptions, SO_O_EXACT);
args = std::make_unique<CSimpleOpt>(
argc - 1, &argv[1], g_rgBackupModifyOptions, SO_O_EXACT | SO_O_HYPHEN_TO_UNDERSCORE);
break;
case BackupType::UNDEFINED:
default:
args = std::make_unique<CSimpleOpt>(argc, argv, g_rgOptions, SO_O_EXACT);
args =
std::make_unique<CSimpleOpt>(argc, argv, g_rgOptions, SO_O_EXACT | SO_O_HYPHEN_TO_UNDERSCORE);
break;
}
}
@ -3217,26 +3234,33 @@ int main(int argc, char* argv[]) {
// Create the appropriate simple opt
switch (dbType) {
case DBType::START:
args = std::make_unique<CSimpleOpt>(argc - 1, &argv[1], g_rgDBStartOptions, SO_O_EXACT);
args = std::make_unique<CSimpleOpt>(
argc - 1, &argv[1], g_rgDBStartOptions, SO_O_EXACT | SO_O_HYPHEN_TO_UNDERSCORE);
break;
case DBType::STATUS:
args = std::make_unique<CSimpleOpt>(argc - 1, &argv[1], g_rgDBStatusOptions, SO_O_EXACT);
args = std::make_unique<CSimpleOpt>(
argc - 1, &argv[1], g_rgDBStatusOptions, SO_O_EXACT | SO_O_HYPHEN_TO_UNDERSCORE);
break;
case DBType::SWITCH:
args = std::make_unique<CSimpleOpt>(argc - 1, &argv[1], g_rgDBSwitchOptions, SO_O_EXACT);
args = std::make_unique<CSimpleOpt>(
argc - 1, &argv[1], g_rgDBSwitchOptions, SO_O_EXACT | SO_O_HYPHEN_TO_UNDERSCORE);
break;
case DBType::ABORT:
args = std::make_unique<CSimpleOpt>(argc - 1, &argv[1], g_rgDBAbortOptions, SO_O_EXACT);
args = std::make_unique<CSimpleOpt>(
argc - 1, &argv[1], g_rgDBAbortOptions, SO_O_EXACT | SO_O_HYPHEN_TO_UNDERSCORE);
break;
case DBType::PAUSE:
args = std::make_unique<CSimpleOpt>(argc - 1, &argv[1], g_rgDBPauseOptions, SO_O_EXACT);
args = std::make_unique<CSimpleOpt>(
argc - 1, &argv[1], g_rgDBPauseOptions, SO_O_EXACT | SO_O_HYPHEN_TO_UNDERSCORE);
break;
case DBType::RESUME:
args = std::make_unique<CSimpleOpt>(argc - 1, &argv[1], g_rgDBPauseOptions, SO_O_EXACT);
args = std::make_unique<CSimpleOpt>(
argc - 1, &argv[1], g_rgDBPauseOptions, SO_O_EXACT | SO_O_HYPHEN_TO_UNDERSCORE);
break;
case DBType::UNDEFINED:
default:
args = std::make_unique<CSimpleOpt>(argc, argv, g_rgOptions, SO_O_EXACT);
args =
std::make_unique<CSimpleOpt>(argc, argv, g_rgOptions, SO_O_EXACT | SO_O_HYPHEN_TO_UNDERSCORE);
break;
}
}
@ -3249,9 +3273,10 @@ int main(int argc, char* argv[]) {
// Get the restore operation type
restoreType = getRestoreType(argv[1]);
if (restoreType == RestoreType::UNKNOWN) {
args = std::make_unique<CSimpleOpt>(argc, argv, g_rgOptions, SO_O_EXACT);
args = std::make_unique<CSimpleOpt>(argc, argv, g_rgOptions, SO_O_EXACT | SO_O_HYPHEN_TO_UNDERSCORE);
} else {
args = std::make_unique<CSimpleOpt>(argc - 1, argv + 1, g_rgRestoreOptions, SO_O_EXACT);
args = std::make_unique<CSimpleOpt>(
argc - 1, argv + 1, g_rgRestoreOptions, SO_O_EXACT | SO_O_HYPHEN_TO_UNDERSCORE);
}
break;
case ProgramExe::FASTRESTORE_TOOL:
@ -3262,9 +3287,10 @@ int main(int argc, char* argv[]) {
// Get the restore operation type
restoreType = getRestoreType(argv[1]);
if (restoreType == RestoreType::UNKNOWN) {
args = std::make_unique<CSimpleOpt>(argc, argv, g_rgOptions, SO_O_EXACT);
args = std::make_unique<CSimpleOpt>(argc, argv, g_rgOptions, SO_O_EXACT | SO_O_HYPHEN_TO_UNDERSCORE);
} else {
args = std::make_unique<CSimpleOpt>(argc - 1, argv + 1, g_rgRestoreOptions, SO_O_EXACT);
args = std::make_unique<CSimpleOpt>(
argc - 1, argv + 1, g_rgRestoreOptions, SO_O_EXACT | SO_O_HYPHEN_TO_UNDERSCORE);
}
break;
case ProgramExe::UNDEFINED:
@ -3437,14 +3463,14 @@ int main(int argc, char* argv[]) {
traceLogGroup = args->OptionArg();
break;
case OPT_LOCALITY: {
std::string syn = args->OptionSyntax();
if (!StringRef(syn).startsWith(LiteralStringRef("--locality_"))) {
fprintf(stderr, "ERROR: unable to parse locality key '%s'\n", syn.c_str());
Optional<std::string> localityKey = extractPrefixedArgument("--locality", args->OptionSyntax());
if (!localityKey.present()) {
fprintf(stderr, "ERROR: unable to parse locality key '%s'\n", args->OptionSyntax());
return FDB_EXIT_ERROR;
}
syn = syn.substr(11);
std::transform(syn.begin(), syn.end(), syn.begin(), ::tolower);
localities.set(Standalone<StringRef>(syn), Standalone<StringRef>(std::string(args->OptionArg())));
Standalone<StringRef> key = StringRef(localityKey.get());
std::transform(key.begin(), key.end(), mutateString(key), ::tolower);
localities.set(key, Standalone<StringRef>(std::string(args->OptionArg())));
break;
}
case OPT_EXPIRE_BEFORE_DATETIME:
@ -3504,13 +3530,12 @@ int main(int argc, char* argv[]) {
dstOnly.set(true);
break;
case OPT_KNOB: {
std::string syn = args->OptionSyntax();
if (!StringRef(syn).startsWith(LiteralStringRef("--knob_"))) {
fprintf(stderr, "ERROR: unable to parse knob option '%s'\n", syn.c_str());
Optional<std::string> knobName = extractPrefixedArgument("--knob", args->OptionSyntax());
if (!knobName.present()) {
fprintf(stderr, "ERROR: unable to parse knob option '%s'\n", args->OptionSyntax());
return FDB_EXIT_ERROR;
}
syn = syn.substr(7);
knobs.emplace_back(syn, args->OptionArg());
knobs.emplace_back(knobName.get(), args->OptionArg());
break;
}
case OPT_BACKUPKEYS:
@ -4108,7 +4133,7 @@ int main(int argc, char* argv[]) {
}
break;
case ProgramExe::FASTRESTORE_TOOL:
// Support --dest_cluster_file option as fdbrestore does
// Support --dest-cluster-file option as fdbrestore does
if (dryRun) {
if (restoreType != RestoreType::START) {
fprintf(stderr, "Restore dry run only works for 'start' command\n");

View File

@ -42,6 +42,7 @@
#include "fdbclient/Tuple.h"
#include "fdbclient/ThreadSafeTransaction.h"
#include "flow/ArgParseUtil.h"
#include "flow/DeterministicRandom.h"
#include "flow/FastRef.h"
#include "flow/Platform.h"
@ -98,7 +99,7 @@ enum {
};
CSimpleOpt::SOption g_rgOptions[] = { { OPT_CONNFILE, "-C", SO_REQ_SEP },
{ OPT_CONNFILE, "--cluster_file", SO_REQ_SEP },
{ OPT_CONNFILE, "--cluster-file", SO_REQ_SEP },
{ OPT_DATABASE, "-d", SO_REQ_SEP },
{ OPT_TRACE, "--log", SO_NONE },
{ OPT_TRACE_DIR, "--log-dir", SO_REQ_SEP },
@ -112,9 +113,9 @@ CSimpleOpt::SOption g_rgOptions[] = { { OPT_CONNFILE, "-C", SO_REQ_SEP },
{ OPT_STATUS_FROM_JSON, "--status-from-json", SO_REQ_SEP },
{ OPT_VERSION, "--version", SO_NONE },
{ OPT_VERSION, "-v", SO_NONE },
{ OPT_BUILD_FLAGS, "--build_flags", SO_NONE },
{ OPT_TRACE_FORMAT, "--trace_format", SO_REQ_SEP },
{ OPT_KNOB, "--knob_", SO_REQ_SEP },
{ OPT_BUILD_FLAGS, "--build-flags", SO_NONE },
{ OPT_TRACE_FORMAT, "--trace-format", SO_REQ_SEP },
{ OPT_KNOB, "--knob-", SO_REQ_SEP },
{ OPT_DEBUG_TLS, "--debug-tls", SO_NONE },
{ OPT_API_VERSION, "--api-version", SO_REQ_SEP },
@ -426,7 +427,7 @@ static void printProgramUsage(const char* name) {
" --log-dir PATH Specifes the output directory for trace files. If\n"
" unspecified, defaults to the current directory. Has\n"
" no effect unless --log is specified.\n"
" --trace_format FORMAT\n"
" --trace-format FORMAT\n"
" Select the format of the log files. xml (the default) and json\n"
" are supported. Has no effect unless --log is specified.\n"
" --exec CMDS Immediately executes the semicolon separated CLI commands\n"
@ -438,11 +439,11 @@ static void printProgramUsage(const char* name) {
#ifndef TLS_DISABLED
TLS_HELP
#endif
" --knob_KNOBNAME KNOBVALUE\n"
" --knob-KNOBNAME KNOBVALUE\n"
" Changes a knob option. KNOBNAME should be lowercase.\n"
" --debug-tls Prints the TLS configuration and certificate chain, then exits.\n"
" Useful in reporting and diagnosing TLS issues.\n"
" --build_flags Print build information and exit.\n"
" --build-flags Print build information and exit.\n"
" -v, --version Print FoundationDB CLI version information and exit.\n"
" -h, --help Display this help and exit.\n");
}
@ -1389,7 +1390,7 @@ struct CLIOptions {
commandLine += argv[a];
}
CSimpleOpt args(argc, argv, g_rgOptions);
CSimpleOpt args(argc, argv, g_rgOptions, SO_O_HYPHEN_TO_UNDERSCORE);
while (args.Next()) {
int ec = processArg(args);
@ -1518,13 +1519,12 @@ struct CLIOptions {
traceFormat = args.OptionArg();
break;
case OPT_KNOB: {
std::string syn = args.OptionSyntax();
if (!StringRef(syn).startsWith(LiteralStringRef("--knob_"))) {
fprintf(stderr, "ERROR: unable to parse knob option '%s'\n", syn.c_str());
Optional<std::string> knobName = extractPrefixedArgument("--knob", args.OptionSyntax());
if (!knobName.present()) {
fprintf(stderr, "ERROR: unable to parse knob option '%s'\n", args.OptionSyntax());
return FDB_EXIT_ERROR;
}
syn = syn.substr(7);
knobs.emplace_back(syn, args.OptionArg());
knobs.emplace_back(knobName.get(), args.OptionArg());
break;
}
case OPT_DEBUG_TLS:

View File

@ -1001,20 +1001,20 @@ ACTOR Future<Void> cleanupLogMutations(Database cx, Value destUidValue, bool del
printf("\nSuccessfully removed the tag that was %.4f hours behind.\n\n",
(readVer - minVersion) / (3600.0 * CLIENT_KNOBS->CORE_VERSIONSPERSECOND));
} else if (removingLogUid.present() && minVersionLogUid != removingLogUid.get()) {
printf("\nWARNING: The oldest tag was possibly removed, run again without `--delete_data' to "
printf("\nWARNING: The oldest tag was possibly removed, run again without `--delete-data' to "
"check.\n\n");
} else {
printf("\nWARNING: Did not delete data because the tag is not at least %.4f hours behind. Change "
"`--min_cleanup_seconds' to adjust this threshold.\n\n",
"`--min-cleanup-seconds' to adjust this threshold.\n\n",
CLIENT_KNOBS->MIN_CLEANUP_SECONDS / 3600.0);
}
} else if (readVer - minVersion >
CLIENT_KNOBS->MIN_CLEANUP_SECONDS * CLIENT_KNOBS->CORE_VERSIONSPERSECOND) {
printf("\nPassing `--delete_data' would delete the tag that is %.4f hours behind.\n\n",
printf("\nPassing `--delete-data' would delete the tag that is %.4f hours behind.\n\n",
(readVer - minVersion) / (3600.0 * CLIENT_KNOBS->CORE_VERSIONSPERSECOND));
} else {
printf("\nPassing `--delete_data' would not delete the tag that is %.4f hours behind. Change "
"`--min_cleanup_seconds' to adjust the cleanup threshold.\n\n",
printf("\nPassing `--delete-data' would not delete the tag that is %.4f hours behind. Change "
"`--min-cleanup-seconds' to adjust the cleanup threshold.\n\n",
(readVer - minVersion) / (3600.0 * CLIENT_KNOBS->CORE_VERSIONSPERSECOND));
}

View File

@ -1116,9 +1116,10 @@ struct AutoQuorumChange final : IQuorumChange {
self->addDesiredWorkers(chosen, workers, desiredCount, excluded);
if (chosen.size() < desiredCount) {
if (chosen.size() < oldCoordinators.size()) {
if (chosen.empty() || chosen.size() < oldCoordinators.size()) {
TraceEvent("NotEnoughMachinesForCoordinators")
.detail("EligibleWorkers", workers.size())
.detail("ChosenWorkers", chosen.size())
.detail("DesiredCoordinators", desiredCount)
.detail("CurrentCoordinators", oldCoordinators.size());
*err = CoordinatorsResult::NOT_ENOUGH_MACHINES;

View File

@ -354,6 +354,7 @@ void ServerKnobs::initialize(Randomize randomize, ClientKnobs* clientKnobs, IsSi
init( ROCKSDB_READ_QUEUE_SOFT_MAX, 500 );
init( ROCKSDB_FETCH_QUEUE_HARD_MAX, 100 );
init( ROCKSDB_FETCH_QUEUE_SOFT_MAX, 50 );
init( ROCKSDB_HISTOGRAMS_SAMPLE_RATE, 0.001 ); if( randomize && BUGGIFY ) ROCKSDB_HISTOGRAMS_SAMPLE_RATE = 0;
// Leader election
bool longLeaderElection = randomize && BUGGIFY;

View File

@ -286,6 +286,9 @@ public:
int ROCKSDB_READ_QUEUE_HARD_MAX;
int ROCKSDB_FETCH_QUEUE_SOFT_MAX;
int ROCKSDB_FETCH_QUEUE_HARD_MAX;
// These histograms are in read and write path which can cause performance overhead.
// Set to 0 to disable histograms.
double ROCKSDB_HISTOGRAMS_SAMPLE_RATE;
// Leader election
int MAX_NOTIFICATIONS;

View File

@ -1,15 +1,15 @@
{
"version": "6.3.15",
"arguments": [
{"value": "--cluster_file"},
{"value": "--cluster-file"},
{"value": ".testdata/fdb.cluster"},
{"value": "--public_address"},
{"value": "--public-address"},
{"type": "Concatenate", "values": [
{"type": "Environment", "source": "FDB_PUBLIC_IP"},
{"value": ":"},
{"type": "ProcessNumber", "offset": 4499, "multiplier": 2}
]},
{"value": "--listen_address"},
{"value": "--listen-address"},
{"type": "Concatenate", "values": [
{"type": "Environment", "source": "FDB_POD_IP"},
{"value": ":"},
@ -22,11 +22,11 @@
]},
{"value": "--class"},
{"value": "storage"},
{"value": "--locality_zoneid"},
{"value": "--locality-zoneid"},
{"type": "Environment", "source": "FDB_ZONE_ID"},
{"value": "--locality_instance-id"},
{"value": "--locality-instance-id"},
{"type": "Environment", "source": "FDB_INSTANCE_ID"},
{"value": "--locality_process-id"},
{"value": "--locality-process-id"},
{"type": "Concatenate", "values": [
{"type": "Environment", "source": "FDB_INSTANCE_ID"},
{"value": "-"},

View File

@ -60,11 +60,11 @@ func TestGeneratingArgumentsForDefaultConfig(t *testing.T) {
}
expectedArguments := []string{
"--cluster_file", ".testdata/fdb.cluster",
"--public_address", "10.0.0.1:4501", "--listen_address", "192.168.0.1:4501",
"--cluster-file", ".testdata/fdb.cluster",
"--public-address", "10.0.0.1:4501", "--listen-address", "192.168.0.1:4501",
"--datadir", ".testdata/data/1", "--class", "storage",
"--locality_zoneid", "zone1", "--locality_instance-id", "storage-1",
"--locality_process-id", "storage-1-1",
"--locality-zoneid", "zone1", "--locality-instance-id", "storage-1",
"--locality-process-id", "storage-1-1",
}
if !reflect.DeepEqual(arguments, expectedArguments) {
@ -87,11 +87,11 @@ func TestGeneratingArgumentsForDefaultConfig(t *testing.T) {
expectedArguments = []string{
"/usr/bin/fdbserver",
"--cluster_file", ".testdata/fdb.cluster",
"--public_address", "10.0.0.1:4501", "--listen_address", "192.168.0.1:4501",
"--cluster-file", ".testdata/fdb.cluster",
"--public-address", "10.0.0.1:4501", "--listen-address", "192.168.0.1:4501",
"--datadir", ".testdata/data/1", "--class", "storage",
"--locality_zoneid", "zone1", "--locality_instance-id", "storage-1",
"--locality_process-id", "storage-1-1",
"--locality-zoneid", "zone1", "--locality-instance-id", "storage-1",
"--locality-process-id", "storage-1-1",
}
if !reflect.DeepEqual(arguments, expectedArguments) {

View File

@ -197,17 +197,46 @@ const char* get_value_multi(const CSimpleIni& ini, const char* key, ...) {
const char* ret = nullptr;
const char* section = nullptr;
std::string keyWithUnderscores(key);
for (int i = keyWithUnderscores.size() - 1; i >= 0; --i) {
if (keyWithUnderscores[i] == '-') {
keyWithUnderscores.at(i) = '_';
}
}
va_list ap;
va_start(ap, key);
while (!ret && (section = va_arg(ap, const char*)))
while (!ret && (section = va_arg(ap, const char*))) {
ret = ini.GetValue(section, key, nullptr);
if (!ret) {
ret = ini.GetValue(section, keyWithUnderscores.c_str(), nullptr);
}
}
va_end(ap);
return ret;
}
bool isParameterNameEqual(const char* str, const char* target) {
if (!str || !target) {
return false;
}
while (*str && *target) {
char curStr = *str, curTarget = *target;
if (curStr == '-') {
curStr = '_';
}
if (curTarget == '-') {
curTarget = '_';
}
if (curStr != curTarget) {
return false;
}
str++;
target++;
}
return !(*str || *target);
}
double timer() {
#if defined(__linux__) || defined(__FreeBSD__)
struct timespec ts;
@ -426,7 +455,7 @@ public:
char* endptr;
const char* rd =
get_value_multi(ini, "restart_delay", ssection.c_str(), section.c_str(), "general", "fdbmonitor", nullptr);
get_value_multi(ini, "restart-delay", ssection.c_str(), section.c_str(), "general", "fdbmonitor", nullptr);
if (!rd) {
log_msg(SevError, "Unable to resolve restart delay for %s\n", ssection.c_str());
return;
@ -439,7 +468,7 @@ public:
}
const char* mrd = get_value_multi(
ini, "initial_restart_delay", ssection.c_str(), section.c_str(), "general", "fdbmonitor", nullptr);
ini, "initial-restart-delay", ssection.c_str(), section.c_str(), "general", "fdbmonitor", nullptr);
if (!mrd) {
initial_restart_delay = 0;
} else {
@ -453,7 +482,7 @@ public:
current_restart_delay = initial_restart_delay;
const char* rbo = get_value_multi(
ini, "restart_backoff", ssection.c_str(), section.c_str(), "general", "fdbmonitor", nullptr);
ini, "restart-backoff", ssection.c_str(), section.c_str(), "general", "fdbmonitor", nullptr);
if (!rbo) {
restart_backoff = max_restart_delay;
} else {
@ -469,7 +498,7 @@ public:
}
const char* rdri = get_value_multi(
ini, "restart_delay_reset_interval", ssection.c_str(), section.c_str(), "general", "fdbmonitor", nullptr);
ini, "restart-delay-reset-interval", ssection.c_str(), section.c_str(), "general", "fdbmonitor", nullptr);
if (!rdri) {
restart_delay_reset_interval = max_restart_delay;
} else {
@ -481,16 +510,16 @@ public:
}
const char* q =
get_value_multi(ini, "disable_lifecycle_logging", ssection.c_str(), section.c_str(), "general", nullptr);
get_value_multi(ini, "disable-lifecycle-logging", ssection.c_str(), section.c_str(), "general", nullptr);
if (q && !strcmp(q, "true"))
quiet = true;
const char* del_env =
get_value_multi(ini, "delete_envvars", ssection.c_str(), section.c_str(), "general", nullptr);
get_value_multi(ini, "delete-envvars", ssection.c_str(), section.c_str(), "general", nullptr);
delete_envvars = del_env;
const char* kocc =
get_value_multi(ini, "kill_on_configuration_change", ssection.c_str(), section.c_str(), "general", nullptr);
get_value_multi(ini, "kill-on-configuration-change", ssection.c_str(), section.c_str(), "general", nullptr);
if (kocc && strcmp(kocc, "true")) {
kill_on_configuration_change = false;
}
@ -508,10 +537,13 @@ public:
const char* id_s = ssection.c_str() + strlen(section.c_str()) + 1;
for (auto i : keys) {
if (!strcmp(i.pItem, "command") || !strcmp(i.pItem, "restart_delay") ||
!strcmp(i.pItem, "initial_restart_delay") || !strcmp(i.pItem, "restart_backoff") ||
!strcmp(i.pItem, "restart_delay_reset_interval") || !strcmp(i.pItem, "disable_lifecycle_logging") ||
!strcmp(i.pItem, "delete_envvars") || !strcmp(i.pItem, "kill_on_configuration_change")) {
if (isParameterNameEqual(i.pItem, "command") || isParameterNameEqual(i.pItem, "restart-delay") ||
isParameterNameEqual(i.pItem, "initial-restart-delay") ||
isParameterNameEqual(i.pItem, "restart-backoff") ||
isParameterNameEqual(i.pItem, "restart-delay-reset-interval") ||
isParameterNameEqual(i.pItem, "disable-lifecycle-logging") ||
isParameterNameEqual(i.pItem, "delete-envvars") ||
isParameterNameEqual(i.pItem, "kill-on-configuration-change")) {
continue;
}
@ -523,7 +555,7 @@ public:
opt.replace(pos, 3, id_s, strlen(id_s));
const char* flagName = i.pItem + 5;
if (strncmp("flag_", i.pItem, 5) == 0 && strlen(flagName) > 0) {
if ((strncmp("flag_", i.pItem, 5) == 0 || strncmp("flag-", i.pItem, 5) == 0) && strlen(flagName) > 0) {
if (opt == "true")
commands.push_back(std::string("--") + flagName);
else if (opt != "false") {
@ -1227,7 +1259,7 @@ int main(int argc, char** argv) {
std::vector<const char*> additional_watch_paths;
CSimpleOpt args(argc, argv, g_rgOptions, SO_O_NOERR);
CSimpleOpt args(argc, argv, g_rgOptions, SO_O_NOERR | SO_O_HYPHEN_TO_UNDERSCORE);
while (args.Next()) {
if (args.LastError() == SO_SUCCESS) {

View File

@ -124,6 +124,8 @@ public:
sav->sendError(exc);
}
void send(Never) { sendError(never_reply()); }
Future<T> getFuture() const {
sav->addFutureRef();
return Future<T>(sav);

View File

@ -30,6 +30,7 @@
#include "flow/flow.h"
#include "flow/actorcompiler.h" // This must be the last #include.
// This actor is used by FlowTransport to serialize the response to a ReplyPromise across the network
ACTOR template <class T>
void networkSender(Future<T> input, Endpoint endpoint) {
try {
@ -37,6 +38,9 @@ void networkSender(Future<T> input, Endpoint endpoint) {
FlowTransport::transport().sendUnreliable(SerializeSource<ErrorOr<EnsureTable<T>>>(value), endpoint, false);
} catch (Error& err) {
// if (err.code() == error_code_broken_promise) return;
if (err.code() == error_code_never_reply) {
return;
}
ASSERT(err.code() != error_code_actor_cancelled);
FlowTransport::transport().sendUnreliable(SerializeSource<ErrorOr<EnsureTable<T>>>(err), endpoint, false);
}

View File

@ -119,7 +119,7 @@ class WorkPool final : public IThreadPool, public ReferenceCounted<WorkPool<Thre
}
};
struct Worker : Threadlike {
struct Worker final : Threadlike {
Pool* pool;
IThreadPoolReceiver* userData;
bool stop;

View File

@ -869,7 +869,7 @@ public:
}
};
class DiskQueue : public IDiskQueue, public Tracked<DiskQueue> {
class DiskQueue final : public IDiskQueue, public Tracked<DiskQueue> {
public:
// FIXME: Is setting lastCommittedSeq to -1 instead of 0 necessary?
DiskQueue(std::string basename,
@ -1539,7 +1539,7 @@ private:
// This works by performing two commits when uncommitted data is popped:
// Commit 1 - pop only previously committed data and push new data (i.e., commit uncommitted data)
// Commit 2 - finish pop into uncommitted data
class DiskQueue_PopUncommitted : public IDiskQueue {
class DiskQueue_PopUncommitted final : public IDiskQueue {
public:
DiskQueue_PopUncommitted(std::string basename,

View File

@ -14,6 +14,7 @@
#include "flow/flow.h"
#include "flow/IThreadPool.h"
#include "flow/ThreadHelper.actor.h"
#include "flow/Histogram.h"
#include <memory>
#include <tuple>
@ -35,6 +36,25 @@ static_assert((ROCKSDB_MAJOR == 6 && ROCKSDB_MINOR == 22) ? ROCKSDB_PATCH >= 1 :
namespace {
const StringRef ROCKSDBSTORAGE_HISTOGRAM_GROUP = LiteralStringRef("RocksDBStorage");
const StringRef ROCKSDB_COMMIT_LATENCY_HISTOGRAM = LiteralStringRef("RocksDBCommitLatency");
const StringRef ROCKSDB_COMMIT_ACTION_HISTOGRAM = LiteralStringRef("RocksDBCommitAction");
const StringRef ROCKSDB_COMMIT_QUEUEWAIT_HISTOGRAM = LiteralStringRef("RocksDBCommitQueueWait");
const StringRef ROCKSDB_WRITE_HISTOGRAM = LiteralStringRef("RocksDBWrite");
const StringRef ROCKSDB_DELETE_COMPACTRANGE_HISTOGRAM = LiteralStringRef("RocksDBDeleteCompactRange");
const StringRef ROCKSDB_READRANGE_LATENCY_HISTOGRAM = LiteralStringRef("RocksDBReadRangeLatency");
const StringRef ROCKSDB_READVALUE_LATENCY_HISTOGRAM = LiteralStringRef("RocksDBReadValueLatency");
const StringRef ROCKSDB_READPREFIX_LATENCY_HISTOGRAM = LiteralStringRef("RocksDBReadPrefixLatency");
const StringRef ROCKSDB_READRANGE_ACTION_HISTOGRAM = LiteralStringRef("RocksDBReadRangeAction");
const StringRef ROCKSDB_READVALUE_ACTION_HISTOGRAM = LiteralStringRef("RocksDBReadValueAction");
const StringRef ROCKSDB_READPREFIX_ACTION_HISTOGRAM = LiteralStringRef("RocksDBReadPrefixAction");
const StringRef ROCKSDB_READRANGE_QUEUEWAIT_HISTOGRAM = LiteralStringRef("RocksDBReadRangeQueueWait");
const StringRef ROCKSDB_READVALUE_QUEUEWAIT_HISTOGRAM = LiteralStringRef("RocksDBReadValueQueueWait");
const StringRef ROCKSDB_READPREFIX_QUEUEWAIT_HISTOGRAM = LiteralStringRef("RocksDBReadPrefixQueueWait");
const StringRef ROCKSDB_READRANGE_NEWITERATOR_HISTOGRAM = LiteralStringRef("RocksDBReadRangeNewIterator");
const StringRef ROCKSDB_READVALUE_GET_HISTOGRAM = LiteralStringRef("RocksDBReadValueGet");
const StringRef ROCKSDB_READPREFIX_GET_HISTOGRAM = LiteralStringRef("RocksDBReadPrefixGet");
rocksdb::Slice toSlice(StringRef s) {
return rocksdb::Slice(reinterpret_cast<const char*>(s.begin()), s.size());
}
@ -231,8 +251,28 @@ struct RocksDBKeyValueStore : IKeyValueStore {
struct Writer : IThreadPoolReceiver {
DB& db;
UID id;
Reference<Histogram> commitLatencyHistogram;
Reference<Histogram> commitActionHistogram;
Reference<Histogram> commitQueueWaitHistogram;
Reference<Histogram> writeHistogram;
Reference<Histogram> deleteCompactRangeHistogram;
explicit Writer(DB& db, UID id) : db(db), id(id) {}
explicit Writer(DB& db, UID id)
: db(db), id(id), commitLatencyHistogram(Histogram::getHistogram(ROCKSDBSTORAGE_HISTOGRAM_GROUP,
ROCKSDB_COMMIT_LATENCY_HISTOGRAM,
Histogram::Unit::microseconds)),
commitActionHistogram(Histogram::getHistogram(ROCKSDBSTORAGE_HISTOGRAM_GROUP,
ROCKSDB_COMMIT_ACTION_HISTOGRAM,
Histogram::Unit::microseconds)),
commitQueueWaitHistogram(Histogram::getHistogram(ROCKSDBSTORAGE_HISTOGRAM_GROUP,
ROCKSDB_COMMIT_QUEUEWAIT_HISTOGRAM,
Histogram::Unit::microseconds)),
writeHistogram(Histogram::getHistogram(ROCKSDBSTORAGE_HISTOGRAM_GROUP,
ROCKSDB_WRITE_HISTOGRAM,
Histogram::Unit::microseconds)),
deleteCompactRangeHistogram(Histogram::getHistogram(ROCKSDBSTORAGE_HISTOGRAM_GROUP,
ROCKSDB_DELETE_COMPACTRANGE_HISTOGRAM,
Histogram::Unit::microseconds)) {}
~Writer() override {
if (db) {
@ -301,9 +341,24 @@ struct RocksDBKeyValueStore : IKeyValueStore {
struct CommitAction : TypedAction<Writer, CommitAction> {
std::unique_ptr<rocksdb::WriteBatch> batchToCommit;
ThreadReturnPromise<Void> done;
double startTime;
bool getHistograms;
double getTimeEstimate() const override { return SERVER_KNOBS->COMMIT_TIME_ESTIMATE; }
CommitAction() {
if (deterministicRandom()->random01() < SERVER_KNOBS->ROCKSDB_HISTOGRAMS_SAMPLE_RATE) {
getHistograms = true;
startTime = timer_monotonic();
} else {
getHistograms = false;
}
}
};
void action(CommitAction& a) {
double commitBeginTime;
if (a.getHistograms) {
commitBeginTime = timer_monotonic();
commitQueueWaitHistogram->sampleSeconds(commitBeginTime - a.startTime);
}
Standalone<VectorRef<KeyRangeRef>> deletes;
DeleteVisitor dv(deletes, deletes.arena());
ASSERT(a.batchToCommit->Iterate(&dv).ok());
@ -311,17 +366,33 @@ struct RocksDBKeyValueStore : IKeyValueStore {
ASSERT(!deletes.empty() || !a.batchToCommit->HasDeleteRange());
rocksdb::WriteOptions options;
options.sync = !SERVER_KNOBS->ROCKSDB_UNSAFE_AUTO_FSYNC;
double writeBeginTime = a.getHistograms ? timer_monotonic() : 0;
auto s = db->Write(options, a.batchToCommit.get());
if (a.getHistograms) {
writeHistogram->sampleSeconds(timer_monotonic() - writeBeginTime);
}
if (!s.ok()) {
logRocksDBError(s, "Commit");
a.done.sendError(statusToError(s));
} else {
a.done.send(Void());
double compactRangeBeginTime = a.getHistograms ? timer_monotonic() : 0;
for (const auto& keyRange : deletes) {
auto begin = toSlice(keyRange.begin);
auto end = toSlice(keyRange.end);
ASSERT(db->SuggestCompactRange(db->DefaultColumnFamily(), &begin, &end).ok());
}
if (a.getHistograms) {
deleteCompactRangeHistogram->sampleSeconds(timer_monotonic() - compactRangeBeginTime);
}
}
if (a.getHistograms) {
double currTime = timer_monotonic();
commitActionHistogram->sampleSeconds(currTime - commitBeginTime);
commitLatencyHistogram->sampleSeconds(currTime - a.startTime);
}
}
@ -361,8 +432,56 @@ struct RocksDBKeyValueStore : IKeyValueStore {
double readValueTimeout;
double readValuePrefixTimeout;
double readRangeTimeout;
Reference<Histogram> readRangeLatencyHistogram;
Reference<Histogram> readValueLatencyHistogram;
Reference<Histogram> readPrefixLatencyHistogram;
Reference<Histogram> readRangeActionHistogram;
Reference<Histogram> readValueActionHistogram;
Reference<Histogram> readPrefixActionHistogram;
Reference<Histogram> readRangeQueueWaitHistogram;
Reference<Histogram> readValueQueueWaitHistogram;
Reference<Histogram> readPrefixQueueWaitHistogram;
Reference<Histogram> readRangeNewIteratorHistogram;
Reference<Histogram> readValueGetHistogram;
Reference<Histogram> readPrefixGetHistogram;
explicit Reader(DB& db) : db(db) {
explicit Reader(DB& db)
: db(db), readRangeLatencyHistogram(Histogram::getHistogram(ROCKSDBSTORAGE_HISTOGRAM_GROUP,
ROCKSDB_READRANGE_LATENCY_HISTOGRAM,
Histogram::Unit::microseconds)),
readValueLatencyHistogram(Histogram::getHistogram(ROCKSDBSTORAGE_HISTOGRAM_GROUP,
ROCKSDB_READVALUE_LATENCY_HISTOGRAM,
Histogram::Unit::microseconds)),
readPrefixLatencyHistogram(Histogram::getHistogram(ROCKSDBSTORAGE_HISTOGRAM_GROUP,
ROCKSDB_READPREFIX_LATENCY_HISTOGRAM,
Histogram::Unit::microseconds)),
readRangeActionHistogram(Histogram::getHistogram(ROCKSDBSTORAGE_HISTOGRAM_GROUP,
ROCKSDB_READRANGE_ACTION_HISTOGRAM,
Histogram::Unit::microseconds)),
readValueActionHistogram(Histogram::getHistogram(ROCKSDBSTORAGE_HISTOGRAM_GROUP,
ROCKSDB_READVALUE_ACTION_HISTOGRAM,
Histogram::Unit::microseconds)),
readPrefixActionHistogram(Histogram::getHistogram(ROCKSDBSTORAGE_HISTOGRAM_GROUP,
ROCKSDB_READPREFIX_ACTION_HISTOGRAM,
Histogram::Unit::microseconds)),
readRangeQueueWaitHistogram(Histogram::getHistogram(ROCKSDBSTORAGE_HISTOGRAM_GROUP,
ROCKSDB_READRANGE_QUEUEWAIT_HISTOGRAM,
Histogram::Unit::microseconds)),
readValueQueueWaitHistogram(Histogram::getHistogram(ROCKSDBSTORAGE_HISTOGRAM_GROUP,
ROCKSDB_READVALUE_QUEUEWAIT_HISTOGRAM,
Histogram::Unit::microseconds)),
readPrefixQueueWaitHistogram(Histogram::getHistogram(ROCKSDBSTORAGE_HISTOGRAM_GROUP,
ROCKSDB_READPREFIX_QUEUEWAIT_HISTOGRAM,
Histogram::Unit::microseconds)),
readRangeNewIteratorHistogram(Histogram::getHistogram(ROCKSDBSTORAGE_HISTOGRAM_GROUP,
ROCKSDB_READRANGE_NEWITERATOR_HISTOGRAM,
Histogram::Unit::microseconds)),
readValueGetHistogram(Histogram::getHistogram(ROCKSDBSTORAGE_HISTOGRAM_GROUP,
ROCKSDB_READVALUE_GET_HISTOGRAM,
Histogram::Unit::microseconds)),
readPrefixGetHistogram(Histogram::getHistogram(ROCKSDBSTORAGE_HISTOGRAM_GROUP,
ROCKSDB_READPREFIX_GET_HISTOGRAM,
Histogram::Unit::microseconds)) {
if (g_network->isSimulated()) {
// In simulation, increasing the read operation timeouts to 5 minutes, as some of the tests have
// very high load and single read thread cannot process all the load within the timeouts.
@ -382,18 +501,26 @@ struct RocksDBKeyValueStore : IKeyValueStore {
Key key;
Optional<UID> debugID;
double startTime;
bool getHistograms;
ThreadReturnPromise<Optional<Value>> result;
ReadValueAction(KeyRef key, Optional<UID> debugID)
: key(key), debugID(debugID), startTime(timer_monotonic()) {}
: key(key), debugID(debugID), startTime(timer_monotonic()),
getHistograms(
(deterministicRandom()->random01() < SERVER_KNOBS->ROCKSDB_HISTOGRAMS_SAMPLE_RATE) ? true : false) {
}
double getTimeEstimate() const override { return SERVER_KNOBS->READ_VALUE_TIME_ESTIMATE; }
};
void action(ReadValueAction& a) {
double readBeginTime = timer_monotonic();
if (a.getHistograms) {
readValueQueueWaitHistogram->sampleSeconds(readBeginTime - a.startTime);
}
Optional<TraceBatch> traceBatch;
if (a.debugID.present()) {
traceBatch = { TraceBatch{} };
traceBatch.get().addEvent("GetValueDebug", a.debugID.get().first(), "Reader.Before");
}
if (timer_monotonic() - a.startTime > readValueTimeout) {
if (readBeginTime - a.startTime > readValueTimeout) {
TraceEvent(SevWarn, "RocksDBError")
.detail("Error", "Read value request timedout")
.detail("Method", "ReadValueAction")
@ -401,13 +528,20 @@ struct RocksDBKeyValueStore : IKeyValueStore {
a.result.sendError(transaction_too_old());
return;
}
rocksdb::PinnableSlice value;
auto options = getReadOptions();
uint64_t deadlineMircos =
db->GetEnv()->NowMicros() + (readValueTimeout - (timer_monotonic() - a.startTime)) * 1000000;
db->GetEnv()->NowMicros() + (readValueTimeout - (readBeginTime - a.startTime)) * 1000000;
std::chrono::seconds deadlineSeconds(deadlineMircos / 1000000);
options.deadline = std::chrono::duration_cast<std::chrono::microseconds>(deadlineSeconds);
double dbGetBeginTime = a.getHistograms ? timer_monotonic() : 0;
auto s = db->Get(options, db->DefaultColumnFamily(), toSlice(a.key), &value);
if (a.getHistograms) {
readValueGetHistogram->sampleSeconds(timer_monotonic() - dbGetBeginTime);
}
if (a.debugID.present()) {
traceBatch.get().addEvent("GetValueDebug", a.debugID.get().first(), "Reader.After");
traceBatch.get().dump();
@ -420,6 +554,12 @@ struct RocksDBKeyValueStore : IKeyValueStore {
logRocksDBError(s, "ReadValue");
a.result.sendError(statusToError(s));
}
if (a.getHistograms) {
double currTime = timer_monotonic();
readValueActionHistogram->sampleSeconds(currTime - readBeginTime);
readValueLatencyHistogram->sampleSeconds(currTime - a.startTime);
}
}
struct ReadValuePrefixAction : TypedAction<Reader, ReadValuePrefixAction> {
@ -427,12 +567,20 @@ struct RocksDBKeyValueStore : IKeyValueStore {
int maxLength;
Optional<UID> debugID;
double startTime;
bool getHistograms;
ThreadReturnPromise<Optional<Value>> result;
ReadValuePrefixAction(Key key, int maxLength, Optional<UID> debugID)
: key(key), maxLength(maxLength), debugID(debugID), startTime(timer_monotonic()){};
: key(key), maxLength(maxLength), debugID(debugID), startTime(timer_monotonic()),
getHistograms(
(deterministicRandom()->random01() < SERVER_KNOBS->ROCKSDB_HISTOGRAMS_SAMPLE_RATE) ? true : false) {
}
double getTimeEstimate() const override { return SERVER_KNOBS->READ_VALUE_TIME_ESTIMATE; }
};
void action(ReadValuePrefixAction& a) {
double readBeginTime = timer_monotonic();
if (a.getHistograms) {
readPrefixQueueWaitHistogram->sampleSeconds(readBeginTime - a.startTime);
}
Optional<TraceBatch> traceBatch;
if (a.debugID.present()) {
traceBatch = { TraceBatch{} };
@ -440,7 +588,7 @@ struct RocksDBKeyValueStore : IKeyValueStore {
a.debugID.get().first(),
"Reader.Before"); //.detail("TaskID", g_network->getCurrentTask());
}
if (timer_monotonic() - a.startTime > readValuePrefixTimeout) {
if (readBeginTime - a.startTime > readValuePrefixTimeout) {
TraceEvent(SevWarn, "RocksDBError")
.detail("Error", "Read value prefix request timedout")
.detail("Method", "ReadValuePrefixAction")
@ -448,13 +596,20 @@ struct RocksDBKeyValueStore : IKeyValueStore {
a.result.sendError(transaction_too_old());
return;
}
rocksdb::PinnableSlice value;
auto options = getReadOptions();
uint64_t deadlineMircos =
db->GetEnv()->NowMicros() + (readValuePrefixTimeout - (timer_monotonic() - a.startTime)) * 1000000;
db->GetEnv()->NowMicros() + (readValuePrefixTimeout - (readBeginTime - a.startTime)) * 1000000;
std::chrono::seconds deadlineSeconds(deadlineMircos / 1000000);
options.deadline = std::chrono::duration_cast<std::chrono::microseconds>(deadlineSeconds);
double dbGetBeginTime = a.getHistograms ? timer_monotonic() : 0;
auto s = db->Get(options, db->DefaultColumnFamily(), toSlice(a.key), &value);
if (a.getHistograms) {
readPrefixGetHistogram->sampleSeconds(timer_monotonic() - dbGetBeginTime);
}
if (a.debugID.present()) {
traceBatch.get().addEvent("GetValuePrefixDebug",
a.debugID.get().first(),
@ -470,19 +625,32 @@ struct RocksDBKeyValueStore : IKeyValueStore {
logRocksDBError(s, "ReadValuePrefix");
a.result.sendError(statusToError(s));
}
if (a.getHistograms) {
double currTime = timer_monotonic();
readPrefixActionHistogram->sampleSeconds(currTime - readBeginTime);
readPrefixLatencyHistogram->sampleSeconds(currTime - a.startTime);
}
}
struct ReadRangeAction : TypedAction<Reader, ReadRangeAction>, FastAllocated<ReadRangeAction> {
KeyRange keys;
int rowLimit, byteLimit;
double startTime;
bool getHistograms;
ThreadReturnPromise<RangeResult> result;
ReadRangeAction(KeyRange keys, int rowLimit, int byteLimit)
: keys(keys), rowLimit(rowLimit), byteLimit(byteLimit), startTime(timer_monotonic()) {}
: keys(keys), rowLimit(rowLimit), byteLimit(byteLimit), startTime(timer_monotonic()),
getHistograms(
(deterministicRandom()->random01() < SERVER_KNOBS->ROCKSDB_HISTOGRAMS_SAMPLE_RATE) ? true : false) {
}
double getTimeEstimate() const override { return SERVER_KNOBS->READ_RANGE_TIME_ESTIMATE; }
};
void action(ReadRangeAction& a) {
if (timer_monotonic() - a.startTime > readRangeTimeout) {
double readBeginTime = timer_monotonic();
if (a.getHistograms) {
readRangeQueueWaitHistogram->sampleSeconds(readBeginTime - a.startTime);
}
if (readBeginTime - a.startTime > readRangeTimeout) {
TraceEvent(SevWarn, "RocksDBError")
.detail("Error", "Read range request timedout")
.detail("Method", "ReadRangeAction")
@ -499,7 +667,7 @@ struct RocksDBKeyValueStore : IKeyValueStore {
rocksdb::Status s;
auto options = getReadOptions();
uint64_t deadlineMircos =
db->GetEnv()->NowMicros() + (readRangeTimeout - (timer_monotonic() - a.startTime)) * 1000000;
db->GetEnv()->NowMicros() + (readRangeTimeout - (readBeginTime - a.startTime)) * 1000000;
std::chrono::seconds deadlineSeconds(deadlineMircos / 1000000);
options.deadline = std::chrono::duration_cast<std::chrono::microseconds>(deadlineSeconds);
// When using a prefix extractor, ensure that keys are returned in order even if they cross
@ -508,7 +676,13 @@ struct RocksDBKeyValueStore : IKeyValueStore {
if (a.rowLimit >= 0) {
auto endSlice = toSlice(a.keys.end);
options.iterate_upper_bound = &endSlice;
double iterCreationBeginTime = a.getHistograms ? timer_monotonic() : 0;
auto cursor = std::unique_ptr<rocksdb::Iterator>(db->NewIterator(options));
if (a.getHistograms) {
readRangeNewIteratorHistogram->sampleSeconds(timer_monotonic() - iterCreationBeginTime);
}
cursor->Seek(toSlice(a.keys.begin));
while (cursor->Valid() && toStringRef(cursor->key()) < a.keys.end) {
KeyValueRef kv(toStringRef(cursor->key()), toStringRef(cursor->value()));
@ -532,7 +706,13 @@ struct RocksDBKeyValueStore : IKeyValueStore {
} else {
auto beginSlice = toSlice(a.keys.begin);
options.iterate_lower_bound = &beginSlice;
double iterCreationBeginTime = a.getHistograms ? timer_monotonic() : 0;
auto cursor = std::unique_ptr<rocksdb::Iterator>(db->NewIterator(options));
if (a.getHistograms) {
readRangeNewIteratorHistogram->sampleSeconds(timer_monotonic() - iterCreationBeginTime);
}
cursor->SeekForPrev(toSlice(a.keys.end));
if (cursor->Valid() && toStringRef(cursor->key()) == a.keys.end) {
cursor->Prev();
@ -569,6 +749,11 @@ struct RocksDBKeyValueStore : IKeyValueStore {
result.readThrough = result[result.size() - 1].key;
}
a.result.send(result);
if (a.getHistograms) {
double currTime = timer_monotonic();
readRangeActionHistogram->sampleSeconds(currTime - readBeginTime);
readRangeLatencyHistogram->sampleSeconds(currTime - a.startTime);
}
}
};
@ -675,7 +860,11 @@ struct RocksDBKeyValueStore : IKeyValueStore {
writeBatch.reset(new rocksdb::WriteBatch());
}
writeBatch->DeleteRange(toSlice(keyRange.begin), toSlice(keyRange.end));
if (keyRange.singleKeyRange()) {
writeBatch->Delete(toSlice(keyRange.begin));
} else {
writeBatch->DeleteRange(toSlice(keyRange.begin), toSlice(keyRange.end));
}
}
Future<Void> commit(bool) override {
@ -830,7 +1019,63 @@ IKeyValueStore* keyValueStoreRocksDB(std::string const& path,
namespace {
TEST_CASE("noSim/fdbserver/KeyValueStoreRocksDB/Reopen") {
TEST_CASE("noSim/fdbserver/KeyValueStoreRocksDB/RocksDBBasic") {
state const std::string rocksDBTestDir = "rocksdb-kvstore-basic-test-db";
platform::eraseDirectoryRecursive(rocksDBTestDir);
state IKeyValueStore* kvStore = new RocksDBKeyValueStore(rocksDBTestDir, deterministicRandom()->randomUniqueID());
wait(kvStore->init());
state StringRef foo = "foo"_sr;
state StringRef bar = "ibar"_sr;
kvStore->set({ foo, foo });
kvStore->set({ keyAfter(foo), keyAfter(foo) });
kvStore->set({ bar, bar });
kvStore->set({ keyAfter(bar), keyAfter(bar) });
wait(kvStore->commit(false));
{
Optional<Value> val = wait(kvStore->readValue(foo));
ASSERT(foo == val.get());
}
// Test single key deletion.
kvStore->clear(singleKeyRange(foo));
wait(kvStore->commit(false));
{
Optional<Value> val = wait(kvStore->readValue(foo));
ASSERT(!val.present());
}
{
Optional<Value> val = wait(kvStore->readValue(keyAfter(foo)));
ASSERT(keyAfter(foo) == val.get());
}
// Test range deletion.
kvStore->clear(KeyRangeRef(keyAfter(foo), keyAfter(bar)));
wait(kvStore->commit(false));
{
Optional<Value> val = wait(kvStore->readValue(bar));
ASSERT(!val.present());
}
{
Optional<Value> val = wait(kvStore->readValue(keyAfter(bar)));
ASSERT(keyAfter(bar) == val.get());
}
Future<Void> closed = kvStore->onClosed();
kvStore->close();
wait(closed);
platform::eraseDirectoryRecursive(rocksDBTestDir);
return Void();
}
TEST_CASE("noSim/fdbserver/KeyValueStoreRocksDB/RocksDBReopen") {
state const std::string rocksDBTestDir = "rocksdb-kvstore-reopen-test-db";
platform::eraseDirectoryRecursive(rocksDBTestDir);

View File

@ -41,7 +41,7 @@ struct PeekTxsInfo {
knownCommittedVersion(knownCommittedVersion) {}
};
class LogSystemDiskQueueAdapter : public IDiskQueue {
class LogSystemDiskQueueAdapter final : public IDiskQueue {
public:
// This adapter is designed to let KeyValueStoreMemory use ILogSystem
// as a backing store, so that the transaction subsystem can in

View File

@ -891,8 +891,8 @@ ACTOR Future<Void> sendMutationsToApplier(
for (splitMutationIndex = 0; splitMutationIndex < mvector.size(); splitMutationIndex++) {
MutationRef mutation = mvector[splitMutationIndex];
UID applierID = nodeIDs[splitMutationIndex];
DEBUG_MUTATION("RestoreLoaderSplittedMutation", commitVersion.version, mutation)
.detail("Version", commitVersion.toString());
DEBUG_MUTATION("RestoreLoaderSplitMutation", commitVersion.version, mutation)
.detail("CommitVersion", commitVersion.toString());
// CAREFUL: The splitted mutations' lifetime is shorter than the for-loop
// Must use deep copy for splitted mutations
applierVersionedMutationsBuffer[applierID].push_back_deep(

View File

@ -63,6 +63,7 @@
#include "fdbserver/WorkerInterface.actor.h"
#include "fdbserver/pubsub.h"
#include "fdbserver/workloads/workloads.actor.h"
#include "flow/ArgParseUtil.h"
#include "flow/DeterministicRandom.h"
#include "flow/Platform.h"
#include "flow/ProtocolVersion.h"
@ -104,17 +105,17 @@ enum {
CSimpleOpt::SOption g_rgOptions[] = {
{ OPT_CONNFILE, "-C", SO_REQ_SEP },
{ OPT_CONNFILE, "--cluster_file", SO_REQ_SEP },
{ OPT_SEEDCONNFILE, "--seed_cluster_file", SO_REQ_SEP },
{ OPT_SEEDCONNSTRING, "--seed_connection_string", SO_REQ_SEP },
{ OPT_CONNFILE, "--cluster-file", SO_REQ_SEP },
{ OPT_SEEDCONNFILE, "--seed-cluster-file", SO_REQ_SEP },
{ OPT_SEEDCONNSTRING, "--seed-connection-string", SO_REQ_SEP },
{ OPT_ROLE, "-r", SO_REQ_SEP },
{ OPT_ROLE, "--role", SO_REQ_SEP },
{ OPT_PUBLICADDR, "-p", SO_REQ_SEP },
{ OPT_PUBLICADDR, "--public_address", SO_REQ_SEP },
{ OPT_PUBLICADDR, "--public-address", SO_REQ_SEP },
{ OPT_LISTEN, "-l", SO_REQ_SEP },
{ OPT_LISTEN, "--listen_address", SO_REQ_SEP },
{ OPT_LISTEN, "--listen-address", SO_REQ_SEP },
#ifdef __linux__
{ OPT_FILESYSTEM, "--data_filesystem", SO_REQ_SEP },
{ OPT_FILESYSTEM, "--data-filesystem", SO_REQ_SEP },
{ OPT_PROFILER_RSS_SIZE, "--rsssize", SO_REQ_SEP },
#endif
{ OPT_DATAFOLDER, "-d", SO_REQ_SEP },
@ -132,7 +133,7 @@ CSimpleOpt::SOption g_rgOptions[] = {
{ OPT_NEWCONSOLE, "-n", SO_NONE },
{ OPT_NEWCONSOLE, "--newconsole", SO_NONE },
{ OPT_NOBOX, "-q", SO_NONE },
{ OPT_NOBOX, "--no_dialog", SO_NONE },
{ OPT_NOBOX, "--no-dialog", SO_NONE },
#endif
{ OPT_KVFILE, "--kvfile", SO_REQ_SEP },
{ OPT_TESTFILE, "-f", SO_REQ_SEP },
@ -146,48 +147,48 @@ CSimpleOpt::SOption g_rgOptions[] = {
{ OPT_MEMLIMIT, "-m", SO_REQ_SEP },
{ OPT_MEMLIMIT, "--memory", SO_REQ_SEP },
{ OPT_STORAGEMEMLIMIT, "-M", SO_REQ_SEP },
{ OPT_STORAGEMEMLIMIT, "--storage_memory", SO_REQ_SEP },
{ OPT_CACHEMEMLIMIT, "--cache_memory", SO_REQ_SEP },
{ OPT_STORAGEMEMLIMIT, "--storage-memory", SO_REQ_SEP },
{ OPT_CACHEMEMLIMIT, "--cache-memory", SO_REQ_SEP },
{ OPT_MACHINEID, "-i", SO_REQ_SEP },
{ OPT_MACHINEID, "--machine_id", SO_REQ_SEP },
{ OPT_MACHINEID, "--machine-id", SO_REQ_SEP },
{ OPT_DCID, "-a", SO_REQ_SEP },
{ OPT_DCID, "--datacenter_id", SO_REQ_SEP },
{ OPT_DCID, "--datacenter-id", SO_REQ_SEP },
{ OPT_MACHINE_CLASS, "-c", SO_REQ_SEP },
{ OPT_MACHINE_CLASS, "--class", SO_REQ_SEP },
{ OPT_BUGGIFY, "-b", SO_REQ_SEP },
{ OPT_BUGGIFY, "--buggify", SO_REQ_SEP },
{ OPT_VERSION, "-v", SO_NONE },
{ OPT_VERSION, "--version", SO_NONE },
{ OPT_BUILD_FLAGS, "--build_flags", SO_NONE },
{ OPT_BUILD_FLAGS, "--build-flags", SO_NONE },
{ OPT_CRASHONERROR, "--crash", SO_NONE },
{ OPT_NETWORKIMPL, "-N", SO_REQ_SEP },
{ OPT_NETWORKIMPL, "--network", SO_REQ_SEP },
{ OPT_NOBUFSTDOUT, "--unbufferedout", SO_NONE },
{ OPT_BUFSTDOUTERR, "--bufferedout", SO_NONE },
{ OPT_TRACECLOCK, "--traceclock", SO_REQ_SEP },
{ OPT_NUMTESTERS, "--num_testers", SO_REQ_SEP },
{ OPT_NUMTESTERS, "--num-testers", SO_REQ_SEP },
{ OPT_HELP, "-?", SO_NONE },
{ OPT_HELP, "-h", SO_NONE },
{ OPT_HELP, "--help", SO_NONE },
{ OPT_DEVHELP, "--dev-help", SO_NONE },
{ OPT_KNOB, "--knob_", SO_REQ_SEP },
{ OPT_UNITTESTPARAM, "--test_", SO_REQ_SEP },
{ OPT_LOCALITY, "--locality_", SO_REQ_SEP },
{ OPT_KNOB, "--knob-", SO_REQ_SEP },
{ OPT_UNITTESTPARAM, "--test-", SO_REQ_SEP },
{ OPT_LOCALITY, "--locality-", SO_REQ_SEP },
{ OPT_TESTSERVERS, "--testservers", SO_REQ_SEP },
{ OPT_TEST_ON_SERVERS, "--testonservers", SO_NONE },
{ OPT_METRICSCONNFILE, "--metrics_cluster", SO_REQ_SEP },
{ OPT_METRICSPREFIX, "--metrics_prefix", SO_REQ_SEP },
{ OPT_IO_TRUST_SECONDS, "--io_trust_seconds", SO_REQ_SEP },
{ OPT_IO_TRUST_WARN_ONLY, "--io_trust_warn_only", SO_NONE },
{ OPT_TRACE_FORMAT, "--trace_format", SO_REQ_SEP },
{ OPT_WHITELIST_BINPATH, "--whitelist_binpath", SO_REQ_SEP },
{ OPT_BLOB_CREDENTIAL_FILE, "--blob_credential_file", SO_REQ_SEP },
{ OPT_CONFIG_PATH, "--config_path", SO_REQ_SEP },
{ OPT_USE_TEST_CONFIG_DB, "--use_test_config_db", SO_NONE },
{ OPT_METRICSCONNFILE, "--metrics-cluster", SO_REQ_SEP },
{ OPT_METRICSPREFIX, "--metrics-prefix", SO_REQ_SEP },
{ OPT_IO_TRUST_SECONDS, "--io-trust-seconds", SO_REQ_SEP },
{ OPT_IO_TRUST_WARN_ONLY, "--io-trust-warn-only", SO_NONE },
{ OPT_TRACE_FORMAT, "--trace-format", SO_REQ_SEP },
{ OPT_WHITELIST_BINPATH, "--whitelist-binpath", SO_REQ_SEP },
{ OPT_BLOB_CREDENTIAL_FILE, "--blob-credential-file", SO_REQ_SEP },
{ OPT_CONFIG_PATH, "--config-path", SO_REQ_SEP },
{ OPT_USE_TEST_CONFIG_DB, "--use-test-config-db", SO_NONE },
{ OPT_FAULT_INJECTION, "-fi", SO_REQ_SEP },
{ OPT_FAULT_INJECTION, "--fault_injection", SO_REQ_SEP },
{ OPT_PROFILER, "--profiler_", SO_REQ_SEP},
{ OPT_PRINT_SIMTIME, "--print_sim_time", SO_NONE },
{ OPT_FAULT_INJECTION, "--fault-injection", SO_REQ_SEP },
{ OPT_PROFILER, "--profiler-", SO_REQ_SEP},
{ OPT_PRINT_SIMTIME, "--print-sim-time", SO_NONE },
#ifndef TLS_DISABLED
TLS_OPTION_FLAGS
@ -559,29 +560,29 @@ static void printOptionUsage(std::string option, std::string description) {
static void printUsage(const char* name, bool devhelp) {
printf("FoundationDB " FDB_VT_PACKAGE_NAME " (v" FDB_VT_VERSION ")\n");
printf("Usage: %s -p ADDRESS [OPTIONS]\n\n", name);
printOptionUsage("-p ADDRESS, --public_address ADDRESS",
printOptionUsage("-p ADDRESS, --public-address ADDRESS",
" Public address, specified as `IP_ADDRESS:PORT' or `auto:PORT'.");
printOptionUsage("-l ADDRESS, --listen_address ADDRESS",
printOptionUsage("-l ADDRESS, --listen-address ADDRESS",
" Listen address, specified as `IP_ADDRESS:PORT' (defaults to"
" public address).");
printOptionUsage("-C CONNFILE, --cluster_file CONNFILE",
printOptionUsage("-C CONNFILE, --cluster-file CONNFILE",
" The path of a file containing the connection string for the"
" FoundationDB cluster. The default is first the value of the"
" FDB_CLUSTER_FILE environment variable, then `./fdb.cluster',"
" then `" +
platform::getDefaultClusterFilePath() + "'.");
printOptionUsage("--seed_cluster_file SEEDCONNFILE",
printOptionUsage("--seed-cluster-file SEEDCONNFILE",
" The path of a seed cluster file which will be used to connect"
" if the -C cluster file does not exist. If the server connects"
" successfully using the seed file, then it copies the file to"
" the -C file location.");
printOptionUsage("--seed_connection_string SEEDCONNSTRING",
printOptionUsage("--seed-connection-string SEEDCONNSTRING",
" The path of a seed connection string which will be used to connect"
" if the -C cluster file does not exist. If the server connects"
" successfully using the seed string, then it copies the string to"
" the -C file location.");
#ifdef __linux__
printOptionUsage("--data_filesystem PATH",
printOptionUsage("--data-filesystem PATH",
" Turns on validation that all data files are written to a drive"
" mounted at the specified PATH. This checks that the device at PATH"
" is currently mounted and that any data files get written to the"
@ -601,28 +602,28 @@ static void printUsage(const char* name, bool devhelp) {
printOptionUsage("--loggroup LOG_GROUP",
" Sets the LogGroup field with the specified value for all"
" events in the trace output (defaults to `default').");
printOptionUsage("--trace_format FORMAT",
printOptionUsage("--trace-format FORMAT",
" Select the format of the log files. xml (the default) and json"
" are supported.");
printOptionUsage("--tracer TRACER",
" Select a tracer for transaction tracing. Currently disabled"
" (the default) and log_file are supported.");
printOptionUsage("-i ID, --machine_id ID",
printOptionUsage("-i ID, --machine-id ID",
" Machine and zone identifier key (up to 16 hex characters)."
" Defaults to a random value shared by all fdbserver processes"
" on this machine.");
printOptionUsage("-a ID, --datacenter_id ID", " Data center identifier key (up to 16 hex characters).");
printOptionUsage("--locality_LOCALITYKEY LOCALITYVALUE",
printOptionUsage("-a ID, --datacenter-id ID", " Data center identifier key (up to 16 hex characters).");
printOptionUsage("--locality-LOCALITYKEY LOCALITYVALUE",
" Define a locality key. LOCALITYKEY is case-insensitive though"
" LOCALITYVALUE is not.");
printOptionUsage("-m SIZE, --memory SIZE",
" Memory limit. The default value is 8GiB. When specified"
" without a unit, MiB is assumed.");
printOptionUsage("-M SIZE, --storage_memory SIZE",
printOptionUsage("-M SIZE, --storage-memory SIZE",
" Maximum amount of memory used for storage. The default"
" value is 1GiB. When specified without a unit, MB is"
" assumed.");
printOptionUsage("--cache_memory SIZE",
printOptionUsage("--cache-memory SIZE",
" The amount of memory to use for caching disk pages."
" The default value is 2GiB. When specified without a unit,"
" MiB is assumed.");
@ -630,7 +631,7 @@ static void printUsage(const char* name, bool devhelp) {
" Machine class (valid options are storage, transaction,"
" resolution, grv_proxy, commit_proxy, master, test, unset, stateless, log, router,"
" and cluster_controller).");
printOptionUsage("--profiler_",
printOptionUsage("--profiler-",
"Set an actor profiler option. Supported options are:\n"
" collector -- None or FluentD (FluentD requires collector_endpoint to be set)\n"
" collector_endpoint -- IP:PORT of the fluentd server\n"
@ -641,7 +642,7 @@ static void printUsage(const char* name, bool devhelp) {
printOptionUsage("-v, --version", "Print version information and exit.");
printOptionUsage("-h, -?, --help", "Display this help and exit.");
if (devhelp) {
printf(" --build_flags Print build information and exit.\n");
printf(" --build-flags Print build information and exit.\n");
printOptionUsage(
"-r ROLE, --role ROLE",
" Server role (valid options are fdbd, test, multitest,"
@ -649,7 +650,7 @@ static void printUsage(const char* name, bool devhelp) {
" consistencycheck, kvfileintegritycheck, kvfilegeneratesums, unittests). The default is `fdbd'.");
#ifdef _WIN32
printOptionUsage("-n, --newconsole", " Create a new console.");
printOptionUsage("-q, --no_dialog", " Disable error dialog on crash.");
printOptionUsage("-q, --no-dialog", " Disable error dialog on crash.");
printOptionUsage("--parentpid PID", " Specify a process after whose termination to exit.");
#endif
printOptionUsage("-f TESTFILE, --testfile",
@ -662,7 +663,7 @@ static void printUsage(const char* name, bool devhelp) {
"--kvfile FILE",
"Input file (SQLite database file) for use by the 'kvfilegeneratesums' and 'kvfileintegritycheck' roles.");
printOptionUsage("-b [on,off], --buggify [on,off]", " Sets Buggify system state, defaults to `off'.");
printOptionUsage("-fi [on,off], --fault_injection [on,off]", " Sets fault injection, defaults to `on'.");
printOptionUsage("-fi [on,off], --fault-injection [on,off]", " Sets fault injection, defaults to `on'.");
printOptionUsage("--crash", "Crash on serious errors instead of continuing.");
printOptionUsage("-N NETWORKIMPL, --network NETWORKIMPL",
" Select network implementation, `net2' (default),"
@ -672,10 +673,10 @@ static void printUsage(const char* name, bool devhelp) {
printOptionUsage("--traceclock CLOCKIMPL",
" Select clock source for trace files, `now' (default) or"
" `realtime'.");
printOptionUsage("--num_testers NUM",
printOptionUsage("--num-testers NUM",
" A multitester will wait for NUM testers before starting"
" (defaults to 1).");
printOptionUsage("--test_PARAMNAME PARAMVALUE",
printOptionUsage("--test-PARAMNAME PARAMVALUE",
" Set a UnitTest named parameter to the given value. Names are case sensitive.");
#ifdef __linux__
printOptionUsage("--rsssize SIZE",
@ -687,20 +688,20 @@ static void printUsage(const char* name, bool devhelp) {
" The addresses of networktestservers"
" specified as ADDRESS:PORT,ADDRESS:PORT...");
printOptionUsage("--testonservers", " Testers are recruited on servers.");
printOptionUsage("--metrics_cluster CONNFILE",
printOptionUsage("--metrics-cluster CONNFILE",
" The cluster file designating where this process will"
" store its metric data. By default metrics will be stored"
" in the same database the process is participating in.");
printOptionUsage("--metrics_prefix PREFIX",
printOptionUsage("--metrics-prefix PREFIX",
" The prefix where this process will store its metric data."
" Must be specified if using a different database for metrics.");
printOptionUsage("--knob_KNOBNAME KNOBVALUE", " Changes a database knob. KNOBNAME should be lowercase.");
printOptionUsage("--io_trust_seconds SECONDS",
printOptionUsage("--knob-KNOBNAME KNOBVALUE", " Changes a database knob. KNOBNAME should be lowercase.");
printOptionUsage("--io-trust-seconds SECONDS",
" Sets the time in seconds that a read or write operation is allowed to take"
" before timing out with an error. If an operation times out, all future"
" operations on that file will fail with an error as well. Only has an effect"
" when using AsyncFileKAIO in Linux.");
printOptionUsage("--io_trust_warn_only",
printOptionUsage("--io-trust-warn-only",
" Instead of failing when an I/O operation exceeds io_trust_seconds, just"
" log a warning to the trace log. Has no effect if io_trust_seconds is unspecified.");
} else {
@ -1023,7 +1024,7 @@ private:
commandLine += argv[a];
}
CSimpleOpt args(argc, argv, g_rgOptions, SO_O_EXACT);
CSimpleOpt args(argc, argv, g_rgOptions, SO_O_EXACT | SO_O_HYPHEN_TO_UNDERSCORE);
if (argc == 1) {
printUsage(argv[0], false);
@ -1071,46 +1072,42 @@ private:
flushAndExit(FDB_EXIT_SUCCESS);
break;
case OPT_KNOB: {
std::string syn = args.OptionSyntax();
if (!StringRef(syn).startsWith(LiteralStringRef("--knob_"))) {
fprintf(stderr, "ERROR: unable to parse knob option '%s'\n", syn.c_str());
Optional<std::string> knobName = extractPrefixedArgument("--knob", args.OptionSyntax());
if (!knobName.present()) {
fprintf(stderr, "ERROR: unable to parse knob option '%s'\n", args.OptionSyntax());
flushAndExit(FDB_EXIT_ERROR);
}
syn = syn.substr(7);
knobs.emplace_back(syn, args.OptionArg());
manualKnobOverrides[syn] = args.OptionArg();
knobs.emplace_back(knobName.get(), args.OptionArg());
manualKnobOverrides[knobName.get()] = args.OptionArg();
break;
}
case OPT_PROFILER: {
std::string syn = args.OptionSyntax();
std::string_view key = syn;
auto prefix = "--profiler_"sv;
if (key.find(prefix) != 0) {
fprintf(stderr, "ERROR: unable to parse profiler option '%s'\n", syn.c_str());
Optional<std::string> profilerArg = extractPrefixedArgument("--profiler", args.OptionSyntax());
if (!profilerArg.present()) {
fprintf(stderr, "ERROR: unable to parse profiler option '%s'\n", args.OptionSyntax());
flushAndExit(FDB_EXIT_ERROR);
}
key.remove_prefix(prefix.size());
profilerConfig.emplace(key, args.OptionArg());
profilerConfig.emplace(profilerArg.get(), args.OptionArg());
break;
};
case OPT_UNITTESTPARAM: {
std::string syn = args.OptionSyntax();
if (!StringRef(syn).startsWith(LiteralStringRef("--test_"))) {
fprintf(stderr, "ERROR: unable to parse knob option '%s'\n", syn.c_str());
Optional<std::string> testArg = extractPrefixedArgument("--test", args.OptionSyntax());
if (!testArg.present()) {
fprintf(stderr, "ERROR: unable to parse unit test option '%s'\n", args.OptionSyntax());
flushAndExit(FDB_EXIT_ERROR);
}
testParams.set(syn.substr(7), args.OptionArg());
testParams.set(testArg.get(), args.OptionArg());
break;
}
case OPT_LOCALITY: {
std::string syn = args.OptionSyntax();
if (!StringRef(syn).startsWith(LiteralStringRef("--locality_"))) {
fprintf(stderr, "ERROR: unable to parse locality key '%s'\n", syn.c_str());
Optional<std::string> localityKey = extractPrefixedArgument("--locality", args.OptionSyntax());
if (!localityKey.present()) {
fprintf(stderr, "ERROR: unable to parse locality key '%s'\n", args.OptionSyntax());
flushAndExit(FDB_EXIT_ERROR);
}
syn = syn.substr(11);
std::transform(syn.begin(), syn.end(), syn.begin(), ::tolower);
localities.set(Standalone<StringRef>(syn), Standalone<StringRef>(std::string(args.OptionArg())));
Standalone<StringRef> key = StringRef(localityKey.get());
std::transform(key.begin(), key.end(), mutateString(key), ::tolower);
localities.set(key, Standalone<StringRef>(std::string(args.OptionArg())));
break;
}
case OPT_VERSION:
@ -1522,7 +1519,7 @@ private:
if (seedConnString.length() && seedConnFile.length()) {
fprintf(
stderr, "%s\n", "--seed_cluster_file and --seed_connection_string may not both be specified at once.");
stderr, "%s\n", "--seed-cluster-file and --seed-connection-string may not both be specified at once.");
flushAndExit(FDB_EXIT_ERROR);
}
@ -1531,7 +1528,7 @@ private:
if (seedSpecified && !connFile.length()) {
fprintf(stderr,
"%s\n",
"If -seed_cluster_file or --seed_connection_string is specified, -C must be specified as well.");
"If -seed-cluster-file or --seed-connection-string is specified, -C must be specified as well.");
flushAndExit(FDB_EXIT_ERROR);
}
@ -1751,7 +1748,7 @@ int main(int argc, char* argv[]) {
EvictablePageCache::evictionPolicyStringToEnum(FLOW_KNOBS->CACHE_EVICTION_POLICY);
if (opts.memLimit <= FLOW_KNOBS->PAGE_CACHE_4K) {
fprintf(stderr, "ERROR: --memory has to be larger than --cache_memory\n");
fprintf(stderr, "ERROR: --memory has to be larger than --cache-memory\n");
flushAndExit(FDB_EXIT_ERROR);
}
@ -1793,7 +1790,7 @@ int main(int argc, char* argv[]) {
(role == ServerRole::FDBD || role == ServerRole::NetworkTestServer || role == ServerRole::Restore);
if (opts.publicAddressStrs.empty()) {
if (expectsPublicAddress) {
fprintf(stderr, "ERROR: The -p or --public_address option is required\n");
fprintf(stderr, "ERROR: The -p or --public-address option is required\n");
printHelpTeaser(argv[0]);
flushAndExit(FDB_EXIT_ERROR);
}

View File

@ -1746,7 +1746,7 @@ ACTOR Future<Void> masterCore(Reference<MasterData> self) {
.detail("OldGenerations", self->cstate.myDBState.oldTLogData.size())
.detail("Reason",
"Recovery stopped because too many recoveries have happened since the last time the cluster "
"was fully_recovered. Set --knob_max_generations_override on your server processes to a value "
"was fully_recovered. Set --knob-max-generations-override on your server processes to a value "
"larger than OldGenerations to resume recovery once the underlying problem has been fixed.");
wait(Future<Void>(Never()));
} else if (self->cstate.myDBState.oldTLogData.size() > CLIENT_KNOBS->RECOVERY_DELAY_START_GENERATION) {
@ -1754,7 +1754,7 @@ ACTOR Future<Void> masterCore(Reference<MasterData> self) {
.detail("OldGenerations", self->cstate.myDBState.oldTLogData.size())
.detail("Reason",
"Recovery is delayed because too many recoveries have happened since the last time the cluster "
"was fully_recovered. Set --knob_max_generations_override on your server processes to a value "
"was fully_recovered. Set --knob-max-generations-override on your server processes to a value "
"larger than OldGenerations to resume recovery once the underlying problem has been fixed.");
wait(delay(CLIENT_KNOBS->RECOVERY_DELAY_SECONDS_PER_GENERATION *
(self->cstate.myDBState.oldTLogData.size() - CLIENT_KNOBS->RECOVERY_DELAY_START_GENERATION)));

View File

@ -325,7 +325,7 @@ protected:
LogEvent(EVENTLOG_INFORMATION_TYPE, format("Default config file at %s", _confpath.c_str()));
// Parse "command line" options
CSimpleOpt args(argc, argv, g_rgOptions, SO_O_NOERR);
CSimpleOpt args(argc, argv, g_rgOptions, SO_O_NOERR | SO_O_HYPHEN_TO_UNDERSCORE);
while (args.Next()) {
if (args.LastError() == SO_SUCCESS) {
@ -646,15 +646,47 @@ private:
const char* getValueMulti(const CSimpleIni& ini, const char* name, ...) {
const char* ret = nullptr;
const char* section = nullptr;
std::string nameWithUnderscores(name);
for (int i = nameWithUnderscores.size() - 1; i >= 0; --i) {
if (nameWithUnderscores[i] == '-') {
nameWithUnderscores.at(i) = '_';
}
}
va_list ap;
va_start(ap, name);
while (!ret && (section = va_arg(ap, const char*))) {
ret = ini.GetValue(section, name, nullptr);
if (!ret) {
ret = ini.GetValue(section, nameWithUnderscores.c_str(), nullptr);
}
}
va_end(ap);
return ret;
}
bool isParameterNameEqual(const char* str, const char* target) {
if (!str || !target) {
return false;
}
while (*str && *target) {
char curStr = *str, curTarget = *target;
if (curStr == '-') {
curStr = '_';
}
if (curTarget == '-') {
curTarget = '_';
}
if (curStr != curTarget) {
return false;
}
str++;
target++;
}
return !(*str || *target);
}
Command makeCommand(const CSimpleIni& ini, std::string section, uint16_t id) {
std::string ssection = format("%s.%d", section.c_str(), id);
Command result;
@ -673,7 +705,7 @@ private:
});
const char* rd =
getValueMulti(ini, "restart_delay", ssection.c_str(), section.c_str(), "general", "fdbmonitor", nullptr);
getValueMulti(ini, "restart-delay", ssection.c_str(), section.c_str(), "general", "fdbmonitor", nullptr);
if (!rd) {
LogEvent(EVENTLOG_ERROR_TYPE, format("Unable to resolve restart delay for %s\n", ssection.c_str()));
return result;
@ -687,7 +719,7 @@ private:
}
const char* q =
getValueMulti(ini, "disable_lifecycle_logging", ssection.c_str(), section.c_str(), "general", nullptr);
getValueMulti(ini, "disable-lifecycle-logging", ssection.c_str(), section.c_str(), "general", nullptr);
if (q && !strcmp(q, "true"))
result.quiet = true;
@ -702,8 +734,8 @@ private:
const char* id_s = ssection.c_str() + strlen(section.c_str()) + 1;
for (auto i : keys) {
if (!strcmp(i.pItem, "command") || !strcmp(i.pItem, "restart_delay") ||
!strcmp(i.pItem, "disable_lifecycle_logging")) {
if (isParameterNameEqual(i.pItem, "command") || isParameterNameEqual(i.pItem, "restart-delay") ||
isParameterNameEqual(i.pItem, "disable-lifecycle-logging")) {
continue;
}

40
flow/ArgParseUtil.h Normal file
View File

@ -0,0 +1,40 @@
/*
* Arena.h
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2021 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#ifndef FLOW_ARGPARSEUTIL_H
#define FLOW_ARGPARSEUTIL_H
#include "flow/Arena.h"
// Extracts the key for command line arguments that are specified with a prefix (e.g. --knob-).
// This function converts any hyphens in the extracted key to underscores.
Optional<std::string> extractPrefixedArgument(std::string prefix, std::string arg) {
if (arg.size() <= prefix.size() || arg.find(prefix) != 0 ||
(arg[prefix.size()] != '-' && arg[prefix.size()] != '_')) {
return Optional<std::string>();
}
arg = arg.substr(prefix.size() + 1);
std::transform(arg.begin(), arg.end(), arg.begin(), [](int c) { return c == '-' ? '_' : c; });
return arg;
}
#endif

View File

@ -5,6 +5,7 @@ set(FLOW_SRCS
ActorCollection.h
Arena.cpp
Arena.h
ArgParseUtil.h
AsioReactor.h
BooleanParam.h
CompressedInt.actor.cpp

View File

@ -30,7 +30,7 @@
#include <random>
class DeterministicRandom : public IRandom, public ReferenceCounted<DeterministicRandom> {
class DeterministicRandom final : public IRandom, public ReferenceCounted<DeterministicRandom> {
private:
std::mt19937 random;
uint64_t next;

View File

@ -280,7 +280,11 @@ enum _ESOFlags {
SO_O_ICASE_WORD = 0x0400,
/*! Case-insensitive comparisons for all arg types */
SO_O_ICASE = 0x0700
SO_O_ICASE = 0x0700,
/*! Treat all hyphens from flag names as underscores except leading hyphens
For example: --cluster-file ==> --cluster_file while comparing. */
SO_O_HYPHEN_TO_UNDERSCORE = 0x1000
};
/*! Types of arguments that options may have. Note that some of the _ESOFlags
@ -912,8 +916,9 @@ int CSimpleOptTempl<SOCHAR>::CalcMatch(const SOCHAR* a_pszSource, const SOCHAR*
return -1;
}
// and the source is a "wildcard option", then it's a perfect match (e.g. "--knob_" matches "--knob_foo")
if (a_pszSource[-1] == '_') {
// and the source is a "wildcard option", then it's a perfect match (e.g. "--knob_" or "--knob-" matches
// "--knob_foo" and "--knob-foo")
if (a_pszSource[-1] == '_' || a_pszSource[-1] == '-') {
return -1;
}
@ -941,6 +946,14 @@ bool CSimpleOptTempl<SOCHAR>::IsEqual(SOCHAR a_cLeft, SOCHAR a_cRight, int a_nAr
if (a_cRight >= 'A' && a_cRight <= 'Z')
a_cRight += 'a' - 'A';
}
if (m_nFlags & SO_O_HYPHEN_TO_UNDERSCORE) {
if (a_cLeft == (SOCHAR)'-') {
a_cLeft = (SOCHAR)'_';
}
if (a_cRight == (SOCHAR)'-') {
a_cRight = (SOCHAR)'_';
}
}
return a_cLeft == a_cRight;
}

View File

@ -265,12 +265,12 @@ public:
bool is_client;
};
#define TLS_PLUGIN_FLAG "--tls_plugin"
#define TLS_CERTIFICATE_FILE_FLAG "--tls_certificate_file"
#define TLS_KEY_FILE_FLAG "--tls_key_file"
#define TLS_VERIFY_PEERS_FLAG "--tls_verify_peers"
#define TLS_CA_FILE_FLAG "--tls_ca_file"
#define TLS_PASSWORD_FLAG "--tls_password"
#define TLS_PLUGIN_FLAG "--tls-plugin"
#define TLS_CERTIFICATE_FILE_FLAG "--tls-certificate-file"
#define TLS_KEY_FILE_FLAG "--tls-key-file"
#define TLS_VERIFY_PEERS_FLAG "--tls-verify-peers"
#define TLS_CA_FILE_FLAG "--tls-ca-file"
#define TLS_PASSWORD_FLAG "--tls-password"
#define TLS_OPTION_FLAGS \
{ TLSConfig::OPT_TLS_PLUGIN, TLS_PLUGIN_FLAG, SO_REQ_SEP }, \

View File

@ -431,10 +431,15 @@ namespace actorcompiler
writer.WriteLine("public:");
writer.WriteLine("\tusing FastAllocated<{0}>::operator new;", fullClassName);
writer.WriteLine("\tusing FastAllocated<{0}>::operator delete;", fullClassName);
writer.WriteLine("#pragma clang diagnostic push");
writer.WriteLine("#pragma clang diagnostic ignored \"-Wdelete-non-virtual-dtor\"");
if (actor.returnType != null)
writer.WriteLine("\tvoid destroy() override {{ ((Actor<{0}>*)this)->~Actor(); operator delete(this); }}", actor.returnType);
else
writer.WriteLine("\tvoid destroy() {{ ((Actor<void>*)this)->~Actor(); operator delete(this); }}");
writer.WriteLine("#pragma clang diagnostic pop");
foreach (var cb in callbacks)
writer.WriteLine("friend struct {0};", cb.type);

View File

@ -94,6 +94,7 @@ ERROR( broken_promise, 1100, "Broken promise" )
ERROR( operation_cancelled, 1101, "Asynchronous operation cancelled" )
ERROR( future_released, 1102, "Future has been released" )
ERROR( connection_leaked, 1103, "Connection object leaked" )
ERROR( never_reply, 1104, "Never reply to the request" )
ERROR( recruitment_failed, 1200, "Recruitment of a server failed" ) // Be careful, catching this will delete the data of a storage server or tlog permanently
ERROR( move_to_removed_server, 1201, "Attempt to move keys to a storage server that was removed" )

View File

@ -747,7 +747,14 @@ public:
int getFutureReferenceCount() const { return futures; }
int getPromiseReferenceCount() const { return promises; }
virtual void destroy() { delete this; }
// Derived classes should override destroy.
virtual void destroy() {
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdelete-non-virtual-dtor"
delete this;
#pragma clang diagnostic pop
}
virtual void cancel() {}
void addCallbackAndDelFutureRef(Callback<T>* cb) {
@ -967,6 +974,8 @@ struct NotifiedQueue : private SingleCallback<T>, FastAllocated<NotifiedQueue<T>
SingleCallback<T>::next = this;
}
virtual ~NotifiedQueue() = default;
bool isReady() const { return !queue.empty() || error.isValid(); }
bool isError() const { return queue.empty() && error.isValid(); } // the *next* thing queued is an error
uint32_t size() const { return queue.size(); }

View File

@ -870,7 +870,7 @@ template <class T>
class QuorumCallback;
template <class T>
struct Quorum : SAV<Void> {
struct Quorum final : SAV<Void> {
int antiQuorum;
int count;
@ -1558,7 +1558,9 @@ Future<Void> yieldPromiseStream(FutureStream<T> input,
}
}
struct YieldedFutureActor : SAV<Void>, ActorCallback<YieldedFutureActor, 1, Void>, FastAllocated<YieldedFutureActor> {
struct YieldedFutureActor final : SAV<Void>,
ActorCallback<YieldedFutureActor, 1, Void>,
FastAllocated<YieldedFutureActor> {
Error in_error_state;
typedef ActorCallback<YieldedFutureActor, 1, Void> CB1;

View File

@ -53,6 +53,18 @@ RUN curl -Ls https://github.com/krallin/tini/releases/download/v0.19.0/tini-amd6
mv tini /usr/bin/ && \
rm -rf /tmp/*
RUN curl -Ls https://amazon-eks.s3.amazonaws.com/1.19.6/2021-01-05/bin/linux/amd64/kubectl -o kubectl && \
echo "08ff68159bbcb844455167abb1d0de75bbfe5ae1b051f81ab060a1988027868a kubectl" > kubectl.txt && \
sha256sum --quiet -c kubectl.txt && \
mv kubectl /usr/local/bin/kubectl && \
chmod 755 /usr/local/bin/kubectl && \
curl -Ls https://awscli.amazonaws.com/awscli-exe-linux-x86_64-2.2.43.zip -o "awscliv2.zip" && \
echo "9a8b3c4e7f72bbcc55e341dce3af42479f2730c225d6d265ee6f9162cfdebdfd awscliv2.zip" > awscliv2.txt && \
sha256sum --quiet -c awscliv2.txt && \
unzip -qq awscliv2.zip && \
./aws/install && \
rm -rf /tmp/*
WORKDIR /
FROM golang:1.16.7-bullseye AS go-build
@ -168,19 +180,6 @@ RUN yum -y install \
yum clean all && \
rm -rf /var/cache/yum
WORKDIR /tmp
RUN curl -Ls https://amazon-eks.s3.amazonaws.com/1.19.6/2021-01-05/bin/linux/amd64/kubectl -o kubectl && \
echo "08ff68159bbcb844455167abb1d0de75bbfe5ae1b051f81ab060a1988027868a kubectl" > kubectl.txt && \
sha256sum --quiet -c kubectl.txt && \
mv kubectl /usr/local/bin/kubectl && \
chmod 755 /usr/local/bin/kubectl && \
curl -Ls https://awscli.amazonaws.com/awscli-exe-linux-x86_64-2.2.43.zip -o "awscliv2.zip" && \
echo "9a8b3c4e7f72bbcc55e341dce3af42479f2730c225d6d265ee6f9162cfdebdfd awscliv2.zip" > awscliv2.txt && \
sha256sum --quiet -c awscliv2.txt && \
unzip -qq awscliv2.zip && \
./aws/install && \
rm -rf /tmp/*
# TODO: Log4J complains that it's eating the HTracer logs. Even without it, we get per-operation
# time series graphs of throughput, median, 90, 99, 99.9 and 99.99 (in usec).
ADD run_ycsb.sh /usr/local/bin/run_ycsb.sh

View File

@ -1,4 +1,13 @@
# packaging/docker
This directory contains the pieces for building FoundationDB docker images.
Read the `build-images.sh` shell script to learn more about how the images are
built and `Dockerfile{,.eks}` for what is contained in the images.
`build-images.sh` will optionally take a single parameter that will be used as an
image tag postfix.
For more details it is best to read the `build-images.sh` shell script itself to
learn more about how the images are built.
For details about what is in the images, peruse `Dockerfile{,.eks}`
the `samples` directory is out of date, and anything therein should be used with
the expectation that it is, at least, partially (if not entirely) incorrect.

View File

@ -228,8 +228,9 @@ artifactory_base_url="${ARTIFACTORY_URL:-https://artifactory.foundationdb.org}"
aws_region="us-west-2"
aws_account_id=$(aws --output text sts get-caller-identity --query 'Account')
build_date=$(date +"%Y-%m-%dT%H:%M:%S%z")
build_output_directory="${script_dir}/../../build_output"
commit_sha=$(git rev-parse --verify HEAD --short=10)
build_output_directory="${script_dir}/../../"
source_code_diretory=$(awk -F= '/foundationdb_SOURCE_DIR:STATIC/{print $2}' "${build_output_directory}/CMakeCache.txt")
commit_sha=$(cd "${source_code_diretory}" && git rev-parse --verify HEAD --short=10)
fdb_version=$(cat "${build_output_directory}/version.txt")
fdb_library_versions=( '5.1.7' '6.1.13' '6.2.30' '6.3.18' "${fdb_version}" )
fdb_website="https://www.foundationdb.org"
@ -259,7 +260,11 @@ if [ -n "${OKTETO_NAMESPACE+x}" ]; then
fdb_library_versions=( "${fdb_version}" )
registry="${aws_account_id}.dkr.ecr.${aws_region}.amazonaws.com"
tag_base="${registry}/foundationdb/"
tag_postfix="${OKTETO_NAME:-dev}"
if [ -n "${1+x}" ]; then
tag_postfix="${1}"
else
tag_postfix="${OKTETO_NAME:-dev}"
fi
stripped_binaries_and_from_where="unstripped_local" # MUST BE ONE OF ( "unstripped_artifactory" "stripped_artifactory" "unstripped_local" "stripped_local" )
dockerfile_name="Dockerfile.eks"
use_development_java_bindings="true"

View File

@ -63,6 +63,6 @@ function create_server_environment() {
create_server_environment
source /var/fdb/.fdbenv
echo "Starting FDB server on $PUBLIC_IP:$FDB_PORT"
fdbserver --listen_address 0.0.0.0:"$FDB_PORT" --public_address "$PUBLIC_IP:$FDB_PORT" \
fdbserver --listen-address 0.0.0.0:"$FDB_PORT" --public-address "$PUBLIC_IP:$FDB_PORT" \
--datadir /var/fdb/data --logdir /var/fdb/logs \
--locality_zoneid="$(hostname)" --locality_machineid="$(hostname)" --class "$FDB_PROCESS_CLASS"
--locality-zoneid="$(hostname)" --locality-machineid="$(hostname)" --class "$FDB_PROCESS_CLASS"

View File

@ -141,17 +141,17 @@ data:
"runProcesses": false,
"version": "6.3.13",
"arguments": [
{"value": "--cluster_file"},
{"value": "--cluster-file"},
{"value": "/var/fdb/data/fdb.cluster"},
{"value": "--seed_cluster_file"},
{"value": "--seed-cluster-file"},
{"value": "/var/fdb/dynamic-conf/fdb.cluster"},
{"value": "--public_address"},
{"value": "--public-address"},
{"type": "Concatenate", "values": [
{"type": "Environment", "source": "FDB_PUBLIC_IP"},
{"value": ":"},
{"type": "ProcessNumber", "offset": 4499, "multiplier": 2}
]},
{"value": "--listen_address"},
{"value": "--listen-address"},
{"type": "Concatenate", "values": [
{"type": "Environment", "source": "FDB_POD_IP"},
{"value": ":"},
@ -164,11 +164,11 @@ data:
]},
{"value": "--class"},
{"value": "storage"},
{"value": "--locality_zoneid"},
{"value": "--locality-zoneid"},
{"type": "Environment", "source": "FDB_ZONE_ID"},
{"value": "--locality_instance-id"},
{"value": "--locality-instance-id"},
{"type": "Environment", "source": "FDB_INSTANCE_ID"},
{"value": "--locality_process-id"},
{"value": "--locality-process-id"},
{"type": "Concatenate", "values": [
{"type": "Environment", "source": "FDB_INSTANCE_ID"},
{"value": "-"},
@ -176,7 +176,7 @@ data:
]},
{"value": "--logdir"},
{"value": "/var/fdb/logs"},
{"value": "--trace_format"},
{"value": "--trace-format"},
{"value": "json"}
]
}

View File

@ -9,32 +9,32 @@ user = foundationdb
group = foundationdb
[general]
restart_delay = 60
## by default, restart_backoff = restart_delay_reset_interval = restart_delay
# initial_restart_delay = 0
# restart_backoff = 60
# restart_delay_reset_interval = 60
cluster_file = /etc/foundationdb/fdb.cluster
# delete_envvars =
# kill_on_configuration_change = true
restart-delay = 60
## by default, restart-backoff = restart-delay-reset-interval = restart-delay
# initial-restart-delay = 0
# restart-backoff = 60
# restart-delay-reset-interval = 60
cluster-file = /etc/foundationdb/fdb.cluster
# delete-envvars =
# kill-on-configuration-change = true
## Default parameters for individual fdbserver processes
[fdbserver]
command = /usr/sbin/fdbserver
public_address = auto:$ID
listen_address = public
public-address = auto:$ID
listen-address = public
datadir = /var/lib/foundationdb/data/$ID
logdir = /var/log/foundationdb
# logsize = 10MiB
# maxlogssize = 100MiB
# machine_id =
# datacenter_id =
# machine-id =
# datacenter-id =
# class =
# memory = 8GiB
# storage_memory = 1GiB
# cache_memory = 2GiB
# metrics_cluster =
# metrics_prefix =
# storage-memory = 1GiB
# cache-memory = 2GiB
# metrics-cluster =
# metrics-prefix =
## An individual fdbserver process with id 4500
## Parameters set here override defaults from the [fdbserver] section

View File

@ -5,25 +5,25 @@
## https://apple.github.io/foundationdb/configuration.html#the-configuration-file
[fdbmonitor]
restart_delay = 20
restart-delay = 20
[general]
## Default parameters for individual fdbserver processes
[fdbserver]
public_address = auto:$ID
listen_address = public
public-address = auto:$ID
listen-address = public
parentpid = $PID
# logsize = 10MiB
# maxlogssize = 100MiB
# machine_id =
# datacenter_id =
# machine-id =
# datacenter-id =
# class =
# memory = 8GiB
# storage_memory = 1GiB
# cache_memory = 2GiB
# metrics_cluster =
# metrics_prefix =
# storage-memory = 1GiB
# cache-memory = 2GiB
# metrics-cluster =
# metrics-prefix =
## An individual fdbserver process with id 4500
## Parameters set here override defaults from the [fdbserver] section

View File

@ -5,31 +5,31 @@
## https://apple.github.io/foundationdb/configuration.html#the-configuration-file
[general]
restart_delay = 60
## by default, restart_backoff = restart_delay_reset_interval = restart_delay
# initial_restart_delay = 0
# restart_backoff = 60
# restart_delay_reset_interval = 60
cluster_file = /usr/local/etc/foundationdb/fdb.cluster
# kill_on_configuration_change = true
restart-delay = 60
## by default, restart-backoff = restart-delay-reset-interval = restart-delay
# initial-restart-delay = 0
# restart-backoff = 60
# restart-delay-reset-interval = 60
cluster-file = /usr/local/etc/foundationdb/fdb.cluster
# kill-on-configuration-change = true
## Default parameters for individual fdbserver processes
[fdbserver]
command = /usr/local/libexec/fdbserver
public_address = auto:$ID
listen_address = public
public-address = auto:$ID
listen-address = public
datadir = /usr/local/foundationdb/data/$ID
logdir = /usr/local/foundationdb/logs
# logsize = 10MiB
# maxlogssize = 100MiB
# machine_id =
# datacenter_id =
# machine-id =
# datacenter-id =
# class =
# memory = 8GiB
# storage_memory = 1GiB
# cache_memory = 2GiB
# metrics_cluster =
# metrics_prefix =
# storage-memory = 1GiB
# cache-memory = 2GiB
# metrics-cluster =
# metrics-prefix =
## An individual fdbserver process with id 4689
## Parameters set here override defaults from the [fdbserver] section

View File

@ -176,6 +176,11 @@ if(WITH_PYTHON)
add_fdb_test(TEST_FILES fast/WriteDuringRead.toml)
add_fdb_test(TEST_FILES fast/WriteDuringReadClean.toml)
add_fdb_test(TEST_FILES noSim/RandomUnitTests.toml UNIT)
if(SSD_ROCKSDB_EXPERIMENTAL)
add_fdb_test(TEST_FILES noSim/KeyValueStoreRocksDBTest.toml)
else()
add_fdb_test(TEST_FILES noSim/KeyValueStoreRocksDBTest.toml IGNORE) # re-enable as needed for RocksDB. Breaks correctness tests if RocksDB is disabled.
endif()
add_fdb_test(TEST_FILES rare/CheckRelocation.toml)
add_fdb_test(TEST_FILES rare/ClogUnclog.toml)
add_fdb_test(TEST_FILES rare/CloggedCycleWithKills.toml)
@ -317,6 +322,13 @@ if(WITH_PYTHON)
set_tests_properties("threadsafe_threadfuture_to_future/unit_tests" PROPERTIES ENVIRONMENT UBSAN_OPTIONS=print_stacktrace=1:halt_on_error=1)
endif()
if(NOT OPEN_FOR_IDE)
add_test(
NAME command_line_argument_test
COMMAND ${Python_EXECUTABLE} ${CMAKE_SOURCE_DIR}/tests/argument_parsing/test_argument_parsing.py ${CMAKE_BINARY_DIR}
)
endif()
verify_testing()
if (NOT OPEN_FOR_IDE AND NOT WIN32)
create_correctness_package()

View File

@ -328,6 +328,8 @@ def run_simulation_test(basedir, options):
pargs.append('on')
if options.crash:
pargs.append('--crash')
# Use old style argument with underscores because old binaries don't support hyphens
pargs.append('--trace_format')
pargs.append(options.log_format)
test_dir = td.get_current_test_dir()

View File

@ -24,32 +24,32 @@ class LocalCluster:
[fdbmonitor]
[general]
restart_delay = 10
## by default, restart_backoff = restart_delay_reset_interval = restart_delay
# initial_restart_delay = 0
# restart_backoff = 60
# restart_delay_reset_interval = 60
cluster_file = {etcdir}/fdb.cluster
# delete_envvars =
# kill_on_configuration_change = true
restart-delay = 10
## by default, restart-backoff = restart-delay-reset-interval = restart-delay
# initial-restart-delay = 0
# restart-backoff = 60
# restart-delay-reset-interval = 60
cluster-file = {etcdir}/fdb.cluster
# delete-envvars =
# kill-on-configuration-change = true
## Default parameters for individual fdbserver processes
[fdbserver]
command = {fdbserver_bin}
public_address = auto:$ID
listen_address = public
public-address = auto:$ID
listen-address = public
datadir = {datadir}/$ID
logdir = {logdir}
# logsize = 10MiB
# maxlogssize = 100MiB
# machine_id =
# datacenter_id =
# machine-id =
# datacenter-id =
# class =
# memory = 8GiB
# storage_memory = 1GiB
# cache_memory = 2GiB
# metrics_cluster =
# metrics_prefix =
# storage-memory = 1GiB
# cache-memory = 2GiB
# metrics-cluster =
# metrics-prefix =
## An individual fdbserver process with id 4000
## Parameters set here override defaults from the [fdbserver] section

View File

@ -1,30 +1,34 @@
#!/usr/bin/env python3
import glob
import os
import shutil
import subprocess
import sys
import socket
from local_cluster import LocalCluster
from argparse import ArgumentParser, RawDescriptionHelpFormatter
from random import choice
from pathlib import Path
class TempCluster:
def __init__(self, build_dir: str, process_number: int = 1, port: str = None):
self.build_dir = Path(build_dir).resolve()
assert self.build_dir.exists(), "{} does not exist".format(build_dir)
assert self.build_dir.is_dir(), "{} is not a directory".format(build_dir)
tmp_dir = self.build_dir.joinpath(
'tmp',
''.join(choice(LocalCluster.valid_letters_for_secret) for i in range(16)))
"tmp",
"".join(choice(LocalCluster.valid_letters_for_secret) for i in range(16)),
)
tmp_dir.mkdir(parents=True)
self.cluster = LocalCluster(tmp_dir,
self.build_dir.joinpath('bin', 'fdbserver'),
self.build_dir.joinpath('bin', 'fdbmonitor'),
self.build_dir.joinpath('bin', 'fdbcli'),
process_number,
port = port)
self.cluster = LocalCluster(
tmp_dir,
self.build_dir.joinpath("bin", "fdbserver"),
self.build_dir.joinpath("bin", "fdbmonitor"),
self.build_dir.joinpath("bin", "fdbcli"),
process_number,
port=port,
)
self.log = self.cluster.log
self.etc = self.cluster.etc
self.data = self.cluster.data
@ -40,13 +44,14 @@ class TempCluster:
shutil.rmtree(self.tmp_dir)
def close(self):
self.cluster.__exit__(None,None,None)
self.cluster.__exit__(None, None, None)
shutil.rmtree(self.tmp_dir)
if __name__ == '__main__':
parser = ArgumentParser(formatter_class=RawDescriptionHelpFormatter,
description="""
if __name__ == "__main__":
parser = ArgumentParser(
formatter_class=RawDescriptionHelpFormatter,
description="""
This script automatically configures a temporary local cluster on the machine
and then calls a command while this cluster is running. As soon as the command
returns, the configured cluster is killed and all generated data is deleted.
@ -61,30 +66,72 @@ if __name__ == '__main__':
- All occurrences of @ETC_DIR@ will be replaced with the path to the configuration directory.
The environment variable FDB_CLUSTER_FILE is set to the generated cluster for the command if it is not set already.
""")
parser.add_argument('--build-dir', '-b', metavar='BUILD_DIRECTORY', help='FDB build directory', required=True)
parser.add_argument('cmd', metavar="COMMAND", nargs="+", help="The command to run")
parser.add_argument('--process-number', '-p', help="Number of fdb processes running", type=int, default=1)
""",
)
parser.add_argument(
"--build-dir",
"-b",
metavar="BUILD_DIRECTORY",
help="FDB build directory",
required=True,
)
parser.add_argument("cmd", metavar="COMMAND", nargs="+", help="The command to run")
parser.add_argument(
"--process-number",
"-p",
help="Number of fdb processes running",
type=int,
default=1,
)
args = parser.parse_args()
errcode = 1
with TempCluster(args.build_dir, args.process_number) as cluster:
print("log-dir: {}".format(cluster.log))
print("etc-dir: {}".format(cluster.etc))
print("data-dir: {}".format(cluster.data))
print("cluster-file: {}".format(cluster.etc.joinpath('fdb.cluster')))
print("cluster-file: {}".format(cluster.etc.joinpath("fdb.cluster")))
cmd_args = []
for cmd in args.cmd:
if cmd == '@CLUSTER_FILE@':
cmd_args.append(str(cluster.etc.joinpath('fdb.cluster')))
elif cmd == '@DATA_DIR@':
if cmd == "@CLUSTER_FILE@":
cmd_args.append(str(cluster.etc.joinpath("fdb.cluster")))
elif cmd == "@DATA_DIR@":
cmd_args.append(str(cluster.data))
elif cmd == '@LOG_DIR@':
elif cmd == "@LOG_DIR@":
cmd_args.append(str(cluster.log))
elif cmd == '@ETC_DIR@':
elif cmd == "@ETC_DIR@":
cmd_args.append(str(cluster.etc))
else:
cmd_args.append(cmd)
env = dict(**os.environ)
env['FDB_CLUSTER_FILE'] = env.get('FDB_CLUSTER_FILE', cluster.etc.joinpath('fdb.cluster'))
errcode = subprocess.run(cmd_args, stdout=sys.stdout, stderr=sys.stderr, env=env).returncode
env["FDB_CLUSTER_FILE"] = env.get(
"FDB_CLUSTER_FILE", cluster.etc.joinpath("fdb.cluster")
)
errcode = subprocess.run(
cmd_args, stdout=sys.stdout, stderr=sys.stderr, env=env
).returncode
sev40s = (
subprocess.getoutput(
"grep -r 'Severity=\"40\"' {}".format(cluster.log.as_posix())
)
.rstrip()
.splitlines()
)
for line in sev40s:
# When running ASAN we expect to see this message. Boost coroutine should be using the correct asan annotations so that it shouldn't produce any false positives.
if line.endswith(
"WARNING: ASan doesn't fully support makecontext/swapcontext functions and may produce false positives in some cases!"
):
continue
print(">>>>>>>>>>>>>>>>>>>> Found severity 40 events - the test fails")
errcode = 1
break
if errcode:
for log_file in glob.glob(os.path.join(cluster.log, "*")):
print(">>>>>>>>>>>>>>>>>>>> Contents of {}:".format(log_file))
with open(log_file, "r") as f:
print(f.read())
sys.exit(errcode)

View File

@ -0,0 +1,97 @@
#!/usr/bin/env python3
#
# test_argument_parsing.py
#
# This source file is part of the FoundationDB open source project
#
# Copyright 2013-2021 Apple Inc. and the FoundationDB project authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import subprocess
last_command_output = None


def check(condition):
    """Assert `condition`, attaching the most recent command output to the failure message.

    Fixed: `last_command_output` starts as None, and string concatenation with
    None raises TypeError instead of the intended AssertionError; str() guards it.
    """
    global last_command_output
    assert condition, 'Command output:\n' + str(last_command_output)
def run_command(command, args):
    """Run `command` extended with `args`, capture combined stdout/stderr,
    remember the stripped text in the module-global `last_command_output`,
    and return it."""
    global last_command_output
    completed = subprocess.run(command + args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    text = completed.stdout.decode('utf-8')
    last_command_output = text.strip()
    return last_command_output
def is_unknown_option(output):
    # The binary rejected a flag it does not recognize.
    prefix = 'ERROR: unknown option'
    return output[:len(prefix)] == prefix
def is_unknown_knob(output):
    # The binary failed to apply a --knob-* option it does not know about.
    prefix = 'ERROR: Failed to set knob option'
    return output[:len(prefix)] == prefix
def is_cli_usage(output):
    # fdbcli printed its usage banner, which begins with this string.
    prefix = 'FoundationDB CLI'
    return output[:len(prefix)] == prefix
def test_fdbserver(build_dir):
    """Verify fdbserver's argument parsing: unknown options are rejected, and
    both hyphen and underscore spellings of options and knob names are accepted.

    Fixed: the function took `build_dir` but read the global `args.build_dir`;
    it now uses its parameter. Also normalized a stray `check( not ...)` space.
    """
    command = [build_dir + '/bin/fdbserver', '-r', 'unittests']
    check(is_unknown_option(run_command(command, ['--unknown-option'])))
    check(not is_unknown_option(run_command(command, ['--cluster-file', 'foo'])))
    check(not is_unknown_option(run_command(command, ['--cluster_file', 'foo'])))
    check(is_unknown_knob(run_command(command, ['--knob-fake-knob', 'foo'])))
    # All hyphen/underscore mixtures of a real knob name must be accepted.
    check(not is_unknown_knob(run_command(command, ['--knob-min-trace-severity', '5'])))
    check(not is_unknown_knob(run_command(command, ['--knob-min_trace_severity', '5'])))
    check(not is_unknown_knob(run_command(command, ['--knob_min_trace_severity', '5'])))
    check(not is_unknown_knob(run_command(command, ['--knob_min-trace-severity', '5'])))
def test_fdbcli(build_dir):
    """Verify fdbcli's argument parsing: bad options trigger the usage banner,
    while hyphen/underscore spellings of options and knob names are accepted.

    Fixed: the function took `build_dir` but read the global `args.build_dir`;
    it now uses its parameter.
    """
    command = [build_dir + '/bin/fdbcli', '--exec', 'begin']
    check(is_cli_usage(run_command(command, ['--unknown-option'])))
    check(not is_cli_usage(run_command(command, ['--api-version', '700'])))
    check(not is_cli_usage(run_command(command, ['--api_version', '700'])))
    check(is_unknown_knob(run_command(command, ['--knob-fake-knob', 'foo'])))
    # All hyphen/underscore mixtures of a real knob name must be accepted.
    check(not is_unknown_knob(run_command(command, ['--knob-min-trace-severity', '5'])))
    check(not is_unknown_knob(run_command(command, ['--knob-min_trace_severity', '5'])))
    check(not is_unknown_knob(run_command(command, ['--knob_min_trace_severity', '5'])))
    check(not is_unknown_knob(run_command(command, ['--knob_min-trace-severity', '5'])))
def test_fdbbackup(build_dir):
    """Verify fdbbackup's argument parsing: unknown options are rejected, and
    both hyphen and underscore spellings of options and knob names are accepted.

    Fixed: the function took `build_dir` but read the global `args.build_dir`;
    it now uses its parameter.
    """
    command = [build_dir + '/bin/fdbbackup', 'list']
    check(is_unknown_option(run_command(command, ['--unknown-option'])))
    check(not is_unknown_option(run_command(command, ['--trace-format', 'foo'])))
    check(not is_unknown_option(run_command(command, ['--trace_format', 'foo'])))
    check(is_unknown_knob(run_command(command, ['--knob-fake-knob', 'foo'])))
    # All hyphen/underscore mixtures of a real knob name must be accepted.
    check(not is_unknown_knob(run_command(command, ['--knob-min-trace-severity', '5'])))
    check(not is_unknown_knob(run_command(command, ['--knob-min_trace_severity', '5'])))
    check(not is_unknown_knob(run_command(command, ['--knob_min_trace_severity', '5'])))
    check(not is_unknown_knob(run_command(command, ['--knob_min-trace-severity', '5'])))
if __name__ == '__main__':
    # Parse the single positional build directory, then exercise the argument
    # parsing of each binary in turn.
    parser = argparse.ArgumentParser(description="This test checks for proper command line argument parsing.")
    parser.add_argument('build_dir', metavar='BUILD_DIRECTORY', help='FDB build directory')
    args = parser.parse_args()
    for test_binary in (test_fdbserver, test_fdbcli, test_fdbbackup):
        test_binary(args.build_dir)

View File

@ -0,0 +1,9 @@
# Runs the RocksDB key-value store unit tests outside of simulation
# (no simulated database, no startup delay).
[[test]]
testTitle = 'UnitTests'
useDB = false
startDelay = 0
# Workload that selects matching fdbserver unit tests by path prefix.
[[test.workload]]
testName = 'UnitTests'
maxTestCases = 10
testsMatching = 'noSim/fdbserver/KeyValueStoreRocksDB/'