Merge pull request #3254 from apple/release-6.3
Merge release-6.3 into master
This commit is contained in:
commit
135b88d85f
|
@ -139,7 +139,7 @@ Accessing cluster file information from a client
|
|||
Any client connected to FoundationDB can access information about its cluster file directly from the database:
|
||||
|
||||
* To get the path to the cluster file, read the key ``\xFF\xFF/cluster_file_path``.
|
||||
* To get the contents of the cluster file, read the key ``\xFF\xFF/connection_string``.
|
||||
* To get the desired contents of the cluster file, read the key ``\xFF\xFF/connection_string``. Make sure the client can write to the cluster file and keep it up to date.
|
||||
|
||||
.. _ipv6-support:
|
||||
|
||||
|
|
|
@ -106,6 +106,13 @@ Set the process using ``configure [proxies|resolvers|logs]=<N>``, where ``<N>``
|
|||
|
||||
For recommendations on appropriate values for process types in large clusters, see :ref:`guidelines-process-class-config`.
|
||||
|
||||
consistencycheck
|
||||
----------------
|
||||
|
||||
The ``consistencycheck`` command enables or disables consistency checking. Its syntax is ``consistencycheck [on|off]``. Calling it with ``on`` enables consistency checking, and ``off`` disables it. Calling it with no arguments displays whether consistency checking is currently enabled.
|
||||
|
||||
You must be running an ``fdbserver`` process with the ``consistencycheck`` role to perform consistency checking.
|
||||
|
||||
coordinators
|
||||
------------
|
||||
|
||||
|
@ -146,6 +153,12 @@ The format should be the same as the value of the ``configuration`` entry in sta
|
|||
|
||||
The ``new`` option, if present, initializes a new database with the given configuration rather than changing the configuration of an existing one.
|
||||
|
||||
force_recovery_with_data_loss
|
||||
-----------------------------
|
||||
|
||||
The ``force_recovery_with_data_loss`` command will recover a multi-region database to the specified datacenter. Its syntax is ``force_recovery_with_data_loss <DCID>``. It will likely result in the loss of the most recently committed mutations and is intended to be used if the primary datacenter has been lost.
|
||||
|
||||
This command will change the :ref:`region configuration <configuration-configuring-regions>` to have a positive priority for the chosen ``DCID`` and a negative priority for all other ``DCIDs``. It will also set ``usable_regions`` to 1. If the database has already recovered, this command does nothing.
|
||||
|
||||
get
|
||||
---
|
||||
|
@ -237,11 +250,45 @@ For each IP address or IP:port pair in ``<ADDRESS...>``, the command removes any
|
|||
|
||||
For information on adding machines to a cluster, see :ref:`adding-machines-to-a-cluster`.
|
||||
|
||||
kill
|
||||
----
|
||||
|
||||
The ``kill`` command attempts to kill one or more processes in the cluster.
|
||||
|
||||
``kill``
|
||||
|
||||
With no arguments, ``kill`` populates the list of processes that can be killed. This must be run prior to running any other ``kill`` commands.
|
||||
|
||||
``kill list``
|
||||
|
||||
Displays all known processes. This is only useful when the database is unresponsive.
|
||||
|
||||
``kill <ADDRESS...>``
|
||||
|
||||
Attempts to kill all specified processes. Each address should include the IP and port of the process being targeted.
|
||||
|
||||
``kill all``
|
||||
|
||||
Attempts to kill all known processes in the cluster.
|
||||
|
||||
lock
|
||||
----
|
||||
|
||||
The ``lock`` command locks the database with a randomly generated lockUID.
|
||||
|
||||
maintenance
|
||||
-----------
|
||||
|
||||
The ``maintenance`` command marks a particular zone ID (i.e. fault domain) as being under maintenance. Its syntax is ``maintenance [on|off] [ZONEID] [SECONDS]``.
|
||||
|
||||
A zone that is under maintenance will not have data moved away from it even if processes in that zone fail. In particular, this means the cluster will not attempt to heal the replication factor as a result of failures in the maintenance zone. This is useful when the amount of time that the processes in a fault domain are expected to be absent is reasonably short and you don't want to move data to and from the affected processes.
|
||||
|
||||
Running this command with no arguments will display the state of any current maintenance.
|
||||
|
||||
Running ``maintenance on <ZONEID> <SECONDS>`` will turn maintenance on for the specified zone. A duration must be specified for the length of maintenance mode.
|
||||
|
||||
Running ``maintenance off`` will turn off maintenance mode.
|
||||
|
||||
option
|
||||
------
|
||||
|
||||
|
@ -255,6 +302,39 @@ If there is an active transaction (one created with ``begin``), then enabled opt
|
|||
|
||||
Calling the ``option`` command with no parameters prints a list of all enabled options.
|
||||
|
||||
profile
|
||||
-------
|
||||
|
||||
The ``profile`` command is used to control various profiling actions.
|
||||
|
||||
client
|
||||
^^^^^^
|
||||
|
||||
``profile client <get|set>``
|
||||
|
||||
Reads or sets parameters of client transaction sampling. Use ``get`` to list the current parameters, and ``set <RATE|default> <SIZE|default>`` to set them. ``RATE`` is the fraction of transactions to be sampled, and ``SIZE`` is the amount (in bytes) of sampled data to store in the database.
|
||||
|
||||
list
|
||||
^^^^
|
||||
|
||||
``profile list``
|
||||
|
||||
Lists the processes that can be profiled using the ``flow`` and ``heap`` subcommands.
|
||||
|
||||
flow
|
||||
^^^^
|
||||
|
||||
``profile flow run <DURATION> <FILENAME> <PROCESS...>``
|
||||
|
||||
Enables flow profiling on the specified processes for ``DURATION`` seconds. Profiling output will be stored at the specified filename relative to the fdbserver process's trace log directory. To profile all processes, use ``all`` for the ``PROCESS`` parameter.
|
||||
|
||||
heap
|
||||
^^^^
|
||||
|
||||
``profile heap <PROCESS>``
|
||||
|
||||
Enables heap profiling for the specified process.
|
||||
|
||||
reset
|
||||
-----
|
||||
|
||||
|
@ -272,6 +352,18 @@ The ``set`` command sets a value for a given key. Its syntax is ``set <KEY> <VAL
|
|||
|
||||
Note that :ref:`characters can be escaped <cli-escaping>` when specifying keys (or values) in ``fdbcli``.
|
||||
|
||||
setclass
|
||||
--------
|
||||
|
||||
The ``setclass`` command can be used to change the :ref:`process class <guidelines-process-class-config>` for a given process. Its syntax is ``setclass [<ADDRESS> <CLASS>]``. If no arguments are specified, then the process classes of all processes are listed. Setting the class to ``default`` to revert to the process class specified on the command line.
|
||||
|
||||
The available process classes are ``unset``, ``storage``, ``transaction``, ``resolution``, ``proxy``, ``master``, ``test``, ``stateless``, ``log``, ``router``, ``cluster_controller``, ``fast_restore``, ``data_distributor``, ``coordinator``, ``ratekeeper``, ``storage_cache``, ``backup``, and ``default``.
|
||||
|
||||
sleep
|
||||
-----
|
||||
|
||||
The ``sleep`` command inserts a delay before running the next command. Its syntax is ``sleep <SECONDS>``. This command can be useful when ``fdbcli`` is run with the ``--exec`` flag to control the timing of commands.
|
||||
|
||||
.. _cli-status:
|
||||
|
||||
status
|
||||
|
@ -381,3 +473,16 @@ unlock
|
|||
------
|
||||
|
||||
The ``unlock`` command unlocks the database with the specified lock UID. Because this is a potentially dangerous operation, users must copy a passphrase before the unlock command is executed.
|
||||
|
||||
writemode
|
||||
---------
|
||||
|
||||
Controls whether or not ``fdbcli`` can perform sets and clears.
|
||||
|
||||
``writemode off``
|
||||
|
||||
Disables writing from ``fdbcli`` (the default). In this mode, attempting to set or clear keys will result in an error.
|
||||
|
||||
``writemode on``
|
||||
|
||||
Enables writing from ``fdbcli``.
|
||||
|
|
|
@ -756,6 +756,118 @@ If you only need to detect the *fact* of a change, and your response doesn't dep
|
|||
|
||||
.. _developer-guide-peformance-considerations:
|
||||
|
||||
|
||||
Special keys
|
||||
============
|
||||
|
||||
Keys starting with the bytes ``\xff\xff`` are called "special" keys, and they are materialized when read. :doc:`\\xff\\xff/status/json <mr-status>` is an example of a special key.
|
||||
As of api version 630, additional features have been exposed as special keys and are available to read as ranges instead of just individual keys. Additionally, the special keys are now organized into "modules".
|
||||
|
||||
Modules
|
||||
-------
|
||||
|
||||
A module is loosely defined as a key range in the special key space where a user can expect similar behavior from reading any key in that range.
|
||||
By default, users will see a ``special_keys_no_module_found`` error if they read from a range not contained in a module.
|
||||
The error indicates the read would always return an empty set of keys if it proceeded. This could be caused by a typo in the keys to read.
|
||||
Users will also (by default) see a ``special_keys_cross_module_read`` error if their read spans a module boundary.
|
||||
The error is to save the user from the surprise of seeing the behavior of multiple modules in the same read.
|
||||
Users may opt out of these restrictions by setting the ``special_key_space_relaxed`` transaction option.
|
||||
|
||||
Each special key that existed before api version 630 is its own module. These are
|
||||
|
||||
#. ``\xff\xff/cluster_file_path`` See :ref:`cluster file client access <cluster-file-client-access>`
|
||||
#. ``\xff\xff/connection_string`` See :ref:`cluster file client access <cluster-file-client-access>`
|
||||
#. ``\xff\xff/status/json`` See :doc:`Machine-readable status <mr-status>`
|
||||
|
||||
Prior to api version 630, it was also possible to read a range starting at
|
||||
``\xff\xff/worker_interfaces``. This is mostly an implementation detail of fdbcli,
|
||||
but it's available in api version 630 as a module with prefix ``\xff\xff/worker_interfaces/``.
|
||||
|
||||
Api version 630 includes two new modules with prefixes
|
||||
``\xff\xff/transaction/`` (information about the current transaction), and
|
||||
``\xff\xff/metrics/`` (various metrics, not transactional).
|
||||
|
||||
Transaction module
|
||||
------------------
|
||||
|
||||
Reads from the transaction module generally do not require an rpc and only inspect in-memory state for the current transaction.
|
||||
|
||||
There are three sets of keys exposed by the transaction module, and each set uses the same encoding, so let's first describe that encoding.
|
||||
|
||||
Let's say we have a set of keys represented as intervals of the form ``begin1 <= k < end1 && begin2 <= k < end2 && ...``.
|
||||
It could be the case that some of the intervals overlap, e.g. if ``begin1 <= begin2 < end1``, or are adjacent, e.g. if ``end1 == begin2``.
|
||||
If we merge all overlapping/adjacent intervals then sort, we end up with a canonical representation of this set of keys.
|
||||
|
||||
We encode this canonical set as ordered key value pairs like this::
|
||||
|
||||
<namespace><begin1> -> "1"
|
||||
<namespace><end1> -> "0"
|
||||
<namespace><begin2> -> "1"
|
||||
<namespace><end2> -> "0"
|
||||
...
|
||||
|
||||
Python example::
|
||||
|
||||
>>> tr = db.create_transaction()
|
||||
>>> tr.add_read_conflict_key('foo')
|
||||
>>> tr.add_read_conflict_range('bar/', 'bar0')
|
||||
>>> for k, v in tr.get_range_startswith('\xff\xff/transaction/read_conflict_range/'):
|
||||
... print(k, v)
|
||||
...
|
||||
('\xff\xff/transaction/read_conflict_range/bar/', '1')
|
||||
('\xff\xff/transaction/read_conflict_range/bar0', '0')
|
||||
('\xff\xff/transaction/read_conflict_range/foo', '1')
|
||||
('\xff\xff/transaction/read_conflict_range/foo\x00', '0')
|
||||
|
||||
For read-your-writes transactions, this canonical encoding of conflict ranges
|
||||
is already available in memory, and so requesting small ranges is
|
||||
correspondingly cheaper than large ranges.
|
||||
|
||||
For transactions with read-your-writes disabled, this canonical encoding is computed on
|
||||
every read, so you're paying the full cost in CPU time whether or not you
|
||||
request a small range.
|
||||
|
||||
The namespaces for sets of keys are
|
||||
|
||||
#. ``\xff\xff/transaction/read_conflict_range/`` This is the set of keys that will be used for read conflict detection. If another transaction writes to any of these keys after this transaction's read version, then this transaction won't commit.
|
||||
#. ``\xff\xff/transaction/write_conflict_range/`` This is the set of keys that will be used for write conflict detection. Keys in this range may cause other transactions which read these keys to abort if this transaction commits.
|
||||
#. ``\xff\xff/transaction/conflicting_keys/`` If this transaction failed due to a conflict, it must be the case that some transaction attempted [#conflicting_keys]_ to commit with a write conflict range that intersects this transaction's read conflict range. This is the subset of your read conflict range that actually intersected a write conflict from another transaction.
|
||||
|
||||
Caveats
|
||||
~~~~~~~
|
||||
|
||||
#. ``\xff\xff/transaction/read_conflict_range/`` The conflict range for a read is sometimes not known until that read completes (e.g. range reads with limits, key selectors). When you read from these special keys, the returned future first blocks until all pending reads are complete so it can give an accurate response.
|
||||
#. ``\xff\xff/transaction/write_conflict_range/`` The conflict range for a ``set_versionstamped_key`` atomic op is not known until commit time. You'll get an approximate range (the actual range will be a subset of the approximate range) until the precise range is known.
|
||||
#. ``\xff\xff/transaction/conflicting_keys/`` Since using this feature costs server (i.e., proxy and resolver) resources, it's disabled by default. You must opt in by setting the ``report_conflicting_keys`` transaction option.
|
||||
|
||||
Metrics module
|
||||
--------------
|
||||
|
||||
Reads in the metrics module are not transactional and may require rpcs to complete.
|
||||
|
||||
The key ``\xff\xff/metrics/data_distribution_stats/<begin>`` represent stats about the shard that begins at ``<begin>``. The value is a json object with a "ShardBytes" field. More fields may be added in the future.
|
||||
|
||||
A user can see stats about data distribution like so::
|
||||
|
||||
>>> for k, v in db.get_range_startswith('\xff\xff/metrics/data_distribution_stats/'):
|
||||
... print(k, v)
|
||||
...
|
||||
('\xff\xff/metrics/data_distribution_stats/', '{"ShardBytes":330000}')
|
||||
('\xff\xff/metrics/data_distribution_stats/mako00509', '{"ShardBytes":330000}')
|
||||
('\xff\xff/metrics/data_distribution_stats/mako0099', '{"ShardBytes":330000}')
|
||||
('\xff\xff/metrics/data_distribution_stats/mako01468', '{"ShardBytes":297000}')
|
||||
('\xff\xff/metrics/data_distribution_stats/mako023', '{"ShardBytes":264000}')
|
||||
('\xff\xff/metrics/data_distribution_stats/mako0289', '{"ShardBytes":297000}')
|
||||
('\xff\xff/metrics/data_distribution_stats/mako037', '{"ShardBytes":330000}')
|
||||
('\xff\xff/metrics/data_distribution_stats/mako042', '{"ShardBytes":264000}')
|
||||
('\xff\xff/metrics/data_distribution_stats/mako0457', '{"ShardBytes":297000}')
|
||||
('\xff\xff/metrics/data_distribution_stats/mako0524', '{"ShardBytes":264000}')
|
||||
('\xff\xff/metrics/data_distribution_stats/mako058', '{"ShardBytes":297000}')
|
||||
('\xff\xff/metrics/data_distribution_stats/mako064', '{"ShardBytes":297000}')
|
||||
('\xff\xff/metrics/data_distribution_stats/mako0718', '{"ShardBytes":264000}')
|
||||
('\xff\xff/metrics/data_distribution_stats/mako083', '{"ShardBytes":297000}')
|
||||
('\xff\xff/metrics/data_distribution_stats/mako0909', '{"ShardBytes":741000}')
|
||||
|
||||
Performance considerations
|
||||
==========================
|
||||
|
||||
|
@ -852,11 +964,11 @@ Versions are generated by the process that runs the *master* role. FoundationDB
|
|||
|
||||
In order to assign read and commit versions to transactions, a client will never talk to the master. Instead it will get both from a proxy. Getting a read version is more complex than a commit version. Let's first look at commit versions:
|
||||
|
||||
1. The client will send a commit message to a proxy.
|
||||
1. The proxy will put this commit message in a queue in order to build a batch.
|
||||
1. In parallel, the proxy will ask for a new version from the master (note that this means that only proxies will ever ask for new versions - which scales much better as it puts less stress on the network).
|
||||
1. The proxy will then resolve all transactions within that batch (discussed later) and assign the version it got from the master to *all* transactions within that batch. It will then write the transactions to the transaction log system to make it durable.
|
||||
1. If the transaction succeeded, it will send back the version as commit version to the client. Otherwise it will send back an error.
|
||||
#. The client will send a commit message to a proxy.
|
||||
#. The proxy will put this commit message in a queue in order to build a batch.
|
||||
#. In parallel, the proxy will ask for a new version from the master (note that this means that only proxies will ever ask for new versions - which scales much better as it puts less stress on the network).
|
||||
#. The proxy will then resolve all transactions within that batch (discussed later) and assign the version it got from the master to *all* transactions within that batch. It will then write the transactions to the transaction log system to make it durable.
|
||||
#. If the transaction succeeded, it will send back the version as commit version to the client. Otherwise it will send back an error.
|
||||
|
||||
As mentioned before, the algorithm to assign read versions is a bit more complex. At the start of a transaction, a client will ask a proxy server for a read version. The proxy will reply with the last committed version as of the time it received the request - this is important to guarantee external consistency. This is how this is achieved:
|
||||
|
||||
|
@ -955,3 +1067,5 @@ The trickiest errors are non-retryable errors. ``Transaction.on_error`` will ret
|
|||
If you see one of those errors, the best way of action is to fail the client.
|
||||
|
||||
At a first glance this looks very similar to a ``commit_unknown_result``. However, these errors lack the one guarantee ``commit_unknown_result`` still gives to the user: if the commit has already been sent to the database, the transaction could get committed at a later point in time. This means that if you retry the transaction, your new transaction might race with the old transaction. While this technically doesn't violate any consistency guarantees, abandoning a transaction means that there are no causality guarantees.
|
||||
|
||||
.. [#conflicting_keys] In practice, the transaction probably committed successfully. However, if you're running multiple resolvers then it's possible for a transaction to cause another to abort even if it doesn't commit successfully.
|
||||
|
|
|
@ -20,6 +20,7 @@ if(NOT OPEN_FOR_IDE)
|
|||
if(GENERATE_DEBUG_PACKAGES)
|
||||
fdb_install(TARGETS fdbbackup DESTINATION bin COMPONENT clients)
|
||||
else()
|
||||
add_custom_target(prepare_fdbbackup_install ALL DEPENDS strip_only_fdbbackup)
|
||||
fdb_install(FILES ${CMAKE_BINARY_DIR}/packages/bin/fdbbackup DESTINATION bin COMPONENT clients)
|
||||
endif()
|
||||
install_symlink(
|
||||
|
|
|
@ -14,5 +14,6 @@ target_link_libraries(fdbcli PRIVATE fdbclient)
|
|||
if(GENERATE_DEBUG_PACKAGES)
|
||||
fdb_install(TARGETS fdbcli DESTINATION bin COMPONENT clients)
|
||||
else()
|
||||
add_custom_target(prepare_fdbcli_install ALL DEPENDS strip_only_fdbcli)
|
||||
fdb_install(FILES ${CMAKE_BINARY_DIR}/packages/bin/fdbcli DESTINATION bin COMPONENT clients)
|
||||
endif()
|
||||
|
|
|
@ -481,26 +481,26 @@ void initHelp() {
|
|||
"change cluster coordinators or description",
|
||||
"If 'auto' is specified, coordinator addresses will be choosen automatically to support the configured redundancy level. (If the current set of coordinators are healthy and already support the redundancy level, nothing will be changed.)\n\nOtherwise, sets the coordinators to the list of IP:port pairs specified by <ADDRESS>+. An fdbserver process must be running on each of the specified addresses.\n\ne.g. coordinators 10.0.0.1:4000 10.0.0.2:4000 10.0.0.3:4000\n\nIf 'description=desc' is specified then the description field in the cluster\nfile is changed to desc, which must match [A-Za-z0-9_]+.");
|
||||
helpMap["exclude"] =
|
||||
CommandHelp("exclude [FORCE] [failed] [no_wait] <ADDRESS>*", "exclude servers from the database",
|
||||
CommandHelp("exclude [FORCE] [failed] [no_wait] <ADDRESS...>", "exclude servers from the database",
|
||||
"If no addresses are specified, lists the set of excluded servers.\n\nFor each IP address or "
|
||||
"IP:port pair in <ADDRESS>*, adds the address to the set of excluded servers then waits until all "
|
||||
"IP:port pair in <ADDRESS...>, adds the address to the set of excluded servers then waits until all "
|
||||
"database state has been safely moved away from the specified servers. If 'no_wait' is set, the "
|
||||
"command returns \nimmediately without checking if the exclusions have completed successfully.\n"
|
||||
"If 'FORCE' is set, the command does not perform safety checks before excluding.\n"
|
||||
"If 'failed' is set, the transaction log queue is dropped pre-emptively before waiting\n"
|
||||
"for data movement to finish and the server cannot be included again.");
|
||||
helpMap["include"] = CommandHelp(
|
||||
"include all|<ADDRESS>*",
|
||||
"include all|<ADDRESS...>",
|
||||
"permit previously-excluded servers to rejoin the database",
|
||||
"If `all' is specified, the excluded servers list is cleared.\n\nFor each IP address or IP:port pair in <ADDRESS>*, removes any matching exclusions from the excluded servers list. (A specified IP will match all IP:* exclusion entries)");
|
||||
"If `all' is specified, the excluded servers list is cleared.\n\nFor each IP address or IP:port pair in <ADDRESS...>, removes any matching exclusions from the excluded servers list. (A specified IP will match all IP:* exclusion entries)");
|
||||
helpMap["setclass"] = CommandHelp(
|
||||
"setclass <ADDRESS> <unset|storage|transaction|default>",
|
||||
"setclass [<ADDRESS> <CLASS>]",
|
||||
"change the class of a process",
|
||||
"If no address and class are specified, lists the classes of all servers.\n\nSetting the class to `default' resets the process class to the class specified on the command line.");
|
||||
"If no address and class are specified, lists the classes of all servers.\n\nSetting the class to `default' resets the process class to the class specified on the command line. The available classes are `unset', `storage', `transaction', `resolution', `proxy', `master', `test', `unset', `stateless', `log', `router', `cluster_controller', `fast_restore', `data_distributor', `coordinator', `ratekeeper', `storage_cache', `backup', and `default'.");
|
||||
helpMap["status"] = CommandHelp(
|
||||
"status [minimal|details|json]",
|
||||
"get the status of a FoundationDB cluster",
|
||||
"If the cluster is down, this command will print a diagnostic which may be useful in figuring out what is wrong. If the cluster is running, this command will print cluster statistics.\n\nSpecifying 'minimal' will provide a minimal description of the status of your database.\n\nSpecifying 'details' will provide load information for individual workers.\n\nSpecifying 'json' will provide status information in a machine readable JSON format.");
|
||||
"If the cluster is down, this command will print a diagnostic which may be useful in figuring out what is wrong. If the cluster is running, this command will print cluster statistics.\n\nSpecifying `minimal' will provide a minimal description of the status of your database.\n\nSpecifying `details' will provide load information for individual workers.\n\nSpecifying `json' will provide status information in a machine readable JSON format.");
|
||||
helpMap["exit"] = CommandHelp("exit", "exit the CLI", "");
|
||||
helpMap["quit"] = CommandHelp();
|
||||
helpMap["waitconnected"] = CommandHelp();
|
||||
|
@ -553,9 +553,9 @@ void initHelp() {
|
|||
"enables or disables sets and clears",
|
||||
"Setting or clearing keys from the CLI is not recommended.");
|
||||
helpMap["kill"] = CommandHelp(
|
||||
"kill all|list|<ADDRESS>*",
|
||||
"kill all|list|<ADDRESS...>",
|
||||
"attempts to kill one or more processes in the cluster",
|
||||
"If no addresses are specified, populates the list of processes which can be killed. Processes cannot be killed before this list has been populated.\n\nIf `all' is specified, attempts to kill all known processes.\n\nIf `list' is specified, displays all known processes. This is only useful when the database is unresponsive.\n\nFor each IP:port pair in <ADDRESS>*, attempt to kill the specified process.");
|
||||
"If no addresses are specified, populates the list of processes which can be killed. Processes cannot be killed before this list has been populated.\n\nIf `all' is specified, attempts to kill all known processes.\n\nIf `list' is specified, displays all known processes. This is only useful when the database is unresponsive.\n\nFor each IP:port pair in <ADDRESS...>, attempt to kill the specified process.");
|
||||
helpMap["profile"] = CommandHelp(
|
||||
"profile <client|list|flow|heap> <action> <ARGS>",
|
||||
"namespace for all the profiling-related commands.",
|
||||
|
@ -1835,42 +1835,42 @@ ACTOR Future<bool> configure( Database db, std::vector<StringRef> tokens, Refere
|
|||
break;
|
||||
case ConfigurationResult::DATABASE_UNAVAILABLE:
|
||||
printf("ERROR: The database is unavailable\n");
|
||||
printf("Type `configure FORCE <TOKEN>*' to configure without this check\n");
|
||||
printf("Type `configure FORCE <TOKEN...>' to configure without this check\n");
|
||||
ret=true;
|
||||
break;
|
||||
case ConfigurationResult::STORAGE_IN_UNKNOWN_DCID:
|
||||
printf("ERROR: All storage servers must be in one of the known regions\n");
|
||||
printf("Type `configure FORCE <TOKEN>*' to configure without this check\n");
|
||||
printf("Type `configure FORCE <TOKEN...>' to configure without this check\n");
|
||||
ret=true;
|
||||
break;
|
||||
case ConfigurationResult::REGION_NOT_FULLY_REPLICATED:
|
||||
printf("ERROR: When usable_regions > 1, all regions with priority >= 0 must be fully replicated before changing the configuration\n");
|
||||
printf("Type `configure FORCE <TOKEN>*' to configure without this check\n");
|
||||
printf("Type `configure FORCE <TOKEN...>' to configure without this check\n");
|
||||
ret=true;
|
||||
break;
|
||||
case ConfigurationResult::MULTIPLE_ACTIVE_REGIONS:
|
||||
printf("ERROR: When changing usable_regions, only one region can have priority >= 0\n");
|
||||
printf("Type `configure FORCE <TOKEN>*' to configure without this check\n");
|
||||
printf("Type `configure FORCE <TOKEN...>' to configure without this check\n");
|
||||
ret=true;
|
||||
break;
|
||||
case ConfigurationResult::REGIONS_CHANGED:
|
||||
printf("ERROR: The region configuration cannot be changed while simultaneously changing usable_regions\n");
|
||||
printf("Type `configure FORCE <TOKEN>*' to configure without this check\n");
|
||||
printf("Type `configure FORCE <TOKEN...>' to configure without this check\n");
|
||||
ret=true;
|
||||
break;
|
||||
case ConfigurationResult::NOT_ENOUGH_WORKERS:
|
||||
printf("ERROR: Not enough processes exist to support the specified configuration\n");
|
||||
printf("Type `configure FORCE <TOKEN>*' to configure without this check\n");
|
||||
printf("Type `configure FORCE <TOKEN...>' to configure without this check\n");
|
||||
ret=true;
|
||||
break;
|
||||
case ConfigurationResult::REGION_REPLICATION_MISMATCH:
|
||||
printf("ERROR: `three_datacenter' replication is incompatible with region configuration\n");
|
||||
printf("Type `configure FORCE <TOKEN>*' to configure without this check\n");
|
||||
printf("Type `configure FORCE <TOKEN...>' to configure without this check\n");
|
||||
ret=true;
|
||||
break;
|
||||
case ConfigurationResult::DCID_MISSING:
|
||||
printf("ERROR: `No storage servers in one of the specified regions\n");
|
||||
printf("Type `configure FORCE <TOKEN>*' to configure without this check\n");
|
||||
printf("Type `configure FORCE <TOKEN...>' to configure without this check\n");
|
||||
ret=true;
|
||||
break;
|
||||
case ConfigurationResult::SUCCESS:
|
||||
|
@ -1997,12 +1997,12 @@ ACTOR Future<bool> fileConfigure(Database db, std::string filePath, bool isNewDa
|
|||
break;
|
||||
case ConfigurationResult::REGION_REPLICATION_MISMATCH:
|
||||
printf("ERROR: `three_datacenter' replication is incompatible with region configuration\n");
|
||||
printf("Type `fileconfigure FORCE <TOKEN>*' to configure without this check\n");
|
||||
printf("Type `fileconfigure FORCE <TOKEN...>' to configure without this check\n");
|
||||
ret=true;
|
||||
break;
|
||||
case ConfigurationResult::DCID_MISSING:
|
||||
printf("ERROR: `No storage servers in one of the specified regions\n");
|
||||
printf("Type `fileconfigure FORCE <TOKEN>*' to configure without this check\n");
|
||||
printf("Type `fileconfigure FORCE <TOKEN...>' to configure without this check\n");
|
||||
ret=true;
|
||||
break;
|
||||
case ConfigurationResult::SUCCESS:
|
||||
|
@ -2191,7 +2191,7 @@ ACTOR Future<bool> exclude( Database db, std::vector<StringRef> tokens, Referenc
|
|||
"Please check that this exclusion does not bring down an entire storage team.\n"
|
||||
"Please also ensure that the exclusion will keep a majority of coordinators alive.\n"
|
||||
"You may add more storage processes or coordinators to make the operation safe.\n"
|
||||
"Type `exclude FORCE failed <ADDRESS>*' to exclude without performing safety checks.\n";
|
||||
"Type `exclude FORCE failed <ADDRESS...>' to exclude without performing safety checks.\n";
|
||||
printf("%s", errorStr.c_str());
|
||||
return true;
|
||||
}
|
||||
|
@ -2200,7 +2200,7 @@ ACTOR Future<bool> exclude( Database db, std::vector<StringRef> tokens, Referenc
|
|||
|
||||
state std::string errorString = "ERROR: Could not calculate the impact of this exclude on the total free space in the cluster.\n"
|
||||
"Please try the exclude again in 30 seconds.\n"
|
||||
"Type `exclude FORCE <ADDRESS>*' to exclude without checking free space.\n";
|
||||
"Type `exclude FORCE <ADDRESS...>' to exclude without checking free space.\n";
|
||||
|
||||
StatusObjectReader statusObj(status);
|
||||
|
||||
|
@ -2276,7 +2276,7 @@ ACTOR Future<bool> exclude( Database db, std::vector<StringRef> tokens, Referenc
|
|||
|
||||
if( ssExcludedCount==ssTotalCount || (1-worstFreeSpaceRatio)*ssTotalCount/(ssTotalCount-ssExcludedCount) > 0.9 ) {
|
||||
printf("ERROR: This exclude may cause the total free space in the cluster to drop below 10%%.\n"
|
||||
"Type `exclude FORCE <ADDRESS>*' to exclude without checking free space.\n");
|
||||
"Type `exclude FORCE <ADDRESS...>' to exclude without checking free space.\n");
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
@ -3562,7 +3562,7 @@ ACTOR Future<int> cli(CLIOptions opt, LineNoise* plinenoise) {
|
|||
}
|
||||
if (tokencmp(tokens[2], "run")) {
|
||||
if (tokens.size() < 6) {
|
||||
printf("ERROR: Usage: profile flow run <duration in seconds> <filename> <hosts>\n");
|
||||
printf("ERROR: Usage: profile flow run <DURATION_IN_SECONDS> <FILENAME> <PROCESS...>\n");
|
||||
is_error = true;
|
||||
continue;
|
||||
}
|
||||
|
@ -3629,7 +3629,7 @@ ACTOR Future<int> cli(CLIOptions opt, LineNoise* plinenoise) {
|
|||
}
|
||||
if (tokencmp(tokens[1], "heap")) {
|
||||
if (tokens.size() != 3) {
|
||||
printf("ERROR: Usage: profile heap host\n");
|
||||
printf("ERROR: Usage: profile heap <PROCESS>\n");
|
||||
is_error = true;
|
||||
continue;
|
||||
}
|
||||
|
|
|
@ -13,6 +13,7 @@ target_link_libraries(fdbmonitor PUBLIC Threads::Threads)
|
|||
if(GENERATE_DEBUG_PACKAGES)
|
||||
fdb_install(TARGETS fdbmonitor DESTINATION fdbmonitor COMPONENT server)
|
||||
else()
|
||||
add_custom_target(prepare_fdbmonitor_install ALL DEPENDS strip_only_fdbmonitor)
|
||||
fdb_install(FILES ${CMAKE_BINARY_DIR}/packages/bin/fdbmonitor DESTINATION fdbmonitor COMPONENT server)
|
||||
endif()
|
||||
|
||||
|
|
|
@ -239,5 +239,6 @@ endif()
|
|||
if(GENERATE_DEBUG_PACKAGES)
|
||||
fdb_install(TARGETS fdbserver DESTINATION sbin COMPONENT server)
|
||||
else()
|
||||
add_custom_target(prepare_fdbserver_install ALL DEPENDS strip_only_fdbserver)
|
||||
  fdb_install(FILES ${CMAKE_BINARY_DIR}/packages/bin/fdbserver DESTINATION sbin COMPONENT server)
|
||||
endif()
|
||||
|
|
Loading…
Reference in New Issue