Merge branch 'release-5.2' into directory-tester-cleanup

This commit is contained in:
A.J. Beamon 2018-06-26 14:56:09 -07:00
commit 2ed452353f
42 changed files with 460 additions and 417 deletions

View File

@ -285,7 +285,20 @@ bool FDBLibTLSPolicy::set_verify_peers(int count, const uint8_t* verify_peers[],
for (int i = 0; i < count; i++) {
try {
Reference<FDBLibTLSVerify> verify = Reference<FDBLibTLSVerify>(new FDBLibTLSVerify(std::string((const char*)verify_peers[i], verify_peers_len[i])));
std::string verifyString((const char*)verify_peers[i], verify_peers_len[i]);
int start = 0;
while(start < verifyString.size()) {
int split = verifyString.find('|', start);
if(split == std::string::npos) {
break;
}
if(split == start || verifyString[split-1] != '\\') {
Reference<FDBLibTLSVerify> verify = Reference<FDBLibTLSVerify>(new FDBLibTLSVerify(verifyString.substr(start,split-start)));
verify_rules.push_back(verify);
start = split+1;
}
}
Reference<FDBLibTLSVerify> verify = Reference<FDBLibTLSVerify>(new FDBLibTLSVerify(verifyString.substr(start)));
verify_rules.push_back(verify);
} catch ( const std::runtime_error& e ) {
verify_rules.clear();
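
For reference, a minimal standalone sketch (not the FDB code above) of the splitting rule this hunk introduces: a verify-peers string is broken on each '|' unless the '|' is escaped with a backslash. The escape handling here uses a separate scan offset, and the field values are made up.

#include <cstdio>
#include <string>
#include <vector>

std::vector<std::string> splitVerifyPeers(const std::string& s) {
    std::vector<std::string> rules;
    size_t start = 0, pos = 0;
    while ((pos = s.find('|', pos)) != std::string::npos) {
        if (pos == start || s[pos - 1] != '\\') {
            rules.push_back(s.substr(start, pos - start)); // unescaped '|' ends the rule
            start = pos + 1;
        }
        pos += 1; // an escaped '|' stays inside the current rule; keep scanning
    }
    rules.push_back(s.substr(start)); // the final rule follows the last unescaped '|'
    return rules;
}

int main() {
    // "Check.Valid=0|S.OU=Dev\|Ops" yields two rules; the second keeps its escaped '|'.
    for (const auto& rule : splitVerifyPeers("Check.Valid=0|S.OU=Dev\\|Ops"))
        printf("%s\n", rule.c_str());
}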

View File

@ -69,6 +69,7 @@ static std::string de4514(std::string const& input, int start, int& out_end) {
case '<':
case '=':
case '>':
case '|':
case '\\':
output += input[p+1];
p += 2;
@ -135,7 +136,7 @@ static std::pair<std::string, std::string> splitPair(std::string const& input, c
static int abbrevToNID(std::string const& sn) {
int nid = NID_undef;
if (sn == "C" || sn == "CN" || sn == "L" || sn == "ST" || sn == "O" || sn == "OU")
if (sn == "C" || sn == "CN" || sn == "L" || sn == "ST" || sn == "O" || sn == "OU" || sn == "UID" || sn == "DC")
nid = OBJ_sn2nid(sn.c_str());
if (nid == NID_undef)
throw std::runtime_error("abbrevToNID");
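
Both newly accepted abbreviations resolve through OpenSSL's short-name lookup just like the existing ones. A tiny sketch using the real OBJ_sn2nid API (the printed values are OpenSSL's numeric NIDs):

#include <cstdio>
#include <openssl/objects.h>

// OBJ_sn2nid maps a short name to its numeric identifier; "DC" and "UID" are
// the abbreviations this hunk starts accepting.
int main() {
    printf("DC  -> %d\n", OBJ_sn2nid("DC"));  // NID_domainComponent
    printf("UID -> %d\n", OBJ_sn2nid("UID")); // NID_userId
}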

View File

@ -4,7 +4,7 @@
http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>com.apple.cie.foundationdb</groupId>
<groupId>org.foundationdb</groupId>
<artifactId>NAME</artifactId>
<version>VERSION</version>
<packaging>jar</packaging>

View File

@ -68,7 +68,7 @@ buildsphinx:
if [ ! -e $(SPHINXBUILD) ]; then \
mkdir $(BUILDDIR); \
cd $(BUILDDIR); \
curl -O $(VENV_URL); \
curl -OL $(VENV_URL); \
tar zxvf $(VENV_VERSION).tar.gz; \
./$(VENV_VERSION)/virtualenv.py venv; \
fi

View File

@ -10,6 +10,7 @@ Administration
:titlesonly:
moving-a-cluster
tls
This document covers the administration of an existing FoundationDB cluster. We recommend you read this document before setting up a cluster for performance testing or production use.
@ -268,7 +269,6 @@ Backup and DR:
Client time: 03/19/18 08:51:51
The summary fields are interpreted as follows:
====================== ==========================================================================================================
@ -452,6 +452,7 @@ Coordination servers:
10.0.4.5:4500 (reachable)
Client time: 03/19/18 08:59:37
Several details about individual FoundationDB processes are displayed in a list format in parentheses after the IP address and port:
======= =========================================================================
@ -633,59 +634,24 @@ You can now remove old client library versions from your clients. This is only t
Version-specific notes on upgrading
===================================
Upgrading from 5.1.x
Upgrading from 5.2.x
--------------------
Upgrades from 5.0.x will keep all your old data and configuration settings. 5.1 has a new backup format so backups will need to be restarted after upgrading.
Upgrades from 5.2.x will keep all your old data and configuration settings.
Upgrading from 5.0.x
--------------------
Upgrading from 4.4.x - 5.1.x
----------------------------
Upgrades from 5.0.x will keep all your old data and configuration settings.
Upgrades from versions between 4.4.x and 5.1.x will keep all your old data and configuration settings. Backups that are running will automatically be aborted and must be restarted.
Upgrading from 4.6.x
--------------------
Upgrading from 3.0.x - 4.3.x
----------------------------
Upgrades from 4.6.x will keep all your old data and configuration settings.
Upgrading from 4.5.x
--------------------
Upgrades from 4.5.x will keep all your old data and configuration settings.
Upgrading from 4.4.x
--------------------
Upgrades from 4.4.x will keep all your old data and configuration settings.
Upgrading from 4.3.x
--------------------
Backup and DR must be stopped before upgrading. Upgrades from 4.3.x will keep all your old data and configuration settings.
Upgrading from 4.2.x
--------------------
Backup and DR must be stopped before upgrading. Upgrades from 4.2.x will keep all your old data and configuration settings.
Upgrading from 4.1.x
--------------------
Backup and DR must be stopped before upgrading. Upgrades from 4.1.x will keep all your old data and configuration settings.
Upgrading from 4.0.x
--------------------
Backup and DR must be stopped before upgrading. Upgrades from 4.0.x will keep all your old data and configuration settings.
Upgrading from 3.x
--------------------
To upgrade from versions prior to 4.0, you should first upgrade to 4.0 and then to the current version.
Backup and DR must be stopped before upgrading. Upgrades from versions between 3.0.x and 4.3.x will keep all your old data and configuration settings.
.. _upgrading-from-older-versions:
Upgrading from Older Versions
-----------------------------
Upgrades from versions older than 3.0.0 are no longer supported. To upgrade from an older version, first upgrade to 4.0.x, then upgrade to the desired version.
Upgrades from versions older than 3.0.0 are no longer supported.

View File

@ -309,7 +309,7 @@
.. |option-snapshot-ryw-disable-blurb| replace::
If this option is set more times in this transction than the enable option, snapshot reads will *not* see the effects of prior writes in the same transaction.
If this option is set more times in this transaction than the enable option, snapshot reads will *not* see the effects of prior writes in the same transaction.
.. |option-priority-batch-blurb| replace::
This transaction should be treated as low priority (other transactions should be processed first). Useful for doing potentially saturating batch work without interfering with the latency of other operations.

View File

@ -54,7 +54,7 @@ Python API
Installation
============
The FoundationDB Python API is compatible with Python 2.7 - 3.4. You will need to have a Python version within this range on your system before the FoundationDB Python API can be installed.
The FoundationDB Python API is compatible with Python 2.7 - 3.6. You will need to have a Python version within this range on your system before the FoundationDB Python API can be installed.
On macOS, the FoundationDB Python API is installed as part of the FoundationDB installation (see :ref:`installing-client-binaries`). On Ubuntu or RHEL/CentOS, you will need to install the FoundationDB Python API manually.
@ -1110,7 +1110,7 @@ the most part, this also implies that ``T == fdb.tuple.unpack(fdb.tuple.pack(T))
differ from the default sort when non-ASCII characters are included within the string), and UUIDs are sorted
based on their big-endian byte representation. Single-precision floating point numbers are sorted before all
double-precision floating point numbers, and for floating point numbers, -NaN is sorted before -Infinity which
is sorted before finite numbers which are sorted before Infinity which is sorted before NaN. Different represetations
is sorted before finite numbers which are sorted before Infinity which is sorted before NaN. Different representations
of NaN are not treated as equal.
Additionally, the tuple serialization contract is such that after they are serialized, the byte-string representations

View File

@ -101,6 +101,8 @@ Blob store Backup URLs can have optional parameters at the end which set various
Here is a complete list of valid parameters:
*secure_connection* (or *sc*) - Set 1 for secure connection and 0 for insecure connection. Defaults to secure connection.
*connect_tries* (or *ct*) - Number of times to try to connect for each request.
*request_tries* (or *rt*) - Number of times to try each request until a parseable HTTP response other than 429 is received.
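
For example, a Backup URL capping connection attempts and request retries might look like this (the key, host, and resource name are hypothetical; *sc*, *ct*, and *rt* are the parameters defined above):

blobstore://mykey@backup.example.com:443/prod_logs?sc=1&ct=5&rt=8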
@ -150,6 +152,26 @@ The Blob Credential File format is JSON with the following schema:
}
}
SSL Support
===========
By default, backup will communicate over https. To configure https, the following environment variables are used:
============================ ====================================================
Environment Variable Purpose
============================ ====================================================
``FDB_TLS_PLUGIN`` Path to the file to be loaded as the TLS plugin
``FDB_TLS_CERTIFICATE_FILE`` Path to the file from which the local certificates
can be loaded, used by the plugin
``FDB_TLS_KEY_FILE`` Path to the file from which to load the private
key, used by the plugin
``FDB_TLS_PASSWORD`` The byte-string representing the passcode for
unencrypting the private key
``FDB_TLS_CA_FILE`` Path to the file containing the CA certificates
to trust. Specify to override the default openssl
location.
============================ ====================================================
``fdbbackup`` command line tool
===============================

View File

@ -71,7 +71,7 @@ You can change coordinators when the following conditions are met:
* a majority of the current coordinators are available;
* all of the new coordinators are available; and
* client and server cluster files are writable.
* client and server cluster files and their parent directories are writable.
``fdbcli`` supports a ``coordinators`` command to specify the new list of coordinators::

View File

@ -10,38 +10,38 @@ macOS
The macOS installation package is supported on macOS 10.7+. It includes the client and (optionally) the server.
* `FoundationDB-5.2.1.pkg <https://www.foundationdb.org/downloads/5.2.1/macOS/installers/FoundationDB-5.2.1.pkg>`_
* `FoundationDB-5.2.5.pkg <https://www.foundationdb.org/downloads/5.2.5/macOS/installers/FoundationDB-5.2.5.pkg>`_
Ubuntu
------
The Ubuntu packages are supported on 64-bit Ubuntu 12.04+, but beware of the Linux kernel bug in Ubuntu 12.x.
* `foundationdb-clients-5.2.1-1_amd64.deb <https://www.foundationdb.org/downloads/5.2.1/ubuntu/installers/foundationdb-clients_5.2.1-1_amd64.deb>`_
* `foundationdb-server-5.2.1-1_amd64.deb <https://www.foundationdb.org/downloads/5.2.1/ubuntu/installers/foundationdb-server_5.2.1-1_amd64.deb>`_ (depends on the clients package)
* `foundationdb-clients-5.2.5-1_amd64.deb <https://www.foundationdb.org/downloads/5.2.5/ubuntu/installers/foundationdb-clients_5.2.5-1_amd64.deb>`_
* `foundationdb-server-5.2.5-1_amd64.deb <https://www.foundationdb.org/downloads/5.2.5/ubuntu/installers/foundationdb-server_5.2.5-1_amd64.deb>`_ (depends on the clients package)
RHEL/CentOS EL6
---------------
The RHEL/CentOS EL6 packages are supported on 64-bit RHEL/CentOS 6.x.
* `foundationdb-clients-5.2.1-1.el6.x86_64.rpm <https://www.foundationdb.org/downloads/5.2.1/rhel6/installers/foundationdb-clients-5.2.1-1.el6.x86_64.rpm>`_
* `foundationdb-server-5.2.1-1.el6.x86_64.rpm <https://www.foundationdb.org/downloads/5.2.1/rhel6/installers/foundationdb-server-5.2.1-1.el6.x86_64.rpm>`_ (depends on the clients package)
* `foundationdb-clients-5.2.5-1.el6.x86_64.rpm <https://www.foundationdb.org/downloads/5.2.5/rhel6/installers/foundationdb-clients-5.2.5-1.el6.x86_64.rpm>`_
* `foundationdb-server-5.2.5-1.el6.x86_64.rpm <https://www.foundationdb.org/downloads/5.2.5/rhel6/installers/foundationdb-server-5.2.5-1.el6.x86_64.rpm>`_ (depends on the clients package)
RHEL/CentOS EL7
---------------
The RHEL/CentOS EL7 packages are supported on 64-bit RHEL/CentOS 7.x.
* `foundationdb-clients-5.2.1-1.el7.x86_64.rpm <https://www.foundationdb.org/downloads/5.2.1/rhel7/installers/foundationdb-clients-5.2.1-1.el7.x86_64.rpm>`_
* `foundationdb-server-5.2.1-1.el7.x86_64.rpm <https://www.foundationdb.org/downloads/5.2.1/rhel7/installers/foundationdb-server-5.2.1-1.el7.x86_64.rpm>`_ (depends on the clients package)
* `foundationdb-clients-5.2.5-1.el7.x86_64.rpm <https://www.foundationdb.org/downloads/5.2.5/rhel7/installers/foundationdb-clients-5.2.5-1.el7.x86_64.rpm>`_
* `foundationdb-server-5.2.5-1.el7.x86_64.rpm <https://www.foundationdb.org/downloads/5.2.5/rhel7/installers/foundationdb-server-5.2.5-1.el7.x86_64.rpm>`_ (depends on the clients package)
Windows
-------
The Windows installer is supported on 64-bit Windows XP and later. It includes the client and (optionally) the server.
* `foundationdb-5.2.1-x64.msi <https://www.foundationdb.org/downloads/5.2.1/windows/installers/foundationdb-5.2.1-x64.msi>`_
* `foundationdb-5.2.5-x64.msi <https://www.foundationdb.org/downloads/5.2.5/windows/installers/foundationdb-5.2.5-x64.msi>`_
API Language Bindings
=====================
@ -51,25 +51,25 @@ C
FoundationDB's C bindings are installed with the FoundationDB client binaries. You can find more details in the :doc:`C API Documentation <api-c>`.
Python 2.7 - 3.4
Python 2.7 - 3.5
----------------
On macOS and Windows, the FoundationDB Python API bindings are installed as part of your FoundationDB installation.
If you need to use the FoundationDB Python API from other Python installations or paths, download the Python package:
* `foundationdb-5.2.1.tar.gz <https://www.foundationdb.org/downloads/5.2.1/bindings/python/foundationdb-5.2.1.tar.gz>`_
* `foundationdb-5.2.5.tar.gz <https://www.foundationdb.org/downloads/5.2.5/bindings/python/foundationdb-5.2.5.tar.gz>`_
Ruby 1.9.3/2.0.0+
-----------------
* `fdb-5.2.1.gem <https://www.foundationdb.org/downloads/5.2.1/bindings/ruby/fdb-5.2.1.gem>`_
* `fdb-5.2.5.gem <https://www.foundationdb.org/downloads/5.2.5/bindings/ruby/fdb-5.2.5.gem>`_
Java 8+
-------
* `fdb-java-5.2.1.jar <https://www.foundationdb.org/downloads/5.2.1/bindings/java/fdb-java-5.2.1.jar>`_
* `fdb-java-5.2.1-javadoc.jar <https://www.foundationdb.org/downloads/5.2.1/bindings/java/fdb-java-5.2.1-javadoc.jar>`_
* `fdb-java-5.2.5.jar <https://www.foundationdb.org/downloads/5.2.5/bindings/java/fdb-java-5.2.5.jar>`_
* `fdb-java-5.2.5-javadoc.jar <https://www.foundationdb.org/downloads/5.2.5/bindings/java/fdb-java-5.2.5-javadoc.jar>`_
Go 1.1+
-------

View File

@ -50,4 +50,5 @@ The latest changes are detailed in :doc:`release-notes`. The documentation has t
design-recipes
api-reference
tutorials
administration
earlier-release-notes

View File

@ -40,6 +40,6 @@ $ sudo service foundationdb stop
fdb> exclude 192.168.1.1:4500 192.168.1.2:4500 192.168.1.3:4500
8. Run ``coordinators auto`` in ``fdbcli`` to move coordination state to the new machines. Please note that this will cause the fdb.cluster file to be updated with the addresses of the new machines. Any currently connected clients will be notified and (assuming they have appropriate file system permissions) will update their own copy of the cluster file. As long as the original machines are still running, any clients that connect to them will be automatically forwarded to the new cluster coordinators. However, if you have a client that has not yet connected or only connects intermittently, you will need to copy the new cluster file from one of the new machines to the client machine.
8. Run ``coordinators auto`` in ``fdbcli`` to move coordination state to the new machines. Please note that this will cause the fdb.cluster file to be updated with the addresses of the new machines. Any currently connected clients will be notified and (assuming they have appropriate file system :ref:`permissions <cluster_file_permissions>`) will update their own copy of the cluster file. As long as the original machines are still running, any clients that connect to them will be automatically forwarded to the new cluster coordinators. However, if you have a client that has not yet connected or only connects intermittently, you will need to copy the new cluster file from one of the new machines to the client machine.
9. The ``status details`` command in the fdbcli will now show only the new processes (both as workers and coordinators), and you can safely shut down the older machines.

View File

@ -186,7 +186,7 @@ The following format informally describes the JSON containing the status data. T
"name": < "incorrect_cluster_file_contents"
| ...
>,
"description": "Cluster file contents do not match current cluster connection string. Verify cluster file is writable and has not been overwritten externally."
"description": "Cluster file contents do not match current cluster connection string. Verify the cluster file and its parent directory are writable and that the cluster file has not been overwritten externally."
}
],
"reasons": [

View File

@ -83,7 +83,7 @@ Fixes
* A multi-version API external client that was a duplicate could cause a crash at client shutdown. <rdar://problem/25838039>
* A multi-version API external client that failed to load would cause a crash at client shutdown. <rdar://problem/26611919>
* Status could report an incorrect database size in rare scenarios. <rdar://problem/25295683>
* Data distribution would stopped working (both exclude and new recruitment) if a call to commit on the storage server could hang indefinitely. <rdar://problem/26276166>
* Data distribution would stop working (both exclude and new recruitment) if a call to commit on the storage server could hang indefinitely. <rdar://problem/26276166>
* ``fdbcli`` would quit on some commands that were not errors or continue after some real errors when run with ``--exec``. <rdar://problem/25755317>
* Trace files generated by clients using the multi-version API could have name collisions. <rdar://problem/25884486>

View File

@ -2,15 +2,7 @@
Release Notes
#############
5.2.1
=====
Fixes
-----
* Client input validation would handle inputs to versionstamp mutations incorrectly if the API version was less than 520. `(Issue #387) <https://github.com/apple/foundationdb/issues/387>`_
5.2.0
5.2.5
=====
Features
@ -22,6 +14,8 @@ Features
* Added the APPEND_IF_FITS atomic operation. `(PR #22) <https://github.com/apple/foundationdb/pull/22>`_
* Updated the SET_VERSIONSTAMPED_KEY atomic operation to take four bytes to specify the offset instead of two (if the API version is set to 520 or higher). `(Issue #148) <https://github.com/apple/foundationdb/issues/148>`_
* Updated the SET_VERSIONSTAMPED_VALUE atomic operation to place the versionstamp at a specified offset in a value (if the API version is set to 520 or higher). `(Issue #148) <https://github.com/apple/foundationdb/issues/148>`_
* tls_verify_peers splits input using the '|' character. [5.2.4] `(PR #468) <https://github.com/apple/foundationdb/pull/468>`_
* Added knobs and blob Backup URL parameters for operations/sec limits by operation type. [5.2.5] `(PR #513) <https://github.com/apple/foundationdb/pull/513>`_
Performance
-----------
@ -32,6 +26,13 @@ Fixes
-----
* The client did not clear the storage server interface cache on endpoint failure for all request types. This causes up to one second of additional latency on the first get range request to a rebooted storage server. `(Issue #351) <https://github.com/apple/foundationdb/issues/351>`_
* Client input validation would handle inputs to versionstamp mutations incorrectly if the API version was less than 520. [5.2.1] `(Issue #387) <https://github.com/apple/foundationdb/issues/387>`_
* Build would fail on recent versions of Clang. [5.2.2] `(PR #389) <https://github.com/apple/foundationdb/pull/389/files>`_
* Clusters running with TLS plugin would reject clients using non-server certificates. [5.2.2] `(PR #396) <https://github.com/apple/foundationdb/pull/396>`_
* Backup would attempt to clear too many ranges in a single transaction when erasing log ranges. [5.2.3] `(PR #440) <https://github.com/apple/foundationdb/pull/440>`_
* A read-only transaction using the ``READ_LOCK_AWARE`` option would fail if committed. [5.2.3] `(PR #437) <https://github.com/apple/foundationdb/pull/437>`_
* The fdbcli ``kill`` command did not work when TLS was enabled. [5.2.4] `(PR #471) <https://github.com/apple/foundationdb/pull/471>`_
* Certificate checks are no longer disabled by default. [5.2.5] `(PR #511) <https://github.com/apple/foundationdb/pull/511>`_
Status
------

View File

@ -59,6 +59,10 @@ Command-line Option Client Option Environment Variable Purpo
key, used by the plugin
``tls_verify_peers`` ``TLS_verify_peers`` ``FDB_TLS_VERIFY_PEERS`` The byte-string for the verification of peer
certificates and sessions, used by the plugin
``tls_password`` ``TLS_password`` ``FDB_TLS_PASSWORD`` The byte-string representing the passcode for
unencrypting the private key
``tls_ca_file`` ``TLS_ca_path`` ``FDB_TLS_CA_FILE`` Path to the file containing the CA certificates
to trust
======================== ==================== ============================ ==================================================
The value for each setting can be specified in more than one way. The actual value used is determined in the following order:
@ -69,7 +73,7 @@ The value for each setting can be specified in more than one way. The actual va
As with all other command-line options to ``fdbserver``, the TLS settings can be specified in the :ref:`[fdbserver] section of the configuration file <foundationdb-conf-fdbserver>`.
The settings for certificate file, key file, and peer verification are interpreted by the loaded plugin.
The settings for certificate file, key file, peer verification, password and CA file are interpreted by the loaded plugin.
Default Values
--------------
@ -97,7 +101,17 @@ The default behavior when the certificate or key file is not specified is to loo
Default Peer Verification
^^^^^^^^^^^^^^^^^^^^^^^^^
The default peer verification is the empty string.
The default peer verification is ``Check.Valid=0``.
Default Password
^^^^^^^^^^^^^^^^^^^^^^^^^
There is no default password. If no password is specified, the plugin assumes that the private key is unencrypted.
CA file default location
^^^^^^^^^^^^^^^^^^^^^^^^^
If a value is not specified, the plugin searches for certs in the default openssl certs location.
Parameters and client bindings
------------------------------
@ -132,7 +146,7 @@ A file must be supplied that contains an ordered list of certificates. The first
All but the last certificate are provided to peers during TLS handshake as the certificate chain.
The last certificate in the list is the trusted certificate. All processes that want to communicate must have the same trusted certificate.
The last certificate in the list is the trusted certificate.
.. note:: If the certificate list contains only one certificate, that certificate *must* be self-signed and will be used as both the certificate chain and the trusted certificate.
@ -153,6 +167,8 @@ The key file must contain the private key corresponding to the process' own cert
xxxxxxxxxxxxxxx
-----END PRIVATE KEY-----
It can optionally be encrypted by the password provided to tls_password.
Certificate creation
--------------------
@ -173,7 +189,7 @@ A FoundationDB server or client will only communicate with peers that present a
Certificate field verification
------------------------------
With a peer verification string, FoundationDB servers and clients can adjust what is required of the certificate chain presented by a peer. These options can make the certificate requirements more rigorous or more lenient.
With a peer verification string, FoundationDB servers and clients can adjust what is required of the certificate chain presented by a peer. These options can make the certificate requirements more rigorous or more lenient. You can specify multiple verification strings by providing additional ``tls_verify_peers`` command line arguments or by concatenating them with ``|``. Any literal ``,`` or ``|`` in a verify-peers field must be escaped with ``\``.
Turning down the validation
^^^^^^^^^^^^^^^^^^^^^^^^^^^
@ -216,6 +232,8 @@ Prefix DN
``I.``, or ``Issuer.`` Issuer
============================= ========
Additionally, the verification can be restricted to certificates signed by a given root CA with the field ``Root.CN``. This allows you to have different requirements for different root chains.
The value of a condition must be specified in a form derived from a subset of `RFC 4514 <http://www.ietf.org/rfc/rfc4514.txt>`_. Specifically, the "raw" notation (a value starting with the ``#`` character) is not accepted. Other escaping mechanisms, including specifying characters by hex notation, are allowed. The specified field's value must exactly match the value in the peer's certificate.
By default, the fields of a peer certificate's DNs are not examined.
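
As an illustration (the field values are made up), two verification rules can be supplied either as separate strings or as one '|'-joined string. This is not a standalone program; it mirrors the set_verify_peers calls made elsewhere in this commit, where TLSOptions comes from the FDB client:

// Hypothetical values; each string is one verification rule.
Reference<TLSOptions> options(new TLSOptions);
options->set_verify_peers({ "Check.Valid=1,I.CN=FDB Root CA",
                            "O.O=Example\\, Inc.,Root.CN=Backup CA" });
// Equivalent single-string form, joined with '|':
// options->set_verify_peers({ "Check.Valid=1,I.CN=FDB Root CA|O.O=Example\\, Inc.,Root.CN=Backup CA" });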

View File

@ -2582,7 +2582,8 @@ ACTOR Future<int> cli(CLIOptions opt, LineNoise* plinenoise) {
if (tokens.size() == 1) {
Standalone<RangeResultRef> kvs = wait( makeInterruptable( tr->getRange(KeyRangeRef(LiteralStringRef("\xff\xff/worker_interfaces"), LiteralStringRef("\xff\xff\xff")), 1) ) );
for( auto it : kvs ) {
address_interface[it.key] = it.value;
auto ip_port = it.key.endsWith(LiteralStringRef(":tls")) ? it.key.removeSuffix(LiteralStringRef(":tls")) : it.key;
address_interface[ip_port] = it.value;
}
}
if (tokens.size() == 1 || tokencmp(tokens[1], "list")) {
@ -2718,7 +2719,8 @@ ACTOR Future<int> cli(CLIOptions opt, LineNoise* plinenoise) {
LiteralStringRef("\xff\xff\xff")),
1)));
for (const auto& pair : kvs) {
printf("%s\n", printable(pair.key).c_str());
auto ip_port = pair.key.endsWith(LiteralStringRef(":tls")) ? pair.key.removeSuffix(LiteralStringRef(":tls")) : pair.key;
printf("%s\n", printable(ip_port).c_str());
}
continue;
}
@ -2750,7 +2752,8 @@ ACTOR Future<int> cli(CLIOptions opt, LineNoise* plinenoise) {
state std::vector<Key> all_profiler_addresses;
state std::vector<Future<ErrorOr<Void>>> all_profiler_responses;
for (const auto& pair : kvs) {
interfaces.emplace(pair.key, BinaryReader::fromStringRef<ClientWorkerInterface>(pair.value, IncludeVersion()));
auto ip_port = pair.key.endsWith(LiteralStringRef(":tls")) ? pair.key.removeSuffix(LiteralStringRef(":tls")) : pair.key;
interfaces.emplace(ip_port, BinaryReader::fromStringRef<ClientWorkerInterface>(pair.value, IncludeVersion()));
}
if (tokens.size() == 6 && tokencmp(tokens[5], "all")) {
for (const auto& pair : interfaces) {

View File

@ -423,7 +423,7 @@ bool copyParameter(Reference<Task> source, Reference<Task> dest, Key key);
Version getVersionFromString(std::string const& value);
Standalone<VectorRef<KeyRangeRef>> getLogRanges(Version beginVersion, Version endVersion, Key destUidValue, int blockSize = CLIENT_KNOBS->LOG_RANGE_BLOCK_SIZE);
Standalone<VectorRef<KeyRangeRef>> getApplyRanges(Version beginVersion, Version endVersion, Key backupUid);
Future<Void> eraseLogData(Database cx, Key logUidValue, Key destUidValue, Optional<Version> beginVersion = Optional<Version>(), Optional<Version> endVersion = Optional<Version>(), bool checkBackupUid = false, Version backupUid = 0);
Future<Void> eraseLogData(Database cx, Key logUidValue, Key destUidValue, Optional<Version> endVersion = Optional<Version>(), bool checkBackupUid = false, Version backupUid = 0);
Key getApplyKey( Version version, Key backupUid );
std::pair<uint64_t, uint32_t> decodeBKMutationLogKey(Key key);
Standalone<VectorRef<MutationRef>> decodeBackupLogValue(StringRef value);

View File

@ -624,112 +624,15 @@ ACTOR Future<Void> applyMutations(Database cx, Key uid, Key addPrefix, Key remov
}
}
ACTOR Future<Void> _clearLogRanges(Reference<ReadYourWritesTransaction> tr, bool clearVersionHistory, Key logUidValue, Key destUidValue, Version beginVersion, Version endVersion) {
ACTOR static Future<Void> _eraseLogData(Database cx, Key logUidValue, Key destUidValue, Optional<Version> endVersion, bool checkBackupUid, Version backupUid) {
state Key backupLatestVersionsPath = destUidValue.withPrefix(backupLatestVersionsPrefix);
state Key backupLatestVersionsKey = logUidValue.withPrefix(backupLatestVersionsPath);
tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
tr->setOption(FDBTransactionOptions::LOCK_AWARE);
state Standalone<RangeResultRef> backupVersions = wait(tr->getRange(KeyRangeRef(backupLatestVersionsPath, strinc(backupLatestVersionsPath)), CLIENT_KNOBS->TOO_MANY));
// Make sure version history key does exist and lower the beginVersion if needed
bool foundSelf = false;
for (auto backupVersion : backupVersions) {
Key currLogUidValue = backupVersion.key.removePrefix(backupLatestVersionsPrefix).removePrefix(destUidValue);
if (currLogUidValue == logUidValue) {
foundSelf = true;
beginVersion = std::min(beginVersion, BinaryReader::fromStringRef<Version>(backupVersion.value, Unversioned()));
}
}
// Do not clear anything if version history key cannot be found
if (!foundSelf) {
if (!destUidValue.size()) {
return Void();
}
Version nextSmallestVersion = endVersion;
bool clearLogRangesRequired = true;
// More than one backup/DR with the same range
if (backupVersions.size() > 1) {
for (auto backupVersion : backupVersions) {
Key currLogUidValue = backupVersion.key.removePrefix(backupLatestVersionsPrefix).removePrefix(destUidValue);
Version currVersion = BinaryReader::fromStringRef<Version>(backupVersion.value, Unversioned());
if (currLogUidValue == logUidValue) {
continue;
} else if (currVersion > beginVersion) {
nextSmallestVersion = std::min(currVersion, nextSmallestVersion);
} else {
// If we can find a version less than or equal to beginVersion, clearing log ranges is not required
clearLogRangesRequired = false;
break;
}
}
}
if (clearVersionHistory && backupVersions.size() == 1) {
// Clear version history
tr->clear(prefixRange(backupLatestVersionsPath));
// Clear everything under blog/[destUid]
tr->clear(prefixRange(destUidValue.withPrefix(backupLogKeys.begin)));
// Disable committing mutations into blog
tr->clear(prefixRange(destUidValue.withPrefix(logRangesRange.begin)));
} else {
if (clearVersionHistory) {
// Clear current backup version history
tr->clear(backupLatestVersionsKey);
} else {
// Update current backup latest version
tr->set(backupLatestVersionsKey, BinaryWriter::toValue<Version>(endVersion, Unversioned()));
}
// Clear log ranges if needed
if (clearLogRangesRequired) {
Standalone<VectorRef<KeyRangeRef>> ranges = getLogRanges(beginVersion, nextSmallestVersion, destUidValue);
for (auto& range : ranges) {
tr->clear(range);
}
}
}
return Void();
}
// The difference between beginVersion and endVersion should not be too large
Future<Void> clearLogRanges(Reference<ReadYourWritesTransaction> tr, bool clearVersionHistory, Key logUidValue, Key destUidValue, Version beginVersion, Version endVersion) {
return _clearLogRanges(tr, clearVersionHistory, logUidValue, destUidValue, beginVersion, endVersion);
}
ACTOR static Future<Void> _eraseLogData(Database cx, Key logUidValue, Key destUidValue, Optional<Version> beginVersion, Optional<Version> endVersion, bool checkBackupUid, Version backupUid) {
if ((beginVersion.present() && endVersion.present() && endVersion.get() <= beginVersion.get()) || !destUidValue.size())
return Void();
state Version currBeginVersion;
state Version endVersionValue;
state Version currEndVersion;
state bool clearVersionHistory;
ASSERT(beginVersion.present() == endVersion.present());
if (beginVersion.present()) {
currBeginVersion = beginVersion.get();
endVersionValue = endVersion.get();
clearVersionHistory = false;
} else {
// If beginVersion and endVersion are not presented, it means backup is done and we need to clear version history.
// Set currBeginVersion to INTMAX_MAX and it will be set to the correct version in clearLogRanges().
// Set endVersionValue to INTMAX_MAX since we need to clear log ranges up to next smallest version.
currBeginVersion = endVersionValue = currEndVersion = INTMAX_MAX;
clearVersionHistory = true;
}
while (currBeginVersion < endVersionValue || clearVersionHistory) {
state Reference<ReadYourWritesTransaction> tr(new ReadYourWritesTransaction(cx));
loop{
try {
tr->setOption(FDBTransactionOptions::LOCK_AWARE);
@ -742,28 +645,91 @@ ACTOR static Future<Void> _eraseLogData(Database cx, Key logUidValue, Key destUi
return Void();
}
if (!clearVersionHistory) {
currEndVersion = std::min(currBeginVersion + CLIENT_KNOBS->CLEAR_LOG_RANGE_COUNT * CLIENT_KNOBS->LOG_RANGE_BLOCK_SIZE, endVersionValue);
state Standalone<RangeResultRef> backupVersions = wait(tr->getRange(KeyRangeRef(backupLatestVersionsPath, strinc(backupLatestVersionsPath)), CLIENT_KNOBS->TOO_MANY));
// Make sure version history key does exist and lower the beginVersion if needed
state Version currBeginVersion = invalidVersion;
for (auto backupVersion : backupVersions) {
Key currLogUidValue = backupVersion.key.removePrefix(backupLatestVersionsPrefix).removePrefix(destUidValue);
if (currLogUidValue == logUidValue) {
currBeginVersion = BinaryReader::fromStringRef<Version>(backupVersion.value, Unversioned());
break;
}
}
Void _ = wait(clearLogRanges(tr, clearVersionHistory, logUidValue, destUidValue, currBeginVersion, currEndVersion));
Void _ = wait(tr->commit());
if (clearVersionHistory) {
// Do not clear anything if version history key cannot be found
if (currBeginVersion == invalidVersion) {
return Void();
}
currBeginVersion = currEndVersion;
state Version currEndVersion = currBeginVersion + CLIENT_KNOBS->CLEAR_LOG_RANGE_COUNT * CLIENT_KNOBS->LOG_RANGE_BLOCK_SIZE;
if(endVersion.present()) {
currEndVersion = std::min(currEndVersion, endVersion.get());
}
state Version nextSmallestVersion = currEndVersion;
bool clearLogRangesRequired = true;
// More than one backup/DR with the same range
if (backupVersions.size() > 1) {
for (auto backupVersion : backupVersions) {
Key currLogUidValue = backupVersion.key.removePrefix(backupLatestVersionsPrefix).removePrefix(destUidValue);
Version currVersion = BinaryReader::fromStringRef<Version>(backupVersion.value, Unversioned());
if (currLogUidValue == logUidValue) {
continue;
} else if (currVersion > currBeginVersion) {
nextSmallestVersion = std::min(currVersion, nextSmallestVersion);
} else {
// If we can find a version less than or equal to beginVersion, clearing log ranges is not required
clearLogRangesRequired = false;
break;
}
}
}
if (!endVersion.present() && backupVersions.size() == 1) {
// Clear version history
tr->clear(prefixRange(backupLatestVersionsPath));
// Clear everything under blog/[destUid]
tr->clear(prefixRange(destUidValue.withPrefix(backupLogKeys.begin)));
// Disable committing mutations into blog
tr->clear(prefixRange(destUidValue.withPrefix(logRangesRange.begin)));
} else {
if (!endVersion.present() && currEndVersion >= nextSmallestVersion) {
// Clear current backup version history
tr->clear(backupLatestVersionsKey);
} else {
// Update current backup latest version
tr->set(backupLatestVersionsKey, BinaryWriter::toValue<Version>(currEndVersion, Unversioned()));
}
// Clear log ranges if needed
if (clearLogRangesRequired) {
Standalone<VectorRef<KeyRangeRef>> ranges = getLogRanges(currBeginVersion, nextSmallestVersion, destUidValue);
for (auto& range : ranges) {
tr->clear(range);
}
}
}
Void _ = wait(tr->commit());
if (!endVersion.present() && (backupVersions.size() == 1 || currEndVersion >= nextSmallestVersion)) {
return Void();
}
if(endVersion.present() && currEndVersion == endVersion.get()) {
return Void();
}
tr->reset();
} catch (Error &e) {
Void _ = wait(tr->onError(e));
}
}
}
return Void();
}
Future<Void> eraseLogData(Database cx, Key logUidValue, Key destUidValue, Optional<Version> beginVersion, Optional<Version> endVersion, bool checkBackupUid, Version backupUid) {
return _eraseLogData(cx, logUidValue, destUidValue, beginVersion, endVersion, checkBackupUid, backupUid);
Future<Void> eraseLogData(Database cx, Key logUidValue, Key destUidValue, Optional<Version> endVersion, bool checkBackupUid, Version backupUid) {
return _eraseLogData(cx, logUidValue, destUidValue, endVersion, checkBackupUid, backupUid);
}
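
The refactored loop above bounds how much each transaction clears: it advances by at most CLEAR_LOG_RANGE_COUNT * LOG_RANGE_BLOCK_SIZE versions, records its progress in the backupLatestVersions key, and resumes from the recorded version in the next transaction. A standalone sketch of that chunking pattern, with made-up knob values:

#include <algorithm>
#include <cstdint>
#include <cstdio>

using Version = int64_t;

// Stand-ins for CLIENT_KNOBS->LOG_RANGE_BLOCK_SIZE and CLEAR_LOG_RANGE_COUNT.
static const Version LOG_RANGE_BLOCK_SIZE = 1 << 20;
static const Version CLEAR_LOG_RANGE_COUNT = 1500;

void eraseLogDataInChunks(Version recordedBegin, Version endVersion) {
    while (recordedBegin < endVersion) {
        Version chunkEnd = std::min(recordedBegin + CLEAR_LOG_RANGE_COUNT * LOG_RANGE_BLOCK_SIZE, endVersion);
        // In the real code this is one ReadYourWritesTransaction that clears
        // getLogRanges(begin, chunkEnd, destUid) and updates the
        // backupLatestVersions key to chunkEnd before committing.
        printf("clear versions [%lld, %lld)\n", (long long)recordedBegin, (long long)chunkEnd);
        recordedBegin = chunkEnd; // the next transaction re-reads this from the database
    }
}

int main() { eraseLogDataInChunks(0, Version(5) * CLEAR_LOG_RANGE_COUNT * LOG_RANGE_BLOCK_SIZE); }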

View File

@ -476,21 +476,20 @@ namespace dbBackup {
Void _ = wait(checkTaskVersion(cx, task, EraseLogRangeTaskFunc::name, EraseLogRangeTaskFunc::version));
Version beginVersion = BinaryReader::fromStringRef<Version>(task->params[DatabaseBackupAgent::keyBeginVersion], Unversioned());
Version endVersion = BinaryReader::fromStringRef<Version>(task->params[DatabaseBackupAgent::keyEndVersion], Unversioned());
Void _ = wait(eraseLogData(taskBucket->src, task->params[BackupAgentBase::keyConfigLogUid], task->params[BackupAgentBase::destUid], Optional<Version>(beginVersion), Optional<Version>(endVersion), true, BinaryReader::fromStringRef<Version>(task->params[BackupAgentBase::keyFolderId], Unversioned())));
Void _ = wait(eraseLogData(taskBucket->src, task->params[BackupAgentBase::keyConfigLogUid], task->params[BackupAgentBase::destUid], Optional<Version>(endVersion), true, BinaryReader::fromStringRef<Version>(task->params[BackupAgentBase::keyFolderId], Unversioned())));
return Void();
}
ACTOR static Future<Key> addTask(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> taskBucket, Reference<Task> parentTask, Version beginVersion, Version endVersion, TaskCompletionKey completionKey, Reference<TaskFuture> waitFor = Reference<TaskFuture>()) {
ACTOR static Future<Key> addTask(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> taskBucket, Reference<Task> parentTask, Version endVersion, TaskCompletionKey completionKey, Reference<TaskFuture> waitFor = Reference<TaskFuture>()) {
Key doneKey = wait(completionKey.get(tr, taskBucket));
Reference<Task> task(new Task(EraseLogRangeTaskFunc::name, EraseLogRangeTaskFunc::version, doneKey, 1));
copyDefaultParameters(parentTask, task);
task->params[DatabaseBackupAgent::keyBeginVersion] = BinaryWriter::toValue(beginVersion, Unversioned());
task->params[DatabaseBackupAgent::keyBeginVersion] = BinaryWriter::toValue(1, Unversioned()); //FIXME: remove in 6.X, only needed for 5.2 backward compatibility
task->params[DatabaseBackupAgent::keyEndVersion] = BinaryWriter::toValue(endVersion, Unversioned());
if (!waitFor) {
@ -749,7 +748,7 @@ namespace dbBackup {
// Do not erase at the first time
if (prevBeginVersion > 0) {
addTaskVector.push_back(EraseLogRangeTaskFunc::addTask(tr, taskBucket, task, prevBeginVersion, beginVersion, TaskCompletionKey::joinWith(allPartsDone)));
addTaskVector.push_back(EraseLogRangeTaskFunc::addTask(tr, taskBucket, task, beginVersion, TaskCompletionKey::joinWith(allPartsDone)));
}
Void _ = wait(waitForAll(addTaskVector) && taskBucket->finish(tr, task));
@ -856,7 +855,7 @@ namespace dbBackup {
}
Version backupUid = BinaryReader::fromStringRef<Version>(task->params[BackupAgentBase::keyFolderId], Unversioned());
Void _ = wait(eraseLogData(taskBucket->src, logUidValue, destUidValue, Optional<Version>(), Optional<Version>(), true, backupUid));
Void _ = wait(eraseLogData(taskBucket->src, logUidValue, destUidValue, Optional<Version>(), true, backupUid));
return Void();
}
@ -952,7 +951,7 @@ namespace dbBackup {
}
if (prevBeginVersion > 0) {
addTaskVector.push_back(EraseLogRangeTaskFunc::addTask(tr, taskBucket, task, prevBeginVersion, beginVersion, TaskCompletionKey::joinWith(allPartsDone)));
addTaskVector.push_back(EraseLogRangeTaskFunc::addTask(tr, taskBucket, task, beginVersion, TaskCompletionKey::joinWith(allPartsDone)));
}
Void _ = wait(waitForAll(addTaskVector) && taskBucket->finish(tr, task));

View File

@ -1911,30 +1911,25 @@ namespace fileBackup {
state Reference<FlowLock> lock(new FlowLock(CLIENT_KNOBS->BACKUP_LOCK_BYTES));
Void _ = wait(checkTaskVersion(cx, task, EraseLogRangeTaskFunc::name, EraseLogRangeTaskFunc::version));
state Version beginVersion = Params.beginVersion().get(task);
state Version endVersion = Params.endVersion().get(task);
state Key destUidValue = Params.destUidValue().get(task);
state BackupConfig config(task);
state Key logUidValue = config.getUidAsKey();
if (beginVersion == 0) {
Void _ = wait(eraseLogData(cx, logUidValue, destUidValue));
} else {
Void _ = wait(eraseLogData(cx, logUidValue, destUidValue, Optional<Version>(beginVersion), Optional<Version>(endVersion)));
}
Void _ = wait(eraseLogData(cx, logUidValue, destUidValue, endVersion != 0 ? Optional<Version>(endVersion) : Optional<Version>()));
return Void();
}
ACTOR static Future<Key> addTask(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> taskBucket, UID logUid, TaskCompletionKey completionKey, Key destUidValue, Version beginVersion = 0, Version endVersion = 0, Reference<TaskFuture> waitFor = Reference<TaskFuture>()) {
ACTOR static Future<Key> addTask(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> taskBucket, UID logUid, TaskCompletionKey completionKey, Key destUidValue, Version endVersion = 0, Reference<TaskFuture> waitFor = Reference<TaskFuture>()) {
Key key = wait(addBackupTask(EraseLogRangeTaskFunc::name,
EraseLogRangeTaskFunc::version,
tr, taskBucket, completionKey,
BackupConfig(logUid),
waitFor,
[=](Reference<Task> task) {
Params.beginVersion().set(task, beginVersion);
Params.beginVersion().set(task, 1); //FIXME: remove in 6.X, only needed for 5.2 backward compatibility
Params.endVersion().set(task, endVersion);
Params.destUidValue().set(task, destUidValue);
},
@ -2039,7 +2034,7 @@ namespace fileBackup {
// Do not erase at the first time
if (prevBeginVersion > 0) {
state Key destUidValue = wait(config.destUidValue().getOrThrow(tr));
Key _ = wait(EraseLogRangeTaskFunc::addTask(tr, taskBucket, config.getUid(), TaskCompletionKey::joinWith(logDispatchBatchFuture), destUidValue, prevBeginVersion, beginVersion));
Key _ = wait(EraseLogRangeTaskFunc::addTask(tr, taskBucket, config.getUid(), TaskCompletionKey::joinWith(logDispatchBatchFuture), destUidValue, beginVersion));
}
Void _ = wait(taskBucket->finish(tr, task));
@ -3481,8 +3476,8 @@ public:
tr->set(destUidLookupPath, destUidValue);
}
}
Version initVersion = 1;
tr->set(config.getUidAsKey().withPrefix(destUidValue).withPrefix(backupLatestVersionsPrefix), BinaryWriter::toValue<Version>(initVersion, Unversioned()));
tr->set(config.getUidAsKey().withPrefix(destUidValue).withPrefix(backupLatestVersionsPrefix), BinaryWriter::toValue<Version>(tr->getReadVersion().get(), Unversioned()));
config.destUidValue().set(tr, destUidValue);
// Point the tag to this new uid

View File

@ -113,7 +113,7 @@ ClientKnobs::ClientKnobs(bool randomize) {
init( BACKUP_MAP_KEY_UPPER_LIMIT, 1e5 ); if( buggifyMapLimits ) BACKUP_MAP_KEY_UPPER_LIMIT = 30;
init( BACKUP_COPY_TASKS, 90 );
init( BACKUP_BLOCK_SIZE, LOG_RANGE_BLOCK_SIZE/10 );
init( BACKUP_TASKS_PER_AGENT, 20 );
init( BACKUP_TASKS_PER_AGENT, 10 );
init( SIM_BACKUP_TASKS_PER_AGENT, 10 );
init( BACKUP_RANGEFILE_BLOCK_SIZE, 1024 * 1024);
init( BACKUP_LOGFILE_BLOCK_SIZE, 1024 * 1024);
@ -164,6 +164,11 @@ ClientKnobs::ClientKnobs(bool randomize) {
init( BLOBSTORE_MAX_SEND_BYTES_PER_SECOND, 1e9 );
init( BLOBSTORE_MAX_RECV_BYTES_PER_SECOND, 1e9 );
init( BLOBSTORE_LIST_REQUESTS_PER_SECOND, 25 );
init( BLOBSTORE_WRITE_REQUESTS_PER_SECOND, 50 );
init( BLOBSTORE_READ_REQUESTS_PER_SECOND, 100 );
init( BLOBSTORE_DELETE_REQUESTS_PER_SECOND, 200 );
// Client Status Info
init(CSI_SAMPLING_PROBABILITY, -1.0);
init(CSI_SIZE_LIMIT, std::numeric_limits<int64_t>::max());

View File

@ -151,6 +151,10 @@ public:
int BLOBSTORE_REQUEST_TRIES;
int BLOBSTORE_REQUEST_TIMEOUT;
int BLOBSTORE_REQUESTS_PER_SECOND;
int BLOBSTORE_LIST_REQUESTS_PER_SECOND;
int BLOBSTORE_WRITE_REQUESTS_PER_SECOND;
int BLOBSTORE_READ_REQUESTS_PER_SECOND;
int BLOBSTORE_DELETE_REQUESTS_PER_SECOND;
int BLOBSTORE_CONCURRENT_REQUESTS;
int BLOBSTORE_MULTIPART_MAX_PART_SIZE;
int BLOBSTORE_MULTIPART_MIN_PART_SIZE;

View File

@ -135,7 +135,7 @@ public:
~DLThreadSingleAssignmentVar() {
lock.assertNotEntered();
if(f) {
ASSERT(futureRefCount == 1);
ASSERT_ABORT(futureRefCount == 1);
api->futureDestroy(f);
}
}

View File

@ -93,7 +93,7 @@ LocationInfo::~LocationInfo() {
for( auto const& alternative : getAlternatives() )
handles.push_back( alternative.v.getVersion.getEndpoint().token ); // must match above choice of UID
std::sort( handles.begin(), handles.end() );
ASSERT( handles.size() );
ASSERT_ABORT( handles.size() );
auto it = cx->ssid_locationInfo.find( handles );
if( it != cx->ssid_locationInfo.end() )
@ -549,7 +549,7 @@ DatabaseContext::~DatabaseContext() {
monitorMasterProxiesInfoChange.cancel();
for(auto it = ssid_locationInfo.begin(); it != ssid_locationInfo.end(); it = ssid_locationInfo.erase(it))
it->second->notifyContextDestroyed();
ASSERT( ssid_locationInfo.empty() );
ASSERT_ABORT( ssid_locationInfo.empty() );
locationCache.insert( allKeys, Reference<LocationInfo>() );
}
@ -2456,11 +2456,6 @@ ACTOR static Future<Void> tryCommit( Database cx, Reference<TransactionLogInfo>
}
Future<Void> Transaction::commitMutations() {
cx->transactionsCommitStarted++;
if(options.readOnly)
return transaction_read_only();
try {
//if this is a read-only transaction return immediately
if( !tr.transaction.write_conflict_ranges.size() && !tr.transaction.mutations.size() ) {
@ -2471,6 +2466,11 @@ Future<Void> Transaction::commitMutations() {
return Void();
}
cx->transactionsCommitStarted++;
if(options.readOnly)
return transaction_read_only();
cx->mutationsPerCommit.addSample(tr.transaction.mutations.size());
cx->bytesPerCommit.addSample(tr.transaction.mutations.expectedSize());

View File

@ -325,7 +325,7 @@ ACTOR Future<StatusObject> clientStatusFetcher(Reference<ClusterConnectionFile>
description += ClusterConnectionFile(f->getFilename()).getConnectionString().toString().c_str();
description += "\nThe current connection string is: ";
description += f->getConnectionString().toString().c_str();
description += "\nVerify cluster file is writable and has not been overwritten externally. To change coordinators without manual intervention, the cluster file and its containing folder must be writable by all servers and clients. If a majority of the coordinators referenced by the old connection string are lost, the database will stop working until the correct cluster file is distributed to all processes.";
description += "\nVerify the cluster file and its parent directory are writable and that the cluster file has not been overwritten externally. To change coordinators without manual intervention, the cluster file and its containing folder must be writable by all servers and clients. If a majority of the coordinators referenced by the old connection string are lost, the database will stop working until the correct cluster file is distributed to all processes.";
messages->push_back(makeMessage("incorrect_cluster_file_contents", description.c_str()));
}

View File

@ -233,7 +233,7 @@ Future<Void> AsyncFileCached::quiesce() {
AsyncFileCached::~AsyncFileCached() {
while ( !pages.empty() ) {
auto ok = pages.begin()->second->evict();
ASSERT( ok );
ASSERT_ABORT( ok );
}
openFiles.erase( filename );
}

View File

@ -450,7 +450,7 @@ struct AFCPage : public EvictablePage, public FastAllocated<AFCPage> {
virtual ~AFCPage() {
clearDirty();
ASSERT( flushableIndex == -1 );
ASSERT_ABORT( flushableIndex == -1 );
}
void setDirty() {

View File

@ -57,6 +57,10 @@ BlobStoreEndpoint::BlobKnobs::BlobKnobs() {
request_timeout = CLIENT_KNOBS->BLOBSTORE_REQUEST_TIMEOUT;
requests_per_second = CLIENT_KNOBS->BLOBSTORE_REQUESTS_PER_SECOND;
concurrent_requests = CLIENT_KNOBS->BLOBSTORE_CONCURRENT_REQUESTS;
list_requests_per_second = CLIENT_KNOBS->BLOBSTORE_LIST_REQUESTS_PER_SECOND;
write_requests_per_second = CLIENT_KNOBS->BLOBSTORE_WRITE_REQUESTS_PER_SECOND;
read_requests_per_second = CLIENT_KNOBS->BLOBSTORE_READ_REQUESTS_PER_SECOND;
delete_requests_per_second = CLIENT_KNOBS->BLOBSTORE_DELETE_REQUESTS_PER_SECOND;
multipart_max_part_size = CLIENT_KNOBS->BLOBSTORE_MULTIPART_MAX_PART_SIZE;
multipart_min_part_size = CLIENT_KNOBS->BLOBSTORE_MULTIPART_MIN_PART_SIZE;
concurrent_uploads = CLIENT_KNOBS->BLOBSTORE_CONCURRENT_UPLOADS;
@ -79,6 +83,10 @@ bool BlobStoreEndpoint::BlobKnobs::set(StringRef name, int value) {
TRY_PARAM(request_tries, rt);
TRY_PARAM(request_timeout, rto);
TRY_PARAM(requests_per_second, rps);
TRY_PARAM(list_requests_per_second, lrps);
TRY_PARAM(write_requests_per_second, wrps);
TRY_PARAM(read_requests_per_second, rrps);
TRY_PARAM(delete_requests_per_second, drps);
TRY_PARAM(concurrent_requests, cr);
TRY_PARAM(multipart_max_part_size, maxps);
TRY_PARAM(multipart_min_part_size, minps);
@ -107,6 +115,10 @@ std::string BlobStoreEndpoint::BlobKnobs::getURLParameters() const {
_CHECK_PARAM(request_tries, rt);
_CHECK_PARAM(request_timeout, rto);
_CHECK_PARAM(requests_per_second, rps);
_CHECK_PARAM(list_requests_per_second, lrps);
_CHECK_PARAM(write_requests_per_second, wrps);
_CHECK_PARAM(read_requests_per_second, rrps);
_CHECK_PARAM(delete_requests_per_second, drps);
_CHECK_PARAM(concurrent_requests, cr);
_CHECK_PARAM(multipart_max_part_size, maxps);
_CHECK_PARAM(multipart_min_part_size, minps);
@ -195,6 +207,8 @@ std::string BlobStoreEndpoint::getResourceURL(std::string resource) {
}
ACTOR Future<bool> objectExists_impl(Reference<BlobStoreEndpoint> b, std::string bucket, std::string object) {
Void _ = wait(b->requestRateRead->getAllowance(1));
std::string resource = std::string("/") + bucket + "/" + object;
HTTP::Headers headers;
@ -207,6 +221,8 @@ Future<bool> BlobStoreEndpoint::objectExists(std::string const &bucket, std::str
}
ACTOR Future<Void> deleteObject_impl(Reference<BlobStoreEndpoint> b, std::string bucket, std::string object) {
Void _ = wait(b->requestRateDelete->getAllowance(1));
std::string resource = std::string("/") + bucket + "/" + object;
HTTP::Headers headers;
Reference<HTTP::Response> r = wait(b->doRequest("DELETE", resource, headers, NULL, 0, {200, 204, 404}));
@ -273,9 +289,10 @@ Future<Void> BlobStoreEndpoint::deleteRecursively(std::string const &bucket, std
}
ACTOR Future<Void> createBucket_impl(Reference<BlobStoreEndpoint> b, std::string bucket) {
Void _ = wait(b->requestRateWrite->getAllowance(1));
std::string resource = std::string("/") + bucket;
HTTP::Headers headers;
Reference<HTTP::Response> r = wait(b->doRequest("PUT", resource, headers, NULL, 0, {200, 409}));
return Void();
}
@ -285,6 +302,8 @@ Future<Void> BlobStoreEndpoint::createBucket(std::string const &bucket) {
}
ACTOR Future<int64_t> objectSize_impl(Reference<BlobStoreEndpoint> b, std::string bucket, std::string object) {
Void _ = wait(b->requestRateRead->getAllowance(1));
std::string resource = std::string("/") + bucket + "/" + object;
HTTP::Headers headers;
@ -789,6 +808,8 @@ void BlobStoreEndpoint::setAuthHeaders(std::string const &verb, std::string cons
}
ACTOR Future<std::string> readEntireFile_impl(Reference<BlobStoreEndpoint> bstore, std::string bucket, std::string object) {
Void _ = wait(bstore->requestRateRead->getAllowance(1));
std::string resource = std::string("/") + bucket + "/" + object;
HTTP::Headers headers;
Reference<HTTP::Response> r = wait(bstore->doRequest("GET", resource, headers, NULL, 0, {200, 404}));
@ -805,6 +826,7 @@ ACTOR Future<Void> writeEntireFileFromBuffer_impl(Reference<BlobStoreEndpoint> b
if(contentLen > bstore->knobs.multipart_max_part_size)
throw file_too_large();
Void _ = wait(bstore->requestRateWrite->getAllowance(1));
Void _ = wait(bstore->concurrentUploads.take());
state FlowLock::Releaser uploadReleaser(bstore->concurrentUploads, 1);
@ -856,6 +878,8 @@ Future<Void> BlobStoreEndpoint::writeEntireFileFromBuffer(std::string const &buc
ACTOR Future<int> readObject_impl(Reference<BlobStoreEndpoint> bstore, std::string bucket, std::string object, void *data, int length, int64_t offset) {
if(length <= 0)
return 0;
Void _ = wait(bstore->requestRateRead->getAllowance(1));
std::string resource = std::string("/") + bucket + "/" + object;
HTTP::Headers headers;
headers["Range"] = format("bytes=%lld-%lld", offset, offset + length - 1);
@ -874,6 +898,8 @@ Future<int> BlobStoreEndpoint::readObject(std::string const &bucket, std::string
}
ACTOR static Future<std::string> beginMultiPartUpload_impl(Reference<BlobStoreEndpoint> bstore, std::string bucket, std::string object) {
Void _ = wait(bstore->requestRateWrite->getAllowance(1));
std::string resource = std::string("/") + bucket + "/" + object + "?uploads";
HTTP::Headers headers;
Reference<HTTP::Response> r = wait(bstore->doRequest("POST", resource, headers, NULL, 0, {200}));
@ -892,6 +918,7 @@ Future<std::string> BlobStoreEndpoint::beginMultiPartUpload(std::string const &b
}
ACTOR Future<std::string> uploadPart_impl(Reference<BlobStoreEndpoint> bstore, std::string bucket, std::string object, std::string uploadID, unsigned int partNumber, UnsentPacketQueue *pContent, int contentLen, std::string contentMD5) {
Void _ = wait(bstore->requestRateWrite->getAllowance(1));
Void _ = wait(bstore->concurrentUploads.take());
state FlowLock::Releaser uploadReleaser(bstore->concurrentUploads, 1);
@ -921,6 +948,7 @@ Future<std::string> BlobStoreEndpoint::uploadPart(std::string const &bucket, std
ACTOR Future<Void> finishMultiPartUpload_impl(Reference<BlobStoreEndpoint> bstore, std::string bucket, std::string object, std::string uploadID, BlobStoreEndpoint::MultiPartSetT parts) {
state UnsentPacketQueue part_list(); // NonCopyable state var so must be declared at top of actor
Void _ = wait(bstore->requestRateWrite->getAllowance(1));
std::string manifest = "<CompleteMultipartUpload>";
for(auto &p : parts)

View File

@ -55,6 +55,10 @@ public:
request_tries,
request_timeout,
requests_per_second,
list_requests_per_second,
write_requests_per_second,
read_requests_per_second,
delete_requests_per_second,
multipart_max_part_size,
multipart_min_part_size,
concurrent_requests,
@ -78,6 +82,10 @@ public:
"request_tries (or rt) Number of times to try each request until a parseable HTTP response other than 429 is received.",
"request_timeout (or rto) Number of seconds to wait for a request to succeed after a connection is established.",
"requests_per_second (or rps) Max number of requests to start per second.",
"list_requests_per_second (or lrps) Max number of list requests to start per second.",
"write_requests_per_second (or wrps) Max number of write requests to start per second.",
"read_requests_per_second (or rrps) Max number of read requests to start per second.",
"delete_requests_per_second (or drps) Max number of delete requests to start per second.",
"multipart_max_part_size (or maxps) Max part size for multipart uploads.",
"multipart_min_part_size (or minps) Min part size for multipart uploads.",
"concurrent_requests (or cr) Max number of total requests in progress at once, regardless of operation-specific concurrency limits.",
@ -97,6 +105,10 @@ public:
BlobStoreEndpoint(std::string const &host, std::string service, std::string const &key, std::string const &secret, BlobKnobs const &knobs = BlobKnobs())
: host(host), service(service), key(key), secret(secret), lookupSecret(secret.empty()), knobs(knobs),
requestRate(new SpeedLimit(knobs.requests_per_second, 1)),
requestRateList(new SpeedLimit(knobs.list_requests_per_second, 1)),
requestRateWrite(new SpeedLimit(knobs.write_requests_per_second, 1)),
requestRateRead(new SpeedLimit(knobs.read_requests_per_second, 1)),
requestRateDelete(new SpeedLimit(knobs.delete_requests_per_second, 1)),
sendRate(new SpeedLimit(knobs.max_send_bytes_per_second, 1)),
recvRate(new SpeedLimit(knobs.max_recv_bytes_per_second, 1)),
concurrentRequests(knobs.concurrent_requests),
@ -135,6 +147,10 @@ public:
// Speed and concurrency limits
Reference<IRateControl> requestRate;
Reference<IRateControl> requestRateList;
Reference<IRateControl> requestRateWrite;
Reference<IRateControl> requestRateRead;
Reference<IRateControl> requestRateDelete;
Reference<IRateControl> sendRate;
Reference<IRateControl> recvRate;
FlowLock concurrentRequests;
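
Each operation type now waits on its own rate control before issuing a request (the requestRateRead/Write/List/Delete getAllowance calls in the hunks above). A generic blocking token-bucket sketch of that pattern; this is not FDB's actual SpeedLimit/IRateControl implementation:

#include <algorithm>
#include <chrono>
#include <thread>

class TokenBucket {
public:
    explicit TokenBucket(double perSecond)
        : perSecond_(perSecond), tokens_(perSecond), last_(std::chrono::steady_clock::now()) {}
    void getAllowance(double n) {
        while (true) {
            auto now = std::chrono::steady_clock::now();
            // Refill proportionally to elapsed time, capped at one second of burst.
            tokens_ = std::min(perSecond_, tokens_ + perSecond_ * std::chrono::duration<double>(now - last_).count());
            last_ = now;
            if (tokens_ >= n) { tokens_ -= n; return; }
            std::this_thread::sleep_for(std::chrono::milliseconds(10)); // wait for refill
        }
    }
private:
    double perSecond_;
    double tokens_;
    std::chrono::steady_clock::time_point last_;
};

int main() {
    // One bucket per operation class, mirroring the new knobs (values made up).
    TokenBucket readRate(100), writeRate(50);
    for (int i = 0; i < 5; i++) readRate.getAllowance(1); // taken before each read request
    writeRate.getAllowance(1);                            // taken before each write request
}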

View File

@ -323,7 +323,7 @@ Reference<ITLSPolicy> TLSOptions::get_policy(PolicyType type) {
if (platform::getEnvironmentVar("FDB_TLS_VERIFY_PEERS", verifyPeerString))
set_verify_peers({ verifyPeerString });
else
set_verify_peers({ std::string("Check.Valid=0")});
set_verify_peers({ std::string("Check.Valid=1")});
}
if (!ca_set) {
std::string caFile;

View File

@ -120,6 +120,7 @@ static void simInitTLS() {
Reference<TLSOptions> options( new TLSOptions );
options->set_cert_data( certBytes );
options->set_key_data( certBytes );
options->set_verify_peers(std::vector<std::string>(1, "Check.Valid=0"));
options->register_network();
}

View File

@ -1558,7 +1558,7 @@ static StatusObject faultToleranceStatusFetcher(DatabaseConfiguration configurat
static std::string getIssueDescription(std::string name) {
if(name == "incorrect_cluster_file_contents") {
return "Cluster file contents do not match current cluster connection string. Verify cluster file is writable and has not been overwritten externally.";
return "Cluster file contents do not match current cluster connection string. Verify the cluster file and its parent directory are writable and that the cluster file has not been overwritten externally.";
}
// FIXME: name and description will be the same unless the message is 'incorrect_cluster_file_contents', which is currently the only possible message

View File

@ -425,7 +425,7 @@ struct LogData : NonCopyable, public ReferenceCounted<LogData> {
tLogData->bytesDurable += bytesInput.getValue() - bytesDurable.getValue();
TraceEvent("TLogBytesWhenRemoved", tli.id()).detail("sharedBytesInput", tLogData->bytesInput).detail("sharedBytesDurable", tLogData->bytesDurable).detail("localBytesInput", bytesInput.getValue()).detail("localBytesDurable", bytesDurable.getValue());
ASSERT(tLogData->bytesDurable <= tLogData->bytesInput);
ASSERT_ABORT(tLogData->bytesDurable <= tLogData->bytesInput);
endRole(tli.id(), "TLog", "Error", true);
if(!tLogData->terminated) {

View File

@ -460,7 +460,7 @@ static int asyncDeviceCharacteristics(sqlite3_file *pFile){ return 0; }
//resulting in a locking error
auto itr = SharedMemoryInfo::table.find(filename);
if(itr != SharedMemoryInfo::table.end()) {
ASSERT(itr->second.refcount == 0);
ASSERT_ABORT(itr->second.refcount == 0);
itr->second.cleanup();
}
}

View File

@ -964,7 +964,7 @@ vector<TestSpec> readTests( ifstream& ifs ) {
if(value == "true")
spec.phases = TestWorkload::CHECK;
} else if( attrib == "StderrSeverity" ) {
TraceEvent("StderrSeverity").detail("newSeverity", value);
TraceEvent("StderrSeverity").detail("NewSeverity", value);
}
else if (attrib == "ClientInfoLogging") {
if (value == "false") {

View File

@ -224,19 +224,23 @@ struct ClientTransactionProfileCorrectnessWorkload : TestWorkload {
state KeySelector begin = firstGreaterOrEqual(CLIENT_LATENCY_INFO_PREFIX.withPrefix(fdbClientInfoPrefixRange.begin));
state KeySelector end = firstGreaterOrEqual(strinc(begin.getKey()));
loop {
state int keysLimit = 10;
state Transaction tr(cx);
loop {
try {
tr.setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
tr.setOption(FDBTransactionOptions::LOCK_AWARE);
state Standalone<RangeResultRef> kvRange = wait(tr.getRange(begin, end, 10));
state Standalone<RangeResultRef> kvRange = wait(tr.getRange(begin, end, keysLimit));
if (kvRange.empty())
break;
txInfoEntries.arena().dependsOn(kvRange.arena());
txInfoEntries.append(txInfoEntries.arena(), kvRange.begin(), kvRange.size());
begin = firstGreaterThan(kvRange.back().key);
tr.reset();
}
catch (Error& e) {
if (e.code() == error_code_transaction_too_old)
keysLimit = std::max(1, keysLimit / 2);
Void _ = wait(tr.onError(e));
}
}

View File

@ -35,7 +35,7 @@ class ThreadPool : public IThreadPool, public ReferenceCounted<ThreadPool> {
Event stopped;
static thread_local IThreadPoolReceiver* threadUserObject;
explicit Thread(ThreadPool *pool, IThreadPoolReceiver *userObject) : pool(pool), userObject(userObject) {}
~Thread() { ASSERT(!userObject); }
~Thread() { ASSERT_ABORT(!userObject); }
void run() {
deprioritizeThread();

View File

@ -874,13 +874,13 @@ namespace actorcompiler
case "\r\n": LineCount++; break;
case "\n": LineCount++; break;
}
if (tokens[i].Value.StartsWith("/*")) LineCount += tokens[i].Value.Count(c=>c=='\n');
if (BraceDepth < 0) throw new Error(LineCount, "Mismatched braces");
if (ParenDepth < 0) throw new Error(LineCount, "Mismatched parenthesis");
tokens[i].Position = i;
tokens[i].SourceLine = LineCount;
tokens[i].BraceDepth = BraceDepth;
tokens[i].ParenDepth = ParenDepth;
if (tokens[i].Value.StartsWith("/*")) LineCount += tokens[i].Value.Count(c=>c=='\n');
switch (tokens[i].Value)
{
case "{": BraceDepth++; if (BraceDepth==1) lastBrace = tokens[i]; break;

View File

@ -32,7 +32,7 @@
<Wix xmlns='http://schemas.microsoft.com/wix/2006/wi'>
<Product Name='$(var.Title)'
Id='{D1D7D9A5-3BE3-4601-A02A-271175FA1589}'
Id='{E60C53B7-DA5E-49BA-9F5F-8FC668D1DD4C}'
UpgradeCode='{A95EA002-686E-4164-8356-C715B7F8B1C8}'
Version='$(var.Version)'
Manufacturer='$(var.Manufacturer)'

View File

@ -1,7 +1,7 @@
<?xml version="1.0"?>
<Project xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<PropertyGroup>
<Version>5.2.2</Version>
<Version>5.2.6</Version>
<PackageName>5.2</PackageName>
</PropertyGroup>
</Project>