Merge branch 'release-6.2'

# Conflicts:
#	CMakeLists.txt
#	bindings/c/test/mako/mako.c
#	documentation/sphinx/source/release-notes.rst
#	fdbbackup/backup.actor.cpp
#	fdbclient/NativeAPI.actor.cpp
#	fdbclient/NativeAPI.actor.h
#	fdbserver/DataDistributionQueue.actor.cpp
#	fdbserver/Knobs.cpp
#	fdbserver/Knobs.h
#	fdbserver/LogRouter.actor.cpp
#	fdbserver/SkipList.cpp
#	fdbserver/fdbserver.actor.cpp
#	flow/CMakeLists.txt
#	flow/Knobs.cpp
#	flow/Knobs.h
#	flow/flow.vcxproj
#	flow/flow.vcxproj.filters
#	versions.target
Commit 303df197cf by Evan Tschannen, 2020-03-06 18:22:46 -08:00
54 changed files with 1545 additions and 747 deletions


@@ -25,6 +25,7 @@
 #include "flow/DeterministicRandom.h"
 #include "flow/SystemMonitor.h"
+#include "flow/TLSConfig.actor.h"
 #include "flow/actorcompiler.h" // This must be the last #include.
 using namespace FDB;
@@ -82,7 +83,7 @@ void fdb_flow_test() {
 fdb->setupNetwork();
 startThread(networkThread, fdb);
-g_network = newNet2(false);
+g_network = newNet2(TLSConfig());
 openTraceFile(NetworkAddress(), 1000000, 1000000, ".");
 systemMonitor();


@@ -28,6 +28,7 @@
 #include "bindings/flow/FDBLoanerTypes.h"
 #include "fdbrpc/fdbrpc.h"
 #include "flow/DeterministicRandom.h"
+#include "flow/TLSConfig.actor.h"
 #include "flow/actorcompiler.h" // This must be the last #include.
 // Otherwise we have to type setupNetwork(), FDB::open(), etc.
@@ -1748,7 +1749,7 @@ ACTOR void startTest(std::string clusterFilename, StringRef prefix, int apiVersi
 populateOpsThatCreateDirectories(); // FIXME
 // This is "our" network
-g_network = newNet2(false);
+g_network = newNet2(TLSConfig());
 ASSERT(!API::isAPIVersionSelected());
 try {
@@ -1791,7 +1792,7 @@ ACTOR void startTest(std::string clusterFilename, StringRef prefix, int apiVersi
 ACTOR void _test_versionstamp() {
 try {
-g_network = newNet2(false);
+g_network = newNet2(TLSConfig());
 API *fdb = FDB::API::selectAPIVersion(700);


@@ -10,38 +10,38 @@ macOS
 The macOS installation package is supported on macOS 10.7+. It includes the client and (optionally) the server.
-* `FoundationDB-6.2.17.pkg <https://www.foundationdb.org/downloads/6.2.17/macOS/installers/FoundationDB-6.2.17.pkg>`_
+* `FoundationDB-6.2.18.pkg <https://www.foundationdb.org/downloads/6.2.18/macOS/installers/FoundationDB-6.2.18.pkg>`_
 Ubuntu
 ------
 The Ubuntu packages are supported on 64-bit Ubuntu 12.04+, but beware of the Linux kernel bug in Ubuntu 12.x.
-* `foundationdb-clients-6.2.17-1_amd64.deb <https://www.foundationdb.org/downloads/6.2.17/ubuntu/installers/foundationdb-clients_6.2.17-1_amd64.deb>`_
+* `foundationdb-clients-6.2.18-1_amd64.deb <https://www.foundationdb.org/downloads/6.2.18/ubuntu/installers/foundationdb-clients_6.2.18-1_amd64.deb>`_
-* `foundationdb-server-6.2.17-1_amd64.deb <https://www.foundationdb.org/downloads/6.2.17/ubuntu/installers/foundationdb-server_6.2.17-1_amd64.deb>`_ (depends on the clients package)
+* `foundationdb-server-6.2.18-1_amd64.deb <https://www.foundationdb.org/downloads/6.2.18/ubuntu/installers/foundationdb-server_6.2.18-1_amd64.deb>`_ (depends on the clients package)
 RHEL/CentOS EL6
 ---------------
 The RHEL/CentOS EL6 packages are supported on 64-bit RHEL/CentOS 6.x.
-* `foundationdb-clients-6.2.17-1.el6.x86_64.rpm <https://www.foundationdb.org/downloads/6.2.17/rhel6/installers/foundationdb-clients-6.2.17-1.el6.x86_64.rpm>`_
+* `foundationdb-clients-6.2.18-1.el6.x86_64.rpm <https://www.foundationdb.org/downloads/6.2.18/rhel6/installers/foundationdb-clients-6.2.18-1.el6.x86_64.rpm>`_
-* `foundationdb-server-6.2.17-1.el6.x86_64.rpm <https://www.foundationdb.org/downloads/6.2.17/rhel6/installers/foundationdb-server-6.2.17-1.el6.x86_64.rpm>`_ (depends on the clients package)
+* `foundationdb-server-6.2.18-1.el6.x86_64.rpm <https://www.foundationdb.org/downloads/6.2.18/rhel6/installers/foundationdb-server-6.2.18-1.el6.x86_64.rpm>`_ (depends on the clients package)
 RHEL/CentOS EL7
 ---------------
 The RHEL/CentOS EL7 packages are supported on 64-bit RHEL/CentOS 7.x.
-* `foundationdb-clients-6.2.17-1.el7.x86_64.rpm <https://www.foundationdb.org/downloads/6.2.17/rhel7/installers/foundationdb-clients-6.2.17-1.el7.x86_64.rpm>`_
+* `foundationdb-clients-6.2.18-1.el7.x86_64.rpm <https://www.foundationdb.org/downloads/6.2.18/rhel7/installers/foundationdb-clients-6.2.18-1.el7.x86_64.rpm>`_
-* `foundationdb-server-6.2.17-1.el7.x86_64.rpm <https://www.foundationdb.org/downloads/6.2.17/rhel7/installers/foundationdb-server-6.2.17-1.el7.x86_64.rpm>`_ (depends on the clients package)
+* `foundationdb-server-6.2.18-1.el7.x86_64.rpm <https://www.foundationdb.org/downloads/6.2.18/rhel7/installers/foundationdb-server-6.2.18-1.el7.x86_64.rpm>`_ (depends on the clients package)
 Windows
 -------
 The Windows installer is supported on 64-bit Windows XP and later. It includes the client and (optionally) the server.
-* `foundationdb-6.2.17-x64.msi <https://www.foundationdb.org/downloads/6.2.17/windows/installers/foundationdb-6.2.17-x64.msi>`_
+* `foundationdb-6.2.18-x64.msi <https://www.foundationdb.org/downloads/6.2.18/windows/installers/foundationdb-6.2.18-x64.msi>`_
 API Language Bindings
 =====================
@@ -58,18 +58,18 @@ On macOS and Windows, the FoundationDB Python API bindings are installed as part
 If you need to use the FoundationDB Python API from other Python installations or paths, use the Python package manager ``pip`` (``pip install foundationdb``) or download the Python package:
-* `foundationdb-6.2.17.tar.gz <https://www.foundationdb.org/downloads/6.2.17/bindings/python/foundationdb-6.2.17.tar.gz>`_
+* `foundationdb-6.2.18.tar.gz <https://www.foundationdb.org/downloads/6.2.18/bindings/python/foundationdb-6.2.18.tar.gz>`_
 Ruby 1.9.3/2.0.0+
 -----------------
-* `fdb-6.2.17.gem <https://www.foundationdb.org/downloads/6.2.17/bindings/ruby/fdb-6.2.17.gem>`_
+* `fdb-6.2.18.gem <https://www.foundationdb.org/downloads/6.2.18/bindings/ruby/fdb-6.2.18.gem>`_
 Java 8+
 -------
-* `fdb-java-6.2.17.jar <https://www.foundationdb.org/downloads/6.2.17/bindings/java/fdb-java-6.2.17.jar>`_
+* `fdb-java-6.2.18.jar <https://www.foundationdb.org/downloads/6.2.18/bindings/java/fdb-java-6.2.18.jar>`_
-* `fdb-java-6.2.17-javadoc.jar <https://www.foundationdb.org/downloads/6.2.17/bindings/java/fdb-java-6.2.17-javadoc.jar>`_
+* `fdb-java-6.2.18-javadoc.jar <https://www.foundationdb.org/downloads/6.2.18/bindings/java/fdb-java-6.2.18-javadoc.jar>`_
 Go 1.11+
 --------


@@ -572,6 +572,7 @@
 "missing_data",
 "healing",
 "optimizing_team_collections",
+"healthy_populating_region",
 "healthy_repartitioning",
 "healthy_removing_server",
 "healthy_rebalancing",
@@ -606,6 +607,7 @@
 "missing_data",
 "healing",
 "optimizing_team_collections",
+"healthy_populating_region",
 "healthy_repartitioning",
 "healthy_removing_server",
 "healthy_rebalancing",


@@ -2,13 +2,40 @@
 Release Notes
 #############
+6.2.18
+======
+Fixes
+-----
+* When configuring a cluster to usable_regions=2, data distribution would not react to machine failures while copying data to the remote region. `(PR #2774) <https://github.com/apple/foundationdb/pull/2774>`_.
+* When a cluster is configured with usable_regions=2, data distribution could push a cluster into saturation by relocating too many shards simultaneously. `(PR #2776) <https://github.com/apple/foundationdb/pull/2776>`_.
+* Do not allow the cluster controller to mark any process as failed within 30 seconds of startup. `(PR #2780) <https://github.com/apple/foundationdb/pull/2780>`_.
+* Backup could not establish TLS connections (broken in 6.2.16). `(PR #2775) <https://github.com/apple/foundationdb/pull/2775>`_.
+* Certificates were not refreshed automatically (broken in 6.2.16). `(PR #2781) <https://github.com/apple/foundationdb/pull/2781>`_.
+Performance
+-----------
+* Improved the efficiency of establishing large numbers of network connections. `(PR #2777) <https://github.com/apple/foundationdb/pull/2777>`_.
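This corresponds to splitting connection establishment from the TLS handshake, as the BlobStore change later in this diff shows; a minimal sketch of the resulting call pattern inside a flow ACTOR, where host, service, and isSecure are placeholder variables rather than names from this change:

    // Establish the TCP connection first...
    state Reference<IConnection> conn = wait(INetworkConnections::net()->connect(host, service, isSecure));
    // ...then perform the TLS handshake as a separate asynchronous step,
    // so many handshakes can be in flight without blocking connect().
    wait(conn->connectHandshake());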
+Features
+--------
+* Add support for setting knobs to modify the behavior of fdbcli. `(PR #2773) <https://github.com/apple/foundationdb/pull/2773>`_.
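For illustration, the new option is passed on the fdbcli command line as ``--knob_<name> <value>`` (the knob name should be lowercase). A hypothetical invocation, with the knob and value chosen only as an example and not taken from this change:

    fdbcli -C fdb.cluster --knob_min_trace_severity 10 --exec status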
+Other Changes
+-------------
+* Setting invalid knobs in backup and DR binaries is now a warning instead of an error and will not result in the application being terminated. `(PR #2773) <https://github.com/apple/foundationdb/pull/2773>`_.
 6.2.17
 ======
 Fixes
 -----
-* Restored the ability to set TLS configuration using environment variables. `(PR #2755) <https://github.com/apple/foundationdb/pull/2755>`_.
+* Restored the ability to set TLS configuration using environment variables (broken in 6.2.16). `(PR #2755) <https://github.com/apple/foundationdb/pull/2755>`_.
 6.2.16
 ======


@@ -26,6 +26,7 @@
 #include "flow/serialize.h"
 #include "flow/IRandom.h"
 #include "flow/genericactors.actor.h"
+#include "flow/TLSConfig.actor.h"
 #include "fdbclient/FDBTypes.h"
 #include "fdbclient/BackupAgent.actor.h"
@@ -3224,22 +3225,22 @@ int main(int argc, char* argv[]) {
 blobCredentials.push_back(args->OptionArg());
 break;
 #ifndef TLS_DISABLED
-case TLSParams::OPT_TLS_PLUGIN:
+case TLSConfig::OPT_TLS_PLUGIN:
 args->OptionArg();
 break;
-case TLSParams::OPT_TLS_CERTIFICATES:
+case TLSConfig::OPT_TLS_CERTIFICATES:
 tlsCertPath = args->OptionArg();
 break;
-case TLSParams::OPT_TLS_PASSWORD:
+case TLSConfig::OPT_TLS_PASSWORD:
 tlsPassword = args->OptionArg();
 break;
-case TLSParams::OPT_TLS_CA_FILE:
+case TLSConfig::OPT_TLS_CA_FILE:
 tlsCAPath = args->OptionArg();
 break;
-case TLSParams::OPT_TLS_KEY:
+case TLSConfig::OPT_TLS_KEY:
 tlsKeyPath = args->OptionArg();
 break;
-case TLSParams::OPT_TLS_VERIFY_PEERS:
+case TLSConfig::OPT_TLS_VERIFY_PEERS:
 tlsVerifyPeers = args->OptionArg();
 break;
 #endif
@@ -3356,15 +3357,19 @@ int main(int argc, char* argv[]) {
 if (!flowKnobs->setKnob( k->first, k->second ) &&
 !clientKnobs->setKnob( k->first, k->second ))
 {
-fprintf(stderr, "Unrecognized knob option '%s'\n", k->first.c_str());
-return FDB_EXIT_ERROR;
+fprintf(stderr, "WARNING: Unrecognized knob option '%s'\n", k->first.c_str());
+TraceEvent(SevWarnAlways, "UnrecognizedKnobOption").detail("Knob", printable(k->first));
 }
 } catch (Error& e) {
 if (e.code() == error_code_invalid_option_value) {
-fprintf(stderr, "Invalid value '%s' for option '%s'\n", k->second.c_str(), k->first.c_str());
-return FDB_EXIT_ERROR;
+fprintf(stderr, "WARNING: Invalid value '%s' for knob option '%s'\n", k->second.c_str(), k->first.c_str());
+TraceEvent(SevWarnAlways, "InvalidKnobValue").detail("Knob", printable(k->first)).detail("Value", printable(k->second));
 }
+else {
+fprintf(stderr, "ERROR: Failed to set knob option '%s': %s\n", k->first.c_str(), e.what());
+TraceEvent(SevError, "FailedToSetKnob").detail("Knob", printable(k->first)).detail("Value", printable(k->second)).error(e);
+throw;
+}
-throw;
 }
 }


@@ -34,6 +34,7 @@
 #include "flow/DeterministicRandom.h"
 #include "flow/Platform.h"
+#include "flow/TLSConfig.actor.h"
 #include "flow/SimpleOpt.h"
 #include "fdbcli/FlowLineNoise.h"
@@ -68,7 +69,8 @@ enum {
 OPT_NO_STATUS,
 OPT_STATUS_FROM_JSON,
 OPT_VERSION,
-OPT_TRACE_FORMAT
+OPT_TRACE_FORMAT,
+OPT_KNOB
 };
 CSimpleOpt::SOption g_rgOptions[] = { { OPT_CONNFILE, "-C", SO_REQ_SEP },
@@ -86,12 +88,13 @@ CSimpleOpt::SOption g_rgOptions[] = { { OPT_CONNFILE, "-C", SO_REQ_SEP },
 { OPT_VERSION, "--version", SO_NONE },
 { OPT_VERSION, "-v", SO_NONE },
 { OPT_TRACE_FORMAT, "--trace_format", SO_REQ_SEP },
+{ OPT_KNOB, "--knob_", SO_REQ_SEP },
 #ifndef TLS_DISABLED
 TLS_OPTION_FLAGS
 #endif
 SO_END_OF_OPTIONS };
 void printAtCol(const char* text, int col) {
 const char* iter = text;
@@ -422,6 +425,8 @@ static void printProgramUsage(const char* name) {
 #ifndef TLS_DISABLED
 TLS_HELP
 #endif
+" --knob_KNOBNAME KNOBVALUE\n"
+" Changes a knob option. KNOBNAME should be lowercase.\n"
 " -v, --version Print FoundationDB CLI version information and exit.\n"
 " -h, --help Display this help and exit.\n");
 }
@@ -2478,6 +2483,8 @@ struct CLIOptions {
 std::string tlsCAPath;
 std::string tlsPassword;
+std::vector<std::pair<std::string, std::string>> knobs;
 CLIOptions( int argc, char* argv[] )
 : trace(false),
 exit_timeout(0),
@@ -2501,9 +2508,38 @@ struct CLIOptions {
 }
 if (exit_timeout && !exec.present()) {
 fprintf(stderr, "ERROR: --timeout may only be specified with --exec\n");
-exit_code = 1;
+exit_code = FDB_EXIT_ERROR;
 return;
 }
+delete FLOW_KNOBS;
+FlowKnobs* flowKnobs = new FlowKnobs(true);
+FLOW_KNOBS = flowKnobs;
+delete CLIENT_KNOBS;
+ClientKnobs* clientKnobs = new ClientKnobs(true);
+CLIENT_KNOBS = clientKnobs;
+for(auto k=knobs.begin(); k!=knobs.end(); ++k) {
+try {
+if (!flowKnobs->setKnob( k->first, k->second ) &&
+!clientKnobs->setKnob( k->first, k->second ))
+{
+fprintf(stderr, "WARNING: Unrecognized knob option '%s'\n", k->first.c_str());
+TraceEvent(SevWarnAlways, "UnrecognizedKnobOption").detail("Knob", printable(k->first));
+}
+} catch (Error& e) {
+if (e.code() == error_code_invalid_option_value) {
+fprintf(stderr, "WARNING: Invalid value '%s' for knob option '%s'\n", k->second.c_str(), k->first.c_str());
+TraceEvent(SevWarnAlways, "InvalidKnobValue").detail("Knob", printable(k->first)).detail("Value", printable(k->second));
+}
+else {
+fprintf(stderr, "ERROR: Failed to set knob option '%s': %s\n", k->first.c_str(), e.what());
+TraceEvent(SevError, "FailedToSetKnob").detail("Knob", printable(k->first)).detail("Value", printable(k->second)).error(e);
+exit_code = FDB_EXIT_ERROR;
+}
+}
+}
 }
 int processArg(CSimpleOpt& args) {
@@ -2540,22 +2576,22 @@ struct CLIOptions {
 #ifndef TLS_DISABLED
 // TLS Options
-case TLSParams::OPT_TLS_PLUGIN:
+case TLSConfig::OPT_TLS_PLUGIN:
 args.OptionArg();
 break;
-case TLSParams::OPT_TLS_CERTIFICATES:
+case TLSConfig::OPT_TLS_CERTIFICATES:
 tlsCertPath = args.OptionArg();
 break;
-case TLSParams::OPT_TLS_CA_FILE:
+case TLSConfig::OPT_TLS_CA_FILE:
 tlsCAPath = args.OptionArg();
 break;
-case TLSParams::OPT_TLS_KEY:
+case TLSConfig::OPT_TLS_KEY:
 tlsKeyPath = args.OptionArg();
 break;
-case TLSParams::OPT_TLS_PASSWORD:
+case TLSConfig::OPT_TLS_PASSWORD:
 tlsPassword = args.OptionArg();
 break;
-case TLSParams::OPT_TLS_VERIFY_PEERS:
+case TLSConfig::OPT_TLS_VERIFY_PEERS:
 tlsVerifyPeers = args.OptionArg();
 break;
 #endif
@@ -2570,6 +2606,16 @@ struct CLIOptions {
 }
 traceFormat = args.OptionArg();
 break;
+case OPT_KNOB: {
+std::string syn = args.OptionSyntax();
+if (!StringRef(syn).startsWith(LiteralStringRef("--knob_"))) {
+fprintf(stderr, "ERROR: unable to parse knob option '%s'\n", syn.c_str());
+return FDB_EXIT_ERROR;
+}
+syn = syn.substr(7);
+knobs.push_back( std::make_pair( syn, args.OptionArg() ) );
+break;
+}
 case OPT_VERSION:
 printVersion();
 return FDB_EXIT_SUCCESS;


@@ -510,6 +510,7 @@ ACTOR Future<BlobStoreEndpoint::ReusableConnection> connect_impl(Reference<BlobS
 if (service.empty())
 service = b->knobs.secure_connection ? "https" : "http";
 state Reference<IConnection> conn = wait(INetworkConnections::net()->connect(b->host, service, b->knobs.secure_connection ? true : false));
+wait(conn->connectHandshake());
 TraceEvent("BlobStoreEndpointNewConnection").suppressFor(60)
 .detail("RemoteEndpoint", conn->getPeerAddress())


@@ -93,6 +93,7 @@ struct ClientVersionRef {
 }
 ClientVersionRef(Arena &arena, ClientVersionRef const& cv) : clientVersion(arena, cv.clientVersion), sourceVersion(arena, cv.sourceVersion), protocolVersion(arena, cv.protocolVersion) {}
+ClientVersionRef(StringRef clientVersion, StringRef sourceVersion, StringRef protocolVersion) : clientVersion(clientVersion), sourceVersion(sourceVersion), protocolVersion(protocolVersion) {}
 ClientVersionRef(std::string versionString) {
 size_t index = versionString.find(",");
 if(index == versionString.npos) {


@@ -160,12 +160,34 @@ public:
 CounterCollection cc;
 Counter transactionReadVersions;
+Counter transactionReadVersionsCompleted;
+Counter transactionReadVersionBatches;
+Counter transactionBatchReadVersions;
+Counter transactionDefaultReadVersions;
+Counter transactionImmediateReadVersions;
+Counter transactionBatchReadVersionsCompleted;
+Counter transactionDefaultReadVersionsCompleted;
+Counter transactionImmediateReadVersionsCompleted;
 Counter transactionLogicalReads;
 Counter transactionPhysicalReads;
+Counter transactionPhysicalReadsCompleted;
+Counter transactionGetKeyRequests;
+Counter transactionGetValueRequests;
+Counter transactionGetRangeRequests;
+Counter transactionWatchRequests;
+Counter transactionGetAddressesForKeyRequests;
+Counter transactionBytesRead;
+Counter transactionKeysRead;
+Counter transactionMetadataVersionReads;
 Counter transactionCommittedMutations;
 Counter transactionCommittedMutationBytes;
+Counter transactionSetMutations;
+Counter transactionClearMutations;
+Counter transactionAtomicMutations;
 Counter transactionsCommitStarted;
 Counter transactionsCommitCompleted;
+Counter transactionKeyServerLocationRequests;
+Counter transactionKeyServerLocationRequestsCompleted;
 Counter transactionsTooOld;
 Counter transactionsFutureVersions;
 Counter transactionsNotCommitted;


@@ -192,10 +192,10 @@ public:
 int CONSISTENCY_CHECK_RATE_LIMIT_MAX;
 int CONSISTENCY_CHECK_ONE_ROUND_TARGET_COMPLETION_TIME;
 // fdbcli
 int CLI_CONNECT_PARALLELISM;
 double CLI_CONNECT_TIMEOUT;
 ClientKnobs(bool randomize = false);
 };


@@ -686,7 +686,7 @@ void shrinkProxyList( ClientDBInfo& ni, std::vector<UID>& lastProxyUIDs, std::ve
 }
 // Leader is the process that will be elected by coordinators as the cluster controller
-ACTOR Future<MonitorLeaderInfo> monitorProxiesOneGeneration( Reference<ClusterConnectionFile> connFile, Reference<AsyncVar<ClientDBInfo>> clientInfo, MonitorLeaderInfo info, Standalone<VectorRef<ClientVersionRef>> supportedVersions, Key traceLogGroup) {
+ACTOR Future<MonitorLeaderInfo> monitorProxiesOneGeneration( Reference<ClusterConnectionFile> connFile, Reference<AsyncVar<ClientDBInfo>> clientInfo, MonitorLeaderInfo info, Reference<ReferencedObject<Standalone<VectorRef<ClientVersionRef>>>> supportedVersions, Key traceLogGroup) {
 state ClusterConnectionString cs = info.intermediateConnFile->getConnectionString();
 state vector<NetworkAddress> addrs = cs.coordinators();
 state int idx = 0;
@@ -702,7 +702,7 @@ ACTOR Future<MonitorLeaderInfo> monitorProxiesOneGeneration( Reference<ClusterCo
 req.clusterKey = cs.clusterKey();
 req.coordinators = cs.coordinators();
 req.knownClientInfoID = clientInfo->get().id;
-req.supportedVersions = supportedVersions;
+req.supportedVersions = supportedVersions->get();
 req.traceLogGroup = traceLogGroup;
 ClusterConnectionString fileConnectionString;
@@ -755,7 +755,7 @@ ACTOR Future<MonitorLeaderInfo> monitorProxiesOneGeneration( Reference<ClusterCo
 }
 }
-ACTOR Future<Void> monitorProxies( Reference<AsyncVar<Reference<ClusterConnectionFile>>> connFile, Reference<AsyncVar<ClientDBInfo>> clientInfo, Standalone<VectorRef<ClientVersionRef>> supportedVersions, Key traceLogGroup ) {
+ACTOR Future<Void> monitorProxies( Reference<AsyncVar<Reference<ClusterConnectionFile>>> connFile, Reference<AsyncVar<ClientDBInfo>> clientInfo, Reference<ReferencedObject<Standalone<VectorRef<ClientVersionRef>>>> supportedVersions, Key traceLogGroup ) {
 state MonitorLeaderInfo info(connFile->get());
 loop {
 choose {


@@ -57,7 +57,7 @@ Future<Void> monitorLeader( Reference<ClusterConnectionFile> const& connFile, Re
 Future<Void> monitorLeaderForProxies( Value const& key, vector<NetworkAddress> const& coordinators, ClientData* const& clientData );
-Future<Void> monitorProxies( Reference<AsyncVar<Reference<ClusterConnectionFile>>> const& connFile, Reference<AsyncVar<ClientDBInfo>> const& clientInfo, Standalone<VectorRef<ClientVersionRef>> const& supportedVersions, Key const& traceLogGroup );
+Future<Void> monitorProxies( Reference<AsyncVar<Reference<ClusterConnectionFile>>> const& connFile, Reference<AsyncVar<ClientDBInfo>> const& clientInfo, Reference<ReferencedObject<Standalone<VectorRef<ClientVersionRef>>>> const& supportedVersions, Key const& traceLogGroup );
 void shrinkProxyList( ClientDBInfo& ni, std::vector<UID>& lastProxyUIDs, std::vector<MasterProxyInterface>& lastProxies );


@@ -42,7 +42,7 @@
 #include "flow/Knobs.h"
 #include "flow/Platform.h"
 #include "flow/SystemMonitor.h"
-#include "flow/TLSPolicy.h"
+#include "flow/TLSConfig.actor.h"
 #include "flow/UnitTest.h"
 #if defined(CMAKE_BUILD) || !defined(WIN32)
@@ -66,15 +66,21 @@ using std::min;
 using std::pair;
 NetworkOptions networkOptions;
-TLSParams tlsParams;
-static Reference<TLSPolicy> tlsPolicy;
-static void initTLSPolicy() {
-#ifndef TLS_DISABLED
-if (!tlsPolicy) {
-tlsPolicy = Reference<TLSPolicy>(new TLSPolicy(TLSPolicy::Is::CLIENT));
-}
-#endif
+TLSConfig tlsConfig(TLSEndpointType::CLIENT);
+// The default values, TRACE_DEFAULT_ROLL_SIZE and TRACE_DEFAULT_MAX_LOGS_SIZE are located in Trace.h.
+NetworkOptions::NetworkOptions()
+: localAddress(""), clusterFile(""), traceDirectory(Optional<std::string>()),
+traceRollSize(TRACE_DEFAULT_ROLL_SIZE), traceMaxLogsSize(TRACE_DEFAULT_MAX_LOGS_SIZE), traceLogGroup("default"),
+traceFormat("xml"), traceClockSource("now"), slowTaskProfilingEnabled(false) {
+Standalone<VectorRef<ClientVersionRef>> defaultSupportedVersions;
+StringRef sourceVersion = StringRef((const uint8_t*)getSourceVersion(), strlen(getSourceVersion()));
+std::string protocolVersionString = format("%llx", currentProtocolVersion.version());
+defaultSupportedVersions.push_back_deep(defaultSupportedVersions.arena(), ClientVersionRef(LiteralStringRef(FDB_VT_VERSION), sourceVersion, protocolVersionString));
+supportedVersions = ReferencedObject<Standalone<VectorRef<ClientVersionRef>>>::from(defaultSupportedVersions);
 }
 static const Key CLIENT_LATENCY_INFO_PREFIX = LiteralStringRef("client_latency/");
@@ -494,24 +500,25 @@ ACTOR static Future<HealthMetrics> getHealthMetricsActor(DatabaseContext *cx, bo
 Future<HealthMetrics> DatabaseContext::getHealthMetrics(bool detailed = false) {
 return getHealthMetricsActor(this, detailed);
 }
-DatabaseContext::DatabaseContext(Reference<AsyncVar<Reference<ClusterConnectionFile>>> connectionFile,
-Reference<AsyncVar<ClientDBInfo>> clientInfo, Future<Void> clientInfoMonitor,
-TaskPriority taskID, LocalityData const& clientLocality,
-bool enableLocalityLoadBalance, bool lockAware, bool internal, int apiVersion,
-bool switchable)
-: connectionFile(connectionFile), clientInfo(clientInfo), clientInfoMonitor(clientInfoMonitor), taskID(taskID),
-clientLocality(clientLocality), enableLocalityLoadBalance(enableLocalityLoadBalance), lockAware(lockAware),
-apiVersion(apiVersion), switchable(switchable), provisional(false), cc("TransactionMetrics"),
-transactionReadVersions("ReadVersions", cc), transactionLogicalReads("LogicalUncachedReads", cc),
-transactionPhysicalReads("PhysicalReadRequests", cc), transactionCommittedMutations("CommittedMutations", cc),
-transactionCommittedMutationBytes("CommittedMutationBytes", cc), transactionsCommitStarted("CommitStarted", cc),
-transactionsCommitCompleted("CommitCompleted", cc), transactionsTooOld("TooOld", cc),
-transactionsFutureVersions("FutureVersions", cc), transactionsNotCommitted("NotCommitted", cc),
-transactionsMaybeCommitted("MaybeCommitted", cc), transactionsResourceConstrained("ResourceConstrained", cc),
-transactionsThrottled("Throttled", cc), transactionsProcessBehind("ProcessBehind", cc), outstandingWatches(0),
-latencies(1000), readLatencies(1000), commitLatencies(1000), GRVLatencies(1000), mutationsPerCommit(1000),
-bytesPerCommit(1000), mvCacheInsertLocation(0), healthMetricsLastUpdated(0), detailedHealthMetricsLastUpdated(0),
-internal(internal) {
+DatabaseContext::DatabaseContext(
+Reference<AsyncVar<Reference<ClusterConnectionFile>>> connectionFile, Reference<AsyncVar<ClientDBInfo>> clientInfo, Future<Void> clientInfoMonitor,
+TaskPriority taskID, LocalityData const& clientLocality, bool enableLocalityLoadBalance, bool lockAware, bool internal, int apiVersion, bool switchable )
+: connectionFile(connectionFile),clientInfo(clientInfo), clientInfoMonitor(clientInfoMonitor), taskID(taskID), clientLocality(clientLocality), enableLocalityLoadBalance(enableLocalityLoadBalance),
+lockAware(lockAware), apiVersion(apiVersion), switchable(switchable), provisional(false), cc("TransactionMetrics"), transactionReadVersions("ReadVersions", cc),
+transactionReadVersionsCompleted("ReadVersionsCompleted", cc), transactionReadVersionBatches("ReadVersionBatches", cc), transactionBatchReadVersions("BatchPriorityReadVersions", cc),
+transactionDefaultReadVersions("DefaultPriorityReadVersions", cc), transactionImmediateReadVersions("ImmediatePriorityReadVersions", cc),
+transactionBatchReadVersionsCompleted("BatchPriorityReadVersionsCompleted", cc), transactionDefaultReadVersionsCompleted("DefaultPriorityReadVersionsCompleted", cc),
+transactionImmediateReadVersionsCompleted("ImmediatePriorityReadVersionsCompleted", cc), transactionLogicalReads("LogicalUncachedReads", cc), transactionPhysicalReads("PhysicalReadRequests", cc),
+transactionPhysicalReadsCompleted("PhysicalReadRequestsCompleted", cc), transactionGetKeyRequests("GetKeyRequests", cc), transactionGetValueRequests("GetValueRequests", cc),
+transactionGetRangeRequests("GetRangeRequests", cc), transactionWatchRequests("WatchRequests", cc), transactionGetAddressesForKeyRequests("GetAddressesForKeyRequests", cc),
+transactionBytesRead("BytesRead", cc), transactionKeysRead("KeysRead", cc), transactionMetadataVersionReads("MetadataVersionReads", cc), transactionCommittedMutations("CommittedMutations", cc),
+transactionCommittedMutationBytes("CommittedMutationBytes", cc), transactionSetMutations("SetMutations", cc), transactionClearMutations("ClearMutations", cc),
+transactionAtomicMutations("AtomicMutations", cc), transactionsCommitStarted("CommitStarted", cc), transactionsCommitCompleted("CommitCompleted", cc),
+transactionKeyServerLocationRequests("KeyServerLocationRequests", cc), transactionKeyServerLocationRequestsCompleted("KeyServerLocationRequestsCompleted", cc), transactionsTooOld("TooOld", cc),
+transactionsFutureVersions("FutureVersions", cc), transactionsNotCommitted("NotCommitted", cc), transactionsMaybeCommitted("MaybeCommitted", cc),
+transactionsResourceConstrained("ResourceConstrained", cc), transactionsThrottled("Throttled", cc), transactionsProcessBehind("ProcessBehind", cc), outstandingWatches(0), latencies(1000), readLatencies(1000), commitLatencies(1000),
+GRVLatencies(1000), mutationsPerCommit(1000), bytesPerCommit(1000), mvCacheInsertLocation(0), healthMetricsLastUpdated(0), detailedHealthMetricsLastUpdated(0), internal(internal)
+{
 dbId = deterministicRandom()->randomUniqueID();
 connected = clientInfo->get().proxies.size() ? Void() : clientInfo->onChange();
@@ -532,17 +539,22 @@ DatabaseContext::DatabaseContext(Reference<AsyncVar<Reference<ClusterConnectionF
 clientStatusUpdater.actor = clientStatusUpdateActor(this);
 }
-DatabaseContext::DatabaseContext(const Error& err)
-: deferredError(err), cc("TransactionMetrics"), transactionReadVersions("ReadVersions", cc),
-transactionLogicalReads("LogicalUncachedReads", cc), transactionPhysicalReads("PhysicalReadRequests", cc),
-transactionCommittedMutations("CommittedMutations", cc),
-transactionCommittedMutationBytes("CommittedMutationBytes", cc), transactionsCommitStarted("CommitStarted", cc),
-transactionsCommitCompleted("CommitCompleted", cc), transactionsTooOld("TooOld", cc),
-transactionsFutureVersions("FutureVersions", cc), transactionsNotCommitted("NotCommitted", cc),
-transactionsMaybeCommitted("MaybeCommitted", cc), transactionsResourceConstrained("ResourceConstrained", cc),
-transactionsThrottled("Throttled", cc), transactionsProcessBehind("ProcessBehind", cc), latencies(1000),
-readLatencies(1000), commitLatencies(1000), GRVLatencies(1000), mutationsPerCommit(1000), bytesPerCommit(1000),
-internal(false) {}
+DatabaseContext::DatabaseContext( const Error &err ) : deferredError(err), cc("TransactionMetrics"), transactionReadVersions("ReadVersions", cc),
+transactionReadVersionsCompleted("ReadVersionsCompleted", cc), transactionReadVersionBatches("ReadVersionBatches", cc), transactionBatchReadVersions("BatchPriorityReadVersions", cc),
+transactionDefaultReadVersions("DefaultPriorityReadVersions", cc), transactionImmediateReadVersions("ImmediatePriorityReadVersions", cc),
+transactionBatchReadVersionsCompleted("BatchPriorityReadVersionsCompleted", cc), transactionDefaultReadVersionsCompleted("DefaultPriorityReadVersionsCompleted", cc),
+transactionImmediateReadVersionsCompleted("ImmediatePriorityReadVersionsCompleted", cc), transactionLogicalReads("LogicalUncachedReads", cc), transactionPhysicalReads("PhysicalReadRequests", cc),
+transactionPhysicalReadsCompleted("PhysicalReadRequestsCompleted", cc), transactionGetKeyRequests("GetKeyRequests", cc), transactionGetValueRequests("GetValueRequests", cc),
+transactionGetRangeRequests("GetRangeRequests", cc), transactionWatchRequests("WatchRequests", cc), transactionGetAddressesForKeyRequests("GetAddressesForKeyRequests", cc),
+transactionBytesRead("BytesRead", cc), transactionKeysRead("KeysRead", cc), transactionMetadataVersionReads("MetadataVersionReads", cc), transactionCommittedMutations("CommittedMutations", cc),
+transactionCommittedMutationBytes("CommittedMutationBytes", cc), transactionSetMutations("SetMutations", cc), transactionClearMutations("ClearMutations", cc),
+transactionAtomicMutations("AtomicMutations", cc), transactionsCommitStarted("CommitStarted", cc), transactionsCommitCompleted("CommitCompleted", cc),
+transactionKeyServerLocationRequests("KeyServerLocationRequests", cc), transactionKeyServerLocationRequestsCompleted("KeyServerLocationRequestsCompleted", cc), transactionsTooOld("TooOld", cc),
+transactionsFutureVersions("FutureVersions", cc), transactionsNotCommitted("NotCommitted", cc), transactionsMaybeCommitted("MaybeCommitted", cc),
+transactionsResourceConstrained("ResourceConstrained", cc), transactionsThrottled("Throttled", cc), transactionsProcessBehind("ProcessBehind", cc), latencies(1000), readLatencies(1000), commitLatencies(1000),
+GRVLatencies(1000), mutationsPerCommit(1000), bytesPerCommit(1000),
+internal(false) {}
 Database DatabaseContext::create(Reference<AsyncVar<ClientDBInfo>> clientInfo, Future<Void> clientInfoMonitor, LocalityData clientLocality, bool enableLocalityLoadBalance, TaskPriority taskID, bool lockAware, int apiVersion, bool switchable) {
 return Database( new DatabaseContext( Reference<AsyncVar<Reference<ClusterConnectionFile>>>(), clientInfo, clientInfoMonitor, taskID, clientLocality, enableLocalityLoadBalance, lockAware, true, apiVersion, switchable ) );
@@ -889,48 +901,40 @@ void setNetworkOption(FDBNetworkOptions::Option option, Optional<StringRef> valu
 break;
 case FDBNetworkOptions::TLS_CERT_PATH:
 validateOptionValue(value, true);
-tlsParams.tlsCertBytes = "";
-tlsParams.tlsCertPath = value.get().toString();
+tlsConfig.setCertificatePath(value.get().toString());
 break;
 case FDBNetworkOptions::TLS_CERT_BYTES: {
 validateOptionValue(value, true);
-tlsParams.tlsCertPath = "";
-tlsParams.tlsCertBytes = value.get().toString();
+tlsConfig.setCertificateBytes(value.get().toString());
 break;
 }
 case FDBNetworkOptions::TLS_CA_PATH: {
 validateOptionValue(value, true);
-tlsParams.tlsCABytes = "";
-tlsParams.tlsCAPath = value.get().toString();
+tlsConfig.setCAPath(value.get().toString());
 break;
 }
 case FDBNetworkOptions::TLS_CA_BYTES: {
 validateOptionValue(value, true);
-tlsParams.tlsCAPath = "";
-tlsParams.tlsCABytes = value.get().toString();
+tlsConfig.setCABytes(value.get().toString());
 break;
 }
 case FDBNetworkOptions::TLS_PASSWORD:
 validateOptionValue(value, true);
-tlsParams.tlsPassword = value.get().toString();
+tlsConfig.setPassword(value.get().toString());
 break;
 case FDBNetworkOptions::TLS_KEY_PATH:
 validateOptionValue(value, true);
-tlsParams.tlsKeyBytes = "";
-tlsParams.tlsKeyPath = value.get().toString();
+tlsConfig.setKeyPath(value.get().toString());
 break;
 case FDBNetworkOptions::TLS_KEY_BYTES: {
 validateOptionValue(value, true);
-tlsParams.tlsKeyPath = "";
-tlsParams.tlsKeyBytes = value.get().toString();
+tlsConfig.setKeyBytes(value.get().toString());
 break;
 }
 case FDBNetworkOptions::TLS_VERIFY_PEERS:
 validateOptionValue(value, true);
-initTLSPolicy();
-#ifndef TLS_DISABLED
-tlsPolicy->set_verify_peers({ value.get().toString() });
-#endif
+tlsConfig.clearVerifyPeers();
+tlsConfig.addVerifyPeers( value.get().toString() );
 break;
 case FDBNetworkOptions::CLIENT_BUGGIFY_ENABLE:
 enableBuggify(true, BuggifyType::Client);
@@ -957,18 +961,19 @@
 ASSERT(g_network);
 ASSERT(value.present());
-networkOptions.supportedVersions.resize(networkOptions.supportedVersions.arena(), 0);
+Standalone<VectorRef<ClientVersionRef>> supportedVersions;
 std::string versionString = value.get().toString();
 size_t index = 0;
 size_t nextIndex = 0;
 while(nextIndex != versionString.npos) {
 nextIndex = versionString.find(';', index);
-networkOptions.supportedVersions.push_back_deep(networkOptions.supportedVersions.arena(), ClientVersionRef(versionString.substr(index, nextIndex-index)));
+supportedVersions.push_back_deep(supportedVersions.arena(), ClientVersionRef(versionString.substr(index, nextIndex-index)));
 index = nextIndex + 1;
 }
-ASSERT(networkOptions.supportedVersions.size() > 0);
+ASSERT(supportedVersions.size() > 0);
+networkOptions.supportedVersions->set(supportedVersions);
 break;
 }
@@ -988,9 +993,7 @@ void setupNetwork(uint64_t transportId, bool useMetrics) {
 if (!networkOptions.logClientInfo.present())
 networkOptions.logClientInfo = true;
-initTLSPolicy();
-g_network = newNet2(false, useMetrics || networkOptions.traceDirectory.present(), tlsPolicy, tlsParams);
+g_network = newNet2(tlsConfig, false, useMetrics || networkOptions.traceDirectory.present());
 FlowTransport::createInstance(true, transportId);
 Net2FileSystem::newFileSystem();
 }
@@ -1165,9 +1168,11 @@ ACTOR Future< pair<KeyRange,Reference<LocationInfo>> > getKeyLocation_internal(
 g_traceBatch.addEvent("TransactionDebug", info.debugID.get().first(), "NativeAPI.getKeyLocation.Before");
 loop {
+ ++cx->transactionKeyServerLocationRequests;
 choose {
 when ( wait( cx->onMasterProxiesChanged() ) ) {}
 when ( GetKeyServerLocationsReply rep = wait( loadBalance( cx->getMasterProxies(info.useProvisionalProxies), &MasterProxyInterface::getKeyServersLocations, GetKeyServerLocationsRequest(key, Optional<KeyRef>(), 100, isBackward, key.arena()), TaskPriority::DefaultPromiseEndpoint ) ) ) {
+ ++cx->transactionKeyServerLocationRequestsCompleted;
 if( info.debugID.present() )
 g_traceBatch.addEvent("TransactionDebug", info.debugID.get().first(), "NativeAPI.getKeyLocation.After");
 ASSERT( rep.results.size() == 1 );
@@ -1202,9 +1207,11 @@ ACTOR Future< vector< pair<KeyRange,Reference<LocationInfo>> > > getKeyRangeLoca
 g_traceBatch.addEvent("TransactionDebug", info.debugID.get().first(), "NativeAPI.getKeyLocations.Before");
 loop {
+ ++cx->transactionKeyServerLocationRequests;
 choose {
 when ( wait( cx->onMasterProxiesChanged() ) ) {}
 when ( GetKeyServerLocationsReply _rep = wait( loadBalance( cx->getMasterProxies(info.useProvisionalProxies), &MasterProxyInterface::getKeyServersLocations, GetKeyServerLocationsRequest(keys.begin, keys.end, limit, reverse, keys.arena()), TaskPriority::DefaultPromiseEndpoint ) ) ) {
+ ++cx->transactionKeyServerLocationRequestsCompleted;
 state GetKeyServerLocationsReply rep = _rep;
 if( info.debugID.present() )
 g_traceBatch.addEvent("TransactionDebug", info.debugID.get().first(), "NativeAPI.getKeyLocations.After");
@@ -1302,8 +1309,6 @@ ACTOR Future<Optional<Value>> getValue( Future<Version> version, Key key, Databa
 state uint64_t startTime;
 state double startTimeD;
 try {
-//GetValueReply r = wait( deterministicRandom()->randomChoice( ssi->get() ).getValue.getReply( GetValueRequest(key,ver) ) );
-//return r.value;
 if( info.debugID.present() ) {
 getValueID = nondeterministicRandom()->randomUniqueID();
@@ -1320,19 +1325,26 @@ ACTOR Future<Optional<Value>> getValue( Future<Version> version, Key key, Databa
 startTimeD = now();
 ++cx->transactionPhysicalReads;
-if (CLIENT_BUGGIFY) {
-throw deterministicRandom()->randomChoice(
-std::vector<Error>{ transaction_too_old(), future_version() });
-}
 state GetValueReply reply;
-choose {
-when(wait(cx->connectionFileChanged())) { throw transaction_too_old(); }
-when(GetValueReply _reply =
-wait(loadBalance(ssi.second, &StorageServerInterface::getValue,
-GetValueRequest(key, ver, getValueID), TaskPriority::DefaultPromiseEndpoint, false,
-cx->enableLocalityLoadBalance ? &cx->queueModel : nullptr))) {
-reply = _reply;
-}
+try {
+if (CLIENT_BUGGIFY) {
+throw deterministicRandom()->randomChoice(
+std::vector<Error>{ transaction_too_old(), future_version() });
+}
+choose {
+when(wait(cx->connectionFileChanged())) { throw transaction_too_old(); }
+when(GetValueReply _reply =
+wait(loadBalance(ssi.second, &StorageServerInterface::getValue,
+GetValueRequest(key, ver, getValueID), TaskPriority::DefaultPromiseEndpoint, false,
+cx->enableLocalityLoadBalance ? &cx->queueModel : nullptr))) {
+reply = _reply;
+}
+}
+ ++cx->transactionPhysicalReadsCompleted;
+}
+catch(Error&) {
+ ++cx->transactionPhysicalReadsCompleted;
+throw;
 }
 double latency = now() - startTimeD;
@@ -1351,6 +1363,9 @@ ACTOR Future<Optional<Value>> getValue( Future<Version> version, Key key, Databa
 .detail("ReqVersion", ver)
 .detail("ReplySize", reply.value.present() ? reply.value.get().size() : -1);*/
 }
+cx->transactionBytesRead += reply.value.present() ? reply.value.get().size() : 0;
+ ++cx->transactionKeysRead;
 return reply.value;
 } catch (Error& e) {
 cx->getValueCompleted->latency = timer_int() - startTime;
@@ -1398,14 +1413,20 @@ ACTOR Future<Key> getKey( Database cx, KeySelector k, Future<Version> version, T
 g_traceBatch.addEvent("TransactionDebug", info.debugID.get().first(), "NativeAPI.getKey.Before"); //.detail("StartKey", k.getKey()).detail("Offset",k.offset).detail("OrEqual",k.orEqual);
 ++cx->transactionPhysicalReads;
 state GetKeyReply reply;
-choose {
-when(wait(cx->connectionFileChanged())) { throw transaction_too_old(); }
-when(GetKeyReply _reply =
-wait(loadBalance(ssi.second, &StorageServerInterface::getKey, GetKeyRequest(k, version.get()),
-TaskPriority::DefaultPromiseEndpoint, false,
-cx->enableLocalityLoadBalance ? &cx->queueModel : nullptr))) {
-reply = _reply;
-}
+try {
+choose {
+when(wait(cx->connectionFileChanged())) { throw transaction_too_old(); }
+when(GetKeyReply _reply =
+wait(loadBalance(ssi.second, &StorageServerInterface::getKey, GetKeyRequest(k, version.get()),
+TaskPriority::DefaultPromiseEndpoint, false,
+cx->enableLocalityLoadBalance ? &cx->queueModel : nullptr))) {
+reply = _reply;
+}
+}
+ ++cx->transactionPhysicalReadsCompleted;
+} catch(Error&) {
+ ++cx->transactionPhysicalReadsCompleted;
+throw;
 }
 if( info.debugID.present() )
 g_traceBatch.addEvent("TransactionDebug", info.debugID.get().first(), "NativeAPI.getKey.After"); //.detail("NextKey",reply.sel.key).detail("Offset", reply.sel.offset).detail("OrEqual", k.orEqual);
@@ -1584,14 +1605,20 @@ ACTOR Future<Standalone<RangeResultRef>> getExactRange( Database cx, Version ver
 }
 ++cx->transactionPhysicalReads;
 state GetKeyValuesReply rep;
-choose {
-when(wait(cx->connectionFileChanged())) { throw transaction_too_old(); }
-when(GetKeyValuesReply _rep =
-wait(loadBalance(locations[shard].second, &StorageServerInterface::getKeyValues, req,
-TaskPriority::DefaultPromiseEndpoint, false,
-cx->enableLocalityLoadBalance ? &cx->queueModel : nullptr))) {
-rep = _rep;
-}
+try {
+choose {
+when(wait(cx->connectionFileChanged())) { throw transaction_too_old(); }
+when(GetKeyValuesReply _rep =
+wait(loadBalance(locations[shard].second, &StorageServerInterface::getKeyValues, req,
+TaskPriority::DefaultPromiseEndpoint, false,
+cx->enableLocalityLoadBalance ? &cx->queueModel : nullptr))) {
+rep = _rep;
+}
+}
+ ++cx->transactionPhysicalReadsCompleted;
+} catch(Error&) {
+ ++cx->transactionPhysicalReadsCompleted;
+throw;
 }
 if( info.debugID.present() )
 g_traceBatch.addEvent("TransactionDebug", info.debugID.get().first(), "NativeAPI.getExactRange.After");
@@ -1740,14 +1767,19 @@ ACTOR Future<Standalone<RangeResultRef>> getRangeFallback( Database cx, Version
 return r;
 }
-void getRangeFinished(Reference<TransactionLogInfo> trLogInfo, double startTime, KeySelector begin, KeySelector end, bool snapshot,
+void getRangeFinished(Database cx, Reference<TransactionLogInfo> trLogInfo, double startTime, KeySelector begin, KeySelector end, bool snapshot,
 Promise<std::pair<Key, Key>> conflictRange, bool reverse, Standalone<RangeResultRef> result)
 {
+int64_t bytes = 0;
+for(const KeyValueRef &kv : result) {
+bytes += kv.key.size() + kv.value.size();
+}
+cx->transactionBytesRead += bytes;
+cx->transactionKeysRead += result.size();
 if( trLogInfo ) {
-int rangeSize = 0;
-for (const KeyValueRef &kv : result.contents())
-rangeSize += kv.key.size() + kv.value.size();
-trLogInfo->addLog(FdbClientLogEvents::EventGetRange(startTime, now()-startTime, rangeSize, begin.getKey(), end.getKey()));
+trLogInfo->addLog(FdbClientLogEvents::EventGetRange(startTime, now()-startTime, bytes, begin.getKey(), end.getKey()));
 }
 if( !snapshot ) {
@@ -1813,7 +1845,7 @@ ACTOR Future<Standalone<RangeResultRef>> getRange( Database cx, Reference<Transa
 loop {
 if( end.getKey() == allKeys.begin && (end.offset < 1 || end.isFirstGreaterOrEqual()) ) {
-getRangeFinished(trLogInfo, startTime, originalBegin, originalEnd, snapshot, conflictRange, reverse, output);
+getRangeFinished(cx, trLogInfo, startTime, originalBegin, originalEnd, snapshot, conflictRange, reverse, output);
 return output;
 }
@@ -1865,12 +1897,21 @@
 }
 ++cx->transactionPhysicalReads;
-if (CLIENT_BUGGIFY) {
-throw deterministicRandom()->randomChoice(std::vector<Error>{
-transaction_too_old(), future_version()
-});
-}
-GetKeyValuesReply rep = wait( loadBalance(beginServer.second, &StorageServerInterface::getKeyValues, req, TaskPriority::DefaultPromiseEndpoint, false, cx->enableLocalityLoadBalance ? &cx->queueModel : NULL ) );
+ ++cx->transactionGetRangeRequests;
+state GetKeyValuesReply rep;
+try {
+if (CLIENT_BUGGIFY) {
+throw deterministicRandom()->randomChoice(std::vector<Error>{
+transaction_too_old(), future_version()
+});
+}
+GetKeyValuesReply _rep = wait( loadBalance(beginServer.second, &StorageServerInterface::getKeyValues, req, TaskPriority::DefaultPromiseEndpoint, false, cx->enableLocalityLoadBalance ? &cx->queueModel : NULL ) );
+rep = _rep;
+ ++cx->transactionPhysicalReadsCompleted;
+} catch(Error&) {
+ ++cx->transactionPhysicalReadsCompleted;
+throw;
+}
 if( info.debugID.present() ) {
 g_traceBatch.addEvent("TransactionDebug", info.debugID.get().first(), "NativeAPI.getRange.After");//.detail("SizeOf", rep.data.size());
@@ -1906,7 +1947,7 @@ ACTOR Future<Standalone<RangeResultRef>> getRange( Database cx, Reference<Transa
 if( BUGGIFY && limits.hasByteLimit() && output.size() > std::max(1, originalLimits.minRows) ) {
 output.more = true;
 output.resize(output.arena(), deterministicRandom()->randomInt(std::max(1,originalLimits.minRows),output.size()));
-getRangeFinished(trLogInfo, startTime, originalBegin, originalEnd, snapshot, conflictRange, reverse, output);
+getRangeFinished(cx, trLogInfo, startTime, originalBegin, originalEnd, snapshot, conflictRange, reverse, output);
 return output;
 }
@@ -1915,7 +1956,7 @@ ACTOR Future<Standalone<RangeResultRef>> getRange( Database cx, Reference<Transa
 output.readThrough = reverse ? shard.begin : shard.end;
 }
-getRangeFinished(trLogInfo, startTime, originalBegin, originalEnd, snapshot, conflictRange, reverse, output);
+getRangeFinished(cx, trLogInfo, startTime, originalBegin, originalEnd, snapshot, conflictRange, reverse, output);
 return output;
 }
@@ -1929,7 +1970,7 @@ ACTOR Future<Standalone<RangeResultRef>> getRange( Database cx, Reference<Transa
 }
 output.more = modifiedSelectors || limits.isReached() || rep.more;
-getRangeFinished(trLogInfo, startTime, originalBegin, originalEnd, snapshot, conflictRange, reverse, output);
+getRangeFinished(cx, trLogInfo, startTime, originalBegin, originalEnd, snapshot, conflictRange, reverse, output);
 return output;
 }
@@ -1941,7 +1982,7 @@ ACTOR Future<Standalone<RangeResultRef>> getRange( Database cx, Reference<Transa
 if( !rep.data.size() ) {
 Standalone<RangeResultRef> result = wait( getRangeFallback(cx, version, originalBegin, originalEnd, originalLimits, reverse, info ) );
-getRangeFinished(trLogInfo, startTime, originalBegin, originalEnd, snapshot, conflictRange, reverse, result);
+getRangeFinished(cx, trLogInfo, startTime, originalBegin, originalEnd, snapshot, conflictRange, reverse, result);
 return result;
 }
@@ -1970,7 +2011,7 @@ ACTOR Future<Standalone<RangeResultRef>> getRange( Database cx, Reference<Transa
 if (e.code() == error_code_wrong_shard_server) {
 Standalone<RangeResultRef> result = wait( getRangeFallback(cx, version, originalBegin, originalEnd, originalLimits, reverse, info ) );
-getRangeFinished(trLogInfo, startTime, originalBegin, originalEnd, snapshot, conflictRange, reverse, result);
+getRangeFinished(cx, trLogInfo, startTime, originalBegin, originalEnd, snapshot, conflictRange, reverse, result);
 return result;
 }
@@ -2048,6 +2089,7 @@ void Transaction::setVersion( Version v ) {
 Future<Optional<Value>> Transaction::get( const Key& key, bool snapshot ) {
 ++cx->transactionLogicalReads;
+ ++cx->transactionGetValueRequests;
 //ASSERT (key < allKeys.end);
 //There are no keys in the database with size greater than KEY_SIZE_LIMIT
@@ -2063,6 +2105,7 @@ Future<Optional<Value>> Transaction::get( const Key& key, bool snapshot ) {
 tr.transaction.read_conflict_ranges.push_back(tr.arena, singleKeyRange(key, tr.arena));
 if(key == metadataVersionKey) {
+ ++cx->transactionMetadataVersionReads;
 if(!ver.isReady() || metadataVersion.isSet()) {
 return metadataVersion.getFuture();
 } else {
@@ -2146,6 +2189,7 @@ Future<Version> Transaction::getRawReadVersion() {
 }
 Future< Void > Transaction::watch( Reference<Watch> watch ) {
+ ++cx->transactionWatchRequests;
 return ::watch(watch, cx, this);
 }
@@ -2182,6 +2226,7 @@ ACTOR Future<Standalone<VectorRef<const char*>>> getAddressesForKeyActor(Key key
 Future< Standalone< VectorRef< const char*>>> Transaction::getAddressesForKey( const Key& key ) {
 ++cx->transactionLogicalReads;
+ ++cx->transactionGetAddressesForKeyRequests;
 auto ver = getReadVersion();
 return getAddressesForKeyActor(key, ver, cx, info, options);
@@ -2205,6 +2250,7 @@ ACTOR Future< Key > getKeyAndConflictRange(
 Future< Key > Transaction::getKey( const KeySelector& key, bool snapshot ) {
 ++cx->transactionLogicalReads;
+ ++cx->transactionGetKeyRequests;
 if( snapshot )
 return ::getKey(cx, key, getReadVersion(), info);
@@ -2221,6 +2267,7 @@ Future< Standalone<RangeResultRef> > Transaction::getRange(
 bool reverse )
 {
 ++cx->transactionLogicalReads;
+ ++cx->transactionGetRangeRequests;
 if( limits.isReached() )
 return Standalone<RangeResultRef>();
@@ -2297,7 +2344,7 @@ void Transaction::makeSelfConflicting() {
 }
 void Transaction::set( const KeyRef& key, const ValueRef& value, bool addConflictRange ) {
+ ++cx->transactionSetMutations;
 if(key.size() > (key.startsWith(systemKeys.begin) ? CLIENT_KNOBS->SYSTEM_KEY_SIZE_LIMIT : CLIENT_KNOBS->KEY_SIZE_LIMIT))
 throw key_too_large();
 if(value.size() > CLIENT_KNOBS->VALUE_SIZE_LIMIT)
@@ -2315,6 +2362,7 @@ void Transaction::set( const KeyRef& key, const ValueRef& value, bool addConflic
 }
 void Transaction::atomicOp(const KeyRef& key, const ValueRef& operand, MutationRef::Type operationType, bool addConflictRange) {
+ ++cx->transactionAtomicMutations;
 if(key.size() > (key.startsWith(systemKeys.begin) ? CLIENT_KNOBS->SYSTEM_KEY_SIZE_LIMIT : CLIENT_KNOBS->KEY_SIZE_LIMIT))
throw key_too_large(); throw key_too_large();
if(operand.size() > CLIENT_KNOBS->VALUE_SIZE_LIMIT) if(operand.size() > CLIENT_KNOBS->VALUE_SIZE_LIMIT)
@ -2341,6 +2389,7 @@ void Transaction::atomicOp(const KeyRef& key, const ValueRef& operand, MutationR
} }
void Transaction::clear( const KeyRangeRef& range, bool addConflictRange ) { void Transaction::clear( const KeyRangeRef& range, bool addConflictRange ) {
++cx->transactionClearMutations;
auto &req = tr; auto &req = tr;
auto &t = req.transaction; auto &t = req.transaction;
@ -2363,7 +2412,7 @@ void Transaction::clear( const KeyRangeRef& range, bool addConflictRange ) {
t.write_conflict_ranges.push_back( req.arena, r ); t.write_conflict_ranges.push_back( req.arena, r );
} }
void Transaction::clear( const KeyRef& key, bool addConflictRange ) { void Transaction::clear( const KeyRef& key, bool addConflictRange ) {
++cx->transactionClearMutations;
//There aren't any keys in the database with size larger than KEY_SIZE_LIMIT //There aren't any keys in the database with size larger than KEY_SIZE_LIMIT
if(key.size() > (key.startsWith(systemKeys.begin) ? CLIENT_KNOBS->SYSTEM_KEY_SIZE_LIMIT : CLIENT_KNOBS->KEY_SIZE_LIMIT)) if(key.size() > (key.startsWith(systemKeys.begin) ? CLIENT_KNOBS->SYSTEM_KEY_SIZE_LIMIT : CLIENT_KNOBS->KEY_SIZE_LIMIT))
return; return;
@ -3003,6 +3052,7 @@ void Transaction::setOption( FDBTransactionOptions::Option option, Optional<Stri
ACTOR Future<GetReadVersionReply> getConsistentReadVersion( DatabaseContext *cx, uint32_t transactionCount, uint32_t flags, Optional<UID> debugID ) { ACTOR Future<GetReadVersionReply> getConsistentReadVersion( DatabaseContext *cx, uint32_t transactionCount, uint32_t flags, Optional<UID> debugID ) {
try { try {
++cx->transactionReadVersionBatches;
if( debugID.present() ) if( debugID.present() )
g_traceBatch.addEvent("TransactionDebug", debugID.get().first(), "NativeAPI.getConsistentReadVersion.Before"); g_traceBatch.addEvent("TransactionDebug", debugID.get().first(), "NativeAPI.getConsistentReadVersion.Before");
loop { loop {
@ -3089,7 +3139,21 @@ ACTOR Future<Version> extractReadVersion(DatabaseContext* cx, uint32_t flags, Re
if(rep.locked && !lockAware) if(rep.locked && !lockAware)
throw database_locked(); throw database_locked();
if (rep.version > cx->metadataVersionCache[cx->mvCacheInsertLocation].first) { ++cx->transactionReadVersionsCompleted;
if((flags & GetReadVersionRequest::PRIORITY_SYSTEM_IMMEDIATE) == GetReadVersionRequest::PRIORITY_SYSTEM_IMMEDIATE) {
++cx->transactionImmediateReadVersionsCompleted;
}
else if((flags & GetReadVersionRequest::PRIORITY_DEFAULT) == GetReadVersionRequest::PRIORITY_DEFAULT) {
++cx->transactionDefaultReadVersionsCompleted;
}
else if((flags & GetReadVersionRequest::PRIORITY_BATCH) == GetReadVersionRequest::PRIORITY_BATCH) {
++cx->transactionBatchReadVersionsCompleted;
}
else {
ASSERT(false);
}
if(rep.version > cx->metadataVersionCache[cx->mvCacheInsertLocation].first) {
cx->mvCacheInsertLocation = (cx->mvCacheInsertLocation + 1) % cx->metadataVersionCache.size(); cx->mvCacheInsertLocation = (cx->mvCacheInsertLocation + 1) % cx->metadataVersionCache.size();
cx->metadataVersionCache[cx->mvCacheInsertLocation] = std::make_pair(rep.version, rep.metadataVersion); cx->metadataVersionCache[cx->mvCacheInsertLocation] = std::make_pair(rep.version, rep.metadataVersion);
} }
@ -3102,6 +3166,18 @@ Future<Version> Transaction::getReadVersion(uint32_t flags) {
if (!readVersion.isValid()) { if (!readVersion.isValid()) {
++cx->transactionReadVersions; ++cx->transactionReadVersions;
flags |= options.getReadVersionFlags; flags |= options.getReadVersionFlags;
if((flags & GetReadVersionRequest::PRIORITY_SYSTEM_IMMEDIATE) == GetReadVersionRequest::PRIORITY_SYSTEM_IMMEDIATE) {
++cx->transactionImmediateReadVersions;
}
else if((flags & GetReadVersionRequest::PRIORITY_DEFAULT) == GetReadVersionRequest::PRIORITY_DEFAULT) {
++cx->transactionDefaultReadVersions;
}
else if((flags & GetReadVersionRequest::PRIORITY_BATCH) == GetReadVersionRequest::PRIORITY_BATCH) {
++cx->transactionBatchReadVersions;
}
else {
ASSERT(false);
}
auto& batcher = cx->versionBatcher[ flags ]; auto& batcher = cx->versionBatcher[ flags ];
if (!batcher.actor.isValid()) { if (!batcher.actor.isValid()) {
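
Both hunks above follow the same pattern: the read-version priority encoded in the request flags is translated into a per-priority client counter, once when Transaction::getReadVersion() issues the request and once in extractReadVersion() when the reply arrives. The standalone sketch below restates that bookkeeping; the flag values, counter names, and helper functions are illustrative assumptions, not the actual GetReadVersionRequest definitions.

#include <cassert>
#include <cstdint>
#include <iostream>

// Illustrative single-bit flag values; the real priorities are defined on GetReadVersionRequest.
enum ReadVersionFlags : uint32_t {
    PRIORITY_SYSTEM_IMMEDIATE = 1u << 0,
    PRIORITY_DEFAULT          = 1u << 1,
    PRIORITY_BATCH            = 1u << 2,
};

struct ReadVersionCounters {
    int64_t immediateIssued = 0, defaultIssued = 0, batchIssued = 0;
    int64_t immediateCompleted = 0, defaultCompleted = 0, batchCompleted = 0;
};

// Bump the counter matching the priority carried in 'flags'; exactly one priority is
// expected to match, mirroring the ASSERT(false) fallthrough in the diff above.
void countIssued(ReadVersionCounters& c, uint32_t flags) {
    if ((flags & PRIORITY_SYSTEM_IMMEDIATE) == PRIORITY_SYSTEM_IMMEDIATE) ++c.immediateIssued;
    else if ((flags & PRIORITY_DEFAULT) == PRIORITY_DEFAULT) ++c.defaultIssued;
    else if ((flags & PRIORITY_BATCH) == PRIORITY_BATCH) ++c.batchIssued;
    else assert(false && "a read-version request must carry exactly one priority");
}

void countCompleted(ReadVersionCounters& c, uint32_t flags) {
    if ((flags & PRIORITY_SYSTEM_IMMEDIATE) == PRIORITY_SYSTEM_IMMEDIATE) ++c.immediateCompleted;
    else if ((flags & PRIORITY_DEFAULT) == PRIORITY_DEFAULT) ++c.defaultCompleted;
    else if ((flags & PRIORITY_BATCH) == PRIORITY_BATCH) ++c.batchCompleted;
    else assert(false);
}

int main() {
    ReadVersionCounters c;
    countIssued(c, PRIORITY_DEFAULT);    // corresponds to Transaction::getReadVersion()
    countCompleted(c, PRIORITY_DEFAULT); // corresponds to extractReadVersion() on reply
    std::cout << c.defaultIssued << " issued, " << c.defaultCompleted << " completed\n";
}

Keeping the issued and completed counters separate lets the number of in-flight read-version requests per priority be read off the difference between the two.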

View File

@ -25,7 +25,6 @@
#elif !defined(FDBCLIENT_NATIVEAPI_ACTOR_H) #elif !defined(FDBCLIENT_NATIVEAPI_ACTOR_H)
#define FDBCLIENT_NATIVEAPI_ACTOR_H #define FDBCLIENT_NATIVEAPI_ACTOR_H
#include "flow/flow.h" #include "flow/flow.h"
#include "flow/TDMetric.actor.h" #include "flow/TDMetric.actor.h"
#include "fdbclient/FDBTypes.h" #include "fdbclient/FDBTypes.h"
@ -60,14 +59,10 @@ struct NetworkOptions {
std::string traceFormat; std::string traceFormat;
std::string traceClockSource; std::string traceClockSource;
Optional<bool> logClientInfo; Optional<bool> logClientInfo;
Standalone<VectorRef<ClientVersionRef>> supportedVersions; Reference<ReferencedObject<Standalone<VectorRef<ClientVersionRef>>>> supportedVersions;
bool slowTaskProfilingEnabled; bool slowTaskProfilingEnabled;
// The default values, TRACE_DEFAULT_ROLL_SIZE and TRACE_DEFAULT_MAX_LOGS_SIZE are located in Trace.h. NetworkOptions();
NetworkOptions()
: localAddress(""), clusterFile(""), traceDirectory(Optional<std::string>()),
traceRollSize(TRACE_DEFAULT_ROLL_SIZE), traceMaxLogsSize(TRACE_DEFAULT_MAX_LOGS_SIZE), traceLogGroup("default"),
traceFormat("xml"), traceClockSource("now"), slowTaskProfilingEnabled(false) {}
}; };
class Database { class Database {

View File

@ -605,6 +605,7 @@ const KeyRef JSONSchemas::statusSchema = LiteralStringRef(R"statusSchema(
"missing_data", "missing_data",
"healing", "healing",
"optimizing_team_collections", "optimizing_team_collections",
"healthy_populating_region",
"healthy_repartitioning", "healthy_repartitioning",
"healthy_removing_server", "healthy_removing_server",
"healthy_rebalancing", "healthy_rebalancing",
@ -639,6 +640,7 @@ const KeyRef JSONSchemas::statusSchema = LiteralStringRef(R"statusSchema(
"missing_data", "missing_data",
"healing", "healing",
"optimizing_team_collections", "optimizing_team_collections",
"healthy_populating_region",
"healthy_repartitioning", "healthy_repartitioning",
"healthy_removing_server", "healthy_removing_server",
"healthy_rebalancing", "healthy_rebalancing",

View File

@ -990,6 +990,7 @@ ACTOR static Future<Void> connectionIncoming( TransportData* self, Reference<ICo
ACTOR static Future<Void> listen( TransportData* self, NetworkAddress listenAddr ) { ACTOR static Future<Void> listen( TransportData* self, NetworkAddress listenAddr ) {
state ActorCollectionNoErrors incoming; // Actors monitoring incoming connections that haven't yet been associated with a peer state ActorCollectionNoErrors incoming; // Actors monitoring incoming connections that haven't yet been associated with a peer
state Reference<IListener> listener = INetworkConnections::net()->listen( listenAddr ); state Reference<IListener> listener = INetworkConnections::net()->listen( listenAddr );
state uint64_t connectionCount = 0;
try { try {
loop { loop {
Reference<IConnection> conn = wait( listener->accept() ); Reference<IConnection> conn = wait( listener->accept() );
@ -999,7 +1000,10 @@ ACTOR static Future<Void> listen( TransportData* self, NetworkAddress listenAddr
.detail("ListenAddress", listenAddr.toString()); .detail("ListenAddress", listenAddr.toString());
incoming.add( connectionIncoming(self, conn) ); incoming.add( connectionIncoming(self, conn) );
} }
wait(delay(0) || delay(FLOW_KNOBS->CONNECTION_ACCEPT_DELAY, TaskPriority::WriteSocket)); connectionCount++;
if( connectionCount%(FLOW_KNOBS->ACCEPT_BATCH_SIZE) == 0 ) {
wait(delay(0, TaskPriority::AcceptSocket));
}
} }
} catch (Error& e) { } catch (Error& e) {
TraceEvent(SevError, "ListenError").error(e); TraceEvent(SevError, "ListenError").error(e);

View File

@ -81,30 +81,4 @@ private:
double value; double value;
}; };
struct GlobalCounters {
vector<PerfIntCounter*> ints;
vector<PerfDoubleCounter*> doubles;
PerfDoubleCounter conflictTime;
PerfIntCounter conflictBatches;
PerfIntCounter conflictKeys;
PerfIntCounter conflictTransactions;
GlobalCounters() :
conflictTime("Conflict detection time", doubles),
conflictBatches("Conflict batches", ints),
conflictKeys("Conflict keys", ints),
conflictTransactions("Conflict transactions", ints)
{
}
void clear() {
for(int i=0; i<ints.size(); i++)
ints[i]->clear();
for(int i=0; i<doubles.size(); i++)
doubles[i]->clear();
}
};
extern GlobalCounters g_counters;
#endif #endif

View File

@ -30,6 +30,7 @@
#include "fdbrpc/TraceFileIO.h" #include "fdbrpc/TraceFileIO.h"
#include "flow/FaultInjection.h" #include "flow/FaultInjection.h"
#include "flow/network.h" #include "flow/network.h"
#include "flow/TLSConfig.actor.h"
#include "fdbrpc/Net2FileSystem.h" #include "fdbrpc/Net2FileSystem.h"
#include "fdbrpc/Replication.h" #include "fdbrpc/Replication.h"
#include "fdbrpc/ReplicationUtils.h" #include "fdbrpc/ReplicationUtils.h"
@ -1594,7 +1595,7 @@ public:
Sim2() : time(0.0), timerTime(0.0), taskCount(0), yielded(false), yield_limit(0), currentTaskID(TaskPriority::Zero) { Sim2() : time(0.0), timerTime(0.0), taskCount(0), yielded(false), yield_limit(0), currentTaskID(TaskPriority::Zero) {
// Not letting currentProcess be NULL eliminates some annoying special cases // Not letting currentProcess be NULL eliminates some annoying special cases
currentProcess = new ProcessInfo("NoMachine", LocalityData(Optional<Standalone<StringRef>>(), StringRef(), StringRef(), StringRef()), ProcessClass(), {NetworkAddress()}, this, "", ""); currentProcess = new ProcessInfo("NoMachine", LocalityData(Optional<Standalone<StringRef>>(), StringRef(), StringRef(), StringRef()), ProcessClass(), {NetworkAddress()}, this, "", "");
g_network = net2 = newNet2(false, true); g_network = net2 = newNet2(TLSConfig(), false, true);
Net2FileSystem::newFileSystem(); Net2FileSystem::newFileSystem();
check_yield(TaskPriority::Zero); check_yield(TaskPriority::Zero);
} }

View File

@ -1936,11 +1936,12 @@ ACTOR Future<Void> failureDetectionServer( UID uniqueID, ClusterControllerData*
//TraceEvent("FailureDetectionPoll", uniqueID).detail("PivotDelay", pivotDelay).detail("Clients", currentStatus.size()); //TraceEvent("FailureDetectionPoll", uniqueID).detail("PivotDelay", pivotDelay).detail("Clients", currentStatus.size());
//TraceEvent("FailureDetectionAcceptableDelay").detail("Delay", acceptableDelay1000); //TraceEvent("FailureDetectionAcceptableDelay").detail("Delay", acceptableDelay1000);
bool tooManyLogGenerations = std::max(self->db.unfinishedRecoveries, self->db.logGenerations) > CLIENT_KNOBS->FAILURE_MAX_GENERATIONS; bool useEmergencyDelay = (std::max(self->db.unfinishedRecoveries, self->db.logGenerations) > CLIENT_KNOBS->FAILURE_MAX_GENERATIONS) ||
(now() - self->startTime < CLIENT_KNOBS->FAILURE_EMERGENCY_DELAY);
for(auto it = currentStatus.begin(); it != currentStatus.end(); ) { for(auto it = currentStatus.begin(); it != currentStatus.end(); ) {
double delay = t - it->second.lastRequestTime; double delay = t - it->second.lastRequestTime;
if ( it->first != g_network->getLocalAddresses() && ( tooManyLogGenerations ? if ( it->first != g_network->getLocalAddresses() && ( useEmergencyDelay ?
( delay > CLIENT_KNOBS->FAILURE_EMERGENCY_DELAY ) : ( delay > CLIENT_KNOBS->FAILURE_EMERGENCY_DELAY ) :
( delay > pivotDelay * 2 + FLOW_KNOBS->SERVER_REQUEST_INTERVAL + CLIENT_KNOBS->FAILURE_MIN_DELAY || delay > CLIENT_KNOBS->FAILURE_MAX_DELAY ) ) ) { ( delay > pivotDelay * 2 + FLOW_KNOBS->SERVER_REQUEST_INTERVAL + CLIENT_KNOBS->FAILURE_MIN_DELAY || delay > CLIENT_KNOBS->FAILURE_MAX_DELAY ) ) ) {
//printf("Failure Detection Server: Status of '%s' is now '%s' after %f sec\n", it->first.toString().c_str(), "Failed", now() - it->second.lastRequestTime); //printf("Failure Detection Server: Status of '%s' is now '%s' after %f sec\n", it->first.toString().c_str(), "Failed", now() - it->second.lastRequestTime);

View File

@ -2987,7 +2987,9 @@ ACTOR Future<Void> teamTracker(DDTeamCollection* self, Reference<TCTeamInfo> tea
lastWrongConfiguration = anyWrongConfiguration; lastWrongConfiguration = anyWrongConfiguration;
state int lastPriority = team->getPriority(); state int lastPriority = team->getPriority();
if( serversLeft < self->configuration.storageTeamSize ) { if(team->size() == 0) {
team->setPriority( SERVER_KNOBS->PRIORITY_POPULATE_REGION );
} else if( serversLeft < self->configuration.storageTeamSize ) {
if( serversLeft == 0 ) if( serversLeft == 0 )
team->setPriority( SERVER_KNOBS->PRIORITY_TEAM_0_LEFT ); team->setPriority( SERVER_KNOBS->PRIORITY_TEAM_0_LEFT );
else if( serversLeft == 1 ) else if( serversLeft == 1 )
@ -3004,10 +3006,11 @@ ACTOR Future<Void> teamTracker(DDTeamCollection* self, Reference<TCTeamInfo> tea
team->setPriority( SERVER_KNOBS->PRIORITY_TEAM_UNHEALTHY ); team->setPriority( SERVER_KNOBS->PRIORITY_TEAM_UNHEALTHY );
} }
} }
else if( anyUndesired ) else if( anyUndesired ) {
team->setPriority( SERVER_KNOBS->PRIORITY_TEAM_CONTAINS_UNDESIRED_SERVER ); team->setPriority( SERVER_KNOBS->PRIORITY_TEAM_CONTAINS_UNDESIRED_SERVER );
else } else {
team->setPriority( SERVER_KNOBS->PRIORITY_TEAM_HEALTHY ); team->setPriority( SERVER_KNOBS->PRIORITY_TEAM_HEALTHY );
}
if(lastPriority != team->getPriority()) { if(lastPriority != team->getPriority()) {
self->priority_teams[lastPriority]--; self->priority_teams[lastPriority]--;
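
teamTracker() now puts a completely empty team (a region that still has to be populated with storage servers) at PRIORITY_POPULATE_REGION, ahead of the existing serversLeft and anyUndesired checks. A compact sketch of the resulting selection logic is below; the knob struct is reduced to the fields used here, and the serversLeft == 1 and == 2 branches, which the hunk elides, are filled in to match the surrounding pattern.

// Hedged sketch of the priority ladder in teamTracker(); not the actual actor code.
struct TeamPriorityKnobs {
    int PRIORITY_POPULATE_REGION;                 // 600 in this commit's Knobs.cpp
    int PRIORITY_TEAM_0_LEFT;
    int PRIORITY_TEAM_1_LEFT;                     // 800
    int PRIORITY_TEAM_2_LEFT;                     // 709
    int PRIORITY_TEAM_UNHEALTHY;                  // 700
    int PRIORITY_TEAM_CONTAINS_UNDESIRED_SERVER;  // 150
    int PRIORITY_TEAM_HEALTHY;
};

int teamPriority(const TeamPriorityKnobs& k, int teamSize, int serversLeft, int storageTeamSize,
                 bool anyUndesired) {
    if (teamSize == 0)                  return k.PRIORITY_POPULATE_REGION; // new in this change
    if (serversLeft < storageTeamSize) {
        if (serversLeft == 0)           return k.PRIORITY_TEAM_0_LEFT;
        if (serversLeft == 1)           return k.PRIORITY_TEAM_1_LEFT;
        if (serversLeft == 2)           return k.PRIORITY_TEAM_2_LEFT;
        return k.PRIORITY_TEAM_UNHEALTHY;
    }
    if (anyUndesired)                   return k.PRIORITY_TEAM_CONTAINS_UNDESIRED_SERVER;
    return k.PRIORITY_TEAM_HEALTHY;
}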
@ -3747,7 +3750,7 @@ ACTOR Future<Void> monitorStorageServerRecruitment(DDTeamCollection* self) {
state bool recruiting = false; state bool recruiting = false;
TraceEvent("StorageServerRecruitment", self->distributorId) TraceEvent("StorageServerRecruitment", self->distributorId)
.detail("State", "Idle") .detail("State", "Idle")
.trackLatest(("StorageServerRecruitment_" + self->distributorId.toString()).c_str()); .trackLatest("StorageServerRecruitment_" + self->distributorId.toString());
loop { loop {
if( !recruiting ) { if( !recruiting ) {
while(self->recruitingStream.get() == 0) { while(self->recruitingStream.get() == 0) {
@ -3755,7 +3758,7 @@ ACTOR Future<Void> monitorStorageServerRecruitment(DDTeamCollection* self) {
} }
TraceEvent("StorageServerRecruitment", self->distributorId) TraceEvent("StorageServerRecruitment", self->distributorId)
.detail("State", "Recruiting") .detail("State", "Recruiting")
.trackLatest(("StorageServerRecruitment_" + self->distributorId.toString()).c_str()); .trackLatest("StorageServerRecruitment_" + self->distributorId.toString());
recruiting = true; recruiting = true;
} else { } else {
loop { loop {
@ -3766,7 +3769,7 @@ ACTOR Future<Void> monitorStorageServerRecruitment(DDTeamCollection* self) {
} }
TraceEvent("StorageServerRecruitment", self->distributorId) TraceEvent("StorageServerRecruitment", self->distributorId)
.detail("State", "Idle") .detail("State", "Idle")
.trackLatest(("StorageServerRecruitment_" + self->distributorId.toString()).c_str()); .trackLatest("StorageServerRecruitment_" + self->distributorId.toString());
recruiting = false; recruiting = false;
} }
} }
@ -4500,7 +4503,7 @@ ACTOR Future<Void> dataDistribution(Reference<DataDistributorData> self)
actors.push_back( pollMoveKeysLock(cx, lock) ); actors.push_back( pollMoveKeysLock(cx, lock) );
actors.push_back( reportErrorsExcept( dataDistributionTracker( initData, cx, output, shardsAffectedByTeamFailure, getShardMetrics, getAverageShardBytes.getFuture(), readyToStart, anyZeroHealthyTeams, self->ddId ), "DDTracker", self->ddId, &normalDDQueueErrors() ) ); actors.push_back( reportErrorsExcept( dataDistributionTracker( initData, cx, output, shardsAffectedByTeamFailure, getShardMetrics, getAverageShardBytes.getFuture(), readyToStart, anyZeroHealthyTeams, self->ddId ), "DDTracker", self->ddId, &normalDDQueueErrors() ) );
actors.push_back( reportErrorsExcept( dataDistributionQueue( cx, output, input.getFuture(), getShardMetrics, processingUnhealthy, tcis, shardsAffectedByTeamFailure, lock, getAverageShardBytes, self->ddId, storageTeamSize, &lastLimited ), "DDQueue", self->ddId, &normalDDQueueErrors() ) ); actors.push_back( reportErrorsExcept( dataDistributionQueue( cx, output, input.getFuture(), getShardMetrics, processingUnhealthy, tcis, shardsAffectedByTeamFailure, lock, getAverageShardBytes, self->ddId, storageTeamSize, configuration.storageTeamSize, &lastLimited ), "DDQueue", self->ddId, &normalDDQueueErrors() ) );
vector<DDTeamCollection*> teamCollectionsPtrs; vector<DDTeamCollection*> teamCollectionsPtrs;
Reference<DDTeamCollection> primaryTeamCollection( new DDTeamCollection(cx, self->ddId, lock, output, shardsAffectedByTeamFailure, configuration, primaryDcId, configuration.usableRegions > 1 ? remoteDcIds : std::vector<Optional<Key>>(), readyToStart.getFuture(), zeroHealthyTeams[0], true, processingUnhealthy) ); Reference<DDTeamCollection> primaryTeamCollection( new DDTeamCollection(cx, self->ddId, lock, output, shardsAffectedByTeamFailure, configuration, primaryDcId, configuration.usableRegions > 1 ? remoteDcIds : std::vector<Optional<Key>>(), readyToStart.getFuture(), zeroHealthyTeams[0], true, processingUnhealthy) );

View File

@ -220,6 +220,7 @@ Future<Void> dataDistributionQueue(
PromiseStream<Promise<int64_t>> const& getAverageShardBytes, PromiseStream<Promise<int64_t>> const& getAverageShardBytes,
UID const& distributorId, UID const& distributorId,
int const& teamSize, int const& teamSize,
int const& singleRegionTeamSize,
double* const& lastLimited); double* const& lastLimited);
//Holds the permitted size and IO Bounds for a shard //Holds the permitted size and IO Bounds for a shard

View File

@ -57,7 +57,8 @@ struct RelocateData {
rs.priority == SERVER_KNOBS->PRIORITY_TEAM_REDUNDANT), interval("QueuedRelocation") {} rs.priority == SERVER_KNOBS->PRIORITY_TEAM_REDUNDANT), interval("QueuedRelocation") {}
static bool isHealthPriority(int priority) { static bool isHealthPriority(int priority) {
return priority == SERVER_KNOBS->PRIORITY_TEAM_UNHEALTHY || return priority == SERVER_KNOBS->PRIORITY_POPULATE_REGION ||
priority == SERVER_KNOBS->PRIORITY_TEAM_UNHEALTHY ||
priority == SERVER_KNOBS->PRIORITY_TEAM_2_LEFT || priority == SERVER_KNOBS->PRIORITY_TEAM_2_LEFT ||
priority == SERVER_KNOBS->PRIORITY_TEAM_1_LEFT || priority == SERVER_KNOBS->PRIORITY_TEAM_1_LEFT ||
priority == SERVER_KNOBS->PRIORITY_TEAM_0_LEFT || priority == SERVER_KNOBS->PRIORITY_TEAM_0_LEFT ||
@ -286,30 +287,31 @@ struct Busyness {
}; };
// find the "workFactor" for this, were it launched now // find the "workFactor" for this, were it launched now
int getWorkFactor( RelocateData const& relocation ) { int getWorkFactor( RelocateData const& relocation, int singleRegionTeamSize ) {
// Avoid the divide by 0!
ASSERT( relocation.src.size() );
if( relocation.healthPriority == SERVER_KNOBS->PRIORITY_TEAM_1_LEFT || relocation.healthPriority == SERVER_KNOBS->PRIORITY_TEAM_0_LEFT ) if( relocation.healthPriority == SERVER_KNOBS->PRIORITY_TEAM_1_LEFT || relocation.healthPriority == SERVER_KNOBS->PRIORITY_TEAM_0_LEFT )
return WORK_FULL_UTILIZATION / SERVER_KNOBS->RELOCATION_PARALLELISM_PER_SOURCE_SERVER; return WORK_FULL_UTILIZATION / SERVER_KNOBS->RELOCATION_PARALLELISM_PER_SOURCE_SERVER;
else if( relocation.healthPriority == SERVER_KNOBS->PRIORITY_TEAM_2_LEFT ) else if( relocation.healthPriority == SERVER_KNOBS->PRIORITY_TEAM_2_LEFT )
return WORK_FULL_UTILIZATION / 2 / SERVER_KNOBS->RELOCATION_PARALLELISM_PER_SOURCE_SERVER; return WORK_FULL_UTILIZATION / 2 / SERVER_KNOBS->RELOCATION_PARALLELISM_PER_SOURCE_SERVER;
else // for now we assume that any message at a lower priority can best be assumed to have a full team left for work else // for now we assume that any message at a lower priority can best be assumed to have a full team left for work
return WORK_FULL_UTILIZATION / relocation.src.size() / SERVER_KNOBS->RELOCATION_PARALLELISM_PER_SOURCE_SERVER; return WORK_FULL_UTILIZATION / singleRegionTeamSize / SERVER_KNOBS->RELOCATION_PARALLELISM_PER_SOURCE_SERVER;
} }
// Data movement's resource control: Do not overload source servers used for the RelocateData // Data movement's resource control: Do not overload source servers used for the RelocateData
// return true if servers are not too busy to launch the relocation // return true if servers are not too busy to launch the relocation
// This ensures source servers will not be overloaded. // This ensures source servers will not be overloaded.
bool canLaunch( RelocateData & relocation, int teamSize, std::map<UID, Busyness> & busymap, bool canLaunch( RelocateData & relocation, int teamSize, int singleRegionTeamSize, std::map<UID, Busyness> & busymap,
std::vector<RelocateData> cancellableRelocations ) { std::vector<RelocateData> cancellableRelocations ) {
// assert this has not already been launched // assert this has not already been launched
ASSERT( relocation.workFactor == 0 ); ASSERT( relocation.workFactor == 0 );
ASSERT( relocation.src.size() != 0 ); ASSERT( relocation.src.size() != 0 );
ASSERT( teamSize >= singleRegionTeamSize );
// find the "workFactor" for this, were it launched now // find the "workFactor" for this, were it launched now
int workFactor = getWorkFactor( relocation ); int workFactor = getWorkFactor( relocation, singleRegionTeamSize );
int neededServers = std::max( 1, (int)relocation.src.size() - teamSize + 1 ); int neededServers = std::min<int>( relocation.src.size(), teamSize - singleRegionTeamSize + 1 );
if(SERVER_KNOBS->USE_OLD_NEEDED_SERVERS) {
neededServers = std::max( 1, (int)relocation.src.size() - teamSize + 1 );
}
// see if each of the SS can launch this task // see if each of the SS can launch this task
for( int i = 0; i < relocation.src.size(); i++ ) { for( int i = 0; i < relocation.src.size(); i++ ) {
// For each source server for this relocation, copy and modify its busyness to reflect work that WOULD be cancelled // For each source server for this relocation, copy and modify its busyness to reflect work that WOULD be cancelled
@ -330,9 +332,9 @@ bool canLaunch( RelocateData & relocation, int teamSize, std::map<UID, Busyness>
} }
// update busyness for each server // update busyness for each server
void launch( RelocateData & relocation, std::map<UID, Busyness> & busymap ) { void launch( RelocateData & relocation, std::map<UID, Busyness> & busymap, int singleRegionTeamSize ) {
// if we are here this means that we can launch and should adjust all the work the servers can do // if we are here this means that we can launch and should adjust all the work the servers can do
relocation.workFactor = getWorkFactor( relocation ); relocation.workFactor = getWorkFactor( relocation, singleRegionTeamSize );
for( int i = 0; i < relocation.src.size(); i++ ) for( int i = 0; i < relocation.src.size(); i++ )
busymap[ relocation.src[i] ].addWork( relocation.priority, relocation.workFactor ); busymap[ relocation.src[i] ].addWork( relocation.priority, relocation.workFactor );
} }
@ -361,6 +363,7 @@ struct DDQueueData {
int queuedRelocations; int queuedRelocations;
int64_t bytesWritten; int64_t bytesWritten;
int teamSize; int teamSize;
int singleRegionTeamSize;
std::map<UID, Busyness> busymap; // UID is serverID std::map<UID, Busyness> busymap; // UID is serverID
@ -397,7 +400,7 @@ struct DDQueueData {
// ensure a team remover will not start before the previous one finishes removing a team and move away data // ensure a team remover will not start before the previous one finishes removing a team and move away data
// NOTE: split and merge shard have higher priority. If they have to wait for unhealthyRelocations = 0, // NOTE: split and merge shard have higher priority. If they have to wait for unhealthyRelocations = 0,
// deadlock may happen: split/merge shard waits for unhealthyRelocations, while blocks team_redundant. // deadlock may happen: split/merge shard waits for unhealthyRelocations, while blocks team_redundant.
if (healthPriority == SERVER_KNOBS->PRIORITY_TEAM_UNHEALTHY || healthPriority == SERVER_KNOBS->PRIORITY_TEAM_2_LEFT || if (healthPriority == SERVER_KNOBS->PRIORITY_POPULATE_REGION || healthPriority == SERVER_KNOBS->PRIORITY_TEAM_UNHEALTHY || healthPriority == SERVER_KNOBS->PRIORITY_TEAM_2_LEFT ||
healthPriority == SERVER_KNOBS->PRIORITY_TEAM_1_LEFT || healthPriority == SERVER_KNOBS->PRIORITY_TEAM_0_LEFT || healthPriority == SERVER_KNOBS->PRIORITY_TEAM_REDUNDANT) { healthPriority == SERVER_KNOBS->PRIORITY_TEAM_1_LEFT || healthPriority == SERVER_KNOBS->PRIORITY_TEAM_0_LEFT || healthPriority == SERVER_KNOBS->PRIORITY_TEAM_REDUNDANT) {
unhealthyRelocations++; unhealthyRelocations++;
rawProcessingUnhealthy->set(true); rawProcessingUnhealthy->set(true);
@ -405,7 +408,7 @@ struct DDQueueData {
priority_relocations[priority]++; priority_relocations[priority]++;
} }
void finishRelocation(int priority, int healthPriority) { void finishRelocation(int priority, int healthPriority) {
if (healthPriority == SERVER_KNOBS->PRIORITY_TEAM_UNHEALTHY || healthPriority == SERVER_KNOBS->PRIORITY_TEAM_2_LEFT || if (healthPriority == SERVER_KNOBS->PRIORITY_POPULATE_REGION || healthPriority == SERVER_KNOBS->PRIORITY_TEAM_UNHEALTHY || healthPriority == SERVER_KNOBS->PRIORITY_TEAM_2_LEFT ||
healthPriority == SERVER_KNOBS->PRIORITY_TEAM_1_LEFT || healthPriority == SERVER_KNOBS->PRIORITY_TEAM_0_LEFT || healthPriority == SERVER_KNOBS->PRIORITY_TEAM_REDUNDANT) { healthPriority == SERVER_KNOBS->PRIORITY_TEAM_1_LEFT || healthPriority == SERVER_KNOBS->PRIORITY_TEAM_0_LEFT || healthPriority == SERVER_KNOBS->PRIORITY_TEAM_REDUNDANT) {
unhealthyRelocations--; unhealthyRelocations--;
ASSERT(unhealthyRelocations >= 0); ASSERT(unhealthyRelocations >= 0);
@ -418,10 +421,10 @@ struct DDQueueData {
DDQueueData( UID mid, MoveKeysLock lock, Database cx, std::vector<TeamCollectionInterface> teamCollections, DDQueueData( UID mid, MoveKeysLock lock, Database cx, std::vector<TeamCollectionInterface> teamCollections,
Reference<ShardsAffectedByTeamFailure> sABTF, PromiseStream<Promise<int64_t>> getAverageShardBytes, Reference<ShardsAffectedByTeamFailure> sABTF, PromiseStream<Promise<int64_t>> getAverageShardBytes,
int teamSize, PromiseStream<RelocateShard> output, FutureStream<RelocateShard> input, PromiseStream<GetMetricsRequest> getShardMetrics, double* lastLimited ) : int teamSize, int singleRegionTeamSize, PromiseStream<RelocateShard> output, FutureStream<RelocateShard> input, PromiseStream<GetMetricsRequest> getShardMetrics, double* lastLimited ) :
activeRelocations( 0 ), queuedRelocations( 0 ), bytesWritten ( 0 ), teamCollections( teamCollections ), activeRelocations( 0 ), queuedRelocations( 0 ), bytesWritten ( 0 ), teamCollections( teamCollections ),
shardsAffectedByTeamFailure( sABTF ), getAverageShardBytes( getAverageShardBytes ), distributorId( mid ), lock( lock ), shardsAffectedByTeamFailure( sABTF ), getAverageShardBytes( getAverageShardBytes ), distributorId( mid ), lock( lock ),
cx( cx ), teamSize( teamSize ), output( output ), input( input ), getShardMetrics( getShardMetrics ), startMoveKeysParallelismLock( SERVER_KNOBS->DD_MOVE_KEYS_PARALLELISM ), cx( cx ), teamSize( teamSize ), singleRegionTeamSize( singleRegionTeamSize ), output( output ), input( input ), getShardMetrics( getShardMetrics ), startMoveKeysParallelismLock( SERVER_KNOBS->DD_MOVE_KEYS_PARALLELISM ),
finishMoveKeysParallelismLock( SERVER_KNOBS->DD_MOVE_KEYS_PARALLELISM ), lastLimited(lastLimited), finishMoveKeysParallelismLock( SERVER_KNOBS->DD_MOVE_KEYS_PARALLELISM ), lastLimited(lastLimited),
suppressIntervals(0), lastInterval(0), unhealthyRelocations(0), rawProcessingUnhealthy( new AsyncVar<bool>(false) ) {} suppressIntervals(0), lastInterval(0), unhealthyRelocations(0), rawProcessingUnhealthy( new AsyncVar<bool>(false) ) {}
@ -823,7 +826,7 @@ struct DDQueueData {
// SOMEDAY: the list of source servers may be outdated since they were fetched when the work was put in the // SOMEDAY: the list of source servers may be outdated since they were fetched when the work was put in the
// queue // queue
// FIXME: we need spare capacity even when we're just going to be cancelling work via TEAM_HEALTHY // FIXME: we need spare capacity even when we're just going to be cancelling work via TEAM_HEALTHY
if( !canLaunch( rd, teamSize, busymap, cancellableRelocations ) ) { if( !canLaunch( rd, teamSize, singleRegionTeamSize, busymap, cancellableRelocations ) ) {
//logRelocation( rd, "SkippingQueuedRelocation" ); //logRelocation( rd, "SkippingQueuedRelocation" );
continue; continue;
} }
@ -861,7 +864,7 @@ struct DDQueueData {
RelocateData& rrs = inFlight.rangeContaining(ranges[r].begin)->value(); RelocateData& rrs = inFlight.rangeContaining(ranges[r].begin)->value();
rrs.keys = ranges[r]; rrs.keys = ranges[r];
launch( rrs, busymap ); launch( rrs, busymap, singleRegionTeamSize );
activeRelocations++; activeRelocations++;
startRelocation(rrs.priority, rrs.healthPriority); startRelocation(rrs.priority, rrs.healthPriority);
// Start the actor that relocates data in the rrs.keys // Start the actor that relocates data in the rrs.keys
@ -936,7 +939,7 @@ ACTOR Future<Void> dataDistributionRelocator( DDQueueData *self, RelocateData rd
while( tciIndex < self->teamCollections.size() ) { while( tciIndex < self->teamCollections.size() ) {
double inflightPenalty = SERVER_KNOBS->INFLIGHT_PENALTY_HEALTHY; double inflightPenalty = SERVER_KNOBS->INFLIGHT_PENALTY_HEALTHY;
if(rd.healthPriority == SERVER_KNOBS->PRIORITY_TEAM_UNHEALTHY || rd.healthPriority == SERVER_KNOBS->PRIORITY_TEAM_2_LEFT) inflightPenalty = SERVER_KNOBS->INFLIGHT_PENALTY_UNHEALTHY; if(rd.healthPriority == SERVER_KNOBS->PRIORITY_TEAM_UNHEALTHY || rd.healthPriority == SERVER_KNOBS->PRIORITY_TEAM_2_LEFT) inflightPenalty = SERVER_KNOBS->INFLIGHT_PENALTY_UNHEALTHY;
if(rd.healthPriority == SERVER_KNOBS->PRIORITY_TEAM_1_LEFT || rd.healthPriority == SERVER_KNOBS->PRIORITY_TEAM_0_LEFT) inflightPenalty = SERVER_KNOBS->INFLIGHT_PENALTY_ONE_LEFT; if(rd.healthPriority == SERVER_KNOBS->PRIORITY_POPULATE_REGION || rd.healthPriority == SERVER_KNOBS->PRIORITY_TEAM_1_LEFT || rd.healthPriority == SERVER_KNOBS->PRIORITY_TEAM_0_LEFT) inflightPenalty = SERVER_KNOBS->INFLIGHT_PENALTY_ONE_LEFT;
auto req = GetTeamRequest(rd.wantsNewServers, rd.priority == SERVER_KNOBS->PRIORITY_REBALANCE_UNDERUTILIZED_TEAM, true, false, inflightPenalty); auto req = GetTeamRequest(rd.wantsNewServers, rd.priority == SERVER_KNOBS->PRIORITY_REBALANCE_UNDERUTILIZED_TEAM, true, false, inflightPenalty);
req.completeSources = rd.completeSources; req.completeSources = rd.completeSources;
@ -1028,10 +1031,23 @@ ACTOR Future<Void> dataDistributionRelocator( DDQueueData *self, RelocateData rd
//FIXME: do not add data in flight to servers that were already in the src. //FIXME: do not add data in flight to servers that were already in the src.
healthyDestinations.addDataInFlightToTeam(+metrics.bytes); healthyDestinations.addDataInFlightToTeam(+metrics.bytes);
TraceEvent(relocateShardInterval.severity, "RelocateShardHasDestination", distributorId) if (SERVER_KNOBS->DD_ENABLE_VERBOSE_TRACING) {
.detail("PairId", relocateShardInterval.pairID) // StorageMetrics is the rd shard's metrics, e.g., bytes and write bandwidth
.detail("DestinationTeam", describe(destIds)) TraceEvent(SevInfo, "RelocateShardDecision", distributorId)
.detail("ExtraIds", describe(extraIds)); .detail("PairId", relocateShardInterval.pairID)
.detail("Priority", rd.priority)
.detail("KeyBegin", rd.keys.begin)
.detail("KeyEnd", rd.keys.end)
.detail("StorageMetrics", metrics.toString())
.detail("SourceServers", describe(rd.src))
.detail("DestinationTeam", describe(destIds))
.detail("ExtraIds", describe(extraIds));
} else {
TraceEvent(relocateShardInterval.severity, "RelocateShardHasDestination", distributorId)
.detail("PairId", relocateShardInterval.pairID)
.detail("DestinationTeam", describe(destIds))
.detail("ExtraIds", describe(extraIds));
}
state Error error = success(); state Error error = success();
state Promise<Void> dataMovementComplete; state Promise<Void> dataMovementComplete;
@ -1405,9 +1421,10 @@ ACTOR Future<Void> dataDistributionQueue(
PromiseStream<Promise<int64_t>> getAverageShardBytes, PromiseStream<Promise<int64_t>> getAverageShardBytes,
UID distributorId, UID distributorId,
int teamSize, int teamSize,
int singleRegionTeamSize,
double* lastLimited) double* lastLimited)
{ {
state DDQueueData self( distributorId, lock, cx, teamCollections, shardsAffectedByTeamFailure, getAverageShardBytes, teamSize, output, input, getShardMetrics, lastLimited ); state DDQueueData self( distributorId, lock, cx, teamCollections, shardsAffectedByTeamFailure, getAverageShardBytes, teamSize, singleRegionTeamSize, output, input, getShardMetrics, lastLimited );
state std::set<UID> serversToLaunchFrom; state std::set<UID> serversToLaunchFrom;
state KeyRange keysToLaunchFrom; state KeyRange keysToLaunchFrom;
state RelocateData launchData; state RelocateData launchData;
@ -1506,6 +1523,7 @@ ACTOR Future<Void> dataDistributionQueue(
.detail( "PriorityTeamContainsUndesiredServer", self.priority_relocations[SERVER_KNOBS->PRIORITY_TEAM_CONTAINS_UNDESIRED_SERVER] ) .detail( "PriorityTeamContainsUndesiredServer", self.priority_relocations[SERVER_KNOBS->PRIORITY_TEAM_CONTAINS_UNDESIRED_SERVER] )
.detail( "PriorityTeamRedundant", self.priority_relocations[SERVER_KNOBS->PRIORITY_TEAM_REDUNDANT] ) .detail( "PriorityTeamRedundant", self.priority_relocations[SERVER_KNOBS->PRIORITY_TEAM_REDUNDANT] )
.detail( "PriorityMergeShard", self.priority_relocations[SERVER_KNOBS->PRIORITY_MERGE_SHARD] ) .detail( "PriorityMergeShard", self.priority_relocations[SERVER_KNOBS->PRIORITY_MERGE_SHARD] )
.detail( "PriorityPopulateRegion", self.priority_relocations[SERVER_KNOBS->PRIORITY_POPULATE_REGION] )
.detail( "PriorityTeamUnhealthy", self.priority_relocations[SERVER_KNOBS->PRIORITY_TEAM_UNHEALTHY] ) .detail( "PriorityTeamUnhealthy", self.priority_relocations[SERVER_KNOBS->PRIORITY_TEAM_UNHEALTHY] )
.detail( "PriorityTeam2Left", self.priority_relocations[SERVER_KNOBS->PRIORITY_TEAM_2_LEFT] ) .detail( "PriorityTeam2Left", self.priority_relocations[SERVER_KNOBS->PRIORITY_TEAM_2_LEFT] )
.detail( "PriorityTeam1Left", self.priority_relocations[SERVER_KNOBS->PRIORITY_TEAM_1_LEFT] ) .detail( "PriorityTeam1Left", self.priority_relocations[SERVER_KNOBS->PRIORITY_TEAM_1_LEFT] )

View File

@ -105,6 +105,7 @@ ServerKnobs::ServerKnobs(bool randomize, ClientKnobs* clientKnobs, bool isSimula
init( INFLIGHT_PENALTY_HEALTHY, 1.0 ); init( INFLIGHT_PENALTY_HEALTHY, 1.0 );
init( INFLIGHT_PENALTY_UNHEALTHY, 500.0 ); init( INFLIGHT_PENALTY_UNHEALTHY, 500.0 );
init( INFLIGHT_PENALTY_ONE_LEFT, 1000.0 ); init( INFLIGHT_PENALTY_ONE_LEFT, 1000.0 );
init( USE_OLD_NEEDED_SERVERS, false );
init( PRIORITY_RECOVER_MOVE, 110 ); init( PRIORITY_RECOVER_MOVE, 110 );
init( PRIORITY_REBALANCE_UNDERUTILIZED_TEAM, 120 ); init( PRIORITY_REBALANCE_UNDERUTILIZED_TEAM, 120 );
@ -113,6 +114,7 @@ ServerKnobs::ServerKnobs(bool randomize, ClientKnobs* clientKnobs, bool isSimula
init( PRIORITY_TEAM_CONTAINS_UNDESIRED_SERVER, 150 ); init( PRIORITY_TEAM_CONTAINS_UNDESIRED_SERVER, 150 );
init( PRIORITY_TEAM_REDUNDANT, 200 ); init( PRIORITY_TEAM_REDUNDANT, 200 );
init( PRIORITY_MERGE_SHARD, 340 ); init( PRIORITY_MERGE_SHARD, 340 );
init( PRIORITY_POPULATE_REGION, 600 );
init( PRIORITY_TEAM_UNHEALTHY, 700 ); init( PRIORITY_TEAM_UNHEALTHY, 700 );
init( PRIORITY_TEAM_2_LEFT, 709 ); init( PRIORITY_TEAM_2_LEFT, 709 );
init( PRIORITY_TEAM_1_LEFT, 800 ); init( PRIORITY_TEAM_1_LEFT, 800 );
@ -211,6 +213,7 @@ ServerKnobs::ServerKnobs(bool randomize, ClientKnobs* clientKnobs, bool isSimula
init( DD_EXCLUDE_MIN_REPLICAS, 1 ); init( DD_EXCLUDE_MIN_REPLICAS, 1 );
init( DD_VALIDATE_LOCALITY, true ); if( randomize && BUGGIFY ) DD_VALIDATE_LOCALITY = false; init( DD_VALIDATE_LOCALITY, true ); if( randomize && BUGGIFY ) DD_VALIDATE_LOCALITY = false;
init( DD_CHECK_INVALID_LOCALITY_DELAY, 60 ); if( randomize && BUGGIFY ) DD_CHECK_INVALID_LOCALITY_DELAY = 1 + deterministicRandom()->random01() * 600; init( DD_CHECK_INVALID_LOCALITY_DELAY, 60 ); if( randomize && BUGGIFY ) DD_CHECK_INVALID_LOCALITY_DELAY = 1 + deterministicRandom()->random01() * 600;
init( DD_ENABLE_VERBOSE_TRACING, false ); if( randomize && BUGGIFY ) DD_ENABLE_VERBOSE_TRACING = true;
// TeamRemover // TeamRemover
init( TR_FLAG_DISABLE_MACHINE_TEAM_REMOVER, false ); if( randomize && BUGGIFY ) TR_FLAG_DISABLE_MACHINE_TEAM_REMOVER = deterministicRandom()->random01() < 0.1 ? true : false; // false by default. disable the consistency check when it's true init( TR_FLAG_DISABLE_MACHINE_TEAM_REMOVER, false ); if( randomize && BUGGIFY ) TR_FLAG_DISABLE_MACHINE_TEAM_REMOVER = deterministicRandom()->random01() < 0.1 ? true : false; // false by default. disable the consistency check when it's true
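
The two new knobs follow the usual init pattern in Knobs.cpp: a production default, optionally replaced by a randomized value when the binary runs under simulation with BUGGIFY, so that rare configurations get exercised. A reduced stand-in for that pattern (not the real Knobs machinery) looks like this:

#include <random>

struct MiniKnobs {
    bool   DD_ENABLE_VERBOSE_TRACING = false;
    double DD_CHECK_INVALID_LOCALITY_DELAY = 60.0;

    // 'randomize' and 'buggify' play the roles of the constructor argument and the BUGGIFY macro.
    MiniKnobs(bool randomize, bool buggify, std::mt19937& rng) {
        std::uniform_real_distribution<double> u01(0.0, 1.0);
        if (randomize && buggify) DD_ENABLE_VERBOSE_TRACING = true;
        if (randomize && buggify) DD_CHECK_INVALID_LOCALITY_DELAY = 1 + u01(rng) * 600;
    }
};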

View File

@ -105,6 +105,7 @@ public:
double INFLIGHT_PENALTY_REDUNDANT; double INFLIGHT_PENALTY_REDUNDANT;
double INFLIGHT_PENALTY_UNHEALTHY; double INFLIGHT_PENALTY_UNHEALTHY;
double INFLIGHT_PENALTY_ONE_LEFT; double INFLIGHT_PENALTY_ONE_LEFT;
bool USE_OLD_NEEDED_SERVERS;
// Higher priorities are executed first // Higher priorities are executed first
// Priority/100 is the "priority group"/"superpriority". Priority inversion // Priority/100 is the "priority group"/"superpriority". Priority inversion
@ -118,6 +119,7 @@ public:
int PRIORITY_TEAM_CONTAINS_UNDESIRED_SERVER; int PRIORITY_TEAM_CONTAINS_UNDESIRED_SERVER;
int PRIORITY_TEAM_REDUNDANT; int PRIORITY_TEAM_REDUNDANT;
int PRIORITY_MERGE_SHARD; int PRIORITY_MERGE_SHARD;
int PRIORITY_POPULATE_REGION;
int PRIORITY_TEAM_UNHEALTHY; int PRIORITY_TEAM_UNHEALTHY;
int PRIORITY_TEAM_2_LEFT; int PRIORITY_TEAM_2_LEFT;
int PRIORITY_TEAM_1_LEFT; int PRIORITY_TEAM_1_LEFT;
@ -169,6 +171,7 @@ public:
int DD_EXCLUDE_MIN_REPLICAS; int DD_EXCLUDE_MIN_REPLICAS;
bool DD_VALIDATE_LOCALITY; bool DD_VALIDATE_LOCALITY;
int DD_CHECK_INVALID_LOCALITY_DELAY; int DD_CHECK_INVALID_LOCALITY_DELAY;
bool DD_ENABLE_VERBOSE_TRACING;
// TeamRemover to remove redundant teams // TeamRemover to remove redundant teams
bool TR_FLAG_DISABLE_MACHINE_TEAM_REMOVER; // disable the machineTeamRemover actor bool TR_FLAG_DISABLE_MACHINE_TEAM_REMOVER; // disable the machineTeamRemover actor

View File

@ -95,6 +95,7 @@ struct LogRouterData {
CounterCollection cc; CounterCollection cc;
Future<Void> logger; Future<Void> logger;
Reference<EventCacheHolder> eventCacheHolder;
std::vector<Reference<TagData>> tag_data; //we only store data for the remote tag locality std::vector<Reference<TagData>> tag_data; //we only store data for the remote tag locality
@ -130,8 +131,11 @@ struct LogRouterData {
} }
} }
eventCacheHolder = Reference<EventCacheHolder>( new EventCacheHolder(dbgid.shortString() + ".PeekLocation") );
specialCounter(cc, "Version", [this](){return this->version.get(); }); specialCounter(cc, "Version", [this](){return this->version.get(); });
specialCounter(cc, "MinPopped", [this](){return this->minPopped.get(); }); specialCounter(cc, "MinPopped", [this](){return this->minPopped.get(); });
specialCounter(cc, "FetchedVersions", [this](){ return std::max<Version>(0, std::min<Version>(SERVER_KNOBS->MAX_READ_TRANSACTION_LIFE_VERSIONS, this->version.get() - this->minPopped.get())); });
specialCounter(cc, "MinKnownCommittedVersion", [this](){ return this->minKnownCommittedVersion; }); specialCounter(cc, "MinKnownCommittedVersion", [this](){ return this->minKnownCommittedVersion; });
specialCounter(cc, "PoppedVersion", [this](){ return this->poppedVersion; }); specialCounter(cc, "PoppedVersion", [this](){ return this->poppedVersion; });
logger = traceCounters("LogRouterMetrics", dbgid, SERVER_KNOBS->WORKER_LOGGING_INTERVAL, &cc, "LogRouterMetrics"); logger = traceCounters("LogRouterMetrics", dbgid, SERVER_KNOBS->WORKER_LOGGING_INTERVAL, &cc, "LogRouterMetrics");
@ -224,15 +228,20 @@ ACTOR Future<Void> pullAsyncData( LogRouterData *self ) {
state std::vector<int> tags; // an optimization to avoid reallocating vector memory in every loop state std::vector<int> tags; // an optimization to avoid reallocating vector memory in every loop
loop { loop {
loop choose { loop {
when(wait(r ? r->getMore(TaskPriority::TLogCommit) : Never())) { break; } choose {
when(wait(dbInfoChange)) { // FIXME: does this actually happen? when(wait( r ? r->getMore(TaskPriority::TLogCommit) : Never() ) ) {
if (self->logSystem->get()) { break;
r = self->logSystem->get()->peekLogRouter(self->dbgid, tagAt, self->routerTag); }
} else { when( wait( dbInfoChange ) ) { //FIXME: does this actually happen?
r = Reference<ILogSystem::IPeekCursor>(); if( self->logSystem->get() ) {
r = self->logSystem->get()->peekLogRouter( self->dbgid, tagAt, self->routerTag );
TraceEvent("LogRouterPeekLocation", self->dbgid).detail("LogID", r->getPrimaryPeekLocation()).trackLatest(self->eventCacheHolder->trackingKey);
} else {
r = Reference<ILogSystem::IPeekCursor>();
}
dbInfoChange = self->logSystem->onChange();
} }
dbInfoChange = self->logSystem->onChange();
} }
} }

View File

@ -379,6 +379,8 @@ struct ILogSystem {
virtual Version getMinKnownCommittedVersion() = 0; virtual Version getMinKnownCommittedVersion() = 0;
virtual Optional<UID> getPrimaryPeekLocation() = 0;
virtual void addref() = 0; virtual void addref() = 0;
virtual void delref() = 0; virtual void delref() = 0;
@ -424,6 +426,7 @@ struct ILogSystem {
virtual const LogMessageVersion& version(); virtual const LogMessageVersion& version();
virtual Version popped(); virtual Version popped();
virtual Version getMinKnownCommittedVersion(); virtual Version getMinKnownCommittedVersion();
virtual Optional<UID> getPrimaryPeekLocation();
virtual void addref() { virtual void addref() {
ReferenceCounted<ServerPeekCursor>::addref(); ReferenceCounted<ServerPeekCursor>::addref();
@ -473,6 +476,7 @@ struct ILogSystem {
virtual const LogMessageVersion& version(); virtual const LogMessageVersion& version();
virtual Version popped(); virtual Version popped();
virtual Version getMinKnownCommittedVersion(); virtual Version getMinKnownCommittedVersion();
virtual Optional<UID> getPrimaryPeekLocation();
virtual void addref() { virtual void addref() {
ReferenceCounted<MergedPeekCursor>::addref(); ReferenceCounted<MergedPeekCursor>::addref();
@ -519,6 +523,7 @@ struct ILogSystem {
virtual const LogMessageVersion& version(); virtual const LogMessageVersion& version();
virtual Version popped(); virtual Version popped();
virtual Version getMinKnownCommittedVersion(); virtual Version getMinKnownCommittedVersion();
virtual Optional<UID> getPrimaryPeekLocation();
virtual void addref() { virtual void addref() {
ReferenceCounted<SetPeekCursor>::addref(); ReferenceCounted<SetPeekCursor>::addref();
@ -553,6 +558,7 @@ struct ILogSystem {
virtual const LogMessageVersion& version(); virtual const LogMessageVersion& version();
virtual Version popped(); virtual Version popped();
virtual Version getMinKnownCommittedVersion(); virtual Version getMinKnownCommittedVersion();
virtual Optional<UID> getPrimaryPeekLocation();
virtual void addref() { virtual void addref() {
ReferenceCounted<MultiCursor>::addref(); ReferenceCounted<MultiCursor>::addref();
@ -624,6 +630,7 @@ struct ILogSystem {
virtual const LogMessageVersion& version(); virtual const LogMessageVersion& version();
virtual Version popped(); virtual Version popped();
virtual Version getMinKnownCommittedVersion(); virtual Version getMinKnownCommittedVersion();
virtual Optional<UID> getPrimaryPeekLocation();
virtual void addref() { virtual void addref() {
ReferenceCounted<BufferedCursor>::addref(); ReferenceCounted<BufferedCursor>::addref();

View File

@ -286,6 +286,13 @@ const LogMessageVersion& ILogSystem::ServerPeekCursor::version() { return messag
Version ILogSystem::ServerPeekCursor::getMinKnownCommittedVersion() { return results.minKnownCommittedVersion; } Version ILogSystem::ServerPeekCursor::getMinKnownCommittedVersion() { return results.minKnownCommittedVersion; }
Optional<UID> ILogSystem::ServerPeekCursor::getPrimaryPeekLocation() {
if(interf) {
return interf->get().id();
}
return Optional<UID>();
}
Version ILogSystem::ServerPeekCursor::popped() { return poppedVersion; } Version ILogSystem::ServerPeekCursor::popped() { return poppedVersion; }
ILogSystem::MergedPeekCursor::MergedPeekCursor( vector< Reference<ILogSystem::IPeekCursor> > const& serverCursors, Version begin ) ILogSystem::MergedPeekCursor::MergedPeekCursor( vector< Reference<ILogSystem::IPeekCursor> > const& serverCursors, Version begin )
@ -516,6 +523,13 @@ Version ILogSystem::MergedPeekCursor::getMinKnownCommittedVersion() {
return serverCursors[currentCursor]->getMinKnownCommittedVersion(); return serverCursors[currentCursor]->getMinKnownCommittedVersion();
} }
Optional<UID> ILogSystem::MergedPeekCursor::getPrimaryPeekLocation() {
if(bestServer >= 0) {
return serverCursors[bestServer]->getPrimaryPeekLocation();
}
return Optional<UID>();
}
Version ILogSystem::MergedPeekCursor::popped() { Version ILogSystem::MergedPeekCursor::popped() {
Version poppedVersion = 0; Version poppedVersion = 0;
for (auto& c : serverCursors) for (auto& c : serverCursors)
@ -819,6 +833,13 @@ Version ILogSystem::SetPeekCursor::getMinKnownCommittedVersion() {
return serverCursors[currentSet][currentCursor]->getMinKnownCommittedVersion(); return serverCursors[currentSet][currentCursor]->getMinKnownCommittedVersion();
} }
Optional<UID> ILogSystem::SetPeekCursor::getPrimaryPeekLocation() {
if(bestServer >= 0 && bestSet >= 0) {
return serverCursors[bestSet][bestServer]->getPrimaryPeekLocation();
}
return Optional<UID>();
}
Version ILogSystem::SetPeekCursor::popped() { Version ILogSystem::SetPeekCursor::popped() {
Version poppedVersion = 0; Version poppedVersion = 0;
for (auto& cursors : serverCursors) { for (auto& cursors : serverCursors) {
@ -913,6 +934,10 @@ Version ILogSystem::MultiCursor::getMinKnownCommittedVersion() {
return cursors.back()->getMinKnownCommittedVersion(); return cursors.back()->getMinKnownCommittedVersion();
} }
Optional<UID> ILogSystem::MultiCursor::getPrimaryPeekLocation() {
return cursors.back()->getPrimaryPeekLocation();
}
Version ILogSystem::MultiCursor::popped() { Version ILogSystem::MultiCursor::popped() {
return std::max(poppedVersion, cursors.back()->popped()); return std::max(poppedVersion, cursors.back()->popped());
} }
@ -1154,6 +1179,10 @@ Version ILogSystem::BufferedCursor::getMinKnownCommittedVersion() {
return minKnownCommittedVersion; return minKnownCommittedVersion;
} }
Optional<UID> ILogSystem::BufferedCursor::getPrimaryPeekLocation() {
return Optional<UID>();
}
Version ILogSystem::BufferedCursor::popped() { Version ILogSystem::BufferedCursor::popped() {
if(initialPoppedVersion == poppedVersion) { if(initialPoppedVersion == poppedVersion) {
return 0; return 0;
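
Each cursor type implements the new getPrimaryPeekLocation() in one of three ways: a server cursor returns the id of the interface it is peeking, the merged/set/multi cursors delegate to their best or innermost child, and BufferedCursor reports no single location. The shape of that delegation, stripped down to invented interfaces (this is not FDB's ILogSystem API):

#include <memory>
#include <optional>
#include <string>
#include <vector>

using UID = std::string; // stand-in for FDB's UID type

struct PeekCursor {
    virtual ~PeekCursor() = default;
    virtual std::optional<UID> getPrimaryPeekLocation() const = 0;
};

// Peeks a single server: the primary location is that server's id.
struct ServerCursor : PeekCursor {
    UID serverId;
    explicit ServerCursor(UID id) : serverId(std::move(id)) {}
    std::optional<UID> getPrimaryPeekLocation() const override { return serverId; }
};

// Merges several child cursors: delegate to the "best" one if there is one.
struct MergedCursor : PeekCursor {
    std::vector<std::shared_ptr<PeekCursor>> children;
    int bestChild = -1;
    std::optional<UID> getPrimaryPeekLocation() const override {
        if (bestChild >= 0) return children[bestChild]->getPrimaryPeekLocation();
        return std::nullopt;
    }
};

// Buffered cursor: there is no single primary peek location to report.
struct BufferedCursor : PeekCursor {
    std::optional<UID> getPrimaryPeekLocation() const override { return std::nullopt; }
};

The LogRouter uses the result to publish the LogRouterPeekLocation trace event naming the log it is currently pulling from.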

View File

@ -687,7 +687,7 @@ void updateRate(RatekeeperData* self, RatekeeperLimits* limits) {
.detail("LimitingStorageServerVersionLag", limitingVersionLag) .detail("LimitingStorageServerVersionLag", limitingVersionLag)
.detail("WorstStorageServerDurabilityLag", worstDurabilityLag) .detail("WorstStorageServerDurabilityLag", worstDurabilityLag)
.detail("LimitingStorageServerDurabilityLag", limitingDurabilityLag) .detail("LimitingStorageServerDurabilityLag", limitingDurabilityLag)
.trackLatest(name.c_str()); .trackLatest(name);
} }
} }

View File

@ -43,14 +43,6 @@ struct ProxyRequestsInfo {
namespace{ namespace{
struct Resolver : ReferenceCounted<Resolver> { struct Resolver : ReferenceCounted<Resolver> {
Resolver( UID dbgid, int proxyCount, int resolverCount )
: dbgid(dbgid), proxyCount(proxyCount), resolverCount(resolverCount), version(-1), conflictSet( newConflictSet() ), iopsSample( SERVER_KNOBS->KEY_BYTES_PER_SAMPLE ), debugMinRecentStateVersion(0)
{
}
~Resolver() {
destroyConflictSet( conflictSet );
}
UID dbgid; UID dbgid;
int proxyCount, resolverCount; int proxyCount, resolverCount;
NotifiedVersion version; NotifiedVersion version;
@ -65,6 +57,45 @@ struct Resolver : ReferenceCounted<Resolver> {
TransientStorageMetricSample iopsSample; TransientStorageMetricSample iopsSample;
Version debugMinRecentStateVersion; Version debugMinRecentStateVersion;
CounterCollection cc;
Counter resolveBatchIn;
Counter resolveBatchStart;
Counter resolvedTransactions;
Counter resolvedBytes;
Counter resolvedReadConflictRanges;
Counter resolvedWriteConflictRanges;
Counter transactionsAccepted;
Counter transactionsTooOld;
Counter transactionsConflicted;
Counter resolvedStateTransactions;
Counter resolvedStateMutations;
Counter resolvedStateBytes;
Counter resolveBatchOut;
Counter metricsRequests;
Counter splitRequests;
Future<Void> logger;
Resolver( UID dbgid, int proxyCount, int resolverCount )
: dbgid(dbgid), proxyCount(proxyCount), resolverCount(resolverCount), version(-1), conflictSet( newConflictSet() ), iopsSample( SERVER_KNOBS->KEY_BYTES_PER_SAMPLE ), debugMinRecentStateVersion(0),
cc("Resolver", dbgid.toString()),
resolveBatchIn("ResolveBatchIn", cc), resolveBatchStart("ResolveBatchStart", cc), resolvedTransactions("ResolvedTransactions", cc), resolvedBytes("ResolvedBytes", cc),
resolvedReadConflictRanges("ResolvedReadConflictRanges", cc), resolvedWriteConflictRanges("ResolvedWriteConflictRanges", cc), transactionsAccepted("TransactionsAccepted", cc),
transactionsTooOld("TransactionsTooOld", cc), transactionsConflicted("TransactionsConflicted", cc), resolvedStateTransactions("ResolvedStateTransactions", cc),
resolvedStateMutations("ResolvedStateMutations", cc), resolvedStateBytes("ResolvedStateBytes", cc), resolveBatchOut("ResolveBatchOut", cc), metricsRequests("MetricsRequests", cc),
splitRequests("SplitRequests", cc)
{
specialCounter(cc, "Version", [this](){ return this->version.get(); });
specialCounter(cc, "NeededVersion", [this](){ return this->neededVersion.get(); });
specialCounter(cc, "TotalStateBytes", [this](){ return this->totalStateBytes.get(); });
logger = traceCounters("ResolverMetrics", dbgid, SERVER_KNOBS->WORKER_LOGGING_INTERVAL, &cc, "ResolverMetrics");
}
~Resolver() {
destroyConflictSet( conflictSet );
}
}; };
} // namespace } // namespace
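
This refactor replaces the process-global GlobalCounters removed in the hunk further up with a per-Resolver CounterCollection: each event of interest bumps a named Counter, a few derived values are exposed through specialCounter, and traceCounters periodically flushes everything as a ResolverMetrics trace event. The sketch below imitates that shape with self-contained stand-ins; it is not the real FDB Counter API.

#include <cstdint>
#include <functional>
#include <iostream>
#include <string>
#include <vector>

struct Counter {
    std::string name;
    int64_t value = 0;
    Counter& operator++() { ++value; return *this; }
    Counter& operator+=(int64_t n) { value += n; return *this; }
};

struct SpecialCounter {                 // computed on demand, like specialCounter(cc, "Version", ...)
    std::string name;
    std::function<int64_t()> get;
};

struct CounterCollection {
    std::vector<Counter*> counters;
    std::vector<SpecialCounter> specials;
};

// One flush of the collection; in FDB, traceCounters() does this every WORKER_LOGGING_INTERVAL.
void traceCountersOnce(const CounterCollection& cc, const std::string& eventName) {
    std::cout << eventName;
    for (const Counter* c : cc.counters) std::cout << ' ' << c->name << '=' << c->value;
    for (const SpecialCounter& s : cc.specials) std::cout << ' ' << s.name << '=' << s.get();
    std::cout << '\n';
}

int main() {
    Counter resolveBatchIn{"ResolveBatchIn"};
    Counter resolvedTransactions{"ResolvedTransactions"};
    CounterCollection cc;
    cc.counters = { &resolveBatchIn, &resolvedTransactions };
    int64_t version = 42;
    cc.specials.push_back({ "Version", [&] { return version; } });

    ++resolveBatchIn;            // like ++self->resolveBatchIn at the top of resolveBatch()
    resolvedTransactions += 10;  // like self->resolvedTransactions += req.transactions.size()
    traceCountersOnce(cc, "ResolverMetrics");
}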
@ -78,6 +109,8 @@ ACTOR Future<Void> resolveBatch(
state NetworkAddress proxyAddress = req.prevVersion >= 0 ? req.reply.getEndpoint().getPrimaryAddress() : NetworkAddress(); state NetworkAddress proxyAddress = req.prevVersion >= 0 ? req.reply.getEndpoint().getPrimaryAddress() : NetworkAddress();
state ProxyRequestsInfo &proxyInfo = self->proxyInfoMap[proxyAddress]; state ProxyRequestsInfo &proxyInfo = self->proxyInfoMap[proxyAddress];
++self->resolveBatchIn;
if(req.debugID.present()) { if(req.debugID.present()) {
debugID = nondeterministicRandom()->randomUniqueID(); debugID = nondeterministicRandom()->randomUniqueID();
g_traceBatch.addAttach("CommitAttachID", req.debugID.get().first(), debugID.get().first()); g_traceBatch.addAttach("CommitAttachID", req.debugID.get().first(), debugID.get().first());
@ -120,6 +153,10 @@ ACTOR Future<Void> resolveBatch(
} }
if (self->version.get() == req.prevVersion) { // Not a duplicate (check relies on no waiting between here and self->version.set() below!) if (self->version.get() == req.prevVersion) { // Not a duplicate (check relies on no waiting between here and self->version.set() below!)
++self->resolveBatchStart;
self->resolvedTransactions += req.transactions.size();
self->resolvedBytes += req.transactions.expectedSize();
if(proxyInfo.lastVersion > 0) { if(proxyInfo.lastVersion > 0) {
proxyInfo.outstandingBatches.erase(proxyInfo.outstandingBatches.begin(), proxyInfo.outstandingBatches.upper_bound(req.lastReceivedVersion)); proxyInfo.outstandingBatches.erase(proxyInfo.outstandingBatches.begin(), proxyInfo.outstandingBatches.upper_bound(req.lastReceivedVersion));
} }
@ -135,11 +172,12 @@ ACTOR Future<Void> resolveBatch(
// Detect conflicts // Detect conflicts
double expire = now() + SERVER_KNOBS->SAMPLE_EXPIRATION_TIME; double expire = now() + SERVER_KNOBS->SAMPLE_EXPIRATION_TIME;
double tstart = timer();
ConflictBatch conflictBatch( self->conflictSet ); ConflictBatch conflictBatch( self->conflictSet );
int keys = 0; int keys = 0;
for(int t=0; t<req.transactions.size(); t++) { for(int t=0; t<req.transactions.size(); t++) {
conflictBatch.addTransaction( req.transactions[t] ); conflictBatch.addTransaction( req.transactions[t] );
self->resolvedReadConflictRanges += req.transactions[t].read_conflict_ranges.size();
self->resolvedWriteConflictRanges += req.transactions[t].write_conflict_ranges.size();
keys += req.transactions[t].write_conflict_ranges.size()*2 + req.transactions[t].read_conflict_ranges.size()*2; keys += req.transactions[t].write_conflict_ranges.size()*2 + req.transactions[t].read_conflict_ranges.size()*2;
if(self->resolverCount > 1) { if(self->resolverCount > 1) {
@ -150,10 +188,6 @@ ACTOR Future<Void> resolveBatch(
} }
} }
conflictBatch.detectConflicts( req.version, req.version - SERVER_KNOBS->MAX_WRITE_TRANSACTION_LIFE_VERSIONS, commitList, &tooOldList); conflictBatch.detectConflicts( req.version, req.version - SERVER_KNOBS->MAX_WRITE_TRANSACTION_LIFE_VERSIONS, commitList, &tooOldList);
g_counters.conflictTime += timer() - tstart;
++g_counters.conflictBatches;
g_counters.conflictTransactions += req.transactions.size();
g_counters.conflictKeys += keys;
ResolveTransactionBatchReply &reply = proxyInfo.outstandingBatches[req.version]; ResolveTransactionBatchReply &reply = proxyInfo.outstandingBatches[req.version];
reply.debugID = req.debugID; reply.debugID = req.debugID;
@ -161,18 +195,30 @@ ACTOR Future<Void> resolveBatch(
for(int c=0; c<commitList.size(); c++) for(int c=0; c<commitList.size(); c++)
reply.committed[commitList[c]] = ConflictBatch::TransactionCommitted; reply.committed[commitList[c]] = ConflictBatch::TransactionCommitted;
for (int c = 0; c<tooOldList.size(); c++) for (int c = 0; c<tooOldList.size(); c++) {
ASSERT(reply.committed[tooOldList[c]] == ConflictBatch::TransactionConflict);
reply.committed[tooOldList[c]] = ConflictBatch::TransactionTooOld; reply.committed[tooOldList[c]] = ConflictBatch::TransactionTooOld;
}
self->transactionsAccepted += commitList.size();
self->transactionsTooOld += tooOldList.size();
self->transactionsConflicted += req.transactions.size() - commitList.size() - tooOldList.size();
ASSERT(req.prevVersion >= 0 || req.txnStateTransactions.size() == 0); // The master's request should not have any state transactions ASSERT(req.prevVersion >= 0 || req.txnStateTransactions.size() == 0); // The master's request should not have any state transactions
auto& stateTransactions = self->recentStateTransactions[ req.version ]; auto& stateTransactions = self->recentStateTransactions[ req.version ];
int64_t stateMutations = 0;
int64_t stateBytes = 0; int64_t stateBytes = 0;
for(int t : req.txnStateTransactions) { for(int t : req.txnStateTransactions) {
stateMutations += req.transactions[t].mutations.size();
stateBytes += req.transactions[t].mutations.expectedSize(); stateBytes += req.transactions[t].mutations.expectedSize();
stateTransactions.push_back_deep(stateTransactions.arena(), StateTransactionRef(reply.committed[t] == ConflictBatch::TransactionCommitted, req.transactions[t].mutations)); stateTransactions.push_back_deep(stateTransactions.arena(), StateTransactionRef(reply.committed[t] == ConflictBatch::TransactionCommitted, req.transactions[t].mutations));
} }
self->resolvedStateTransactions += req.txnStateTransactions.size();
self->resolvedStateMutations += stateMutations;
self->resolvedStateBytes += stateBytes;
if(stateBytes > 0) if(stateBytes > 0)
self->recentStateTransactionSizes.push_back(std::make_pair(req.version, stateBytes)); self->recentStateTransactionSizes.push_back(std::make_pair(req.version, stateBytes));
@ -255,6 +301,8 @@ ACTOR Future<Void> resolveBatch(
req.reply.send(Never()); req.reply.send(Never());
} }
++self->resolveBatchOut;
return Void(); return Void();
} }
@ -273,9 +321,11 @@ ACTOR Future<Void> resolverCore(
actors.add( resolveBatch(self, batch) ); actors.add( resolveBatch(self, batch) );
} }
when ( ResolutionMetricsRequest req = waitNext( resolver.metrics.getFuture() ) ) { when ( ResolutionMetricsRequest req = waitNext( resolver.metrics.getFuture() ) ) {
++self->metricsRequests;
req.reply.send(self->iopsSample.getEstimate(allKeys)); req.reply.send(self->iopsSample.getEstimate(allKeys));
} }
when ( ResolutionSplitRequest req = waitNext( resolver.split.getFuture() ) ) { when ( ResolutionSplitRequest req = waitNext( resolver.split.getFuture() ) ) {
++self->splitRequests;
ResolutionSplitReply rep; ResolutionSplitReply rep;
rep.key = self->iopsSample.splitEstimate(req.range, req.offset, req.front); rep.key = self->iopsSample.splitEstimate(req.range, req.offset, req.front);
rep.used = self->iopsSample.getEstimate(req.front ? KeyRangeRef(req.range.begin, rep.key) : KeyRangeRef(rep.key, req.range.end)); rep.used = self->iopsSample.getEstimate(req.front ? KeyRangeRef(req.range.begin, rep.key) : KeyRangeRef(rep.key, req.range.end));
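
A note on the counter plumbing added above (resolveBatchIn/Start/Out and the resolved* totals): the pattern is "bump a named counter at each pipeline stage, publish the whole collection periodically". A minimal, self-contained C++ stand-in is sketched below; it is not the FDB Counter/CounterCollection API, and the counter names and batch size are illustrative only.

#include <cstdint>
#include <cstdio>
#include <map>
#include <string>

// Stand-in for a counter collection: named counters bumped at each pipeline stage,
// dumped periodically instead of on every event.
struct Counters {
    std::map<std::string, int64_t> values;
    void bump(const std::string& name, int64_t delta = 1) { values[name] += delta; }
    void log() const {
        for (const auto& kv : values)
            std::printf("%s=%lld\n", kv.first.c_str(), static_cast<long long>(kv.second));
    }
};

int main() {
    Counters cc;
    cc.bump("ResolveBatchIn");              // a batch arrived
    cc.bump("ResolveBatchStart");           // it was admitted at the version barrier
    cc.bump("ResolvedTransactions", 42);    // hypothetical batch of 42 transactions
    cc.bump("ResolveBatchOut");             // the batch completed
    cc.log();                               // plays the role of traceCounters("ResolverMetrics", ...)
}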

View File

@ -836,10 +836,14 @@ void ConflictBatch::detectConflicts(Version now, Version newOldestVersion, std::
t = timer(); t = timer();
mergeWriteConflictRanges(now); mergeWriteConflictRanges(now);
g_merge += timer() - t; g_merge += timer() - t;
for (int i = 0; i < transactionCount; i++) { for (int i = 0; i < transactionCount; i++) {
if (!transactionConflictStatus[i]) nonConflicting.push_back(i); if (tooOldTransactions && transactionInfo[i]->tooOld) {
if (tooOldTransactions && transactionInfo[i]->tooOld) tooOldTransactions->push_back(i); tooOldTransactions->push_back(i);
}
else if (!transactionConflictStatus[i]) {
nonConflicting.push_back( i );
}
} }
delete[] transactionConflictStatus; delete[] transactionConflictStatus;
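
The hunk above changes the classification order so that a transaction flagged "too old" is reported as such even when it was also marked conflicting; only transactions that are neither too old nor conflicting land in nonConflicting. A minimal stand-alone sketch of that ordering (not the FDB ConflictBatch types; the sample batch is hypothetical):

#include <cstdio>
#include <vector>

struct TxnInfo { bool tooOld; };            // stand-in for the relevant transactionInfo field

int main() {
    std::vector<TxnInfo> transactionInfo = { {false}, {true}, {false} };   // hypothetical batch
    std::vector<bool> transactionConflictStatus = { false, true, true };
    std::vector<int> nonConflicting, tooOldTransactions;
    for (size_t i = 0; i < transactionInfo.size(); i++) {
        if (transactionInfo[i].tooOld) {
            tooOldTransactions.push_back(int(i));        // checked first, as in the new code
        } else if (!transactionConflictStatus[i]) {
            nonConflicting.push_back(int(i));
        }
    }
    std::printf("nonConflicting=%zu tooOld=%zu\n", nonConflicting.size(), tooOldTransactions.size());
}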

View File

@ -1433,29 +1433,30 @@ ACTOR static Future<JsonBuilderObject> dataStatusFetcher(WorkerDetails ddWorker,
stateSectionObj["description"] = "No replicas remain of some data"; stateSectionObj["description"] = "No replicas remain of some data";
stateSectionObj["min_replicas_remaining"] = 0; stateSectionObj["min_replicas_remaining"] = 0;
replicas = 0; replicas = 0;
} } else if (highestPriority >= SERVER_KNOBS->PRIORITY_TEAM_1_LEFT) {
else if (highestPriority >= SERVER_KNOBS->PRIORITY_TEAM_1_LEFT) {
stateSectionObj["healthy"] = false; stateSectionObj["healthy"] = false;
stateSectionObj["name"] = "healing"; stateSectionObj["name"] = "healing";
stateSectionObj["description"] = "Only one replica remains of some data"; stateSectionObj["description"] = "Only one replica remains of some data";
stateSectionObj["min_replicas_remaining"] = 1; stateSectionObj["min_replicas_remaining"] = 1;
replicas = 1; replicas = 1;
} } else if (highestPriority >= SERVER_KNOBS->PRIORITY_TEAM_2_LEFT) {
else if (highestPriority >= SERVER_KNOBS->PRIORITY_TEAM_2_LEFT) {
stateSectionObj["healthy"] = false; stateSectionObj["healthy"] = false;
stateSectionObj["name"] = "healing"; stateSectionObj["name"] = "healing";
stateSectionObj["description"] = "Only two replicas remain of some data"; stateSectionObj["description"] = "Only two replicas remain of some data";
stateSectionObj["min_replicas_remaining"] = 2; stateSectionObj["min_replicas_remaining"] = 2;
replicas = 2; replicas = 2;
} } else if (highestPriority >= SERVER_KNOBS->PRIORITY_TEAM_UNHEALTHY) {
else if (highestPriority >= SERVER_KNOBS->PRIORITY_TEAM_UNHEALTHY) {
stateSectionObj["healthy"] = false; stateSectionObj["healthy"] = false;
stateSectionObj["name"] = "healing"; stateSectionObj["name"] = "healing";
stateSectionObj["description"] = "Restoring replication factor"; stateSectionObj["description"] = "Restoring replication factor";
} else if (highestPriority >= SERVER_KNOBS->PRIORITY_POPULATE_REGION) {
stateSectionObj["healthy"] = true;
stateSectionObj["name"] = "healthy_populating_region";
stateSectionObj["description"] = "Populating remote region";
} else if (highestPriority >= SERVER_KNOBS->PRIORITY_MERGE_SHARD) { } else if (highestPriority >= SERVER_KNOBS->PRIORITY_MERGE_SHARD) {
stateSectionObj["healthy"] = true; stateSectionObj["healthy"] = true;
stateSectionObj["name"] = "healthy_repartitioning"; stateSectionObj["name"] = "healthy_repartitioning";
stateSectionObj["description"] = "Repartitioning."; stateSectionObj["description"] = "Repartitioning";
} else if (highestPriority >= SERVER_KNOBS->PRIORITY_TEAM_REDUNDANT) { } else if (highestPriority >= SERVER_KNOBS->PRIORITY_TEAM_REDUNDANT) {
stateSectionObj["healthy"] = true; stateSectionObj["healthy"] = true;
stateSectionObj["name"] = "optimizing_team_collections"; stateSectionObj["name"] = "optimizing_team_collections";

View File

@ -54,7 +54,7 @@
#include "fdbrpc/Net2FileSystem.h" #include "fdbrpc/Net2FileSystem.h"
#include "fdbrpc/AsyncFileCached.actor.h" #include "fdbrpc/AsyncFileCached.actor.h"
#include "fdbserver/CoroFlow.h" #include "fdbserver/CoroFlow.h"
#include "flow/TLSPolicy.h" #include "flow/TLSConfig.actor.h"
#if defined(CMAKE_BUILD) || !defined(WIN32) #if defined(CMAKE_BUILD) || !defined(WIN32)
#include "versions.h" #include "versions.h"
#endif #endif
@ -175,8 +175,6 @@ CSimpleOpt::SOption g_rgOptions[] = {
// clang-format on // clang-format on
GlobalCounters g_counters;
extern void dsltest(); extern void dsltest();
extern void pingtest(); extern void pingtest();
extern void copyTest(); extern void copyTest();
@ -941,9 +939,7 @@ struct CLIOptions {
int minTesterCount = 1; int minTesterCount = 1;
bool testOnServers = false; bool testOnServers = false;
Reference<TLSPolicy> tlsPolicy = Reference<TLSPolicy>(new TLSPolicy(TLSPolicy::Is::SERVER)); TLSConfig tlsConfig = TLSConfig(TLSEndpointType::SERVER);
TLSParams tlsParams;
std::vector<std::string> tlsVerifyPeers;
double fileIoTimeout = 0.0; double fileIoTimeout = 0.0;
bool fileIoWarnOnly = false; bool fileIoWarnOnly = false;
uint64_t rsssize = -1; uint64_t rsssize = -1;
@ -1370,23 +1366,23 @@ private:
break; break;
#ifndef TLS_DISABLED #ifndef TLS_DISABLED
case TLSParams::OPT_TLS_PLUGIN: case TLSConfig::OPT_TLS_PLUGIN:
args.OptionArg(); args.OptionArg();
break; break;
case TLSParams::OPT_TLS_CERTIFICATES: case TLSConfig::OPT_TLS_CERTIFICATES:
tlsParams.tlsCertPath = args.OptionArg(); tlsConfig.setCertificatePath(args.OptionArg());
break; break;
case TLSParams::OPT_TLS_PASSWORD: case TLSConfig::OPT_TLS_PASSWORD:
tlsParams.tlsPassword = args.OptionArg(); tlsConfig.setPassword(args.OptionArg());
break; break;
case TLSParams::OPT_TLS_CA_FILE: case TLSConfig::OPT_TLS_CA_FILE:
tlsParams.tlsCAPath = args.OptionArg(); tlsConfig.setCAPath(args.OptionArg());
break; break;
case TLSParams::OPT_TLS_KEY: case TLSConfig::OPT_TLS_KEY:
tlsParams.tlsKeyPath = args.OptionArg(); tlsConfig.setKeyPath(args.OptionArg());
break; break;
case TLSParams::OPT_TLS_VERIFY_PEERS: case TLSConfig::OPT_TLS_VERIFY_PEERS:
tlsVerifyPeers.push_back(args.OptionArg()); tlsConfig.addVerifyPeers(args.OptionArg());
break; break;
#endif #endif
} }
@ -1581,9 +1577,11 @@ int main(int argc, char* argv[]) {
} }
} catch (Error& e) { } catch (Error& e) {
if (e.code() == error_code_invalid_option_value) { if (e.code() == error_code_invalid_option_value) {
fprintf(stderr, "WARNING: Invalid value '%s' for option '%s'\n", k->second.c_str(), k->first.c_str()); fprintf(stderr, "WARNING: Invalid value '%s' for knob option '%s'\n", k->second.c_str(), k->first.c_str());
TraceEvent(SevWarnAlways, "InvalidKnobValue").detail("Knob", printable(k->first)).detail("Value", printable(k->second)); TraceEvent(SevWarnAlways, "InvalidKnobValue").detail("Knob", printable(k->first)).detail("Value", printable(k->second));
} else { } else {
fprintf(stderr, "ERROR: Failed to set knob option '%s': %s\n", k->first.c_str(), e.what());
TraceEvent(SevError, "FailedToSetKnob").detail("Knob", printable(k->first)).detail("Value", printable(k->second)).error(e);
throw; throw;
} }
} }
@ -1625,18 +1623,7 @@ int main(int argc, char* argv[]) {
startNewSimulator(); startNewSimulator();
openTraceFile(NetworkAddress(), opts.rollsize, opts.maxLogsSize, opts.logFolder, "trace", opts.logGroup); openTraceFile(NetworkAddress(), opts.rollsize, opts.maxLogsSize, opts.logFolder, "trace", opts.logGroup);
} else { } else {
#ifndef TLS_DISABLED g_network = newNet2(opts.tlsConfig, opts.useThreadPool, true);
if ( opts.tlsVerifyPeers.size() ) {
try {
opts.tlsPolicy->set_verify_peers( opts.tlsVerifyPeers );
} catch( Error &e ) {
fprintf(stderr, "ERROR: The format of the --tls_verify_peers option is incorrect.\n");
printHelpTeaser(argv[0]);
flushAndExit(FDB_EXIT_ERROR);
}
}
#endif
g_network = newNet2(opts.useThreadPool, true, opts.tlsPolicy, opts.tlsParams);
FlowTransport::createInstance(false, 1); FlowTransport::createInstance(false, 1);
const bool expectsPublicAddress = (role == FDBD || role == NetworkTestServer || role == Restore); const bool expectsPublicAddress = (role == FDBD || role == NetworkTestServer || role == Restore);
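
A rough usage sketch of the new option plumbing: command-line values now accumulate into a TLSConfig that newNet2 consumes directly, replacing the old TLSPolicy/TLSParams pair. The setter names and the newNet2(tlsConfig, useThreadPool, useMetrics) shape come from this diff; the include paths and the certificate file paths below are assumptions.

#include "flow/TLSConfig.actor.h"   // assumed include; declares TLSConfig and TLSEndpointType
#include "flow/network.h"           // assumed include; declares g_network and newNet2

void startServerNetwork(bool useThreadPool) {
    TLSConfig tlsConfig(TLSEndpointType::SERVER);
    tlsConfig.setCertificatePath("/etc/foundationdb/fdb.pem");   // hypothetical paths
    tlsConfig.setKeyPath("/etc/foundationdb/fdb.pem");
    tlsConfig.setCAPath("/etc/foundationdb/ca.pem");
    tlsConfig.addVerifyPeers("Check.Valid=1");
    g_network = newNet2(tlsConfig, useThreadPool, true);         // was newNet2(useThreadPool, true, tlsPolicy, tlsParams)
}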

View File

@ -673,7 +673,7 @@ void startRole(const Role &role, UID roleId, UID workerId, const std::map<std::s
for(auto it = details.begin(); it != details.end(); it++) for(auto it = details.begin(); it != details.end(); it++)
ev.detail(it->first.c_str(), it->second); ev.detail(it->first.c_str(), it->second);
ev.trackLatest( (roleId.shortString() + ".Role" ).c_str() ); ev.trackLatest( roleId.shortString() + ".Role" );
// Update roles map, log Roles metrics // Update roles map, log Roles metrics
g_roles.insert({role.roleName, roleId.shortString()}); g_roles.insert({role.roleName, roleId.shortString()});
@ -691,7 +691,7 @@ void endRole(const Role &role, UID id, std::string reason, bool ok, Error e) {
.detail("As", role.roleName) .detail("As", role.roleName)
.detail("Reason", reason); .detail("Reason", reason);
ev.trackLatest( (id.shortString() + ".Role").c_str() ); ev.trackLatest( id.shortString() + ".Role" );
} }
if(!ok) { if(!ok) {

flow/Arena.cpp Normal file
View File

@ -0,0 +1,274 @@
/*
* Arena.cpp
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2020 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "Arena.h"
void ArenaBlock::delref() {
if (delref_no_destroy()) destroy();
}
bool ArenaBlock::isTiny() const {
return tinySize != NOT_TINY;
}
int ArenaBlock::size() const {
if (isTiny())
return tinySize;
else
return bigSize;
}
int ArenaBlock::used() const {
if (isTiny())
return tinyUsed;
else
return bigUsed;
}
int ArenaBlock::unused() const {
if (isTiny())
return tinySize - tinyUsed;
else
return bigSize - bigUsed;
}
const void* ArenaBlock::getData() const {
return this;
}
const void* ArenaBlock::getNextData() const {
return (const uint8_t*)getData() + used();
}
size_t ArenaBlock::totalSize() {
if (isTiny()) return size();
size_t s = size();
int o = nextBlockOffset;
while (o) {
ArenaBlockRef* r = (ArenaBlockRef*)((char*)getData() + o);
s += r->next->totalSize();
o = r->nextBlockOffset;
}
return s;
}
// just for debugging:
void ArenaBlock::getUniqueBlocks(std::set<ArenaBlock*>& a) {
a.insert(this);
if (isTiny()) return;
int o = nextBlockOffset;
while (o) {
ArenaBlockRef* r = (ArenaBlockRef*)((char*)getData() + o);
r->next->getUniqueBlocks(a);
o = r->nextBlockOffset;
}
return;
}
int ArenaBlock::addUsed(int bytes) {
if (isTiny()) {
int t = tinyUsed;
tinyUsed += bytes;
return t;
} else {
int t = bigUsed;
bigUsed += bytes;
return t;
}
}
void ArenaBlock::makeReference(ArenaBlock* next) {
ArenaBlockRef* r = (ArenaBlockRef*)((char*)getData() + bigUsed);
r->next = next;
r->nextBlockOffset = nextBlockOffset;
nextBlockOffset = bigUsed;
bigUsed += sizeof(ArenaBlockRef);
}
void ArenaBlock::dependOn(Reference<ArenaBlock>& self, ArenaBlock* other) {
other->addref();
if (!self || self->isTiny() || self->unused() < sizeof(ArenaBlockRef))
create(SMALL, self)->makeReference(other);
else
self->makeReference(other);
}
void* ArenaBlock::allocate(Reference<ArenaBlock>& self, int bytes) {
ArenaBlock* b = self.getPtr();
if (!self || self->unused() < bytes) b = create(bytes, self);
return (char*)b->getData() + b->addUsed(bytes);
}
// Return an appropriately-sized ArenaBlock to store the given data
ArenaBlock* ArenaBlock::create(int dataSize, Reference<ArenaBlock>& next) {
ArenaBlock* b;
if (dataSize <= SMALL - TINY_HEADER && !next) {
if (dataSize <= 16 - TINY_HEADER) {
b = (ArenaBlock*)FastAllocator<16>::allocate();
b->tinySize = 16;
INSTRUMENT_ALLOCATE("Arena16");
} else if (dataSize <= 32 - TINY_HEADER) {
b = (ArenaBlock*)FastAllocator<32>::allocate();
b->tinySize = 32;
INSTRUMENT_ALLOCATE("Arena32");
} else {
b = (ArenaBlock*)FastAllocator<64>::allocate();
b->tinySize = 64;
INSTRUMENT_ALLOCATE("Arena64");
}
b->tinyUsed = TINY_HEADER;
} else {
int reqSize = dataSize + sizeof(ArenaBlock);
if (next) reqSize += sizeof(ArenaBlockRef);
if (reqSize < LARGE) {
// Each block should be larger than the previous block, up to a limit, to minimize allocations
// Worst-case allocation pattern: 1 +10 +17 +42 +67 +170 +323 +681 +1348 +2728 +2210 +2211 (+1K +3K+1 +4K)*
// Overhead: 4X for small arenas, 3X intermediate, 1.33X for large arenas
int prevSize = next ? next->size() : 0;
reqSize = std::max(reqSize, std::min(prevSize * 2, std::max(LARGE - 1, reqSize * 4)));
}
if (reqSize < LARGE) {
if (reqSize <= 128) {
b = (ArenaBlock*)FastAllocator<128>::allocate();
b->bigSize = 128;
INSTRUMENT_ALLOCATE("Arena128");
} else if (reqSize <= 256) {
b = (ArenaBlock*)FastAllocator<256>::allocate();
b->bigSize = 256;
INSTRUMENT_ALLOCATE("Arena256");
} else if (reqSize <= 512) {
b = (ArenaBlock*)FastAllocator<512>::allocate();
b->bigSize = 512;
INSTRUMENT_ALLOCATE("Arena512");
} else if (reqSize <= 1024) {
b = (ArenaBlock*)FastAllocator<1024>::allocate();
b->bigSize = 1024;
INSTRUMENT_ALLOCATE("Arena1024");
} else if (reqSize <= 2048) {
b = (ArenaBlock*)FastAllocator<2048>::allocate();
b->bigSize = 2048;
INSTRUMENT_ALLOCATE("Arena2048");
} else if (reqSize <= 4096) {
b = (ArenaBlock*)FastAllocator<4096>::allocate();
b->bigSize = 4096;
INSTRUMENT_ALLOCATE("Arena4096");
} else {
b = (ArenaBlock*)FastAllocator<8192>::allocate();
b->bigSize = 8192;
INSTRUMENT_ALLOCATE("Arena8192");
}
b->tinySize = b->tinyUsed = NOT_TINY;
b->bigUsed = sizeof(ArenaBlock);
} else {
#ifdef ALLOC_INSTRUMENTATION
allocInstr["ArenaHugeKB"].alloc((reqSize + 1023) >> 10);
#endif
b = (ArenaBlock*)new uint8_t[reqSize];
b->tinySize = b->tinyUsed = NOT_TINY;
b->bigSize = reqSize;
b->bigUsed = sizeof(ArenaBlock);
if (FLOW_KNOBS && g_trace_depth == 0 &&
nondeterministicRandom()->random01() < (reqSize / FLOW_KNOBS->HUGE_ARENA_LOGGING_BYTES)) {
hugeArenaSample(reqSize);
}
g_hugeArenaMemory.fetch_add(reqSize);
// If the new block has less free space than the old block, make the old block depend on it
if (next && !next->isTiny() && next->unused() >= reqSize - dataSize) {
b->nextBlockOffset = 0;
b->setrefCountUnsafe(1);
next->makeReference(b);
return b;
}
}
b->nextBlockOffset = 0;
if (next) b->makeReference(next.getPtr());
}
b->setrefCountUnsafe(1);
next.setPtrUnsafe(b);
return b;
}
void ArenaBlock::destroy() {
// If the stack never contains more than one item, nothing will be allocated from stackArena.
// If stackArena is used, it will always be a linked list, so destroying *it* will not create another arena
ArenaBlock* tinyStack = this;
Arena stackArena;
VectorRef<ArenaBlock*> stack(&tinyStack, 1);
while (stack.size()) {
ArenaBlock* b = stack.end()[-1];
stack.pop_back();
if (!b->isTiny()) {
int o = b->nextBlockOffset;
while (o) {
ArenaBlockRef* br = (ArenaBlockRef*)((char*)b->getData() + o);
if (br->next->delref_no_destroy()) stack.push_back(stackArena, br->next);
o = br->nextBlockOffset;
}
}
b->destroyLeaf();
}
}
void ArenaBlock::destroyLeaf() {
if (isTiny()) {
if (tinySize <= 16) {
FastAllocator<16>::release(this);
INSTRUMENT_RELEASE("Arena16");
} else if (tinySize <= 32) {
FastAllocator<32>::release(this);
INSTRUMENT_RELEASE("Arena32");
} else {
FastAllocator<64>::release(this);
INSTRUMENT_RELEASE("Arena64");
}
} else {
if (bigSize <= 128) {
FastAllocator<128>::release(this);
INSTRUMENT_RELEASE("Arena128");
} else if (bigSize <= 256) {
FastAllocator<256>::release(this);
INSTRUMENT_RELEASE("Arena256");
} else if (bigSize <= 512) {
FastAllocator<512>::release(this);
INSTRUMENT_RELEASE("Arena512");
} else if (bigSize <= 1024) {
FastAllocator<1024>::release(this);
INSTRUMENT_RELEASE("Arena1024");
} else if (bigSize <= 2048) {
FastAllocator<2048>::release(this);
INSTRUMENT_RELEASE("Arena2048");
} else if (bigSize <= 4096) {
FastAllocator<4096>::release(this);
INSTRUMENT_RELEASE("Arena4096");
} else if (bigSize <= 8192) {
FastAllocator<8192>::release(this);
INSTRUMENT_RELEASE("Arena8192");
} else {
#ifdef ALLOC_INSTRUMENTATION
allocInstr["ArenaHugeKB"].dealloc((bigSize + 1023) >> 10);
#endif
g_hugeArenaMemory.fetch_sub(bigSize);
delete[](uint8_t*) this;
}
}
}
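
One detail worth calling out in ArenaBlock::create() above is the growth policy: for sub-LARGE requests the new block is at least double the previous block (the max(LARGE - 1, reqSize * 4) term makes the cap, rather than 4x the request, the binding limit for small requests), and the result is then rounded up to a FastAllocator size class. The stand-alone sketch below simulates that progression; the constants are placeholders for the real values in Arena.h and FastAllocator.

#include <algorithm>
#include <cstdio>

// Placeholder constants; the real values live in flow/Arena.h and FastAllocator.
const int LARGE = 8193;
const int HEADER = 32;     // stands in for sizeof(ArenaBlock)

int roundToBucket(int n) {                  // FastAllocator-style size classes, 128..8192
    for (int b = 128; b <= 8192; b *= 2)
        if (n <= b) return b;
    return n;                               // huge blocks are allocated at exact size
}

int nextBlockSize(int prevSize, int dataSize) {
    int reqSize = dataSize + HEADER;
    if (reqSize < LARGE) {
        // At least double the previous block; for small requests max(LARGE - 1, reqSize * 4)
        // evaluates to LARGE - 1, so the doubling (not 4x the request) dominates.
        reqSize = std::max(reqSize, std::min(prevSize * 2, std::max(LARGE - 1, reqSize * 4)));
    }
    return reqSize < LARGE ? roundToBucket(reqSize) : reqSize;
}

int main() {
    int prev = 0;
    for (int i = 0; i < 8; ++i) {           // eight successive 100-byte allocations
        prev = nextBlockSize(prev, 100);
        std::printf("block %d: %d bytes\n", i, prev);
    }
}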

View File

@ -143,166 +143,27 @@ struct ArenaBlock : NonCopyable, ThreadSafeReferenceCounted<ArenaBlock>
uint32_t bigSize, bigUsed; // include block header uint32_t bigSize, bigUsed; // include block header
uint32_t nextBlockOffset; uint32_t nextBlockOffset;
void delref() { void delref();
if (delref_no_destroy()) bool isTiny() const;
destroy(); int size() const;
} int used() const;
int unused() const;
bool isTiny() const { return tinySize != NOT_TINY; } const void* getData() const;
int size() const { if (isTiny()) return tinySize; else return bigSize; } const void* getNextData() const;
int used() const { if (isTiny()) return tinyUsed; else return bigUsed; } size_t totalSize();
inline int unused() const { if (isTiny()) return tinySize-tinyUsed; else return bigSize-bigUsed; }
const void* getData() const { return this; }
const void* getNextData() const { return (const uint8_t*)getData() + used(); }
size_t totalSize() {
if (isTiny()) return size();
size_t s = size();
int o = nextBlockOffset;
while (o) {
ArenaBlockRef* r = (ArenaBlockRef*)((char*)getData() + o);
s += r->next->totalSize();
o = r->nextBlockOffset;
}
return s;
}
// just for debugging: // just for debugging:
void getUniqueBlocks(std::set<ArenaBlock*>& a) { void getUniqueBlocks(std::set<ArenaBlock*>& a);
a.insert(this); int addUsed(int bytes);
if (isTiny()) return; void makeReference(ArenaBlock* next);
static void dependOn(Reference<ArenaBlock>& self, ArenaBlock* other);
int o = nextBlockOffset; static void* allocate(Reference<ArenaBlock>& self, int bytes);
while (o) {
ArenaBlockRef* r = (ArenaBlockRef*)((char*)getData() + o);
r->next->getUniqueBlocks(a);
o = r->nextBlockOffset;
}
return;
}
inline int addUsed( int bytes ) {
if (isTiny()) {
int t = tinyUsed;
tinyUsed += bytes;
return t;
} else {
int t = bigUsed;
bigUsed += bytes;
return t;
}
}
void makeReference( ArenaBlock* next ) {
ArenaBlockRef* r = (ArenaBlockRef*)((char*)getData() + bigUsed);
r->next = next;
r->nextBlockOffset = nextBlockOffset;
nextBlockOffset = bigUsed;
bigUsed += sizeof(ArenaBlockRef);
}
static void dependOn( Reference<ArenaBlock>& self, ArenaBlock* other ) {
other->addref();
if (!self || self->isTiny() || self->unused() < sizeof(ArenaBlockRef))
create( SMALL, self )->makeReference(other);
else
self->makeReference( other );
}
static inline void* allocate( Reference<ArenaBlock>& self, int bytes ) {
ArenaBlock* b = self.getPtr();
if (!self || self->unused() < bytes)
b = create( bytes, self );
return (char*)b->getData() + b->addUsed(bytes);
}
// Return an appropriately-sized ArenaBlock to store the given data // Return an appropriately-sized ArenaBlock to store the given data
static ArenaBlock* create( int dataSize, Reference<ArenaBlock>& next ) { static ArenaBlock* create(int dataSize, Reference<ArenaBlock>& next);
ArenaBlock* b; void destroy();
if (dataSize <= SMALL-TINY_HEADER && !next) { void destroyLeaf();
if (dataSize <= 16-TINY_HEADER) { b = (ArenaBlock*)FastAllocator<16>::allocate(); b->tinySize = 16; INSTRUMENT_ALLOCATE("Arena16"); }
else if (dataSize <= 32-TINY_HEADER) { b = (ArenaBlock*)FastAllocator<32>::allocate(); b->tinySize = 32; INSTRUMENT_ALLOCATE("Arena32"); }
else { b = (ArenaBlock*)FastAllocator<64>::allocate(); b->tinySize=64; INSTRUMENT_ALLOCATE("Arena64"); }
b->tinyUsed = TINY_HEADER;
} else {
int reqSize = dataSize + sizeof(ArenaBlock);
if (next) reqSize += sizeof(ArenaBlockRef);
if (reqSize < LARGE) {
// Each block should be larger than the previous block, up to a limit, to minimize allocations
// Worst-case allocation pattern: 1 +10 +17 +42 +67 +170 +323 +681 +1348 +2728 +2210 +2211 (+1K +3K+1 +4K)*
// Overhead: 4X for small arenas, 3X intermediate, 1.33X for large arenas
int prevSize = next ? next->size() : 0;
reqSize = std::max( reqSize, std::min( prevSize*2, std::max( LARGE-1, reqSize*4 ) ) );
}
if (reqSize < LARGE) {
if (reqSize <= 128) { b = (ArenaBlock*)FastAllocator<128>::allocate(); b->bigSize = 128; INSTRUMENT_ALLOCATE("Arena128"); }
else if (reqSize <= 256) { b = (ArenaBlock*)FastAllocator<256>::allocate(); b->bigSize = 256; INSTRUMENT_ALLOCATE("Arena256"); }
else if (reqSize <= 512) { b = (ArenaBlock*)FastAllocator<512>::allocate(); b->bigSize = 512; INSTRUMENT_ALLOCATE("Arena512"); }
else if (reqSize <= 1024) { b = (ArenaBlock*)FastAllocator<1024>::allocate(); b->bigSize = 1024; INSTRUMENT_ALLOCATE("Arena1024"); }
else if (reqSize <= 2048) { b = (ArenaBlock*)FastAllocator<2048>::allocate(); b->bigSize = 2048; INSTRUMENT_ALLOCATE("Arena2048"); }
else if (reqSize <= 4096) { b = (ArenaBlock*)FastAllocator<4096>::allocate(); b->bigSize = 4096; INSTRUMENT_ALLOCATE("Arena4096"); }
else { b = (ArenaBlock*)FastAllocator<8192>::allocate(); b->bigSize = 8192; INSTRUMENT_ALLOCATE("Arena8192"); }
b->tinySize = b->tinyUsed = NOT_TINY;
b->bigUsed = sizeof(ArenaBlock);
} else {
#ifdef ALLOC_INSTRUMENTATION
allocInstr[ "ArenaHugeKB" ].alloc( (reqSize+1023)>>10 );
#endif
b = (ArenaBlock*)new uint8_t[ reqSize ];
b->tinySize = b->tinyUsed = NOT_TINY;
b->bigSize = reqSize;
b->bigUsed = sizeof(ArenaBlock);
if(FLOW_KNOBS && g_trace_depth == 0 && nondeterministicRandom()->random01() < (reqSize / FLOW_KNOBS->HUGE_ARENA_LOGGING_BYTES)) {
hugeArenaSample(reqSize);
}
g_hugeArenaMemory.fetch_add(reqSize);
// If the new block has less free space than the old block, make the old block depend on it
if (next && !next->isTiny() && next->unused() >= reqSize-dataSize) {
b->nextBlockOffset = 0;
b->setrefCountUnsafe(1);
next->makeReference(b);
return b;
}
}
b->nextBlockOffset = 0;
if (next) b->makeReference(next.getPtr());
}
b->setrefCountUnsafe(1);
next.setPtrUnsafe(b);
return b;
}
inline void destroy();
void destroyLeaf() {
if (isTiny()) {
if (tinySize <= 16) { FastAllocator<16>::release(this); INSTRUMENT_RELEASE("Arena16");}
else if (tinySize <= 32) { FastAllocator<32>::release(this); INSTRUMENT_RELEASE("Arena32"); }
else { FastAllocator<64>::release(this); INSTRUMENT_RELEASE("Arena64"); }
} else {
if (bigSize <= 128) { FastAllocator<128>::release(this); INSTRUMENT_RELEASE("Arena128"); }
else if (bigSize <= 256) { FastAllocator<256>::release(this); INSTRUMENT_RELEASE("Arena256"); }
else if (bigSize <= 512) { FastAllocator<512>::release(this); INSTRUMENT_RELEASE("Arena512"); }
else if (bigSize <= 1024) { FastAllocator<1024>::release(this); INSTRUMENT_RELEASE("Arena1024"); }
else if (bigSize <= 2048) { FastAllocator<2048>::release(this); INSTRUMENT_RELEASE("Arena2048"); }
else if (bigSize <= 4096) { FastAllocator<4096>::release(this); INSTRUMENT_RELEASE("Arena4096"); }
else if (bigSize <= 8192) { FastAllocator<8192>::release(this); INSTRUMENT_RELEASE("Arena8192"); }
else {
#ifdef ALLOC_INSTRUMENTATION
allocInstr[ "ArenaHugeKB" ].dealloc( (bigSize+1023)>>10 );
#endif
g_hugeArenaMemory.fetch_sub(bigSize);
delete[] (uint8_t*)this;
}
}
}
private: private:
static void* operator new(size_t s); // not implemented static void* operator new(size_t s); // not implemented
}; };
inline Arena::Arena() : impl( NULL ) {} inline Arena::Arena() : impl( NULL ) {}
@ -1208,28 +1069,5 @@ struct dynamic_size_traits<VectorRef<V, VecSerStrategy::String>> : std::true_typ
} }
}; };
void ArenaBlock::destroy() {
// If the stack never contains more than one item, nothing will be allocated from stackArena.
// If stackArena is used, it will always be a linked list, so destroying *it* will not create another arena
ArenaBlock* tinyStack = this;
Arena stackArena;
VectorRef<ArenaBlock*> stack( &tinyStack, 1 );
while (stack.size()) {
ArenaBlock* b = stack.end()[-1];
stack.pop_back();
if (!b->isTiny()) {
int o = b->nextBlockOffset;
while (o) {
ArenaBlockRef* br = (ArenaBlockRef*)((char*)b->getData() + o);
if (br->next->delref_no_destroy())
stack.push_back( stackArena, br->next );
o = br->nextBlockOffset;
}
}
b->destroyLeaf();
}
}
#endif #endif

View File

@ -3,6 +3,7 @@ find_package(Threads REQUIRED)
set(FLOW_SRCS set(FLOW_SRCS
ActorCollection.actor.cpp ActorCollection.actor.cpp
ActorCollection.h ActorCollection.h
Arena.cpp
Arena.h Arena.h
AsioReactor.h AsioReactor.h
CompressedInt.actor.cpp CompressedInt.actor.cpp
@ -51,6 +52,8 @@ set(FLOW_SRCS
SystemMonitor.h SystemMonitor.h
TDMetric.actor.h TDMetric.actor.h
TDMetric.cpp TDMetric.cpp
TLSConfig.actor.cpp
TLSConfig.actor.h
ThreadHelper.actor.h ThreadHelper.actor.h
ThreadHelper.cpp ThreadHelper.cpp
ThreadPrimitives.cpp ThreadPrimitives.cpp
@ -58,27 +61,25 @@ set(FLOW_SRCS
ThreadSafeQueue.h ThreadSafeQueue.h
Trace.cpp Trace.cpp
Trace.h Trace.h
TLSPolicy.h
TLSPolicy.cpp
UnitTest.cpp UnitTest.cpp
UnitTest.h UnitTest.h
XmlTraceLogFormatter.h
XmlTraceLogFormatter.cpp XmlTraceLogFormatter.cpp
XmlTraceLogFormatter.h
actorcompiler.h actorcompiler.h
crc32c.h crc32c.h
crc32c.cpp crc32c.cpp
error_definitions.h error_definitions.h
${CMAKE_CURRENT_BINARY_DIR}/SourceVersion.h ${CMAKE_CURRENT_BINARY_DIR}/SourceVersion.h
flat_buffers.h
flat_buffers.cpp flat_buffers.cpp
flat_buffers.h
flow.cpp flow.cpp
flow.h flow.h
genericactors.actor.cpp genericactors.actor.cpp
genericactors.actor.h genericactors.actor.h
network.cpp network.cpp
network.h network.h
serialize.h
serialize.cpp serialize.cpp
serialize.h
stacktrace.amalgamation.cpp stacktrace.amalgamation.cpp
stacktrace.h stacktrace.h
version.cpp) version.cpp)

View File

@ -68,7 +68,7 @@ FlowKnobs::FlowKnobs(bool randomize, bool isSimulated) {
init( MAX_RECONNECTION_TIME, 0.5 ); init( MAX_RECONNECTION_TIME, 0.5 );
init( RECONNECTION_TIME_GROWTH_RATE, 1.2 ); init( RECONNECTION_TIME_GROWTH_RATE, 1.2 );
init( RECONNECTION_RESET_TIME, 5.0 ); init( RECONNECTION_RESET_TIME, 5.0 );
init( CONNECTION_ACCEPT_DELAY, 0.5 ); init( ACCEPT_BATCH_SIZE, 10 );
init( TOO_MANY_CONNECTIONS_CLOSED_RESET_DELAY, 5.0 ); init( TOO_MANY_CONNECTIONS_CLOSED_RESET_DELAY, 5.0 );
init( TOO_MANY_CONNECTIONS_CLOSED_TIMEOUT, 20.0 ); init( TOO_MANY_CONNECTIONS_CLOSED_TIMEOUT, 20.0 );
init( PEER_UNAVAILABLE_FOR_LONG_TIME_TIMEOUT, 3600.0 ); init( PEER_UNAVAILABLE_FOR_LONG_TIME_TIMEOUT, 3600.0 );

View File

@ -87,7 +87,7 @@ public:
double MAX_RECONNECTION_TIME; double MAX_RECONNECTION_TIME;
double RECONNECTION_TIME_GROWTH_RATE; double RECONNECTION_TIME_GROWTH_RATE;
double RECONNECTION_RESET_TIME; double RECONNECTION_RESET_TIME;
double CONNECTION_ACCEPT_DELAY; int ACCEPT_BATCH_SIZE;
int TLS_CERT_REFRESH_DELAY_SECONDS; int TLS_CERT_REFRESH_DELAY_SECONDS;
double TLS_SERVER_CONNECTION_THROTTLE_TIMEOUT; double TLS_SERVER_CONNECTION_THROTTLE_TIMEOUT;

View File

@ -37,7 +37,11 @@
#include "flow/AsioReactor.h" #include "flow/AsioReactor.h"
#include "flow/Profiler.h" #include "flow/Profiler.h"
#include "flow/ProtocolVersion.h" #include "flow/ProtocolVersion.h"
#include "flow/TLSPolicy.h" #include "flow/TLSConfig.actor.h"
#include "flow/genericactors.actor.h"
// See the comment in TLSConfig.actor.h for why this module-breaking include was done.
#include "fdbrpc/IAsyncFile.h"
#ifdef WIN32 #ifdef WIN32
#include <mmsystem.h> #include <mmsystem.h>
@ -111,7 +115,7 @@ thread_local INetwork* thread_network = 0;
class Net2 sealed : public INetwork, public INetworkConnections { class Net2 sealed : public INetwork, public INetworkConnections {
public: public:
Net2(bool useThreadPool, bool useMetrics, Reference<TLSPolicy> tlsPolicy, const TLSParams& tlsParams); Net2(const TLSConfig& tlsConfig, bool useThreadPool, bool useMetrics);
void initTLS(); void initTLS();
void run(); void run();
void initMetrics(); void initMetrics();
@ -157,16 +161,12 @@ public:
ASIOReactor reactor; ASIOReactor reactor;
#ifndef TLS_DISABLED #ifndef TLS_DISABLED
boost::asio::ssl::context sslContext; AsyncVar<Reference<ReferencedObject<boost::asio::ssl::context>>> sslContextVar;
#endif #endif
Reference<TLSPolicy> tlsPolicy; TLSConfig tlsConfig;
TLSParams tlsParams; Future<Void> backgroundCertRefresh;
bool tlsInitialized; bool tlsInitialized;
std::string get_password() const {
return tlsParams.tlsPassword;
}
INetworkConnections *network; // initially this, but can be changed INetworkConnections *network; // initially this, but can be changed
int64_t tsc_begin, tsc_end; int64_t tsc_begin, tsc_end;
@ -460,12 +460,13 @@ private:
}; };
class Listener : public IListener, ReferenceCounted<Listener> { class Listener : public IListener, ReferenceCounted<Listener> {
boost::asio::io_context& io_service;
NetworkAddress listenAddress; NetworkAddress listenAddress;
tcp::acceptor acceptor; tcp::acceptor acceptor;
public: public:
Listener( boost::asio::io_service& io_service, NetworkAddress listenAddress ) Listener( boost::asio::io_context& io_service, NetworkAddress listenAddress )
: listenAddress(listenAddress), acceptor( io_service, tcpEndpoint( listenAddress ) ) : io_service(io_service), listenAddress(listenAddress), acceptor( io_service, tcpEndpoint( listenAddress ) )
{ {
platform::setCloseOnExec(acceptor.native_handle()); platform::setCloseOnExec(acceptor.native_handle());
} }
@ -482,7 +483,7 @@ public:
private: private:
ACTOR static Future<Reference<IConnection>> doAccept( Listener* self ) { ACTOR static Future<Reference<IConnection>> doAccept( Listener* self ) {
state Reference<Connection> conn( new Connection( self->acceptor.get_io_service() ) ); state Reference<Connection> conn( new Connection( self->io_service ) );
state tcp::acceptor::endpoint_type peer_endpoint; state tcp::acceptor::endpoint_type peer_endpoint;
try { try {
BindPromise p("N2_AcceptError", UID()); BindPromise p("N2_AcceptError", UID());
@ -513,13 +514,13 @@ public:
closeSocket(); closeSocket();
} }
explicit SSLConnection( boost::asio::io_service& io_service, boost::asio::ssl::context& context ) explicit SSLConnection( boost::asio::io_service& io_service, Reference<ReferencedObject<boost::asio::ssl::context>> context )
: id(nondeterministicRandom()->randomUniqueID()), socket(io_service), ssl_sock(socket, context) : id(nondeterministicRandom()->randomUniqueID()), socket(io_service), ssl_sock(socket, context->mutate()), sslContext(context)
{ {
} }
// This is not part of the IConnection interface, because it is wrapped by INetwork::connect() // This is not part of the IConnection interface, because it is wrapped by INetwork::connect()
ACTOR static Future<Reference<IConnection>> connect( boost::asio::io_service* ios, boost::asio::ssl::context* context, NetworkAddress addr ) { ACTOR static Future<Reference<IConnection>> connect( boost::asio::io_service* ios, Reference<ReferencedObject<boost::asio::ssl::context>> context, NetworkAddress addr ) {
std::pair<IPAddress,uint16_t> peerIP = std::make_pair(addr.ip, addr.port); std::pair<IPAddress,uint16_t> peerIP = std::make_pair(addr.ip, addr.port);
auto iter(g_network->networkInfo.serverTLSConnectionThrottler.find(peerIP)); auto iter(g_network->networkInfo.serverTLSConnectionThrottler.find(peerIP));
if(iter != g_network->networkInfo.serverTLSConnectionThrottler.end()) { if(iter != g_network->networkInfo.serverTLSConnectionThrottler.end()) {
@ -534,7 +535,7 @@ public:
} }
} }
state Reference<SSLConnection> self( new SSLConnection(*ios, *context) ); state Reference<SSLConnection> self( new SSLConnection(*ios, context) );
self->peer_address = addr; self->peer_address = addr;
try { try {
@ -737,6 +738,7 @@ private:
tcp::socket socket; tcp::socket socket;
ssl_socket ssl_sock; ssl_socket ssl_sock;
NetworkAddress peer_address; NetworkAddress peer_address;
Reference<ReferencedObject<boost::asio::ssl::context>> sslContext;
struct SendBufferIterator { struct SendBufferIterator {
typedef boost::asio::const_buffer value_type; typedef boost::asio::const_buffer value_type;
@ -794,13 +796,14 @@ private:
}; };
class SSLListener : public IListener, ReferenceCounted<SSLListener> { class SSLListener : public IListener, ReferenceCounted<SSLListener> {
boost::asio::io_context& io_service;
NetworkAddress listenAddress; NetworkAddress listenAddress;
tcp::acceptor acceptor; tcp::acceptor acceptor;
boost::asio::ssl::context* context; AsyncVar<Reference<ReferencedObject<boost::asio::ssl::context>>> *contextVar;
public: public:
SSLListener( boost::asio::io_service& io_service, boost::asio::ssl::context* context, NetworkAddress listenAddress ) SSLListener( boost::asio::io_context& io_service, AsyncVar<Reference<ReferencedObject<boost::asio::ssl::context>>>* contextVar, NetworkAddress listenAddress )
: listenAddress(listenAddress), acceptor( io_service, tcpEndpoint( listenAddress ) ), context(context) : io_service(io_service), listenAddress(listenAddress), acceptor( io_service, tcpEndpoint( listenAddress ) ), contextVar(contextVar)
{ {
platform::setCloseOnExec(acceptor.native_handle()); platform::setCloseOnExec(acceptor.native_handle());
} }
@ -817,7 +820,7 @@ public:
private: private:
ACTOR static Future<Reference<IConnection>> doAccept( SSLListener* self ) { ACTOR static Future<Reference<IConnection>> doAccept( SSLListener* self ) {
state Reference<SSLConnection> conn( new SSLConnection( self->acceptor.get_io_service(), *self->context) ); state Reference<SSLConnection> conn( new SSLConnection( self->io_service, self->contextVar->get() ) );
state tcp::acceptor::endpoint_type peer_endpoint; state tcp::acceptor::endpoint_type peer_endpoint;
try { try {
BindPromise p("N2_AcceptError", UID()); BindPromise p("N2_AcceptError", UID());
@ -856,7 +859,7 @@ bool insecurely_always_accept(bool _1, boost::asio::ssl::verify_context& _2) {
} }
#endif #endif
Net2::Net2(bool useThreadPool, bool useMetrics, Reference<TLSPolicy> tlsPolicy, const TLSParams& tlsParams) Net2::Net2(const TLSConfig& tlsConfig, bool useThreadPool, bool useMetrics)
: useThreadPool(useThreadPool), : useThreadPool(useThreadPool),
network(this), network(this),
reactor(this), reactor(this),
@ -867,10 +870,9 @@ Net2::Net2(bool useThreadPool, bool useMetrics, Reference<TLSPolicy> tlsPolicy,
lastMinTaskID(TaskPriority::Zero), lastMinTaskID(TaskPriority::Zero),
numYields(0), numYields(0),
tlsInitialized(false), tlsInitialized(false),
tlsPolicy(tlsPolicy), tlsConfig(tlsConfig)
tlsParams(tlsParams)
#ifndef TLS_DISABLED #ifndef TLS_DISABLED
,sslContext(boost::asio::ssl::context(boost::asio::ssl::context::tlsv12)) ,sslContextVar({ReferencedObject<boost::asio::ssl::context>::from(boost::asio::ssl::context(boost::asio::ssl::context::tls))})
#endif #endif
{ {
@ -897,96 +899,127 @@ Net2::Net2(bool useThreadPool, bool useMetrics, Reference<TLSPolicy> tlsPolicy,
} }
#ifndef TLS_DISABLED
void ConfigureSSLContext( const LoadedTLSConfig& loaded, boost::asio::ssl::context* context ) {
try {
context->set_options(boost::asio::ssl::context::default_workarounds);
context->set_verify_mode(boost::asio::ssl::context::verify_peer | boost::asio::ssl::verify_fail_if_no_peer_cert);
if (loaded.isTLSEnabled()) {
Reference<TLSPolicy> tlsPolicy = Reference<TLSPolicy>(new TLSPolicy(loaded.getEndpointType()));
tlsPolicy->set_verify_peers({ loaded.getVerifyPeers() });
context->set_verify_callback([policy=tlsPolicy](bool preverified, boost::asio::ssl::verify_context& ctx) {
return policy->verify_peer(preverified, ctx.native_handle());
});
} else {
context->set_verify_callback(boost::bind(&insecurely_always_accept, _1, _2));
}
context->set_password_callback(
[password=loaded.getPassword()](size_t, boost::asio::ssl::context::password_purpose) {
return password;
});
const std::string& certBytes = loaded.getCertificateBytes();
if ( certBytes.size() ) {
context->use_certificate_chain(boost::asio::buffer(certBytes.data(), certBytes.size()));
}
const std::string& CABytes = loaded.getCABytes();
if ( CABytes.size() ) {
context->add_certificate_authority(boost::asio::buffer(CABytes.data(), CABytes.size()));
}
const std::string& keyBytes = loaded.getKeyBytes();
if (keyBytes.size()) {
context->use_private_key(boost::asio::buffer(keyBytes.data(), keyBytes.size()), boost::asio::ssl::context::pem);
}
} catch (boost::system::system_error& e) {
TraceEvent("TLSConfigureError").detail("What", e.what()).detail("Value", e.code().value()).detail("WhichMeans", TLSPolicy::ErrorString(e.code()));
throw tls_error();
}
}
ACTOR static Future<Void> watchFileForChanges( std::string filename, AsyncTrigger* fileChanged ) {
if (filename == "") {
return Never();
}
state std::time_t lastModTime = wait(IAsyncFileSystem::filesystem()->lastWriteTime(filename));
loop {
wait(delay(FLOW_KNOBS->TLS_CERT_REFRESH_DELAY_SECONDS));
try {
std::time_t modtime = wait(IAsyncFileSystem::filesystem()->lastWriteTime(filename));
if (lastModTime != modtime) {
lastModTime = modtime;
fileChanged->trigger();
}
} catch (Error& e) {
if (e.code() == error_code_io_error) {
// EACCES, ELOOP, ENOENT all come out as io_error(), but are more of a system
// configuration issue than an FDB problem. If we managed to load valid
// certificates, then there's no point in crashing, but we should complain
// loudly. IAsyncFile will log the error, but not necessarily as a warning.
TraceEvent(SevWarnAlways, "TLSCertificateRefreshStatError").detail("File", filename);
} else {
throw;
}
}
}
}
ACTOR static Future<Void> reloadCertificatesOnChange( TLSConfig config, AsyncVar<Reference<ReferencedObject<boost::asio::ssl::context>>>* contextVar ) {
if (FLOW_KNOBS->TLS_CERT_REFRESH_DELAY_SECONDS <= 0) {
return Void();
}
loop {
// Early in bootup, the filesystem might not be initialized yet. Wait until it is.
if (IAsyncFileSystem::filesystem() != nullptr) {
break;
}
wait(delay(1.0));
}
state int mismatches = 0;
state AsyncTrigger fileChanged;
state std::vector<Future<Void>> lifetimes;
lifetimes.push_back(watchFileForChanges(config.getCertificatePathSync(), &fileChanged));
lifetimes.push_back(watchFileForChanges(config.getKeyPathSync(), &fileChanged));
lifetimes.push_back(watchFileForChanges(config.getCAPathSync(), &fileChanged));
loop {
wait( fileChanged.onTrigger() );
TraceEvent("TLSCertificateRefreshBegin");
try {
LoadedTLSConfig loaded = wait( config.loadAsync() );
boost::asio::ssl::context context(boost::asio::ssl::context::tls);
ConfigureSSLContext(loaded, &context);
TraceEvent(SevInfo, "TLSCertificateRefreshSucceeded");
mismatches = 0;
contextVar->set(ReferencedObject<boost::asio::ssl::context>::from(std::move(context)));
} catch (Error &e) {
if (e.code() == error_code_actor_cancelled) {
throw;
}
// Some files didn't match up, they should in the future, and we'll retry then.
mismatches++;
TraceEvent(SevWarn, "TLSCertificateRefreshMismatch").error(e).detail("mismatches", mismatches);
}
}
}
#endif
void Net2::initTLS() { void Net2::initTLS() {
if(tlsInitialized) { if(tlsInitialized) {
return; return;
} }
#ifndef TLS_DISABLED #ifndef TLS_DISABLED
try { try {
const char *defaultCertFileName = "fdb.pem"; boost::asio::ssl::context newContext(boost::asio::ssl::context::tls);
ConfigureSSLContext( tlsConfig.loadSync(), &newContext );
if( tlsPolicy && !tlsPolicy->rules.size() ) { sslContextVar.set(ReferencedObject<boost::asio::ssl::context>::from(std::move(newContext)));
std::string verify_peers; backgroundCertRefresh = reloadCertificatesOnChange( tlsConfig, &sslContextVar );
if (platform::getEnvironmentVar("FDB_TLS_VERIFY_PEERS", verify_peers)) { } catch (Error& e) {
tlsPolicy->set_verify_peers({ verify_peers }); TraceEvent("Net2TLSInitError").error(e);
} else {
tlsPolicy->set_verify_peers({ std::string("Check.Valid=1")});
}
}
sslContext.set_options(boost::asio::ssl::context::default_workarounds);
sslContext.set_verify_mode(boost::asio::ssl::context::verify_peer | boost::asio::ssl::verify_fail_if_no_peer_cert);
if (tlsPolicy) {
Reference<TLSPolicy> policy = tlsPolicy;
sslContext.set_verify_callback([policy](bool preverified, boost::asio::ssl::verify_context& ctx) {
return policy->verify_peer(preverified, ctx.native_handle());
});
} else {
sslContext.set_verify_callback(boost::bind(&insecurely_always_accept, _1, _2));
}
if ( !tlsParams.tlsPassword.size() ) {
platform::getEnvironmentVar( "FDB_TLS_PASSWORD", tlsParams.tlsPassword );
}
sslContext.set_password_callback(std::bind(&Net2::get_password, this));
if ( tlsParams.tlsCertBytes.size() ) {
sslContext.use_certificate_chain(boost::asio::buffer(tlsParams.tlsCertBytes.data(), tlsParams.tlsCertBytes.size()));
}
else {
if ( !tlsParams.tlsCertPath.size() ) {
if ( !platform::getEnvironmentVar( "FDB_TLS_CERTIFICATE_FILE", tlsParams.tlsCertPath ) ) {
if( fileExists(defaultCertFileName) ) {
tlsParams.tlsCertPath = defaultCertFileName;
} else if( fileExists( joinPath(platform::getDefaultConfigPath(), defaultCertFileName) ) ) {
tlsParams.tlsCertPath = joinPath(platform::getDefaultConfigPath(), defaultCertFileName);
}
}
}
if ( tlsParams.tlsCertPath.size() ) {
sslContext.use_certificate_chain_file(tlsParams.tlsCertPath);
}
}
if ( tlsParams.tlsCABytes.size() ) {
sslContext.add_certificate_authority(boost::asio::buffer(tlsParams.tlsCABytes.data(), tlsParams.tlsCABytes.size()));
}
else {
if ( !tlsParams.tlsCAPath.size() ) {
platform::getEnvironmentVar("FDB_TLS_CA_FILE", tlsParams.tlsCAPath);
}
if ( tlsParams.tlsCAPath.size() ) {
try {
std::string cert = readFileBytes(tlsParams.tlsCAPath, FLOW_KNOBS->CERT_FILE_MAX_SIZE);
sslContext.add_certificate_authority(boost::asio::buffer(cert.data(), cert.size()));
}
catch (Error& e) {
fprintf(stderr, "Error reading CA file %s: %s\n", tlsParams.tlsCAPath.c_str(), e.what());
TraceEvent("Net2TLSReadCAError").error(e);
throw tls_error();
}
}
}
if (tlsParams.tlsKeyBytes.size()) {
sslContext.use_private_key(boost::asio::buffer(tlsParams.tlsKeyBytes.data(), tlsParams.tlsKeyBytes.size()), boost::asio::ssl::context::pem);
} else {
if (!tlsParams.tlsKeyPath.size()) {
if(!platform::getEnvironmentVar( "FDB_TLS_KEY_FILE", tlsParams.tlsKeyPath)) {
if( fileExists(defaultCertFileName) ) {
tlsParams.tlsKeyPath = defaultCertFileName;
} else if( fileExists( joinPath(platform::getDefaultConfigPath(), defaultCertFileName) ) ) {
tlsParams.tlsKeyPath = joinPath(platform::getDefaultConfigPath(), defaultCertFileName);
}
}
}
if (tlsParams.tlsKeyPath.size()) {
sslContext.use_private_key_file(tlsParams.tlsKeyPath, boost::asio::ssl::context::pem);
}
}
} catch(boost::system::system_error e) {
fprintf(stderr, "Error initializing TLS: %s\n", e.what());
TraceEvent("Net2TLSInitError").detail("Message", e.what());
throw tls_error(); throw tls_error();
} }
#endif #endif
@ -1351,7 +1384,7 @@ Future< Reference<IConnection> > Net2::connect( NetworkAddress toAddr, std::stri
#ifndef TLS_DISABLED #ifndef TLS_DISABLED
initTLS(); initTLS();
if ( toAddr.isTLS() ) { if ( toAddr.isTLS() ) {
return SSLConnection::connect(&this->reactor.ios, &this->sslContext, toAddr); return SSLConnection::connect(&this->reactor.ios, this->sslContextVar.get(), toAddr);
} }
#endif #endif
@ -1431,7 +1464,7 @@ Reference<IListener> Net2::listen( NetworkAddress localAddr ) {
#ifndef TLS_DISABLED #ifndef TLS_DISABLED
initTLS(); initTLS();
if ( localAddr.isTLS() ) { if ( localAddr.isTLS() ) {
return Reference<IListener>(new SSLListener( reactor.ios, &this->sslContext, localAddr )); return Reference<IListener>(new SSLListener( reactor.ios, &this->sslContextVar, localAddr ));
} }
#endif #endif
return Reference<IListener>( new Listener( reactor.ios, localAddr ) ); return Reference<IListener>( new Listener( reactor.ios, localAddr ) );
@ -1531,9 +1564,9 @@ void ASIOReactor::wake() {
} // namespace net2 } // namespace net2
INetwork* newNet2(bool useThreadPool, bool useMetrics, Reference<TLSPolicy> policy, const TLSParams& tlsParams) { INetwork* newNet2(const TLSConfig& tlsConfig, bool useThreadPool, bool useMetrics) {
try { try {
N2::g_net2 = new N2::Net2(useThreadPool, useMetrics, policy, tlsParams); N2::g_net2 = new N2::Net2(tlsConfig, useThreadPool, useMetrics);
} }
catch(boost::system::system_error e) { catch(boost::system::system_error e) {
TraceEvent("Net2InitError").detail("Message", e.what()); TraceEvent("Net2InitError").detail("Message", e.what());

View File

@ -91,7 +91,7 @@ ACTOR Future<Void> traceCounters(std::string traceEventName, UID traceEventID, d
counters->logToTraceEvent(te); counters->logToTraceEvent(te);
if (!trackLatestName.empty()) { if (!trackLatestName.empty()) {
te.trackLatest(trackLatestName.c_str()); te.trackLatest(trackLatestName);
} }
last_interval = now(); last_interval = now();

View File

@ -101,7 +101,7 @@ SystemStatistics customSystemMonitor(std::string eventName, StatisticsState *sta
.detail("ConnectionsEstablished", (double) (netData.countConnEstablished - statState->networkState.countConnEstablished) / currentStats.elapsed) .detail("ConnectionsEstablished", (double) (netData.countConnEstablished - statState->networkState.countConnEstablished) / currentStats.elapsed)
.detail("ConnectionsClosed", ((netData.countConnClosedWithError - statState->networkState.countConnClosedWithError) + (netData.countConnClosedWithoutError - statState->networkState.countConnClosedWithoutError)) / currentStats.elapsed) .detail("ConnectionsClosed", ((netData.countConnClosedWithError - statState->networkState.countConnClosedWithError) + (netData.countConnClosedWithoutError - statState->networkState.countConnClosedWithoutError)) / currentStats.elapsed)
.detail("ConnectionErrors", (netData.countConnClosedWithError - statState->networkState.countConnClosedWithError) / currentStats.elapsed) .detail("ConnectionErrors", (netData.countConnClosedWithError - statState->networkState.countConnClosedWithError) / currentStats.elapsed)
.trackLatest(eventName.c_str()); .trackLatest(eventName);
TraceEvent("MemoryMetrics") TraceEvent("MemoryMetrics")
.DETAILALLOCATORMEMUSAGE(16) .DETAILALLOCATORMEMUSAGE(16)

View File

@ -1,5 +1,5 @@
/* /*
* TLSPolicy.cpp * TLSConfig.actor.cpp
* *
* This source file is part of the FoundationDB open source project * This source file is part of the FoundationDB open source project
* *
@ -18,8 +18,11 @@
* limitations under the License. * limitations under the License.
*/ */
#include "flow/TLSPolicy.h" #define PRIVATE_EXCEPT_FOR_TLSCONFIG_CPP public
#include "flow/TLSConfig.actor.h"
#undef PRIVATE_EXCEPT_FOR_TLSCONFIG_CPP
// To force typeinfo to only be emitted once.
TLSPolicy::~TLSPolicy() {} TLSPolicy::~TLSPolicy() {}
#ifndef TLS_DISABLED #ifndef TLS_DISABLED
@ -39,18 +42,183 @@ TLSPolicy::~TLSPolicy() {}
#include <string> #include <string>
#include <sstream> #include <sstream>
#include <utility> #include <utility>
#include <boost/asio/ssl/context.hpp>
// This include breaks module dependencies, but we need to do async file reads.
// So either we include fdbrpc here, or this file is moved to fdbrpc/, and then
// Net2, which depends on us, includes fdbrpc/.
//
// Either way, the only way to break this dependency cycle is to move all of
// AsyncFile to flow/
#include "fdbrpc/IAsyncFile.h"
#include "flow/Platform.h"
#include "flow/FastRef.h" #include "flow/FastRef.h"
#include "flow/Trace.h" #include "flow/Trace.h"
#include "flow/genericactors.actor.h"
#include "flow/actorcompiler.h" // This must be the last #include.
std::vector<std::string> LoadedTLSConfig::getVerifyPeers() const {
if (tlsVerifyPeers.size()) {
return tlsVerifyPeers;
}
std::string envVerifyPeers;
if (platform::getEnvironmentVar("FDB_TLS_VERIFY_PEERS", envVerifyPeers)) {
return {envVerifyPeers};
}
return {"Check.Valid=1"};
}
std::string LoadedTLSConfig::getPassword() const {
if (tlsPassword.size()) {
return tlsPassword;
}
std::string envPassword;
platform::getEnvironmentVar("FDB_TLS_PASSWORD", envPassword);
return envPassword;
}
std::string TLSConfig::getCertificatePathSync() const {
if (tlsCertPath.size()) {
return tlsCertPath;
}
std::string envCertPath;
if (platform::getEnvironmentVar("FDB_TLS_CERTIFICATE_FILE", envCertPath)) {
return envCertPath;
}
const char *defaultCertFileName = "fdb.pem";
if( fileExists(defaultCertFileName) ) {
return defaultCertFileName;
}
if( fileExists( joinPath(platform::getDefaultConfigPath(), defaultCertFileName) ) ) {
return joinPath(platform::getDefaultConfigPath(), defaultCertFileName);
}
return std::string();
}
std::string TLSConfig::getKeyPathSync() const {
if (tlsKeyPath.size()) {
return tlsKeyPath;
}
std::string envKeyPath;
if (platform::getEnvironmentVar("FDB_TLS_KEY_FILE", envKeyPath)) {
return envKeyPath;
}
const char *defaultCertFileName = "fdb.pem";
if( fileExists(defaultCertFileName) ) {
return defaultCertFileName;
}
if( fileExists( joinPath(platform::getDefaultConfigPath(), defaultCertFileName) ) ) {
return joinPath(platform::getDefaultConfigPath(), defaultCertFileName);
}
return std::string();
}
std::string TLSConfig::getCAPathSync() const {
if (tlsCAPath.size()) {
return tlsCAPath;
}
std::string envCAPath;
platform::getEnvironmentVar("FDB_TLS_CA_FILE", envCAPath);
return envCAPath;
}
LoadedTLSConfig TLSConfig::loadSync() const {
LoadedTLSConfig loaded;
const std::string certPath = getCertificatePathSync();
if (certPath.size()) {
loaded.tlsCertBytes = readFileBytes( certPath, FLOW_KNOBS->CERT_FILE_MAX_SIZE );
} else {
loaded.tlsCertBytes = tlsCertBytes;
}
const std::string keyPath = getKeyPathSync();
if (keyPath.size()) {
loaded.tlsKeyBytes = readFileBytes( keyPath, FLOW_KNOBS->CERT_FILE_MAX_SIZE );
} else {
loaded.tlsKeyBytes = tlsKeyBytes;
}
const std::string CAPath = getCAPathSync();
if (CAPath.size()) {
loaded.tlsCABytes = readFileBytes( CAPath, FLOW_KNOBS->CERT_FILE_MAX_SIZE );
} else {
loaded.tlsCABytes = tlsCABytes;
}
loaded.tlsPassword = tlsPassword;
loaded.tlsVerifyPeers = tlsVerifyPeers;
loaded.endpointType = endpointType;
return loaded;
}
// And now do the same thing, but async...
ACTOR static Future<Void> readEntireFile( std::string filename, std::string* destination ) {
state Reference<IAsyncFile> file = wait(IAsyncFileSystem::filesystem()->open(filename, IAsyncFile::OPEN_READONLY | IAsyncFile::OPEN_UNCACHED, 0));
state int64_t filesize = wait(file->size());
if (filesize > FLOW_KNOBS->CERT_FILE_MAX_SIZE) {
throw file_too_large();
}
destination->resize(filesize);
wait(success(file->read(const_cast<char*>(destination->c_str()), filesize, 0)));
return Void();
}
ACTOR Future<LoadedTLSConfig> TLSConfig::loadAsync(const TLSConfig* self) {
state LoadedTLSConfig loaded;
state std::vector<Future<Void>> reads;
const std::string& certPath = self->getCertificatePathSync();
if (certPath.size()) {
reads.push_back( readEntireFile( certPath, &loaded.tlsCertBytes ) );
} else {
loaded.tlsCertBytes = self->tlsCertBytes;
}
const std::string& keyPath = self->getKeyPathSync();
if (keyPath.size()) {
reads.push_back( readEntireFile( keyPath, &loaded.tlsKeyBytes ) );
} else {
loaded.tlsKeyBytes = self->tlsKeyBytes;
}
const std::string& CAPath = self->getCAPathSync();
if (CAPath.size()) {
reads.push_back( readEntireFile( CAPath, &loaded.tlsCABytes ) );
} else {
loaded.tlsCABytes = self->tlsCABytes;
}
wait(waitForAll(reads));
loaded.tlsPassword = self->tlsPassword;
loaded.tlsVerifyPeers = self->tlsVerifyPeers;
loaded.endpointType = self->endpointType;
return loaded;
}
std::string TLSPolicy::ErrorString(boost::system::error_code e) { std::string TLSPolicy::ErrorString(boost::system::error_code e) {
char* str = ERR_error_string(e.value(), NULL); char* str = ERR_error_string(e.value(), NULL);
return std::string(str); return std::string(str);
} }
// To force typeinfo to only be emitted once.
std::string TLSPolicy::toString() const { std::string TLSPolicy::toString() const {
std::stringstream ss; std::stringstream ss;
ss << "TLSPolicy{ Rules=["; ss << "TLSPolicy{ Rules=[";

flow/TLSConfig.actor.h Normal file
View File

@ -0,0 +1,286 @@
/*
* TLSConfig.actor.h
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2020 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// When actually compiled (NO_INTELLISENSE), include the generated version of this file. In intellisense use the source version.
#if defined(NO_INTELLISENSE) && !defined(FLOW_TLS_CONFIG_ACTOR_G_H)
#define FLOW_TLS_CONFIG_ACTOR_G_H
#include "flow/TLSConfig.actor.g.h"
#elif !defined(FLOW_TLS_CONFIG_ACTOR_H)
#define FLOW_TLS_CONFIG_ACTOR_H
#pragma once
#include <map>
#include <string>
#include <vector>
#include <boost/system/system_error.hpp>
#include "flow/FastRef.h"
#include "flow/Knobs.h"
#include "flow/flow.h"
#ifndef TLS_DISABLED
#include <openssl/x509.h>
typedef int NID;
enum class MatchType {
EXACT,
PREFIX,
SUFFIX,
};
enum class X509Location {
// This NID is located within a X509_NAME
NAME,
// This NID is an X509 extension, and should be parsed accordingly
EXTENSION,
};
struct Criteria {
Criteria( const std::string& s )
: criteria(s), match_type(MatchType::EXACT), location(X509Location::NAME) {}
Criteria( const std::string& s, MatchType mt )
: criteria(s), match_type(mt), location(X509Location::NAME) {}
Criteria( const std::string& s, X509Location loc)
: criteria(s), match_type(MatchType::EXACT), location(loc) {}
Criteria( const std::string& s, MatchType mt, X509Location loc)
: criteria(s), match_type(mt), location(loc) {}
std::string criteria;
MatchType match_type;
X509Location location;
bool operator==(const Criteria& c) const {
return criteria == c.criteria && match_type == c.match_type && location == c.location;
}
};
#endif
#include "flow/actorcompiler.h" // This must be the last #include.
enum class TLSEndpointType {
UNSET = 0,
CLIENT,
SERVER
};
class TLSConfig;
template <typename T> class LoadAsyncActorState;
// TODO: Remove this once this code is merged with master/to-be 7.0 and actors can access private variables.
#ifndef PRIVATE_EXCEPT_FOR_TLSCONFIG_CPP
#define PRIVATE_EXCEPT_FOR_TLSCONFIG_CPP private
#endif
class LoadedTLSConfig {
public:
std::string getCertificateBytes() const {
return tlsCertBytes;
}
std::string getKeyBytes() const {
return tlsKeyBytes;
}
std::string getCABytes() const {
return tlsCABytes;
}
// Return the explicitly set verify peers string.
// If no verify peers string was set, return the environment setting
// If no environment setting exists, return "Check.Valid=1"
std::vector<std::string> getVerifyPeers() const;
// Return the explicitly set password.
// If no password was set, return the environment setting
// If no environment setting exists, return an empty string
std::string getPassword() const;
TLSEndpointType getEndpointType() const {
return endpointType;
}
bool isTLSEnabled() const {
return endpointType != TLSEndpointType::UNSET;
}
PRIVATE_EXCEPT_FOR_TLSCONFIG_CPP:
std::string tlsCertBytes, tlsKeyBytes, tlsCABytes;
std::string tlsPassword;
std::vector<std::string> tlsVerifyPeers;
TLSEndpointType endpointType = TLSEndpointType::UNSET;
friend class TLSConfig;
template <typename T>
friend class LoadAsyncActorState;
};
class TLSConfig {
public:
enum { OPT_TLS = 100000, OPT_TLS_PLUGIN, OPT_TLS_CERTIFICATES, OPT_TLS_KEY, OPT_TLS_VERIFY_PEERS, OPT_TLS_CA_FILE, OPT_TLS_PASSWORD };
TLSConfig() = default;
explicit TLSConfig( TLSEndpointType endpointType )
: endpointType( endpointType ) {
}
void setCertificatePath( const std::string& path ) {
tlsCertPath = path;
tlsCertBytes = "";
}
void setCertificateBytes( const std::string& bytes ) {
tlsCertBytes = bytes;
tlsCertPath = "";
}
void setKeyPath( const std::string& path ) {
tlsKeyPath = path;
tlsKeyBytes = "";
}
void setKeyBytes( const std::string& bytes ) {
tlsKeyBytes = bytes;
tlsKeyPath = "";
}
void setCAPath( const std::string& path ) {
tlsCAPath = path;
tlsCABytes = "";
}
void setCABytes( const std::string& bytes ) {
tlsCABytes = bytes;
tlsCAPath = "";
}
void setPassword( const std::string& password ) {
tlsPassword = password;
}
void clearVerifyPeers() {
tlsVerifyPeers.clear();
}
void addVerifyPeers( const std::string& verifyPeers ) {
tlsVerifyPeers.push_back( verifyPeers );
}
// Load all specified certificates into memory, and return an object that
// allows access to them.
// If self has any certificates by path, they will be *synchronously* loaded from disk.
LoadedTLSConfig loadSync() const;
// Load all specified certificates into memory, and return an object that
// allows access to them.
// If self has any certificates by path, they will be *asynchronously* loaded from disk.
Future<LoadedTLSConfig> loadAsync() const {
return loadAsync(this);
}
// Return the explicitly set path.
// If one was not set, return the path from the environment.
// (Cert and Key only) If neither exist, check for fdb.pem in cwd
// (Cert and Key only) If fdb.pem doesn't exist, check for it in default config dir
// Otherwise return the empty string.
// Theoretically, fileExists() can block, so these functions are labelled as synchronous
// TODO: make an easy to use Future<bool> fileExists, and port lots of code over to it.
std::string getCertificatePathSync() const;
std::string getKeyPathSync() const;
std::string getCAPathSync() const;
PRIVATE_EXCEPT_FOR_TLSCONFIG_CPP:
ACTOR static Future<LoadedTLSConfig> loadAsync(const TLSConfig* self);
template <typename T>
friend class LoadAsyncActorState;
std::string tlsCertPath, tlsKeyPath, tlsCAPath;
std::string tlsCertBytes, tlsKeyBytes, tlsCABytes;
std::string tlsPassword;
std::vector<std::string> tlsVerifyPeers;
TLSEndpointType endpointType = TLSEndpointType::UNSET;
};
class TLSPolicy : ReferenceCounted<TLSPolicy> {
public:
TLSPolicy(TLSEndpointType client) : is_client(client == TLSEndpointType::CLIENT) {}
virtual ~TLSPolicy();
virtual void addref() { ReferenceCounted<TLSPolicy>::addref(); }
virtual void delref() { ReferenceCounted<TLSPolicy>::delref(); }
#ifndef TLS_DISABLED
static std::string ErrorString(boost::system::error_code e);
void set_verify_peers(std::vector<std::string> verify_peers);
bool verify_peer(bool preverified, X509_STORE_CTX* store_ctx);
std::string toString() const;
struct Rule {
explicit Rule(std::string input);
std::string toString() const;
std::map< NID, Criteria > subject_criteria;
std::map< NID, Criteria > issuer_criteria;
std::map< NID, Criteria > root_criteria;
bool verify_cert = true;
bool verify_time = true;
};
std::vector<Rule> rules;
#endif
bool is_client;
};
#define TLS_PLUGIN_FLAG "--tls_plugin"
#define TLS_CERTIFICATE_FILE_FLAG "--tls_certificate_file"
#define TLS_KEY_FILE_FLAG "--tls_key_file"
#define TLS_VERIFY_PEERS_FLAG "--tls_verify_peers"
#define TLS_CA_FILE_FLAG "--tls_ca_file"
#define TLS_PASSWORD_FLAG "--tls_password"
#define TLS_OPTION_FLAGS \
{ TLSConfig::OPT_TLS_PLUGIN, TLS_PLUGIN_FLAG, SO_REQ_SEP }, \
{ TLSConfig::OPT_TLS_CERTIFICATES, TLS_CERTIFICATE_FILE_FLAG, SO_REQ_SEP }, \
{ TLSConfig::OPT_TLS_KEY, TLS_KEY_FILE_FLAG, SO_REQ_SEP }, \
{ TLSConfig::OPT_TLS_VERIFY_PEERS, TLS_VERIFY_PEERS_FLAG, SO_REQ_SEP }, \
{ TLSConfig::OPT_TLS_PASSWORD, TLS_PASSWORD_FLAG, SO_REQ_SEP }, \
{ TLSConfig::OPT_TLS_CA_FILE, TLS_CA_FILE_FLAG, SO_REQ_SEP },
#define TLS_HELP \
" " TLS_CERTIFICATE_FILE_FLAG " CERTFILE\n" \
" The path of a file containing the TLS certificate and CA\n" \
" chain.\n" \
" " TLS_CA_FILE_FLAG " CERTAUTHFILE\n" \
" The path of a file containing the CA certificates chain.\n" \
" " TLS_KEY_FILE_FLAG " KEYFILE\n" \
" The path of a file containing the private key corresponding\n" \
" to the TLS certificate.\n" \
" " TLS_PASSWORD_FLAG " PASSCODE\n" \
" The passphrase of encrypted private key\n" \
" " TLS_VERIFY_PEERS_FLAG " CONSTRAINTS\n" \
" The constraints by which to validate TLS peers. The contents\n" \
" and format of CONSTRAINTS are plugin-specific.\n"
#include "flow/unactorcompiler.h"
#endif
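(Illustration only, not part of the header above.) As a counterpart to the path-based example earlier, a sketch of configuring TLS from in-memory PEM bytes and loading synchronously, using only the setters and loadSync() declared in this file; the PEM strings and passphrase are placeholders.

// Hypothetical server-side setup where certificate material is already in memory.
std::string serverCertPem = "-----BEGIN CERTIFICATE-----\n...";               // placeholder contents
std::string serverKeyPem  = "-----BEGIN ENCRYPTED PRIVATE KEY-----\n...";     // placeholder contents

TLSConfig config(TLSEndpointType::SERVER);
config.setCertificateBytes(serverCertPem);
config.setKeyBytes(serverKeyPem);
config.setPassword("example-passphrase"); // corresponds to --tls_password on the command line
config.addVerifyPeers("Check.Valid=1");
LoadedTLSConfig loaded = config.loadSync(); // with bytes set, no file paths need to be read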

flow/TLSPolicy.h (deleted file)
View File

@ -1,145 +0,0 @@
/*
* TLSPolicy.h
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2020 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef _FLOW_TLSPOLICY_H_
#define _FLOW_TLSPOLICY_H_
#pragma once
#include <map>
#include <string>
#include <vector>
#include <boost/system/system_error.hpp>
#include "flow/FastRef.h"
#ifndef TLS_DISABLED
#include <openssl/x509.h>
typedef int NID;
enum class MatchType {
EXACT,
PREFIX,
SUFFIX,
};
enum class X509Location {
// This NID is located within a X509_NAME
NAME,
// This NID is an X509 extension, and should be parsed accordingly
EXTENSION,
};
struct Criteria {
Criteria( const std::string& s )
: criteria(s), match_type(MatchType::EXACT), location(X509Location::NAME) {}
Criteria( const std::string& s, MatchType mt )
: criteria(s), match_type(mt), location(X509Location::NAME) {}
Criteria( const std::string& s, X509Location loc)
: criteria(s), match_type(MatchType::EXACT), location(loc) {}
Criteria( const std::string& s, MatchType mt, X509Location loc)
: criteria(s), match_type(mt), location(loc) {}
std::string criteria;
MatchType match_type;
X509Location location;
bool operator==(const Criteria& c) const {
return criteria == c.criteria && match_type == c.match_type && location == c.location;
}
};
#endif
struct TLSParams {
enum { OPT_TLS = 100000, OPT_TLS_PLUGIN, OPT_TLS_CERTIFICATES, OPT_TLS_KEY, OPT_TLS_VERIFY_PEERS, OPT_TLS_CA_FILE, OPT_TLS_PASSWORD };
std::string tlsCertPath, tlsKeyPath, tlsCAPath, tlsPassword;
std::string tlsCertBytes, tlsKeyBytes, tlsCABytes;
};
class TLSPolicy : ReferenceCounted<TLSPolicy> {
public:
enum class Is {
CLIENT,
SERVER
};
TLSPolicy(Is client) : is_client(client == Is::CLIENT) {}
virtual ~TLSPolicy();
virtual void addref() { ReferenceCounted<TLSPolicy>::addref(); }
virtual void delref() { ReferenceCounted<TLSPolicy>::delref(); }
#ifndef TLS_DISABLED
static std::string ErrorString(boost::system::error_code e);
void set_verify_peers(std::vector<std::string> verify_peers);
bool verify_peer(bool preverified, X509_STORE_CTX* store_ctx);
std::string toString() const;
struct Rule {
explicit Rule(std::string input);
std::string toString() const;
std::map< NID, Criteria > subject_criteria;
std::map< NID, Criteria > issuer_criteria;
std::map< NID, Criteria > root_criteria;
bool verify_cert = true;
bool verify_time = true;
};
std::vector<Rule> rules;
#endif
bool is_client;
};
#define TLS_PLUGIN_FLAG "--tls_plugin"
#define TLS_CERTIFICATE_FILE_FLAG "--tls_certificate_file"
#define TLS_KEY_FILE_FLAG "--tls_key_file"
#define TLS_VERIFY_PEERS_FLAG "--tls_verify_peers"
#define TLS_CA_FILE_FLAG "--tls_ca_file"
#define TLS_PASSWORD_FLAG "--tls_password"
#define TLS_OPTION_FLAGS \
{ TLSParams::OPT_TLS_PLUGIN, TLS_PLUGIN_FLAG, SO_REQ_SEP }, \
{ TLSParams::OPT_TLS_CERTIFICATES, TLS_CERTIFICATE_FILE_FLAG, SO_REQ_SEP }, \
{ TLSParams::OPT_TLS_KEY, TLS_KEY_FILE_FLAG, SO_REQ_SEP }, \
{ TLSParams::OPT_TLS_VERIFY_PEERS, TLS_VERIFY_PEERS_FLAG, SO_REQ_SEP }, \
{ TLSParams::OPT_TLS_PASSWORD, TLS_PASSWORD_FLAG, SO_REQ_SEP }, \
{ TLSParams::OPT_TLS_CA_FILE, TLS_CA_FILE_FLAG, SO_REQ_SEP },
#define TLS_HELP \
" " TLS_CERTIFICATE_FILE_FLAG " CERTFILE\n" \
" The path of a file containing the TLS certificate and CA\n" \
" chain.\n" \
" " TLS_CA_FILE_FLAG " CERTAUTHFILE\n" \
" The path of a file containing the CA certificates chain.\n" \
" " TLS_KEY_FILE_FLAG " KEYFILE\n" \
" The path of a file containing the private key corresponding\n" \
" to the TLS certificate.\n" \
" " TLS_PASSWORD_FLAG " PASSCODE\n" \
" The passphrase of encrypted private key\n" \
" " TLS_VERIFY_PEERS_FLAG " CONSTRAINTS\n" \
" The constraints by which to validate TLS peers. The contents\n" \
" and format of CONSTRAINTS are plugin-specific.\n"
#endif

View File

@ -915,7 +915,7 @@ TraceEvent& TraceEvent::detailfNoMetric( std::string&& key, const char* valueFor
	return *this;
}
TraceEvent& TraceEvent::trackLatest( const char *trackingKey ){
TraceEvent& TraceEvent::trackLatest(const std::string& trackingKey ){
	ASSERT(!logged);
	this->trackingKey = trackingKey;
	ASSERT( this->trackingKey.size() != 0 && this->trackingKey[0] != '/' && this->trackingKey[0] != '\\');

View File

@ -453,7 +453,7 @@ private:
	TraceEvent& detailImpl( std::string&& key, std::string&& value, bool writeEventMetricField=true );
public:
	TraceEvent& backtrace(const std::string& prefix = "");
	TraceEvent& trackLatest( const char* trackingKey );
	TraceEvent& trackLatest(const std::string& trackingKey );
	TraceEvent& sample( double sampleRate, bool logSampleRate=true );
	// Sets the maximum length a field can be before it gets truncated. A value of 0 uses the default, a negative value
@ -560,6 +560,16 @@ private:
extern LatestEventCache latestEventCache;
struct EventCacheHolder : public ReferenceCounted<EventCacheHolder> {
std::string trackingKey;
EventCacheHolder(const std::string& trackingKey) : trackingKey(trackingKey) {}
~EventCacheHolder() {
latestEventCache.clear(trackingKey);
}
};
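(Illustration only, not part of the patch.) A sketch of how EventCacheHolder might be used to tie a "latest" trace event to the lifetime of its owner, assuming flow's Reference<> smart pointer and the std::string overload of trackLatest() introduced above; the event and key names are made up.

// Hypothetical owner: publishes a latest-status event, which is cleared when the owner dies.
struct ExampleReporter {
	Reference<EventCacheHolder> latestStatus;
	ExampleReporter() : latestStatus(new EventCacheHolder("ExampleLatestStatus")) {}
	void report(int value) {
		TraceEvent("ExampleStatus").detail("Value", value).trackLatest(latestStatus->trackingKey);
	}
	// ~ExampleReporter() releases latestStatus; ~EventCacheHolder() then clears the cached event.
};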
// Evil but potentially useful for verbose messages:
#if CENABLED(0, NOT_IN_CLEAN)
#define TRACE( t, m ) if (TraceEvent::isEnabled(t)) TraceEvent(t,m)

View File

@ -485,4 +485,16 @@ TEST_CASE("/flow/FlatBuffers/Standalone") {
	return Void();
}
// Meant to be run with valgrind or asan, to catch heap buffer overflows
TEST_CASE("/flow/FlatBuffers/Void") {
Standalone<StringRef> msg = ObjectWriter::toValue(Void(), Unversioned());
auto buffer = std::make_unique<uint8_t[]>(msg.size()); // Make a heap allocation of precisely the right size, so
// that asan or valgrind will catch any overflows
memcpy(buffer.get(), msg.begin(), msg.size());
ObjectReader rd(buffer.get(), Unversioned());
Void x;
rd.deserialize(x);
return Void();
}
} // namespace unit_tests

View File

@ -927,28 +927,30 @@ struct LoadSaveHelper : Context {
		static constexpr bool isSerializing = false;
		static constexpr bool is_fb_visitor = true;
const uint16_t* vtable;
		const uint8_t* current;
		SerializeFun(const uint16_t* vtable, const uint8_t* current, Context& context)
		  : Context(context), vtable(vtable), current(current) {}
		SerializeFun(const uint8_t* current, Context& context) : Context(context), current(current) {}
		template <class... Args>
		void operator()(Args&... members) {
if (sizeof...(Args) == 0) {
return;
}
uint32_t current_offset = interpret_as<uint32_t>(current);
current += current_offset;
int32_t vtable_offset = interpret_as<int32_t>(current);
const uint16_t* vtable = reinterpret_cast<const uint16_t*>(current - vtable_offset);
			int i = 0;
			uint16_t vtable_length = vtable[i++] / sizeof(uint16_t);
			uint16_t table_length = vtable[i++];
			for_each(LoadMember<Context>{ vtable, current, vtable_length, table_length, i, this->context() }, members...);
			for_each(LoadMember<Context>{ vtable, current, vtable_length, table_length, i, this->context() },
			         members...);
		}
	};
	template <class Member>
	std::enable_if_t<expect_serialize_member<Member>> load(Member& member, const uint8_t* current) {
		uint32_t current_offset = interpret_as<uint32_t>(current);
		current += current_offset;
		int32_t vtable_offset = interpret_as<int32_t>(current);
		const uint16_t* vtable = reinterpret_cast<const uint16_t*>(current - vtable_offset);
		SerializeFun fun(vtable, current, this->context());
		SerializeFun fun(current, this->context());
		if constexpr (serializable_traits<Member>::value) {
			serializable_traits<Member>::serialize(fun, member);
		} else {
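(Illustration only.) For readers unfamiliar with the layout being walked above, a standalone sketch of the same pointer arithmetic: the buffer begins with a 32-bit offset to the root table, and the first field of the table is a signed offset back to its vtable, whose first two entries give the vtable size and the table size. The helper name interpretAs mirrors interpret_as from the code above.

#include <cstdint>
#include <cstring>

template <class T>
T interpretAs(const uint8_t* p) { // read a value out of the buffer without aliasing issues
	T v;
	memcpy(&v, p, sizeof(T));
	return v;
}

void locateRootTable(const uint8_t* buffer) {
	const uint8_t* current = buffer;
	current += interpretAs<uint32_t>(current);                // jump to the root table
	int32_t vtable_offset = interpretAs<int32_t>(current);    // signed offset from table back to vtable
	const uint16_t* vtable = reinterpret_cast<const uint16_t*>(current - vtable_offset);
	uint16_t vtable_bytes = vtable[0];                        // total size of the vtable in bytes
	uint16_t table_bytes = vtable[1];                         // inline size of the table
	(void)vtable_bytes; (void)table_bytes;
}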

View File

@ -15,6 +15,7 @@
<ActorCompiler Include="CompressedInt.actor.cpp" /> <ActorCompiler Include="CompressedInt.actor.cpp" />
<ClCompile Include="crc32c.cpp" /> <ClCompile Include="crc32c.cpp" />
<ClCompile Include="crc32c-generated-constants.cpp" /> <ClCompile Include="crc32c-generated-constants.cpp" />
<ClCompile Include="Arena.cpp" />
<ClCompile Include="DeterministicRandom.cpp" /> <ClCompile Include="DeterministicRandom.cpp" />
<ClCompile Include="Deque.cpp" /> <ClCompile Include="Deque.cpp" />
<ClCompile Include="Error.cpp" /> <ClCompile Include="Error.cpp" />
@ -51,7 +52,7 @@
<ClCompile Include="version.cpp" /> <ClCompile Include="version.cpp" />
<ClCompile Include="SignalSafeUnwind.cpp" /> <ClCompile Include="SignalSafeUnwind.cpp" />
<ClCompile Include="serialize.cpp" /> <ClCompile Include="serialize.cpp" />
<ClCompile Include="TLSPolicy.cpp" /> <ActorCompiler Include="TLSConfig.actor.cpp" />
</ItemGroup> </ItemGroup>
<ItemGroup> <ItemGroup>
<ClInclude Include="CompressedInt.h" /> <ClInclude Include="CompressedInt.h" />
@ -96,7 +97,9 @@
<ClInclude Include="Platform.h" /> <ClInclude Include="Platform.h" />
<ClInclude Include="ThreadSafeQueue.h" /> <ClInclude Include="ThreadSafeQueue.h" />
<ClInclude Include="Trace.h" /> <ClInclude Include="Trace.h" />
<ClInclude Include="TLSPolicy.h" /> <ActorCompiler Include="TLSConfig.actor.h" >
<EnableCompile>false</EnableCompile>
</ActorCompiler>
<ClInclude Include="SignalSafeUnwind.h" /> <ClInclude Include="SignalSafeUnwind.h" />
<ClInclude Include="UnitTest.h" /> <ClInclude Include="UnitTest.h" />
<ActorCompiler Include="ThreadHelper.actor.h"> <ActorCompiler Include="ThreadHelper.actor.h">

View File

@ -15,6 +15,7 @@
  </ItemGroup>
  <ItemGroup>
    <ClCompile Include="crc32c-generated-constants.cpp" />
    <ClCompile Include="Arena.cpp" />
    <ClCompile Include="DeterministicRandom.cpp" />
    <ClCompile Include="Error.cpp" />
    <ClCompile Include="FastAlloc.cpp" />

View File

@ -639,6 +639,46 @@ protected:
	}
};
template <class V>
class ReferencedObject : NonCopyable, public ReferenceCounted<ReferencedObject<V>> {
public:
ReferencedObject() : value() {}
ReferencedObject(V const& v) : value(v) {}
ReferencedObject(V&& v) : value(std::move(v)) {}
ReferencedObject(ReferencedObject&& r) : value(std::move(r.value)) {}
void operator=(ReferencedObject&& r) {
value = std::move(r.value);
}
V const& get() const {
return value;
}
V& mutate() {
return value;
}
void set(V const& v) {
value = v;
}
void set(V&& v) {
value = std::move(v);
}
static Reference<ReferencedObject<V>> from(V const& v) {
return Reference<ReferencedObject<V>>(new ReferencedObject<V>(v));
}
static Reference<ReferencedObject<V>> from(V&& v) {
return Reference<ReferencedObject<V>>(new ReferencedObject<V>(std::move(v)));
}
private:
V value;
};
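(Illustration only, not part of the patch.) A brief usage sketch of the new ReferencedObject helper, assuming flow's Reference<> smart pointer; the value type and contents are arbitrary.

// Share one mutable value between several owners without copying it.
Reference<ReferencedObject<std::string>> shared = ReferencedObject<std::string>::from(std::string("hello"));
Reference<ReferencedObject<std::string>> alias = shared; // both references point at the same value
shared->mutate() += ", world";                            // in-place mutation through the holder
const std::string& current = shared->get();               // alias->get() now also sees "hello, world"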
template <class V>
class AsyncVar : NonCopyable, public ReferenceCounted<AsyncVar<V>> {
public:

View File

@ -32,7 +32,6 @@
#endif
#include "flow/serialize.h"
#include "flow/IRandom.h"
#include "flow/TLSPolicy.h"
enum class TaskPriority {
	Max = 1000000,
@ -44,6 +43,7 @@ enum class TaskPriority {
	DiskIOComplete = 9150,
	LoadBalancedEndpoint = 9000,
	ReadSocket = 9000,
	AcceptSocket = 8950,
	Handshake = 8900,
	CoordinationReply = 8810,
	Coordination = 8800,
@ -412,9 +412,10 @@ typedef void* flowGlobalType;
typedef NetworkAddress (*NetworkAddressFuncPtr)();
typedef NetworkAddressList (*NetworkAddressesFuncPtr)();
class TLSConfig;
class INetwork;
extern INetwork* g_network;
extern INetwork* newNet2(bool useThreadPool = false, bool useMetrics = false, Reference<TLSPolicy> policy = Reference<TLSPolicy>(), const TLSParams& tlsParams = TLSParams());
extern INetwork* newNet2(const TLSConfig& tlsConfig, bool useThreadPool = false, bool useMetrics = false);
class INetwork {
public:

View File

@ -32,7 +32,7 @@
<Wix xmlns='http://schemas.microsoft.com/wix/2006/wi'>
    <Product Name='$(var.Title)'
             Id='{F1D7D860-8033-4592-8A68-18E4BD8DAB5C}'
             Id='{3742289A-DBB1-4931-B01E-45C5BBB689F0}'
             UpgradeCode='{A95EA002-686E-4164-8356-C715B7F8B1C8}'
             Version='$(var.Version)'
             Manufacturer='$(var.Manufacturer)'