/*
 * masterserver.actor.cpp
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <algorithm>
#include <iterator>

#include "fdbrpc/sim_validation.h"
#include "fdbserver/CoordinatedState.h"
#include "fdbserver/CoordinationInterface.h" // copy constructors for ServerCoordinators class
#include "fdbserver/Knobs.h"
#include "fdbserver/MasterInterface.h"
#include "fdbserver/ResolutionBalancer.actor.h"
#include "fdbserver/ServerDBInfo.h"
#include "flow/ActorCollection.h"
#include "flow/Trace.h"
#include "fdbclient/VersionVector.h"

#include "flow/actorcompiler.h" // This must be the last #include.

struct MasterData : NonCopyable, ReferenceCounted<MasterData> {
    UID dbgid;

    Version lastEpochEnd, // The last version in the old epoch not (to be) rolled back in this recovery
        recoveryTransactionVersion; // The first version in this epoch

    NotifiedVersion prevTLogVersion; // Order of transactions to tlogs

    NotifiedVersion liveCommittedVersion; // The largest live committed version reported by commit proxies.
    bool databaseLocked;
    Optional<Value> proxyMetadataVersion;
    Version minKnownCommittedVersion;

    ServerCoordinators coordinators;

    Version version; // The last version assigned to a proxy by getVersion()
    double lastVersionTime;
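
    // Reference point (the "version epoch") used by figureVersion() so that commit versions track
    // wall-clock time; populated from UpdateRecoveryDataRequest::versionEpoch when present.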
    Optional<Version> referenceVersion;

    std::map<UID, CommitProxyVersionReplies> lastCommitProxyVersionReplies;

    MasterInterface myInterface;

    ResolutionBalancer resolutionBalancer;

    bool forceRecovery;

    // Captures the latest commit version targeted for each storage server in the cluster.
    // @todo We need to ensure that the latest commit versions of storage servers stay
    // up-to-date in the presence of key range splits/merges.
    VersionVector ssVersionVector;

    CounterCollection cc;
    Counter getCommitVersionRequests;
    Counter getLiveCommittedVersionRequests;
    Counter reportLiveCommittedVersionRequests;
    // This counter gives an estimate of the number of non-empty peeks that storage servers
    // should do from tlogs (in the worst case, ignoring blocking peek timeouts).
    Counter versionVectorTagUpdates;
    Counter waitForPrevCommitRequests;
    Counter nonWaitForPrevCommitRequests;
    LatencySample versionVectorSizeOnCVReply;
    LatencySample waitForPrevLatencies;

    PromiseStream<Future<Void>> addActor;

    Future<Void> logger;
    Future<Void> balancer;

    MasterData(Reference<AsyncVar<ServerDBInfo> const> const& dbInfo,
               MasterInterface const& myInterface,
               ServerCoordinators const& coordinators,
               ClusterControllerFullInterface const& clusterController,
               Standalone<StringRef> const& dbId,
               PromiseStream<Future<Void>> addActor,
               bool forceRecovery)
      : dbgid(myInterface.id()), lastEpochEnd(invalidVersion), recoveryTransactionVersion(invalidVersion),
        liveCommittedVersion(invalidVersion), databaseLocked(false), minKnownCommittedVersion(invalidVersion),
        coordinators(coordinators), version(invalidVersion), lastVersionTime(0), myInterface(myInterface),
        resolutionBalancer(&version), forceRecovery(forceRecovery), cc("Master", dbgid.toString()),
        getCommitVersionRequests("GetCommitVersionRequests", cc),
        getLiveCommittedVersionRequests("GetLiveCommittedVersionRequests", cc),
        reportLiveCommittedVersionRequests("ReportLiveCommittedVersionRequests", cc),
        versionVectorTagUpdates("VersionVectorTagUpdates", cc),
        waitForPrevCommitRequests("WaitForPrevCommitRequests", cc),
        nonWaitForPrevCommitRequests("NonWaitForPrevCommitRequests", cc),
        versionVectorSizeOnCVReply("VersionVectorSizeOnCVReply",
                                   dbgid,
                                   SERVER_KNOBS->LATENCY_METRICS_LOGGING_INTERVAL,
                                   SERVER_KNOBS->LATENCY_SAMPLE_SIZE),
        waitForPrevLatencies("WaitForPrevLatencies",
                             dbgid,
                             SERVER_KNOBS->LATENCY_METRICS_LOGGING_INTERVAL,
                             SERVER_KNOBS->LATENCY_SAMPLE_SIZE),
        addActor(addActor) {
        logger = traceCounters("MasterMetrics", dbgid, SERVER_KNOBS->WORKER_LOGGING_INTERVAL, &cc, "MasterMetrics");
        if (forceRecovery && !myInterface.locality.dcId().present()) {
            TraceEvent(SevError, "ForcedRecoveryRequiresDcID").log();
            forceRecovery = false;
        }
        balancer = resolutionBalancer.resolutionBalancing();
    }
    ~MasterData() = default;
};

Version figureVersion(Version current,
                      double now,
                      Version reference,
                      int64_t toAdd,
                      double maxVersionRateModifier,
                      int64_t maxVersionRateOffset) {
    // Versions should roughly follow wall-clock time, based on the
    // system clock of the current machine and an FDB-specific epoch.
    // Calculate the expected version and determine whether we need to
    // hand out versions faster or slower to stay in sync with the
    // clock.
    Version expected = now * SERVER_KNOBS->VERSIONS_PER_SECOND - reference;

    // Attempt to jump directly to the expected version. But make
    // sure that versions are still being handed out at a rate
    // around VERSIONS_PER_SECOND. This rate is scaled depending on
    // how far off the calculated version is from the expected
    // version.
    int64_t maxOffset = std::min(static_cast<int64_t>(toAdd * maxVersionRateModifier), maxVersionRateOffset);
    return std::clamp(expected, current + toAdd - maxOffset, current + toAdd + maxOffset);
}

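// Worked example of figureVersion() (a sketch, assuming VERSIONS_PER_SECOND is 1e6, as the test cases at
// the bottom of this file do): figureVersion(1e6, 1.5, 0, 100, 0.1, 1e6) computes expected = 1.5 * 1e6 =
// 1,500,000 and maxOffset = min(100 * 0.1, 1e6) = 10, so the result is clamped to the upper bound
// 1e6 + 100 + 10 = 1,000,110 rather than jumping all the way to the expected version.
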
ACTOR Future<Void> getVersion(Reference<MasterData> self, GetCommitVersionRequest req) {
    state Span span("M:getVersion"_loc, req.spanContext);
    state std::map<UID, CommitProxyVersionReplies>::iterator proxyItr =
        self->lastCommitProxyVersionReplies.find(req.requestingProxy); // lastCommitProxyVersionReplies never changes

    ++self->getCommitVersionRequests;

    if (proxyItr == self->lastCommitProxyVersionReplies.end()) {
        // Request from invalid proxy (e.g. from duplicate recruitment request)
        req.reply.send(Never());
        return Void();
    }

    TEST(proxyItr->second.latestRequestNum.get() < req.requestNum - 1); // Commit version request queued up
    wait(proxyItr->second.latestRequestNum.whenAtLeast(req.requestNum - 1));

    auto itr = proxyItr->second.replies.find(req.requestNum);
    if (itr != proxyItr->second.replies.end()) {
        TEST(true); // Duplicate request for sequence
        req.reply.send(itr->second);
    } else if (req.requestNum <= proxyItr->second.latestRequestNum.get()) {
        TEST(true); // Old request for previously acknowledged sequence - may be impossible with current FlowTransport
        ASSERT(req.requestNum <
               proxyItr->second.latestRequestNum.get()); // The latest request can never be acknowledged
        req.reply.send(Never());
    } else {
        GetCommitVersionReply rep;

        if (self->version == invalidVersion) {
            self->lastVersionTime = now();
            self->version = self->recoveryTransactionVersion;
            rep.prevVersion = self->lastEpochEnd;
        } else {
            double t1 = now();
            if (BUGGIFY) {
                t1 = self->lastVersionTime;
            }
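
            // The next statement scales the version advance by elapsed wall-clock time. A minimal sketch,
            // assuming VERSIONS_PER_SECOND is 1e6: if 0.25 seconds have elapsed since lastVersionTime, the
            // uncapped advance is 250,000 versions; the result is floored at 1 and capped at
            // MAX_READ_TRANSACTION_LIFE_VERSIONS.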
            Version toAdd =
                std::max<Version>(1,
                                  std::min<Version>(SERVER_KNOBS->MAX_READ_TRANSACTION_LIFE_VERSIONS,
                                                    SERVER_KNOBS->VERSIONS_PER_SECOND * (t1 - self->lastVersionTime)));

            rep.prevVersion = self->version;
            if (self->referenceVersion.present()) {
                self->version = figureVersion(self->version,
                                              g_network->timer(),
                                              self->referenceVersion.get(),
                                              toAdd,
                                              SERVER_KNOBS->MAX_VERSION_RATE_MODIFIER,
                                              SERVER_KNOBS->MAX_VERSION_RATE_OFFSET);
                ASSERT_GT(self->version, rep.prevVersion);
            } else {
                self->version = self->version + toAdd;
            }

            TEST(self->version - rep.prevVersion == 1); // Minimum possible version gap

            bool maxVersionGap = self->version - rep.prevVersion == SERVER_KNOBS->MAX_READ_TRANSACTION_LIFE_VERSIONS;
            TEST(maxVersionGap); // Maximum possible version gap
            self->lastVersionTime = t1;

            self->resolutionBalancer.setChangesInReply(req.requestingProxy, rep);
        }

        rep.version = self->version;
        rep.requestNum = req.requestNum;

        proxyItr->second.replies.erase(proxyItr->second.replies.begin(),
                                       proxyItr->second.replies.upper_bound(req.mostRecentProcessedRequestNum));
        proxyItr->second.replies[req.requestNum] = rep;
        ASSERT(rep.prevVersion >= 0);

        req.reply.send(rep);

        ASSERT(proxyItr->second.latestRequestNum.get() == req.requestNum - 1);
        proxyItr->second.latestRequestNum.set(req.requestNum);
    }

    return Void();
}

ACTOR Future<Void> provideVersions(Reference<MasterData> self) {
    state ActorCollection versionActors(false);

    loop choose {
        when(GetCommitVersionRequest req = waitNext(self->myInterface.getCommitVersion.getFuture())) {
            versionActors.add(getVersion(self, req));
        }
        when(wait(versionActors.getResult())) {}
    }
}

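// Applies a commit proxy's ReportRawCommittedVersionRequest: advances minKnownCommittedVersion, records any
// written tags in the storage-server version vector, and publishes the new live committed version (the set()
// call below wakes any waiters on the NotifiedVersion).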
void updateLiveCommittedVersion(Reference<MasterData> self, ReportRawCommittedVersionRequest req) {
    self->minKnownCommittedVersion = std::max(self->minKnownCommittedVersion, req.minKnownCommittedVersion);

    if (req.version > self->liveCommittedVersion.get()) {
        if (SERVER_KNOBS->ENABLE_VERSION_VECTOR && req.writtenTags.present()) {
            // TraceEvent("Received ReportRawCommittedVersionRequest").detail("Version",req.version);
            self->ssVersionVector.setVersion(req.writtenTags.get(), req.version);
            self->versionVectorTagUpdates += req.writtenTags.get().size();
        }

        auto curTime = now();
        // For debug validation, advance the timestamp of the current liveCommittedVersion, bounded by
        // now() plus the maximum version cache lag.
        debug_advanceVersionTimestamp(self->liveCommittedVersion.get(), curTime + CLIENT_KNOBS->MAX_VERSION_CACHE_LAG);
        // Also record req.version, with no time bound.
        debug_advanceVersionTimestamp(req.version, std::numeric_limits<double>::max());
        self->databaseLocked = req.locked;
        self->proxyMetadataVersion = req.metadataVersion;
        // Note the set call switches context to any waiters on liveCommittedVersion before continuing.
        self->liveCommittedVersion.set(req.version);
    }
    ++self->reportLiveCommittedVersionRequests;
}

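// With version vectors enabled, reports can arrive before their predecessors. waitForPrev() delays applying
// a report until liveCommittedVersion has reached the report's prevVersion, so committed versions are
// published in order.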
ACTOR Future<Void> waitForPrev(Reference<MasterData> self, ReportRawCommittedVersionRequest req) {
    state double startTime = now();
    wait(self->liveCommittedVersion.whenAtLeast(req.prevVersion.get()));
    double latency = now() - startTime;
    self->waitForPrevLatencies.addMeasurement(latency);
    ++self->waitForPrevCommitRequests;
    updateLiveCommittedVersion(self, req);
    req.reply.send(Void());
    return Void();
}

ACTOR Future<Void> serveLiveCommittedVersion(Reference<MasterData> self) {
    loop {
        choose {
            when(GetRawCommittedVersionRequest req = waitNext(self->myInterface.getLiveCommittedVersion.getFuture())) {
                if (req.debugID.present())
                    g_traceBatch.addEvent("TransactionDebug",
                                          req.debugID.get().first(),
                                          "MasterServer.serveLiveCommittedVersion.GetRawCommittedVersion");

                if (self->liveCommittedVersion.get() == invalidVersion) {
                    self->liveCommittedVersion.set(self->recoveryTransactionVersion);
                }
                ++self->getLiveCommittedVersionRequests;
                GetRawCommittedVersionReply reply;
                reply.version = self->liveCommittedVersion.get();
                reply.locked = self->databaseLocked;
                reply.metadataVersion = self->proxyMetadataVersion;
                reply.minKnownCommittedVersion = self->minKnownCommittedVersion;
                if (SERVER_KNOBS->ENABLE_VERSION_VECTOR) {
                    self->ssVersionVector.getDelta(req.maxVersion, reply.ssVersionVectorDelta);
                    self->versionVectorSizeOnCVReply.addMeasurement(reply.ssVersionVectorDelta.size());
                }
                req.reply.send(reply);
            }
            when(ReportRawCommittedVersionRequest req =
                     waitNext(self->myInterface.reportLiveCommittedVersion.getFuture())) {
                if (SERVER_KNOBS->ENABLE_VERSION_VECTOR && req.prevVersion.present() &&
                    (self->liveCommittedVersion.get() != invalidVersion) &&
                    (self->liveCommittedVersion.get() < req.prevVersion.get())) {
                    self->addActor.send(waitForPrev(self, req));
                } else {
                    updateLiveCommittedVersion(self, req);
                    ++self->nonWaitForPrevCommitRequests;
                    req.reply.send(Void());
                }
            }
        }
    }
}

ACTOR Future<Void> updateRecoveryData(Reference<MasterData> self) {
    loop {
        UpdateRecoveryDataRequest req = waitNext(self->myInterface.updateRecoveryData.getFuture());
        TraceEvent("UpdateRecoveryData", self->dbgid)
            .detail("RecoveryTxnVersion", req.recoveryTransactionVersion)
            .detail("LastEpochEnd", req.lastEpochEnd)
.detail("NumCommitProxies", req.commitProxies.size())
|
|
|
|
.detail("VersionEpoch", req.versionEpoch);
|
2022-03-19 06:14:02 +08:00
|
|
|
|
|
|
|
if (self->recoveryTransactionVersion == invalidVersion ||
|
2022-03-19 06:57:34 +08:00
|
|
|
req.recoveryTransactionVersion > self->recoveryTransactionVersion) {
|
2022-03-19 06:14:02 +08:00
|
|
|
self->recoveryTransactionVersion = req.recoveryTransactionVersion;
|
|
|
|
}
|
|
|
|
if (self->lastEpochEnd == invalidVersion || req.lastEpochEnd > self->lastEpochEnd) {
|
|
|
|
self->lastEpochEnd = req.lastEpochEnd;
|
|
|
|
}
|
|
|
|
if (req.commitProxies.size() > 0) {
|
|
|
|
self->lastCommitProxyVersionReplies.clear();
|
2017-05-26 04:48:44 +08:00
|
|
|
|
2022-03-22 12:35:48 +08:00
|
|
|
for (auto& p : req.commitProxies) {
|
2022-03-19 06:14:02 +08:00
|
|
|
self->lastCommitProxyVersionReplies[p.id()] = CommitProxyVersionReplies();
|
2017-05-26 04:48:44 +08:00
|
|
|
}
|
|
|
|
}
        if (req.versionEpoch.present()) {
            self->referenceVersion = req.versionEpoch.get();
        } else if (BUGGIFY) {
            // Cannot use a positive version epoch in simulation because of the
            // clock starting at 0. A positive version epoch would mean the initial
            // cluster version was negative.
            // TODO: Increase the size of this interval after fixing the issue
            // with restoring ranges with large version gaps.
            self->referenceVersion = deterministicRandom()->randomInt64(-1e6, 0);
        }

        self->resolutionBalancer.setCommitProxies(req.commitProxies);
        self->resolutionBalancer.setResolvers(req.resolvers);

        req.reply.send(Void());
    }
}

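// Errors on which the master terminates cleanly (trace "MasterTerminated" and return) rather than
// propagating the error to the worker.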
static std::set<int> const& normalMasterErrors() {
    static std::set<int> s;
    if (s.empty()) {
        s.insert(error_code_tlog_stopped);
        s.insert(error_code_tlog_failed);
        s.insert(error_code_commit_proxy_failed);
        s.insert(error_code_grv_proxy_failed);
        s.insert(error_code_resolver_failed);
        s.insert(error_code_backup_worker_failed);
        s.insert(error_code_recruitment_failed);
        s.insert(error_code_no_more_servers);
        s.insert(error_code_cluster_recovery_failed);
        s.insert(error_code_coordinated_state_conflict);
        s.insert(error_code_master_max_versions_in_flight);
        s.insert(error_code_worker_removed);
        s.insert(error_code_new_coordinators_timed_out);
        s.insert(error_code_broken_promise);
    }
    return s;
}

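// Main entry point for the master role: waits for the cluster controller to acknowledge this master in
// ServerDBInfo, then serves commit version, live committed version, and recovery data requests until the
// lifetime token is superseded or a failure is reported.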
ACTOR Future<Void> masterServer(MasterInterface mi,
                                Reference<AsyncVar<ServerDBInfo> const> db,
                                Reference<AsyncVar<Optional<ClusterControllerFullInterface>> const> ccInterface,
                                ServerCoordinators coordinators,
                                LifetimeToken lifetime,
                                bool forceRecovery) {
    state Future<Void> ccTimeout = delay(SERVER_KNOBS->CC_INTERFACE_TIMEOUT);
    while (!ccInterface->get().present() || db->get().clusterInterface != ccInterface->get().get()) {
        wait(ccInterface->onChange() || db->onChange() || ccTimeout);
        if (ccTimeout.isReady()) {
            TraceEvent("MasterTerminated", mi.id())
                .detail("Reason", "Timeout")
                .detail("CCInterface", ccInterface->get().present() ? ccInterface->get().get().id() : UID())
                .detail("DBInfoInterface", db->get().clusterInterface.id());
            return Void();
        }
    }

    state Future<Void> onDBChange = Void();
    state PromiseStream<Future<Void>> addActor;
    state Reference<MasterData> self(new MasterData(
        db, mi, coordinators, db->get().clusterInterface, LiteralStringRef(""), addActor, forceRecovery));
    state Future<Void> collection = actorCollection(addActor.getFuture());

    addActor.send(traceRole(Role::MASTER, mi.id()));
    addActor.send(provideVersions(self));
    addActor.send(serveLiveCommittedVersion(self));
    addActor.send(updateRecoveryData(self));

    TEST(!lifetime.isStillValid(db->get().masterLifetime, mi.id() == db->get().master.id())); // Master born doomed
    TraceEvent("MasterLifetime", self->dbgid).detail("LifetimeToken", lifetime.toString());

    try {
        loop choose {
            when(wait(onDBChange)) {
                onDBChange = db->onChange();
                if (!lifetime.isStillValid(db->get().masterLifetime, mi.id() == db->get().master.id())) {
                    TraceEvent("MasterTerminated", mi.id())
                        .detail("Reason", "LifetimeToken")
                        .detail("MyToken", lifetime.toString())
                        .detail("CurrentToken", db->get().masterLifetime.toString());
                    TEST(true); // Master replaced, dying
                    if (BUGGIFY)
                        wait(delay(5));
                    throw worker_removed();
                }
            }
            when(wait(collection)) {
                ASSERT(false);
                throw internal_error();
            }
        }
    } catch (Error& e) {
        state Error err = e;
        if (e.code() != error_code_actor_cancelled) {
            wait(delay(0.0));
        }
        while (!addActor.isEmpty()) {
            addActor.getFuture().pop();
        }

        TEST(err.code() == error_code_tlog_failed); // Master: terminated due to tLog failure
        TEST(err.code() == error_code_commit_proxy_failed); // Master: terminated due to commit proxy failure
        TEST(err.code() == error_code_grv_proxy_failed); // Master: terminated due to GRV proxy failure
        TEST(err.code() == error_code_resolver_failed); // Master: terminated due to resolver failure
        TEST(err.code() == error_code_backup_worker_failed); // Master: terminated due to backup worker failure

        if (normalMasterErrors().count(err.code())) {
            TraceEvent("MasterTerminated", mi.id()).error(err);
            return Void();
        }
        throw err;
    }
}

TEST_CASE("/fdbserver/MasterServer/FigureVersion/Simple") {
    ASSERT_EQ(
        figureVersion(0, 1.0, 0, 1e6, SERVER_KNOBS->MAX_VERSION_RATE_MODIFIER, SERVER_KNOBS->MAX_VERSION_RATE_OFFSET),
        1e6);
    ASSERT_EQ(figureVersion(1e6, 1.5, 0, 100, 0.1, 1e6), 1000110);
    ASSERT_EQ(figureVersion(1e6, 1.5, 0, 550000, 0.1, 1e6), 1500000);
    return Void();
}

TEST_CASE("/fdbserver/MasterServer/FigureVersion/Small") {
    // Should always advance by at least 1 version.
    ASSERT_EQ(figureVersion(1e6, 2.0, 0, 1, 0.0001, 1e6), 1000001);
    ASSERT_EQ(figureVersion(1e6, 0.0, 0, 1, 0.1, 1e6), 1000001);
    return Void();
}

TEST_CASE("/fdbserver/MasterServer/FigureVersion/MaxOffset") {
    ASSERT_EQ(figureVersion(1e6, 10.0, 0, 5e6, 0.1, 1e6), 6500000);
    ASSERT_EQ(figureVersion(1e6, 20.0, 0, 15e6, 0.1, 1e6), 17e6);
    return Void();
}

TEST_CASE("/fdbserver/MasterServer/FigureVersion/PositiveReferenceVersion") {
    ASSERT_EQ(figureVersion(1e6, 3.0, 1e6, 1e6, 0.1, 1e6), 2e6);
    ASSERT_EQ(figureVersion(1e6, 3.0, 1e6, 100, 0.1, 1e6), 1000110);
    return Void();
}

TEST_CASE("/fdbserver/MasterServer/FigureVersion/NegativeReferenceVersion") {
    ASSERT_EQ(figureVersion(0, 2.0, -1e6, 3e6, 0.1, 1e6), 3e6);
    ASSERT_EQ(figureVersion(0, 2.0, -1e6, 5e5, 0.1, 1e6), 550000);
    return Void();
}

TEST_CASE("/fdbserver/MasterServer/FigureVersion/Overflow") {
    // The upper range used in std::clamp should overflow.
    ASSERT_EQ(figureVersion(std::numeric_limits<Version>::max() - static_cast<Version>(1e6), 1.0, 0, 1e6, 0.1, 1e6),
              std::numeric_limits<Version>::max() - static_cast<Version>(1e6 * 0.1));
    return Void();
}