/*
 * RestoreApplier.actor.h
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// This file declares the RestoreApplier interface and actors.
#pragma once

#if defined(NO_INTELLISENSE) && !defined(FDBSERVER_RESTORE_APPLIER_G_H)
#define FDBSERVER_RESTORE_APPLIER_G_H
#include "fdbserver/RestoreApplier.actor.g.h"
#elif !defined(FDBSERVER_RESTORE_APPLIER_H)
#define FDBSERVER_RESTORE_APPLIER_H

#include <sstream>
#include "flow/Stats.h"
#include "fdbclient/FDBTypes.h"
#include "fdbclient/Atomic.h"
#include "fdbclient/CommitTransaction.h"
#include "fdbrpc/fdbrpc.h"
#include "fdbrpc/Locality.h"
#include "fdbserver/CoordinationInterface.h"
#include "fdbclient/RestoreWorkerInterface.actor.h"
#include "fdbserver/RestoreUtil.h"
#include "fdbserver/RestoreRoleCommon.actor.h"

#include "flow/actorcompiler.h" // has to be last include

// Applies an atomic operation (e.g., AddValue, ByteMax) to an existing value and returns the result;
// used below to fold pending atomic ops into a StagingKey's precomputed value.
Value applyAtomicOp(Optional<StringRef> existingValue, Value value, MutationRef::Type type);
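
// Illustrative sketch of how the applier uses applyAtomicOp (the calls below are an example, not code
// from this file; the literals are hypothetical 4-byte little-endian integers):
//
//   Value base = LiteralStringRef("\x01\x00\x00\x00");                // encodes 1
//   Value delta = LiteralStringRef("\x02\x00\x00\x00");               // encodes 2
//   Value result = applyAtomicOp(base, delta, MutationRef::AddValue); // result encodes 3
//
// When a key has no base value yet, an empty Optional<StringRef>() is passed as existingValue.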

struct StagingKey {
	Key key; // TODO: Maybe not needed?
	Value val;
	MutationRef::Type type; // set or clear
	Version version; // largest version of set or clear for the key
	std::map<Version, MutationsVec> pendingMutations; // mutations other than set or clear, keyed by version

	explicit StagingKey() : version(0), type(MutationRef::MAX_ATOMIC_OP) {}

	// Buffer a mutation for this key. Set and clear mutations at a newer version replace the base value;
	// other mutations (atomic ops) are kept in pendingMutations until precomputeResult() folds them in.
	void add(const MutationRef& m, Version newVersion) {
		if (version < newVersion) {
			if (m.type == MutationRef::SetValue || m.type == MutationRef::ClearRange) {
				key = m.param1;
				val = m.param2;
				type = (MutationRef::Type)m.type;
				version = newVersion;
			} else {
				if (pendingMutations.find(newVersion) == pendingMutations.end()) {
					pendingMutations.emplace(newVersion, MutationsVec());
				}
				// TODO: Do we really need deep copy?
				MutationsVec& mutations = pendingMutations[newVersion];
				mutations.push_back_deep(mutations.arena(), m);
			}
		} else if (version == newVersion) {
			TraceEvent("FastRestoreApplierStagingKeyMutationAtSameVersion")
			    .detail("Version", newVersion)
			    .detail("NewMutation", m.toString())
			    .detail("ExistingKeyType", typeString[type]);
			if (m.type == MutationRef::SetValue || m.type == MutationRef::ClearRange) {
				TraceEvent(SevError, "FastRestoreApplierStagingKeyMutationAtSameVersionUnhandled")
				    .detail("Version", newVersion)
				    .detail("NewMutation", m.toString())
				    .detail("ExistingKeyType", typeString[type]);
			}
		} // else the input mutation is older than what is buffered and can be ignored
	}
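
	// Illustrative example (not in the original file) of how a StagingKey evolves as mutations arrive;
	// k, v0, v1 and d1 are hypothetical StringRefs:
	//
	//   StagingKey sk;
	//   sk.add(MutationRef(MutationRef::SetValue, k, v1), 100); // base: val = v1, version = 100
	//   sk.add(MutationRef(MutationRef::AddValue, k, d1), 110); // buffered in pendingMutations[110]
	//   sk.add(MutationRef(MutationRef::SetValue, k, v0), 90);  // older than version 100, ignored
	//   sk.precomputeResult(); // folds d1 into val; version becomes 110, type becomes SetValue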

	// Fold all pending atomic-op mutations with version newer than this->version into val, in version
	// order, so that the key can later be committed to the DB as a single set or clear.
	void precomputeResult() {
		TraceEvent(SevDebug, "FastRestoreApplierPrecomputeResult")
		    .detail("Key", key)
		    .detail("Version", version)
		    .detail("LargestPendingVersion", (pendingMutations.empty() ? -1 : pendingMutations.rbegin()->first));
		std::map<Version, MutationsVec>::iterator lb = pendingMutations.lower_bound(version);
		if (lb == pendingMutations.end()) {
			return;
		}
		if (lb->first == version) {
			// Sanity check: mutations at the buffered version must be atomic ops (which can be ignored)
			// or carry the same value as the buffered one
			for (int i = 0; i < lb->second.size(); i++) {
				MutationRef m = lb->second[i];
				if (m.type == MutationRef::SetValue || m.type == MutationRef::ClearRange) {
					if (std::tie(type, key, val) != std::tie(m.type, m.param1, m.param2)) {
						TraceEvent(SevError, "FastRestoreApplierPrecomputeResultUnhandledSituation")
						    .detail("BufferedType", typeString[type])
						    .detail("PendingType", typeString[m.type])
						    .detail("BufferedVal", val.toString())
						    .detail("PendingVal", m.param2.toString());
					}
				}
			}
		}
		while (lb != pendingMutations.end()) {
			if (lb->first == version) {
				lb++; // skip the already-buffered version; without advancing here the loop never terminates
				continue;
			}
			for (auto& mutation : lb->second) {
				if (isAtomicOp((MutationRef::Type)mutation.type)) {
					val = applyAtomicOp(val, mutation.param2, (MutationRef::Type)mutation.type);
				} else if (mutation.type == MutationRef::SetValue || mutation.type == MutationRef::ClearRange) {
					TraceEvent(SevError, "FastRestoreApplierPrecomputeResultUnexpectedSet")
					    .detail("Type", typeString[mutation.type])
					    .detail("Version", lb->first);
				} else {
					TraceEvent(SevWarnAlways, "FastRestoreApplierPrecomputeResultSkipUnexpectedBackupMutation")
					    .detail("Type", typeString[mutation.type])
					    .detail("Version", lb->first);
				}
			}
			version = lb->first;
			type = MutationRef::SetValue; // Precomputed result should be set to DB.
			lb++; // move on to the next pending version
		}
	}
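
	// Worked example (illustrative): if version == 100 with a SetValue base and pendingMutations holds
	// atomic ops at versions 100, 110, and 120, then lower_bound(100) starts at 100 (whose ops are only
	// sanity-checked), the ops at 110 and then 120 are folded into val in order, and the key ends with
	// version == 120 and type == SetValue, ready to be written to the DB as a single set.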

	// Does the key have at least one set or clear mutation to provide the base value?
	bool hasBaseValue() {
		if (version > 0) {
			ASSERT(type == MutationRef::SetValue || type == MutationRef::ClearRange);
		}
		return version > 0;
	}

	// Have all pendingMutations been pre-applied to val?
	bool hasPrecomputed() {
		ASSERT(pendingMutations.empty() || pendingMutations.rbegin()->first >= pendingMutations.begin()->first);
		return pendingMutations.empty() || version >= pendingMutations.rbegin()->first;
	}

	int expectedMutationSize() { return key.size() + val.size(); }
};

// A range mutation (e.g., ClearRange) buffered at a version; kept in a std::set ordered by
// (version, type, param1, param2).
struct StagingKeyRange {
	Standalone<MutationRef> mutation;
	Version version;

	explicit StagingKeyRange(MutationRef m, Version newVersion) : mutation(m), version(newVersion) {}

	bool operator<(const StagingKeyRange& rhs) const {
		return std::tie(version, mutation.type, mutation.param1, mutation.param2) <
		       std::tie(rhs.version, rhs.mutation.type, rhs.mutation.param1, rhs.mutation.param2);
	}
};
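
// Illustrative ordering (hypothetical keys a, b, c): because operator< compares version first,
//   StagingKeyRange(MutationRef(MutationRef::ClearRange, a, b), 100)
// sorts before
//   StagingKeyRange(MutationRef(MutationRef::ClearRange, a, c), 200)
// inside stagingKeyRanges, so buffered range mutations can be walked in version order.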

struct ApplierBatchData : public ReferenceCounted<ApplierBatchData> {
	// processedFileState: key: RestoreAsset; value: largest version of mutation received on the applier
	std::map<RestoreAsset, NotifiedVersion> processedFileState;
	Optional<Future<Void>> dbApplier;
	VersionedMutationsMap kvOps; // Mutations at each version
	std::map<Key, StagingKey> stagingKeys;
	std::set<StagingKeyRange> stagingKeyRanges;
	FlowLock applyStagingKeysBatchLock; // Limits how many staging-key batches are applied to the DB concurrently

	Future<Void> pollMetrics;

	// Status counters
	struct Counters {
		CounterCollection cc;
		Counter receivedBytes, receivedWeightedBytes, receivedMutations, receivedAtomicOps;
		Counter appliedWeightedBytes, appliedMutations, appliedAtomicOps;
		Counter appliedTxns;

		Counters(ApplierBatchData* self, UID applierInterfID, int batchIndex)
		  : cc("ApplierBatch", applierInterfID.toString() + ":" + std::to_string(batchIndex)),
		    receivedBytes("ReceivedBytes", cc), receivedMutations("ReceivedMutations", cc),
		    receivedAtomicOps("ReceivedAtomicOps", cc),
		    receivedWeightedBytes("ReceivedWeightedMutations", cc),
		    appliedWeightedBytes("AppliedWeightedBytes", cc), appliedMutations("AppliedMutations", cc),
		    appliedAtomicOps("AppliedAtomicOps", cc), appliedTxns("AppliedTxns", cc) {}
	} counters;

	void addref() { return ReferenceCounted<ApplierBatchData>::addref(); }
	void delref() { return ReferenceCounted<ApplierBatchData>::delref(); }

	explicit ApplierBatchData(UID nodeID, int batchIndex)
	  : counters(this, nodeID, batchIndex), applyStagingKeysBatchLock(SERVER_KNOBS->FASTRESTORE_APPLYING_PARALLELISM) {
		pollMetrics =
		    traceCounters("FastRestoreApplierMetrics", nodeID, SERVER_KNOBS->FASTRESTORE_ROLE_LOGGING_DELAY, &counters.cc,
		                  nodeID.toString() + "/RestoreApplierMetrics/" + std::to_string(batchIndex));
		TraceEvent("FastRestoreApplierMetricsCreated").detail("Node", nodeID);
	}
	~ApplierBatchData() = default;

	// Route a mutation: point mutations are merged into the per-key StagingKey; range mutations are
	// buffered separately in stagingKeyRanges.
	void addMutation(MutationRef m, Version ver) {
		if (!isRangeMutation(m)) {
			if (stagingKeys.find(m.param1) == stagingKeys.end()) {
				stagingKeys.emplace(m.param1, StagingKey());
			}
			stagingKeys[m.param1].add(m, ver);
		} else {
			stagingKeyRanges.insert(StagingKeyRange(m, ver));
		}
	}
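
	// Illustrative routing example (hypothetical keys and values, not part of the original code):
	//
	//   ApplierBatchData data(nodeID, 1);
	//   data.addMutation(MutationRef(MutationRef::SetValue, k1, v1), 100);   // merged into stagingKeys[k1]
	//   data.addMutation(MutationRef(MutationRef::AddValue, k1, d1), 110);   // buffered inside stagingKeys[k1]
	//   data.addMutation(MutationRef(MutationRef::ClearRange, k1, k2), 120); // inserted into stagingKeyRanges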

	// Return true if all staging keys have been precomputed
	bool allKeysPrecomputed() {
		for (auto& stagingKey : stagingKeys) {
			if (!stagingKey.second.hasPrecomputed()) {
				TraceEvent("FastRestoreApplierAllKeysPrecomputedFalse")
				    .detail("Key", stagingKey.first)
				    .detail("BufferedVersion", stagingKey.second.version)
				    .detail("MaxPendingVersion", stagingKey.second.pendingMutations.rbegin()->first);
				return false;
			}
		}
		TraceEvent("FastRestoreApplierAllKeysPrecomputed");
		return true;
	}

	void reset() {
		kvOps.clear();
		dbApplier = Optional<Future<Void>>();
	}

	void sanityCheckMutationOps() {
		if (kvOps.empty()) return;

		ASSERT_WE_THINK(isKVOpsSorted());
		ASSERT_WE_THINK(allOpsAreKnown());
	}

	bool isKVOpsSorted() {
		bool ret = true;
		auto prev = kvOps.begin();
		for (auto it = kvOps.begin(); it != kvOps.end(); ++it) {
			if (prev->first > it->first) {
				ret = false;
				break;
			}
			prev = it;
		}
		return ret;
	}

	bool allOpsAreKnown() {
		bool ret = true;
		for (auto it = kvOps.begin(); it != kvOps.end(); ++it) {
			for (auto m = it->second.begin(); m != it->second.end(); ++m) {
				if (m->type == MutationRef::SetValue || m->type == MutationRef::ClearRange ||
				    isAtomicOp((MutationRef::Type)m->type))
					continue;
				else {
					TraceEvent(SevError, "FastRestore").detail("UnknownMutationType", m->type);
					ret = false;
				}
			}
		}
		return ret;
	}
};
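
// Sketch of the intended per-version-batch flow on the applier (a summary of how this data is expected
// to be used, not code from this header): loaders send mutations, addMutation() stages them into
// stagingKeys / stagingKeyRanges, precomputeResult() folds pending atomic ops into single values
// (checked by allKeysPrecomputed()), and the precomputed sets and clears are then committed to the
// destination database in transactions gated by applyStagingKeysBatchLock.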

struct RestoreApplierData : RestoreRoleData, public ReferenceCounted<RestoreApplierData> {
	// Buffer for uncommitted data at ongoing version batches
	std::map<int, Reference<ApplierBatchData>> batch;
	NotifiedVersion finishedBatch; // The version batch that has been applied to DB

	void addref() { return ReferenceCounted<RestoreApplierData>::addref(); }
	void delref() { return ReferenceCounted<RestoreApplierData>::delref(); }

	explicit RestoreApplierData(UID applierInterfID, int assignedIndex) {
		nodeID = applierInterfID;
		nodeIndex = assignedIndex;

		// Q: Why do we need to initMetric?
		// version.initMetric(LiteralStringRef("RestoreApplier.Version"), cc.id);

		role = RestoreRole::Applier;
	}

	~RestoreApplierData() = default;

	void initVersionBatch(int batchIndex) {
		TraceEvent("FastRestoreApplierInitVersionBatch", id()).detail("BatchIndex", batchIndex);
		batch[batchIndex] = Reference<ApplierBatchData>(new ApplierBatchData(nodeID, batchIndex));
	}

	void resetPerRestoreRequest() {
		batch.clear();
		finishedBatch = NotifiedVersion(0);
	}

	std::string describeNode() {
		std::stringstream ss;
		ss << "NodeID:" << nodeID.toString() << " nodeIndex:" << nodeIndex;
		return ss.str();
	}
};

ACTOR Future<Void> restoreApplierCore(RestoreApplierInterface applierInterf, int nodeIndex, Database cx);

#include "flow/unactorcompiler.h"
#endif