/*
 * RestoreLoader.actor.cpp
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2020 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// This file implements the functions and actors used by the RestoreLoader role.
// The RestoreLoader role starts with the restoreLoaderCore actor.

#include "flow/UnitTest.h"
#include "fdbclient/BackupContainer.h"
#include "fdbclient/BackupAgent.actor.h"
#include "fdbserver/RestoreLoader.actor.h"
#include "fdbserver/RestoreRoleCommon.actor.h"
#include "fdbserver/StorageMetrics.actor.h"

#include "flow/actorcompiler.h" // This must be the last #include.

// SerializedMutationListMap: Buffered mutation lists from data blocks in log files.
// Key is the signature/version of the mutation list; Value.first is the mutation list, which may come from multiple
// data blocks of a log file; Value.second is the largest part number of the mutation list, which is used to sanity
// check that the data blocks for the same mutation list are concatenated in increasing order of part number.
typedef std::map<Standalone<StringRef>, std::pair<Standalone<StringRef>, uint32_t>> SerializedMutationListMap;
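
// A minimal sketch of how this map is used (the actual population happens in
// _parseLogFileToMutationsOnLoader, declared below): each data block contributes one part of a
// mutation list; the part's payload is appended to Value.first and Value.second is advanced to the
// part's number, which must increase monotonically so that out-of-order data blocks can be detected.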

std::vector<UID> getApplierIDs(std::map<Key, UID>& rangeToApplier);
void splitMutation(const KeyRangeMap<UID>& krMap, MutationRef m, Arena& mvector_arena, VectorRef<MutationRef>& mvector,
                   Arena& nodeIDs_arena, VectorRef<UID>& nodeIDs);
void _parseSerializedMutation(KeyRangeMap<Version>* pRangeVersions,
                              std::map<LoadingParam, VersionedMutationsMap>::iterator kvOpsIter,
                              SerializedMutationListMap* mutationMap,
                              std::map<LoadingParam, SampledMutationsVec>::iterator samplesIter, LoaderCounters* cc,
                              const RestoreAsset& asset);

void handleRestoreSysInfoRequest(const RestoreSysInfoRequest& req, Reference<RestoreLoaderData> self);
ACTOR Future<Void> handleLoadFileRequest(RestoreLoadFileRequest req, Reference<RestoreLoaderData> self);
ACTOR Future<Void> handleSendMutationsRequest(RestoreSendMutationsToAppliersRequest req,
                                              Reference<RestoreLoaderData> self);
ACTOR Future<Void> sendMutationsToApplier(
    std::priority_queue<RestoreLoaderSchedSendLoadParamRequest>* sendLoadParamQueue,
    std::map<int, int>* inflightSendLoadParamReqs, NotifiedVersion* finishedBatch, VersionedMutationsMap* pkvOps,
    int batchIndex, RestoreAsset asset, bool isRangeFile, std::map<Key, UID>* pRangeToApplier,
    std::map<UID, RestoreApplierInterface>* pApplierInterfaces);
ACTOR static Future<Void> _parseLogFileToMutationsOnLoader(NotifiedVersion* pProcessedFileOffset,
                                                           SerializedMutationListMap* mutationMap,
                                                           Reference<IBackupContainer> bc, RestoreAsset asset);
ACTOR static Future<Void> parseLogFileToMutationsOnLoader(NotifiedVersion* pProcessedFileOffset,
                                                          SerializedMutationListMap* mutationMap,
                                                          Reference<IBackupContainer> bc, RestoreAsset asset);
ACTOR static Future<Void> _parseRangeFileToMutationsOnLoader(
    std::map<LoadingParam, VersionedMutationsMap>::iterator kvOpsIter,
    std::map<LoadingParam, SampledMutationsVec>::iterator samplesIter, LoaderCounters* cc,
    Reference<IBackupContainer> bc, Version version, RestoreAsset asset);
ACTOR Future<Void> handleFinishVersionBatchRequest(RestoreVersionBatchRequest req, Reference<RestoreLoaderData> self);

// Dispatch requests based on the node's busyness (i.e., CPU usage for now) and the requests' priorities:
// requests for earlier version batches are preferred, which is equivalent to preferring
// sendMutationsRequests over loadingFileRequests.
ACTOR Future<Void> dispatchRequests(Reference<RestoreLoaderData> self) {
    try {
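        // A rough sketch of the dispatch policy implemented below (ordering only; all thresholds come from knobs):
        //   1. Release at most one sendMutations request, preferring the next unfinished version batch (VB).
        //   2. Back off (delay and refresh stats) if inflight send/load requests or CPU usage exceed their knobs.
        //   3. Release queued per-LoadingParam send requests: the current VB first, then a bounded batch of future VBs.
        //   4. Release up to FASTRESTORE_SCHED_LOAD_REQ_BATCHSIZE load-file requests.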
        state int curVBInflightReqs = 0;
        state int sendLoadParams = 0;
        state int lastLoadReqs = 0;
        loop {
            TraceEvent(SevDebug, "FastRestoreLoaderDispatchRequests", self->id())
                .detail("SendingQueue", self->sendingQueue.size())
                .detail("LoadingQueue", self->loadingQueue.size())
                .detail("SendingLoadParamQueue", self->sendLoadParamQueue.size())
                .detail("InflightSendingReqs", self->inflightSendingReqs)
                .detail("InflightSendingReqsThreshold", SERVER_KNOBS->FASTRESTORE_SCHED_INFLIGHT_SEND_REQS)
                .detail("InflightLoadingReqs", self->inflightLoadingReqs)
                .detail("InflightLoadingReqsThreshold", SERVER_KNOBS->FASTRESTORE_SCHED_INFLIGHT_LOAD_REQS)
                .detail("LastLoadFileRequests", lastLoadReqs)
                .detail("LoadFileRequestsBatchThreshold", SERVER_KNOBS->FASTRESTORE_SCHED_LOAD_REQ_BATCHSIZE)
                .detail("LastDispatchSendLoadParamReqsForCurrentVB", curVBInflightReqs)
                .detail("LastDispatchSendLoadParamReqsForFutureVB", sendLoadParams)
                .detail("CpuUsage", self->cpuUsage)
                .detail("TargetCpuUsage", SERVER_KNOBS->FASTRESTORE_SCHED_TARGET_CPU_PERCENT)
                .detail("MaxCpuUsage", SERVER_KNOBS->FASTRESTORE_SCHED_MAX_CPU_PERCENT);

            // TODO: Pop old requests whose version batch <= finishedBatch.get()
            // TODO2: Simulate that a delayed request can be too old by introducing artificial delay
            if (SERVER_KNOBS->FASTRESTORE_EXPENSIVE_VALIDATION) {
                // Sanity check: All requests before and in finishedBatch must have been processed; otherwise,
                // those requests may cause a segmentation fault after the applier removes the batch data
                if (!self->loadingQueue.empty() && self->loadingQueue.top().batchIndex <= self->finishedBatch.get()) {
                    // Still has pending requests from an earlier or the current batchIndex, which should not
                    // happen
                    TraceEvent(SevError, "FastRestoreLoaderSchedulerHasOldLoadFileRequests")
                        .detail("FinishedBatchIndex", self->finishedBatch.get())
                        .detail("PendingRequest", self->loadingQueue.top().toString());
                }
                if (!self->sendingQueue.empty() && self->sendingQueue.top().batchIndex <= self->finishedBatch.get()) {
                    TraceEvent(SevError, "FastRestoreLoaderSchedulerHasOldSendRequests")
                        .detail("FinishedBatchIndex", self->finishedBatch.get())
                        .detail("PendingRequest", self->sendingQueue.top().toString());
                }
                if (!self->sendLoadParamQueue.empty() &&
                    self->sendLoadParamQueue.top().batchIndex <= self->finishedBatch.get()) {
                    TraceEvent(SevError, "FastRestoreLoaderSchedulerHasOldSendLoadParamRequests")
                        .detail("FinishedBatchIndex", self->finishedBatch.get())
                        .detail("PendingRequest", self->sendLoadParamQueue.top().toString());
                }
            }

            if (!self->sendingQueue.empty()) {
                // Only release one sendMutationsRequest at a time because it sends all data for a version batch
                // and it consumes a large amount of resources
                const RestoreSendMutationsToAppliersRequest& req = self->sendingQueue.top();
                // Dispatch the request if it is the next version batch to process or if CPU usage is low
                if (req.batchIndex - 1 == self->finishedSendingVB ||
                    self->cpuUsage < SERVER_KNOBS->FASTRESTORE_SCHED_TARGET_CPU_PERCENT) {
                    self->addActor.send(handleSendMutationsRequest(req, self));
                    self->sendingQueue.pop();
                }
            }
            // Decide when the node should pause processing other requests, e.g., load file requests
            // TODO: Revisit if we should have (self->inflightSendingReqs > 0 && self->inflightLoadingReqs > 0)
            if ((self->inflightSendingReqs > 0 && self->inflightLoadingReqs > 0) &&
                (self->inflightSendingReqs >= SERVER_KNOBS->FASTRESTORE_SCHED_INFLIGHT_SEND_REQS ||
                 self->inflightLoadingReqs >= SERVER_KNOBS->FASTRESTORE_SCHED_INFLIGHT_LOAD_REQS ||
                 (self->inflightSendingReqs >= 1 &&
                  self->cpuUsage >= SERVER_KNOBS->FASTRESTORE_SCHED_TARGET_CPU_PERCENT) ||
                 self->cpuUsage >= SERVER_KNOBS->FASTRESTORE_SCHED_MAX_CPU_PERCENT)) {
                if (self->inflightSendingReqs >= SERVER_KNOBS->FASTRESTORE_SCHED_INFLIGHT_SEND_REQS) {
                    TraceEvent(SevWarn, "FastRestoreLoaderTooManyInflightRequests")
                        .detail("VersionBatchesBlockedAtSendingMutationsToAppliers", self->inflightSendingReqs)
                        .detail("CpuUsage", self->cpuUsage)
                        .detail("InflightSendingReq", self->inflightSendingReqs)
                        .detail("InflightSendingReqThreshold", SERVER_KNOBS->FASTRESTORE_SCHED_INFLIGHT_SEND_REQS)
                        .detail("InflightLoadingReq", self->inflightLoadingReqs)
                        .detail("InflightLoadingReqThreshold", SERVER_KNOBS->FASTRESTORE_SCHED_INFLIGHT_LOAD_REQS);
                }
                wait(delay(SERVER_KNOBS->FASTRESTORE_SCHED_UPDATE_DELAY));
                updateProcessStats(self);
                continue;
            }
            // Dispatch queued requests for sending mutations per loading param
            while (!self->sendLoadParamQueue.empty()) { // dispatch the current VB first
                const RestoreLoaderSchedSendLoadParamRequest& req = self->sendLoadParamQueue.top();
                if (req.batchIndex - 1 > self->finishedSendingVB) { // future VB
                    break;
                } else {
                    req.toSched.send(Void());
                    self->sendLoadParamQueue.pop();
                }
            }
            sendLoadParams = 0;
            curVBInflightReqs = self->inflightSendLoadParamReqs[self->finishedSendingVB + 1];
            while (!self->sendLoadParamQueue.empty()) {
                const RestoreLoaderSchedSendLoadParamRequest& req = self->sendLoadParamQueue.top();
                if (curVBInflightReqs >= SERVER_KNOBS->FASTRESTORE_SCHED_INFLIGHT_SENDPARAM_THRESHOLD ||
                    sendLoadParams >= SERVER_KNOBS->FASTRESTORE_SCHED_SEND_FUTURE_VB_REQS_BATCH) {
                    // Too many future-VB requests have been released
                    break;
                } else {
                    req.toSched.send(Void());
                    self->sendLoadParamQueue.pop();
                    sendLoadParams++;
                }
            }

            // Dispatch loading backup file requests
            lastLoadReqs = 0;
            while (!self->loadingQueue.empty()) {
                if (lastLoadReqs >= SERVER_KNOBS->FASTRESTORE_SCHED_LOAD_REQ_BATCHSIZE) {
                    break;
                }
                const RestoreLoadFileRequest& req = self->loadingQueue.top();
                if (req.batchIndex <= self->finishedBatch.get()) {
                    TraceEvent(SevError, "FastRestoreLoaderDispatchRestoreLoadFileRequestTooOld")
                        .detail("FinishedBatchIndex", self->finishedBatch.get())
                        .detail("RequestBatchIndex", req.batchIndex);
                    req.reply.send(RestoreLoadFileReply(req.param, true));
                    self->loadingQueue.pop();
                    ASSERT(false); // Fail loudly so we notice if this ever happens
                } else {
                    self->addActor.send(handleLoadFileRequest(req, self));
                    self->loadingQueue.pop();
                    lastLoadReqs++;
                }
            }

            if (self->cpuUsage >= SERVER_KNOBS->FASTRESTORE_SCHED_TARGET_CPU_PERCENT) {
                wait(delay(SERVER_KNOBS->FASTRESTORE_SCHED_UPDATE_DELAY));
            }
            updateProcessStats(self);

            if (self->loadingQueue.empty() && self->sendingQueue.empty() && self->sendLoadParamQueue.empty()) {
                TraceEvent(SevDebug, "FastRestoreLoaderDispatchRequestsWaitOnRequests", self->id())
                    .detail("HasPendingRequests", self->hasPendingRequests->get());
                self->hasPendingRequests->set(false);
                wait(self->hasPendingRequests->onChange()); // CAREFUL: Improper request release may cause restore to get stuck here
            }
        }
    } catch (Error& e) {
        if (e.code() != error_code_actor_cancelled) {
            TraceEvent(SevError, "FastRestoreLoaderDispatchRequests").error(e, true);
            throw e;
        }
    }
    return Void();
}

ACTOR Future<Void> restoreLoaderCore(RestoreLoaderInterface loaderInterf, int nodeIndex, Database cx,
                                     RestoreControllerInterface ci) {
    state Reference<RestoreLoaderData> self =
        Reference<RestoreLoaderData>(new RestoreLoaderData(loaderInterf.id(), nodeIndex, ci));
    state Future<Void> error = actorCollection(self->addActor.getFuture());
    state ActorCollection actors(false); // actors whose errors can be ignored
    state Future<Void> exitRole = Never();
    state bool hasQueuedRequests = false;

    actors.add(updateProcessMetrics(self));
    actors.add(traceProcessMetrics(self, "RestoreLoader"));

    self->addActor.send(dispatchRequests(self));

    loop {
        state std::string requestTypeStr = "[Init]";

        try {
            choose {
                when(RestoreSimpleRequest req = waitNext(loaderInterf.heartbeat.getFuture())) {
                    requestTypeStr = "heartbeat";
                    actors.add(handleHeartbeat(req, loaderInterf.id()));
                }
                when(RestoreSysInfoRequest req = waitNext(loaderInterf.updateRestoreSysInfo.getFuture())) {
                    requestTypeStr = "updateRestoreSysInfo";
                    handleRestoreSysInfoRequest(req, self);
                }
                when(RestoreLoadFileRequest req = waitNext(loaderInterf.loadFile.getFuture())) {
                    requestTypeStr = "loadFile";
                    hasQueuedRequests = !self->loadingQueue.empty() || !self->sendingQueue.empty();
                    self->initBackupContainer(req.param.url);
                    self->loadingQueue.push(req);
                    if (!hasQueuedRequests) {
                        self->hasPendingRequests->set(true);
                    }
                }
                when(RestoreSendMutationsToAppliersRequest req = waitNext(loaderInterf.sendMutations.getFuture())) {
                    requestTypeStr = "sendMutations";
                    hasQueuedRequests = !self->loadingQueue.empty() || !self->sendingQueue.empty();
                    self->sendingQueue.push(req);
                    if (!hasQueuedRequests) {
                        self->hasPendingRequests->set(true);
                    }
                }
                when(RestoreVersionBatchRequest req = waitNext(loaderInterf.initVersionBatch.getFuture())) {
                    requestTypeStr = "initVersionBatch";
                    actors.add(handleInitVersionBatchRequest(req, self));
                }
                when(RestoreVersionBatchRequest req = waitNext(loaderInterf.finishVersionBatch.getFuture())) {
                    requestTypeStr = "finishVersionBatch";
                    actors.add(handleFinishVersionBatchRequest(req, self));
                }
                when(RestoreFinishRequest req = waitNext(loaderInterf.finishRestore.getFuture())) {
                    requestTypeStr = "finishRestore";
                    handleFinishRestoreRequest(req, self);
                    if (req.terminate) {
                        exitRole = Void();
                    }
                }
                when(wait(actors.getResult())) {}
                when(wait(exitRole)) {
                    TraceEvent("FastRestoreLoaderCoreExitRole", self->id());
                    break;
                }
                when(wait(error)) { TraceEvent("FastRestoreLoaderActorCollectionError", self->id()); }
            }
        } catch (Error& e) {
            TraceEvent(e.code() == error_code_broken_promise ? SevError : SevWarnAlways, "FastRestoreLoaderError",
                       self->id())
                .detail("RequestType", requestTypeStr)
                .error(e, true);
            actors.clear(false);
            break;
        }
    }

    return Void();
}

static inline bool _logMutationTooOld(KeyRangeMap<Version>* pRangeVersions, KeyRangeRef keyRange, Version v) {
    ASSERT(pRangeVersions != nullptr);
    auto ranges = pRangeVersions->intersectingRanges(keyRange);
    Version minVersion = MAX_VERSION;
    for (auto r = ranges.begin(); r != ranges.end(); ++r) {
        minVersion = std::min(minVersion, r->value());
    }
    ASSERT(minVersion != MAX_VERSION); // pRangeVersions is initialized over the entire keyspace, so ranges cannot be empty
    return minVersion >= v;
}

static inline bool logMutationTooOld(KeyRangeMap<Version>* pRangeVersions, MutationRef mutation, Version v) {
    return isRangeMutation(mutation)
               ? _logMutationTooOld(pRangeVersions, KeyRangeRef(mutation.param1, mutation.param2), v)
               : _logMutationTooOld(pRangeVersions, KeyRangeRef(singleKeyRange(mutation.param1)), v);
}
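
// A worked example with hypothetical values: if pRangeVersions maps ["a", "m") -> 100 and
// ["m", "\xff") -> 200, a log mutation clearing ["k", "p") intersects both ranges, so minVersion = 100.
// At commit version 150 the check 100 >= 150 fails and the mutation is kept; at commit version 90
// the check 100 >= 90 holds and the mutation is dropped as already covered by the range files.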

// Assume: Only update the local data if it (applierInterf) has not been set
void handleRestoreSysInfoRequest(const RestoreSysInfoRequest& req, Reference<RestoreLoaderData> self) {
    TraceEvent("FastRestoreLoader", self->id()).detail("HandleRestoreSysInfoRequest", self->id());
    ASSERT(self.isValid());

    // The loader has received the appliers' interfaces
    if (!self->appliersInterf.empty()) {
        req.reply.send(RestoreCommonReply(self->id()));
        return;
    }

    self->appliersInterf = req.sysInfo.appliers;
    // Update rangeVersions
    ASSERT(req.rangeVersions.size() > 0); // At least the min version of range files will be used
    ASSERT(self->rangeVersions.size() == 1); // rangeVersions has not been set
    for (auto rv = req.rangeVersions.begin(); rv != req.rangeVersions.end(); ++rv) {
        self->rangeVersions.insert(rv->first, rv->second);
    }

    // Debug message for the range versions in each loader
    auto ranges = self->rangeVersions.ranges();
    int i = 0;
    for (auto r = ranges.begin(); r != ranges.end(); ++r) {
        TraceEvent("FastRestoreLoader", self->id())
            .detail("RangeIndex", i++)
            .detail("RangeBegin", r->begin())
            .detail("RangeEnd", r->end())
            .detail("Version", r->value());
    }

    req.reply.send(RestoreCommonReply(self->id()));
}

// Parse a data block in a partitioned mutation log file and store mutations
// into "kvOpsIter" and samples into "samplesIter".
ACTOR static Future<Void> _parsePartitionedLogFileOnLoader(
    KeyRangeMap<Version>* pRangeVersions, NotifiedVersion* processedFileOffset,
    std::map<LoadingParam, VersionedMutationsMap>::iterator kvOpsIter,
    std::map<LoadingParam, SampledMutationsVec>::iterator samplesIter, LoaderCounters* cc,
    Reference<IBackupContainer> bc, RestoreAsset asset) {
    state Standalone<StringRef> buf = makeString(asset.len);
    state Reference<IAsyncFile> file = wait(bc->readFile(asset.filename));
    int rLen = wait(file->read(mutateString(buf), asset.len, asset.offset));
    if (rLen != asset.len) throw restore_bad_read();

    if (BUGGIFY && deterministicRandom()->random01() < 0.01) { // Simulate blob failures
        double i = deterministicRandom()->random01();
        if (i < 0.5) {
            throw http_request_failed();
        } else if (i < 0.7) {
            throw connection_failed();
        } else if (i < 0.8) {
            throw timed_out();
        } else if (i < 0.9) {
            throw lookup_failed();
        }
    }

    TraceEvent("FastRestoreLoaderDecodingLogFile")
        .detail("BatchIndex", asset.batchIndex)
        .detail("Filename", asset.filename)
        .detail("Offset", asset.offset)
        .detail("Length", asset.len);

    // Ensure data blocks in the same file are processed in order
    wait(processedFileOffset->whenAtLeast(asset.offset));
    ASSERT(processedFileOffset->get() == asset.offset);

    Arena tempArena;
    StringRefReader reader(buf, restore_corrupted_data());
    try {
        // Read block header
        if (reader.consume<int32_t>() != PARTITIONED_MLOG_VERSION) throw restore_unsupported_file_version();

        VersionedMutationsMap& kvOps = kvOpsIter->second;
        while (1) {
            // If EOF is reached or the first key length byte is 0xFF, the end of the block was reached.
            if (reader.eof() || *reader.rptr == 0xFF) break;

            // Deserialize messages written in saveMutationsToFile().
            LogMessageVersion msgVersion;
            msgVersion.version = reader.consumeNetworkUInt64();
            msgVersion.sub = reader.consumeNetworkUInt32();
            int msgSize = reader.consumeNetworkInt32();
            const uint8_t* message = reader.consume(msgSize);
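            // Each message consumed above is framed (in network byte order) as:
            //   uint64 version | uint32 sub-version | int32 msgSize | msgSize bytes of a serialized MutationRef.
            // This mirrors the four reads right above; the authoritative layout is whatever
            // saveMutationsToFile() writes, so treat this summary as a reading aid rather than a spec.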

            // Skip mutations out of the version range
            if (!asset.isInVersionRange(msgVersion.version)) continue;

            VersionedMutationsMap::iterator it;
            bool inserted;
            std::tie(it, inserted) = kvOps.emplace(msgVersion, MutationsVec());
            // A clear mutation can be split into multiple mutations with the same (version, sub).
            // See saveMutationsToFile(). Current tests only use one key range per backup, thus
            // only one clear mutation is generated (i.e., always inserted).
            ASSERT(inserted);

            ArenaReader rd(buf.arena(), StringRef(message, msgSize), AssumeVersion(currentProtocolVersion));
            MutationRef mutation;
            rd >> mutation;

            // Skip mutations whose commitVersion < the range kv's version
            if (logMutationTooOld(pRangeVersions, mutation, msgVersion.version)) {
                cc->oldLogMutations += 1;
                continue;
            }

            // Should this mutation be skipped?
            if (mutation.param1 >= asset.range.end ||
                (isRangeMutation(mutation) && mutation.param2 < asset.range.begin) ||
                (!isRangeMutation(mutation) && mutation.param1 < asset.range.begin)) {
                continue;
            }

            // Only apply mutations within the asset.range
            ASSERT(asset.removePrefix.size() == 0);
            if (isRangeMutation(mutation)) {
                mutation.param1 = mutation.param1 >= asset.range.begin ? mutation.param1 : asset.range.begin;
                mutation.param2 = mutation.param2 < asset.range.end ? mutation.param2 : asset.range.end;
                // Remove the prefix or add a prefix when we restore to a new key space
                if (asset.hasPrefix()) { // Avoid creating a new Key
                    mutation.param1 =
                        mutation.param1.removePrefix(asset.removePrefix).withPrefix(asset.addPrefix, tempArena);
                    mutation.param2 =
                        mutation.param2.removePrefix(asset.removePrefix).withPrefix(asset.addPrefix, tempArena);
                }
            } else {
                if (asset.hasPrefix()) { // Avoid creating a new Key
                    mutation.param1 =
                        mutation.param1.removePrefix(asset.removePrefix).withPrefix(asset.addPrefix, tempArena);
                }
            }

            TraceEvent(SevFRMutationInfo, "FastRestoreDecodePartitionedLogFile")
                .detail("CommitVersion", msgVersion.toString())
                .detail("ParsedMutation", mutation.toString());
            it->second.push_back_deep(it->second.arena(), mutation);
            cc->loadedLogBytes += mutation.totalSize();
            // Sample the data in the same way the storage server samples kvs
            ByteSampleInfo sampleInfo = isKeyValueInSample(KeyValueRef(mutation.param1, mutation.param2));
            if (sampleInfo.inSample) {
                cc->sampledLogBytes += sampleInfo.sampledSize;
                samplesIter->second.push_back_deep(samplesIter->second.arena(),
                                                   SampledMutation(mutation.param1, sampleInfo.sampledSize));
            }
        }

        // Make sure any remaining bytes in the block are 0xFF
        for (auto b : reader.remainder()) {
            if (b != 0xFF) throw restore_corrupted_data_padding();
        }
    } catch (Error& e) {
        TraceEvent(SevWarn, "FileRestoreCorruptLogFileBlock")
            .error(e)
            .detail("BatchIndex", asset.batchIndex)
            .detail("Filename", file->getFilename())
            .detail("BlockOffset", asset.offset)
            .detail("BlockLen", asset.len);
        throw;
    }
    processedFileOffset->set(asset.offset + asset.len);
    return Void();
}
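
// Note on ordering: one _parsePartitionedLogFileOnLoader actor runs per blockSize-sized block of a
// file (see _processLoadingParam below). The actors read their blocks in parallel, but each waits on
// processedFileOffset->whenAtLeast(asset.offset) before decoding and bumps the NotifiedVersion to
// asset.offset + asset.len when done, so blocks of the same file are decoded in file order.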

// Wrapper of _parsePartitionedLogFileOnLoader to retry on blob errors
ACTOR static Future<Void> parsePartitionedLogFileOnLoader(
    KeyRangeMap<Version>* pRangeVersions, NotifiedVersion* processedFileOffset,
    std::map<LoadingParam, VersionedMutationsMap>::iterator kvOpsIter,
    std::map<LoadingParam, SampledMutationsVec>::iterator samplesIter, LoaderCounters* cc,
    Reference<IBackupContainer> bc, RestoreAsset asset) {
    state int readFileRetries = 0;
    loop {
        try {
            wait(_parsePartitionedLogFileOnLoader(pRangeVersions, processedFileOffset, kvOpsIter, samplesIter, cc, bc,
                                                  asset));
            break;
        } catch (Error& e) {
            if (e.code() == error_code_restore_bad_read || e.code() == error_code_restore_unsupported_file_version ||
                e.code() == error_code_restore_corrupted_data_padding) { // non-retryable errors
                TraceEvent(SevError, "FileRestoreCorruptedPartitionedLogFileBlock").error(e);
                throw;
            } else if (e.code() == error_code_http_request_failed || e.code() == error_code_connection_failed ||
                       e.code() == error_code_timed_out || e.code() == error_code_lookup_failed) {
                // blob http request failure, retry
                TraceEvent(SevWarnAlways, "FastRestoreDecodedPartitionedLogFileConnectionFailure")
                    .detail("Retries", ++readFileRetries)
                    .error(e);
                wait(delayJittered(0.1));
            }
        }
    }
    return Void();
}
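
// Errors that match neither branch above are retried immediately with no jittered delay: only the four
// listed blob/network errors get the 0.1s backoff, and the corruption errors abort the restore outright.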

ACTOR Future<Void> _processLoadingParam(KeyRangeMap<Version>* pRangeVersions, LoadingParam param,
                                        Reference<LoaderBatchData> batchData, UID loaderID,
                                        Reference<IBackupContainer> bc) {
    // Temporary data structure for parsing log files into (version, <K, V, mutationType>)
    // Must use Standalone to save mutations; otherwise, the MutationRef memory will be corrupted
    // mutationMap: Key is the unique identifier for a batch of mutation logs at the same version
    state SerializedMutationListMap mutationMap;
    state NotifiedVersion processedFileOffset(0);
    state std::vector<Future<Void>> fileParserFutures;
    state std::map<LoadingParam, VersionedMutationsMap>::iterator kvOpsPerLPIter = batchData->kvOpsPerLP.end();
    state std::map<LoadingParam, SampledMutationsVec>::iterator samplesIter = batchData->sampleMutations.end();

    TraceEvent("FastRestoreLoaderProcessLoadingParam", loaderID)
        .detail("BatchIndex", param.asset.batchIndex)
        .detail("LoadingParam", param.toString());
    ASSERT(param.blockSize > 0);
    ASSERT(param.asset.offset % param.blockSize == 0); // Parsed file must be at a block boundary.
    ASSERT(batchData->kvOpsPerLP.find(param) == batchData->kvOpsPerLP.end());

    // NOTE: map's iterator is guaranteed to be stable, but pointer may not.
    bool inserted;
    std::tie(kvOpsPerLPIter, inserted) = batchData->kvOpsPerLP.emplace(param, VersionedMutationsMap());
    ASSERT(inserted);
    std::tie(samplesIter, inserted) = batchData->sampleMutations.emplace(param, SampledMutationsVec());
    ASSERT(inserted);

    for (int64_t j = param.asset.offset; j < param.asset.len; j += param.blockSize) {
        RestoreAsset subAsset = param.asset;
        subAsset.offset = j;
        subAsset.len = std::min<int64_t>(param.blockSize, param.asset.len - j);
        if (param.isRangeFile) {
            fileParserFutures.push_back(_parseRangeFileToMutationsOnLoader(
                kvOpsPerLPIter, samplesIter, &batchData->counters, bc, param.rangeVersion.get(), subAsset));
        } else {
            // TODO: Sanity check that the log file's range overlaps with the restored version range
            if (param.isPartitionedLog()) {
                fileParserFutures.push_back(parsePartitionedLogFileOnLoader(pRangeVersions, &processedFileOffset,
                                                                            kvOpsPerLPIter, samplesIter,
                                                                            &batchData->counters, bc, subAsset));
            } else {
                fileParserFutures.push_back(
                    parseLogFileToMutationsOnLoader(&processedFileOffset, &mutationMap, bc, subAsset));
            }
        }
    }
    wait(waitForAll(fileParserFutures));

    if (!param.isRangeFile && !param.isPartitionedLog()) {
        _parseSerializedMutation(pRangeVersions, kvOpsPerLPIter, &mutationMap, samplesIter, &batchData->counters,
                                 param.asset);
    }

    TraceEvent("FastRestoreLoaderProcessLoadingParamDone", loaderID)
        .detail("BatchIndex", param.asset.batchIndex)
        .detail("LoadingParam", param.toString());

    return Void();
}
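
// A hypothetical example of the block split above: for an asset with offset 0, len 10 MB, and
// param.blockSize 1 MB, the loop spawns ten sub-assets covering [0, 1 MB), [1 MB, 2 MB), ..., [9 MB, 10 MB),
// all parsed concurrently; log-file sub-assets are additionally ordered by processedFileOffset.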

// A loader can process multiple RestoreLoadFileRequests in parallel.
ACTOR Future<Void> handleLoadFileRequest(RestoreLoadFileRequest req, Reference<RestoreLoaderData> self) {
    state Reference<LoaderBatchData> batchData = self->batch[req.batchIndex];
    state bool isDuplicated = true;
    state bool printTrace = false;
    ASSERT(batchData.isValid());
    ASSERT(req.batchIndex > self->finishedBatch.get());
    bool paramExist = batchData->processedFileParams.find(req.param) != batchData->processedFileParams.end();
    bool isReady = paramExist ? batchData->processedFileParams[req.param].isReady() : false;

    batchData->loadFileReqs += 1;
    printTrace = (batchData->loadFileReqs % 10 == 1);
    // TODO: Make the actor priority lower than sendMutation priority. (Unsure it will help performance though)
    TraceEvent(printTrace ? SevInfo : SevFRDebugInfo, "FastRestoreLoaderPhaseLoadFile", self->id())
        .detail("BatchIndex", req.batchIndex)
        .detail("ProcessLoadParam", req.param.toString())
        .detail("NotProcessed", !paramExist)
        .detail("Processed", isReady)
        .detail("CurrentMemory", getSystemStatistics().processMemory);
    // The loader destroys batchData once the batch finishes and self->finishedBatch.set(req.batchIndex);
    ASSERT(self->finishedBatch.get() < req.batchIndex);

    wait(isSchedulable(self, req.batchIndex, __FUNCTION__));

    if (batchData->processedFileParams.find(req.param) == batchData->processedFileParams.end()) {
        TraceEvent(SevFRDebugInfo, "FastRestoreLoadFile", self->id())
            .detail("BatchIndex", req.batchIndex)
            .detail("ProcessLoadParam", req.param.toString());
        ASSERT(batchData->sampleMutations.find(req.param) == batchData->sampleMutations.end());
        batchData->processedFileParams[req.param] =
            _processLoadingParam(&self->rangeVersions, req.param, batchData, self->id(), self->bc);
        self->inflightLoadingReqs++;
        isDuplicated = false;
    } else {
        TraceEvent(SevFRDebugInfo, "FastRestoreLoadFile", self->id())
            .detail("BatchIndex", req.batchIndex)
            .detail("WaitOnProcessLoadParam", req.param.toString());
    }
    auto it = batchData->processedFileParams.find(req.param);
    ASSERT(it != batchData->processedFileParams.end());
    wait(it->second); // wait on the processing of the req.param.
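
    // Deduplication sketch: the first request for a given LoadingParam kicks off _processLoadingParam and
    // records its Future; a duplicate request (e.g., a controller retry) finds the recorded Future and
    // waits on it instead, so each file param is parsed at most once per version batch.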

    // Send sampled mutations back to the controller: batchData->sampleMutations[req.param]
    std::vector<Future<RestoreCommonReply>> fSendSamples;
    SampledMutationsVec& samples = batchData->sampleMutations[req.param];
    SampledMutationsVec sampleBatch = SampledMutationsVec(); // sampleBatch: Standalone pointer to the created object
    long sampleBatchSize = 0;
    for (int i = 0; i < samples.size(); ++i) {
        sampleBatchSize += samples[i].totalSize();
        sampleBatch.push_back_deep(sampleBatch.arena(), samples[i]); // TODO: may not need deep copy
        if (sampleBatchSize >= SERVER_KNOBS->FASTRESTORE_SAMPLE_MSG_BYTES) {
            fSendSamples.push_back(self->ci.samples.getReply(
                RestoreSamplesRequest(deterministicRandom()->randomUniqueID(), req.batchIndex, sampleBatch)));
            sampleBatchSize = 0;
            sampleBatch = SampledMutationsVec();
        }
    }
    if (sampleBatchSize > 0) {
        fSendSamples.push_back(self->ci.samples.getReply(
            RestoreSamplesRequest(deterministicRandom()->randomUniqueID(), req.batchIndex, sampleBatch)));
        sampleBatchSize = 0;
    }

    try {
        state int samplesMessages = fSendSamples.size();
        wait(waitForAll(fSendSamples));
    } catch (Error& e) { // In case ci.samples throws broken_promise due to an unstable network
        if (e.code() == error_code_broken_promise || e.code() == error_code_operation_cancelled) {
            TraceEvent(SevWarnAlways, "FastRestoreLoaderPhaseLoadFileSendSamples")
                .detail("SamplesMessages", samplesMessages)
                .error(e, true);
        } else {
            TraceEvent(SevError, "FastRestoreLoaderPhaseLoadFileSendSamplesUnexpectedError").error(e, true);
        }
    }

    // Ack the restore controller that the param is processed
    self->inflightLoadingReqs--;
    req.reply.send(RestoreLoadFileReply(req.param, isDuplicated));
    TraceEvent(printTrace ? SevInfo : SevFRDebugInfo, "FastRestoreLoaderPhaseLoadFileDone", self->id())
        .detail("BatchIndex", req.batchIndex)
        .detail("ProcessLoadParam", req.param.toString());

    return Void();
}

// Send buffered mutations to appliers.
// Do not need to block on low memory usage because this actor should not increase memory usage.
ACTOR Future<Void> handleSendMutationsRequest(RestoreSendMutationsToAppliersRequest req,
                                              Reference<RestoreLoaderData> self) {
    state Reference<LoaderBatchData> batchData;
    state Reference<LoaderBatchStatus> batchStatus;
    state bool isDuplicated = true;

    if (req.batchIndex <= self->finishedBatch.get()) {
        TraceEvent(SevWarn, "FastRestoreLoaderRestoreSendMutationsToAppliersRequestTooOld")
            .detail("FinishedBatchIndex", self->finishedBatch.get())
            .detail("RequestBatchIndex", req.batchIndex);
        req.reply.send(RestoreCommonReply(self->id(), isDuplicated));
        return Void();
    }

    batchData = self->batch[req.batchIndex];
    batchStatus = self->status[req.batchIndex];
    ASSERT(batchData.isValid() && batchStatus.isValid());
    // The loader destroys batchData once the batch finishes and self->finishedBatch.set(req.batchIndex);
    ASSERT(req.batchIndex > self->finishedBatch.get());
    TraceEvent("FastRestoreLoaderPhaseSendMutations", self->id())
        .detail("BatchIndex", req.batchIndex)
        .detail("UseRangeFile", req.useRangeFile)
        .detail("LoaderSendStatus", batchStatus->toString());
    // The VB must finish the loading phase before it can send mutations; update finishedLoadingVB for the scheduler
    self->finishedLoadingVB = std::max(self->finishedLoadingVB, req.batchIndex);

    // Ensure each file is sent exactly once by using batchStatus->sendAllLogs and batchStatus->sendAllRanges
    if (!req.useRangeFile) {
        if (!batchStatus->sendAllLogs.present()) { // Has not sent
            batchStatus->sendAllLogs = Never();
            isDuplicated = false;
            TraceEvent(SevInfo, "FastRestoreSendMutationsProcessLogRequest", self->id())
                .detail("BatchIndex", req.batchIndex)
                .detail("UseRangeFile", req.useRangeFile);
        } else if (!batchStatus->sendAllLogs.get().isReady()) { // In the process of sending
            TraceEvent(SevDebug, "FastRestoreSendMutationsWaitDuplicateLogRequest", self->id())
                .detail("BatchIndex", req.batchIndex)
                .detail("UseRangeFile", req.useRangeFile);
            wait(batchStatus->sendAllLogs.get());
        } else { // Already sent
            TraceEvent(SevDebug, "FastRestoreSendMutationsSkipDuplicateLogRequest", self->id())
                .detail("BatchIndex", req.batchIndex)
                .detail("UseRangeFile", req.useRangeFile);
        }
    } else {
        if (!batchStatus->sendAllRanges.present()) {
            batchStatus->sendAllRanges = Never();
            isDuplicated = false;
            TraceEvent(SevInfo, "FastRestoreSendMutationsProcessRangeRequest", self->id())
                .detail("BatchIndex", req.batchIndex)
                .detail("UseRangeFile", req.useRangeFile);
        } else if (!batchStatus->sendAllRanges.get().isReady()) {
            TraceEvent(SevDebug, "FastRestoreSendMutationsWaitDuplicateRangeRequest", self->id())
                .detail("BatchIndex", req.batchIndex)
                .detail("UseRangeFile", req.useRangeFile);
            wait(batchStatus->sendAllRanges.get());
        } else {
            TraceEvent(SevDebug, "FastRestoreSendMutationsSkipDuplicateRangeRequest", self->id())
                .detail("BatchIndex", req.batchIndex)
                .detail("UseRangeFile", req.useRangeFile);
        }
    }
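
    // The Optional<Future<Void>> fields above act as a three-state latch: not present (never requested),
    // present but not ready (send in flight; duplicates wait on the stored future), and present and
    // ready (done; duplicates are skipped). Never() marks the in-flight state until the send below
    // completes and overwrites it with Void().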

    if (!isDuplicated) {
        self->inflightSendingReqs++;
        vector<Future<Void>> fSendMutations;
        batchData->rangeToApplier = req.rangeToApplier;
        for (auto& [loadParam, kvOps] : batchData->kvOpsPerLP) {
            if (loadParam.isRangeFile == req.useRangeFile) {
                // Send the parsed mutations to the applier, who will apply them to the DB
                fSendMutations.push_back(
                    sendMutationsToApplier(&self->sendLoadParamQueue, &self->inflightSendLoadParamReqs,
                                           &self->finishedBatch, &kvOps, req.batchIndex, loadParam.asset,
                                           loadParam.isRangeFile, &batchData->rangeToApplier, &self->appliersInterf));
            }
        }
        wait(waitForAll(fSendMutations));
        self->inflightSendingReqs--;
        if (req.useRangeFile) {
            batchStatus->sendAllRanges = Void(); // Finished sending kvs parsed from range files
        } else {
            batchStatus->sendAllLogs = Void();
        }
        if ((batchStatus->sendAllRanges.present() && batchStatus->sendAllRanges.get().isReady()) &&
            (batchStatus->sendAllLogs.present() && batchStatus->sendAllLogs.get().isReady())) {
            // Both log and range files have been sent.
            self->finishedSendingVB = std::max(self->finishedSendingVB, req.batchIndex);
            batchData->kvOpsPerLP.clear();
        }
    }

    TraceEvent("FastRestoreLoaderPhaseSendMutationsDone", self->id())
        .detail("BatchIndex", req.batchIndex)
        .detail("UseRangeFile", req.useRangeFile)
        .detail("LoaderSendStatus", batchStatus->toString());
    req.reply.send(RestoreCommonReply(self->id(), isDuplicated));
    return Void();
}

void buildApplierRangeMap(KeyRangeMap<UID>* krMap, std::map<Key, UID>* pRangeToApplier) {
    std::map<Key, UID>::iterator beginKey = pRangeToApplier->begin();
    std::map<Key, UID>::iterator endKey = std::next(beginKey, 1);
    while (endKey != pRangeToApplier->end()) {
        krMap->insert(KeyRangeRef(beginKey->first, endKey->first), beginKey->second);
        beginKey = endKey;
        endKey++;
    }
    if (beginKey != pRangeToApplier->end()) {
        krMap->insert(KeyRangeRef(beginKey->first, normalKeys.end), beginKey->second);
    }
}
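
// A hypothetical worked example: given pRangeToApplier = { "" -> applierA, "m" -> applierB },
// buildApplierRangeMap yields krMap = { ["", "m") -> applierA, ["m", normalKeys.end) -> applierB },
// so splitMutation can cut a clear of ["k", "t") into ["k", "m") for applierA and ["m", "t") for applierB.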

// Assume: kvOps data are from the same RestoreAsset.
// Input: pkvOps: versioned kv mutations for the asset in the version batch (batchIndex)
//        isRangeFile: is pkvOps from a range file? Lets the receiver (applier) know whether the mutation is a log mutation;
//        pRangeToApplier: range-to-applierID mapping, deciding which applier is responsible for which range
//        pApplierInterfaces: applier interfaces to send the mutations to
ACTOR Future<Void> sendMutationsToApplier(
    std::priority_queue<RestoreLoaderSchedSendLoadParamRequest>* sendLoadParamQueue,
    std::map<int, int>* inflightSendLoadParamReqs, NotifiedVersion* finishedBatch, VersionedMutationsMap* pkvOps,
    int batchIndex, RestoreAsset asset, bool isRangeFile, std::map<Key, UID>* pRangeToApplier,
    std::map<UID, RestoreApplierInterface>* pApplierInterfaces) {
    state VersionedMutationsMap& kvOps = *pkvOps;
    state VersionedMutationsMap::iterator kvOp = kvOps.begin();
    state int kvCount = 0;
    state int splitMutationIndex = 0;
    state Version msgIndex = 1; // Monotonically increasing index for sent messages; must start at 1
    state std::vector<UID> applierIDs = getApplierIDs(*pRangeToApplier);
    state double msgSize = 0; // size of mutations in the message

    // Wait for the scheduler to kick it off
    Promise<Void> toSched;
    sendLoadParamQueue->push(RestoreLoaderSchedSendLoadParamRequest(batchIndex, toSched, now()));
    wait(toSched.getFuture());
    if (finishedBatch->get() >= batchIndex) {
        TraceEvent(SevError, "FastRestoreLoaderSendMutationToApplierLateRequest")
            .detail("FinishedBatchIndex", finishedBatch->get())
            .detail("RequestBatchIndex", batchIndex);
        ASSERT(false);
        return Void();
    }

    (*inflightSendLoadParamReqs)[batchIndex]++;

    TraceEvent("FastRestoreLoaderSendMutationToApplier")
        .detail("IsRangeFile", isRangeFile)
        .detail("EndVersion", asset.endVersion)
        .detail("RestoreAsset", asset.toString());

    // There should be no mutation at version asset.endVersion because endVersion is exclusive
    if (kvOps.lower_bound(LogMessageVersion(asset.endVersion)) != kvOps.end()) {
        TraceEvent(SevError, "FastRestoreLoaderSendMutationToApplier")
            .detail("BatchIndex", batchIndex)
            .detail("RestoreAsset", asset.toString())
            .detail("IsRangeFile", isRangeFile)
            .detail("Data loss at version", asset.endVersion);
    } else {
        // Ensure there is a mutation request sent at endVersion, so that the applier can advance its notifiedVersion
        kvOps[LogMessageVersion(asset.endVersion)] = MutationsVec(); // Empty mutation vector will be handled by applier
    }

    splitMutationIndex = 0;
    kvCount = 0;

    // applierVersionedMutationsBuffer is the mutation-and-its-version vector to be sent to each applier
    state std::map<UID, VersionedMutationsVec> applierVersionedMutationsBuffer;
    state int mIndex = 0;
    state LogMessageVersion commitVersion;
    state std::vector<Future<Void>> fSends;
    for (auto& applierID : applierIDs) {
        applierVersionedMutationsBuffer[applierID] = VersionedMutationsVec();
    }
    KeyRangeMap<UID> krMap;
    buildApplierRangeMap(&krMap, pRangeToApplier);
    for (kvOp = kvOps.begin(); kvOp != kvOps.end(); kvOp++) {
        commitVersion = kvOp->first;
        ASSERT(commitVersion.version >= asset.beginVersion);
        ASSERT(commitVersion.version <= asset.endVersion); // endVersion is an empty commit to ensure progress
        for (mIndex = 0; mIndex < kvOp->second.size(); mIndex++) {
            MutationRef& kvm = kvOp->second[mIndex];
            // Send the mutation to the applier
            if (isRangeMutation(kvm)) {
                MutationsVec mvector;
                Standalone<VectorRef<UID>> nodeIDs;
                // Because using a vector of mutations causes overhead, and range mutations should happen rarely,
                // we handle range mutations and key mutations differently to avoid memory copies
                splitMutation(krMap, kvm, mvector.arena(), mvector.contents(), nodeIDs.arena(), nodeIDs.contents());
                ASSERT(mvector.size() == nodeIDs.size());

                if (debugMutation("RestoreLoader", commitVersion.version, kvm)) {
                    TraceEvent e("DebugSplit");
                    int i = 0;
                    for (auto& [key, uid] : *pRangeToApplier) {
                        e.detail(format("Range%d", i).c_str(), printable(key))
                            .detail(format("UID%d", i).c_str(), uid.toString());
                        i++;
                    }
                }
                for (splitMutationIndex = 0; splitMutationIndex < mvector.size(); splitMutationIndex++) {
                    MutationRef mutation = mvector[splitMutationIndex];
                    UID applierID = nodeIDs[splitMutationIndex];
                    if (debugMutation("RestoreLoader", commitVersion.version, mutation)) {
                        TraceEvent("SplittedMutation")
                            .detail("Version", commitVersion.toString())
                            .detail("Mutation", mutation.toString());
                    }
                    // CAREFUL: The split mutations' lifetime is shorter than the for-loop
                    // Must use deep copy for split mutations
                    applierVersionedMutationsBuffer[applierID].push_back_deep(
                        applierVersionedMutationsBuffer[applierID].arena(), VersionedMutation(mutation, commitVersion));
                    msgSize += mutation.expectedSize();

                    kvCount++;
                }
            } else { // mutation operates on a particular key
                std::map<Key, UID>::iterator itlow = pRangeToApplier->upper_bound(kvm.param1);
                --itlow; // make sure itlow->first <= m.param1
                ASSERT(itlow->first <= kvm.param1);
                UID applierID = itlow->second;
                kvCount++;

                if (debugMutation("RestoreLoader", commitVersion.version, kvm)) {
                    TraceEvent("SendMutation")
                        .detail("Applier", applierID)
                        .detail("Version", commitVersion.toString())
                        .detail("Mutation", kvm.toString());
                }
                // kvm data is saved in pkvOps in batchData, so a shallow copy is ok here.
                applierVersionedMutationsBuffer[applierID].push_back(applierVersionedMutationsBuffer[applierID].arena(),
                                                                     VersionedMutation(kvm, commitVersion));
                msgSize += kvm.expectedSize();
            }
|
2020-03-17 09:20:02 +08:00
|
|
|
|
2020-04-14 04:59:12 +08:00
|
|
|
// Batch mutations across versions, up to FASTRESTORE_LOADER_SEND_MUTATION_MSG_BYTES bytes per message,
|
|
|
|
// to improve bandwidth from a loader to the appliers
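// Illustrative arithmetic (the knob value here is assumed, not its actual default): with a 1 MB
// threshold and ~100-byte mutations, roughly 10,000 mutations are coalesced into one request
// before flushing.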
|
2020-04-14 08:28:21 +08:00
|
|
|
if (msgSize >= SERVER_KNOBS->FASTRESTORE_LOADER_SEND_MUTATION_MSG_BYTES) {
|
2020-04-22 13:21:59 +08:00
|
|
|
std::vector<std::pair<UID, RestoreSendVersionedMutationsRequest>> requests;
|
2020-04-22 13:04:42 +08:00
|
|
|
for (const UID& applierID : applierIDs) {
|
|
|
|
requests.emplace_back(
|
|
|
|
applierID, RestoreSendVersionedMutationsRequest(batchIndex, asset, msgIndex, isRangeFile,
|
|
|
|
applierVersionedMutationsBuffer[applierID]));
|
|
|
|
}
|
2020-08-18 11:34:33 +08:00
|
|
|
TraceEvent(SevInfo, "FastRestoreLoaderSendMutationToApplier")
|
2020-04-22 13:04:42 +08:00
|
|
|
.detail("MessageIndex", msgIndex)
|
|
|
|
.detail("RestoreAsset", asset.toString())
|
|
|
|
.detail("Requests", requests.size());
|
2020-04-26 08:21:17 +08:00
|
|
|
fSends.push_back(sendBatchRequests(&RestoreApplierInterface::sendMutationVector, *pApplierInterfaces,
|
|
|
|
requests, TaskPriority::RestoreLoaderSendMutations));
|
2020-04-22 13:04:42 +08:00
|
|
|
msgIndex++;
|
|
|
|
msgSize = 0;
|
|
|
|
for (auto& applierID : applierIDs) {
|
|
|
|
applierVersionedMutationsBuffer[applierID] = VersionedMutationsVec();
|
|
|
|
}
|
2020-03-17 09:20:02 +08:00
|
|
|
}
|
2020-04-14 04:59:12 +08:00
|
|
|
} // Mutations at the same LogMessageVersion
|
2019-10-17 11:30:11 +08:00
|
|
|
} // all versions of mutations in the same file
|
2019-05-23 04:30:33 +08:00
|
|
|
|
2020-04-14 08:28:21 +08:00
|
|
|
// Send the remaining mutations in applierVersionedMutationsBuffer
|
|
|
|
if (msgSize > 0) {
|
|
|
|
// TODO: Sanity check each asset has been received exactly once!
|
2020-04-22 13:21:59 +08:00
|
|
|
std::vector<std::pair<UID, RestoreSendVersionedMutationsRequest>> requests;
|
2020-04-22 13:04:42 +08:00
|
|
|
for (const UID& applierID : applierIDs) {
|
|
|
|
requests.emplace_back(applierID,
|
|
|
|
RestoreSendVersionedMutationsRequest(batchIndex, asset, msgIndex, isRangeFile,
|
|
|
|
applierVersionedMutationsBuffer[applierID]));
|
|
|
|
}
|
2020-08-18 11:34:33 +08:00
|
|
|
TraceEvent(SevInfo, "FastRestoreLoaderSendMutationToApplier")
|
2020-04-22 13:04:42 +08:00
|
|
|
.detail("MessageIndex", msgIndex)
|
|
|
|
.detail("RestoreAsset", asset.toString())
|
|
|
|
.detail("Requests", requests.size());
|
2020-04-26 08:21:17 +08:00
|
|
|
fSends.push_back(sendBatchRequests(&RestoreApplierInterface::sendMutationVector, *pApplierInterfaces, requests,
|
|
|
|
TaskPriority::RestoreLoaderSendMutations));
|
2020-04-14 08:28:21 +08:00
|
|
|
}
|
2020-04-26 08:21:17 +08:00
|
|
|
wait(waitForAll(fSends));
|
2020-04-14 08:28:21 +08:00
|
|
|
|
2020-08-16 22:38:30 +08:00
|
|
|
(*inflightSendLoadParamReqs)[batchIndex]--;
|
2020-08-20 01:39:49 +08:00
|
|
|
|
2020-08-25 08:27:40 +08:00
|
|
|
if (finishedBatch->get() < batchIndex) {
|
2020-08-20 01:39:49 +08:00
|
|
|
kvOps = VersionedMutationsMap(); // Free memory for parsed mutations at the restore asset.
|
|
|
|
TraceEvent("FastRestoreLoaderSendMutationToApplierDone")
|
|
|
|
.detail("BatchIndex", batchIndex)
|
|
|
|
.detail("RestoreAsset", asset.toString())
|
|
|
|
.detail("Mutations", kvCount);
|
|
|
|
} else {
|
|
|
|
TraceEvent(SevWarnAlways, "FastRestoreLoaderSendMutationToApplierDoneTooLate")
|
|
|
|
.detail("BatchIndex", batchIndex)
|
|
|
|
.detail("FinishedBatchIndex", finishedBatch->get())
|
|
|
|
.detail("RestoreAsset", asset.toString())
|
|
|
|
.detail("Mutations", kvCount);
|
|
|
|
}
|
|
|
|
|
2019-05-23 04:30:33 +08:00
|
|
|
return Void();
|
|
|
|
}
|
2019-05-10 11:55:44 +08:00
|
|
|
|
2020-07-09 02:08:23 +08:00
|
|
|
// Splits a clear-range mutation among appliers and, on return, puts the split mutations
|
|
|
|
// and their applier IDs into "mvector" and "nodeIDs".
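// Each emitted clear is the overlap of the mutation with one applier range:
// [max(m.param1, range.begin), min(m.param2, range.end)). E.g., clearing ["c","p") against an
// applier range ["m","z") yields ClearRange("m","p") (illustrative keys).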
|
2020-07-09 05:14:28 +08:00
|
|
|
void splitMutation(const KeyRangeMap<UID>& krMap, MutationRef m, Arena& mvector_arena, VectorRef<MutationRef>& mvector,
|
|
|
|
Arena& nodeIDs_arena, VectorRef<UID>& nodeIDs) {
|
2020-05-01 10:12:31 +08:00
|
|
|
TraceEvent(SevDebug, "FastRestoreSplitMutation").detail("Mutation", m.toString());
|
2020-07-09 02:08:23 +08:00
|
|
|
ASSERT(mvector.empty());
|
|
|
|
ASSERT(nodeIDs.empty());
|
2020-07-09 00:19:05 +08:00
|
|
|
auto r = krMap.intersectingRanges(KeyRangeRef(m.param1, m.param2));
|
|
|
|
for (auto i = r.begin(); i != r.end(); ++i) {
|
|
|
|
// Calculate the overlap range
|
|
|
|
KeyRef rangeBegin = m.param1 > i->range().begin ? m.param1 : i->range().begin;
|
|
|
|
KeyRef rangeEnd = m.param2 < i->range().end ? m.param2 : i->range().end;
|
|
|
|
mvector.push_back_deep(mvector_arena, MutationRef(MutationRef::ClearRange, rangeBegin, rangeEnd));
|
|
|
|
nodeIDs.push_back(nodeIDs_arena, i->value());
|
2019-05-10 11:55:44 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-08-02 08:00:13 +08:00
|
|
|
// key_input format:
|
|
|
|
// [logRangeMutation.first][hash_value_of_commit_version:1B][bigEndian64(commitVersion)][bigEndian32(part)]
|
2019-05-31 02:18:24 +08:00
|
|
|
// value_input: serialized binary of mutations at the same version
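// A worked example of the key layout (illustrative bytes): with an empty logRangeMutation.first,
// commitVersion 0x0000000000000102, and part 3, the key is
//   [hash:1B][00 00 00 00 00 00 01 02][00 00 00 03]
// i.e., 1 + 8 + 4 = 13 bytes; big-endian encoding keeps keys sorted by (version, part).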
|
2020-04-14 09:09:29 +08:00
|
|
|
bool concatenateBackupMutationForLogFile(SerializedMutationListMap* pMutationMap, Standalone<StringRef> key_input,
|
|
|
|
Standalone<StringRef> val_input, const RestoreAsset& asset) {
|
2019-08-02 08:00:13 +08:00
|
|
|
SerializedMutationListMap& mutationMap = *pMutationMap;
|
2019-11-23 05:12:04 +08:00
|
|
|
const int key_prefix_len = sizeof(uint8_t) + sizeof(Version) + sizeof(uint32_t);
|
2019-05-31 02:18:24 +08:00
|
|
|
|
2020-02-19 05:21:29 +08:00
|
|
|
StringRefReader readerKey(key_input, restore_corrupted_data()); // read key_input!
|
2019-11-23 05:12:04 +08:00
|
|
|
int logRangeMutationFirstLength = key_input.size() - key_prefix_len;
|
2019-05-10 11:55:44 +08:00
|
|
|
bool concatenated = false;
|
|
|
|
|
2019-11-23 05:12:04 +08:00
|
|
|
ASSERT_WE_THINK(key_input.size() >= key_prefix_len);
|
2019-05-10 11:55:44 +08:00
|
|
|
|
2019-08-02 08:00:13 +08:00
|
|
|
if (logRangeMutationFirstLength > 0) {
|
|
|
|
// Strip out the [logRangeMutation.first]; otherwise, the readerKey.consume below will produce a wrong value
|
|
|
|
readerKey.consume(logRangeMutationFirstLength);
|
2019-05-10 11:55:44 +08:00
|
|
|
}
|
|
|
|
|
2019-08-02 09:20:42 +08:00
|
|
|
readerKey.consume<uint8_t>(); // uint8_t hashValue = readerKey.consume<uint8_t>()
|
2019-11-23 03:47:25 +08:00
|
|
|
Version commitVersion = readerKey.consumeNetworkUInt64();
|
2019-12-20 08:50:39 +08:00
|
|
|
// Skip mutations outside [asset.beginVersion, asset.endVersion), the only range we process right now
|
2019-12-24 07:01:27 +08:00
|
|
|
if (!asset.isInVersionRange(commitVersion)) {
|
2019-12-20 08:50:39 +08:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2019-08-02 08:00:13 +08:00
|
|
|
uint32_t part = readerKey.consumeNetworkUInt32();
|
|
|
|
// Use commitVersion as id
|
2019-11-23 03:47:25 +08:00
|
|
|
Standalone<StringRef> id = StringRef((uint8_t*)&commitVersion, sizeof(Version));
|
2019-05-10 11:55:44 +08:00
|
|
|
|
2020-02-18 06:36:09 +08:00
|
|
|
auto it = mutationMap.find(id);
|
|
|
|
if (it == mutationMap.end()) {
|
2020-04-14 09:09:29 +08:00
|
|
|
mutationMap.emplace(id, std::make_pair(val_input, 0));
|
2019-08-02 08:00:13 +08:00
|
|
|
if (part != 0) {
|
2020-05-02 01:27:01 +08:00
|
|
|
TraceEvent(SevError, "FastRestoreLoader")
|
2020-02-25 08:57:31 +08:00
|
|
|
.detail("FirstPartNotZero", part)
|
|
|
|
.detail("KeyInput", getHexString(key_input));
|
2019-05-10 11:55:44 +08:00
|
|
|
}
|
2019-09-04 06:50:21 +08:00
|
|
|
} else { // Concatenate the val string with the same commitVersion
|
2020-04-14 09:09:29 +08:00
|
|
|
it->second.first =
|
|
|
|
it->second.first.contents().withSuffix(val_input.contents()); // Assign the new Arena to the map's value
|
|
|
|
auto& currentPart = it->second.second;
|
2020-02-25 08:57:31 +08:00
|
|
|
if (part != (currentPart + 1)) {
|
2019-05-31 02:18:24 +08:00
|
|
|
// Check if the same range or log file has been processed more than once!
|
2020-05-02 01:27:01 +08:00
|
|
|
TraceEvent(SevError, "FastRestoreLoader")
|
2020-02-25 08:57:31 +08:00
|
|
|
.detail("CurrentPart1", currentPart)
|
|
|
|
.detail("CurrentPart2", part)
|
|
|
|
.detail("KeyInput", getHexString(key_input))
|
|
|
|
.detail("Hint", "Check if the same range or log file has been processed more than once");
|
2019-05-10 11:55:44 +08:00
|
|
|
}
|
2020-02-25 08:57:31 +08:00
|
|
|
currentPart = part;
|
2019-05-10 11:55:44 +08:00
|
|
|
concatenated = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
return concatenated;
|
|
|
|
}
|
|
|
|
|
2019-08-02 08:00:13 +08:00
|
|
|
// Parse the kv pair (version, serialized_mutation), which are the results parsed from log file, into
|
|
|
|
// (version, <K, V, mutationType>) pair;
|
|
|
|
// Put the parsed versioned mutations into *pkvOps.
|
2020-02-19 08:41:59 +08:00
|
|
|
//
|
2019-08-02 08:00:13 +08:00
|
|
|
// Input key: [commitVersion_of_the_mutation_batch:uint64_t];
|
|
|
|
// Input value: [includeVersion:uint64_t][val_length:uint32_t][encoded_list_of_mutations], where
|
|
|
|
// includeVersion is the serialized version in the batch commit. It is not the commitVersion in Input key.
|
2020-02-12 03:44:53 +08:00
|
|
|
//
|
2019-08-02 08:00:13 +08:00
|
|
|
// val_length is always equal to (val.size() - 12); otherwise,
|
|
|
|
// we may not have the entire mutation list for the version.
|
|
|
|
// encoded_list_of_mutations: [mutation1][mutation2]...[mutationk], where
|
|
|
|
// a mutation is encoded as [type:uint32_t][keyLength:uint32_t][valueLength:uint32_t][keyContent][valueContent]
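// A worked example (illustrative): a single SetValue of key "k" and value "v" encodes as
//   [type:4B][kLen=1:4B][vLen=1:4B]["k"]["v"] = 14 bytes,
// so val = [includeVersion:8B][val_length=14:4B][those 14 bytes], val.size() = 26, and
// val_length == val.size() - 12 holds.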
|
2020-04-16 04:32:52 +08:00
|
|
|
void _parseSerializedMutation(KeyRangeMap<Version>* pRangeVersions,
|
|
|
|
std::map<LoadingParam, VersionedMutationsMap>::iterator kvOpsIter,
|
2019-12-03 06:33:31 +08:00
|
|
|
SerializedMutationListMap* pmutationMap,
|
2020-08-05 04:35:36 +08:00
|
|
|
std::map<LoadingParam, SampledMutationsVec>::iterator samplesIter, LoaderCounters* cc,
|
2020-02-19 08:41:59 +08:00
|
|
|
const RestoreAsset& asset) {
|
2019-11-14 02:57:21 +08:00
|
|
|
VersionedMutationsMap& kvOps = kvOpsIter->second;
|
2020-08-05 04:35:36 +08:00
|
|
|
SampledMutationsVec& samples = samplesIter->second;
|
2019-08-02 08:00:13 +08:00
|
|
|
SerializedMutationListMap& mutationMap = *pmutationMap;
|
|
|
|
|
2020-06-25 12:25:30 +08:00
|
|
|
TraceEvent(SevFRMutationInfo, "FastRestoreLoaderParseSerializedLogMutation")
|
2020-08-26 00:31:34 +08:00
|
|
|
.detail("BatchIndex", asset.batchIndex)
|
2020-06-25 12:25:30 +08:00
|
|
|
.detail("RestoreAsset", asset.toString());
|
2020-06-23 10:47:14 +08:00
|
|
|
|
2020-06-19 08:40:16 +08:00
|
|
|
Arena tempArena;
|
2019-08-02 08:00:13 +08:00
|
|
|
for (auto& m : mutationMap) {
|
2019-05-10 11:55:44 +08:00
|
|
|
StringRef k = m.first.contents();
|
2020-04-14 09:09:29 +08:00
|
|
|
StringRef val = m.second.first.contents();
|
2019-05-10 11:55:44 +08:00
|
|
|
|
2020-02-19 05:21:29 +08:00
|
|
|
StringRefReader kReader(k, restore_corrupted_data());
|
2019-05-31 02:18:24 +08:00
|
|
|
uint64_t commitVersion = kReader.consume<uint64_t>(); // Consume little-endian data
|
2019-12-20 08:50:39 +08:00
|
|
|
// We have already filtered out commits not in [beginVersion, endVersion) when concatenating kv pairs from the log file
|
2019-12-23 09:16:40 +08:00
|
|
|
ASSERT_WE_THINK(asset.isInVersionRange(commitVersion));
|
2019-05-10 11:55:44 +08:00
|
|
|
|
2020-02-19 05:21:29 +08:00
|
|
|
StringRefReader vReader(val, restore_corrupted_data());
|
2019-05-31 02:18:24 +08:00
|
|
|
vReader.consume<uint64_t>(); // Consume the includeVersion
|
2019-11-23 03:47:25 +08:00
|
|
|
// TODO(xumengpanda): verify the protocol version is compatible and raise error if needed
|
|
|
|
|
|
|
|
// Parse the little-endian value; confirmed to be correct.
|
|
|
|
uint32_t val_length_decoded = vReader.consume<uint32_t>();
|
|
|
|
ASSERT(val_length_decoded == val.size() - sizeof(uint64_t) - sizeof(uint32_t));
|
2019-05-10 11:55:44 +08:00
|
|
|
|
2020-03-27 01:03:11 +08:00
|
|
|
int sub = 0;
|
2019-05-10 11:55:44 +08:00
|
|
|
while (1) {
|
|
|
|
// Stop when we reach the end of the string
|
2019-08-02 08:00:13 +08:00
|
|
|
if (vReader.eof()) {
|
2019-05-10 11:55:44 +08:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2019-05-31 02:18:24 +08:00
|
|
|
uint32_t type = vReader.consume<uint32_t>();
|
|
|
|
uint32_t kLen = vReader.consume<uint32_t>();
|
|
|
|
uint32_t vLen = vReader.consume<uint32_t>();
|
2019-08-02 08:00:13 +08:00
|
|
|
const uint8_t* k = vReader.consume(kLen);
|
|
|
|
const uint8_t* v = vReader.consume(vLen);
|
2019-05-10 11:55:44 +08:00
|
|
|
|
2019-08-02 08:00:13 +08:00
|
|
|
MutationRef mutation((MutationRef::Type)type, KeyRef(k, kLen), KeyRef(v, vLen));
|
2019-12-23 09:16:40 +08:00
|
|
|
// Should this mutation be skipped?
|
2020-04-16 04:32:52 +08:00
|
|
|
// Skip mutations whose commitVersion < the range kv's version
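// For example (illustrative versions): if the range-file snapshot covering key k was taken at
// version 100, a log mutation on k at commitVersion 90 is already reflected in that snapshot and
// must be dropped; pRangeVersions records these per-range snapshot versions.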
|
|
|
|
if (logMutationTooOld(pRangeVersions, mutation, commitVersion)) {
|
2020-04-18 09:32:14 +08:00
|
|
|
cc->oldLogMutations += 1;
|
2020-04-16 04:32:52 +08:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2019-12-23 09:16:40 +08:00
|
|
|
if (mutation.param1 >= asset.range.end ||
|
|
|
|
(isRangeMutation(mutation) && mutation.param2 < asset.range.begin) ||
|
|
|
|
(!isRangeMutation(mutation) && mutation.param1 < asset.range.begin)) {
|
|
|
|
continue;
|
|
|
|
}
|
2020-06-22 13:18:07 +08:00
|
|
|
// Only apply mutations within asset.range, applying removePrefix and addPrefix as needed
|
2020-06-23 02:43:24 +08:00
|
|
|
ASSERT(asset.removePrefix.size() == 0);
|
2019-12-20 08:50:39 +08:00
|
|
|
if (isRangeMutation(mutation)) {
|
|
|
|
mutation.param1 = mutation.param1 >= asset.range.begin ? mutation.param1 : asset.range.begin;
|
|
|
|
mutation.param2 = mutation.param2 < asset.range.end ? mutation.param2 : asset.range.end;
|
2020-06-19 01:35:41 +08:00
|
|
|
// Remove prefix or add prefix if we restore data to a new key space
|
2020-06-24 01:43:28 +08:00
|
|
|
if (asset.hasPrefix()) { // Avoid creating new Key
|
|
|
|
mutation.param1 =
|
|
|
|
mutation.param1.removePrefix(asset.removePrefix).withPrefix(asset.addPrefix, tempArena);
|
|
|
|
mutation.param2 =
|
|
|
|
mutation.param2.removePrefix(asset.removePrefix).withPrefix(asset.addPrefix, tempArena);
|
|
|
|
}
|
2020-06-19 01:35:41 +08:00
|
|
|
} else {
|
2020-06-24 01:43:28 +08:00
|
|
|
if (asset.hasPrefix()) { // Avoid creating new Key
|
|
|
|
mutation.param1 =
|
|
|
|
mutation.param1.removePrefix(asset.removePrefix).withPrefix(asset.addPrefix, tempArena);
|
|
|
|
}
|
2019-12-20 08:50:39 +08:00
|
|
|
}
|
2019-12-23 09:16:40 +08:00
|
|
|
|
2020-08-01 07:00:15 +08:00
|
|
|
cc->loadedLogBytes += mutation.totalSize();
|
2020-02-12 03:44:53 +08:00
|
|
|
|
2020-04-01 07:00:51 +08:00
|
|
|
TraceEvent(SevFRMutationInfo, "FastRestoreDecodeLogFile")
|
2019-11-05 08:10:08 +08:00
|
|
|
.detail("CommitVersion", commitVersion)
|
|
|
|
.detail("ParsedMutation", mutation.toString());
|
2020-03-27 01:03:11 +08:00
|
|
|
|
|
|
|
auto it = kvOps.insert(std::make_pair(LogMessageVersion(commitVersion, sub++), MutationsVec()));
|
|
|
|
ASSERT(it.second); // inserted is true
|
2020-03-28 04:07:08 +08:00
|
|
|
ASSERT(sub < std::numeric_limits<int32_t>::max()); // range file mutation uses int32_max as subversion
|
2020-02-22 03:47:51 +08:00
|
|
|
it.first->second.push_back_deep(it.first->second.arena(), mutation);
|
2020-03-27 01:03:11 +08:00
|
|
|
|
2020-08-07 07:47:08 +08:00
|
|
|
// Sample data the same way a storage server (SS) samples bytes
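// A sketch of the sampling math (inferred from how SS byte sampling behaves): a kv of size s is
// kept with some probability p < 1 and, when kept, recorded with sampledSize ~ s / p, so the
// expected sampled byte total matches the true byte total.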
|
|
|
|
ByteSampleInfo sampleInfo = isKeyValueInSample(KeyValueRef(mutation.param1, mutation.param2));
|
|
|
|
if (sampleInfo.inSample) {
|
|
|
|
cc->sampledLogBytes += sampleInfo.sampledSize;
|
|
|
|
samples.push_back_deep(samples.arena(), SampledMutation(mutation.param1, sampleInfo.sampledSize));
|
2019-12-03 06:33:31 +08:00
|
|
|
}
|
2019-08-02 08:00:13 +08:00
|
|
|
ASSERT_WE_THINK(kLen >= 0 && kLen < val.size());
|
|
|
|
ASSERT_WE_THINK(vLen >= 0 && vLen < val.size());
|
2019-05-10 11:55:44 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-05-31 02:18:24 +08:00
|
|
|
// Parsing the data blocks in a range file
|
2020-02-04 08:17:39 +08:00
|
|
|
// kvOpsIter: saves the parsed versioned-mutations for the specific LoadingParam;
|
|
|
|
// samplesIter: saves the sampled mutations from the parsed versioned-mutations;
|
|
|
|
// bc: backup container to read the backup file
|
|
|
|
// version: the version the parsed mutations should be at
|
|
|
|
// asset: RestoreAsset about which backup data should be parsed
|
2019-11-21 13:04:18 +08:00
|
|
|
ACTOR static Future<Void> _parseRangeFileToMutationsOnLoader(
|
2019-12-03 06:33:31 +08:00
|
|
|
std::map<LoadingParam, VersionedMutationsMap>::iterator kvOpsIter,
|
2020-08-05 04:35:36 +08:00
|
|
|
std::map<LoadingParam, SampledMutationsVec>::iterator samplesIter, LoaderCounters* cc,
|
|
|
|
Reference<IBackupContainer> bc, Version version, RestoreAsset asset) {
|
2019-11-14 02:57:21 +08:00
|
|
|
state VersionedMutationsMap& kvOps = kvOpsIter->second;
|
2020-08-05 04:35:36 +08:00
|
|
|
state SampledMutationsVec& sampleMutations = samplesIter->second;
|
2019-08-02 08:00:13 +08:00
|
|
|
|
2020-06-25 13:10:54 +08:00
|
|
|
TraceEvent(SevFRDebugInfo, "FastRestoreDecodedRangeFile")
|
2020-08-26 00:31:34 +08:00
|
|
|
.detail("BatchIndex", asset.batchIndex)
|
2019-12-20 08:50:39 +08:00
|
|
|
.detail("Filename", asset.filename)
|
|
|
|
.detail("Version", version)
|
2019-12-21 14:00:36 +08:00
|
|
|
.detail("BeginVersion", asset.beginVersion)
|
2020-06-19 08:40:16 +08:00
|
|
|
.detail("EndVersion", asset.endVersion)
|
|
|
|
.detail("RestoreAsset", asset.toString());
|
2019-12-21 14:24:32 +08:00
|
|
|
// Sanity check the range file is within the restored version range
|
2019-12-24 07:01:27 +08:00
|
|
|
ASSERT_WE_THINK(asset.isInVersionRange(version));
|
2019-12-20 08:50:39 +08:00
|
|
|
|
2020-04-28 02:18:04 +08:00
|
|
|
state Standalone<VectorRef<KeyValueRef>> blockData;
|
2020-09-09 21:40:41 +08:00
|
|
|
// Retry reads on transient failures (e.g., blob HTTP errors)
|
2020-09-09 22:23:18 +08:00
|
|
|
state int readFileRetries = 0;
|
|
|
|
loop {
|
|
|
|
try {
|
|
|
|
// All key-value pairs in the same range file share the same version: rangeFile.version.
|
|
|
|
Reference<IAsyncFile> inFile = wait(bc->readFile(asset.filename));
|
|
|
|
Standalone<VectorRef<KeyValueRef>> kvs =
|
|
|
|
wait(fileBackup::decodeRangeFileBlock(inFile, asset.offset, asset.len));
|
|
|
|
TraceEvent("FastRestoreLoaderDecodedRangeFile")
|
|
|
|
.detail("BatchIndex", asset.batchIndex)
|
|
|
|
.detail("Filename", asset.filename)
|
|
|
|
.detail("DataSize", kvs.contents().size());
|
|
|
|
blockData = kvs;
|
|
|
|
break;
|
|
|
|
} catch (Error& e) {
|
|
|
|
if (e.code() == error_code_restore_bad_read || e.code() == error_code_restore_unsupported_file_version ||
|
|
|
|
e.code() == error_code_restore_corrupted_data_padding) { // non-retriable errors
|
|
|
|
TraceEvent(SevError, "FileRestoreCorruptedRangeFileBlock").error(e);
|
|
|
|
throw;
|
|
|
|
} else if (e.code() == error_code_http_request_failed || e.code() == error_code_connection_failed ||
|
|
|
|
e.code() == error_code_timed_out || e.code() == error_code_lookup_failed) {
|
|
|
|
// blob http request failure, retry
|
|
|
|
TraceEvent(SevWarnAlways, "FastRestoreDecodedRangeFileConnectionFailure")
|
|
|
|
.detail("Retries", ++readFileRetries)
|
|
|
|
.error(e);
|
|
|
|
wait(delayJittered(0.1));
|
|
|
|
}
|
|
|
|
}
|
2020-04-22 04:42:24 +08:00
|
|
|
}
|
2019-05-10 11:55:44 +08:00
|
|
|
|
2019-08-02 08:00:13 +08:00
|
|
|
// First and last key are the range for this file
|
2019-11-26 14:31:53 +08:00
|
|
|
KeyRange fileRange = KeyRangeRef(blockData.front().key, blockData.back().key);
|
2019-05-10 11:55:44 +08:00
|
|
|
|
2019-08-02 08:00:13 +08:00
|
|
|
// If fileRange doesn't intersect restore range then we're done.
|
2019-12-20 08:50:39 +08:00
|
|
|
if (!fileRange.intersects(asset.range)) {
|
2019-08-02 08:00:13 +08:00
|
|
|
return Void();
|
|
|
|
}
|
2019-05-10 11:55:44 +08:00
|
|
|
|
2019-08-02 08:00:13 +08:00
|
|
|
// We know the file range intersects the restore range but there could still be keys outside the restore range.
|
|
|
|
// Find the subvector of kv pairs that intersect the restore range.
|
|
|
|
// Note that the first and last keys are just the range endpoints for this file.
|
|
|
|
// They are metadata, not the real data.
|
|
|
|
int rangeStart = 1;
|
|
|
|
int rangeEnd = blockData.size() - 1; // [rangeStart, rangeEnd) is a half-open interval
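// A worked example (illustrative keys): blockData keys [a, b, c, z] with asset.range ["b","d")
// give rangeStart = 1 and rangeEnd = 3, so the slice below keeps [b, c] and drops the two
// endpoint-metadata keys a and z.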
|
2019-05-10 11:55:44 +08:00
|
|
|
|
2019-08-02 08:00:13 +08:00
|
|
|
// Slide start from the beginning; stop when something in range is found.
|
2019-05-10 11:55:44 +08:00
|
|
|
// Move rangeStart and rangeEnd until they are within the restore range.
|
2019-12-20 08:50:39 +08:00
|
|
|
while (rangeStart < rangeEnd && !asset.range.contains(blockData[rangeStart].key)) {
|
2019-05-10 11:55:44 +08:00
|
|
|
++rangeStart;
|
2019-05-31 02:18:24 +08:00
|
|
|
}
|
2019-08-02 08:00:13 +08:00
|
|
|
// Slide end from the back; stop when something at (rangeEnd - 1) is found in range.
|
2019-12-20 08:50:39 +08:00
|
|
|
while (rangeEnd > rangeStart && !asset.range.contains(blockData[rangeEnd - 1].key)) {
|
2019-05-10 11:55:44 +08:00
|
|
|
--rangeEnd;
|
2019-05-31 02:18:24 +08:00
|
|
|
}
|
2019-05-10 11:55:44 +08:00
|
|
|
|
2019-08-02 08:00:13 +08:00
|
|
|
// Now data contains only the kv mutations within the restore range
|
2019-11-26 14:31:53 +08:00
|
|
|
VectorRef<KeyValueRef> data = blockData.slice(rangeStart, rangeEnd);
|
2019-05-10 11:55:44 +08:00
|
|
|
|
2020-03-15 06:54:47 +08:00
|
|
|
// Note we give INT_MAX as the sub sequence number to override any log mutations.
|
|
|
|
const LogMessageVersion msgVersion(version, std::numeric_limits<int32_t>::max());
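// For example, a log mutation at the same version v carries a small sub-sequence number, while
// these range kvs carry (v, INT32_MAX); an applier that applies mutations in (version, sub) order
// therefore lets the range-file snapshot win at version v.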
|
|
|
|
|
|
|
|
// Convert KV in data into SET mutations of different keys in kvOps
|
2020-06-19 08:40:16 +08:00
|
|
|
Arena tempArena;
|
2020-03-15 06:54:47 +08:00
|
|
|
for (const KeyValueRef& kv : data) {
|
2019-08-02 08:00:13 +08:00
|
|
|
// NOTE: The KV pairs in range files are the real KV pairs in original DB.
|
2020-06-19 01:35:41 +08:00
|
|
|
MutationRef m(MutationRef::Type::SetValue, kv.key, kv.value);
|
|
|
|
// Remove prefix or add prefix in case we restore data to a different sub keyspace
|
2020-06-24 01:43:28 +08:00
|
|
|
if (asset.hasPrefix()) { // Avoid creating new Key
|
|
|
|
ASSERT(asset.removePrefix.size() == 0);
|
|
|
|
m.param1 = m.param1.removePrefix(asset.removePrefix).withPrefix(asset.addPrefix, tempArena);
|
|
|
|
}
|
|
|
|
|
2020-02-10 11:19:32 +08:00
|
|
|
cc->loadedRangeBytes += m.totalSize();
|
2019-05-10 11:55:44 +08:00
|
|
|
|
2019-05-31 02:18:24 +08:00
|
|
|
// We cache all kv operations into kvOps, and apply all kv operations later in one place
|
2020-03-12 06:39:09 +08:00
|
|
|
auto it = kvOps.insert(std::make_pair(msgVersion, MutationsVec()));
|
2020-04-01 07:00:51 +08:00
|
|
|
TraceEvent(SevFRMutationInfo, "FastRestoreDecodeRangeFile")
|
2020-08-26 00:31:34 +08:00
|
|
|
.detail("BatchIndex", asset.batchIndex)
|
2019-11-05 08:10:08 +08:00
|
|
|
.detail("CommitVersion", version)
|
|
|
|
.detail("ParsedMutationKV", m.toString());
|
2019-05-10 11:55:44 +08:00
|
|
|
|
2020-02-22 03:47:51 +08:00
|
|
|
it.first->second.push_back_deep(it.first->second.arena(), m);
|
2019-12-05 12:39:58 +08:00
|
|
|
// Sample data the same way a storage server samples bytes
|
2020-08-07 07:47:08 +08:00
|
|
|
ByteSampleInfo sampleInfo = isKeyValueInSample(KeyValueRef(m.param1, m.param2));
|
|
|
|
if (sampleInfo.inSample) {
|
|
|
|
cc->sampledRangeBytes += sampleInfo.sampledSize;
|
|
|
|
sampleMutations.push_back_deep(sampleMutations.arena(), SampledMutation(m.param1, sampleInfo.sampledSize));
|
2019-12-03 06:33:31 +08:00
|
|
|
}
|
2019-05-31 02:18:24 +08:00
|
|
|
}
|
2019-05-10 11:55:44 +08:00
|
|
|
|
2019-05-31 02:18:24 +08:00
|
|
|
return Void();
|
2019-08-02 08:00:13 +08:00
|
|
|
}
|
|
|
|
|
2020-02-04 08:17:39 +08:00
|
|
|
// Parse data blocks in a log file into a vector of <string, string> pairs.
|
|
|
|
// Each pair.second contains the mutations at a version encoded in pair.first;
|
|
|
|
// Step 1: decodeLogFileBlock into <string, string> pairs;
|
|
|
|
// Step 2: Concatenate the second of pairs with the same pair.first.
|
|
|
|
// pProcessedFileOffset: ensure each data block is processed in order exactly once;
|
|
|
|
// pMutationMap: concatenated mutation list string at the mutation's commit version
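// A worked example (illustrative): if version v's mutation list spans three data blocks whose keys
// carry parts 0, 1, and 2, concatenateBackupMutationForLogFile appends each block's value in part
// order, so pMutationMap[v] ends up holding the complete serialized mutation list.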
|
2019-10-18 01:12:15 +08:00
|
|
|
ACTOR static Future<Void> _parseLogFileToMutationsOnLoader(NotifiedVersion* pProcessedFileOffset,
|
|
|
|
SerializedMutationListMap* pMutationMap,
|
2019-12-20 08:50:39 +08:00
|
|
|
Reference<IBackupContainer> bc, RestoreAsset asset) {
|
|
|
|
Reference<IAsyncFile> inFile = wait(bc->readFile(asset.filename));
|
2019-08-02 08:00:13 +08:00
|
|
|
// decodeLogFileBlock() must read block by block!
|
|
|
|
state Standalone<VectorRef<KeyValueRef>> data =
|
2019-12-20 08:50:39 +08:00
|
|
|
wait(parallelFileRestore::decodeLogFileBlock(inFile, asset.offset, asset.len));
|
2020-06-23 10:47:14 +08:00
|
|
|
TraceEvent("FastRestoreLoaderDecodeLogFile")
|
2020-08-26 00:31:34 +08:00
|
|
|
.detail("BatchIndex", asset.batchIndex)
|
2020-06-23 10:47:14 +08:00
|
|
|
.detail("RestoreAsset", asset.toString())
|
2019-10-17 11:30:11 +08:00
|
|
|
.detail("DataSize", data.contents().size());
|
|
|
|
|
|
|
|
// Ensure data blocks in the same file are processed in order
|
2019-12-20 08:50:39 +08:00
|
|
|
wait(pProcessedFileOffset->whenAtLeast(asset.offset));
|
2019-10-17 11:30:11 +08:00
|
|
|
|
2019-12-20 08:50:39 +08:00
|
|
|
if (pProcessedFileOffset->get() == asset.offset) {
|
2020-03-15 06:54:47 +08:00
|
|
|
for (const KeyValueRef& kv : data) {
|
2020-08-01 07:00:15 +08:00
|
|
|
// Concatenate the backup param1 and param2 (KV) at the same version.
|
2020-04-14 09:09:29 +08:00
|
|
|
concatenateBackupMutationForLogFile(pMutationMap, kv.key, kv.value, asset);
|
2019-10-17 11:30:11 +08:00
|
|
|
}
|
2019-12-20 08:50:39 +08:00
|
|
|
pProcessedFileOffset->set(asset.offset + asset.len);
|
2019-05-31 02:18:24 +08:00
|
|
|
}
|
2019-05-10 11:55:44 +08:00
|
|
|
|
|
|
|
return Void();
|
2019-08-02 08:00:13 +08:00
|
|
|
}
|
2020-01-16 05:39:06 +08:00
|
|
|
|
2020-09-09 22:23:18 +08:00
|
|
|
// Retry wrapper for _parseLogFileToMutationsOnLoader
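// The policy below: corruption-class errors are rethrown immediately, while transient
// network/blob errors retry after a fixed, jittered 0.1s delay with no cap on attempts.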
|
|
|
|
ACTOR static Future<Void> parseLogFileToMutationsOnLoader(NotifiedVersion* pProcessedFileOffset,
|
|
|
|
SerializedMutationListMap* pMutationMap,
|
|
|
|
Reference<IBackupContainer> bc, RestoreAsset asset) {
|
|
|
|
state int readFileRetries = 0;
|
|
|
|
loop {
|
|
|
|
try {
|
|
|
|
wait(_parseLogFileToMutationsOnLoader(pProcessedFileOffset, pMutationMap, bc, asset));
|
|
|
|
break;
|
|
|
|
} catch (Error& e) {
|
|
|
|
if (e.code() == error_code_restore_bad_read || e.code() == error_code_restore_unsupported_file_version ||
|
|
|
|
e.code() == error_code_restore_corrupted_data_padding) { // non-retriable errors
|
|
|
|
TraceEvent(SevError, "FileRestoreCorruptedLogFileBlock").error(e);
|
|
|
|
throw;
|
|
|
|
} else if (e.code() == error_code_http_request_failed || e.code() == error_code_connection_failed ||
|
|
|
|
e.code() == error_code_timed_out || e.code() == error_code_lookup_failed) {
|
|
|
|
// blob http request failure, retry
|
|
|
|
TraceEvent(SevWarnAlways, "FastRestoreDecodedLogFileConnectionFailure")
|
|
|
|
.detail("Retries", ++readFileRetries)
|
|
|
|
.error(e);
|
|
|
|
wait(delayJittered(0.1));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return Void();
|
|
|
|
}
|
|
|
|
|
2020-01-16 05:39:06 +08:00
|
|
|
// Return applier IDs that are used to apply key-values
|
|
|
|
std::vector<UID> getApplierIDs(std::map<Key, UID>& rangeToApplier) {
|
|
|
|
std::vector<UID> applierIDs;
|
|
|
|
for (auto& applier : rangeToApplier) {
|
|
|
|
applierIDs.push_back(applier.second);
|
|
|
|
}
|
|
|
|
|
|
|
|
ASSERT(!applierIDs.empty());
|
|
|
|
return applierIDs;
|
2020-02-27 05:43:30 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// Notify loaders that the version batch (index) has been applied.
|
|
|
|
// This affects which version batch each loader can release actors even when the worker has low memory
|
|
|
|
ACTOR Future<Void> handleFinishVersionBatchRequest(RestoreVersionBatchRequest req, Reference<RestoreLoaderData> self) {
|
|
|
|
// Ensure batch (i-1) is applied before batch i
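// For example (illustrative indices): with finishedBatch at 2, a request for batchIndex 4 waits
// here until batch 3 finishes, while a request for batchIndex 3 proceeds and advances
// finishedBatch to 3 below.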
|
|
|
|
TraceEvent("FastRestoreLoaderHandleFinishVersionBatch", self->id())
|
|
|
|
.detail("FinishedBatchIndex", self->finishedBatch.get())
|
|
|
|
.detail("RequestedBatchIndex", req.batchIndex);
|
|
|
|
wait(self->finishedBatch.whenAtLeast(req.batchIndex - 1));
|
|
|
|
if (self->finishedBatch.get() == req.batchIndex - 1) {
|
2020-08-19 06:44:22 +08:00
|
|
|
// Sanity check: All requests before and in this batchIndex must have been processed; otherwise,
|
|
|
|
// those requests may cause a segmentation fault after appliers remove the batch data
|
2020-08-27 06:21:33 +08:00
|
|
|
while (!self->loadingQueue.empty() && self->loadingQueue.top().batchIndex <= req.batchIndex) {
|
2020-08-19 06:44:22 +08:00
|
|
|
// There are still pending requests from earlier or the current batchIndex, which should not happen
|
2020-08-27 06:21:33 +08:00
|
|
|
TraceEvent(SevWarn, "FastRestoreLoaderHasPendingLoadFileRequests")
|
2020-08-19 06:44:22 +08:00
|
|
|
.detail("PendingRequest", self->loadingQueue.top().toString());
|
2020-08-27 06:21:33 +08:00
|
|
|
self->loadingQueue.pop();
|
2020-08-19 06:44:22 +08:00
|
|
|
}
|
2020-08-27 06:21:33 +08:00
|
|
|
while (!self->sendingQueue.empty() && self->sendingQueue.top().batchIndex <= req.batchIndex) {
|
|
|
|
TraceEvent(SevWarn, "FastRestoreLoaderHasPendingSendRequests")
|
2020-08-19 06:44:22 +08:00
|
|
|
.detail("PendingRequest", self->sendingQueue.top().toString());
|
2020-08-27 06:21:33 +08:00
|
|
|
self->sendingQueue.pop();
|
2020-08-19 06:44:22 +08:00
|
|
|
}
|
2020-08-27 06:21:33 +08:00
|
|
|
while (!self->sendLoadParamQueue.empty() && self->sendLoadParamQueue.top().batchIndex <= req.batchIndex) {
|
|
|
|
TraceEvent(SevWarn, "FastRestoreLoaderHasPendingSendLoadParamRequests")
|
2020-08-19 06:44:22 +08:00
|
|
|
.detail("PendingRequest", self->sendLoadParamQueue.top().toString());
|
2020-08-27 06:21:33 +08:00
|
|
|
self->sendLoadParamQueue.pop();
|
2020-08-19 06:44:22 +08:00
|
|
|
}
|
|
|
|
|
2020-02-27 05:43:30 +08:00
|
|
|
self->finishedBatch.set(req.batchIndex);
|
2020-05-05 02:31:39 +08:00
|
|
|
// Clean up batchData
|
|
|
|
self->batch.erase(req.batchIndex);
|
|
|
|
self->status.erase(req.batchIndex);
|
2020-02-27 05:43:30 +08:00
|
|
|
}
|
2020-02-27 06:35:03 +08:00
|
|
|
if (self->delayedActors > 0) {
|
|
|
|
self->checkMemory.trigger();
|
|
|
|
}
|
2020-02-27 05:43:30 +08:00
|
|
|
req.reply.send(RestoreCommonReply(self->id(), false));
|
|
|
|
return Void();
|
2020-03-02 12:39:56 +08:00
|
|
|
}
|
2020-07-09 08:56:10 +08:00
|
|
|
|
|
|
|
namespace {
|
|
|
|
|
|
|
|
void oldSplitMutation(std::map<Key, UID>* pRangeToApplier, MutationRef m, Arena& mvector_arena,
|
|
|
|
VectorRef<MutationRef>& mvector, Arena& nodeIDs_arena, VectorRef<UID>& nodeIDs) {
|
|
|
|
// mvector[i] should be mapped to nodeID[i]
|
|
|
|
ASSERT(mvector.empty());
|
|
|
|
ASSERT(nodeIDs.empty());
|
|
|
|
// key range [m->param1, m->param2)
|
|
|
|
std::map<Key, UID>::iterator itlow, itup; // we will return [itlow, itup)
|
|
|
|
itlow = pRangeToApplier->lower_bound(m.param1); // lower_bound returns the iterator that is >= m.param1
|
|
|
|
if (itlow == pRangeToApplier->end()) {
|
|
|
|
--itlow;
|
|
|
|
mvector.push_back_deep(mvector_arena, m);
|
|
|
|
nodeIDs.push_back(nodeIDs_arena, itlow->second);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
if (itlow->first > m.param1) {
|
|
|
|
if (itlow != pRangeToApplier->begin()) {
|
|
|
|
--itlow;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
itup = pRangeToApplier->upper_bound(m.param2); // return rmap::end if no key is after m.param2.
|
|
|
|
ASSERT(itup == pRangeToApplier->end() || itup->first > m.param2);
|
|
|
|
|
|
|
|
std::map<Key, UID>::iterator itApplier;
|
|
|
|
while (itlow != itup) {
|
|
|
|
Standalone<MutationRef> curm; // current mutation
|
|
|
|
curm.type = m.type;
|
|
|
|
// The first split mutation should start with m.param1.
|
|
|
|
// The later ones should start with the rangeToApplier boundary.
|
|
|
|
if (m.param1 > itlow->first) {
|
|
|
|
curm.param1 = m.param1;
|
|
|
|
} else {
|
|
|
|
curm.param1 = itlow->first;
|
|
|
|
}
|
|
|
|
itApplier = itlow;
|
|
|
|
itlow++;
|
|
|
|
if (itlow == itup) {
|
|
|
|
ASSERT(m.param2 <= normalKeys.end);
|
|
|
|
curm.param2 = m.param2;
|
|
|
|
} else if (m.param2 < itlow->first) {
|
|
|
|
UNREACHABLE();
|
|
|
|
curm.param2 = m.param2;
|
|
|
|
} else {
|
|
|
|
curm.param2 = itlow->first;
|
|
|
|
}
|
|
|
|
ASSERT(curm.param1 <= curm.param2);
|
|
|
|
// itup > m.param2: (itup-1) may be out of mutation m's range
|
|
|
|
// Ensure the added mutations have overlap with mutation m
|
|
|
|
if (m.param1 < curm.param2 && m.param2 > curm.param1) {
|
|
|
|
mvector.push_back_deep(mvector_arena, curm);
|
|
|
|
nodeIDs.push_back(nodeIDs_arena, itApplier->second);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Test splitMutation
|
|
|
|
TEST_CASE("/FastRestore/RestoreLoader/splitMutation") {
|
|
|
|
std::map<Key, UID> rangeToApplier;
|
|
|
|
MutationsVec mvector;
|
|
|
|
Standalone<VectorRef<UID>> nodeIDs;
|
|
|
|
|
|
|
|
// Prepare RangeToApplier
|
|
|
|
rangeToApplier.emplace(normalKeys.begin, deterministicRandom()->randomUniqueID());
|
|
|
|
int numAppliers = deterministicRandom()->randomInt(1, 50);
|
|
|
|
for (int i = 0; i < numAppliers; ++i) {
|
|
|
|
Key k = Key(deterministicRandom()->randomAlphaNumeric(deterministicRandom()->randomInt(1, 1000)));
|
|
|
|
UID node = deterministicRandom()->randomUniqueID();
|
|
|
|
rangeToApplier.emplace(k, node);
|
|
|
|
TraceEvent("RangeToApplier").detail("Key", k).detail("Node", node);
|
|
|
|
}
|
|
|
|
Key k1 = Key(deterministicRandom()->randomAlphaNumeric(deterministicRandom()->randomInt(1, 500)));
|
|
|
|
Key k2 = Key(deterministicRandom()->randomAlphaNumeric(deterministicRandom()->randomInt(1, 1000)));
|
|
|
|
Key beginK = k1 < k2 ? k1 : k2;
|
|
|
|
Key endK = k1 < k2 ? k2 : k1;
|
|
|
|
Standalone<MutationRef> mutation(MutationRef(MutationRef::ClearRange, beginK.contents(), endK.contents()));
|
|
|
|
|
|
|
|
// Method 1: Use old splitMutation
|
|
|
|
oldSplitMutation(&rangeToApplier, mutation, mvector.arena(), mvector.contents(), nodeIDs.arena(),
|
|
|
|
nodeIDs.contents());
|
|
|
|
ASSERT(mvector.size() == nodeIDs.size());
|
|
|
|
|
|
|
|
// Method 2: Use new intersection based method
|
|
|
|
KeyRangeMap<UID> krMap;
|
|
|
|
buildApplierRangeMap(&krMap, &rangeToApplier);
|
|
|
|
|
|
|
|
MutationsVec mvector2;
|
|
|
|
Standalone<VectorRef<UID>> nodeIDs2;
|
|
|
|
splitMutation(krMap, mutation, mvector2.arena(), mvector2.contents(), nodeIDs2.arena(), nodeIDs2.contents());
|
|
|
|
ASSERT(mvector2.size() == nodeIDs2.size());
|
|
|
|
|
|
|
|
ASSERT(mvector.size() == mvector2.size());
|
|
|
|
int splitMutationIndex = 0;
|
|
|
|
for (; splitMutationIndex < mvector.size(); splitMutationIndex++) {
|
|
|
|
MutationRef result = mvector[splitMutationIndex];
|
|
|
|
MutationRef result2 = mvector2[splitMutationIndex];
|
|
|
|
UID applierID = nodeIDs[splitMutationIndex];
|
|
|
|
UID applierID2 = nodeIDs2[splitMutationIndex];
|
|
|
|
KeyRange krange(KeyRangeRef(result.param1, result.param2));
|
|
|
|
KeyRange krange2(KeyRangeRef(result2.param1, result2.param2));
|
|
|
|
TraceEvent("Result")
|
|
|
|
.detail("KeyRange1", krange.toString())
|
|
|
|
.detail("KeyRange2", krange2.toString())
|
|
|
|
.detail("ApplierID1", applierID)
|
|
|
|
.detail("ApplierID2", applierID2);
|
|
|
|
if (krange != krange2 || applierID != applierID2) {
|
|
|
|
TraceEvent(SevError, "IncorrectResult")
|
|
|
|
.detail("Mutation", mutation.toString())
|
|
|
|
.detail("KeyRange1", krange.toString())
|
|
|
|
.detail("KeyRange2", krange2.toString())
|
|
|
|
.detail("ApplierID1", applierID)
|
|
|
|
.detail("ApplierID2", applierID2);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return Void();
|
|
|
|
}
|
|
|
|
|
|
|
|
} // namespace
|