// fdbserver/RestoreLoader.actor.cpp
/*
* RestoreLoader.actor.cpp
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// This file implements the functions and actors used by the RestoreLoader role.
// The RestoreLoader role starts with the restoreLoaderCore actor
#include "fdbclient/BackupContainer.h"
#include "fdbserver/RestoreLoader.actor.h"

#include "flow/actorcompiler.h" // This must be the last #include.

// SerializedMutationListMap:
// Key is the signature/version of the mutation list, Value is the mutation list (or part of the mutation list)
typedef std::map<Standalone<StringRef>, Standalone<StringRef>> SerializedMutationListMap;
// SerializedMutationPartMap:
// Key has the same semantics as SerializedMutationListMap; Value is the part number of the splitted mutation list
typedef std::map<Standalone<StringRef>, uint32_t> SerializedMutationPartMap;

// Forward declarations for the helpers defined later in this file.

// Split a range mutation at the applier key-range boundaries; mvector[i] is handled by applier nodeIDs[i].
void splitMutation(Reference<RestoreLoaderData> self, MutationRef m, Arena& mvector_arena,
                   VectorRef<MutationRef>& mvector, Arena& nodeIDs_arena, VectorRef<UID>& nodeIDs);
// Decode a concatenated serialized mutation list into versioned mutations (and samples) for one asset.
void _parseSerializedMutation(std::map<LoadingParam, VersionedMutationsMap>::iterator kvOpsIter,
                              SerializedMutationListMap* mutationMap,
                              std::map<LoadingParam, MutationsVec>::iterator samplesIter, const RestoreAsset& asset);
// Record the applier interfaces broadcast by the master (first request wins).
void handleRestoreSysInfoRequest(const RestoreSysInfoRequest& req, Reference<RestoreLoaderData> self);
// Parse (or wait on an in-flight parse of) the file slice described by req.param and reply with samples.
ACTOR Future<Void> handleLoadFileRequest(RestoreLoadFileRequest req, Reference<RestoreLoaderData> self);
// Ship the parsed mutations for every loading param matching req.useRangeFile to the appliers.
ACTOR Future<Void> handleSendMutationsRequest(RestoreSendMutationsToAppliersRequest req,
                                              Reference<RestoreLoaderData> self);
ACTOR Future<Void> sendMutationsToApplier(Reference<RestoreLoaderData> self, VersionedMutationsMap* kvOps,
                                          bool isRangeFile, Version startVersion, Version endVersion,
                                          RestoreAsset asset);
// Decode one block of a log file and concatenate the per-version mutation-list parts into mutationMap.
ACTOR static Future<Void> _parseLogFileToMutationsOnLoader(NotifiedVersion* pProcessedFileOffset,
                                                           SerializedMutationListMap* mutationMap,
                                                           SerializedMutationPartMap* mutationPartMap,
                                                           Reference<IBackupContainer> bc, RestoreAsset asset);
// Decode one block of a range file directly into versioned SetValue mutations (and samples).
ACTOR static Future<Void> _parseRangeFileToMutationsOnLoader(
    std::map<LoadingParam, VersionedMutationsMap>::iterator kvOpsIter,
    std::map<LoadingParam, MutationsVec>::iterator samplesIter, Reference<IBackupContainer> bc, Version version,
    RestoreAsset asset);
// Main event loop of the RestoreLoader role. Dispatches each request stream of the loader
// interface to its handler until a finishRestore request (or an error) ends the role.
// Long-running handlers are added to `actors` so multiple requests can be served concurrently.
ACTOR Future<Void> restoreLoaderCore(RestoreLoaderInterface loaderInterf, int nodeIndex, Database cx) {
    state Reference<RestoreLoaderData> self =
        Reference<RestoreLoaderData>(new RestoreLoaderData(loaderInterf.id(), nodeIndex));
    state ActorCollection actors(false); // in-flight request handlers
    state Future<Void> exitRole = Never(); // becomes ready once finishRestore is received
    loop {
        // Remember which request type we were serving, for the error trace below.
        state std::string requestTypeStr = "[Init]";
        try {
            choose {
                when(RestoreSimpleRequest req = waitNext(loaderInterf.heartbeat.getFuture())) {
                    requestTypeStr = "heartbeat";
                    actors.add(handleHeartbeat(req, loaderInterf.id()));
                }
                when(RestoreSysInfoRequest req = waitNext(loaderInterf.updateRestoreSysInfo.getFuture())) {
                    requestTypeStr = "updateRestoreSysInfo";
                    handleRestoreSysInfoRequest(req, self);
                }
                when(RestoreLoadFileRequest req = waitNext(loaderInterf.loadFile.getFuture())) {
                    requestTypeStr = "loadFile";
                    // Lazily open the backup container for the URL named by the request.
                    self->initBackupContainer(req.param.url);
                    actors.add(handleLoadFileRequest(req, self));
                }
                when(RestoreSendMutationsToAppliersRequest req = waitNext(loaderInterf.sendMutations.getFuture())) {
                    requestTypeStr = "sendMutations";
                    actors.add(handleSendMutationsRequest(req, self));
                }
                when(RestoreVersionBatchRequest req = waitNext(loaderInterf.initVersionBatch.getFuture())) {
                    requestTypeStr = "initVersionBatch";
                    // Version-batch init is awaited inline; no other request is served meanwhile.
                    wait(handleInitVersionBatchRequest(req, self));
                }
                when(RestoreVersionBatchRequest req = waitNext(loaderInterf.finishRestore.getFuture())) {
                    requestTypeStr = "finishRestore";
                    handleFinishRestoreRequest(req, self);
                    exitRole = Void(); // arm the exit branch; taken on the next loop iteration
                }
                when(wait(exitRole)) {
                    TraceEvent("FastRestore").detail("RestoreLoaderCore", "ExitRole").detail("NodeID", self->id());
                    break;
                }
            }
        } catch (Error& e) {
            // Any error ends the role; the failed request type is recorded for diagnosis.
            TraceEvent(SevWarn, "FastRestore")
                .detail("RestoreLoaderError", e.what())
                .detail("RequestType", requestTypeStr);
            break;
        }
    }
    return Void();
}
// Assume: Only update the local data if it (applierInterf) has not been set
2019-10-25 03:47:51 +08:00
void handleRestoreSysInfoRequest(const RestoreSysInfoRequest& req, Reference<RestoreLoaderData> self) {
TraceEvent("FastRestore").detail("HandleRestoreSysInfoRequest", self->id());
ASSERT(self.isValid());
2019-08-02 08:00:13 +08:00
// The loader has received the appliers interfaces
2019-08-02 08:00:13 +08:00
if (!self->appliersInterf.empty()) {
req.reply.send(RestoreCommonReply(self->id()));
return;
}
self->appliersInterf = req.sysInfo.appliers;
2019-08-02 08:00:13 +08:00
req.reply.send(RestoreCommonReply(self->id()));
}
// Parse one LoadingParam — a [offset, offset+len) slice of a backup file — into versioned
// mutations stored in self->kvOpsPerLP[param] plus sampled mutations in self->sampleMutations[param].
// Range-file blocks are parsed in parallel directly into kvOps; log-file blocks are first
// concatenated (in file order) into mutationMap and then decoded by _parseSerializedMutation.
ACTOR Future<Void> _processLoadingParam(LoadingParam param, Reference<RestoreLoaderData> self) {
    // Temporary data structure for parsing log files into (version, <K, V, mutationType>)
    // Must use StandAlone to save mutations, otherwise, the mutationref memory will be corrupted
    // mutationMap: Key is the unique identifier for a batch of mutation logs at the same version
    state SerializedMutationListMap mutationMap;
    state std::map<Standalone<StringRef>, uint32_t> mutationPartMap; // Sanity check the data parsing is correct
    state NotifiedVersion processedFileOffset(0); // serializes log-file blocks by file offset
    state std::vector<Future<Void>> fileParserFutures;
    state std::map<LoadingParam, VersionedMutationsMap>::iterator kvOpsPerLPIter = self->kvOpsPerLP.end();
    state std::map<LoadingParam, MutationsVec>::iterator samplesIter = self->sampleMutations.end();
    // Q: How to record the param's fields inside LoadingParam Refer to storageMetrics
    TraceEvent("FastRestore").detail("Loader", self->id()).detail("StartProcessLoadParam", param.toString());
    ASSERT(param.blockSize > 0);
    ASSERT(param.asset.offset % param.blockSize == 0); // Parse file must be at block bondary.
    ASSERT(self->kvOpsPerLP.find(param) == self->kvOpsPerLP.end());
    // NOTE: map's iterator is guaranteed to be stable, but pointer may not.
    // The iterators below stay valid across the waits even if other params are inserted concurrently.
    // state VersionedMutationsMap* kvOps = &self->kvOpsPerLP[param];
    self->kvOpsPerLP.emplace(param, VersionedMutationsMap());
    self->sampleMutations.emplace(param, MutationsVec());
    kvOpsPerLPIter = self->kvOpsPerLP.find(param);
    samplesIter = self->sampleMutations.find(param);
    // Launch one parser per blockSize-sized sub-slice of the asset.
    for (int64_t j = param.asset.offset; j < param.asset.len; j += param.blockSize) {
        RestoreAsset subAsset = param.asset;
        subAsset.offset = j;
        subAsset.len = std::min<int64_t>(param.blockSize, param.asset.len - j);
        if (param.isRangeFile) {
            fileParserFutures.push_back(
                _parseRangeFileToMutationsOnLoader(kvOpsPerLPIter, samplesIter, self->bc, param.endVersion, subAsset));
        } else {
            // TODO: Sanity check the log file's range is overlapped with the restored version range
            fileParserFutures.push_back(_parseLogFileToMutationsOnLoader(&processedFileOffset, &mutationMap,
                                                                         &mutationPartMap, self->bc, subAsset));
        }
    }
    wait(waitForAll(fileParserFutures));
    // Log files need a second pass to turn the concatenated serialized lists into mutations.
    if (!param.isRangeFile) {
        _parseSerializedMutation(kvOpsPerLPIter, &mutationMap, samplesIter, param.asset);
    }
    TraceEvent("FastRestore").detail("Loader", self->id()).detail("FinishLoadingFile", param.asset.filename);
    return Void();
}
// A loader can process multiple RestoreLoadFileRequest in parallel.
// Duplicate requests for the same param share one _processLoadingParam future: the first
// request starts the work, later ones just wait on the stored future, and all reply with
// the sampled mutations once parsing is done.
ACTOR Future<Void> handleLoadFileRequest(RestoreLoadFileRequest req, Reference<RestoreLoaderData> self) {
    if (self->processedFileParams.find(req.param) == self->processedFileParams.end()) {
        TraceEvent("FastRestore").detail("Loader", self->id()).detail("ProcessLoadParam", req.param.toString());
        ASSERT(self->sampleMutations.find(req.param) == self->sampleMutations.end());
        // Insert a placeholder first so a re-entrant request never starts a second parse.
        self->processedFileParams[req.param] = Never();
        self->processedFileParams[req.param] = _processLoadingParam(req.param, self);
    } else {
        TraceEvent("FastRestore").detail("Loader", self->id()).detail("WaitOnProcessLoadParam", req.param.toString());
    }
    ASSERT(self->processedFileParams.find(req.param) != self->processedFileParams.end());
    wait(self->processedFileParams[req.param]); // wait on the processing of the req.param.
    req.reply.send(RestoreLoadFileReply(req.param, self->sampleMutations[req.param]));
    // TODO: clear self->sampleMutations[req.param] memory to save memory on loader
    return Void();
}
// For every loading param whose file type matches req.useRangeFile, ship its parsed mutations
// to the appliers (one param at a time), then acknowledge the request.
ACTOR Future<Void> handleSendMutationsRequest(RestoreSendMutationsToAppliersRequest req,
                                              Reference<RestoreLoaderData> self) {
    state std::map<LoadingParam, VersionedMutationsMap>::iterator item = self->kvOpsPerLP.begin();
    self->rangeToApplier = req.rangeToApplier;
    // NOTE: `item` is held across wait(); this relies on std::map iterator stability.
    for (; item != self->kvOpsPerLP.end(); item++) {
        if (item->first.isRangeFile == req.useRangeFile) {
            // Send the parsed mutation to applier who will apply the mutation to DB
            wait(sendMutationsToApplier(self, &item->second, item->first.isRangeFile, item->first.prevVersion,
                                        item->first.endVersion, item->first.asset));
        }
    }
    req.reply.send(RestoreCommonReply(self->id()));
    return Void();
}
// TODO: This function can be revised better
// Assume: kvOps data are from the same file.
// For each version in kvOps (ascending), route every mutation to its applier — splitting range
// mutations at applier boundaries — and send one RestoreSendVersionedMutationsRequest per
// applier per version. Versions are sent strictly in order (prevVersion < commitVersion).
ACTOR Future<Void> sendMutationsToApplier(Reference<RestoreLoaderData> self, VersionedMutationsMap* pkvOps,
                                          bool isRangeFile, Version startVersion, Version endVersion,
                                          RestoreAsset asset) {
    state VersionedMutationsMap& kvOps = *pkvOps;
    state VersionedMutationsMap::iterator kvOp = kvOps.begin();
    state int kvCount = 0; // total mutations routed (for the summary trace at the end)
    state int splitMutationIndex = 0;
    state std::vector<UID> applierIDs = self->getWorkingApplierIDs();
    state std::vector<std::pair<UID, RestoreSendVersionedMutationsRequest>> requests;
    state Version prevVersion = startVersion;
    TraceEvent("FastRestore_SendMutationToApplier")
        .detail("Loader", self->id())
        .detail("IsRangeFile", isRangeFile)
        .detail("StartVersion", startVersion)
        .detail("EndVersion", endVersion)
        .detail("FileIndex", asset.filename);
    // Ensure there is a mutation request sent at endVersion, so that applier can advance its notifiedVersion
    if (kvOps.find(endVersion) == kvOps.end()) {
        kvOps[endVersion] = MutationsVec(); // Empty mutation vector will be handled by applier
    }
    splitMutationIndex = 0;
    kvCount = 0;
    for (kvOp = kvOps.begin(); kvOp != kvOps.end(); kvOp++) {
        // applierMutationsBuffer is the mutation vector to be sent to each applier
        // applierMutationsSize is buffered mutation vector size for each applier
        std::map<UID, MutationsVec> applierMutationsBuffer;
        std::map<UID, double> applierMutationsSize;
        for (auto& applierID : applierIDs) {
            applierMutationsBuffer[applierID] = MutationsVec();
            applierMutationsSize[applierID] = 0.0;
        }
        Version commitVersion = kvOp->first;
        for (int mIndex = 0; mIndex < kvOp->second.size(); mIndex++) {
            MutationRef kvm = kvOp->second[mIndex];
            // Send the mutation to applier
            if (isRangeMutation(kvm)) {
                MutationsVec mvector;
                Standalone<VectorRef<UID>> nodeIDs;
                // Because using a vector of mutations causes overhead, and the range mutation should happen rarely;
                // We handle the range mutation and key mutation differently for the benefit of avoiding memory copy
                // WARNING: The splitMutation() may have bugs
                splitMutation(self, kvm, mvector.arena(), mvector.contents(), nodeIDs.arena(), nodeIDs.contents());
                ASSERT(mvector.size() == nodeIDs.size());
                for (splitMutationIndex = 0; splitMutationIndex < mvector.size(); splitMutationIndex++) {
                    MutationRef mutation = mvector[splitMutationIndex];
                    UID applierID = nodeIDs[splitMutationIndex];
                    // printf("SPLITTED MUTATION: %d: mutation:%s applierID:%s\n", splitMutationIndex,
                    // mutation.toString().c_str(), applierID.toString().c_str());
                    applierMutationsBuffer[applierID].push_back_deep(applierMutationsBuffer[applierID].arena(), mutation);
                    applierMutationsSize[applierID] += mutation.expectedSize();
                    kvCount++;
                }
            } else { // mutation operates on a particular key
                // Route to the applier whose key range contains kvm.param1:
                // upper_bound gives the first range starting after param1; its predecessor covers param1.
                std::map<Key, UID>::iterator itlow = self->rangeToApplier.upper_bound(kvm.param1);
                --itlow; // make sure itlow->first <= m.param1
                ASSERT(itlow->first <= kvm.param1);
                MutationRef mutation = kvm;
                UID applierID = itlow->second;
                // printf("KV--Applier: K:%s ApplierID:%s\n", kvm.param1.toString().c_str(),
                // applierID.toString().c_str());
                kvCount++;
                applierMutationsBuffer[applierID].push_back_deep(applierMutationsBuffer[applierID].arena(), mutation);
                applierMutationsSize[applierID] += mutation.expectedSize();
            }
        } // Mutations at the same version
        // Send the mutations to appliers for each version
        for (auto& applierID : applierIDs) {
            requests.push_back(std::make_pair(
                applierID, RestoreSendVersionedMutationsRequest(asset, prevVersion, commitVersion, isRangeFile,
                                                                applierMutationsBuffer[applierID])));
        }
        TraceEvent(SevDebug, "FastRestore_Debug")
            .detail("Loader", self->id())
            .detail("PrevVersion", prevVersion)
            .detail("CommitVersion", commitVersion)
            .detail("Filename", asset.filename);
        ASSERT(prevVersion < commitVersion);
        prevVersion = commitVersion;
        wait(sendBatchRequests(&RestoreApplierInterface::sendMutationVector, self->appliersInterf, requests));
        requests.clear();
    } // all versions of mutations in the same file
    TraceEvent("FastRestore").detail("LoaderSendMutationOnAppliers", kvCount);
    return Void();
}
// TODO: Add a unit test for this function
2019-08-02 08:00:13 +08:00
void splitMutation(Reference<RestoreLoaderData> self, MutationRef m, Arena& mvector_arena,
VectorRef<MutationRef>& mvector, Arena& nodeIDs_arena, VectorRef<UID>& nodeIDs) {
// mvector[i] should be mapped to nodeID[i]
ASSERT(mvector.empty());
ASSERT(nodeIDs.empty());
// key range [m->param1, m->param2)
2019-08-02 08:00:13 +08:00
std::map<Standalone<KeyRef>, UID>::iterator itlow, itup; // we will return [itlow, itup)
2019-09-04 06:50:21 +08:00
itlow = self->rangeToApplier.lower_bound(m.param1); // lower_bound returns the iterator that is >= m.param1
2019-08-02 08:00:13 +08:00
if (itlow->first > m.param1) {
2019-09-04 06:50:21 +08:00
if (itlow != self->rangeToApplier.begin()) {
--itlow;
}
}
2019-09-04 06:50:21 +08:00
itup = self->rangeToApplier.upper_bound(m.param2); // return rmap::end if no key is after m.param2.
ASSERT(itup == self->rangeToApplier.end() || itup->first > m.param2);
2019-05-14 08:24:57 +08:00
std::map<Standalone<KeyRef>, UID>::iterator itApplier;
2019-05-14 08:24:57 +08:00
while (itlow != itup) {
2019-08-02 08:00:13 +08:00
Standalone<MutationRef> curm; // current mutation
curm.type = m.type;
2019-08-02 08:00:13 +08:00
// The first split mutation should starts with m.first.
2019-09-04 06:50:21 +08:00
// The later ones should start with the rangeToApplier boundary.
2019-08-02 08:00:13 +08:00
if (m.param1 > itlow->first) {
curm.param1 = m.param1;
} else {
curm.param1 = itlow->first;
}
itApplier = itlow;
itlow++;
2019-05-14 08:24:57 +08:00
if (itlow == itup) {
2019-08-02 08:00:13 +08:00
ASSERT(m.param2 <= normalKeys.end);
2019-05-14 08:24:57 +08:00
curm.param2 = m.param2;
2019-08-02 08:00:13 +08:00
} else if (m.param2 < itlow->first) {
UNREACHABLE();
2019-05-14 08:24:57 +08:00
curm.param2 = m.param2;
} else {
curm.param2 = itlow->first;
}
2019-08-02 08:00:13 +08:00
ASSERT(curm.param1 <= curm.param2);
2019-05-14 08:24:57 +08:00
mvector.push_back_deep(mvector_arena, curm);
nodeIDs.push_back(nodeIDs_arena, itApplier->second);
}
}
// key_input format:
// [logRangeMutation.first][hash_value_of_commit_version:1B][bigEndian64(commitVersion)][bigEndian32(part)]
// value_input: serialized binary of mutations at the same version
//
// Accumulate val_input into pMutationMap keyed by commitVersion, concatenating multi-part
// mutation lists in part order (pMutationPartMap tracks the last part seen per version).
// Returns true iff val_input was appended to an existing entry (i.e. a concatenation happened);
// returns false for first parts and for versions outside [asset.beginVersion, asset.endVersion).
bool concatenateBackupMutationForLogFile(std::map<Standalone<StringRef>, Standalone<StringRef>>* pMutationMap,
                                         std::map<Standalone<StringRef>, uint32_t>* pMutationPartMap,
                                         Standalone<StringRef> key_input, Standalone<StringRef> val_input,
                                         const RestoreAsset& asset) {
    SerializedMutationListMap& mutationMap = *pMutationMap;
    std::map<Standalone<StringRef>, uint32_t>& mutationPartMap = *pMutationPartMap;
    // Fixed-size suffix of the key: 1-byte hash + 8-byte version + 4-byte part number.
    const int key_prefix_len = sizeof(uint8_t) + sizeof(Version) + sizeof(uint32_t);
    BackupStringRefReader readerKey(key_input, restore_corrupted_data()); // read key_input!
    int logRangeMutationFirstLength = key_input.size() - key_prefix_len;
    bool concatenated = false;
    ASSERT_WE_THINK(key_input.size() >= key_prefix_len);
    if (logRangeMutationFirstLength > 0) {
        // Strip out the [logRangeMutation.first]; otherwise, the following readerKey.consume will produce wrong value
        readerKey.consume(logRangeMutationFirstLength);
    }
    readerKey.consume<uint8_t>(); // uint8_t hashValue = readerKey.consume<uint8_t>()
    Version commitVersion = readerKey.consumeNetworkUInt64();
    // Skip mutations not in [asset.beginVersion, asset.endVersion), which is what we are only processing right now
    if (commitVersion < asset.beginVersion || commitVersion >= asset.endVersion) {
        return false;
    }
    uint32_t part = readerKey.consumeNetworkUInt32();
    // Use commitVersion as id
    Standalone<StringRef> id = StringRef((uint8_t*)&commitVersion, sizeof(Version));
    if (mutationMap.find(id) == mutationMap.end()) {
        // First part seen for this version: start a fresh entry. Part must be 0.
        mutationMap.insert(std::make_pair(id, val_input));
        if (part != 0) {
            TraceEvent(SevError, "FastRestore").detail("FirstPartNotZero", part).detail("KeyInput", getHexString(key_input));
        }
        mutationPartMap.insert(std::make_pair(id, part));
    } else { // Concatenate the val string with the same commitVersion
        mutationMap[id] =
            mutationMap[id].contents().withSuffix(val_input.contents()); // Assign the new Areana to the map's value
        // Parts must arrive consecutively; a gap or repeat indicates duplicated/garbled input.
        if (part != (mutationPartMap[id] + 1)) {
            // Check if the same range or log file has been processed more than once!
            TraceEvent(SevError, "FastRestore")
                .detail("CurrentPart1", mutationPartMap[id])
                .detail("CurrentPart2", part)
                .detail("KeyInput", getHexString(key_input))
                .detail("Hint", "Check if the same range or log file has been processed more than once");
        }
        mutationPartMap[id] = part;
        concatenated = true;
    }
    return concatenated;
}
// Parse the kv pair (version, serialized_mutation), which are the results parsed from log file, into
// (version, <K, V, mutationType>) pair;
// Put the parsed versioned mutations into *pkvOps.
//
// Input key: [commitVersion_of_the_mutation_batch:uint64_t];
// Input value: [includeVersion:uint64_t][val_length:uint32_t][encoded_list_of_mutations], where
// includeVersion is the serialized version in the batch commit. It is not the commitVersion in Input key.
//
// val_length is always equal to (val.size() - 12); otherwise,
// we may not get the entire mutation list for the version encoded_list_of_mutations:
// [mutation1][mutation2]...[mutationk], where
// a mutation is encoded as [type:uint32_t][keyLength:uint32_t][valueLength:uint32_t][keyContent][valueContent]
void _parseSerializedMutation(std::map<LoadingParam, VersionedMutationsMap>::iterator kvOpsIter,
                              SerializedMutationListMap* pmutationMap,
                              std::map<LoadingParam, MutationsVec>::iterator samplesIter, const RestoreAsset& asset) {
    VersionedMutationsMap& kvOps = kvOpsIter->second;
    MutationsVec& samples = samplesIter->second;
    SerializedMutationListMap& mutationMap = *pmutationMap;
    for (auto& m : mutationMap) {
        StringRef k = m.first.contents();
        StringRef val = m.second.contents();
        BackupStringRefReader kReader(k, restore_corrupted_data());
        uint64_t commitVersion = kReader.consume<uint64_t>(); // Consume little Endian data
        // We have already filter the commit not in [beginVersion, endVersion) when we concatenate kv pair in log file
        ASSERT_WE_THINK(asset.isInVersionRange(commitVersion));
        kvOps.insert(std::make_pair(commitVersion, MutationsVec()));
        BackupStringRefReader vReader(val, restore_corrupted_data());
        vReader.consume<uint64_t>(); // Consume the includeVersion
        // TODO(xumengpanda): verify the protocol version is compatible and raise error if needed
        // Parse little endian value, confirmed it is correct!
        uint32_t val_length_decoded = vReader.consume<uint32_t>();
        ASSERT(val_length_decoded == val.size() - sizeof(uint64_t) - sizeof(uint32_t));
        while (1) {
            // stop when reach the end of the string
            if (vReader.eof()) { //|| *reader.rptr == 0xFF
                break;
            }
            // Decode one [type][kLen][vLen][key][value] record.
            uint32_t type = vReader.consume<uint32_t>();
            uint32_t kLen = vReader.consume<uint32_t>();
            uint32_t vLen = vReader.consume<uint32_t>();
            const uint8_t* k = vReader.consume(kLen);
            const uint8_t* v = vReader.consume(vLen);
            MutationRef mutation((MutationRef::Type)type, KeyRef(k, kLen), KeyRef(v, vLen));
            // Should this mutation be skipped?
            // Skip mutations that fall entirely outside the asset's key range.
            if (mutation.param1 >= asset.range.end ||
                (isRangeMutation(mutation) && mutation.param2 < asset.range.begin) ||
                (!isRangeMutation(mutation) && mutation.param1 < asset.range.begin)) {
                continue;
            }
            // Only apply mutation within the asset.range
            // Clip range mutations to the asset's key range boundaries.
            if (isRangeMutation(mutation)) {
                mutation.param1 = mutation.param1 >= asset.range.begin ? mutation.param1 : asset.range.begin;
                mutation.param2 = mutation.param2 < asset.range.end ? mutation.param2 : asset.range.end;
            }
            TraceEvent(SevFRMutationInfo, "FastRestore_VerboseDebug")
                .detail("CommitVersion", commitVersion)
                .detail("ParsedMutation", mutation.toString());
            kvOps[commitVersion].push_back_deep(kvOps[commitVersion].arena(), mutation);
            // Sampling (FASTRESTORE_SAMPLING_PERCENT%) data
            if (deterministicRandom()->random01() * 100 < SERVER_KNOBS->FASTRESTORE_SAMPLING_PERCENT) {
                samples.push_back_deep(samples.arena(), mutation);
            }
            // NOTE(review): kLen/vLen are unsigned, so the `>= 0` halves of these checks are
            // tautologies; the meaningful check is the upper bound against val.size().
            ASSERT_WE_THINK(kLen >= 0 && kLen < val.size());
            ASSERT_WE_THINK(vLen >= 0 && vLen < val.size());
        }
    }
}
// Parsing the data blocks in a range file.
// Decode the [asset.offset, asset.offset+asset.len) block of the range file, keep only the
// KV pairs inside asset.range, and record each as a SetValue mutation at `version` in kvOps
// (with probabilistic sampling into sampleMutations).
ACTOR static Future<Void> _parseRangeFileToMutationsOnLoader(
    std::map<LoadingParam, VersionedMutationsMap>::iterator kvOpsIter,
    std::map<LoadingParam, MutationsVec>::iterator samplesIter, Reference<IBackupContainer> bc, Version version,
    RestoreAsset asset) {
    state VersionedMutationsMap& kvOps = kvOpsIter->second;
    state MutationsVec& sampleMutations = samplesIter->second;

    TraceEvent("FastRestoreDecodedRangeFile")
        .detail("Filename", asset.filename)
        .detail("Version", version)
        .detail("BeginVersion", asset.beginVersion)
        .detail("EndVersion", asset.endVersion);
    // Sanity check the range file is within the restored version range
    ASSERT_WE_THINK(version >= asset.beginVersion && version < asset.endVersion);

    // The set of key value version is rangeFile.version. the key-value set in the same range file has the same version
    Reference<IAsyncFile> inFile = wait(bc->readFile(asset.filename));
    Standalone<VectorRef<KeyValueRef>> blockData =
        wait(parallelFileRestore::decodeRangeFileBlock(inFile, asset.offset, asset.len));
    TraceEvent("FastRestore")
        .detail("DecodedRangeFile", asset.filename)
        .detail("DataSize", blockData.contents().size());

    // First and last key are the range for this file
    KeyRange fileRange = KeyRangeRef(blockData.front().key, blockData.back().key);

    // If fileRange doesn't intersect restore range then we're done.
    if (!fileRange.intersects(asset.range)) {
        return Void();
    }

    // We know the file range intersects the restore range but there could still be keys outside the restore range.
    // Find the subvector of kv pairs that intersect the restore range.
    // Note that the first and last keys are just the range endpoints for this file.
    // They are metadata, not the real data.
    int rangeStart = 1;
    int rangeEnd = blockData.size() - 1; // The rangeStart and rangeEnd is [,)

    // Slide start from begining, stop if something in range is found
    // Move rangeStart and rangeEnd until they is within restoreRange
    while (rangeStart < rangeEnd && !asset.range.contains(blockData[rangeStart].key)) {
        ++rangeStart;
    }
    // Side end from back, stop if something at (rangeEnd-1) is found in range
    while (rangeEnd > rangeStart && !asset.range.contains(blockData[rangeEnd - 1].key)) {
        --rangeEnd;
    }

    // Now data only contains the kv mutation within restoreRange
    VectorRef<KeyValueRef> data = blockData.slice(rangeStart, rangeEnd);
    int start = 0;
    int end = data.size();
    // Convert KV in data into mutations in kvOps
    for (int i = start; i < end; ++i) {
        // NOTE: The KV pairs in range files are the real KV pairs in original DB.
        // Should NOT add prefix or remove surfix for the backup data!
        MutationRef m(MutationRef::Type::SetValue, data[i].key,
                      data[i].value); // ASSUME: all operation in range file is set.
        // We cache all kv operations into kvOps, and apply all kv operations later in one place
        kvOps.insert(std::make_pair(version, MutationsVec())); // no-op if `version` is already present
        TraceEvent(SevFRMutationInfo, "FastRestore_VerboseDebug")
            .detail("CommitVersion", version)
            .detail("ParsedMutationKV", m.toString());
        ASSERT_WE_THINK(kvOps.find(version) != kvOps.end());
        kvOps[version].push_back_deep(kvOps[version].arena(), m);
        // Sampling (FASTRESTORE_SAMPLING_PERCENT%) data
        if (deterministicRandom()->random01() * 100 < SERVER_KNOBS->FASTRESTORE_SAMPLING_PERCENT) {
            sampleMutations.push_back_deep(sampleMutations.arena(), m);
        }
    }
    return Void();
}
// Parse data blocks in a log file into a vector of <string, string> pairs. Each pair.second contains the mutations at a
// version encoded in pair.first Step 1: decodeLogFileBlock into <string, string> pairs Step 2: Concatenate the
// pair.second of pairs with the same pair.first.
// Blocks of the same file may be decoded concurrently, but their contents are folded into
// pMutationMap strictly in file-offset order, serialized via pProcessedFileOffset.
ACTOR static Future<Void> _parseLogFileToMutationsOnLoader(NotifiedVersion* pProcessedFileOffset,
                                                           SerializedMutationListMap* pMutationMap,
                                                           SerializedMutationPartMap* pMutationPartMap,
                                                           Reference<IBackupContainer> bc, RestoreAsset asset) {
    Reference<IAsyncFile> inFile = wait(bc->readFile(asset.filename));
    // decodeLogFileBlock() must read block by block!
    state Standalone<VectorRef<KeyValueRef>> data =
        wait(parallelFileRestore::decodeLogFileBlock(inFile, asset.offset, asset.len));
    TraceEvent("FastRestore")
        .detail("DecodedLogFile", asset.filename)
        .detail("Offset", asset.offset)
        .detail("Length", asset.len)
        .detail("DataSize", data.contents().size());
    // Ensure data blocks in the same file are processed in order
    wait(pProcessedFileOffset->whenAtLeast(asset.offset));
    // Only the actor whose offset exactly matches the processed position does the work;
    // duplicated blocks (offset already passed) are silently skipped.
    if (pProcessedFileOffset->get() == asset.offset) {
        int start = 0;
        int end = data.size();
        int numConcatenated = 0;
        for (int i = start; i < end; ++i) {
            // Key k = data[i].key.withPrefix(mutationLogPrefix);
            // ValueRef v = data[i].value;
            // Concatenate the backuped param1 and param2 (KV) at the same version.
            bool concatenated =
                concatenateBackupMutationForLogFile(pMutationMap, pMutationPartMap, data[i].key, data[i].value, asset);
            numConcatenated += (concatenated ? 1 : 0);
        }
        // Advance the processed offset so the next block's actor can proceed.
        pProcessedFileOffset->set(asset.offset + asset.len);
    }
    return Void();
}