2019-04-25 06:12:37 +08:00
|
|
|
/*
|
2019-10-03 05:47:09 +08:00
|
|
|
* BackupWorker.actor.cpp
|
2019-04-25 06:12:37 +08:00
|
|
|
*
|
|
|
|
* This source file is part of the FoundationDB open source project
|
|
|
|
*
|
|
|
|
* Copyright 2013-2019 Apple Inc. and the FoundationDB project authors
|
|
|
|
*
|
|
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
* you may not use this file except in compliance with the License.
|
|
|
|
* You may obtain a copy of the License at
|
|
|
|
*
|
|
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
*
|
|
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
* See the License for the specific language governing permissions and
|
|
|
|
* limitations under the License.
|
|
|
|
*/
|
|
|
|
|
2020-01-22 08:57:30 +08:00
|
|
|
#include "fdbclient/BackupAgent.actor.h"
|
2019-09-10 01:21:16 +08:00
|
|
|
#include "fdbclient/BackupContainer.h"
|
2020-01-08 02:27:52 +08:00
|
|
|
#include "fdbclient/DatabaseContext.h"
|
|
|
|
#include "fdbclient/MasterProxyInterface.h"
|
2019-05-24 07:06:23 +08:00
|
|
|
#include "fdbclient/SystemData.h"
|
2019-04-25 06:12:37 +08:00
|
|
|
#include "fdbserver/BackupInterface.h"
|
2020-01-22 08:57:30 +08:00
|
|
|
#include "fdbserver/BackupProgress.actor.h"
|
2019-09-10 01:21:16 +08:00
|
|
|
#include "fdbserver/LogProtocolMessage.h"
|
2019-04-25 06:12:37 +08:00
|
|
|
#include "fdbserver/LogSystem.h"
|
|
|
|
#include "fdbserver/ServerDBInfo.h"
|
|
|
|
#include "fdbserver/WaitFailure.h"
|
2019-05-24 07:06:23 +08:00
|
|
|
#include "fdbserver/WorkerInterface.actor.h"
|
2019-04-25 06:12:37 +08:00
|
|
|
#include "flow/Error.h"
|
2019-12-04 08:05:12 +08:00
|
|
|
|
2019-04-25 06:12:37 +08:00
|
|
|
#include "flow/actorcompiler.h" // This must be the last #include.
|
|
|
|
|
2019-09-10 01:21:16 +08:00
|
|
|
struct VersionedMessage {
|
|
|
|
LogMessageVersion version;
|
|
|
|
StringRef message;
|
2019-11-13 08:44:59 +08:00
|
|
|
VectorRef<Tag> tags;
|
2019-09-17 06:56:23 +08:00
|
|
|
Arena arena; // Keep a reference to the memory containing the message
|
2019-09-10 01:21:16 +08:00
|
|
|
|
2019-11-13 08:44:59 +08:00
|
|
|
VersionedMessage(LogMessageVersion v, StringRef m, const VectorRef<Tag>& t, const Arena& a)
|
2019-09-17 06:56:23 +08:00
|
|
|
: version(v), message(m), tags(t), arena(a) {}
|
2019-09-10 01:21:16 +08:00
|
|
|
const Version getVersion() const { return version.version; }
|
|
|
|
const uint32_t getSubVersion() const { return version.sub; }
|
2020-02-13 02:02:27 +08:00
|
|
|
|
|
|
|
// Returns true if the message is a mutation that should be backuped, i.e.,
|
|
|
|
// either key is not in system key space or is not a metadataVersionKey.
|
|
|
|
bool isBackupMessage(MutationRef* m) const {
|
|
|
|
for (Tag tag : tags) {
|
|
|
|
if (tag.locality == tagLocalitySpecial || tag.locality == tagLocalityTxs) {
|
|
|
|
return false; // skip Txs mutations
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-02-13 08:57:56 +08:00
|
|
|
ArenaReader reader(arena, message, AssumeVersion(currentProtocolVersion));
|
2020-02-13 02:02:27 +08:00
|
|
|
|
|
|
|
// Return false for LogProtocolMessage.
|
|
|
|
if (LogProtocolMessage::isNextIn(reader)) return false;
|
|
|
|
|
|
|
|
reader >> *m;
|
|
|
|
return normalKeys.contains(m->param1) || m->param1 == metadataVersionKey;
|
|
|
|
}
|
2019-09-10 01:21:16 +08:00
|
|
|
};
|
|
|
|
|
2019-04-25 06:12:37 +08:00
|
|
|
struct BackupData {
|
2019-05-24 07:06:23 +08:00
|
|
|
const UID myId;
|
2019-07-24 02:45:04 +08:00
|
|
|
const Tag tag; // LogRouter tag for this worker, i.e., (-2, i)
|
2019-05-24 07:06:23 +08:00
|
|
|
const Version startVersion;
|
2019-09-22 12:55:19 +08:00
|
|
|
const Optional<Version> endVersion; // old epoch's end version (inclusive), or empty for current epoch
|
2019-07-30 01:37:42 +08:00
|
|
|
const LogEpoch recruitedEpoch;
|
|
|
|
const LogEpoch backupEpoch;
|
2019-04-25 06:12:37 +08:00
|
|
|
Version minKnownCommittedVersion;
|
2020-01-25 03:01:58 +08:00
|
|
|
Version savedVersion;
|
2019-04-25 06:12:37 +08:00
|
|
|
AsyncVar<Reference<ILogSystem>> logSystem;
|
2019-05-24 07:06:23 +08:00
|
|
|
Database cx;
|
2019-09-10 01:21:16 +08:00
|
|
|
std::vector<VersionedMessage> messages;
|
2019-09-22 12:55:19 +08:00
|
|
|
AsyncVar<bool> pullFinished;
|
2019-04-25 06:12:37 +08:00
|
|
|
|
2020-01-31 00:35:02 +08:00
|
|
|
struct PerBackupInfo {
|
|
|
|
PerBackupInfo() = default;
|
2020-01-31 13:21:05 +08:00
|
|
|
PerBackupInfo(BackupData* data, Version v) : self(data), startVersion(v) {}
|
2020-01-31 00:35:02 +08:00
|
|
|
|
2020-02-11 05:44:08 +08:00
|
|
|
bool isRunning() {
|
|
|
|
return container.isReady() && ranges.isReady() && !stopped;
|
2020-01-31 00:35:02 +08:00
|
|
|
}
|
|
|
|
|
2020-01-31 13:21:05 +08:00
|
|
|
BackupData* self = nullptr;
|
2020-01-31 00:35:02 +08:00
|
|
|
Version startVersion = invalidVersion;
|
2020-02-05 06:30:32 +08:00
|
|
|
Version lastSavedVersion = invalidVersion;
|
2020-02-15 11:32:11 +08:00
|
|
|
Future<Optional<Reference<IBackupContainer>>> container;
|
2020-01-31 00:35:02 +08:00
|
|
|
Future<Optional<std::vector<KeyRange>>> ranges; // Key ranges of this backup
|
|
|
|
bool allWorkerStarted = false; // Only worker with Tag(-2,0) uses & sets this field
|
|
|
|
bool stopped = false; // Is the backup stopped?
|
|
|
|
};
|
|
|
|
|
|
|
|
std::map<UID, PerBackupInfo> backups; // Backup UID to infos
|
|
|
|
AsyncTrigger changedTrigger;
|
2020-01-22 08:57:30 +08:00
|
|
|
|
2019-07-24 02:45:04 +08:00
|
|
|
CounterCollection cc;
|
|
|
|
Future<Void> logger;
|
|
|
|
|
2020-01-25 03:01:58 +08:00
|
|
|
explicit BackupData(UID id, Reference<AsyncVar<ServerDBInfo>> db, const InitializeBackupRequest& req)
|
|
|
|
: myId(id), tag(req.routerTag), startVersion(req.startVersion), endVersion(req.endVersion),
|
2019-07-30 01:37:42 +08:00
|
|
|
recruitedEpoch(req.recruitedEpoch), backupEpoch(req.backupEpoch), minKnownCommittedVersion(invalidVersion),
|
2020-01-25 03:01:58 +08:00
|
|
|
savedVersion(invalidVersion), cc("BackupWorker", myId.toString()) {
|
2019-07-24 05:32:55 +08:00
|
|
|
cx = openDBOnServer(db, TaskPriority::DefaultEndpoint, true, true);
|
2019-09-22 12:55:19 +08:00
|
|
|
pullFinished.set(false);
|
2019-07-24 02:45:04 +08:00
|
|
|
|
2019-08-12 11:15:50 +08:00
|
|
|
specialCounter(cc, "SavedVersion", [this]() { return this->savedVersion; });
|
2019-07-24 02:45:04 +08:00
|
|
|
specialCounter(cc, "MinKnownCommittedVersion", [this]() { return this->minKnownCommittedVersion; });
|
2019-09-23 04:23:27 +08:00
|
|
|
specialCounter(cc, "MsgQ", [this]() { return this->messages.size(); });
|
2019-07-24 02:45:04 +08:00
|
|
|
logger = traceCounters("BackupWorkerMetrics", myId, SERVER_KNOBS->WORKER_LOGGING_INTERVAL, &cc,
|
|
|
|
"BackupWorkerMetrics");
|
2019-05-24 07:06:23 +08:00
|
|
|
}
|
2019-08-15 05:19:50 +08:00
|
|
|
|
2020-01-31 13:21:05 +08:00
|
|
|
// Inserts a backup's single range into rangeMap.
|
2020-02-06 03:45:16 +08:00
|
|
|
template <class T>
|
|
|
|
void insertRange(KeyRangeMap<std::set<T>>& keyRangeMap, KeyRangeRef range, T value) {
|
|
|
|
for (auto& logRange : keyRangeMap.modify(range)) {
|
|
|
|
logRange->value().insert(value);
|
2020-01-31 13:21:05 +08:00
|
|
|
}
|
2020-02-06 03:45:16 +08:00
|
|
|
for (auto& logRange : keyRangeMap.modify(singleKeyRange(metadataVersionKey))) {
|
|
|
|
logRange->value().insert(value);
|
2020-01-31 13:21:05 +08:00
|
|
|
}
|
|
|
|
TraceEvent("BackupWorkerInsertRange", myId)
|
2020-02-06 03:45:16 +08:00
|
|
|
.detail("Value", value)
|
2020-01-31 13:21:05 +08:00
|
|
|
.detail("Begin", range.begin)
|
|
|
|
.detail("End", range.end);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Inserts a backup's ranges into rangeMap.
|
2020-02-06 03:45:16 +08:00
|
|
|
template <class T>
|
|
|
|
void insertRanges(KeyRangeMap<std::set<T>>& keyRangeMap, const Optional<std::vector<KeyRange>>& ranges, T value) {
|
2020-01-31 13:21:05 +08:00
|
|
|
if (!ranges.present() || ranges.get().empty()) {
|
|
|
|
// insert full ranges of normal keys
|
2020-02-06 03:45:16 +08:00
|
|
|
return insertRange(keyRangeMap, normalKeys, value);
|
2020-01-31 13:21:05 +08:00
|
|
|
}
|
|
|
|
for (const auto& range : ranges.get()) {
|
2020-02-06 03:45:16 +08:00
|
|
|
insertRange(keyRangeMap, range, value);
|
2020-01-31 13:21:05 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-08-15 05:19:50 +08:00
|
|
|
void pop() {
|
2019-08-15 08:00:20 +08:00
|
|
|
const LogEpoch oldest = logSystem.get()->getOldestBackupEpoch();
|
|
|
|
if (backupEpoch > oldest) {
|
|
|
|
// Defer pop if old epoch hasn't finished popping yet.
|
|
|
|
TraceEvent("BackupWorkerPopDeferred", myId)
|
|
|
|
.suppressFor(1.0)
|
|
|
|
.detail("BackupEpoch", backupEpoch)
|
|
|
|
.detail("OldestEpoch", oldest)
|
|
|
|
.detail("Version", savedVersion);
|
|
|
|
return;
|
|
|
|
}
|
2019-08-15 05:19:50 +08:00
|
|
|
const Tag popTag = logSystem.get()->getPseudoPopTag(tag, ProcessClass::BackupClass);
|
|
|
|
logSystem.get()->pop(savedVersion, popTag);
|
|
|
|
}
|
2019-09-23 04:23:27 +08:00
|
|
|
|
|
|
|
void eraseMessagesAfterEndVersion() {
|
|
|
|
ASSERT(endVersion.present());
|
|
|
|
const Version ver = endVersion.get();
|
|
|
|
while (!messages.empty()) {
|
|
|
|
if (messages.back().getVersion() > ver) {
|
|
|
|
messages.pop_back();
|
|
|
|
} else {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2020-01-31 00:35:02 +08:00
|
|
|
|
|
|
|
// Give a list of current active backups, compare with current list and decide
|
|
|
|
// to start new backups and stop ones not in the active state.
|
|
|
|
void onBackupChanges(const std::vector<std::pair<UID, Version>>& uidVersions) {
|
|
|
|
std::set<UID> stopList;
|
|
|
|
for (auto it : backups) {
|
|
|
|
stopList.insert(it.first);
|
|
|
|
}
|
|
|
|
|
|
|
|
bool modified = false;
|
|
|
|
for (const auto uidVersion : uidVersions) {
|
|
|
|
const UID uid = uidVersion.first;
|
|
|
|
|
|
|
|
auto it = backups.find(uid);
|
|
|
|
if (it == backups.end()) {
|
|
|
|
modified = true;
|
2020-01-31 13:21:05 +08:00
|
|
|
auto inserted = backups.emplace(uid, BackupData::PerBackupInfo(this, uidVersion.second));
|
2020-01-31 00:35:02 +08:00
|
|
|
|
|
|
|
// Open the container and get key ranges
|
|
|
|
BackupConfig config(uid);
|
2020-02-15 11:32:11 +08:00
|
|
|
inserted.first->second.container = config.backupContainer().get(cx);
|
2020-01-31 00:35:02 +08:00
|
|
|
inserted.first->second.ranges = config.backupRanges().get(cx);
|
|
|
|
} else {
|
|
|
|
stopList.erase(uid);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
for (UID uid : stopList) {
|
|
|
|
auto it = backups.find(uid);
|
|
|
|
ASSERT(it != backups.end());
|
|
|
|
it->second.stopped = true;
|
|
|
|
modified = true;
|
|
|
|
}
|
|
|
|
if (modified) changedTrigger.trigger();
|
|
|
|
}
|
2019-04-25 06:12:37 +08:00
|
|
|
};
|
|
|
|
|
2020-01-10 02:15:42 +08:00
|
|
|
// Monitors "backupStartedKey". If "started" is true, wait until the key is set;
// otherwise, wait until the key is cleared.
// While waiting, every observed value of the key is decoded and forwarded to
// self->onBackupChanges() so the worker's backup set stays current.
ACTOR Future<Void> monitorBackupStartedKeyChanges(BackupData* self, bool started) {
	loop {
		state ReadYourWritesTransaction tr(self->cx);

		loop {
			try {
				tr.setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
				tr.setOption(FDBTransactionOptions::LOCK_AWARE);
				Optional<Value> value = wait(tr.get(backupStartedKey));
				std::vector<std::pair<UID, Version>> uidVersions;
				if (value.present()) {
					uidVersions = decodeBackupStartedValue(value.get());
					// Log each (backup UID, start version) pair in one event.
					TraceEvent e("BackupWorkerGotStartKey", self->myId);
					int i = 1;
					for (auto uidVersion : uidVersions) {
						e.detail(format("BackupID%d", i), uidVersion.first)
						    .detail(format("Version%d", i), uidVersion.second);
						i++;
					}
					self->onBackupChanges(uidVersions);
					if (started) return Void();
				} else {
					TraceEvent("BackupWorkerEmptyStartKey", self->myId);
					// Empty uidVersions: marks all known backups as stopped.
					self->onBackupChanges(uidVersions);

					if (!started) {
						return Void();
					}
				}

				// Desired state not reached yet: watch the key and retry with a
				// fresh transaction when it changes.
				state Future<Void> watchFuture = tr.watch(backupStartedKey);
				wait(tr.commit());
				wait(watchFuture);
				break;
			} catch (Error& e) {
				wait(tr.onError(e));
			}
		}
	}
}
|
|
|
|
|
2020-01-22 08:57:30 +08:00
|
|
|
// Monitor all backup worker in the recruited epoch has been started. If so,
|
|
|
|
// set the "allWorkerStarted" key of the BackupConfig to true, which in turn
|
2020-01-31 00:35:02 +08:00
|
|
|
// unblocks StartFullBackupTaskFunc::_execute. Note only worker with Tag (-2,0)
|
|
|
|
// runs this actor so that the key is set by one process.
|
2020-01-22 08:57:30 +08:00
|
|
|
ACTOR Future<Void> monitorAllWorkerStarted(BackupData* self) {
|
|
|
|
loop {
|
2020-01-31 00:35:02 +08:00
|
|
|
wait(delay(SERVER_KNOBS->WORKER_LOGGING_INTERVAL / 2.0) || self->changedTrigger.onTrigger());
|
|
|
|
if (self->backups.empty()) {
|
2020-01-25 03:01:58 +08:00
|
|
|
continue;
|
2020-01-22 08:57:30 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// check all workers have started by checking their progress is larger
|
|
|
|
// than the backup's start version.
|
|
|
|
state Reference<BackupProgress> progress(new BackupProgress(self->myId, {}));
|
|
|
|
wait(getBackupProgress(self->cx, self->myId, progress));
|
|
|
|
std::map<Tag, Version> tagVersions = progress->getEpochStatus(self->recruitedEpoch);
|
|
|
|
|
|
|
|
state std::vector<UID> ready;
|
|
|
|
if (tagVersions.size() == self->logSystem.get()->getLogRouterTags()) {
|
|
|
|
// Check every version is larger than backup's startVersion
|
2020-01-31 00:35:02 +08:00
|
|
|
for (auto& uidInfo : self->backups) {
|
|
|
|
if (uidInfo.second.allWorkerStarted) continue;
|
2020-01-22 08:57:30 +08:00
|
|
|
bool saved = true;
|
|
|
|
for (const std::pair<Tag, Version> tv : tagVersions) {
|
2020-01-31 00:35:02 +08:00
|
|
|
if (tv.second < uidInfo.second.startVersion) {
|
2020-01-22 08:57:30 +08:00
|
|
|
saved = false;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (saved) {
|
2020-01-31 00:35:02 +08:00
|
|
|
ready.push_back(uidInfo.first);
|
|
|
|
uidInfo.second.allWorkerStarted = true;
|
2020-01-22 08:57:30 +08:00
|
|
|
}
|
|
|
|
}
|
2020-01-31 00:35:02 +08:00
|
|
|
if (ready.empty()) continue;
|
2020-01-22 08:57:30 +08:00
|
|
|
|
|
|
|
// Set "allWorkerStarted" key for ready backups
|
|
|
|
loop {
|
|
|
|
state Reference<ReadYourWritesTransaction> tr(new ReadYourWritesTransaction(self->cx));
|
|
|
|
try {
|
|
|
|
tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
|
|
|
|
tr->setOption(FDBTransactionOptions::LOCK_AWARE);
|
|
|
|
|
|
|
|
state std::vector<Future<Optional<Value>>> readyValues;
|
|
|
|
state std::vector<BackupConfig> configs;
|
|
|
|
for (UID uid : ready) {
|
|
|
|
configs.emplace_back(uid);
|
|
|
|
readyValues.push_back(tr->get(configs.back().allWorkerStarted().key));
|
|
|
|
}
|
|
|
|
wait(waitForAll(readyValues));
|
|
|
|
for (int i = 0; i < readyValues.size(); i++) {
|
|
|
|
if (!readyValues[i].get().present()) {
|
|
|
|
configs[i].allWorkerStarted().set(tr, true);
|
|
|
|
TraceEvent("BackupWorkerSetReady", self->myId).detail("BackupID", ready[i].toString());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
wait(tr->commit());
|
|
|
|
break;
|
|
|
|
} catch (Error& e) {
|
|
|
|
wait(tr->onError(e));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-06-01 07:14:58 +08:00
|
|
|
// Persists this worker's backup progress (backupEpoch, backupVersion, tag)
// under its backupProgressKey, retrying until the transaction commits.
ACTOR Future<Void> saveProgress(BackupData* self, Version backupVersion) {
	state Transaction tr(self->cx);
	state Key progressKey = backupProgressKeyFor(self->myId);

	loop {
		try {
			// Progress lives in the system key space and must survive locks.
			tr.setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
			tr.setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);
			tr.setOption(FDBTransactionOptions::LOCK_AWARE);

			tr.set(progressKey, backupProgressValue(WorkerBackupStatus(self->backupEpoch, backupVersion, self->tag)));
			// Conflict with any concurrent writer of this worker's progress key.
			tr.addReadConflictRange(singleKeyRange(progressKey));
			wait(tr.commit());
			return Void();
		} catch (Error& e) {
			wait(tr.onError(e));
		}
	}
}
|
|
|
|
|
2019-11-18 11:14:14 +08:00
|
|
|
// Return a block of contiguous padding bytes, growing if needed.
|
|
|
|
static Value makePadding(int size) {
|
|
|
|
static Value pad;
|
|
|
|
if (pad.size() < size) {
|
|
|
|
pad = makeString(size);
|
|
|
|
memset(mutateString(pad), '\xff', pad.size());
|
|
|
|
}
|
|
|
|
|
|
|
|
return pad.substr(0, size);
|
|
|
|
}
|
|
|
|
|
2020-02-13 02:02:27 +08:00
|
|
|
// Write a mutation to a log file. Note the mutation can be different from
// message.message for clear mutations.
// Each record is (bigEndian version, bigEndian sub, bigEndian size, payload);
// "*blockEnd" tracks the end offset of the current block and is advanced by
// blockSize when a record would cross it.
ACTOR Future<Void> addMutation(Reference<IBackupFile> logFile, VersionedMessage message, StringRef mutation,
                               int64_t* blockEnd, int blockSize) {
	// Total on-disk size of this record: header fields plus payload.
	state int bytes = sizeof(Version) + sizeof(uint32_t) + sizeof(int) + mutation.size();

	// Convert to big Endianness for version.version, version.sub, and msgSize
	// The decoder assumes 0xFF is the end, so little endian can easily be
	// mistaken as the end. In contrast, big endian for version almost guarantee
	// the first byte is not 0xFF (should always be 0x00).
	BinaryWriter wr(Unversioned());
	wr << bigEndian64(message.version.version) << bigEndian32(message.version.sub)
	   << bigEndian32(mutation.size());
	state Standalone<StringRef> header = wr.toValue();

	// Start a new block if needed
	if (logFile->size() + bytes > *blockEnd) {
		// Write padding if needed
		const int bytesLeft = *blockEnd - logFile->size();
		if (bytesLeft > 0) {
			// Fill the remainder of the block with 0xFF so the decoder treats
			// it as end-of-block.
			state Value paddingFFs = makePadding(bytesLeft);
			wait(logFile->append(paddingFFs.begin(), bytesLeft));
		}

		*blockEnd += blockSize;
		// TODO: add block header
	}

	wait(logFile->append((void*)header.begin(), header.size()));
	wait(logFile->append(mutation.begin(), mutation.size()));
	return Void();
}
|
|
|
|
|
2019-09-22 12:55:19 +08:00
|
|
|
// Saves messages in the range of [0, numMsg) to a file and then remove these
// messages. The file format is a sequence of (Version, sub#, msgSize, message).
// Note only ready backups are saved.
// One log file is opened per running backup; a key-range map routes each
// mutation to the files of the backups whose ranges cover its key.
ACTOR Future<Void> saveMutationsToFile(BackupData* self, Version popVersion, int numMsg) {
	state int blockSize = SERVER_KNOBS->BACKUP_FILE_BLOCK_BYTES;
	state std::vector<Future<Reference<IBackupFile>>> logFileFutures;
	state std::vector<Reference<IBackupFile>> logFiles;
	state std::vector<int64_t> blockEnds;
	state std::set<UID> activeUids; // active Backups' UIDs
	state KeyRangeMap<std::set<int>> keyRangeMap; // range to index in logFileFutures, logFiles, & blockEnds
	state std::vector<Standalone<StringRef>> mutations;
	state int idx;

	// Open a log file for each running backup; prune backups that are stopped
	// or whose container turned out to be absent.
	for (auto it = self->backups.begin(); it != self->backups.end();) {
		if (!it->second.isRunning()) {
			if (it->second.stopped) {
				TraceEvent("BackupWorkerRemoveStoppedContainer", self->myId).detail("BackupId", it->first);
				it = self->backups.erase(it);
			} else {
				it++;
			}
			continue;
		}
		if (!it->second.container.get().present()) {
			TraceEvent("BackupWorkerNoContainer", self->myId).detail("BackupId", it->first);
			it = self->backups.erase(it);
			continue;
		}
		const int index = logFileFutures.size();
		activeUids.insert(it->first);
		self->insertRanges(keyRangeMap, it->second.ranges.get(), index);
		if (it->second.lastSavedVersion == invalidVersion) {
			// First save for this backup: start from the first queued message.
			it->second.lastSavedVersion = self->messages[0].getVersion();
		}
		logFileFutures.push_back(it->second.container.get().get()->writeTaggedLogFile(
		    it->second.lastSavedVersion, popVersion + 1, blockSize, self->tag.id));
		it++;
	}
	if (activeUids.empty()) {
		// stop early if there is no active backups
		TraceEvent("BackupWorkerSkip", self->myId).detail("Count", numMsg);
		return Void();
	}
	keyRangeMap.coalesce(allKeys);
	wait(waitForAll(logFileFutures));

	std::transform(logFileFutures.begin(), logFileFutures.end(), std::back_inserter(logFiles),
	               [](const Future<Reference<IBackupFile>>& f) { return f.get(); });

	for (const auto& file : logFiles) {
		TraceEvent("OpenMutationFile", self->myId)
		    .detail("TagId", self->tag.id)
		    .detail("File", file->getFileName());
	}

	// One block-end cursor per file; addMutation() advances them.
	blockEnds = std::vector<int64_t>(logFiles.size(), 0);
	for (idx = 0; idx < numMsg; idx++) {
		const auto& message = self->messages[idx];
		MutationRef m;
		if (!message.isBackupMessage(&m)) continue;

		std::vector<Future<Void>> adds;
		if (m.type != MutationRef::Type::ClearRange) {
			// Point mutation: write to every file whose range covers the key.
			for (int index : keyRangeMap[m.param1]) {
				adds.push_back(addMutation(logFiles[index], message, message.message, &blockEnds[index], blockSize));
			}
		} else {
			KeyRangeRef mutationRange(m.param1, m.param2);
			KeyRangeRef intersectionRange;

			// Find intersection ranges and create mutations for sub-ranges
			for (auto range : keyRangeMap.intersectingRanges(mutationRange)) {
				const auto& subrange = range.range();
				intersectionRange = mutationRange & subrange;
				MutationRef subm(MutationRef::Type::ClearRange, intersectionRange.begin, intersectionRange.end);
				// Re-serialize the clipped clear so each backup only sees the
				// portion inside its own ranges.
				BinaryWriter wr(AssumeVersion(currentProtocolVersion));
				wr << subm;
				mutations.push_back(wr.toValue());
				for (int index : range.value()) {
					adds.push_back(
					    addMutation(logFiles[index], message, mutations.back(), &blockEnds[index], blockSize));
				}
			}
		}
		wait(waitForAll(adds));
		mutations.clear();
	}

	std::vector<Future<Void>> finished;
	std::transform(logFiles.begin(), logFiles.end(), std::back_inserter(finished),
	               [](const Reference<IBackupFile>& f) { return f->finish(); });

	wait(waitForAll(finished));

	for (const auto& file : logFiles) {
		TraceEvent("CloseMutationFile", self->myId)
		    .detail("FileSize", file->size())
		    .detail("TagId", self->tag.id)
		    .detail("File", file->getFileName());
	}
	// Files cover versions up to and including popVersion; the next save for
	// these backups resumes at popVersion + 1.
	for (const UID uid : activeUids) {
		self->backups[uid].lastSavedVersion = popVersion + 1;
	}

	return Void();
}
|
|
|
|
|
2019-09-23 04:23:27 +08:00
|
|
|
// Uploads self->messages to cloud storage and updates savedVersion.
// Runs until the old-epoch end version (if any) has been saved; for the
// current epoch it runs indefinitely.
ACTOR Future<Void> uploadData(BackupData* self) {
	state Version popVersion = invalidVersion;

	loop {
		// Done: everything up to the old epoch's end version has been saved.
		if (self->endVersion.present() && self->savedVersion >= self->endVersion.get()) {
			self->messages.clear();
			return Void();
		}

		// FIXME: knobify the delay of 10s. This delay is sensitive, as it is the
		// lag TLog might have. Changing to 20s may fail consistency check.
		state Future<Void> uploadDelay = delay(10);

		// Only messages at or below this version are considered committed.
		const Version maxPopVersion =
		    self->endVersion.present() ? self->endVersion.get() : self->minKnownCommittedVersion;
		if (self->messages.empty()) {
			// Even though messages is empty, we still want to advance popVersion.
			popVersion = std::max(popVersion, maxPopVersion);
		} else {
			state int numMsg = 0;
			for (const auto& message : self->messages) {
				if (message.getVersion() > maxPopVersion) break;
				popVersion = std::max(popVersion, message.getVersion());
				numMsg++;
			}
			if (numMsg > 0) {
				wait(saveMutationsToFile(self, popVersion, numMsg));
				self->messages.erase(self->messages.begin(), self->messages.begin() + numMsg);
			}
		}
		if (self->pullFinished.get() && self->messages.empty()) {
			// Advance popVersion to the endVersion to avoid gap between last
			// message version and the endVersion.
			popVersion = self->endVersion.get();
		}

		if (popVersion > self->savedVersion) {
			// Persist progress before popping so a restarted worker never
			// resumes past data it already released.
			wait(saveProgress(self, popVersion));
			TraceEvent("BackupWorkerSavedProgress", self->myId)
			    .detail("Tag", self->tag.toString())
			    .detail("Version", popVersion)
			    .detail("MsgQ", self->messages.size());
			self->savedVersion = std::max(popVersion, self->savedVersion);
			self->pop();
		}

		if (!self->pullFinished.get()) {
			wait(uploadDelay || self->pullFinished.onChange());
		}
	}
}
|
|
|
|
|
2019-04-25 06:12:37 +08:00
|
|
|
// Pulls data from TLog servers using LogRouter tag.
// Peeked messages are queued in self->messages for uploadData() to consume;
// returns once tagAt passes endVersion (old-epoch workers only).
ACTOR Future<Void> pullAsyncData(BackupData* self) {
	state Future<Void> logSystemChange = Void();
	state Reference<ILogSystem::IPeekCursor> r;
	// Resume from whichever is later: recruitment start or last saved progress.
	state Version tagAt = std::max(self->startVersion, self->savedVersion);

	TraceEvent("BackupWorkerPull", self->myId);
	loop {
		// Wait for more data, re-creating the peek cursor whenever the log
		// system changes.
		loop choose {
			when (wait(r ? r->getMore(TaskPriority::TLogCommit) : Never())) {
				break;
			}
			when (wait(logSystemChange)) {
				if (self->logSystem.get()) {
					r = self->logSystem.get()->peekLogRouter(self->myId, tagAt, self->tag);
				} else {
					r = Reference<ILogSystem::IPeekCursor>();
				}
				logSystemChange = self->logSystem.onChange();
			}
		}
		self->minKnownCommittedVersion = std::max(self->minKnownCommittedVersion, r->getMinKnownCommittedVersion());

		// Note we aggressively peek (uncommitted) messages, but only committed
		// messages/mutations will be flushed to disk/blob in uploadData().
		while (r->hasMessage()) {
			self->messages.emplace_back(r->version(), r->getMessage(), r->getTags(), r->arena());
			r->nextMessage();
		}

		tagAt = r->version().version;
		TraceEvent("BackupWorkerGot", self->myId).suppressFor(1.0).detail("V", tagAt);
		if (self->endVersion.present() && tagAt > self->endVersion.get()) {
			// Old epoch is fully pulled; drop anything past endVersion and
			// signal uploadData() via pullFinished.
			self->eraseMessagesAfterEndVersion();
			TraceEvent("BackupWorkerFinishPull", self->myId)
			    .detail("Tag", self->tag.toString())
			    .detail("VersionGot", tagAt)
			    .detail("EndVersion", self->endVersion.get())
			    .detail("MsgQ", self->messages.size());
			self->pullFinished.set(true);
			return Void();
		}
		wait(yield());
	}
}
|
|
|
|
|
2020-01-08 06:15:29 +08:00
|
|
|
// Alternates between a NOOP state (no backup started: keep popping at the
// latest committed version so TLogs don't accumulate data) and actively
// pulling data once backupStartedKey is set.
ACTOR Future<Void> monitorBackupKeyOrPullData(BackupData* self) {
	state Future<Void> started, pullFinished;

	loop {
		started = monitorBackupStartedKeyChanges(self, true);
		// NOOP state: no backup is running yet.
		loop {
			// GRV with min-known-committed-version flag: used only to learn a
			// safe version to pop up to.
			GetReadVersionRequest request(1, GetReadVersionRequest::PRIORITY_DEFAULT |
			                                     GetReadVersionRequest::FLAG_USE_MIN_KNOWN_COMMITTED_VERSION);

			choose {
				when(wait(started)) { break; }
				when(wait(self->cx->onMasterProxiesChanged())) {}
				when(GetReadVersionReply reply = wait(loadBalance(self->cx->getMasterProxies(false),
				                                                  &MasterProxyInterface::getConsistentReadVersion,
				                                                  request, self->cx->taskID))) {
					self->savedVersion = std::max(reply.version, self->savedVersion);
					self->minKnownCommittedVersion = std::max(reply.version, self->minKnownCommittedVersion);
					TraceEvent("BackupWorkerNoopPop", self->myId).detail("SavedVersion", self->savedVersion);
					self->pop(); // Pop while the worker is in this NOOP state.
					wait(delay(SERVER_KNOBS->BACKUP_NOOP_POP_DELAY, self->cx->taskID));
				}
			}
		}

		// Active state: pull until either the backup is stopped (key cleared)
		// or pulling finishes (old-epoch worker reached its end version).
		Future<Void> stopped = monitorBackupStartedKeyChanges(self, false);
		pullFinished = pullAsyncData(self);
		wait(stopped || pullFinished);
		if (pullFinished.isReady()) return Void(); // backup is done for some old epoch.
		pullFinished = Future<Void>(); // cancels pullAsyncData()
		TraceEvent("BackupWorkerPaused", self->myId);
	}
}
|
|
|
|
|
2019-06-01 07:14:58 +08:00
|
|
|
// Watches ServerDBInfo and throws worker_removed() once a newer recovery has
// superseded the epoch this worker was recruited in.
ACTOR Future<Void> checkRemoved(Reference<AsyncVar<ServerDBInfo>> db, LogEpoch recoveryCount,
                                BackupData* self) {
	loop {
		const bool displaced = db->get().recoveryCount > recoveryCount &&
		                       db->get().recoveryState != RecoveryState::UNINITIALIZED;
		if (!displaced) {
			wait(db->onChange());
			continue;
		}
		TraceEvent("BackupWorkerDisplaced", self->myId)
		    .detail("RecoveryCount", recoveryCount)
		    .detail("SavedVersion", self->savedVersion)
		    .detail("BackupWorkers", describe(db->get().logSystemConfig.tLogs))
		    .detail("DBRecoveryCount", db->get().recoveryCount)
		    .detail("RecoveryState", (int)db->get().recoveryState);
		throw worker_removed();
	}
}
|
|
|
|
|
2019-05-21 05:22:31 +08:00
|
|
|
// Main entry point for a backup worker. Sets up the worker's state and helper
// actors (pull/upload, displacement check, failure monitor), then reacts to
// ServerDBInfo changes until either the upload work finishes or an error is
// raised by one of the helpers.
//
// interf: this worker's interface (used for its id and failure detection).
// req:    recruitment parameters (router tag, start/end versions, epochs).
// db:     async view of the cluster's ServerDBInfo.
// Returns Void() on clean completion; rethrows any error other than
// actor_cancelled / worker_removed, which are treated as normal shutdown.
ACTOR Future<Void> backupWorker(BackupInterface interf, InitializeBackupRequest req,
                                Reference<AsyncVar<ServerDBInfo>> db) {
	state BackupData self(interf.id(), db, req);
	state PromiseStream<Future<Void>> addActor;
	// Collects helper actors; completes (with an error) if any of them throws.
	state Future<Void> error = actorCollection(addActor.getFuture());
	// Starts ready so the first loop iteration processes the current DB info.
	state Future<Void> dbInfoChange = Void();

	TraceEvent("BackupWorkerStart", self.myId)
	    .detail("Tag", req.routerTag.toString())
	    .detail("StartVersion", req.startVersion)
	    .detail("EndVersion", req.endVersion.present() ? req.endVersion.get() : -1)
	    .detail("LogEpoch", req.recruitedEpoch)
	    .detail("BackupEpoch", req.backupEpoch);
	try {
		addActor.send(monitorBackupKeyOrPullData(&self));
		addActor.send(checkRemoved(db, req.recruitedEpoch, &self));
		addActor.send(waitFailureServer(interf.waitFailure.getFuture()));
		// Only one worker per epoch (current epoch, tag 0) monitors whether all
		// backup workers have started, to avoid duplicate monitoring.
		if (req.recruitedEpoch == req.backupEpoch && req.routerTag.id == 0) {
			addActor.send(monitorAllWorkerStarted(&self));
		}

		state Future<Void> done = uploadData(&self);

		loop choose {
			when(wait(dbInfoChange)) {
				dbInfoChange = db->onChange();
				Reference<ILogSystem> ls = ILogSystem::fromServerDBInfo(self.myId, db->get(), true);
				bool hasPseudoLocality = ls.isValid() && ls->hasPseudoLocality(tagLocalityBackup);
				LogEpoch oldestBackupEpoch = 0;
				if (hasPseudoLocality) {
					// Adopt the new log system and pop already-saved data from it.
					self.logSystem.set(ls);
					self.pop();
					oldestBackupEpoch = ls->getOldestBackupEpoch();
				}
				TraceEvent("BackupWorkerLogSystem", self.myId)
				    .detail("HasBackupLocality", hasPseudoLocality)
				    .detail("OldestBackupEpoch", oldestBackupEpoch)
				    .detail("Tag", self.tag.toString());
			}
			when(wait(done)) {
				TraceEvent("BackupWorkerDone", self.myId).detail("BackupEpoch", self.backupEpoch);
				// Notify master so that this worker can be removed from log system, then this
				// worker (for an old epoch's unfinished work) can safely exit.
				wait(brokenPromiseToNever(db->get().master.notifyBackupWorkerDone.getReply(
				    BackupWorkerDoneRequest(self.myId, self.backupEpoch))));
				break;
			}
			// Surfaces any failure from the helper actors as a thrown Error.
			when(wait(error)) {}
		}
	} catch (Error& e) {
		TraceEvent("BackupWorkerTerminated", self.myId).error(e, true);
		// Cancellation and displacement are expected shutdown paths; anything
		// else is an unexpected failure and is propagated to the caller.
		if (e.code() != error_code_actor_cancelled && e.code() != error_code_worker_removed) {
			throw;
		}
	}
	return Void();
}
|
|
|
|
|