/*
 * LogRouter.actor.cpp
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "fdbclient/Atomic.h"
#include "fdbclient/NativeAPI.actor.h"
#include "fdbclient/SystemData.h"
#include "fdbrpc/Stats.h"
#include "fdbserver/ApplyMetadataMutation.h"
#include "fdbserver/Knobs.h"
#include "fdbserver/LogSystem.h"
#include "fdbserver/WaitFailure.h"
#include "fdbserver/WorkerInterface.actor.h"
#include "fdbserver/RecoveryState.h"
#include "fdbserver/ServerDBInfo.h"
#include "fdbserver/TLogInterface.h"
#include "flow/ActorCollection.h"
#include "flow/Arena.h"
#include "flow/Histogram.h"
#include "flow/TDMetric.actor.h"
#include "flow/network.h"
#include "flow/DebugTrace.h"
#include "flow/actorcompiler.h" // This must be the last #include.

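// LogRouterData is the state of a single log router (LR): the message blocks it
// has pulled from the primary or satellite tLogs, per-tag pop bookkeeping for the
// remote tLogs that peek from it, and the counters it reports.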
struct LogRouterData {
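    // Per-tag bookkeeping: the messages buffered for one remote tLog tag and the
    // latest version that tag has popped.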
    struct TagData : NonCopyable, public ReferenceCounted<TagData> {
        std::deque<std::pair<Version, LengthPrefixedStringRef>> version_messages;
        Version popped;
        Version durableKnownCommittedVersion;
        Tag tag;

        TagData(Tag tag, Version popped, Version durableKnownCommittedVersion)
          : popped(popped), durableKnownCommittedVersion(durableKnownCommittedVersion), tag(tag) {}

        TagData(TagData&& r) noexcept
          : version_messages(std::move(r.version_messages)), popped(r.popped),
            durableKnownCommittedVersion(r.durableKnownCommittedVersion), tag(r.tag) {}
        void operator=(TagData&& r) noexcept {
            version_messages = std::move(r.version_messages);
            tag = r.tag;
            popped = r.popped;
            durableKnownCommittedVersion = r.durableKnownCommittedVersion;
        }

        // Erase messages not needed to update *from* versions >= before (thus, messages with toversion <= before)
        ACTOR Future<Void> eraseMessagesBefore(TagData* self,
                                               Version before,
                                               LogRouterData* tlogData,
                                               TaskPriority taskID) {
            while (!self->version_messages.empty() && self->version_messages.front().first < before) {
                Version version = self->version_messages.front().first;
                int64_t messagesErased = 0;

                while (!self->version_messages.empty() && self->version_messages.front().first == version) {
                    ++messagesErased;
                    self->version_messages.pop_front();
                }

                wait(yield(taskID));
            }

            return Void();
        }

        Future<Void> eraseMessagesBefore(Version before, LogRouterData* tlogData, TaskPriority taskID) {
            return eraseMessagesBefore(this, before, tlogData, taskID);
        }
    };

    const UID dbgid;
    Reference<AsyncVar<Reference<ILogSystem>>> logSystem;
    Optional<UID> primaryPeekLocation;
    NotifiedVersion version; // The largest version at which the log router has peeked mutations
                             // from the satellite tLogs or primary tLogs.
    NotifiedVersion minPopped; // The minimum version among all tags that has been popped by remote tLogs.
    const Version startVersion;
    Version minKnownCommittedVersion; // The minimum durable version among all LRs.
                                      // An LR's durable version is the maximum version of mutations that have been
                                      // popped by the remote tLogs.
    Version poppedVersion;
    Deque<std::pair<Version, Standalone<VectorRef<uint8_t>>>> messageBlocks;
    Tag routerTag;
    bool allowPops;
    LogSet logSet;
    bool foundEpochEnd; // The cluster is not fully recovered yet; the LR has to handle recovery.
    double waitForVersionTime = 0; // The total amount of time the LR waits for remote tLogs to peek and pop its data.
    double maxWaitForVersionTime = 0; // The max single-instance wait time when the LR must wait for remote tLogs to
                                      // pop data.
    double getMoreTime = 0; // The total amount of time the LR waits for the satellite tLog's data to become available.
    double maxGetMoreTime = 0; // The max wait time the LR spent in a pull-data-request to the satellite tLog.
    int64_t generation = -1;
    Reference<Histogram> peekLatencyDist;

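    // Bookkeeping for the sequenced peek requests from a single peeking process, so
    // that stale or out-of-order requests can be detected and expired.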
    struct PeekTrackerData {
        std::map<int, Promise<std::pair<Version, bool>>> sequence_version;
        double lastUpdate;
    };

    std::map<UID, PeekTrackerData> peekTracker;

    CounterCollection cc;
    Counter getMoreCount; // Incremented by 1 when the LR tries to pull data from the satellite tLog.
    Counter getMoreBlockedCount; // Incremented by 1 if data is not available when the LR tries to pull data from the
                                 // satellite tLog.
    Future<Void> logger;
    Reference<EventCacheHolder> eventCacheHolder;
    int activePeekStreams = 0;

    std::vector<Reference<TagData>> tag_data; // we only store data for the remote tag locality

    Reference<TagData> getTagData(Tag tag) {
        ASSERT(tag.locality == tagLocalityRemoteLog);
        if (tag.id >= tag_data.size()) {
            tag_data.resize(tag.id + 1);
        }
        return tag_data[tag.id];
    }

    // only callable after getTagData returns a null reference
    Reference<TagData> createTagData(Tag tag, Version popped, Version knownCommittedVersion) {
        auto newTagData = makeReference<TagData>(tag, popped, knownCommittedVersion);
        tag_data[tag.id] = newTagData;
        return newTagData;
    }

    LogRouterData(UID dbgid, const InitializeLogRouterRequest& req)
      : dbgid(dbgid), logSystem(new AsyncVar<Reference<ILogSystem>>()), version(req.startVersion - 1), minPopped(0),
        startVersion(req.startVersion), minKnownCommittedVersion(0), poppedVersion(0), routerTag(req.routerTag),
        allowPops(false), foundEpochEnd(false), generation(req.recoveryCount),
        peekLatencyDist(Histogram::getHistogram(LiteralStringRef("LogRouter"),
                                                LiteralStringRef("PeekTLogLatency"),
                                                Histogram::Unit::microseconds)),
        cc("LogRouter", dbgid.toString()), getMoreCount("GetMoreCount", cc),
        getMoreBlockedCount("GetMoreBlockedCount", cc) {
        // setup just enough of a logSet to be able to call getPushLocations
        logSet.logServers.resize(req.tLogLocalities.size());
        logSet.tLogPolicy = req.tLogPolicy;
        logSet.locality = req.locality;
        logSet.updateLocalitySet(req.tLogLocalities);

        for (int i = 0; i < req.tLogLocalities.size(); i++) {
            Tag tag(tagLocalityRemoteLog, i);
            auto tagData = getTagData(tag);
            if (!tagData) {
                tagData = createTagData(tag, 0, 0);
            }
        }

        eventCacheHolder = makeReference<EventCacheHolder>(dbgid.shortString() + ".PeekLocation");

        // FetchedVersions: How many versions of mutations are buffered at the LR and have not been popped by remote
        // tLogs
        specialCounter(cc, "Version", [this]() { return this->version.get(); });
        specialCounter(cc, "MinPopped", [this]() { return this->minPopped.get(); });
        // TODO: Add minPopped locality and minPoppedId, similar to tLog metrics
        specialCounter(cc, "FetchedVersions", [this]() {
            return std::max<Version>(0,
                                     std::min<Version>(SERVER_KNOBS->MAX_READ_TRANSACTION_LIFE_VERSIONS,
                                                       this->version.get() - this->minPopped.get()));
        });
        specialCounter(cc, "MinKnownCommittedVersion", [this]() { return this->minKnownCommittedVersion; });
        specialCounter(cc, "PoppedVersion", [this]() { return this->poppedVersion; });
        specialCounter(cc, "FoundEpochEnd", [this]() { return this->foundEpochEnd; });
        specialCounter(cc, "WaitForVersionMS", [this]() {
            double val = this->waitForVersionTime;
            this->waitForVersionTime = 0;
            return int64_t(1000 * val);
        });
        specialCounter(cc, "WaitForVersionMaxMS", [this]() {
            double val = this->maxWaitForVersionTime;
            this->maxWaitForVersionTime = 0;
            return int64_t(1000 * val);
        });
        specialCounter(cc, "GetMoreMS", [this]() {
            double val = this->getMoreTime;
            this->getMoreTime = 0;
            return int64_t(1000 * val);
        });
        specialCounter(cc, "GetMoreMaxMS", [this]() {
            double val = this->maxGetMoreTime;
            this->maxGetMoreTime = 0;
            return int64_t(1000 * val);
        });
        specialCounter(cc, "Generation", [this]() { return this->generation; });
        specialCounter(cc, "ActivePeekStreams", [this]() { return this->activePeekStreams; });
        logger = traceCounters("LogRouterMetrics",
                               dbgid,
                               SERVER_KNOBS->WORKER_LOGGING_INTERVAL,
                               &cc,
                               "LogRouterMetrics",
                               [this](TraceEvent& te) {
                                   te.detail("PrimaryPeekLocation", this->primaryPeekLocation);
                                   te.detail("RouterTag", this->routerTag.toString());
                               });
    }
};

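// Appends one version's worth of tagged messages to the shared message blocks and
// indexes each message under its destination tags for later peeking.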
void commitMessages(LogRouterData* self, Version version, const std::vector<TagsAndMessage>& taggedMessages) {
    if (!taggedMessages.size()) {
        return;
    }

    int msgSize = 0;
    for (const auto& i : taggedMessages) {
        msgSize += i.message.size();
    }

    // Grab the last block in the blocks list so we can share its arena
    // We pop all of the elements of it to create a "fresh" vector that starts at the end of the previous vector
    Standalone<VectorRef<uint8_t>> block;
    if (self->messageBlocks.empty()) {
        block = Standalone<VectorRef<uint8_t>>();
        block.reserve(block.arena(), std::max<int64_t>(SERVER_KNOBS->TLOG_MESSAGE_BLOCK_BYTES, msgSize));
    } else {
        block = self->messageBlocks.back().second;
    }

    block.pop_front(block.size());

    for (const auto& msg : taggedMessages) {
        if (msg.message.size() > block.capacity() - block.size()) {
            self->messageBlocks.emplace_back(version, block);
            block = Standalone<VectorRef<uint8_t>>();
            block.reserve(block.arena(), std::max<int64_t>(SERVER_KNOBS->TLOG_MESSAGE_BLOCK_BYTES, msgSize));
        }

        block.append(block.arena(), msg.message.begin(), msg.message.size());
        for (const auto& tag : msg.tags) {
            auto tagData = self->getTagData(tag);
            if (!tagData) {
                tagData = self->createTagData(tag, 0, 0);
            }

            if (version >= tagData->popped) {
                tagData->version_messages.emplace_back(
                    version, LengthPrefixedStringRef((uint32_t*)(block.end() - msg.message.size())));
                if (tagData->version_messages.back().second.expectedSize() > SERVER_KNOBS->MAX_MESSAGE_SIZE) {
                    TraceEvent(SevWarnAlways, "LargeMessage")
                        .detail("Size", tagData->version_messages.back().second.expectedSize());
                }
            }
        }

        msgSize -= msg.message.size();
    }
    self->messageBlocks.emplace_back(version, block);
}

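// Blocks until the log router may safely advance to version ver, i.e. until doing
// so would not leave more than MAX_READ_TRANSACTION_LIFE_VERSIONS of unpopped
// versions buffered (with a larger allowance while recovering across an epoch end).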
ACTOR Future<Void> waitForVersion(LogRouterData* self, Version ver) {
    // The only time the log router should allow a gap in versions larger than MAX_READ_TRANSACTION_LIFE_VERSIONS is
    // when processing epoch end. Since one set of log routers is created per generation of transaction logs, the gap
    // caused by epoch end will be within MAX_VERSIONS_IN_FLIGHT of the log router's start version.
    state double startTime = now();
    if (self->version.get() < self->startVersion) {
        // The log router needs to wait for remote tLogs to process data whose version is less than
        // self->startVersion before it can pull more data (i.e., data after self->startVersion) from the satellite
        // tLog. This prevents the LR from running out of memory by pulling too much data from the satellite tLog at
        // once.
        // Note: each commit writes data to both the primary tLogs and the satellite tLogs. A satellite tLog can be
        // viewed as a part of the primary tLogs.
        if (ver > self->startVersion) {
            self->version.set(self->startVersion);
            // Wait for remote tLogs to peek and pop from the LR,
            // so that the LR's minPopped version can increase to self->startVersion
            wait(self->minPopped.whenAtLeast(self->version.get()));
        }
        self->waitForVersionTime += now() - startTime;
        self->maxWaitForVersionTime = std::max(self->maxWaitForVersionTime, now() - startTime);
        return Void();
    }
    if (!self->foundEpochEnd) {
        // Similar to the proxy, which does not keep more than MAX_READ_TRANSACTION_LIFE_VERSIONS transactions
        // outstanding, the log router does not keep more than MAX_READ_TRANSACTION_LIFE_VERSIONS versions
        // outstanding because remote SSes cannot roll back to more than MAX_READ_TRANSACTION_LIFE_VERSIONS ago.
        wait(self->minPopped.whenAtLeast(
            std::min(self->version.get(), ver - SERVER_KNOBS->MAX_READ_TRANSACTION_LIFE_VERSIONS)));
    } else {
        while (self->minPopped.get() + SERVER_KNOBS->MAX_READ_TRANSACTION_LIFE_VERSIONS < ver) {
            if (self->minPopped.get() + SERVER_KNOBS->MAX_READ_TRANSACTION_LIFE_VERSIONS > self->version.get()) {
                self->version.set(self->minPopped.get() + SERVER_KNOBS->MAX_READ_TRANSACTION_LIFE_VERSIONS);
                wait(yield(TaskPriority::TLogCommit));
            } else {
                wait(self->minPopped.whenAtLeast((self->minPopped.get() + 1)));
            }
        }
    }
    if (ver >= self->startVersion + SERVER_KNOBS->MAX_VERSIONS_IN_FLIGHT) {
        self->foundEpochEnd = true;
    }
    self->waitForVersionTime += now() - startTime;
    self->maxWaitForVersionTime = std::max(self->maxWaitForVersionTime, now() - startTime);
    return Void();
}

// The log router (LR) asynchronously pulls data from the satellite tLogs (preferred) or primary tLogs at tag
// self->routerTag for the version range from the LR's current version (exclusive) to its epoch's end version or
// recovery version.
ACTOR Future<Void> pullAsyncData(LogRouterData* self) {
    state Future<Void> dbInfoChange = Void();
    state Reference<ILogSystem::IPeekCursor> r;
    state Version tagAt = self->version.get() + 1;
    state Version lastVer = 0;
    state std::vector<int> tags; // an optimization to avoid reallocating vector memory in every loop

    loop {
        loop {
            Future<Void> getMoreF = Never();
            if (r) {
                getMoreF = r->getMore(TaskPriority::TLogCommit);
                ++self->getMoreCount;
                if (!getMoreF.isReady()) {
                    ++self->getMoreBlockedCount;
                }
            }
            state double startTime = now();
            choose {
                when(wait(getMoreF)) {
                    double peekTime = now() - startTime;
                    self->peekLatencyDist->sampleSeconds(peekTime);
                    self->getMoreTime += peekTime;
                    self->maxGetMoreTime = std::max(self->maxGetMoreTime, peekTime);
                    break;
                }
                when(wait(dbInfoChange)) { // FIXME: does this actually happen?
                    if (self->logSystem->get()) {
                        r = self->logSystem->get()->peekLogRouter(self->dbgid, tagAt, self->routerTag);
                        self->primaryPeekLocation = r->getPrimaryPeekLocation();
                        TraceEvent("LogRouterPeekLocation", self->dbgid)
                            .detail("LogID", r->getPrimaryPeekLocation())
                            .trackLatest(self->eventCacheHolder->trackingKey);
                    } else {
                        r = Reference<ILogSystem::IPeekCursor>();
                    }
                    dbInfoChange = self->logSystem->onChange();
                }
            }
        }

        self->minKnownCommittedVersion = std::max(self->minKnownCommittedVersion, r->getMinKnownCommittedVersion());

        state Version ver = 0;
        state std::vector<TagsAndMessage> messages;
        state Arena arena;
        while (true) {
            state bool foundMessage = r->hasMessage();
            if (!foundMessage || r->version().version != ver) {
                ASSERT(r->version().version > lastVer);
                if (ver) {
                    wait(waitForVersion(self, ver));

                    commitMessages(self, ver, messages);
                    self->version.set(ver);
                    wait(yield(TaskPriority::TLogCommit));
                    //TraceEvent("LogRouterVersion").detail("Ver",ver);
                }
                lastVer = ver;
                ver = r->version().version;
                messages.clear();
                arena = Arena();

                if (!foundMessage) {
                    ver--; // ver is the next possible version we will get data for
                    if (ver > self->version.get()) {
                        wait(waitForVersion(self, ver));

                        self->version.set(ver);
                        wait(yield(TaskPriority::TLogCommit));
                    }
                    break;
                }
            }

            TagsAndMessage tagAndMsg;
            tagAndMsg.message = r->getMessageWithTags();
            tags.clear();
            self->logSet.getPushLocations(r->getTags(), tags, 0);
            tagAndMsg.tags.reserve(arena, tags.size());
            for (const auto& t : tags) {
                tagAndMsg.tags.push_back(arena, Tag(tagLocalityRemoteLog, t));
            }
            messages.push_back(std::move(tagAndMsg));

            r->nextMessage();
        }

        tagAt = std::max(r->version().version, self->version.get() + 1);
    }
}

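// Returns the buffered messages for a tag, or a reference to an empty deque if
// the tag has never been seen.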
std::deque<std::pair<Version, LengthPrefixedStringRef>>& get_version_messages(LogRouterData* self, Tag tag) {
    auto tagData = self->getTagData(tag);
    if (!tagData) {
        static std::deque<std::pair<Version, LengthPrefixedStringRef>> empty;
        return empty;
    }
    return tagData->version_messages;
};

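// Serializes the buffered messages for tag with versions >= begin into messages,
// stopping early (and setting endVersion) once DESIRED_TOTAL_BYTES is reached.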
void peekMessagesFromMemory(LogRouterData* self, Tag tag, Version begin, BinaryWriter& messages, Version& endVersion) {
    ASSERT(!messages.getLength());

    auto& deque = get_version_messages(self, tag);
    //TraceEvent("TLogPeekMem", self->dbgid).detail("Tag", req.tag1).detail("PDS", self->persistentDataSequence).detail("PDDS", self->persistentDataDurableSequence).detail("Oldest", map1.empty() ? 0 : map1.begin()->key ).detail("OldestMsgCount", map1.empty() ? 0 : map1.begin()->value.size());

    auto it = std::lower_bound(deque.begin(),
                               deque.end(),
                               std::make_pair(begin, LengthPrefixedStringRef()),
                               [](const auto& l, const auto& r) -> bool { return l.first < r.first; });

    Version currentVersion = -1;
    for (; it != deque.end(); ++it) {
        if (it->first != currentVersion) {
            if (messages.getLength() >= SERVER_KNOBS->DESIRED_TOTAL_BYTES) {
                endVersion = currentVersion + 1;
                //TraceEvent("TLogPeekMessagesReached2", self->dbgid);
                break;
            }

            currentVersion = it->first;
            messages << VERSION_HEADER << currentVersion;
        }

        messages << it->second.toStringRef();
    }
}

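// Returns the version up to which a tag has been popped (0 for an unknown tag).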
Version poppedVersion(LogRouterData* self, Tag tag) {
    auto tagData = self->getTagData(tag);
    if (!tagData)
        return Version(0);
    return tagData->popped;
}

// Common logic to peek the TLog and create a TLogPeekReply, serving both streaming and normal peek requests
ACTOR template <typename PromiseType>
Future<Void> logRouterPeekMessages(PromiseType replyPromise,
                                   LogRouterData* self,
                                   Version reqBegin,
                                   Tag reqTag,
                                   bool reqReturnIfBlocked = false,
                                   bool reqOnlySpilled = false,
                                   Optional<std::pair<UID, int>> reqSequence = Optional<std::pair<UID, int>>()) {
    state BinaryWriter messages(Unversioned());
    state int sequence = -1;
    state UID peekId;

    if (reqSequence.present()) {
        try {
            peekId = reqSequence.get().first;
            sequence = reqSequence.get().second;
            if (sequence >= SERVER_KNOBS->PARALLEL_GET_MORE_REQUESTS &&
                self->peekTracker.find(peekId) == self->peekTracker.end()) {
                throw operation_obsolete();
            }
            auto& trackerData = self->peekTracker[peekId];
            if (sequence == 0 && trackerData.sequence_version.find(0) == trackerData.sequence_version.end()) {
                trackerData.sequence_version[0].send(std::make_pair(reqBegin, reqOnlySpilled));
            }
            auto seqBegin = trackerData.sequence_version.begin();
            // The peek cursor and this comparison need to agree about the maximum number of in-flight requests.
            while (trackerData.sequence_version.size() &&
                   seqBegin->first <= sequence - SERVER_KNOBS->PARALLEL_GET_MORE_REQUESTS) {
                if (seqBegin->second.canBeSet()) {
                    seqBegin->second.sendError(operation_obsolete());
                }
                trackerData.sequence_version.erase(seqBegin);
                seqBegin = trackerData.sequence_version.begin();
            }

            if (trackerData.sequence_version.size() && sequence < seqBegin->first) {
                throw operation_obsolete();
            }

            trackerData.lastUpdate = now();
            std::pair<Version, bool> prevPeekData = wait(trackerData.sequence_version[sequence].getFuture());
            reqBegin = prevPeekData.first;
            reqOnlySpilled = prevPeekData.second;
            wait(yield());
        } catch (Error& e) {
            if (e.code() == error_code_timed_out || e.code() == error_code_operation_obsolete) {
                replyPromise.sendError(e);
                return Void();
            } else {
                throw;
            }
        }
    }

    DebugLogTraceEvent("LogRouterPeek0", self->dbgid)
        .detail("ReturnIfBlocked", reqReturnIfBlocked)
        .detail("Tag", reqTag.toString())
        .detail("Ver", self->version.get())
        .detail("Begin", reqBegin);

    if (reqReturnIfBlocked && self->version.get() < reqBegin) {
        replyPromise.sendError(end_of_stream());
        if (reqSequence.present()) {
            auto& trackerData = self->peekTracker[peekId];
            auto& sequenceData = trackerData.sequence_version[sequence + 1];
            if (!sequenceData.isSet()) {
                sequenceData.send(std::make_pair(reqBegin, reqOnlySpilled));
            }
        }
        return Void();
    }

    if (self->version.get() < reqBegin) {
        wait(self->version.whenAtLeast(reqBegin));
        wait(delay(SERVER_KNOBS->TLOG_PEEK_DELAY, g_network->getCurrentTask()));
    }

    state double startTime = now();

    Version poppedVer = poppedVersion(self, reqTag);

    if (poppedVer > reqBegin || reqBegin < self->startVersion) {
        // This should only happen if a packet is sent multiple times and the reply is not needed.
        // Since we are using popped differently, do not send a reply.
        TraceEvent(SevWarnAlways, "LogRouterPeekPopped", self->dbgid)
            .detail("Begin", reqBegin)
            .detail("Popped", poppedVer)
            .detail("Start", self->startVersion);
        if (std::is_same<PromiseType, Promise<TLogPeekReply>>::value) {
            // kill the logRouterPeekStream actor; otherwise that actor becomes stuck
            throw operation_obsolete();
        }
        replyPromise.send(Never());
        if (reqSequence.present()) {
            auto& trackerData = self->peekTracker[peekId];
            auto& sequenceData = trackerData.sequence_version[sequence + 1];
            if (!sequenceData.isSet()) {
                sequenceData.send(std::make_pair(reqBegin, reqOnlySpilled));
            }
        }
        return Void();
    }

    state Version endVersion;
    // Run the peek logic in a loop to account for the case where there is no data to return to the caller, and we
    // may want to wait a little bit instead of just sending back an empty message. This feature is controlled by a
    // knob.
    loop {
        endVersion = self->version.get() + 1;
        peekMessagesFromMemory(self, reqTag, reqBegin, messages, endVersion);

        // Reply to the peek request when:
        // - there is data to return to the caller, or
        // - batching of empty peeks is disabled, or
        // - the batching interval for empty peeks has been reached.
        if (messages.getLength() > 0 || !SERVER_KNOBS->PEEK_BATCHING_EMPTY_MSG ||
            now() - startTime > SERVER_KNOBS->PEEK_BATCHING_EMPTY_MSG_INTERVAL) {
            break;
        }

        state Version waitUntilVersion = self->version.get() + 1;

        // Currently, from `reqBegin` to self->version are all empty peeks. Wait for more versions, or for the
        // empty-peek batching interval to expire.
        wait(self->version.whenAtLeast(waitUntilVersion) ||
             delay(SERVER_KNOBS->PEEK_BATCHING_EMPTY_MSG_INTERVAL - (now() - startTime)));
        if (self->version.get() < waitUntilVersion) {
            break; // We know that from `reqBegin` to self->version are all empty messages. Skip re-executing the
                   // peek logic.
        }
    }

    TLogPeekReply reply;
    reply.maxKnownVersion = self->version.get();
    reply.minKnownCommittedVersion = self->poppedVersion;
    reply.messages = StringRef(reply.arena, messages.toValue());
    reply.popped = self->minPopped.get() >= self->startVersion ? self->minPopped.get() : 0;
    reply.end = endVersion;
    reply.onlySpilled = false;

    if (reqSequence.present()) {
        auto& trackerData = self->peekTracker[peekId];
        trackerData.lastUpdate = now();
        auto& sequenceData = trackerData.sequence_version[sequence + 1];
        if (trackerData.sequence_version.size() && sequence + 1 < trackerData.sequence_version.begin()->first) {
            replyPromise.sendError(operation_obsolete());
            if (!sequenceData.isSet())
                sequenceData.sendError(operation_obsolete());
            return Void();
        }
        if (sequenceData.isSet()) {
            if (sequenceData.getFuture().get().first != reply.end) {
                TEST(true); // tlog peek second attempt ended at a different version
                replyPromise.sendError(operation_obsolete());
                return Void();
            }
        } else {
            sequenceData.send(std::make_pair(reply.end, reply.onlySpilled));
        }
        reply.begin = reqBegin;
    }

    replyPromise.send(reply);
    DebugLogTraceEvent("LogRouterPeek4", self->dbgid)
        .detail("Tag", reqTag.toString())
        .detail("ReqBegin", reqBegin)
        .detail("End", reply.end)
        .detail("MessageSize", reply.messages.size())
        .detail("PoppedVersion", self->poppedVersion);
    return Void();
}

// This actor keeps pushing TLogPeekStreamReply until the log router is removed from the cluster or must recover
ACTOR Future<Void> logRouterPeekStream(LogRouterData* self, TLogPeekStreamRequest req) {
    self->activePeekStreams++;

    state Version begin = req.begin;
    state bool onlySpilled = false;
    req.reply.setByteLimit(std::min(SERVER_KNOBS->MAXIMUM_PEEK_BYTES, req.limitBytes));
    loop {
        state TLogPeekStreamReply reply;
        state Promise<TLogPeekReply> promise;
        state Future<TLogPeekReply> future(promise.getFuture());
        try {
            wait(req.reply.onReady() && store(reply.rep, future) &&
                 logRouterPeekMessages(promise, self, begin, req.tag, req.returnIfBlocked, onlySpilled));

            reply.rep.begin = begin;
            req.reply.send(reply);
            begin = reply.rep.end;
            onlySpilled = reply.rep.onlySpilled;
            if (reply.rep.end > self->version.get()) {
                wait(delay(SERVER_KNOBS->TLOG_PEEK_DELAY, g_network->getCurrentTask()));
            } else {
                wait(delay(0, g_network->getCurrentTask()));
            }
        } catch (Error& e) {
            self->activePeekStreams--;
            TraceEvent(SevDebug, "LogRouterPeekStreamEnd", self->dbgid)
                .errorUnsuppressed(e)
                .detail("Tag", req.tag)
                .detail("PeerAddr", req.reply.getEndpoint().getPrimaryAddress());

            if (e.code() == error_code_end_of_stream || e.code() == error_code_operation_obsolete) {
                req.reply.sendError(e);
                return Void();
            } else {
                throw;
            }
        }
    }
}

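// Periodically expires peek trackers that have not been updated within
// PEEK_TRACKER_EXPIRATION_TIME, timing out any promises still outstanding.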
ACTOR Future<Void> cleanupPeekTrackers(LogRouterData* self) {
    loop {
        double minTimeUntilExpiration = SERVER_KNOBS->PEEK_TRACKER_EXPIRATION_TIME;
        auto it = self->peekTracker.begin();
        while (it != self->peekTracker.end()) {
            double timeUntilExpiration = it->second.lastUpdate + SERVER_KNOBS->PEEK_TRACKER_EXPIRATION_TIME - now();
            if (timeUntilExpiration < 1.0e-6) {
                for (auto seq : it->second.sequence_version) {
                    if (!seq.second.isSet()) {
                        seq.second.sendError(timed_out());
                    }
                }
                it = self->peekTracker.erase(it);
            } else {
                minTimeUntilExpiration = std::min(minTimeUntilExpiration, timeUntilExpiration);
                ++it;
            }
        }

        wait(delay(minTimeUntilExpiration));
    }
}

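// Handles a pop request from a remote tLog: advances the tag's popped version,
// discards message blocks no longer needed by any tag, and (once pops are allowed)
// forwards the pop to the tLogs this router peeks from.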
ACTOR Future<Void> logRouterPop(LogRouterData* self, TLogPopRequest req) {
    auto tagData = self->getTagData(req.tag);
    if (!tagData) {
        tagData = self->createTagData(req.tag, req.to, req.durableKnownCommittedVersion);
    } else if (req.to > tagData->popped) {
        tagData->popped = req.to;
        tagData->durableKnownCommittedVersion = req.durableKnownCommittedVersion;
        wait(tagData->eraseMessagesBefore(req.to, self, TaskPriority::TLogPop));
    }

    state Version minPopped = std::numeric_limits<Version>::max();
    state Version minKnownCommittedVersion = std::numeric_limits<Version>::max();
    for (auto it : self->tag_data) {
        if (it) {
            minPopped = std::min(it->popped, minPopped);
            minKnownCommittedVersion = std::min(it->durableKnownCommittedVersion, minKnownCommittedVersion);
        }
    }

    while (!self->messageBlocks.empty() && self->messageBlocks.front().first < minPopped) {
        self->messageBlocks.pop_front();
        wait(yield(TaskPriority::TLogPop));
    }

    self->poppedVersion = std::min(minKnownCommittedVersion, self->minKnownCommittedVersion);
    if (self->logSystem->get() && self->allowPops) {
        const Tag popTag = self->logSystem->get()->getPseudoPopTag(self->routerTag, ProcessClass::LogRouterClass);
        self->logSystem->get()->pop(self->poppedVersion, popTag);
    }
    req.reply.send(Void());
    self->minPopped.set(std::max(minPopped, self->minPopped.get()));
    return Void();
}

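// The main loop of a log router: starts the data-pulling and bookkeeping actors,
// then serves peek, peek-stream, and pop requests until the router is removed.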
ACTOR Future<Void> logRouterCore(TLogInterface interf,
                                 InitializeLogRouterRequest req,
                                 Reference<AsyncVar<ServerDBInfo> const> db) {
    state LogRouterData logRouterData(interf.id(), req);
    state PromiseStream<Future<Void>> addActor;
    state Future<Void> error = actorCollection(addActor.getFuture());
    state Future<Void> dbInfoChange = Void();

    addActor.send(pullAsyncData(&logRouterData));
    addActor.send(cleanupPeekTrackers(&logRouterData));
    addActor.send(traceRole(Role::LOG_ROUTER, interf.id()));

    loop choose {
        when(wait(dbInfoChange)) {
            dbInfoChange = db->onChange();
            logRouterData.allowPops = db->get().recoveryState == RecoveryState::FULLY_RECOVERED &&
                                      db->get().recoveryCount >= req.recoveryCount;
            logRouterData.logSystem->set(ILogSystem::fromServerDBInfo(logRouterData.dbgid, db->get(), true));
        }
        when(TLogPeekRequest req = waitNext(interf.peekMessages.getFuture())) {
            addActor.send(logRouterPeekMessages(
                req.reply, &logRouterData, req.begin, req.tag, req.returnIfBlocked, req.onlySpilled, req.sequence));
        }
        when(TLogPeekStreamRequest req = waitNext(interf.peekStreamMessages.getFuture())) {
            TraceEvent(SevDebug, "LogRouterPeekStream", logRouterData.dbgid)
                .detail("Token", interf.peekStreamMessages.getEndpoint().token);
            addActor.send(logRouterPeekStream(&logRouterData, req));
        }
        when(TLogPopRequest req = waitNext(interf.popMessages.getFuture())) {
            // Request from a remote tLog to pop data from the LR
            addActor.send(logRouterPop(&logRouterData, req));
        }
        when(wait(error)) {}
    }
}

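// Watches the ServerDBInfo and throws worker_removed() once this log router is no
// longer part of the current log system configuration.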
ACTOR Future<Void> checkRemoved(Reference<AsyncVar<ServerDBInfo> const> db,
                                uint64_t recoveryCount,
                                TLogInterface myInterface) {
    loop {
        bool isDisplaced =
            ((db->get().recoveryCount > recoveryCount && db->get().recoveryState != RecoveryState::UNINITIALIZED) ||
             (db->get().recoveryCount == recoveryCount && db->get().recoveryState == RecoveryState::FULLY_RECOVERED));
        isDisplaced = isDisplaced && !db->get().logSystemConfig.hasLogRouter(myInterface.id());
        if (isDisplaced) {
            throw worker_removed();
        }
        wait(db->onChange());
    }
}

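// Actor entry point for the log router role: runs logRouterCore and converts the
// expected shutdown errors into a clean return.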
ACTOR Future<Void> logRouter(TLogInterface interf,
                             InitializeLogRouterRequest req,
                             Reference<AsyncVar<ServerDBInfo> const> db) {
    try {
        TraceEvent("LogRouterStart", interf.id())
            .detail("Start", req.startVersion)
            .detail("Tag", req.routerTag.toString())
            .detail("Localities", req.tLogLocalities.size())
            .detail("Locality", req.locality);
        state Future<Void> core = logRouterCore(interf, req, db);
        loop choose {
            when(wait(core)) { return Void(); }
            when(wait(checkRemoved(db, req.recoveryCount, interf))) {}
        }
    } catch (Error& e) {
        if (e.code() == error_code_actor_cancelled || e.code() == error_code_worker_removed) {
            TraceEvent("LogRouterTerminated", interf.id()).errorUnsuppressed(e);
            return Void();
        }
        throw;
    }
}