/*
 * ClusterController.actor.cpp
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2019 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <algorithm>
#include <iterator>
#include <map>
#include <set>
#include <vector>

#include "fdbclient/SystemData.h"
#include "fdbrpc/FailureMonitor.h"
#include "flow/ActorCollection.h"
#include "flow/SystemMonitor.h"
#include "fdbclient/ClusterConnectionMemoryRecord.h"
#include "fdbclient/NativeAPI.actor.h"
#include "fdbserver/BackupInterface.h"
#include "fdbserver/CoordinationInterface.h"
#include "fdbserver/DataDistributorInterface.h"
#include "fdbserver/Knobs.h"
#include "fdbserver/ConfigBroadcaster.h"
#include "fdbserver/MoveKeys.actor.h"
#include "fdbserver/WorkerInterface.actor.h"
#include "fdbserver/LeaderElection.h"
#include "fdbserver/LogSystemConfig.h"
#include "fdbserver/WaitFailure.h"
#include "fdbserver/RatekeeperInterface.h"
#include "fdbserver/BlobManagerInterface.h"
#include "fdbserver/ServerDBInfo.h"
#include "fdbserver/Status.h"
#include "fdbserver/LatencyBandConfig.h"
#include "fdbclient/DatabaseContext.h"
#include "fdbclient/GlobalConfig.actor.h"
#include "fdbserver/RecoveryState.h"
#include "fdbclient/ReadYourWrites.h"
#include "fdbrpc/Replication.h"
#include "fdbrpc/ReplicationUtils.h"
#include "fdbclient/KeyBackedTypes.h"
#include "flow/Util.h"
#include "flow/actorcompiler.h" // This must be the last #include.

void failAfter(Future<Void> trigger, Endpoint e);

// This is used to artificially amplify the used count for processes
// occupied by non-singletons. This ultimately makes it less desirable
// for singletons to use those processes as well. This constant should
// be increased if we ever have more than 100 singletons (unlikely).
static const int PID_USED_AMP_FOR_NON_SINGLETON = 100;
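
// Bookkeeping the cluster controller keeps for every registered worker process: the failure watcher, the pending
// registration reply, the worker's process class and priority information, and halt futures for any singletons
// (ratekeeper, data distributor, blob manager) running on it.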
struct WorkerInfo : NonCopyable {
    Future<Void> watcher;
    ReplyPromise<RegisterWorkerReply> reply;
    Generation gen;
    int reboots;
    ProcessClass initialClass;
    ClusterControllerPriorityInfo priorityInfo;
    WorkerDetails details;
    Future<Void> haltRatekeeper;
    Future<Void> haltDistributor;
    Future<Void> haltBlobManager;
    Standalone<VectorRef<StringRef>> issues;

    WorkerInfo()
      : gen(-1), reboots(0),
        priorityInfo(ProcessClass::UnsetFit, false, ClusterControllerPriorityInfo::FitnessUnknown) {}
    WorkerInfo(Future<Void> watcher,
               ReplyPromise<RegisterWorkerReply> reply,
               Generation gen,
               WorkerInterface interf,
               ProcessClass initialClass,
               ProcessClass processClass,
               ClusterControllerPriorityInfo priorityInfo,
               bool degraded,
               Standalone<VectorRef<StringRef>> issues)
      : watcher(watcher), reply(reply), gen(gen), reboots(0), initialClass(initialClass), priorityInfo(priorityInfo),
        details(interf, processClass, degraded), issues(issues) {}

    WorkerInfo(WorkerInfo&& r) noexcept
      : watcher(std::move(r.watcher)), reply(std::move(r.reply)), gen(r.gen), reboots(r.reboots),
        initialClass(r.initialClass), priorityInfo(r.priorityInfo), details(std::move(r.details)),
        haltRatekeeper(r.haltRatekeeper), haltDistributor(r.haltDistributor), haltBlobManager(r.haltBlobManager),
        issues(r.issues) {}
    void operator=(WorkerInfo&& r) noexcept {
        watcher = std::move(r.watcher);
        reply = std::move(r.reply);
        gen = r.gen;
        reboots = r.reboots;
        initialClass = r.initialClass;
        priorityInfo = r.priorityInfo;
        details = std::move(r.details);
        haltRatekeeper = r.haltRatekeeper;
        haltDistributor = r.haltDistributor;
        haltBlobManager = r.haltBlobManager;
        issues = r.issues;
    }
};
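
// A worker paired with its fitness for a particular role and the number of roles already assigned to its process.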
struct WorkerFitnessInfo {
    WorkerDetails worker;
    ProcessClass::Fitness fitness;
    int used;

    WorkerFitnessInfo() : fitness(ProcessClass::NeverAssign), used(0) {}
    WorkerFitnessInfo(WorkerDetails worker, ProcessClass::Fitness fitness, int used)
      : worker(worker), fitness(fitness), used(used) {}
};
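
// Central state for the cluster controller: the latest ClientDBInfo/ServerDBInfo, the set of registered workers,
// and the recruitment logic that picks workers for storage, TLog, and other roles.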
class ClusterControllerData {
public:
    struct DBInfo {
        Reference<AsyncVar<ClientDBInfo>> clientInfo;
        Reference<AsyncVar<ServerDBInfo>> serverInfo;
        std::map<NetworkAddress, double> incompatibleConnections;
        AsyncTrigger forceMasterFailure;
        int64_t masterRegistrationCount;
        int64_t dbInfoCount;
        bool recoveryStalled;
        bool forceRecovery;
        DatabaseConfiguration config; // Asynchronously updated via master registration
        DatabaseConfiguration fullyRecoveredConfig;
        Database db;
        int unfinishedRecoveries;
        int logGenerations;
        bool cachePopulated;
        std::map<NetworkAddress, std::pair<double, OpenDatabaseRequest>> clientStatus;
        Future<Void> clientCounter;
        int clientCount;

        DBInfo()
          : clientInfo(new AsyncVar<ClientDBInfo>()), serverInfo(new AsyncVar<ServerDBInfo>()),
            masterRegistrationCount(0), dbInfoCount(0), recoveryStalled(false), forceRecovery(false),
            db(DatabaseContext::create(clientInfo,
                                       Future<Void>(),
                                       LocalityData(),
                                       EnableLocalityLoadBalance::True,
                                       TaskPriority::DefaultEndpoint,
                                       LockAware::True)), // SOMEDAY: Locality!
            unfinishedRecoveries(0), logGenerations(0), cachePopulated(false), clientCount(0) {
            clientCounter = countClients(this);
        }
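
        // The following helpers publish a change to one of the long-lived singleton interfaces by copying the
        // current ServerDBInfo, assigning it a fresh id and generation, and setting it on the serverInfo AsyncVar.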
        void setDistributor(const DataDistributorInterface& interf) {
            auto newInfo = serverInfo->get();
            newInfo.id = deterministicRandom()->randomUniqueID();
            newInfo.infoGeneration = ++dbInfoCount;
            newInfo.distributor = interf;
            serverInfo->set(newInfo);
        }

        void setRatekeeper(const RatekeeperInterface& interf) {
            auto newInfo = serverInfo->get();
            newInfo.id = deterministicRandom()->randomUniqueID();
            newInfo.infoGeneration = ++dbInfoCount;
            newInfo.ratekeeper = interf;
            serverInfo->set(newInfo);
        }

        void setBlobManager(const BlobManagerInterface& interf) {
            auto newInfo = serverInfo->get();
            newInfo.id = deterministicRandom()->randomUniqueID();
            newInfo.infoGeneration = ++dbInfoCount;
            newInfo.blobManager = interf;
            serverInfo->set(newInfo);
        }

        void clearInterf(ProcessClass::ClassType t) {
            auto newInfo = serverInfo->get();
            newInfo.id = deterministicRandom()->randomUniqueID();
            newInfo.infoGeneration = ++dbInfoCount;
            if (t == ProcessClass::DataDistributorClass) {
                newInfo.distributor = Optional<DataDistributorInterface>();
            } else if (t == ProcessClass::RatekeeperClass) {
                newInfo.ratekeeper = Optional<RatekeeperInterface>();
            } else if (t == ProcessClass::BlobManagerClass) {
                newInfo.blobManager = Optional<BlobManagerInterface>();
            }
            serverInfo->set(newInfo);
        }
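
        // Periodically drops client status entries that have not been refreshed recently and recomputes the total
        // number of connected clients.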
        ACTOR static Future<Void> countClients(DBInfo* self) {
            loop {
                wait(delay(SERVER_KNOBS->CC_PRUNE_CLIENTS_INTERVAL));

                self->clientCount = 0;
                for (auto itr = self->clientStatus.begin(); itr != self->clientStatus.end();) {
                    if (now() - itr->second.first < 2 * SERVER_KNOBS->COORDINATOR_REGISTER_INTERVAL) {
                        self->clientCount += itr->second.second.clientCount;
                        ++itr;
                    } else {
                        itr = self->clientStatus.erase(itr);
                    }
                }
            }
        }
    };
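
    // Mirrors the set of registered worker processes into the database's worker list keys, so the current workers
    // can be read back through the database. set() records a pending change and update() writes batches of pending
    // changes transactionally.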
    struct UpdateWorkerList {
        Future<Void> init(Database const& db) { return update(this, db); }

        void set(Optional<Standalone<StringRef>> processID, Optional<ProcessData> data) {
            delta[processID] = data;
            anyDelta.set(true);
        }

    private:
        std::map<Optional<Standalone<StringRef>>, Optional<ProcessData>> delta;
        AsyncVar<bool> anyDelta;

        ACTOR static Future<Void> update(UpdateWorkerList* self, Database db) {
            // The Database we are using is based on worker registrations to this cluster controller, which come only
            // from master servers that we started, so it shouldn't be possible for multiple cluster controllers to
            // fight.
            state Transaction tr(db);
            loop {
                try {
                    tr.clear(workerListKeys);
                    wait(tr.commit());
                    break;
                } catch (Error& e) {
                    wait(tr.onError(e));
                }
            }

            loop {
                tr.reset();

                // Wait for some changes
                while (!self->anyDelta.get())
                    wait(self->anyDelta.onChange());
                self->anyDelta.set(false);

                state std::map<Optional<Standalone<StringRef>>, Optional<ProcessData>> delta;
                delta.swap(self->delta);

                TraceEvent("UpdateWorkerList").detail("DeltaCount", delta.size());

                // Do a transaction to write the changes
                loop {
                    try {
                        for (auto w = delta.begin(); w != delta.end(); ++w) {
                            if (w->second.present()) {
                                tr.set(workerListKeyFor(w->first.get()), workerListValue(w->second.get()));
                            } else
                                tr.clear(workerListKeyFor(w->first.get()));
                        }
                        wait(tr.commit());
                        break;
                    } catch (Error& e) {
                        wait(tr.onError(e));
                    }
                }
            }
        }
    };
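
    // A worker is available for recruitment if the cluster controller has only recently started (failure monitoring
    // may not have settled yet) or if failure monitoring reports its endpoint as available; when checkStable is set,
    // workers that have rebooted repeatedly are also excluded.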
    bool workerAvailable(WorkerInfo const& worker, bool checkStable) {
        return (now() - startTime < 2 * FLOW_KNOBS->SERVER_REQUEST_INTERVAL) ||
               (IFailureMonitor::failureMonitor().getState(worker.details.interf.storage.getEndpoint()).isAvailable() &&
                (!checkStable || worker.reboots < 2));
    }
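
    // Returns true if the given process currently hosts one of the long-lived stateless singletons (data distributor,
    // ratekeeper, or blob manager).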
    bool isLongLivedStateless(Optional<Key> const& processId) {
        return (db.serverInfo->get().distributor.present() &&
                db.serverInfo->get().distributor.get().locality.processId() == processId) ||
               (db.serverInfo->get().ratekeeper.present() &&
                db.serverInfo->get().ratekeeper.get().locality.processId() == processId) ||
               (db.serverInfo->get().blobManager.present() &&
                db.serverInfo->get().blobManager.get().locality.processId() == processId);
    }
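
    // Returns a worker suitable for recruiting a storage server, honoring the machine, DC, and address exclusions in
    // the request. When criticalRecruitment is set and no suitably classed worker exists, the best-fitting available
    // worker of any assignable class is returned instead.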
    WorkerDetails getStorageWorker(RecruitStorageRequest const& req) {
        std::set<Optional<Standalone<StringRef>>> excludedMachines(req.excludeMachines.begin(),
                                                                   req.excludeMachines.end());
        std::set<Optional<Standalone<StringRef>>> includeDCs(req.includeDCs.begin(), req.includeDCs.end());
        std::set<AddressExclusion> excludedAddresses(req.excludeAddresses.begin(), req.excludeAddresses.end());

        for (auto& it : id_worker)
            if (workerAvailable(it.second, false) &&
                !excludedMachines.count(it.second.details.interf.locality.zoneId()) &&
                (includeDCs.size() == 0 || includeDCs.count(it.second.details.interf.locality.dcId())) &&
                !addressExcluded(excludedAddresses, it.second.details.interf.address()) &&
                (!it.second.details.interf.secondaryAddress().present() ||
                 !addressExcluded(excludedAddresses, it.second.details.interf.secondaryAddress().get())) &&
                it.second.details.processClass.machineClassFitness(ProcessClass::Storage) <= ProcessClass::UnsetFit) {
                return it.second.details;
            }

        if (req.criticalRecruitment) {
            ProcessClass::Fitness bestFit = ProcessClass::NeverAssign;
            Optional<WorkerDetails> bestInfo;
            for (auto& it : id_worker) {
                ProcessClass::Fitness fit = it.second.details.processClass.machineClassFitness(ProcessClass::Storage);
                if (workerAvailable(it.second, false) &&
                    !excludedMachines.count(it.second.details.interf.locality.zoneId()) &&
                    (includeDCs.size() == 0 || includeDCs.count(it.second.details.interf.locality.dcId())) &&
                    !addressExcluded(excludedAddresses, it.second.details.interf.address()) && fit < bestFit) {
                    bestFit = fit;
                    bestInfo = it.second.details;
                }
            }

            if (bestInfo.present()) {
                return bestInfo.get();
            }
        }

        throw no_more_servers();
    }

    // Returns a worker that can be used by a blob worker
    // Note: we restrict the set of possible workers to those in the same DC as the BM/CC
    WorkerDetails getBlobWorker(RecruitBlobWorkerRequest const& req) {
        std::set<AddressExclusion> excludedAddresses(req.excludeAddresses.begin(), req.excludeAddresses.end());
        for (auto& it : id_worker) {
            // the worker must be available, have the same dcID as CC,
            // not be one of the excluded addrs from req and have the appropriate fitness
            if (workerAvailable(it.second, false) &&
                clusterControllerDcId == it.second.details.interf.locality.dcId() &&
                !addressExcluded(excludedAddresses, it.second.details.interf.address()) &&
                (!it.second.details.interf.secondaryAddress().present() ||
                 !addressExcluded(excludedAddresses, it.second.details.interf.secondaryAddress().get())) &&
                it.second.details.processClass.machineClassFitness(ProcessClass::BlobWorker) == ProcessClass::BestFit) {
                return it.second.details;
            }
        }

        throw no_more_servers();
    }
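
    // Selects workers for the initial (seed) storage servers: candidates are grouped by storage fitness and the
    // replication policy is used to pick a set whose localities satisfy it.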
    std::vector<WorkerDetails> getWorkersForSeedServers(
        DatabaseConfiguration const& conf,
        Reference<IReplicationPolicy> const& policy,
        Optional<Optional<Standalone<StringRef>>> const& dcId = Optional<Optional<Standalone<StringRef>>>()) {
        std::map<ProcessClass::Fitness, std::vector<WorkerDetails>> fitness_workers;
        std::vector<WorkerDetails> results;
        Reference<LocalitySet> logServerSet = Reference<LocalitySet>(new LocalityMap<WorkerDetails>());
        LocalityMap<WorkerDetails>* logServerMap = (LocalityMap<WorkerDetails>*)logServerSet.getPtr();
        bool bCompleted = false;

        for (auto& it : id_worker) {
            auto fitness = it.second.details.processClass.machineClassFitness(ProcessClass::Storage);
            if (workerAvailable(it.second, false) && !conf.isExcludedServer(it.second.details.interf.addresses()) &&
                !isExcludedDegradedServer(it.second.details.interf.addresses()) &&
                fitness != ProcessClass::NeverAssign &&
                (!dcId.present() || it.second.details.interf.locality.dcId() == dcId.get())) {
                fitness_workers[fitness].push_back(it.second.details);
            }
        }

        for (auto& it : fitness_workers) {
            for (auto& worker : it.second) {
                logServerMap->add(worker.interf.locality, &worker);
            }

            std::vector<LocalityEntry> bestSet;
            if (logServerSet->selectReplicas(policy, bestSet)) {
                results.reserve(bestSet.size());
                for (auto& entry : bestSet) {
                    auto object = logServerMap->getObject(entry);
                    results.push_back(*object);
                }
                bCompleted = true;
                break;
            }
        }

        logServerSet->clear();
        logServerSet.clear();

        if (!bCompleted) {
            throw no_more_servers();
        }

        return results;
    }

    // Adds workers to the result such that each field is used in the result set as evenly as possible,
    // with a secondary criterion of minimizing the reuse of zoneIds.
    // Only adds workers which have a field that is already in the result set.
    void addWorkersByLowestField(StringRef field,
                                 int desired,
                                 const std::vector<WorkerDetails>& workers,
                                 std::set<WorkerDetails>& resultSet) {
        typedef Optional<Standalone<StringRef>> Field;
        typedef Optional<Standalone<StringRef>> Zone;
        typedef std::tuple<int, bool, Field> FieldCount;
        typedef std::pair<int, Zone> ZoneCount;

        std::priority_queue<FieldCount, std::vector<FieldCount>, std::greater<FieldCount>> fieldQueue;
        std::map<Field, std::priority_queue<ZoneCount, std::vector<ZoneCount>, std::greater<ZoneCount>>>
            field_zoneQueue;

        std::map<Field, std::pair<int, bool>> field_count;
        std::map<Zone, std::pair<int, Field>> zone_count;
        std::map<Zone, std::vector<WorkerDetails>> zone_workers;

        // Count the number of fields and zones already in the result set
        for (auto& worker : resultSet) {
            auto thisField = worker.interf.locality.get(field);
            auto thisZone = worker.interf.locality.zoneId();
            auto thisDc = worker.interf.locality.dcId();

            auto& fitness = field_count[thisField];
            fitness.first++;
            fitness.second = thisDc == clusterControllerDcId;

            auto& zc = zone_count[thisZone];
            zc.first++;
            zc.second = thisField;
        }

        for (auto& worker : workers) {
            auto thisField = worker.interf.locality.get(field);
            auto thisZone = worker.interf.locality.zoneId();

            if (field_count.count(thisField)) {
                zone_workers[thisZone].push_back(worker);
                zone_count[thisZone].second = thisField;
            }
        }

        // try to avoid fields in the cluster controller datacenter if everything else is equal
        for (auto& it : field_count) {
            fieldQueue.push(std::make_tuple(it.second.first, it.second.second, it.first));
        }

        for (auto& it : zone_count) {
            field_zoneQueue[it.second.second].push(std::make_pair(it.second.first, it.first));
        }

        // start with the least used field, and try to find a worker with that field
        while (fieldQueue.size()) {
            auto lowestField = fieldQueue.top();
            auto& lowestZoneQueue = field_zoneQueue[std::get<2>(lowestField)];
            bool added = false;
            // start with the least used zoneId, and try and find a worker with that zone
            while (lowestZoneQueue.size() && !added) {
                auto lowestZone = lowestZoneQueue.top();
                auto& zoneWorkers = zone_workers[lowestZone.second];

                while (zoneWorkers.size() && !added) {
                    if (!resultSet.count(zoneWorkers.back())) {
                        resultSet.insert(zoneWorkers.back());
                        if (resultSet.size() == desired) {
                            return;
                        }
                        added = true;
                    }
                    zoneWorkers.pop_back();
                }
                lowestZoneQueue.pop();
                if (added && zoneWorkers.size()) {
                    ++lowestZone.first;
                    lowestZoneQueue.push(lowestZone);
                }
            }
            fieldQueue.pop();
            if (added) {
                ++std::get<0>(lowestField);
                fieldQueue.push(lowestField);
            }
        }
    }

    // Adds workers to the result which minimize the reuse of zoneIds
    void addWorkersByLowestZone(int desired,
                                const std::vector<WorkerDetails>& workers,
                                std::set<WorkerDetails>& resultSet) {
        typedef Optional<Standalone<StringRef>> Zone;
        typedef std::pair<int, Zone> ZoneCount;

        std::map<Zone, int> zone_count;
        std::map<Zone, std::vector<WorkerDetails>> zone_workers;
        std::priority_queue<ZoneCount, std::vector<ZoneCount>, std::greater<ZoneCount>> zoneQueue;

        for (const auto& worker : workers) {
            auto thisZone = worker.interf.locality.zoneId();
            zone_count[thisZone] = 0;
            zone_workers[thisZone].push_back(worker);
        }

        for (auto& worker : resultSet) {
            auto thisZone = worker.interf.locality.zoneId();
            zone_count[thisZone]++;
        }

        for (auto& it : zone_count) {
            zoneQueue.push(std::make_pair(it.second, it.first));
        }

        while (zoneQueue.size()) {
            auto lowestZone = zoneQueue.top();
            auto& zoneWorkers = zone_workers[lowestZone.second];

            bool added = false;
            while (zoneWorkers.size() && !added) {
                if (!resultSet.count(zoneWorkers.back())) {
                    resultSet.insert(zoneWorkers.back());
                    if (resultSet.size() == desired) {
                        return;
                    }
                    added = true;
                }
                zoneWorkers.pop_back();
            }
            zoneQueue.pop();
            if (added && zoneWorkers.size()) {
                ++lowestZone.first;
                zoneQueue.push(lowestZone);
            }
        }
    }

    // Log the reason why the worker is considered unavailable.
    void logWorkerUnavailable(const Severity severity,
                              const UID& id,
                              const std::string& method,
                              const std::string& reason,
                              const WorkerDetails& details,
                              const ProcessClass::Fitness& fitness,
                              const std::set<Optional<Key>>& dcIds) {
        // Construct the list of DCs where the TLog recruitment is happening. This is mainly for logging purposes.
        std::string dcList;
        for (const auto& dc : dcIds) {
            if (!dcList.empty()) {
                dcList += ',';
            }
            dcList += printable(dc);
        }
        // Logging every possible option for every recruitment would be a lot; logging the options with GoodFit or
        // BestFit should be manageable because there should only be around 30 TLog-class processes. Plus, recruitment
        // happens only during initial database creation and recovery, so these trace events should be sparse.
        if (fitness == ProcessClass::GoodFit || fitness == ProcessClass::BestFit ||
            fitness == ProcessClass::NeverAssign) {
            TraceEvent(severity, "GetTLogTeamWorkerUnavailable", id)
                .detail("TLogRecruitMethod", method)
                .detail("Reason", reason)
                .detail("WorkerID", details.interf.id())
                .detail("WorkerDC", details.interf.locality.dcId())
                .detail("Address", details.interf.addresses().toString())
                .detail("Fitness", fitness)
                .detail("RecruitmentDcIds", dcList);
        }
    }

    // A TLog recruitment method specialized for three_data_hall and three_datacenter configurations
    // It attempts to evenly recruit processes from across data_halls or datacenters
    std::vector<WorkerDetails> getWorkersForTlogsComplex(DatabaseConfiguration const& conf,
                                                         int32_t desired,
                                                         std::map<Optional<Standalone<StringRef>>, int>& id_used,
                                                         StringRef field,
                                                         int minFields,
                                                         int minPerField,
                                                         bool allowDegraded,
                                                         bool checkStable,
                                                         const std::set<Optional<Key>>& dcIds,
                                                         const std::vector<UID>& exclusionWorkerIds) {
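        // Candidates are bucketed by (fitness, number of roles already assigned to the process, whether the process
        // hosts a long-lived stateless singleton); since std::map keeps the keys sorted, iteration visits the most
        // preferred buckets first.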
        std::map<std::tuple<ProcessClass::Fitness, int, bool>, std::vector<WorkerDetails>> fitness_workers;

        // Go through all the workers to list all the workers that can be recruited.
        for (const auto& [worker_process_id, worker_info] : id_worker) {
            const auto& worker_details = worker_info.details;
            auto fitness = worker_details.processClass.machineClassFitness(ProcessClass::TLog);

            if (std::find(exclusionWorkerIds.begin(), exclusionWorkerIds.end(), worker_details.interf.id()) !=
                exclusionWorkerIds.end()) {
                logWorkerUnavailable(SevInfo, id, "complex", "Worker is excluded", worker_details, fitness, dcIds);
                continue;
            }
            if (!workerAvailable(worker_info, checkStable)) {
                logWorkerUnavailable(SevInfo, id, "complex", "Worker is not available", worker_details, fitness, dcIds);
                continue;
            }
            if (conf.isExcludedServer(worker_details.interf.addresses())) {
                logWorkerUnavailable(SevInfo,
                                     id,
                                     "complex",
                                     "Worker server is excluded from the cluster",
                                     worker_details,
                                     fitness,
                                     dcIds);
                continue;
            }
            if (isExcludedDegradedServer(worker_details.interf.addresses())) {
                logWorkerUnavailable(SevInfo,
                                     id,
                                     "complex",
                                     "Worker server is excluded from the cluster due to degradation",
                                     worker_details,
                                     fitness,
                                     dcIds);
                continue;
            }
            if (fitness == ProcessClass::NeverAssign) {
                logWorkerUnavailable(
                    SevDebug, id, "complex", "Worker's fitness is NeverAssign", worker_details, fitness, dcIds);
                continue;
            }
            if (!dcIds.empty() && dcIds.count(worker_details.interf.locality.dcId()) == 0) {
                logWorkerUnavailable(
                    SevDebug, id, "complex", "Worker is not in the target DC", worker_details, fitness, dcIds);
                continue;
            }
            if (!allowDegraded && worker_details.degraded) {
                logWorkerUnavailable(
                    SevInfo, id, "complex", "Worker is degraded and not allowed", worker_details, fitness, dcIds);
                continue;
            }

            fitness_workers[std::make_tuple(
                                fitness, id_used[worker_process_id], isLongLivedStateless(worker_process_id))]
                .push_back(worker_details);
        }

        auto requiredFitness = ProcessClass::NeverAssign;
        int requiredUsed = 1e6;

        typedef Optional<Standalone<StringRef>> Field;
        typedef Optional<Standalone<StringRef>> Zone;
        std::map<Field, std::pair<std::set<Zone>, std::vector<WorkerDetails>>> field_zones;
        std::set<Field> fieldsWithMin;
        std::map<Field, int> field_count;
        std::map<Field, std::tuple<ProcessClass::Fitness, int, bool>> field_fitness;

        // Determine the best required workers by finding the workers with enough unique zoneIds per field
        for (auto workerIter = fitness_workers.begin(); workerIter != fitness_workers.end(); ++workerIter) {
            deterministicRandom()->randomShuffle(workerIter->second);
            auto fitness = std::get<0>(workerIter->first);
            auto used = std::get<1>(workerIter->first);

            if (fitness > requiredFitness || (fitness == requiredFitness && used > requiredUsed)) {
                break;
            }

            for (auto& worker : workerIter->second) {
                auto thisField = worker.interf.locality.get(field);
                auto& zones = field_zones[thisField];
                if (zones.first.insert(worker.interf.locality.zoneId()).second) {
                    zones.second.push_back(worker);
                    if (zones.first.size() == minPerField) {
                        fieldsWithMin.insert(thisField);
                    }
                }
                field_count[thisField]++;
                field_fitness.insert(
                    { thisField,
                      std::make_tuple(fitness, used, worker.interf.locality.dcId() == clusterControllerDcId) });
            }
            if (fieldsWithMin.size() >= minFields) {
                requiredFitness = fitness;
                requiredUsed = used;
            }
        }

        if (fieldsWithMin.size() < minFields) {
            throw no_more_servers();
        }

        std::set<Field> chosenFields;
        // If we cannot use all of the fields, use the fields which allow the best workers to be chosen
        if (fieldsWithMin.size() * minPerField > desired) {
            std::vector<std::tuple<ProcessClass::Fitness, int, bool, int, Field>> orderedFields;
            for (auto& it : fieldsWithMin) {
                auto& fitness = field_fitness[it];
                orderedFields.emplace_back(
                    std::get<0>(fitness), std::get<1>(fitness), std::get<2>(fitness), field_count[it], it);
            }
            std::sort(orderedFields.begin(), orderedFields.end());
            int totalFields = desired / minPerField;
            int maxCount = 0;
            for (int i = 0; i < orderedFields.size() && chosenFields.size() < totalFields; i++) {
                if (chosenFields.size() == totalFields - 1 && maxCount + std::get<3>(orderedFields[i]) < desired) {
                    for (int j = i + 1; j < orderedFields.size(); j++) {
                        if (maxCount + std::get<3>(orderedFields[j]) >= desired) {
                            chosenFields.insert(std::get<4>(orderedFields[j]));
                            break;
                        }
                    }
                }
                if (chosenFields.size() < totalFields) {
                    maxCount += std::get<3>(orderedFields[i]);
                    chosenFields.insert(std::get<4>(orderedFields[i]));
                }
            }
        } else {
            chosenFields = fieldsWithMin;
        }

        // Create a result set that fulfills the minField and minPerField requirements before adding more workers
        std::set<WorkerDetails> resultSet;
        for (auto& it : chosenFields) {
            auto& w = field_zones[it].second;
            for (int i = 0; i < minPerField; i++) {
                resultSet.insert(w[i]);
            }
        }

        // Continue adding workers to the result set until we reach the desired number of workers
        for (auto workerIter = fitness_workers.begin();
             workerIter != fitness_workers.end() && resultSet.size() < desired;
             ++workerIter) {
            auto fitness = std::get<0>(workerIter->first);
            auto used = std::get<1>(workerIter->first);

            if (fitness > requiredFitness || (fitness == requiredFitness && used > requiredUsed)) {
                break;
            }
            if (workerIter->second.size() + resultSet.size() <= desired) {
                for (auto& worker : workerIter->second) {
                    if (chosenFields.count(worker.interf.locality.get(field))) {
                        resultSet.insert(worker);
                    }
                }
            } else {
                addWorkersByLowestField(field, desired, workerIter->second, resultSet);
            }
        }

        for (auto& result : resultSet) {
            id_used[result.interf.locality.processId()]++;
        }

        return std::vector<WorkerDetails>(resultSet.begin(), resultSet.end());
    }

    // Attempt to recruit TLogs without degraded processes and see if it improves the configuration
    std::vector<WorkerDetails> getWorkersForTlogsComplex(DatabaseConfiguration const& conf,
                                                         int32_t desired,
                                                         std::map<Optional<Standalone<StringRef>>, int>& id_used,
                                                         StringRef field,
                                                         int minFields,
                                                         int minPerField,
                                                         bool checkStable,
                                                         const std::set<Optional<Key>>& dcIds,
                                                         const std::vector<UID>& exclusionWorkerIds) {
        desired = std::max(desired, minFields * minPerField);
        std::map<Optional<Standalone<StringRef>>, int> withDegradedUsed = id_used;
        auto withDegraded = getWorkersForTlogsComplex(conf,
                                                      desired,
                                                      withDegradedUsed,
                                                      field,
                                                      minFields,
                                                      minPerField,
                                                      true,
                                                      checkStable,
                                                      dcIds,
                                                      exclusionWorkerIds);
        RoleFitness withDegradedFitness(withDegraded, ProcessClass::TLog, withDegradedUsed);
        ASSERT(withDegraded.size() <= desired);

        bool usedDegraded = false;
        for (auto& it : withDegraded) {
            if (it.degraded) {
                usedDegraded = true;
                break;
            }
        }

        if (!usedDegraded) {
            id_used = withDegradedUsed;
            return withDegraded;
        }

        try {
            std::map<Optional<Standalone<StringRef>>, int> withoutDegradedUsed = id_used;
            auto withoutDegraded = getWorkersForTlogsComplex(conf,
                                                             desired,
                                                             withoutDegradedUsed,
                                                             field,
                                                             minFields,
                                                             minPerField,
                                                             false,
                                                             checkStable,
                                                             dcIds,
                                                             exclusionWorkerIds);
            RoleFitness withoutDegradedFitness(withoutDegraded, ProcessClass::TLog, withoutDegradedUsed);
            ASSERT(withoutDegraded.size() <= desired);

            if (withDegradedFitness < withoutDegradedFitness) {
                id_used = withDegradedUsed;
                return withDegraded;
            }
            id_used = withoutDegradedUsed;
            return withoutDegraded;
        } catch (Error& e) {
            if (e.code() != error_code_no_more_servers) {
                throw;
            }
            id_used = withDegradedUsed;
            return withDegraded;
        }
    }

    // A TLog recruitment method specialized for single, double, and triple configurations
    // It recruits processes with unique zoneIds until it reaches the desired amount
    std::vector<WorkerDetails> getWorkersForTlogsSimple(DatabaseConfiguration const& conf,
                                                        int32_t required,
                                                        int32_t desired,
                                                        std::map<Optional<Standalone<StringRef>>, int>& id_used,
                                                        bool checkStable,
                                                        const std::set<Optional<Key>>& dcIds,
                                                        const std::vector<UID>& exclusionWorkerIds) {
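        // Candidates are bucketed by (fitness, used count, degraded, long-lived stateless, in the CC's DC), so that
        // less loaded, non-degraded processes that do not host a singleton and are outside the cluster controller's
        // datacenter are preferred.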
        std::map<std::tuple<ProcessClass::Fitness, int, bool, bool, bool>, std::vector<WorkerDetails>> fitness_workers;

        // Go through all the workers to list all the workers that can be recruited.
        for (const auto& [worker_process_id, worker_info] : id_worker) {
            const auto& worker_details = worker_info.details;
            auto fitness = worker_details.processClass.machineClassFitness(ProcessClass::TLog);

            if (std::find(exclusionWorkerIds.begin(), exclusionWorkerIds.end(), worker_details.interf.id()) !=
                exclusionWorkerIds.end()) {
                logWorkerUnavailable(SevInfo, id, "simple", "Worker is excluded", worker_details, fitness, dcIds);
                continue;
            }
            if (!workerAvailable(worker_info, checkStable)) {
                logWorkerUnavailable(SevInfo, id, "simple", "Worker is not available", worker_details, fitness, dcIds);
                continue;
            }
            if (conf.isExcludedServer(worker_details.interf.addresses())) {
                logWorkerUnavailable(SevInfo,
                                     id,
                                     "simple",
                                     "Worker server is excluded from the cluster",
                                     worker_details,
                                     fitness,
                                     dcIds);
                continue;
            }
            if (isExcludedDegradedServer(worker_details.interf.addresses())) {
                logWorkerUnavailable(SevInfo,
                                     id,
                                     "simple",
                                     "Worker server is excluded from the cluster due to degradation",
                                     worker_details,
                                     fitness,
                                     dcIds);
                continue;
            }
            if (fitness == ProcessClass::NeverAssign) {
                logWorkerUnavailable(
                    SevDebug, id, "simple", "Worker's fitness is NeverAssign", worker_details, fitness, dcIds);
                continue;
            }
            if (!dcIds.empty() && dcIds.count(worker_details.interf.locality.dcId()) == 0) {
                logWorkerUnavailable(
                    SevDebug, id, "simple", "Worker is not in the target DC", worker_details, fitness, dcIds);
                continue;
            }

            // This worker is a candidate for TLog recruitment.
            bool inCCDC = worker_details.interf.locality.dcId() == clusterControllerDcId;
            // Prefer recruiting a TransactionClass non-degraded process over a LogClass degraded process
            if (worker_details.degraded) {
                fitness = std::max(fitness, ProcessClass::GoodFit);
            }

            fitness_workers[std::make_tuple(fitness,
                                            id_used[worker_process_id],
                                            worker_details.degraded,
                                            isLongLivedStateless(worker_process_id),
                                            inCCDC)]
                .push_back(worker_details);
        }

        auto requiredFitness = ProcessClass::BestFit;
        int requiredUsed = 0;

        std::set<Optional<Standalone<StringRef>>> zones;
        std::set<WorkerDetails> resultSet;

        // Determine the best required workers by finding the workers with enough unique zoneIds
        for (auto workerIter = fitness_workers.begin(); workerIter != fitness_workers.end(); ++workerIter) {
            auto fitness = std::get<0>(workerIter->first);
            auto used = std::get<1>(workerIter->first);
            deterministicRandom()->randomShuffle(workerIter->second);
            for (auto& worker : workerIter->second) {
                if (!zones.count(worker.interf.locality.zoneId())) {
                    zones.insert(worker.interf.locality.zoneId());
                    resultSet.insert(worker);
                    if (resultSet.size() == required) {
                        break;
                    }
                }
            }
            if (resultSet.size() == required) {
                requiredFitness = fitness;
                requiredUsed = used;
                break;
            }
        }

        if (resultSet.size() < required) {
            throw no_more_servers();
        }

        // Continue adding workers to the result set until we reach the desired number of workers
        for (auto workerIter = fitness_workers.begin();
             workerIter != fitness_workers.end() && resultSet.size() < desired;
             ++workerIter) {
            auto fitness = std::get<0>(workerIter->first);
            auto used = std::get<1>(workerIter->first);
            if (fitness > requiredFitness || (fitness == requiredFitness && used > requiredUsed)) {
                break;
            }
            if (workerIter->second.size() + resultSet.size() <= desired) {
                for (auto& worker : workerIter->second) {
                    resultSet.insert(worker);
                }
            } else {
                addWorkersByLowestZone(desired, workerIter->second, resultSet);
            }
        }

        ASSERT(resultSet.size() >= required && resultSet.size() <= desired);

        for (auto& result : resultSet) {
            id_used[result.interf.locality.processId()]++;
        }

        return std::vector<WorkerDetails>(resultSet.begin(), resultSet.end());
    }

    // A backup method for TLog recruitment that is used for custom policies, but does a worse job
    // selecting the best workers.
    //   conf: the database configuration.
    //   required: the required number of TLog workers to select.
    //   desired: the desired number of TLog workers to select.
    //   policy: the TLog replication policy the selection needs to satisfy.
    //   id_used: keep track of process IDs of selected workers.
    //   checkStable: when true, only select from workers that are considered stable (not rebooted more than
    //   twice recently).
    //   dcIds: the target data centers the workers are in. The selected workers must all be from these
    //   data centers.
    //   exclusionWorkerIds: the workers to be excluded from the selection.
    std::vector<WorkerDetails> getWorkersForTlogsBackup(
        DatabaseConfiguration const& conf,
        int32_t required,
        int32_t desired,
        Reference<IReplicationPolicy> const& policy,
        std::map<Optional<Standalone<StringRef>>, int>& id_used,
        bool checkStable = false,
        const std::set<Optional<Key>>& dcIds = std::set<Optional<Key>>(),
        const std::vector<UID>& exclusionWorkerIds = {}) {
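        // Candidates are bucketed by (fitness, used count, degraded, in the CC's DC); degraded processes and
        // processes in the cluster controller's datacenter are only kept when the policy cannot be satisfied
        // without them.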
        std::map<std::tuple<ProcessClass::Fitness, int, bool, bool>, std::vector<WorkerDetails>> fitness_workers;
        std::vector<WorkerDetails> results;
        Reference<LocalitySet> logServerSet = Reference<LocalitySet>(new LocalityMap<WorkerDetails>());
        LocalityMap<WorkerDetails>* logServerMap = (LocalityMap<WorkerDetails>*)logServerSet.getPtr();
        bool bCompleted = false;
        desired = std::max(required, desired);

        // Go through all the workers to list all the workers that can be recruited.
        for (const auto& [worker_process_id, worker_info] : id_worker) {
            const auto& worker_details = worker_info.details;
            auto fitness = worker_details.processClass.machineClassFitness(ProcessClass::TLog);

            if (std::find(exclusionWorkerIds.begin(), exclusionWorkerIds.end(), worker_details.interf.id()) !=
                exclusionWorkerIds.end()) {
                logWorkerUnavailable(SevInfo, id, "deprecated", "Worker is excluded", worker_details, fitness, dcIds);
                continue;
            }
            if (!workerAvailable(worker_info, checkStable)) {
                logWorkerUnavailable(
                    SevInfo, id, "deprecated", "Worker is not available", worker_details, fitness, dcIds);
                continue;
            }
            if (conf.isExcludedServer(worker_details.interf.addresses())) {
                logWorkerUnavailable(SevInfo,
                                     id,
                                     "deprecated",
                                     "Worker server is excluded from the cluster",
                                     worker_details,
                                     fitness,
                                     dcIds);
                continue;
            }
            if (isExcludedDegradedServer(worker_details.interf.addresses())) {
                logWorkerUnavailable(SevInfo,
                                     id,
                                     "deprecated",
                                     "Worker server is excluded from the cluster due to degradation",
                                     worker_details,
                                     fitness,
                                     dcIds);
                continue;
            }
            if (fitness == ProcessClass::NeverAssign) {
                logWorkerUnavailable(
                    SevDebug, id, "deprecated", "Worker's fitness is NeverAssign", worker_details, fitness, dcIds);
                continue;
            }
            if (!dcIds.empty() && dcIds.count(worker_details.interf.locality.dcId()) == 0) {
                logWorkerUnavailable(
                    SevDebug, id, "deprecated", "Worker is not in the target DC", worker_details, fitness, dcIds);
                continue;
            }

            // This worker is a candidate for TLog recruitment.
            bool inCCDC = worker_details.interf.locality.dcId() == clusterControllerDcId;
            // Prefer recruiting a TransactionClass non-degraded process over a LogClass degraded process
            if (worker_details.degraded) {
                fitness = std::max(fitness, ProcessClass::GoodFit);
            }

            fitness_workers[std::make_tuple(fitness, id_used[worker_process_id], worker_details.degraded, inCCDC)]
                .push_back(worker_details);
        }

        auto requiredFitness = ProcessClass::BestFit;
        int requiredUsed = 0;
        bool requiredDegraded = false;
        bool requiredInCCDC = false;

        // Determine the minimum fitness and used necessary to fulfill the policy
        for (auto workerIter = fitness_workers.begin(); workerIter != fitness_workers.end(); ++workerIter) {
            auto fitness = std::get<0>(workerIter->first);
            auto used = std::get<1>(workerIter->first);
            if (fitness > requiredFitness || used > requiredUsed) {
                if (logServerSet->size() >= required && logServerSet->validate(policy)) {
                    bCompleted = true;
                    break;
                }
                requiredFitness = fitness;
                requiredUsed = used;
            }

            if (std::get<2>(workerIter->first)) {
                requiredDegraded = true;
            }
            if (std::get<3>(workerIter->first)) {
                requiredInCCDC = true;
            }
            for (auto& worker : workerIter->second) {
                logServerMap->add(worker.interf.locality, &worker);
            }
        }

        if (!bCompleted && !(logServerSet->size() >= required && logServerSet->validate(policy))) {
            std::vector<LocalityData> tLocalities;
            for (auto& object : logServerMap->getObjects()) {
                tLocalities.push_back(object->interf.locality);
            }

            logServerSet->clear();
            logServerSet.clear();
            throw no_more_servers();
        }

        // If we have less than the desired amount, return all of the processes we have
        if (logServerSet->size() <= desired) {
            for (auto& object : logServerMap->getObjects()) {
                results.push_back(*object);
            }
            for (auto& result : results) {
                id_used[result.interf.locality.processId()]++;
            }
            return results;
        }

        // If we have added any degraded processes, try and remove them to see if we can still
        // have the desired amount of processes
        if (requiredDegraded) {
            logServerMap->clear();
            for (auto workerIter = fitness_workers.begin(); workerIter != fitness_workers.end(); ++workerIter) {
                auto fitness = std::get<0>(workerIter->first);
                auto used = std::get<1>(workerIter->first);
                if (fitness > requiredFitness || (fitness == requiredFitness && used > requiredUsed)) {
                    break;
                }
                auto addingDegraded = std::get<2>(workerIter->first);
                if (addingDegraded) {
                    continue;
                }
                for (auto& worker : workerIter->second) {
                    logServerMap->add(worker.interf.locality, &worker);
                }
            }
            if (logServerSet->size() >= desired && logServerSet->validate(policy)) {
                requiredDegraded = false;
            }
        }

        // If we have added any processes in the CC DC, try and remove them to see if we can still
        // have the desired amount of processes
        if (requiredInCCDC) {
            logServerMap->clear();
            for (auto workerIter = fitness_workers.begin(); workerIter != fitness_workers.end(); ++workerIter) {
                auto fitness = std::get<0>(workerIter->first);
                auto used = std::get<1>(workerIter->first);
                if (fitness > requiredFitness || (fitness == requiredFitness && used > requiredUsed)) {
                    break;
                }
                auto addingDegraded = std::get<2>(workerIter->first);
                auto inCCDC = std::get<3>(workerIter->first);
                if (inCCDC || (!requiredDegraded && addingDegraded)) {
                    continue;
                }
                for (auto& worker : workerIter->second) {
                    logServerMap->add(worker.interf.locality, &worker);
                }
            }
            if (logServerSet->size() >= desired && logServerSet->validate(policy)) {
                requiredInCCDC = false;
            }
        }

        logServerMap->clear();
        for (auto workerIter = fitness_workers.begin(); workerIter != fitness_workers.end(); ++workerIter) {
            auto fitness = std::get<0>(workerIter->first);
            auto used = std::get<1>(workerIter->first);
            if (fitness > requiredFitness || (fitness == requiredFitness && used > requiredUsed)) {
                break;
            }
            auto addingDegraded = std::get<2>(workerIter->first);
            auto inCCDC = std::get<3>(workerIter->first);
            if ((!requiredInCCDC && inCCDC) || (!requiredDegraded && addingDegraded)) {
                continue;
            }
            for (auto& worker : workerIter->second) {
                logServerMap->add(worker.interf.locality, &worker);
            }
        }

        if (logServerSet->size() == desired) {
            for (auto& object : logServerMap->getObjects()) {
                results.push_back(*object);
            }
            for (auto& result : results) {
                id_used[result.interf.locality.processId()]++;
            }
            return results;
        }

        std::vector<LocalityEntry> bestSet;
        std::vector<LocalityData> tLocalities;

        // We have more than the desired number of processes, so use the policy engine to
        // pick a diverse subset of them
        bCompleted = findBestPolicySet(bestSet,
                                       logServerSet,
                                       policy,
                                       desired,
                                       SERVER_KNOBS->POLICY_RATING_TESTS,
                                       SERVER_KNOBS->POLICY_GENERATIONS);
        ASSERT(bCompleted);
        results.reserve(results.size() + bestSet.size());
        for (auto& entry : bestSet) {
            auto object = logServerMap->getObject(entry);
            ASSERT(object);
            results.push_back(*object);
            tLocalities.push_back(object->interf.locality);
        }
        for (auto& result : results) {
            id_used[result.interf.locality.processId()]++;
        }
        TraceEvent("GetTLogTeamDone")
            .detail("Policy", policy->info())
            .detail("Results", results.size())
            .detail("Processes", logServerSet->size())
            .detail("Workers", id_worker.size())
            .detail("Required", required)
            .detail("Desired", desired)
            .detail("Fitness", requiredFitness)
            .detail("Used", requiredUsed)
            .detail("AddingDegraded", requiredDegraded)
            .detail("InCCDC", requiredInCCDC)
            .detail("BestCount", bestSet.size())
            .detail("BestZones", ::describeZones(tLocalities))
            .detail("BestDataHalls", ::describeDataHalls(tLocalities));
        return results;
    }
|
|
|
|
|
2021-04-21 15:22:33 +08:00
|
|
|
// Selects the best method for TLog recruitment based on the specified policy
|
|
|
|
std::vector<WorkerDetails> getWorkersForTlogs(DatabaseConfiguration const& conf,
|
|
|
|
int32_t required,
|
|
|
|
int32_t desired,
|
|
|
|
Reference<IReplicationPolicy> const& policy,
|
|
|
|
std::map<Optional<Standalone<StringRef>>, int>& id_used,
|
|
|
|
bool checkStable = false,
|
2021-04-27 01:16:18 +08:00
|
|
|
const std::set<Optional<Key>>& dcIds = std::set<Optional<Key>>(),
|
|
|
|
const std::vector<UID>& exclusionWorkerIds = {}) {
|
2021-04-21 15:22:33 +08:00
|
|
|
desired = std::max(required, desired);
|
|
|
|
bool useSimple = false;
|
|
|
|
if (policy->name() == "Across") {
|
|
|
|
PolicyAcross* pa1 = (PolicyAcross*)policy.getPtr();
|
|
|
|
Reference<IReplicationPolicy> embedded = pa1->embeddedPolicy();
|
|
|
|
if (embedded->name() == "Across") {
|
|
|
|
PolicyAcross* pa2 = (PolicyAcross*)embedded.getPtr();
|
|
|
|
if (pa2->attributeKey() == "zoneid" && pa2->embeddedPolicyName() == "One") {
|
|
|
|
std::map<Optional<Standalone<StringRef>>, int> testUsed = id_used;
|
|
|
|
|
|
|
|
auto workers = getWorkersForTlogsComplex(conf,
|
|
|
|
desired,
|
|
|
|
id_used,
|
|
|
|
pa1->attributeKey(),
|
|
|
|
pa1->getCount(),
|
|
|
|
pa2->getCount(),
|
|
|
|
checkStable,
|
|
|
|
dcIds,
|
|
|
|
exclusionWorkerIds);
|
|
|
|
|
|
|
|
if (g_network->isSimulated()) {
|
2021-10-30 02:18:47 +08:00
|
|
|
try {
|
|
|
|
auto testWorkers = getWorkersForTlogsBackup(
|
|
|
|
conf, required, desired, policy, testUsed, checkStable, dcIds, exclusionWorkerIds);
|
|
|
|
RoleFitness testFitness(testWorkers, ProcessClass::TLog, testUsed);
|
|
|
|
RoleFitness fitness(workers, ProcessClass::TLog, id_used);
|
|
|
|
|
|
|
|
std::map<Optional<Standalone<StringRef>>, int> field_count;
|
|
|
|
std::set<Optional<Standalone<StringRef>>> zones;
|
|
|
|
for (auto& worker : testWorkers) {
|
|
|
|
if (!zones.count(worker.interf.locality.zoneId())) {
|
|
|
|
field_count[worker.interf.locality.get(pa1->attributeKey())]++;
|
|
|
|
zones.insert(worker.interf.locality.zoneId());
|
|
|
|
}
|
2021-04-21 15:22:33 +08:00
|
|
|
}
|
2021-10-30 02:18:47 +08:00
|
|
|
// backup recruitment is not required to use degraded processes that have better fitness
|
|
|
|
// so we cannot compare degraded between the two methods
|
|
|
|
testFitness.degraded = fitness.degraded;
|
2021-04-21 15:22:33 +08:00
|
|
|
|
2021-10-30 02:18:47 +08:00
|
|
|
int minField = 100;
|
2021-04-21 15:22:33 +08:00
|
|
|
|
2021-10-30 02:18:47 +08:00
|
|
|
for (auto& f : field_count) {
|
|
|
|
minField = std::min(minField, f.second);
|
2021-04-21 15:22:33 +08:00
|
|
|
}
|
2021-10-30 02:18:47 +08:00
|
|
|
|
|
|
|
if (fitness > testFitness && minField > 1) {
|
|
|
|
for (auto& w : testWorkers) {
|
|
|
|
TraceEvent("TestTLogs").detail("Interf", w.interf.address());
|
|
|
|
}
|
|
|
|
for (auto& w : workers) {
|
|
|
|
TraceEvent("RealTLogs").detail("Interf", w.interf.address());
|
|
|
|
}
|
|
|
|
TraceEvent("FitnessCompare")
|
|
|
|
.detail("TestF", testFitness.toString())
|
|
|
|
.detail("RealF", fitness.toString());
|
|
|
|
ASSERT(false);
|
2021-04-21 15:22:33 +08:00
|
|
|
}
|
2021-10-30 02:18:47 +08:00
|
|
|
} catch (Error& e) {
|
|
|
|
ASSERT(false); // Simulation only validation should not throw errors
|
2021-04-21 15:22:33 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return workers;
|
|
|
|
}
|
|
|
|
} else if (pa1->attributeKey() == "zoneid" && embedded->name() == "One") {
|
|
|
|
ASSERT(pa1->getCount() == required);
|
|
|
|
useSimple = true;
|
|
|
|
}
|
|
|
|
} else if (policy->name() == "One") {
|
|
|
|
useSimple = true;
|
|
|
|
}
|
|
|
|
if (useSimple) {
|
|
|
|
std::map<Optional<Standalone<StringRef>>, int> testUsed = id_used;
|
|
|
|
|
|
|
|
auto workers =
|
|
|
|
getWorkersForTlogsSimple(conf, required, desired, id_used, checkStable, dcIds, exclusionWorkerIds);
|
|
|
|
|
|
|
|
if (g_network->isSimulated()) {
|
2021-10-30 02:18:47 +08:00
|
|
|
try {
|
|
|
|
auto testWorkers = getWorkersForTlogsBackup(
|
|
|
|
conf, required, desired, policy, testUsed, checkStable, dcIds, exclusionWorkerIds);
|
|
|
|
RoleFitness testFitness(testWorkers, ProcessClass::TLog, testUsed);
|
|
|
|
RoleFitness fitness(workers, ProcessClass::TLog, id_used);
|
|
|
|
// backup recruitment is not required to use degraded processes that have better fitness
|
|
|
|
// so we cannot compare degraded between the two methods
|
|
|
|
testFitness.degraded = fitness.degraded;
|
|
|
|
|
|
|
|
if (fitness > testFitness) {
|
|
|
|
for (auto& w : testWorkers) {
|
|
|
|
TraceEvent("TestTLogs").detail("Interf", w.interf.address());
|
|
|
|
}
|
|
|
|
for (auto& w : workers) {
|
|
|
|
TraceEvent("RealTLogs").detail("Interf", w.interf.address());
|
|
|
|
}
|
|
|
|
TraceEvent("FitnessCompare")
|
|
|
|
.detail("TestF", testFitness.toString())
|
|
|
|
.detail("RealF", fitness.toString());
|
|
|
|
ASSERT(false);
|
2021-04-21 15:22:33 +08:00
|
|
|
}
|
2021-10-30 02:18:47 +08:00
|
|
|
} catch (Error& e) {
|
|
|
|
ASSERT(false); // Simulation only validation should not throw errors
|
2021-04-21 15:22:33 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return workers;
|
|
|
|
}
|
2021-04-27 01:16:18 +08:00
|
|
|
TraceEvent(g_network->isSimulated() ? SevError : SevWarnAlways, "PolicyEngineNotOptimized");
|
2021-04-21 15:22:33 +08:00
|
|
|
return getWorkersForTlogsBackup(
|
|
|
|
conf, required, desired, policy, id_used, checkStable, dcIds, exclusionWorkerIds);
|
|
|
|
}
|
|
|
|
|
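    // Recruits satellite tlogs for the given region, trying successive starting satellite DCs and, when
    // permitted, the fallback satellite policy, until a valid team is found or no_more_servers is thrown.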
    // FIXME: This logic will fallback unnecessarily when usable dcs > 1 because it does not check all combinations of
    // potential satellite locations
    std::vector<WorkerDetails> getWorkersForSatelliteLogs(const DatabaseConfiguration& conf,
        const RegionInfo& region,
        const RegionInfo& remoteRegion,
        std::map<Optional<Standalone<StringRef>>, int>& id_used,
        bool& satelliteFallback,
        bool checkStable = false) {
        int startDC = 0;
        loop {
            if (startDC > 0 && startDC >= region.satellites.size() + 1 -
                (satelliteFallback ? region.satelliteTLogUsableDcsFallback
                                   : region.satelliteTLogUsableDcs)) {
                if (satelliteFallback || region.satelliteTLogUsableDcsFallback == 0) {
                    throw no_more_servers();
                } else {
                    if (!goodRecruitmentTime.isReady()) {
                        throw operation_failed();
                    }
                    satelliteFallback = true;
                    startDC = 0;
                }
            }

            try {
                bool remoteDCUsedAsSatellite = false;
                std::set<Optional<Key>> satelliteDCs;
                int32_t desiredSatelliteTLogs = 0;
                for (int s = startDC;
                     s < std::min<int>(startDC + (satelliteFallback ? region.satelliteTLogUsableDcsFallback
                                                                    : region.satelliteTLogUsableDcs),
                                       region.satellites.size());
                     s++) {
                    satelliteDCs.insert(region.satellites[s].dcId);
                    if (region.satellites[s].satelliteDesiredTLogCount == -1 || desiredSatelliteTLogs == -1) {
                        desiredSatelliteTLogs = -1;
                    } else {
                        desiredSatelliteTLogs += region.satellites[s].satelliteDesiredTLogCount;
                    }
                    if (region.satellites[s].dcId == remoteRegion.dcId) {
                        remoteDCUsedAsSatellite = true;
                    }
                }
                std::vector<UID> exclusionWorkerIds;
                // FIXME: If remote DC is used as satellite then this logic only ensures that required number of remote
                // TLogs can be recruited. It does not balance the number of desired TLogs across the satellite and
                // remote sides.
                if (remoteDCUsedAsSatellite) {
                    std::map<Optional<Standalone<StringRef>>, int> tmpIdUsed;
                    auto remoteLogs = getWorkersForTlogs(conf,
                        conf.getRemoteTLogReplicationFactor(),
                        conf.getRemoteTLogReplicationFactor(),
                        conf.getRemoteTLogPolicy(),
                        tmpIdUsed,
                        false,
                        { remoteRegion.dcId },
                        {});
                    std::transform(remoteLogs.begin(),
                        remoteLogs.end(),
                        std::back_inserter(exclusionWorkerIds),
                        [](const WorkerDetails& in) { return in.interf.id(); });
                }
                if (satelliteFallback) {
                    return getWorkersForTlogs(conf,
                        region.satelliteTLogReplicationFactorFallback,
                        desiredSatelliteTLogs > 0 ? desiredSatelliteTLogs
                                                  : conf.getDesiredSatelliteLogs(region.dcId) *
                                                        region.satelliteTLogUsableDcsFallback /
                                                        region.satelliteTLogUsableDcs,
                        region.satelliteTLogPolicyFallback,
                        id_used,
                        checkStable,
                        satelliteDCs,
                        exclusionWorkerIds);
                } else {
                    return getWorkersForTlogs(conf,
                        region.satelliteTLogReplicationFactor,
                        desiredSatelliteTLogs > 0 ? desiredSatelliteTLogs
                                                  : conf.getDesiredSatelliteLogs(region.dcId),
                        region.satelliteTLogPolicy,
                        id_used,
                        checkStable,
                        satelliteDCs,
                        exclusionWorkerIds);
                }
            } catch (Error& e) {
                if (e.code() != error_code_no_more_servers) {
                    throw;
                }
            }

            startDC++;
        }
    }

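    // Returns the best possible fitness for the given role among non-excluded workers in the cluster controller's DC.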
    ProcessClass::Fitness getBestFitnessForRoleInDatacenter(ProcessClass::ClusterRole role) {
        ProcessClass::Fitness bestFitness = ProcessClass::NeverAssign;
        for (const auto& it : id_worker) {
            if (it.second.priorityInfo.isExcluded ||
                it.second.details.interf.locality.dcId() != clusterControllerDcId) {
                continue;
            }
            bestFitness = std::min(bestFitness, it.second.details.processClass.machineClassFitness(role));
        }
        return bestFitness;
    }

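    // Picks a single worker in the given DC for the role. Candidates are ordered by (fitness, roles already
    // assigned, long-lived stateless, preferred sharing) and one is chosen at random from the best group;
    // throws no_more_servers() if no worker with acceptable fitness exists.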
    WorkerFitnessInfo getWorkerForRoleInDatacenter(Optional<Standalone<StringRef>> const& dcId,
        ProcessClass::ClusterRole role,
        ProcessClass::Fitness unacceptableFitness,
        DatabaseConfiguration const& conf,
        std::map<Optional<Standalone<StringRef>>, int>& id_used,
        std::map<Optional<Standalone<StringRef>>, int> preferredSharing = {},
        bool checkStable = false) {
        std::map<std::tuple<ProcessClass::Fitness, int, bool, int>, std::vector<WorkerDetails>> fitness_workers;

        for (auto& it : id_worker) {
            auto fitness = it.second.details.processClass.machineClassFitness(role);
            if (conf.isExcludedServer(it.second.details.interf.addresses()) ||
                isExcludedDegradedServer(it.second.details.interf.addresses())) {
                fitness = std::max(fitness, ProcessClass::ExcludeFit);
            }
            if (workerAvailable(it.second, checkStable) && fitness < unacceptableFitness &&
                it.second.details.interf.locality.dcId() == dcId) {
                auto sharing = preferredSharing.find(it.first);
                fitness_workers[std::make_tuple(fitness,
                                                id_used[it.first],
                                                isLongLivedStateless(it.first),
                                                sharing != preferredSharing.end() ? sharing->second : 1e6)]
                    .push_back(it.second.details);
            }
        }

        if (fitness_workers.size()) {
            auto worker = deterministicRandom()->randomChoice(fitness_workers.begin()->second);
            id_used[worker.interf.locality.processId()]++;
            return WorkerFitnessInfo(worker,
                std::max(ProcessClass::GoodFit, std::get<0>(fitness_workers.begin()->first)),
                std::get<1>(fitness_workers.begin()->first));
        }

        throw no_more_servers();
    }

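    // Selects up to 'amount' workers in the given DC for the role, using the same ordering as
    // getWorkerForRoleInDatacenter; if minWorker is provided it is always included and only workers
    // that are no worse than it are considered.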
    std::vector<WorkerDetails> getWorkersForRoleInDatacenter(
        Optional<Standalone<StringRef>> const& dcId,
        ProcessClass::ClusterRole role,
        int amount,
        DatabaseConfiguration const& conf,
        std::map<Optional<Standalone<StringRef>>, int>& id_used,
        std::map<Optional<Standalone<StringRef>>, int> preferredSharing = {},
        Optional<WorkerFitnessInfo> minWorker = Optional<WorkerFitnessInfo>(),
        bool checkStable = false) {
        std::map<std::tuple<ProcessClass::Fitness, int, bool, int>, std::vector<WorkerDetails>> fitness_workers;
        std::vector<WorkerDetails> results;
        if (minWorker.present()) {
            results.push_back(minWorker.get().worker);
        }
        if (amount <= results.size()) {
            return results;
        }

        for (auto& it : id_worker) {
            auto fitness = it.second.details.processClass.machineClassFitness(role);
            if (workerAvailable(it.second, checkStable) &&
                !conf.isExcludedServer(it.second.details.interf.addresses()) &&
                !isExcludedDegradedServer(it.second.details.interf.addresses()) &&
                it.second.details.interf.locality.dcId() == dcId &&
                (!minWorker.present() ||
                 (it.second.details.interf.id() != minWorker.get().worker.interf.id() &&
                  (fitness < minWorker.get().fitness ||
                   (fitness == minWorker.get().fitness && id_used[it.first] <= minWorker.get().used))))) {
                auto sharing = preferredSharing.find(it.first);
                fitness_workers[std::make_tuple(fitness,
                                                id_used[it.first],
                                                isLongLivedStateless(it.first),
                                                sharing != preferredSharing.end() ? sharing->second : 1e6)]
                    .push_back(it.second.details);
            }
        }

        for (auto& it : fitness_workers) {
            deterministicRandom()->randomShuffle(it.second);
            for (int i = 0; i < it.second.size(); i++) {
                results.push_back(it.second[i]);
                id_used[it.second[i].interf.locality.processId()]++;
                if (results.size() == amount)
                    return results;
            }
        }

        return results;
    }

    // Allows the comparison of two different recruitments to determine which one is better
    // Tlog recruitment is different from all the other roles, in that it avoids degraded processes
    // and tries to avoid recruitment in the same DC as the cluster controller
    struct RoleFitness {
        ProcessClass::Fitness bestFit;
        ProcessClass::Fitness worstFit;
        ProcessClass::ClusterRole role;
        int count;
        int worstUsed = 1;
        bool degraded = false;

        RoleFitness(int bestFit, int worstFit, int count, ProcessClass::ClusterRole role)
          : bestFit((ProcessClass::Fitness)bestFit), worstFit((ProcessClass::Fitness)worstFit), role(role),
            count(count) {}

        RoleFitness(int fitness, int count, ProcessClass::ClusterRole role)
          : bestFit((ProcessClass::Fitness)fitness), worstFit((ProcessClass::Fitness)fitness), role(role),
            count(count) {}

        RoleFitness()
          : bestFit(ProcessClass::NeverAssign), worstFit(ProcessClass::NeverAssign), role(ProcessClass::NoRole),
            count(0) {}

        RoleFitness(const std::vector<WorkerDetails>& workers,
            ProcessClass::ClusterRole role,
            const std::map<Optional<Standalone<StringRef>>, int>& id_used)
          : role(role) {
            // Every recruitment will attempt to recruit the preferred amount through GoodFit,
            // so a recruitment which only has BestFit is not better than one that has a GoodFit process
            worstFit = ProcessClass::GoodFit;
            degraded = false;
            bestFit = ProcessClass::NeverAssign;
            worstUsed = 1;
            for (auto& it : workers) {
                auto thisFit = it.processClass.machineClassFitness(role);
                auto thisUsed = id_used.find(it.interf.locality.processId());

                if (thisUsed == id_used.end()) {
                    TraceEvent(SevError, "UsedNotFound").detail("ProcessId", it.interf.locality.processId().get());
                    ASSERT(false);
                }
                if (thisUsed->second == 0) {
                    TraceEvent(SevError, "UsedIsZero").detail("ProcessId", it.interf.locality.processId().get());
                    ASSERT(false);
                }

                bestFit = std::min(bestFit, thisFit);

                if (thisFit > worstFit) {
                    worstFit = thisFit;
                    worstUsed = thisUsed->second;
                } else if (thisFit == worstFit) {
                    worstUsed = std::max(worstUsed, thisUsed->second);
                }
                degraded = degraded || it.degraded;
            }

            count = workers.size();

            // degraded is only used for recruitment of tlogs
            if (role != ProcessClass::TLog) {
                degraded = false;
            }
        }

        bool operator<(RoleFitness const& r) const {
            if (worstFit != r.worstFit)
                return worstFit < r.worstFit;
            if (worstUsed != r.worstUsed)
                return worstUsed < r.worstUsed;
            if (count != r.count)
                return count > r.count;
            if (degraded != r.degraded)
                return r.degraded;
            // FIXME: TLog recruitment process does not guarantee the best fit is not worsened.
            if (role != ProcessClass::TLog && role != ProcessClass::LogRouter && bestFit != r.bestFit)
                return bestFit < r.bestFit;
            return false;
        }
        bool operator>(RoleFitness const& r) const { return r < *this; }
        bool operator<=(RoleFitness const& r) const { return !(*this > r); }
        bool operator>=(RoleFitness const& r) const { return !(*this < r); }

        bool betterCount(RoleFitness const& r) const {
            if (count > r.count)
                return true;
            if (worstFit != r.worstFit)
                return worstFit < r.worstFit;
            if (worstUsed != r.worstUsed)
                return worstUsed < r.worstUsed;
            if (degraded != r.degraded)
                return r.degraded;
            return false;
        }

        bool operator==(RoleFitness const& r) const {
            return worstFit == r.worstFit && worstUsed == r.worstUsed && bestFit == r.bestFit && count == r.count &&
                   degraded == r.degraded;
        }

        std::string toString() const { return format("%d %d %d %d %d", worstFit, worstUsed, count, degraded, bestFit); }
    };

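    // Returns the set of datacenter IDs that contain at least one available, non-excluded worker.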
    std::set<Optional<Standalone<StringRef>>> getDatacenters(DatabaseConfiguration const& conf,
        bool checkStable = false) {
        std::set<Optional<Standalone<StringRef>>> result;
        for (auto& it : id_worker)
            if (workerAvailable(it.second, checkStable) &&
                !conf.isExcludedServer(it.second.details.interf.addresses()) &&
                !isExcludedDegradedServer(it.second.details.interf.addresses()))
                result.insert(it.second.details.interf.locality.dcId());
        return result;
    }

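    // Counts the processes hosting the master and the cluster controller as already used, so recruitment
    // prefers other processes.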
    void updateKnownIds(std::map<Optional<Standalone<StringRef>>, int>* id_used) {
        (*id_used)[masterProcessId]++;
        (*id_used)[clusterControllerProcessId]++;
    }

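    // Recruits remote tlogs and log routers in the requested remote DC; throws operation_failed() if a better
    // recruitment should still be possible and the remote recruitment grace period has not yet elapsed.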
    RecruitRemoteFromConfigurationReply findRemoteWorkersForConfiguration(
        RecruitRemoteFromConfigurationRequest const& req) {
        RecruitRemoteFromConfigurationReply result;
        std::map<Optional<Standalone<StringRef>>, int> id_used;

        updateKnownIds(&id_used);

        std::set<Optional<Key>> remoteDC;
        remoteDC.insert(req.dcId);

        auto remoteLogs = getWorkersForTlogs(req.configuration,
            req.configuration.getRemoteTLogReplicationFactor(),
            req.configuration.getDesiredRemoteLogs(),
            req.configuration.getRemoteTLogPolicy(),
            id_used,
            false,
            remoteDC,
            req.exclusionWorkerIds);
        for (int i = 0; i < remoteLogs.size(); i++) {
            result.remoteTLogs.push_back(remoteLogs[i].interf);
        }

        auto logRouters = getWorkersForRoleInDatacenter(
            req.dcId, ProcessClass::LogRouter, req.logRouterCount, req.configuration, id_used);
        for (int i = 0; i < logRouters.size(); i++) {
            result.logRouters.push_back(logRouters[i].interf);
        }

        if (!goodRemoteRecruitmentTime.isReady() &&
            ((RoleFitness(
                  SERVER_KNOBS->EXPECTED_TLOG_FITNESS, req.configuration.getDesiredRemoteLogs(), ProcessClass::TLog)
                  .betterCount(RoleFitness(remoteLogs, ProcessClass::TLog, id_used))) ||
             (RoleFitness(SERVER_KNOBS->EXPECTED_LOG_ROUTER_FITNESS, req.logRouterCount, ProcessClass::LogRouter)
                  .betterCount(RoleFitness(logRouters, ProcessClass::LogRouter, id_used))))) {
            throw operation_failed();
        }

        return result;
    }

    // Given datacenter ID, returns the primary and remote regions.
    std::pair<RegionInfo, RegionInfo> getPrimaryAndRemoteRegion(const std::vector<RegionInfo>& regions, Key dcId) {
        RegionInfo region;
        RegionInfo remoteRegion;
        for (const auto& r : regions) {
            if (r.dcId == dcId) {
                region = r;
            } else {
                remoteRegion = r;
            }
        }
        return std::make_pair(region, remoteRegion);
    }

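    // Attempts to recruit the full transaction subsystem (tlogs, satellite tlogs, proxies, resolvers, backup
    // workers, and optionally seed storage servers) in the given DC. Returns operation_failed() when
    // checkGoodRecruitment is set and a better recruitment is still expected before the grace period expires.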
    ErrorOr<RecruitFromConfigurationReply> findWorkersForConfigurationFromDC(RecruitFromConfigurationRequest const& req,
        Optional<Key> dcId,
        bool checkGoodRecruitment) {
        RecruitFromConfigurationReply result;
        std::map<Optional<Standalone<StringRef>>, int> id_used;
        updateKnownIds(&id_used);

        ASSERT(dcId.present());

        std::set<Optional<Key>> primaryDC;
        primaryDC.insert(dcId);
        result.dcId = dcId;

        auto [region, remoteRegion] = getPrimaryAndRemoteRegion(req.configuration.regions, dcId.get());

        if (req.recruitSeedServers) {
            auto primaryStorageServers =
                getWorkersForSeedServers(req.configuration, req.configuration.storagePolicy, dcId);
            for (int i = 0; i < primaryStorageServers.size(); i++) {
                result.storageServers.push_back(primaryStorageServers[i].interf);
            }
        }

        auto tlogs = getWorkersForTlogs(req.configuration,
            req.configuration.tLogReplicationFactor,
            req.configuration.getDesiredLogs(),
            req.configuration.tLogPolicy,
            id_used,
            false,
            primaryDC);
        for (int i = 0; i < tlogs.size(); i++) {
            result.tLogs.push_back(tlogs[i].interf);
        }

        std::vector<WorkerDetails> satelliteLogs;
        if (region.satelliteTLogReplicationFactor > 0 && req.configuration.usableRegions > 1) {
            satelliteLogs =
                getWorkersForSatelliteLogs(req.configuration, region, remoteRegion, id_used, result.satelliteFallback);
            for (int i = 0; i < satelliteLogs.size(); i++) {
                result.satelliteTLogs.push_back(satelliteLogs[i].interf);
            }
        }

        std::map<Optional<Standalone<StringRef>>, int> preferredSharing;
        auto first_commit_proxy = getWorkerForRoleInDatacenter(
            dcId, ProcessClass::CommitProxy, ProcessClass::ExcludeFit, req.configuration, id_used, preferredSharing);
        preferredSharing[first_commit_proxy.worker.interf.locality.processId()] = 0;
        auto first_grv_proxy = getWorkerForRoleInDatacenter(
            dcId, ProcessClass::GrvProxy, ProcessClass::ExcludeFit, req.configuration, id_used, preferredSharing);
        preferredSharing[first_grv_proxy.worker.interf.locality.processId()] = 1;
        auto first_resolver = getWorkerForRoleInDatacenter(
            dcId, ProcessClass::Resolver, ProcessClass::ExcludeFit, req.configuration, id_used, preferredSharing);
        preferredSharing[first_resolver.worker.interf.locality.processId()] = 2;

        // If one of the first process recruitments is forced to share a process, allow all of next recruitments
        // to also share a process.
        auto maxUsed = std::max({ first_commit_proxy.used, first_grv_proxy.used, first_resolver.used });
        first_commit_proxy.used = maxUsed;
        first_grv_proxy.used = maxUsed;
        first_resolver.used = maxUsed;

        auto commit_proxies = getWorkersForRoleInDatacenter(dcId,
            ProcessClass::CommitProxy,
            req.configuration.getDesiredCommitProxies(),
            req.configuration,
            id_used,
            preferredSharing,
            first_commit_proxy);
        auto grv_proxies = getWorkersForRoleInDatacenter(dcId,
            ProcessClass::GrvProxy,
            req.configuration.getDesiredGrvProxies(),
            req.configuration,
            id_used,
            preferredSharing,
            first_grv_proxy);
        auto resolvers = getWorkersForRoleInDatacenter(dcId,
            ProcessClass::Resolver,
            req.configuration.getDesiredResolvers(),
            req.configuration,
            id_used,
            preferredSharing,
            first_resolver);
        for (int i = 0; i < commit_proxies.size(); i++)
            result.commitProxies.push_back(commit_proxies[i].interf);
        for (int i = 0; i < grv_proxies.size(); i++)
            result.grvProxies.push_back(grv_proxies[i].interf);
        for (int i = 0; i < resolvers.size(); i++)
            result.resolvers.push_back(resolvers[i].interf);

        if (req.maxOldLogRouters > 0) {
            if (tlogs.size() == 1) {
                result.oldLogRouters.push_back(tlogs[0].interf);
            } else {
                for (int i = 0; i < tlogs.size(); i++) {
                    if (tlogs[i].interf.locality.processId() != clusterControllerProcessId) {
                        result.oldLogRouters.push_back(tlogs[i].interf);
                    }
                }
            }
        }

        if (req.configuration.backupWorkerEnabled) {
            const int nBackup = std::max<int>(
                (req.configuration.desiredLogRouterCount > 0 ? req.configuration.desiredLogRouterCount : tlogs.size()),
                req.maxOldLogRouters);
            auto backupWorkers =
                getWorkersForRoleInDatacenter(dcId, ProcessClass::Backup, nBackup, req.configuration, id_used);
            std::transform(backupWorkers.begin(),
                backupWorkers.end(),
                std::back_inserter(result.backupWorkers),
                [](const WorkerDetails& w) { return w.interf; });
        }

        if (!goodRecruitmentTime.isReady() && checkGoodRecruitment &&
            (RoleFitness(SERVER_KNOBS->EXPECTED_TLOG_FITNESS, req.configuration.getDesiredLogs(), ProcessClass::TLog)
                 .betterCount(RoleFitness(tlogs, ProcessClass::TLog, id_used)) ||
             (region.satelliteTLogReplicationFactor > 0 && req.configuration.usableRegions > 1 &&
              RoleFitness(SERVER_KNOBS->EXPECTED_TLOG_FITNESS,
                          req.configuration.getDesiredSatelliteLogs(dcId),
                          ProcessClass::TLog)
                  .betterCount(RoleFitness(satelliteLogs, ProcessClass::TLog, id_used))) ||
             RoleFitness(SERVER_KNOBS->EXPECTED_COMMIT_PROXY_FITNESS,
                         req.configuration.getDesiredCommitProxies(),
                         ProcessClass::CommitProxy)
                 .betterCount(RoleFitness(commit_proxies, ProcessClass::CommitProxy, id_used)) ||
             RoleFitness(SERVER_KNOBS->EXPECTED_GRV_PROXY_FITNESS,
                         req.configuration.getDesiredGrvProxies(),
                         ProcessClass::GrvProxy)
                 .betterCount(RoleFitness(grv_proxies, ProcessClass::GrvProxy, id_used)) ||
             RoleFitness(SERVER_KNOBS->EXPECTED_RESOLVER_FITNESS,
                         req.configuration.getDesiredResolvers(),
                         ProcessClass::Resolver)
                 .betterCount(RoleFitness(resolvers, ProcessClass::Resolver, id_used)))) {
            return operation_failed();
        }

        return result;
    }

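    // Decides which DC to recruit the transaction subsystem in (handling primary switching and failover for
    // multi-region configurations) and delegates to findWorkersForConfigurationFromDC; with no regions
    // configured, it recruits in whichever DC yields the best fitness.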
    RecruitFromConfigurationReply findWorkersForConfigurationDispatch(RecruitFromConfigurationRequest const& req,
        bool checkGoodRecruitment) {
        if (req.configuration.regions.size() > 1) {
            std::vector<RegionInfo> regions = req.configuration.regions;
            if (regions[0].priority == regions[1].priority && regions[1].dcId == clusterControllerDcId.get()) {
                TraceEvent("CCSwitchPrimaryDc", id)
                    .detail("CCDcId", clusterControllerDcId.get())
                    .detail("OldPrimaryDcId", regions[0].dcId)
                    .detail("NewPrimaryDcId", regions[1].dcId);
                std::swap(regions[0], regions[1]);
            }

            if (regions[1].dcId == clusterControllerDcId.get() &&
                (!versionDifferenceUpdated || datacenterVersionDifference >= SERVER_KNOBS->MAX_VERSION_DIFFERENCE)) {
                if (regions[1].priority >= 0) {
                    TraceEvent("CCSwitchPrimaryDcVersionDifference", id)
                        .detail("CCDcId", clusterControllerDcId.get())
                        .detail("OldPrimaryDcId", regions[0].dcId)
                        .detail("NewPrimaryDcId", regions[1].dcId);
                    std::swap(regions[0], regions[1]);
                } else {
                    TraceEvent(SevWarnAlways, "CCDcPriorityNegative")
                        .detail("DcId", regions[1].dcId)
                        .detail("Priority", regions[1].priority)
                        .detail("FindWorkersInDc", regions[0].dcId)
                        .detail("Warning", "Failover did not happen but CC is in remote DC");
                }
            }

            TraceEvent("CCFindWorkersForConfiguration", id)
                .detail("CCDcId", clusterControllerDcId.get())
                .detail("Region0DcId", regions[0].dcId)
                .detail("Region1DcId", regions[1].dcId)
                .detail("DatacenterVersionDifference", datacenterVersionDifference)
                .detail("VersionDifferenceUpdated", versionDifferenceUpdated);

            bool setPrimaryDesired = false;
            try {
                auto reply = findWorkersForConfigurationFromDC(req, regions[0].dcId, checkGoodRecruitment);
                setPrimaryDesired = true;
                std::vector<Optional<Key>> dcPriority;
                dcPriority.push_back(regions[0].dcId);
                dcPriority.push_back(regions[1].dcId);
                desiredDcIds.set(dcPriority);
                if (reply.isError()) {
                    throw reply.getError();
                } else if (regions[0].dcId == clusterControllerDcId.get()) {
                    return reply.get();
                }
                TraceEvent(SevWarn, "CCRecruitmentFailed", id)
                    .detail("Reason", "Recruited Txn system and CC are in different DCs")
                    .detail("CCDcId", clusterControllerDcId.get())
                    .detail("RecruitedTxnSystemDcId", regions[0].dcId);
                throw no_more_servers();
            } catch (Error& e) {
                if (!goodRemoteRecruitmentTime.isReady() && regions[1].dcId != clusterControllerDcId.get() &&
                    checkGoodRecruitment) {
                    throw operation_failed();
                }

                if (e.code() != error_code_no_more_servers || regions[1].priority < 0) {
                    throw;
                }
                TraceEvent(SevWarn, "AttemptingRecruitmentInRemoteDc", id)
                    .detail("SetPrimaryDesired", setPrimaryDesired)
                    .error(e);
                auto reply = findWorkersForConfigurationFromDC(req, regions[1].dcId, checkGoodRecruitment);
                if (!setPrimaryDesired) {
                    std::vector<Optional<Key>> dcPriority;
                    dcPriority.push_back(regions[1].dcId);
                    dcPriority.push_back(regions[0].dcId);
                    desiredDcIds.set(dcPriority);
                }
                if (reply.isError()) {
                    throw reply.getError();
                } else if (regions[1].dcId == clusterControllerDcId.get()) {
                    return reply.get();
                }
                throw;
            }
        } else if (req.configuration.regions.size() == 1) {
            std::vector<Optional<Key>> dcPriority;
            dcPriority.push_back(req.configuration.regions[0].dcId);
            desiredDcIds.set(dcPriority);
            auto reply =
                findWorkersForConfigurationFromDC(req, req.configuration.regions[0].dcId, checkGoodRecruitment);
            if (reply.isError()) {
                throw reply.getError();
            } else if (req.configuration.regions[0].dcId == clusterControllerDcId.get()) {
                return reply.get();
            }
            throw no_more_servers();
        } else {
            RecruitFromConfigurationReply result;
            std::map<Optional<Standalone<StringRef>>, int> id_used;
            updateKnownIds(&id_used);
            auto tlogs = getWorkersForTlogs(req.configuration,
                req.configuration.tLogReplicationFactor,
                req.configuration.getDesiredLogs(),
                req.configuration.tLogPolicy,
                id_used);
            for (int i = 0; i < tlogs.size(); i++) {
                result.tLogs.push_back(tlogs[i].interf);
            }

            if (req.maxOldLogRouters > 0) {
                if (tlogs.size() == 1) {
                    result.oldLogRouters.push_back(tlogs[0].interf);
                } else {
                    for (int i = 0; i < tlogs.size(); i++) {
                        if (tlogs[i].interf.locality.processId() != clusterControllerProcessId) {
                            result.oldLogRouters.push_back(tlogs[i].interf);
                        }
                    }
                }
            }

            if (req.recruitSeedServers) {
                auto primaryStorageServers =
                    getWorkersForSeedServers(req.configuration, req.configuration.storagePolicy);
                for (int i = 0; i < primaryStorageServers.size(); i++)
                    result.storageServers.push_back(primaryStorageServers[i].interf);
            }

            auto datacenters = getDatacenters(req.configuration);

            std::tuple<RoleFitness, RoleFitness, RoleFitness> bestFitness;
            int numEquivalent = 1;
            Optional<Key> bestDC;

            for (auto dcId : datacenters) {
                try {
                    // SOMEDAY: recruitment in other DCs besides the clusterControllerDcID will not account for the
                    // processes used by the master and cluster controller properly.
                    auto used = id_used;
                    std::map<Optional<Standalone<StringRef>>, int> preferredSharing;
                    auto first_commit_proxy = getWorkerForRoleInDatacenter(dcId,
                        ProcessClass::CommitProxy,
                        ProcessClass::ExcludeFit,
                        req.configuration,
                        used,
                        preferredSharing);
                    preferredSharing[first_commit_proxy.worker.interf.locality.processId()] = 0;
                    auto first_grv_proxy = getWorkerForRoleInDatacenter(dcId,
                        ProcessClass::GrvProxy,
                        ProcessClass::ExcludeFit,
                        req.configuration,
                        used,
                        preferredSharing);
                    preferredSharing[first_grv_proxy.worker.interf.locality.processId()] = 1;
                    auto first_resolver = getWorkerForRoleInDatacenter(dcId,
                        ProcessClass::Resolver,
                        ProcessClass::ExcludeFit,
                        req.configuration,
                        used,
                        preferredSharing);
                    preferredSharing[first_resolver.worker.interf.locality.processId()] = 2;

                    // If one of the first process recruitments is forced to share a process, allow all of next
                    // recruitments to also share a process.
                    auto maxUsed = std::max({ first_commit_proxy.used, first_grv_proxy.used, first_resolver.used });
                    first_commit_proxy.used = maxUsed;
                    first_grv_proxy.used = maxUsed;
                    first_resolver.used = maxUsed;

                    auto commit_proxies = getWorkersForRoleInDatacenter(dcId,
                        ProcessClass::CommitProxy,
                        req.configuration.getDesiredCommitProxies(),
                        req.configuration,
                        used,
                        preferredSharing,
                        first_commit_proxy);

                    auto grv_proxies = getWorkersForRoleInDatacenter(dcId,
                        ProcessClass::GrvProxy,
                        req.configuration.getDesiredGrvProxies(),
                        req.configuration,
                        used,
                        preferredSharing,
                        first_grv_proxy);

                    auto resolvers = getWorkersForRoleInDatacenter(dcId,
                        ProcessClass::Resolver,
                        req.configuration.getDesiredResolvers(),
                        req.configuration,
                        used,
                        preferredSharing,
                        first_resolver);

                    auto fitness = std::make_tuple(RoleFitness(commit_proxies, ProcessClass::CommitProxy, used),
                        RoleFitness(grv_proxies, ProcessClass::GrvProxy, used),
                        RoleFitness(resolvers, ProcessClass::Resolver, used));

                    if (dcId == clusterControllerDcId) {
                        bestFitness = fitness;
                        bestDC = dcId;
                        for (int i = 0; i < resolvers.size(); i++) {
                            result.resolvers.push_back(resolvers[i].interf);
                        }
                        for (int i = 0; i < commit_proxies.size(); i++) {
                            result.commitProxies.push_back(commit_proxies[i].interf);
                        }
                        for (int i = 0; i < grv_proxies.size(); i++) {
                            result.grvProxies.push_back(grv_proxies[i].interf);
                        }

                        if (req.configuration.backupWorkerEnabled) {
                            const int nBackup = std::max<int>(tlogs.size(), req.maxOldLogRouters);
                            auto backupWorkers = getWorkersForRoleInDatacenter(
                                dcId, ProcessClass::Backup, nBackup, req.configuration, used);
                            std::transform(backupWorkers.begin(),
                                backupWorkers.end(),
                                std::back_inserter(result.backupWorkers),
                                [](const WorkerDetails& w) { return w.interf; });
                        }

                        break;
                    } else {
                        if (fitness < bestFitness) {
                            bestFitness = fitness;
                            numEquivalent = 1;
                            bestDC = dcId;
                        } else if (fitness == bestFitness &&
                                   deterministicRandom()->random01() < 1.0 / ++numEquivalent) {
                            bestDC = dcId;
                        }
                    }
                } catch (Error& e) {
                    if (e.code() != error_code_no_more_servers) {
                        throw;
                    }
                }
            }

            if (bestDC != clusterControllerDcId) {
                TraceEvent("BestDCIsNotClusterDC").log();
                std::vector<Optional<Key>> dcPriority;
                dcPriority.push_back(bestDC);
                desiredDcIds.set(dcPriority);
                throw no_more_servers();
            }
            // If this cluster controller dies, do not prioritize recruiting the next one in the same DC
            desiredDcIds.set(std::vector<Optional<Key>>());
            TraceEvent("FindWorkersForConfig")
                .detail("Replication", req.configuration.tLogReplicationFactor)
                .detail("DesiredLogs", req.configuration.getDesiredLogs())
                .detail("ActualLogs", result.tLogs.size())
                .detail("DesiredCommitProxies", req.configuration.getDesiredCommitProxies())
                .detail("ActualCommitProxies", result.commitProxies.size())
                .detail("DesiredGrvProxies", req.configuration.getDesiredGrvProxies())
                .detail("ActualGrvProxies", result.grvProxies.size())
                .detail("DesiredResolvers", req.configuration.getDesiredResolvers())
                .detail("ActualResolvers", result.resolvers.size());

            if (!goodRecruitmentTime.isReady() && checkGoodRecruitment &&
                (RoleFitness(
                     SERVER_KNOBS->EXPECTED_TLOG_FITNESS, req.configuration.getDesiredLogs(), ProcessClass::TLog)
                     .betterCount(RoleFitness(tlogs, ProcessClass::TLog, id_used)) ||
                 RoleFitness(SERVER_KNOBS->EXPECTED_COMMIT_PROXY_FITNESS,
                             req.configuration.getDesiredCommitProxies(),
                             ProcessClass::CommitProxy)
                     .betterCount(std::get<0>(bestFitness)) ||
                 RoleFitness(SERVER_KNOBS->EXPECTED_GRV_PROXY_FITNESS,
                             req.configuration.getDesiredGrvProxies(),
                             ProcessClass::GrvProxy)
                     .betterCount(std::get<1>(bestFitness)) ||
                 RoleFitness(SERVER_KNOBS->EXPECTED_RESOLVER_FITNESS,
                             req.configuration.getDesiredResolvers(),
                             ProcessClass::Resolver)
                     .betterCount(std::get<2>(bestFitness)))) {
                throw operation_failed();
            }

            return result;
        }
    }

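    // Increments the usage count of every process hosting one of the given worker interfaces.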
    void updateIdUsed(const std::vector<WorkerInterface>& workers,
        std::map<Optional<Standalone<StringRef>>, int>& id_used) {
        for (auto& it : workers) {
            id_used[it.locality.processId()]++;
        }
    }

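    // Simulation-only validation: computes the RoleFitness of two recruitments for the same role and logs a
    // SevError NonDeterministicRecruitment event if they differ.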
    void compareWorkers(const DatabaseConfiguration& conf,
        const std::vector<WorkerInterface>& first,
        std::map<Optional<Standalone<StringRef>>, int>& firstUsed,
        const std::vector<WorkerInterface>& second,
        std::map<Optional<Standalone<StringRef>>, int>& secondUsed,
        ProcessClass::ClusterRole role,
        std::string description) {
        std::vector<WorkerDetails> firstDetails;
        for (auto& it : first) {
            auto w = id_worker.find(it.locality.processId());
            ASSERT(w != id_worker.end());
            ASSERT(!conf.isExcludedServer(w->second.details.interf.addresses()));
            firstDetails.push_back(w->second.details);
            //TraceEvent("CompareAddressesFirst").detail(description.c_str(), w->second.details.interf.address());
        }
        RoleFitness firstFitness(firstDetails, role, firstUsed);

        std::vector<WorkerDetails> secondDetails;
        for (auto& it : second) {
            auto w = id_worker.find(it.locality.processId());
            ASSERT(w != id_worker.end());
            ASSERT(!conf.isExcludedServer(w->second.details.interf.addresses()));
            secondDetails.push_back(w->second.details);
            //TraceEvent("CompareAddressesSecond").detail(description.c_str(), w->second.details.interf.address());
        }
        RoleFitness secondFitness(secondDetails, role, secondUsed);

        if (!(firstFitness == secondFitness)) {
            TraceEvent(SevError, "NonDeterministicRecruitment")
                .detail("FirstFitness", firstFitness.toString())
                .detail("SecondFitness", secondFitness.toString())
                .detail("ClusterRole", role);
        }
    }

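    // Recruits the transaction subsystem for the given request. In simulation, recruitment is re-run without the
    // goodness check and compareWorkers verifies that both runs produce equivalent fitness.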
RecruitFromConfigurationReply findWorkersForConfiguration(RecruitFromConfigurationRequest const& req) {
|
2021-10-30 07:42:48 +08:00
|
|
|
RecruitFromConfigurationReply rep = findWorkersForConfigurationDispatch(req, true);
|
2021-04-27 01:13:59 +08:00
|
|
|
if (g_network->isSimulated()) {
|
2021-10-30 02:18:47 +08:00
|
|
|
try {
|
|
|
|
// FIXME: The logic to pick a satellite in a remote region is not
|
|
|
|
// deterministic and can therefore break this nondeterminism check.
|
|
|
|
// Since satellites will generally be in the primary region,
|
|
|
|
// disable the determinism check for remote region satellites.
|
|
|
|
bool remoteDCUsedAsSatellite = false;
|
|
|
|
if (req.configuration.regions.size() > 1) {
|
|
|
|
auto [region, remoteRegion] =
|
|
|
|
getPrimaryAndRemoteRegion(req.configuration.regions, req.configuration.regions[0].dcId);
|
|
|
|
for (const auto& satellite : region.satellites) {
|
|
|
|
if (satellite.dcId == remoteRegion.dcId) {
|
|
|
|
remoteDCUsedAsSatellite = true;
|
|
|
|
}
|
2021-07-01 10:50:08 +08:00
|
|
|
}
|
|
|
|
}
|
2021-10-30 02:18:47 +08:00
|
|
|
if (!remoteDCUsedAsSatellite) {
|
2021-10-30 07:42:48 +08:00
|
|
|
RecruitFromConfigurationReply compare = findWorkersForConfigurationDispatch(req, false);
|
2021-10-30 02:18:47 +08:00
|
|
|
|
|
|
|
std::map<Optional<Standalone<StringRef>>, int> firstUsed;
|
|
|
|
std::map<Optional<Standalone<StringRef>>, int> secondUsed;
|
|
|
|
updateKnownIds(&firstUsed);
|
|
|
|
updateKnownIds(&secondUsed);
|
|
|
|
|
|
|
|
// auto mworker = id_worker.find(masterProcessId);
|
|
|
|
//TraceEvent("CompareAddressesMaster")
|
|
|
|
// .detail("Master",
|
|
|
|
// mworker != id_worker.end() ? mworker->second.details.interf.address() :
|
|
|
|
// NetworkAddress());
|
|
|
|
|
|
|
|
updateIdUsed(rep.tLogs, firstUsed);
|
|
|
|
updateIdUsed(compare.tLogs, secondUsed);
|
|
|
|
compareWorkers(
|
|
|
|
req.configuration, rep.tLogs, firstUsed, compare.tLogs, secondUsed, ProcessClass::TLog, "TLog");
|
|
|
|
updateIdUsed(rep.satelliteTLogs, firstUsed);
|
|
|
|
updateIdUsed(compare.satelliteTLogs, secondUsed);
|
|
|
|
compareWorkers(req.configuration,
|
|
|
|
rep.satelliteTLogs,
|
|
|
|
firstUsed,
|
|
|
|
compare.satelliteTLogs,
|
|
|
|
secondUsed,
|
|
|
|
ProcessClass::TLog,
|
|
|
|
"Satellite");
|
|
|
|
updateIdUsed(rep.commitProxies, firstUsed);
|
|
|
|
updateIdUsed(compare.commitProxies, secondUsed);
|
|
|
|
updateIdUsed(rep.grvProxies, firstUsed);
|
|
|
|
updateIdUsed(compare.grvProxies, secondUsed);
|
|
|
|
updateIdUsed(rep.resolvers, firstUsed);
|
|
|
|
updateIdUsed(compare.resolvers, secondUsed);
|
|
|
|
compareWorkers(req.configuration,
|
|
|
|
rep.commitProxies,
|
|
|
|
firstUsed,
|
|
|
|
compare.commitProxies,
|
|
|
|
secondUsed,
|
|
|
|
ProcessClass::CommitProxy,
|
|
|
|
"CommitProxy");
|
|
|
|
compareWorkers(req.configuration,
|
|
|
|
rep.grvProxies,
|
|
|
|
firstUsed,
|
|
|
|
compare.grvProxies,
|
|
|
|
secondUsed,
|
|
|
|
ProcessClass::GrvProxy,
|
|
|
|
"GrvProxy");
|
|
|
|
compareWorkers(req.configuration,
|
|
|
|
rep.resolvers,
|
|
|
|
firstUsed,
|
|
|
|
compare.resolvers,
|
|
|
|
secondUsed,
|
|
|
|
ProcessClass::Resolver,
|
|
|
|
"Resolver");
|
|
|
|
updateIdUsed(rep.backupWorkers, firstUsed);
|
|
|
|
updateIdUsed(compare.backupWorkers, secondUsed);
|
|
|
|
compareWorkers(req.configuration,
|
|
|
|
rep.backupWorkers,
|
|
|
|
firstUsed,
|
|
|
|
compare.backupWorkers,
|
|
|
|
secondUsed,
|
|
|
|
ProcessClass::Backup,
|
|
|
|
"Backup");
|
|
|
|
}
|
|
|
|
} catch (Error& e) {
|
|
|
|
ASSERT(false); // Simulation-only validation should not throw errors
|
2021-07-01 10:50:08 +08:00
|
|
|
}
|
2021-04-27 01:13:59 +08:00
|
|
|
}
|
|
|
|
return rep;
|
|
|
|
}
|
|
|
|
|
2021-02-11 05:45:06 +08:00
|
|
|
// Check whether the txn system can be recruited successfully in each region
|
2018-06-14 09:14:14 +08:00
|
|
|
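// Trial-recruits the primary transaction system in regions[0]; if every role can be filled there, updates
// desiredDcIds to prefer regions[0] over regions[1]. A no_more_servers error leaves the current preference
// unchanged.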
void checkRegions(const std::vector<RegionInfo>& regions) {
|
2021-03-11 02:06:03 +08:00
|
|
|
if (desiredDcIds.get().present() && desiredDcIds.get().get().size() == 2 &&
|
|
|
|
desiredDcIds.get().get()[0].get() == regions[0].dcId &&
|
|
|
|
desiredDcIds.get().get()[1].get() == regions[1].dcId) {
|
2018-06-14 09:14:14 +08:00
|
|
|
return;
|
|
|
|
}
|
2018-06-29 14:15:32 +08:00
|
|
|
|
2018-06-14 09:14:14 +08:00
|
|
|
try {
|
2021-03-11 02:06:03 +08:00
|
|
|
std::map<Optional<Standalone<StringRef>>, int> id_used;
|
2021-04-09 05:29:12 +08:00
|
|
|
getWorkerForRoleInDatacenter(regions[0].dcId,
|
|
|
|
ProcessClass::ClusterController,
|
|
|
|
ProcessClass::ExcludeFit,
|
|
|
|
db.config,
|
|
|
|
id_used,
|
2021-04-27 10:26:25 +08:00
|
|
|
{},
|
2021-04-09 05:29:12 +08:00
|
|
|
true);
|
2021-03-11 02:06:03 +08:00
|
|
|
getWorkerForRoleInDatacenter(
|
2021-04-27 10:26:25 +08:00
|
|
|
regions[0].dcId, ProcessClass::Master, ProcessClass::ExcludeFit, db.config, id_used, {}, true);
|
2018-06-29 14:15:32 +08:00
|
|
|
|
2018-06-14 09:14:14 +08:00
|
|
|
std::set<Optional<Key>> primaryDC;
|
|
|
|
primaryDC.insert(regions[0].dcId);
|
2021-03-11 02:06:03 +08:00
|
|
|
getWorkersForTlogs(db.config,
|
|
|
|
db.config.tLogReplicationFactor,
|
|
|
|
db.config.getDesiredLogs(),
|
|
|
|
db.config.tLogPolicy,
|
|
|
|
id_used,
|
|
|
|
true,
|
|
|
|
primaryDC);
|
|
|
|
if (regions[0].satelliteTLogReplicationFactor > 0 && db.config.usableRegions > 1) {
|
2018-06-29 14:15:32 +08:00
|
|
|
bool satelliteFallback = false;
|
2019-03-19 03:17:59 +08:00
|
|
|
getWorkersForSatelliteLogs(db.config, regions[0], regions[1], id_used, satelliteFallback, true);
|
2018-06-14 09:14:14 +08:00
|
|
|
}
|
2018-02-10 08:48:55 +08:00
|
|
|
|
2021-03-11 02:06:03 +08:00
|
|
|
getWorkerForRoleInDatacenter(
|
2021-04-27 10:26:25 +08:00
|
|
|
regions[0].dcId, ProcessClass::Resolver, ProcessClass::ExcludeFit, db.config, id_used, {}, true);
|
2021-03-11 02:06:03 +08:00
|
|
|
getWorkerForRoleInDatacenter(
|
2021-04-27 10:26:25 +08:00
|
|
|
regions[0].dcId, ProcessClass::CommitProxy, ProcessClass::ExcludeFit, db.config, id_used, {}, true);
|
2021-03-11 02:06:03 +08:00
|
|
|
getWorkerForRoleInDatacenter(
|
2021-04-27 10:26:25 +08:00
|
|
|
regions[0].dcId, ProcessClass::GrvProxy, ProcessClass::ExcludeFit, db.config, id_used, {}, true);
|
2018-02-10 08:48:55 +08:00
|
|
|
|
2021-09-17 08:42:34 +08:00
|
|
|
std::vector<Optional<Key>> dcPriority;
|
2018-06-14 09:14:14 +08:00
|
|
|
dcPriority.push_back(regions[0].dcId);
|
|
|
|
dcPriority.push_back(regions[1].dcId);
|
|
|
|
desiredDcIds.set(dcPriority);
|
2021-03-11 02:06:03 +08:00
|
|
|
} catch (Error& e) {
|
|
|
|
if (e.code() != error_code_no_more_servers) {
|
2018-06-14 09:14:14 +08:00
|
|
|
throw;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
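// If recovery has stalled in a two-region configuration, consider recruiting the transaction system in the
// region that does not host the cluster controller (when that region has a non-negative priority).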
void checkRecoveryStalled() {
|
2021-03-11 02:06:03 +08:00
|
|
|
if ((db.serverInfo->get().recoveryState == RecoveryState::RECRUITING ||
|
|
|
|
db.serverInfo->get().recoveryState == RecoveryState::ACCEPTING_COMMITS ||
|
|
|
|
db.serverInfo->get().recoveryState == RecoveryState::ALL_LOGS_RECRUITED) &&
|
|
|
|
db.recoveryStalled) {
|
2019-03-23 08:08:58 +08:00
|
|
|
if (db.config.regions.size() > 1) {
|
2018-06-14 09:14:14 +08:00
|
|
|
auto regions = db.config.regions;
|
2021-09-24 05:22:10 +08:00
|
|
|
if (clusterControllerDcId.get() == regions[0].dcId && regions[1].priority >= 0) {
|
2018-06-14 09:14:14 +08:00
|
|
|
std::swap(regions[0], regions[1]);
|
2018-02-10 08:48:55 +08:00
|
|
|
}
|
2021-09-24 05:22:10 +08:00
|
|
|
ASSERT(regions[1].priority < 0 || clusterControllerDcId.get() == regions[1].dcId);
|
2018-06-14 09:14:14 +08:00
|
|
|
checkRegions(regions);
|
2018-02-10 08:48:55 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-09-17 08:42:34 +08:00
|
|
|
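// Tallies, per process id, how many of the given workers are running on that process.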
void updateIdUsed(const std::vector<WorkerDetails>& workers,
|
|
|
|
std::map<Optional<Standalone<StringRef>>, int>& id_used) {
|
2021-04-08 07:04:08 +08:00
|
|
|
for (auto& it : workers) {
|
|
|
|
id_used[it.interf.locality.processId()]++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-03-11 02:06:03 +08:00
|
|
|
// FIXME: determine when to fail the cluster controller when a primaryDC has not been set
|
2021-03-17 04:17:56 +08:00
|
|
|
|
|
|
|
// This function returns true when the cluster controller determines it is worth forcing
|
|
|
|
// a master recovery in order to change the recruited processes in the transaction subsystem.
|
2017-05-26 04:48:44 +08:00
|
|
|
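// The comparison proceeds in stages: master fitness, satellite fallback and satellite region fit, and finally
// a lexicographic comparison of RoleFitness for tlogs, satellite tlogs, commit proxies, grv proxies, resolvers,
// backup workers, remote tlogs, and log routers. The first stage that differs decides the outcome.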
bool betterMasterExists() {
|
2020-04-12 10:30:05 +08:00
|
|
|
const ServerDBInfo dbi = db.serverInfo->get();
|
2017-05-26 04:48:44 +08:00
|
|
|
|
2021-03-11 02:06:03 +08:00
|
|
|
if (dbi.recoveryState < RecoveryState::ACCEPTING_COMMITS) {
|
2017-05-26 04:48:44 +08:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2021-03-11 02:06:03 +08:00
|
|
|
// Do not trigger "better master exists" if the cluster controller is excluded, since the master will change
|
|
|
|
// anyway once the cluster controller is moved.
|
|
|
|
if (id_worker[clusterControllerProcessId].priorityInfo.isExcluded) {
|
2021-04-08 11:06:30 +08:00
|
|
|
TraceEvent("NewRecruitmentIsWorse", id).detail("Reason", "ClusterControllerExcluded");
|
2018-09-22 07:14:39 +08:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2019-03-23 08:08:58 +08:00
|
|
|
if (db.config.regions.size() > 1 && db.config.regions[0].priority > db.config.regions[1].priority &&
|
2021-03-11 02:06:03 +08:00
|
|
|
db.config.regions[0].dcId != clusterControllerDcId.get() && versionDifferenceUpdated &&
|
2021-10-07 07:16:24 +08:00
|
|
|
datacenterVersionDifference < SERVER_KNOBS->MAX_VERSION_DIFFERENCE && remoteDCIsHealthy()) {
|
2018-06-14 09:14:14 +08:00
|
|
|
checkRegions(db.config.regions);
|
|
|
|
}
|
2018-02-10 08:48:55 +08:00
|
|
|
|
2017-11-17 08:58:55 +08:00
|
|
|
// Get master process
|
2017-05-26 04:48:44 +08:00
|
|
|
auto masterWorker = id_worker.find(dbi.master.locality.processId());
|
2021-03-11 02:06:03 +08:00
|
|
|
if (masterWorker == id_worker.end()) {
|
2021-04-08 11:06:30 +08:00
|
|
|
TraceEvent("NewRecruitmentIsWorse", id)
|
2021-04-08 07:04:08 +08:00
|
|
|
.detail("Reason", "CannotFindMaster")
|
|
|
|
.detail("ProcessID", dbi.master.locality.processId());
|
2017-05-26 04:48:44 +08:00
|
|
|
return false;
|
2017-10-25 03:58:54 +08:00
|
|
|
}
|
2017-05-26 04:48:44 +08:00
|
|
|
|
2017-11-17 08:58:55 +08:00
|
|
|
// Get tlog processes
|
2019-03-09 00:25:07 +08:00
|
|
|
std::vector<WorkerDetails> tlogs;
|
|
|
|
std::vector<WorkerDetails> remote_tlogs;
|
|
|
|
std::vector<WorkerDetails> satellite_tlogs;
|
|
|
|
std::vector<WorkerDetails> log_routers;
|
2018-04-27 13:18:07 +08:00
|
|
|
std::set<NetworkAddress> logRouterAddresses;
|
2020-01-18 04:49:34 +08:00
|
|
|
std::vector<WorkerDetails> backup_workers;
|
|
|
|
std::set<NetworkAddress> backup_addresses;
|
2018-02-10 08:48:55 +08:00
|
|
|
|
2021-03-11 02:06:03 +08:00
|
|
|
for (auto& logSet : dbi.logSystemConfig.tLogs) {
|
|
|
|
for (auto& it : logSet.tLogs) {
|
2020-04-19 12:29:38 +08:00
|
|
|
auto tlogWorker = id_worker.find(it.interf().filteredLocality.processId());
|
2021-04-08 07:04:08 +08:00
|
|
|
if (tlogWorker == id_worker.end()) {
|
2021-04-08 11:06:30 +08:00
|
|
|
TraceEvent("NewRecruitmentIsWorse", id)
|
2021-04-08 07:04:08 +08:00
|
|
|
.detail("Reason", "CannotFindTLog")
|
|
|
|
.detail("ProcessID", it.interf().filteredLocality.processId());
|
2018-02-10 08:48:55 +08:00
|
|
|
return false;
|
2021-04-08 07:04:08 +08:00
|
|
|
}
|
|
|
|
if (tlogWorker->second.priorityInfo.isExcluded) {
|
|
|
|
TraceEvent("BetterMasterExists", id)
|
|
|
|
.detail("Reason", "TLogExcluded")
|
|
|
|
.detail("ProcessID", it.interf().filteredLocality.processId());
|
2018-02-10 08:48:55 +08:00
|
|
|
return true;
|
2021-04-08 07:04:08 +08:00
|
|
|
}
|
2018-02-10 08:48:55 +08:00
|
|
|
|
2021-03-11 02:06:03 +08:00
|
|
|
if (logSet.isLocal && logSet.locality == tagLocalitySatellite) {
|
2019-03-09 00:25:07 +08:00
|
|
|
satellite_tlogs.push_back(tlogWorker->second.details);
|
2020-08-06 15:01:57 +08:00
|
|
|
} else if (logSet.isLocal) {
|
2019-03-09 00:25:07 +08:00
|
|
|
tlogs.push_back(tlogWorker->second.details);
|
2018-02-10 08:48:55 +08:00
|
|
|
} else {
|
2019-03-09 00:25:07 +08:00
|
|
|
remote_tlogs.push_back(tlogWorker->second.details);
|
2018-02-10 08:48:55 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-08-06 15:01:57 +08:00
|
|
|
for (auto& it : logSet.logRouters) {
|
2020-04-19 12:29:38 +08:00
|
|
|
auto tlogWorker = id_worker.find(it.interf().filteredLocality.processId());
|
2021-04-08 07:04:08 +08:00
|
|
|
if (tlogWorker == id_worker.end()) {
|
2021-04-08 11:06:30 +08:00
|
|
|
TraceEvent("NewRecruitmentIsWorse", id)
|
2021-04-08 07:04:08 +08:00
|
|
|
.detail("Reason", "CannotFindLogRouter")
|
|
|
|
.detail("ProcessID", it.interf().filteredLocality.processId());
|
2018-02-10 08:48:55 +08:00
|
|
|
return false;
|
2021-04-08 07:04:08 +08:00
|
|
|
}
|
|
|
|
if (tlogWorker->second.priorityInfo.isExcluded) {
|
|
|
|
TraceEvent("BetterMasterExists", id)
|
|
|
|
.detail("Reason", "LogRouterExcluded")
|
|
|
|
.detail("ProcessID", it.interf().filteredLocality.processId());
|
2018-02-10 08:48:55 +08:00
|
|
|
return true;
|
2021-04-08 07:04:08 +08:00
|
|
|
}
|
2021-03-11 02:06:03 +08:00
|
|
|
if (!logRouterAddresses.count(tlogWorker->second.details.interf.address())) {
|
|
|
|
logRouterAddresses.insert(tlogWorker->second.details.interf.address());
|
2019-03-09 00:25:07 +08:00
|
|
|
log_routers.push_back(tlogWorker->second.details);
|
2018-04-27 13:18:07 +08:00
|
|
|
}
|
2018-02-10 08:48:55 +08:00
|
|
|
}
|
2020-01-18 04:49:34 +08:00
|
|
|
|
|
|
|
for (const auto& worker : logSet.backupWorkers) {
|
|
|
|
auto workerIt = id_worker.find(worker.interf().locality.processId());
|
2021-04-08 07:04:08 +08:00
|
|
|
if (workerIt == id_worker.end()) {
|
2021-04-08 11:06:30 +08:00
|
|
|
TraceEvent("NewRecruitmentIsWorse", id)
|
2021-04-08 07:04:08 +08:00
|
|
|
.detail("Reason", "CannotFindBackupWorker")
|
|
|
|
.detail("ProcessID", worker.interf().locality.processId());
|
2021-03-11 02:06:03 +08:00
|
|
|
return false;
|
2021-04-08 07:04:08 +08:00
|
|
|
}
|
|
|
|
if (workerIt->second.priorityInfo.isExcluded) {
|
|
|
|
TraceEvent("BetterMasterExists", id)
|
|
|
|
.detail("Reason", "BackupWorkerExcluded")
|
|
|
|
.detail("ProcessID", worker.interf().locality.processId());
|
2021-03-11 02:06:03 +08:00
|
|
|
return true;
|
2021-04-08 07:04:08 +08:00
|
|
|
}
|
2020-01-18 04:49:34 +08:00
|
|
|
if (backup_addresses.count(workerIt->second.details.interf.address()) == 0) {
|
|
|
|
backup_addresses.insert(workerIt->second.details.interf.address());
|
|
|
|
backup_workers.push_back(workerIt->second.details);
|
|
|
|
}
|
|
|
|
}
|
2017-05-26 04:48:44 +08:00
|
|
|
}
|
|
|
|
|
2020-09-11 08:44:15 +08:00
|
|
|
// Get commit proxy classes
|
|
|
|
std::vector<WorkerDetails> commitProxyClasses;
|
|
|
|
for (auto& it : dbi.client.commitProxies) {
|
|
|
|
auto commitProxyWorker = id_worker.find(it.processId);
|
2021-04-08 07:04:08 +08:00
|
|
|
if (commitProxyWorker == id_worker.end()) {
|
2021-04-08 11:06:30 +08:00
|
|
|
TraceEvent("NewRecruitmentIsWorse", id)
|
2021-04-08 07:04:08 +08:00
|
|
|
.detail("Reason", "CannotFindCommitProxy")
|
|
|
|
.detail("ProcessID", it.processId);
|
2021-03-11 02:06:03 +08:00
|
|
|
return false;
|
2021-04-08 07:04:08 +08:00
|
|
|
}
|
|
|
|
if (commitProxyWorker->second.priorityInfo.isExcluded) {
|
|
|
|
TraceEvent("BetterMasterExists", id)
|
|
|
|
.detail("Reason", "CommitProxyExcluded")
|
|
|
|
.detail("ProcessID", it.processId);
|
2021-03-11 02:06:03 +08:00
|
|
|
return true;
|
2021-04-08 07:04:08 +08:00
|
|
|
}
|
2020-09-11 08:44:15 +08:00
|
|
|
commitProxyClasses.push_back(commitProxyWorker->second.details);
|
2017-05-26 04:48:44 +08:00
|
|
|
}
|
|
|
|
|
2020-08-06 15:01:57 +08:00
|
|
|
// Get grv proxy classes
|
|
|
|
std::vector<WorkerDetails> grvProxyClasses;
|
2021-03-11 02:06:03 +08:00
|
|
|
for (auto& it : dbi.client.grvProxies) {
|
2020-07-15 15:37:41 +08:00
|
|
|
auto grvProxyWorker = id_worker.find(it.processId);
|
2021-04-08 07:04:08 +08:00
|
|
|
if (grvProxyWorker == id_worker.end()) {
|
2021-04-08 11:06:30 +08:00
|
|
|
TraceEvent("NewRecruitmentIsWorse", id)
|
2021-04-08 07:04:08 +08:00
|
|
|
.detail("Reason", "CannotFindGrvProxy")
|
|
|
|
.detail("ProcessID", it.processId);
|
2020-07-15 15:37:41 +08:00
|
|
|
return false;
|
2021-04-08 07:04:08 +08:00
|
|
|
}
|
|
|
|
if (grvProxyWorker->second.priorityInfo.isExcluded) {
|
|
|
|
TraceEvent("BetterMasterExists", id)
|
|
|
|
.detail("Reason", "GrvProxyExcluded")
|
|
|
|
.detail("ProcessID", it.processId);
|
2020-07-15 15:37:41 +08:00
|
|
|
return true;
|
2021-04-08 07:04:08 +08:00
|
|
|
}
|
2020-08-06 15:01:57 +08:00
|
|
|
grvProxyClasses.push_back(grvProxyWorker->second.details);
|
2020-07-15 15:37:41 +08:00
|
|
|
}
|
|
|
|
|
2017-11-17 08:58:55 +08:00
|
|
|
// Get resolver classes
|
2019-03-09 00:25:07 +08:00
|
|
|
std::vector<WorkerDetails> resolverClasses;
|
2021-03-11 02:06:03 +08:00
|
|
|
for (auto& it : dbi.resolvers) {
|
2017-05-26 04:48:44 +08:00
|
|
|
auto resolverWorker = id_worker.find(it.locality.processId());
|
2021-04-08 07:04:08 +08:00
|
|
|
if (resolverWorker == id_worker.end()) {
|
2021-04-08 11:06:30 +08:00
|
|
|
TraceEvent("NewRecruitmentIsWorse", id)
|
2021-04-08 07:04:08 +08:00
|
|
|
.detail("Reason", "CannotFindResolver")
|
|
|
|
.detail("ProcessID", it.locality.processId());
|
2017-05-26 04:48:44 +08:00
|
|
|
return false;
|
2021-04-08 07:04:08 +08:00
|
|
|
}
|
|
|
|
if (resolverWorker->second.priorityInfo.isExcluded) {
|
|
|
|
TraceEvent("BetterMasterExists", id)
|
|
|
|
.detail("Reason", "ResolverExcluded")
|
|
|
|
.detail("ProcessID", it.locality.processId());
|
2017-11-17 08:58:55 +08:00
|
|
|
return true;
|
2021-04-08 07:04:08 +08:00
|
|
|
}
|
2019-03-09 00:25:07 +08:00
|
|
|
resolverClasses.push_back(resolverWorker->second.details);
|
2017-05-26 04:48:44 +08:00
|
|
|
}
|
|
|
|
|
2021-03-11 02:06:03 +08:00
|
|
|
// Check master fitness. Don't return false if the master is excluded: if all processes are excluded, we
|
|
|
|
// still need a master for recovery.
|
|
|
|
ProcessClass::Fitness oldMasterFit =
|
|
|
|
masterWorker->second.details.processClass.machineClassFitness(ProcessClass::Master);
|
|
|
|
if (db.config.isExcludedServer(dbi.master.addresses())) {
|
2017-11-17 08:58:55 +08:00
|
|
|
oldMasterFit = std::max(oldMasterFit, ProcessClass::ExcludeFit);
|
|
|
|
}
|
|
|
|
|
2021-03-11 02:06:03 +08:00
|
|
|
std::map<Optional<Standalone<StringRef>>, int> id_used;
|
2021-04-08 07:04:08 +08:00
|
|
|
std::map<Optional<Standalone<StringRef>>, int> old_id_used;
|
2018-02-10 08:48:55 +08:00
|
|
|
id_used[clusterControllerProcessId]++;
|
2021-04-08 07:04:08 +08:00
|
|
|
old_id_used[clusterControllerProcessId]++;
|
2021-03-11 02:06:03 +08:00
|
|
|
WorkerFitnessInfo mworker = getWorkerForRoleInDatacenter(
|
2021-04-27 10:26:25 +08:00
|
|
|
clusterControllerDcId, ProcessClass::Master, ProcessClass::NeverAssign, db.config, id_used, {}, true);
|
2021-03-11 02:06:03 +08:00
|
|
|
auto newMasterFit = mworker.worker.processClass.machineClassFitness(ProcessClass::Master);
|
|
|
|
if (db.config.isExcludedServer(mworker.worker.interf.addresses())) {
|
2019-10-15 09:32:17 +08:00
|
|
|
newMasterFit = std::max(newMasterFit, ProcessClass::ExcludeFit);
|
|
|
|
}
|
2017-11-17 08:58:55 +08:00
|
|
|
|
2021-04-08 07:04:08 +08:00
|
|
|
old_id_used[masterWorker->first]++;
|
|
|
|
if (oldMasterFit < newMasterFit) {
|
2021-04-08 11:06:30 +08:00
|
|
|
TraceEvent("NewRecruitmentIsWorse", id)
|
2021-04-08 07:04:08 +08:00
|
|
|
.detail("OldMasterFit", oldMasterFit)
|
|
|
|
.detail("NewMasterFit", newMasterFit)
|
|
|
|
.detail("OldIsCC", dbi.master.locality.processId() == clusterControllerProcessId)
|
|
|
|
.detail("NewIsCC", mworker.worker.interf.locality.processId() == clusterControllerProcessId);
|
|
|
|
|
2017-11-17 08:58:55 +08:00
|
|
|
return false;
|
2021-04-08 07:04:08 +08:00
|
|
|
}
|
2021-03-11 02:06:03 +08:00
|
|
|
if (oldMasterFit > newMasterFit || (dbi.master.locality.processId() == clusterControllerProcessId &&
|
2021-04-08 07:04:08 +08:00
|
|
|
mworker.worker.interf.locality.processId() != clusterControllerProcessId)) {
|
|
|
|
TraceEvent("BetterMasterExists", id)
|
|
|
|
.detail("OldMasterFit", oldMasterFit)
|
|
|
|
.detail("NewMasterFit", newMasterFit)
|
|
|
|
.detail("OldIsCC", dbi.master.locality.processId() == clusterControllerProcessId)
|
|
|
|
.detail("NewIsCC", mworker.worker.interf.locality.processId() == clusterControllerProcessId);
|
2017-11-17 08:58:55 +08:00
|
|
|
return true;
|
2021-04-08 07:04:08 +08:00
|
|
|
}
|
2017-11-17 08:58:55 +08:00
|
|
|
|
2018-02-10 08:48:55 +08:00
|
|
|
std::set<Optional<Key>> primaryDC;
|
|
|
|
std::set<Optional<Key>> remoteDC;
|
2018-03-06 11:27:46 +08:00
|
|
|
|
|
|
|
RegionInfo region;
|
2019-03-19 03:17:59 +08:00
|
|
|
RegionInfo remoteRegion;
|
2019-03-23 08:08:58 +08:00
|
|
|
if (db.config.regions.size()) {
|
2018-03-06 11:27:46 +08:00
|
|
|
primaryDC.insert(clusterControllerDcId);
|
2021-03-11 02:06:03 +08:00
|
|
|
for (auto& r : db.config.regions) {
|
|
|
|
if (r.dcId != clusterControllerDcId.get()) {
|
2018-03-06 11:27:46 +08:00
|
|
|
ASSERT(remoteDC.empty());
|
|
|
|
remoteDC.insert(r.dcId);
|
2019-03-19 03:17:59 +08:00
|
|
|
remoteRegion = r;
|
2018-02-10 08:48:55 +08:00
|
|
|
} else {
|
2018-03-06 11:27:46 +08:00
|
|
|
ASSERT(region.dcId == StringRef());
|
|
|
|
region = r;
|
|
|
|
}
|
|
|
|
}
|
2018-02-10 08:48:55 +08:00
|
|
|
}
|
2017-11-17 08:58:55 +08:00
|
|
|
|
2018-02-10 08:48:55 +08:00
|
|
|
// Check tLog fitness
|
2021-04-08 07:04:08 +08:00
|
|
|
updateIdUsed(tlogs, old_id_used);
|
2021-04-21 15:22:33 +08:00
|
|
|
RoleFitness oldTLogFit(tlogs, ProcessClass::TLog, old_id_used);
|
2021-03-11 02:06:03 +08:00
|
|
|
auto newTLogs = getWorkersForTlogs(db.config,
|
|
|
|
db.config.tLogReplicationFactor,
|
|
|
|
db.config.getDesiredLogs(),
|
|
|
|
db.config.tLogPolicy,
|
|
|
|
id_used,
|
|
|
|
true,
|
|
|
|
primaryDC);
|
2021-04-21 15:22:33 +08:00
|
|
|
RoleFitness newTLogFit(newTLogs, ProcessClass::TLog, id_used);
|
2017-11-17 08:58:55 +08:00
|
|
|
|
2018-06-29 14:15:32 +08:00
|
|
|
bool oldSatelliteFallback = false;
|
2019-10-15 09:32:17 +08:00
|
|
|
|
2021-03-24 04:02:31 +08:00
|
|
|
if (region.satelliteTLogPolicyFallback.isValid()) {
|
|
|
|
for (auto& logSet : dbi.logSystemConfig.tLogs) {
|
|
|
|
if (region.satelliteTLogPolicy.isValid() && logSet.isLocal && logSet.locality == tagLocalitySatellite) {
|
|
|
|
oldSatelliteFallback = logSet.tLogPolicy->info() != region.satelliteTLogPolicy->info();
|
|
|
|
ASSERT(!oldSatelliteFallback ||
|
|
|
|
(region.satelliteTLogPolicyFallback.isValid() &&
|
|
|
|
logSet.tLogPolicy->info() == region.satelliteTLogPolicyFallback->info()));
|
|
|
|
break;
|
|
|
|
}
|
2018-06-29 14:15:32 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-04-08 07:04:08 +08:00
|
|
|
updateIdUsed(satellite_tlogs, old_id_used);
|
2021-04-21 15:22:33 +08:00
|
|
|
RoleFitness oldSatelliteTLogFit(satellite_tlogs, ProcessClass::TLog, old_id_used);
|
2018-06-29 14:15:32 +08:00
|
|
|
bool newSatelliteFallback = false;
|
2021-04-08 07:04:08 +08:00
|
|
|
auto newSatelliteTLogs = satellite_tlogs;
|
|
|
|
RoleFitness newSatelliteTLogFit = oldSatelliteTLogFit;
|
|
|
|
if (region.satelliteTLogReplicationFactor > 0 && db.config.usableRegions > 1) {
|
|
|
|
newSatelliteTLogs =
|
|
|
|
getWorkersForSatelliteLogs(db.config, region, remoteRegion, id_used, newSatelliteFallback, true);
|
2021-04-21 15:22:33 +08:00
|
|
|
newSatelliteTLogFit = RoleFitness(newSatelliteTLogs, ProcessClass::TLog, id_used);
|
2021-04-08 07:04:08 +08:00
|
|
|
}
|
2018-02-10 08:48:55 +08:00
|
|
|
|
2021-03-11 02:06:03 +08:00
|
|
|
std::map<Optional<Key>, int32_t> satellite_priority;
|
|
|
|
for (auto& r : region.satellites) {
|
2019-10-15 09:31:23 +08:00
|
|
|
satellite_priority[r.dcId] = r.priority;
|
|
|
|
}
|
|
|
|
|
|
|
|
int32_t oldSatelliteRegionFit = std::numeric_limits<int32_t>::max();
|
2021-03-11 02:06:03 +08:00
|
|
|
for (auto& it : satellite_tlogs) {
|
2020-08-06 15:01:57 +08:00
|
|
|
if (satellite_priority.count(it.interf.locality.dcId())) {
|
2019-10-15 09:31:23 +08:00
|
|
|
oldSatelliteRegionFit = std::min(oldSatelliteRegionFit, satellite_priority[it.interf.locality.dcId()]);
|
|
|
|
} else {
|
|
|
|
oldSatelliteRegionFit = -1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
int32_t newSatelliteRegionFit = std::numeric_limits<int32_t>::max();
|
2021-03-11 02:06:03 +08:00
|
|
|
for (auto& it : newSatelliteTLogs) {
|
|
|
|
if (satellite_priority.count(it.interf.locality.dcId())) {
|
2019-10-15 09:31:23 +08:00
|
|
|
newSatelliteRegionFit = std::min(newSatelliteRegionFit, satellite_priority[it.interf.locality.dcId()]);
|
|
|
|
} else {
|
|
|
|
newSatelliteRegionFit = -1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-04-08 07:04:08 +08:00
|
|
|
if (oldSatelliteFallback && !newSatelliteFallback) {
|
|
|
|
TraceEvent("BetterMasterExists", id)
|
|
|
|
.detail("OldSatelliteFallback", oldSatelliteFallback)
|
|
|
|
.detail("NewSatelliteFallback", newSatelliteFallback);
|
2021-03-11 02:06:03 +08:00
|
|
|
return true;
|
2021-04-08 07:04:08 +08:00
|
|
|
}
|
|
|
|
if (!oldSatelliteFallback && newSatelliteFallback) {
|
2021-04-08 11:06:30 +08:00
|
|
|
TraceEvent("NewRecruitmentIsWorse", id)
|
2021-04-08 07:04:08 +08:00
|
|
|
.detail("OldSatelliteFallback", oldSatelliteFallback)
|
|
|
|
.detail("NewSatelliteFallback", newSatelliteFallback);
|
2018-06-29 14:15:32 +08:00
|
|
|
return false;
|
2021-04-08 07:04:08 +08:00
|
|
|
}
|
2018-02-10 08:48:55 +08:00
|
|
|
|
2021-04-08 07:04:08 +08:00
|
|
|
if (oldSatelliteRegionFit < newSatelliteRegionFit) {
|
|
|
|
TraceEvent("BetterMasterExists", id)
|
|
|
|
.detail("OldSatelliteRegionFit", oldSatelliteRegionFit)
|
|
|
|
.detail("NewSatelliteRegionFit", newSatelliteRegionFit);
|
2021-03-11 02:06:03 +08:00
|
|
|
return true;
|
2021-04-08 07:04:08 +08:00
|
|
|
}
|
|
|
|
if (oldSatelliteRegionFit > newSatelliteRegionFit) {
|
2021-04-08 11:06:30 +08:00
|
|
|
TraceEvent("NewRecruitmentIsWorse", id)
|
2021-04-08 07:04:08 +08:00
|
|
|
.detail("OldSatelliteRegionFit", oldSatelliteRegionFit)
|
|
|
|
.detail("NewSatelliteRegionFit", newSatelliteRegionFit);
|
2019-10-15 09:31:23 +08:00
|
|
|
return false;
|
2021-04-08 07:04:08 +08:00
|
|
|
}
|
2019-10-15 09:31:23 +08:00
|
|
|
|
2021-04-08 07:04:08 +08:00
|
|
|
updateIdUsed(remote_tlogs, old_id_used);
|
2021-04-21 15:22:33 +08:00
|
|
|
RoleFitness oldRemoteTLogFit(remote_tlogs, ProcessClass::TLog, old_id_used);
|
2019-03-19 03:17:59 +08:00
|
|
|
std::vector<UID> exclusionWorkerIds;
|
2021-03-11 02:06:03 +08:00
|
|
|
auto fn = [](const WorkerDetails& in) { return in.interf.id(); };
|
2019-03-19 03:17:59 +08:00
|
|
|
std::transform(newTLogs.begin(), newTLogs.end(), std::back_inserter(exclusionWorkerIds), fn);
|
|
|
|
std::transform(newSatelliteTLogs.begin(), newSatelliteTLogs.end(), std::back_inserter(exclusionWorkerIds), fn);
|
2021-04-08 07:04:08 +08:00
|
|
|
RoleFitness newRemoteTLogFit = oldRemoteTLogFit;
|
|
|
|
if (db.config.usableRegions > 1 && (dbi.recoveryState == RecoveryState::ALL_LOGS_RECRUITED ||
|
|
|
|
dbi.recoveryState == RecoveryState::FULLY_RECOVERED)) {
|
|
|
|
newRemoteTLogFit = RoleFitness(getWorkersForTlogs(db.config,
|
|
|
|
db.config.getRemoteTLogReplicationFactor(),
|
|
|
|
db.config.getDesiredRemoteLogs(),
|
|
|
|
db.config.getRemoteTLogPolicy(),
|
|
|
|
id_used,
|
|
|
|
true,
|
|
|
|
remoteDC,
|
|
|
|
exclusionWorkerIds),
|
|
|
|
ProcessClass::TLog,
|
2021-04-21 15:22:33 +08:00
|
|
|
id_used);
|
2021-04-08 07:04:08 +08:00
|
|
|
}
|
2021-03-11 02:06:03 +08:00
|
|
|
int oldRouterCount =
|
|
|
|
oldTLogFit.count * std::max<int>(1, db.config.desiredLogRouterCount / std::max(1, oldTLogFit.count));
|
|
|
|
int newRouterCount =
|
|
|
|
newTLogFit.count * std::max<int>(1, db.config.desiredLogRouterCount / std::max(1, newTLogFit.count));
|
2021-04-08 07:04:08 +08:00
|
|
|
updateIdUsed(log_routers, old_id_used);
|
2021-04-21 15:22:33 +08:00
|
|
|
RoleFitness oldLogRoutersFit(log_routers, ProcessClass::LogRouter, old_id_used);
|
2021-04-08 07:04:08 +08:00
|
|
|
RoleFitness newLogRoutersFit = oldLogRoutersFit;
|
|
|
|
if (db.config.usableRegions > 1 && dbi.recoveryState == RecoveryState::FULLY_RECOVERED) {
|
2021-04-27 10:26:25 +08:00
|
|
|
newLogRoutersFit = RoleFitness(getWorkersForRoleInDatacenter(*remoteDC.begin(),
|
|
|
|
ProcessClass::LogRouter,
|
|
|
|
newRouterCount,
|
|
|
|
db.config,
|
|
|
|
id_used,
|
|
|
|
{},
|
|
|
|
Optional<WorkerFitnessInfo>(),
|
|
|
|
true),
|
|
|
|
ProcessClass::LogRouter,
|
|
|
|
id_used);
|
2021-04-08 07:04:08 +08:00
|
|
|
}
|
2018-02-10 08:48:55 +08:00
|
|
|
|
2021-03-11 02:06:03 +08:00
|
|
|
if (oldLogRoutersFit.count < oldRouterCount) {
|
2018-04-27 13:18:07 +08:00
|
|
|
oldLogRoutersFit.worstFit = ProcessClass::NeverAssign;
|
|
|
|
}
|
2021-03-11 02:06:03 +08:00
|
|
|
if (newLogRoutersFit.count < newRouterCount) {
|
2018-04-27 13:18:07 +08:00
|
|
|
newLogRoutersFit.worstFit = ProcessClass::NeverAssign;
|
|
|
|
}
|
2020-07-15 15:37:41 +08:00
|
|
|
|
2020-08-06 15:01:57 +08:00
|
|
|
// Check proxy/grvProxy/resolver fitness
|
2021-04-08 07:04:08 +08:00
|
|
|
updateIdUsed(commitProxyClasses, old_id_used);
|
|
|
|
updateIdUsed(grvProxyClasses, old_id_used);
|
|
|
|
updateIdUsed(resolverClasses, old_id_used);
|
2021-04-21 15:22:33 +08:00
|
|
|
RoleFitness oldCommitProxyFit(commitProxyClasses, ProcessClass::CommitProxy, old_id_used);
|
|
|
|
RoleFitness oldGrvProxyFit(grvProxyClasses, ProcessClass::GrvProxy, old_id_used);
|
|
|
|
RoleFitness oldResolverFit(resolverClasses, ProcessClass::Resolver, old_id_used);
|
2020-08-06 15:01:57 +08:00
|
|
|
|
2021-04-27 00:57:46 +08:00
|
|
|
std::map<Optional<Standalone<StringRef>>, int> preferredSharing;
|
2021-04-09 05:29:12 +08:00
|
|
|
auto first_commit_proxy = getWorkerForRoleInDatacenter(clusterControllerDcId,
|
|
|
|
ProcessClass::CommitProxy,
|
|
|
|
ProcessClass::ExcludeFit,
|
|
|
|
db.config,
|
|
|
|
id_used,
|
2021-04-27 00:57:46 +08:00
|
|
|
preferredSharing,
|
2021-04-09 05:29:12 +08:00
|
|
|
true);
|
2021-04-27 00:57:46 +08:00
|
|
|
preferredSharing[first_commit_proxy.worker.interf.locality.processId()] = 0;
|
2021-04-09 05:29:12 +08:00
|
|
|
auto first_grv_proxy = getWorkerForRoleInDatacenter(clusterControllerDcId,
|
|
|
|
ProcessClass::GrvProxy,
|
|
|
|
ProcessClass::ExcludeFit,
|
|
|
|
db.config,
|
|
|
|
id_used,
|
2021-04-27 00:57:46 +08:00
|
|
|
preferredSharing,
|
2021-04-09 05:29:12 +08:00
|
|
|
true);
|
2021-04-27 00:57:46 +08:00
|
|
|
preferredSharing[first_grv_proxy.worker.interf.locality.processId()] = 1;
|
2021-04-09 05:29:12 +08:00
|
|
|
auto first_resolver = getWorkerForRoleInDatacenter(clusterControllerDcId,
|
|
|
|
ProcessClass::Resolver,
|
|
|
|
ProcessClass::ExcludeFit,
|
|
|
|
db.config,
|
|
|
|
id_used,
|
2021-04-27 00:57:46 +08:00
|
|
|
preferredSharing,
|
2021-04-09 05:29:12 +08:00
|
|
|
true);
|
2021-04-27 00:57:46 +08:00
|
|
|
preferredSharing[first_resolver.worker.interf.locality.processId()] = 2;
|
2021-04-08 11:06:30 +08:00
|
|
|
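// Level the 'used' counts of the three seed workers before recruiting the full commit proxy, grv proxy, and
// resolver sets.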
auto maxUsed = std::max({ first_commit_proxy.used, first_grv_proxy.used, first_resolver.used });
|
2021-04-08 07:04:08 +08:00
|
|
|
first_commit_proxy.used = maxUsed;
|
|
|
|
first_grv_proxy.used = maxUsed;
|
|
|
|
first_resolver.used = maxUsed;
|
2021-03-11 02:06:03 +08:00
|
|
|
auto commit_proxies = getWorkersForRoleInDatacenter(clusterControllerDcId,
|
|
|
|
ProcessClass::CommitProxy,
|
|
|
|
db.config.getDesiredCommitProxies(),
|
|
|
|
db.config,
|
|
|
|
id_used,
|
2021-04-27 00:57:46 +08:00
|
|
|
preferredSharing,
|
2021-03-11 02:06:03 +08:00
|
|
|
first_commit_proxy,
|
|
|
|
true);
|
|
|
|
auto grv_proxies = getWorkersForRoleInDatacenter(clusterControllerDcId,
|
|
|
|
ProcessClass::GrvProxy,
|
|
|
|
db.config.getDesiredGrvProxies(),
|
|
|
|
db.config,
|
|
|
|
id_used,
|
2021-04-27 00:57:46 +08:00
|
|
|
preferredSharing,
|
2021-03-11 02:06:03 +08:00
|
|
|
first_grv_proxy,
|
|
|
|
true);
|
|
|
|
auto resolvers = getWorkersForRoleInDatacenter(clusterControllerDcId,
|
|
|
|
ProcessClass::Resolver,
|
|
|
|
db.config.getDesiredResolvers(),
|
|
|
|
db.config,
|
|
|
|
id_used,
|
2021-04-27 00:57:46 +08:00
|
|
|
preferredSharing,
|
2021-03-11 02:06:03 +08:00
|
|
|
first_resolver,
|
|
|
|
true);
|
2017-05-26 04:48:44 +08:00
|
|
|
|
2021-04-21 15:22:33 +08:00
|
|
|
RoleFitness newCommitProxyFit(commit_proxies, ProcessClass::CommitProxy, id_used);
|
|
|
|
RoleFitness newGrvProxyFit(grv_proxies, ProcessClass::GrvProxy, id_used);
|
|
|
|
RoleFitness newResolverFit(resolvers, ProcessClass::Resolver, id_used);
|
2021-03-17 03:14:19 +08:00
|
|
|
|
2020-01-18 04:49:34 +08:00
|
|
|
// Check backup worker fitness
|
2021-04-08 07:04:08 +08:00
|
|
|
updateIdUsed(backup_workers, old_id_used);
|
2021-04-21 15:22:33 +08:00
|
|
|
RoleFitness oldBackupWorkersFit(backup_workers, ProcessClass::Backup, old_id_used);
|
2020-01-18 04:49:34 +08:00
|
|
|
const int nBackup = backup_addresses.size();
|
2021-03-17 03:21:21 +08:00
|
|
|
RoleFitness newBackupWorkersFit(getWorkersForRoleInDatacenter(clusterControllerDcId,
|
|
|
|
ProcessClass::Backup,
|
|
|
|
nBackup,
|
|
|
|
db.config,
|
|
|
|
id_used,
|
2021-04-27 10:26:25 +08:00
|
|
|
{},
|
2021-03-17 03:21:21 +08:00
|
|
|
Optional<WorkerFitnessInfo>(),
|
|
|
|
true),
|
2021-04-08 07:04:08 +08:00
|
|
|
ProcessClass::Backup,
|
2021-04-21 15:22:33 +08:00
|
|
|
id_used);
|
2021-04-08 07:04:08 +08:00
|
|
|
|
|
|
|
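// Compare the old and new recruitments as tuples; std::tuple comparison is lexicographic, so earlier entries
// (tlogs, then satellite tlogs, ...) dominate later ones.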
auto oldFit = std::make_tuple(oldTLogFit,
|
|
|
|
oldSatelliteTLogFit,
|
|
|
|
oldCommitProxyFit,
|
|
|
|
oldGrvProxyFit,
|
|
|
|
oldResolverFit,
|
|
|
|
oldBackupWorkersFit,
|
|
|
|
oldRemoteTLogFit,
|
|
|
|
oldLogRoutersFit);
|
|
|
|
auto newFit = std::make_tuple(newTLogFit,
|
|
|
|
newSatelliteTLogFit,
|
|
|
|
newCommitProxyFit,
|
|
|
|
newGrvProxyFit,
|
|
|
|
newResolverFit,
|
|
|
|
newBackupWorkersFit,
|
|
|
|
newRemoteTLogFit,
|
|
|
|
newLogRoutersFit);
|
|
|
|
|
|
|
|
if (oldFit > newFit) {
|
2020-01-18 04:49:34 +08:00
|
|
|
TraceEvent("BetterMasterExists", id)
|
|
|
|
.detail("OldMasterFit", oldMasterFit)
|
|
|
|
.detail("NewMasterFit", newMasterFit)
|
|
|
|
.detail("OldTLogFit", oldTLogFit.toString())
|
|
|
|
.detail("NewTLogFit", newTLogFit.toString())
|
|
|
|
.detail("OldSatelliteFit", oldSatelliteTLogFit.toString())
|
|
|
|
.detail("NewSatelliteFit", newSatelliteTLogFit.toString())
|
2021-04-08 07:04:08 +08:00
|
|
|
.detail("OldCommitProxyFit", oldCommitProxyFit.toString())
|
|
|
|
.detail("NewCommitProxyFit", newCommitProxyFit.toString())
|
|
|
|
.detail("OldGrvProxyFit", oldGrvProxyFit.toString())
|
|
|
|
.detail("NewGrvProxyFit", newGrvProxyFit.toString())
|
|
|
|
.detail("OldResolverFit", oldResolverFit.toString())
|
|
|
|
.detail("NewResolverFit", newResolverFit.toString())
|
|
|
|
.detail("OldBackupWorkerFit", oldBackupWorkersFit.toString())
|
|
|
|
.detail("NewBackupWorkerFit", newBackupWorkersFit.toString())
|
2020-01-18 04:49:34 +08:00
|
|
|
.detail("OldRemoteFit", oldRemoteTLogFit.toString())
|
|
|
|
.detail("NewRemoteFit", newRemoteTLogFit.toString())
|
|
|
|
.detail("OldRouterFit", oldLogRoutersFit.toString())
|
|
|
|
.detail("NewRouterFit", newLogRoutersFit.toString())
|
|
|
|
.detail("OldSatelliteFallback", oldSatelliteFallback)
|
|
|
|
.detail("NewSatelliteFallback", newSatelliteFallback);
|
2017-05-26 04:48:44 +08:00
|
|
|
return true;
|
|
|
|
}
|
2017-11-17 08:58:55 +08:00
|
|
|
|
2021-04-08 07:04:08 +08:00
|
|
|
if (oldFit < newFit) {
|
2021-04-08 11:06:30 +08:00
|
|
|
TraceEvent("NewRecruitmentIsWorse", id)
|
2021-04-08 07:04:08 +08:00
|
|
|
.detail("OldMasterFit", oldMasterFit)
|
|
|
|
.detail("NewMasterFit", newMasterFit)
|
|
|
|
.detail("OldTLogFit", oldTLogFit.toString())
|
|
|
|
.detail("NewTLogFit", newTLogFit.toString())
|
|
|
|
.detail("OldSatelliteFit", oldSatelliteTLogFit.toString())
|
|
|
|
.detail("NewSatelliteFit", newSatelliteTLogFit.toString())
|
|
|
|
.detail("OldCommitProxyFit", oldCommitProxyFit.toString())
|
|
|
|
.detail("NewCommitProxyFit", newCommitProxyFit.toString())
|
|
|
|
.detail("OldGrvProxyFit", oldGrvProxyFit.toString())
|
|
|
|
.detail("NewGrvProxyFit", newGrvProxyFit.toString())
|
|
|
|
.detail("OldResolverFit", oldResolverFit.toString())
|
|
|
|
.detail("NewResolverFit", newResolverFit.toString())
|
|
|
|
.detail("OldBackupWorkerFit", oldBackupWorkersFit.toString())
|
|
|
|
.detail("NewBackupWorkerFit", newBackupWorkersFit.toString())
|
|
|
|
.detail("OldRemoteFit", oldRemoteTLogFit.toString())
|
|
|
|
.detail("NewRemoteFit", newRemoteTLogFit.toString())
|
|
|
|
.detail("OldRouterFit", oldLogRoutersFit.toString())
|
|
|
|
.detail("NewRouterFit", newLogRoutersFit.toString())
|
|
|
|
.detail("OldSatelliteFallback", oldSatelliteFallback)
|
|
|
|
.detail("NewSatelliteFallback", newSatelliteFallback);
|
|
|
|
}
|
2017-05-26 04:48:44 +08:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2021-09-13 21:58:38 +08:00
|
|
|
// Returns true iff processId is currently being used
|
|
|
|
// for any non-singleton role other than master
|
|
|
|
bool isUsedNotMaster(Optional<Key> processId) const {
|
2019-03-20 06:21:46 +08:00
|
|
|
ASSERT(masterProcessId.present());
|
2021-03-11 02:06:03 +08:00
|
|
|
if (processId == masterProcessId)
|
|
|
|
return false;
|
2019-03-20 06:21:46 +08:00
|
|
|
|
2020-04-12 10:30:05 +08:00
|
|
|
auto& dbInfo = db.serverInfo->get();
|
2019-11-14 04:58:55 +08:00
|
|
|
for (const auto& tlogset : dbInfo.logSystemConfig.tLogs) {
|
2021-03-11 02:06:03 +08:00
|
|
|
for (const auto& tlog : tlogset.tLogs) {
|
|
|
|
if (tlog.present() && tlog.interf().filteredLocality.processId() == processId)
|
|
|
|
return true;
|
2019-11-14 04:58:55 +08:00
|
|
|
}
|
|
|
|
}
|
2020-09-11 08:44:15 +08:00
|
|
|
for (const CommitProxyInterface& interf : dbInfo.client.commitProxies) {
|
2021-03-11 02:06:03 +08:00
|
|
|
if (interf.processId == processId)
|
|
|
|
return true;
|
2019-03-20 02:29:19 +08:00
|
|
|
}
|
2020-07-15 15:37:41 +08:00
|
|
|
for (const GrvProxyInterface& interf : dbInfo.client.grvProxies) {
|
2021-03-11 02:06:03 +08:00
|
|
|
if (interf.processId == processId)
|
|
|
|
return true;
|
2020-07-15 15:37:41 +08:00
|
|
|
}
|
2021-03-11 02:06:03 +08:00
|
|
|
for (const ResolverInterface& interf : dbInfo.resolvers) {
|
|
|
|
if (interf.locality.processId() == processId)
|
|
|
|
return true;
|
2019-03-20 02:29:19 +08:00
|
|
|
}
|
2021-03-11 02:06:03 +08:00
|
|
|
if (processId == clusterControllerProcessId)
|
|
|
|
return true;
|
2019-11-13 06:22:36 +08:00
|
|
|
|
2019-03-20 02:29:19 +08:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2021-09-13 21:58:38 +08:00
|
|
|
// Returns true iff
|
|
|
|
// - role is master, or
|
|
|
|
// - role is a singleton AND worker's pid is being used for any non-singleton role
|
|
|
|
bool onMasterIsBetter(const WorkerDetails& worker, ProcessClass::ClusterRole role) const {
|
2019-03-20 02:29:19 +08:00
|
|
|
ASSERT(masterProcessId.present());
|
2019-03-21 01:00:31 +08:00
|
|
|
const auto& pid = worker.interf.locality.processId();
|
2021-09-15 23:35:58 +08:00
|
|
|
if ((role != ProcessClass::DataDistributor && role != ProcessClass::Ratekeeper &&
|
|
|
|
role != ProcessClass::BlobManager) ||
|
2021-03-11 02:06:03 +08:00
|
|
|
pid == masterProcessId.get()) {
|
2019-03-20 06:58:25 +08:00
|
|
|
return false;
|
|
|
|
}
|
2019-11-14 04:58:55 +08:00
|
|
|
return isUsedNotMaster(pid);
|
2019-03-20 02:29:19 +08:00
|
|
|
}
|
|
|
|
|
2021-09-13 21:58:38 +08:00
|
|
|
// Returns a map of <pid, numRolesUsingPid> for all non-singleton roles
|
2021-03-11 02:06:03 +08:00
|
|
|
std::map<Optional<Standalone<StringRef>>, int> getUsedIds() {
|
2019-01-29 01:25:15 +08:00
|
|
|
std::map<Optional<Standalone<StringRef>>, int> idUsed;
|
2019-02-13 07:50:44 +08:00
|
|
|
updateKnownIds(&idUsed);
|
|
|
|
|
2020-04-12 10:30:05 +08:00
|
|
|
auto& dbInfo = db.serverInfo->get();
|
2019-02-13 07:50:44 +08:00
|
|
|
for (const auto& tlogset : dbInfo.logSystemConfig.tLogs) {
|
2021-03-11 02:06:03 +08:00
|
|
|
for (const auto& tlog : tlogset.tLogs) {
|
2019-02-01 02:10:41 +08:00
|
|
|
if (tlog.present()) {
|
2020-04-19 12:29:38 +08:00
|
|
|
idUsed[tlog.interf().filteredLocality.processId()]++;
|
2019-01-29 01:25:15 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2020-09-11 08:44:15 +08:00
|
|
|
for (const CommitProxyInterface& interf : dbInfo.client.commitProxies) {
|
2020-05-02 05:30:50 +08:00
|
|
|
ASSERT(interf.processId.present());
|
|
|
|
idUsed[interf.processId]++;
|
2019-01-29 01:25:15 +08:00
|
|
|
}
|
2020-07-15 15:37:41 +08:00
|
|
|
for (const GrvProxyInterface& interf : dbInfo.client.grvProxies) {
|
|
|
|
ASSERT(interf.processId.present());
|
|
|
|
idUsed[interf.processId]++;
|
|
|
|
}
|
2021-03-11 02:06:03 +08:00
|
|
|
for (const ResolverInterface& interf : dbInfo.resolvers) {
|
2019-01-29 01:25:15 +08:00
|
|
|
ASSERT(interf.locality.processId().present());
|
|
|
|
idUsed[interf.locality.processId()]++;
|
|
|
|
}
|
2019-02-13 07:50:44 +08:00
|
|
|
return idUsed;
|
2019-01-29 01:25:15 +08:00
|
|
|
}
|
|
|
|
|
2021-08-28 08:07:47 +08:00
|
|
|
// Updates worker health signals in `workerHealth` based on `req`.
|
|
|
|
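// Each degraded peer is tracked as a (startTime, lastRefreshTime) pair: startTime is kept from the first
// report so the duration of the degradation can be measured, while lastRefreshTime is bumped on every report.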
void updateWorkerHealth(const UpdateWorkerHealthRequest& req) {
|
|
|
|
std::string degradedPeersString;
|
|
|
|
for (int i = 0; i < req.degradedPeers.size(); ++i) {
|
2021-09-10 11:29:28 +08:00
|
|
|
degradedPeersString += (i == 0 ? "" : " ") + req.degradedPeers[i].toString();
|
2021-08-28 08:07:47 +08:00
|
|
|
}
|
|
|
|
TraceEvent("ClusterControllerUpdateWorkerHealth")
|
|
|
|
.detail("WorkerAddress", req.address)
|
|
|
|
.detail("DegradedPeers", degradedPeersString);
|
|
|
|
|
|
|
|
// `req.degradedPeers` contains the latest peer performance view from the worker. Clear the worker if the
|
|
|
|
// requested worker doesn't see any degraded peers.
|
|
|
|
if (req.degradedPeers.empty()) {
|
|
|
|
workerHealth.erase(req.address);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
double currentTime = now();
|
|
|
|
|
|
|
|
// Current `workerHealth` doesn't have any information about the incoming worker. Add the worker into
|
|
|
|
// `workerHealth`.
|
|
|
|
if (workerHealth.find(req.address) == workerHealth.end()) {
|
|
|
|
workerHealth[req.address] = {};
|
|
|
|
for (const auto& degradedPeer : req.degradedPeers) {
|
|
|
|
workerHealth[req.address].degradedPeers[degradedPeer] = { currentTime, currentTime };
|
|
|
|
}
|
|
|
|
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
// The incoming worker already exists in `workerHealth`.
|
|
|
|
|
|
|
|
auto& health = workerHealth[req.address];
|
|
|
|
|
|
|
|
// First, remove any degraded peers recorded in `workerHealth` that aren't in the incoming request. These
|
|
|
|
// machines' network performance should have recovered.
|
|
|
|
std::unordered_set<NetworkAddress> recoveredPeers;
|
|
|
|
for (const auto& [peer, times] : health.degradedPeers) {
|
|
|
|
recoveredPeers.insert(peer);
|
|
|
|
}
|
|
|
|
for (const auto& peer : req.degradedPeers) {
|
|
|
|
if (recoveredPeers.find(peer) != recoveredPeers.end()) {
|
|
|
|
recoveredPeers.erase(peer);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
for (const auto& peer : recoveredPeers) {
|
|
|
|
health.degradedPeers.erase(peer);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Update the worker's degradedPeers.
|
|
|
|
for (const auto& peer : req.degradedPeers) {
|
|
|
|
auto it = health.degradedPeers.find(peer);
|
|
|
|
if (it == health.degradedPeers.end()) {
|
|
|
|
health.degradedPeers[peer] = { currentTime, currentTime };
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
it->second.lastRefreshTime = currentTime;
|
|
|
|
}
|
|
|
|
}
|
2021-06-24 05:59:00 +08:00
|
|
|
|
2021-06-29 12:45:02 +08:00
|
|
|
// Checks whether any worker or any of its degraded peers have recovered. If so, removes them from `workerHealth`.
|
|
|
|
void updateRecoveredWorkers() {
|
|
|
|
double currentTime = now();
|
|
|
|
for (auto& [workerAddress, health] : workerHealth) {
|
|
|
|
for (auto it = health.degradedPeers.begin(); it != health.degradedPeers.end();) {
|
|
|
|
if (currentTime - it->second.lastRefreshTime > SERVER_KNOBS->CC_DEGRADED_LINK_EXPIRATION_INTERVAL) {
|
2021-07-13 01:44:26 +08:00
|
|
|
TraceEvent("WorkerPeerHealthRecovered").detail("Worker", workerAddress).detail("Peer", it->first);
|
2021-06-29 12:45:02 +08:00
|
|
|
health.degradedPeers.erase(it++);
|
|
|
|
} else {
|
|
|
|
++it;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
for (auto it = workerHealth.begin(); it != workerHealth.end();) {
|
|
|
|
if (it->second.degradedPeers.empty()) {
|
2021-07-13 01:44:26 +08:00
|
|
|
TraceEvent("WorkerAllPeerHealthRecovered").detail("Worker", it->first);
|
2021-06-29 12:45:02 +08:00
|
|
|
workerHealth.erase(it++);
|
|
|
|
} else {
|
|
|
|
++it;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Returns a list of servers that are experiencing degraded links. These are candidates for exclusion. Note
|
|
|
|
// that only one endpoint of a bad link will be included in this list.
|
|
|
|
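// The selection works in three steps: (1) invert the per-worker reports into degradedPeer -> complainers,
// keeping only links degraded for at least CC_MIN_DEGRADATION_INTERVAL; (2) greedily mark peers in decreasing
// order of complainer count, skipping a peer whose complainer is already marked; (3) drop peers with more than
// CC_DEGRADED_PEER_DEGREE_TO_EXCLUDE complainers.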
std::unordered_set<NetworkAddress> getServersWithDegradedLink() {
|
|
|
|
updateRecoveredWorkers();
|
|
|
|
|
|
|
|
// Build a map keyed by the reported degraded peer. The map records which servers complain about each peer.
|
|
|
|
std::unordered_map<NetworkAddress, std::unordered_set<NetworkAddress>> degradedLinkDst2Src;
|
|
|
|
double currentTime = now();
|
|
|
|
for (const auto& [server, health] : workerHealth) {
|
|
|
|
for (const auto& [degradedPeer, times] : health.degradedPeers) {
|
|
|
|
if (currentTime - times.startTime < SERVER_KNOBS->CC_MIN_DEGRADATION_INTERVAL) {
|
|
|
|
// This link has not been degraded for long enough to be counted.
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
degradedLinkDst2Src[degradedPeer].insert(server);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Sort degraded peers based on the number of workers complaining about it.
|
|
|
|
std::vector<std::pair<int, NetworkAddress>> count2DegradedPeer;
|
|
|
|
for (const auto& [degradedPeer, complainers] : degradedLinkDst2Src) {
|
|
|
|
count2DegradedPeer.push_back({ complainers.size(), degradedPeer });
|
|
|
|
}
|
|
|
|
std::sort(count2DegradedPeer.begin(), count2DegradedPeer.end(), std::greater<>());
|
|
|
|
|
|
|
|
// Go through all reported degraded peers in decreasing order of the number of complainers. For a particular
|
|
|
|
// degraded peer, if one of its complainers has already been marked as degraded, skip this peer: one endpoint
|
|
|
|
// of the link between the degraded peer and that complainer has already been counted as degraded. This
|
|
|
|
// prevents both endpoints of a single bad link from being marked as degraded servers.
|
|
|
|
//
|
|
|
|
// For example, if server A is already considered degraded and A complains about B, we won't also add B as
|
|
|
|
// degraded, since A already accounts for that link.
|
|
|
|
std::unordered_set<NetworkAddress> currentDegradedServers;
|
|
|
|
for (const auto& [complainerCount, badServer] : count2DegradedPeer) {
|
|
|
|
for (const auto& complainer : degradedLinkDst2Src[badServer]) {
|
|
|
|
if (currentDegradedServers.find(complainer) == currentDegradedServers.end()) {
|
|
|
|
currentDegradedServers.insert(badServer);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// For a degraded server complained about by more than SERVER_KNOBS->CC_DEGRADED_PEER_DEGREE_TO_EXCLUDE workers,
|
|
|
|
// we can't tell whether the server is hot or the network is bad, so we remove it from the returned list.
|
|
|
|
std::unordered_set<NetworkAddress> currentDegradedServersWithinLimit;
|
|
|
|
for (const auto& badServer : currentDegradedServers) {
|
|
|
|
if (degradedLinkDst2Src[badServer].size() <= SERVER_KNOBS->CC_DEGRADED_PEER_DEGREE_TO_EXCLUDE) {
|
|
|
|
currentDegradedServersWithinLimit.insert(badServer);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return currentDegradedServersWithinLimit;
|
|
|
|
}
|
|
|
|
|
2021-09-10 11:29:28 +08:00
|
|
|
// Whether the transaction system (in the primary DC, if in an HA setting) contains degraded servers.
|
|
|
|
bool transactionSystemContainsDegradedServers() {
|
2021-06-29 12:45:02 +08:00
|
|
|
const ServerDBInfo dbi = db.serverInfo->get();
|
|
|
|
for (const auto& excludedServer : degradedServers) {
|
|
|
|
if (dbi.master.addresses().contains(excludedServer)) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (auto& logSet : dbi.logSystemConfig.tLogs) {
|
|
|
|
if (!logSet.isLocal || logSet.locality == tagLocalitySatellite) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
for (const auto& tlog : logSet.tLogs) {
|
|
|
|
if (tlog.present() && tlog.interf().addresses().contains(excludedServer)) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
for (auto& proxy : dbi.client.grvProxies) {
|
|
|
|
if (proxy.addresses().contains(excludedServer)) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
for (auto& proxy : dbi.client.commitProxies) {
|
|
|
|
if (proxy.addresses().contains(excludedServer)) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
for (auto& resolver : dbi.resolvers) {
|
|
|
|
if (resolver.addresses().contains(excludedServer)) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2021-09-10 11:29:28 +08:00
|
|
|
// Whether the transaction system in the remote DC, e.g. log routers and tlogs in the remote DC, contains
|
|
|
|
// degraded servers.
|
|
|
|
bool remoteTransactionSystemContainsDegradedServers() {
|
|
|
|
if (db.config.usableRegions <= 1) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (const auto& excludedServer : degradedServers) {
|
2021-09-11 05:51:55 +08:00
|
|
|
if (addressInDbAndRemoteDc(excludedServer, db.serverInfo)) {
|
|
|
|
return true;
|
2021-09-10 11:29:28 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2021-10-07 07:16:24 +08:00
|
|
|
// Returns true if the remote DC is healthy and can be failed over to.
|
|
|
|
bool remoteDCIsHealthy() {
|
|
|
|
// Just after startup, ignore any remote DC health info, since the current CC may have been elected in the
|
|
|
|
// wrong DC while all the processes are still starting.
|
|
|
|
if (machineStartTime() == 0) {
|
|
|
|
return true;
|
|
|
|
}
|
2021-10-08 10:46:37 +08:00
|
|
|
|
2021-10-07 07:16:24 +08:00
|
|
|
if (now() - machineStartTime() < SERVER_KNOBS->INITIAL_UPDATE_CROSS_DC_INFO_DELAY) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2021-10-08 10:46:37 +08:00
|
|
|
// When remote DC health is not yet being monitored, we don't know whether the remote DC is healthy, so return
|
|
|
|
// false here to prevent a failover.
|
|
|
|
if (!remoteDCMonitorStarted) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2021-10-07 07:16:24 +08:00
|
|
|
return !remoteTransactionSystemContainsDegradedServers();
|
|
|
|
}
|
|
|
|
|
2021-09-10 11:29:28 +08:00
|
|
|
// Returns true when the cluster controller should trigger a recovery due to degraded servers used in the
|
|
|
|
// transaction system in the primary data center.
|
|
|
|
bool shouldTriggerRecoveryDueToDegradedServers() {
|
|
|
|
if (degradedServers.size() > SERVER_KNOBS->CC_MAX_EXCLUSION_DUE_TO_HEALTH) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (db.serverInfo->get().recoveryState < RecoveryState::ACCEPTING_COMMITS) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Do not trigger recovery if the cluster controller is excluded, since the master will change
|
|
|
|
// anyway once the cluster controller is moved.
|
|
|
|
if (id_worker[clusterControllerProcessId].priorityInfo.isExcluded) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
return transactionSystemContainsDegradedServers();
|
|
|
|
}
|
|
|
|
|
|
|
|
// Returns true when the cluster controller should trigger a failover due to degraded servers used in the
|
|
|
|
// transaction system in the primary data center, and no degradation in the remote data center.
|
|
|
|
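// The degraded server count must fall within [CC_FAILOVER_DUE_TO_HEALTH_MIN_DEGRADATION,
// CC_FAILOVER_DUE_TO_HEALTH_MAX_DEGRADATION] for a failover to be considered.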
bool shouldTriggerFailoverDueToDegradedServers() {
|
|
|
|
if (db.config.usableRegions <= 1) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2021-10-07 07:16:24 +08:00
|
|
|
if (SERVER_KNOBS->CC_FAILOVER_DUE_TO_HEALTH_MIN_DEGRADATION >
|
|
|
|
SERVER_KNOBS->CC_FAILOVER_DUE_TO_HEALTH_MAX_DEGRADATION) {
|
|
|
|
TraceEvent(SevWarn, "TriggerFailoverDueToDegradedServersInvalidConfig")
|
|
|
|
.suppressFor(1.0)
|
|
|
|
.detail("Min", SERVER_KNOBS->CC_FAILOVER_DUE_TO_HEALTH_MIN_DEGRADATION)
|
|
|
|
.detail("Max", SERVER_KNOBS->CC_FAILOVER_DUE_TO_HEALTH_MAX_DEGRADATION);
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2021-09-10 11:29:28 +08:00
|
|
|
if (degradedServers.size() < SERVER_KNOBS->CC_FAILOVER_DUE_TO_HEALTH_MIN_DEGRADATION ||
|
|
|
|
degradedServers.size() > SERVER_KNOBS->CC_FAILOVER_DUE_TO_HEALTH_MAX_DEGRADATION) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Do not trigger recovery if the cluster controller is excluded, since the master will change
|
|
|
|
// anyway once the cluster controller is moved.
|
|
|
|
if (id_worker[clusterControllerProcessId].priorityInfo.isExcluded) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
return transactionSystemContainsDegradedServers() && !remoteTransactionSystemContainsDegradedServers();
|
|
|
|
}
|
|
|
|
|
2021-06-29 12:45:02 +08:00
|
|
|
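// Returns the number of health-triggered recoveries within the last CC_TRACKING_HEALTH_RECOVERY_INTERVAL,
// discarding older timestamps from the queue.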
int recentRecoveryCountDueToHealth() {
|
|
|
|
while (!recentHealthTriggeredRecoveryTime.empty() &&
|
|
|
|
now() - recentHealthTriggeredRecoveryTime.front() > SERVER_KNOBS->CC_TRACKING_HEALTH_RECOVERY_INTERVAL) {
|
|
|
|
recentHealthTriggeredRecoveryTime.pop();
|
|
|
|
}
|
|
|
|
return recentHealthTriggeredRecoveryTime.size();
|
|
|
|
}
|
|
|
|
|
|
|
|
bool isExcludedDegradedServer(const NetworkAddressList& a) {
|
|
|
|
for (const auto& server : excludedDegradedServers) {
|
|
|
|
if (a.contains(server))
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2021-03-11 02:06:03 +08:00
|
|
|
std::map<Optional<Standalone<StringRef>>, WorkerInfo> id_worker;
|
|
|
|
std::map<Optional<Standalone<StringRef>>, ProcessClass>
|
|
|
|
id_class; // contains the mapping from process id to process class from the database
|
2021-05-04 04:14:16 +08:00
|
|
|
RangeResult lastProcessClasses;
|
2017-05-26 04:48:44 +08:00
|
|
|
bool gotProcessClasses;
|
2017-11-16 09:15:24 +08:00
|
|
|
bool gotFullyRecoveredConfig;
|
2017-05-26 04:48:44 +08:00
|
|
|
Optional<Standalone<StringRef>> masterProcessId;
|
2017-10-26 02:35:29 +08:00
|
|
|
Optional<Standalone<StringRef>> clusterControllerProcessId;
|
2018-02-10 08:48:55 +08:00
|
|
|
Optional<Standalone<StringRef>> clusterControllerDcId;
|
2021-09-17 08:42:34 +08:00
|
|
|
AsyncVar<Optional<std::vector<Optional<Key>>>> desiredDcIds; // desired DC priorities
|
|
|
|
AsyncVar<std::pair<bool, Optional<std::vector<Optional<Key>>>>>
|
2021-03-11 02:06:03 +08:00
|
|
|
changingDcIds; // current DC priorities to change first, and whether that is the cluster controller
|
2021-09-17 08:42:34 +08:00
|
|
|
AsyncVar<std::pair<bool, Optional<std::vector<Optional<Key>>>>>
|
2021-03-11 02:06:03 +08:00
|
|
|
changedDcIds; // current DC priorities to change second, and whether the cluster controller has been changed
|
2017-05-26 04:48:44 +08:00
|
|
|
UID id;
|
|
|
|
std::vector<RecruitFromConfigurationRequest> outstandingRecruitmentRequests;
|
2017-09-12 08:40:46 +08:00
|
|
|
std::vector<RecruitRemoteFromConfigurationRequest> outstandingRemoteRecruitmentRequests;
|
2017-05-26 04:48:44 +08:00
|
|
|
std::vector<std::pair<RecruitStorageRequest, double>> outstandingStorageRequests;
|
2021-09-21 03:42:20 +08:00
|
|
|
std::vector<std::pair<RecruitBlobWorkerRequest, double>> outstandingBlobWorkerRequests;
|
2017-05-26 04:48:44 +08:00
|
|
|
ActorCollection ac;
|
|
|
|
UpdateWorkerList updateWorkerList;
|
2018-06-28 14:02:08 +08:00
|
|
|
Future<Void> outstandingRequestChecker;
|
2020-02-20 08:48:30 +08:00
|
|
|
Future<Void> outstandingRemoteRequestChecker;
|
2020-04-06 14:09:36 +08:00
|
|
|
AsyncTrigger updateDBInfo;
|
2020-04-18 06:05:01 +08:00
|
|
|
std::set<Endpoint> updateDBInfoEndpoints;
|
2020-04-06 14:09:36 +08:00
|
|
|
std::set<Endpoint> removedDBInfoEndpoints;
|
2017-05-26 04:48:44 +08:00
|
|
|
|
|
|
|
DBInfo db;
|
|
|
|
Database cx;
|
|
|
|
double startTime;
|
2020-02-20 08:48:30 +08:00
|
|
|
Future<Void> goodRecruitmentTime;
|
|
|
|
Future<Void> goodRemoteRecruitmentTime;
|
2018-06-14 09:14:14 +08:00
|
|
|
Version datacenterVersionDifference;
|
2019-01-19 03:30:18 +08:00
|
|
|
PromiseStream<Future<Void>> addActor;
|
2019-04-25 06:12:37 +08:00
|
|
|
bool versionDifferenceUpdated;
|
2021-09-13 21:58:38 +08:00
|
|
|
|
2021-10-08 12:04:29 +08:00
|
|
|
bool remoteDCMonitorStarted;
|
|
|
|
bool remoteTransactionSystemDegraded;
|
|
|
|
|
2021-09-13 21:58:38 +08:00
|
|
|
// recruitX is used to signal when role X needs to be (re)recruited.
|
|
|
|
// recruitingXID is used to track the ID of X's interface which is being recruited.
|
|
|
|
// We use AsyncVars to kill (i.e. halt) singletons that have been replaced.
|
2021-12-07 05:12:27 +08:00
|
|
|
double lastRecruitTime = 0;
|
2021-09-13 21:58:38 +08:00
|
|
|
AsyncVar<bool> recruitDistributor;
|
|
|
|
Optional<UID> recruitingDistributorID;
|
2019-03-23 09:22:45 +08:00
|
|
|
AsyncVar<bool> recruitRatekeeper;
|
2021-09-13 21:58:38 +08:00
|
|
|
Optional<UID> recruitingRatekeeperID;
|
2021-09-15 23:35:58 +08:00
|
|
|
AsyncVar<bool> recruitBlobManager;
|
|
|
|
Optional<UID> recruitingBlobManagerID;
|
2017-05-26 04:48:44 +08:00
|
|
|
|
2021-08-28 08:07:47 +08:00
|
|
|
// Stores the health information from a particular worker's perspective.
|
|
|
|
struct WorkerHealth {
|
|
|
|
struct DegradedTimes {
|
|
|
|
double startTime = 0;
|
|
|
|
double lastRefreshTime = 0;
|
|
|
|
};
|
|
|
|
std::unordered_map<NetworkAddress, DegradedTimes> degradedPeers;
|
2021-06-24 05:59:00 +08:00
|
|
|
|
2021-08-28 08:07:47 +08:00
|
|
|
// TODO(zhewu): Include disk and CPU signals.
|
|
|
|
};
|
2021-06-24 05:59:00 +08:00
|
|
|
std::unordered_map<NetworkAddress, WorkerHealth> workerHealth;
|
2021-06-29 12:45:02 +08:00
|
|
|
std::unordered_set<NetworkAddress>
|
|
|
|
degradedServers; // The servers that the cluster controller considers degraded. The servers in this list
|
|
|
|
// are not excluded unless they are added to `excludedDegradedServers`.
|
|
|
|
std::unordered_set<NetworkAddress>
|
|
|
|
excludedDegradedServers; // The degraded servers to be excluded when assigning workers to roles.
|
|
|
|
std::queue<double> recentHealthTriggeredRecoveryTime;
|
2021-06-24 05:59:00 +08:00
|
|
|
|
2019-10-04 06:29:11 +08:00
|
|
|
CounterCollection clusterControllerMetrics;
|
|
|
|
|
|
|
|
Counter openDatabaseRequests;
|
|
|
|
Counter registerWorkerRequests;
|
|
|
|
Counter getWorkersRequests;
|
|
|
|
Counter getClientWorkersRequests;
|
|
|
|
Counter registerMasterRequests;
|
|
|
|
Counter statusRequests;
|
|
|
|
|
2021-09-22 07:01:54 +08:00
|
|
|
Reference<EventCacheHolder> recruitedMasterWorkerEventHolder;
|
2021-09-22 05:03:14 +08:00
|
|
|
|
2021-04-27 06:54:08 +08:00
|
|
|
ClusterControllerData(ClusterControllerFullInterface const& ccInterface,
|
|
|
|
LocalityData const& locality,
|
|
|
|
ServerCoordinators const& coordinators)
|
2021-07-25 02:20:51 +08:00
|
|
|
: gotProcessClasses(false), gotFullyRecoveredConfig(false), clusterControllerProcessId(locality.processId()),
|
|
|
|
clusterControllerDcId(locality.dcId()), id(ccInterface.id()), ac(false), outstandingRequestChecker(Void()),
|
|
|
|
outstandingRemoteRequestChecker(Void()), startTime(now()), goodRecruitmentTime(Never()),
|
2021-03-11 02:06:03 +08:00
|
|
|
goodRemoteRecruitmentTime(Never()), datacenterVersionDifference(0), versionDifferenceUpdated(false),
|
2021-10-08 10:46:37 +08:00
|
|
|
remoteDCMonitorStarted(false), remoteTransactionSystemDegraded(false), recruitDistributor(false),
|
2021-10-14 04:37:56 +08:00
|
|
|
recruitRatekeeper(false), recruitBlobManager(false),
|
2021-03-11 02:06:03 +08:00
|
|
|
clusterControllerMetrics("ClusterController", id.toString()),
|
|
|
|
openDatabaseRequests("OpenDatabaseRequests", clusterControllerMetrics),
|
|
|
|
registerWorkerRequests("RegisterWorkerRequests", clusterControllerMetrics),
|
|
|
|
getWorkersRequests("GetWorkersRequests", clusterControllerMetrics),
|
|
|
|
getClientWorkersRequests("GetClientWorkersRequests", clusterControllerMetrics),
|
|
|
|
registerMasterRequests("RegisterMasterRequests", clusterControllerMetrics),
|
2021-09-22 05:03:14 +08:00
|
|
|
statusRequests("StatusRequests", clusterControllerMetrics),
|
2021-09-22 07:01:54 +08:00
|
|
|
recruitedMasterWorkerEventHolder(makeReference<EventCacheHolder>("RecruitedMasterWorker")) {
|
2020-04-12 10:30:05 +08:00
|
|
|
auto serverInfo = ServerDBInfo();
|
2019-05-11 05:01:52 +08:00
|
|
|
serverInfo.id = deterministicRandom()->randomUniqueID();
|
2020-04-06 14:09:36 +08:00
|
|
|
serverInfo.infoGeneration = ++db.dbInfoCount;
|
2017-05-26 04:48:44 +08:00
|
|
|
serverInfo.masterLifetime.ccID = id;
|
|
|
|
serverInfo.clusterInterface = ccInterface;
|
2018-09-29 03:12:06 +08:00
|
|
|
serverInfo.myLocality = locality;
|
2021-03-11 02:06:03 +08:00
|
|
|
db.serverInfo->set(serverInfo);
|
2021-07-17 15:11:40 +08:00
|
|
|
cx = openDBOnServer(db.serverInfo, TaskPriority::DefaultEndpoint, LockAware::True);
|
2021-10-18 11:47:11 +08:00
|
|
|
|
|
|
|
specialCounter(clusterControllerMetrics, "ClientCount", [this]() { return db.clientCount; });
|
2017-05-26 04:48:44 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
~ClusterControllerData() {
|
|
|
|
ac.clear(false);
|
|
|
|
id_worker.clear();
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
2021-09-13 21:58:38 +08:00
|
|
|
// Wrapper for singleton interfaces
template <class Interface>
struct Singleton {
    const Optional<Interface>& interface;

    Singleton(const Optional<Interface>& interface) : interface(interface) {}

    virtual Role getRole() const = 0;
    virtual ProcessClass::ClusterRole getClusterRole() const = 0;

    virtual void setInterfaceToDbInfo(ClusterControllerData* cc) const = 0;
    virtual void halt(ClusterControllerData* cc, Optional<Standalone<StringRef>> pid) const = 0;
    virtual void recruit(ClusterControllerData* cc) const = 0;
};

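// The concrete wrappers below (Ratekeeper, DataDistributor, BlobManager) are constructed on demand from the
// interfaces stored in ServerDBInfo (e.g. RatekeeperSingleton(db.ratekeeper)) and are used by
// checkBetterSingletons() and haltRegisteringOrCurrentSingleton() to halt, recruit, or re-register the role.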
struct RatekeeperSingleton : Singleton<RatekeeperInterface> {

    RatekeeperSingleton(const Optional<RatekeeperInterface>& interface) : Singleton(interface) {}

    Role getRole() const { return Role::RATEKEEPER; }
    ProcessClass::ClusterRole getClusterRole() const { return ProcessClass::Ratekeeper; }

    void setInterfaceToDbInfo(ClusterControllerData* cc) const {
        if (interface.present()) {
            cc->db.setRatekeeper(interface.get());
        }
    }
    void halt(ClusterControllerData* cc, Optional<Standalone<StringRef>> pid) const {
        if (interface.present()) {
            cc->id_worker[pid].haltRatekeeper =
                brokenPromiseToNever(interface.get().haltRatekeeper.getReply(HaltRatekeeperRequest(cc->id)));
        }
    }
    void recruit(ClusterControllerData* cc) const {
        cc->lastRecruitTime = now();
        cc->recruitRatekeeper.set(true);
    }
};

struct DataDistributorSingleton : Singleton<DataDistributorInterface> {

    DataDistributorSingleton(const Optional<DataDistributorInterface>& interface) : Singleton(interface) {}

    Role getRole() const { return Role::DATA_DISTRIBUTOR; }
    ProcessClass::ClusterRole getClusterRole() const { return ProcessClass::DataDistributor; }

    void setInterfaceToDbInfo(ClusterControllerData* cc) const {
        if (interface.present()) {
            cc->db.setDistributor(interface.get());
        }
    }
    void halt(ClusterControllerData* cc, Optional<Standalone<StringRef>> pid) const {
        if (interface.present()) {
            cc->id_worker[pid].haltDistributor =
                brokenPromiseToNever(interface.get().haltDataDistributor.getReply(HaltDataDistributorRequest(cc->id)));
        }
    }
    void recruit(ClusterControllerData* cc) const {
        cc->lastRecruitTime = now();
        cc->recruitDistributor.set(true);
    }
};

struct BlobManagerSingleton : Singleton<BlobManagerInterface> {

    BlobManagerSingleton(const Optional<BlobManagerInterface>& interface) : Singleton(interface) {}

    Role getRole() const { return Role::BLOB_MANAGER; }
    ProcessClass::ClusterRole getClusterRole() const { return ProcessClass::BlobManager; }

    void setInterfaceToDbInfo(ClusterControllerData* cc) const {
        if (interface.present()) {
            cc->db.setBlobManager(interface.get());
        }
    }
    void halt(ClusterControllerData* cc, Optional<Standalone<StringRef>> pid) const {
        if (interface.present()) {
            cc->id_worker[pid].haltBlobManager =
                brokenPromiseToNever(interface.get().haltBlobManager.getReply(HaltBlobManagerRequest(cc->id)));
        }
    }
    void recruit(ClusterControllerData* cc) const {
        cc->lastRecruitTime = now();
        cc->recruitBlobManager.set(true);
    }
    void haltBlobGranules(ClusterControllerData* cc, Optional<Standalone<StringRef>> pid) const {
        if (interface.present()) {
            cc->id_worker[pid].haltBlobManager =
                brokenPromiseToNever(interface.get().haltBlobGranules.getReply(HaltBlobGranulesRequest(cc->id)));
        }
    }
};

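// Recruits a master on a worker in the cluster controller's datacenter, publishes the resulting ServerDBInfo, and
// then watches the recruited master for failure, starting a new recruitment whenever the master fails or a failure
// is forced.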
ACTOR Future<Void> clusterWatchDatabase(ClusterControllerData* cluster, ClusterControllerData::DBInfo* db) {
    state MasterInterface iMaster;

    // SOMEDAY: If there is already a non-failed master referenced by zkMasterInfo, use that one until it fails
    // When this someday is implemented, make sure forced failures still cause the master to be recruited again

    loop {
        TraceEvent("CCWDB", cluster->id).log();
        try {
            state double recoveryStart = now();
            TraceEvent("CCWDB", cluster->id).detail("Recruiting", "Master");

            // We must recruit the master in the same data center as the cluster controller.
            // This should always be possible, because we can recruit the master on the same process as the cluster
            // controller.
            std::map<Optional<Standalone<StringRef>>, int> id_used;
            id_used[cluster->clusterControllerProcessId]++;
            state WorkerFitnessInfo masterWorker = cluster->getWorkerForRoleInDatacenter(
                cluster->clusterControllerDcId, ProcessClass::Master, ProcessClass::NeverAssign, db->config, id_used);
            if ((masterWorker.worker.processClass.machineClassFitness(ProcessClass::Master) >
                     SERVER_KNOBS->EXPECTED_MASTER_FITNESS ||
                 masterWorker.worker.interf.locality.processId() == cluster->clusterControllerProcessId) &&
                !cluster->goodRecruitmentTime.isReady()) {
                TraceEvent("CCWDB", cluster->id)
                    .detail("Fitness", masterWorker.worker.processClass.machineClassFitness(ProcessClass::Master));
                wait(delay(SERVER_KNOBS->ATTEMPT_RECRUITMENT_DELAY));
                continue;
            }
            RecruitMasterRequest rmq;
            rmq.lifetime = db->serverInfo->get().masterLifetime;
            rmq.forceRecovery = db->forceRecovery;

            cluster->masterProcessId = masterWorker.worker.interf.locality.processId();
            cluster->db.unfinishedRecoveries++;
            state Future<ErrorOr<MasterInterface>> fNewMaster = masterWorker.worker.interf.master.tryGetReply(rmq);
            wait(ready(fNewMaster) || db->forceMasterFailure.onTrigger());
            if (fNewMaster.isReady() && fNewMaster.get().present()) {
                TraceEvent("CCWDB", cluster->id).detail("Recruited", fNewMaster.get().get().id());

                // for status tool
                TraceEvent("RecruitedMasterWorker", cluster->id)
                    .detail("Address", fNewMaster.get().get().address())
                    .trackLatest(cluster->recruitedMasterWorkerEventHolder->trackingKey);

                iMaster = fNewMaster.get().get();

                db->masterRegistrationCount = 0;
                db->recoveryStalled = false;

                auto dbInfo = ServerDBInfo();
                dbInfo.master = iMaster;
                dbInfo.id = deterministicRandom()->randomUniqueID();
                dbInfo.infoGeneration = ++db->dbInfoCount;
                dbInfo.masterLifetime = db->serverInfo->get().masterLifetime;
                ++dbInfo.masterLifetime;
                dbInfo.clusterInterface = db->serverInfo->get().clusterInterface;
                dbInfo.distributor = db->serverInfo->get().distributor;
                dbInfo.ratekeeper = db->serverInfo->get().ratekeeper;
                dbInfo.blobManager = db->serverInfo->get().blobManager;
                dbInfo.latencyBandConfig = db->serverInfo->get().latencyBandConfig;

                TraceEvent("CCWDB", cluster->id)
                    .detail("Lifetime", dbInfo.masterLifetime.toString())
                    .detail("ChangeID", dbInfo.id);
                db->serverInfo->set(dbInfo);

                state Future<Void> spinDelay = delay(
                    SERVER_KNOBS
                        ->MASTER_SPIN_DELAY); // Don't retry master recovery more than once per second, but don't delay
                                              // the "first" recovery after more than a second of normal operation

                TraceEvent("CCWDB", cluster->id).detail("Watching", iMaster.id());

                // Master failure detection is pretty sensitive, but if we are in the middle of a very long recovery we
                // really don't want to have to start over
                loop choose {
                    when(wait(waitFailureClient(
                                  iMaster.waitFailure,
                                  db->masterRegistrationCount
                                      ? SERVER_KNOBS->MASTER_FAILURE_REACTION_TIME
                                      : (now() - recoveryStart) * SERVER_KNOBS->MASTER_FAILURE_SLOPE_DURING_RECOVERY,
                                  db->masterRegistrationCount ? -SERVER_KNOBS->MASTER_FAILURE_REACTION_TIME /
                                                                    SERVER_KNOBS->SECONDS_BEFORE_NO_FAILURE_DELAY
                                                              : SERVER_KNOBS->MASTER_FAILURE_SLOPE_DURING_RECOVERY) ||
                              db->forceMasterFailure.onTrigger())) {
                        break;
                    }
                    when(wait(db->serverInfo->onChange())) {}
                }
                wait(spinDelay);

                TEST(true); // clusterWatchDatabase() master failed
                TraceEvent(SevWarn, "DetectedFailedMaster", cluster->id).detail("OldMaster", iMaster.id());
            } else {
                TEST(true); // clusterWatchDatabase() !newMaster.present()
                wait(delay(SERVER_KNOBS->MASTER_SPIN_DELAY));
            }
        } catch (Error& e) {
            TraceEvent("CCWDB", cluster->id).error(e, true).detail("Master", iMaster.id());
            if (e.code() == error_code_actor_cancelled)
                throw;

            bool ok = e.code() == error_code_no_more_servers;
            TraceEvent(ok ? SevWarn : SevError, "ClusterWatchDatabaseRetrying", cluster->id).error(e);
            if (!ok)
                throw e;
            wait(delay(SERVER_KNOBS->ATTEMPT_RECRUITMENT_DELAY));
        }
    }
}

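// Replies with the current ServerDBInfo once it differs from the id the requester already knows about, or after a
// long jittered delay in case the requester is gone.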
ACTOR Future<Void> clusterGetServerInfo(ClusterControllerData::DBInfo* db,
                                        UID knownServerInfoID,
                                        ReplyPromise<ServerDBInfo> reply) {
    while (db->serverInfo->get().id == knownServerInfoID) {
        choose {
            when(wait(yieldedFuture(db->serverInfo->onChange()))) {}
            when(wait(delayJittered(300))) { break; } // The server might be long gone!
        }
    }
    reply.send(db->serverInfo->get());
    return Void();
}

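// Records the requesting client's latest status and replies with ClientDBInfo once it changes from the id the client
// already knows about (or after COORDINATOR_REGISTER_INTERVAL, in case the client is gone).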
ACTOR Future<Void> clusterOpenDatabase(ClusterControllerData::DBInfo* db, OpenDatabaseRequest req) {
    db->clientStatus[req.reply.getEndpoint().getPrimaryAddress()] = std::make_pair(now(), req);
    if (db->clientStatus.size() > 10000) {
        TraceEvent(SevWarnAlways, "TooManyClientStatusEntries").suppressFor(1.0);
    }

    while (db->clientInfo->get().id == req.knownClientInfoID) {
        choose {
            when(wait(db->clientInfo->onChange())) {}
            when(wait(delayJittered(SERVER_KNOBS->COORDINATOR_REGISTER_INTERVAL))) {
                break;
            } // The client might be long gone!
        }
    }

    req.reply.send(db->clientInfo->get());
    return Void();
}

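// Retries any queued RecruitFromConfiguration requests that previously could not be satisfied because no suitable
// workers were available.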
void checkOutstandingRecruitmentRequests(ClusterControllerData* self) {
    for (int i = 0; i < self->outstandingRecruitmentRequests.size(); i++) {
        RecruitFromConfigurationRequest& req = self->outstandingRecruitmentRequests[i];
        try {
            RecruitFromConfigurationReply rep = self->findWorkersForConfiguration(req);
            req.reply.send(rep);
            swapAndPop(&self->outstandingRecruitmentRequests, i--);
        } catch (Error& e) {
            if (e.code() == error_code_no_more_servers || e.code() == error_code_operation_failed) {
                TraceEvent(SevWarn, "RecruitTLogMatchingSetNotAvailable", self->id).error(e);
            } else {
                TraceEvent(SevError, "RecruitTLogsRequestError", self->id).error(e);
                throw;
            }
        }
    }
}

void checkOutstandingRemoteRecruitmentRequests(ClusterControllerData* self) {
    for (int i = 0; i < self->outstandingRemoteRecruitmentRequests.size(); i++) {
        RecruitRemoteFromConfigurationRequest& req = self->outstandingRemoteRecruitmentRequests[i];
        try {
            RecruitRemoteFromConfigurationReply rep = self->findRemoteWorkersForConfiguration(req);
            req.reply.send(rep);
            swapAndPop(&self->outstandingRemoteRecruitmentRequests, i--);
        } catch (Error& e) {
            if (e.code() == error_code_no_more_servers || e.code() == error_code_operation_failed) {
                TraceEvent(SevWarn, "RecruitRemoteTLogMatchingSetNotAvailable", self->id).error(e);
            } else {
                TraceEvent(SevError, "RecruitRemoteTLogsRequestError", self->id).error(e);
                throw;
            }
        }
    }
}

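// Retries queued storage recruitment requests, timing out any request whose deadline has already passed.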
void checkOutstandingStorageRequests(ClusterControllerData* self) {
    for (int i = 0; i < self->outstandingStorageRequests.size(); i++) {
        auto& req = self->outstandingStorageRequests[i];
        try {
            if (req.second < now()) {
                req.first.reply.sendError(timed_out());
                swapAndPop(&self->outstandingStorageRequests, i--);
            } else {
                if (!self->gotProcessClasses && !req.first.criticalRecruitment)
                    throw no_more_servers();

                auto worker = self->getStorageWorker(req.first);
                RecruitStorageReply rep;
                rep.worker = worker.interf;
                rep.processClass = worker.processClass;
                req.first.reply.send(rep);
                swapAndPop(&self->outstandingStorageRequests, i--);
            }
        } catch (Error& e) {
            if (e.code() == error_code_no_more_servers) {
                TraceEvent(SevWarn, "RecruitStorageNotAvailable", self->id)
                    .suppressFor(1.0)
                    .detail("OutstandingReq", i)
                    .detail("IsCriticalRecruitment", req.first.criticalRecruitment)
                    .error(e);
            } else {
                TraceEvent(SevError, "RecruitStorageError", self->id).error(e);
                throw;
            }
        }
    }
}

// When workers aren't available at the time of request, the request
// gets added to a list of outstanding reqs. Here, we try to resolve these
// outstanding requests.
void checkOutstandingBlobWorkerRequests(ClusterControllerData* self) {
    for (int i = 0; i < self->outstandingBlobWorkerRequests.size(); i++) {
        auto& req = self->outstandingBlobWorkerRequests[i];
        try {
            if (req.second < now()) {
                req.first.reply.sendError(timed_out());
                swapAndPop(&self->outstandingBlobWorkerRequests, i--);
            } else {
                if (!self->gotProcessClasses)
                    throw no_more_servers();

                auto worker = self->getBlobWorker(req.first);
                RecruitBlobWorkerReply rep;
                rep.worker = worker.interf;
                rep.processClass = worker.processClass;
                req.first.reply.send(rep);
                // can remove it once we know the worker was found
                swapAndPop(&self->outstandingBlobWorkerRequests, i--);
            }
        } catch (Error& e) {
            if (e.code() == error_code_no_more_servers) {
                TraceEvent(SevWarn, "RecruitBlobWorkerNotAvailable", self->id)
                    .suppressFor(1.0)
                    .detail("OutstandingReq", i)
                    .error(e);
            } else {
                TraceEvent(SevError, "RecruitBlobWorkerError", self->id).error(e);
                throw;
            }
        }
    }
}

// Finds and returns a new process for role
WorkerDetails findNewProcessForSingleton(ClusterControllerData* self,
                                         const ProcessClass::ClusterRole role,
                                         std::map<Optional<Standalone<StringRef>>, int>& id_used) {
    // find new process in cluster for role
    WorkerDetails newWorker =
        self->getWorkerForRoleInDatacenter(
                self->clusterControllerDcId, role, ProcessClass::NeverAssign, self->db.config, id_used, {}, true)
            .worker;

    // check if master's process is actually better suited for role
    if (self->onMasterIsBetter(newWorker, role)) {
        newWorker = self->id_worker[self->masterProcessId.get()].details;
    }

    // acknowledge that the pid is now potentially used by this role as well
    id_used[newWorker.interf.locality.processId()]++;

    return newWorker;
}

// Return best possible fitness for singleton. Note that lower fitness is better.
ProcessClass::Fitness findBestFitnessForSingleton(const ClusterControllerData* self,
                                                  const WorkerDetails& worker,
                                                  const ProcessClass::ClusterRole& role) {
    auto bestFitness = worker.processClass.machineClassFitness(role);
    // If the process has been marked as excluded, we take the max with ExcludeFit to ensure its fit
    // is at least as bad as ExcludeFit. This assists with successfully offboarding such processes
    // and removing them from the cluster.
    if (self->db.config.isExcludedServer(worker.interf.addresses())) {
        bestFitness = std::max(bestFitness, ProcessClass::ExcludeFit);
    }
    return bestFitness;
}

// Returns true iff the singleton is healthy. "Healthy" here means that
// the singleton is stable (see below) and doesn't need to be rerecruited.
// Side effects: (possibly) initiates recruitment
template <class Interface>
bool isHealthySingleton(ClusterControllerData* self,
                        const WorkerDetails& newWorker,
                        const Singleton<Interface>& singleton,
                        const ProcessClass::Fitness& bestFitness,
                        const Optional<UID> recruitingID) {
    // A singleton is stable if it exists in cluster, has not been killed off of proc and is not being recruited
    bool isStableSingleton = singleton.interface.present() &&
                             self->id_worker.count(singleton.interface.get().locality.processId()) &&
                             (!recruitingID.present() || (recruitingID.get() == singleton.interface.get().id()));

    if (!isStableSingleton) {
        return false; // not healthy because unstable
    }

    auto& currWorker = self->id_worker[singleton.interface.get().locality.processId()];
    auto currFitness = currWorker.details.processClass.machineClassFitness(singleton.getClusterRole());
    if (currWorker.priorityInfo.isExcluded) {
        currFitness = ProcessClass::ExcludeFit;
    }
    // If any of the following conditions are met, we will switch the singleton's process:
    // - if the current proc is used by some non-master, non-singleton role
    // - if the current fitness is less than optimal (lower fitness is better)
    // - if currently at peak fitness but on same process as master, and the new worker is on different process
    bool shouldRerecruit =
        self->isUsedNotMaster(currWorker.details.interf.locality.processId()) || bestFitness < currFitness ||
        (currFitness == bestFitness && currWorker.details.interf.locality.processId() == self->masterProcessId &&
         newWorker.interf.locality.processId() != self->masterProcessId);
    if (shouldRerecruit) {
        std::string roleAbbr = singleton.getRole().abbreviation;
        TraceEvent(("CCHalt" + roleAbbr).c_str(), self->id)
            .detail(roleAbbr + "ID", singleton.interface.get().id())
            .detail("Excluded", currWorker.priorityInfo.isExcluded)
            .detail("Fitness", currFitness)
            .detail("BestFitness", bestFitness);
        singleton.recruit(self); // SIDE EFFECT: initiating recruitment
        return false; // not healthy since needed to be rerecruited
    } else {
        return true; // healthy because doesn't need to be rerecruited
    }
}

// Returns a mapping from pid->pidCount for pids
std::map<Optional<Standalone<StringRef>>, int> getColocCounts(
    const std::vector<Optional<Standalone<StringRef>>>& pids) {
    std::map<Optional<Standalone<StringRef>>, int> counts;
    for (const auto& pid : pids) {
        if (pid.present()) {
            ++counts[pid];
        }
    }
    return counts;
}

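// For illustration: getColocCounts({ pidA, pidA, pidB }) yields { pidA -> 2, pidB -> 1 }; pids that are not present
// are skipped, so a singleton whose interface is absent simply contributes no colocation count.
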
// Checks if there exists a better process for each singleton (e.g. DD) compared
// to the process it is currently on.
// Note: there is a lot of extra logic here to only recruit the blob manager when the blob granules feature
// (`blobGranulesEnabled`) is configured.
// When adding new singletons, just follow the ratekeeper/data distributor examples.
void checkBetterSingletons(ClusterControllerData* self) {
    if (!self->masterProcessId.present() ||
        self->db.serverInfo->get().recoveryState < RecoveryState::ACCEPTING_COMMITS) {
        return;
    }

    // note: this map doesn't consider pids used by existing singletons
    std::map<Optional<Standalone<StringRef>>, int> id_used = self->getUsedIds();

    // We prefer spreading out other roles more than separating singletons on their own process,
    // so we artificially amplify the pid count for the processes used by non-singleton roles.
    // In other words, we make the processes used for other roles less desirable to be used
    // by singletons as well.
    for (auto& it : id_used) {
        it.second *= PID_USED_AMP_FOR_NON_SINGLETON;
    }
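    // For illustration: with an amplification factor of k, a process already hosting one non-singleton role is
    // counted as k uses, so the per-datacenter worker search below will prefer an otherwise idle process for a
    // singleton whenever one is available.
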
    // Try to find a new process for each singleton.
    WorkerDetails newRKWorker = findNewProcessForSingleton(self, ProcessClass::Ratekeeper, id_used);
    WorkerDetails newDDWorker = findNewProcessForSingleton(self, ProcessClass::DataDistributor, id_used);

    WorkerDetails newBMWorker;
    if (self->db.config.blobGranulesEnabled) {
        newBMWorker = findNewProcessForSingleton(self, ProcessClass::BlobManager, id_used);
    }

    // Find best possible fitnesses for each singleton.
    auto bestFitnessForRK = findBestFitnessForSingleton(self, newRKWorker, ProcessClass::Ratekeeper);
    auto bestFitnessForDD = findBestFitnessForSingleton(self, newDDWorker, ProcessClass::DataDistributor);

    ProcessClass::Fitness bestFitnessForBM;
    if (self->db.config.blobGranulesEnabled) {
        bestFitnessForBM = findBestFitnessForSingleton(self, newBMWorker, ProcessClass::BlobManager);
    }

    auto& db = self->db.serverInfo->get();
    auto rkSingleton = RatekeeperSingleton(db.ratekeeper);
    auto ddSingleton = DataDistributorSingleton(db.distributor);
    BlobManagerSingleton bmSingleton(db.blobManager);

    // Check if the singletons are healthy.
    // side effect: try to rerecruit the singletons to more optimal processes
    bool rkHealthy = isHealthySingleton<RatekeeperInterface>(
        self, newRKWorker, rkSingleton, bestFitnessForRK, self->recruitingRatekeeperID);

    bool ddHealthy = isHealthySingleton<DataDistributorInterface>(
        self, newDDWorker, ddSingleton, bestFitnessForDD, self->recruitingDistributorID);

    bool bmHealthy = true;
    if (self->db.config.blobGranulesEnabled) {
        bmHealthy = isHealthySingleton<BlobManagerInterface>(
            self, newBMWorker, bmSingleton, bestFitnessForBM, self->recruitingBlobManagerID);
    }

    // if any of the singletons are unhealthy (rerecruited or not stable), then do not
    // consider any further re-recruitments
    if (!(rkHealthy && ddHealthy && bmHealthy)) {
        return;
    }

    // if we reach here, we know that the singletons are healthy so let's
    // check if we can colocate the singletons in a more optimal way
    Optional<Standalone<StringRef>> currRKProcessId = rkSingleton.interface.get().locality.processId();
    Optional<Standalone<StringRef>> currDDProcessId = ddSingleton.interface.get().locality.processId();
    Optional<Standalone<StringRef>> newRKProcessId = newRKWorker.interf.locality.processId();
    Optional<Standalone<StringRef>> newDDProcessId = newDDWorker.interf.locality.processId();

    Optional<Standalone<StringRef>> currBMProcessId, newBMProcessId;
    if (self->db.config.blobGranulesEnabled) {
        currBMProcessId = bmSingleton.interface.get().locality.processId();
        newBMProcessId = newBMWorker.interf.locality.processId();
    }

    std::vector<Optional<Standalone<StringRef>>> currPids = { currRKProcessId, currDDProcessId };
    std::vector<Optional<Standalone<StringRef>>> newPids = { newRKProcessId, newDDProcessId };
    if (self->db.config.blobGranulesEnabled) {
        currPids.emplace_back(currBMProcessId);
        newPids.emplace_back(newBMProcessId);
    }

    auto currColocMap = getColocCounts(currPids);
    auto newColocMap = getColocCounts(newPids);

    // if blob granules are disabled, the BM coloc counts should have no effect on the coloc counts check below
    if (!self->db.config.blobGranulesEnabled) {
        ASSERT(currColocMap[currBMProcessId] == 0);
        ASSERT(newColocMap[newBMProcessId] == 0);
    }

    // if the new coloc counts are collectively better (i.e. each singleton's coloc count has not increased)
    if (newColocMap[newRKProcessId] <= currColocMap[currRKProcessId] &&
        newColocMap[newDDProcessId] <= currColocMap[currDDProcessId] &&
        newColocMap[newBMProcessId] <= currColocMap[currBMProcessId]) {
        // rerecruit the singleton for which we have found a better process, if any
        if (newColocMap[newRKProcessId] < currColocMap[currRKProcessId]) {
            rkSingleton.recruit(self);
        } else if (newColocMap[newDDProcessId] < currColocMap[currDDProcessId]) {
            ddSingleton.recruit(self);
        } else if (self->db.config.blobGranulesEnabled && newColocMap[newBMProcessId] < currColocMap[currBMProcessId]) {
            bmSingleton.recruit(self);
        }
    }
}

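// After the recruitment backoff has elapsed, retries queued recruitment, storage, and blob worker requests,
// re-evaluates singleton placement, and forces a master failure if a better master location exists.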
ACTOR Future<Void> doCheckOutstandingRequests(ClusterControllerData* self) {
    try {
        wait(delay(SERVER_KNOBS->CHECK_OUTSTANDING_INTERVAL));
        while (now() - self->lastRecruitTime < SERVER_KNOBS->SINGLETON_RECRUIT_BME_DELAY ||
               !self->goodRecruitmentTime.isReady()) {
            if (now() - self->lastRecruitTime < SERVER_KNOBS->SINGLETON_RECRUIT_BME_DELAY) {
                wait(delay(SERVER_KNOBS->SINGLETON_RECRUIT_BME_DELAY + 0.001 - (now() - self->lastRecruitTime)));
            }
            if (!self->goodRecruitmentTime.isReady()) {
                wait(self->goodRecruitmentTime);
            }
        }

        checkOutstandingRecruitmentRequests(self);
        checkOutstandingStorageRequests(self);

        if (self->db.config.blobGranulesEnabled) {
            checkOutstandingBlobWorkerRequests(self);
        }
        checkBetterSingletons(self);

        self->checkRecoveryStalled();
        if (self->betterMasterExists()) {
            self->db.forceMasterFailure.trigger();
            TraceEvent("MasterRegistrationKill", self->id).detail("MasterId", self->db.serverInfo->get().master.id());
        }
    } catch (Error& e) {
        if (e.code() != error_code_no_more_servers) {
            TraceEvent(SevError, "CheckOutstandingError").error(e);
        }
    }
    return Void();
}

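// Remote-datacenter analogue of doCheckOutstandingRequests: once remote recruitment is allowed, retries any queued
// remote recruitment requests.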
ACTOR Future<Void> doCheckOutstandingRemoteRequests(ClusterControllerData* self) {
    try {
        wait(delay(SERVER_KNOBS->CHECK_OUTSTANDING_INTERVAL));
        while (!self->goodRemoteRecruitmentTime.isReady()) {
            wait(self->goodRemoteRecruitmentTime);
        }

        checkOutstandingRemoteRecruitmentRequests(self);
    } catch (Error& e) {
        if (e.code() != error_code_no_more_servers) {
            TraceEvent(SevError, "CheckOutstandingError").error(e);
        }
    }
    return Void();
}

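// Starts the local and remote outstanding-request checkers if they are not already running.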
void checkOutstandingRequests(ClusterControllerData* self) {
    if (self->outstandingRemoteRequestChecker.isReady()) {
        self->outstandingRemoteRequestChecker = doCheckOutstandingRemoteRequests(self);
    }

    if (self->outstandingRequestChecker.isReady()) {
        self->outstandingRequestChecker = doCheckOutstandingRequests(self);
    }
}

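// Counts an in-progress reboot for the given process; after the shutdown timeout it decrements the count and, if the
// process is not rebooting repeatedly, rechecks outstanding requests.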
ACTOR Future<Void> rebootAndCheck(ClusterControllerData* cluster, Optional<Standalone<StringRef>> processID) {
    {
        auto watcher = cluster->id_worker.find(processID);
        ASSERT(watcher != cluster->id_worker.end());

        watcher->second.reboots++;
        wait(delay(g_network->isSimulated() ? SERVER_KNOBS->SIM_SHUTDOWN_TIMEOUT : SERVER_KNOBS->SHUTDOWN_TIMEOUT));
    }

    {
        auto watcher = cluster->id_worker.find(processID);
        if (watcher != cluster->id_worker.end()) {
            watcher->second.reboots--;
            if (watcher->second.reboots < 2)
                checkOutstandingRequests(cluster);
        }
    }

    return Void();
}

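// Watches a registered worker: rechecks outstanding requests when the worker becomes available again after a reboot,
// and removes the worker from the cluster controller's bookkeeping once it is considered failed.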
ACTOR Future<Void> workerAvailabilityWatch(WorkerInterface worker,
                                           ProcessClass startingClass,
                                           ClusterControllerData* cluster) {
    state Future<Void> failed =
        (worker.address() == g_network->getLocalAddress() || startingClass.classType() == ProcessClass::TesterClass)
            ? Never()
            : waitFailureClient(worker.waitFailure, SERVER_KNOBS->WORKER_FAILURE_TIME);
    cluster->updateWorkerList.set(worker.locality.processId(),
                                  ProcessData(worker.locality, startingClass, worker.stableAddress()));
    // Yielding here avoids a race where the worker can be added to the id_worker map after the
    // workerAvailabilityWatch fails for the worker.
    wait(delay(0));

    loop {
        choose {
            when(wait(IFailureMonitor::failureMonitor().onStateEqual(
                worker.storage.getEndpoint(),
                FailureStatus(
                    IFailureMonitor::failureMonitor().getState(worker.storage.getEndpoint()).isAvailable())))) {
                if (IFailureMonitor::failureMonitor().getState(worker.storage.getEndpoint()).isAvailable()) {
                    cluster->ac.add(rebootAndCheck(cluster, worker.locality.processId()));
                    checkOutstandingRequests(cluster);
                }
            }
            when(wait(failed)) { // remove workers that have failed
                WorkerInfo& failedWorkerInfo = cluster->id_worker[worker.locality.processId()];

                if (!failedWorkerInfo.reply.isSet()) {
                    failedWorkerInfo.reply.send(
                        RegisterWorkerReply(failedWorkerInfo.details.processClass, failedWorkerInfo.priorityInfo));
                }
                if (worker.locality.processId() == cluster->masterProcessId) {
                    cluster->masterProcessId = Optional<Key>();
                }
                TraceEvent("ClusterControllerWorkerFailed", cluster->id)
                    .detail("ProcessId", worker.locality.processId())
                    .detail("ProcessClass", failedWorkerInfo.details.processClass.toString())
                    .detail("Address", worker.address());
                cluster->removedDBInfoEndpoints.insert(worker.updateServerDBInfo.getEndpoint());
                cluster->id_worker.erase(worker.locality.processId());
                cluster->updateWorkerList.set(worker.locality.processId(), Optional<ProcessData>());
                return Void();
            }
        }
    }
}

struct FailureStatusInfo {
    FailureStatus status;
    double lastRequestTime;
    double penultimateRequestTime;

    FailureStatusInfo() : lastRequestTime(0), penultimateRequestTime(0) {}

    void insertRequest(double now) {
        penultimateRequestTime = lastRequestTime;
        lastRequestTime = now;
    }

    double latency(double now) const {
        return std::max(now - lastRequestTime, lastRequestTime - penultimateRequestTime);
    }
};

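// Waits for all of the given TLog recruitment replies; throws recruitment_failed if any of them is missing,
// otherwise returns the concatenated interfaces.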
ACTOR Future<std::vector<TLogInterface>> requireAll(std::vector<Future<Optional<std::vector<TLogInterface>>>> in) {
    state std::vector<TLogInterface> out;
    state int i;
    for (i = 0; i < in.size(); i++) {
        Optional<std::vector<TLogInterface>> x = wait(in[i]);
        if (!x.present())
            throw recruitment_failed();
        out.insert(out.end(), x.get().begin(), x.get().end());
    }
    return out;
}

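// Tries to reply to req with a worker (process) that a storage server can be recruited on.
// Otherwise, adds the req to a list of outstanding reqs that will eventually be dealt with.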
void clusterRecruitStorage(ClusterControllerData* self, RecruitStorageRequest req) {
    try {
        if (!self->gotProcessClasses && !req.criticalRecruitment)
            throw no_more_servers();
        auto worker = self->getStorageWorker(req);
        RecruitStorageReply rep;
        rep.worker = worker.interf;
        rep.processClass = worker.processClass;
        req.reply.send(rep);
    } catch (Error& e) {
        if (e.code() == error_code_no_more_servers) {
            self->outstandingStorageRequests.push_back(std::make_pair(req, now() + SERVER_KNOBS->RECRUITMENT_TIMEOUT));
            TraceEvent(SevWarn, "RecruitStorageNotAvailable", self->id)
                .detail("IsCriticalRecruitment", req.criticalRecruitment)
                .error(e);
        } else {
            TraceEvent(SevError, "RecruitStorageError", self->id).error(e);
            throw; // Any other error will bring down the cluster controller
        }
    }
}

// Tries to send a reply to req with a worker (process) that a blob worker can be recruited on.
// Otherwise, adds the req to a list of outstanding reqs that will eventually be dealt with.
void clusterRecruitBlobWorker(ClusterControllerData* self, RecruitBlobWorkerRequest req) {
    try {
        if (!self->gotProcessClasses)
            throw no_more_servers();
        auto worker = self->getBlobWorker(req);
        RecruitBlobWorkerReply rep;
        rep.worker = worker.interf;
        rep.processClass = worker.processClass;
        req.reply.send(rep);
    } catch (Error& e) {
        if (e.code() == error_code_no_more_servers) {
            self->outstandingBlobWorkerRequests.push_back(
                std::make_pair(req, now() + SERVER_KNOBS->RECRUITMENT_TIMEOUT));
            TraceEvent(SevWarn, "RecruitBlobWorkerNotAvailable", self->id).error(e);
        } else {
            TraceEvent(SevError, "RecruitBlobWorkerError", self->id).error(e);
            throw; // Any other error will bring down the cluster controller
        }
    }
}

ACTOR Future<Void> clusterRecruitFromConfiguration(ClusterControllerData* self, RecruitFromConfigurationRequest req) {
    // At the moment this doesn't really need to be an actor (it always completes immediately)
    TEST(true); // ClusterController RecruitTLogsRequest
    loop {
        try {
            auto rep = self->findWorkersForConfiguration(req);
            req.reply.send(rep);
            return Void();
        } catch (Error& e) {
            if (e.code() == error_code_no_more_servers && self->goodRecruitmentTime.isReady()) {
                self->outstandingRecruitmentRequests.push_back(req);
                TraceEvent(SevWarn, "RecruitFromConfigurationNotAvailable", self->id).error(e);
                return Void();
            } else if (e.code() == error_code_operation_failed || e.code() == error_code_no_more_servers) {
                // recruitment not good enough, try again
                TraceEvent("RecruitFromConfigurationRetry", self->id)
                    .error(e)
                    .detail("GoodRecruitmentTimeReady", self->goodRecruitmentTime.isReady());
                while (!self->goodRecruitmentTime.isReady()) {
                    wait(lowPriorityDelay(SERVER_KNOBS->ATTEMPT_RECRUITMENT_DELAY));
                }
            } else {
                TraceEvent(SevError, "RecruitFromConfigurationError", self->id).error(e);
                throw; // goodbye, cluster controller
            }
        }
        wait(lowPriorityDelay(SERVER_KNOBS->ATTEMPT_RECRUITMENT_DELAY));
    }
}

ACTOR Future<Void> clusterRecruitRemoteFromConfiguration(ClusterControllerData* self,
                                                         RecruitRemoteFromConfigurationRequest req) {
    // At the moment this doesn't really need to be an actor (it always completes immediately)
    TEST(true); // ClusterController RecruitTLogsRequest Remote
    loop {
        try {
            RecruitRemoteFromConfigurationReply rep = self->findRemoteWorkersForConfiguration(req);
            req.reply.send(rep);
            return Void();
        } catch (Error& e) {
            if (e.code() == error_code_no_more_servers && self->goodRemoteRecruitmentTime.isReady()) {
                self->outstandingRemoteRecruitmentRequests.push_back(req);
                TraceEvent(SevWarn, "RecruitRemoteFromConfigurationNotAvailable", self->id).error(e);
                return Void();
            } else if (e.code() == error_code_operation_failed || e.code() == error_code_no_more_servers) {
                // recruitment not good enough, try again
                TraceEvent("RecruitRemoteFromConfigurationRetry", self->id)
                    .error(e)
                    .detail("GoodRecruitmentTimeReady", self->goodRemoteRecruitmentTime.isReady());
                while (!self->goodRemoteRecruitmentTime.isReady()) {
                    wait(lowPriorityDelay(SERVER_KNOBS->ATTEMPT_RECRUITMENT_DELAY));
                }
            } else {
                TraceEvent(SevError, "RecruitRemoteFromConfigurationError", self->id).error(e);
                throw; // goodbye, cluster controller
            }
        }
        wait(lowPriorityDelay(SERVER_KNOBS->ATTEMPT_RECRUITMENT_DELAY));
    }
}

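// Handles a master registration request: ignores stale registrations, records the recovered configuration and
// worker exclusions, and republishes ServerDBInfo/ClientDBInfo whenever anything relevant has changed.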
void clusterRegisterMaster(ClusterControllerData* self, RegisterMasterRequest const& req) {
    req.reply.send(Void());

    TraceEvent("MasterRegistrationReceived", self->id)
        .detail("MasterId", req.id)
        .detail("Master", req.mi.toString())
        .detail("Tlogs", describe(req.logSystemConfig.tLogs))
        .detail("Resolvers", req.resolvers.size())
        .detail("RecoveryState", (int)req.recoveryState)
        .detail("RegistrationCount", req.registrationCount)
        .detail("CommitProxies", req.commitProxies.size())
        .detail("GrvProxies", req.grvProxies.size())
        .detail("RecoveryCount", req.recoveryCount)
        .detail("Stalled", req.recoveryStalled)
        .detail("OldestBackupEpoch", req.logSystemConfig.oldestBackupEpoch)
        .detail("ClusterId", req.clusterId);

    // make sure the request comes from an active database
    auto db = &self->db;
    if (db->serverInfo->get().master.id() != req.id || req.registrationCount <= db->masterRegistrationCount) {
        TraceEvent("MasterRegistrationNotFound", self->id)
            .detail("MasterId", req.id)
            .detail("ExistingId", db->serverInfo->get().master.id())
            .detail("RegCount", req.registrationCount)
            .detail("ExistingRegCount", db->masterRegistrationCount);
        return;
    }

    if (req.recoveryState == RecoveryState::FULLY_RECOVERED) {
        self->db.unfinishedRecoveries = 0;
        self->db.logGenerations = 0;
        ASSERT(!req.logSystemConfig.oldTLogs.size());
    } else {
        self->db.logGenerations = std::max<int>(self->db.logGenerations, req.logSystemConfig.oldTLogs.size());
    }

    db->masterRegistrationCount = req.registrationCount;
    db->recoveryStalled = req.recoveryStalled;
    if (req.configuration.present()) {
        db->config = req.configuration.get();

        if (req.recoveryState >= RecoveryState::ACCEPTING_COMMITS) {
            self->gotFullyRecoveredConfig = true;
            db->fullyRecoveredConfig = req.configuration.get();
            for (auto& it : self->id_worker) {
                bool isExcludedFromConfig =
                    db->fullyRecoveredConfig.isExcludedServer(it.second.details.interf.addresses());
                if (it.second.priorityInfo.isExcluded != isExcludedFromConfig) {
                    it.second.priorityInfo.isExcluded = isExcludedFromConfig;
                    if (!it.second.reply.isSet()) {
                        it.second.reply.send(
                            RegisterWorkerReply(it.second.details.processClass, it.second.priorityInfo));
                    }
                }
            }
        }
    }

    bool isChanged = false;
    auto dbInfo = self->db.serverInfo->get();

    if (dbInfo.recoveryState != req.recoveryState) {
        dbInfo.recoveryState = req.recoveryState;
        isChanged = true;
    }

    if (dbInfo.priorCommittedLogServers != req.priorCommittedLogServers) {
        dbInfo.priorCommittedLogServers = req.priorCommittedLogServers;
        isChanged = true;
    }

    // Construct the client information
    if (db->clientInfo->get().commitProxies != req.commitProxies ||
        db->clientInfo->get().grvProxies != req.grvProxies) {
        isChanged = true;
        // TODO why construct a new one and not just copy the old one and change proxies + id?
        ClientDBInfo clientInfo;
        clientInfo.id = deterministicRandom()->randomUniqueID();
        clientInfo.commitProxies = req.commitProxies;
        clientInfo.grvProxies = req.grvProxies;
        db->clientInfo->set(clientInfo);
        dbInfo.client = db->clientInfo->get();
    }

    if (!dbInfo.logSystemConfig.isEqual(req.logSystemConfig)) {
        isChanged = true;
        dbInfo.logSystemConfig = req.logSystemConfig;
    }

    if (dbInfo.resolvers != req.resolvers) {
        isChanged = true;
        dbInfo.resolvers = req.resolvers;
    }

    if (dbInfo.recoveryCount != req.recoveryCount) {
        isChanged = true;
        dbInfo.recoveryCount = req.recoveryCount;
    }

    if (dbInfo.clusterId != req.clusterId) {
        isChanged = true;
        dbInfo.clusterId = req.clusterId;
    }

    if (isChanged) {
        dbInfo.id = deterministicRandom()->randomUniqueID();
        dbInfo.infoGeneration = ++self->db.dbInfoCount;
        self->db.serverInfo->set(dbInfo);
    }

    checkOutstandingRequests(self);
}

// Halts the registering (i.e. requesting) singleton if one is already in the process of being recruited
// or, halts the existing singleton in favour of the requesting one
template <class Interface>
void haltRegisteringOrCurrentSingleton(ClusterControllerData* self,
                                       const WorkerInterface& worker,
                                       const Singleton<Interface>& currSingleton,
                                       const Singleton<Interface>& registeringSingleton,
                                       const Optional<UID> recruitingID) {
    ASSERT(currSingleton.getRole() == registeringSingleton.getRole());
    const UID registeringID = registeringSingleton.interface.get().id();
    const std::string roleName = currSingleton.getRole().roleName;
    const std::string roleAbbr = currSingleton.getRole().abbreviation;

    // halt the requesting singleton if it isn't the one currently being recruited
    if ((recruitingID.present() && recruitingID.get() != registeringID) ||
        self->clusterControllerDcId != worker.locality.dcId()) {
        TraceEvent(("CCHaltRegistering" + roleName).c_str(), self->id)
            .detail(roleAbbr + "ID", registeringID)
            .detail("DcID", printable(self->clusterControllerDcId))
            .detail("ReqDcID", printable(worker.locality.dcId()))
            .detail("Recruiting" + roleAbbr + "ID", recruitingID.present() ? recruitingID.get() : UID());
        registeringSingleton.halt(self, worker.locality.processId());
    } else if (!recruitingID.present()) {
        // if not currently recruiting, then halt previous one in favour of requesting one
        TraceEvent(("CCRegister" + roleName).c_str(), self->id).detail(roleAbbr + "ID", registeringID);
        if (currSingleton.interface.present() && currSingleton.interface.get().id() != registeringID &&
            self->id_worker.count(currSingleton.interface.get().locality.processId())) {
            TraceEvent(("CCHaltPrevious" + roleName).c_str(), self->id)
                .detail(roleAbbr + "ID", currSingleton.interface.get().id())
                .detail("DcID", printable(self->clusterControllerDcId))
                .detail("ReqDcID", printable(worker.locality.dcId()))
                .detail("Recruiting" + roleAbbr + "ID", recruitingID.present() ? recruitingID.get() : UID());
            currSingleton.halt(self, currSingleton.interface.get().locality.processId());
        }
        // set the curr singleton if it doesn't exist or it's different from the requesting one
        if (!currSingleton.interface.present() || currSingleton.interface.get().id() != registeringID) {
            registeringSingleton.setInterfaceToDbInfo(self);
        }
    }
}

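// Handles worker registration with the cluster controller: computes the worker's process class and priority info,
// tracks incompatible peers, and adds the worker to (or updates it in) the id_worker map, rechecking outstanding
// requests as needed.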
void registerWorker(RegisterWorkerRequest req, ClusterControllerData* self, ConfigBroadcaster* configBroadcaster) {
    const WorkerInterface& w = req.wi;
    ProcessClass newProcessClass = req.processClass;
    auto info = self->id_worker.find(w.locality.processId());
    ClusterControllerPriorityInfo newPriorityInfo = req.priorityInfo;
    newPriorityInfo.processClassFitness = newProcessClass.machineClassFitness(ProcessClass::ClusterController);

    for (auto it : req.incompatiblePeers) {
        self->db.incompatibleConnections[it] = now() + SERVER_KNOBS->INCOMPATIBLE_PEERS_LOGGING_INTERVAL;
    }
    self->removedDBInfoEndpoints.erase(w.updateServerDBInfo.getEndpoint());

    if (info == self->id_worker.end()) {
        TraceEvent("ClusterControllerActualWorkers", self->id)
            .detail("WorkerId", w.id())
            .detail("ProcessId", w.locality.processId())
            .detail("ZoneId", w.locality.zoneId())
            .detail("DataHall", w.locality.dataHallId())
            .detail("PClass", req.processClass.toString())
            .detail("Workers", self->id_worker.size());
        self->goodRecruitmentTime = lowPriorityDelay(SERVER_KNOBS->WAIT_FOR_GOOD_RECRUITMENT_DELAY);
        self->goodRemoteRecruitmentTime = lowPriorityDelay(SERVER_KNOBS->WAIT_FOR_GOOD_REMOTE_RECRUITMENT_DELAY);
    } else {
        TraceEvent("ClusterControllerWorkerAlreadyRegistered", self->id)
            .suppressFor(1.0)
            .detail("WorkerId", w.id())
            .detail("ProcessId", w.locality.processId())
            .detail("ZoneId", w.locality.zoneId())
            .detail("DataHall", w.locality.dataHallId())
            .detail("PClass", req.processClass.toString())
            .detail("Workers", self->id_worker.size())
            .detail("Degraded", req.degraded);
    }
    if (w.address() == g_network->getLocalAddress()) {
        if (self->changingDcIds.get().first) {
            if (self->changingDcIds.get().second.present()) {
                newPriorityInfo.dcFitness = ClusterControllerPriorityInfo::calculateDCFitness(
                    w.locality.dcId(), self->changingDcIds.get().second.get());
            }
        } else if (self->changedDcIds.get().second.present()) {
            newPriorityInfo.dcFitness = ClusterControllerPriorityInfo::calculateDCFitness(
                w.locality.dcId(), self->changedDcIds.get().second.get());
        }
    } else {
        if (!self->changingDcIds.get().first) {
            if (self->changingDcIds.get().second.present()) {
                newPriorityInfo.dcFitness = ClusterControllerPriorityInfo::calculateDCFitness(
                    w.locality.dcId(), self->changingDcIds.get().second.get());
            }
        } else if (self->changedDcIds.get().second.present()) {
            newPriorityInfo.dcFitness = ClusterControllerPriorityInfo::calculateDCFitness(
                w.locality.dcId(), self->changedDcIds.get().second.get());
        }
    }

    // Check process class and exclusion status
    if (info == self->id_worker.end() || info->second.details.interf.id() != w.id() ||
        req.generation >= info->second.gen) {
        if (self->gotProcessClasses) {
            auto classIter = self->id_class.find(w.locality.processId());

            if (classIter != self->id_class.end() && (classIter->second.classSource() == ProcessClass::DBSource ||
                                                      req.initialClass.classType() == ProcessClass::UnsetClass)) {
                newProcessClass = classIter->second;
            } else {
                newProcessClass = req.initialClass;
            }
            newPriorityInfo.processClassFitness = newProcessClass.machineClassFitness(ProcessClass::ClusterController);
        }

        if (self->gotFullyRecoveredConfig) {
            newPriorityInfo.isExcluded = self->db.fullyRecoveredConfig.isExcludedServer(w.addresses());
        }
    }

    if (info == self->id_worker.end()) {
        self->id_worker[w.locality.processId()] = WorkerInfo(workerAvailabilityWatch(w, newProcessClass, self),
                                                             req.reply,
                                                             req.generation,
                                                             w,
                                                             req.initialClass,
                                                             newProcessClass,
                                                             newPriorityInfo,
                                                             req.degraded,
                                                             req.issues);
        if (!self->masterProcessId.present() &&
            w.locality.processId() == self->db.serverInfo->get().master.locality.processId()) {
            self->masterProcessId = w.locality.processId();
        }
        if (configBroadcaster != nullptr) {
            self->addActor.send(configBroadcaster->registerWorker(
                req.lastSeenKnobVersion,
                req.knobConfigClassSet,
                self->id_worker[w.locality.processId()].watcher,
                self->id_worker[w.locality.processId()].details.interf.configBroadcastInterface));
        }
        self->updateDBInfoEndpoints.insert(w.updateServerDBInfo.getEndpoint());
        self->updateDBInfo.trigger();
        checkOutstandingRequests(self);
    } else if (info->second.details.interf.id() != w.id() || req.generation >= info->second.gen) {
|
|
|
if (!info->second.reply.isSet()) {
|
2021-03-11 02:06:03 +08:00
|
|
|
info->second.reply.send(Never());
|
2017-05-26 04:48:44 +08:00
|
|
|
}
|
|
|
|
info->second.reply = req.reply;
|
2019-03-09 00:25:07 +08:00
|
|
|
info->second.details.processClass = newProcessClass;
|
2018-02-10 08:48:55 +08:00
|
|
|
info->second.priorityInfo = newPriorityInfo;
|
2017-11-15 05:57:37 +08:00
|
|
|
info->second.initialClass = req.initialClass;
|
2019-03-09 03:40:00 +08:00
|
|
|
info->second.details.degraded = req.degraded;
|
2017-05-26 04:48:44 +08:00
|
|
|
info->second.gen = req.generation;
|
2020-04-06 14:09:36 +08:00
|
|
|
info->second.issues = req.issues;
|
2017-05-26 04:48:44 +08:00
|
|
|
|
2021-03-11 02:06:03 +08:00
|
|
|
if (info->second.details.interf.id() != w.id()) {
|
2020-04-06 14:09:36 +08:00
|
|
|
self->removedDBInfoEndpoints.insert(info->second.details.interf.updateServerDBInfo.getEndpoint());
|
2019-03-09 00:25:07 +08:00
|
|
|
info->second.details.interf = w;
|
2021-03-11 02:06:03 +08:00
|
|
|
info->second.watcher = workerAvailabilityWatch(w, newProcessClass, self);
|
2017-05-26 04:48:44 +08:00
|
|
|
}
|
2021-11-04 03:51:21 +08:00
|
|
|
if (req.requestDbInfo) {
|
|
|
|
self->updateDBInfoEndpoints.insert(w.updateServerDBInfo.getEndpoint());
|
|
|
|
self->updateDBInfo.trigger();
|
|
|
|
}
|
2021-08-06 09:50:11 +08:00
|
|
|
if (configBroadcaster != nullptr) {
|
2021-08-07 03:42:07 +08:00
|
|
|
self->addActor.send(
|
|
|
|
configBroadcaster->registerWorker(req.lastSeenKnobVersion,
|
|
|
|
req.knobConfigClassSet,
|
|
|
|
info->second.watcher,
|
2021-08-10 02:49:57 +08:00
|
|
|
info->second.details.interf.configBroadcastInterface));
|
2021-08-06 09:50:11 +08:00
|
|
|
}
|
2021-03-11 02:06:03 +08:00
|
|
|
checkOutstandingRequests(self);
|
2019-03-23 08:08:54 +08:00
|
|
|
} else {
|
|
|
|
TEST(true); // Received an old worker registration request.
|
2017-05-26 04:48:44 +08:00
|
|
|
}
|
|
|
|
|
2021-09-13 21:58:38 +08:00
|
|
|
// For each singleton
|
|
|
|
// - if the registering singleton conflicts with the singleton being recruited, kill the registering one
|
|
|
|
// - if the singleton is not being recruited, kill the existing one in favour of the registering one
|
|
|
|
if (req.distributorInterf.present()) {
|
|
|
|
auto currSingleton = DataDistributorSingleton(self->db.serverInfo->get().distributor);
|
|
|
|
auto registeringSingleton = DataDistributorSingleton(req.distributorInterf);
|
|
|
|
haltRegisteringOrCurrentSingleton<DataDistributorInterface>(
|
|
|
|
self, w, currSingleton, registeringSingleton, self->recruitingDistributorID);
|
2019-01-29 03:29:39 +08:00
|
|
|
}
|
2021-09-13 21:58:38 +08:00
|
|
|
|
2019-03-20 02:29:19 +08:00
|
|
|
if (req.ratekeeperInterf.present()) {
|
2021-09-13 21:58:38 +08:00
|
|
|
auto currSingleton = RatekeeperSingleton(self->db.serverInfo->get().ratekeeper);
|
|
|
|
auto registeringSingleton = RatekeeperSingleton(req.ratekeeperInterf);
|
|
|
|
haltRegisteringOrCurrentSingleton<RatekeeperInterface>(
|
|
|
|
self, w, currSingleton, registeringSingleton, self->recruitingRatekeeperID);
|
2019-01-29 03:29:39 +08:00
|
|
|
}
|
2019-11-13 05:01:29 +08:00
|
|
|
|
2021-12-07 04:10:20 +08:00
|
|
|
if (self->db.config.blobGranulesEnabled && req.blobManagerInterf.present()) {
|
2021-09-15 23:35:58 +08:00
|
|
|
auto currSingleton = BlobManagerSingleton(self->db.serverInfo->get().blobManager);
|
|
|
|
auto registeringSingleton = BlobManagerSingleton(req.blobManagerInterf);
|
|
|
|
haltRegisteringOrCurrentSingleton<BlobManagerInterface>(
|
|
|
|
self, w, currSingleton, registeringSingleton, self->recruitingBlobManagerID);
|
|
|
|
}
|
|
|
|
|
2019-11-13 05:01:29 +08:00
|
|
|
// Notify the worker to register again with new process class/exclusive property
|
2021-03-11 02:06:03 +08:00
|
|
|
if (!req.reply.isSet() && newPriorityInfo != req.priorityInfo) {
|
|
|
|
req.reply.send(RegisterWorkerReply(newProcessClass, newPriorityInfo));
|
2019-11-13 05:01:29 +08:00
|
|
|
}
|
2017-05-26 04:48:44 +08:00
|
|
|
}
|
|
|
|
|
2017-09-28 07:31:38 +08:00
|
|
|
#define TIME_KEEPER_VERSION LiteralStringRef("1")

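// Writes TIME_KEEPER_VERSION to timeKeeperVersionKey, retrying until the transaction commits.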
ACTOR Future<Void> timeKeeperSetVersion(ClusterControllerData* self) {
    state Reference<ReadYourWritesTransaction> tr = makeReference<ReadYourWritesTransaction>(self->cx);
    loop {
        try {
            tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
            tr->setOption(FDBTransactionOptions::LOCK_AWARE);
            tr->setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);
            tr->set(timeKeeperVersionKey, TIME_KEEPER_VERSION);
            wait(tr->commit());
            break;
        } catch (Error& e) {
            wait(tr->onError(e));
        }
    }

    return Void();
}

// This actor periodically gets a read version and writes it to the cluster with the current timestamp as the key.
// To avoid running out of space, it limits the max number of entries and clears old entries on each update. This
// mapping is used by backup and restore to get the version information for a timestamp.
ACTOR Future<Void> timeKeeper(ClusterControllerData* self) {
    state KeyBackedMap<int64_t, Version> versionMap(timeKeeperPrefixRange.begin);

    TraceEvent("TimeKeeperStarted").log();

    wait(timeKeeperSetVersion(self));

    loop {
        state Reference<ReadYourWritesTransaction> tr = makeReference<ReadYourWritesTransaction>(self->cx);
        loop {
            try {
                state UID debugID = deterministicRandom()->randomUniqueID();
                if (!g_network->isSimulated()) {
                    // This is done to provide an arbitrary logged transaction every ~10s.
                    // FIXME: replace or augment this with logging on the proxy which tracks
                    // how long it is taking to hear responses from each other component.
                    tr->debugTransaction(debugID);
                }
                tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
                tr->setOption(FDBTransactionOptions::LOCK_AWARE);
                tr->setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);

                Optional<Value> disableValue = wait(tr->get(timeKeeperDisableKey));
                if (disableValue.present()) {
                    break;
                }

                Version v = tr->getReadVersion().get();
                int64_t currentTime = (int64_t)now();
                versionMap.set(tr, currentTime, v);
                if (!g_network->isSimulated()) {
                    TraceEvent("TimeKeeperCommit", debugID).detail("Version", v);
                }
                int64_t ttl = currentTime - SERVER_KNOBS->TIME_KEEPER_DELAY * SERVER_KNOBS->TIME_KEEPER_MAX_ENTRIES;
                if (ttl > 0) {
                    versionMap.erase(tr, 0, ttl);
                }

                wait(tr->commit());
                break;
            } catch (Error& e) {
                wait(tr->onError(e));
            }
        }

        wait(delay(SERVER_KNOBS->TIME_KEEPER_DELAY));
    }
}

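// Serves status requests: batches incoming StatusRequests, rate-limits them using the
// STATUS_MIN_TIME_BETWEEN_REQUESTS and MAX_STATUS_REQUESTS_PER_SECOND knobs, runs clusterGetStatus once per
// batch, and sends the result (or error) to every request in the batch.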
ACTOR Future<Void> statusServer(FutureStream<StatusRequest> requests,
                                ClusterControllerData* self,
                                ServerCoordinators coordinators,
                                ConfigBroadcaster const* configBroadcaster) {
    // Timestamp of the END of the last GetStatus execution
    state double last_request_time = 0.0;

    // Place to accumulate a batch of requests to respond to
    state std::vector<StatusRequest> requests_batch;

    loop {
        try {
            // Wait until the first request is ready
            StatusRequest req = waitNext(requests);
            ++self->statusRequests;
            requests_batch.push_back(req);

            // Earliest time at which we may begin a new request
            double next_allowed_request_time = last_request_time + SERVER_KNOBS->STATUS_MIN_TIME_BETWEEN_REQUESTS;

            // Wait if needed to satisfy the min_time knob; this also allows more requests to queue up.
            double minwait = std::max(next_allowed_request_time - now(), 0.0);
            wait(delay(minwait));

            // Get all requests that are ready right *now*, before GetStatus() begins.
            // All of these requests will be responded to with the next GetStatus() result.
            // If requests are batched, do not respond to more than MAX_STATUS_REQUESTS_PER_SECOND
            // requests per second
            while (requests.isReady()) {
                auto req = requests.pop();
                if (SERVER_KNOBS->STATUS_MIN_TIME_BETWEEN_REQUESTS > 0.0 &&
                    requests_batch.size() + 1 >
                        SERVER_KNOBS->STATUS_MIN_TIME_BETWEEN_REQUESTS * SERVER_KNOBS->MAX_STATUS_REQUESTS_PER_SECOND) {
                    TraceEvent(SevWarnAlways, "TooManyStatusRequests")
                        .suppressFor(1.0)
                        .detail("BatchSize", requests_batch.size());
                    req.reply.sendError(server_overloaded());
                } else {
                    requests_batch.push_back(req);
                }
            }

            // Get status but trap errors to send back to client.
            std::vector<WorkerDetails> workers;
            std::vector<ProcessIssues> workerIssues;

            for (auto& it : self->id_worker) {
                workers.push_back(it.second.details);
                if (it.second.issues.size()) {
                    workerIssues.push_back(ProcessIssues(it.second.details.interf.address(), it.second.issues));
                }
            }

            std::vector<NetworkAddress> incompatibleConnections;
            for (auto it = self->db.incompatibleConnections.begin(); it != self->db.incompatibleConnections.end();) {
                if (it->second < now()) {
                    it = self->db.incompatibleConnections.erase(it);
                } else {
                    incompatibleConnections.push_back(it->first);
                    it++;
                }
            }

            state ErrorOr<StatusReply> result = wait(errorOr(clusterGetStatus(self->db.serverInfo,
                                                                              self->cx,
                                                                              workers,
                                                                              workerIssues,
                                                                              &self->db.clientStatus,
                                                                              coordinators,
                                                                              incompatibleConnections,
                                                                              self->datacenterVersionDifference,
                                                                              configBroadcaster)));

            if (result.isError() && result.getError().code() == error_code_actor_cancelled)
                throw result.getError();

            // Update last_request_time now because GetStatus is finished and the delay is to be measured between
            // requests
            last_request_time = now();

            while (!requests_batch.empty()) {
                if (result.isError())
                    requests_batch.back().reply.sendError(result.getError());
                else
                    requests_batch.back().reply.send(result.get());
                requests_batch.pop_back();
                wait(yield());
            }
        } catch (Error& e) {
            TraceEvent(SevError, "StatusServerError").error(e);
            throw e;
        }
    }
}

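// Performs a one-time upgrade of the old process class key format, then watches processClassChangeKey and
// pushes updated process classes and fitness to registered workers whenever the configuration changes.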
ACTOR Future<Void> monitorProcessClasses(ClusterControllerData* self) {

    state ReadYourWritesTransaction trVer(self->db.db);
    loop {
        try {
            trVer.setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
            trVer.setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);

            Optional<Value> val = wait(trVer.get(processClassVersionKey));

            if (val.present())
                break;

            RangeResult processClasses = wait(trVer.getRange(processClassKeys, CLIENT_KNOBS->TOO_MANY));
            ASSERT(!processClasses.more && processClasses.size() < CLIENT_KNOBS->TOO_MANY);

            trVer.clear(processClassKeys);
            trVer.set(processClassVersionKey, processClassVersionValue);
            for (auto it : processClasses) {
                UID processUid = decodeProcessClassKeyOld(it.key);
                trVer.set(processClassKeyFor(processUid.toString()), it.value);
            }

            wait(trVer.commit());
            TraceEvent("ProcessClassUpgrade").log();
            break;
        } catch (Error& e) {
            wait(trVer.onError(e));
        }
    }

    loop {
        state ReadYourWritesTransaction tr(self->db.db);

        loop {
            try {
                tr.setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
                tr.setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);
                RangeResult processClasses = wait(tr.getRange(processClassKeys, CLIENT_KNOBS->TOO_MANY));
                ASSERT(!processClasses.more && processClasses.size() < CLIENT_KNOBS->TOO_MANY);

                if (processClasses != self->lastProcessClasses || !self->gotProcessClasses) {
                    self->id_class.clear();
                    for (int i = 0; i < processClasses.size(); i++) {
                        auto c = decodeProcessClassValue(processClasses[i].value);
                        ASSERT(c.classSource() != ProcessClass::CommandLineSource);
                        self->id_class[decodeProcessClassKey(processClasses[i].key)] = c;
                    }

                    for (auto& w : self->id_worker) {
                        auto classIter = self->id_class.find(w.first);
                        ProcessClass newProcessClass;

                        if (classIter != self->id_class.end() &&
                            (classIter->second.classSource() == ProcessClass::DBSource ||
                             w.second.initialClass.classType() == ProcessClass::UnsetClass)) {
                            newProcessClass = classIter->second;
                        } else {
                            newProcessClass = w.second.initialClass;
                        }

                        if (newProcessClass != w.second.details.processClass) {
                            w.second.details.processClass = newProcessClass;
                            w.second.priorityInfo.processClassFitness =
                                newProcessClass.machineClassFitness(ProcessClass::ClusterController);
                            if (!w.second.reply.isSet()) {
                                w.second.reply.send(
                                    RegisterWorkerReply(w.second.details.processClass, w.second.priorityInfo));
                            }
                        }
                    }

                    self->lastProcessClasses = processClasses;
                    self->gotProcessClasses = true;
                    checkOutstandingRequests(self);
                }

                state Future<Void> watchFuture = tr.watch(processClassChangeKey);
                wait(tr.commit());
                wait(watchFuture);
                break;
            } catch (Error& e) {
                wait(tr.onError(e));
            }
        }
    }
}

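// Watches latencyBandConfigKey and republishes ServerDBInfo whenever the latency band configuration changes.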
ACTOR Future<Void> monitorServerInfoConfig(ClusterControllerData::DBInfo* db) {
    loop {
        state ReadYourWritesTransaction tr(db->db);
        loop {
            try {
                tr.setOption(FDBTransactionOptions::READ_SYSTEM_KEYS);
                tr.setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);
                tr.setOption(FDBTransactionOptions::READ_LOCK_AWARE);

                Optional<Value> configVal = wait(tr.get(latencyBandConfigKey));
                Optional<LatencyBandConfig> config;
                if (configVal.present()) {
                    config = LatencyBandConfig::parse(configVal.get());
                }

                auto serverInfo = db->serverInfo->get();
                if (config != serverInfo.latencyBandConfig) {
                    TraceEvent("LatencyBandConfigChanged").detail("Present", config.present());
                    serverInfo.id = deterministicRandom()->randomUniqueID();
                    serverInfo.infoGeneration = ++db->dbInfoCount;
                    serverInfo.latencyBandConfig = config;
                    db->serverInfo->set(serverInfo);
                }

                state Future<Void> configChangeFuture = tr.watch(latencyBandConfigKey);

                wait(tr.commit());
                wait(configChangeFuture);

                break;
            } catch (Error& e) {
                wait(tr.onError(e));
            }
        }
    }
}

// Monitors the global configuration version key for changes. When changes are
// made, the global configuration history is read and any updates are sent to
// all processes in the system by updating the ClientDBInfo object. The
// GlobalConfig actor class contains the functionality to read the latest
// history and update each process's local view.
ACTOR Future<Void> monitorGlobalConfig(ClusterControllerData::DBInfo* db) {
    loop {
        state ReadYourWritesTransaction tr(db->db);
        loop {
            try {
                tr.setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
                tr.setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);
                state Optional<Value> globalConfigVersion = wait(tr.get(globalConfigVersionKey));
                state ClientDBInfo clientInfo = db->serverInfo->get().client;

                if (globalConfigVersion.present()) {
                    // Since the history keys end with versionstamps, they
                    // should be sorted correctly (versionstamps are stored in
                    // big-endian order).
                    RangeResult globalConfigHistory =
                        wait(tr.getRange(globalConfigHistoryKeys, CLIENT_KNOBS->TOO_MANY));
                    // If the global configuration version key has been set,
                    // the history should contain at least one item.
                    ASSERT(globalConfigHistory.size() > 0);
                    clientInfo.history.clear();

                    for (const auto& kv : globalConfigHistory) {
                        ObjectReader reader(kv.value.begin(), IncludeVersion());
                        if (reader.protocolVersion() != g_network->protocolVersion()) {
                            // If the protocol version has changed, the
                            // GlobalConfig actor should refresh its view by
                            // reading the entire global configuration key
                            // range. Setting the version to the max int64_t
                            // will always cause the global configuration
                            // updater to refresh its view of the configuration
                            // keyspace.
                            clientInfo.history.clear();
                            clientInfo.history.emplace_back(std::numeric_limits<Version>::max());
                            break;
                        }

                        VersionHistory vh;
                        reader.deserialize(vh);

                        // Read commit version out of versionstamp at end of key.
                        BinaryReader versionReader =
                            BinaryReader(kv.key.removePrefix(globalConfigHistoryPrefix), Unversioned());
                        Version historyCommitVersion;
                        versionReader >> historyCommitVersion;
                        historyCommitVersion = bigEndian64(historyCommitVersion);
                        vh.version = historyCommitVersion;

                        clientInfo.history.push_back(std::move(vh));
                    }

                    if (clientInfo.history.size() > 0) {
                        // The first item in the historical list of mutations
                        // is only used to:
                        //   a) Recognize that some historical changes may have
                        //      been missed, and the entire global
                        //      configuration keyspace needs to be read, or..
                        //   b) Check which historical updates have already
                        //      been applied. If this is the case, the first
                        //      history item must have a version greater than
                        //      or equal to whatever version the global
                        //      configuration was last updated at, and
                        //      therefore won't need to be applied again.
                        clientInfo.history[0].mutations = Standalone<VectorRef<MutationRef>>();
                    }

                    clientInfo.id = deterministicRandom()->randomUniqueID();
                    // Update ServerDBInfo so fdbserver processes receive updated history.
                    ServerDBInfo serverInfo = db->serverInfo->get();
                    serverInfo.id = deterministicRandom()->randomUniqueID();
                    serverInfo.infoGeneration = ++db->dbInfoCount;
                    serverInfo.client = clientInfo;
                    db->serverInfo->set(serverInfo);

                    // Update ClientDBInfo so client processes receive updated history.
                    db->clientInfo->set(clientInfo);
                }

                state Future<Void> globalConfigFuture = tr.watch(globalConfigVersionKey);
                wait(tr.commit());
                wait(globalConfigFuture);
                break;
            } catch (Error& e) {
                wait(tr.onError(e));
            }
        }
    }
}

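// Watches clientLibChangeCounterKey and, when the counter changes, republishes both ClientDBInfo and
// ServerDBInfo so that clients and servers notice newly published client libraries.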
ACTOR Future<Void> monitorClientLibChangeCounter(ClusterControllerData::DBInfo* db) {
    state ClientDBInfo clientInfo;
    state ReadYourWritesTransaction tr;
    state Future<Void> clientLibChangeFuture;

    loop {
        tr = ReadYourWritesTransaction(db->db);
        loop {
            try {
                tr.setOption(FDBTransactionOptions::READ_SYSTEM_KEYS);
                tr.setOption(FDBTransactionOptions::READ_LOCK_AWARE);

                Optional<Value> counterVal = wait(tr.get(clientLibChangeCounterKey));
                if (counterVal.present() && counterVal.get().size() == sizeof(uint64_t)) {
                    uint64_t changeCounter = *reinterpret_cast<const uint64_t*>(counterVal.get().begin());

                    clientInfo = db->serverInfo->get().client;
                    if (changeCounter != clientInfo.clientLibChangeCounter) {
                        TraceEvent("ClientLibChangeCounterChanged").detail("Value", changeCounter);
                        clientInfo.id = deterministicRandom()->randomUniqueID();
                        clientInfo.clientLibChangeCounter = changeCounter;
                        db->clientInfo->set(clientInfo);

                        ServerDBInfo serverInfo = db->serverInfo->get();
                        serverInfo.id = deterministicRandom()->randomUniqueID();
                        serverInfo.infoGeneration = ++db->dbInfoCount;
                        serverInfo.client = clientInfo;
                        db->serverInfo->set(serverInfo);
                    }
                }

                clientLibChangeFuture = tr.watch(clientLibChangeCounterKey);
                wait(tr.commit());
                wait(clientLibChangeFuture);
                break;
            } catch (Error& e) {
                wait(tr.onError(e));
            }
        }
    }
}

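// Tracks changes to desiredDcIds: recomputes the datacenter fitness of the cluster controller and of every
// registered worker, notifying workers whose fitness changed.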
ACTOR Future<Void> updatedChangingDatacenters(ClusterControllerData* self) {
    // do not change the cluster controller until all the processes have had a chance to register
    wait(delay(SERVER_KNOBS->WAIT_FOR_GOOD_RECRUITMENT_DELAY));
    loop {
        state Future<Void> onChange = self->desiredDcIds.onChange();
        if (!self->desiredDcIds.get().present()) {
            self->changingDcIds.set(std::make_pair(false, self->desiredDcIds.get()));
        } else {
            auto& worker = self->id_worker[self->clusterControllerProcessId];
            uint8_t newFitness = ClusterControllerPriorityInfo::calculateDCFitness(
                worker.details.interf.locality.dcId(), self->desiredDcIds.get().get());
            self->changingDcIds.set(
                std::make_pair(worker.priorityInfo.dcFitness > newFitness, self->desiredDcIds.get()));

            TraceEvent("UpdateChangingDatacenter", self->id)
                .detail("OldFitness", worker.priorityInfo.dcFitness)
                .detail("NewFitness", newFitness);
            if (worker.priorityInfo.dcFitness > newFitness) {
                worker.priorityInfo.dcFitness = newFitness;
                if (!worker.reply.isSet()) {
                    worker.reply.send(RegisterWorkerReply(worker.details.processClass, worker.priorityInfo));
                }
            } else {
                state int currentFit = ProcessClass::BestFit;
                while (currentFit <= ProcessClass::NeverAssign) {
                    bool updated = false;
                    for (auto& it : self->id_worker) {
                        if ((!it.second.priorityInfo.isExcluded &&
                             it.second.priorityInfo.processClassFitness == currentFit) ||
                            currentFit == ProcessClass::NeverAssign) {
                            uint8_t fitness = ClusterControllerPriorityInfo::calculateDCFitness(
                                it.second.details.interf.locality.dcId(), self->changingDcIds.get().second.get());
                            if (it.first != self->clusterControllerProcessId &&
                                it.second.priorityInfo.dcFitness != fitness) {
                                updated = true;
                                it.second.priorityInfo.dcFitness = fitness;
                                if (!it.second.reply.isSet()) {
                                    it.second.reply.send(
                                        RegisterWorkerReply(it.second.details.processClass, it.second.priorityInfo));
                                }
                            }
                        }
                    }
                    if (updated && currentFit < ProcessClass::NeverAssign) {
                        wait(delay(SERVER_KNOBS->CC_CLASS_DELAY));
                    }
                    currentFit++;
                }
            }
        }

        wait(onChange);
    }
}

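// After CC_CHANGE_DELAY without further changes, promotes the pending datacenter preference from
// changingDcIds to changedDcIds and updates worker datacenter fitness accordingly.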
ACTOR Future<Void> updatedChangedDatacenters(ClusterControllerData* self) {
    state Future<Void> changeDelay = delay(SERVER_KNOBS->CC_CHANGE_DELAY);
    state Future<Void> onChange = self->changingDcIds.onChange();
    loop {
        choose {
            when(wait(onChange)) {
                changeDelay = delay(SERVER_KNOBS->CC_CHANGE_DELAY);
                onChange = self->changingDcIds.onChange();
            }
            when(wait(changeDelay)) {
                changeDelay = Never();
                onChange = self->changingDcIds.onChange();

                self->changedDcIds.set(self->changingDcIds.get());
                if (self->changedDcIds.get().second.present()) {
                    TraceEvent("UpdateChangedDatacenter", self->id).detail("CCFirst", self->changedDcIds.get().first);
                    if (!self->changedDcIds.get().first) {
                        auto& worker = self->id_worker[self->clusterControllerProcessId];
                        uint8_t newFitness = ClusterControllerPriorityInfo::calculateDCFitness(
                            worker.details.interf.locality.dcId(), self->changedDcIds.get().second.get());
                        if (worker.priorityInfo.dcFitness != newFitness) {
                            worker.priorityInfo.dcFitness = newFitness;
                            if (!worker.reply.isSet()) {
                                worker.reply.send(
                                    RegisterWorkerReply(worker.details.processClass, worker.priorityInfo));
                            }
                        }
                    } else {
                        state int currentFit = ProcessClass::BestFit;
                        while (currentFit <= ProcessClass::NeverAssign) {
                            bool updated = false;
                            for (auto& it : self->id_worker) {
                                if ((!it.second.priorityInfo.isExcluded &&
                                     it.second.priorityInfo.processClassFitness == currentFit) ||
                                    currentFit == ProcessClass::NeverAssign) {
                                    uint8_t fitness = ClusterControllerPriorityInfo::calculateDCFitness(
                                        it.second.details.interf.locality.dcId(),
                                        self->changedDcIds.get().second.get());
                                    if (it.first != self->clusterControllerProcessId &&
                                        it.second.priorityInfo.dcFitness != fitness) {
                                        updated = true;
                                        it.second.priorityInfo.dcFitness = fitness;
                                        if (!it.second.reply.isSet()) {
                                            it.second.reply.send(RegisterWorkerReply(it.second.details.processClass,
                                                                                     it.second.priorityInfo));
                                        }
                                    }
                                }
                            }
                            if (updated && currentFit < ProcessClass::NeverAssign) {
                                wait(delay(SERVER_KNOBS->CC_CLASS_DELAY));
                            }
                            currentFit++;
                        }
                    }
                }
            }
        }
    }
}

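// Periodically polls one primary and one remote tlog for queuing metrics and tracks the version difference
// between datacenters; calls checkOutstandingRequests when the difference drops back below
// MAX_VERSION_DIFFERENCE.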
ACTOR Future<Void> updateDatacenterVersionDifference(ClusterControllerData* self) {
    state double lastLogTime = 0;
    loop {
        self->versionDifferenceUpdated = false;
        if (self->db.serverInfo->get().recoveryState >= RecoveryState::ACCEPTING_COMMITS &&
            self->db.config.usableRegions == 1) {
            bool oldDifferenceTooLarge = !self->versionDifferenceUpdated ||
                                         self->datacenterVersionDifference >= SERVER_KNOBS->MAX_VERSION_DIFFERENCE;
            self->versionDifferenceUpdated = true;
            self->datacenterVersionDifference = 0;

            if (oldDifferenceTooLarge) {
                checkOutstandingRequests(self);
            }

            wait(self->db.serverInfo->onChange());
            continue;
        }

        state Optional<TLogInterface> primaryLog;
        state Optional<TLogInterface> remoteLog;
        if (self->db.serverInfo->get().recoveryState >= RecoveryState::ALL_LOGS_RECRUITED) {
            for (auto& logSet : self->db.serverInfo->get().logSystemConfig.tLogs) {
                if (logSet.isLocal && logSet.locality != tagLocalitySatellite) {
                    for (auto& tLog : logSet.tLogs) {
                        if (tLog.present()) {
                            primaryLog = tLog.interf();
                            break;
                        }
                    }
                }
                if (!logSet.isLocal) {
                    for (auto& tLog : logSet.tLogs) {
                        if (tLog.present()) {
                            remoteLog = tLog.interf();
                            break;
                        }
                    }
                }
            }
        }

        if (!primaryLog.present() || !remoteLog.present()) {
            wait(self->db.serverInfo->onChange());
            continue;
        }

        state Future<Void> onChange = self->db.serverInfo->onChange();
        loop {
            state Future<TLogQueuingMetricsReply> primaryMetrics =
                brokenPromiseToNever(primaryLog.get().getQueuingMetrics.getReply(TLogQueuingMetricsRequest()));
            state Future<TLogQueuingMetricsReply> remoteMetrics =
                brokenPromiseToNever(remoteLog.get().getQueuingMetrics.getReply(TLogQueuingMetricsRequest()));

            wait((success(primaryMetrics) && success(remoteMetrics)) || onChange);
            if (onChange.isReady()) {
                break;
            }

            if (primaryMetrics.get().v > 0 && remoteMetrics.get().v > 0) {
                bool oldDifferenceTooLarge = !self->versionDifferenceUpdated ||
                                             self->datacenterVersionDifference >= SERVER_KNOBS->MAX_VERSION_DIFFERENCE;
                self->versionDifferenceUpdated = true;
                self->datacenterVersionDifference = primaryMetrics.get().v - remoteMetrics.get().v;

                if (oldDifferenceTooLarge && self->datacenterVersionDifference < SERVER_KNOBS->MAX_VERSION_DIFFERENCE) {
                    checkOutstandingRequests(self);
                }

                if (now() - lastLogTime > SERVER_KNOBS->CLUSTER_CONTROLLER_LOGGING_DELAY) {
                    lastLogTime = now();
                    TraceEvent("DatacenterVersionDifference", self->id)
                        .detail("Difference", self->datacenterVersionDifference);
                }
            }

            wait(delay(SERVER_KNOBS->VERSION_LAG_METRIC_INTERVAL) || onChange);
            if (onChange.isReady()) {
                break;
            }
        }
    }
}

// A background actor that periodically checks remote DC health and calls `checkOutstandingRequests` if the
// remote DC recovers.
ACTOR Future<Void> updateRemoteDCHealth(ClusterControllerData* self) {
    // The purpose of the initial delay is to wait for the cluster to achieve a steady state before checking remote
    // DC health, since a healthy remote DC may trigger a failover, and we don't want that to happen too frequently.
    wait(delay(SERVER_KNOBS->INITIAL_UPDATE_CROSS_DC_INFO_DELAY));

    self->remoteDCMonitorStarted = true;

    // When remote DC health monitoring starts, we may have just recovered from a health degradation. Check whether
    // we can fail back if we are currently in the remote DC in the database configuration.
    if (!self->remoteTransactionSystemDegraded) {
        checkOutstandingRequests(self);
    }

    loop {
        bool oldRemoteTransactionSystemDegraded = self->remoteTransactionSystemDegraded;
        self->remoteTransactionSystemDegraded = self->remoteTransactionSystemContainsDegradedServers();

        if (oldRemoteTransactionSystemDegraded && !self->remoteTransactionSystemDegraded) {
            checkOutstandingRequests(self);
        }
        wait(delay(SERVER_KNOBS->CHECK_REMOTE_HEALTH_INTERVAL));
    }
}

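// Performs a self-conflicting empty commit; used by forced recovery to check whether the cluster can still
// commit transactions.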
ACTOR Future<Void> doEmptyCommit(Database cx) {
    state Transaction tr(cx);
    loop {
        try {
            tr.setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);
            tr.setOption(FDBTransactionOptions::LOCK_AWARE);
            tr.makeSelfConflicting();
            wait(tr.commit());
            return Void();
        } catch (Error& e) {
            wait(tr.onError(e));
        }
    }
}

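// Serves ForceRecoveryRequests: if a normal commit cannot complete in time, either re-prioritizes
// datacenters toward the requested dcId or forces a master failure in the current datacenter.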
ACTOR Future<Void> handleForcedRecoveries(ClusterControllerData* self, ClusterControllerFullInterface interf) {
    loop {
        state ForceRecoveryRequest req = waitNext(interf.clientInterface.forceRecovery.getFuture());
        TraceEvent("ForcedRecoveryStart", self->id)
            .detail("ClusterControllerDcId", self->clusterControllerDcId)
            .detail("DcId", req.dcId.printable());
        state Future<Void> fCommit = doEmptyCommit(self->cx);
        wait(fCommit || delay(SERVER_KNOBS->FORCE_RECOVERY_CHECK_DELAY));
        if (!fCommit.isReady() || fCommit.isError()) {
            if (self->clusterControllerDcId != req.dcId) {
                std::vector<Optional<Key>> dcPriority;
                dcPriority.push_back(req.dcId);
                dcPriority.push_back(self->clusterControllerDcId);
                self->desiredDcIds.set(dcPriority);
            } else {
                self->db.forceRecovery = true;
                self->db.forceMasterFailure.trigger();
            }
            wait(fCommit);
        }
        TraceEvent("ForcedRecoveryFinish", self->id).log();
        self->db.forceRecovery = false;
        req.reply.send(Void());
    }
}

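// Recruits a data distributor on the best-fitting worker and records its interface in ServerDBInfo,
// halting any previously registered distributor that differs from the newly recruited one.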
ACTOR Future<Void> startDataDistributor(ClusterControllerData* self) {
    wait(delay(0.0)); // If master fails at the same time, give it a chance to clear master PID.

    TraceEvent("CCStartDataDistributor", self->id).log();
    loop {
        try {
            state bool noDistributor = !self->db.serverInfo->get().distributor.present();
            while (!self->masterProcessId.present() ||
                   self->masterProcessId != self->db.serverInfo->get().master.locality.processId() ||
                   self->db.serverInfo->get().recoveryState < RecoveryState::ACCEPTING_COMMITS) {
                wait(self->db.serverInfo->onChange() || delay(SERVER_KNOBS->WAIT_FOR_GOOD_RECRUITMENT_DELAY));
            }
            if (noDistributor && self->db.serverInfo->get().distributor.present()) {
                // Existing distributor registers while waiting, so skip.
                return Void();
            }

            std::map<Optional<Standalone<StringRef>>, int> idUsed = self->getUsedIds();
            WorkerFitnessInfo ddWorker = self->getWorkerForRoleInDatacenter(self->clusterControllerDcId,
                                                                            ProcessClass::DataDistributor,
                                                                            ProcessClass::NeverAssign,
                                                                            self->db.config,
                                                                            idUsed);
            InitializeDataDistributorRequest req(deterministicRandom()->randomUniqueID());
            state WorkerDetails worker = ddWorker.worker;
            if (self->onMasterIsBetter(worker, ProcessClass::DataDistributor)) {
                worker = self->id_worker[self->masterProcessId.get()].details;
            }

            self->recruitingDistributorID = req.reqId;
            TraceEvent("CCRecruitDataDistributor", self->id)
                .detail("Addr", worker.interf.address())
                .detail("DDID", req.reqId);

            ErrorOr<DataDistributorInterface> ddInterf = wait(worker.interf.dataDistributor.getReplyUnlessFailedFor(
                req, SERVER_KNOBS->WAIT_FOR_DISTRIBUTOR_JOIN_DELAY, 0));

            if (ddInterf.present()) {
                self->recruitDistributor.set(false);
                self->recruitingDistributorID = ddInterf.get().id();
                const auto& distributor = self->db.serverInfo->get().distributor;
                TraceEvent("CCDataDistributorRecruited", self->id)
                    .detail("Addr", worker.interf.address())
                    .detail("DDID", ddInterf.get().id());
                if (distributor.present() && distributor.get().id() != ddInterf.get().id() &&
                    self->id_worker.count(distributor.get().locality.processId())) {

                    TraceEvent("CCHaltDataDistributorAfterRecruit", self->id)
                        .detail("DDID", distributor.get().id())
                        .detail("DcID", printable(self->clusterControllerDcId));

                    DataDistributorSingleton(distributor).halt(self, distributor.get().locality.processId());
                }
                if (!distributor.present() || distributor.get().id() != ddInterf.get().id()) {
                    self->db.setDistributor(ddInterf.get());
                }
                checkOutstandingRequests(self);
                return Void();
            }
        } catch (Error& e) {
            TraceEvent("CCDataDistributorRecruitError", self->id).error(e);
            if (e.code() != error_code_no_more_servers) {
                throw;
            }
        }
        wait(lowPriorityDelay(SERVER_KNOBS->ATTEMPT_RECRUITMENT_DELAY));
    }
}

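// Keeps a data distributor running: waits for the current one to fail (or for a re-recruitment signal) and
// otherwise starts a new recruitment.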
ACTOR Future<Void> monitorDataDistributor(ClusterControllerData* self) {
    while (self->db.serverInfo->get().recoveryState < RecoveryState::ACCEPTING_COMMITS) {
        wait(self->db.serverInfo->onChange());
    }

    loop {
        if (self->db.serverInfo->get().distributor.present() && !self->recruitDistributor.get()) {
            choose {
                when(wait(waitFailureClient(self->db.serverInfo->get().distributor.get().waitFailure,
                                            SERVER_KNOBS->DD_FAILURE_TIME))) {
                    TraceEvent("CCDataDistributorDied", self->id)
                        .detail("DDID", self->db.serverInfo->get().distributor.get().id());
                    self->db.clearInterf(ProcessClass::DataDistributorClass);
                }
                when(wait(self->recruitDistributor.onChange())) {}
            }
        } else {
            wait(startDataDistributor(self));
        }
    }
}

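// Recruits a ratekeeper on the best-fitting worker and records its interface in ServerDBInfo, halting any
// previously registered ratekeeper that differs from the newly recruited one.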
ACTOR Future<Void> startRatekeeper(ClusterControllerData* self) {
    wait(delay(0.0)); // If master fails at the same time, give it a chance to clear master PID.

    TraceEvent("CCStartRatekeeper", self->id).log();
    loop {
        try {
            state bool no_ratekeeper = !self->db.serverInfo->get().ratekeeper.present();
            while (!self->masterProcessId.present() ||
                   self->masterProcessId != self->db.serverInfo->get().master.locality.processId() ||
                   self->db.serverInfo->get().recoveryState < RecoveryState::ACCEPTING_COMMITS) {
                wait(self->db.serverInfo->onChange() || delay(SERVER_KNOBS->WAIT_FOR_GOOD_RECRUITMENT_DELAY));
            }
            if (no_ratekeeper && self->db.serverInfo->get().ratekeeper.present()) {
                // Existing ratekeeper registers while waiting, so skip.
                return Void();
            }

            std::map<Optional<Standalone<StringRef>>, int> id_used = self->getUsedIds();
            WorkerFitnessInfo rkWorker = self->getWorkerForRoleInDatacenter(self->clusterControllerDcId,
                                                                            ProcessClass::Ratekeeper,
                                                                            ProcessClass::NeverAssign,
                                                                            self->db.config,
                                                                            id_used);
            InitializeRatekeeperRequest req(deterministicRandom()->randomUniqueID());
            state WorkerDetails worker = rkWorker.worker;
            if (self->onMasterIsBetter(worker, ProcessClass::Ratekeeper)) {
                worker = self->id_worker[self->masterProcessId.get()].details;
            }

            self->recruitingRatekeeperID = req.reqId;
            TraceEvent("CCRecruitRatekeeper", self->id)
                .detail("Addr", worker.interf.address())
                .detail("RKID", req.reqId);

            ErrorOr<RatekeeperInterface> interf = wait(
                worker.interf.ratekeeper.getReplyUnlessFailedFor(req, SERVER_KNOBS->WAIT_FOR_RATEKEEPER_JOIN_DELAY, 0));
            if (interf.present()) {
                self->recruitRatekeeper.set(false);
                self->recruitingRatekeeperID = interf.get().id();
                const auto& ratekeeper = self->db.serverInfo->get().ratekeeper;
                TraceEvent("CCRatekeeperRecruited", self->id)
                    .detail("Addr", worker.interf.address())
                    .detail("RKID", interf.get().id());
                if (ratekeeper.present() && ratekeeper.get().id() != interf.get().id() &&
                    self->id_worker.count(ratekeeper.get().locality.processId())) {
                    TraceEvent("CCHaltRatekeeperAfterRecruit", self->id)
                        .detail("RKID", ratekeeper.get().id())
                        .detail("DcID", printable(self->clusterControllerDcId));
                    RatekeeperSingleton(ratekeeper).halt(self, ratekeeper.get().locality.processId());
                }
                if (!ratekeeper.present() || ratekeeper.get().id() != interf.get().id()) {
                    self->db.setRatekeeper(interf.get());
                }
                checkOutstandingRequests(self);
                return Void();
            }
        } catch (Error& e) {
            TraceEvent("CCRatekeeperRecruitError", self->id).error(e);
            if (e.code() != error_code_no_more_servers) {
                throw;
            }
        }
        wait(lowPriorityDelay(SERVER_KNOBS->ATTEMPT_RECRUITMENT_DELAY));
    }
}

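// Keeps a ratekeeper running: waits for the current one to fail (or for a re-recruitment signal) and
// otherwise starts a new recruitment.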
ACTOR Future<Void> monitorRatekeeper(ClusterControllerData* self) {
    while (self->db.serverInfo->get().recoveryState < RecoveryState::ACCEPTING_COMMITS) {
        wait(self->db.serverInfo->onChange());
    }

    loop {
        if (self->db.serverInfo->get().ratekeeper.present() && !self->recruitRatekeeper.get()) {
            choose {
                when(wait(waitFailureClient(self->db.serverInfo->get().ratekeeper.get().waitFailure,
                                            SERVER_KNOBS->RATEKEEPER_FAILURE_TIME))) {
                    TraceEvent("CCRatekeeperDied", self->id)
                        .detail("RKID", self->db.serverInfo->get().ratekeeper.get().id());
                    self->db.clearInterf(ProcessClass::RatekeeperClass);
                }
                when(wait(self->recruitRatekeeper.onChange())) {}
            }
        } else {
            wait(startRatekeeper(self));
        }
    }
}

// Acquires the blob manager (BM) lock by getting the next epoch number.
ACTOR Future<int64_t> getNextBMEpoch(ClusterControllerData* self) {
    state Reference<ReadYourWritesTransaction> tr = makeReference<ReadYourWritesTransaction>(self->cx);

    loop {
        tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
        tr->setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);
        try {
            Optional<Value> oldEpoch = wait(tr->get(blobManagerEpochKey));
            state int64_t newEpoch = oldEpoch.present() ? decodeBlobManagerEpochValue(oldEpoch.get()) + 1 : 1;
            tr->set(blobManagerEpochKey, blobManagerEpochValueFor(newEpoch));

            wait(tr->commit());
            return newEpoch;
        } catch (Error& e) {
            printf("Acquiring blob manager lock got error %s\n", e.name());
            wait(tr->onError(e));
        }
    }
}

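// Recruits a blob manager on the best-fitting worker, acquiring a new blob manager epoch first, and records
// its interface in ServerDBInfo; halts any previously registered blob manager that differs from the newly
// recruited one.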
ACTOR Future<Void> startBlobManager(ClusterControllerData* self) {
	wait(delay(0.0)); // If master fails at the same time, give it a chance to clear master PID.

	TraceEvent("CCStartBlobManager", self->id).log();
	loop {
		try {
			state bool noBlobManager = !self->db.serverInfo->get().blobManager.present();
			while (!self->masterProcessId.present() ||
			       self->masterProcessId != self->db.serverInfo->get().master.locality.processId() ||
			       self->db.serverInfo->get().recoveryState < RecoveryState::ACCEPTING_COMMITS) {
				wait(self->db.serverInfo->onChange() || delay(SERVER_KNOBS->WAIT_FOR_GOOD_RECRUITMENT_DELAY));
			}
			if (noBlobManager && self->db.serverInfo->get().blobManager.present()) {
				// Existing blob manager registers while waiting, so skip.
				return Void();
			}

			state std::map<Optional<Standalone<StringRef>>, int> id_used = self->getUsedIds();
			state WorkerFitnessInfo bmWorker = self->getWorkerForRoleInDatacenter(self->clusterControllerDcId,
			                                                                      ProcessClass::BlobManager,
			                                                                      ProcessClass::NeverAssign,
			                                                                      self->db.config,
			                                                                      id_used);

			int64_t nextEpoch = wait(getNextBMEpoch(self));
			if (!self->masterProcessId.present() ||
			    self->masterProcessId != self->db.serverInfo->get().master.locality.processId() ||
			    self->db.serverInfo->get().recoveryState < RecoveryState::ACCEPTING_COMMITS) {
				continue;
			}
			InitializeBlobManagerRequest req(deterministicRandom()->randomUniqueID(), nextEpoch);
			state WorkerDetails worker = bmWorker.worker;
			if (self->onMasterIsBetter(worker, ProcessClass::BlobManager)) {
				worker = self->id_worker[self->masterProcessId.get()].details;
			}

			self->recruitingBlobManagerID = req.reqId;
			TraceEvent("CCRecruitBlobManager", self->id)
			    .detail("Addr", worker.interf.address())
			    .detail("BMID", req.reqId);

			ErrorOr<BlobManagerInterface> interf = wait(worker.interf.blobManager.getReplyUnlessFailedFor(
			    req, SERVER_KNOBS->WAIT_FOR_BLOB_MANAGER_JOIN_DELAY, 0));
			if (interf.present()) {
				self->recruitBlobManager.set(false);
				self->recruitingBlobManagerID = interf.get().id();
				const auto& blobManager = self->db.serverInfo->get().blobManager;
				TraceEvent("CCBlobManagerRecruited", self->id)
				    .detail("Addr", worker.interf.address())
				    .detail("BMID", interf.get().id());
				if (blobManager.present() && blobManager.get().id() != interf.get().id() &&
				    self->id_worker.count(blobManager.get().locality.processId())) {
					TraceEvent("CCHaltBlobManagerAfterRecruit", self->id)
					    .detail("BMID", blobManager.get().id())
					    .detail("DcID", printable(self->clusterControllerDcId));
					BlobManagerSingleton(blobManager).halt(self, blobManager.get().locality.processId());
				}
				if (!blobManager.present() || blobManager.get().id() != interf.get().id()) {
					self->db.setBlobManager(interf.get());
				}
				checkOutstandingRequests(self);
				return Void();
			}
		} catch (Error& e) {
			TraceEvent("CCBlobManagerRecruitError", self->id).error(e);
			if (e.code() != error_code_no_more_servers) {
				throw;
			}
		}
		wait(lowPriorityDelay(SERVER_KNOBS->ATTEMPT_RECRUITMENT_DELAY));
	}
}

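// Sets a watch on the blob_granules_enabled configuration key and returns once the key changes.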
ACTOR Future<Void> watchBlobGranulesConfigKey(ClusterControllerData* self) {
	state Reference<ReadYourWritesTransaction> tr = makeReference<ReadYourWritesTransaction>(self->cx);

	loop {
		try {
			tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
			tr->setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);
			Key blobGranuleConfigKey = configKeysPrefix.withSuffix(StringRef("blob_granules_enabled"));
			state Future<Void> watch = tr->watch(blobGranuleConfigKey);
			wait(tr->commit());
			wait(watch);
			return Void();
		} catch (Error& e) {
			wait(tr->onError(e));
		}
	}
}

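// Monitors the blob manager singleton: restarts recruitment when the blob manager fails, and reacts to
// changes of the blob granules configuration by halting granule work or recruiting a blob manager.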
ACTOR Future<Void> monitorBlobManager(ClusterControllerData* self) {
	while (self->db.serverInfo->get().recoveryState < RecoveryState::ACCEPTING_COMMITS) {
		wait(self->db.serverInfo->onChange());
	}

	loop {
		state Future<Void> watchConfigChange = watchBlobGranulesConfigKey(self);
		if (self->db.serverInfo->get().blobManager.present() && !self->recruitBlobManager.get()) {
			choose {
				when(wait(waitFailureClient(self->db.serverInfo->get().blobManager.get().waitFailure,
				                            SERVER_KNOBS->BLOB_MANAGER_FAILURE_TIME))) {
					TraceEvent("CCBlobManagerDied", self->id)
					    .detail("BMID", self->db.serverInfo->get().blobManager.get().id());
					self->db.clearInterf(ProcessClass::BlobManagerClass);
				}
				when(wait(self->recruitBlobManager.onChange())) {}
				when(wait(watchConfigChange)) {
					// if there is a blob manager present but blob granules are now disabled, stop the BM
					if (!self->db.config.blobGranulesEnabled) {
						const auto& blobManager = self->db.serverInfo->get().blobManager;
						BlobManagerSingleton(blobManager)
						    .haltBlobGranules(self, blobManager.get().locality.processId());
					}
				}
			}
		} else {
			wait(watchConfigChange);
			// if there is no blob manager present but blob granules are now enabled, recruit a BM
			if (self->db.config.blobGranulesEnabled) {
				wait(startBlobManager(self));
			}
		}
	}
}

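// Broadcasts the latest ServerDBInfo to registered workers whenever it changes or an update is triggered,
// batching updates and retrying the endpoints that were not successfully updated.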
ACTOR Future<Void> dbInfoUpdater(ClusterControllerData* self) {
	state Future<Void> dbInfoChange = self->db.serverInfo->onChange();
	state Future<Void> updateDBInfo = self->updateDBInfo.onTrigger();
	loop {
		choose {
			when(wait(updateDBInfo)) { wait(delay(SERVER_KNOBS->DBINFO_BATCH_DELAY) || dbInfoChange); }
			when(wait(dbInfoChange)) {}
		}

		UpdateServerDBInfoRequest req;
		if (dbInfoChange.isReady()) {
			for (auto& it : self->id_worker) {
				req.broadcastInfo.push_back(it.second.details.interf.updateServerDBInfo.getEndpoint());
			}
		} else {
			for (auto it : self->removedDBInfoEndpoints) {
				self->updateDBInfoEndpoints.erase(it);
			}
			req.broadcastInfo =
			    std::vector<Endpoint>(self->updateDBInfoEndpoints.begin(), self->updateDBInfoEndpoints.end());
		}

		self->updateDBInfoEndpoints.clear();
		self->removedDBInfoEndpoints.clear();

		dbInfoChange = self->db.serverInfo->onChange();
		updateDBInfo = self->updateDBInfo.onTrigger();

		req.serializedDbInfo =
		    BinaryWriter::toValue(self->db.serverInfo->get(), AssumeVersion(g_network->protocolVersion()));

		TraceEvent("DBInfoStartBroadcast", self->id).log();
		choose {
			when(std::vector<Endpoint> notUpdated =
			         wait(broadcastDBInfoRequest(req, SERVER_KNOBS->DBINFO_SEND_AMOUNT, Optional<Endpoint>(), false))) {
				TraceEvent("DBInfoFinishBroadcast", self->id).detail("NotUpdated", notUpdated.size());
				if (notUpdated.size()) {
					self->updateDBInfoEndpoints.insert(notUpdated.begin(), notUpdated.end());
					self->updateDBInfo.trigger();
				}
			}
			when(wait(dbInfoChange)) {}
		}
	}
}

// The actor that periodically monitors the health of tracked workers.
ACTOR Future<Void> workerHealthMonitor(ClusterControllerData* self) {
	loop {
		try {
			while (!self->goodRecruitmentTime.isReady()) {
				wait(lowPriorityDelay(SERVER_KNOBS->CC_WORKER_HEALTH_CHECKING_INTERVAL));
			}

			self->degradedServers = self->getServersWithDegradedLink();

			// Compare `self->degradedServers` with `self->excludedDegradedServers` and remove those that have
			// recovered.
			for (auto it = self->excludedDegradedServers.begin(); it != self->excludedDegradedServers.end();) {
				if (self->degradedServers.find(*it) == self->degradedServers.end()) {
					self->excludedDegradedServers.erase(it++);
				} else {
					++it;
				}
			}

			if (!self->degradedServers.empty()) {
				std::string degradedServerString;
				for (const auto& server : self->degradedServers) {
					degradedServerString += server.toString() + " ";
				}
				TraceEvent("ClusterControllerHealthMonitor").detail("DegradedServers", degradedServerString);

				// Check if the cluster controller should trigger a recovery to exclude any degraded servers from
				// the transaction system.
				if (self->shouldTriggerRecoveryDueToDegradedServers()) {
					if (SERVER_KNOBS->CC_HEALTH_TRIGGER_RECOVERY) {
						if (self->recentRecoveryCountDueToHealth() < SERVER_KNOBS->CC_MAX_HEALTH_RECOVERY_COUNT) {
							self->recentHealthTriggeredRecoveryTime.push(now());
							self->excludedDegradedServers = self->degradedServers;
							TraceEvent("DegradedServerDetectedAndTriggerRecovery")
							    .detail("RecentRecoveryCountDueToHealth", self->recentRecoveryCountDueToHealth());
							self->db.forceMasterFailure.trigger();
						}
					} else {
						self->excludedDegradedServers.clear();
						TraceEvent("DegradedServerDetectedAndSuggestRecovery").log();
					}
				} else if (self->shouldTriggerFailoverDueToDegradedServers()) {
					double ccUpTime = now() - machineStartTime();
					if (SERVER_KNOBS->CC_HEALTH_TRIGGER_FAILOVER &&
					    ccUpTime > SERVER_KNOBS->INITIAL_UPDATE_CROSS_DC_INFO_DELAY) {
						TraceEvent("DegradedServerDetectedAndTriggerFailover").log();
						std::vector<Optional<Key>> dcPriority;
						auto remoteDcId = self->db.config.regions[0].dcId == self->clusterControllerDcId.get()
						                      ? self->db.config.regions[1].dcId
						                      : self->db.config.regions[0].dcId;

						// Switch the current primary DC and remote DC in desiredDcIds, so that the remote DC
						// becomes the new primary, and the primary DC becomes the new remote.
						dcPriority.push_back(remoteDcId);
						dcPriority.push_back(self->clusterControllerDcId);
						self->desiredDcIds.set(dcPriority);
					} else {
						TraceEvent("DegradedServerDetectedAndSuggestFailover").detail("CCUpTime", ccUpTime);
					}
				}
			}

			wait(delay(SERVER_KNOBS->CC_WORKER_HEALTH_CHECKING_INTERVAL));
		} catch (Error& e) {
			TraceEvent(SevWarnAlways, "ClusterControllerHealthMonitorError").error(e);
			if (e.code() == error_code_actor_cancelled) {
				throw;
			}
		}
	}
}

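// The main loop of an elected cluster controller: starts the long-running monitoring actors and then
// serves client and worker requests until leadership is lost or the actor collection returns.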
ACTOR Future<Void> clusterControllerCore(ClusterControllerFullInterface interf,
                                         Future<Void> leaderFail,
                                         ServerCoordinators coordinators,
                                         LocalityData locality,
                                         ConfigDBType configDBType) {
	state ClusterControllerData self(interf, locality, coordinators);
	state ConfigBroadcaster configBroadcaster(coordinators, configDBType);
	state Future<Void> coordinationPingDelay = delay(SERVER_KNOBS->WORKER_COORDINATION_PING_DELAY);
	state uint64_t step = 0;
	state Future<ErrorOr<Void>> error = errorOr(actorCollection(self.addActor.getFuture()));

	self.addActor.send(clusterWatchDatabase(&self, &self.db)); // Start the master database
	self.addActor.send(self.updateWorkerList.init(self.db.db));
	self.addActor.send(statusServer(interf.clientInterface.databaseStatus.getFuture(),
	                                &self,
	                                coordinators,
	                                (configDBType == ConfigDBType::DISABLED) ? nullptr : &configBroadcaster));
	self.addActor.send(timeKeeper(&self));
	self.addActor.send(monitorProcessClasses(&self));
	self.addActor.send(monitorServerInfoConfig(&self.db));
	self.addActor.send(monitorGlobalConfig(&self.db));
	self.addActor.send(monitorClientLibChangeCounter(&self.db));
	self.addActor.send(updatedChangingDatacenters(&self));
	self.addActor.send(updatedChangedDatacenters(&self));
	self.addActor.send(updateDatacenterVersionDifference(&self));
	self.addActor.send(handleForcedRecoveries(&self, interf));
	self.addActor.send(monitorDataDistributor(&self));
	self.addActor.send(monitorRatekeeper(&self));
	self.addActor.send(monitorBlobManager(&self));
	// self.addActor.send(monitorTSSMapping(&self));
	self.addActor.send(dbInfoUpdater(&self));
	self.addActor.send(traceCounters("ClusterControllerMetrics",
	                                 self.id,
	                                 SERVER_KNOBS->STORAGE_LOGGING_DELAY,
	                                 &self.clusterControllerMetrics,
	                                 self.id.toString() + "/ClusterControllerMetrics"));
	self.addActor.send(traceRole(Role::CLUSTER_CONTROLLER, interf.id()));
	// printf("%s: I am the cluster controller\n", g_network->getLocalAddress().toString().c_str());

	if (SERVER_KNOBS->CC_ENABLE_WORKER_HEALTH_MONITOR) {
		self.addActor.send(workerHealthMonitor(&self));
		self.addActor.send(updateRemoteDCHealth(&self));
	}

	loop choose {
		when(ErrorOr<Void> err = wait(error)) {
			if (err.isError()) {
				endRole(Role::CLUSTER_CONTROLLER, interf.id(), "Stop Received Error", false, err.getError());
			} else {
				endRole(Role::CLUSTER_CONTROLLER, interf.id(), "Stop Received Signal", true);
			}

			// We shut down normally even if there was a serious error (so this fdbserver may be re-elected cluster
			// controller)
			return Void();
		}
		when(OpenDatabaseRequest req = waitNext(interf.clientInterface.openDatabase.getFuture())) {
			++self.openDatabaseRequests;
			self.addActor.send(clusterOpenDatabase(&self.db, req));
		}
		when(RecruitFromConfigurationRequest req = waitNext(interf.recruitFromConfiguration.getFuture())) {
			self.addActor.send(clusterRecruitFromConfiguration(&self, req));
		}
		when(RecruitRemoteFromConfigurationRequest req = waitNext(interf.recruitRemoteFromConfiguration.getFuture())) {
			self.addActor.send(clusterRecruitRemoteFromConfiguration(&self, req));
		}
		when(RecruitStorageRequest req = waitNext(interf.recruitStorage.getFuture())) {
			clusterRecruitStorage(&self, req);
		}
		when(RecruitBlobWorkerRequest req = waitNext(interf.recruitBlobWorker.getFuture())) {
			clusterRecruitBlobWorker(&self, req);
		}
		when(RegisterWorkerRequest req = waitNext(interf.registerWorker.getFuture())) {
			++self.registerWorkerRequests;
			registerWorker(req, &self, (configDBType == ConfigDBType::DISABLED) ? nullptr : &configBroadcaster);
		}
		when(GetWorkersRequest req = waitNext(interf.getWorkers.getFuture())) {
			++self.getWorkersRequests;
			std::vector<WorkerDetails> workers;

			for (auto const& [id, worker] : self.id_worker) {
				if ((req.flags & GetWorkersRequest::NON_EXCLUDED_PROCESSES_ONLY) &&
				    self.db.config.isExcludedServer(worker.details.interf.addresses())) {
					continue;
				}

				if ((req.flags & GetWorkersRequest::TESTER_CLASS_ONLY) &&
				    worker.details.processClass.classType() != ProcessClass::TesterClass) {
					continue;
				}

				workers.push_back(worker.details);
			}

			req.reply.send(workers);
		}
		when(GetClientWorkersRequest req = waitNext(interf.clientInterface.getClientWorkers.getFuture())) {
			++self.getClientWorkersRequests;
			std::vector<ClientWorkerInterface> workers;
			for (auto& it : self.id_worker) {
				if (it.second.details.processClass.classType() != ProcessClass::TesterClass) {
					workers.push_back(it.second.details.interf.clientInterface);
				}
			}
			req.reply.send(workers);
		}
		when(wait(coordinationPingDelay)) {
			CoordinationPingMessage message(self.id, step++);
			for (auto& it : self.id_worker)
				it.second.details.interf.coordinationPing.send(message);
			coordinationPingDelay = delay(SERVER_KNOBS->WORKER_COORDINATION_PING_DELAY);
			TraceEvent("CoordinationPingSent", self.id).detail("TimeStep", message.timeStep);
		}
		when(RegisterMasterRequest req = waitNext(interf.registerMaster.getFuture())) {
			++self.registerMasterRequests;
			clusterRegisterMaster(&self, req);
		}
		when(UpdateWorkerHealthRequest req = waitNext(interf.updateWorkerHealth.getFuture())) {
			if (SERVER_KNOBS->CC_ENABLE_WORKER_HEALTH_MONITOR) {
				self.updateWorkerHealth(req);
			}
		}
		when(GetServerDBInfoRequest req = waitNext(interf.getServerDBInfo.getFuture())) {
			self.addActor.send(clusterGetServerInfo(&self.db, req.knownServerInfoID, req.reply));
		}
		when(wait(leaderFail)) {
			// We are no longer the leader if this has changed.
			endRole(Role::CLUSTER_CONTROLLER, interf.id(), "Leader Replaced", true);
			TEST(true); // Lost Cluster Controller Role
			return Void();
		}
		when(ReplyPromise<Void> ping = waitNext(interf.clientInterface.ping.getFuture())) { ping.send(Void()); }
	}
}

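// Returns once the candidate interface has a pending message (after REPLACE_INTERFACE_DELAY), polling
// every REPLACE_INTERFACE_CHECK_DELAY seconds.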
ACTOR Future<Void> replaceInterface(ClusterControllerFullInterface interf) {
	loop {
		if (interf.hasMessage()) {
			wait(delay(SERVER_KNOBS->REPLACE_INTERFACE_DELAY));
			return Void();
		}
		wait(delay(SERVER_KNOBS->REPLACE_INTERFACE_CHECK_DELAY));
	}
}

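// Registers this process as a cluster controller candidate with the given coordinators and, once elected,
// runs clusterControllerCore for as long as the role is held.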
ACTOR Future<Void> clusterController(ServerCoordinators coordinators,
                                     Reference<AsyncVar<Optional<ClusterControllerFullInterface>>> currentCC,
                                     bool hasConnected,
                                     Reference<AsyncVar<ClusterControllerPriorityInfo>> asyncPriorityInfo,
                                     LocalityData locality,
                                     ConfigDBType configDBType) {
	loop {
		state ClusterControllerFullInterface cci;
		state bool inRole = false;
		cci.initEndpoints();
		try {
			// Register as a possible leader; wait to be elected
			state Future<Void> leaderFail =
			    tryBecomeLeader(coordinators, cci, currentCC, hasConnected, asyncPriorityInfo);
			state Future<Void> shouldReplace = replaceInterface(cci);

			while (!currentCC->get().present() || currentCC->get().get() != cci) {
				choose {
					when(wait(currentCC->onChange())) {}
					when(wait(leaderFail)) {
						ASSERT(false);
						throw internal_error();
					}
					when(wait(shouldReplace)) { break; }
				}
			}
			if (!shouldReplace.isReady()) {
				shouldReplace = Future<Void>();
				hasConnected = true;
				startRole(Role::CLUSTER_CONTROLLER, cci.id(), UID());
				inRole = true;

				wait(clusterControllerCore(cci, leaderFail, coordinators, locality, configDBType));
			}
		} catch (Error& e) {
			if (inRole)
				endRole(Role::CLUSTER_CONTROLLER,
				        cci.id(),
				        "Error",
				        e.code() == error_code_actor_cancelled || e.code() == error_code_coordinators_changed,
				        e);
			else
				TraceEvent(e.code() == error_code_coordinators_changed ? SevInfo : SevError,
				           "ClusterControllerCandidateError",
				           cci.id())
				    .error(e);
			throw;
		}
	}
}

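// Worker-facing entry point: waits for local disk files to be recovered, then repeatedly runs the cluster
// controller against the current coordinators, retrying when the coordinators change.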
ACTOR Future<Void> clusterController(Reference<IClusterConnectionRecord> connRecord,
                                     Reference<AsyncVar<Optional<ClusterControllerFullInterface>>> currentCC,
                                     Reference<AsyncVar<ClusterControllerPriorityInfo>> asyncPriorityInfo,
                                     Future<Void> recoveredDiskFiles,
                                     LocalityData locality,
                                     ConfigDBType configDBType) {
	wait(recoveredDiskFiles);
	state bool hasConnected = false;
	loop {
		try {
			ServerCoordinators coordinators(connRecord);
			wait(clusterController(coordinators, currentCC, hasConnected, asyncPriorityInfo, locality, configDBType));
		} catch (Error& e) {
			if (e.code() != error_code_coordinators_changed)
				throw; // Expected to terminate fdbserver
		}

		hasConnected = true;
	}
}

namespace {

// Tests that `ClusterControllerData::updateWorkerHealth()` can update `ClusterControllerData::workerHealth` based on
// an `UpdateWorkerHealthRequest` correctly.
TEST_CASE("/fdbserver/clustercontroller/updateWorkerHealth") {
	// Create a testing ClusterControllerData. Most of the internal states do not matter in this test.
	state ClusterControllerData data(ClusterControllerFullInterface(),
	                                 LocalityData(),
	                                 ServerCoordinators(Reference<IClusterConnectionRecord>(
	                                     new ClusterConnectionMemoryRecord(ClusterConnectionString()))));
	state NetworkAddress workerAddress(IPAddress(0x01010101), 1);
	state NetworkAddress badPeer1(IPAddress(0x02020202), 1);
	state NetworkAddress badPeer2(IPAddress(0x03030303), 1);
	state NetworkAddress badPeer3(IPAddress(0x04040404), 1);

	// Create an `UpdateWorkerHealthRequest` with two bad peers, and they should appear in the `workerAddress`'s
	// degradedPeers.
	{
		UpdateWorkerHealthRequest req;
		req.address = workerAddress;
		req.degradedPeers.push_back(badPeer1);
		req.degradedPeers.push_back(badPeer2);
		data.updateWorkerHealth(req);
		ASSERT(data.workerHealth.find(workerAddress) != data.workerHealth.end());
		auto& health = data.workerHealth[workerAddress];
		ASSERT_EQ(health.degradedPeers.size(), 2);
		ASSERT(health.degradedPeers.find(badPeer1) != health.degradedPeers.end());
		ASSERT_EQ(health.degradedPeers[badPeer1].startTime, health.degradedPeers[badPeer1].lastRefreshTime);
		ASSERT(health.degradedPeers.find(badPeer2) != health.degradedPeers.end());
	}

	// Create an `UpdateWorkerHealthRequest` with two bad peers, one from the previous request and a new one.
	// The peer from the previous request should have lastRefreshTime updated.
	// The other peer from the previous request, not included here, should be removed.
	{
		// Advance time so that now() is guaranteed to return a larger value than before.
		wait(delay(0.001));
		UpdateWorkerHealthRequest req;
		req.address = workerAddress;
		req.degradedPeers.push_back(badPeer1);
		req.degradedPeers.push_back(badPeer3);
		data.updateWorkerHealth(req);
		ASSERT(data.workerHealth.find(workerAddress) != data.workerHealth.end());
		auto& health = data.workerHealth[workerAddress];
		ASSERT_EQ(health.degradedPeers.size(), 2);
		ASSERT(health.degradedPeers.find(badPeer1) != health.degradedPeers.end());
		ASSERT_LT(health.degradedPeers[badPeer1].startTime, health.degradedPeers[badPeer1].lastRefreshTime);
		ASSERT(health.degradedPeers.find(badPeer2) == health.degradedPeers.end());
		ASSERT(health.degradedPeers.find(badPeer3) != health.degradedPeers.end());
	}

	// Create an `UpdateWorkerHealthRequest` with empty `degradedPeers`, which should remove the worker from
	// `workerHealth`.
	{
		UpdateWorkerHealthRequest req;
		req.address = workerAddress;
		data.updateWorkerHealth(req);
		ASSERT(data.workerHealth.find(workerAddress) == data.workerHealth.end());
	}

	return Void();
}

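// Tests that `ClusterControllerData::updateRecoveredWorkers()` drops degraded peers whose reports have
// expired and removes workers that no longer have any degraded peers.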
TEST_CASE("/fdbserver/clustercontroller/updateRecoveredWorkers") {
|
|
|
|
// Create a testing ClusterControllerData. Most of the internal states do not matter in this test.
|
|
|
|
ClusterControllerData data(ClusterControllerFullInterface(),
|
|
|
|
LocalityData(),
|
2021-10-11 11:44:56 +08:00
|
|
|
ServerCoordinators(Reference<IClusterConnectionRecord>(
|
|
|
|
new ClusterConnectionMemoryRecord(ClusterConnectionString()))));
|
2021-06-29 12:45:02 +08:00
|
|
|
NetworkAddress worker1(IPAddress(0x01010101), 1);
|
|
|
|
NetworkAddress worker2(IPAddress(0x11111111), 1);
|
|
|
|
NetworkAddress badPeer1(IPAddress(0x02020202), 1);
|
|
|
|
NetworkAddress badPeer2(IPAddress(0x03030303), 1);
|
|
|
|
|
|
|
|
// Create following test scenario:
|
|
|
|
// worker1 -> badPeer1 active
|
|
|
|
// worker1 -> badPeer2 recovered
|
|
|
|
// worker2 -> badPeer2 recovered
|
|
|
|
data.workerHealth[worker1].degradedPeers[badPeer1] = {
|
|
|
|
now() - SERVER_KNOBS->CC_DEGRADED_LINK_EXPIRATION_INTERVAL - 1, now()
|
|
|
|
};
|
|
|
|
data.workerHealth[worker1].degradedPeers[badPeer2] = {
|
|
|
|
now() - SERVER_KNOBS->CC_DEGRADED_LINK_EXPIRATION_INTERVAL - 1,
|
|
|
|
now() - SERVER_KNOBS->CC_DEGRADED_LINK_EXPIRATION_INTERVAL - 1
|
|
|
|
};
|
|
|
|
data.workerHealth[worker2].degradedPeers[badPeer2] = {
|
|
|
|
now() - SERVER_KNOBS->CC_DEGRADED_LINK_EXPIRATION_INTERVAL - 1,
|
|
|
|
now() - SERVER_KNOBS->CC_DEGRADED_LINK_EXPIRATION_INTERVAL - 1
|
|
|
|
};
|
|
|
|
data.updateRecoveredWorkers();
|
|
|
|
|
|
|
|
ASSERT_EQ(data.workerHealth.size(), 1);
|
|
|
|
ASSERT(data.workerHealth.find(worker1) != data.workerHealth.end());
|
|
|
|
ASSERT(data.workerHealth[worker1].degradedPeers.find(badPeer1) != data.workerHealth[worker1].degradedPeers.end());
|
|
|
|
ASSERT(data.workerHealth[worker1].degradedPeers.find(badPeer2) == data.workerHealth[worker1].degradedPeers.end());
|
|
|
|
ASSERT(data.workerHealth.find(worker2) == data.workerHealth.end());
|
|
|
|
|
|
|
|
return Void();
|
|
|
|
}
|
|
|
|
|
|
|
|
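// Tests that `ClusterControllerData::getServersWithDegradedLink()` identifies degraded servers from the
// reported peer health, honoring the minimum degradation interval and the complainer-degree thresholds.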
TEST_CASE("/fdbserver/clustercontroller/getServersWithDegradedLink") {
|
|
|
|
// Create a testing ClusterControllerData. Most of the internal states do not matter in this test.
|
|
|
|
ClusterControllerData data(ClusterControllerFullInterface(),
|
|
|
|
LocalityData(),
|
2021-10-11 11:44:56 +08:00
|
|
|
ServerCoordinators(Reference<IClusterConnectionRecord>(
|
|
|
|
new ClusterConnectionMemoryRecord(ClusterConnectionString()))));
|
2021-06-29 12:45:02 +08:00
|
|
|
NetworkAddress worker(IPAddress(0x01010101), 1);
|
|
|
|
NetworkAddress badPeer1(IPAddress(0x02020202), 1);
|
|
|
|
NetworkAddress badPeer2(IPAddress(0x03030303), 1);
|
|
|
|
NetworkAddress badPeer3(IPAddress(0x04040404), 1);
|
|
|
|
NetworkAddress badPeer4(IPAddress(0x05050505), 1);
|
|
|
|
|
2021-10-19 03:04:22 +08:00
|
|
|
// Test that a reported degraded link should stay for sometime before being considered as a degraded link by
|
|
|
|
// cluster controller.
|
2021-06-29 12:45:02 +08:00
|
|
|
{
|
|
|
|
data.workerHealth[worker].degradedPeers[badPeer1] = { now(), now() };
|
|
|
|
ASSERT(data.getServersWithDegradedLink().empty());
|
|
|
|
data.workerHealth.clear();
|
|
|
|
}
|
|
|
|
|
2021-10-19 03:04:22 +08:00
|
|
|
// Test that when there is only one reported degraded link, getServersWithDegradedLink can return correct
|
|
|
|
// degraded server.
|
2021-06-29 12:45:02 +08:00
|
|
|
{
|
|
|
|
data.workerHealth[worker].degradedPeers[badPeer1] = { now() - SERVER_KNOBS->CC_MIN_DEGRADATION_INTERVAL - 1,
|
|
|
|
now() };
|
|
|
|
auto degradedServers = data.getServersWithDegradedLink();
|
|
|
|
ASSERT(degradedServers.size() == 1);
|
|
|
|
ASSERT(degradedServers.find(badPeer1) != degradedServers.end());
|
|
|
|
data.workerHealth.clear();
|
|
|
|
}
|
|
|
|
|
|
|
|
// Test that if both A complains B and B compalins A, only one of the server will be chosen as degraded server.
|
|
|
|
{
|
|
|
|
data.workerHealth[worker].degradedPeers[badPeer1] = { now() - SERVER_KNOBS->CC_MIN_DEGRADATION_INTERVAL - 1,
|
|
|
|
now() };
|
|
|
|
data.workerHealth[badPeer1].degradedPeers[worker] = { now() - SERVER_KNOBS->CC_MIN_DEGRADATION_INTERVAL - 1,
|
|
|
|
now() };
|
|
|
|
auto degradedServers = data.getServersWithDegradedLink();
|
|
|
|
ASSERT(degradedServers.size() == 1);
|
|
|
|
ASSERT(degradedServers.find(worker) != degradedServers.end() ||
|
|
|
|
degradedServers.find(badPeer1) != degradedServers.end());
|
|
|
|
data.workerHealth.clear();
|
|
|
|
}
|
|
|
|
|
|
|
|
// Test that if B complains A and C complains A, A is selected as degraded server instead of B or C.
|
|
|
|
{
|
|
|
|
ASSERT(SERVER_KNOBS->CC_DEGRADED_PEER_DEGREE_TO_EXCLUDE < 4);
|
|
|
|
data.workerHealth[worker].degradedPeers[badPeer1] = { now() - SERVER_KNOBS->CC_MIN_DEGRADATION_INTERVAL - 1,
|
|
|
|
now() };
|
|
|
|
data.workerHealth[badPeer1].degradedPeers[worker] = { now() - SERVER_KNOBS->CC_MIN_DEGRADATION_INTERVAL - 1,
|
|
|
|
now() };
|
|
|
|
data.workerHealth[worker].degradedPeers[badPeer2] = { now() - SERVER_KNOBS->CC_MIN_DEGRADATION_INTERVAL - 1,
|
|
|
|
now() };
|
|
|
|
data.workerHealth[badPeer2].degradedPeers[worker] = { now() - SERVER_KNOBS->CC_MIN_DEGRADATION_INTERVAL - 1,
|
|
|
|
now() };
|
|
|
|
auto degradedServers = data.getServersWithDegradedLink();
|
|
|
|
ASSERT(degradedServers.size() == 1);
|
|
|
|
ASSERT(degradedServers.find(worker) != degradedServers.end());
|
|
|
|
data.workerHealth.clear();
|
|
|
|
}
|
|
|
|
|
|
|
|
// Test that if the number of complainers exceeds the threshold, no degraded server is returned.
|
|
|
|
{
|
|
|
|
ASSERT(SERVER_KNOBS->CC_DEGRADED_PEER_DEGREE_TO_EXCLUDE < 4);
|
|
|
|
data.workerHealth[badPeer1].degradedPeers[worker] = { now() - SERVER_KNOBS->CC_MIN_DEGRADATION_INTERVAL - 1,
|
|
|
|
now() };
|
|
|
|
data.workerHealth[badPeer2].degradedPeers[worker] = { now() - SERVER_KNOBS->CC_MIN_DEGRADATION_INTERVAL - 1,
|
|
|
|
now() };
|
|
|
|
data.workerHealth[badPeer3].degradedPeers[worker] = { now() - SERVER_KNOBS->CC_MIN_DEGRADATION_INTERVAL - 1,
|
|
|
|
now() };
|
|
|
|
data.workerHealth[badPeer4].degradedPeers[worker] = { now() - SERVER_KNOBS->CC_MIN_DEGRADATION_INTERVAL - 1,
|
|
|
|
now() };
|
|
|
|
ASSERT(data.getServersWithDegradedLink().empty());
|
|
|
|
data.workerHealth.clear();
|
|
|
|
}
|
|
|
|
|
2021-10-19 03:04:22 +08:00
|
|
|
// Test that if the degradation is reported both ways between A and other 4 servers, no degraded server is
|
|
|
|
// returned.
|
2021-06-29 12:45:02 +08:00
|
|
|
{
|
|
|
|
ASSERT(SERVER_KNOBS->CC_DEGRADED_PEER_DEGREE_TO_EXCLUDE < 4);
|
|
|
|
data.workerHealth[worker].degradedPeers[badPeer1] = { now() - SERVER_KNOBS->CC_MIN_DEGRADATION_INTERVAL - 1,
|
|
|
|
now() };
|
|
|
|
data.workerHealth[badPeer1].degradedPeers[worker] = { now() - SERVER_KNOBS->CC_MIN_DEGRADATION_INTERVAL - 1,
|
|
|
|
now() };
|
|
|
|
data.workerHealth[worker].degradedPeers[badPeer2] = { now() - SERVER_KNOBS->CC_MIN_DEGRADATION_INTERVAL - 1,
|
|
|
|
now() };
|
|
|
|
data.workerHealth[badPeer2].degradedPeers[worker] = { now() - SERVER_KNOBS->CC_MIN_DEGRADATION_INTERVAL - 1,
|
|
|
|
now() };
|
|
|
|
data.workerHealth[worker].degradedPeers[badPeer3] = { now() - SERVER_KNOBS->CC_MIN_DEGRADATION_INTERVAL - 1,
|
|
|
|
now() };
|
|
|
|
data.workerHealth[badPeer3].degradedPeers[worker] = { now() - SERVER_KNOBS->CC_MIN_DEGRADATION_INTERVAL - 1,
|
|
|
|
now() };
|
|
|
|
data.workerHealth[worker].degradedPeers[badPeer4] = { now() - SERVER_KNOBS->CC_MIN_DEGRADATION_INTERVAL - 1,
|
|
|
|
now() };
|
|
|
|
data.workerHealth[badPeer4].degradedPeers[worker] = { now() - SERVER_KNOBS->CC_MIN_DEGRADATION_INTERVAL - 1,
|
|
|
|
now() };
|
|
|
|
ASSERT(data.getServersWithDegradedLink().empty());
|
|
|
|
data.workerHealth.clear();
|
|
|
|
}
|
|
|
|
|
|
|
|
return Void();
|
|
|
|
}
|
|
|
|
|
|
|
|
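// Tests that `ClusterControllerData::recentRecoveryCountDueToHealth()` only counts health-triggered
// recoveries that happened within CC_TRACKING_HEALTH_RECOVERY_INTERVAL.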
TEST_CASE("/fdbserver/clustercontroller/recentRecoveryCountDueToHealth") {
|
|
|
|
// Create a testing ClusterControllerData. Most of the internal states do not matter in this test.
|
|
|
|
ClusterControllerData data(ClusterControllerFullInterface(),
|
|
|
|
LocalityData(),
|
2021-10-11 11:44:56 +08:00
|
|
|
ServerCoordinators(Reference<IClusterConnectionRecord>(
|
|
|
|
new ClusterConnectionMemoryRecord(ClusterConnectionString()))));
|
2021-06-29 12:45:02 +08:00
|
|
|
|
|
|
|
ASSERT_EQ(data.recentRecoveryCountDueToHealth(), 0);
|
|
|
|
|
|
|
|
data.recentHealthTriggeredRecoveryTime.push(now() - SERVER_KNOBS->CC_TRACKING_HEALTH_RECOVERY_INTERVAL - 1);
|
|
|
|
ASSERT_EQ(data.recentRecoveryCountDueToHealth(), 0);
|
|
|
|
|
|
|
|
data.recentHealthTriggeredRecoveryTime.push(now() - SERVER_KNOBS->CC_TRACKING_HEALTH_RECOVERY_INTERVAL + 1);
|
|
|
|
ASSERT_EQ(data.recentRecoveryCountDueToHealth(), 1);
|
|
|
|
|
|
|
|
data.recentHealthTriggeredRecoveryTime.push(now());
|
|
|
|
ASSERT_EQ(data.recentRecoveryCountDueToHealth(), 2);
|
|
|
|
|
|
|
|
return Void();
|
|
|
|
}
|
|
|
|
|
|
|
|
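// Tests that `ClusterControllerData::shouldTriggerRecoveryDueToDegradedServers()` triggers a recovery only
// when a degraded server belongs to the primary transaction system.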
TEST_CASE("/fdbserver/clustercontroller/shouldTriggerRecoveryDueToDegradedServers") {
|
|
|
|
// Create a testing ClusterControllerData. Most of the internal states do not matter in this test.
|
|
|
|
ClusterControllerData data(ClusterControllerFullInterface(),
|
|
|
|
LocalityData(),
|
2021-10-11 11:44:56 +08:00
|
|
|
ServerCoordinators(Reference<IClusterConnectionRecord>(
|
|
|
|
new ClusterConnectionMemoryRecord(ClusterConnectionString()))));
|
2021-06-29 12:45:02 +08:00
|
|
|
NetworkAddress master(IPAddress(0x01010101), 1);
|
|
|
|
NetworkAddress tlog(IPAddress(0x02020202), 1);
|
|
|
|
NetworkAddress satelliteTlog(IPAddress(0x03030303), 1);
|
|
|
|
NetworkAddress remoteTlog(IPAddress(0x04040404), 1);
|
|
|
|
NetworkAddress logRouter(IPAddress(0x05050505), 1);
|
|
|
|
NetworkAddress backup(IPAddress(0x06060606), 1);
|
|
|
|
NetworkAddress proxy(IPAddress(0x07070707), 1);
|
|
|
|
NetworkAddress resolver(IPAddress(0x08080808), 1);
|
2021-09-11 05:51:55 +08:00
|
|
|
UID testUID(1, 2);
|
2021-06-29 12:45:02 +08:00
|
|
|
|
|
|
|
// Create a ServerDBInfo using above addresses.
|
|
|
|
ServerDBInfo testDbInfo;
|
|
|
|
testDbInfo.master.changeCoordinators =
|
2021-09-11 05:51:55 +08:00
|
|
|
RequestStream<struct ChangeCoordinatorsRequest>(Endpoint({ master }, testUID));
|
2021-06-29 12:45:02 +08:00
|
|
|
|
|
|
|
TLogInterface localTLogInterf;
|
2021-09-11 05:51:55 +08:00
|
|
|
localTLogInterf.peekMessages = RequestStream<struct TLogPeekRequest>(Endpoint({ tlog }, testUID));
|
2021-06-29 12:45:02 +08:00
|
|
|
TLogInterface localLogRouterInterf;
|
2021-09-11 05:51:55 +08:00
|
|
|
localLogRouterInterf.peekMessages = RequestStream<struct TLogPeekRequest>(Endpoint({ logRouter }, testUID));
|
2021-06-29 12:45:02 +08:00
|
|
|
BackupInterface backupInterf;
|
2021-09-11 05:51:55 +08:00
|
|
|
backupInterf.waitFailure = RequestStream<ReplyPromise<Void>>(Endpoint({ backup }, testUID));
|
2021-06-29 12:45:02 +08:00
|
|
|
TLogSet localTLogSet;
|
|
|
|
localTLogSet.isLocal = true;
|
|
|
|
localTLogSet.tLogs.push_back(OptionalInterface(localTLogInterf));
|
|
|
|
localTLogSet.logRouters.push_back(OptionalInterface(localLogRouterInterf));
|
|
|
|
localTLogSet.backupWorkers.push_back(OptionalInterface(backupInterf));
|
|
|
|
testDbInfo.logSystemConfig.tLogs.push_back(localTLogSet);
|
|
|
|
|
|
|
|
TLogInterface sateTLogInterf;
|
2021-09-11 05:51:55 +08:00
|
|
|
sateTLogInterf.peekMessages = RequestStream<struct TLogPeekRequest>(Endpoint({ satelliteTlog }, testUID));
|
2021-06-29 12:45:02 +08:00
|
|
|
TLogSet sateTLogSet;
|
|
|
|
sateTLogSet.isLocal = true;
|
|
|
|
sateTLogSet.locality = tagLocalitySatellite;
|
|
|
|
sateTLogSet.tLogs.push_back(OptionalInterface(sateTLogInterf));
|
|
|
|
testDbInfo.logSystemConfig.tLogs.push_back(sateTLogSet);
|
|
|
|
|
|
|
|
TLogInterface remoteTLogInterf;
|
2021-09-11 05:51:55 +08:00
|
|
|
remoteTLogInterf.peekMessages = RequestStream<struct TLogPeekRequest>(Endpoint({ remoteTlog }, testUID));
|
2021-06-29 12:45:02 +08:00
|
|
|
TLogSet remoteTLogSet;
|
|
|
|
remoteTLogSet.isLocal = false;
|
|
|
|
remoteTLogSet.tLogs.push_back(OptionalInterface(remoteTLogInterf));
|
|
|
|
testDbInfo.logSystemConfig.tLogs.push_back(remoteTLogSet);
|
|
|
|
|
|
|
|
GrvProxyInterface proxyInterf;
|
2021-09-11 05:51:55 +08:00
|
|
|
proxyInterf.getConsistentReadVersion = RequestStream<struct GetReadVersionRequest>(Endpoint({ proxy }, testUID));
|
2021-06-29 12:45:02 +08:00
|
|
|
testDbInfo.client.grvProxies.push_back(proxyInterf);
|
|
|
|
|
|
|
|
ResolverInterface resolverInterf;
|
2021-09-11 05:51:55 +08:00
|
|
|
resolverInterf.resolve = RequestStream<struct ResolveTransactionBatchRequest>(Endpoint({ resolver }, testUID));
|
2021-06-29 12:45:02 +08:00
|
|
|
testDbInfo.resolvers.push_back(resolverInterf);
|
|
|
|
|
|
|
|
testDbInfo.recoveryState = RecoveryState::ACCEPTING_COMMITS;
|
|
|
|
|
|
|
|
// No recovery when no degraded servers.
|
|
|
|
data.db.serverInfo->set(testDbInfo);
|
|
|
|
ASSERT(!data.shouldTriggerRecoveryDueToDegradedServers());
|
|
|
|
|
|
|
|
// Trigger recovery when master is degraded.
|
|
|
|
data.degradedServers.insert(master);
|
|
|
|
ASSERT(data.shouldTriggerRecoveryDueToDegradedServers());
|
|
|
|
data.degradedServers.clear();
|
|
|
|
|
|
|
|
// Trigger recovery when primary TLog is degraded.
|
|
|
|
data.degradedServers.insert(tlog);
|
|
|
|
ASSERT(data.shouldTriggerRecoveryDueToDegradedServers());
|
|
|
|
data.degradedServers.clear();
|
|
|
|
|
|
|
|
// No recovery when satellite Tlog is degraded.
|
|
|
|
data.degradedServers.insert(satelliteTlog);
|
|
|
|
ASSERT(!data.shouldTriggerRecoveryDueToDegradedServers());
|
|
|
|
data.degradedServers.clear();
|
|
|
|
|
|
|
|
// No recovery when remote tlog is degraded.
|
|
|
|
data.degradedServers.insert(remoteTlog);
|
|
|
|
ASSERT(!data.shouldTriggerRecoveryDueToDegradedServers());
|
|
|
|
data.degradedServers.clear();
|
|
|
|
|
|
|
|
// No recovery when log router is degraded.
|
|
|
|
data.degradedServers.insert(logRouter);
|
|
|
|
ASSERT(!data.shouldTriggerRecoveryDueToDegradedServers());
|
|
|
|
data.degradedServers.clear();
|
|
|
|
|
|
|
|
// No recovery when backup worker is degraded.
|
|
|
|
data.degradedServers.insert(backup);
|
|
|
|
ASSERT(!data.shouldTriggerRecoveryDueToDegradedServers());
|
|
|
|
data.degradedServers.clear();
|
|
|
|
|
|
|
|
// Trigger recovery when proxy is degraded.
|
|
|
|
data.degradedServers.insert(proxy);
|
|
|
|
ASSERT(data.shouldTriggerRecoveryDueToDegradedServers());
|
|
|
|
data.degradedServers.clear();
|
|
|
|
|
|
|
|
// Trigger recovery when resolver is degraded.
|
|
|
|
data.degradedServers.insert(resolver);
|
|
|
|
ASSERT(data.shouldTriggerRecoveryDueToDegradedServers());
|
|
|
|
|
|
|
|
return Void();
|
|
|
|
}
|
|
|
|
|
2021-09-10 11:29:28 +08:00
|
|
|
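// Tests that `ClusterControllerData::shouldTriggerFailoverDueToDegradedServers()` triggers a failover only
// when enough primary transaction system processes are degraded, two regions are usable, and the remote
// side is healthy.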
TEST_CASE("/fdbserver/clustercontroller/shouldTriggerFailoverDueToDegradedServers") {
|
|
|
|
// Create a testing ClusterControllerData. Most of the internal states do not matter in this test.
|
|
|
|
ClusterControllerData data(ClusterControllerFullInterface(),
|
|
|
|
LocalityData(),
|
2021-10-11 11:44:56 +08:00
|
|
|
ServerCoordinators(Reference<IClusterConnectionRecord>(
|
|
|
|
new ClusterConnectionMemoryRecord(ClusterConnectionString()))));
|
2021-09-10 11:29:28 +08:00
|
|
|
NetworkAddress master(IPAddress(0x01010101), 1);
|
|
|
|
NetworkAddress tlog(IPAddress(0x02020202), 1);
|
|
|
|
NetworkAddress satelliteTlog(IPAddress(0x03030303), 1);
|
|
|
|
NetworkAddress remoteTlog(IPAddress(0x04040404), 1);
|
|
|
|
NetworkAddress logRouter(IPAddress(0x05050505), 1);
|
|
|
|
NetworkAddress backup(IPAddress(0x06060606), 1);
|
|
|
|
NetworkAddress proxy(IPAddress(0x07070707), 1);
|
|
|
|
NetworkAddress proxy2(IPAddress(0x08080808), 1);
|
|
|
|
NetworkAddress resolver(IPAddress(0x09090909), 1);
|
2021-09-11 05:51:55 +08:00
|
|
|
UID testUID(1, 2);
|
2021-09-10 11:29:28 +08:00
|
|
|
|
|
|
|
data.db.config.usableRegions = 2;
|
|
|
|
|
|
|
|
// Create a ServerDBInfo using above addresses.
|
|
|
|
ServerDBInfo testDbInfo;
|
|
|
|
testDbInfo.master.changeCoordinators =
|
2021-09-11 05:51:55 +08:00
|
|
|
RequestStream<struct ChangeCoordinatorsRequest>(Endpoint({ master }, testUID));
|
2021-09-10 11:29:28 +08:00
|
|
|
|
|
|
|
TLogInterface localTLogInterf;
|
2021-09-11 05:51:55 +08:00
|
|
|
localTLogInterf.peekMessages = RequestStream<struct TLogPeekRequest>(Endpoint({ tlog }, testUID));
|
2021-09-10 11:29:28 +08:00
|
|
|
TLogInterface localLogRouterInterf;
|
2021-09-11 05:51:55 +08:00
|
|
|
localLogRouterInterf.peekMessages = RequestStream<struct TLogPeekRequest>(Endpoint({ logRouter }, testUID));
|
2021-09-10 11:29:28 +08:00
|
|
|
BackupInterface backupInterf;
|
2021-09-11 05:51:55 +08:00
|
|
|
backupInterf.waitFailure = RequestStream<ReplyPromise<Void>>(Endpoint({ backup }, testUID));
|
2021-09-10 11:29:28 +08:00
|
|
|
TLogSet localTLogSet;
|
|
|
|
localTLogSet.isLocal = true;
|
|
|
|
localTLogSet.tLogs.push_back(OptionalInterface(localTLogInterf));
|
|
|
|
localTLogSet.logRouters.push_back(OptionalInterface(localLogRouterInterf));
|
|
|
|
localTLogSet.backupWorkers.push_back(OptionalInterface(backupInterf));
|
|
|
|
testDbInfo.logSystemConfig.tLogs.push_back(localTLogSet);
|
|
|
|
|
|
|
|
TLogInterface sateTLogInterf;
|
2021-09-11 05:51:55 +08:00
|
|
|
sateTLogInterf.peekMessages = RequestStream<struct TLogPeekRequest>(Endpoint({ satelliteTlog }, testUID));
|
2021-09-10 11:29:28 +08:00
|
|
|
TLogSet sateTLogSet;
|
|
|
|
sateTLogSet.isLocal = true;
|
|
|
|
sateTLogSet.locality = tagLocalitySatellite;
|
|
|
|
sateTLogSet.tLogs.push_back(OptionalInterface(sateTLogInterf));
|
|
|
|
testDbInfo.logSystemConfig.tLogs.push_back(sateTLogSet);
|
|
|
|
|
|
|
|
TLogInterface remoteTLogInterf;
|
2021-09-11 05:51:55 +08:00
|
|
|
remoteTLogInterf.peekMessages = RequestStream<struct TLogPeekRequest>(Endpoint({ remoteTlog }, testUID));
|
2021-09-10 11:29:28 +08:00
|
|
|
TLogSet remoteTLogSet;
|
|
|
|
remoteTLogSet.isLocal = false;
|
|
|
|
remoteTLogSet.tLogs.push_back(OptionalInterface(remoteTLogInterf));
|
|
|
|
testDbInfo.logSystemConfig.tLogs.push_back(remoteTLogSet);
|
|
|
|
|
|
|
|
GrvProxyInterface grvProxyInterf;
|
2021-09-11 05:51:55 +08:00
|
|
|
grvProxyInterf.getConsistentReadVersion = RequestStream<struct GetReadVersionRequest>(Endpoint({ proxy }, testUID));
|
2021-09-10 11:29:28 +08:00
|
|
|
testDbInfo.client.grvProxies.push_back(grvProxyInterf);
|
|
|
|
|
|
|
|
CommitProxyInterface commitProxyInterf;
|
2021-09-11 05:51:55 +08:00
|
|
|
commitProxyInterf.commit = RequestStream<struct CommitTransactionRequest>(Endpoint({ proxy2 }, testUID));
|
2021-09-10 11:29:28 +08:00
|
|
|
testDbInfo.client.commitProxies.push_back(commitProxyInterf);
|
|
|
|
|
|
|
|
ResolverInterface resolverInterf;
|
2021-09-11 05:51:55 +08:00
|
|
|
resolverInterf.resolve = RequestStream<struct ResolveTransactionBatchRequest>(Endpoint({ resolver }, testUID));
|
2021-09-10 11:29:28 +08:00
|
|
|
testDbInfo.resolvers.push_back(resolverInterf);
|
|
|
|
|
|
|
|
testDbInfo.recoveryState = RecoveryState::ACCEPTING_COMMITS;
|
|
|
|
|
|
|
|
// No failover when no degraded servers.
|
|
|
|
data.db.serverInfo->set(testDbInfo);
|
|
|
|
ASSERT(!data.shouldTriggerFailoverDueToDegradedServers());
|
|
|
|
|
|
|
|
// No failover when small number of degraded servers
|
|
|
|
data.degradedServers.insert(master);
|
|
|
|
ASSERT(!data.shouldTriggerFailoverDueToDegradedServers());
|
|
|
|
data.degradedServers.clear();
|
|
|
|
|
2021-09-11 05:51:55 +08:00
|
|
|
// Trigger failover when enough servers in the txn system are degraded.
|
2021-09-10 11:29:28 +08:00
|
|
|
data.degradedServers.insert(master);
|
|
|
|
data.degradedServers.insert(tlog);
|
|
|
|
data.degradedServers.insert(proxy);
|
|
|
|
data.degradedServers.insert(proxy2);
|
|
|
|
data.degradedServers.insert(resolver);
|
|
|
|
ASSERT(data.shouldTriggerFailoverDueToDegradedServers());
|
|
|
|
|
|
|
|
// No failover when usable region is 1.
|
|
|
|
data.db.config.usableRegions = 1;
|
|
|
|
ASSERT(!data.shouldTriggerFailoverDueToDegradedServers());
|
|
|
|
data.db.config.usableRegions = 2;
|
|
|
|
|
|
|
|
// No failover when remote is also degraded.
|
|
|
|
data.degradedServers.insert(remoteTlog);
|
|
|
|
ASSERT(!data.shouldTriggerFailoverDueToDegradedServers());
|
|
|
|
data.degradedServers.clear();
|
|
|
|
|
|
|
|
// No failover when some are not from transaction system
|
|
|
|
data.degradedServers.insert(NetworkAddress(IPAddress(0x13131313), 1));
|
|
|
|
data.degradedServers.insert(NetworkAddress(IPAddress(0x13131313), 2));
|
|
|
|
data.degradedServers.insert(NetworkAddress(IPAddress(0x13131313), 3));
|
|
|
|
data.degradedServers.insert(NetworkAddress(IPAddress(0x13131313), 4));
|
|
|
|
data.degradedServers.insert(NetworkAddress(IPAddress(0x13131313), 5));
|
|
|
|
ASSERT(!data.shouldTriggerFailoverDueToDegradedServers());
|
|
|
|
data.degradedServers.clear();
|
|
|
|
|
|
|
|
return Void();
|
|
|
|
}
|
|
|
|
|
2021-06-24 05:59:00 +08:00
|
|
|
} // namespace
|