foundationdb/fdbserver/QuietDatabase.actor.cpp

/*
* QuietDatabase.actor.cpp
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cinttypes>
#include "flow/ActorCollection.h"
#include "fdbrpc/simulator.h"
#include "flow/Trace.h"
#include "fdbclient/NativeAPI.actor.h"
#include "fdbclient/DatabaseContext.h"
#include "fdbserver/TesterInterface.actor.h"
#include "fdbserver/WorkerInterface.actor.h"
#include "fdbserver/ServerDBInfo.h"
#include "fdbserver/Status.h"
#include "fdbclient/ManagementAPI.actor.h"
#include <boost/lexical_cast.hpp>
#include "flow/actorcompiler.h" // This must be the last #include.
ACTOR Future<vector<WorkerDetails>> getWorkers( Reference<AsyncVar<ServerDBInfo>> dbInfo, int flags = 0 ) {
loop {
choose {
when( vector<WorkerDetails> w = wait( brokenPromiseToNever( dbInfo->get().clusterInterface.getWorkers.getReply( GetWorkersRequest( flags ) ) ) ) ) {
return w;
}
when( wait( dbInfo->onChange() ) ) {}
}
}
}
//Gets the WorkerInterface representing the Master server.
ACTOR Future<WorkerInterface> getMasterWorker( Database cx, Reference<AsyncVar<ServerDBInfo>> dbInfo ) {
TraceEvent("GetMasterWorker").detail("Stage", "GettingWorkers");
loop {
state vector<WorkerDetails> workers = wait( getWorkers( dbInfo ) );
for( int i = 0; i < workers.size(); i++ ) {
if( workers[i].interf.address() == dbInfo->get().master.address() ) {
TraceEvent("GetMasterWorker").detail("Stage", "GotWorkers").detail("MasterId", dbInfo->get().master.id()).detail("WorkerId", workers[i].interf.id());
return workers[i].interf;
}
}
TraceEvent(SevWarn, "GetMasterWorkerError")
.detail("Error", "MasterWorkerNotFound")
.detail("Master", dbInfo->get().master.id()).detail("MasterAddress", dbInfo->get().master.address())
.detail("WorkerCount", workers.size());
wait(delay(1.0));
}
}
// Gets the WorkerInterface representing the data distributor.
ACTOR Future<WorkerInterface> getDataDistributorWorker( Database cx, Reference<AsyncVar<ServerDBInfo>> dbInfo ) {
TraceEvent("GetDataDistributorWorker").detail("Stage", "GettingWorkers");
loop {
state vector<WorkerDetails> workers = wait( getWorkers( dbInfo ) );
if (!dbInfo->get().distributor.present()) continue;
for( int i = 0; i < workers.size(); i++ ) {
if( workers[i].interf.address() == dbInfo->get().distributor.get().address() ) {
TraceEvent("GetDataDistributorWorker").detail("Stage", "GotWorkers")
.detail("DataDistributorId", dbInfo->get().distributor.get().id())
.detail("WorkerId", workers[i].interf.id());
return workers[i].interf;
}
}
TraceEvent(SevWarn, "GetDataDistributorWorker")
.detail("Error", "DataDistributorWorkerNotFound")
.detail("DataDistributorId", dbInfo->get().distributor.get().id())
.detail("DataDistributorAddress", dbInfo->get().distributor.get().address())
.detail("WorkerCount", workers.size());
}
}
// Gets the number of bytes in flight from the data distributor.
ACTOR Future<int64_t> getDataInFlight( Database cx, WorkerInterface distributorWorker ) {
try {
TraceEvent("DataInFlight").detail("Stage", "ContactingDataDistributor");
TraceEventFields md = wait( timeoutError(distributorWorker.eventLogRequest.getReply(
EventLogRequest( LiteralStringRef("TotalDataInFlight") ) ), 1.0 ) );
int64_t dataInFlight = boost::lexical_cast<int64_t>(md.getValue("TotalBytes"));
return dataInFlight;
} catch( Error &e ) {
TraceEvent("QuietDatabaseFailure", distributorWorker.id()).error(e).detail("Reason", "Failed to extract DataInFlight");
throw;
}
}
// Gets the number of bytes in flight from the data distributor.
ACTOR Future<int64_t> getDataInFlight( Database cx, Reference<AsyncVar<ServerDBInfo>> dbInfo ) {
WorkerInterface distributorInterf = wait( getDataDistributorWorker(cx, dbInfo) );
int64_t dataInFlight = wait(getDataInFlight(cx, distributorInterf));
return dataInFlight;
}
// Computes the queue size for storage servers and tlogs using the BytesInput and BytesDurable trace event fields
int64_t getQueueSize( const TraceEventFields& md ) {
double inputRate, durableRate;
double inputRoughness, durableRoughness;
int64_t inputBytes, durableBytes;
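	// Each counter field is logged as "<rate> <roughness> <absolute total>"; only the absolute byte totals are needed here.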
sscanf(md.getValue("BytesInput").c_str(), "%lf %lf %" SCNd64, &inputRate, &inputRoughness, &inputBytes);
sscanf(md.getValue("BytesDurable").c_str(), "%lf %lf %" SCNd64, &durableRate, &durableRoughness, &durableBytes);
return inputBytes - durableBytes;
}
//Computes the popped version lag for tlogs
int64_t getPoppedVersionLag( const TraceEventFields& md ) {
int64_t persistentDataDurableVersion = boost::lexical_cast<int64_t>(md.getValue("PersistentDataDurableVersion"));
int64_t queuePoppedVersion = boost::lexical_cast<int64_t>(md.getValue("QueuePoppedVersion"));
return persistentDataDurableVersion - queuePoppedVersion;
}
// This is not robust in the face of a TLog failure
ACTOR Future<std::pair<int64_t,int64_t>> getTLogQueueInfo( Database cx, Reference<AsyncVar<ServerDBInfo>> dbInfo ) {
TraceEvent("MaxTLogQueueSize").detail("Stage", "ContactingLogs");
state std::vector<WorkerDetails> workers = wait(getWorkers(dbInfo));
std::map<NetworkAddress, WorkerInterface> workersMap;
for(auto worker : workers) {
workersMap[worker.interf.address()] = worker.interf;
}
state std::vector<Future<TraceEventFields>> messages;
state std::vector<TLogInterface> tlogs = dbInfo->get().logSystemConfig.allPresentLogs();
for(int i = 0; i < tlogs.size(); i++) {
auto itr = workersMap.find(tlogs[i].address());
if(itr == workersMap.end()) {
TraceEvent("QuietDatabaseFailure").detail("Reason", "Could not find worker for log server").detail("Tlog", tlogs[i].id());
throw attribute_not_found();
}
messages.push_back( timeoutError(itr->second.eventLogRequest.getReply(
EventLogRequest( StringRef(tlogs[i].id().toString() + "/TLogMetrics") ) ), 1.0 ) );
}
wait( waitForAll( messages ) );
TraceEvent("MaxTLogQueueSize").detail("Stage", "ComputingMax").detail("MessageCount", messages.size());
state int64_t maxQueueSize = 0;
state int64_t maxPoppedVersionLag = 0;
state int i = 0;
for(; i < messages.size(); i++) {
try {
maxQueueSize = std::max( maxQueueSize, getQueueSize( messages[i].get() ) );
maxPoppedVersionLag = std::max( maxPoppedVersionLag, getPoppedVersionLag( messages[i].get() ) );
} catch( Error &e ) {
TraceEvent("QuietDatabaseFailure").detail("Reason", "Failed to extract MaxTLogQueue").detail("Tlog", tlogs[i].id());
throw;
}
}
return std::make_pair( maxQueueSize, maxPoppedVersionLag );
}
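
// Reads the storage server interfaces from the server list in the system keyspace, retrying on transient transaction errors.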
ACTOR Future<vector<StorageServerInterface>> getStorageServers( Database cx, bool use_system_priority = false) {
state Transaction tr( cx );
if (use_system_priority)
tr.setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);
tr.setOption(FDBTransactionOptions::LOCK_AWARE);
loop {
try {
Standalone<RangeResultRef> serverList = wait( tr.getRange( serverListKeys, CLIENT_KNOBS->TOO_MANY ) );
ASSERT( !serverList.more && serverList.size() < CLIENT_KNOBS->TOO_MANY );
vector<StorageServerInterface> servers;
for( int i = 0; i < serverList.size(); i++ )
servers.push_back( decodeServerListValue( serverList[i].value ) );
return servers;
}
catch(Error &e) {
wait( tr.onError(e) );
}
}
}
//Gets the maximum size of all the storage server queues
ACTOR Future<int64_t> getMaxStorageServerQueueSize( Database cx, Reference<AsyncVar<ServerDBInfo>> dbInfo ) {
TraceEvent("MaxStorageServerQueueSize").detail("Stage", "ContactingStorageServers");
Future<std::vector<StorageServerInterface>> serversFuture = getStorageServers(cx);
state Future<std::vector<WorkerDetails>> workersFuture = getWorkers(dbInfo);
state std::vector<StorageServerInterface> servers = wait(serversFuture);
state std::vector<WorkerDetails> workers = wait(workersFuture);
std::map<NetworkAddress, WorkerInterface> workersMap;
for(auto worker : workers) {
workersMap[worker.interf.address()] = worker.interf;
}
state std::vector<Future<TraceEventFields>> messages;
for(int i = 0; i < servers.size(); i++) {
auto itr = workersMap.find(servers[i].address());
if(itr == workersMap.end()) {
TraceEvent("QuietDatabaseFailure").detail("Reason", "Could not find worker for storage server").detail("SS", servers[i].id());
throw attribute_not_found();
}
messages.push_back( timeoutError(itr->second.eventLogRequest.getReply(
EventLogRequest( StringRef(servers[i].id().toString() + "/StorageMetrics") ) ), 1.0 ) );
}
wait( waitForAll(messages) );
TraceEvent("MaxStorageServerQueueSize").detail("Stage", "ComputingMax").detail("MessageCount", messages.size());
state int64_t maxQueueSize = 0;
state int i = 0;
for(; i < messages.size(); i++) {
try {
maxQueueSize = std::max( maxQueueSize, getQueueSize( messages[i].get() ) );
} catch( Error &e ) {
TraceEvent("QuietDatabaseFailure").detail("Reason", "Failed to extract MaxStorageServerQueue").detail("SS", servers[i].id());
throw;
}
}
return maxQueueSize;
}
//Gets the size of the data distribution queue. If reportInFlight is true, then data in flight is considered part of the queue
ACTOR Future<int64_t> getDataDistributionQueueSize( Database cx, WorkerInterface distributorWorker, bool reportInFlight) {
try {
TraceEvent("DataDistributionQueueSize").detail("Stage", "ContactingDataDistributor");
TraceEventFields movingDataMessage = wait( timeoutError(distributorWorker.eventLogRequest.getReply(
EventLogRequest( LiteralStringRef("MovingData") ) ), 1.0 ) );
TraceEvent("DataDistributionQueueSize").detail("Stage", "GotString");
int64_t inQueue = boost::lexical_cast<int64_t>(movingDataMessage.getValue("InQueue"));
if(reportInFlight) {
int64_t inFlight = boost::lexical_cast<int64_t>(movingDataMessage.getValue("InFlight"));
inQueue += inFlight;
}
return inQueue;
} catch( Error &e ) {
TraceEvent("QuietDatabaseFailure", distributorWorker.id()).detail("Reason", "Failed to extract DataDistributionQueueSize");
2017-05-26 04:48:44 +08:00
throw;
}
}
// Gets the size of the data distribution queue. If reportInFlight is true, data in flight is counted as part of the queue.
// Convenience overload that first locates the data distributor worker from the ServerDBInfo.
ACTOR Future<int64_t> getDataDistributionQueueSize( Database cx, Reference<AsyncVar<ServerDBInfo>> dbInfo, bool reportInFlight ) {
WorkerInterface distributorInterf = wait( getDataDistributorWorker(cx, dbInfo) );
int64_t inQueue = wait( getDataDistributionQueueSize( cx, distributorInterf, reportInFlight) );
return inQueue;
}
// Checks that the numbers of server teams and machine teams do not exceed the allowed maximums.
ACTOR Future<bool> getTeamCollectionValid(Database cx, WorkerInterface dataDistributorWorker) {
state int attempts = 0;
loop {
try {
TraceEvent("GetTeamCollectionValid").detail("Stage", "ContactingMaster");
2019-02-13 05:41:18 +08:00
TraceEventFields teamCollectionInfoMessage = wait(timeoutError(
dataDistributorWorker.eventLogRequest.getReply(EventLogRequest(LiteralStringRef("TeamCollectionInfo"))), 1.0));
TraceEvent("GetTeamCollectionValid").detail("Stage", "GotString");
state int64_t currentTeamNumber =
boost::lexical_cast<int64_t>(teamCollectionInfoMessage.getValue("CurrentTeamNumber"));
state int64_t desiredTeamNumber =
boost::lexical_cast<int64_t>(teamCollectionInfoMessage.getValue("DesiredTeamNumber"));
state int64_t maxTeamNumber =
boost::lexical_cast<int64_t>(teamCollectionInfoMessage.getValue("MaxTeamNumber"));
state int64_t currentMachineTeamNumber =
boost::lexical_cast<int64_t>(teamCollectionInfoMessage.getValue("CurrentMachineTeamNumber"));
state int64_t healthyMachineTeamCount =
boost::lexical_cast<int64_t>(teamCollectionInfoMessage.getValue("CurrentHealthyMachineTeamNumber"));
state int64_t desiredMachineTeamNumber =
boost::lexical_cast<int64_t>(teamCollectionInfoMessage.getValue("DesiredMachineTeams"));
state int64_t maxMachineTeamNumber =
boost::lexical_cast<int64_t>(teamCollectionInfoMessage.getValue("MaxMachineTeams"));
// TODO: Get finer granularity check
state int64_t minServerTeamOnServer =
boost::lexical_cast<int64_t>(teamCollectionInfoMessage.getValue("MinTeamNumberOnServer"));
state int64_t maxServerTeamOnServer =
boost::lexical_cast<int64_t>(teamCollectionInfoMessage.getValue("MaxTeamNumberOnServer"));
state int64_t minMachineTeamOnMachine =
boost::lexical_cast<int64_t>(teamCollectionInfoMessage.getValue("MinMachineTeamNumberOnMachine"));
state int64_t maxMachineTeamOnMachine =
boost::lexical_cast<int64_t>(teamCollectionInfoMessage.getValue("MaxMachineTeamNumberOnMachine"));
// The if condition should be consistent with the conditions in serverTeamRemover() and
// machineTeamRemover() that decide whether redundant teams exist.
// The team count is always considered valid when the team removers are disabled, which avoids false positives
// in simulation tests.
// The minimum number of teams per server (and per machine) should be greater than 0 so that a newly added
// machine can host data.
if ((!SERVER_KNOBS->TR_FLAG_DISABLE_MACHINE_TEAM_REMOVER &&
healthyMachineTeamCount > desiredMachineTeamNumber) ||
(!SERVER_KNOBS->TR_FLAG_DISABLE_SERVER_TEAM_REMOVER && currentTeamNumber > desiredTeamNumber) ||
((minMachineTeamOnMachine <= 0 || minServerTeamOnServer <= 0) &&
SERVER_KNOBS->DESIRED_TEAMS_PER_SERVER == 3)) {
if (attempts++ < 10) {
wait(delay(60));
continue; // We may not receive the most recent TeamCollectionInfo
}
// When DESIRED_TEAMS_PER_SERVER == 1, minMachineTeamOnMachine can be 0 in roughly one out of 30k test
// cases. Only check DESIRED_TEAMS_PER_SERVER == 3 for now, since it is the most commonly used configuration.
// TODO: Remove the SERVER_KNOBS->DESIRED_TEAMS_PER_SERVER == 3 constraint and ensure that the minimum
// number of teams per server (and per machine) is always > 0 for any number of replicas.
TraceEvent("GetTeamCollectionValid")
.detail("CurrentTeamNumber", currentTeamNumber)
.detail("DesiredTeamNumber", desiredTeamNumber)
.detail("MaxTeamNumber", maxTeamNumber)
.detail("CurrentHealthyMachineTeamNumber", healthyMachineTeamCount)
.detail("DesiredMachineTeams", desiredMachineTeamNumber)
.detail("CurrentMachineTeamNumber", currentMachineTeamNumber)
.detail("MaxMachineTeams", maxMachineTeamNumber)
.detail("MinTeamNumberOnServer", minServerTeamOnServer)
.detail("MaxTeamNumberOnServer", maxServerTeamOnServer)
.detail("MinMachineTeamNumberOnMachine", minMachineTeamOnMachine)
.detail("MaxMachineTeamNumberOnMachine", maxMachineTeamOnMachine)
.detail("DesiredTeamsPerServer", SERVER_KNOBS->DESIRED_TEAMS_PER_SERVER)
.detail("MaxTeamsPerServer", SERVER_KNOBS->MAX_TEAMS_PER_SERVER);
return false;
} else {
return true;
}
} catch (Error& e) {
TraceEvent("QuietDatabaseFailure", dataDistributorWorker.id())
.detail("Reason", "Failed to extract GetTeamCollectionValid information");
attempts++;
if (attempts > 10) {
TraceEvent("QuietDatabaseNoTeamCollectionInfo", dataDistributorWorker.id())
.detail("Reason", "Had never called build team to build any team");
return true;
}
// throw;
wait(delay(10.0));
}
};
}
// Checks that the numbers of server teams and machine teams do not exceed the allowed maximums.
// Convenience overload that first locates the data distributor worker from the ServerDBInfo.
ACTOR Future<bool> getTeamCollectionValid(Database cx, Reference<AsyncVar<ServerDBInfo>> dbInfo) {
WorkerInterface dataDistributorWorker = wait(getDataDistributorWorker(cx, dbInfo));
bool valid = wait(getTeamCollectionValid(cx, dataDistributorWorker));
return valid;
}
// Checks that data distribution is active
ACTOR Future<bool> getDataDistributionActive( Database cx, WorkerInterface distributorWorker ) {
try {
TraceEvent("DataDistributionActive").detail("Stage", "ContactingDataDistributor");
TraceEventFields activeMessage = wait( timeoutError(distributorWorker.eventLogRequest.getReply(
EventLogRequest( LiteralStringRef("DDTrackerStarting") ) ), 1.0 ) );
return activeMessage.getValue("State") == "Active";
} catch( Error &e ) {
TraceEvent("QuietDatabaseFailure", distributorWorker.id()).detail("Reason", "Failed to extract DataDistributionActive");
2017-05-26 04:48:44 +08:00
throw;
}
}
// Checks to see if any storage servers are being recruited
ACTOR Future<bool> getStorageServersRecruiting( Database cx, WorkerInterface distributorWorker, UID distributorUID ) {
try {
TraceEvent("StorageServersRecruiting").detail("Stage", "ContactingDataDistributor");
TraceEventFields recruitingMessage = wait( timeoutError(distributorWorker.eventLogRequest.getReply(
EventLogRequest( StringRef( "StorageServerRecruitment_" + distributorUID.toString()) ) ), 1.0 ) );
TraceEvent("StorageServersRecruiting").detail("Message", recruitingMessage.toString());
return recruitingMessage.getValue("State") == "Recruiting";
} catch( Error &e ) {
TraceEvent("QuietDatabaseFailure", distributorWorker.id())
.detail("Reason", "Failed to extract StorageServersRecruiting")
.detail("DataDistributorID", distributorUID);
throw;
}
}
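
// Simulation only: if one datacenter of a multi-region configuration has been killed, reconfigure the cluster
// down to a single usable region so that recovery can complete.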
ACTOR Future<Void> repairDeadDatacenter(Database cx, Reference<AsyncVar<ServerDBInfo>> dbInfo, std::string context) {
if(g_network->isSimulated() && g_simulator.usableRegions > 1) {
bool primaryDead = g_simulator.datacenterDead(g_simulator.primaryDcId);
bool remoteDead = g_simulator.datacenterDead(g_simulator.remoteDcId);
ASSERT(!primaryDead || !remoteDead);
if(primaryDead || remoteDead) {
TraceEvent(SevWarnAlways, "DisablingFearlessConfiguration").detail("Location", context).detail("Stage", "Repopulate").detail("RemoteDead", remoteDead).detail("PrimaryDead", primaryDead);
g_simulator.usableRegions = 1;
wait(success( changeConfig( cx, (primaryDead ? g_simulator.disablePrimary : g_simulator.disableRemote) + " repopulate_anti_quorum=1", true ) ));
while( dbInfo->get().recoveryState < RecoveryState::STORAGE_RECOVERED ) {
wait( dbInfo->onChange() );
}
TraceEvent(SevWarnAlways, "DisablingFearlessConfiguration").detail("Location", context).detail("Stage", "Usable_Regions");
wait(success( changeConfig( cx, "usable_regions=1", true ) ));
}
}
return Void();
}
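
// After the given delay, repairs a dead datacenter (see repairDeadDatacenter above) so the quiet-database checks can make progress.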
ACTOR Future<Void> reconfigureAfter(Database cx, double time, Reference<AsyncVar<ServerDBInfo>> dbInfo, std::string context) {
wait( delay(time) );
wait( repairDeadDatacenter(cx, dbInfo, context) );
return Void();
}
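
// Waits until the database is "quiet": data in flight, the tlog queues, the storage server queues and the data
// distribution queue are all below their gates, data distribution is active, and no storage servers are being
// recruited, for three consecutive checks.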
ACTOR Future<Void> waitForQuietDatabase( Database cx, Reference<AsyncVar<ServerDBInfo>> dbInfo, std::string phase, int64_t dataInFlightGate = 2e6,
int64_t maxTLogQueueGate = 5e6, int64_t maxStorageServerQueueGate = 5e6, int64_t maxDataDistributionQueueSize = 0, int64_t maxPoppedVersionLag = 30e6 ) {
state Future<Void> reconfig = reconfigureAfter(cx, 100 + (deterministicRandom()->random01()*100), dbInfo, "QuietDatabase");
TraceEvent(("QuietDatabase" + phase + "Begin").c_str());
//In a simulated environment, wait 5 seconds so that workers can move to their optimal locations
if(g_network->isSimulated())
wait(delay(5.0));
// Require 3 consecutive successful quiet database checks spaced 2 seconds apart.
state int numSuccesses = 0;
loop {
try {
TraceEvent("QuietDatabaseWaitingOnDataDistributor");
WorkerInterface distributorWorker = wait( getDataDistributorWorker( cx, dbInfo ) );
UID distributorUID = dbInfo->get().distributor.get().id();
TraceEvent("QuietDatabaseGotDataDistributor", distributorUID).detail("Locality", distributorWorker.locality.toString());
state Future<int64_t> dataInFlight = getDataInFlight( cx, distributorWorker);
state Future<std::pair<int64_t,int64_t>> tLogQueueInfo = getTLogQueueInfo( cx, dbInfo );
state Future<int64_t> dataDistributionQueueSize = getDataDistributionQueueSize( cx, distributorWorker, dataInFlightGate == 0);
state Future<bool> teamCollectionValid = getTeamCollectionValid(cx, distributorWorker);
state Future<int64_t> storageQueueSize = getMaxStorageServerQueueSize( cx, dbInfo );
state Future<bool> dataDistributionActive = getDataDistributionActive( cx, distributorWorker );
state Future<bool> storageServersRecruiting = getStorageServersRecruiting ( cx, distributorWorker, distributorUID );
wait(success(dataInFlight) && success(tLogQueueInfo) && success(dataDistributionQueueSize) &&
success(teamCollectionValid) && success(storageQueueSize) && success(dataDistributionActive) &&
success(storageServersRecruiting));
TraceEvent(("QuietDatabase" + phase).c_str())
.detail("DataInFlight", dataInFlight.get())
.detail("MaxTLogQueueSize", tLogQueueInfo.get().first)
.detail("MaxTLogPoppedVersionLag", tLogQueueInfo.get().second)
.detail("DataDistributionQueueSize", dataDistributionQueueSize.get())
.detail("TeamCollectionValid", teamCollectionValid.get())
.detail("MaxStorageQueueSize", storageQueueSize.get())
.detail("DataDistributionActive", dataDistributionActive.get())
.detail("StorageServersRecruiting", storageServersRecruiting.get());
if (dataInFlight.get() > dataInFlightGate || tLogQueueInfo.get().first > maxTLogQueueGate || tLogQueueInfo.get().second > maxPoppedVersionLag ||
dataDistributionQueueSize.get() > maxDataDistributionQueueSize ||
storageQueueSize.get() > maxStorageServerQueueGate || dataDistributionActive.get() == false ||
storageServersRecruiting.get() == true || teamCollectionValid.get() == false) {
wait( delay( 1.0 ) );
numSuccesses = 0;
} else {
if(++numSuccesses == 3) {
TraceEvent(("QuietDatabase" + phase + "Done").c_str());
break;
}
else
wait(delay( 2.0 ) );
}
} catch (Error& e) {
if( e.code() != error_code_actor_cancelled && e.code() != error_code_attribute_not_found && e.code() != error_code_timed_out)
TraceEvent(("QuietDatabase" + phase + "Error").c_str()).error(e);
// attribute_not_found or timed_out errors occur if we don't get a reply from one of the servers; these are often corrected by retrying.
if(e.code() != error_code_attribute_not_found && e.code() != error_code_timed_out)
throw;
TraceEvent(("QuietDatabase" + phase + "Retry").c_str()).error(e);
wait(delay(1.0));
numSuccesses = 0;
}
}
return Void();
}
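
// Non-actor wrapper around waitForQuietDatabase; see above for the gate semantics.
// A minimal (hypothetical) usage sketch: wait(quietDatabase(cx, dbInfo, "Start", 2e6, 5e6, 5e6, 0, 30e6));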
Future<Void> quietDatabase( Database const& cx, Reference<AsyncVar<ServerDBInfo>> const& dbInfo, std::string phase, int64_t dataInFlightGate,
int64_t maxTLogQueueGate, int64_t maxStorageServerQueueGate, int64_t maxDataDistributionQueueSize, int64_t maxPoppedVersionLag ) {
return waitForQuietDatabase(cx, dbInfo, phase, dataInFlightGate, maxTLogQueueGate, maxStorageServerQueueGate, maxDataDistributionQueueSize, maxPoppedVersionLag);
}