removed verbose trace messages
parent 2434d06726
commit 8b73a1c998
fdbclient/Knobs.cpp
@@ -42,6 +42,8 @@ ClientKnobs::ClientKnobs(bool randomize) {
 	init( FAILURE_EMERGENCY_DELAY, 30.0 );
 	init( FAILURE_MAX_GENERATIONS, 10 );
+	init( COORDINATOR_RECONNECTION_DELAY, 1.0 );
+
 	// wrong_shard_server sometimes comes from the only nonfailed server, so we need to avoid a fast spin
 	init( WRONG_SHARD_SERVER_DELAY, .01 ); if( randomize && BUGGIFY ) WRONG_SHARD_SERVER_DELAY = deterministicRandom()->random01(); // FLOW_KNOBS->PREVENT_FAST_SPIN_DELAY; // SOMEDAY: This delay can limit performance of retrieving data when the cache is mostly wrong (e.g. dumping the database after a test)

fdbclient/Knobs.h
@@ -41,6 +41,8 @@ public:
 	double FAILURE_EMERGENCY_DELAY;
 	double FAILURE_MAX_GENERATIONS;
+	double COORDINATOR_RECONNECTION_DELAY;
+
 	// wrong_shard_server sometimes comes from the only nonfailed server, so we need to avoid a fast spin
 	double WRONG_SHARD_SERVER_DELAY; // SOMEDAY: This delay can limit performance of retrieving data when the cache is mostly wrong (e.g. dumping the database after a test)
 	double FUTURE_VERSION_RETRY_DELAY;

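Besides deleting debug traces, this commit introduces a COORDINATOR_RECONNECTION_DELAY knob (default 1.0s) that replaces a hardcoded wait(delay(1.0)) later in the diff. For readers unfamiliar with the knob pattern the two hunks above follow: each tunable gets a named default at construction, and a randomizing test build ("BUGGIFY") may perturb it so simulation explores unusual timing. A minimal plain-C++ sketch of that idea; MiniClientKnobs and its fixed-seed RNG are simplified stand-ins, not FDB's actual macro-based knob machinery:

#include <random>

// Simplified stand-in for the ClientKnobs pattern above: named defaults set in
// the constructor, optionally randomized for simulation-style testing.
struct MiniClientKnobs {
	double COORDINATOR_RECONNECTION_DELAY;
	double WRONG_SHARD_SERVER_DELAY;

	explicit MiniClientKnobs(bool randomize) {
		COORDINATOR_RECONNECTION_DELAY = 1.0; // seconds between sweeps of the coordinator list
		WRONG_SHARD_SERVER_DELAY = 0.01;      // avoid a fast spin on wrong_shard_server

		if (randomize) {
			// Analogue of "if( randomize && BUGGIFY ) ... = deterministicRandom()->random01();"
			std::mt19937 rng(42); // fixed seed: deterministic, like FDB's simulation RNG
			WRONG_SHARD_SERVER_DELAY = std::uniform_real_distribution<double>(0.0, 1.0)(rng);
		}
	}
};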
fdbclient/MonitorLeader.actor.cpp
@@ -538,10 +538,9 @@ ACTOR Future<Void> getClientInfoFromLeader( Reference<AsyncVar<Optional<ClusterC
 	}

 	loop {
-		TraceEvent("SendMessageToCC", knownLeader->get().get().clientInterface.id()).detail("ClientID", clientData->clientInfo->get().id);
 		choose {
 			when( ClientDBInfo ni = wait( brokenPromiseToNever( knownLeader->get().get().clientInterface.openDatabase.getReply( clientData->getRequest() ) ) ) ) {
-				TraceEvent("GotClientInfo", knownLeader->get().get().clientInterface.id()).detail("Proxy0", ni.proxies.size() ? ni.proxies[0].id() : UID()).detail("ClientID", ni.id);
+				TraceEvent("MonitorLeaderForProxiesGotClientInfo", knownLeader->get().get().clientInterface.id()).detail("Proxy0", ni.proxies.size() ? ni.proxies[0].id() : UID()).detail("ClientID", ni.id);
 				clientData->clientInfo->set(ni);
 			}
 			when( wait( knownLeader->onChange() ) ) {}
@@ -556,7 +555,7 @@ ACTOR Future<Void> monitorLeaderForProxies( Value serializedInfo, ClientData* cl
 	state Future<Void> allActors;
 	state Reference<AsyncVar<Optional<ClusterControllerClientInterface>>> knownLeader(new AsyncVar<Optional<ClusterControllerClientInterface>>{});
 	state ClusterConnectionString cs(serializedInfo.toString());
-	try {
+
 	for(auto s = cs.coordinators().begin(); s != cs.coordinators().end(); ++s) {
 		clientLeaderServers.push_back( ClientLeaderRegInterface( *s ) );
 	}
@@ -566,7 +565,6 @@ ACTOR Future<Void> monitorLeaderForProxies( Value serializedInfo, ClientData* cl
 	std::vector<Future<Void>> actors;
 	// Ask all coordinators if the worker is considered as a leader (leader nominee) by the coordinator.
 	for(int i=0; i<clientLeaderServers.size(); i++) {
-		TraceEvent("MonitorLeaderForProxiesMon").detail("Addr", clientLeaderServers[i].openDatabase.getEndpoint().getPrimaryAddress()).detail("Key", cs.clusterKey().printable());
 		actors.push_back( monitorNominee( cs.clusterKey(), clientLeaderServers[i], &nomineeChange, &nominees[i] ) );
 	}
 	actors.push_back( getClientInfoFromLeader( knownLeader, clientData ) );
@@ -590,21 +588,15 @@ ACTOR Future<Void> monitorLeaderForProxies( Value serializedInfo, ClientData* cl
 					ObjectReader reader(leader.get().first.serializedInfo.begin());
 					ClusterControllerClientInterface res;
 					reader.deserialize(res);
-					TraceEvent("MonitorLeaderForProxiesParse1", res.clientInterface.id()).detail("Key", cs.clusterKey().printable());
 					knownLeader->set(res);
 				} else {
 					ClusterControllerClientInterface res = BinaryReader::fromStringRef<ClusterControllerClientInterface>( leader.get().first.serializedInfo, IncludeVersion() );
-					TraceEvent("MonitorLeaderForProxiesParse2", res.clientInterface.id()).detail("Key", cs.clusterKey().printable());
 					knownLeader->set(res);
 				}
 			}
 		}
 		wait( nomineeChange.onTrigger() || allActors );
 	}
-	} catch( Error &e ) {
-		TraceEvent("MonitorLeaderForProxiesError").error(e,true).detail("Key", cs.clusterKey().printable()).backtrace();
-		throw e;
-	}
 }

 // Leader is the process that will be elected by coordinators as the cluster controller

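The hunks above strip per-parse and per-poll traces from monitorLeaderForProxies, which asks every coordinator for its current leader nominee and reduces the answers to a single known leader. As a rough illustration of that reduction step only, here is a plurality count over nominee IDs; this is a simplified sketch with string IDs standing in for LeaderInfo (which actually carries a changeID, a serialized interface, and forwarding state), not FoundationDB's exact getLeader logic:

#include <map>
#include <optional>
#include <string>
#include <vector>

// Each coordinator reports an optional nominee; pick the nominee named most often.
std::optional<std::string> pickLeader(const std::vector<std::optional<std::string>>& nominees) {
	std::map<std::string, int> votes;
	for (const auto& n : nominees)
		if (n) ++votes[*n];

	std::optional<std::string> best;
	int bestVotes = 0;
	for (const auto& [id, count] : votes)
		if (count > bestVotes) { bestVotes = count; best = id; }
	return best; // empty if no coordinator has nominated anyone yet
}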
@@ -614,7 +606,6 @@ ACTOR Future<MonitorLeaderInfo> monitorProxiesOneGeneration( Reference<ClusterCo
 	state int idx = 0;
 	state int successIdx = 0;
 	deterministicRandom()->randomShuffle(addrs);
-	try {
 	loop {
 		state ClientLeaderRegInterface clientLeaderServer( addrs[idx] );
 		state OpenDatabaseCoordRequest req;
@@ -622,9 +613,7 @@ ACTOR Future<MonitorLeaderInfo> monitorProxiesOneGeneration( Reference<ClusterCo
 		req.serializedInfo = info.intermediateConnFile->getConnectionString().toString();
 		req.knownClientInfoID = clientInfo->get().id;

-		TraceEvent("MPOG_Start").detail("Addr", addrs[idx]).detail("Key", cs.clusterKey().printable());
 		state ErrorOr<ClientDBInfo> rep = wait( clientLeaderServer.openDatabase.tryGetReply( req, TaskPriority::CoordinationReply ) );
-		TraceEvent("MPOG_Reply").detail("Addr", addrs[idx]).detail("Present", rep.present()).detail("Key", cs.clusterKey().printable()).detail("Proxy0", rep.present() && rep.get().proxies.size() ? rep.get().proxies[0].id() : UID());
 		if (rep.present()) {
 			if( rep.get().forward.present() ) {
 				TraceEvent("MonitorProxiesForwarding").detail("NewConnStr", rep.get().forward.get().toString()).detail("OldConnStr", info.intermediateConnFile->getConnectionString().toString());
@@ -647,14 +636,10 @@ ACTOR Future<MonitorLeaderInfo> monitorProxiesOneGeneration( Reference<ClusterCo
 				clientInfo->set( rep.get() );
 				successIdx = idx;
 			} else if(idx == successIdx) {
-				wait(delay(1.0));
+				wait(delay(CLIENT_KNOBS->COORDINATOR_RECONNECTION_DELAY));
 			}
 			idx = (idx+1)%addrs.size();
 		}
-	} catch (Error &e) {
-		TraceEvent("MPOG_Error").error(e,true).detail("Key", cs.clusterKey().printable());
-		throw e;
-	}
 }

 ACTOR Future<Void> monitorProxies( Reference<ClusterConnectionFile> connFile, Reference<AsyncVar<ClientDBInfo>> clientInfo ) {

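This is the hunk the new knob exists for: when the rotation through the shuffled coordinator list comes back around to the index of the last success and that coordinator also fails, the client now waits CLIENT_KNOBS->COORDINATOR_RECONNECTION_DELAY instead of a hardcoded 1.0 seconds, so a full failed sweep does not become a tight spin. A plain-C++ sketch of just that rotation policy; tryCoordinator is a hypothetical stand-in for openDatabase.tryGetReply, and the loop runs forever like the actor's loop does:

#include <chrono>
#include <string>
#include <thread>
#include <vector>

// Hypothetical stand-in: returns true if this coordinator answered.
bool tryCoordinator(const std::string& addr) { return false; }

void pollCoordinators(const std::vector<std::string>& addrs, double reconnectionDelay) {
	size_t idx = 0;        // next coordinator to contact
	size_t successIdx = 0; // index of the coordinator that last answered
	while (true) {
		if (tryCoordinator(addrs[idx])) {
			successIdx = idx; // remember where we last succeeded
		} else if (idx == successIdx) {
			// We have gone all the way around without a success; pause so a
			// fully unreachable cluster is not hammered in a tight loop.
			std::this_thread::sleep_for(std::chrono::duration<double>(reconnectionDelay));
		}
		idx = (idx + 1) % addrs.size(); // round-robin to the next address
	}
}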
fdbserver/Coordination.actor.cpp
@@ -209,9 +209,7 @@ TEST_CASE("/fdbserver/Coordination/localGenerationReg/simple") {
 }

 ACTOR Future<Void> openDatabase(ClientData* db, int* clientCount, Reference<AsyncVar<bool>> hasConnectedClients, OpenDatabaseCoordRequest req) {
-	try {
 	if(db->clientInfo->get().id != req.knownClientInfoID && !db->clientInfo->get().forward.present()) {
-		TraceEvent("OpenDatabaseCoordReply").detail("Forward", db->clientInfo->get().forward.present()).detail("Key", req.key.printable());
 		req.reply.send( db->clientInfo->get() );
 		return Void();
 	}
@@ -220,7 +218,6 @@ ACTOR Future<Void> openDatabase(ClientData* db, int* clientCount, Reference<Asyn

 	db->clientStatusInfoMap[req.reply.getEndpoint().getPrimaryAddress()] = ClientStatusInfo(req.traceLogGroup.toString(), req.supportedVersions, req.issues);

-	TraceEvent("OpenDatabaseCoordWait").detail("Key", req.key.printable());
 	while (db->clientInfo->get().id == req.knownClientInfoID && !db->clientInfo->get().forward.present()) {
 		choose {
 			when (wait( db->clientInfo->onChange() )) {}
@@ -235,12 +232,8 @@ ACTOR Future<Void> openDatabase(ClientData* db, int* clientCount, Reference<Asyn
 	if(--(*clientCount) == 0) {
 		hasConnectedClients->set(false);
 	}
-	TraceEvent("OpenDatabaseCoordReply").detail("Forward", db->clientInfo->get().forward.present()).detail("Key", req.key.printable()).detail("Proxy0", db->clientInfo->get().proxies.size() ? db->clientInfo->get().proxies[0].id() : UID());
+
 	return Void();
-	} catch( Error &e ) {
-		TraceEvent("OpenDatabaseCoordError").error(e,true).detail("Key", req.key.printable());
-		throw;
-	}
 }

 // This actor implements a *single* leader-election register (essentially, it ignores

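openDatabase keeps a per-coordinator count of connected clients and mirrors "count > 0" into hasConnectedClients so other actors can wake on the transition. The decrement side is visible in the hunk above; the matching increment happens earlier in the actor and is not shown in this diff. A minimal sketch of that bookkeeping, with a plain bool standing in for the Reference<AsyncVar<bool>> (which, in FDB, notifies waiters on each edge transition):

// Connected-client bookkeeping in the spirit of openDatabase's
// clientCount / hasConnectedClients pair.
struct ClientTracker {
	int clientCount = 0;
	bool hasConnectedClients = false;

	void clientArrived() {
		if (++clientCount == 1)
			hasConnectedClients = true;  // first client: raise the flag
	}
	void clientLeft() {
		if (--clientCount == 0)
			hasConnectedClients = false; // last client gone: lower it
	}
};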
@@ -261,7 +254,6 @@ ACTOR Future<Void> leaderRegister(LeaderElectionRegInterface interf, Key key) {
 	state ActorCollection actors(false);
 	state Future<Void> leaderMon;

-	try {
 	loop choose {
 		when ( OpenDatabaseCoordRequest req = waitNext( interf.openDatabase.getFuture() ) ) {
 			if(!leaderMon.isValid()) {
@@ -386,10 +378,6 @@ ACTOR Future<Void> leaderRegister(LeaderElectionRegInterface interf, Key key) {
 		}
 		when( wait(actors.getResult()) ) {}
 	}
-	} catch (Error &e ) {
-		TraceEvent("LeaderRegisterError").error(e,true);
-		throw e;
-	}
 }

 // Generation register values are stored without prefixing in the coordinated state, but always begin with an alphanumeric character

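Every removed catch block in this commit has the same shape: trace the error, then rethrow it unchanged. Such wrappers add no recovery logic, so deleting them (together with their try) leaves behavior identical except for the missing log line. A generic C++ analogue of the pattern being deleted; logAndRethrow is an illustrative helper using std::exception, not an FDB API (Flow's Error is a value type, not derived from std::exception):

#include <exception>
#include <iostream>
#include <utility>

// The shape of the deleted blocks: run the body; on error, emit a trace-style
// line and propagate the exception unchanged. Useful while debugging, noise
// once the code path is trusted.
template <typename F>
decltype(auto) logAndRethrow(const char* site, F&& body) {
	try {
		return std::forward<F>(body)();
	} catch (const std::exception& e) {
		std::cerr << site << " error: " << e.what() << '\n'; // the "verbose trace"
		throw; // rethrow the original exception; nothing is handled here
	}
}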
@@ -485,7 +473,6 @@ ACTOR Future<Void> leaderServer(LeaderElectionRegInterface interf, OnDemandStore
 	loop choose {
 		when ( OpenDatabaseCoordRequest req = waitNext( interf.openDatabase.getFuture() ) ) {
 			Optional<LeaderInfo> forward = regs.getForward(req.key);
-			TraceEvent("OpenDatabaseCoordReq").detail("Forward", forward.present()).detail("Key", req.key.printable());
 			if( forward.present() ) {
 				ClientDBInfo info;
 				info.forward = forward.get().serializedInfo;