fix: do not log that data distribution is initialized until readyToStart is ready

This commit is contained in:
Evan Tschannen 2017-06-30 16:21:59 -07:00
parent 158b0186e4
commit aa1c903b52
1 changed file with 6 additions and 4 deletions

View File

@ -485,11 +485,12 @@ struct DDTeamCollection {
int desiredDataCenters, int desiredDataCenters,
IRepPolicyRef replicationPolicy, IRepPolicyRef replicationPolicy,
KeyValueStoreType storeType, KeyValueStoreType storeType,
PromiseStream< std::pair<UID, Optional<StorageServerInterface>> > const& serverChanges ) PromiseStream< std::pair<UID, Optional<StorageServerInterface>> > const& serverChanges,
Future<Void> readyToStart )
:cx(cx), masterId(masterId), lock(lock), output(output), shardsAffectedByTeamFailure(shardsAffectedByTeamFailure), doBuildTeams( true ), teamBuilder( Void() ), :cx(cx), masterId(masterId), lock(lock), output(output), shardsAffectedByTeamFailure(shardsAffectedByTeamFailure), doBuildTeams( true ), teamBuilder( Void() ),
teamSize( teamSize ), minDataCenters( minDataCenters ), desiredDataCenters( desiredDataCenters ), replicationPolicy(replicationPolicy), storeType( storeType ), serverChanges(serverChanges), teamSize( teamSize ), minDataCenters( minDataCenters ), desiredDataCenters( desiredDataCenters ), replicationPolicy(replicationPolicy), storeType( storeType ), serverChanges(serverChanges),
initialFailureReactionDelay( delay( BUGGIFY ? 0 : SERVER_KNOBS->INITIAL_FAILURE_REACTION_DELAY, TaskDataDistribution ) ), healthyTeamCount( 0 ), initialFailureReactionDelay( delay( BUGGIFY ? 0 : SERVER_KNOBS->INITIAL_FAILURE_REACTION_DELAY, TaskDataDistribution ) ), healthyTeamCount( 0 ),
initializationDoneActor(logOnCompletion(initialFailureReactionDelay, this)), optimalTeamCount( 0 ), recruitingStream(0), restartRecruiting( SERVER_KNOBS->DEBOUNCE_RECRUITING_DELAY ), initializationDoneActor(logOnCompletion(readyToStart && initialFailureReactionDelay, this)), optimalTeamCount( 0 ), recruitingStream(0), restartRecruiting( SERVER_KNOBS->DEBOUNCE_RECRUITING_DELAY ),
unhealthyServers(0) unhealthyServers(0)
{ {
TraceEvent("DDTrackerStarting", masterId) TraceEvent("DDTrackerStarting", masterId)
@ -1774,7 +1775,7 @@ ACTOR Future<Void> dataDistributionTeamCollection(
Future<Void> readyToStart ) Future<Void> readyToStart )
{ {
state DDTeamCollection self( cx, masterId, lock, output, shardsAffectedByTeamFailure, teamSize, minDataCenters, state DDTeamCollection self( cx, masterId, lock, output, shardsAffectedByTeamFailure, teamSize, minDataCenters,
desiredDataCenters, replicationPolicy, storeType, serverChanges ); desiredDataCenters, replicationPolicy, storeType, serverChanges, readyToStart );
state Future<Void> loggingTrigger = Void(); state Future<Void> loggingTrigger = Void();
state PromiseStream<Void> serverRemoved; state PromiseStream<Void> serverRemoved;
@ -2157,7 +2158,8 @@ DDTeamCollection* testTeamCollection(int teamSize, IRepPolicyRef policy, int pro
-1, -1,
policy, policy,
KeyValueStoreType(), KeyValueStoreType(),
PromiseStream<std::pair<UID, Optional<StorageServerInterface>>>() PromiseStream<std::pair<UID, Optional<StorageServerInterface>>>(),
Future<Void>(Void())
); );
for(int id = 1; id <= processCount; id++) { for(int id = 1; id <= processCount; id++) {