/*
 * Coordination.actor.cpp
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "flow/actorcompiler.h"
#include "fdbserver/CoordinationInterface.h"
#include "IKeyValueStore.h"
#include "flow/ActorCollection.h"
#include "Knobs.h"
#include "flow/UnitTest.h"
#include "flow/IndexedSet.h"

// This module implements coordinationServer() and the interfaces in CoordinationInterface.h

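// GenerationRegVal is the durable state of a single generation register: the largest
// generation at which the register has been read (readGen), the largest at which it
// has been written (writeGen), and the stored value. localGenerationReg() below uses
// these, much like the promised/accepted ballots of a Paxos register, to reject
// stale reads and writes.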
struct GenerationRegVal {
	UniqueGeneration readGen, writeGen;
	Optional<Value> val;
	template <class Ar>
	void serialize(Ar& ar) {
		ar & readGen & writeGen & val;
	}
};

// UID WLTOKEN_CLIENTLEADERREG_GETLEADER( -1, 2 ); // from fdbclient/MonitorLeader.actor.cpp
UID WLTOKEN_LEADERELECTIONREG_CANDIDACY( -1, 3 );
UID WLTOKEN_LEADERELECTIONREG_LEADERHEARTBEAT( -1, 4 );
UID WLTOKEN_LEADERELECTIONREG_FORWARD( -1, 5 );
UID WLTOKEN_GENERATIONREG_READ( -1, 6 );
UID WLTOKEN_GENERATIONREG_WRITE( -1, 7 );

GenerationRegInterface::GenerationRegInterface( NetworkAddress remote )
	: read( Endpoint(remote, WLTOKEN_GENERATIONREG_READ) ),
	  write( Endpoint(remote, WLTOKEN_GENERATIONREG_WRITE) )
{
}

GenerationRegInterface::GenerationRegInterface( INetwork* local )
{
	read.makeWellKnownEndpoint( WLTOKEN_GENERATIONREG_READ, TaskCoordination );
	write.makeWellKnownEndpoint( WLTOKEN_GENERATIONREG_WRITE, TaskCoordination );
}

LeaderElectionRegInterface::LeaderElectionRegInterface(NetworkAddress remote)
	: ClientLeaderRegInterface(remote),
	  candidacy( Endpoint(remote, WLTOKEN_LEADERELECTIONREG_CANDIDACY) ),
	  leaderHeartbeat( Endpoint(remote, WLTOKEN_LEADERELECTIONREG_LEADERHEARTBEAT) ),
	  forward( Endpoint(remote, WLTOKEN_LEADERELECTIONREG_FORWARD) )
{
}

LeaderElectionRegInterface::LeaderElectionRegInterface(INetwork* local)
	: ClientLeaderRegInterface(local)
{
	candidacy.makeWellKnownEndpoint( WLTOKEN_LEADERELECTIONREG_CANDIDACY, TaskCoordination );
	leaderHeartbeat.makeWellKnownEndpoint( WLTOKEN_LEADERELECTIONREG_LEADERHEARTBEAT, TaskCoordination );
	forward.makeWellKnownEndpoint( WLTOKEN_LEADERELECTIONREG_FORWARD, TaskCoordination );
}

ServerCoordinators::ServerCoordinators( Reference<ClusterConnectionFile> cf )
	: ClientCoordinators(cf)
{
	ClusterConnectionString cs = ccf->getConnectionString();
	for(auto s = cs.coordinators().begin(); s != cs.coordinators().end(); ++s) {
		leaderElectionServers.push_back( LeaderElectionRegInterface( *s ) );
		stateServers.push_back( GenerationRegInterface( *s ) );
	}
}

// The coordination server wants to create its key value store only if it is actually used
struct OnDemandStore {
public:
	OnDemandStore( std::string folder, UID myID ) : folder(folder), store(NULL), myID(myID) {}
	~OnDemandStore() { if (store) store->close(); }

	IKeyValueStore* get() {
		if (!store) open();
		return store;
	}

	bool exists() {
		if (store)
			return true;
		return fileExists( joinPath(folder, "coordination-0.fdq") ) || fileExists( joinPath(folder, "coordination-1.fdq") ) || fileExists( joinPath(folder, "coordination.fdb") );
	}

	IKeyValueStore* operator->() { return get(); }

	Future<Void> getError() { return onErr(err.getFuture()); }

private:
	std::string folder;
	UID myID;
	IKeyValueStore* store;
	Promise<Future<Void>> err;

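	// The store may not exist yet when getError() is called, so its error future is
	// delivered through a promise that open() fulfills later; onErr() flattens the
	// resulting Future<Future<Void>>.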
	ACTOR static Future<Void> onErr( Future<Future<Void>> e ) {
		Future<Void> f = wait(e);
		wait(f);
		return Void();
	}

	void open() {
		platform::createDirectory( folder );
		store = keyValueStoreMemory( joinPath(folder, "coordination-"), myID, 500e6 );
		err.send( store->getError() );
	}
};

ACTOR Future<Void> localGenerationReg( GenerationRegInterface interf, OnDemandStore* pstore ) {
	state GenerationRegVal v;
	state OnDemandStore& store = *pstore;
	// SOMEDAY: concurrent access to different keys?
	loop choose {
		when ( GenerationRegReadRequest _req = waitNext( interf.read.getFuture() ) ) {
			TraceEvent("GenerationRegReadRequest").detail("From", _req.reply.getEndpoint().address).detail("K", printable(_req.key));
			state GenerationRegReadRequest req = _req;
			Optional<Value> rawV = wait( store->readValue( req.key ) );
			v = rawV.present() ? BinaryReader::fromStringRef<GenerationRegVal>( rawV.get(), IncludeVersion() ) : GenerationRegVal();
			TraceEvent("GenerationRegReadReply").detail("RVSize", rawV.present() ? rawV.get().size() : -1).detail("VWG", v.writeGen.generation);
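			// Durably advance the read generation before replying, so that any
			// subsequent write with a generation below req.gen is guaranteed to fail.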
			if (v.readGen < req.gen) {
				v.readGen = req.gen;
				store->set( KeyValueRef( req.key, BinaryWriter::toValue(v, IncludeVersion()) ) );
				wait(store->commit());
			}
			req.reply.send( GenerationRegReadReply( v.val, v.writeGen, v.readGen ) );
		}
		when ( GenerationRegWriteRequest _wrq = waitNext( interf.write.getFuture() ) ) {
			state GenerationRegWriteRequest wrq = _wrq;
			Optional<Value> rawV = wait( store->readValue( wrq.kv.key ) );
			v = rawV.present() ? BinaryReader::fromStringRef<GenerationRegVal>( rawV.get(), IncludeVersion() ) : GenerationRegVal();
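			// The write succeeds only if no read or write with a newer generation has
			// already been recorded; otherwise the reply carries the conflicting
			// generation back to the caller.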
			if (v.readGen <= wrq.gen && v.writeGen < wrq.gen) {
				v.writeGen = wrq.gen;
				v.val = wrq.kv.value;
				store->set( KeyValueRef( wrq.kv.key, BinaryWriter::toValue(v, IncludeVersion()) ) );
				wait(store->commit());
				TraceEvent("GenerationRegWrote").detail("From", wrq.reply.getEndpoint().address).detail("Key", printable(wrq.kv.key))
					.detail("ReqGen", wrq.gen.generation).detail("Returning", v.writeGen.generation);
				wrq.reply.send( v.writeGen );
			} else {
				TraceEvent("GenerationRegWriteFail").detail("From", wrq.reply.getEndpoint().address).detail("Key", printable(wrq.kv.key))
					.detail("ReqGen", wrq.gen.generation).detail("ReadGen", v.readGen.generation).detail("WriteGen", v.writeGen.generation);
				wrq.reply.send( std::max( v.readGen, v.writeGen ) );
			}
		}
	}
}

TEST_CASE("fdbserver/Coordination/localGenerationReg/simple") {
	state GenerationRegInterface reg;
	state OnDemandStore store("simfdb/unittests/", //< FIXME
	                          g_random->randomUniqueID());
	state Future<Void> actor = localGenerationReg(reg, &store);
	state Key the_key = g_random->randomAlphaNumeric( g_random->randomInt(0, 10) );

	state UniqueGeneration firstGen(0, g_random->randomUniqueID());

	{
		GenerationRegReadReply r = wait(reg.read.getReply(GenerationRegReadRequest(the_key, firstGen)));
		// If there was no prior write(_,_,0) or a data loss fault,
		// returns (Optional(),0,gen2)
		ASSERT(!r.value.present());
		ASSERT(r.gen == UniqueGeneration());
		ASSERT(r.rgen == firstGen);
	}

	{
		UniqueGeneration g = wait(reg.write.getReply(GenerationRegWriteRequest(KeyValueRef(the_key, LiteralStringRef("Value1")), firstGen)));
		// (gen1==gen is considered a "successful" write)
		ASSERT(g == firstGen);
	}

	{
		GenerationRegReadReply r = wait(reg.read.getReply(GenerationRegReadRequest(the_key, UniqueGeneration())));
		// read(key,gen2) returns (value,gen,rgen).
		// There was some earlier or concurrent write(key,value,gen).
		ASSERT(r.value == LiteralStringRef("Value1"));
		ASSERT(r.gen == firstGen);
		// There was some earlier or concurrent read(key,rgen).
		ASSERT(r.rgen == firstGen);
		// If there is a write(key,_,gen1)=>gen1 s.t. gen1 < gen2 OR the write completed before this read started, then gen >= gen1.
		ASSERT(r.gen >= firstGen);
		// If there is a read(key,gen1) that completed before this read started, then rgen >= gen1
		ASSERT(r.rgen >= firstGen);
	}

	ASSERT(!actor.isReady());
	return Void();
}

// This actor implements a *single* leader-election register (essentially, it ignores
// the .key member of each request). It returns any time the leader election is in the
// default state, so that only active registers consume memory.
ACTOR Future<Void> leaderRegister(LeaderElectionRegInterface interf, Key key) {
	state std::set<LeaderInfo> availableCandidates;
	state std::set<LeaderInfo> availableLeaders;
	state Optional<LeaderInfo> currentNominee;
	state Deque<ReplyPromise<Optional<LeaderInfo>>> notify;
	state Future<Void> nextInterval = delay( 0 );
	state double candidateDelay = SERVER_KNOBS->CANDIDATE_MIN_DELAY;
	state int leaderIntervalCount = 0;
	state Future<Void> notifyCheck = delay(SERVER_KNOBS->NOTIFICATION_FULL_CLEAR_TIME / SERVER_KNOBS->MIN_NOTIFICATIONS);

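	// getLeader and candidacy requests that are already up to date (or that arrive
	// while there is no nominee) wait in the notify queue; every waiter is answered
	// when the nomination changes, and notifyCheck periodically answers the oldest
	// waiter so the queue drains even while the nominee is stable.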
	loop choose {
		when ( GetLeaderRequest req = waitNext( interf.getLeader.getFuture() ) ) {
			if (currentNominee.present() && currentNominee.get().changeID != req.knownLeader) {
				req.reply.send( currentNominee.get() );
			} else {
				notify.push_back( req.reply );
				if(notify.size() > SERVER_KNOBS->MAX_NOTIFICATIONS) {
					TraceEvent(SevWarnAlways, "TooManyNotifications").detail("Amount", notify.size());
					for(int i=0; i<notify.size(); i++)
						notify[i].send( currentNominee.get() );
					notify.clear();
				}
			}
		}
		when ( CandidacyRequest req = waitNext( interf.candidacy.getFuture() ) ) {
			//TraceEvent("CandidacyRequest").detail("Nominee", req.myInfo.changeID );
			availableCandidates.erase( LeaderInfo(req.prevChangeID) );
			availableCandidates.insert( req.myInfo );
			if (currentNominee.present() && currentNominee.get().changeID != req.knownLeader) {
				req.reply.send( currentNominee.get() );
			} else {
				notify.push_back( req.reply );
				if(notify.size() > SERVER_KNOBS->MAX_NOTIFICATIONS) {
					TraceEvent(SevWarnAlways, "TooManyNotifications").detail("Amount", notify.size());
					for(int i=0; i<notify.size(); i++)
						notify[i].send( currentNominee.get() );
					notify.clear();
				}
			}
		}
		when (LeaderHeartbeatRequest req = waitNext( interf.leaderHeartbeat.getFuture() ) ) {
			//TODO: use notify to only send a heartbeat once per interval
			availableLeaders.erase( LeaderInfo(req.prevChangeID) );
			availableLeaders.insert( req.myInfo );
			req.reply.send( currentNominee.present() && currentNominee.get().equalInternalId(req.myInfo) );
		}
		when (ForwardRequest req = waitNext( interf.forward.getFuture() ) ) {
			LeaderInfo newInfo;
			newInfo.forward = true;
			newInfo.serializedInfo = req.conn.toString();
			for(int i=0; i<notify.size(); i++)
				notify[i].send( newInfo );
			notify.clear();
			req.reply.send( Void() );
			return Void();
		}
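		// Once per interval, nominate the best available leader or candidate (the
		// least element under LeaderInfo's ordering) and notify waiters if the
		// nomination changed.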
		when ( wait(nextInterval) ) {
			if (!availableLeaders.size() && !availableCandidates.size() && !notify.size() &&
				!currentNominee.present())
			{
				// Our state is back to the initial state, so we can safely stop this actor
				TraceEvent("EndingLeaderNomination").detail("Key", printable(key));
				return Void();
			} else {
				Optional<LeaderInfo> nextNominee;
				if (availableLeaders.size() && availableCandidates.size()) {
					nextNominee = ( *availableLeaders.begin() < *availableCandidates.begin() ) ? *availableLeaders.begin() : *availableCandidates.begin();
				} else if (availableLeaders.size()) {
					nextNominee = *availableLeaders.begin();
				} else if (availableCandidates.size()) {
					nextNominee = *availableCandidates.begin();
				} else {
					nextNominee = Optional<LeaderInfo>();
				}

				bool foundCurrentNominee = false;
				if(currentNominee.present()) {
					for(auto& it : availableLeaders) {
						if(currentNominee.get().equalInternalId(it)) {
							foundCurrentNominee = true;
							break;
						}
					}
				}

				if ( !nextNominee.present() || !foundCurrentNominee || currentNominee.get().leaderChangeRequired(nextNominee.get()) ) {
					TraceEvent("NominatingLeader").detail("Nominee", nextNominee.present() ? nextNominee.get().changeID : UID())
						.detail("Changed", nextNominee != currentNominee).detail("Key", printable(key));
					for(int i=0; i<notify.size(); i++)
						notify[i].send( nextNominee );
					notify.clear();
					currentNominee = nextNominee;
				} else if (currentNominee.get().equalInternalId(nextNominee.get())) {
					// leader becomes better
					currentNominee = nextNominee;
				}

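				// Poll at a fixed frequency while a leader is alive; otherwise back
				// off the candidate polling interval exponentially, resetting it once
				// a leader has been stable for several intervals.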
				if( availableLeaders.size() ) {
					nextInterval = delay( SERVER_KNOBS->POLLING_FREQUENCY );
					if(leaderIntervalCount++ > 5) {
						candidateDelay = SERVER_KNOBS->CANDIDATE_MIN_DELAY;
					}
				} else {
					nextInterval = delay( candidateDelay );
					candidateDelay = std::min(SERVER_KNOBS->CANDIDATE_MAX_DELAY, candidateDelay * SERVER_KNOBS->CANDIDATE_GROWTH_RATE);
					leaderIntervalCount = 0;
				}

				availableLeaders.clear();
				availableCandidates.clear();
			}
		}
		when( wait(notifyCheck) ) {
			notifyCheck = delay( SERVER_KNOBS->NOTIFICATION_FULL_CLEAR_TIME / std::max<double>(SERVER_KNOBS->MIN_NOTIFICATIONS, notify.size()) );
			if(!notify.empty() && currentNominee.present()) {
				notify.front().send( currentNominee.get() );
				notify.pop_front();
			}
		}
	}
}

// Generation register values are stored without prefixing in the coordinated state, but always begin with an alphanumeric character
// (they are always derived from a ClusterConnectionString key).
// Forwarding values are stored in this range:
const KeyRangeRef fwdKeys( LiteralStringRef( "\xff" "fwd" ), LiteralStringRef( "\xff" "fwe" ) );

struct LeaderRegisterCollection {
	// SOMEDAY: Factor this into a generic tool? Extend ActorCollection to support removal actions? What?
	ActorCollection actors;
	Map<Key, LeaderElectionRegInterface> registerInterfaces;
	Map<Key, LeaderInfo> forward;
	OnDemandStore *pStore;

	LeaderRegisterCollection( OnDemandStore *pStore ) : actors( false ), pStore( pStore ) {}

	ACTOR static Future<Void> init( LeaderRegisterCollection *self ) {
		if( !self->pStore->exists() )
			return Void();
		OnDemandStore &store = *self->pStore;
		Standalone<VectorRef<KeyValueRef>> forwardingInfo = wait( store->readRange( fwdKeys ) );
		for( int i = 0; i < forwardingInfo.size(); i++ ) {
			LeaderInfo forwardInfo;
			forwardInfo.forward = true;
			forwardInfo.serializedInfo = forwardingInfo[i].value;
			self->forward[ forwardingInfo[i].key.removePrefix( fwdKeys.begin ) ] = forwardInfo;
		}
		return Void();
	}

	Future<Void> onError() { return actors.getResult(); }

	Optional<LeaderInfo> getForward(KeyRef key) {
		auto i = forward.find( key );
		if (i == forward.end())
			return Optional<LeaderInfo>();
		return i->value;
	}

	ACTOR static Future<Void> setForward(LeaderRegisterCollection *self, KeyRef key, ClusterConnectionString conn) {
		LeaderInfo forwardInfo;
		forwardInfo.forward = true;
		forwardInfo.serializedInfo = conn.toString();
		self->forward[ key ] = forwardInfo;
		OnDemandStore &store = *self->pStore;
		store->set( KeyValueRef( key.withPrefix( fwdKeys.begin ), conn.toString() ) );
		wait(store->commit());
		return Void();
	}

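	// Lazily spawns one leaderRegister actor per key; wrap() removes the interface
	// from the map again when that actor finishes, so idle registers are reclaimed.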
	LeaderElectionRegInterface& getInterface(KeyRef key) {
		auto i = registerInterfaces.find( key );
		if (i == registerInterfaces.end()) {
			Key k = key;
			Future<Void> a = wrap(this, k, leaderRegister(registerInterfaces[k], k) );
			if (a.isError()) throw a.getError();
			ASSERT( !a.isReady() );
			actors.add( a );
			i = registerInterfaces.find( key );
		}
		ASSERT( i != registerInterfaces.end() );
		return i->value;
	}

	ACTOR static Future<Void> wrap( LeaderRegisterCollection* self, Key key, Future<Void> actor ) {
		state Error e;
		try {
			wait(actor);
		} catch (Error& err) {
			if (err.code() == error_code_actor_cancelled)
				throw;
			e = err;
		}
		self->registerInterfaces.erase(key);
		if (e.code() != invalid_error_code) throw e;
		return Void();
	}
};

// leaderServer multiplexes multiple leaderRegisters onto a single LeaderElectionRegInterface,
// creating and destroying them on demand.
ACTOR Future<Void> leaderServer(LeaderElectionRegInterface interf, OnDemandStore *pStore) {
	state LeaderRegisterCollection regs( pStore );
	state ActorCollection forwarders(false);

	wait( LeaderRegisterCollection::init( &regs ) );

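	// Each request is first checked against the forwarding table: if a ForwardRequest
	// has previously redirected this key, clients are pointed at the stored connection
	// string instead of being routed to a local register.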
	loop choose {
		when ( GetLeaderRequest req = waitNext( interf.getLeader.getFuture() ) ) {
			Optional<LeaderInfo> forward = regs.getForward(req.key);
			if( forward.present() )
				req.reply.send( forward.get() );
			else
				regs.getInterface(req.key).getLeader.send( req );
		}
		when ( CandidacyRequest req = waitNext( interf.candidacy.getFuture() ) ) {
			Optional<LeaderInfo> forward = regs.getForward(req.key);
			if( forward.present() )
				req.reply.send( forward.get() );
			else
				regs.getInterface(req.key).candidacy.send(req);
		}
		when ( LeaderHeartbeatRequest req = waitNext( interf.leaderHeartbeat.getFuture() ) ) {
			Optional<LeaderInfo> forward = regs.getForward(req.key);
			if( forward.present() )
				req.reply.send( false );
			else
				regs.getInterface(req.key).leaderHeartbeat.send(req);
		}
		when ( ForwardRequest req = waitNext( interf.forward.getFuture() ) ) {
			Optional<LeaderInfo> forward = regs.getForward(req.key);
			if( forward.present() )
				req.reply.send( Void() );
			else {
				forwarders.add( LeaderRegisterCollection::setForward( &regs, req.key, ClusterConnectionString(req.conn.toString()) ) );
				regs.getInterface(req.key).forward.send(req);
			}
		}
		when( wait( forwarders.getResult() ) ) { ASSERT(false); throw internal_error(); }
	}
}

ACTOR Future<Void> coordinationServer(std::string dataFolder) {
	state UID myID = g_random->randomUniqueID();
	state LeaderElectionRegInterface myLeaderInterface( g_network );
	state GenerationRegInterface myInterface( g_network );
	state OnDemandStore store( dataFolder, myID );

	TraceEvent("CoordinationServer", myID).detail("MyInterfaceAddr", myInterface.read.getEndpoint().address).detail("Folder", dataFolder);

	try {
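		// None of these actors returns in normal operation, and the || combinator
		// completes as soon as any one of them does, so reaching the line after the
		// wait() means something exited unexpectedly.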
		wait( localGenerationReg(myInterface, &store) || leaderServer(myLeaderInterface, &store) || store.getError() );
		throw internal_error();
	} catch (Error& e) {
		TraceEvent("CoordinationServerError", myID).error(e, true);
		throw;
	}
}