/*
 * worker.actor.cpp
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <tuple>
#include <boost/lexical_cast.hpp>

#include "fdbrpc/Locality.h"
#include "fdbclient/StorageServerInterface.h"
#include "fdbserver/Knobs.h"
#include "flow/ActorCollection.h"
#include "flow/SystemMonitor.h"
#include "flow/TDMetric.actor.h"
#include "fdbrpc/simulator.h"
#include "fdbclient/NativeAPI.actor.h"
#include "fdbclient/MetricLogger.h"
#include "fdbserver/BackupInterface.h"
#include "fdbserver/WorkerInterface.actor.h"
#include "fdbserver/IKeyValueStore.h"
#include "fdbserver/WaitFailure.h"
#include "fdbserver/TesterInterface.actor.h" // for poisson()
#include "fdbserver/IDiskQueue.h"
#include "fdbclient/DatabaseContext.h"
#include "fdbserver/DataDistributorInterface.h"
#include "fdbserver/ServerDBInfo.h"
#include "fdbserver/FDBExecHelper.actor.h"
#include "fdbserver/CoordinationInterface.h"
#include "fdbclient/MonitorLeader.h"
#include "fdbclient/ClientWorkerInterface.h"
#include "flow/Profiler.h"
#include "flow/ThreadHelper.actor.h"
#include "flow/Trace.h"
#ifdef __linux__
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#endif
#if defined(__linux__) || defined(__FreeBSD__)
#ifdef USE_GPERFTOOLS
#include "gperftools/profiler.h"
#include "gperftools/heap-profiler.h"
#endif
#include <unistd.h>
#include <thread>
#include <execinfo.h>
#endif

#include "flow/actorcompiler.h" // This must be the last #include.
#if CENABLED(0, NOT_IN_CLEAN)
extern IKeyValueStore* keyValueStoreCompressTestData(IKeyValueStore* store);
#define KV_STORE(filename, uid) keyValueStoreCompressTestData(keyValueStoreSQLite(filename, uid))
#elif CENABLED(0, NOT_IN_CLEAN)
#define KV_STORE(filename, uid) keyValueStoreSQLite(filename, uid)
#else
#define KV_STORE(filename, uid) keyValueStoreMemory(filename, uid)
#endif

ACTOR Future<std::vector<Endpoint>> tryDBInfoBroadcast(RequestStream<UpdateServerDBInfoRequest> stream,
                                                       UpdateServerDBInfoRequest req) {
    ErrorOr<std::vector<Endpoint>> rep =
        wait(stream.getReplyUnlessFailedFor(req, SERVER_KNOBS->DBINFO_FAILED_DELAY, 0));
    if (rep.present()) {
        return rep.get();
    }
    req.broadcastInfo.push_back(stream.getEndpoint());
    return req.broadcastInfo;
}
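
// broadcastDBInfoRequest fans the ServerDBInfo update in req out to the endpoints listed in req.broadcastInfo.
// It contacts at most sendAmount endpoints directly and hands each of them an even slice of the remaining
// endpoints to forward to, forming a shallow broadcast tree. Endpoints that could not be reached are collected
// and returned, and optionally sent back to the original requester via req.reply.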
ACTOR Future<std::vector<Endpoint>> broadcastDBInfoRequest(UpdateServerDBInfoRequest req, int sendAmount,
                                                           Optional<Endpoint> sender, bool sendReply) {
    state std::vector<Future<std::vector<Endpoint>>> replies;
    state ReplyPromise<std::vector<Endpoint>> reply = req.reply;
    resetReply(req);
    int currentStream = 0;
    std::vector<Endpoint> broadcastEndpoints = req.broadcastInfo;
    for (int i = 0; i < sendAmount && currentStream < broadcastEndpoints.size(); i++) {
        std::vector<Endpoint> endpoints;
        RequestStream<UpdateServerDBInfoRequest> cur(broadcastEndpoints[currentStream++]);
        while (currentStream < broadcastEndpoints.size() * (i + 1) / sendAmount) {
            endpoints.push_back(broadcastEndpoints[currentStream++]);
        }
        req.broadcastInfo = endpoints;
        replies.push_back(tryDBInfoBroadcast(cur, req));
        resetReply(req);
    }
    wait(waitForAll(replies));
    std::vector<Endpoint> notUpdated;
    if (sender.present()) {
        notUpdated.push_back(sender.get());
    }
    for (auto& it : replies) {
        notUpdated.insert(notUpdated.end(), it.get().begin(), it.get().end());
    }
    if (sendReply) {
        reply.send(notUpdated);
    }
    return notUpdated;
}
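
// Mirrors the client-facing portion of the ServerDBInfo into a ClientDBInfo AsyncVar for use by
// openDBOnServer() below, passing the proxy lists through shrinkProxyList() so that the set of commit and GRV
// proxies the local client uses stays stable across updates.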
ACTOR static Future<Void> extractClientInfo(Reference<AsyncVar<ServerDBInfo>> db,
                                            Reference<AsyncVar<ClientDBInfo>> info) {
    state std::vector<UID> lastCommitProxyUIDs;
    state std::vector<CommitProxyInterface> lastCommitProxies;
    state std::vector<UID> lastGrvProxyUIDs;
    state std::vector<GrvProxyInterface> lastGrvProxies;
    loop {
        ClientDBInfo ni = db->get().client;
        shrinkProxyList(ni, lastCommitProxyUIDs, lastCommitProxies, lastGrvProxyUIDs, lastGrvProxies);
        info->set(ni);
        wait(db->onChange());
    }
}

Database openDBOnServer(Reference<AsyncVar<ServerDBInfo>> const& db, TaskPriority taskID,
                        bool enableLocalityLoadBalance, bool lockAware) {
    Reference<AsyncVar<ClientDBInfo>> info(new AsyncVar<ClientDBInfo>);
    return DatabaseContext::create(info, extractClientInfo(db, info),
                                   enableLocalityLoadBalance ? db->get().myLocality : LocalityData(),
                                   enableLocalityLoadBalance, taskID, lockAware);
}

struct ErrorInfo {
    Error error;
    const Role& role;
    UID id;
    ErrorInfo(Error e, const Role& role, UID id) : error(e), role(role), id(id) {}
    template <class Ar>
    void serialize(Ar&) {
        ASSERT(false);
    }
};

Error checkIOTimeout(Error const& e) {
    // Convert all_errors to io_timeout if global timeout bool was set
    bool timeoutOccurred = (bool)g_network->global(INetwork::enASIOTimedOut);
    // In simulation, have to check the global timed out flag for both this process and the machine process on
    // which IO is done
    if (g_network->isSimulated() && !timeoutOccurred)
        timeoutOccurred = g_pSimulator->getCurrentProcess()->machine->machineProcess->global(INetwork::enASIOTimedOut);

    if (timeoutOccurred) {
        TEST(true); // Timeout occurred
        Error timeout = io_timeout();
        // Preserve injectedness of error
        if (e.isInjectedFault())
            timeout = timeout.asInjectedFault();
        return timeout;
    }
    return e;
}

ACTOR Future<Void> forwardError(PromiseStream<ErrorInfo> errors, Role role, UID id, Future<Void> process) {
    try {
        wait(process);
        errors.send(ErrorInfo(success(), role, id));
        return Void();
    } catch (Error& e) {
        errors.send(ErrorInfo(e, role, id));
        return Void();
    }
}
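
// handleIOErrors runs the given actor while also watching the underlying store for an error. If the store
// reports an error first, the actor is cancelled and the store's error is thrown (file_not_found is tolerated,
// since it can result from opening a partially deleted DiskQueue); if the actor finishes first, any pending
// store error still takes precedence over the actor's own result.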
ACTOR Future<Void> handleIOErrors(Future<Void> actor, IClosable* store, UID id, Future<Void> onClosed = Void()) {
    state Future<ErrorOr<Void>> storeError = actor.isReady() ? Never() : errorOr(store->getError());
    choose {
        when(state ErrorOr<Void> e = wait(errorOr(actor))) {
            if (e.isError() && e.getError().code() == error_code_please_reboot) {
                // no need to wait.
            } else {
                wait(onClosed);
            }
            if (e.isError() && e.getError().code() == error_code_broken_promise && !storeError.isReady()) {
                wait(delay(0.00001 + FLOW_KNOBS->MAX_BUGGIFIED_DELAY));
            }
            if (storeError.isReady()) throw storeError.get().getError();
            if (e.isError()) throw e.getError(); else return e.get();
        }
        when(ErrorOr<Void> e = wait(storeError)) {
            TraceEvent("WorkerTerminatingByIOError", id).error(e.getError(), true);
            actor.cancel();
            // file_not_found can occur due to attempting to open a partially deleted DiskQueue, which should
            // not be reported as SevError.
            if (e.getError().code() == error_code_file_not_found) {
                TEST(true); // Worker terminated with file_not_found error
                return Void();
            }
            throw e.getError();
        }
    }
}

ACTOR Future<Void> workerHandleErrors(FutureStream<ErrorInfo> errors) {
    loop choose {
        when(ErrorInfo _err = waitNext(errors)) {
            ErrorInfo err = _err;
            bool ok = err.error.code() == error_code_success ||
                      err.error.code() == error_code_please_reboot ||
                      err.error.code() == error_code_actor_cancelled ||
                      err.error.code() == error_code_coordinators_changed || // The worker server was cancelled
                      err.error.code() == error_code_shutdown_in_progress;

            if (!ok) {
                err.error = checkIOTimeout(err.error); // Possibly convert error to io_timeout
            }

            endRole(err.role, err.id, "Error", ok, err.error);

            if (err.error.code() == error_code_please_reboot ||
                err.error.code() == error_code_io_timeout ||
                (err.role == Role::SHARED_TRANSACTION_LOG && err.error.code() == error_code_io_error))
                throw err.error;
        }
    }
}

// Improve simulation code coverage by sometimes deferring the destruction of workerInterface (and therefore
// "endpoint not found" responses to clients) for an extra second, so that clients are more likely to see
// broken_promise errors.
ACTOR template <class T>
Future<Void> zombie(T workerInterface, Future<Void> worker) {
    try {
        wait(worker);
        if (BUGGIFY)
            wait(delay(1.0));
        return Void();
    } catch (Error& e) {
        throw;
    }
}

ACTOR Future<Void> loadedPonger(FutureStream<LoadedPingRequest> pings) {
    state Standalone<StringRef> payloadBack(std::string(20480, '.'));
    loop {
        LoadedPingRequest pong = waitNext(pings);
        LoadedReply rep;
        rep.payload = (pong.loadReply ? payloadBack : LiteralStringRef(""));
        rep.id = pong.id;
        pong.reply.send(rep);
    }
}

StringRef fileStoragePrefix = LiteralStringRef("storage-");
StringRef fileLogDataPrefix = LiteralStringRef("log-");
StringRef fileVersionedLogDataPrefix = LiteralStringRef("log2-");
StringRef fileLogQueuePrefix = LiteralStringRef("logqueue-");
StringRef tlogQueueExtension = LiteralStringRef("fdq");

enum class FilesystemCheck {
    FILES_ONLY,
    DIRECTORIES_ONLY,
    FILES_AND_DIRECTORIES,
};

struct KeyValueStoreSuffix {
    KeyValueStoreType type;
    std::string suffix;
    FilesystemCheck check;
};

KeyValueStoreSuffix bTreeV1Suffix = { KeyValueStoreType::SSD_BTREE_V1, ".fdb", FilesystemCheck::FILES_ONLY };
KeyValueStoreSuffix bTreeV2Suffix = { KeyValueStoreType::SSD_BTREE_V2, ".sqlite", FilesystemCheck::FILES_ONLY };
KeyValueStoreSuffix memorySuffix = { KeyValueStoreType::MEMORY, "-0.fdq", FilesystemCheck::FILES_ONLY };
KeyValueStoreSuffix memoryRTSuffix = { KeyValueStoreType::MEMORY_RADIXTREE, "-0.fdr", FilesystemCheck::FILES_ONLY };
KeyValueStoreSuffix redwoodSuffix = { KeyValueStoreType::SSD_REDWOOD_V1, ".redwood", FilesystemCheck::FILES_ONLY };
KeyValueStoreSuffix rocksdbSuffix = { KeyValueStoreType::SSD_ROCKSDB_V1, ".rocksdb",
                                      FilesystemCheck::DIRECTORIES_ONLY };

std::string validationFilename = "_validate";
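
// filenameFromSample and filenameFromId map between on-disk file names and the (base) filename passed to
// openKVStore(). For memory-backed stores only a base filename ending in "-" is used, since the underlying
// DiskQueue appends its own suffixes.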
std::string filenameFromSample(KeyValueStoreType storeType, std::string folder, std::string sample_filename) {
    if (storeType == KeyValueStoreType::SSD_BTREE_V1)
        return joinPath(folder, sample_filename);
    else if (storeType == KeyValueStoreType::SSD_BTREE_V2)
        return joinPath(folder, sample_filename);
    else if (storeType == KeyValueStoreType::MEMORY || storeType == KeyValueStoreType::MEMORY_RADIXTREE)
        return joinPath(folder, sample_filename.substr(0, sample_filename.size() - 5));
    else if (storeType == KeyValueStoreType::SSD_REDWOOD_V1)
        return joinPath(folder, sample_filename);
    else if (storeType == KeyValueStoreType::SSD_ROCKSDB_V1)
        return joinPath(folder, sample_filename);
    UNREACHABLE();
}

std::string filenameFromId(KeyValueStoreType storeType, std::string folder, std::string prefix, UID id) {
    if (storeType == KeyValueStoreType::SSD_BTREE_V1)
        return joinPath(folder, prefix + id.toString() + ".fdb");
    else if (storeType == KeyValueStoreType::SSD_BTREE_V2)
        return joinPath(folder, prefix + id.toString() + ".sqlite");
    else if (storeType == KeyValueStoreType::MEMORY || storeType == KeyValueStoreType::MEMORY_RADIXTREE)
        return joinPath(folder, prefix + id.toString() + "-");
    else if (storeType == KeyValueStoreType::SSD_REDWOOD_V1)
        return joinPath(folder, prefix + id.toString() + ".redwood");
    else if (storeType == KeyValueStoreType::SSD_ROCKSDB_V1)
        return joinPath(folder, prefix + id.toString() + ".rocksdb");
    UNREACHABLE();
}
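
// TLogOptions captures the TLog version and spill type for a (shared) TLog instance. The options are encoded
// into, and parsed back out of, the versioned log data file names (the "V_..._LS_..." prefix), so that a
// restarted worker can recover each TLog with the code that matches its files.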
struct TLogOptions {
    TLogOptions() = default;
    TLogOptions(TLogVersion v, TLogSpillType s) : version(v), spillType(s) {}

    TLogVersion version = TLogVersion::DEFAULT;
    TLogSpillType spillType = TLogSpillType::UNSET;

    static ErrorOr<TLogOptions> FromStringRef(StringRef s) {
        TLogOptions options;
        for (StringRef key = s.eat("_"), value = s.eat("_"); s.size() != 0 || key.size();
             key = s.eat("_"), value = s.eat("_")) {
            if (key.size() != 0 && value.size() == 0) return default_error_or();

            if (key == LiteralStringRef("V")) {
                ErrorOr<TLogVersion> tLogVersion = TLogVersion::FromStringRef(value);
                if (tLogVersion.isError()) return tLogVersion.getError();
                options.version = tLogVersion.get();
            } else if (key == LiteralStringRef("LS")) {
                ErrorOr<TLogSpillType> tLogSpillType = TLogSpillType::FromStringRef(value);
                if (tLogSpillType.isError()) return tLogSpillType.getError();
                options.spillType = tLogSpillType.get();
            } else {
                return default_error_or();
            }
        }
        return options;
    }

    bool operator==(const TLogOptions& o) {
        return version == o.version && (spillType == o.spillType || version >= TLogVersion::V5);
    }

    std::string toPrefix() const {
        std::string toReturn = "";
        switch (version) {
        case TLogVersion::UNSET:
            ASSERT(false);
        case TLogVersion::V2:
            return "";
        case TLogVersion::V3:
        case TLogVersion::V4:
            toReturn = "V_" + boost::lexical_cast<std::string>(version) + "_LS_" +
                       boost::lexical_cast<std::string>(spillType);
            break;
        case TLogVersion::V5:
            toReturn = "V_" + boost::lexical_cast<std::string>(version);
            break;
        }
        ASSERT_WE_THINK(FromStringRef(toReturn).get() == *this);
        return toReturn + "-";
    }
};
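
// Selects the TLog implementation that matches the requested TLog version and spill type: V2 files are served
// by the 6.0-era TLog, V3/V4 by either the 6.0 or 6.2 TLog depending on whether values are spilled by value or
// by reference, and V5 by the current TLog.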
TLogFn tLogFnForOptions(TLogOptions options) {
    switch (options.version) {
    case TLogVersion::V2:
        if (options.spillType == TLogSpillType::REFERENCE) ASSERT(false);
        return oldTLog_6_0::tLog;
    case TLogVersion::V3:
    case TLogVersion::V4:
        if (options.spillType == TLogSpillType::VALUE)
            return oldTLog_6_0::tLog;
        else
            return oldTLog_6_2::tLog;
    case TLogVersion::V5:
        return tLog;
    default:
        ASSERT(false);
    }
    return tLog;
}

struct DiskStore {
    enum COMPONENT { TLogData, Storage, UNSET };

    UID storeID = UID();
    std::string filename = ""; // For KVStoreMemory just the base filename to be passed to IDiskQueue
    COMPONENT storedComponent = UNSET;
    KeyValueStoreType storeType = KeyValueStoreType::END;
    TLogOptions tLogOptions;
};

std::vector<DiskStore> getDiskStores(std::string folder, std::string suffix, KeyValueStoreType type,
                                     FilesystemCheck check) {
    std::vector<DiskStore> result;
    vector<std::string> files;

    if (check == FilesystemCheck::FILES_ONLY || check == FilesystemCheck::FILES_AND_DIRECTORIES) {
        files = platform::listFiles(folder, suffix);
    }
    if (check == FilesystemCheck::DIRECTORIES_ONLY || check == FilesystemCheck::FILES_AND_DIRECTORIES) {
        for (const auto& directory : platform::listDirectories(folder)) {
            if (StringRef(directory).endsWith(suffix)) {
                files.push_back(directory);
            }
        }
    }

    for (int idx = 0; idx < files.size(); idx++) {
        DiskStore store;
        store.storeType = type;

        StringRef filename = StringRef(files[idx]);
        Standalone<StringRef> prefix;
        if (filename.startsWith(fileStoragePrefix)) {
            store.storedComponent = DiskStore::Storage;
            prefix = fileStoragePrefix;
        } else if (filename.startsWith(fileVersionedLogDataPrefix)) {
            store.storedComponent = DiskStore::TLogData;
            // Use the option string that's in the file rather than tLogOptions.toPrefix(),
            // because they might be different if a new option was introduced in this version.
            StringRef optionsString = filename.removePrefix(fileVersionedLogDataPrefix).eat("-");
            TraceEvent("DiskStoreVersioned").detail("Filename", filename);
            ErrorOr<TLogOptions> tLogOptions = TLogOptions::FromStringRef(optionsString);
            if (tLogOptions.isError()) {
                TraceEvent(SevWarn, "DiskStoreMalformedFilename").detail("Filename", filename);
                continue;
            }
            TraceEvent("DiskStoreVersionedSuccess").detail("Filename", filename);
            store.tLogOptions = tLogOptions.get();
            prefix = filename.substr(0, fileVersionedLogDataPrefix.size() + optionsString.size() + 1);
        } else if (filename.startsWith(fileLogDataPrefix)) {
            TraceEvent("DiskStoreUnversioned").detail("Filename", filename);
            store.storedComponent = DiskStore::TLogData;
            store.tLogOptions.version = TLogVersion::V2;
            store.tLogOptions.spillType = TLogSpillType::VALUE;
            prefix = fileLogDataPrefix;
        } else {
            continue;
        }

        store.storeID = UID::fromString(files[idx].substr(prefix.size(), 32));
        store.filename = filenameFromSample(type, folder, files[idx]);
        result.push_back(store);
    }
    return result;
}

std::vector<DiskStore> getDiskStores(std::string folder) {
    auto result = getDiskStores(folder, bTreeV1Suffix.suffix, bTreeV1Suffix.type, bTreeV1Suffix.check);
    auto result1 = getDiskStores(folder, bTreeV2Suffix.suffix, bTreeV2Suffix.type, bTreeV2Suffix.check);
    result.insert(result.end(), result1.begin(), result1.end());
    auto result2 = getDiskStores(folder, memorySuffix.suffix, memorySuffix.type, memorySuffix.check);
    result.insert(result.end(), result2.begin(), result2.end());
    auto result3 = getDiskStores(folder, redwoodSuffix.suffix, redwoodSuffix.type, redwoodSuffix.check);
    result.insert(result.end(), result3.begin(), result3.end());
    auto result4 = getDiskStores(folder, memoryRTSuffix.suffix, memoryRTSuffix.type, memoryRTSuffix.check);
    result.insert(result.end(), result4.begin(), result4.end());
    auto result5 = getDiskStores(folder, rocksdbSuffix.suffix, rocksdbSuffix.type, rocksdbSuffix.check);
    result.insert(result.end(), result5.begin(), result5.end());
    return result;
}

// Registers the worker interface with the cluster controller (cc) and re-registers whenever the cc or one of
// the key role interfaces (e.g. data distributor or ratekeeper) changes.
ACTOR Future<Void> registrationClient(Reference<AsyncVar<Optional<ClusterControllerFullInterface>>> ccInterface,
                                      WorkerInterface interf,
                                      Reference<AsyncVar<ClusterControllerPriorityInfo>> asyncPriorityInfo,
                                      ProcessClass initialClass,
                                      Reference<AsyncVar<Optional<DataDistributorInterface>>> ddInterf,
                                      Reference<AsyncVar<Optional<RatekeeperInterface>>> rkInterf,
                                      Reference<AsyncVar<bool>> degraded,
                                      PromiseStream<ErrorInfo> errors,
                                      LocalityData locality,
                                      Reference<AsyncVar<ServerDBInfo>> dbInfo,
                                      Reference<ClusterConnectionFile> connFile,
                                      Reference<AsyncVar<std::set<std::string>>> issues) {
    // Keeps the cluster controller (as it may be re-elected) informed that this worker exists.
    // The cluster controller uses waitFailureClient to find out if we die, and returns from registrationReply
    // (requiring us to re-register).
    // The registration request piggybacks the optional data distributor interface if it exists.
    state Generation requestGeneration = 0;
    state ProcessClass processClass = initialClass;
    state Reference<AsyncVar<Optional<std::pair<uint16_t, StorageServerInterface>>>> scInterf(
        new AsyncVar<Optional<std::pair<uint16_t, StorageServerInterface>>>());
    state Future<Void> cacheProcessFuture;
    state Future<Void> cacheErrorsFuture;
    state Optional<double> incorrectTime;
    loop {
        RegisterWorkerRequest request(interf, initialClass, processClass, asyncPriorityInfo->get(),
                                      requestGeneration++, ddInterf->get(), rkInterf->get(), degraded->get());
        for (auto const& i : issues->get()) {
            request.issues.push_back_deep(request.issues.arena(), i);
        }
        ClusterConnectionString fileConnectionString;
        if (connFile && !connFile->fileContentsUpToDate(fileConnectionString)) {
            request.issues.push_back_deep(request.issues.arena(), LiteralStringRef("incorrect_cluster_file_contents"));
            std::string connectionString = connFile->getConnectionString().toString();
            if (!incorrectTime.present()) {
                incorrectTime = now();
            }
            if (connFile->canGetFilename()) {
                // Don't log a SevWarnAlways initially to account for transient issues (e.g. someone else
                // changing the file right before us)
                TraceEvent(now() - incorrectTime.get() > 300 ? SevWarnAlways : SevWarn, "IncorrectClusterFileContents")
                    .detail("Filename", connFile->getFilename())
                    .detail("ConnectionStringFromFile", fileConnectionString.toString())
                    .detail("CurrentConnectionString", connectionString);
            }
        } else {
            incorrectTime = Optional<double>();
        }

        auto peers = FlowTransport::transport().getIncompatiblePeers();
        for (auto it = peers->begin(); it != peers->end();) {
            if (now() - it->second.second > FLOW_KNOBS->INCOMPATIBLE_PEER_DELAY_BEFORE_LOGGING) {
                request.incompatiblePeers.push_back(it->first);
                it = peers->erase(it);
            } else {
                it++;
            }
        }

        Future<RegisterWorkerReply> registrationReply =
            ccInterface->get().present()
                ? brokenPromiseToNever(ccInterface->get().get().registerWorker.getReply(request))
                : Never();
        choose {
            when(RegisterWorkerReply reply = wait(registrationReply)) {
                processClass = reply.processClass;
                asyncPriorityInfo->set(reply.priorityInfo);
            }
            when(wait(ccInterface->onChange())) {}
            when(wait(ddInterf->onChange())) {}
            when(wait(rkInterf->onChange())) {}
            when(wait(degraded->onChange())) {}
            when(wait(FlowTransport::transport().onIncompatibleChanged())) {}
            when(wait(issues->onChange())) {}
        }
    }
}

#if (defined(__linux__) || defined(__FreeBSD__)) && defined(USE_GPERFTOOLS)
// A set of threads that should be profiled
std::set<std::thread::id> profiledThreads;

// Returns whether or not a given thread should be profiled
int filter_in_thread(void* arg) {
    return profiledThreads.count(std::this_thread::get_id()) > 0 ? 1 : 0;
}
#endif

// Enables the calling thread to be profiled
void registerThreadForProfiling() {
#if (defined(__linux__) || defined(__FreeBSD__)) && defined(USE_GPERFTOOLS)
    // Not sure if this is actually needed, but a call to backtrace was advised here:
    // http://groups.google.com/group/google-perftools/browse_thread/thread/0dfd74532e038eb8/2686d9f24ac4365f?pli=1
    profiledThreads.insert(std::this_thread::get_id());
    const int num_levels = 100;
    void* pc[num_levels];
    backtrace(pc, num_levels);
#endif
}

// Starts or stops the CPU profiler
void updateCpuProfiler(ProfilerRequest req) {
    switch (req.type) {
    case ProfilerRequest::Type::GPROF:
#if (defined(__linux__) || defined(__FreeBSD__)) && defined(USE_GPERFTOOLS) && !defined(VALGRIND)
        switch (req.action) {
        case ProfilerRequest::Action::ENABLE: {
            const char* path = (const char*)req.outputFile.begin();
            ProfilerOptions* options = new ProfilerOptions();
            options->filter_in_thread = &filter_in_thread;
            options->filter_in_thread_arg = nullptr;
            ProfilerStartWithOptions(path, options);
            break;
        }
        case ProfilerRequest::Action::DISABLE:
            ProfilerStop();
            break;
        case ProfilerRequest::Action::RUN:
            ASSERT(false); // User should have called runProfiler.
            break;
        }
#endif
        break;
    case ProfilerRequest::Type::FLOW:
        switch (req.action) {
        case ProfilerRequest::Action::ENABLE:
            startProfiling(g_network, {}, req.outputFile);
            break;
        case ProfilerRequest::Action::DISABLE:
            stopProfiling();
            break;
        case ProfilerRequest::Action::RUN:
            ASSERT(false); // User should have called runProfiler.
            break;
        }
        break;
    default:
        ASSERT(false);
        break;
    }
}

ACTOR Future<Void> runCpuProfiler(ProfilerRequest req) {
    if (req.action == ProfilerRequest::Action::RUN) {
        req.action = ProfilerRequest::Action::ENABLE;
        updateCpuProfiler(req);
        wait(delay(req.duration));
        req.action = ProfilerRequest::Action::DISABLE;
        updateCpuProfiler(req);
        return Void();
    } else {
        updateCpuProfiler(req);
        return Void();
    }
}

void runHeapProfiler(const char* msg) {
#if defined(__linux__) && defined(USE_GPERFTOOLS) && !defined(VALGRIND)
    if (IsHeapProfilerRunning()) {
        HeapProfilerDump(msg);
    } else {
        TraceEvent("ProfilerError").detail("Message", "HeapProfiler not running");
    }
#else
    TraceEvent("ProfilerError").detail("Message", "HeapProfiler Unsupported");
#endif
}

ACTOR Future<Void> runProfiler(ProfilerRequest req) {
    if (req.type == ProfilerRequest::Type::GPROF_HEAP) {
        runHeapProfiler("User triggered heap dump");
    } else {
        wait(runCpuProfiler(req));
    }

    return Void();
}
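
// Returns true if this process's resident set size (read from /proc/self/statm) exceeds the given threshold in
// bytes. Only meaningful on Linux builds with gperftools; on other platforms, or if the statm read fails,
// *error is set and false is returned.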
bool checkHighMemory(int64_t threshold, bool* error) {
#if defined(__linux__) && defined(USE_GPERFTOOLS) && !defined(VALGRIND)
    *error = false;
    uint64_t page_size = sysconf(_SC_PAGESIZE);
    int fd = open("/proc/self/statm", O_RDONLY | O_CLOEXEC);
    if (fd < 0) {
        TraceEvent("OpenStatmFileFailure");
        *error = true;
        return false;
    }

    const int buf_sz = 256;
    char stat_buf[buf_sz];
    ssize_t stat_nread = read(fd, stat_buf, buf_sz);
    if (stat_nread < 0) {
        TraceEvent("ReadStatmFileFailure");
        *error = true;
        return false;
    }

    uint64_t vmsize, rss;
    sscanf(stat_buf, "%lu %lu", &vmsize, &rss);
    rss *= page_size;
    if (rss >= threshold) {
        return true;
    }
#else
    TraceEvent("CheckHighMemoryUnsupported");
    *error = true;
#endif
    return false;
}

// Runs heap profiler when RSS memory usage is high.
ACTOR Future<Void> monitorHighMemory(int64_t threshold) {
    if (threshold <= 0) return Void();

    loop {
        bool err = false;
        bool highmem = checkHighMemory(threshold, &err);
        if (err) break;

        if (highmem) runHeapProfiler("Highmem heap dump");
        wait(delay(SERVER_KNOBS->HEAP_PROFILER_INTERVAL));
    }
    return Void();
}
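
// Restarts a storage server on the same data files whenever it exits with please_reboot, reusing the already
// open IKeyValueStore; any other error is rethrown, and a clean exit ends the loop.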
ACTOR Future<Void> storageServerRollbackRebooter(Future<Void> prevStorageServer, KeyValueStoreType storeType,
                                                 std::string filename, UID id, LocalityData locality,
                                                 Reference<AsyncVar<ServerDBInfo>> db, std::string folder,
                                                 ActorCollection* filesClosed, int64_t memoryLimit,
                                                 IKeyValueStore* store) {
    loop {
        ErrorOr<Void> e = wait(errorOr(prevStorageServer));
        if (!e.isError()) return Void();
        else if (e.getError().code() != error_code_please_reboot) throw e.getError();

        TraceEvent("StorageServerRequestedReboot", id);

        StorageServerInterface recruited;
        recruited.uniqueID = id;
        recruited.locality = locality;
        recruited.initEndpoints();

        DUMPTOKEN(recruited.getValue);
        DUMPTOKEN(recruited.getKey);
        DUMPTOKEN(recruited.getKeyValues);
        DUMPTOKEN(recruited.getShardState);
        DUMPTOKEN(recruited.waitMetrics);
        DUMPTOKEN(recruited.splitMetrics);
        DUMPTOKEN(recruited.getReadHotRanges);
        DUMPTOKEN(recruited.getRangeSplitPoints);
        DUMPTOKEN(recruited.getStorageMetrics);
        DUMPTOKEN(recruited.waitFailure);
        DUMPTOKEN(recruited.getQueuingMetrics);
        DUMPTOKEN(recruited.getKeyValueStoreType);
        DUMPTOKEN(recruited.watchValue);

        prevStorageServer =
            storageServer(store, recruited, db, folder, Promise<Void>(), Reference<ClusterConnectionFile>(nullptr));
        prevStorageServer = handleIOErrors(prevStorageServer, store, id, store->onClosed());
    }
}

ACTOR Future<Void> storageCacheRollbackRebooter(Future<Void> prevStorageCache, UID id, LocalityData locality,
                                                Reference<AsyncVar<ServerDBInfo>> db) {
    loop {
        ErrorOr<Void> e = wait(errorOr(prevStorageCache));
        if (!e.isError()) {
            TraceEvent("StorageCacheRequestedReboot1", id);
            return Void();
        } else if (e.getError().code() != error_code_please_reboot &&
                   e.getError().code() != error_code_worker_removed) {
            TraceEvent("StorageCacheRequestedReboot2", id).detail("Code", e.getError().code());
            throw e.getError();
        }

        TraceEvent("StorageCacheRequestedReboot", id);

        StorageServerInterface recruited;
        recruited.uniqueID = deterministicRandom()->randomUniqueID(); // id;
        recruited.locality = locality;
        recruited.initEndpoints();

        DUMPTOKEN(recruited.getValue);
        DUMPTOKEN(recruited.getKey);
        DUMPTOKEN(recruited.getKeyValues);
        DUMPTOKEN(recruited.getShardState);
        DUMPTOKEN(recruited.waitMetrics);
        DUMPTOKEN(recruited.splitMetrics);
        DUMPTOKEN(recruited.getStorageMetrics);
        DUMPTOKEN(recruited.waitFailure);
        DUMPTOKEN(recruited.getQueuingMetrics);
        DUMPTOKEN(recruited.getKeyValueStoreType);
        DUMPTOKEN(recruited.watchValue);

        prevStorageCache = storageCacheServer(recruited, 0, db);
    }
}

// FIXME: This will not work correctly in simulation as all workers would share the same roles map
std::set<std::pair<std::string, std::string>> g_roles;

Standalone<StringRef> roleString(std::set<std::pair<std::string, std::string>> roles, bool with_ids) {
    std::string result;
    for (auto& r : roles) {
        if (!result.empty())
            result.append(",");
        result.append(r.first);
        if (with_ids) {
            result.append(":");
            result.append(r.second);
        }
    }
    return StringRef(result);
}
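
// startRole/endRole emit the "Role" trace events (tracked as the latest event per role id) that status uses,
// keep the global roles map and the Roles metrics up to date, and tell the simulator which roles are running
// at this address.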
void startRole(const Role& role, UID roleId, UID workerId, const std::map<std::string, std::string>& details,
               const std::string& origination) {
    if (role.includeInTraceRoles) {
        addTraceRole(role.abbreviation);
    }

    TraceEvent ev("Role", roleId);
    ev.detail("As", role.roleName)
        .detail("Transition", "Begin")
        .detail("Origination", origination)
        .detail("OnWorker", workerId);
    for (auto it = details.begin(); it != details.end(); it++)
        ev.detail(it->first.c_str(), it->second);

    ev.trackLatest(roleId.shortString() + ".Role");

    // Update roles map, log Roles metrics
    g_roles.insert({ role.roleName, roleId.shortString() });
    StringMetricHandle(LiteralStringRef("Roles")) = roleString(g_roles, false);
    StringMetricHandle(LiteralStringRef("RolesWithIDs")) = roleString(g_roles, true);
    if (g_network->isSimulated()) g_simulator.addRole(g_network->getLocalAddress(), role.roleName);
}

void endRole(const Role& role, UID id, std::string reason, bool ok, Error e) {
    {
        TraceEvent ev("Role", id);
        if (e.code() != invalid_error_code)
            ev.error(e, true);
        ev.detail("Transition", "End").detail("As", role.roleName).detail("Reason", reason);

        ev.trackLatest(id.shortString() + ".Role");
    }

    if (!ok) {
        std::string type = role.roleName + "Failed";

        TraceEvent err(SevError, type.c_str(), id);
        if (e.code() != invalid_error_code) {
            err.error(e, true);
        }
        err.detail("Reason", reason);
    }

    latestEventCache.clear(id.shortString());

    // Update roles map, log Roles metrics
    g_roles.erase({ role.roleName, id.shortString() });
    StringMetricHandle(LiteralStringRef("Roles")) = roleString(g_roles, false);
    StringMetricHandle(LiteralStringRef("RolesWithIDs")) = roleString(g_roles, true);
    if (g_network->isSimulated()) g_simulator.removeRole(g_network->getLocalAddress(), role.roleName);

    if (role.includeInTraceRoles) {
        removeTraceRole(role.abbreviation);
    }
}

ACTOR Future<Void> traceRole(Role role, UID roleId) {
    loop {
        wait(delay(SERVER_KNOBS->WORKER_LOGGING_INTERVAL));
        TraceEvent("Role", roleId).detail("Transition", "Refresh").detail("As", role.roleName);
    }
}
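
// Handles a WorkerSnapRequest by running the requested snapshot payload through execHelper() for this worker's
// role, logging the result, and replying to the requester with success or an error.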
ACTOR Future<Void> workerSnapCreate(WorkerSnapRequest snapReq, StringRef snapFolder) {
    state ExecCmdValueString snapArg(snapReq.snapPayload);
    try {
        int err = wait(execHelper(&snapArg, snapReq.snapUID, snapFolder.toString(), snapReq.role.toString()));
        std::string uidStr = snapReq.snapUID.toString();
        TraceEvent("ExecTraceWorker")
            .detail("Uid", uidStr)
            .detail("Status", err)
            .detail("Role", snapReq.role)
            .detail("Value", snapFolder)
            .detail("ExecPayload", snapReq.snapPayload);
        if (err != 0) {
            throw operation_failed();
        }
        if (snapReq.role.toString() == "storage") {
            printStorageVersionInfo();
        }
        snapReq.reply.send(Void());
    } catch (Error& e) {
        TraceEvent("ExecHelperError").error(e, true /*includeCancelled*/);
        if (e.code() != error_code_operation_cancelled) {
            snapReq.reply.sendError(e);
        } else {
            throw e;
        }
    }
    return Void();
}

// TODO: `issues` is right now only updated by `monitorTraceLogIssues` and thus is being `set` on every update.
// It could be changed to `insert` and `trigger` later if we want to use it as a generic way for the caller of
// this function to report issues to cluster controller.
ACTOR Future<Void> monitorTraceLogIssues(Reference<AsyncVar<std::set<std::string>>> issues) {
    state bool pingTimeout = false;
    loop {
        wait(delay(SERVER_KNOBS->TRACE_LOG_FLUSH_FAILURE_CHECK_INTERVAL_SECONDS));
        Future<Void> pingAck = pingTraceLogWriterThread();
        try {
            wait(timeoutError(pingAck, SERVER_KNOBS->TRACE_LOG_PING_TIMEOUT_SECONDS));
        } catch (Error& e) {
            if (e.code() == error_code_timed_out) {
                pingTimeout = true;
            } else {
                throw;
            }
        }
        std::set<std::string> _issues;
        retriveTraceLogIssues(_issues);
        if (pingTimeout) {
            // Ping trace log writer thread timeout.
            _issues.insert("trace_log_writer_thread_unresponsive");
            pingTimeout = false;
        }
        issues->set(_issues);
    }
}
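
// Key under which a SharedTLog instance is registered: TLogs with the same TLog version, spill type, and
// key-value store type share a single SharedTLog actor. For TLog versions V5 and newer the spill type is not
// part of the key.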
class SharedLogsKey {
    TLogVersion logVersion;
    TLogSpillType spillType;
    KeyValueStoreType storeType;

public:
    SharedLogsKey(const TLogOptions& options, KeyValueStoreType kvst)
        : logVersion(options.version), spillType(options.spillType), storeType(kvst) {
        if (logVersion >= TLogVersion::V5)
            spillType = TLogSpillType::UNSET;
    }

    bool operator<(const SharedLogsKey& other) const {
        return std::tie(logVersion, spillType, storeType) <
               std::tie(other.logVersion, other.spillType, other.storeType);
    }
};

struct SharedLogsValue {
    Future<Void> actor = Void();
    UID uid = UID();
    PromiseStream<InitializeTLogRequest> requests;

    SharedLogsValue() = default;
    SharedLogsValue(Future<Void> actor, UID uid, PromiseStream<InitializeTLogRequest> requests)
        : actor(actor), uid(uid), requests(requests) {}
};
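
// Main actor for a worker process: recovers any disk stores left in the data folder (restarting the matching
// storage server and TLog roles on the same files), registers with the cluster controller, and then services
// requests arriving on its WorkerInterface.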
ACTOR Future<Void> workerServer(Reference<ClusterConnectionFile> connFile,
                                Reference<AsyncVar<Optional<ClusterControllerFullInterface>>> ccInterface,
                                LocalityData locality,
                                Reference<AsyncVar<ClusterControllerPriorityInfo>> asyncPriorityInfo,
                                ProcessClass initialClass, std::string folder, int64_t memoryLimit,
                                std::string metricsConnFile, std::string metricsPrefix,
                                Promise<Void> recoveredDiskFiles, int64_t memoryProfileThreshold,
                                std::string _coordFolder, std::string whitelistBinPaths,
                                Reference<AsyncVar<ServerDBInfo>> dbInfo) {
    state PromiseStream<ErrorInfo> errors;
    state Reference<AsyncVar<Optional<DataDistributorInterface>>> ddInterf(
        new AsyncVar<Optional<DataDistributorInterface>>());
    state Reference<AsyncVar<Optional<RatekeeperInterface>>> rkInterf(new AsyncVar<Optional<RatekeeperInterface>>());
    state Future<Void> handleErrors = workerHandleErrors(errors.getFuture()); // Needs to be stopped last
    state ActorCollection errorForwarders(false);
    state Future<Void> loggingTrigger = Void();
    state double loggingDelay = SERVER_KNOBS->WORKER_LOGGING_INTERVAL;
    state ActorCollection filesClosed(true);
    state Promise<Void> stopping;
    state WorkerCache<InitializeStorageReply> storageCache;
    state Future<Void> metricsLogger;
    state Reference<AsyncVar<bool>> degraded = FlowTransport::transport().getDegraded();
    // tLogFnForOptions() can return a function that doesn't correspond with the FDB version that the
    // TLogVersion represents.  This can be done if the newer TLog doesn't support a requested option.
    // As (store type, spill type) can map to the same TLogFn across multiple TLogVersions, we need to
    // decide if we should collapse them into the same SharedTLog instance as well.  The answer
    // here is no, so that when running with log_version==3, all files should say V=3.
    state std::map<SharedLogsKey, SharedLogsValue> sharedLogs;
    state Reference<AsyncVar<UID>> activeSharedTLog(new AsyncVar<UID>());
    state WorkerCache<InitializeBackupReply> backupWorkerCache;

    state std::string coordFolder = abspath(_coordFolder);

    state WorkerInterface interf(locality);
    interf.initEndpoints();

    state Reference<AsyncVar<std::set<std::string>>> issues(new AsyncVar<std::set<std::string>>());

    folder = abspath(folder);

    if (metricsPrefix.size() > 0) {
        if (metricsConnFile.size() > 0) {
            try {
                state Database db =
                    Database::createDatabase(metricsConnFile, Database::API_VERSION_LATEST, true, locality);
                metricsLogger = runMetrics(db, KeyRef(metricsPrefix));
            } catch (Error& e) {
                TraceEvent(SevWarnAlways, "TDMetricsBadClusterFile").error(e).detail("ConnFile", metricsConnFile);
            }
        } else {
            bool lockAware = metricsPrefix.size() && metricsPrefix[0] == '\xff';
            metricsLogger = runMetrics(openDBOnServer(dbInfo, TaskPriority::DefaultEndpoint, true, lockAware),
                                       KeyRef(metricsPrefix));
        }
    }

    errorForwarders.add(resetAfter(degraded, SERVER_KNOBS->DEGRADED_RESET_INTERVAL, false,
                                   SERVER_KNOBS->DEGRADED_WARNING_LIMIT,
                                   SERVER_KNOBS->DEGRADED_WARNING_RESET_DELAY, "DegradedReset"));
    errorForwarders.add(loadedPonger(interf.debugPing.getFuture()));
    errorForwarders.add(waitFailureServer(interf.waitFailure.getFuture()));
    errorForwarders.add(monitorTraceLogIssues(issues));
    errorForwarders.add(testerServerCore(interf.testerInterface, connFile, dbInfo, locality));
    errorForwarders.add(monitorHighMemory(memoryProfileThreshold));

    filesClosed.add(stopping.getFuture());

    initializeSystemMonitorMachineState(
        SystemMonitorMachineState(folder, locality.zoneId(), locality.machineId(), g_network->getLocalAddress().ip));

    {
        auto recruited = interf; // ghetto! don't we all love a good #define
        DUMPTOKEN(recruited.clientInterface.reboot);
        DUMPTOKEN(recruited.clientInterface.profiler);
        DUMPTOKEN(recruited.tLog);
        DUMPTOKEN(recruited.master);
        DUMPTOKEN(recruited.commitProxy);
        DUMPTOKEN(recruited.grvProxy);
        DUMPTOKEN(recruited.resolver);
        DUMPTOKEN(recruited.storage);
        DUMPTOKEN(recruited.debugPing);
        DUMPTOKEN(recruited.coordinationPing);
        DUMPTOKEN(recruited.waitFailure);
        DUMPTOKEN(recruited.setMetricsRate);
        DUMPTOKEN(recruited.eventLogRequest);
        DUMPTOKEN(recruited.traceBatchDumpRequest);
        DUMPTOKEN(recruited.updateServerDBInfo);
    }

    state std::vector<Future<Void>> recoveries;
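
    // Recover any disk stores left behind in the data folder by storage servers and TLogs that previously ran
    // on this worker, so those roles restart on their existing files before the worker registers.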
    try {
        std::vector<DiskStore> stores = getDiskStores(folder);
        bool validateDataFiles = deleteFile(joinPath(folder, validationFilename));
        for (int f = 0; f < stores.size(); f++) {
            DiskStore s = stores[f];
            // FIXME: Error handling
            if (s.storedComponent == DiskStore::Storage) {
                IKeyValueStore* kv =
                    openKVStore(s.storeType, s.filename, s.storeID, memoryLimit, false, validateDataFiles);
                Future<Void> kvClosed = kv->onClosed();
                filesClosed.add(kvClosed);

                StorageServerInterface recruited;
                recruited.uniqueID = s.storeID;
                recruited.locality = locality;
                recruited.initEndpoints();

                std::map<std::string, std::string> details;
                details["StorageEngine"] = s.storeType.toString();
                startRole(Role::STORAGE_SERVER, recruited.id(), interf.id(), details, "Restored");

                DUMPTOKEN(recruited.getValue);
                DUMPTOKEN(recruited.getKey);
                DUMPTOKEN(recruited.getKeyValues);
                DUMPTOKEN(recruited.getShardState);
                DUMPTOKEN(recruited.waitMetrics);
                DUMPTOKEN(recruited.splitMetrics);
                DUMPTOKEN(recruited.getReadHotRanges);
                DUMPTOKEN(recruited.getRangeSplitPoints);
                DUMPTOKEN(recruited.getStorageMetrics);
                DUMPTOKEN(recruited.waitFailure);
                DUMPTOKEN(recruited.getQueuingMetrics);
                DUMPTOKEN(recruited.getKeyValueStoreType);
                DUMPTOKEN(recruited.watchValue);

                Promise<Void> recovery;
                Future<Void> f = storageServer(kv, recruited, dbInfo, folder, recovery, connFile);
                recoveries.push_back(recovery.getFuture());
                f = handleIOErrors(f, kv, s.storeID, kvClosed);
                f = storageServerRollbackRebooter(f, s.storeType, s.filename, recruited.id(), recruited.locality,
                                                  dbInfo, folder, &filesClosed, memoryLimit, kv);
                errorForwarders.add(forwardError(errors, Role::STORAGE_SERVER, recruited.id(), f));
            } else if (s.storedComponent == DiskStore::TLogData) {
                std::string logQueueBasename;
                const std::string filename = basename(s.filename);
                if (StringRef(filename).startsWith(fileLogDataPrefix)) {
                    logQueueBasename = fileLogQueuePrefix.toString();
                } else {
                    StringRef optionsString = StringRef(filename).removePrefix(fileVersionedLogDataPrefix).eat("-");
                    logQueueBasename = fileLogQueuePrefix.toString() + optionsString.toString() + "-";
                }
                ASSERT_WE_THINK(abspath(parentDirectory(s.filename)) == folder);
                IKeyValueStore* kv = openKVStore(s.storeType, s.filename, s.storeID, memoryLimit, validateDataFiles);
                const DiskQueueVersion dqv =
                    s.tLogOptions.version >= TLogVersion::V3 ? DiskQueueVersion::V1 : DiskQueueVersion::V0;
                const int64_t diskQueueWarnSize =
                    s.tLogOptions.spillType == TLogSpillType::VALUE ? 10 * SERVER_KNOBS->TARGET_BYTES_PER_TLOG : -1;
                IDiskQueue* queue = openDiskQueue(joinPath(folder, logQueueBasename + s.storeID.toString() + "-"),
                                                  tlogQueueExtension.toString(), s.storeID, dqv, diskQueueWarnSize);
                filesClosed.add(kv->onClosed());
                filesClosed.add(queue->onClosed());

                std::map<std::string, std::string> details;
                details["StorageEngine"] = s.storeType.toString();
                startRole(Role::SHARED_TRANSACTION_LOG, s.storeID, interf.id(), details, "Restored");

                Promise<Void> oldLog;
                Promise<Void> recovery;
                TLogFn tLogFn = tLogFnForOptions(s.tLogOptions);
                auto& logData = sharedLogs[SharedLogsKey(s.tLogOptions, s.storeType)];
                // FIXME: If logData.actor isValid() && !isReady(), shouldn't we be sending a fake
                // InitializeTLogRequest rather than calling tLogFn() ?
                Future<Void> tl = tLogFn(kv, queue, dbInfo, locality,
                                         !logData.actor.isValid() || logData.actor.isReady()
                                             ? logData.requests
                                             : PromiseStream<InitializeTLogRequest>(),
                                         s.storeID, interf.id(), true, oldLog, recovery, folder, degraded,
                                         activeSharedTLog);
                recoveries.push_back(recovery.getFuture());
                activeSharedTLog->set(s.storeID);

                tl = handleIOErrors(tl, kv, s.storeID);
                tl = handleIOErrors(tl, queue, s.storeID);
                if (!logData.actor.isValid() || logData.actor.isReady()) {
                    logData.actor = oldLog.getFuture() || tl;
                    logData.uid = s.storeID;
                }
                errorForwarders.add(forwardError(errors, Role::SHARED_TRANSACTION_LOG, s.storeID, tl));
            }
        }
2019-12-07 05:28:44 +08:00
        bool hasCache = false;
        // start cache role if we have the right process class
        if (initialClass.classType() == ProcessClass::StorageCacheClass) {
            hasCache = true;
            StorageServerInterface recruited;
            recruited.locality = locality;
            recruited.initEndpoints();

            std::map<std::string, std::string> details;
            startRole(Role::STORAGE_CACHE, recruited.id(), interf.id(), details);

            // DUMPTOKEN(recruited.getVersion);
            DUMPTOKEN(recruited.getValue);
            DUMPTOKEN(recruited.getKey);
            DUMPTOKEN(recruited.getKeyValues);
            DUMPTOKEN(recruited.getShardState);
            DUMPTOKEN(recruited.waitMetrics);
            DUMPTOKEN(recruited.splitMetrics);
            DUMPTOKEN(recruited.getStorageMetrics);
            DUMPTOKEN(recruited.waitFailure);
            DUMPTOKEN(recruited.getQueuingMetrics);
            DUMPTOKEN(recruited.getKeyValueStoreType);
            DUMPTOKEN(recruited.watchValue);

            auto f = storageCacheServer(recruited, 0, dbInfo);
            f = storageCacheRollbackRebooter(f, recruited.id(), recruited.locality, dbInfo);
            errorForwarders.add(forwardError(errors, Role::STORAGE_CACHE, recruited.id(), f));
        }
        std::map<std::string, std::string> details;
        details["Locality"] = locality.toString();
        details["DataFolder"] = folder;
        details["StoresPresent"] = format("%d", stores.size());
        details["CachePresent"] = hasCache ? "true" : "false";
        startRole(Role::WORKER, interf.id(), interf.id(), details);
        errorForwarders.add(traceRole(Role::WORKER, interf.id()));

        wait(waitForAll(recoveries));
        recoveredDiskFiles.send(Void());

        errorForwarders.add(registrationClient(ccInterface, interf, asyncPriorityInfo, initialClass, ddInterf, rkInterf,
                                               degraded, errors, locality, dbInfo, connFile, issues));

        TraceEvent("RecoveriesComplete", interf.id());
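        // Main request loop: from here on the worker only serves recruitment and administrative requests, until it is
        // rebooted or fails.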
        loop choose {
            when(UpdateServerDBInfoRequest req = waitNext(interf.updateServerDBInfo.getFuture())) {
                ServerDBInfo localInfo = BinaryReader::fromStringRef<ServerDBInfo>(
                    req.serializedDbInfo, AssumeVersion(currentProtocolVersion));
                localInfo.myLocality = locality;

                if (localInfo.infoGeneration < dbInfo->get().infoGeneration &&
                    localInfo.clusterInterface == dbInfo->get().clusterInterface) {
                    std::vector<Endpoint> rep = req.broadcastInfo;
                    rep.push_back(interf.updateServerDBInfo.getEndpoint());
                    req.reply.send(rep);
                } else {
                    Optional<Endpoint> notUpdated;
                    if (!ccInterface->get().present() || localInfo.clusterInterface != ccInterface->get().get()) {
                        notUpdated = interf.updateServerDBInfo.getEndpoint();
                    } else if (localInfo.infoGeneration > dbInfo->get().infoGeneration ||
                               dbInfo->get().clusterInterface != ccInterface->get().get()) {
                        TraceEvent("GotServerDBInfoChange")
                            .detail("ChangeID", localInfo.id)
                            .detail("MasterID", localInfo.master.id())
                            .detail("RatekeeperID", localInfo.ratekeeper.present() ? localInfo.ratekeeper.get().id() : UID())
                            .detail("DataDistributorID",
                                    localInfo.distributor.present() ? localInfo.distributor.get().id() : UID());
                        dbInfo->set(localInfo);
                    }
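                    // Fan the update out through the worker broadcast tree: forward it to up to DBINFO_SEND_AMOUNT
                    // further endpoints from req.broadcastInfo, and report back any endpoint (possibly our own) that
                    // still needs updating.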
                    errorForwarders.add(
                        success(broadcastDBInfoRequest(req, SERVER_KNOBS->DBINFO_SEND_AMOUNT, notUpdated, true)));
                }
            }
            when(RebootRequest req = waitNext(interf.clientInterface.reboot.getFuture())) {
                state RebootRequest rebootReq = req;
                // If waitForDuration is INT_MAX this trace would never be logged if it were inside the next block;
                // it is also a useful trace to have even when waitForDuration is 0.
                TraceEvent("RebootRequestSuspendingProcess").detail("Duration", req.waitForDuration);
                if (req.waitForDuration) {
                    flushTraceFileVoid();
                    setProfilingEnabled(0);
                    g_network->stop();
                    threadSleep(req.waitForDuration);
                }
                if (rebootReq.checkData) {
                    Reference<IAsyncFile> checkFile =
                        wait(IAsyncFileSystem::filesystem()->open(joinPath(folder, validationFilename),
                                                                  IAsyncFile::OPEN_CREATE | IAsyncFile::OPEN_READWRITE,
                                                                  0600));
                    wait(checkFile->sync());
                }

                if (g_network->isSimulated()) {
                    TraceEvent("SimulatedReboot").detail("Deletion", rebootReq.deleteData);
                    if (rebootReq.deleteData) {
                        throw please_reboot_delete();
                    }
                    throw please_reboot();
                } else {
                    TraceEvent("ProcessReboot");
                    ASSERT(!rebootReq.deleteData);
                    flushAndExit(0);
                }
            }
            when(ProfilerRequest req = waitNext(interf.clientInterface.profiler.getFuture())) {
                state ProfilerRequest profilerReq = req;
                // There really isn't a great "filepath sanitizer" or "filepath escape" function available,
                // thus we instead enforce a different requirement.  One can only write to a file that's
                // beneath the working directory, and we remove the ability to do any symlink or ../..
                // tricks by resolving all paths through `abspath` first.
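                // For example, an outputFile of "../outside.prof" resolves via abspath to a path that is not under
                // realLogDir, fails the prefix check below, and is rejected with client_invalid_operation().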
                try {
                    std::string realLogDir = abspath(SERVER_KNOBS->LOG_DIRECTORY);
                    std::string realOutPath = abspath(realLogDir + "/" + profilerReq.outputFile.toString());
                    if (realLogDir.size() < realOutPath.size() &&
                        strncmp(realLogDir.c_str(), realOutPath.c_str(), realLogDir.size()) == 0) {
                        profilerReq.outputFile = realOutPath;
                        uncancellable(runProfiler(profilerReq));
                        profilerReq.reply.send(Void());
                    } else {
                        profilerReq.reply.sendError(client_invalid_operation());
                    }
                } catch (Error& e) {
                    profilerReq.reply.sendError(e);
                }
            }
            when(RecruitMasterRequest req = waitNext(interf.master.getFuture())) {
                MasterInterface recruited;
                recruited.locality = locality;
                recruited.initEndpoints();

                startRole(Role::MASTER, recruited.id(), interf.id());

                DUMPTOKEN(recruited.waitFailure);
                DUMPTOKEN(recruited.tlogRejoin);
                DUMPTOKEN(recruited.changeCoordinators);
                DUMPTOKEN(recruited.getCommitVersion);
                DUMPTOKEN(recruited.getLiveCommittedVersion);
                DUMPTOKEN(recruited.reportLiveCommittedVersion);
                DUMPTOKEN(recruited.notifyBackupWorkerDone);

                // printf("Recruited as masterServer\n");
                Future<Void> masterProcess = masterServer(recruited, dbInfo, ccInterface, ServerCoordinators(connFile),
                                                          req.lifetime, req.forceRecovery);
                errorForwarders.add(zombie(recruited, forwardError(errors, Role::MASTER, recruited.id(), masterProcess)));
                req.reply.send(recruited);
            }
            when(InitializeDataDistributorRequest req = waitNext(interf.dataDistributor.getFuture())) {
                DataDistributorInterface recruited(locality);
                recruited.initEndpoints();
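                // The data distributor (like the ratekeeper below) is a per-worker singleton: if one is already
                // running on this worker, reply with the existing interface instead of starting a second instance.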
                if (ddInterf->get().present()) {
                    recruited = ddInterf->get().get();
                    TEST(true); // Recruited while already a data distributor.
                } else {
                    startRole(Role::DATA_DISTRIBUTOR, recruited.id(), interf.id());
                    DUMPTOKEN(recruited.waitFailure);

                    Future<Void> dataDistributorProcess = dataDistributor(recruited, dbInfo);
                    errorForwarders.add(forwardError(errors, Role::DATA_DISTRIBUTOR, recruited.id(),
                                                     setWhenDoneOrError(dataDistributorProcess, ddInterf,
                                                                        Optional<DataDistributorInterface>())));
                    ddInterf->set(Optional<DataDistributorInterface>(recruited));
                }
                TraceEvent("DataDistributorReceived", req.reqId).detail("DataDistributorId", recruited.id());
                req.reply.send(recruited);
            }
            when(InitializeRatekeeperRequest req = waitNext(interf.ratekeeper.getFuture())) {
                RatekeeperInterface recruited(locality, req.reqId);
                recruited.initEndpoints();

                if (rkInterf->get().present()) {
                    recruited = rkInterf->get().get();
                    TEST(true); // Recruited while already a ratekeeper.
                } else {
                    startRole(Role::RATEKEEPER, recruited.id(), interf.id());
                    DUMPTOKEN(recruited.waitFailure);
                    DUMPTOKEN(recruited.getRateInfo);
                    DUMPTOKEN(recruited.haltRatekeeper);
                    DUMPTOKEN(recruited.reportCommitCostEstimation);

                    Future<Void> ratekeeperProcess = ratekeeper(recruited, dbInfo);
                    errorForwarders.add(forwardError(errors, Role::RATEKEEPER, recruited.id(),
                                                     setWhenDoneOrError(ratekeeperProcess, rkInterf,
                                                                        Optional<RatekeeperInterface>())));
                    rkInterf->set(Optional<RatekeeperInterface>(recruited));
                }
                TraceEvent("Ratekeeper_InitRequest", req.reqId).detail("RatekeeperId", recruited.id());
                req.reply.send(recruited);
            }
            when(InitializeBackupRequest req = waitNext(interf.backup.getFuture())) {
                if (!backupWorkerCache.exists(req.reqId)) {
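                    // Requests are de-duplicated by reqId: the reply future is cached below, so a retransmitted
                    // InitializeBackupRequest is answered with the original reply rather than starting a second
                    // backup worker.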
                    BackupInterface recruited(locality);
                    recruited.initEndpoints();

                    startRole(Role::BACKUP, recruited.id(), interf.id());
                    DUMPTOKEN(recruited.waitFailure);

                    ReplyPromise<InitializeBackupReply> backupReady = req.reply;
                    backupWorkerCache.set(req.reqId, backupReady.getFuture());
                    Future<Void> backupProcess = backupWorker(recruited, req, dbInfo);
                    backupProcess = storageCache.removeOnReady(req.reqId, backupProcess);
                    errorForwarders.add(forwardError(errors, Role::BACKUP, recruited.id(), backupProcess));
                    TraceEvent("BackupInitRequest", req.reqId).detail("BackupId", recruited.id());
                    InitializeBackupReply reply(recruited, req.backupEpoch);
                    backupReady.send(reply);
                } else {
                    forwardPromise(req.reply, backupWorkerCache.get(req.reqId));
                }
            }
            when(InitializeTLogRequest req = waitNext(interf.tLog.getFuture())) {
                // For now, there's a one-to-one mapping of spill type to TLogVersion.
                // With future work, a particular version of the TLog can support multiple
                // different spilling strategies, at which point SpillType will need to be
                // plumbed down into tLogFn.
                if (req.logVersion < TLogVersion::MIN_RECRUITABLE) {
                    TraceEvent(SevError, "InitializeTLogInvalidLogVersion")
                        .detail("Version", req.logVersion)
                        .detail("MinRecruitable", TLogVersion::MIN_RECRUITABLE);
                    req.reply.sendError(internal_error());
                }
                TLogOptions tLogOptions(req.logVersion, req.spillType);
                TLogFn tLogFn = tLogFnForOptions(tLogOptions);
                auto& logData = sharedLogs[SharedLogsKey(tLogOptions, req.storeType)];
                logData.requests.send(req);
                if (!logData.actor.isValid() || logData.actor.isReady()) {
                    UID logId = deterministicRandom()->randomUniqueID();
                    std::map<std::string, std::string> details;
                    details["ForMaster"] = req.recruitmentID.shortString();
                    details["StorageEngine"] = req.storeType.toString();

                    // FIXME: start a role for every tlog instance, rather than just for the shared actor; also use a
                    // different role type for the shared actor
                    startRole(Role::SHARED_TRANSACTION_LOG, logId, interf.id(), details);

                    const StringRef prefix =
                        req.logVersion > TLogVersion::V2 ? fileVersionedLogDataPrefix : fileLogDataPrefix;
                    std::string filename =
                        filenameFromId(req.storeType, folder, prefix.toString() + tLogOptions.toPrefix(), logId);
                    IKeyValueStore* data = openKVStore(req.storeType, filename, logId, memoryLimit);
                    const DiskQueueVersion dqv =
                        tLogOptions.version >= TLogVersion::V3 ? DiskQueueVersion::V1 : DiskQueueVersion::V0;
                    IDiskQueue* queue = openDiskQueue(
                        joinPath(folder, fileLogQueuePrefix.toString() + tLogOptions.toPrefix() + logId.toString() + "-"),
                        tlogQueueExtension.toString(), logId, dqv);
                    filesClosed.add(data->onClosed());
                    filesClosed.add(queue->onClosed());

                    Future<Void> tLogCore = tLogFn(data, queue, dbInfo, locality, logData.requests, logId, interf.id(),
                                                   false, Promise<Void>(), Promise<Void>(), folder, degraded,
                                                   activeSharedTLog);
                    tLogCore = handleIOErrors(tLogCore, data, logId);
                    tLogCore = handleIOErrors(tLogCore, queue, logId);
                    errorForwarders.add(forwardError(errors, Role::SHARED_TRANSACTION_LOG, logId, tLogCore));
                    logData.actor = tLogCore;
                    logData.uid = logId;
                }
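                // Whichever shared TLog was most recently recruited (or restored, above) is marked active here; the
                // TLog implementations are assumed to consult this to decide which instance should keep spending
                // resources on its queue while the others wind down.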
                activeSharedTLog->set(logData.uid);
            }
            when(InitializeStorageRequest req = waitNext(interf.storage.getFuture())) {
                if (!storageCache.exists(req.reqId)) {
                    StorageServerInterface recruited(req.interfaceId);
                    recruited.locality = locality;
                    recruited.initEndpoints();

                    std::map<std::string, std::string> details;
                    details["StorageEngine"] = req.storeType.toString();
                    startRole(Role::STORAGE_SERVER, recruited.id(), interf.id(), details);

                    DUMPTOKEN(recruited.getValue);
                    DUMPTOKEN(recruited.getKey);
                    DUMPTOKEN(recruited.getKeyValues);
                    DUMPTOKEN(recruited.getShardState);
                    DUMPTOKEN(recruited.waitMetrics);
                    DUMPTOKEN(recruited.splitMetrics);
                    DUMPTOKEN(recruited.getReadHotRanges);
                    DUMPTOKEN(recruited.getRangeSplitPoints);
                    DUMPTOKEN(recruited.getStorageMetrics);
                    DUMPTOKEN(recruited.waitFailure);
                    DUMPTOKEN(recruited.getQueuingMetrics);
                    DUMPTOKEN(recruited.getKeyValueStoreType);
                    DUMPTOKEN(recruited.watchValue);
                    // printf("Recruited as storageServer\n");

                    std::string filename =
                        filenameFromId(req.storeType, folder, fileStoragePrefix.toString(), recruited.id());
                    IKeyValueStore* data = openKVStore(req.storeType, filename, recruited.id(), memoryLimit);
                    Future<Void> kvClosed = data->onClosed();
                    filesClosed.add(kvClosed);

                    ReplyPromise<InitializeStorageReply> storageReady = req.reply;
                    storageCache.set(req.reqId, storageReady.getFuture());
                    Future<Void> s = storageServer(data, recruited, req.seedTag, storageReady, dbInfo, folder);
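                    // Wrap the storage server future: handleIOErrors surfaces KV-store failures as role failures,
                    // removeOnReady drops the reqId cache entry once recruitment completes, and the rollback rebooter
                    // restarts the role in place when the storage server asks to be rolled back.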
                    s = handleIOErrors(s, data, recruited.id(), kvClosed);
                    s = storageCache.removeOnReady(req.reqId, s);
                    s = storageServerRollbackRebooter(s, req.storeType, filename, recruited.id(), recruited.locality,
                                                      dbInfo, folder, &filesClosed, memoryLimit, data);
                    errorForwarders.add(forwardError(errors, Role::STORAGE_SERVER, recruited.id(), s));
                } else
                    forwardPromise(req.reply, storageCache.get(req.reqId));
            }
            when(InitializeCommitProxyRequest req = waitNext(interf.commitProxy.getFuture())) {
                CommitProxyInterface recruited;
                recruited.processId = locality.processId();
                recruited.provisional = false;
                recruited.initEndpoints();

                std::map<std::string, std::string> details;
                details["ForMaster"] = req.master.id().shortString();
                startRole(Role::COMMIT_PROXY, recruited.id(), interf.id(), details);

                DUMPTOKEN(recruited.commit);
                DUMPTOKEN(recruited.getConsistentReadVersion);
                DUMPTOKEN(recruited.getKeyServersLocations);
                DUMPTOKEN(recruited.getStorageServerRejoinInfo);
                DUMPTOKEN(recruited.waitFailure);
                DUMPTOKEN(recruited.txnState);

                // printf("Recruited as commitProxyServer\n");
                errorForwarders.add(zombie(recruited, forwardError(errors, Role::COMMIT_PROXY, recruited.id(),
                                                                   commitProxyServer(recruited, req, dbInfo,
                                                                                     whitelistBinPaths))));
                req.reply.send(recruited);
            }
            when(InitializeGrvProxyRequest req = waitNext(interf.grvProxy.getFuture())) {
                GrvProxyInterface recruited;
                recruited.processId = locality.processId();
                recruited.provisional = false;
                recruited.initEndpoints();

                std::map<std::string, std::string> details;
                details["ForMaster"] = req.master.id().shortString();
                startRole(Role::GRV_PROXY, recruited.id(), interf.id(), details);

                DUMPTOKEN(recruited.getConsistentReadVersion);
                DUMPTOKEN(recruited.waitFailure);
                DUMPTOKEN(recruited.getHealthMetrics);

                // printf("Recruited as grvProxyServer\n");
                errorForwarders.add(zombie(recruited, forwardError(errors, Role::GRV_PROXY, recruited.id(),
                                                                   grvProxyServer(recruited, req, dbInfo))));
                req.reply.send(recruited);
            }
            when(InitializeResolverRequest req = waitNext(interf.resolver.getFuture())) {
                ResolverInterface recruited;
                recruited.locality = locality;
                recruited.initEndpoints();

                std::map<std::string, std::string> details;
                startRole(Role::RESOLVER, recruited.id(), interf.id(), details);

                DUMPTOKEN(recruited.resolve);
                DUMPTOKEN(recruited.metrics);
                DUMPTOKEN(recruited.split);
                DUMPTOKEN(recruited.waitFailure);

                errorForwarders.add(zombie(recruited, forwardError(errors, Role::RESOLVER, recruited.id(),
                                                                   resolver(recruited, req, dbInfo))));
                req.reply.send(recruited);
            }
            when(InitializeLogRouterRequest req = waitNext(interf.logRouter.getFuture())) {
                TLogInterface recruited(locality);
                recruited.initEndpoints();

                std::map<std::string, std::string> details;
                startRole(Role::LOG_ROUTER, recruited.id(), interf.id(), details);

                DUMPTOKEN(recruited.peekMessages);
                DUMPTOKEN(recruited.popMessages);
                DUMPTOKEN(recruited.commit);
                DUMPTOKEN(recruited.lock);
                DUMPTOKEN(recruited.getQueuingMetrics);
                DUMPTOKEN(recruited.confirmRunning);
                DUMPTOKEN(recruited.waitFailure);
                DUMPTOKEN(recruited.recoveryFinished);
                DUMPTOKEN(recruited.disablePopRequest);
                DUMPTOKEN(recruited.enablePopRequest);
                DUMPTOKEN(recruited.snapRequest);

                errorForwarders.add(zombie(recruited, forwardError(errors, Role::LOG_ROUTER, recruited.id(),
                                                                   logRouter(recruited, req, dbInfo))));
                req.reply.send(recruited);
            }
            when(CoordinationPingMessage m = waitNext(interf.coordinationPing.getFuture())) {
                TraceEvent("CoordinationPing", interf.id())
                    .detail("CCID", m.clusterControllerId)
                    .detail("TimeStep", m.timeStep);
            }
            when(SetMetricsLogRateRequest req = waitNext(interf.setMetricsRate.getFuture())) {
                TraceEvent("LoggingRateChange", interf.id())
                    .detail("OldDelay", loggingDelay)
                    .detail("NewLogPS", req.metricsLogsPerSecond);
                if (req.metricsLogsPerSecond != 0) {
                    loggingDelay = 1.0 / req.metricsLogsPerSecond;
                    loggingTrigger = Void();
                }
            }
            when(EventLogRequest req = waitNext(interf.eventLogRequest.getFuture())) {
                TraceEventFields e;
                if (req.getLastError)
                    e = latestEventCache.getLatestError();
                else
                    e = latestEventCache.get(req.eventName.toString());
                req.reply.send(e);
            }
            when(TraceBatchDumpRequest req = waitNext(interf.traceBatchDumpRequest.getFuture())) {
                g_traceBatch.dump();
                req.reply.send(Void());
            }
            when(DiskStoreRequest req = waitNext(interf.diskStoreRequest.getFuture())) {
                Standalone<VectorRef<UID>> ids;
                for (DiskStore d : getDiskStores(folder)) {
                    bool included = true;
                    if (!req.includePartialStores) {
                        if (d.storeType == KeyValueStoreType::SSD_BTREE_V1) {
                            included = fileExists(d.filename + ".fdb-wal");
                        } else if (d.storeType == KeyValueStoreType::SSD_BTREE_V2) {
                            included = fileExists(d.filename + ".sqlite-wal");
                        } else if (d.storeType == KeyValueStoreType::SSD_REDWOOD_V1) {
                            included = fileExists(d.filename + "0.pagerlog") && fileExists(d.filename + "1.pagerlog");
                        } else if (d.storeType == KeyValueStoreType::SSD_ROCKSDB_V1) {
                            included =
                                fileExists(joinPath(d.filename, "CURRENT")) && fileExists(joinPath(d.filename, "IDENTITY"));
                        } else if (d.storeType == KeyValueStoreType::MEMORY) {
                            included = fileExists(d.filename + "1.fdq");
                        } else {
                            ASSERT(d.storeType == KeyValueStoreType::MEMORY_RADIXTREE);
                            included = fileExists(d.filename + "1.fdr");
                        }
                        if (d.storedComponent == DiskStore::COMPONENT::TLogData && included) {
                            included = false;
                            // The previous code assumed that d.filename is a filename.  But that is not true.
                            // d.filename is a path.  Removing a prefix and adding a new one just makes a broken
                            // directory name.  So fileExists would always return false.
                            // Weirdly, this doesn't break anything, as tested by taking a clean check of FDB,
                            // setting included to false always, and then running correctness.  So I'm just
                            // improving the situation by actually marking it as broken.
                            // FIXME: this whole thing
                            /*
                            std::string logDataBasename;
                            StringRef filename = d.filename;
                            if (filename.startsWith(fileLogDataPrefix)) {
                                logDataBasename = fileLogQueuePrefix.toString() + d.filename.substr(fileLogDataPrefix.size());
                            } else {
                                StringRef optionsString = filename.removePrefix(fileVersionedLogDataPrefix).eat("-");
                                logDataBasename = fileLogQueuePrefix.toString() + optionsString.toString() + "-";
                            }
                            TraceEvent("DiskStoreRequest").detail("FilenameBasename", logDataBasename);
                            if (fileExists(logDataBasename + "0.fdq") && fileExists(logDataBasename + "1.fdq")) {
                                included = true;
                            }
                            */
                        }
                    }
                    if (included) {
                        ids.push_back(ids.arena(), d.storeID);
                    }
                }
                req.reply.send(ids);
            }
            when(wait(loggingTrigger)) {
                systemMonitor();
                loggingTrigger = delay(loggingDelay, TaskPriority::FlushTrace);
            }
            when(state WorkerSnapRequest snapReq = waitNext(interf.workerSnapReq.getFuture())) {
                Standalone<StringRef> snapFolder = StringRef(folder);
                if (snapReq.role.toString() == "coord") {
                    snapFolder = coordFolder;
                }
                errorForwarders.add(workerSnapCreate(snapReq, snapFolder));
            }
            when(wait(errorForwarders.getResult())) {}
            when(wait(handleErrors)) {}
        }
    } catch (Error& err) {
        // Make sure actors are cancelled before "recovery" promises are destructed.
        for (auto f : recoveries) f.cancel();

        state Error e = err;
        bool ok = e.code() == error_code_please_reboot || e.code() == error_code_actor_cancelled ||
                  e.code() == error_code_please_reboot_delete;

        endRole(Role::WORKER, interf.id(), "WorkerError", ok, e);
        errorForwarders.clear(false);
        sharedLogs.clear();

        if (e.code() != error_code_actor_cancelled) {
            // We get cancelled e.g. when an entire simulation times out, but in that case
            // we won't be restarted and don't need to wait for shutdown
            stopping.send(Void());
            wait(filesClosed.getResult()); // Wait for complete shutdown of KV stores
            wait(delay(0.0)); // Unwind the callstack to make sure that IAsyncFile references are all gone
            TraceEvent(SevInfo, "WorkerShutdownComplete", interf.id());
        }

        throw e;
    }
}
ACTOR Future<Void> extractClusterInterface(Reference<AsyncVar<Optional<ClusterControllerFullInterface>>> a,
                                           Reference<AsyncVar<Optional<ClusterInterface>>> b) {
    loop {
        if (a->get().present())
            b->set(a->get().get().clientInterface);
        else
            b->set(Optional<ClusterInterface>());
        wait(a->onChange());
    }
}

static std::set<int> const& normalWorkerErrors() {
    static std::set<int> s;
    if (s.empty()) {
        s.insert(error_code_please_reboot);
        s.insert(error_code_please_reboot_delete);
    }
    return s;
}
ACTOR Future<Void> fileNotFoundToNever(Future<Void> f) {
    try {
        wait(f);
        return Void();
    } catch (Error& e) {
        if (e.code() == error_code_file_not_found) {
            TraceEvent(SevWarn, "ClusterCoordinatorFailed").error(e);
            return Never();
        }
        throw;
    }
}

ACTOR Future<Void> printTimeout() {
    wait(delay(5));
    if (!g_network->isSimulated()) {
        fprintf(stderr, "Warning: FDBD has not joined the cluster after 5 seconds.\n");
        fprintf(stderr, "  Check configuration and availability using the 'status' command with the fdbcli\n");
    }
    return Void();
}
ACTOR Future<Void> printOnFirstConnected(Reference<AsyncVar<Optional<ClusterInterface>>> ci) {
    state Future<Void> timeoutFuture = printTimeout();
    loop {
        choose {
            when(wait(ci->get().present()
                          ? IFailureMonitor::failureMonitor().onStateEqual(ci->get().get().openDatabase.getEndpoint(),
                                                                           FailureStatus(false))
                          : Never())) {
                printf("FDBD joined cluster.\n");
                TraceEvent("FDBDConnected");
                return Void();
            }
            when(wait(ci->onChange())) {}
        }
    }
}
ClusterControllerPriorityInfo getCCPriorityInfo(std::string filePath, ProcessClass processClass) {
    if (!fileExists(filePath))
        return ClusterControllerPriorityInfo(ProcessClass(processClass.classType(), ProcessClass::CommandLineSource)
                                                 .machineClassFitness(ProcessClass::ClusterController),
                                             false, ClusterControllerPriorityInfo::FitnessUnknown);
    std::string contents(readFileBytes(filePath, 1000));
    BinaryReader br(StringRef(contents), IncludeVersion());
    ClusterControllerPriorityInfo priorityInfo(ProcessClass::UnsetFit, false,
                                               ClusterControllerPriorityInfo::FitnessUnknown);
    br >> priorityInfo;
    if (!br.empty()) {
        if (g_network->isSimulated()) {
            ASSERT(false);
        } else {
            TraceEvent(SevWarnAlways, "FitnessFileCorrupted").detail("filePath", filePath);
            return ClusterControllerPriorityInfo(ProcessClass(processClass.classType(), ProcessClass::CommandLineSource)
                                                     .machineClassFitness(ProcessClass::ClusterController),
                                                 false, ClusterControllerPriorityInfo::FitnessUnknown);
        }
    }
    return priorityInfo;
}
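
// The fitness file is the on-disk cache of this process's most recent cluster controller priority: getCCPriorityInfo()
// above reads it at startup, and monitorAndWriteCCPriorityInfo() below rewrites it whenever the in-memory value changes.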
ACTOR Future<Void> monitorAndWriteCCPriorityInfo(std::string filePath,
                                                 Reference<AsyncVar<ClusterControllerPriorityInfo>> asyncPriorityInfo) {
    loop {
        wait(asyncPriorityInfo->onChange());
        std::string contents(BinaryWriter::toValue(asyncPriorityInfo->get(),
                                                   IncludeVersion(ProtocolVersion::withClusterControllerPriorityInfo()))
                                 .toString());
        atomicReplace(filePath, contents, false);
    }
}
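
// Holding an OPEN_LOCK on the processId file ensures that only one fdbserver process uses a data folder at a time, and
// the UID stored in it gives the process a stable identity across restarts. In simulation a corrupted file is deleted
// and recreated, since injected faults can truncate the write.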
ACTOR Future<UID> createAndLockProcessIdFile(std::string folder) {
    state UID processIDUid;
    platform::createDirectory(folder);

    loop {
        try {
            state std::string lockFilePath = joinPath(folder, "processId");
            state ErrorOr<Reference<IAsyncFile>> lockFile = wait(errorOr(IAsyncFileSystem::filesystem(g_network)->open(
                lockFilePath, IAsyncFile::OPEN_READWRITE | IAsyncFile::OPEN_LOCK, 0600)));

            if (lockFile.isError() && lockFile.getError().code() == error_code_file_not_found &&
                !fileExists(lockFilePath)) {
                Reference<IAsyncFile> _lockFile = wait(IAsyncFileSystem::filesystem()->open(
                    lockFilePath,
                    IAsyncFile::OPEN_ATOMIC_WRITE_AND_CREATE | IAsyncFile::OPEN_CREATE | IAsyncFile::OPEN_LOCK |
                        IAsyncFile::OPEN_READWRITE,
                    0600));
                lockFile = _lockFile;
                processIDUid = deterministicRandom()->randomUniqueID();
                BinaryWriter wr(IncludeVersion(ProtocolVersion::withProcessIDFile()));
                wr << processIDUid;
                wait(lockFile.get()->write(wr.getData(), wr.getLength(), 0));
                wait(lockFile.get()->sync());
            } else {
                if (lockFile.isError())
                    throw lockFile.getError(); // If we've failed to open the file, throw an exception

                int64_t fileSize = wait(lockFile.get()->size());
                state Key fileData = makeString(fileSize);
                wait(success(lockFile.get()->read(mutateString(fileData), fileSize, 0)));
                try {
                    processIDUid = BinaryReader::fromStringRef<UID>(fileData, IncludeVersion());
                    return processIDUid;
                } catch (Error& e) {
                    if (!g_network->isSimulated()) {
                        throw;
                    }
                    lockFile = ErrorOr<Reference<IAsyncFile>>();
                    wait(IAsyncFileSystem::filesystem()->deleteFile(lockFilePath, true));
                }
            }
        } catch (Error& e) {
            if (e.code() == error_code_actor_cancelled) {
                throw;
            }
            if (!e.isInjectedFault()) {
                fprintf(stderr, "ERROR: error creating or opening process id file `%s'.\n",
                        joinPath(folder, "processId").c_str());
            }
            TraceEvent(SevError, "OpenProcessIdError").error(e);
            throw;
        }
    }
}
ACTOR Future<MonitorLeaderInfo> monitorLeaderRemotelyOneGeneration(Reference<ClusterConnectionFile> connFile,
                                                                   Reference<AsyncVar<Value>> result,
                                                                   MonitorLeaderInfo info) {
    state ClusterConnectionString ccf = info.intermediateConnFile->getConnectionString();
    state vector<NetworkAddress> addrs = ccf.coordinators();
    state ElectionResultRequest request;
    state int index = 0;
    state int successIndex = 0;
    request.key = ccf.clusterKey();
    request.coordinators = ccf.coordinators();
    deterministicRandom()->randomShuffle(addrs);

    loop {
        LeaderElectionRegInterface interf(addrs[index]);
        request.reply = ReplyPromise<Optional<LeaderInfo>>();

        ErrorOr<Optional<LeaderInfo>> leader = wait(interf.electionResult.tryGetReply(request));
        if (leader.present()) {
            if (leader.get().present()) {
                if (leader.get().get().forward) {
                    info.intermediateConnFile = Reference<ClusterConnectionFile>(new ClusterConnectionFile(
                        connFile->getFilename(), ClusterConnectionString(leader.get().get().serializedInfo.toString())));
                    return info;
                }
                if (connFile != info.intermediateConnFile) {
                    if (!info.hasConnected) {
                        TraceEvent(SevWarnAlways, "IncorrectClusterFileContentsAtConnection")
                            .detail("Filename", connFile->getFilename())
                            .detail("ConnectionStringFromFile", connFile->getConnectionString().toString())
                            .detail("CurrentConnectionString",
                                    info.intermediateConnFile->getConnectionString().toString());
                    }
                    connFile->setConnectionString(info.intermediateConnFile->getConnectionString());
                    info.intermediateConnFile = connFile;
                }

                info.hasConnected = true;
                connFile->notifyConnected();
                request.knownLeader = leader.get().get().changeID;

                ClusterControllerPriorityInfo info = leader.get().get().getPriorityInfo();
                if (leader.get().get().serializedInfo.size() && !info.isExcluded &&
                    (info.dcFitness == ClusterControllerPriorityInfo::FitnessPrimary ||
                     info.dcFitness == ClusterControllerPriorityInfo::FitnessPreferred ||
                     info.dcFitness == ClusterControllerPriorityInfo::FitnessUnknown)) {
                    result->set(leader.get().get().serializedInfo);
                } else {
                    result->set(Value());
                }
            }
            successIndex = index;
        } else {
            index = (index + 1) % addrs.size();
            if (index == successIndex) {
                wait(delay(CLIENT_KNOBS->COORDINATOR_RECONNECTION_DELAY));
            }
        }
    }
}
ACTOR Future<Void> monitorLeaderRemotelyInternal(Reference<ClusterConnectionFile> connFile,
                                                 Reference<AsyncVar<Value>> outSerializedLeaderInfo) {
    state MonitorLeaderInfo info(connFile);
    loop {
        MonitorLeaderInfo _info = wait(monitorLeaderRemotelyOneGeneration(connFile, outSerializedLeaderInfo, info));
        info = _info;
    }
}

template <class LeaderInterface>
Future<Void> monitorLeaderRemotely(Reference<ClusterConnectionFile> const& connFile,
                                   Reference<AsyncVar<Optional<LeaderInterface>>> const& outKnownLeader) {
    LeaderDeserializer<LeaderInterface> deserializer;
    Reference<AsyncVar<Value>> serializedInfo(new AsyncVar<Value>);
    Future<Void> m = monitorLeaderRemotelyInternal(connFile, serializedInfo);
    return m || deserializer(serializedInfo, outKnownLeader);
}
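
// Workers whose process class makes them a WorstFit cluster controller candidate only monitor the election passively
// at first; if no healthy cluster controller is observed for a randomized delay, they cancel the monitor and join the
// election themselves by running the full clusterController() actor (see fdbd() below).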
ACTOR Future<Void> monitorLeaderRemotelyWithDelayedCandidacy(
    Reference<ClusterConnectionFile> connFile, Reference<AsyncVar<Optional<ClusterControllerFullInterface>>> currentCC,
    Reference<AsyncVar<ClusterControllerPriorityInfo>> asyncPriorityInfo, Future<Void> recoveredDiskFiles,
    LocalityData locality, Reference<AsyncVar<ServerDBInfo>> dbInfo) {
    state Future<Void> monitor = monitorLeaderRemotely(connFile, currentCC);
    state Future<Void> timeout;

    wait(recoveredDiskFiles);

    loop {
        if (currentCC->get().present() && dbInfo->get().clusterInterface == currentCC->get().get() &&
            IFailureMonitor::failureMonitor()
                .getState(currentCC->get().get().registerWorker.getEndpoint())
                .isAvailable()) {
            timeout = Future<Void>();
        } else if (!timeout.isValid()) {
            timeout =
                delay(SERVER_KNOBS->MIN_DELAY_CC_WORST_FIT_CANDIDACY_SECONDS +
                      (deterministicRandom()->random01() * (SERVER_KNOBS->MAX_DELAY_CC_WORST_FIT_CANDIDACY_SECONDS -
                                                            SERVER_KNOBS->MIN_DELAY_CC_WORST_FIT_CANDIDACY_SECONDS)));
        }
        choose {
            when(wait(currentCC->onChange())) {}
            when(wait(dbInfo->onChange())) {}
            when(wait(currentCC->get().present() ? IFailureMonitor::failureMonitor().onStateChanged(
                                                       currentCC->get().get().registerWorker.getEndpoint())
                                                 : Never())) {}
            when(wait(timeout.isValid() ? timeout : Never())) {
                monitor.cancel();
                wait(clusterController(connFile, currentCC, asyncPriorityInfo, recoveredDiskFiles, locality));
                return Void();
            }
        }
    }
}
ACTOR Future<Void> fdbd(Reference<ClusterConnectionFile> connFile,
                        LocalityData localities,
                        ProcessClass processClass,
                        std::string dataFolder,
                        std::string coordFolder,
                        int64_t memoryLimit,
                        std::string metricsConnFile,
                        std::string metricsPrefix,
                        int64_t memoryProfileThreshold,
                        std::string whitelistBinPaths) {
    state vector<Future<Void>> actors;
    state Promise<Void> recoveredDiskFiles;

    try {
        ServerCoordinators coordinators(connFile);
        if (g_network->isSimulated()) {
            whitelistBinPaths = ",, random_path, /bin/snap_create.sh,,";
        }
        TraceEvent("StartingFDBD")
            .detail("ZoneID", localities.zoneId())
            .detail("MachineId", localities.machineId())
            .detail("DiskPath", dataFolder)
            .detail("CoordPath", coordFolder)
            .detail("WhiteListBinPath", whitelistBinPaths);

        // SOMEDAY: start the services on the machine in a staggered fashion in simulation?
        // Endpoints should be registered first before any process trying to connect to it.
        // So coordinationServer actor should be the first one executed before any other.
        if (coordFolder.size()) {
            // SOMEDAY: remove the fileNotFound wrapper and make DiskQueue construction safe from errors setting up
            // their files
            actors.push_back(fileNotFoundToNever(coordinationServer(coordFolder)));
        }

        state UID processIDUid = wait(createAndLockProcessIdFile(dataFolder));
        localities.set(LocalityData::keyProcessId, processIDUid.toString());
        // Only one process can execute on a dataFolder from this point onwards

        std::string fitnessFilePath = joinPath(dataFolder, "fitness");
        Reference<AsyncVar<Optional<ClusterControllerFullInterface>>> cc(
            new AsyncVar<Optional<ClusterControllerFullInterface>>);
        Reference<AsyncVar<Optional<ClusterInterface>>> ci(new AsyncVar<Optional<ClusterInterface>>);
        Reference<AsyncVar<ClusterControllerPriorityInfo>> asyncPriorityInfo(
            new AsyncVar<ClusterControllerPriorityInfo>(getCCPriorityInfo(fitnessFilePath, processClass)));
        Reference<AsyncVar<ServerDBInfo>> dbInfo(new AsyncVar<ServerDBInfo>(ServerDBInfo()));

        actors.push_back(reportErrors(monitorAndWriteCCPriorityInfo(fitnessFilePath, asyncPriorityInfo),
                                      "MonitorAndWriteCCPriorityInfo"));
        if (processClass.machineClassFitness(ProcessClass::ClusterController) == ProcessClass::NeverAssign) {
            actors.push_back(reportErrors(monitorLeader(connFile, cc), "ClusterController"));
        } else if (processClass.machineClassFitness(ProcessClass::ClusterController) == ProcessClass::WorstFit &&
                   SERVER_KNOBS->MAX_DELAY_CC_WORST_FIT_CANDIDACY_SECONDS > 0) {
            actors.push_back(reportErrors(monitorLeaderRemotelyWithDelayedCandidacy(
                                              connFile, cc, asyncPriorityInfo, recoveredDiskFiles.getFuture(),
                                              localities, dbInfo),
                                          "ClusterController"));
        } else {
            actors.push_back(reportErrors(
                clusterController(connFile, cc, asyncPriorityInfo, recoveredDiskFiles.getFuture(), localities),
                "ClusterController"));
        }
        actors.push_back(reportErrors(extractClusterInterface(cc, ci), "ExtractClusterInterface"));
        actors.push_back(reportErrorsExcept(workerServer(connFile, cc, localities, asyncPriorityInfo, processClass,
                                                         dataFolder, memoryLimit, metricsConnFile, metricsPrefix,
                                                         recoveredDiskFiles, memoryProfileThreshold, coordFolder,
                                                         whitelistBinPaths, dbInfo),
                                            "WorkerServer", UID(), &normalWorkerErrors()));
        state Future<Void> firstConnect = reportErrors(printOnFirstConnected(ci), "ClusterFirstConnectedError");

        wait(quorum(actors, 1));
        ASSERT(false); // None of these actors should terminate normally
        throw internal_error();
    } catch (Error& e) {
        // Make sure actors are cancelled before recoveredDiskFiles is destructed.
        // Otherwise, these actors may get a broken promise error.
        for (auto f : actors) f.cancel();

        Error err = checkIOTimeout(e);
        throw err;
    }
}
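
// Role name / abbreviation pairs. The explicit 'false' for WORKER and SHARED_TRANSACTION_LOG is assumed here to opt
// those roles out of the default per-role trace handling that the other roles get (see startRole/traceRole above).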
const Role Role::WORKER("Worker", "WK", false);
const Role Role::STORAGE_SERVER("StorageServer", "SS");
const Role Role::TRANSACTION_LOG("TLog", "TL");
const Role Role::SHARED_TRANSACTION_LOG("SharedTLog", "SL", false);
const Role Role::COMMIT_PROXY("CommitProxyServer", "CP");
const Role Role::GRV_PROXY("GrvProxyServer", "GP");
const Role Role::MASTER("MasterServer", "MS");
const Role Role::RESOLVER("Resolver", "RV");
const Role Role::CLUSTER_CONTROLLER("ClusterController", "CC");
const Role Role::TESTER("Tester", "TS");
const Role Role::LOG_ROUTER("LogRouter", "LR");
const Role Role::DATA_DISTRIBUTOR("DataDistributor", "DD");
const Role Role::RATEKEEPER("Ratekeeper", "RK");
const Role Role::STORAGE_CACHE("StorageCache", "SC");
const Role Role::COORDINATOR("Coordinator", "CD");
const Role Role::BACKUP("Backup", "BK");