A giant translation of TaskFooPriority -> TaskPriority::Foo
This is so that APIs that take priorities no longer take plain ints; int parameters are everywhere, which makes it easy to accidentally pass the wrong value.
This commit is contained in:
parent df0baa0066
commit 7a500cd37f
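The idea behind the rename, as a minimal sketch: a scoped enum gives priorities their own type, so a priority parameter no longer accepts an arbitrary int, and swapped or mistyped arguments fail to compile instead of silently passing. The enumerator names below appear in the diff, but the underlying type and numeric levels are illustrative guesses, not the actual FoundationDB definitions, and takeOld/takeNew are hypothetical functions.

    // Illustrative sketch only; the real enumerators and values live in the flow headers.
    #include <cstdint>

    enum class TaskPriority : int64_t {
        DefaultEndpoint = 7000,
        DefaultYield    = 7010,
        FailureMonitor  = 8700,
        FlushTrace      = 10500,
    };

    // Before: any integer converts, so a byte count can land in the priority slot.
    void takeOld(int64_t priority, int64_t bytes) {}
    // After: only a TaskPriority is accepted where a priority is expected.
    void takeNew(TaskPriority priority, int64_t bytes) {}

    void example() {
        takeOld(1024, 7010);                        // arguments swapped, still compiles
        takeNew(TaskPriority::DefaultYield, 1024);  // ok
        // takeNew(1024, 7010);                     // error: no implicit int -> TaskPriority
    }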
@@ -85,7 +85,7 @@ void fdb_flow_test() {
 openTraceFile(NetworkAddress(), 1000000, 1000000, ".");
 systemMonitor();
-uncancellable(recurring(&systemMonitor, 5.0, TaskFlushTrace));
+uncancellable(recurring(&systemMonitor, 5.0, TaskPriority::FlushTrace));

 Future<Void> t = _test();

@@ -179,7 +179,7 @@ namespace FDB {
 }

 void backToFutureCallback( FDBFuture* f, void* data ) {
-g_network->onMainThread( Promise<Void>((SAV<Void>*)data), TaskDefaultOnMainThread ); // SOMEDAY: think about this priority
+g_network->onMainThread( Promise<Void>((SAV<Void>*)data), TaskPriority::DefaultOnMainThread ); // SOMEDAY: think about this priority
 }

 // backToFuture<Type>( FDBFuture*, (FDBFuture* -> Type) ) -> Future<Type>

@@ -419,7 +419,7 @@ ACTOR Future<Void> readCommitted(Database cx, PromiseStream<RangeResultWithVersi
 //add lock
 releaser.release();
-wait(lock->take(TaskDefaultYield, limits.bytes + CLIENT_KNOBS->VALUE_SIZE_LIMIT + CLIENT_KNOBS->SYSTEM_KEY_SIZE_LIMIT));
+wait(lock->take(TaskPriority::DefaultYield, limits.bytes + CLIENT_KNOBS->VALUE_SIZE_LIMIT + CLIENT_KNOBS->SYSTEM_KEY_SIZE_LIMIT));
 releaser = FlowLock::Releaser(*lock, limits.bytes + CLIENT_KNOBS->VALUE_SIZE_LIMIT + CLIENT_KNOBS->SYSTEM_KEY_SIZE_LIMIT);

 state Standalone<RangeResultRef> values = wait(tr.getRange(begin, end, limits));

@@ -495,7 +495,7 @@ ACTOR Future<Void> readCommitted(Database cx, PromiseStream<RCGroup> results, Fu
 //add lock
 wait(active);
 releaser.release();
-wait(lock->take(TaskDefaultYield, rangevalue.expectedSize() + rcGroup.items.expectedSize()));
+wait(lock->take(TaskPriority::DefaultYield, rangevalue.expectedSize() + rcGroup.items.expectedSize()));
 releaser = FlowLock::Releaser(*lock, rangevalue.expectedSize() + rcGroup.items.expectedSize());

 for (auto & s : rangevalue){

@@ -613,7 +613,7 @@ ACTOR Future<int> dumpData(Database cx, PromiseStream<RCGroup> results, Referenc
 req.flags = req.flags | CommitTransactionRequest::FLAG_IS_LOCK_AWARE;

 totalBytes += mutationSize;
-wait( commitLock->take(TaskDefaultYield, mutationSize) );
+wait( commitLock->take(TaskPriority::DefaultYield, mutationSize) );
 addActor.send( commitLock->releaseWhen( success(commit.getReply(req)), mutationSize ) );

 if(endOfStream) {

@@ -653,7 +653,7 @@ ACTOR Future<Void> coalesceKeyVersionCache(Key uid, Version endVersion, Referenc
 req.transaction.read_snapshot = committedVersion->get();
 req.flags = req.flags | CommitTransactionRequest::FLAG_IS_LOCK_AWARE;

-wait( commitLock->take(TaskDefaultYield, mutationSize) );
+wait( commitLock->take(TaskPriority::DefaultYield, mutationSize) );
 addActor.send( commitLock->releaseWhen( success(commit.getReply(req)), mutationSize ) );
 }

@@ -671,7 +671,7 @@ ACTOR Future<Void> applyMutations(Database cx, Key uid, Key addPrefix, Key remov
 try {
 loop {
 if(beginVersion >= *endVersion) {
-wait( commitLock.take(TaskDefaultYield, CLIENT_KNOBS->BACKUP_LOCK_BYTES) );
+wait( commitLock.take(TaskPriority::DefaultYield, CLIENT_KNOBS->BACKUP_LOCK_BYTES) );
 commitLock.release(CLIENT_KNOBS->BACKUP_LOCK_BYTES);
 if(beginVersion >= *endVersion) {
 return Void();

@@ -52,12 +52,12 @@ struct ClusterInterface {
 }

 void initEndpoints() {
-openDatabase.getEndpoint( TaskClusterController );
-failureMonitoring.getEndpoint( TaskFailureMonitor );
-databaseStatus.getEndpoint( TaskClusterController );
-ping.getEndpoint( TaskClusterController );
-getClientWorkers.getEndpoint( TaskClusterController );
-forceRecovery.getEndpoint( TaskClusterController );
+openDatabase.getEndpoint( TaskPriority::ClusterController );
+failureMonitoring.getEndpoint( TaskPriority::FailureMonitor );
+databaseStatus.getEndpoint( TaskPriority::ClusterController );
+ping.getEndpoint( TaskPriority::ClusterController );
+getClientWorkers.getEndpoint( TaskPriority::ClusterController );
+forceRecovery.getEndpoint( TaskPriority::ClusterController );
 }

 template <class Ar>

@@ -54,7 +54,7 @@ public:
 // For internal (fdbserver) use only
 static Database create( Reference<AsyncVar<Optional<ClusterInterface>>> clusterInterface, Reference<ClusterConnectionFile> connFile, LocalityData const& clientLocality );
-static Database create( Reference<AsyncVar<ClientDBInfo>> clientInfo, Future<Void> clientInfoMonitor, LocalityData clientLocality, bool enableLocalityLoadBalance, int taskID=TaskDefaultEndpoint, bool lockAware=false, int apiVersion=Database::API_VERSION_LATEST );
+static Database create( Reference<AsyncVar<ClientDBInfo>> clientInfo, Future<Void> clientInfoMonitor, LocalityData clientLocality, bool enableLocalityLoadBalance, TaskPriority taskID=TaskPriority::DefaultEndpoint, bool lockAware=false, int apiVersion=Database::API_VERSION_LATEST );

 ~DatabaseContext();

@@ -97,7 +97,7 @@ public:
 //private:
 explicit DatabaseContext( Reference<Cluster> cluster, Reference<AsyncVar<ClientDBInfo>> clientDBInfo,
-Future<Void> clientInfoMonitor, Standalone<StringRef> dbId, int taskID, LocalityData const& clientLocality,
+Future<Void> clientInfoMonitor, Standalone<StringRef> dbId, TaskPriority taskID, LocalityData const& clientLocality,
 bool enableLocalityLoadBalance, bool lockAware, int apiVersion = Database::API_VERSION_LATEST );

 explicit DatabaseContext( const Error &err );

@@ -161,7 +161,7 @@ public:
 Future<Void> logger;

-int taskID;
+TaskPriority taskID;

 Int64MetricHandle getValueSubmitted;
 EventMetricHandle<GetValueComplete> getValueCompleted;

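For callers of signatures like the ones above, the practical effect is that the priority argument now has to be spelled as an enumerator while the default still applies when it is omitted. A hedged sketch of that behavior follows; createDatabase here is a hypothetical stand-in, not the real client API, and the numeric values are illustrative.

    // Sketch of how a defaulted TaskPriority parameter behaves for callers.
    enum class TaskPriority : int64_t { DefaultEndpoint = 7000, DataDistribution = 3500 }; // values illustrative

    struct Database {};
    Database createDatabase(bool lockAware = false,
                            TaskPriority taskID = TaskPriority::DefaultEndpoint) {
        (void)lockAware; (void)taskID;
        return Database{};
    }

    void callers() {
        Database a = createDatabase();                                      // default still applies
        Database b = createDatabase(true, TaskPriority::DataDistribution);  // explicit priority
        // Database c = createDatabase(true, 7000);                         // error: int is not a TaskPriority
    }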
@@ -41,7 +41,7 @@ ACTOR Future<Void> failureMonitorClientLoop(
 {
 state Version version = 0;
 state Future<FailureMonitoringReply> request = Never();
-state Future<Void> nextRequest = delay(0, TaskFailureMonitor);
+state Future<Void> nextRequest = delay(0, TaskPriority::FailureMonitor);
 state Future<Void> requestTimeout = Never();
 state double before = now();
 state double waitfor = 0;

@@ -61,7 +61,7 @@ ACTOR Future<Void> failureMonitorClientLoop(
 loop {
 choose {
 when( FailureMonitoringReply reply = wait( request ) ) {
-g_network->setCurrentTask(TaskDefaultDelay);
+g_network->setCurrentTask(TaskPriority::DefaultDelay);
 request = Never();
 requestTimeout = Never();
 if (reply.allOthersFailed) {

@@ -122,10 +122,10 @@ ACTOR Future<Void> failureMonitorClientLoop(
 }
 before = now();
 waitfor = reply.clientRequestIntervalMS * .001;
-nextRequest = delayJittered( waitfor, TaskFailureMonitor );
+nextRequest = delayJittered( waitfor, TaskPriority::FailureMonitor );
 }
 when( wait( requestTimeout ) ) {
-g_network->setCurrentTask(TaskDefaultDelay);
+g_network->setCurrentTask(TaskPriority::DefaultDelay);
 requestTimeout = Never();
 TraceEvent(SevWarn, "FailureMonitoringServerDown").detail("OldServerID",controller.id());
 monitor->setStatus(controlAddr.address, FailureStatus(true));

@@ -136,7 +136,7 @@ ACTOR Future<Void> failureMonitorClientLoop(
 }
 }
 when( wait( nextRequest ) ) {
-g_network->setCurrentTask(TaskDefaultDelay);
+g_network->setCurrentTask(TaskPriority::DefaultDelay);
 nextRequest = Never();

 double elapsed = now() - before;

@@ -152,9 +152,9 @@ ACTOR Future<Void> failureMonitorClientLoop(
 req.addresses = g_network->getLocalAddresses();
 if (trackMyStatus)
 req.senderStatus = FailureStatus(false);
-request = controller.failureMonitoring.getReply( req, TaskFailureMonitor );
+request = controller.failureMonitoring.getReply( req, TaskPriority::FailureMonitor );
 if(!controller.failureMonitoring.getEndpoint().isLocal())
-requestTimeout = delay( fmState->serverFailedTimeout, TaskFailureMonitor );
+requestTimeout = delay( fmState->serverFailedTimeout, TaskPriority::FailureMonitor );
 }
 }
 }

@@ -93,7 +93,7 @@ namespace HTTP {
 loop {
 // Wait for connection to have something to read
 wait(conn->onReadable());
-wait( delay( 0, TaskReadSocket ) );
+wait( delay( 0, TaskPriority::ReadSocket ) );

 // Read into buffer
 int originalSize = buf->size();

@@ -353,7 +353,7 @@ namespace HTTP {
 loop {
 wait(conn->onWritable());
-wait( delay( 0, TaskWriteSocket ) );
+wait( delay( 0, TaskPriority::WriteSocket ) );

 // If we already got a response, before finishing sending the request, then close the connection,
 // set the Connection header to "close" as a hint to the caller that this connection can't be used

@@ -967,7 +967,7 @@ ACTOR Future<CoordinatorsResult::Type> changeQuorum( Database cx, Reference<IQuo
 vector<Future<Optional<LeaderInfo>>> leaderServers;
 ClientCoordinators coord( Reference<ClusterConnectionFile>( new ClusterConnectionFile( conn ) ) );
 for( int i = 0; i < coord.clientLeaderServers.size(); i++ )
-leaderServers.push_back( retryBrokenPromise( coord.clientLeaderServers[i].getLeader, GetLeaderRequest( coord.clusterKey, UID() ), TaskCoordinationReply ) );
+leaderServers.push_back( retryBrokenPromise( coord.clientLeaderServers[i].getLeader, GetLeaderRequest( coord.clusterKey, UID() ), TaskPriority::CoordinationReply ) );

 choose {
 when( wait( waitForAll( leaderServers ) ) ) {}

@@ -1047,7 +1047,7 @@ struct AutoQuorumChange : IQuorumChange {
 ClientCoordinators coord(ccf);
 vector<Future<Optional<LeaderInfo>>> leaderServers;
 for( int i = 0; i < coord.clientLeaderServers.size(); i++ )
-leaderServers.push_back( retryBrokenPromise( coord.clientLeaderServers[i].getLeader, GetLeaderRequest( coord.clusterKey, UID() ), TaskCoordinationReply ) );
+leaderServers.push_back( retryBrokenPromise( coord.clientLeaderServers[i].getLeader, GetLeaderRequest( coord.clusterKey, UID() ), TaskPriority::CoordinationReply ) );
 Optional<vector<Optional<LeaderInfo>>> results = wait( timeout( getAll(leaderServers), CLIENT_KNOBS->IS_ACCEPTABLE_DELAY ) );
 if (!results.present()) return false; // Not all responded
 for(auto& r : results.get())

@@ -67,10 +67,10 @@ struct MasterProxyInterface {
 }

 void initEndpoints() {
-getConsistentReadVersion.getEndpoint(TaskProxyGetConsistentReadVersion);
-getRawCommittedVersion.getEndpoint(TaskProxyGetRawCommittedVersion);
-commit.getEndpoint(TaskProxyCommitDispatcher);
-getStorageServerRejoinInfo.getEndpoint(TaskProxyStorageRejoin);
+getConsistentReadVersion.getEndpoint(TaskPriority::ProxyGetConsistentReadVersion);
+getRawCommittedVersion.getEndpoint(TaskPriority::ProxyGetRawCommittedVersion);
+commit.getEndpoint(TaskPriority::ProxyCommitDispatcher);
+getStorageServerRejoinInfo.getEndpoint(TaskPriority::ProxyStorageRejoin);
 //getKeyServersLocations.getEndpoint(TaskProxyGetKeyServersLocations); //do not increase the priority of these requests, because clients cans bring down the cluster with too many of these messages.
 }
 };

@@ -371,7 +371,7 @@ ClientLeaderRegInterface::ClientLeaderRegInterface( NetworkAddress remote )
 }

 ClientLeaderRegInterface::ClientLeaderRegInterface( INetwork* local ) {
-getLeader.makeWellKnownEndpoint( WLTOKEN_CLIENTLEADERREG_GETLEADER, TaskCoordination );
+getLeader.makeWellKnownEndpoint( WLTOKEN_CLIENTLEADERREG_GETLEADER, TaskPriority::Coordination );
 }

 // Nominee is the worker among all workers that are considered as leader by a coordinator

@@ -380,7 +380,7 @@ ClientLeaderRegInterface::ClientLeaderRegInterface( INetwork* local ) {
 ACTOR Future<Void> monitorNominee( Key key, ClientLeaderRegInterface coord, AsyncTrigger* nomineeChange, Optional<LeaderInfo> *info, int generation, Reference<AsyncVar<int>> connectedCoordinatorsNum ) {
 state bool hasCounted = false;
 loop {
-state Optional<LeaderInfo> li = wait( retryBrokenPromise( coord.getLeader, GetLeaderRequest( key, info->present() ? info->get().changeID : UID() ), TaskCoordinationReply ) );
+state Optional<LeaderInfo> li = wait( retryBrokenPromise( coord.getLeader, GetLeaderRequest( key, info->present() ? info->get().changeID : UID() ), TaskPriority::CoordinationReply ) );
 if (li.present() && !hasCounted && connectedCoordinatorsNum.isValid()) {
 connectedCoordinatorsNum->set(connectedCoordinatorsNum->get() + 1);
 hasCounted = true;

@@ -509,7 +509,7 @@ Future<HealthMetrics> DatabaseContext::getHealthMetrics(bool detailed = false) {
 DatabaseContext::DatabaseContext(
 Reference<Cluster> cluster, Reference<AsyncVar<ClientDBInfo>> clientInfo, Future<Void> clientInfoMonitor, Standalone<StringRef> dbId,
-int taskID, LocalityData const& clientLocality, bool enableLocalityLoadBalance, bool lockAware, int apiVersion )
+TaskPriority taskID, LocalityData const& clientLocality, bool enableLocalityLoadBalance, bool lockAware, int apiVersion )
 : cluster(cluster), clientInfo(clientInfo), clientInfoMonitor(clientInfoMonitor), dbId(dbId), taskID(taskID), clientLocality(clientLocality), enableLocalityLoadBalance(enableLocalityLoadBalance),
 lockAware(lockAware), apiVersion(apiVersion), provisional(false),
 transactionReadVersions(0), transactionLogicalReads(0), transactionPhysicalReads(0), transactionCommittedMutations(0), transactionCommittedMutationBytes(0),

@@ -629,10 +629,10 @@ Database DatabaseContext::create(Reference<AsyncVar<Optional<ClusterInterface>>>
 Reference<AsyncVar<ClientDBInfo>> clientInfo(new AsyncVar<ClientDBInfo>());
 Future<Void> clientInfoMonitor = delayedAsyncVar(connectedCoordinatorsNum, connectedCoordinatorsNumDelayed, CLIENT_KNOBS->CHECK_CONNECTED_COORDINATOR_NUM_DELAY) || monitorClientInfo(clusterInterface, connFile, clientInfo, connectedCoordinatorsNumDelayed);

-return Database(new DatabaseContext(cluster, clientInfo, clientInfoMonitor, LiteralStringRef(""), TaskDefaultEndpoint, clientLocality, true, false));
+return Database(new DatabaseContext(cluster, clientInfo, clientInfoMonitor, LiteralStringRef(""), TaskPriority::DefaultEndpoint, clientLocality, true, false));
 }

-Database DatabaseContext::create(Reference<AsyncVar<ClientDBInfo>> clientInfo, Future<Void> clientInfoMonitor, LocalityData clientLocality, bool enableLocalityLoadBalance, int taskID, bool lockAware, int apiVersion) {
+Database DatabaseContext::create(Reference<AsyncVar<ClientDBInfo>> clientInfo, Future<Void> clientInfoMonitor, LocalityData clientLocality, bool enableLocalityLoadBalance, TaskPriority taskID, bool lockAware, int apiVersion) {
 return Database( new DatabaseContext( Reference<Cluster>(nullptr), clientInfo, clientInfoMonitor, LiteralStringRef(""), taskID, clientLocality, enableLocalityLoadBalance, lockAware, apiVersion ) );
 }

@@ -820,10 +820,10 @@ Database Database::createDatabase( Reference<ClusterConnectionFile> connFile, in
 DatabaseContext *db;
 if(preallocatedDb) {
-db = new (preallocatedDb) DatabaseContext(cluster, clientInfo, clientInfoMonitor, LiteralStringRef(""), TaskDefaultEndpoint, clientLocality, true, false, apiVersion);
+db = new (preallocatedDb) DatabaseContext(cluster, clientInfo, clientInfoMonitor, LiteralStringRef(""), TaskPriority::DefaultEndpoint, clientLocality, true, false, apiVersion);
 }
 else {
-db = new DatabaseContext(cluster, clientInfo, clientInfoMonitor, LiteralStringRef(""), TaskDefaultEndpoint, clientLocality, true, false, apiVersion);
+db = new DatabaseContext(cluster, clientInfo, clientInfoMonitor, LiteralStringRef(""), TaskPriority::DefaultEndpoint, clientLocality, true, false, apiVersion);
 }

 return Database(db);

@@ -879,7 +879,7 @@ void Cluster::init( Reference<ClusterConnectionFile> connFile, bool startClientI
 initializeSystemMonitorMachineState(SystemMonitorMachineState(IPAddress(publicIP)));

 systemMonitor();
-uncancellable( recurring( &systemMonitor, CLIENT_KNOBS->SYSTEM_MONITOR_INTERVAL, TaskFlushTrace ) );
+uncancellable( recurring( &systemMonitor, CLIENT_KNOBS->SYSTEM_MONITOR_INTERVAL, TaskPriority::FlushTrace ) );
 }

 failMon = failureMonitorClient( clusterInterface, false );

@@ -1235,7 +1235,7 @@ ACTOR Future< pair<KeyRange,Reference<LocationInfo>> > getKeyLocation_internal(
 loop {
 choose {
 when ( wait( cx->onMasterProxiesChanged() ) ) {}
-when ( GetKeyServerLocationsReply rep = wait( loadBalance( cx->getMasterProxies(info.useProvisionalProxies), &MasterProxyInterface::getKeyServersLocations, GetKeyServerLocationsRequest(key, Optional<KeyRef>(), 100, isBackward, key.arena()), TaskDefaultPromiseEndpoint ) ) ) {
+when ( GetKeyServerLocationsReply rep = wait( loadBalance( cx->getMasterProxies(info.useProvisionalProxies), &MasterProxyInterface::getKeyServersLocations, GetKeyServerLocationsRequest(key, Optional<KeyRef>(), 100, isBackward, key.arena()), TaskPriority::DefaultPromiseEndpoint ) ) ) {
 if( info.debugID.present() )
 g_traceBatch.addEvent("TransactionDebug", info.debugID.get().first(), "NativeAPI.getKeyLocation.After");
 ASSERT( rep.results.size() == 1 );

@@ -1272,7 +1272,7 @@ ACTOR Future< vector< pair<KeyRange,Reference<LocationInfo>> > > getKeyRangeLoca
 loop {
 choose {
 when ( wait( cx->onMasterProxiesChanged() ) ) {}
-when ( GetKeyServerLocationsReply _rep = wait( loadBalance( cx->getMasterProxies(info.useProvisionalProxies), &MasterProxyInterface::getKeyServersLocations, GetKeyServerLocationsRequest(keys.begin, keys.end, limit, reverse, keys.arena()), TaskDefaultPromiseEndpoint ) ) ) {
+when ( GetKeyServerLocationsReply _rep = wait( loadBalance( cx->getMasterProxies(info.useProvisionalProxies), &MasterProxyInterface::getKeyServersLocations, GetKeyServerLocationsRequest(keys.begin, keys.end, limit, reverse, keys.arena()), TaskPriority::DefaultPromiseEndpoint ) ) ) {
 state GetKeyServerLocationsReply rep = _rep;
 if( info.debugID.present() )
 g_traceBatch.addEvent("TransactionDebug", info.debugID.get().first(), "NativeAPI.getKeyLocations.After");

@@ -1393,7 +1393,7 @@ ACTOR Future<Optional<Value>> getValue( Future<Version> version, Key key, Databa
 }
 state GetValueReply reply = wait(
 loadBalance(ssi.second, &StorageServerInterface::getValue, GetValueRequest(key, ver, getValueID),
-TaskDefaultPromiseEndpoint, false, cx->enableLocalityLoadBalance ? &cx->queueModel : NULL));
+TaskPriority::DefaultPromiseEndpoint, false, cx->enableLocalityLoadBalance ? &cx->queueModel : NULL));
 double latency = now() - startTimeD;
 cx->readLatencies.addSample(latency);
 if (trLogInfo) {

@@ -1456,7 +1456,7 @@ ACTOR Future<Key> getKey( Database cx, KeySelector k, Future<Version> version, T
 if( info.debugID.present() )
 g_traceBatch.addEvent("TransactionDebug", info.debugID.get().first(), "NativeAPI.getKey.Before"); //.detail("StartKey", k.getKey()).detail("Offset",k.offset).detail("OrEqual",k.orEqual);
 ++cx->transactionPhysicalReads;
-GetKeyReply reply = wait( loadBalance( ssi.second, &StorageServerInterface::getKey, GetKeyRequest(k, version.get()), TaskDefaultPromiseEndpoint, false, cx->enableLocalityLoadBalance ? &cx->queueModel : NULL ) );
+GetKeyReply reply = wait( loadBalance( ssi.second, &StorageServerInterface::getKey, GetKeyRequest(k, version.get()), TaskPriority::DefaultPromiseEndpoint, false, cx->enableLocalityLoadBalance ? &cx->queueModel : NULL ) );
 if( info.debugID.present() )
 g_traceBatch.addEvent("TransactionDebug", info.debugID.get().first(), "NativeAPI.getKey.After"); //.detail("NextKey",reply.sel.key).detail("Offset", reply.sel.offset).detail("OrEqual", k.orEqual);
 k = reply.sel;

@@ -1519,7 +1519,7 @@ ACTOR Future< Void > watchValue( Future<Version> version, Key key, Optional<Valu
 g_traceBatch.addAttach("WatchValueAttachID", info.debugID.get().first(), watchValueID.get().first());
 g_traceBatch.addEvent("WatchValueDebug", watchValueID.get().first(), "NativeAPI.watchValue.Before"); //.detail("TaskID", g_network->getCurrentTask());
 }
-state Version resp = wait( loadBalance( ssi.second, &StorageServerInterface::watchValue, WatchValueRequest(key, value, ver, watchValueID), TaskDefaultPromiseEndpoint ) );
+state Version resp = wait( loadBalance( ssi.second, &StorageServerInterface::watchValue, WatchValueRequest(key, value, ver, watchValueID), TaskPriority::DefaultPromiseEndpoint ) );
 if( info.debugID.present() ) {
 g_traceBatch.addEvent("WatchValueDebug", watchValueID.get().first(), "NativeAPI.watchValue.After"); //.detail("TaskID", g_network->getCurrentTask());
 }

@@ -1611,7 +1611,7 @@ ACTOR Future<Standalone<RangeResultRef>> getExactRange( Database cx, Version ver
 .detail("Servers", locations[shard].second->description());*/
 }
 ++cx->transactionPhysicalReads;
-GetKeyValuesReply rep = wait( loadBalance( locations[shard].second, &StorageServerInterface::getKeyValues, req, TaskDefaultPromiseEndpoint, false, cx->enableLocalityLoadBalance ? &cx->queueModel : NULL ) );
+GetKeyValuesReply rep = wait( loadBalance( locations[shard].second, &StorageServerInterface::getKeyValues, req, TaskPriority::DefaultPromiseEndpoint, false, cx->enableLocalityLoadBalance ? &cx->queueModel : NULL ) );
 if( info.debugID.present() )
 g_traceBatch.addEvent("TransactionDebug", info.debugID.get().first(), "NativeAPI.getExactRange.After");
 output.arena().dependsOn( rep.arena );

@@ -1888,7 +1888,7 @@ ACTOR Future<Standalone<RangeResultRef>> getRange( Database cx, Reference<Transa
 transaction_too_old(), future_version()
 });
 }
-GetKeyValuesReply rep = wait( loadBalance(beginServer.second, &StorageServerInterface::getKeyValues, req, TaskDefaultPromiseEndpoint, false, cx->enableLocalityLoadBalance ? &cx->queueModel : NULL ) );
+GetKeyValuesReply rep = wait( loadBalance(beginServer.second, &StorageServerInterface::getKeyValues, req, TaskPriority::DefaultPromiseEndpoint, false, cx->enableLocalityLoadBalance ? &cx->queueModel : NULL ) );

 if( info.debugID.present() ) {
 g_traceBatch.addEvent("TransactionDebug", info.debugID.get().first(), "NativeAPI.getRange.After");//.detail("SizeOf", rep.data.size());

@@ -2698,7 +2698,7 @@ ACTOR static Future<Void> tryCommit( Database cx, Reference<TransactionLogInfo>
 const std::vector<MasterProxyInterface>& proxies = cx->clientInfo->get().proxies;
 reply = proxies.size() ? throwErrorOr ( brokenPromiseToMaybeDelivered ( proxies[0].commit.tryGetReply(req) ) ) : Never();
 } else {
-reply = loadBalance( cx->getMasterProxies(info.useProvisionalProxies), &MasterProxyInterface::commit, req, TaskDefaultPromiseEndpoint, true );
+reply = loadBalance( cx->getMasterProxies(info.useProvisionalProxies), &MasterProxyInterface::commit, req, TaskPriority::DefaultPromiseEndpoint, true );
 }

 choose {

@@ -3073,7 +3073,7 @@ ACTOR Future<Void> readVersionBatcher( DatabaseContext *cx, FutureStream< std::p
 if (requests.size() == CLIENT_KNOBS->MAX_BATCH_SIZE)
 send_batch = true;
 else if (!timeout.isValid())
-timeout = delay(batchTime, TaskProxyGetConsistentReadVersion);
+timeout = delay(batchTime, TaskPriority::ProxyGetConsistentReadVersion);
 }
 when(wait(timeout.isValid() ? timeout : Never())) {
 send_batch = true;

@@ -3240,7 +3240,7 @@ ACTOR Future< StorageMetrics > waitStorageMetricsMultipleLocations(
 WaitMetricsRequest req(locations[i].first, StorageMetrics(), StorageMetrics());
 req.min.bytes = 0;
 req.max.bytes = -1;
-fx[i] = loadBalance( locations[i].second, &StorageServerInterface::waitMetrics, req, TaskDataDistribution );
+fx[i] = loadBalance( locations[i].second, &StorageServerInterface::waitMetrics, req, TaskPriority::DataDistribution );
 }
 wait( waitForAll(fx) );

@@ -3271,7 +3271,7 @@ ACTOR Future< StorageMetrics > waitStorageMetrics(
 int shardLimit )
 {
 loop {
-vector< pair<KeyRange, Reference<LocationInfo>> > locations = wait( getKeyRangeLocations( cx, keys, shardLimit, false, &StorageServerInterface::waitMetrics, TransactionInfo(TaskDataDistribution) ) );
+vector< pair<KeyRange, Reference<LocationInfo>> > locations = wait( getKeyRangeLocations( cx, keys, shardLimit, false, &StorageServerInterface::waitMetrics, TransactionInfo(TaskPriority::DataDistribution) ) );

 //SOMEDAY: Right now, if there are too many shards we delay and check again later. There may be a better solution to this.
 if(locations.size() < shardLimit) {

@@ -3281,7 +3281,7 @@ ACTOR Future< StorageMetrics > waitStorageMetrics(
 fx = waitStorageMetricsMultipleLocations( locations, min, max, permittedError );
 } else {
 WaitMetricsRequest req( keys, min, max );
-fx = loadBalance( locations[0].second, &StorageServerInterface::waitMetrics, req, TaskDataDistribution );
+fx = loadBalance( locations[0].second, &StorageServerInterface::waitMetrics, req, TaskPriority::DataDistribution );
 }
 StorageMetrics x = wait(fx);
 return x;

@@ -3291,14 +3291,14 @@ ACTOR Future< StorageMetrics > waitStorageMetrics(
 throw;
 }
 cx->invalidateCache(keys);
-wait(delay(CLIENT_KNOBS->WRONG_SHARD_SERVER_DELAY, TaskDataDistribution));
+wait(delay(CLIENT_KNOBS->WRONG_SHARD_SERVER_DELAY, TaskPriority::DataDistribution));
 }
 } else {
 TraceEvent(SevWarn, "WaitStorageMetricsPenalty")
 .detail("Keys", keys)
 .detail("Limit", CLIENT_KNOBS->STORAGE_METRICS_SHARD_LIMIT)
 .detail("JitteredSecondsOfPenitence", CLIENT_KNOBS->STORAGE_METRICS_TOO_MANY_SHARDS_DELAY);
-wait(delayJittered(CLIENT_KNOBS->STORAGE_METRICS_TOO_MANY_SHARDS_DELAY, TaskDataDistribution));
+wait(delayJittered(CLIENT_KNOBS->STORAGE_METRICS_TOO_MANY_SHARDS_DELAY, TaskPriority::DataDistribution));
 // make sure that the next getKeyRangeLocations() call will actually re-fetch the range
 cx->invalidateCache( keys );
 }

@@ -3324,13 +3324,13 @@ Future< StorageMetrics > Transaction::getStorageMetrics( KeyRange const& keys, i
 ACTOR Future< Standalone<VectorRef<KeyRef>> > splitStorageMetrics( Database cx, KeyRange keys, StorageMetrics limit, StorageMetrics estimated )
 {
 loop {
-state vector< pair<KeyRange, Reference<LocationInfo>> > locations = wait( getKeyRangeLocations( cx, keys, CLIENT_KNOBS->STORAGE_METRICS_SHARD_LIMIT, false, &StorageServerInterface::splitMetrics, TransactionInfo(TaskDataDistribution) ) );
+state vector< pair<KeyRange, Reference<LocationInfo>> > locations = wait( getKeyRangeLocations( cx, keys, CLIENT_KNOBS->STORAGE_METRICS_SHARD_LIMIT, false, &StorageServerInterface::splitMetrics, TransactionInfo(TaskPriority::DataDistribution) ) );
 state StorageMetrics used;
 state Standalone<VectorRef<KeyRef>> results;

 //SOMEDAY: Right now, if there are too many shards we delay and check again later. There may be a better solution to this.
 if(locations.size() == CLIENT_KNOBS->STORAGE_METRICS_SHARD_LIMIT) {
-wait(delay(CLIENT_KNOBS->STORAGE_METRICS_TOO_MANY_SHARDS_DELAY, TaskDataDistribution));
+wait(delay(CLIENT_KNOBS->STORAGE_METRICS_TOO_MANY_SHARDS_DELAY, TaskPriority::DataDistribution));
 cx->invalidateCache(keys);
 }
 else {

@@ -3341,7 +3341,7 @@ ACTOR Future< Standalone<VectorRef<KeyRef>> > splitStorageMetrics( Database cx,
 state int i = 0;
 for(; i<locations.size(); i++) {
 SplitMetricsRequest req( locations[i].first, limit, used, estimated, i == locations.size() - 1 );
-SplitMetricsReply res = wait( loadBalance( locations[i].second, &StorageServerInterface::splitMetrics, req, TaskDataDistribution ) );
+SplitMetricsReply res = wait( loadBalance( locations[i].second, &StorageServerInterface::splitMetrics, req, TaskPriority::DataDistribution ) );
 if( res.splits.size() && res.splits[0] <= results.back() ) { // split points are out of order, possibly because of moving data, throw error to retry
 ASSERT_WE_THINK(false); // FIXME: This seems impossible and doesn't seem to be covered by testing
 throw all_alternatives_failed();

@@ -3367,7 +3367,7 @@ ACTOR Future< Standalone<VectorRef<KeyRef>> > splitStorageMetrics( Database cx,
 throw;
 }
 cx->invalidateCache( keys );
-wait(delay(CLIENT_KNOBS->WRONG_SHARD_SERVER_DELAY, TaskDataDistribution));
+wait(delay(CLIENT_KNOBS->WRONG_SHARD_SERVER_DELAY, TaskPriority::DataDistribution));
 }
 }
 }

@@ -164,10 +164,10 @@ struct TransactionOptions {
 struct TransactionInfo {
 Optional<UID> debugID;
-int taskID;
+TaskPriority taskID;
 bool useProvisionalProxies;

-explicit TransactionInfo( int taskID ) : taskID(taskID), useProvisionalProxies(false) {}
+explicit TransactionInfo( TaskPriority taskID ) : taskID(taskID), useProvisionalProxies(false) {}
 };

 struct TransactionLogInfo : public ReferenceCounted<TransactionLogInfo>, NonCopyable {

@@ -287,7 +287,7 @@ public:
 void flushTrLogsIfEnabled();

 // These are to permit use as state variables in actors:
-Transaction() : info( TaskDefaultEndpoint ) {}
+Transaction() : info( TaskPriority::DefaultEndpoint ) {}
 void operator=(Transaction&& r) BOOST_NOEXCEPT;

 void reset();

@@ -291,7 +291,7 @@ ACTOR Future<Optional<StatusObject>> clientCoordinatorsStatusFetcher(Reference<C
 state vector<Future<Optional<LeaderInfo>>> leaderServers;
 for (int i = 0; i < coord.clientLeaderServers.size(); i++)
-leaderServers.push_back(retryBrokenPromise(coord.clientLeaderServers[i].getLeader, GetLeaderRequest(coord.clusterKey, UID()), TaskCoordinationReply));
+leaderServers.push_back(retryBrokenPromise(coord.clientLeaderServers[i].getLeader, GetLeaderRequest(coord.clusterKey, UID()), TaskPriority::CoordinationReply));

 wait( smartQuorum(leaderServers, leaderServers.size() / 2 + 1, 1.5) || delay(2.0) );

@@ -80,9 +80,9 @@ struct StorageServerInterface {
 bool operator == (StorageServerInterface const& s) const { return uniqueID == s.uniqueID; }
 bool operator < (StorageServerInterface const& s) const { return uniqueID < s.uniqueID; }
 void initEndpoints() {
-getValue.getEndpoint( TaskLoadBalancedEndpoint );
-getKey.getEndpoint( TaskLoadBalancedEndpoint );
-getKeyValues.getEndpoint( TaskLoadBalancedEndpoint );
+getValue.getEndpoint( TaskPriority::LoadBalancedEndpoint );
+getKey.getEndpoint( TaskPriority::LoadBalancedEndpoint );
+getKeyValues.getEndpoint( TaskPriority::LoadBalancedEndpoint );
 }
 };

@@ -31,7 +31,7 @@
 #include "flow/actorcompiler.h" // This must be the last #include.

 ACTOR template <class Tree>
-Future<Void> deferredCleanupActor( std::vector<Tree> toFree, int taskID = 7000 ) {
+Future<Void> deferredCleanupActor( std::vector<Tree> toFree, TaskPriority taskID = 7000 ) {
 state int freeCount = 0;
 while (!toFree.empty()) {
 Tree a = std::move( toFree.back() );

@@ -511,7 +511,7 @@ public:
 oldestVersion = newOldestVersion;
 }

-Future<Void> forgetVersionsBeforeAsync( Version newOldestVersion, int taskID = 7000 ) {
+Future<Void> forgetVersionsBeforeAsync( Version newOldestVersion, TaskPriority taskID = 7000 ) {
 ASSERT( newOldestVersion <= latestVersion );
 roots[newOldestVersion] = getRoot(newOldestVersion);

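One wrinkle worth noting for defaults like `TaskPriority taskID = 7000` in the two hunks above: a scoped enum has no implicit conversion from an integer literal, so once TaskPriority is an `enum class`, a bare numeric default generally needs either a named enumerator or an explicit cast. A hedged sketch of the two workable forms (function name and value are illustrative, not the repository's code):

    enum class TaskPriority : int64_t { DefaultYield = 7000 }; // value illustrative

    // void cleanup(TaskPriority taskID = 7000);                                 // would not compile for a scoped enum
    void cleanupA(TaskPriority taskID = TaskPriority::DefaultYield) { (void)taskID; }
    void cleanupB(TaskPriority taskID = static_cast<TaskPriority>(7000)) { (void)taskID; }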
@@ -266,7 +266,7 @@ private:
 }

 ACTOR static Future<int> read_impl( int fd, void* data, int length, int64_t offset ) {
-state int taskID = g_network->getCurrentTask();
+state TaskPriority taskID = g_network->getCurrentTask();
 state Promise<Void> p;
 //fprintf(stderr, "eio_read (fd=%d length=%d offset=%lld)\n", fd, length, offset);
 state eio_req* r = eio_read(fd, data, length, offset, 0, eio_callback, &p);

@@ -289,7 +289,7 @@ private:
 }

 ACTOR static Future<Void> write_impl( int fd, Reference<ErrorInfo> err, StringRef data, int64_t offset ) {
-state int taskID = g_network->getCurrentTask();
+state TaskPriority taskID = g_network->getCurrentTask();
 state Promise<Void> p;
 state eio_req* r = eio_write(fd, (void*)data.begin(), data.size(), offset, 0, eio_callback, &p);
 try { wait( p.getFuture() ); } catch (...) { g_network->setCurrentTask( taskID ); eio_cancel(r); throw; }

@@ -299,7 +299,7 @@ private:
 }

 ACTOR static Future<Void> truncate_impl( int fd, Reference<ErrorInfo> err, int64_t size ) {
-state int taskID = g_network->getCurrentTask();
+state TaskPriority taskID = g_network->getCurrentTask();
 state Promise<Void> p;
 state eio_req* r = eio_ftruncate(fd, size, 0, eio_callback, &p);
 try { wait( p.getFuture() ); } catch (...) { g_network->setCurrentTask( taskID ); eio_cancel(r); throw; }

@@ -330,7 +330,7 @@ private:
 }

 ACTOR static Future<Void> sync_impl( int fd, Reference<ErrorInfo> err, bool sync_metadata=false ) {
-state int taskID = g_network->getCurrentTask();
+state TaskPriority taskID = g_network->getCurrentTask();
 state Promise<Void> p;
 state eio_req* r = start_fsync( fd, p, sync_metadata );

@@ -350,7 +350,7 @@ private:
 }

 ACTOR static Future<int64_t> size_impl( int fd ) {
-state int taskID = g_network->getCurrentTask();
+state TaskPriority taskID = g_network->getCurrentTask();
 state Promise<Void> p;
 state eio_req* r = eio_fstat( fd, 0, eio_callback, &p );
 try { wait( p.getFuture() ); } catch (...) { g_network->setCurrentTask( taskID ); eio_cancel(r); throw; }

@@ -363,7 +363,7 @@ private:
 }

 ACTOR static Future<EIO_STRUCT_STAT> stat_impl( std::string filename ) {
-state int taskID = g_network->getCurrentTask();
+state TaskPriority taskID = g_network->getCurrentTask();
 state Promise<Void> p;
 state EIO_STRUCT_STAT statdata;
 state eio_req* r = eio_stat( filename.c_str(), 0, eio_callback, &p );

@@ -377,7 +377,7 @@ private:
 ACTOR template <class R> static Future<R> dispatch_impl( std::function<R()> func) {
 state Dispatch<R> data( func );
-state int taskID = g_network->getCurrentTask();
+state TaskPriority taskID = g_network->getCurrentTask();

 state eio_req* r = eio_custom( [](eio_req* req) {
 // Runs on the eio thread pool

@@ -418,7 +418,7 @@ private:
 static void eio_want_poll() {
 want_poll = 1;
 // SOMEDAY: NULL for deferred error, no analysis of correctness (itp)
-onMainThreadVoid([](){ poll_eio(); }, NULL, TaskPollEIO);
+onMainThreadVoid([](){ poll_eio(); }, NULL, TaskPriority::PollEIO);
 }

 static int eio_callback( eio_req* req ) {

@@ -472,9 +472,9 @@ private:
 #endif
 }

-int getTask() const { return (prio>>32)+1; }
+TaskPriority getTask() const { return static_cast<TaskPriority>((prio>>32)+1); }

-ACTOR static void deliver( Promise<int> result, bool failed, int r, int task ) {
+ACTOR static void deliver( Promise<int> result, bool failed, int r, TaskPriority task ) {
 wait( delay(0, task) );
 if (failed) result.sendError(io_timeout());
 else if (r < 0) result.sendError(io_error());

@@ -649,7 +649,7 @@ private:
 loop {
 wait(success(ev->read()));

-wait(delay(0, TaskDiskIOComplete));
+wait(delay(0, TaskPriority::DiskIOComplete));

 linux_ioresult ev[FLOW_KNOBS->MAX_OUTSTANDING];
 timespec tm; tm.tv_sec = 0; tm.tv_nsec = 0;

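The `(prio>>32)+1` in getTask() above recovers a priority that was packed into the high 32 bits of a 64-bit sort key. The exact encoding used by this queue is not shown in the hunk, so the pack side below is only a guess that round-trips with the unpack shown; names and the enumerator value are illustrative.

    #include <cstdint>
    enum class TaskPriority : int64_t { DiskRead = 5010 }; // name/value illustrative

    // hypothetical pack: (task - 1) in the high word, a FIFO sequence number in the low word
    int64_t makeSortKey(TaskPriority task, uint32_t sequence) {
        return ((static_cast<int64_t>(task) - 1) << 32) | sequence;
    }

    // mirrors getTask() in the hunk above: turn the high word back into a TaskPriority
    TaskPriority taskOf(int64_t prio) {
        return static_cast<TaskPriority>((prio >> 32) + 1);
    }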
@@ -23,13 +23,13 @@
 std::map<std::string, Future<Void>> AsyncFileNonDurable::filesBeingDeleted;

-ACTOR Future<Void> sendOnProcess( ISimulator::ProcessInfo* process, Promise<Void> promise, int taskID ) {
+ACTOR Future<Void> sendOnProcess( ISimulator::ProcessInfo* process, Promise<Void> promise, TaskPriority taskID ) {
 wait( g_simulator.onProcess( process, taskID ) );
 promise.send(Void());
 return Void();
 }

-ACTOR Future<Void> sendErrorOnProcess( ISimulator::ProcessInfo* process, Promise<Void> promise, Error e, int taskID ) {
+ACTOR Future<Void> sendErrorOnProcess( ISimulator::ProcessInfo* process, Promise<Void> promise, Error e, TaskPriority taskID ) {
 wait( g_simulator.onProcess( process, taskID ) );
 promise.sendError(e);
 return Void();

@@ -38,8 +38,8 @@
 #undef max
 #undef min

-Future<Void> sendOnProcess( ISimulator::ProcessInfo* const& process, Promise<Void> const& promise, int const& taskID );
-Future<Void> sendErrorOnProcess( ISimulator::ProcessInfo* const& process, Promise<Void> const& promise, Error const& e, int const& taskID );
+ACTOR Future<Void> sendOnProcess( ISimulator::ProcessInfo* process, Promise<Void> promise, TaskPriority taskID );
+ACTOR Future<Void> sendErrorOnProcess( ISimulator::ProcessInfo* process, Promise<Void> promise, Error e, TaskPriority taskID );

 ACTOR template <class T>
 Future<T> sendErrorOnShutdown( Future<T> in ) {

@@ -198,7 +198,7 @@ public:
 //Creates a new AsyncFileNonDurable which wraps the provided IAsyncFile
 ACTOR static Future<Reference<IAsyncFile>> open(std::string filename, std::string actualFilename, Future<Reference<IAsyncFile>> wrappedFile, Reference<DiskParameters> diskParameters) {
 state ISimulator::ProcessInfo* currentProcess = g_simulator.getCurrentProcess();
-state int currentTaskID = g_network->getCurrentTask();
+state TaskPriority currentTaskID = g_network->getCurrentTask();
 state Future<Void> shutdown = success(currentProcess->shutdownSignal.getFuture());

 //TraceEvent("AsyncFileNonDurableOpenBegin").detail("Filename", filename).detail("Addr", g_simulator.getCurrentProcess()->address);

@@ -391,7 +391,7 @@ private:
 ACTOR Future<int> read(AsyncFileNonDurable *self, void *data, int length, int64_t offset) {
 state ISimulator::ProcessInfo* currentProcess = g_simulator.getCurrentProcess();
-state int currentTaskID = g_network->getCurrentTask();
+state TaskPriority currentTaskID = g_network->getCurrentTask();
 wait( g_simulator.onMachine( currentProcess ) );

 try {

@@ -411,7 +411,7 @@ private:
 //or none of the write. It may also corrupt parts of sectors which have not been written correctly
 ACTOR Future<Void> write(AsyncFileNonDurable *self, Promise<Void> writeStarted, Future<Future<Void>> ownFuture, void const* data, int length, int64_t offset) {
 state ISimulator::ProcessInfo* currentProcess = g_simulator.getCurrentProcess();
-state int currentTaskID = g_network->getCurrentTask();
+state TaskPriority currentTaskID = g_network->getCurrentTask();
 wait( g_simulator.onMachine( currentProcess ) );

 state double delayDuration = deterministicRandom()->random01() * self->maxWriteDelay;

@@ -535,7 +535,7 @@ private:
 //If a kill interrupts the delay, then the truncate may or may not be performed
 ACTOR Future<Void> truncate(AsyncFileNonDurable *self, Promise<Void> truncateStarted, Future<Future<Void>> ownFuture, int64_t size) {
 state ISimulator::ProcessInfo* currentProcess = g_simulator.getCurrentProcess();
-state int currentTaskID = g_network->getCurrentTask();
+state TaskPriority currentTaskID = g_network->getCurrentTask();
 wait( g_simulator.onMachine( currentProcess ) );

 state double delayDuration = deterministicRandom()->random01() * self->maxWriteDelay;

@@ -573,8 +573,8 @@ private:
 }
 }

-if(g_network->check_yield(TaskDefaultYield)) {
-wait(delay(0, TaskDefaultYield));
+if(g_network->check_yield(TaskPriority::DefaultYield)) {
+wait(delay(0, TaskPriority::DefaultYield));
 }

 //If performing a durable truncate, then pass it through to the file. Otherwise, pass it through with a 1/2 chance

@@ -663,7 +663,7 @@ private:
 ACTOR Future<Void> sync(AsyncFileNonDurable *self, bool durable) {
 state ISimulator::ProcessInfo* currentProcess = g_simulator.getCurrentProcess();
-state int currentTaskID = g_network->getCurrentTask();
+state TaskPriority currentTaskID = g_network->getCurrentTask();
 wait( g_simulator.onMachine( currentProcess ) );

 try {

@@ -695,7 +695,7 @@ private:
 ACTOR Future<int64_t> size(AsyncFileNonDurable *self) {
 state ISimulator::ProcessInfo* currentProcess = g_simulator.getCurrentProcess();
-state int currentTaskID = g_network->getCurrentTask();
+state TaskPriority currentTaskID = g_network->getCurrentTask();

 wait( g_simulator.onMachine( currentProcess ) );

@@ -714,7 +714,7 @@ private:
 //Finishes all outstanding actors on an AsyncFileNonDurable and then deletes it
 ACTOR Future<Void> deleteFile(AsyncFileNonDurable *self) {
 state ISimulator::ProcessInfo* currentProcess = g_simulator.getCurrentProcess();
-state int currentTaskID = g_network->getCurrentTask();
+state TaskPriority currentTaskID = g_network->getCurrentTask();
 state std::string filename = self->filename;

 wait( g_simulator.onMachine( currentProcess ) );

@@ -172,28 +172,28 @@ struct YieldMockNetwork : INetwork, ReferenceCounted<YieldMockNetwork> {
 t.send(Void());
 }

-virtual Future<class Void> delay(double seconds, int taskID) {
+virtual Future<class Void> delay(double seconds, TaskPriority taskID) {
 return nextTick.getFuture();
 }

-virtual Future<class Void> yield(int taskID) {
+virtual Future<class Void> yield(TaskPriority taskID) {
 if (check_yield(taskID))
 return delay(0,taskID);
 return Void();
 }

-virtual bool check_yield(int taskID) {
+virtual bool check_yield(TaskPriority taskID) {
 if (nextYield > 0) --nextYield;
 return nextYield == 0;
 }

 // Delegate everything else. TODO: Make a base class NetworkWrapper for delegating everything in INetwork
-virtual int getCurrentTask() { return baseNetwork->getCurrentTask(); }
-virtual void setCurrentTask(int taskID) { baseNetwork->setCurrentTask(taskID); }
+virtual TaskPriority getCurrentTask() { return baseNetwork->getCurrentTask(); }
+virtual void setCurrentTask(TaskPriority taskID) { baseNetwork->setCurrentTask(taskID); }
 virtual double now() { return baseNetwork->now(); }
 virtual void stop() { return baseNetwork->stop(); }
 virtual bool isSimulated() const { return baseNetwork->isSimulated(); }
-virtual void onMainThread(Promise<Void>&& signal, int taskID) { return baseNetwork->onMainThread(std::move(signal), taskID); }
+virtual void onMainThread(Promise<Void>&& signal, TaskPriority taskID) { return baseNetwork->onMainThread(std::move(signal), taskID); }
 virtual THREAD_HANDLE startThread(THREAD_FUNC_RETURN(*func) (void *), void *arg) { return baseNetwork->startThread(func,arg); }
 virtual Future< Reference<class IAsyncFile> > open(std::string filename, int64_t flags, int64_t mode) { return IAsyncFileSystem::filesystem()->open(filename,flags,mode); }
 virtual Future< Void > deleteFile(std::string filename, bool mustBeDurable) { return IAsyncFileSystem::filesystem()->deleteFile(filename,mustBeDurable); }

@@ -49,7 +49,7 @@ public:
 EndpointMap();
 void insert( NetworkMessageReceiver* r, Endpoint::Token& token, uint32_t priority );
 NetworkMessageReceiver* get( Endpoint::Token const& token );
-uint32_t getPriority( Endpoint::Token const& token );
+TaskPriority getPriority( Endpoint::Token const& token );
 void remove( Endpoint::Token const& token, NetworkMessageReceiver* r );

 private:

@@ -99,11 +99,11 @@ NetworkMessageReceiver* EndpointMap::get( Endpoint::Token const& token ) {
 return 0;
 }

-uint32_t EndpointMap::getPriority( Endpoint::Token const& token ) {
+TaskPriority EndpointMap::getPriority( Endpoint::Token const& token ) {
 uint32_t index = token.second();
 if ( index < data.size() && data[index].token().first() == token.first() && ((data[index].token().second()&0xffffffff00000000LL)|index)==token.second() )
-return data[index].token().second();
-return TaskUnknownEndpoint;
+return static_cast<TaskPriority>(data[index].token().second());
+return TaskPriority::UnknownEndpoint;
 }

 void EndpointMap::remove( Endpoint::Token const& token, NetworkMessageReceiver* r ) {

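Because the endpoint table still stores the priority inside a 32-bit field of the token, the scoped enum has to be converted explicitly at that boundary in both directions. A minimal sketch of the two casts used above (the enumerator values and function names are illustrative):

    #include <cstdint>
    enum class TaskPriority : int64_t { DefaultEndpoint = 7000, UnknownEndpoint = 4000 }; // values illustrative

    uint32_t toStoredPriority(TaskPriority p) {        // what callers of EndpointMap::insert do
        return static_cast<uint32_t>(p);
    }
    TaskPriority fromStoredPriority(uint32_t stored) { // what getPriority does on the way out
        return static_cast<TaskPriority>(stored);
    }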
@@ -119,7 +119,7 @@ struct EndpointNotFoundReceiver : NetworkMessageReceiver {
 EndpointNotFoundReceiver(EndpointMap& endpoints) {
 //endpoints[WLTOKEN_ENDPOINT_NOT_FOUND] = this;
 Endpoint::Token e = WLTOKEN_ENDPOINT_NOT_FOUND;
-endpoints.insert(this, e, TaskDefaultEndpoint);
+endpoints.insert(this, e, static_cast<uint32_t>(TaskPriority::DefaultEndpoint));
 ASSERT( e == WLTOKEN_ENDPOINT_NOT_FOUND );
 }
 virtual void receive( ArenaReader& reader ) {

@@ -138,7 +138,7 @@ struct EndpointNotFoundReceiver : NetworkMessageReceiver {
 struct PingReceiver : NetworkMessageReceiver {
 PingReceiver(EndpointMap& endpoints) {
 Endpoint::Token e = WLTOKEN_PING_PACKET;
-endpoints.insert(this, e, TaskReadSocket);
+endpoints.insert(this, e, static_cast<uint32_t>(TaskPriority::ReadSocket));
 ASSERT( e == WLTOKEN_PING_PACKET );
 }
 virtual void receive( ArenaReader& reader ) {

@@ -435,10 +435,10 @@ struct Peer : NonCopyable {
 ACTOR static Future<Void> connectionWriter( Peer* self, Reference<IConnection> conn ) {
 state double lastWriteTime = now();
 loop {
-//wait( delay(0, TaskWriteSocket) );
-wait( delayJittered(std::max<double>(FLOW_KNOBS->MIN_COALESCE_DELAY, FLOW_KNOBS->MAX_COALESCE_DELAY - (now() - lastWriteTime)), TaskWriteSocket) );
-//wait( delay(500e-6, TaskWriteSocket) );
-//wait( yield(TaskWriteSocket) );
+//wait( delay(0, TaskPriority::WriteSocket) );
+wait( delayJittered(std::max<double>(FLOW_KNOBS->MIN_COALESCE_DELAY, FLOW_KNOBS->MAX_COALESCE_DELAY - (now() - lastWriteTime)), TaskPriority::WriteSocket) );
+//wait( delay(500e-6, TaskPriority::WriteSocket) );
+//wait( yield(TaskPriority::WriteSocket) );

 // Send until there is nothing left to send
 loop {

@@ -453,7 +453,7 @@ struct Peer : NonCopyable {
 TEST(true); // We didn't write everything, so apparently the write buffer is full. Wait for it to be nonfull.
 wait( conn->onWritable() );
-wait( yield(TaskWriteSocket) );
+wait( yield(TaskPriority::WriteSocket) );
 }

 // Wait until there is something to send

@@ -599,8 +599,8 @@ TransportData::~TransportData() {
 }

 ACTOR static void deliver(TransportData* self, Endpoint destination, ArenaReader reader, bool inReadSocket) {
-int priority = self->endpoints.getPriority(destination.token);
-if (priority < TaskReadSocket || !inReadSocket) {
+TaskPriority priority = self->endpoints.getPriority(destination.token);
+if (priority < TaskPriority::ReadSocket || !inReadSocket) {
 wait( delay(0, priority) );
 } else {
 g_network->setCurrentTask( priority );

@@ -634,7 +634,7 @@ ACTOR static void deliver(TransportData* self, Endpoint destination, ArenaReader
 }

 if( inReadSocket )
-g_network->setCurrentTask( TaskReadSocket );
+g_network->setCurrentTask( TaskPriority::ReadSocket );
 }

 static void scanPackets(TransportData* transport, uint8_t*& unprocessed_begin, uint8_t* e, Arena& arena,

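deliver() can keep comparing priorities because relational operators work between two values of the same scoped enum; what no longer compiles is mixing a TaskPriority with a bare integer. A small sketch of that rule (enumerator values illustrative, function name hypothetical):

    enum class TaskPriority : int64_t { ReadSocket = 9000, DefaultEndpoint = 7000 }; // values illustrative

    bool mustRescheduleDelivery(TaskPriority priority, bool inReadSocket) {
        return priority < TaskPriority::ReadSocket || !inReadSocket; // same shape as deliver() above
        // return priority < 9000;  // error: no operator< between TaskPriority and int
    }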
@@ -884,11 +884,11 @@ ACTOR static Future<Void> connectionReader(
 if (readWillBlock)
 break;

-wait(yield(TaskReadSocket));
+wait(yield(TaskPriority::ReadSocket));
 }

 wait( conn->onReadable() );
-wait(delay(0, TaskReadSocket)); // We don't want to call conn->read directly from the reactor - we could get stuck in the reactor reading 1 packet at a time
+wait(delay(0, TaskPriority::ReadSocket)); // We don't want to call conn->read directly from the reactor - we could get stuck in the reactor reading 1 packet at a time
 }
 }
 catch (Error& e) {

@@ -932,7 +932,7 @@ ACTOR static Future<Void> listen( TransportData* self, NetworkAddress listenAddr
 .detail("FromAddress", conn->getPeerAddress())
 .detail("ListenAddress", listenAddr.toString());
 incoming.add( connectionIncoming(self, conn) );
-wait(delay(0) || delay(FLOW_KNOBS->CONNECTION_ACCEPT_DELAY, TaskWriteSocket));
+wait(delay(0) || delay(FLOW_KNOBS->CONNECTION_ACCEPT_DELAY, TaskPriority::WriteSocket));
 }
 } catch (Error& e) {
 TraceEvent(SevError, "ListenError").error(e);

@@ -1054,7 +1054,7 @@ void FlowTransport::removePeerReference( const Endpoint& endpoint, NetworkMessag
 }
 }

-void FlowTransport::addEndpoint( Endpoint& endpoint, NetworkMessageReceiver* receiver, uint32_t taskID ) {
+void FlowTransport::addEndpoint( Endpoint& endpoint, NetworkMessageReceiver* receiver, TaskPriority taskID ) {
 endpoint.token = deterministicRandom()->randomUniqueID();
 if (receiver->isStream()) {
 endpoint.addresses = self->localAddresses;

@@ -1063,18 +1063,18 @@ void FlowTransport::addEndpoint( Endpoint& endpoint, NetworkMessageReceiver* rec
 endpoint.addresses = NetworkAddressList();
 endpoint.token = UID( endpoint.token.first() & ~TOKEN_STREAM_FLAG, endpoint.token.second() );
 }
-self->endpoints.insert( receiver, endpoint.token, taskID );
+self->endpoints.insert( receiver, endpoint.token, static_cast<uint32_t>(taskID) );
 }

 void FlowTransport::removeEndpoint( const Endpoint& endpoint, NetworkMessageReceiver* receiver ) {
 self->endpoints.remove(endpoint.token, receiver);
 }

-void FlowTransport::addWellKnownEndpoint( Endpoint& endpoint, NetworkMessageReceiver* receiver, uint32_t taskID ) {
+void FlowTransport::addWellKnownEndpoint( Endpoint& endpoint, NetworkMessageReceiver* receiver, TaskPriority taskID ) {
 endpoint.addresses = self->localAddresses;
 ASSERT( ((endpoint.token.first() & TOKEN_STREAM_FLAG)!=0) == receiver->isStream() );
 Endpoint::Token otoken = endpoint.token;
-self->endpoints.insert( receiver, endpoint.token, taskID );
+self->endpoints.insert( receiver, endpoint.token, static_cast<uint32_t>(taskID) );
 ASSERT( endpoint.token == otoken );
 }

@@ -137,13 +137,13 @@ public:
 void removePeerReference( const Endpoint&, NetworkMessageReceiver* );
 // Signal that a peer connection is no longer being used

-void addEndpoint( Endpoint& endpoint, NetworkMessageReceiver*, uint32_t taskID );
+void addEndpoint( Endpoint& endpoint, NetworkMessageReceiver*, TaskPriority taskID );
 // Sets endpoint to be a new local endpoint which delivers messages to the given receiver

 void removeEndpoint( const Endpoint&, NetworkMessageReceiver* );
 // The given local endpoint no longer delivers messages to the given receiver or uses resources

-void addWellKnownEndpoint( Endpoint& endpoint, NetworkMessageReceiver*, uint32_t taskID );
+void addWellKnownEndpoint( Endpoint& endpoint, NetworkMessageReceiver*, TaskPriority taskID );
 // Sets endpoint to a new local endpoint (without changing its token) which delivers messages to the given receiver
 // Implementations may have limitations on when this function is called and what endpoint.token may be!

@@ -178,7 +178,7 @@ Future< REPLY_TYPE(Request) > loadBalance(
 Reference<MultiInterface<Multi>> alternatives,
 RequestStream<Request> Interface::* channel,
 Request request = Request(),
-int taskID = TaskDefaultPromiseEndpoint,
+TaskPriority taskID = TaskPriority::DefaultPromiseEndpoint,
 bool atMostOnce = false, // if true, throws request_maybe_delivered() instead of retrying automatically
 QueueModel* model = NULL)
 {

@@ -47,7 +47,7 @@ bool firstInBatch(CommitTransactionRequest x) {
 }

 ACTOR template <class X>
-Future<Void> batcher(PromiseStream<std::pair<std::vector<X>, int> > out, FutureStream<X> in, double avgMinDelay, double* avgMaxDelay, double emptyBatchTimeout, int maxCount, int desiredBytes, int maxBytes, Optional<PromiseStream<Void>> batchStartedStream, int64_t *commitBatchesMemBytesCount, int64_t commitBatchesMemBytesLimit, int taskID = TaskDefaultDelay, Counter* counter = 0)
+Future<Void> batcher(PromiseStream<std::pair<std::vector<X>, int> > out, FutureStream<X> in, double avgMinDelay, double* avgMaxDelay, double emptyBatchTimeout, int maxCount, int desiredBytes, int maxBytes, Optional<PromiseStream<Void>> batchStartedStream, int64_t *commitBatchesMemBytesCount, int64_t commitBatchesMemBytesLimit, TaskPriority taskID = TaskPriority::DefaultDelay, Counter* counter = 0)
 {
 wait( delayJittered(*avgMaxDelay, taskID) ); // smooth out
 // This is set up to deliver even zero-size batches if emptyBatchTimeout elapses, because that's what master proxy wants. The source control history

@ -48,7 +48,7 @@ struct FlowReceiver : private NetworkMessageReceiver {
|
|||
|
||||
// If already a remote endpoint, returns that. Otherwise makes this
|
||||
// a local endpoint and returns that.
|
||||
const Endpoint& getEndpoint(int taskID) {
|
||||
const Endpoint& getEndpoint(TaskPriority taskID) {
|
||||
if (!endpoint.isValid()) {
|
||||
m_isLocalEndpoint = true;
|
||||
FlowTransport::transport().addEndpoint(endpoint, this, taskID);
|
||||
|
@ -56,7 +56,7 @@ struct FlowReceiver : private NetworkMessageReceiver {
|
|||
return endpoint;
|
||||
}
|
||||
|
||||
void makeWellKnownEndpoint(Endpoint::Token token, int taskID) {
|
||||
void makeWellKnownEndpoint(Endpoint::Token token, TaskPriority taskID) {
|
||||
ASSERT(!endpoint.isValid());
|
||||
m_isLocalEndpoint = true;
|
||||
endpoint.token = token;
|
||||
|
@ -128,7 +128,7 @@ public:
|
|||
~ReplyPromise() { if (sav) sav->delPromiseRef(); }
|
||||
|
||||
ReplyPromise(const Endpoint& endpoint) : sav(new NetSAV<T>(0, 1, endpoint)) {}
|
||||
const Endpoint& getEndpoint(int taskID = TaskDefaultPromiseEndpoint) const { return sav->getEndpoint(taskID); }
|
||||
const Endpoint& getEndpoint(TaskPriority taskID = TaskPriority::DefaultPromiseEndpoint) const { return sav->getEndpoint(taskID); }
|
||||
|
||||
void operator=(const ReplyPromise& rhs) {
|
||||
if (rhs.sav) rhs.sav->addPromiseRef();
|
||||
|
@@ -204,19 +204,19 @@ template <class Reply>
 void resetReply(ReplyPromise<Reply> & p) { p.reset(); }
 
 template <class Request>
-void resetReply(Request& r, int taskID) { r.reply.reset(); r.reply.getEndpoint(taskID); }
+void resetReply(Request& r, TaskPriority taskID) { r.reply.reset(); r.reply.getEndpoint(taskID); }
 
 template <class Reply>
-void resetReply(ReplyPromise<Reply> & p, int taskID) { p.reset(); p.getEndpoint(taskID); }
+void resetReply(ReplyPromise<Reply> & p, TaskPriority taskID) { p.reset(); p.getEndpoint(taskID); }
 
 template <class Request>
-void setReplyPriority(Request& r, int taskID) { r.reply.getEndpoint(taskID); }
+void setReplyPriority(Request& r, TaskPriority taskID) { r.reply.getEndpoint(taskID); }
 
 template <class Reply>
-void setReplyPriority(ReplyPromise<Reply> & p, int taskID) { p.getEndpoint(taskID); }
+void setReplyPriority(ReplyPromise<Reply> & p, TaskPriority taskID) { p.getEndpoint(taskID); }
 
 template <class Reply>
-void setReplyPriority(const ReplyPromise<Reply> & p, int taskID) { p.getEndpoint(taskID); }
+void setReplyPriority(const ReplyPromise<Reply> & p, TaskPriority taskID) { p.getEndpoint(taskID); }
 
 
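resetReply()/setReplyPriority() keep their defaulted-priority ergonomics after the change; a caller can still omit the priority or raise it for one particular reply. A rough sketch of that usage pattern, with stand-in types and made-up values:

    #include <iostream>

    enum class TaskPriority { DefaultPromiseEndpoint = 7000, CoordinationReply = 8000 }; // illustrative values

    struct Endpoint { TaskPriority priority; };

    // Stand-in for ReplyPromise<T>: getEndpoint() has a defaulted TaskPriority parameter, as in the hunk above.
    struct ReplyPromiseSketch {
        Endpoint getEndpoint(TaskPriority taskID = TaskPriority::DefaultPromiseEndpoint) const {
            return Endpoint{ taskID };
        }
    };

    int main() {
        ReplyPromiseSketch p;
        Endpoint a = p.getEndpoint();                                // default priority
        Endpoint b = p.getEndpoint(TaskPriority::CoordinationReply); // caller raises this reply's priority
        std::cout << static_cast<int>(a.priority) << " " << static_cast<int>(b.priority) << "\n";
        return 0;
    }
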
@@ -281,7 +281,7 @@ public:
 return reportEndpointFailure(getReplyPromise(value).getFuture(), getEndpoint());
 }
 template <class X>
-Future<REPLY_TYPE(X)> getReply(const X& value, int taskID) const {
+Future<REPLY_TYPE(X)> getReply(const X& value, TaskPriority taskID) const {
 setReplyPriority(value, taskID);
 return getReply(value);
 }

@@ -290,7 +290,7 @@ public:
 return getReply(ReplyPromise<X>());
 }
 template <class X>
-Future<X> getReplyWithTaskID(int taskID) const {
+Future<X> getReplyWithTaskID(TaskPriority taskID) const {
 ReplyPromise<X> reply;
 reply.getEndpoint(taskID);
 return getReply(reply);

@@ -302,7 +302,7 @@ public:
 // If cancelled or returns failure, request was or will be delivered zero or one times.
 // The caller must be capable of retrying if this request returns failure
 template <class X>
-Future<ErrorOr<REPLY_TYPE(X)>> tryGetReply(const X& value, int taskID) const {
+Future<ErrorOr<REPLY_TYPE(X)>> tryGetReply(const X& value, TaskPriority taskID) const {
 setReplyPriority(value, taskID);
 if (queue->isRemoteEndpoint()) {
 Future<Void> disc = makeDependent<T>(IFailureMonitor::failureMonitor()).onDisconnectOrFailure(getEndpoint(taskID));

@@ -344,7 +344,7 @@ public:
 // If it returns failure, the failure detector considers the endpoint failed permanently or for the given amount of time
 // See IFailureMonitor::onFailedFor() for an explanation of the duration and slope parameters.
 template <class X>
-Future<ErrorOr<REPLY_TYPE(X)>> getReplyUnlessFailedFor(const X& value, double sustainedFailureDuration, double sustainedFailureSlope, int taskID) const {
+Future<ErrorOr<REPLY_TYPE(X)>> getReplyUnlessFailedFor(const X& value, double sustainedFailureDuration, double sustainedFailureSlope, TaskPriority taskID) const {
 // If it is local endpoint, no need for failure monitoring
 return waitValueOrSignal(getReply(value, taskID),
 makeDependent<T>(IFailureMonitor::failureMonitor()).onFailedFor(getEndpoint(taskID), sustainedFailureDuration, sustainedFailureSlope),

@@ -388,8 +388,8 @@ public:
 //queue = (NetNotifiedQueue<T>*)0xdeadbeef;
 }
 
-Endpoint getEndpoint(int taskID = TaskDefaultEndpoint) const { return queue->getEndpoint(taskID); }
-void makeWellKnownEndpoint(Endpoint::Token token, int taskID) {
+Endpoint getEndpoint(TaskPriority taskID = TaskPriority::DefaultEndpoint) const { return queue->getEndpoint(taskID); }
+void makeWellKnownEndpoint(Endpoint::Token token, TaskPriority taskID) {
 queue->makeWellKnownEndpoint(token, taskID);
 }

@@ -50,7 +50,7 @@ Future<REPLY_TYPE(Req)> retryBrokenPromise( RequestStream<Req> to, Req request )
 }
 
 ACTOR template <class Req>
-Future<REPLY_TYPE(Req)> retryBrokenPromise( RequestStream<Req> to, Req request, int taskID ) {
+Future<REPLY_TYPE(Req)> retryBrokenPromise( RequestStream<Req> to, Req request, TaskPriority taskID ) {
 // Like to.getReply(request), except that a broken_promise exception results in retrying request immediately.
 // Suitable for use with well known endpoints, which are likely to return to existence after the other process restarts.
 // Not normally useful for ordinary endpoints, which conventionally are permanently destroyed after replying with broken_promise.

@@ -422,7 +422,7 @@ public:
 ACTOR static Future<Reference<IAsyncFile>> open( std::string filename, int flags, int mode,
 Reference<DiskParameters> diskParameters = Reference<DiskParameters>(new DiskParameters(25000, 150000000)), bool delayOnWrite = true ) {
 state ISimulator::ProcessInfo* currentProcess = g_simulator.getCurrentProcess();
-state int currentTaskID = g_network->getCurrentTask();
+state TaskPriority currentTaskID = g_network->getCurrentTask();
 
 if(++openCount >= 3000) {
 TraceEvent(SevError, "TooManyFiles");
@@ -741,11 +741,11 @@ public:
 // Everything actually network related is delegated to the Sim2Net class; Sim2 is only concerned with simulating machines and time
 virtual double now() { return time; }
 
-virtual Future<class Void> delay( double seconds, int taskID ) {
-ASSERT(taskID >= TaskMinPriority && taskID <= TaskMaxPriority);
+virtual Future<class Void> delay( double seconds, TaskPriority taskID ) {
+ASSERT(taskID >= TaskPriority::Min && taskID <= TaskPriority::Max);
 return delay( seconds, taskID, currentProcess );
 }
-Future<class Void> delay( double seconds, int taskID, ProcessInfo* machine ) {
+Future<class Void> delay( double seconds, TaskPriority taskID, ProcessInfo* machine ) {
 ASSERT( seconds >= -0.0001 );
 seconds = std::max(0.0, seconds);
 Future<Void> f;

@@ -760,13 +760,13 @@ public:
 
 return f;
 }
-ACTOR static Future<Void> checkShutdown(Sim2 *self, int taskID) {
+ACTOR static Future<Void> checkShutdown(Sim2 *self, TaskPriority taskID) {
 wait(success(self->getCurrentProcess()->shutdownSignal.getFuture()));
 self->setCurrentTask(taskID);
 return Void();
 }
-virtual Future<class Void> yield( int taskID ) {
-if (taskID == TaskDefaultYield) taskID = currentTaskID;
+virtual Future<class Void> yield( TaskPriority taskID ) {
+if (taskID == TaskPriority::DefaultYield) taskID = currentTaskID;
 if (check_yield(taskID)) {
 // We want to check that yielders can handle actual time elapsing (it sometimes will outside simulation), but
 // don't want to prevent instantaneous shutdown of "rebooted" machines.
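The ASSERT above relies on the fact that relational operators are defined between two values of the same scoped enumeration, so a Min/Max range check needs no casts. A compilable sketch (values are illustrative):

    #include <cassert>

    enum class TaskPriority { Zero = 0, Min = 1, DefaultYield = 7000, Max = 1000000 }; // illustrative values

    // Mirrors the range check in Sim2::delay() above.
    bool isValidPriority(TaskPriority t) {
        return t >= TaskPriority::Min && t <= TaskPriority::Max;
    }

    int main() {
        assert(isValidPriority(TaskPriority::DefaultYield));
        assert(!isValidPriority(TaskPriority::Zero)); // below Min
        return 0;
    }
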
@@ -775,7 +775,7 @@ public:
 setCurrentTask(taskID);
 return Void();
 }
-virtual bool check_yield( int taskID ) {
+virtual bool check_yield( TaskPriority taskID ) {
 if (yielded) return true;
 if (--yield_limit <= 0) {
 yield_limit = deterministicRandom()->randomInt(1, 150); // If yield returns false *too* many times in a row, there could be a stack overflow, since we can't deterministically check stack size as the real network does

@@ -783,10 +783,10 @@ public:
 }
 return yielded = BUGGIFY_WITH_PROB(0.01);
 }
-virtual int getCurrentTask() {
+virtual TaskPriority getCurrentTask() {
 return currentTaskID;
 }
-virtual void setCurrentTask(int taskID ) {
+virtual void setCurrentTask(TaskPriority taskID ) {
 currentTaskID = taskID;
 }
 // Sets the taskID/priority of the current task, without yielding

@@ -923,7 +923,7 @@ public:
 }
 if ( mustBeDurable || deterministicRandom()->random01() < 0.5 ) {
 state ISimulator::ProcessInfo* currentProcess = g_simulator.getCurrentProcess();
-state int currentTaskID = g_network->getCurrentTask();
+state TaskPriority currentTaskID = g_network->getCurrentTask();
 wait( g_simulator.onMachine( currentProcess ) );
 try {
 wait( ::delay(0.05 * deterministicRandom()->random01()) );

@@ -949,7 +949,7 @@ public:
 ACTOR static Future<Void> runLoop(Sim2 *self) {
 state ISimulator::ProcessInfo *callingMachine = self->currentProcess;
 while ( !self->isStopped ) {
-wait( self->net2->yield(TaskDefaultYield) );
+wait( self->net2->yield(TaskPriority::DefaultYield) );
 
 self->mutex.enter();
 if( self->tasks.size() == 0 ) {
@@ -1579,23 +1579,23 @@ public:
 machines.erase(machineId);
 }
 
-Sim2(bool objSerializer) : time(0.0), taskCount(0), yielded(false), yield_limit(0), currentTaskID(-1) {
+Sim2(bool objSerializer) : time(0.0), taskCount(0), yielded(false), yield_limit(0), currentTaskID(TaskPriority::Zero) {
 // Not letting currentProcess be NULL eliminates some annoying special cases
 currentProcess = new ProcessInfo("NoMachine", LocalityData(Optional<Standalone<StringRef>>(), StringRef(), StringRef(), StringRef()), ProcessClass(), {NetworkAddress()}, this, "", "");
 g_network = net2 = newNet2(false, true, objSerializer);
 Net2FileSystem::newFileSystem();
-check_yield(0);
+check_yield(TaskPriority::Zero);
 }
 
 // Implementation
 struct Task {
-int taskID;
+TaskPriority taskID;
 double time;
 uint64_t stable;
 ProcessInfo* machine;
 Promise<Void> action;
-Task( double time, int taskID, uint64_t stable, ProcessInfo* machine, Promise<Void>&& action ) : time(time), taskID(taskID), stable(stable), machine(machine), action(std::move(action)) {}
-Task( double time, int taskID, uint64_t stable, ProcessInfo* machine, Future<Void>& future ) : time(time), taskID(taskID), stable(stable), machine(machine) { future = action.getFuture(); }
+Task( double time, TaskPriority taskID, uint64_t stable, ProcessInfo* machine, Promise<Void>&& action ) : time(time), taskID(taskID), stable(stable), machine(machine), action(std::move(action)) {}
+Task( double time, TaskPriority taskID, uint64_t stable, ProcessInfo* machine, Future<Void>& future ) : time(time), taskID(taskID), stable(stable), machine(machine) { future = action.getFuture(); }
 Task(Task&& rhs) BOOST_NOEXCEPT : time(rhs.time), taskID(rhs.taskID), stable(rhs.stable), machine(rhs.machine), action(std::move(rhs.action)) {}
 void operator= ( Task const& rhs ) { taskID = rhs.taskID; time = rhs.time; stable = rhs.stable; machine = rhs.machine; action = rhs.action; }
 Task( Task const& rhs ) : taskID(rhs.taskID), time(rhs.time), stable(rhs.stable), machine(rhs.machine), action(rhs.action) {}
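A scoped enum member can no longer be seeded with a sentinel like -1, which is why the constructor above switches currentTaskID to TaskPriority::Zero. A sketch of the same constraint in isolation (names and values are illustrative):

    enum class TaskPriority { Zero = 0, DefaultYield = 7000 }; // illustrative values

    struct SchedulerSketch {
        TaskPriority currentTaskID;
        // SchedulerSketch() : currentTaskID(-1) {}             // would not compile with a scoped enum
        SchedulerSketch() : currentTaskID(TaskPriority::Zero) {} // a named "not yet set" value, as above
    };

    int main() {
        SchedulerSketch s;
        return static_cast<int>(s.currentTaskID);
    }
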
@@ -1642,20 +1642,20 @@ public:
 }
 }
 
-virtual void onMainThread( Promise<Void>&& signal, int taskID ) {
+virtual void onMainThread( Promise<Void>&& signal, TaskPriority taskID ) {
 // This is presumably coming from either a "fake" thread pool thread, i.e. it is actually on this thread
 // or a thread created with g_network->startThread
 ASSERT(getCurrentProcess());
 
 mutex.enter();
-ASSERT(taskID >= TaskMinPriority && taskID <= TaskMaxPriority);
+ASSERT(taskID >= TaskPriority::Min && taskID <= TaskPriority::Max);
 tasks.push( Task( time, taskID, taskCount++, getCurrentProcess(), std::move(signal) ) );
 mutex.leave();
 }
-virtual Future<Void> onProcess( ISimulator::ProcessInfo *process, int taskID ) {
+virtual Future<Void> onProcess( ISimulator::ProcessInfo *process, TaskPriority taskID ) {
 return delay( 0, taskID, process );
 }
-virtual Future<Void> onMachine( ISimulator::ProcessInfo *process, int taskID ) {
+virtual Future<Void> onMachine( ISimulator::ProcessInfo *process, TaskPriority taskID ) {
 if( process->machine == 0 )
 return Void();
 return delay( 0, taskID, process->machine->machineProcess );
@@ -1664,7 +1664,7 @@ public:
 //time is guarded by ISimulator::mutex. It is not necessary to guard reads on the main thread because
 //time should only be modified from the main thread.
 double time;
-int currentTaskID;
+TaskPriority currentTaskID;
 
 //taskCount is guarded by ISimulator::mutex
 uint64_t taskCount;

@@ -1694,9 +1694,9 @@ void startNewSimulator(bool objSerializer) {
 }
 
 ACTOR void doReboot( ISimulator::ProcessInfo *p, ISimulator::KillType kt ) {
-TraceEvent("RebootingProcessAttempt").detail("ZoneId", p->locality.zoneId()).detail("KillType", kt).detail("Process", p->toString()).detail("StartingClass", p->startingClass.toString()).detail("Failed", p->failed).detail("Excluded", p->excluded).detail("Cleared", p->cleared).detail("Rebooting", p->rebooting).detail("TaskDefaultDelay", TaskDefaultDelay);
+TraceEvent("RebootingProcessAttempt").detail("ZoneId", p->locality.zoneId()).detail("KillType", kt).detail("Process", p->toString()).detail("StartingClass", p->startingClass.toString()).detail("Failed", p->failed).detail("Excluded", p->excluded).detail("Cleared", p->cleared).detail("Rebooting", p->rebooting).detail("TaskPriority::DefaultDelay", TaskPriority::DefaultDelay);
 
-wait( g_sim2.delay( 0, TaskDefaultDelay, p ) ); // Switch to the machine in question
+wait( g_sim2.delay( 0, TaskPriority::DefaultDelay, p ) ); // Switch to the machine in question
 
 try {
 ASSERT( kt == ISimulator::RebootProcess || kt == ISimulator::Reboot || kt == ISimulator::RebootAndDelete || kt == ISimulator::RebootProcessAndDelete );
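The TraceEvent detail above now logs a TaskPriority value; how TraceEvent itself formats the enum is not shown in this diff, but the general pattern for handing a scoped enum to an integer-only sink is an explicit conversion to the underlying type, e.g.:

    #include <cstdio>
    #include <type_traits>

    enum class TaskPriority { DefaultDelay = 7010 }; // illustrative value

    // Generic helper: convert any enum to its underlying integer for logging-style APIs.
    template <class E>
    constexpr std::underlying_type_t<E> toUnderlying(E e) {
        return static_cast<std::underlying_type_t<E>>(e);
    }

    int main() {
        std::printf("TaskPriority::DefaultDelay = %lld\n",
                    static_cast<long long>(toUnderlying(TaskPriority::DefaultDelay)));
        return 0;
    }
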
@@ -137,8 +137,8 @@ public:
 
 ProcessInfo* getProcess( Endpoint const& endpoint ) { return getProcessByAddress(endpoint.getPrimaryAddress()); }
 ProcessInfo* getCurrentProcess() { return currentProcess; }
-virtual Future<Void> onProcess( ISimulator::ProcessInfo *process, int taskID = -1 ) = 0;
-virtual Future<Void> onMachine( ISimulator::ProcessInfo *process, int taskID = -1 ) = 0;
+virtual Future<Void> onProcess( ISimulator::ProcessInfo *process, TaskPriority taskID = TaskPriority::Zero ) = 0;
+virtual Future<Void> onMachine( ISimulator::ProcessInfo *process, TaskPriority taskID = TaskPriority::Zero ) = 0;
 
 virtual ProcessInfo* newProcess(const char* name, IPAddress ip, uint16_t port, uint16_t listenPerProcess,
 LocalityData locality, ProcessClass startingClass, const char* dataFolder,

@@ -107,7 +107,7 @@ public:
 DBInfo() : masterRegistrationCount(0), recoveryStalled(false), forceRecovery(false), unfinishedRecoveries(0), logGenerations(0),
 clientInfo( new AsyncVar<ClientDBInfo>( ClientDBInfo() ) ),
 serverInfo( new AsyncVar<ServerDBInfo>( ServerDBInfo() ) ),
-db( DatabaseContext::create( clientInfo, Future<Void>(), LocalityData(), true, TaskDefaultEndpoint, true ) ) // SOMEDAY: Locality!
+db( DatabaseContext::create( clientInfo, Future<Void>(), LocalityData(), true, TaskPriority::DefaultEndpoint, true ) ) // SOMEDAY: Locality!
 {
 }
 

@@ -1171,7 +1171,7 @@ public:
 serverInfo.clusterInterface = ccInterface;
 serverInfo.myLocality = locality;
 db.serverInfo->set( serverInfo );
-cx = openDBOnServer(db.serverInfo, TaskDefaultEndpoint, true, true);
+cx = openDBOnServer(db.serverInfo, TaskPriority::DefaultEndpoint, true, true);
 }
 
 ~ClusterControllerData() {
@@ -63,13 +63,13 @@ struct ClusterControllerFullInterface {
 
 void initEndpoints() {
 clientInterface.initEndpoints();
-recruitFromConfiguration.getEndpoint( TaskClusterController );
-recruitRemoteFromConfiguration.getEndpoint( TaskClusterController );
-recruitStorage.getEndpoint( TaskClusterController );
-registerWorker.getEndpoint( TaskClusterController );
-getWorkers.getEndpoint( TaskClusterController );
-registerMaster.getEndpoint( TaskClusterController );
-getServerDBInfo.getEndpoint( TaskClusterController );
+recruitFromConfiguration.getEndpoint( TaskPriority::ClusterController );
+recruitRemoteFromConfiguration.getEndpoint( TaskPriority::ClusterController );
+recruitStorage.getEndpoint( TaskPriority::ClusterController );
+registerWorker.getEndpoint( TaskPriority::ClusterController );
+getWorkers.getEndpoint( TaskPriority::ClusterController );
+registerMaster.getEndpoint( TaskPriority::ClusterController );
+getServerDBInfo.getEndpoint( TaskPriority::ClusterController );
 }
 
 template <class Ar>

@@ -52,8 +52,8 @@ GenerationRegInterface::GenerationRegInterface( NetworkAddress remote )
 
 GenerationRegInterface::GenerationRegInterface( INetwork* local )
 {
-read.makeWellKnownEndpoint( WLTOKEN_GENERATIONREG_READ, TaskCoordination );
-write.makeWellKnownEndpoint( WLTOKEN_GENERATIONREG_WRITE, TaskCoordination );
+read.makeWellKnownEndpoint( WLTOKEN_GENERATIONREG_READ, TaskPriority::Coordination );
+write.makeWellKnownEndpoint( WLTOKEN_GENERATIONREG_WRITE, TaskPriority::Coordination );
 }
 
 LeaderElectionRegInterface::LeaderElectionRegInterface(NetworkAddress remote)

@@ -67,9 +67,9 @@ LeaderElectionRegInterface::LeaderElectionRegInterface(NetworkAddress remote)
 LeaderElectionRegInterface::LeaderElectionRegInterface(INetwork* local)
 : ClientLeaderRegInterface(local)
 {
-candidacy.makeWellKnownEndpoint( WLTOKEN_LEADERELECTIONREG_CANDIDACY, TaskCoordination );
-leaderHeartbeat.makeWellKnownEndpoint( WLTOKEN_LEADERELECTIONREG_LEADERHEARTBEAT, TaskCoordination );
-forward.makeWellKnownEndpoint( WLTOKEN_LEADERELECTIONREG_FORWARD, TaskCoordination );
+candidacy.makeWellKnownEndpoint( WLTOKEN_LEADERELECTIONREG_CANDIDACY, TaskPriority::Coordination );
+leaderHeartbeat.makeWellKnownEndpoint( WLTOKEN_LEADERELECTIONREG_LEADERHEARTBEAT, TaskPriority::Coordination );
+forward.makeWellKnownEndpoint( WLTOKEN_LEADERELECTIONREG_FORWARD, TaskPriority::Coordination );
 }
 
 ServerCoordinators::ServerCoordinators( Reference<ClusterConnectionFile> cf )
@@ -263,7 +263,7 @@ typedef WorkPool<Coroutine, ThreadUnsafeSpinLock, true> CoroPool;
 
 
-ACTOR void coroSwitcher( Future<Void> what, int taskID, Coro* coro ) {
+ACTOR void coroSwitcher( Future<Void> what, TaskPriority taskID, Coro* coro ) {
 try {
 // state double t = now();
 wait(what);

@@ -88,7 +88,7 @@ struct TCMachineInfo : public ReferenceCounted<TCMachineInfo> {
 
 ACTOR Future<Void> updateServerMetrics( TCServerInfo *server ) {
 state StorageServerInterface ssi = server->lastKnownInterface;
-state Future<ErrorOr<GetPhysicalMetricsReply>> metricsRequest = ssi.getPhysicalMetrics.tryGetReply( GetPhysicalMetricsRequest(), TaskDataDistributionLaunch );
+state Future<ErrorOr<GetPhysicalMetricsReply>> metricsRequest = ssi.getPhysicalMetrics.tryGetReply( GetPhysicalMetricsRequest(), TaskPriority::DataDistributionLaunch );
 state Future<Void> resetRequest = Never();
 state Future<std::pair<StorageServerInterface, ProcessClass>> interfaceChanged( server->onInterfaceChanged );
 state Future<Void> serverRemoved( server->onRemoved );

@@ -104,7 +104,7 @@ ACTOR Future<Void> updateServerMetrics( TCServerInfo *server ) {
 return Void();
 }
 metricsRequest = Never();
-resetRequest = delay( SERVER_KNOBS->METRIC_DELAY, TaskDataDistributionLaunch );
+resetRequest = delay( SERVER_KNOBS->METRIC_DELAY, TaskPriority::DataDistributionLaunch );
 }
 when( std::pair<StorageServerInterface,ProcessClass> _ssi = wait( interfaceChanged ) ) {
 ssi = _ssi.first;

@@ -120,7 +120,7 @@ ACTOR Future<Void> updateServerMetrics( TCServerInfo *server ) {
 }
 else {
 resetRequest = Never();
-metricsRequest = ssi.getPhysicalMetrics.tryGetReply( GetPhysicalMetricsRequest(), TaskDataDistributionLaunch );
+metricsRequest = ssi.getPhysicalMetrics.tryGetReply( GetPhysicalMetricsRequest(), TaskPriority::DataDistributionLaunch );
 }
 }
 }

@@ -635,9 +635,9 @@ struct DDTeamCollection : ReferenceCounted<DDTeamCollection> {
 shardsAffectedByTeamFailure(shardsAffectedByTeamFailure), doBuildTeams(true), teamBuilder(Void()),
 badTeamRemover(Void()), redundantTeamRemover(Void()), configuration(configuration),
 readyToStart(readyToStart), clearHealthyZoneFuture(Void()),
-checkTeamDelay(delay(SERVER_KNOBS->CHECK_TEAM_DELAY, TaskDataDistribution)),
+checkTeamDelay(delay(SERVER_KNOBS->CHECK_TEAM_DELAY, TaskPriority::DataDistribution)),
 initialFailureReactionDelay(
-delayed(readyToStart, SERVER_KNOBS->INITIAL_FAILURE_REACTION_DELAY, TaskDataDistribution)),
+delayed(readyToStart, SERVER_KNOBS->INITIAL_FAILURE_REACTION_DELAY, TaskPriority::DataDistribution)),
 healthyTeamCount(0), storageServerSet(new LocalityMap<UID>()),
 initializationDoneActor(logOnCompletion(readyToStart && initialFailureReactionDelay, this)),
 optimalTeamCount(0), recruitingStream(0), restartRecruiting(SERVER_KNOBS->DEBOUNCE_RECRUITING_DELAY),

@@ -671,7 +671,7 @@ struct DDTeamCollection : ReferenceCounted<DDTeamCollection> {
 
 ACTOR static Future<Void> logOnCompletion( Future<Void> signal, DDTeamCollection* self ) {
 wait(signal);
-wait(delay(SERVER_KNOBS->LOG_ON_COMPLETION_DELAY, TaskDataDistribution));
+wait(delay(SERVER_KNOBS->LOG_ON_COMPLETION_DELAY, TaskPriority::DataDistribution));
 
 if(!self->primary || self->configuration.usableRegions == 1) {
 TraceEvent("DDTrackerStarting", self->distributorId)

@@ -1919,7 +1919,7 @@ struct DDTeamCollection : ReferenceCounted<DDTeamCollection> {
 
 //Building teams can cause servers to become undesired, which can make teams unhealthy.
 //Let all of these changes get worked out before responding to the get team request
-wait( delay(0, TaskDataDistributionLaunch) );
+wait( delay(0, TaskPriority::DataDistributionLaunch) );
 
 return Void();
 }

@@ -2232,7 +2232,7 @@ ACTOR Future<Void> waitUntilHealthy(DDTeamCollection* self) {
 TraceEvent("WaitUntilHealthyStalled", self->distributorId).detail("Primary", self->primary).detail("ZeroHealthy", self->zeroHealthyTeams->get()).detail("ProcessingUnhealthy", self->processingUnhealthy->get());
 wait(self->zeroHealthyTeams->onChange() || self->processingUnhealthy->onChange());
 }
-wait(delay(SERVER_KNOBS->DD_STALL_CHECK_DELAY, TaskLowPriority)); //After the team trackers wait on the initial failure reaction delay, they yield. We want to make sure every tracker has had the opportunity to send their relocations to the queue.
+wait(delay(SERVER_KNOBS->DD_STALL_CHECK_DELAY, TaskPriority::Low)); //After the team trackers wait on the initial failure reaction delay, they yield. We want to make sure every tracker has had the opportunity to send their relocations to the queue.
 if(!self->zeroHealthyTeams->get() && !self->processingUnhealthy->get()) {
 return Void();
 }

@@ -2638,7 +2638,7 @@ ACTOR Future<Void> trackExcludedServers( DDTeamCollection* self ) {
 if (nchid != lastChangeID)
 break;
 
-wait( delay( SERVER_KNOBS->SERVER_LIST_DELAY, TaskDataDistribution ) ); // FIXME: make this tr.watch( excludedServersVersionKey ) instead
+wait( delay( SERVER_KNOBS->SERVER_LIST_DELAY, TaskPriority::DataDistribution ) ); // FIXME: make this tr.watch( excludedServersVersionKey ) instead
 tr = Transaction(self->cx);
 } catch (Error& e) {
 wait( tr.onError(e) );

@@ -2757,14 +2757,14 @@ ACTOR Future<Void> serverMetricsPolling( TCServerInfo *server) {
 state double lastUpdate = now();
 loop {
 wait( updateServerMetrics( server ) );
-wait( delayUntil( lastUpdate + SERVER_KNOBS->STORAGE_METRICS_POLLING_DELAY + SERVER_KNOBS->STORAGE_METRICS_RANDOM_DELAY * deterministicRandom()->random01(), TaskDataDistributionLaunch ) );
+wait( delayUntil( lastUpdate + SERVER_KNOBS->STORAGE_METRICS_POLLING_DELAY + SERVER_KNOBS->STORAGE_METRICS_RANDOM_DELAY * deterministicRandom()->random01(), TaskPriority::DataDistributionLaunch ) );
 lastUpdate = now();
 }
 }
 
 //Returns the KeyValueStoreType of server if it is different from self->storeType
 ACTOR Future<KeyValueStoreType> keyValueStoreTypeTracker(DDTeamCollection* self, TCServerInfo *server) {
-state KeyValueStoreType type = wait(brokenPromiseToNever(server->lastKnownInterface.getKeyValueStoreType.getReplyWithTaskID<KeyValueStoreType>(TaskDataDistribution)));
+state KeyValueStoreType type = wait(brokenPromiseToNever(server->lastKnownInterface.getKeyValueStoreType.getReplyWithTaskID<KeyValueStoreType>(TaskPriority::DataDistribution)));
 if(type == self->configuration.storageServerStoreType && (self->includedDCs.empty() || std::find(self->includedDCs.begin(), self->includedDCs.end(), server->lastKnownInterface.locality.dcId()) != self->includedDCs.end()) )
 wait(Future<Void>(Never()));
 

@@ -2787,7 +2787,7 @@ ACTOR Future<Void> waitForAllDataRemoved( Database cx, UID serverID, Version add
 }
 
 // Wait for any change to the serverKeys for this server
-wait( delay(SERVER_KNOBS->ALL_DATA_REMOVED_DELAY, TaskDataDistribution) );
+wait( delay(SERVER_KNOBS->ALL_DATA_REMOVED_DELAY, TaskPriority::DataDistribution) );
 tr.reset();
 } catch (Error& e) {
 wait( tr.onError(e) );

@@ -2830,7 +2830,7 @@ ACTOR Future<Void> storageServerFailureTracker(
 ASSERT(!inHealthyZone);
 healthChanged = IFailureMonitor::failureMonitor().onStateEqual( interf.waitFailure.getEndpoint(), FailureStatus(false));
 } else if(!inHealthyZone) {
-healthChanged = waitFailureClientStrict(interf.waitFailure, SERVER_KNOBS->DATA_DISTRIBUTION_FAILURE_REACTION_TIME, TaskDataDistribution);
+healthChanged = waitFailureClientStrict(interf.waitFailure, SERVER_KNOBS->DATA_DISTRIBUTION_FAILURE_REACTION_TIME, TaskPriority::DataDistribution);
 }
 choose {
 when ( wait(healthChanged) ) {

@@ -3120,7 +3120,7 @@ ACTOR Future<Void> monitorStorageServerRecruitment(DDTeamCollection* self) {
 loop {
 choose {
 when( wait( self->recruitingStream.onChange() ) ) {}
-when( wait( self->recruitingStream.get() == 0 ? delay(SERVER_KNOBS->RECRUITMENT_IDLE_DELAY, TaskDataDistribution) : Future<Void>(Never()) ) ) { break; }
+when( wait( self->recruitingStream.get() == 0 ? delay(SERVER_KNOBS->RECRUITMENT_IDLE_DELAY, TaskPriority::DataDistribution) : Future<Void>(Never()) ) ) { break; }
 }
 }
 TraceEvent("StorageServerRecruitment", self->distributorId)

@@ -3147,12 +3147,12 @@ ACTOR Future<Void> initializeStorage( DDTeamCollection* self, RecruitStorageRepl
 
 self->recruitingIds.insert(interfaceId);
 self->recruitingLocalities.insert(candidateWorker.worker.address());
-state ErrorOr<InitializeStorageReply> newServer = wait( candidateWorker.worker.storage.tryGetReply( isr, TaskDataDistribution ) );
+state ErrorOr<InitializeStorageReply> newServer = wait( candidateWorker.worker.storage.tryGetReply( isr, TaskPriority::DataDistribution ) );
 if(newServer.isError()) {
 TraceEvent(SevWarn, "DDRecruitmentError").error(newServer.getError());
 if( !newServer.isError( error_code_recruitment_failed ) && !newServer.isError( error_code_request_maybe_delivered ) )
 throw newServer.getError();
-wait( delay(SERVER_KNOBS->STORAGE_RECRUITMENT_DELAY, TaskDataDistribution) );
+wait( delay(SERVER_KNOBS->STORAGE_RECRUITMENT_DELAY, TaskPriority::DataDistribution) );
 }
 self->recruitingIds.erase(interfaceId);
 self->recruitingLocalities.erase(candidateWorker.worker.address());

@@ -3217,7 +3217,7 @@ ACTOR Future<Void> storageRecruiter( DDTeamCollection* self, Reference<AsyncVar<
 
 if(!fCandidateWorker.isValid() || fCandidateWorker.isReady() || rsr.excludeAddresses != lastRequest.excludeAddresses || rsr.criticalRecruitment != lastRequest.criticalRecruitment) {
 lastRequest = rsr;
-fCandidateWorker = brokenPromiseToNever( db->get().clusterInterface.recruitStorage.getReply( rsr, TaskDataDistribution ) );
+fCandidateWorker = brokenPromiseToNever( db->get().clusterInterface.recruitStorage.getReply( rsr, TaskPriority::DataDistribution ) );
 }
 
 choose {

@@ -3388,7 +3388,7 @@ ACTOR Future<Void> dataDistributionTeamCollection(
 ACTOR Future<Void> waitForDataDistributionEnabled( Database cx ) {
 state Transaction tr(cx);
 loop {
-wait(delay(SERVER_KNOBS->DD_ENABLED_CHECK_DELAY, TaskDataDistribution));
+wait(delay(SERVER_KNOBS->DD_ENABLED_CHECK_DELAY, TaskPriority::DataDistribution));
 
 try {
 Optional<Value> mode = wait( tr.get( dataDistributionModeKey ) );

@@ -3516,7 +3516,7 @@ ACTOR Future<Void> dataDistribution(Reference<DataDistributorData> self)
 state double lastLimited = 0;
 self->addActor.send( monitorBatchLimitedTime(self->dbInfo, &lastLimited) );
 
-state Database cx = openDBOnServer(self->dbInfo, TaskDataDistributionLaunch, true, true);
+state Database cx = openDBOnServer(self->dbInfo, TaskPriority::DataDistributionLaunch, true, true);
 cx->locationCacheSize = SERVER_KNOBS->DD_LOCATION_CACHE_SIZE;
 
 //cx->setOption( FDBDatabaseOptions::LOCATION_CACHE_SIZE, StringRef((uint8_t*) &SERVER_KNOBS->DD_LOCATION_CACHE_SIZE, 8) );

@@ -3646,7 +3646,7 @@ ACTOR Future<Void> dataDistribution(Reference<DataDistributorData> self)
 }
 output.send( RelocateShard( keys, unhealthy ? PRIORITY_TEAM_UNHEALTHY : PRIORITY_RECOVER_MOVE ) );
 }
-wait( yield(TaskDataDistribution) );
+wait( yield(TaskPriority::DataDistribution) );
 }
 
 vector<TeamCollectionInterface> tcis;
@@ -512,9 +512,9 @@ struct DDQueueData {
 
 // FIXME: is the merge case needed
 if( input.priority == PRIORITY_MERGE_SHARD ) {
-wait( delay( 0.5, TaskDataDistribution - 2 ) );
+wait( delay( 0.5, decrementPriority(decrementPriority(TaskPriority::DataDistribution )) ) );
 } else {
-wait( delay( 0.0001, TaskDataDistributionLaunch ) );
+wait( delay( 0.0001, TaskPriority::DataDistributionLaunch ) );
 }
 
 loop {
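Integer arithmetic such as the old TaskDataDistribution - 2 is not defined on a scoped enum, so "slightly below this priority" is now spelled with decrementPriority(). The real helper lives in flow and is not shown in this diff; a plausible sketch of the idea is:

    enum class TaskPriority { DataDistributionLaunch = 3550, DataDistribution = 3500 }; // illustrative values

    // Sketch in the spirit of decrementPriority() used above; the real flow helper may step
    // by a different amount or clamp at a minimum priority.
    inline TaskPriority decrementPriority(TaskPriority p, int step = 1) {
        return static_cast<TaskPriority>(static_cast<int>(p) - step);
    }

    int main() {
        // Replaces the old "TaskDataDistribution - 2" arithmetic with a named operation.
        TaskPriority lower = decrementPriority(decrementPriority(TaskPriority::DataDistribution));
        return static_cast<int>(lower) == 3498 ? 0 : 1;
    }
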
@@ -933,7 +933,7 @@ ACTOR Future<Void> dataDistributionRelocator( DDQueueData *self, RelocateData rd
 .detail("Count", stuckCount)
 .detail("TeamCollectionId", tciIndex)
 .detail("NumOfTeamCollections", self->teamCollections.size());
-wait( delay( SERVER_KNOBS->BEST_TEAM_STUCK_DELAY, TaskDataDistributionLaunch ) );
+wait( delay( SERVER_KNOBS->BEST_TEAM_STUCK_DELAY, TaskPriority::DataDistributionLaunch ) );
 }
 
 state std::vector<UID> destIds;

@@ -993,7 +993,7 @@ ACTOR Future<Void> dataDistributionRelocator( DDQueueData *self, RelocateData rd
 state Error error = success();
 state Promise<Void> dataMovementComplete;
 state Future<Void> doMoveKeys = moveKeys(self->cx, rd.keys, destIds, healthyIds, self->lock, dataMovementComplete, &self->startMoveKeysParallelismLock, &self->finishMoveKeysParallelismLock, self->teamCollections.size() > 1, relocateShardInterval.pairID );
-state Future<Void> pollHealth = signalledTransferComplete ? Never() : delay( SERVER_KNOBS->HEALTH_POLL_TIME, TaskDataDistributionLaunch );
+state Future<Void> pollHealth = signalledTransferComplete ? Never() : delay( SERVER_KNOBS->HEALTH_POLL_TIME, TaskPriority::DataDistributionLaunch );
 try {
 loop {
 choose {

@@ -1016,7 +1016,7 @@ ACTOR Future<Void> dataDistributionRelocator( DDQueueData *self, RelocateData rd
 self->dataTransferComplete.send(rd);
 }
 }
-pollHealth = signalledTransferComplete ? Never() : delay( SERVER_KNOBS->HEALTH_POLL_TIME, TaskDataDistributionLaunch );
+pollHealth = signalledTransferComplete ? Never() : delay( SERVER_KNOBS->HEALTH_POLL_TIME, TaskPriority::DataDistributionLaunch );
 }
 when( wait( signalledTransferComplete ? Never() : dataMovementComplete.getFuture() ) ) {
 self->fetchKeysComplete.insert( rd );

@@ -1066,7 +1066,7 @@ ACTOR Future<Void> dataDistributionRelocator( DDQueueData *self, RelocateData rd
 } else {
 TEST(true); // move to removed server
 healthyDestinations.addDataInFlightToTeam( -metrics.bytes );
-wait( delay( SERVER_KNOBS->RETRY_RELOCATESHARD_DELAY, TaskDataDistributionLaunch ) );
+wait( delay( SERVER_KNOBS->RETRY_RELOCATESHARD_DELAY, TaskPriority::DataDistributionLaunch ) );
 }
 }
 } catch (Error& e) {

@@ -1125,7 +1125,7 @@ ACTOR Future<Void> BgDDMountainChopper( DDQueueData* self, int teamCollectionInd
 state double checkDelay = SERVER_KNOBS->BG_DD_POLLING_INTERVAL;
 state int resetCount = SERVER_KNOBS->DD_REBALANCE_RESET_AMOUNT;
 loop {
-wait( delay(checkDelay, TaskDataDistributionLaunch) );
+wait( delay(checkDelay, TaskPriority::DataDistributionLaunch) );
 if (self->priority_relocations[PRIORITY_REBALANCE_OVERUTILIZED_TEAM] < SERVER_KNOBS->DD_REBALANCE_PARALLELISM) {
 state Optional<Reference<IDataDistributionTeam>> randomTeam = wait( brokenPromiseToNever( self->teamCollections[teamCollectionIndex].getTeam.getReply( GetTeamRequest( true, false, true ) ) ) );
 if( randomTeam.present() ) {

@@ -1160,7 +1160,7 @@ ACTOR Future<Void> BgDDValleyFiller( DDQueueData* self, int teamCollectionIndex)
 state double checkDelay = SERVER_KNOBS->BG_DD_POLLING_INTERVAL;
 state int resetCount = SERVER_KNOBS->DD_REBALANCE_RESET_AMOUNT;
 loop {
-wait( delay(checkDelay, TaskDataDistributionLaunch) );
+wait( delay(checkDelay, TaskPriority::DataDistributionLaunch) );
 if (self->priority_relocations[PRIORITY_REBALANCE_UNDERUTILIZED_TEAM] < SERVER_KNOBS->DD_REBALANCE_PARALLELISM) {
 state Optional<Reference<IDataDistributionTeam>> randomTeam = wait( brokenPromiseToNever( self->teamCollections[teamCollectionIndex].getTeam.getReply( GetTeamRequest( true, false, false ) ) ) );
 if( randomTeam.present() ) {

@@ -1244,7 +1244,7 @@ ACTOR Future<Void> dataDistributionQueue(
 bool wasEmpty = serversToLaunchFrom.empty();
 self.queueRelocation( rs, serversToLaunchFrom );
 if(wasEmpty && !serversToLaunchFrom.empty())
-launchQueuedWorkTimeout = delay(0, TaskDataDistributionLaunch);
+launchQueuedWorkTimeout = delay(0, TaskPriority::DataDistributionLaunch);
 }
 when ( wait(launchQueuedWorkTimeout) ) {
 self.launchQueuedWork( serversToLaunchFrom );

@@ -1258,7 +1258,7 @@ ACTOR Future<Void> dataDistributionQueue(
 when ( RelocateData done = waitNext( self.dataTransferComplete.getFuture() ) ) {
 complete( done, self.busymap );
 if(serversToLaunchFrom.empty() && !done.src.empty())
-launchQueuedWorkTimeout = delay(0, TaskDataDistributionLaunch);
+launchQueuedWorkTimeout = delay(0, TaskPriority::DataDistributionLaunch);
 serversToLaunchFrom.insert(done.src.begin(), done.src.end());
 }
 when ( RelocateData done = waitNext( self.relocationComplete.getFuture() ) ) {

@@ -1266,7 +1266,7 @@ ACTOR Future<Void> dataDistributionQueue(
 self.finishRelocation(done.priority);
 self.fetchKeysComplete.erase( done );
 //self.logRelocation( done, "ShardRelocatorDone" );
-actors.add( tag( delay(0, TaskDataDistributionLaunch), done.keys, rangesComplete ) );
+actors.add( tag( delay(0, TaskPriority::DataDistributionLaunch), done.keys, rangesComplete ) );
 if( g_network->isSimulated() && debug_isCheckRelocationDuration() && now() - done.startTime > 60 ) {
 TraceEvent(SevWarnAlways, "RelocationDurationTooLong").detail("Duration", now() - done.startTime);
 debug_setCheckRelocationDuration(false);

@@ -140,7 +140,7 @@ ACTOR Future<Void> trackShardBytes(
 Reference<AsyncVar<Optional<StorageMetrics>>> shardSize,
 bool addToSizeEstimate = true)
 {
-wait( delay( 0, TaskDataDistribution ) );
+wait( delay( 0, TaskPriority::DataDistribution ) );
 
 /*TraceEvent("TrackShardBytesStarting")
 .detail("TrackerID", trackerID)

@@ -260,7 +260,7 @@ ACTOR Future<Void> changeSizes( DataDistributionTracker* self, KeyRangeRef keys,
 }
 
 wait( waitForAll( sizes ) );
-wait( yield(TaskDataDistribution) );
+wait( yield(TaskPriority::DataDistribution) );
 
 int64_t newShardsStartingSize = 0;
 for ( int i = 0; i < sizes.size(); i++ )

@@ -281,7 +281,7 @@ struct HasBeenTrueFor : NonCopyable {
 Future<Void> set() {
 if( !trigger.isValid() ) {
 cleared = Promise<Void>();
-trigger = delayJittered( SERVER_KNOBS->DD_MERGE_COALESCE_DELAY, TaskDataDistribution - 1 ) || cleared.getFuture();
+trigger = delayJittered( SERVER_KNOBS->DD_MERGE_COALESCE_DELAY, decrementPriority(TaskPriority::DataDistribution) ) || cleared.getFuture();
 }
 return trigger;
 }

@@ -361,7 +361,7 @@ ACTOR Future<Void> shardSplitter(
 
 self->sizeChanges.add( changeSizes( self, keys, shardSize->get().get().bytes ) );
 } else {
-wait( delay(1.0, TaskDataDistribution) ); //In case the reason the split point was off was due to a discrepancy between storage servers
+wait( delay(1.0, TaskPriority::DataDistribution) ); //In case the reason the split point was off was due to a discrepancy between storage servers
 }
 return Void();
 }

@@ -529,7 +529,7 @@ ACTOR Future<Void> shardTracker(
 wait( yieldedFuture(self->maxShardSize->onChange()) );
 
 // Since maxShardSize will become present for all shards at once, avoid slow tasks with a short delay
-wait( delay( 0, TaskDataDistribution ) );
+wait( delay( 0, TaskPriority::DataDistribution ) );
 
 /*TraceEvent("ShardTracker", self->distributorId)
 .detail("Begin", keys.begin)

@@ -546,7 +546,7 @@ ACTOR Future<Void> shardTracker(
 
 // We could have a lot of actors being released from the previous wait at the same time. Immediately calling
 // delay(0) mitigates the resulting SlowTask
-wait( delay(0, TaskDataDistribution) );
+wait( delay(0, TaskPriority::DataDistribution) );
 }
 } catch (Error& e) {
 if (e.code() != error_code_actor_cancelled)

@@ -593,12 +593,12 @@ ACTOR Future<Void> trackInitialShards(DataDistributionTracker *self, Reference<I
 
 //This line reduces the priority of shard initialization to prevent interference with failure monitoring.
 //SOMEDAY: Figure out what this priority should actually be
-wait( delay( 0.0, TaskDataDistribution ) );
+wait( delay( 0.0, TaskPriority::DataDistribution ) );
 
 state int s;
 for(s=0; s<initData->shards.size()-1; s++) {
 restartShardTrackers( self, KeyRangeRef( initData->shards[s].key, initData->shards[s+1].key ) );
-wait( yield( TaskDataDistribution ) );
+wait( yield( TaskPriority::DataDistribution ) );
 }
 
 Future<Void> initialSize = changeSizes( self, KeyRangeRef(allKeys.begin, allKeys.end), 0 );
@@ -1937,8 +1937,8 @@ KeyValueStoreSQLite::KeyValueStoreSQLite(std::string const& filename, UID id, Ke
 readCursors.resize(64); //< number of read threads
 
 sqlite3_soft_heap_limit64( SERVER_KNOBS->SOFT_HEAP_LIMIT ); // SOMEDAY: Is this a performance issue? Should we drop the cache sizes for individual threads?
-int taskId = g_network->getCurrentTask();
-g_network->setCurrentTask(TaskDiskWrite);
+TaskPriority taskId = g_network->getCurrentTask();
+g_network->setCurrentTask(TaskPriority::DiskWrite);
 writeThread->addThread( new Writer(filename, type==KeyValueStoreType::SSD_BTREE_V2, checkChecksums, checkIntegrity, writesComplete, springCleaningStats, diskBytesUsed, freeListPages, id, &readCursors) );
 g_network->setCurrentTask(taskId);
 auto p = new Writer::InitAction();

@@ -1963,8 +1963,8 @@ StorageBytes KeyValueStoreSQLite::getStorageBytes() {
 
 void KeyValueStoreSQLite::startReadThreads() {
 int nReadThreads = readCursors.size();
-int taskId = g_network->getCurrentTask();
-g_network->setCurrentTask(TaskDiskRead);
+TaskPriority taskId = g_network->getCurrentTask();
+g_network->setCurrentTask(TaskPriority::DiskRead);
 for(int i=0; i<nReadThreads; i++)
 readThreads->addThread( new Reader(filename, type==KeyValueStoreType::SSD_BTREE_V2, readsComplete, logID, &readCursors[i]) );
 g_network->setCurrentTask(taskId);
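The two KeyValueStoreSQLite hunks above use the same save/modify/restore dance around thread creation, now round-tripping a TaskPriority instead of an int. One way to package that pattern is an RAII guard; the guard and the stand-in getters below are a sketch, not something introduced by this commit:

    #include <cstdio>

    enum class TaskPriority { DefaultOnMainThread = 7500, DiskWrite = 4030 }; // illustrative values

    // Stand-ins for g_network->getCurrentTask()/setCurrentTask() as used above.
    static TaskPriority g_currentTask = TaskPriority::DefaultOnMainThread;
    TaskPriority getCurrentTask() { return g_currentTask; }
    void setCurrentTask(TaskPriority t) { g_currentTask = t; }

    // Sketch of an RAII guard for the save/modify/restore pattern in KeyValueStoreSQLite.
    struct CurrentTaskGuard {
        TaskPriority saved;
        explicit CurrentTaskGuard(TaskPriority t) : saved(getCurrentTask()) { setCurrentTask(t); }
        ~CurrentTaskGuard() { setCurrentTask(saved); }
    };

    int main() {
        {
            CurrentTaskGuard guard(TaskPriority::DiskWrite);
            std::printf("spawning writer thread at priority %d\n", static_cast<int>(getCurrentTask()));
        } // previous priority restored here
        std::printf("restored to %d\n", static_cast<int>(getCurrentTask()));
        return 0;
    }
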
@@ -30,7 +30,7 @@ Optional<std::pair<LeaderInfo, bool>> getLeader( const vector<Optional<LeaderInfo>>& nominees ) {
 ACTOR Future<Void> submitCandidacy( Key key, LeaderElectionRegInterface coord, LeaderInfo myInfo, UID prevChangeID, Reference<AsyncVar<vector<Optional<LeaderInfo>>>> nominees, int index ) {
 loop {
 auto const& nom = nominees->get()[index];
-Optional<LeaderInfo> li = wait( retryBrokenPromise( coord.candidacy, CandidacyRequest( key, myInfo, nom.present() ? nom.get().changeID : UID(), prevChangeID ), TaskCoordinationReply ) );
+Optional<LeaderInfo> li = wait( retryBrokenPromise( coord.candidacy, CandidacyRequest( key, myInfo, nom.present() ? nom.get().changeID : UID(), prevChangeID ), TaskPriority::CoordinationReply ) );
 
 if (li != nominees->get()[index]) {
 vector<Optional<LeaderInfo>> v = nominees->get();

@@ -150,7 +150,7 @@ ACTOR Future<Void> tryBecomeLeaderInternal(ServerCoordinators coordinators, Valu
 // we might be breaking the leader election process for someone with better communications but lower ID, so change IDs.
 if ((!leader.present() || !leader.get().second) && std::count( nominees->get().begin(), nominees->get().end(), myInfo )) {
 if (!badCandidateTimeout.isValid())
-badCandidateTimeout = delay( SERVER_KNOBS->POLLING_FREQUENCY*2, TaskCoordinationReply );
+badCandidateTimeout = delay( SERVER_KNOBS->POLLING_FREQUENCY*2, TaskPriority::CoordinationReply );
 } else
 badCandidateTimeout = Future<Void>();
 

@@ -183,12 +183,12 @@ ACTOR Future<Void> tryBecomeLeaderInternal(ServerCoordinators coordinators, Valu
 state vector<Future<Void>> true_heartbeats;
 state vector<Future<Void>> false_heartbeats;
 for(int i=0; i<coordinators.leaderElectionServers.size(); i++) {
-Future<bool> hb = retryBrokenPromise( coordinators.leaderElectionServers[i].leaderHeartbeat, LeaderHeartbeatRequest( coordinators.clusterKey, myInfo, prevChangeID ), TaskCoordinationReply );
+Future<bool> hb = retryBrokenPromise( coordinators.leaderElectionServers[i].leaderHeartbeat, LeaderHeartbeatRequest( coordinators.clusterKey, myInfo, prevChangeID ), TaskPriority::CoordinationReply );
 true_heartbeats.push_back( onEqual(hb, true) );
 false_heartbeats.push_back( onEqual(hb, false) );
 }
 
-state Future<Void> rate = delay( SERVER_KNOBS->HEARTBEAT_FREQUENCY, TaskCoordinationReply ) || asyncPriorityInfo->onChange(); // SOMEDAY: Move to server side?
+state Future<Void> rate = delay( SERVER_KNOBS->HEARTBEAT_FREQUENCY, TaskPriority::CoordinationReply ) || asyncPriorityInfo->onChange(); // SOMEDAY: Move to server side?
 
 choose {
 when ( wait( quorum( true_heartbeats, true_heartbeats.size()/2+1 ) ) ) {
@@ -51,7 +51,7 @@ struct LogRouterData {
 }
 
 // Erase messages not needed to update *from* versions >= before (thus, messages with toversion <= before)
-ACTOR Future<Void> eraseMessagesBefore( TagData *self, Version before, LogRouterData *tlogData, int taskID ) {
+ACTOR Future<Void> eraseMessagesBefore( TagData *self, Version before, LogRouterData *tlogData, TaskPriority taskID ) {
 while(!self->version_messages.empty() && self->version_messages.front().first < before) {
 Version version = self->version_messages.front().first;
 int64_t messagesErased = 0;

@@ -68,7 +68,7 @@ struct LogRouterData {
 return Void();
 }
 
-Future<Void> eraseMessagesBefore(Version before, LogRouterData *tlogData, int taskID) {
+Future<Void> eraseMessagesBefore(Version before, LogRouterData *tlogData, TaskPriority taskID) {
 return eraseMessagesBefore(this, before, tlogData, taskID);
 }
 };

@@ -197,7 +197,7 @@ ACTOR Future<Void> waitForVersion( LogRouterData *self, Version ver ) {
 while(self->minPopped.get() + SERVER_KNOBS->MAX_READ_TRANSACTION_LIFE_VERSIONS < ver) {
 if(self->minPopped.get() + SERVER_KNOBS->MAX_READ_TRANSACTION_LIFE_VERSIONS > self->version.get()) {
 self->version.set( self->minPopped.get() + SERVER_KNOBS->MAX_READ_TRANSACTION_LIFE_VERSIONS );
-wait(yield(TaskTLogCommit));
+wait(yield(TaskPriority::TLogCommit));
 } else {
 wait(self->minPopped.whenAtLeast((self->minPopped.get()+1)));
 }

@@ -220,7 +220,7 @@ ACTOR Future<Void> pullAsyncData( LogRouterData *self ) {
 loop {
 loop {
 choose {
-when(wait( r ? r->getMore(TaskTLogCommit) : Never() ) ) {
+when(wait( r ? r->getMore(TaskPriority::TLogCommit) : Never() ) ) {
 break;
 }
 when( wait( dbInfoChange ) ) { //FIXME: does this actually happen?

@@ -247,7 +247,7 @@ ACTOR Future<Void> pullAsyncData( LogRouterData *self ) {
 
 commitMessages(self, ver, messages);
 self->version.set( ver );
-wait(yield(TaskTLogCommit));
+wait(yield(TaskPriority::TLogCommit));
 //TraceEvent("LogRouterVersion").detail("Ver",ver);
 }
 lastVer = ver;

@@ -260,7 +260,7 @@ ACTOR Future<Void> pullAsyncData( LogRouterData *self ) {
 wait( waitForVersion(self, ver) );
 
 self->version.set( ver );
-wait(yield(TaskTLogCommit));
+wait(yield(TaskPriority::TLogCommit));
 }
 break;
 }
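The log router hunks above yield at TaskPriority::TLogCommit between versions so the run loop can interleave other work; a stubbed-out sketch of that shape (flow's real yield() returns a Future<Void> that the actor waits on, which is simplified to a plain call here):

    #include <cstdio>
    #include <vector>

    enum class TaskPriority { TLogCommit = 8000 }; // illustrative value

    // Stub standing in for wait(yield(TaskPriority::TLogCommit)): a cooperative scheduling point
    // at a named priority. The real flow version may suspend the actor; this one just logs.
    void yieldAt(TaskPriority p) {
        std::printf("yield point at priority %d\n", static_cast<int>(p));
    }

    int main() {
        std::vector<int> versions = {100, 101, 102};
        for (int v : versions) {
            std::printf("commit version %d\n", v);
            yieldAt(TaskPriority::TLogCommit); // mirrors the per-version yield in pullAsyncData()
        }
        return 0;
    }
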
@@ -370,7 +370,7 @@ ACTOR Future<Void> logRouterPop( LogRouterData* self, TLogPopRequest req ) {
 } else if (req.to > tagData->popped) {
 tagData->popped = req.to;
 tagData->durableKnownCommittedVersion = req.durableKnownCommittedVersion;
-wait(tagData->eraseMessagesBefore( req.to, self, TaskTLogPop ));
+wait(tagData->eraseMessagesBefore( req.to, self, TaskPriority::TLogPop ));
 }
 
 state Version minPopped = std::numeric_limits<Version>::max();

@@ -384,7 +384,7 @@ ACTOR Future<Void> logRouterPop( LogRouterData* self, TLogPopRequest req ) {
 
 while(!self->messageBlocks.empty() && self->messageBlocks.front().first < minPopped) {
 self->messageBlocks.pop_front();
-wait(yield(TaskTLogPop));
+wait(yield(TaskPriority::TLogPop));
 }
 
 self->poppedVersion = std::min(minKnownCommittedVersion, self->minKnownCommittedVersion);

@@ -341,7 +341,7 @@ struct ILogSystem {
 
 //returns immediately if hasMessage() returns true.
 //returns when either the result of hasMessage() or version() has changed.
-virtual Future<Void> getMore(int taskID = TaskTLogPeekReply) = 0;
+virtual Future<Void> getMore(TaskPriority taskID = TaskPriority::TLogPeekReply) = 0;
 
 //returns when the failure monitor detects that the servers associated with the cursor are failed
 virtual Future<Void> onFailed() = 0;

@@ -406,7 +406,7 @@ struct ILogSystem {
 virtual StringRef getMessageWithTags();
 virtual const std::vector<Tag>& getTags();
 virtual void advanceTo(LogMessageVersion n);
-virtual Future<Void> getMore(int taskID = TaskTLogPeekReply);
+virtual Future<Void> getMore(TaskPriority taskID = TaskPriority::TLogPeekReply);
 virtual Future<Void> onFailed();
 virtual bool isActive();
 virtual bool isExhausted();

@@ -454,7 +454,7 @@ struct ILogSystem {
 virtual StringRef getMessageWithTags();
 virtual const std::vector<Tag>& getTags();
 virtual void advanceTo(LogMessageVersion n);
-virtual Future<Void> getMore(int taskID = TaskTLogPeekReply);
+virtual Future<Void> getMore(TaskPriority taskID = TaskPriority::TLogPeekReply);
 virtual Future<Void> onFailed();
 virtual bool isActive();
 virtual bool isExhausted();

@@ -499,7 +499,7 @@ struct ILogSystem {
 virtual StringRef getMessageWithTags();
 virtual const std::vector<Tag>& getTags();
 virtual void advanceTo(LogMessageVersion n);
-virtual Future<Void> getMore(int taskID = TaskTLogPeekReply);
+virtual Future<Void> getMore(TaskPriority taskID = TaskPriority::TLogPeekReply);
 virtual Future<Void> onFailed();
 virtual bool isActive();
 virtual bool isExhausted();

@@ -533,7 +533,7 @@ struct ILogSystem {
 virtual StringRef getMessageWithTags();
 virtual const std::vector<Tag>& getTags();
 virtual void advanceTo(LogMessageVersion n);
-virtual Future<Void> getMore(int taskID = TaskTLogPeekReply);
+virtual Future<Void> getMore(TaskPriority taskID = TaskPriority::TLogPeekReply);
 virtual Future<Void> onFailed();
 virtual bool isActive();
 virtual bool isExhausted();

@@ -593,7 +593,7 @@ struct ILogSystem {
 virtual StringRef getMessageWithTags();
 virtual const std::vector<Tag>& getTags();
 virtual void advanceTo(LogMessageVersion n);
-virtual Future<Void> getMore(int taskID = TaskTLogPeekReply);
+virtual Future<Void> getMore(TaskPriority taskID = TaskPriority::TLogPeekReply);
 virtual Future<Void> onFailed();
 virtual bool isActive();
 virtual bool isExhausted();
@@ -133,7 +133,7 @@ void ILogSystem::ServerPeekCursor::advanceTo(LogMessageVersion n) {
 }
 }
 
-ACTOR Future<Void> serverPeekParallelGetMore( ILogSystem::ServerPeekCursor* self, int taskID ) {
+ACTOR Future<Void> serverPeekParallelGetMore( ILogSystem::ServerPeekCursor* self, TaskPriority taskID ) {
 if( !self->interf || self->messageVersion >= self->end ) {
 wait( Future<Void>(Never()));
 throw internal_error();

@@ -192,7 +192,7 @@ ACTOR Future<Void> serverPeekParallelGetMore( ILogSystem::ServerPeekCursor* self
 }
 }
 
-ACTOR Future<Void> serverPeekGetMore( ILogSystem::ServerPeekCursor* self, int taskID ) {
+ACTOR Future<Void> serverPeekGetMore( ILogSystem::ServerPeekCursor* self, TaskPriority taskID ) {
 if( !self->interf || self->messageVersion >= self->end ) {
 wait( Future<Void>(Never()));
 throw internal_error();

@@ -225,7 +225,7 @@ ACTOR Future<Void> serverPeekGetMore( ILogSystem::ServerPeekCursor* self, int ta
 }
 }
 
-Future<Void> ILogSystem::ServerPeekCursor::getMore(int taskID) {
+Future<Void> ILogSystem::ServerPeekCursor::getMore(TaskPriority taskID) {
 //TraceEvent("SPC_GetMore", randomID).detail("HasMessage", hasMessage()).detail("More", !more.isValid() || more.isReady()).detail("MessageVersion", messageVersion.toString()).detail("End", end.toString());
 if( hasMessage() )
 return Void();

@@ -431,7 +431,7 @@ void ILogSystem::MergedPeekCursor::advanceTo(LogMessageVersion n) {
 }
 }
 
-ACTOR Future<Void> mergedPeekGetMore(ILogSystem::MergedPeekCursor* self, LogMessageVersion startVersion, int taskID) {
+ACTOR Future<Void> mergedPeekGetMore(ILogSystem::MergedPeekCursor* self, LogMessageVersion startVersion, TaskPriority taskID) {
 loop {
 //TraceEvent("MPC_GetMoreA", self->randomID).detail("Start", startVersion.toString());
 if(self->bestServer >= 0 && self->serverCursors[self->bestServer]->isActive()) {

@@ -452,7 +452,7 @@ ACTOR Future<Void> mergedPeekGetMore(ILogSystem::MergedPeekCursor* self, LogMess
 }
 }
 
-Future<Void> ILogSystem::MergedPeekCursor::getMore(int taskID) {
+Future<Void> ILogSystem::MergedPeekCursor::getMore(TaskPriority taskID) {
 if(!serverCursors.size())
 return Never();
 

@@ -692,7 +692,7 @@ void ILogSystem::SetPeekCursor::advanceTo(LogMessageVersion n) {
 }
 }
 
-ACTOR Future<Void> setPeekGetMore(ILogSystem::SetPeekCursor* self, LogMessageVersion startVersion, int taskID) {
+ACTOR Future<Void> setPeekGetMore(ILogSystem::SetPeekCursor* self, LogMessageVersion startVersion, TaskPriority taskID) {
 loop {
 //TraceEvent("LPC_GetMore1", self->randomID).detail("Start", startVersion.toString()).detail("Tag", self->tag);
 if(self->bestServer >= 0 && self->bestSet >= 0 && self->serverCursors[self->bestSet][self->bestServer]->isActive()) {

@@ -753,7 +753,7 @@ ACTOR Future<Void> setPeekGetMore(ILogSystem::SetPeekCursor* self, LogMessageVer
 }
 }
 
-Future<Void> ILogSystem::SetPeekCursor::getMore(int taskID) {
+Future<Void> ILogSystem::SetPeekCursor::getMore(TaskPriority taskID) {
 auto startVersion = version();
 calcHasMessage();
 if( hasMessage() )

@@ -848,7 +848,7 @@ void ILogSystem::MultiCursor::advanceTo(LogMessageVersion n) {
 cursors.back()->advanceTo(n);
 }
 
-Future<Void> ILogSystem::MultiCursor::getMore(int taskID) {
+Future<Void> ILogSystem::MultiCursor::getMore(TaskPriority taskID) {
 LogMessageVersion startVersion = cursors.back()->version();
 while( cursors.size() > 1 && cursors.back()->version() >= epochEnds.back() ) {
 poppedVersion = std::max(poppedVersion, cursors.back()->popped());

@@ -964,7 +964,7 @@ void ILogSystem::BufferedCursor::advanceTo(LogMessageVersion n) {
 ASSERT(false);
 }
 
-ACTOR Future<Void> bufferedGetMoreLoader( ILogSystem::BufferedCursor* self, Reference<ILogSystem::IPeekCursor> cursor, Version maxVersion, int taskID ) {
+ACTOR Future<Void> bufferedGetMoreLoader( ILogSystem::BufferedCursor* self, Reference<ILogSystem::IPeekCursor> cursor, Version maxVersion, TaskPriority taskID ) {
 loop {
 wait(yield());
 if(cursor->version().version >= maxVersion) {

@@ -981,7 +981,7 @@ ACTOR Future<Void> bufferedGetMoreLoader( ILogSystem::BufferedCursor* self, Refe
 }
 }
 
-ACTOR Future<Void> bufferedGetMore( ILogSystem::BufferedCursor* self, int taskID ) {
+ACTOR Future<Void> bufferedGetMore( ILogSystem::BufferedCursor* self, TaskPriority taskID ) {
 if( self->messageVersion.version >= self->end ) {
 wait( Future<Void>(Never()));
 throw internal_error();

@@ -1015,7 +1015,7 @@ ACTOR Future<Void> bufferedGetMore( ILogSystem::BufferedCursor* self, int taskID
 return Void();
 }
 
-Future<Void> ILogSystem::BufferedCursor::getMore(int taskID) {
+Future<Void> ILogSystem::BufferedCursor::getMore(TaskPriority taskID) {
 if( hasMessage() )
 return Void();
 return bufferedGetMore(this, taskID);
@ -50,7 +50,7 @@ struct MasterInterface {
|
|||
}
|
||||
|
||||
void initEndpoints() {
|
||||
getCommitVersion.getEndpoint( TaskProxyGetConsistentReadVersion );
|
||||
getCommitVersion.getEndpoint( TaskPriority::ProxyGetConsistentReadVersion );
|
||||
}
|
||||
};
|
||||
|
||||
|
|
|
@@ -158,7 +158,7 @@ ACTOR Future<Void> queueTransactionStartRequests(
if (now() - *lastGRVTime > *GRVBatchTime)
*lastGRVTime = now() - *GRVBatchTime;

forwardPromise(GRVTimer, delayJittered(*GRVBatchTime - (now() - *lastGRVTime), TaskProxyGRVTimer));
forwardPromise(GRVTimer, delayJittered(*GRVBatchTime - (now() - *lastGRVTime), TaskPriority::ProxyGRVTimer));
}

transactionQueue->push(std::make_pair(req, counter--));

@@ -263,7 +263,7 @@ struct ProxyCommitData {
lastVersionTime(0), commitVersionRequestNumber(1), mostRecentProcessedRequestNumber(0),
getConsistentReadVersion(getConsistentReadVersion), commit(commit), lastCoalesceTime(0),
localCommitBatchesStarted(0), locked(false), commitBatchInterval(SERVER_KNOBS->COMMIT_TRANSACTION_BATCH_INTERVAL_MIN),
firstProxy(firstProxy), cx(openDBOnServer(db, TaskDefaultEndpoint, true, true)), db(db),
firstProxy(firstProxy), cx(openDBOnServer(db, TaskPriority::DefaultEndpoint, true, true)), db(db),
singleKeyMutationEvent(LiteralStringRef("SingleKeyMutation")), commitBatchesMemBytesCount(0), lastTxsPop(0)
{}
};

@@ -350,7 +350,7 @@ struct ResolutionRequestBuilder {
};

ACTOR Future<Void> commitBatcher(ProxyCommitData *commitData, PromiseStream<std::pair<std::vector<CommitTransactionRequest>, int> > out, FutureStream<CommitTransactionRequest> in, int desiredBytes, int64_t memBytesLimit) {
wait(delayJittered(commitData->commitBatchInterval, TaskProxyCommitBatcher));
wait(delayJittered(commitData->commitBatchInterval, TaskPriority::ProxyCommitBatcher));

state double lastBatch = 0;

@@ -363,7 +363,7 @@ ACTOR Future<Void> commitBatcher(ProxyCommitData *commitData, PromiseStream<std:
timeout = Never();
}
else {
timeout = delayJittered(SERVER_KNOBS->MAX_COMMIT_BATCH_INTERVAL, TaskProxyCommitBatcher);
timeout = delayJittered(SERVER_KNOBS->MAX_COMMIT_BATCH_INTERVAL, TaskPriority::ProxyCommitBatcher);
}

while(!timeout.isReady() && !(batch.size() == SERVER_KNOBS->COMMIT_TRANSACTION_BATCH_COUNT_MAX || batchBytes >= desiredBytes)) {

@@ -387,10 +387,10 @@ ACTOR Future<Void> commitBatcher(ProxyCommitData *commitData, PromiseStream<std:
if(!batch.size()) {
commitData->commitBatchStartNotifications.send(Void());
if(now() - lastBatch > commitData->commitBatchInterval) {
timeout = delayJittered(SERVER_KNOBS->COMMIT_TRANSACTION_BATCH_INTERVAL_FROM_IDLE, TaskProxyCommitBatcher);
timeout = delayJittered(SERVER_KNOBS->COMMIT_TRANSACTION_BATCH_INTERVAL_FROM_IDLE, TaskPriority::ProxyCommitBatcher);
}
else {
timeout = delayJittered(commitData->commitBatchInterval - (now() - lastBatch), TaskProxyCommitBatcher);
timeout = delayJittered(commitData->commitBatchInterval - (now() - lastBatch), TaskPriority::ProxyCommitBatcher);
}
}

@@ -398,7 +398,7 @@ ACTOR Future<Void> commitBatcher(ProxyCommitData *commitData, PromiseStream<std:
out.send({ batch, batchBytes });
lastBatch = now();
commitData->commitBatchStartNotifications.send(Void());
timeout = delayJittered(commitData->commitBatchInterval, TaskProxyCommitBatcher);
timeout = delayJittered(commitData->commitBatchInterval, TaskPriority::ProxyCommitBatcher);
batch = std::vector<CommitTransactionRequest>();
batchBytes = 0;
}

@@ -457,7 +457,7 @@ ACTOR Future<Void> commitBatch(
ASSERT(SERVER_KNOBS->MAX_READ_TRANSACTION_LIFE_VERSIONS <= SERVER_KNOBS->MAX_VERSIONS_IN_FLIGHT); // since we are using just the former to limit the number of versions actually in flight!

// Active load balancing runs at a very high priority (to obtain accurate estimate of memory used by commit batches) so we need to downgrade here
wait(delay(0, TaskProxyCommit));
wait(delay(0, TaskPriority::ProxyCommit));

self->lastVersionTime = t1;

@@ -534,7 +534,7 @@ ACTOR Future<Void> commitBatch(
vector< Future<ResolveTransactionBatchReply> > replies;
for (int r = 0; r<self->resolvers.size(); r++) {
requests.requests[r].debugID = debugID;
replies.push_back(brokenPromiseToNever(self->resolvers[r].resolve.getReply(requests.requests[r], TaskProxyResolverReply)));
replies.push_back(brokenPromiseToNever(self->resolvers[r].resolve.getReply(requests.requests[r], TaskPriority::ProxyResolverReply)));
}

state vector<vector<int>> transactionResolverMap = std::move( requests.transactionResolverMap );

@@ -1135,7 +1135,7 @@ ACTOR Future<GetReadVersionReply> getLiveCommittedVersion(ProxyCommitData* commi

state vector<Future<GetReadVersionReply>> proxyVersions;
for (auto const& p : *otherProxies)
proxyVersions.push_back(brokenPromiseToNever(p.getRawCommittedVersion.getReply(GetRawCommittedVersionRequest(debugID), TaskTLogConfirmRunningReply)));
proxyVersions.push_back(brokenPromiseToNever(p.getRawCommittedVersion.getReply(GetRawCommittedVersionRequest(debugID), TaskPriority::TLogConfirmRunningReply)));

if (!(flags&GetReadVersionRequest::FLAG_CAUSAL_READ_RISKY))
{

@@ -1292,7 +1292,7 @@ ACTOR static Future<Void> transactionStarter(
}

if (!transactionQueue.empty())
forwardPromise(GRVTimer, delayJittered(SERVER_KNOBS->START_TRANSACTION_BATCH_QUEUE_CHECK_INTERVAL, TaskProxyGRVTimer));
forwardPromise(GRVTimer, delayJittered(SERVER_KNOBS->START_TRANSACTION_BATCH_QUEUE_CHECK_INTERVAL, TaskPriority::ProxyGRVTimer));

/*TraceEvent("GRVBatch", proxy.id())
.detail("Elapsed", elapsed)

@@ -130,12 +130,12 @@ ACTOR Future<vector<UID>> addReadWriteDestinations(KeyRangeRef shard, vector<Sto

state vector< Future<Optional<UID>> > srcChecks;
for(int s=0; s<srcInterfs.size(); s++) {
srcChecks.push_back( checkReadWrite( srcInterfs[s].getShardState.getReplyUnlessFailedFor( GetShardStateRequest( shard, GetShardStateRequest::NO_WAIT), SERVER_KNOBS->SERVER_READY_QUORUM_INTERVAL, 0, TaskMoveKeys ), srcInterfs[s].id(), 0 ) );
srcChecks.push_back( checkReadWrite( srcInterfs[s].getShardState.getReplyUnlessFailedFor( GetShardStateRequest( shard, GetShardStateRequest::NO_WAIT), SERVER_KNOBS->SERVER_READY_QUORUM_INTERVAL, 0, TaskPriority::MoveKeys ), srcInterfs[s].id(), 0 ) );
}

state vector< Future<Optional<UID>> > destChecks;
for(int s=0; s<destInterfs.size(); s++) {
destChecks.push_back( checkReadWrite( destInterfs[s].getShardState.getReplyUnlessFailedFor( GetShardStateRequest( shard, GetShardStateRequest::NO_WAIT), SERVER_KNOBS->SERVER_READY_QUORUM_INTERVAL, 0, TaskMoveKeys ), destInterfs[s].id(), version ) );
destChecks.push_back( checkReadWrite( destInterfs[s].getShardState.getReplyUnlessFailedFor( GetShardStateRequest( shard, GetShardStateRequest::NO_WAIT), SERVER_KNOBS->SERVER_READY_QUORUM_INTERVAL, 0, TaskPriority::MoveKeys ), destInterfs[s].id(), version ) );
}

wait( waitForAll(srcChecks) && waitForAll(destChecks) );

@@ -225,7 +225,7 @@ ACTOR Future<Void> startMoveKeys( Database occ, KeyRange keys, vector<UID> serve
state TraceInterval interval("RelocateShard_StartMoveKeys");
//state TraceInterval waitInterval("");

wait( startMoveKeysLock->take( TaskDataDistributionLaunch ) );
wait( startMoveKeysLock->take( TaskPriority::DataDistributionLaunch ) );
state FlowLock::Releaser releaser( *startMoveKeysLock );

TraceEvent(SevDebug, interval.begin(), relocationIntervalId);

@@ -255,7 +255,7 @@ ACTOR Future<Void> startMoveKeys( Database occ, KeyRange keys, vector<UID> serve
//Keep track of shards for all src servers so that we can preserve their values in serverKeys
state Map<UID, VectorRef<KeyRangeRef>> shardMap;

tr.info.taskID = TaskMoveKeys;
tr.info.taskID = TaskPriority::MoveKeys;
tr.setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);

wait( checkMoveKeysLock(&tr, lock) );

@@ -394,11 +394,11 @@ ACTOR Future<Void> startMoveKeys( Database occ, KeyRange keys, vector<UID> serve
ACTOR Future<Void> waitForShardReady( StorageServerInterface server, KeyRange keys, Version minVersion, GetShardStateRequest::waitMode mode ) {
loop {
try {
std::pair<Version,Version> rep = wait( server.getShardState.getReply( GetShardStateRequest(keys, mode), TaskMoveKeys ) );
std::pair<Version,Version> rep = wait( server.getShardState.getReply( GetShardStateRequest(keys, mode), TaskPriority::MoveKeys ) );
if (rep.first >= minVersion) {
return Void();
}
wait( delayJittered( SERVER_KNOBS->SHARD_READY_DELAY, TaskMoveKeys ) );
wait( delayJittered( SERVER_KNOBS->SHARD_READY_DELAY, TaskPriority::MoveKeys ) );
}
catch (Error& e) {
if( e.code() != error_code_timed_out ) {

@@ -419,7 +419,7 @@ ACTOR Future<Void> checkFetchingState( Database cx, vector<UID> dest, KeyRange k
try {
if (BUGGIFY) wait(delay(5));

tr.info.taskID = TaskMoveKeys;
tr.info.taskID = TaskPriority::MoveKeys;
tr.setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);

vector< Future< Optional<Value> > > serverListEntries;

@@ -439,7 +439,7 @@ ACTOR Future<Void> checkFetchingState( Database cx, vector<UID> dest, KeyRange k
}

wait( timeoutError( waitForAll( requests ),
SERVER_KNOBS->SERVER_READY_QUORUM_TIMEOUT, TaskMoveKeys ) );
SERVER_KNOBS->SERVER_READY_QUORUM_TIMEOUT, TaskPriority::MoveKeys ) );

dataMovementComplete.send(Void());
return Void();

@@ -480,11 +480,11 @@ ACTOR Future<Void> finishMoveKeys( Database occ, KeyRange keys, vector<UID> dest
//printf("finishMoveKeys( '%s'-'%s' )\n", keys.begin.toString().c_str(), keys.end.toString().c_str());
loop {
try {
tr.info.taskID = TaskMoveKeys;
tr.info.taskID = TaskPriority::MoveKeys;
tr.setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);

releaser.release();
wait( finishMoveKeysParallelismLock->take( TaskDataDistributionLaunch ) );
wait( finishMoveKeysParallelismLock->take( TaskPriority::DataDistributionLaunch ) );
releaser = FlowLock::Releaser( *finishMoveKeysParallelismLock );

wait( checkMoveKeysLock(&tr, lock) );

@@ -632,7 +632,7 @@ ACTOR Future<Void> finishMoveKeys( Database occ, KeyRange keys, vector<UID> dest

for(int s=0; s<storageServerInterfaces.size(); s++)
serverReady.push_back( waitForShardReady( storageServerInterfaces[s], keys, tr.getReadVersion().get(), GetShardStateRequest::READABLE) );
wait( timeout( waitForAll( serverReady ), SERVER_KNOBS->SERVER_READY_QUORUM_TIMEOUT, Void(), TaskMoveKeys ) );
wait( timeout( waitForAll( serverReady ), SERVER_KNOBS->SERVER_READY_QUORUM_TIMEOUT, Void(), TaskPriority::MoveKeys ) );
int count = dest.size() - newDestinations.size();
for(int s=0; s<serverReady.size(); s++)
count += serverReady[s].isReady() && !serverReady[s].isError();

@@ -808,7 +808,7 @@ ACTOR Future<Void> removeStorageServer( Database cx, UID serverID, MoveKeysLock
if (!canRemove) {
TEST(true); // The caller had a transaction in flight that assigned keys to the server. Wait for it to reverse its mistake.
TraceEvent(SevWarn,"NoCanRemove").detail("Count", noCanRemoveCount++).detail("ServerID", serverID);
wait( delayJittered(SERVER_KNOBS->REMOVE_RETRY_DELAY, TaskDataDistributionLaunch) );
wait( delayJittered(SERVER_KNOBS->REMOVE_RETRY_DELAY, TaskPriority::DataDistributionLaunch) );
tr.reset();
TraceEvent("RemoveStorageServerRetrying").detail("CanRemove", canRemove);
} else {

@@ -333,7 +333,7 @@ namespace oldTLog_4_6 {
}

// Erase messages not needed to update *from* versions >= before (thus, messages with toversion <= before)
ACTOR Future<Void> eraseMessagesBefore( TagData *self, Version before, int64_t* gBytesErased, Reference<LogData> tlogData, int taskID ) {
ACTOR Future<Void> eraseMessagesBefore( TagData *self, Version before, int64_t* gBytesErased, Reference<LogData> tlogData, TaskPriority taskID ) {
while(!self->version_messages.empty() && self->version_messages.front().first < before) {
Version version = self->version_messages.front().first;
std::pair<int, int> &sizes = tlogData->version_sizes[version];

@@ -359,7 +359,7 @@ namespace oldTLog_4_6 {
return Void();
}

Future<Void> eraseMessagesBefore(Version before, int64_t* gBytesErased, Reference<LogData> tlogData, int taskID) {
Future<Void> eraseMessagesBefore(Version before, int64_t* gBytesErased, Reference<LogData> tlogData, TaskPriority taskID) {
return eraseMessagesBefore(this, before, gBytesErased, tlogData, taskID);
}
};

@@ -526,21 +526,21 @@ namespace oldTLog_4_6 {

self->persistentData->set( KeyValueRef( persistTagMessagesKey( logData->logId, tag->key, currentVersion ), wr.toValue() ) );

Future<Void> f = yield(TaskUpdateStorage);
Future<Void> f = yield(TaskPriority::UpdateStorage);
if(!f.isReady()) {
wait(f);
msg = std::upper_bound(tag->value.version_messages.begin(), tag->value.version_messages.end(), std::make_pair(currentVersion, LengthPrefixedStringRef()), CompareFirst<std::pair<Version, LengthPrefixedStringRef>>());
}
}

wait(yield(TaskUpdateStorage));
wait(yield(TaskPriority::UpdateStorage));
}

self->persistentData->set( KeyValueRef( BinaryWriter::toValue(logData->logId,Unversioned()).withPrefix(persistCurrentVersionKeys.begin), BinaryWriter::toValue(newPersistentDataVersion, Unversioned()) ) );
logData->persistentDataVersion = newPersistentDataVersion;

wait( self->persistentData->commit() ); // SOMEDAY: This seems to be running pretty often, should we slow it down???
wait( delay(0, TaskUpdateStorage) );
wait( delay(0, TaskPriority::UpdateStorage) );

// Now that the changes we made to persistentData are durable, erase the data we moved from memory and the queue, increase bytesDurable accordingly, and update persistentDataDurableVersion.

@@ -548,20 +548,20 @@ namespace oldTLog_4_6 {
logData->persistentDataDurableVersion = newPersistentDataVersion;

for(tag = logData->tag_data.begin(); tag != logData->tag_data.end(); ++tag) {
wait(tag->value.eraseMessagesBefore( newPersistentDataVersion+1, &self->bytesDurable, logData, TaskUpdateStorage ));
wait(yield(TaskUpdateStorage));
wait(tag->value.eraseMessagesBefore( newPersistentDataVersion+1, &self->bytesDurable, logData, TaskPriority::UpdateStorage ));
wait(yield(TaskPriority::UpdateStorage));
}

logData->version_sizes.erase(logData->version_sizes.begin(), logData->version_sizes.lower_bound(logData->persistentDataDurableVersion));

wait(yield(TaskUpdateStorage));
wait(yield(TaskPriority::UpdateStorage));

while(!logData->messageBlocks.empty() && logData->messageBlocks.front().first <= newPersistentDataVersion) {
int64_t bytesErased = int64_t(logData->messageBlocks.front().second.size()) * SERVER_KNOBS->TLOG_MESSAGE_BLOCK_OVERHEAD_FACTOR;
logData->bytesDurable += bytesErased;
self->bytesDurable += bytesErased;
logData->messageBlocks.pop_front();
wait(yield(TaskUpdateStorage));
wait(yield(TaskPriority::UpdateStorage));
}

if(logData->bytesDurable.getValue() > logData->bytesInput.getValue() || self->bytesDurable > self->bytesInput) {

@@ -586,7 +586,7 @@ namespace oldTLog_4_6 {
}

if(!self->queueOrder.size()) {
wait( delay(BUGGIFY ? SERVER_KNOBS->BUGGIFY_TLOG_STORAGE_MIN_UPDATE_INTERVAL : SERVER_KNOBS->TLOG_STORAGE_MIN_UPDATE_INTERVAL, TaskUpdateStorage) );
wait( delay(BUGGIFY ? SERVER_KNOBS->BUGGIFY_TLOG_STORAGE_MIN_UPDATE_INTERVAL : SERVER_KNOBS->TLOG_STORAGE_MIN_UPDATE_INTERVAL, TaskPriority::UpdateStorage) );
return Void();
}

@@ -621,14 +621,14 @@ namespace oldTLog_4_6 {
}

wait( logData->queueCommittedVersion.whenAtLeast( nextVersion ) );
wait( delay(0, TaskUpdateStorage) );
wait( delay(0, TaskPriority::UpdateStorage) );

//TraceEvent("TlogUpdatePersist", self->dbgid).detail("LogId", logData->logId).detail("NextVersion", nextVersion).detail("Version", logData->version.get()).detail("PersistentDataDurableVer", logData->persistentDataDurableVersion).detail("QueueCommitVer", logData->queueCommittedVersion.get()).detail("PersistDataVer", logData->persistentDataVersion);
if (nextVersion > logData->persistentDataVersion) {
self->updatePersist = updatePersistentData(self, logData, nextVersion);
wait( self->updatePersist );
} else {
wait( delay(BUGGIFY ? SERVER_KNOBS->BUGGIFY_TLOG_STORAGE_MIN_UPDATE_INTERVAL : SERVER_KNOBS->TLOG_STORAGE_MIN_UPDATE_INTERVAL, TaskUpdateStorage) );
wait( delay(BUGGIFY ? SERVER_KNOBS->BUGGIFY_TLOG_STORAGE_MIN_UPDATE_INTERVAL : SERVER_KNOBS->TLOG_STORAGE_MIN_UPDATE_INTERVAL, TaskPriority::UpdateStorage) );
}

if( logData->removed.isReady() ) {

@@ -639,9 +639,9 @@ namespace oldTLog_4_6 {
if(logData->persistentDataDurableVersion == logData->version.get()) {
self->queueOrder.pop_front();
}
wait( delay(0.0, TaskUpdateStorage) );
wait( delay(0.0, TaskPriority::UpdateStorage) );
} else {
wait( delay(BUGGIFY ? SERVER_KNOBS->BUGGIFY_TLOG_STORAGE_MIN_UPDATE_INTERVAL : SERVER_KNOBS->TLOG_STORAGE_MIN_UPDATE_INTERVAL, TaskUpdateStorage) );
wait( delay(BUGGIFY ? SERVER_KNOBS->BUGGIFY_TLOG_STORAGE_MIN_UPDATE_INTERVAL : SERVER_KNOBS->TLOG_STORAGE_MIN_UPDATE_INTERVAL, TaskPriority::UpdateStorage) );
}
}
else if(logData->initialized) {

@@ -650,7 +650,7 @@ namespace oldTLog_4_6 {
while( totalSize < SERVER_KNOBS->UPDATE_STORAGE_BYTE_LIMIT && sizeItr != logData->version_sizes.end()
&& (logData->bytesInput.getValue() - logData->bytesDurable.getValue() - totalSize >= SERVER_KNOBS->TLOG_SPILL_THRESHOLD || sizeItr->value.first == 0) )
{
wait( yield(TaskUpdateStorage) );
wait( yield(TaskPriority::UpdateStorage) );

++sizeItr;
nextVersion = sizeItr == logData->version_sizes.end() ? logData->version.get() : sizeItr->key;

@@ -662,7 +662,7 @@ namespace oldTLog_4_6 {
totalSize += it->second.expectedSize();
}

wait(yield(TaskUpdateStorage));
wait(yield(TaskPriority::UpdateStorage));
}

prevVersion = nextVersion;

@@ -673,7 +673,7 @@ namespace oldTLog_4_6 {
//TraceEvent("UpdateStorageVer", logData->logId).detail("NextVersion", nextVersion).detail("PersistentDataVersion", logData->persistentDataVersion).detail("TotalSize", totalSize);

wait( logData->queueCommittedVersion.whenAtLeast( nextVersion ) );
wait( delay(0, TaskUpdateStorage) );
wait( delay(0, TaskPriority::UpdateStorage) );

if (nextVersion > logData->persistentDataVersion) {
self->updatePersist = updatePersistentData(self, logData, nextVersion);

@@ -681,21 +681,21 @@ namespace oldTLog_4_6 {
}

if( totalSize < SERVER_KNOBS->UPDATE_STORAGE_BYTE_LIMIT ) {
wait( delay(BUGGIFY ? SERVER_KNOBS->BUGGIFY_TLOG_STORAGE_MIN_UPDATE_INTERVAL : SERVER_KNOBS->TLOG_STORAGE_MIN_UPDATE_INTERVAL, TaskUpdateStorage) );
wait( delay(BUGGIFY ? SERVER_KNOBS->BUGGIFY_TLOG_STORAGE_MIN_UPDATE_INTERVAL : SERVER_KNOBS->TLOG_STORAGE_MIN_UPDATE_INTERVAL, TaskPriority::UpdateStorage) );
}
else {
//recovery wants to commit to persistant data when updatePersistentData is not active, this delay ensures that immediately after
//updatePersist returns another one has not been started yet.
wait( delay(0.0, TaskUpdateStorage) );
wait( delay(0.0, TaskPriority::UpdateStorage) );
}
} else {
wait( delay(BUGGIFY ? SERVER_KNOBS->BUGGIFY_TLOG_STORAGE_MIN_UPDATE_INTERVAL : SERVER_KNOBS->TLOG_STORAGE_MIN_UPDATE_INTERVAL, TaskUpdateStorage) );
wait( delay(BUGGIFY ? SERVER_KNOBS->BUGGIFY_TLOG_STORAGE_MIN_UPDATE_INTERVAL : SERVER_KNOBS->TLOG_STORAGE_MIN_UPDATE_INTERVAL, TaskPriority::UpdateStorage) );
}
return Void();
}

ACTOR Future<Void> updateStorageLoop( TLogData* self ) {
wait(delay(0, TaskUpdateStorage));
wait(delay(0, TaskPriority::UpdateStorage));

loop {
wait( updateStorage(self) );

@@ -823,7 +823,7 @@ namespace oldTLog_4_6 {
ti->value.popped_recently = true;
//if (to.epoch == self->epoch())
if ( req.to > logData->persistentDataDurableVersion )
wait(ti->value.eraseMessagesBefore( req.to, &self->bytesDurable, logData, TaskTLogPop ));
wait(ti->value.eraseMessagesBefore( req.to, &self->bytesDurable, logData, TaskPriority::TLogPop ));
}

req.reply.send(Void());

@@ -297,7 +297,7 @@ struct TLogData : NonCopyable {
concurrentLogRouterReads(SERVER_KNOBS->CONCURRENT_LOG_ROUTER_READS),
ignorePopRequest(false), ignorePopDeadline(), ignorePopUid(), dataFolder(folder), toBePopped()
{
cx = openDBOnServer(dbInfo, TaskDefaultEndpoint, true, true);
cx = openDBOnServer(dbInfo, TaskPriority::DefaultEndpoint, true, true);
}
};

@@ -323,7 +323,7 @@ struct LogData : NonCopyable, public ReferenceCounted<LogData> {
}

// Erase messages not needed to update *from* versions >= before (thus, messages with toversion <= before)
ACTOR Future<Void> eraseMessagesBefore( TagData *self, Version before, TLogData *tlogData, Reference<LogData> logData, int taskID ) {
ACTOR Future<Void> eraseMessagesBefore( TagData *self, Version before, TLogData *tlogData, Reference<LogData> logData, TaskPriority taskID ) {
while(!self->versionMessages.empty() && self->versionMessages.front().first < before) {
Version version = self->versionMessages.front().first;
std::pair<int,int> &sizes = logData->version_sizes[version];

@@ -352,7 +352,7 @@ struct LogData : NonCopyable, public ReferenceCounted<LogData> {
return Void();
}

Future<Void> eraseMessagesBefore(Version before, TLogData *tlogData, Reference<LogData> logData, int taskID) {
Future<Void> eraseMessagesBefore(Version before, TLogData *tlogData, Reference<LogData> logData, TaskPriority taskID) {
return eraseMessagesBefore(this, before, tlogData, logData, taskID);
}
};

@@ -607,14 +607,14 @@ ACTOR Future<Void> updatePersistentData( TLogData* self, Reference<LogData> logD

self->persistentData->set( KeyValueRef( persistTagMessagesKey( logData->logId, tagData->tag, currentVersion ), wr.toValue() ) );

Future<Void> f = yield(TaskUpdateStorage);
Future<Void> f = yield(TaskPriority::UpdateStorage);
if(!f.isReady()) {
wait(f);
msg = std::upper_bound(tagData->versionMessages.begin(), tagData->versionMessages.end(), std::make_pair(currentVersion, LengthPrefixedStringRef()), CompareFirst<std::pair<Version, LengthPrefixedStringRef>>());
}
}

wait(yield(TaskUpdateStorage));
wait(yield(TaskPriority::UpdateStorage));
}
}
}

@@ -624,7 +624,7 @@ ACTOR Future<Void> updatePersistentData( TLogData* self, Reference<LogData> logD
logData->persistentDataVersion = newPersistentDataVersion;

wait( self->persistentData->commit() ); // SOMEDAY: This seems to be running pretty often, should we slow it down???
wait( delay(0, TaskUpdateStorage) );
wait( delay(0, TaskPriority::UpdateStorage) );

// Now that the changes we made to persistentData are durable, erase the data we moved from memory and the queue, increase bytesDurable accordingly, and update persistentDataDurableVersion.

@@ -634,22 +634,22 @@ ACTOR Future<Void> updatePersistentData( TLogData* self, Reference<LogData> logD
for(tagLocality = 0; tagLocality < logData->tag_data.size(); tagLocality++) {
for(tagId = 0; tagId < logData->tag_data[tagLocality].size(); tagId++) {
if(logData->tag_data[tagLocality][tagId]) {
wait(logData->tag_data[tagLocality][tagId]->eraseMessagesBefore( newPersistentDataVersion+1, self, logData, TaskUpdateStorage ));
wait(yield(TaskUpdateStorage));
wait(logData->tag_data[tagLocality][tagId]->eraseMessagesBefore( newPersistentDataVersion+1, self, logData, TaskPriority::UpdateStorage ));
wait(yield(TaskPriority::UpdateStorage));
}
}
}

logData->version_sizes.erase(logData->version_sizes.begin(), logData->version_sizes.lower_bound(logData->persistentDataDurableVersion));

wait(yield(TaskUpdateStorage));
wait(yield(TaskPriority::UpdateStorage));

while(!logData->messageBlocks.empty() && logData->messageBlocks.front().first <= newPersistentDataVersion) {
int64_t bytesErased = int64_t(logData->messageBlocks.front().second.size()) * SERVER_KNOBS->TLOG_MESSAGE_BLOCK_OVERHEAD_FACTOR;
logData->bytesDurable += bytesErased;
self->bytesDurable += bytesErased;
logData->messageBlocks.pop_front();
wait(yield(TaskUpdateStorage));
wait(yield(TaskPriority::UpdateStorage));
}

if(logData->bytesDurable.getValue() > logData->bytesInput.getValue() || self->bytesDurable > self->bytesInput) {

@@ -674,7 +674,7 @@ ACTOR Future<Void> updateStorage( TLogData* self ) {
}

if(!self->queueOrder.size()) {
wait( delay(BUGGIFY ? SERVER_KNOBS->BUGGIFY_TLOG_STORAGE_MIN_UPDATE_INTERVAL : SERVER_KNOBS->TLOG_STORAGE_MIN_UPDATE_INTERVAL, TaskUpdateStorage) );
wait( delay(BUGGIFY ? SERVER_KNOBS->BUGGIFY_TLOG_STORAGE_MIN_UPDATE_INTERVAL : SERVER_KNOBS->TLOG_STORAGE_MIN_UPDATE_INTERVAL, TaskPriority::UpdateStorage) );
return Void();
}

@@ -698,7 +698,7 @@ ACTOR Future<Void> updateStorage( TLogData* self ) {
}

wait( logData->queueCommittedVersion.whenAtLeast( nextVersion ) );
wait( delay(0, TaskUpdateStorage) );
wait( delay(0, TaskPriority::UpdateStorage) );

//TraceEvent("TlogUpdatePersist", self->dbgid).detail("LogId", logData->logId).detail("NextVersion", nextVersion).detail("Version", logData->version.get()).detail("PersistentDataDurableVer", logData->persistentDataDurableVersion).detail("QueueCommitVer", logData->queueCommittedVersion.get()).detail("PersistDataVer", logData->persistentDataVersion);
if (nextVersion > logData->persistentDataVersion) {

@@ -707,7 +707,7 @@ ACTOR Future<Void> updateStorage( TLogData* self ) {
wait( updatePersistentData(self, logData, nextVersion) );
commitLockReleaser.release();
} else {
wait( delay(BUGGIFY ? SERVER_KNOBS->BUGGIFY_TLOG_STORAGE_MIN_UPDATE_INTERVAL : SERVER_KNOBS->TLOG_STORAGE_MIN_UPDATE_INTERVAL, TaskUpdateStorage) );
wait( delay(BUGGIFY ? SERVER_KNOBS->BUGGIFY_TLOG_STORAGE_MIN_UPDATE_INTERVAL : SERVER_KNOBS->TLOG_STORAGE_MIN_UPDATE_INTERVAL, TaskPriority::UpdateStorage) );
}

if( logData->removed.isReady() ) {

@@ -718,9 +718,9 @@ ACTOR Future<Void> updateStorage( TLogData* self ) {
if(logData->persistentDataDurableVersion == logData->version.get()) {
self->queueOrder.pop_front();
}
wait( delay(0.0, TaskUpdateStorage) );
wait( delay(0.0, TaskPriority::UpdateStorage) );
} else {
wait( delay(BUGGIFY ? SERVER_KNOBS->BUGGIFY_TLOG_STORAGE_MIN_UPDATE_INTERVAL : SERVER_KNOBS->TLOG_STORAGE_MIN_UPDATE_INTERVAL, TaskUpdateStorage) );
wait( delay(BUGGIFY ? SERVER_KNOBS->BUGGIFY_TLOG_STORAGE_MIN_UPDATE_INTERVAL : SERVER_KNOBS->TLOG_STORAGE_MIN_UPDATE_INTERVAL, TaskPriority::UpdateStorage) );
}
}
else if(logData->initialized) {

@@ -741,7 +741,7 @@ ACTOR Future<Void> updateStorage( TLogData* self ) {
//TraceEvent("UpdateStorageVer", logData->logId).detail("NextVersion", nextVersion).detail("PersistentDataVersion", logData->persistentDataVersion).detail("TotalSize", totalSize);

wait( logData->queueCommittedVersion.whenAtLeast( nextVersion ) );
wait( delay(0, TaskUpdateStorage) );
wait( delay(0, TaskPriority::UpdateStorage) );

if (nextVersion > logData->persistentDataVersion) {
wait( self->persistentDataCommitLock.take() );

@@ -751,21 +751,21 @@ ACTOR Future<Void> updateStorage( TLogData* self ) {
}

if( totalSize < SERVER_KNOBS->UPDATE_STORAGE_BYTE_LIMIT ) {
wait( delay(BUGGIFY ? SERVER_KNOBS->BUGGIFY_TLOG_STORAGE_MIN_UPDATE_INTERVAL : SERVER_KNOBS->TLOG_STORAGE_MIN_UPDATE_INTERVAL, TaskUpdateStorage) );
wait( delay(BUGGIFY ? SERVER_KNOBS->BUGGIFY_TLOG_STORAGE_MIN_UPDATE_INTERVAL : SERVER_KNOBS->TLOG_STORAGE_MIN_UPDATE_INTERVAL, TaskPriority::UpdateStorage) );
}
else {
//recovery wants to commit to persistant data when updatePersistentData is not active, this delay ensures that immediately after
//updatePersist returns another one has not been started yet.
wait( delay(0.0, TaskUpdateStorage) );
wait( delay(0.0, TaskPriority::UpdateStorage) );
}
} else {
wait( delay(BUGGIFY ? SERVER_KNOBS->BUGGIFY_TLOG_STORAGE_MIN_UPDATE_INTERVAL : SERVER_KNOBS->TLOG_STORAGE_MIN_UPDATE_INTERVAL, TaskUpdateStorage) );
wait( delay(BUGGIFY ? SERVER_KNOBS->BUGGIFY_TLOG_STORAGE_MIN_UPDATE_INTERVAL : SERVER_KNOBS->TLOG_STORAGE_MIN_UPDATE_INTERVAL, TaskPriority::UpdateStorage) );
}
return Void();
}

ACTOR Future<Void> updateStorageLoop( TLogData* self ) {
wait(delay(0, TaskUpdateStorage));
wait(delay(0, TaskPriority::UpdateStorage));

loop {
wait( updateStorage(self) );

@@ -943,7 +943,7 @@ ACTOR Future<Void> tLogPopCore( TLogData* self, Tag inputTag, Version to, Refere
}

if (upTo > logData->persistentDataDurableVersion)
wait(tagData->eraseMessagesBefore(upTo, self, logData, TaskTLogPop));
wait(tagData->eraseMessagesBefore(upTo, self, logData, TaskPriority::TLogPop));
//TraceEvent("TLogPop", self->dbgid).detail("Tag", tag.toString()).detail("To", upTo);
}
return Void();

@@ -1059,7 +1059,7 @@ ACTOR Future<Void> tLogPeekMessages( TLogData* self, TLogPeekRequest req, Refere
if( req.tag.locality == tagLocalityLogRouter ) {
wait( self->concurrentLogRouterReads.take() );
state FlowLock::Releaser globalReleaser(self->concurrentLogRouterReads);
wait( delay(0.0, TaskLowPriority) );
wait( delay(0.0, TaskPriority::Low) );
}

if( req.begin <= logData->persistentDataDurableVersion && req.tag != txsTag) {

@@ -1068,7 +1068,7 @@ ACTOR Future<Void> tLogPeekMessages( TLogData* self, TLogPeekRequest req, Refere
// slightly faster over keeping the rest of the cluster operating normally.
// txsTag is only ever peeked on recovery, and we would still wish to prioritize requests
// that impact recovery duration.
wait(delay(0, TaskTLogSpilledPeekReply));
wait(delay(0, TaskPriority::TLogSpilledPeekReply));
}

Version poppedVer = poppedVersion(logData, req.tag);

@@ -1173,7 +1173,7 @@ ACTOR Future<Void> watchDegraded(TLogData* self) {
//This delay is divided into multiple delays to avoid marking the tlog as degraded because of a single SlowTask
state int loopCount = 0;
while(loopCount < SERVER_KNOBS->TLOG_DEGRADED_DELAY_COUNT) {
wait(delay(SERVER_KNOBS->TLOG_DEGRADED_DURATION/SERVER_KNOBS->TLOG_DEGRADED_DELAY_COUNT, TaskLowPriority));
wait(delay(SERVER_KNOBS->TLOG_DEGRADED_DURATION/SERVER_KNOBS->TLOG_DEGRADED_DELAY_COUNT, TaskPriority::Low));
loopCount++;
}
TraceEvent(SevWarnAlways, "TLogDegraded", self->dbgid);

@@ -1509,7 +1509,7 @@ ACTOR Future<Void> tLogCommit(
.detail("PersistentDataDurableVersion", logData->persistentDataDurableVersion);
waitStartT = now();
}
wait( delayJittered(.005, TaskTLogCommit) );
wait( delayJittered(.005, TaskPriority::TLogCommit) );
}

// while exec op is being committed, no new transactions will be admitted.

@@ -1849,7 +1849,7 @@ ACTOR Future<Void> pullAsyncData( TLogData* self, Reference<LogData> logData, st
while (!endVersion.present() || logData->version.get() < endVersion.get()) {
loop {
choose {
when(wait( r ? r->getMore(TaskTLogCommit) : Never() ) ) {
when(wait( r ? r->getMore(TaskPriority::TLogCommit) : Never() ) ) {
break;
}
when( wait( dbInfoChange ) ) {

@@ -1872,7 +1872,7 @@ ACTOR Future<Void> pullAsyncData( TLogData* self, Reference<LogData> logData, st
.detail("PersistentDataDurableVersion", logData->persistentDataDurableVersion);
waitStartT = now();
}
wait( delayJittered(.005, TaskTLogCommit) );
wait( delayJittered(.005, TaskPriority::TLogCommit) );
}

state Version ver = 0;

@@ -1912,7 +1912,7 @@ ACTOR Future<Void> pullAsyncData( TLogData* self, Reference<LogData> logData, st

// Notifies the commitQueue actor to commit persistentQueue, and also unblocks tLogPeekMessages actors
logData->version.set( ver );
wait( yield(TaskTLogCommit) );
wait( yield(TaskPriority::TLogCommit) );
}
lastVer = ver;
ver = r->version().version;

@@ -1949,7 +1949,7 @@ ACTOR Future<Void> pullAsyncData( TLogData* self, Reference<LogData> logData, st

// Notifies the commitQueue actor to commit persistentQueue, and also unblocks tLogPeekMessages actors
logData->version.set( ver );
wait( yield(TaskTLogCommit) );
wait( yield(TaskPriority::TLogCommit) );
}
break;
}

@@ -38,7 +38,7 @@ public:
ready = NotifiedVersion(s);
started = false;
}
Future<bool> order( Seq s, int taskID = TaskDefaultYield ) {
Future<bool> order( Seq s, TaskPriority taskID = TaskPriority::DefaultYield ) {
if ( ready.get() < s )
return waitAndOrder( this, s, taskID );
else

@@ -54,7 +54,7 @@ public:
return ready.whenAtLeast(v);
}
private:
ACTOR static Future<bool> waitAndOrder( Orderer<Seq>* self, Seq s, int taskID ) {
ACTOR static Future<bool> waitAndOrder( Orderer<Seq>* self, Seq s, TaskPriority taskID ) {
wait( self->ready.whenAtLeast(s) );
wait( yield( taskID ) || self->shutdown.getFuture() );
return self->dedup(s);

@@ -300,7 +300,7 @@ ACTOR Future<Void> trackEachStorageServer(
ACTOR Future<Void> monitorServerListChange(
Reference<AsyncVar<ServerDBInfo>> dbInfo,
PromiseStream< std::pair<UID, Optional<StorageServerInterface>> > serverChanges) {
state Database db = openDBOnServer(dbInfo, TaskRatekeeper, true, true);
state Database db = openDBOnServer(dbInfo, TaskPriority::Ratekeeper, true, true);
state std::map<UID, StorageServerInterface> oldServers;
state Transaction tr(db);

@@ -629,7 +629,7 @@ void updateRate(RatekeeperData* self, RatekeeperLimits* limits) {
}

ACTOR Future<Void> configurationMonitor(Reference<AsyncVar<ServerDBInfo>> dbInfo, DatabaseConfiguration* conf) {
state Database cx = openDBOnServer(dbInfo, TaskDefaultEndpoint, true, true);
state Database cx = openDBOnServer(dbInfo, TaskPriority::DefaultEndpoint, true, true);
loop {
state ReadYourWritesTransaction tr(cx);

@@ -114,9 +114,9 @@ ACTOR Future<Void> resolveBatch(
}
}

if (check_yield(TaskDefaultEndpoint)) {
wait( delay( 0, TaskLowPriority ) || delay( SERVER_KNOBS->COMMIT_SLEEP_TIME ) ); // FIXME: Is this still right?
g_network->setCurrentTask(TaskDefaultEndpoint);
if (check_yield(TaskPriority::DefaultEndpoint)) {
wait( delay( 0, TaskPriority::Low ) || delay( SERVER_KNOBS->COMMIT_SLEEP_TIME ) ); // FIXME: Is this still right?
g_network->setCurrentTask(TaskPriority::DefaultEndpoint);
}

if (self->version.get() == req.prevVersion) { // Not a duplicate (check relies on no waiting between here and self->version.set() below!)

@@ -44,8 +44,8 @@ struct ResolverInterface {
bool operator != ( ResolverInterface const& r ) const { return id() != r.id(); }
NetworkAddress address() const { return resolve.getEndpoint().getPrimaryAddress(); }
void initEndpoints() {
metrics.getEndpoint( TaskResolutionMetrics );
split.getEndpoint( TaskResolutionMetrics );
metrics.getEndpoint( TaskPriority::ResolutionMetrics );
split.getEndpoint( TaskPriority::ResolutionMetrics );
}

template <class Ar>

@@ -37,7 +37,7 @@ struct RestoreInterface {
NetworkAddress address() const { return test.getEndpoint().getPrimaryAddress(); }

void initEndpoints() {
test.getEndpoint( TaskClusterController );
test.getEndpoint( TaskPriority::ClusterController );
}

template <class Ar>

@@ -215,7 +215,7 @@ ACTOR Future<ISimulator::KillType> simulatedFDBDRebooter(Reference<ClusterConnec
g_simulator.newProcess("Server", ip, port, listenPerProcess, localities, processClass, dataFolder->c_str(),
coordFolder->c_str());
wait(g_simulator.onProcess(process,
TaskDefaultYield)); // Now switch execution to the process on which we will run
TaskPriority::DefaultYield)); // Now switch execution to the process on which we will run
state Future<ISimulator::KillType> onShutdown = process->onShutdown();

try {

@@ -1399,7 +1399,7 @@ ACTOR void setupAndRun(std::string dataFolder, const char *testFile, bool reboot
Standalone<StringRef>(deterministicRandom()->randomUniqueID().toString()),
Optional<Standalone<StringRef>>()),
ProcessClass(ProcessClass::TesterClass, ProcessClass::CommandLineSource), "", ""),
TaskDefaultYield));
TaskPriority::DefaultYield));
Sim2FileSystem::newFileSystem();
FlowTransport::createInstance(true, 1);
if (tlsOptions->enabled()) {

@@ -1809,7 +1809,7 @@ ACTOR Future<JsonBuilderObject> layerStatusFetcher(Database cx, JsonBuilderArray
ACTOR Future<JsonBuilderObject> lockedStatusFetcher(Reference<AsyncVar<struct ServerDBInfo>> db, JsonBuilderArray *messages, std::set<std::string> *incomplete_reasons) {
state JsonBuilderObject statusObj;

state Database cx = openDBOnServer(db, TaskDefaultEndpoint, true, false); // Open a new database connection that isn't lock-aware
state Database cx = openDBOnServer(db, TaskPriority::DefaultEndpoint, true, false); // Open a new database connection that isn't lock-aware
state Transaction tr(cx);
state int timeoutSeconds = 5;
state Future<Void> getTimeout = delay(timeoutSeconds);

@@ -56,11 +56,11 @@ struct TLogInterface {
bool operator == ( TLogInterface const& r ) const { return id() == r.id(); }
NetworkAddress address() const { return peekMessages.getEndpoint().getPrimaryAddress(); }
void initEndpoints() {
getQueuingMetrics.getEndpoint( TaskTLogQueuingMetrics );
popMessages.getEndpoint( TaskTLogPop );
peekMessages.getEndpoint( TaskTLogPeek );
confirmRunning.getEndpoint( TaskTLogConfirmRunning );
commit.getEndpoint( TaskTLogCommit );
getQueuingMetrics.getEndpoint( TaskPriority::TLogQueuingMetrics );
popMessages.getEndpoint( TaskPriority::TLogPop );
peekMessages.getEndpoint( TaskPriority::TLogPeek );
confirmRunning.getEndpoint( TaskPriority::TLogConfirmRunning );
commit.getEndpoint( TaskPriority::TLogCommit );
}

template <class Ar>

@@ -349,7 +349,7 @@ struct TLogData : NonCopyable {
concurrentLogRouterReads(SERVER_KNOBS->CONCURRENT_LOG_ROUTER_READS),
ignorePopRequest(false), ignorePopDeadline(), ignorePopUid(), dataFolder(folder), toBePopped()
{
cx = openDBOnServer(dbInfo, TaskDefaultEndpoint, true, true);
cx = openDBOnServer(dbInfo, TaskPriority::DefaultEndpoint, true, true);
}
};

@@ -379,7 +379,7 @@ struct LogData : NonCopyable, public ReferenceCounted<LogData> {
}

// Erase messages not needed to update *from* versions >= before (thus, messages with toversion <= before)
ACTOR Future<Void> eraseMessagesBefore( TagData *self, Version before, TLogData *tlogData, Reference<LogData> logData, int taskID ) {
ACTOR Future<Void> eraseMessagesBefore( TagData *self, Version before, TLogData *tlogData, Reference<LogData> logData, TaskPriority taskID ) {
while(!self->versionMessages.empty() && self->versionMessages.front().first < before) {
Version version = self->versionMessages.front().first;
std::pair<int,int> &sizes = logData->version_sizes[version];

@@ -408,7 +408,7 @@ struct LogData : NonCopyable, public ReferenceCounted<LogData> {
return Void();
}

Future<Void> eraseMessagesBefore(Version before, TLogData *tlogData, Reference<LogData> logData, int taskID) {
Future<Void> eraseMessagesBefore(Version before, TLogData *tlogData, Reference<LogData> logData, TaskPriority taskID) {
return eraseMessagesBefore(this, before, tlogData, logData, taskID);
}
};

@@ -766,7 +766,7 @@ ACTOR Future<Void> updatePersistentData( TLogData* self, Reference<LogData> logD
for(tagId = 0; tagId < logData->tag_data[tagLocality].size(); tagId++) {
state Reference<LogData::TagData> tagData = logData->tag_data[tagLocality][tagId];
if(tagData) {
wait(tagData->eraseMessagesBefore( tagData->popped, self, logData, TaskUpdateStorage ));
wait(tagData->eraseMessagesBefore( tagData->popped, self, logData, TaskPriority::UpdateStorage ));
state Version currentVersion = 0;
// Clear recently popped versions from persistentData if necessary
updatePersistentPopped( self, logData, tagData );

@@ -819,7 +819,7 @@ ACTOR Future<Void> updatePersistentData( TLogData* self, Reference<LogData> logD
wr << uint32_t(0);
}

Future<Void> f = yield(TaskUpdateStorage);
Future<Void> f = yield(TaskPriority::UpdateStorage);
if(!f.isReady()) {
wait(f);
msg = std::upper_bound(tagData->versionMessages.begin(), tagData->versionMessages.end(), std::make_pair(currentVersion, LengthPrefixedStringRef()), CompareFirst<std::pair<Version, LengthPrefixedStringRef>>());

@@ -832,7 +832,7 @@ ACTOR Future<Void> updatePersistentData( TLogData* self, Reference<LogData> logD
tagData->poppedLocation = std::min(tagData->poppedLocation, firstLocation);
}

wait(yield(TaskUpdateStorage));
wait(yield(TaskPriority::UpdateStorage));
}
}
}

@@ -847,7 +847,7 @@ ACTOR Future<Void> updatePersistentData( TLogData* self, Reference<LogData> logD
logData->persistentDataVersion = newPersistentDataVersion;

wait( self->persistentData->commit() ); // SOMEDAY: This seems to be running pretty often, should we slow it down???
wait( delay(0, TaskUpdateStorage) );
wait( delay(0, TaskPriority::UpdateStorage) );

// Now that the changes we made to persistentData are durable, erase the data we moved from memory and the queue, increase bytesDurable accordingly, and update persistentDataDurableVersion.

@@ -857,22 +857,22 @@ ACTOR Future<Void> updatePersistentData( TLogData* self, Reference<LogData> logD
for(tagLocality = 0; tagLocality < logData->tag_data.size(); tagLocality++) {
for(tagId = 0; tagId < logData->tag_data[tagLocality].size(); tagId++) {
if(logData->tag_data[tagLocality][tagId]) {
wait(logData->tag_data[tagLocality][tagId]->eraseMessagesBefore( newPersistentDataVersion+1, self, logData, TaskUpdateStorage ));
wait(yield(TaskUpdateStorage));
wait(logData->tag_data[tagLocality][tagId]->eraseMessagesBefore( newPersistentDataVersion+1, self, logData, TaskPriority::UpdateStorage ));
wait(yield(TaskPriority::UpdateStorage));
}
}
}

logData->version_sizes.erase(logData->version_sizes.begin(), logData->version_sizes.lower_bound(logData->persistentDataDurableVersion));

wait(yield(TaskUpdateStorage));
wait(yield(TaskPriority::UpdateStorage));

while(!logData->messageBlocks.empty() && logData->messageBlocks.front().first <= newPersistentDataVersion) {
int64_t bytesErased = int64_t(logData->messageBlocks.front().second.size()) * SERVER_KNOBS->TLOG_MESSAGE_BLOCK_OVERHEAD_FACTOR;
logData->bytesDurable += bytesErased;
self->bytesDurable += bytesErased;
logData->messageBlocks.pop_front();
wait(yield(TaskUpdateStorage));
wait(yield(TaskPriority::UpdateStorage));
}

if(logData->bytesDurable.getValue() > logData->bytesInput.getValue() || self->bytesDurable > self->bytesInput) {

@@ -915,7 +915,7 @@ ACTOR Future<Void> updateStorage( TLogData* self ) {
}

if(!self->spillOrder.size()) {
wait( delay(BUGGIFY ? SERVER_KNOBS->BUGGIFY_TLOG_STORAGE_MIN_UPDATE_INTERVAL : SERVER_KNOBS->TLOG_STORAGE_MIN_UPDATE_INTERVAL, TaskUpdateStorage) );
wait( delay(BUGGIFY ? SERVER_KNOBS->BUGGIFY_TLOG_STORAGE_MIN_UPDATE_INTERVAL : SERVER_KNOBS->TLOG_STORAGE_MIN_UPDATE_INTERVAL, TaskPriority::UpdateStorage) );
return Void();
}

@@ -940,7 +940,7 @@ ACTOR Future<Void> updateStorage( TLogData* self ) {
}

wait( logData->queueCommittedVersion.whenAtLeast( nextVersion ) );
wait( delay(0, TaskUpdateStorage) );
wait( delay(0, TaskPriority::UpdateStorage) );

//TraceEvent("TlogUpdatePersist", self->dbgid).detail("LogId", logData->logId).detail("NextVersion", nextVersion).detail("Version", logData->version.get()).detail("PersistentDataDurableVer", logData->persistentDataDurableVersion).detail("QueueCommitVer", logData->queueCommittedVersion.get()).detail("PersistDataVer", logData->persistentDataVersion);
if (nextVersion > logData->persistentDataVersion) {

@@ -953,7 +953,7 @@ ACTOR Future<Void> updateStorage( TLogData* self ) {
}
commitLockReleaser.release();
} else {
wait( delay(BUGGIFY ? SERVER_KNOBS->BUGGIFY_TLOG_STORAGE_MIN_UPDATE_INTERVAL : SERVER_KNOBS->TLOG_STORAGE_MIN_UPDATE_INTERVAL, TaskUpdateStorage) );
wait( delay(BUGGIFY ? SERVER_KNOBS->BUGGIFY_TLOG_STORAGE_MIN_UPDATE_INTERVAL : SERVER_KNOBS->TLOG_STORAGE_MIN_UPDATE_INTERVAL, TaskPriority::UpdateStorage) );
}

if( logData->removed.isReady() ) {

@@ -964,9 +964,9 @@ ACTOR Future<Void> updateStorage( TLogData* self ) {
if(logData->persistentDataDurableVersion == logData->version.get()) {
self->spillOrder.pop_front();
}
wait( delay(0.0, TaskUpdateStorage) );
wait( delay(0.0, TaskPriority::UpdateStorage) );
} else {
wait( delay(BUGGIFY ? SERVER_KNOBS->BUGGIFY_TLOG_STORAGE_MIN_UPDATE_INTERVAL : SERVER_KNOBS->TLOG_STORAGE_MIN_UPDATE_INTERVAL, TaskUpdateStorage) );
wait( delay(BUGGIFY ? SERVER_KNOBS->BUGGIFY_TLOG_STORAGE_MIN_UPDATE_INTERVAL : SERVER_KNOBS->TLOG_STORAGE_MIN_UPDATE_INTERVAL, TaskPriority::UpdateStorage) );
}
}
else if(logData->initialized) {

@@ -988,7 +988,7 @@ ACTOR Future<Void> updateStorage( TLogData* self ) {
//TraceEvent("UpdateStorageVer", logData->logId).detail("NextVersion", nextVersion).detail("PersistentDataVersion", logData->persistentDataVersion).detail("TotalSize", totalSize);

wait( logData->queueCommittedVersion.whenAtLeast( nextVersion ) );
wait( delay(0, TaskUpdateStorage) );
wait( delay(0, TaskPriority::UpdateStorage) );

if (nextVersion > logData->persistentDataVersion) {
wait( self->persistentDataCommitLock.take() );

@@ -1001,21 +1001,21 @@ ACTOR Future<Void> updateStorage( TLogData* self ) {
}

if( totalSize < SERVER_KNOBS->UPDATE_STORAGE_BYTE_LIMIT ) {
wait( delay(BUGGIFY ? SERVER_KNOBS->BUGGIFY_TLOG_STORAGE_MIN_UPDATE_INTERVAL : SERVER_KNOBS->TLOG_STORAGE_MIN_UPDATE_INTERVAL, TaskUpdateStorage) );
wait( delay(BUGGIFY ? SERVER_KNOBS->BUGGIFY_TLOG_STORAGE_MIN_UPDATE_INTERVAL : SERVER_KNOBS->TLOG_STORAGE_MIN_UPDATE_INTERVAL, TaskPriority::UpdateStorage) );
}
else {
//recovery wants to commit to persistant data when updatePersistentData is not active, this delay ensures that immediately after
//updatePersist returns another one has not been started yet.
wait( delay(0.0, TaskUpdateStorage) );
wait( delay(0.0, TaskPriority::UpdateStorage) );
}
} else {
wait( delay(BUGGIFY ? SERVER_KNOBS->BUGGIFY_TLOG_STORAGE_MIN_UPDATE_INTERVAL : SERVER_KNOBS->TLOG_STORAGE_MIN_UPDATE_INTERVAL, TaskUpdateStorage) );
wait( delay(BUGGIFY ? SERVER_KNOBS->BUGGIFY_TLOG_STORAGE_MIN_UPDATE_INTERVAL : SERVER_KNOBS->TLOG_STORAGE_MIN_UPDATE_INTERVAL, TaskPriority::UpdateStorage) );
}
return Void();
}

ACTOR Future<Void> updateStorageLoop( TLogData* self ) {
wait(delay(0, TaskUpdateStorage));
wait(delay(0, TaskPriority::UpdateStorage));

loop {
wait( updateStorage(self) );

@@ -1194,7 +1194,7 @@ ACTOR Future<Void> tLogPopCore( TLogData* self, Tag inputTag, Version to, Refere
}

if (upTo > logData->persistentDataDurableVersion)
wait(tagData->eraseMessagesBefore(upTo, self, logData, TaskTLogPop));
wait(tagData->eraseMessagesBefore(upTo, self, logData, TaskPriority::TLogPop));
//TraceEvent("TLogPop", self->dbgid).detail("Tag", tag.toString()).detail("To", upTo);
}
return Void();

@@ -1346,7 +1346,7 @@ ACTOR Future<Void> tLogPeekMessages( TLogData* self, TLogPeekRequest req, Refere
if( req.tag.locality == tagLocalityLogRouter ) {
wait( self->concurrentLogRouterReads.take() );
state FlowLock::Releaser globalReleaser(self->concurrentLogRouterReads);
wait( delay(0.0, TaskLowPriority) );
wait( delay(0.0, TaskPriority::Low) );
}

if( req.begin <= logData->persistentDataDurableVersion && req.tag != txsTag) {

@@ -1355,7 +1355,7 @@ ACTOR Future<Void> tLogPeekMessages( TLogData* self, TLogPeekRequest req, Refere
// slightly faster over keeping the rest of the cluster operating normally.
// txsTag is only ever peeked on recovery, and we would still wish to prioritize requests
// that impact recovery duration.
wait(delay(0, TaskTLogSpilledPeekReply));
wait(delay(0, TaskPriority::TLogSpilledPeekReply));
}

Version poppedVer = poppedVersion(logData, req.tag);

@@ -1456,7 +1456,7 @@ ACTOR Future<Void> tLogPeekMessages( TLogData* self, TLogPeekRequest req, Refere
if (earlyEnd) break;
}
earlyEnd = earlyEnd || (kvrefs.size() >= SERVER_KNOBS->TLOG_SPILL_REFERENCE_MAX_BATCHES_PER_PEEK+1);
wait( self->peekMemoryLimiter.take(TaskTLogSpilledPeekReply, commitBytes) );
wait( self->peekMemoryLimiter.take(TaskPriority::TLogSpilledPeekReply, commitBytes) );
state FlowLock::Releaser memoryReservation(self->peekMemoryLimiter, commitBytes);
state std::vector<Future<Standalone<StringRef>>> messageReads;
messageReads.reserve( commitLocations.size() );

@@ -1540,7 +1540,7 @@ ACTOR Future<Void> watchDegraded(TLogData* self) {
//This delay is divided into multiple delays to avoid marking the tlog as degraded because of a single SlowTask
state int loopCount = 0;
while(loopCount < SERVER_KNOBS->TLOG_DEGRADED_DELAY_COUNT) {
wait(delay(SERVER_KNOBS->TLOG_DEGRADED_DURATION/SERVER_KNOBS->TLOG_DEGRADED_DELAY_COUNT, TaskLowPriority));
wait(delay(SERVER_KNOBS->TLOG_DEGRADED_DURATION/SERVER_KNOBS->TLOG_DEGRADED_DELAY_COUNT, TaskPriority::Low));
loopCount++;
}
TraceEvent(SevWarnAlways, "TLogDegraded", self->dbgid);

@@ -1876,7 +1876,7 @@ ACTOR Future<Void> tLogCommit(
.detail("PersistentDataDurableVersion", logData->persistentDataDurableVersion);
waitStartT = now();
}
wait( delayJittered(.005, TaskTLogCommit) );
wait( delayJittered(.005, TaskPriority::TLogCommit) );
}

// while exec op is being committed, no new transactions will be admitted.

@@ -2223,7 +2223,7 @@ ACTOR Future<Void> pullAsyncData( TLogData* self, Reference<LogData> logData, st
while (!endVersion.present() || logData->version.get() < endVersion.get()) {
loop {
choose {
when(wait( r ? r->getMore(TaskTLogCommit) : Never() ) ) {
when(wait( r ? r->getMore(TaskPriority::TLogCommit) : Never() ) ) {
break;
}
when( wait( dbInfoChange ) ) {

@@ -2246,7 +2246,7 @@ ACTOR Future<Void> pullAsyncData( TLogData* self, Reference<LogData> logData, st
.detail("PersistentDataDurableVersion", logData->persistentDataDurableVersion);
waitStartT = now();
}
wait( delayJittered(.005, TaskTLogCommit) );
wait( delayJittered(.005, TaskPriority::TLogCommit) );
}

state Version ver = 0;

@@ -2286,7 +2286,7 @@ ACTOR Future<Void> pullAsyncData( TLogData* self, Reference<LogData> logData, st

// Notifies the commitQueue actor to commit persistentQueue, and also unblocks tLogPeekMessages actors
logData->version.set( ver );
wait( yield(TaskTLogCommit) );
wait( yield(TaskPriority::TLogCommit) );
}
lastVer = ver;
ver = r->version().version;

@@ -2323,7 +2323,7 @@ ACTOR Future<Void> pullAsyncData( TLogData* self, Reference<LogData> logData, st

// Notifies the commitQueue actor to commit persistentQueue, and also unblocks tLogPeekMessages actors
logData->version.set( ver );
wait( yield(TaskTLogCommit) );
wait( yield(TaskPriority::TLogCommit) );
}
break;
}

@@ -431,7 +431,7 @@ struct TagPartitionedLogSystem : ILogSystem, ReferenceCounted<TagPartitionedLogS
vector<Future<Void>> tLogCommitResults;
for(int loc=0; loc< it->logServers.size(); loc++) {
Standalone<StringRef> msg = data.getMessages(location);
allReplies.push_back( it->logServers[loc]->get().interf().commit.getReply( TLogCommitRequest( msg.arena(), prevVersion, version, knownCommittedVersion, minKnownCommittedVersion, msg, data.getHasExecOp(), debugID ), TaskTLogCommitReply ) );
allReplies.push_back( it->logServers[loc]->get().interf().commit.getReply( TLogCommitRequest( msg.arena(), prevVersion, version, knownCommittedVersion, minKnownCommittedVersion, msg, data.getHasExecOp(), debugID ), TaskPriority::TLogCommitReply ) );
Future<Void> commitSuccess = success(allReplies.back());
addActor.get().send(commitSuccess);
tLogCommitResults.push_back(commitSuccess);

@@ -961,7 +961,7 @@ struct TagPartitionedLogSystem : ILogSystem, ReferenceCounted<TagPartitionedLogS
if( t->get().present() ) {
alive.push_back( brokenPromiseToNever(
t->get().interf().confirmRunning.getReply( TLogConfirmRunningRequest(debugID),
TaskTLogConfirmRunningReply ) ) );
TaskPriority::TLogConfirmRunningReply ) ) );
numPresent++;
} else {
alive.push_back( Never() );

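The reply helpers above simply forward the priority value; where code needs the number itself (for example to print it in a trace line or compare it against a plain integer), a scoped enum must be converted explicitly. A small illustrative sketch; the helper and the enumerator values below are hypothetical, not flow's real definitions:

// --- illustrative sketch, not part of the commit ---
#include <cstdio>
#include <type_traits>

enum class TaskPriority : int { TLogConfirmRunningReply = 8530, TLogCommitReply = 8580 }; // made-up values

// Generic helper that recovers the underlying integer, e.g. for trace output.
template <class E>
constexpr auto toUnderlying(E e) {
	return static_cast<std::underlying_type_t<E>>(e);
}

int main() {
	TaskPriority p = TaskPriority::TLogCommitReply;
	std::printf("priority=%d\n", toUnderlying(p)); // explicit conversion required
	// int q = p;                                  // error: no implicit conversion to int
	return 0;
}
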
@@ -713,7 +713,7 @@ static int asyncSleep(sqlite3_vfs *pVfs, int microseconds){
waitFor( delay(FLOW_KNOBS->MAX_BUGGIFIED_DELAY) );
return 0;
}
waitFor( g_network->delay( microseconds*1e-6, TaskDefaultDelay ) || simCancel );
waitFor( g_network->delay( microseconds*1e-6, TaskPriority::DefaultDelay ) || simCancel );
return microseconds;
} catch( Error &e ) {
TraceEvent(SevError, "AsyncSleepError").error(e,true);

@@ -37,7 +37,7 @@ ACTOR Future<Void> waitFailureServer(FutureStream<ReplyPromise<Void>> waitFailur
}
}

- ACTOR Future<Void> waitFailureClient(RequestStream<ReplyPromise<Void>> waitFailure, double reactionTime, double reactionSlope, int taskID){
+ ACTOR Future<Void> waitFailureClient(RequestStream<ReplyPromise<Void>> waitFailure, double reactionTime, double reactionSlope, TaskPriority taskID){
loop {
try {
state double start = now();

@@ -55,7 +55,7 @@ ACTOR Future<Void> waitFailureClient(RequestStream<ReplyPromise<Void>> waitFailu
}
}

- ACTOR Future<Void> waitFailureClientStrict(RequestStream<ReplyPromise<Void>> waitFailure, double failureReactionTime, int taskID){
+ ACTOR Future<Void> waitFailureClientStrict(RequestStream<ReplyPromise<Void>> waitFailure, double failureReactionTime, TaskPriority taskID){
loop {
wait(waitFailureClient(waitFailure, 0, 0, taskID));
wait(delay(failureReactionTime, taskID) || IFailureMonitor::failureMonitor().onStateEqual( waitFailure.getEndpoint(), FailureStatus(false)));

@@ -65,7 +65,7 @@ ACTOR Future<Void> waitFailureClientStrict(RequestStream<ReplyPromise<Void>> wai
}
}

- ACTOR Future<Void> waitFailureTracker(RequestStream<ReplyPromise<Void>> waitFailure, Reference<AsyncVar<bool>> failed, double reactionTime, double reactionSlope, int taskID){
+ ACTOR Future<Void> waitFailureTracker(RequestStream<ReplyPromise<Void>> waitFailure, Reference<AsyncVar<bool>> failed, double reactionTime, double reactionSlope, TaskPriority taskID){
loop {
try {
failed->set( IFailureMonitor::failureMonitor().getState(waitFailure.getEndpoint()).isFailed() );

@@ -26,13 +26,13 @@ Future<Void> waitFailureServer(const FutureStream<ReplyPromise<Void>>& waitFailu
// talks to a wait failure server, returns Void on failure
Future<Void> waitFailureClient(const RequestStream<ReplyPromise<Void>>& waitFailure,
- double const& failureReactionTime=0, double const& failureReactionSlope=0, int const& taskID=TaskDefaultEndpoint);
+ double const& failureReactionTime=0, double const& failureReactionSlope=0, TaskPriority const& taskID=TaskPriority::DefaultEndpoint);

// talks to a wait failure server, returns Void on failure, reaction time is always waited
- Future<Void> waitFailureClientStrict(const RequestStream<ReplyPromise<Void>>& waitFailure, double const& failureReactionTime=0, int const& taskID=TaskDefaultEndpoint);
+ Future<Void> waitFailureClientStrict(const RequestStream<ReplyPromise<Void>>& waitFailure, double const& failureReactionTime=0, TaskPriority const& taskID=TaskPriority::DefaultEndpoint);

// talks to a wait failure server, updates failed to be true or false based on failure status.
Future<Void> waitFailureTracker(const RequestStream<ReplyPromise<Void>>& waitFailure, Reference<AsyncVar<bool>> const& failed,
- double const& failureReactionTime=0, double const& failureReactionSlope=0, int const& taskID=TaskDefaultEndpoint);
+ double const& failureReactionTime=0, double const& failureReactionSlope=0, TaskPriority const& taskID=TaskPriority::DefaultEndpoint);

#endif
#endif
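For reference, a minimal usage sketch of the updated client signature; the stream argument, actor name, and reaction constants below are illustrative placeholders, not code from this patch:

```cpp
// Sketch only: calling the updated waitFailureClient with a named priority.
ACTOR Future<Void> monitorEndpoint( RequestStream<ReplyPromise<Void>> waitFailure ) {
	// The taskID parameter now requires a TaskPriority; a stray integer no longer compiles.
	wait( waitFailureClient( waitFailure, /*reactionTime=*/1.0, /*reactionSlope=*/0.5,
	                         TaskPriority::FailureMonitor ) );
	TraceEvent("EndpointFailed");
	return Void();
}
```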
@@ -392,7 +392,7 @@ void endRole(const Role &role, UID id, std::string reason, bool ok = true, Error
struct ServerDBInfo;

- class Database openDBOnServer( Reference<AsyncVar<ServerDBInfo>> const& db, int taskID = TaskDefaultEndpoint, bool enableLocalityLoadBalance = true, bool lockAware = false );
+ class Database openDBOnServer( Reference<AsyncVar<ServerDBInfo>> const& db, TaskPriority taskID = TaskPriority::DefaultEndpoint, bool enableLocalityLoadBalance = true, bool lockAware = false );
ACTOR Future<Void> extractClusterInterface(Reference<AsyncVar<Optional<struct ClusterControllerFullInterface>>> a,
Reference<AsyncVar<Optional<struct ClusterInterface>>> b);

@@ -493,7 +493,7 @@ Future<Void> startSystemMonitor(std::string dataFolder, Optional<Standalone<Stri
initializeSystemMonitorMachineState(SystemMonitorMachineState(dataFolder, zoneId, machineId, g_network->getLocalAddress().ip));

systemMonitor();
- return recurring( &systemMonitor, 5.0, TaskFlushTrace );
+ return recurring( &systemMonitor, 5.0, TaskPriority::FlushTrace );
}

void testIndexedSet();
@@ -464,7 +464,7 @@ Future<Void> sendMasterRegistration( MasterData* self, LogSystemConfig const& lo
}

ACTOR Future<Void> updateRegistration( Reference<MasterData> self, Reference<ILogSystem> logSystem ) {
- state Database cx = openDBOnServer(self->dbInfo, TaskDefaultEndpoint, true, true);
+ state Database cx = openDBOnServer(self->dbInfo, TaskPriority::DefaultEndpoint, true, true);
state Future<Void> trigger = self->registrationTrigger.onTrigger();
state Future<Void> updateLogsKey;

@@ -1017,12 +1017,12 @@ ACTOR Future<Void> resolutionBalancing(Reference<MasterData> self) {
state CoalescedKeyRangeMap<int> key_resolver;
key_resolver.insert(allKeys, 0);
loop {
- wait(delay(SERVER_KNOBS->MIN_BALANCE_TIME, TaskResolutionMetrics));
+ wait(delay(SERVER_KNOBS->MIN_BALANCE_TIME, TaskPriority::ResolutionMetrics));
while(self->resolverChanges.get().size())
wait(self->resolverChanges.onChange());
state std::vector<Future<int64_t>> futures;
for (auto& p : self->resolvers)
- futures.push_back(brokenPromiseToNever(p.metrics.getReply(ResolutionMetricsRequest(), TaskResolutionMetrics)));
+ futures.push_back(brokenPromiseToNever(p.metrics.getReply(ResolutionMetricsRequest(), TaskPriority::ResolutionMetrics)));
wait( waitForAll(futures) );
state IndexedSet<std::pair<int64_t, int>, NoMetric> metrics;

@@ -1047,7 +1047,7 @@ ACTOR Future<Void> resolutionBalancing(Reference<MasterData> self) {
req.offset = amount;
req.range = range.first;

- ResolutionSplitReply split = wait( brokenPromiseToNever(self->resolvers[metrics.lastItem()->second].split.getReply(req, TaskResolutionMetrics)) );
+ ResolutionSplitReply split = wait( brokenPromiseToNever(self->resolvers[metrics.lastItem()->second].split.getReply(req, TaskPriority::ResolutionMetrics)) );
KeyRangeRef moveRange = range.second ? KeyRangeRef( range.first.begin, split.key ) : KeyRangeRef( split.key, range.first.end );
movedRanges.push_back_deep(movedRanges.arena(), ResolverMoveRef(moveRange, dest));
TraceEvent("MovingResolutionRange").detail("Src", src).detail("Dest", dest).detail("Amount", amount).detail("StartRange", range.first).detail("MoveRange", moveRange).detail("Used", split.used).detail("KeyResolverRanges", key_resolver.size());

@@ -1185,7 +1185,7 @@ ACTOR Future<Void> trackTlogRecovery( Reference<MasterData> self, Reference<Asyn
}

ACTOR Future<Void> configurationMonitor( Reference<MasterData> self ) {
- state Database cx = openDBOnServer(self->dbInfo, TaskDefaultEndpoint, true, true);
+ state Database cx = openDBOnServer(self->dbInfo, TaskPriority::DefaultEndpoint, true, true);
loop {
state ReadYourWritesTransaction tr(cx);
@@ -30,7 +30,7 @@ NetworkTestInterface::NetworkTestInterface( NetworkAddress remote )

NetworkTestInterface::NetworkTestInterface( INetwork* local )
{
- test.makeWellKnownEndpoint( WLTOKEN_NETWORKTEST, TaskDefaultEndpoint );
+ test.makeWellKnownEndpoint( WLTOKEN_NETWORKTEST, TaskPriority::DefaultEndpoint );
}

ACTOR Future<Void> networkTestServer() {
@@ -550,7 +550,7 @@ public:
newestDirtyVersion.insert(allKeys, invalidVersion);
addShard( ShardInfo::newNotAssigned( allKeys ) );

- cx = openDBOnServer(db, TaskDefaultEndpoint, true, true);
+ cx = openDBOnServer(db, TaskPriority::DefaultEndpoint, true, true);
}
//~StorageServer() { fclose(log); }

@@ -828,7 +828,7 @@ ACTOR Future<Void> getValueQ( StorageServer* data, GetValueRequest req ) {

// Active load balancing runs at a very high priority (to obtain accurate queue lengths)
// so we need to downgrade here
- wait( delay(0, TaskDefaultEndpoint) );
+ wait( delay(0, TaskPriority::DefaultEndpoint) );

if( req.debugID.present() )
g_traceBatch.addEvent("GetValueDebug", req.debugID.get().first(), "getValueQ.DoRead"); //.detail("TaskID", g_network->getCurrentTask());
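The read path above shows the downgrade idiom: the request arrives at a very high endpoint priority so queue lengths stay accurate, and `delay(0, ...)` re-enqueues the actor at a lower priority before the expensive part runs. The same pattern appears in getKeyValues and getKey in the next hunks. A sketch of the idea, with a hypothetical handler and helper (only delay() and TaskPriority come from flow):

```cpp
// Sketch only, not code from this patch.
ACTOR Future<Void> handleRead( StorageServer* data, GetValueRequest req ) {
	// Re-queue at a lower priority before doing real work, so genuinely
	// urgent tasks scheduled behind this one can run first.
	wait( delay(0, TaskPriority::DefaultEndpoint) );
	wait( doExpensiveRead(data, req) ); // hypothetical helper
	return Void();
}
```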
@@ -1345,7 +1345,7 @@ ACTOR Future<Void> getKeyValues( StorageServer* data, GetKeyValuesRequest req )

// Active load balancing runs at a very high priority (to obtain accurate queue lengths)
// so we need to downgrade here
- wait( delay(0, TaskDefaultEndpoint) );
+ wait( delay(0, TaskPriority::DefaultEndpoint) );

try {
if( req.debugID.present() )

@@ -1458,7 +1458,7 @@ ACTOR Future<Void> getKey( StorageServer* data, GetKeyRequest req ) {

// Active load balancing runs at a very high priority (to obtain accurate queue lengths)
// so we need to downgrade here
- wait( delay(0, TaskDefaultEndpoint) );
+ wait( delay(0, TaskPriority::DefaultEndpoint) );

try {
state Version version = wait( waitForVersion( data, req.version ) );

@@ -2003,7 +2003,7 @@ ACTOR Future<Void> fetchKeys( StorageServer *data, AddingShard* shard ) {

TraceEvent(SevDebug, "FetchKeysVersionSatisfied", data->thisServerID).detail("FKID", interval.pairID);

- wait( data->fetchKeysParallelismLock.take( TaskDefaultYield, fetchBlockBytes ) );
+ wait( data->fetchKeysParallelismLock.take( TaskPriority::DefaultYield, fetchBlockBytes ) );
state FlowLock::Releaser holdingFKPL( data->fetchKeysParallelismLock, fetchBlockBytes );

state double executeStart = now();

@@ -2590,7 +2590,7 @@ ACTOR Future<Void> update( StorageServer* data, bool* pReceivedUpdate )
}

data->behind = true;
- wait( delayJittered(.005, TaskTLogPeekReply) );
+ wait( delayJittered(.005, TaskPriority::TLogPeekReply) );
}

while( data->byteSampleClearsTooLarge.get() ) {

@@ -2617,7 +2617,7 @@ ACTOR Future<Void> update( StorageServer* data, bool* pReceivedUpdate )
*pReceivedUpdate = true;

start = now();
- wait( data->durableVersionLock.take(TaskTLogPeekReply,1) );
+ wait( data->durableVersionLock.take(TaskPriority::TLogPeekReply,1) );
state FlowLock::Releaser holdingDVL( data->durableVersionLock );
if(now() - start > 0.1)
TraceEvent("SSSlowTakeLock1", data->thisServerID).detailf("From", "%016llx", debug_lastLoadBalanceResultEndpointToken).detail("Duration", now() - start).detail("Version", data->version.get());
@@ -2865,11 +2865,11 @@ ACTOR Future<Void> updateStorage(StorageServer* data) {
if (g_network->isSimulated()) {
double endTime = g_simulator.checkDisabled(format("%s/updateStorage", data->thisServerID.toString().c_str()));
if(endTime > now()) {
- wait(delay(endTime - now(), TaskUpdateStorage));
+ wait(delay(endTime - now(), TaskPriority::UpdateStorage));
}
}
wait( data->desiredOldestVersion.whenAtLeast( data->storageVersion()+1 ) );
- wait( delay(0, TaskUpdateStorage) );
+ wait( delay(0, TaskPriority::UpdateStorage) );

state Promise<Void> durableInProgress;
data->durableInProgress = durableInProgress.getFuture();

@@ -2882,10 +2882,10 @@ ACTOR Future<Void> updateStorage(StorageServer* data) {
state bool done = data->storage.makeVersionMutationsDurable(newOldestVersion, desiredVersion, bytesLeft);
// We want to forget things from these data structures atomically with changing oldestVersion (and "before", since oldestVersion.set() may trigger waiting actors)
// forgetVersionsBeforeAsync visibly forgets immediately (without waiting) but asynchronously frees memory.
- Future<Void> finishedForgetting = data->mutableData().forgetVersionsBeforeAsync( newOldestVersion, TaskUpdateStorage );
+ Future<Void> finishedForgetting = data->mutableData().forgetVersionsBeforeAsync( newOldestVersion, TaskPriority::UpdateStorage );
data->oldestVersion.set( newOldestVersion );
wait( finishedForgetting );
- wait( yield(TaskUpdateStorage) );
+ wait( yield(TaskPriority::UpdateStorage) );
if (done) break;
}

@@ -2916,7 +2916,7 @@ ACTOR Future<Void> updateStorage(StorageServer* data) {
}

durableInProgress.send(Void());
- wait( delay(0, TaskUpdateStorage) ); //Setting durableInProgess could cause the storage server to shut down, so delay to check for cancellation
+ wait( delay(0, TaskPriority::UpdateStorage) ); //Setting durableInProgess could cause the storage server to shut down, so delay to check for cancellation

// Taking and releasing the durableVersionLock ensures that no eager reads both begin before the commit was effective and
// are applied after we change the durable version. Also ensure that we have to lock while calling changeDurableVersion,

@@ -2925,9 +2925,9 @@ ACTOR Future<Void> updateStorage(StorageServer* data) {
data->popVersion( data->durableVersion.get() + 1 );

while (!changeDurableVersion( data, newOldestVersion )) {
- if(g_network->check_yield(TaskUpdateStorage)) {
+ if(g_network->check_yield(TaskPriority::UpdateStorage)) {
data->durableVersionLock.release();
- wait(delay(0, TaskUpdateStorage));
+ wait(delay(0, TaskPriority::UpdateStorage));
wait( data->durableVersionLock.take() );
}
}

@@ -3537,7 +3537,7 @@ ACTOR Future<Void> storageServerCore( StorageServer* self, StorageServerInterfac
}
}
when( GetValueRequest req = waitNext(ssi.getValue.getFuture()) ) {
- // Warning: This code is executed at extremely high priority (TaskLoadBalancedEndpoint), so downgrade before doing real work
+ // Warning: This code is executed at extremely high priority (TaskPriority::LoadBalancedEndpoint), so downgrade before doing real work
if( req.debugID.present() )
g_traceBatch.addEvent("GetValueDebug", req.debugID.get().first(), "storageServer.recieved"); //.detail("TaskID", g_network->getCurrentTask());

@@ -3552,11 +3552,11 @@ ACTOR Future<Void> storageServerCore( StorageServer* self, StorageServerInterfac
actors.add(self->readGuard(req, watchValueQ));
}
when (GetKeyRequest req = waitNext(ssi.getKey.getFuture())) {
- // Warning: This code is executed at extremely high priority (TaskLoadBalancedEndpoint), so downgrade before doing real work
+ // Warning: This code is executed at extremely high priority (TaskPriority::LoadBalancedEndpoint), so downgrade before doing real work
actors.add(self->readGuard(req , getKey));
}
when (GetKeyValuesRequest req = waitNext(ssi.getKeyValues.getFuture()) ) {
- // Warning: This code is executed at extremely high priority (TaskLoadBalancedEndpoint), so downgrade before doing real work
+ // Warning: This code is executed at extremely high priority (TaskPriority::LoadBalancedEndpoint), so downgrade before doing real work
actors.add(self->readGuard(req , getKeyValues));
}
when (GetShardStateRequest req = waitNext(ssi.getShardState.getFuture()) ) {
@@ -75,7 +75,7 @@ ACTOR static Future<Void> extractClientInfo( Reference<AsyncVar<ServerDBInfo>> d
}
}

- Database openDBOnServer( Reference<AsyncVar<ServerDBInfo>> const& db, int taskID, bool enableLocalityLoadBalance, bool lockAware ) {
+ Database openDBOnServer( Reference<AsyncVar<ServerDBInfo>> const& db, TaskPriority taskID, bool enableLocalityLoadBalance, bool lockAware ) {
Reference<AsyncVar<ClientDBInfo>> info( new AsyncVar<ClientDBInfo> );
return DatabaseContext::create( info, extractClientInfo(db, info), enableLocalityLoadBalance ? db->get().myLocality : LocalityData(), enableLocalityLoadBalance, taskID, lockAware );
}

@@ -737,7 +737,7 @@ ACTOR Future<Void> workerServer(
}
} else {
bool lockAware = metricsPrefix.size() && metricsPrefix[0] == '\xff';
- metricsLogger = runMetrics( openDBOnServer( dbInfo, TaskDefaultEndpoint, true, lockAware ), KeyRef(metricsPrefix) );
+ metricsLogger = runMetrics( openDBOnServer( dbInfo, TaskPriority::DefaultEndpoint, true, lockAware ), KeyRef(metricsPrefix) );
}
}

@@ -1169,7 +1169,7 @@ ACTOR Future<Void> workerServer(
}
when( wait( loggingTrigger ) ) {
systemMonitor();
- loggingTrigger = delay( loggingDelay, TaskFlushTrace );
+ loggingTrigger = delay( loggingDelay, TaskPriority::FlushTrace );
}
when(state ExecuteRequest req = waitNext(interf.execReq.getFuture())) {
state ExecCmdValueString execArg(req.execPayload);
@@ -92,12 +92,12 @@ public:
void send( T const& t ) { // Can be called safely from another thread. Call send or sendError at most once.
Promise<Void> signal;
tagAndForward( &promise, t, signal.getFuture() );
- g_network->onMainThread( std::move(signal), g_network->getCurrentTask() | 1 );
+ g_network->onMainThread( std::move(signal), incrementPriority( g_network->getCurrentTask() ) );
}
void sendError( Error const& e ) { // Can be called safely from another thread. Call send or sendError at most once.
Promise<Void> signal;
tagAndForwardError( &promise, e, signal.getFuture() );
- g_network->onMainThread( std::move(signal), g_network->getCurrentTask() | 1 );
+ g_network->onMainThread( std::move(signal), incrementPriority( g_network->getCurrentTask() ) );
}
private:
Promise<T> promise;

@@ -106,4 +106,4 @@ private:
Reference<IThreadPool> createGenericThreadPool();

#endif
#endif
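The `| 1` trick no longer applies once the priority is a scoped enum, which is why the hunk above switches to `incrementPriority`. A self-contained illustration (a stand-in enum and helper, not the flow headers themselves):

```cpp
#include <cstdint>

// Stand-in scoped enum; values mirror a couple from this patch.
enum class TaskPriority : int64_t { DefaultOnMainThread = 7500, DefaultEndpoint = 5000 };

inline TaskPriority incrementPriority(TaskPriority p) {
	return static_cast<TaskPriority>(static_cast<uint64_t>(p) + 1);
}

int main() {
	TaskPriority current = TaskPriority::DefaultEndpoint;
	// TaskPriority nudged = current | 1;            // error: no operator| for a scoped enum
	TaskPriority nudged = incrementPriority(current); // explicit, type-checked bump by one
	return static_cast<int64_t>(nudged) == 5001 ? 0 : 1;
}
```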
@@ -100,9 +100,9 @@ public:

struct OrderedTask {
int64_t priority;
- int taskID;
+ TaskPriority taskID;
Task *task;
- OrderedTask(int64_t priority, int taskID, Task* task) : priority(priority), taskID(taskID), task(task) {}
+ OrderedTask(int64_t priority, TaskPriority taskID, Task* task) : priority(priority), taskID(taskID), task(task) {}
bool operator < (OrderedTask const& rhs) const { return priority < rhs.priority; }
};

@@ -122,12 +122,12 @@ public:

// INetwork interface
virtual double now() { return currentTime; };
- virtual Future<Void> delay( double seconds, int taskId );
- virtual Future<class Void> yield( int taskID );
- virtual bool check_yield(int taskId);
- virtual int getCurrentTask() { return currentTaskID; }
- virtual void setCurrentTask(int taskID ) { priorityMetric = currentTaskID = taskID; }
- virtual void onMainThread( Promise<Void>&& signal, int taskID );
+ virtual Future<Void> delay( double seconds, TaskPriority taskId );
+ virtual Future<class Void> yield( TaskPriority taskID );
+ virtual bool check_yield(TaskPriority taskId);
+ virtual TaskPriority getCurrentTask() { return currentTaskID; }
+ virtual void setCurrentTask(TaskPriority taskID ) { currentTaskID = taskID; priorityMetric = (int64_t)taskID; }
+ virtual void onMainThread( Promise<Void>&& signal, TaskPriority taskID );
virtual void stop() {
if ( thread_network == this )
stopImmediately();

@@ -157,7 +157,7 @@ public:

int64_t tsc_begin, tsc_end;
double taskBegin;
- int currentTaskID;
+ TaskPriority currentTaskID;
uint64_t tasksIssued;
TDMetricCollection tdmetrics;
double currentTime;

@@ -167,7 +167,7 @@ public:
uint64_t numYields;

double lastPriorityTrackTime;
- int lastMinTaskID;
+ TaskPriority lastMinTaskID;
double priorityTimer[NetworkMetrics::PRIORITY_BINS];

std::priority_queue<OrderedTask, std::vector<OrderedTask>> ready;

@@ -175,15 +175,15 @@ public:

struct DelayedTask : OrderedTask {
double at;
- DelayedTask(double at, int64_t priority, int taskID, Task* task) : at(at), OrderedTask(priority, taskID, task) {}
+ DelayedTask(double at, int64_t priority, TaskPriority taskID, Task* task) : at(at), OrderedTask(priority, taskID, task) {}
bool operator < (DelayedTask const& rhs) const { return at > rhs.at; } // Ordering is reversed for priority_queue
};
std::priority_queue<DelayedTask, std::vector<DelayedTask>> timers;

- void checkForSlowTask(int64_t tscBegin, int64_t tscEnd, double duration, int64_t priority);
- bool check_yield(int taskId, bool isRunLoop);
+ void checkForSlowTask(int64_t tscBegin, int64_t tscEnd, double duration, TaskPriority priority);
+ bool check_yield(TaskPriority taskId, bool isRunLoop);
void processThreadReady();
- void trackMinPriority( int minTaskID, double now );
+ void trackMinPriority( TaskPriority minTaskID, double now );
void stopImmediately() {
stopped=true; decltype(ready) _1; ready.swap(_1); decltype(timers) _2; timers.swap(_2);
}
@@ -489,8 +489,8 @@ Net2::Net2(bool useThreadPool, bool useMetrics, bool useObjectSerializer)
stopped(false),
tasksIssued(0),
// Until run() is called, yield() will always yield
- tsc_begin(0), tsc_end(0), taskBegin(0), currentTaskID(TaskDefaultYield),
- lastMinTaskID(0),
+ tsc_begin(0), tsc_end(0), taskBegin(0), currentTaskID(TaskPriority::DefaultYield),
+ lastMinTaskID(TaskPriority::Zero),
numYields(0)
{
TraceEvent("Net2Starting");

@@ -511,7 +511,7 @@ Net2::Net2(bool useThreadPool, bool useMetrics, bool useObjectSerializer)
int priBins[] = { 1, 2050, 3050, 4050, 4950, 5050, 7050, 8050, 10050 };
static_assert( sizeof(priBins) == sizeof(int)*NetworkMetrics::PRIORITY_BINS, "Fix priority bins");
for(int i=0; i<NetworkMetrics::PRIORITY_BINS; i++)
- networkMetrics.priorityBins[i] = priBins[i];
+ networkMetrics.priorityBins[i] = static_cast<TaskPriority>(priBins[i]);
updateNow();

}

@@ -579,7 +579,7 @@ void Net2::run() {
tsc_begin = __rdtsc();
taskBegin = timer_monotonic();
runFunc();
- checkForSlowTask(tsc_begin, __rdtsc(), timer_monotonic() - taskBegin, TaskRunCycleFunction);
+ checkForSlowTask(tsc_begin, __rdtsc(), timer_monotonic() - taskBegin, TaskPriority::RunCycleFunction);
}

double sleepTime = 0;

@@ -607,7 +607,7 @@ void Net2::run() {
if ((now-nnow) > FLOW_KNOBS->SLOW_LOOP_CUTOFF && nondeterministicRandom()->random01() < (now-nnow)*FLOW_KNOBS->SLOW_LOOP_SAMPLING_RATE)
TraceEvent("SomewhatSlowRunLoopTop").detail("Elapsed", now - nnow);

- if (sleepTime) trackMinPriority( 0, now );
+ if (sleepTime) trackMinPriority( TaskPriority::Zero, now );
while (!timers.empty() && timers.top().at < now) {
++countTimers;
ready.push( timers.top() );
@@ -620,12 +620,12 @@ void Net2::run() {
tsc_end = tsc_begin + FLOW_KNOBS->TSC_YIELD_TIME;
taskBegin = timer_monotonic();
numYields = 0;
- int minTaskID = TaskMaxPriority;
+ TaskPriority minTaskID = TaskPriority::Max;

while (!ready.empty()) {
++countTasks;
currentTaskID = ready.top().taskID;
- priorityMetric = currentTaskID;
+ priorityMetric = static_cast<int64_t>(currentTaskID);
minTaskID = std::min(minTaskID, currentTaskID);
Task* task = ready.top().task;
ready.pop();

@@ -638,7 +638,7 @@ void Net2::run() {
TraceEvent(SevError, "TaskError").error(unknown_error());
}

- if (check_yield(TaskMaxPriority, true)) { ++countYields; break; }
+ if (check_yield(TaskPriority::Max, true)) { ++countYields; break; }
}

nnow = timer_monotonic();

@@ -697,10 +697,10 @@ void Net2::run() {
#endif
}

- void Net2::trackMinPriority( int minTaskID, double now ) {
+ void Net2::trackMinPriority( TaskPriority minTaskID, double now ) {
if (minTaskID != lastMinTaskID)
for(int c=0; c<NetworkMetrics::PRIORITY_BINS; c++) {
- int64_t pri = networkMetrics.priorityBins[c];
+ TaskPriority pri = networkMetrics.priorityBins[c];
if (pri >= minTaskID && pri < lastMinTaskID) { // busy -> idle
double busyFor = lastPriorityTrackTime - priorityTimer[c];
networkMetrics.secSquaredPriorityBlocked[c] += busyFor*busyFor;

@@ -723,7 +723,7 @@ void Net2::processThreadReady() {
}
}

- void Net2::checkForSlowTask(int64_t tscBegin, int64_t tscEnd, double duration, int64_t priority) {
+ void Net2::checkForSlowTask(int64_t tscBegin, int64_t tscEnd, double duration, TaskPriority priority) {
int64_t elapsed = tscEnd-tscBegin;
if (elapsed > FLOW_KNOBS->TSC_YIELD_TIME && tscBegin > 0) {
int i = std::min<double>(NetworkMetrics::SLOW_EVENT_BINS-1, log( elapsed/1e6 ) / log(2.));

@@ -734,7 +734,7 @@ void Net2::checkForSlowTask(int64_t tscBegin, int64_t tscEnd, double duration, i

slowTaskMetric->clocks = elapsed;
slowTaskMetric->duration = (int64_t)(duration*1e9);
- slowTaskMetric->priority = priority;
+ slowTaskMetric->priority = static_cast<int64_t>(priority);
slowTaskMetric->numYields = numYields;
slowTaskMetric->log();

@@ -748,7 +748,7 @@ void Net2::checkForSlowTask(int64_t tscBegin, int64_t tscEnd, double duration, i
}
}

- bool Net2::check_yield( int taskID, bool isRunLoop ) {
+ bool Net2::check_yield( TaskPriority taskID, bool isRunLoop ) {
if(!isRunLoop && numYields > 0) {
++numYields;
return true;

@@ -761,8 +761,8 @@ bool Net2::check_yield( int taskID, bool isRunLoop ) {

processThreadReady();

- if (taskID == TaskDefaultYield) taskID = currentTaskID;
- if (!ready.empty() && ready.top().priority > (int64_t(taskID)<<32)) {
+ if (taskID == TaskPriority::DefaultYield) taskID = currentTaskID;
+ if (!ready.empty() && ready.top().priority > int64_t(taskID)<<32) {
return true;
}
@@ -787,13 +787,13 @@ bool Net2::check_yield( int taskID, bool isRunLoop ) {
return false;
}

- bool Net2::check_yield( int taskID ) {
+ bool Net2::check_yield( TaskPriority taskID ) {
return check_yield(taskID, false);
}

- Future<class Void> Net2::yield( int taskID ) {
+ Future<class Void> Net2::yield( TaskPriority taskID ) {
++countYieldCalls;
- if (taskID == TaskDefaultYield) taskID = currentTaskID;
+ if (taskID == TaskPriority::DefaultYield) taskID = currentTaskID;
if (check_yield(taskID, false)) {
++countYieldCallsTrue;
return delay(0, taskID);

@@ -802,7 +802,7 @@ Future<class Void> Net2::yield( int taskID ) {
return Void();
}

- Future<Void> Net2::delay( double seconds, int taskId ) {
+ Future<Void> Net2::delay( double seconds, TaskPriority taskId ) {
if (seconds <= 0.) {
PromiseTask* t = new PromiseTask;
this->ready.push( OrderedTask( (int64_t(taskId)<<32)-(++tasksIssued), taskId, t) );

@@ -817,7 +817,7 @@ Future<Void> Net2::delay( double seconds, int taskId ) {
return t->promise.getFuture();
}

- void Net2::onMainThread(Promise<Void>&& signal, int taskID) {
+ void Net2::onMainThread(Promise<Void>&& signal, TaskPriority taskID) {
if (stopped) return;
PromiseTask* p = new PromiseTask( std::move(signal) );
int64_t priority = int64_t(taskID)<<32;
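The ordering key built in `delay` and `onMainThread` above puts the TaskPriority in the upper 32 bits and subtracts a running issue counter in the lower bits, so higher priorities run first and tasks of equal priority stay FIFO. A self-contained illustration of that ordering (not the Net2 implementation):

```cpp
#include <cstdint>
#include <cstdio>
#include <queue>

enum class TaskPriority : int64_t { DefaultYield = 7000, DefaultEndpoint = 5000 };

struct OrderedTask {
	int64_t key; // priority << 32, minus issue counter
	int id;
	bool operator<(const OrderedTask& rhs) const { return key < rhs.key; } // max-heap
};

int main() {
	uint64_t tasksIssued = 0;
	auto makeKey = [&](TaskPriority p) {
		return (static_cast<int64_t>(p) << 32) - static_cast<int64_t>(++tasksIssued);
	};
	std::priority_queue<OrderedTask> ready;
	ready.push({ makeKey(TaskPriority::DefaultEndpoint), 1 });
	ready.push({ makeKey(TaskPriority::DefaultYield), 2 });    // higher priority, runs first
	ready.push({ makeKey(TaskPriority::DefaultEndpoint), 3 }); // same priority as task 1, runs after it
	while (!ready.empty()) { printf("task %d\n", ready.top().id); ready.pop(); } // 2, 1, 3
	return 0;
}
```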
@@ -248,7 +248,7 @@ struct Profiler {
outOffset += self->environmentInfoWriter.getLength();

loop {
- wait( self->network->delay(1.0, TaskMinPriority) || self->network->delay(2.0, TaskMaxPriority) );
+ wait( self->network->delay(1.0, TaskPriority::Min) || self->network->delay(2.0, TaskPriority::Max) );

self->enableSignal(false);
std::swap( self->output_buffer, otherBuffer );
@@ -35,11 +35,11 @@
// void onMainThreadVoid( F f ) {
// Promise<Void> signal;
// doOnMainThreadVoid( signal.getFuture(), f );
- // g_network->onMainThread( std::move(signal), TaskDefaultOnMainThread );
+ // g_network->onMainThread( std::move(signal), TaskPriority::DefaultOnMainThread );
// }

template <class F>
- void onMainThreadVoid( F f, Error* err, int taskID = TaskDefaultOnMainThread ) {
+ void onMainThreadVoid( F f, Error* err, TaskPriority taskID = TaskPriority::DefaultOnMainThread ) {
Promise<Void> signal;
doOnMainThreadVoid( signal.getFuture(), f, err );
g_network->onMainThread( std::move(signal), taskID );

@@ -585,7 +585,7 @@ template <class F> ThreadFuture< decltype(fake<F>()().getValue()) > onMainThread
returnValue->addref(); // For the ThreadFuture we return
Future<Void> cancelFuture = doOnMainThread<decltype(fake<F>()().getValue()), F>( signal.getFuture(), f, returnValue );
returnValue->setCancel( std::move(cancelFuture) );
- g_network->onMainThread( std::move(signal), TaskDefaultOnMainThread );
+ g_network->onMainThread( std::move(signal), TaskPriority::DefaultOnMainThread );
return ThreadFuture<decltype(fake<F>()().getValue())>( returnValue );
}

@@ -630,7 +630,7 @@ void openTraceFile(const NetworkAddress& na, uint64_t rollsize, uint64_t maxLogs
std::string baseName = format("%s.%s.%d", baseOfBase.c_str(), ip.c_str(), na.port);
g_traceLog.open( directory, baseName, logGroup, format("%lld", time(NULL)), rollsize, maxLogsSize, !g_network->isSimulated() ? na : Optional<NetworkAddress>());

- uncancellable(recurring(&flushTraceFile, FLOW_KNOBS->TRACE_FLUSH_INTERVAL, TaskFlushTrace));
+ uncancellable(recurring(&flushTraceFile, FLOW_KNOBS->TRACE_FLUSH_INTERVAL, TaskPriority::FlushTrace));
g_traceBatch.dump();
}
flow/flow.h
@@ -817,7 +817,7 @@ public:
return getReplyPromise(value).getFuture();
}
template <class X>
- Future<REPLY_TYPE(X)> getReply(const X& value, int taskID) const {
+ Future<REPLY_TYPE(X)> getReply(const X& value, TaskPriority taskID) const {
setReplyPriority(value, taskID);
return getReplyPromise(value).getFuture();
}

@@ -827,7 +827,7 @@ public:
return getReply(Promise<X>());
}
template <class X>
- Future<X> getReplyWithTaskID(int taskID) const {
+ Future<X> getReplyWithTaskID(TaskPriority taskID) const {
Promise<X> reply;
reply.getEndpoint(taskID);
return getReply(reply);

@@ -908,11 +908,11 @@ struct ActorSingleCallback : SingleCallback<ValueType> {
}
};
inline double now() { return g_network->now(); }
- inline Future<Void> delay(double seconds, int taskID = TaskDefaultDelay) { return g_network->delay(seconds, taskID); }
- inline Future<Void> delayUntil(double time, int taskID = TaskDefaultDelay) { return g_network->delay(std::max(0.0, time - g_network->now()), taskID); }
- inline Future<Void> delayJittered(double seconds, int taskID = TaskDefaultDelay) { return g_network->delay(seconds*(FLOW_KNOBS->DELAY_JITTER_OFFSET + FLOW_KNOBS->DELAY_JITTER_RANGE*deterministicRandom()->random01()), taskID); }
- inline Future<Void> yield(int taskID = TaskDefaultYield) { return g_network->yield(taskID); }
- inline bool check_yield(int taskID = TaskDefaultYield) { return g_network->check_yield(taskID); }
+ inline Future<Void> delay(double seconds, TaskPriority taskID = TaskPriority::DefaultDelay) { return g_network->delay(seconds, taskID); }
+ inline Future<Void> delayUntil(double time, TaskPriority taskID = TaskPriority::DefaultDelay) { return g_network->delay(std::max(0.0, time - g_network->now()), taskID); }
+ inline Future<Void> delayJittered(double seconds, TaskPriority taskID = TaskPriority::DefaultDelay) { return g_network->delay(seconds*(FLOW_KNOBS->DELAY_JITTER_OFFSET + FLOW_KNOBS->DELAY_JITTER_RANGE*deterministicRandom()->random01()), taskID); }
+ inline Future<Void> yield(TaskPriority taskID = TaskPriority::DefaultYield) { return g_network->yield(taskID); }
+ inline bool check_yield(TaskPriority taskID = TaskPriority::DefaultYield) { return g_network->check_yield(taskID); }

#include "flow/genericactors.actor.h"
#endif
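These free functions are the usual call sites for the priorities above; a brief usage sketch, where the actor name, interval, and `pollSomething` helper are illustrative only:

```cpp
// Sketch only: a periodic background task that names its priority explicitly.
ACTOR Future<Void> backgroundPoller() {
	loop {
		wait( delayJittered( 1.0, TaskPriority::Low ) ); // defaults to DefaultDelay if omitted
		pollSomething(); // hypothetical helper
		wait( yield() ); // DefaultYield resolves to the current task's priority
	}
}
```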
@@ -183,7 +183,7 @@ Future<Void> waitForAllReady( std::vector<Future<T>> results ) {
}

ACTOR template <class T>
- Future<T> timeout( Future<T> what, double time, T timedoutValue, int taskID = TaskDefaultDelay ) {
+ Future<T> timeout( Future<T> what, double time, T timedoutValue, TaskPriority taskID = TaskPriority::DefaultDelay ) {
Future<Void> end = delay( time, taskID );
choose {
when( T t = wait( what ) ) { return t; }

@@ -201,7 +201,7 @@ Future<Optional<T>> timeout( Future<T> what, double time ) {
}

ACTOR template <class T>
- Future<T> timeoutError( Future<T> what, double time, int taskID = TaskDefaultDelay ) {
+ Future<T> timeoutError( Future<T> what, double time, TaskPriority taskID = TaskPriority::DefaultDelay ) {
Future<Void> end = delay( time, taskID );
choose {
when( T t = wait( what ) ) { return t; }

@@ -210,7 +210,7 @@ Future<T> timeoutError( Future<T> what, double time, int taskID = TaskDefaultDel
}

ACTOR template <class T>
- Future<T> delayed( Future<T> what, double time = 0.0, int taskID = TaskDefaultDelay ) {
+ Future<T> delayed( Future<T> what, double time = 0.0, TaskPriority taskID = TaskPriority::DefaultDelay ) {
try {
state T t = wait( what );
wait( delay( time, taskID ) );

@@ -223,7 +223,7 @@ Future<T> delayed( Future<T> what, double time = 0.0, int taskID = TaskDefaultDe
}

ACTOR template<class Func>
- Future<Void> recurring( Func what, double interval, int taskID = TaskDefaultDelay ) {
+ Future<Void> recurring( Func what, double interval, TaskPriority taskID = TaskPriority::DefaultDelay ) {
loop choose {
when ( wait( delay( interval, taskID ) ) ) { what(); }
}
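A usage sketch for the generic actors above with an explicit priority; `fetchMetrics` is a hypothetical helper returning `Future<int64_t>`, and the durations are arbitrary:

```cpp
// Sketch only, not code from this patch.
ACTOR Future<Void> metricsLoop( Database cx ) {
	loop {
		// Low-priority timeout timer: give up on a slow fetch after 5 seconds.
		int64_t m = wait( timeoutError( fetchMetrics(cx), 5.0, TaskPriority::Low ) );
		TraceEvent("MetricsFetched").detail("Value", m);
		wait( delay( 10.0 ) ); // TaskPriority::DefaultDelay by default
	}
}
```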
@@ -951,7 +951,7 @@ Future<Void> quorum(std::vector<Future<T>> const& results, int n) {
}

ACTOR template <class T>
- Future<Void> smartQuorum( std::vector<Future<T>> results, int required, double extraSeconds, int taskID = TaskDefaultDelay ) {
+ Future<Void> smartQuorum( std::vector<Future<T>> results, int required, double extraSeconds, TaskPriority taskID = TaskPriority::DefaultDelay ) {
if (results.empty() && required == 0) return Void();
wait(quorum(results, required));
choose {

@@ -1259,7 +1259,7 @@ struct FlowLock : NonCopyable, public ReferenceCounted<FlowLock> {
FlowLock() : permits(1), active(0) {}
explicit FlowLock(int64_t permits) : permits(permits), active(0) {}

- Future<Void> take(int taskID = TaskDefaultYield, int64_t amount = 1) {
+ Future<Void> take(TaskPriority taskID = TaskPriority::DefaultYield, int64_t amount = 1) {
if (active + amount <= permits || active == 0) {
active += amount;
return safeYieldActor(this, taskID, amount);

@@ -1298,7 +1298,7 @@ private:
int64_t active;
Promise<Void> broken_on_destruct;

- ACTOR static Future<Void> takeActor(FlowLock* lock, int taskID, int64_t amount) {
+ ACTOR static Future<Void> takeActor(FlowLock* lock, TaskPriority taskID, int64_t amount) {
state std::list<std::pair<Promise<Void>, int64_t>>::iterator it = lock->takers.insert(lock->takers.end(), std::make_pair(Promise<Void>(), amount));

try {

@@ -1330,7 +1330,7 @@ private:
return Void();
}

- ACTOR static Future<Void> safeYieldActor(FlowLock* lock, int taskID, int64_t amount) {
+ ACTOR static Future<Void> safeYieldActor(FlowLock* lock, TaskPriority taskID, int64_t amount) {
try {
choose{
when(wait(yield(taskID))) {}

@@ -1351,7 +1351,7 @@ private:
};

ACTOR template <class T>
- Future<Void> yieldPromiseStream( FutureStream<T> input, PromiseStream<T> output, int taskID = TaskDefaultYield ) {
+ Future<Void> yieldPromiseStream( FutureStream<T> input, PromiseStream<T> output, TaskPriority taskID = TaskPriority::DefaultYield ) {
loop {
T f = waitNext( input );
output.send( f );
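FlowLock::take now names its queueing priority; the typical pattern pairs it with FlowLock::Releaser, as in the fetchKeys hunk earlier. A sketch under that assumption, with `parallelismLock` and `doWork` as hypothetical placeholders:

```cpp
// Sketch only, mirroring the take/Releaser pattern with the new signature.
ACTOR Future<Void> boundedWork( FlowLock* parallelismLock, int64_t bytes ) {
	// Wait until `bytes` units are available, queued at DefaultYield priority.
	wait( parallelismLock->take( TaskPriority::DefaultYield, bytes ) );
	// Releaser returns the units when it goes out of scope, even on error or cancellation.
	state FlowLock::Releaser releaser( *parallelismLock, bytes );
	wait( doWork() ); // hypothetical helper
	return Void();
}
```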
flow/network.h
@@ -31,55 +31,64 @@
#include "flow/IRandom.h"
#include "fdbrpc/crc32c.h"

- enum {
- TaskMaxPriority = 1000000,
- TaskRunCycleFunction = 20000,
- TaskFlushTrace = 10500,
- TaskWriteSocket = 10000,
- TaskPollEIO = 9900,
- TaskDiskIOComplete = 9150,
- TaskLoadBalancedEndpoint = 9000,
- TaskReadSocket = 9000,
- TaskCoordinationReply = 8810,
- TaskCoordination = 8800,
- TaskFailureMonitor = 8700,
- TaskResolutionMetrics = 8700,
- TaskClusterController = 8650,
- TaskProxyStorageRejoin = 8645,
- TaskProxyCommitDispatcher = 8640,
- TaskTLogQueuingMetrics = 8620,
- TaskTLogPop = 8610,
- TaskTLogPeekReply = 8600,
- TaskTLogPeek = 8590,
- TaskTLogCommitReply = 8580,
- TaskTLogCommit = 8570,
- TaskProxyGetRawCommittedVersion = 8565,
- TaskProxyResolverReply = 8560,
- TaskProxyCommitBatcher = 8550,
- TaskProxyCommit = 8540,
- TaskTLogConfirmRunningReply = 8530,
- TaskTLogConfirmRunning = 8520,
- TaskProxyGRVTimer = 8510,
- TaskProxyGetConsistentReadVersion = 8500,
- TaskDefaultPromiseEndpoint = 8000,
- TaskDefaultOnMainThread = 7500,
- TaskDefaultDelay = 7010,
- TaskDefaultYield = 7000,
- TaskDiskRead = 5010,
- TaskDefaultEndpoint = 5000,
- TaskUnknownEndpoint = 4000,
- TaskMoveKeys = 3550,
- TaskDataDistributionLaunch = 3530,
- TaskRatekeeper = 3510,
- TaskDataDistribution = 3500,
- TaskDiskWrite = 3010,
- TaskUpdateStorage = 3000,
- TaskTLogSpilledPeekReply = 2800,
- TaskLowPriority = 2000,
+ enum class TaskPriority {
+ Max = 1000000,
+ RunCycleFunction = 20000,
+ FlushTrace = 10500,
+ WriteSocket = 10000,
+ PollEIO = 9900,
+ DiskIOComplete = 9150,
+ LoadBalancedEndpoint = 9000,
+ ReadSocket = 9000,
+ CoordinationReply = 8810,
+ Coordination = 8800,
+ FailureMonitor = 8700,
+ ResolutionMetrics = 8700,
+ ClusterController = 8650,
+ ProxyStorageRejoin = 8645,
+ ProxyCommitDispatcher = 8640,
+ TLogQueuingMetrics = 8620,
+ TLogPop = 8610,
+ TLogPeekReply = 8600,
+ TLogPeek = 8590,
+ TLogCommitReply = 8580,
+ TLogCommit = 8570,
+ ProxyGetRawCommittedVersion = 8565,
+ ProxyResolverReply = 8560,
+ ProxyCommitBatcher = 8550,
+ ProxyCommit = 8540,
+ TLogConfirmRunningReply = 8530,
+ TLogConfirmRunning = 8520,
+ ProxyGRVTimer = 8510,
+ ProxyGetConsistentReadVersion = 8500,
+ DefaultPromiseEndpoint = 8000,
+ DefaultOnMainThread = 7500,
+ DefaultDelay = 7010,
+ DefaultYield = 7000,
+ DiskRead = 5010,
+ DefaultEndpoint = 5000,
+ UnknownEndpoint = 4000,
+ MoveKeys = 3550,
+ DataDistributionLaunch = 3530,
+ Ratekeeper = 3510,
+ DataDistribution = 3500,
+ DiskWrite = 3010,
+ UpdateStorage = 3000,
+ TLogSpilledPeekReply = 2800,
+ Low = 2000,

- TaskMinPriority = 1000
+ Min = 1000,
+ Zero = 0
};

+ inline TaskPriority incrementPriority(TaskPriority p) {
+ return static_cast<TaskPriority>( static_cast<uint64_t>(p) + 1 );
+ }
+ inline TaskPriority decrementPriority(TaskPriority p) {
+ return static_cast<TaskPriority>( static_cast<uint64_t>(p) - 1 );
+ }

class Void;

template<class T> class Optional;
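Because TaskPriority is now a scoped enum, it no longer converts implicitly to or from integers, so a call site must name a priority explicitly and any conversion has to be spelled out with a cast (as the incrementPriority/decrementPriority helpers above do). A self-contained illustration, using a stand-in enum and a placeholder function rather than the flow headers:

```cpp
#include <cstdint>

enum class TaskPriority : int64_t { DefaultDelay = 7010, DefaultEndpoint = 5000 };

// An API that takes a priority can no longer be fed a bare int by accident.
void scheduleAt(TaskPriority priority) { (void)priority; }

int main() {
	scheduleAt(TaskPriority::DefaultEndpoint);  // OK: caller must name a priority
	// scheduleAt(5000);                        // error: no implicit int -> TaskPriority
	// int raw = TaskPriority::DefaultDelay;    // error: no implicit TaskPriority -> int
	int64_t raw = static_cast<int64_t>(TaskPriority::DefaultDelay); // explicit conversion only
	return raw == 7010 ? 0 : 1;
}
```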
@@ -270,7 +279,7 @@ struct NetworkMetrics {
uint64_t countSlowEvents[SLOW_EVENT_BINS];

enum { PRIORITY_BINS = 9 };
- int priorityBins[ PRIORITY_BINS ];
+ TaskPriority priorityBins[ PRIORITY_BINS ];
double secSquaredPriorityBlocked[PRIORITY_BINS];

double oldestAlternativesFailure;
@@ -372,19 +381,19 @@ public:
// Provides a clock that advances at a similar rate on all connected endpoints
// FIXME: Return a fixed point Time class

- virtual Future<class Void> delay( double seconds, int taskID ) = 0;
+ virtual Future<class Void> delay( double seconds, TaskPriority taskID ) = 0;
// The given future will be set after seconds have elapsed

- virtual Future<class Void> yield( int taskID ) = 0;
+ virtual Future<class Void> yield( TaskPriority taskID ) = 0;
// The given future will be set immediately or after higher-priority tasks have executed

- virtual bool check_yield( int taskID ) = 0;
+ virtual bool check_yield( TaskPriority taskID ) = 0;
// Returns true if a call to yield would result in a delay

- virtual int getCurrentTask() = 0;
+ virtual TaskPriority getCurrentTask() = 0;
// Gets the taskID/priority of the current task

- virtual void setCurrentTask(int taskID ) = 0;
+ virtual void setCurrentTask(TaskPriority taskID ) = 0;
// Sets the taskID/priority of the current task, without yielding

virtual flowGlobalType global(int id) = 0;

@@ -396,7 +405,7 @@ public:
virtual bool isSimulated() const = 0;
// Returns true if this network is a local simulation

- virtual void onMainThread( Promise<Void>&& signal, int taskID ) = 0;
+ virtual void onMainThread( Promise<Void>&& signal, TaskPriority taskID ) = 0;
// Executes signal.send(Void()) on a/the thread belonging to this network

virtual THREAD_HANDLE startThread( THREAD_FUNC_RETURN (*func) (void *), void *arg) = 0;