diff --git a/fdbclient/NativeAPI.actor.cpp b/fdbclient/NativeAPI.actor.cpp
index c5e8ca1a69..56c94f56e3 100644
--- a/fdbclient/NativeAPI.actor.cpp
+++ b/fdbclient/NativeAPI.actor.cpp
@@ -2278,7 +2278,7 @@ ACTOR Future> getExactRange( Database cx, Version ver
 }
 if (!more || locations[shard].first.empty()) {
- TEST(true);
+ TEST(true); // getExactRange (!more || locations[shard].first.empty())
 if(shard == locations.size()-1) {
 const KeyRangeRef& range = locations[shard].first;
 KeyRef begin = reverse ? keys.begin : range.end;
diff --git a/fdbclient/ReadYourWrites.actor.cpp b/fdbclient/ReadYourWrites.actor.cpp
index 9c30932c71..8ad14bab23 100644
--- a/fdbclient/ReadYourWrites.actor.cpp
+++ b/fdbclient/ReadYourWrites.actor.cpp
@@ -1228,7 +1228,7 @@ ACTOR Future> getWorkerInterfaces (Reference
 > ReadYourWritesTransaction::get( const Key& key, bool snapshot ) {
- TEST(true);
+ TEST(true); // ReadYourWritesTransaction::get
 if (getDatabase()->apiVersionAtLeast(630)) {
 if (specialKeys.contains(key)) {
diff --git a/fdbclient/SpecialKeySpace.actor.cpp b/fdbclient/SpecialKeySpace.actor.cpp
index 5abe848be3..ced365bc33 100644
--- a/fdbclient/SpecialKeySpace.actor.cpp
+++ b/fdbclient/SpecialKeySpace.actor.cpp
@@ -243,12 +243,12 @@ ACTOR Future> SpecialKeySpace::getRangeAggregationAct
 // Handle all corner cases like what RYW does
 // return if range inverted
 if (actualBeginOffset >= actualEndOffset && begin.getKey() >= end.getKey()) {
- TEST(true);
+ TEST(true); // inverted range
 return RangeResultRef(false, false);
 }
 // If touches begin or end, return with readToBegin and readThroughEnd flags
 if (begin.getKey() == moduleBoundary.end || end.getKey() == moduleBoundary.begin) {
- TEST(true);
+ TEST(true); // query touches begin or end
 return result;
 }
 state RangeMap::Ranges ranges =
diff --git a/fdbclient/WriteMap.h b/fdbclient/WriteMap.h
index 6c41c3f1fb..da7293154c 100644
--- a/fdbclient/WriteMap.h
+++ b/fdbclient/WriteMap.h
@@ -628,7 +628,7 @@ private:
 bool end_conflict = it.is_conflict_range();
 bool end_unreadable = it.is_unreadable();
- TEST( it.is_conflict_range() != lastConflicted );
+ TEST( it.is_conflict_range() != lastConflicted ); // not last conflicted
 it.tree.clear();
diff --git a/fdbrpc/genericactors.actor.h b/fdbrpc/genericactors.actor.h
index 71062d7e80..d5c39df1a4 100644
--- a/fdbrpc/genericactors.actor.h
+++ b/fdbrpc/genericactors.actor.h
@@ -63,7 +63,7 @@ Future retryBrokenPromise( RequestStream to, Req request,
 throw;
 resetReply( request );
 wait( delayJittered(FLOW_KNOBS->PREVENT_FAST_SPIN_DELAY, taskID) );
- TEST(true); // retryBrokenPromise
+ TEST(true); // retryBrokenPromise with taskID
 }
 }
 }
diff --git a/fdbrpc/sim2.actor.cpp b/fdbrpc/sim2.actor.cpp
index 4af0869121..453db6866e 100644
--- a/fdbrpc/sim2.actor.cpp
+++ b/fdbrpc/sim2.actor.cpp
@@ -556,8 +556,8 @@ private:
 debugFileCheck("SimpleFileRead", self->filename, data, offset, length);
- INJECT_FAULT(io_timeout, "SimpleFile::read");
- INJECT_FAULT(io_error, "SimpleFile::read");
+ INJECT_FAULT(io_timeout, "SimpleFile::read"); // SimpleFile::read io_timeout injected
+ INJECT_FAULT(io_error, "SimpleFile::read"); // SimpleFile::read io_error injected
 return read_bytes;
 }
@@ -594,8 +594,8 @@ private:
 debugFileCheck("SimpleFileWrite", self->filename, (void*)data.begin(), offset, data.size());
- INJECT_FAULT(io_timeout, "SimpleFile::write");
- INJECT_FAULT(io_error, "SimpleFile::write");
+ INJECT_FAULT(io_timeout, "SimpleFile::write"); // SimpleFile::write inject io_timeout
+ INJECT_FAULT(io_error, "SimpleFile::write"); // SimpleFile::write inject io_error
 return Void();
 }
@@ -621,8 +621,8 @@ private:
 if (randLog) fprintf( randLog, "SFT2 %s %s %s\n", self->dbgId.shortString().c_str(), self->filename.c_str(), opId.shortString().c_str());
- INJECT_FAULT( io_timeout, "SimpleFile::truncate" );
- INJECT_FAULT( io_error, "SimpleFile::truncate" );
+ INJECT_FAULT( io_timeout, "SimpleFile::truncate" ); // SimpleFile::truncate inject io_timeout
+ INJECT_FAULT( io_error, "SimpleFile::truncate" ); // SimpleFile::truncate inject io_error
 return Void();
 }
@@ -654,8 +654,8 @@ private:
 if (randLog) fprintf( randLog, "SFC2 %s %s %s\n", self->dbgId.shortString().c_str(), self->filename.c_str(), opId.shortString().c_str());
- INJECT_FAULT( io_timeout, "SimpleFile::sync" );
- INJECT_FAULT( io_error, "SimpleFile::sync" );
+ INJECT_FAULT( io_timeout, "SimpleFile::sync" ); // SimpleFile::sync inject io_timeout
+ INJECT_FAULT( io_error, "SimpleFile::sync" ); // SimpleFile::sync inject io_error
 return Void();
 }
@@ -675,7 +675,7 @@ private:
 if (randLog) fprintf(randLog, "SFS2 %s %s %s %" PRId64 "\n", self->dbgId.shortString().c_str(), self->filename.c_str(), opId.shortString().c_str(), pos);
- INJECT_FAULT( io_error, "SimpleFile::size" );
+ INJECT_FAULT( io_error, "SimpleFile::size" ); // SimpleFile::size inject io_error
 return pos;
 }
@@ -1436,7 +1436,7 @@ public:
 // Check if any processes on machine are rebooting
 if ( processesOnMachine != processesPerMachine ) {
- TEST(true); //Attempted reboot, but the target did not have all of its processes running
+ TEST(true); //Attempted reboot and kill, but the target did not have all of its processes running
 TraceEvent(SevWarn, "AbortedKill").detail("KillType", kt).detail("MachineId", machineId).detail("Reason", "Machine processes does not match number of processes per machine").detail("Processes", processesOnMachine).detail("ProcessesPerMachine", processesPerMachine).backtrace();
 if (ktFinal) *ktFinal = None;
 return false;
@@ -1547,12 +1547,12 @@ public:
 .detail("KilledDC", kt==ktMin);
 TEST(kt != ktMin); // DataCenter kill was rejected by killMachine
- TEST((kt==ktMin) && (kt == RebootAndDelete)); // Resulted in a reboot and delete
- TEST((kt==ktMin) && (kt == Reboot)); // Resulted in a reboot
- TEST((kt==ktMin) && (kt == KillInstantly)); // Resulted in an instant kill
- TEST((kt==ktMin) && (kt == InjectFaults)); // Resulted in a kill by injecting faults
- TEST((kt==ktMin) && (kt != ktOrig)); // Kill request was downgraded
- TEST((kt==ktMin) && (kt == ktOrig)); // Requested kill was done
+ TEST((kt==ktMin) && (kt == RebootAndDelete)); // Datacenter kill resulted in a reboot and delete
+ TEST((kt==ktMin) && (kt == Reboot)); // Datacenter kill resulted in a reboot
+ TEST((kt==ktMin) && (kt == KillInstantly)); // Datacenter kill resulted in an instant kill
+ TEST((kt==ktMin) && (kt == InjectFaults)); // Datacenter kill resulted in a kill by injecting faults
+ TEST((kt==ktMin) && (kt != ktOrig)); // Datacenter kill request was downgraded
+ TEST((kt==ktMin) && (kt == ktOrig)); // Datacenter kill - requested kill was done
 if (ktFinal) *ktFinal = ktMin;
diff --git a/fdbserver/ClusterController.actor.cpp b/fdbserver/ClusterController.actor.cpp
index 5e0f85624b..67f61ce79c 100644
--- a/fdbserver/ClusterController.actor.cpp
+++ b/fdbserver/ClusterController.actor.cpp
@@ -1937,7 +1937,7 @@ ACTOR Future clusterRecruitFromConfiguration( ClusterControllerData* self,
 ACTOR Future clusterRecruitRemoteFromConfiguration( ClusterControllerData* self, RecruitRemoteFromConfigurationRequest req ) {
 // At the moment this doesn't really need to be an actor (it always completes immediately)
- TEST(true); //ClusterController RecruitTLogsRequest
+ TEST(true); //ClusterController RecruitTLogsRequest Remote
 loop {
 try {
 RecruitRemoteFromConfigurationReply rep = self->findRemoteWorkersForConfiguration( req );
diff --git a/fdbserver/CommitProxyServer.actor.cpp b/fdbserver/CommitProxyServer.actor.cpp
index b91ea7302c..eac0f0d4c2 100644
--- a/fdbserver/CommitProxyServer.actor.cpp
+++ b/fdbserver/CommitProxyServer.actor.cpp
@@ -547,7 +547,7 @@ ACTOR Future preresolutionProcessing(CommitBatchContext* self) {
 }
 // Pre-resolution the commits
- TEST(pProxyCommitData->latestLocalCommitBatchResolving.get() < localBatchNumber - 1);
+ TEST(pProxyCommitData->latestLocalCommitBatchResolving.get() < localBatchNumber - 1); // Wait for local batch
 wait(pProxyCommitData->latestLocalCommitBatchResolving.whenAtLeast(localBatchNumber - 1));
 self->releaseDelay = delay( std::min(SERVER_KNOBS->MAX_PROXY_COMPUTE,
diff --git a/fdbserver/CoordinatedState.actor.cpp b/fdbserver/CoordinatedState.actor.cpp
index c08686a1b7..f7bdff2cf5 100644
--- a/fdbserver/CoordinatedState.actor.cpp
+++ b/fdbserver/CoordinatedState.actor.cpp
@@ -239,7 +239,7 @@ struct MovableCoordinatedStateImpl {
 }
 // SOMEDAY: If moveState.mode == MovingFrom, read (without locking) old state and assert that it corresponds with our state and is ReallyTo(coordinators)
 if (moveState.mode == MovableValue::MaybeTo) {
- TEST(true);
+ TEST(true); // Maybe moveto state
 ASSERT( moveState.other.present() );
 wait( self->moveTo( self, &self->cs, ClusterConnectionString( moveState.other.get().toString() ), moveState.value ) );
 }
diff --git a/fdbserver/DataDistribution.actor.cpp b/fdbserver/DataDistribution.actor.cpp
index dd13e434d4..a7c33fb198 100644
--- a/fdbserver/DataDistribution.actor.cpp
+++ b/fdbserver/DataDistribution.actor.cpp
@@ -2410,7 +2410,7 @@ struct DDTeamCollection : ReferenceCounted {
 Reference machineInfo;
 if (machine_info.find(machine_id) == machine_info.end()) {
 // uid is the first storage server process on the machine
- TEST(true);
+ TEST(true); // First storage server process on the machine
 // For each machine, store the first server's localityEntry into machineInfo for later use.
 LocalityEntry localityEntry = machineLocalityMap.add(locality, &server->id);
 machineInfo = makeReference(server, localityEntry);
@@ -3054,7 +3054,7 @@ ACTOR Future machineTeamRemover(DDTeamCollection* self) {
 // in the serverTeams vector in the machine team.
 --teamIndex;
 self->addTeam(team->getServers(), true, true);
- TEST(true);
+ TEST(true); // Removed machine team
 }
 self->doBuildTeams = true;
@@ -3133,7 +3133,7 @@ ACTOR Future serverTeamRemover(DDTeamCollection* self) {
 bool foundTeam = self->removeTeam(st);
 ASSERT(foundTeam == true);
 self->addTeam(st->getServers(), true, true);
- TEST(true);
+ TEST(true); // Marked team as a bad team
 self->doBuildTeams = true;
diff --git a/fdbserver/LogSystemPeekCursor.actor.cpp b/fdbserver/LogSystemPeekCursor.actor.cpp
index 08250cfd5a..0e71bb624c 100644
--- a/fdbserver/LogSystemPeekCursor.actor.cpp
+++ b/fdbserver/LogSystemPeekCursor.actor.cpp
@@ -728,7 +728,7 @@ void ILogSystem::SetPeekCursor::updateMessage(int logIdx, bool usePolicy) {
 c->advanceTo(messageVersion);
 if( start <= messageVersion && messageVersion < c->version() ) {
 advancedPast = true;
- TEST(true); //Merge peek cursor advanced past desired sequence
+ TEST(true); //Merge peek cursor with logIdx advanced past desired sequence
 }
 }
 }
diff --git a/fdbserver/OldTLogServer_4_6.actor.cpp b/fdbserver/OldTLogServer_4_6.actor.cpp
index 237835443a..9805c192de 100644
--- a/fdbserver/OldTLogServer_4_6.actor.cpp
+++ b/fdbserver/OldTLogServer_4_6.actor.cpp
@@ -461,8 +461,8 @@ namespace oldTLog_4_6 {
 state Version stopVersion = logData->version.get();
 TEST(true); // TLog stopped by recovering master
- TEST( logData->stopped );
- TEST( !logData->stopped );
+ TEST( logData->stopped ); // LogData already stopped
+ TEST( !logData->stopped ); // LogData not yet stopped
 TraceEvent("TLogStop", logData->logId).detail("Ver", stopVersion).detail("IsStopped", logData->stopped).detail("QueueCommitted", logData->queueCommittedVersion.get());
@@ -1005,7 +1005,7 @@ namespace oldTLog_4_6 {
 auto& sequenceData = trackerData.sequence_version[sequence+1];
 if(sequenceData.isSet()) {
 if(sequenceData.getFuture().get() != reply.end) {
- TEST(true); //tlog peek second attempt ended at a different version
+ TEST(true); //tlog peek second attempt ended at a different version (2)
 req.reply.sendError(operation_obsolete());
 return Void();
 }
diff --git a/fdbserver/OldTLogServer_6_0.actor.cpp b/fdbserver/OldTLogServer_6_0.actor.cpp
index a3e105e954..eb8d6aa548 100644
--- a/fdbserver/OldTLogServer_6_0.actor.cpp
+++ b/fdbserver/OldTLogServer_6_0.actor.cpp
@@ -589,8 +589,8 @@ ACTOR Future tLogLock( TLogData* self, ReplyPromise< TLogLockResult > repl
 state Version stopVersion = logData->version.get();
 TEST(true); // TLog stopped by recovering master
- TEST( logData->stopped );
- TEST( !logData->stopped );
+ TEST( logData->stopped ); // logData already stopped
+ TEST( !logData->stopped ); // logData not yet stopped
 TraceEvent("TLogStop", logData->logId).detail("Ver", stopVersion).detail("IsStopped", logData->stopped).detail("QueueCommitted", logData->queueCommittedVersion.get());
@@ -1295,7 +1295,7 @@ ACTOR Future tLogPeekMessages( TLogData* self, TLogPeekRequest req, Refere
 if(sequenceData.isSet()) {
 trackerData.duplicatePeeks++;
 if(sequenceData.getFuture().get().first != reply.end) {
- TEST(true); //tlog peek second attempt ended at a different version
+ TEST(true); //tlog peek second attempt ended at a different version (2)
 req.reply.sendError(operation_obsolete());
 return Void();
 }
diff --git a/fdbserver/OldTLogServer_6_2.actor.cpp b/fdbserver/OldTLogServer_6_2.actor.cpp
index cf96f9bc96..a6c84cfd1b 100644
--- a/fdbserver/OldTLogServer_6_2.actor.cpp
+++ b/fdbserver/OldTLogServer_6_2.actor.cpp
@@ -680,8 +680,8 @@ ACTOR Future tLogLock( TLogData* self, ReplyPromise< TLogLockResult > repl
 state Version stopVersion = logData->version.get();
 TEST(true); // TLog stopped by recovering master
- TEST( logData->stopped );
- TEST( !logData->stopped );
+ TEST( logData->stopped ); // logData already stopped
+ TEST( !logData->stopped ); // logData not yet stopped
 TraceEvent("TLogStop", logData->logId).detail("Ver", stopVersion).detail("IsStopped", logData->stopped).detail("QueueCommitted", logData->queueCommittedVersion.get());
@@ -1689,7 +1689,7 @@ ACTOR Future tLogPeekMessages( TLogData* self, TLogPeekRequest req, Refere
 if(sequenceData.isSet()) {
 trackerData.duplicatePeeks++;
 if(sequenceData.getFuture().get().first != reply.end) {
- TEST(true); //tlog peek second attempt ended at a different version
+ TEST(true); //tlog peek second attempt ended at a different version (2)
 req.reply.sendError(operation_obsolete());
 return Void();
 }
diff --git a/fdbserver/SimulatedCluster.actor.cpp b/fdbserver/SimulatedCluster.actor.cpp
index c574398eee..14f5f65438 100644
--- a/fdbserver/SimulatedCluster.actor.cpp
+++ b/fdbserver/SimulatedCluster.actor.cpp
@@ -756,7 +756,7 @@ void SimulationConfig::generateNormalConfig(int minimumReplication, int minimumR
 break;
 }
 case 3: {
- TEST(true); // Simulated cluster using radix-tree storage engine
+ TEST(true); // Simulated cluster using redwood storage engine
 set_config("ssd-redwood-experimental");
 break;
 }
@@ -857,7 +857,7 @@ void SimulationConfig::generateNormalConfig(int minimumReplication, int minimumR
 int satellite_replication_type = deterministicRandom()->randomInt(0,3);
 switch (satellite_replication_type) {
 case 0: {
- TEST( true ); // Simulated cluster using no satellite redundancy mode
+ TEST( true ); // Simulated cluster using no satellite redundancy mode (>4 datacenters)
 break;
 }
 case 1: {
@@ -884,7 +884,7 @@ void SimulationConfig::generateNormalConfig(int minimumReplication, int minimumR
 break;
 }
 case 1: {
- TEST( true ); // Simulated cluster using no satellite redundancy mode
+ TEST( true ); // Simulated cluster using no satellite redundancy mode (<4 datacenters)
 break;
 }
 case 2: {
@@ -1138,8 +1138,8 @@ void setupSimulatedSystem(vector>* systemActors, std::string baseFo
 // Use IPv6 25% of the time
 bool useIPv6 = deterministicRandom()->random01() < 0.25;
- TEST( useIPv6 );
- TEST( !useIPv6 );
+ TEST( useIPv6 ); // Use IPv6
+ TEST( !useIPv6 ); // Use IPv4
 vector coordinatorAddresses;
 if(minimumRegions > 1) {
diff --git a/fdbserver/StorageCache.actor.cpp b/fdbserver/StorageCache.actor.cpp
index cc7bb034bd..b33f1c44f8 100644
--- a/fdbserver/StorageCache.actor.cpp
+++ b/fdbserver/StorageCache.actor.cpp
@@ -1196,7 +1196,7 @@ ACTOR Future fetchKeys( StorageCacheData *data, AddingCacheRange* cacheRan
 lastAvailable = std::max(lastAvailable, r->value());
 if (lastAvailable != invalidVersion && lastAvailable >= data->oldestVersion.get()) {
- TEST(true);
+ TEST(true); // wait for oldest version
 wait( data->oldestVersion.whenAtLeast(lastAvailable+1) );
 }
diff --git a/fdbserver/StorageMetrics.actor.h b/fdbserver/StorageMetrics.actor.h
index 820c2e0ea6..1235b40aba 100644
--- a/fdbserver/StorageMetrics.actor.h
+++ b/fdbserver/StorageMetrics.actor.h
@@ -212,9 +212,9 @@ struct StorageServerMetrics {
 void notify( KeyRef key, StorageMetrics& metrics ) {
 ASSERT (metrics.bytes == 0); // ShardNotifyMetrics
 if (g_network->isSimulated()) {
- TEST(metrics.bytesPerKSecond != 0); // ShardNotifyMetrics
- TEST(metrics.iosPerKSecond != 0); // ShardNotifyMetrics
- TEST(metrics.bytesReadPerKSecond != 0); // ShardNotifyMetrics
+ TEST(metrics.bytesPerKSecond != 0); // ShardNotifyMetrics bytes
+ TEST(metrics.iosPerKSecond != 0); // ShardNotifyMetrics ios
+ TEST(metrics.bytesReadPerKSecond != 0); // ShardNotifyMetrics bytesRead
 }
 double expire = now() + SERVER_KNOBS->STORAGE_METRICS_AVERAGE_INTERVAL;
@@ -232,7 +232,7 @@ struct StorageServerMetrics {
 auto& v = waitMetricsMap[key];
 for(int i=0; iisSimulated()) {
- TEST(true);
+ TEST(true); // shard notify metrics
 }
 // ShardNotifyMetrics
 v[i].send( notifyMetrics );
diff --git a/fdbserver/TLogServer.actor.cpp b/fdbserver/TLogServer.actor.cpp
index 2f2936d678..13ca4608e9 100644
--- a/fdbserver/TLogServer.actor.cpp
+++ b/fdbserver/TLogServer.actor.cpp
@@ -704,8 +704,8 @@ ACTOR Future tLogLock( TLogData* self, ReplyPromise< TLogLockResult > repl
 state Version stopVersion = logData->version.get();
 TEST(true); // TLog stopped by recovering master
- TEST( logData->stopped );
- TEST( !logData->stopped );
+ TEST( logData->stopped ); // logData already stopped
+ TEST( !logData->stopped ); // logData not yet stopped
 TraceEvent("TLogStop", logData->logId).detail("Ver", stopVersion).detail("IsStopped", logData->stopped).detail("QueueCommitted", logData->queueCommittedVersion.get());
@@ -1728,7 +1728,7 @@ ACTOR Future tLogPeekMessages( TLogData* self, TLogPeekRequest req, Refere
 if(sequenceData.isSet()) {
 trackerData.duplicatePeeks++;
 if(sequenceData.getFuture().get().first != reply.end) {
- TEST(true); //tlog peek second attempt ended at a different version
+ TEST(true); //tlog peek second attempt ended at a different version (2)
 req.reply.sendError(operation_obsolete());
 return Void();
 }
diff --git a/fdbserver/storageserver.actor.cpp b/fdbserver/storageserver.actor.cpp
index 6811c38a9f..86f7ef0650 100644
--- a/fdbserver/storageserver.actor.cpp
+++ b/fdbserver/storageserver.actor.cpp
@@ -2275,8 +2275,8 @@ ACTOR Future fetchKeys( StorageServer *data, AddingShard* shard ) {
 splitMutations(data, data->shards, *u);
 }
- TEST( true );
- TEST( shard->updates.size() );
+ TEST( true ); // fetchkeys has more
+ TEST( shard->updates.size() ); // Shard has updates
 ASSERT( otherShard->updates.empty() );
 }
 }
diff --git a/fdbserver/workloads/BackupAndParallelRestoreCorrectness.actor.cpp b/fdbserver/workloads/BackupAndParallelRestoreCorrectness.actor.cpp
index f3f4bac6f5..b340f5efea 100644
--- a/fdbserver/workloads/BackupAndParallelRestoreCorrectness.actor.cpp
+++ b/fdbserver/workloads/BackupAndParallelRestoreCorrectness.actor.cpp
@@ -224,7 +224,7 @@ struct BackupAndParallelRestoreCorrectnessWorkload : TestWorkload {
 // Stop the differential backup, if enabled
 if (stopDifferentialDelay) {
- TEST(!stopDifferentialFuture.isReady()); // Restore starts at specified time
+ TEST(!stopDifferentialFuture.isReady()); // Restore starts at specified time - stopDifferential not ready
 wait(stopDifferentialFuture);
 TraceEvent("BARW_DoBackupWaitToDiscontinue", randomID)
 .detail("Tag", printable(tag))
diff --git a/fdbserver/workloads/BackupCorrectness.actor.cpp b/fdbserver/workloads/BackupCorrectness.actor.cpp
index aab37b77ca..3b340be599 100644
--- a/fdbserver/workloads/BackupCorrectness.actor.cpp
+++ b/fdbserver/workloads/BackupCorrectness.actor.cpp
@@ -233,7 +233,7 @@ struct BackupAndRestoreCorrectnessWorkload : TestWorkload {
 // Stop the differential backup, if enabled
 if (stopDifferentialDelay) {
- TEST(!stopDifferentialFuture.isReady()); //Restore starts at specified time
+ TEST(!stopDifferentialFuture.isReady()); //Restore starts at specified time - stopDifferential not ready
 wait(stopDifferentialFuture);
TraceEvent("BARW_DoBackupWaitToDiscontinue", randomID).detail("Tag", printable(tag)).detail("DifferentialAfter", stopDifferentialDelay); diff --git a/fdbserver/workloads/BackupToDBCorrectness.actor.cpp b/fdbserver/workloads/BackupToDBCorrectness.actor.cpp index f2747c2e25..563c317cf1 100644 --- a/fdbserver/workloads/BackupToDBCorrectness.actor.cpp +++ b/fdbserver/workloads/BackupToDBCorrectness.actor.cpp @@ -254,7 +254,7 @@ struct BackupToDBCorrectnessWorkload : TestWorkload { // Stop the differential backup, if enabled if (stopDifferentialDelay) { - TEST(!stopDifferentialFuture.isReady()); //Restore starts at specified time + TEST(!stopDifferentialFuture.isReady()); //Restore starts at specified time - stopDifferential not ready wait(stopDifferentialFuture); TraceEvent("BARW_DoBackupWaitToDiscontinue", randomID).detail("Tag", printable(tag)).detail("DifferentialAfter", stopDifferentialDelay); diff --git a/fdbserver/workloads/ConfigureDatabase.actor.cpp b/fdbserver/workloads/ConfigureDatabase.actor.cpp index 30c3a398db..a63046bf07 100644 --- a/fdbserver/workloads/ConfigureDatabase.actor.cpp +++ b/fdbserver/workloads/ConfigureDatabase.actor.cpp @@ -131,7 +131,7 @@ std::string generateRegions() { break; } case 1: { - TEST( true ); // Simulated cluster using no satellite redundancy mode + TEST( true ); // Simulated cluster using no satellite redundancy mode (<5 datacenters) break; } case 2: { diff --git a/fdbserver/workloads/DifferentClustersSameRV.actor.cpp b/fdbserver/workloads/DifferentClustersSameRV.actor.cpp index 63cff7dbd5..626a5dda57 100644 --- a/fdbserver/workloads/DifferentClustersSameRV.actor.cpp +++ b/fdbserver/workloads/DifferentClustersSameRV.actor.cpp @@ -212,7 +212,7 @@ struct DifferentClustersSameRVWorkload : TestWorkload { state Future> val2 = tr2.get(self->keyToRead); wait(success(val1) && success(val2)); // We're reading from different db's with the same read version. We can get a different value. 
- TEST(val1.get() != val2.get());
+ TEST(val1.get() != val2.get()); // reading from different dbs with the same version
 } catch (Error& e) {
 wait(tr1.onError(e) && tr2.onError(e));
 }
diff --git a/fdbserver/workloads/Increment.actor.cpp b/fdbserver/workloads/Increment.actor.cpp
index 47ba37e2b7..14f781d5c2 100644
--- a/fdbserver/workloads/Increment.actor.cpp
+++ b/fdbserver/workloads/Increment.actor.cpp
@@ -107,7 +107,7 @@ struct Increment : TestWorkload {
 }
 }
 bool incrementCheckData( const VectorRef& data, Version v, Increment* self ) {
- TEST( self->transactions.getValue() );
+ TEST( self->transactions.getValue() ); // incrementCheckData transaction has value
 if (self->transactions.getValue() && data.size() == 0) {
 TraceEvent(SevError, "TestFailure").detail("Reason", "No successful increments").detail("Before", nodeCount).detail("After", data.size()).detail("Version", v);
 return false;
 }
diff --git a/fdbserver/workloads/SpecialKeySpaceCorrectness.actor.cpp b/fdbserver/workloads/SpecialKeySpaceCorrectness.actor.cpp
index 93c31b0ea5..78c3070011 100644
--- a/fdbserver/workloads/SpecialKeySpaceCorrectness.actor.cpp
+++ b/fdbserver/workloads/SpecialKeySpaceCorrectness.actor.cpp
@@ -109,7 +109,7 @@ struct SpecialKeySpaceCorrectnessWorkload : TestWorkload {
 return;
 }
 f = success(ryw.get(LiteralStringRef("\xff\xff/status/json")));
- TEST(!f.isReady());
+ TEST(!f.isReady()); // status json not ready
 }
 ASSERT(f.isError());
 ASSERT(f.getError().code() == error_code_transaction_cancelled);
@@ -317,7 +317,7 @@ struct SpecialKeySpaceCorrectnessWorkload : TestWorkload {
 wait(success(tx->getRange(
 KeyRangeRef(LiteralStringRef("\xff\xff/transaction/"), LiteralStringRef("\xff\xff/transaction0")),
 CLIENT_KNOBS->TOO_MANY)));
- TEST(true);
+ TEST(true); // read transaction special keyrange
 tx->reset();
 } catch (Error& e) {
 throw;
 }
@@ -341,7 +341,7 @@ struct SpecialKeySpaceCorrectnessWorkload : TestWorkload {
 KeySelector begin = KeySelectorRef(readConflictRangeKeysRange.begin, false, 1);
 KeySelector end = KeySelectorRef(LiteralStringRef("\xff\xff/transaction0"), false, 0);
 wait(success(tx->getRange(begin, end, GetRangeLimits(CLIENT_KNOBS->TOO_MANY))));
- TEST(true);
+ TEST(true); // end key selector inside module range
 tx->reset();
 } catch (Error& e) {
 throw;
 }
diff --git a/fdbserver/workloads/VersionStamp.actor.cpp b/fdbserver/workloads/VersionStamp.actor.cpp
index cb0368c87b..1d9a2cb93b 100644
--- a/fdbserver/workloads/VersionStamp.actor.cpp
+++ b/fdbserver/workloads/VersionStamp.actor.cpp
@@ -253,7 +253,7 @@ struct VersionStampWorkload : TestWorkload {
 if (self->failIfDataLost) {
 ASSERT(result.size() == self->versionStampKey_commit.size());
 } else {
- TEST(result.size() > 0); // Not all data should always be lost.
+ TEST(result.size() > 0); // Not all data should always be lost (2)
 }
 //TraceEvent("VST_Check1").detail("Size", result.size()).detail("VsKeyCommitSize", self->versionStampKey_commit.size());
diff --git a/flow/Platform.actor.cpp b/flow/Platform.actor.cpp
index 16990225ac..265c96743c 100644
--- a/flow/Platform.actor.cpp
+++ b/flow/Platform.actor.cpp
@@ -233,7 +233,7 @@ static double getProcessorTimeGeneric(int who) {
 #endif
 double getProcessorTimeThread() {
- INJECT_FAULT( platform_error, "getProcessorTimeThread" );
+ INJECT_FAULT( platform_error, "getProcessorTimeThread" ); // Get Thread CPU Time failed
 #if defined(_WIN32)
 FILETIME ftCreate, ftExit, ftKernel, ftUser;
 if (!GetThreadTimes(GetCurrentThread(), &ftCreate, &ftExit, &ftKernel, &ftUser)) {
@@ -260,7 +260,7 @@ double getProcessorTimeThread() {
 }
 double getProcessorTimeProcess() {
- INJECT_FAULT( platform_error, "getProcessorTimeProcess" );
+ INJECT_FAULT( platform_error, "getProcessorTimeProcess" ); // Get CPU Process Time failed
 #if defined(_WIN32)
 FILETIME ftCreate, ftExit, ftKernel, ftUser;
 if (!GetProcessTimes(GetCurrentProcess(), &ftCreate, &ftExit, &ftKernel, &ftUser)) {
@@ -584,7 +584,7 @@ Error systemErrorCodeToError() {
 }
 void getDiskBytes(std::string const& directory, int64_t& free, int64_t& total) {
- INJECT_FAULT( platform_error, "getDiskBytes" );
+ INJECT_FAULT( platform_error, "getDiskBytes" ); // Get disk bytes failed
 #if defined(__unixish__)
 #if defined (__linux__) || defined (__FreeBSD__)
 struct statvfs buf;
@@ -634,7 +634,7 @@ void getDiskBytes(std::string const& directory, int64_t& free, int64_t& total) {
 #ifdef __unixish__
 const char* getInterfaceName(const IPAddress& _ip) {
- INJECT_FAULT( platform_error, "getInterfaceName" );
+ INJECT_FAULT( platform_error, "getInterfaceName" ); // Get interface name failed
 static char iname[20];
 struct ifaddrs* interfaces = nullptr;
@@ -680,7 +680,7 @@ const char* getInterfaceName(const IPAddress& _ip) {
 #if defined(__linux__)
 void getNetworkTraffic(const IPAddress& ip, uint64_t& bytesSent, uint64_t& bytesReceived, uint64_t& outSegs, uint64_t& retransSegs) {
- INJECT_FAULT( platform_error, "getNetworkTraffic" ); // Even though this function doesn't throw errors, the equivalents for other platforms do, and since all of our simulation testing is on Linux...
+ INJECT_FAULT( platform_error, "getNetworkTraffic" ); // getNetworkTraffic: Even though this function doesn't throw errors, the equivalents for other platforms do, and since all of our simulation testing is on Linux...
 const char* ifa_name = nullptr;
 try {
 ifa_name = getInterfaceName(ip);
@@ -748,7 +748,7 @@ void getNetworkTraffic(const IPAddress& ip, uint64_t& bytesSent, uint64_t& bytes
 }
 void getMachineLoad(uint64_t& idleTime, uint64_t& totalTime, bool logDetails) {
- INJECT_FAULT( platform_error, "getMachineLoad" ); // Even though this function doesn't throw errors, the equivalents for other platforms do, and since all of our simulation testing is on Linux...
+ INJECT_FAULT( platform_error, "getMachineLoad" ); // getMachineLoad: Even though this function doesn't throw errors, the equivalents for other platforms do, and since all of our simulation testing is on Linux...
 std::ifstream stat_stream("/proc/stat", std::ifstream::in);
 std::string ignore;
@@ -765,7 +765,7 @@ void getMachineLoad(uint64_t& idleTime, uint64_t& totalTime, bool logDetails) {
 }
 void getDiskStatistics(std::string const& directory, uint64_t& currentIOs, uint64_t& busyTicks, uint64_t& reads, uint64_t& writes, uint64_t& writeSectors, uint64_t& readSectors) {
- INJECT_FAULT( platform_error, "getDiskStatistics" );
+ INJECT_FAULT( platform_error, "getDiskStatistics" ); // Getting disk statistics failed
 currentIOs = 0;
 struct stat buf;
@@ -888,7 +888,7 @@ dev_t getDeviceId(std::string path) {
 #if defined(__FreeBSD__)
 void getNetworkTraffic(const IPAddress ip, uint64_t& bytesSent, uint64_t& bytesReceived, uint64_t& outSegs, uint64_t& retransSegs) {
- INJECT_FAULT( platform_error, "getNetworkTraffic" );
+ INJECT_FAULT( platform_error, "getNetworkTraffic" ); // Get Network traffic failed
 const char* ifa_name = nullptr;
 try {
@@ -955,7 +955,7 @@ void getNetworkTraffic(const IPAddress ip, uint64_t& bytesSent, uint64_t& bytesR
 }
 void getMachineLoad(uint64_t& idleTime, uint64_t& totalTime, bool logDetails) {
- INJECT_FAULT( platform_error, "getMachineLoad" );
+ INJECT_FAULT( platform_error, "getMachineLoad" ); // Getting machine load failed
 long cur[CPUSTATES], last[CPUSTATES];
 size_t cur_sz = sizeof cur;
@@ -988,7 +988,7 @@ void getMachineLoad(uint64_t& idleTime, uint64_t& totalTime, bool logDetails) {
 }
 void getDiskStatistics(std::string const& directory, uint64_t& currentIOs, uint64_t& busyTicks, uint64_t& reads, uint64_t& writes, uint64_t& writeSectors, uint64_t& readSectors) {
- INJECT_FAULT( platform_error, "getDiskStatistics" );
+ INJECT_FAULT( platform_error, "getDiskStatistics" ); // getting disk stats failed
 currentIOs = 0;
 busyTicks = 0;
 reads = 0;
@@ -1078,7 +1078,7 @@ dev_t getDeviceId(std::string path) {
 #ifdef __APPLE__
 void getNetworkTraffic(const IPAddress& ip, uint64_t& bytesSent, uint64_t& bytesReceived, uint64_t& outSegs, uint64_t& retransSegs) {
- INJECT_FAULT( platform_error, "getNetworkTraffic" );
+ INJECT_FAULT( platform_error, "getNetworkTraffic" ); // Get network traffic failed (macOS)
 const char* ifa_name = nullptr;
 try {
@@ -1141,7 +1141,7 @@ void getNetworkTraffic(const IPAddress& ip, uint64_t& bytesSent, uint64_t& bytes
 }
 void getMachineLoad(uint64_t& idleTime, uint64_t& totalTime, bool logDetails) {
- INJECT_FAULT( platform_error, "getMachineLoad" );
+ INJECT_FAULT( platform_error, "getMachineLoad" ); // Getting machine load failed (macOS)
 mach_msg_type_number_t count = HOST_CPU_LOAD_INFO_COUNT;
 host_cpu_load_info_data_t r_load;
@@ -1155,7 +1155,7 @@ void getMachineLoad(uint64_t& idleTime, uint64_t& totalTime, bool logDetails) {
 }
 void getDiskStatistics(std::string const& directory, uint64_t& currentIOs, uint64_t& busyTicks, uint64_t& reads, uint64_t& writes, uint64_t& writeSectors, uint64_t& readSectors) {
- INJECT_FAULT( platform_error, "getDiskStatistics" );
+ INJECT_FAULT( platform_error, "getDiskStatistics" ); // Getting disk stats failed (macOS)
 currentIOs = 0;
 busyTicks = 0;
 writeSectors = 0;
@@ -1716,7 +1716,7 @@ void setMemoryQuota( size_t limit ) {
 // ASAN doesn't work with memory quotas: https://github.com/google/sanitizers/wiki/AddressSanitizer#ulimit--v
 return;
 #endif
- INJECT_FAULT( platform_error, "setMemoryQuota" );
+ INJECT_FAULT( platform_error, "setMemoryQuota" ); // setting memory quota failed
 #if defined(_WIN32)
 HANDLE job = CreateJobObject( nullptr, nullptr );
 if (!job) {
@@ -1920,7 +1920,7 @@ void setAffinity(int proc) {
 namespace platform {
 int getRandomSeed() {
- INJECT_FAULT( platform_error, "getRandomSeed" );
+ INJECT_FAULT( platform_error, "getRandomSeed" ); // getting a random seed failed
 int randomSeed;
 int retryCount = 0;
@@ -1963,11 +1963,11 @@ std::string joinPath( std::string const& directory, std::string const& filename
 }
 void renamedFile() {
- INJECT_FAULT( io_error, "renameFile" );
+ INJECT_FAULT( io_error, "renameFile" ); // renaming file failed
 }
 void renameFile( std::string const& fromPath, std::string const& toPath ) {
- INJECT_FAULT( io_error, "renameFile" );
+ INJECT_FAULT( io_error, "renameFile" ); // rename file failed
 #ifdef _WIN32
 if (MoveFile( fromPath.c_str(), toPath.c_str() )) {
 //renamedFile();
@@ -1997,7 +1997,7 @@ void renameFile( std::string const& fromPath, std::string const& toPath ) {
 void atomicReplace( std::string const& path, std::string const& content, bool textmode ) {
 FILE* f = 0;
 try {
- INJECT_FAULT( io_error, "atomicReplace" );
+ INJECT_FAULT( io_error, "atomicReplace" ); // atomic rename failed
 std::string tempfilename = joinPath(parentDirectory(path), deterministicRandom()->randomUniqueID().toString() + ".tmp");
 f = textmode ? fopen( tempfilename.c_str(), "wt" FOPEN_CLOEXEC_MODE ) : fopen(tempfilename.c_str(), "wb");
@@ -2081,7 +2081,7 @@ void atomicReplace( std::string const& path, std::string const& content, bool te
 #error Port me!
 #endif
- INJECT_FAULT( io_error, "atomicReplace" );
+ INJECT_FAULT( io_error, "atomicReplace" ); // io_error after atomic rename
 } catch(Error &e) {
 TraceEvent(SevWarn, "AtomicReplace").error(e).detail("Path", path).GetLastError();
@@ -2091,12 +2091,12 @@ void atomicReplace( std::string const& path, std::string const& content, bool te
 }
 static bool deletedFile() {
- INJECT_FAULT( platform_error, "deleteFile" );
+ INJECT_FAULT( platform_error, "deleteFile" ); // delete file failed
 return true;
 }
 bool deleteFile( std::string const& filename ) {
- INJECT_FAULT( platform_error, "deleteFile" );
+ INJECT_FAULT( platform_error, "deleteFile" ); // file deletion failed
 #ifdef _WIN32
 if (DeleteFile(filename.c_str()))
 return deletedFile();
@@ -2115,12 +2115,14 @@ bool deleteFile( std::string const& filename ) {
 throw e;
 }
-static void createdDirectory() { INJECT_FAULT( platform_error, "createDirectory" ); }
+static void createdDirectory() {
+ INJECT_FAULT( platform_error, "createDirectory" ); // create dir (noargs) failed
+}
 namespace platform {
 bool createDirectory( std::string const& directory ) {
- INJECT_FAULT( platform_error, "createDirectory" );
+ INJECT_FAULT( platform_error, "createDirectory" ); // create dir failed
 #ifdef _WIN32
 if (CreateDirectory( directory.c_str(), nullptr )) {
@@ -2261,7 +2263,7 @@ std::string abspath( std::string const& path, bool resolveLinks, bool mustExist
 }
 // Returns an absolute path canonicalized to use only CANONICAL_PATH_SEPARATOR
- INJECT_FAULT( platform_error, "abspath" );
+ INJECT_FAULT( platform_error, "abspath" ); // abspath failed
 if(!resolveLinks) {
 // TODO: Not resolving symbolic links does not yet behave well on Windows because of drive letters
@@ -2367,7 +2369,7 @@ bool acceptDirectory( FILE_ATTRIBUTE_DATA fileAttributes, std::string const& nam
 ACTOR Future> findFiles( std::string directory, std::string extension, bool directoryOnly, bool async) {
- INJECT_FAULT( platform_error, "findFiles" );
+ INJECT_FAULT( platform_error, "findFiles" ); // findFiles failed (Win32)
 state vector result;
 state int64_t tsc_begin = __rdtsc();
@@ -2417,7 +2419,7 @@ bool acceptDirectory( FILE_ATTRIBUTE_DATA fileAttributes, std::string const& nam
 ACTOR Future> findFiles( std::string directory, std::string extension, bool directoryOnly, bool async) {
- INJECT_FAULT( platform_error, "findFiles" );
+ INJECT_FAULT( platform_error, "findFiles" ); // findFiles failed
 state vector result;
 state int64_t tsc_begin = __rdtsc();
diff --git a/flow/coveragetool/Program.cs b/flow/coveragetool/Program.cs
index d9e17e4d0a..c4f3fe8cbd 100644
--- a/flow/coveragetool/Program.cs
+++ b/flow/coveragetool/Program.cs
@@ -36,6 +36,9 @@ namespace coveragetool
 public string Condition;
 };
+ class ParseException : Exception {
+ }
+
 class Program
 {
 public static int Main(string[] args)
@@ -82,10 +85,14 @@ namespace coveragetool
 .Where( fi=>new FileInfo(fi).LastWriteTimeUtc > outputTime )
 .ToLookup(n=>n);
- cases = cases
+ try {
+ cases = cases
 .Where(c => exists.Contains(c.File) && !changedFiles.Contains(c.File))
 .Concat( changedFiles.SelectMany( f => ParseSource( f.Key ) ) )
 .ToArray();
+ } catch (ParseException) {
+ return 1;
+ }
 if (!quiet) {
 Console.WriteLine(" {0}/{1} files scanned", changedFiles.Count, inputPaths.Length);
@@ -140,18 +147,38 @@ namespace coveragetool
 }
 public static CoverageCase[] ParseSource(string filename)
 {
- var regex = new Regex( @"^([^/]|/[^/])*(TEST|INJECT_FAULT|SHOULD_INJECT_FAULT)[ \t]*\(([^)]*)\)" );
+ var regex = new Regex( @"^([^/]|/[^/])*\s+(TEST|INJECT_FAULT|SHOULD_INJECT_FAULT)[ \t]*\(([^)]*)\)" );
 var lines = File.ReadAllLines(filename);
- return Enumerable.Range(0, lines.Length)
+ var res = Enumerable.Range(0, lines.Length)
 .Where( i=>regex.IsMatch(lines[i]) && !lines[i].StartsWith("#define") )
- .Select( i=>new CoverageCase {
- File = filename,
+ .Select( i=>new CoverageCase {
+ File = filename,
 Line = i+1,
 Comment = FindComment(lines[i]),
 Condition = regex.Match(lines[i]).Groups[3].Value
 } )
 .ToArray();
+ var comments = new Dictionary();
+ bool failed = false;
+ foreach(var coverageCase in res) {
+ if (String.IsNullOrEmpty(coverageCase.Comment) || coverageCase.Comment.Trim() == "") {
+ failed = true;
+ Console.Error.WriteLine(String.Format("Error at {0}:{1}: Empty or missing comment", coverageCase.File, coverageCase.Line));
+ }
+ else if (comments.ContainsKey(coverageCase.Comment)) {
+ failed = true;
+ var prev = comments[coverageCase.Comment];
+ Console.Error.WriteLine(String.Format("Error at {0}:{1}: {2} is not a unique comment", coverageCase.File, coverageCase.Line, coverageCase.Comment));
+ Console.Error.WriteLine(String.Format("\tPreviously seen in {0} at {1}", prev.File, prev.Line));
+ } else {
+ comments.Add(coverageCase.Comment, coverageCase);
+ }
+ }
+ if (failed) {
+ throw new ParseException();
+ }
+ return res;
 }
 public static string FindComment(string line)
 {
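
Note (illustration, not part of the patch above): with the coveragetool change, a TEST or INJECT_FAULT call site whose trailing comment is empty, or whose comment duplicates another comment in the same source file, makes ParseSource throw ParseException and Main return 1. A minimal C++ sketch of the convention the patch enforces, using a stand-in TEST macro and hypothetical comment text rather than anything from the FoundationDB tree:

    // Stand-in for flow's TEST macro so the sketch is self-contained; the real macro
    // records coverage, and coveragetool indexes each call site by its trailing comment.
    #define TEST(condition) (void)(condition)

    void exampleCoveragePoints(bool retried) {
        TEST(retried);  // Example operation was retried (accepted: unique, non-empty comment)
        TEST(!retried); // Example operation succeeded on the first attempt (accepted: unique, non-empty comment)
        // TEST(retried);                                  <- would now be rejected: missing comment
        // TEST(retried); // Example operation was retried <- would now be rejected: duplicate comment in this file
    }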