Merge pull request #4046 from sfc-gh-mpilman/features/unique-test-macros
Make test macros enforce unique comments
commit 3875ac78c8
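For context, FoundationDB's `TEST(condition)` macro marks a simulation coverage case, and the trailing `//` comment is what `coveragetool` extracts as the case's name; this change makes that comment mandatory and unique. A minimal sketch of the call-site convention (the macro body here is illustrative, not the real flow implementation):

```cpp
#include <cstdio>

// Illustrative stand-in for the real TEST macro: the actual implementation
// records simulation coverage, but the shape of a call site is the same.
#define TEST(condition)                                                      \
	do {                                                                     \
		if (condition) {                                                     \
			std::printf("coverage case hit at %s:%d\n", __FILE__, __LINE__); \
		}                                                                    \
	} while (0)

int main() {
	bool more = false;
	// Post-commit style: every coverage case carries a unique, descriptive
	// comment, which coveragetool parses as the case's name.
	TEST(!more); // example workload ran out of data
	return 0;
}
```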
@@ -2278,7 +2278,7 @@ ACTOR Future<Standalone<RangeResultRef>> getExactRange( Database cx, Version ver
 			}
 
 			if (!more || locations[shard].first.empty()) {
-				TEST(true);
+				TEST(true); // getExactrange (!more || locations[shard].first.empty())
 				if(shard == locations.size()-1) {
 					const KeyRangeRef& range = locations[shard].first;
 					KeyRef begin = reverse ? keys.begin : range.end;
@@ -1228,7 +1228,7 @@ ACTOR Future<Standalone<RangeResultRef>> getWorkerInterfaces (Reference<ClusterC
 }
 
 Future< Optional<Value> > ReadYourWritesTransaction::get( const Key& key, bool snapshot ) {
-	TEST(true);
+	TEST(true); // ReadYourWritesTransaction::get
 
 	if (getDatabase()->apiVersionAtLeast(630)) {
 		if (specialKeys.contains(key)) {
@@ -243,12 +243,12 @@ ACTOR Future<Standalone<RangeResultRef>> SpecialKeySpace::getRangeAggregationAct
 	// Handle all corner cases like what RYW does
 	// return if range inverted
 	if (actualBeginOffset >= actualEndOffset && begin.getKey() >= end.getKey()) {
-		TEST(true);
+		TEST(true); // inverted range
 		return RangeResultRef(false, false);
 	}
 	// If touches begin or end, return with readToBegin and readThroughEnd flags
 	if (begin.getKey() == moduleBoundary.end || end.getKey() == moduleBoundary.begin) {
-		TEST(true);
+		TEST(true); // query touches begin or end
 		return result;
 	}
 	state RangeMap<Key, SpecialKeyRangeReadImpl*, KeyRangeRef>::Ranges ranges =
@@ -628,7 +628,7 @@ private:
 		bool end_conflict = it.is_conflict_range();
 		bool end_unreadable = it.is_unreadable();
 
-		TEST( it.is_conflict_range() != lastConflicted );
+		TEST( it.is_conflict_range() != lastConflicted ); // not last conflicted
 
 		it.tree.clear();
 
@@ -63,7 +63,7 @@ Future<REPLY_TYPE(Req)> retryBrokenPromise( RequestStream<Req> to, Req request,
 			throw;
 		resetReply( request );
 		wait( delayJittered(FLOW_KNOBS->PREVENT_FAST_SPIN_DELAY, taskID) );
-		TEST(true); // retryBrokenPromise
+		TEST(true); // retryBrokenPromise with taskID
 		}
 	}
 }
@@ -556,8 +556,8 @@ private:
 
 		debugFileCheck("SimpleFileRead", self->filename, data, offset, length);
 
-		INJECT_FAULT(io_timeout, "SimpleFile::read");
-		INJECT_FAULT(io_error, "SimpleFile::read");
+		INJECT_FAULT(io_timeout, "SimpleFile::read"); // SimpleFile::read io_timeout injected
+		INJECT_FAULT(io_error, "SimpleFile::read"); // SimpleFile::read io_error injected
 
 		return read_bytes;
 	}
@@ -594,8 +594,8 @@ private:
 
 		debugFileCheck("SimpleFileWrite", self->filename, (void*)data.begin(), offset, data.size());
 
-		INJECT_FAULT(io_timeout, "SimpleFile::write");
-		INJECT_FAULT(io_error, "SimpleFile::write");
+		INJECT_FAULT(io_timeout, "SimpleFile::write"); // SimpleFile::write inject io_timeout
+		INJECT_FAULT(io_error, "SimpleFile::write"); // SimpleFile::write inject io_error
 
 		return Void();
 	}
@@ -621,8 +621,8 @@ private:
 		if (randLog)
 			fprintf( randLog, "SFT2 %s %s %s\n", self->dbgId.shortString().c_str(), self->filename.c_str(), opId.shortString().c_str());
 
-		INJECT_FAULT( io_timeout, "SimpleFile::truncate" );
-		INJECT_FAULT( io_error, "SimpleFile::truncate" );
+		INJECT_FAULT( io_timeout, "SimpleFile::truncate" ); // SimpleFile::truncate inject io_timeout
+		INJECT_FAULT( io_error, "SimpleFile::truncate" ); // SimpleFile::truncate inject io_error
 
 		return Void();
 	}
@@ -654,8 +654,8 @@ private:
 		if (randLog)
 			fprintf( randLog, "SFC2 %s %s %s\n", self->dbgId.shortString().c_str(), self->filename.c_str(), opId.shortString().c_str());
 
-		INJECT_FAULT( io_timeout, "SimpleFile::sync" );
-		INJECT_FAULT( io_error, "SimpleFile::sync" );
+		INJECT_FAULT( io_timeout, "SimpleFile::sync" ); // SimpleFile::sync inject io_timeout
+		INJECT_FAULT( io_error, "SimpleFile::sync" ); // SimpleFile::sync inject io_errot
 
 		return Void();
 	}
@@ -675,7 +675,7 @@ private:
 
 		if (randLog)
 			fprintf(randLog, "SFS2 %s %s %s %" PRId64 "\n", self->dbgId.shortString().c_str(), self->filename.c_str(), opId.shortString().c_str(), pos);
-		INJECT_FAULT( io_error, "SimpleFile::size" );
+		INJECT_FAULT( io_error, "SimpleFile::size" ); // SimpleFile::size inject io_error
 
 		return pos;
 	}
@@ -1436,7 +1436,7 @@ public:
 
 		// Check if any processes on machine are rebooting
 		if ( processesOnMachine != processesPerMachine ) {
-			TEST(true); //Attempted reboot, but the target did not have all of its processes running
+			TEST(true); //Attempted reboot and kill, but the target did not have all of its processes running
 			TraceEvent(SevWarn, "AbortedKill").detail("KillType", kt).detail("MachineId", machineId).detail("Reason", "Machine processes does not match number of processes per machine").detail("Processes", processesOnMachine).detail("ProcessesPerMachine", processesPerMachine).backtrace();
 			if (ktFinal) *ktFinal = None;
 			return false;
@@ -1547,12 +1547,12 @@ public:
 			.detail("KilledDC", kt==ktMin);
 
 		TEST(kt != ktMin); // DataCenter kill was rejected by killMachine
-		TEST((kt==ktMin) && (kt == RebootAndDelete)); // Resulted in a reboot and delete
-		TEST((kt==ktMin) && (kt == Reboot)); // Resulted in a reboot
-		TEST((kt==ktMin) && (kt == KillInstantly)); // Resulted in an instant kill
-		TEST((kt==ktMin) && (kt == InjectFaults)); // Resulted in a kill by injecting faults
-		TEST((kt==ktMin) && (kt != ktOrig)); // Kill request was downgraded
-		TEST((kt==ktMin) && (kt == ktOrig)); // Requested kill was done
+		TEST((kt==ktMin) && (kt == RebootAndDelete)); // Datacenter kill Resulted in a reboot and delete
+		TEST((kt==ktMin) && (kt == Reboot)); // Datacenter kill Resulted in a reboot
+		TEST((kt==ktMin) && (kt == KillInstantly)); // Datacenter kill Resulted in an instant kill
+		TEST((kt==ktMin) && (kt == InjectFaults)); // Datacenter kill Resulted in a kill by injecting faults
+		TEST((kt==ktMin) && (kt != ktOrig)); // Datacenter Kill request was downgraded
+		TEST((kt==ktMin) && (kt == ktOrig)); // Datacenter kill - Requested kill was done
 
 		if (ktFinal) *ktFinal = ktMin;
 
@@ -1937,7 +1937,7 @@ ACTOR Future<Void> clusterRecruitFromConfiguration( ClusterControllerData* self,
 
 ACTOR Future<Void> clusterRecruitRemoteFromConfiguration( ClusterControllerData* self, RecruitRemoteFromConfigurationRequest req ) {
 	// At the moment this doesn't really need to be an actor (it always completes immediately)
-	TEST(true); //ClusterController RecruitTLogsRequest
+	TEST(true); //ClusterController RecruitTLogsRequest Remote
 	loop {
 		try {
 			RecruitRemoteFromConfigurationReply rep = self->findRemoteWorkersForConfiguration( req );
@@ -547,7 +547,7 @@ ACTOR Future<Void> preresolutionProcessing(CommitBatchContext* self) {
 	}
 
 	// Pre-resolution the commits
-	TEST(pProxyCommitData->latestLocalCommitBatchResolving.get() < localBatchNumber - 1);
+	TEST(pProxyCommitData->latestLocalCommitBatchResolving.get() < localBatchNumber - 1); // Wait for local batch
 	wait(pProxyCommitData->latestLocalCommitBatchResolving.whenAtLeast(localBatchNumber - 1));
 	self->releaseDelay = delay(
 	    std::min(SERVER_KNOBS->MAX_PROXY_COMPUTE,
@@ -239,7 +239,7 @@ struct MovableCoordinatedStateImpl {
 		}
 		// SOMEDAY: If moveState.mode == MovingFrom, read (without locking) old state and assert that it corresponds with our state and is ReallyTo(coordinators)
 		if (moveState.mode == MovableValue::MaybeTo) {
-			TEST(true);
+			TEST(true); // Maybe moveto state
 			ASSERT( moveState.other.present() );
 			wait( self->moveTo( self, &self->cs, ClusterConnectionString( moveState.other.get().toString() ), moveState.value ) );
 		}
@@ -2410,7 +2410,7 @@ struct DDTeamCollection : ReferenceCounted<DDTeamCollection> {
 		Reference<TCMachineInfo> machineInfo;
 		if (machine_info.find(machine_id) == machine_info.end()) {
 			// uid is the first storage server process on the machine
-			TEST(true);
+			TEST(true); // First storage server in process on the machine
 			// For each machine, store the first server's localityEntry into machineInfo for later use.
 			LocalityEntry localityEntry = machineLocalityMap.add(locality, &server->id);
 			machineInfo = makeReference<TCMachineInfo>(server, localityEntry);
@@ -3054,7 +3054,7 @@ ACTOR Future<Void> machineTeamRemover(DDTeamCollection* self) {
 			// in the serverTeams vector in the machine team.
 			--teamIndex;
 			self->addTeam(team->getServers(), true, true);
-			TEST(true);
+			TEST(true); // Removed machine team
 		}
 
 		self->doBuildTeams = true;
@@ -3133,7 +3133,7 @@ ACTOR Future<Void> serverTeamRemover(DDTeamCollection* self) {
 			bool foundTeam = self->removeTeam(st);
 			ASSERT(foundTeam == true);
 			self->addTeam(st->getServers(), true, true);
-			TEST(true);
+			TEST(true); // Marked team as a bad team
 
 			self->doBuildTeams = true;
 
@@ -728,7 +728,7 @@ void ILogSystem::SetPeekCursor::updateMessage(int logIdx, bool usePolicy) {
 			c->advanceTo(messageVersion);
 			if( start <= messageVersion && messageVersion < c->version() ) {
 				advancedPast = true;
-				TEST(true); //Merge peek cursor advanced past desired sequence
+				TEST(true); //Merge peek cursor with logIdx advanced past desired sequence
 			}
 		}
 	}
@@ -461,8 +461,8 @@ namespace oldTLog_4_6 {
 		state Version stopVersion = logData->version.get();
 
 		TEST(true); // TLog stopped by recovering master
-		TEST( logData->stopped );
-		TEST( !logData->stopped );
+		TEST( logData->stopped ); // LogData already stopped
+		TEST( !logData->stopped ); // LogData not yet stopped
 
 		TraceEvent("TLogStop", logData->logId).detail("Ver", stopVersion).detail("IsStopped", logData->stopped).detail("QueueCommitted", logData->queueCommittedVersion.get());
 
@@ -1005,7 +1005,7 @@ namespace oldTLog_4_6 {
 			auto& sequenceData = trackerData.sequence_version[sequence+1];
 			if(sequenceData.isSet()) {
 				if(sequenceData.getFuture().get() != reply.end) {
-					TEST(true); //tlog peek second attempt ended at a different version
+					TEST(true); //tlog peek second attempt ended at a different version (2)
 					req.reply.sendError(operation_obsolete());
 					return Void();
 				}
@@ -589,8 +589,8 @@ ACTOR Future<Void> tLogLock( TLogData* self, ReplyPromise< TLogLockResult > repl
 	state Version stopVersion = logData->version.get();
 
 	TEST(true); // TLog stopped by recovering master
-	TEST( logData->stopped );
-	TEST( !logData->stopped );
+	TEST( logData->stopped ); // logData already stopped
+	TEST( !logData->stopped ); // logData not yet stopped
 
 	TraceEvent("TLogStop", logData->logId).detail("Ver", stopVersion).detail("IsStopped", logData->stopped).detail("QueueCommitted", logData->queueCommittedVersion.get());
 
@@ -1295,7 +1295,7 @@ ACTOR Future<Void> tLogPeekMessages( TLogData* self, TLogPeekRequest req, Refere
 		if(sequenceData.isSet()) {
 			trackerData.duplicatePeeks++;
 			if(sequenceData.getFuture().get().first != reply.end) {
-				TEST(true); //tlog peek second attempt ended at a different version
+				TEST(true); //tlog peek second attempt ended at a different version (2)
 				req.reply.sendError(operation_obsolete());
 				return Void();
 			}
@@ -680,8 +680,8 @@ ACTOR Future<Void> tLogLock( TLogData* self, ReplyPromise< TLogLockResult > repl
 	state Version stopVersion = logData->version.get();
 
 	TEST(true); // TLog stopped by recovering master
-	TEST( logData->stopped );
-	TEST( !logData->stopped );
+	TEST( logData->stopped ); // logData already stopped
+	TEST( !logData->stopped ); // logData not yet stopped
 
 	TraceEvent("TLogStop", logData->logId).detail("Ver", stopVersion).detail("IsStopped", logData->stopped).detail("QueueCommitted", logData->queueCommittedVersion.get());
 
@@ -1689,7 +1689,7 @@ ACTOR Future<Void> tLogPeekMessages( TLogData* self, TLogPeekRequest req, Refere
 		if(sequenceData.isSet()) {
 			trackerData.duplicatePeeks++;
 			if(sequenceData.getFuture().get().first != reply.end) {
-				TEST(true); //tlog peek second attempt ended at a different version
+				TEST(true); //tlog peek second attempt ended at a different version (2)
 				req.reply.sendError(operation_obsolete());
 				return Void();
 			}
@@ -756,7 +756,7 @@ void SimulationConfig::generateNormalConfig(int minimumReplication, int minimumR
 		break;
 	}
 	case 3: {
-		TEST(true); // Simulated cluster using radix-tree storage engine
+		TEST(true); // Simulated cluster using redwood storage engine
 		set_config("ssd-redwood-experimental");
 		break;
 	}
@@ -857,7 +857,7 @@ void SimulationConfig::generateNormalConfig(int minimumReplication, int minimumR
 			int satellite_replication_type = deterministicRandom()->randomInt(0,3);
 			switch (satellite_replication_type) {
 			case 0: {
-				TEST( true ); // Simulated cluster using no satellite redundancy mode
+				TEST( true ); // Simulated cluster using no satellite redundancy mode (>4 datacenters)
 				break;
 			}
 			case 1: {
@@ -884,7 +884,7 @@ void SimulationConfig::generateNormalConfig(int minimumReplication, int minimumR
 				break;
 			}
 			case 1: {
-				TEST( true ); // Simulated cluster using no satellite redundancy mode
+				TEST( true ); // Simulated cluster using no satellite redundancy mode (<4 datacenters)
 				break;
 			}
 			case 2: {
@@ -1138,8 +1138,8 @@ void setupSimulatedSystem(vector<Future<Void>>* systemActors, std::string baseFo
 
 	// Use IPv6 25% of the time
 	bool useIPv6 = deterministicRandom()->random01() < 0.25;
-	TEST( useIPv6 );
-	TEST( !useIPv6 );
+	TEST( useIPv6 ); // Use IPv6
+	TEST( !useIPv6 ); // Use IPv4
 
 	vector<NetworkAddress> coordinatorAddresses;
 	if(minimumRegions > 1) {
@@ -1196,7 +1196,7 @@ ACTOR Future<Void> fetchKeys( StorageCacheData *data, AddingCacheRange* cacheRan
 			lastAvailable = std::max(lastAvailable, r->value());
 
 		if (lastAvailable != invalidVersion && lastAvailable >= data->oldestVersion.get()) {
-			TEST(true);
+			TEST(true); // wait for oldest version
 			wait( data->oldestVersion.whenAtLeast(lastAvailable+1) );
 		}
 
@@ -212,9 +212,9 @@ struct StorageServerMetrics {
 	void notify( KeyRef key, StorageMetrics& metrics ) {
 		ASSERT (metrics.bytes == 0); // ShardNotifyMetrics
 		if (g_network->isSimulated()) {
-			TEST(metrics.bytesPerKSecond != 0); // ShardNotifyMetrics
-			TEST(metrics.iosPerKSecond != 0); // ShardNotifyMetrics
-			TEST(metrics.bytesReadPerKSecond != 0); // ShardNotifyMetrics
+			TEST(metrics.bytesPerKSecond != 0); // ShardNotifyMetrics bytes
+			TEST(metrics.iosPerKSecond != 0); // ShardNotifyMetrics ios
+			TEST(metrics.bytesReadPerKSecond != 0); // ShardNotifyMetrics bytesRead
 		}
 
 		double expire = now() + SERVER_KNOBS->STORAGE_METRICS_AVERAGE_INTERVAL;
@@ -232,7 +232,7 @@ struct StorageServerMetrics {
 		auto& v = waitMetricsMap[key];
 		for(int i=0; i<v.size(); i++) {
 			if (g_network->isSimulated()) {
-				TEST(true);
+				TEST(true); // shard notify metrics
 			}
 			// ShardNotifyMetrics
 			v[i].send( notifyMetrics );
@@ -704,8 +704,8 @@ ACTOR Future<Void> tLogLock( TLogData* self, ReplyPromise< TLogLockResult > repl
 	state Version stopVersion = logData->version.get();
 
 	TEST(true); // TLog stopped by recovering master
-	TEST( logData->stopped );
-	TEST( !logData->stopped );
+	TEST( logData->stopped ); // logData already stopped
+	TEST( !logData->stopped ); // logData not yet stopped
 
 	TraceEvent("TLogStop", logData->logId).detail("Ver", stopVersion).detail("IsStopped", logData->stopped).detail("QueueCommitted", logData->queueCommittedVersion.get());
 
@@ -1728,7 +1728,7 @@ ACTOR Future<Void> tLogPeekMessages( TLogData* self, TLogPeekRequest req, Refere
 		if(sequenceData.isSet()) {
 			trackerData.duplicatePeeks++;
 			if(sequenceData.getFuture().get().first != reply.end) {
-				TEST(true); //tlog peek second attempt ended at a different version
+				TEST(true); //tlog peek second attempt ended at a different version (2)
 				req.reply.sendError(operation_obsolete());
 				return Void();
 			}
@@ -2275,8 +2275,8 @@ ACTOR Future<Void> fetchKeys( StorageServer *data, AddingShard* shard ) {
 					splitMutations(data, data->shards, *u);
 				}
 
-				TEST( true );
-				TEST( shard->updates.size() );
+				TEST( true ); // fetchkeys has more
+				TEST( shard->updates.size() ); // Shard has updates
 				ASSERT( otherShard->updates.empty() );
 			}
 		}
@@ -224,7 +224,7 @@ struct BackupAndParallelRestoreCorrectnessWorkload : TestWorkload {
 
 		// Stop the differential backup, if enabled
 		if (stopDifferentialDelay) {
-			TEST(!stopDifferentialFuture.isReady()); // Restore starts at specified time
+			TEST(!stopDifferentialFuture.isReady()); // Restore starts at specified time - stopDifferential not ready
 			wait(stopDifferentialFuture);
 			TraceEvent("BARW_DoBackupWaitToDiscontinue", randomID)
 			    .detail("Tag", printable(tag))
@@ -233,7 +233,7 @@ struct BackupAndRestoreCorrectnessWorkload : TestWorkload {
 
 		// Stop the differential backup, if enabled
 		if (stopDifferentialDelay) {
-			TEST(!stopDifferentialFuture.isReady()); //Restore starts at specified time
+			TEST(!stopDifferentialFuture.isReady()); //Restore starts at specified time - stopDifferential not ready
 			wait(stopDifferentialFuture);
 			TraceEvent("BARW_DoBackupWaitToDiscontinue", randomID).detail("Tag", printable(tag)).detail("DifferentialAfter", stopDifferentialDelay);
 
@@ -254,7 +254,7 @@ struct BackupToDBCorrectnessWorkload : TestWorkload {
 
 		// Stop the differential backup, if enabled
 		if (stopDifferentialDelay) {
-			TEST(!stopDifferentialFuture.isReady()); //Restore starts at specified time
+			TEST(!stopDifferentialFuture.isReady()); //Restore starts at specified time - stopDifferential not ready
 			wait(stopDifferentialFuture);
 			TraceEvent("BARW_DoBackupWaitToDiscontinue", randomID).detail("Tag", printable(tag)).detail("DifferentialAfter", stopDifferentialDelay);
 
@@ -131,7 +131,7 @@ std::string generateRegions() {
 		break;
 	}
 	case 1: {
-		TEST( true ); // Simulated cluster using no satellite redundancy mode
+		TEST( true ); // Simulated cluster using no satellite redundancy mode (<5 datacenters)
 		break;
 	}
 	case 2: {
@@ -212,7 +212,7 @@ struct DifferentClustersSameRVWorkload : TestWorkload {
 				state Future<Optional<Value>> val2 = tr2.get(self->keyToRead);
 				wait(success(val1) && success(val2));
 				// We're reading from different db's with the same read version. We can get a different value.
-				TEST(val1.get() != val2.get());
+				TEST(val1.get() != val2.get()); // reading from different dbs with the same version
 			} catch (Error& e) {
 				wait(tr1.onError(e) && tr2.onError(e));
 			}
@@ -107,7 +107,7 @@ struct Increment : TestWorkload {
 		}
 	}
 	bool incrementCheckData( const VectorRef<KeyValueRef>& data, Version v, Increment* self ) {
-		TEST( self->transactions.getValue() );
+		TEST( self->transactions.getValue() ); // incrementCheckData transaction has value
 		if (self->transactions.getValue() && data.size() == 0) {
 			TraceEvent(SevError, "TestFailure").detail("Reason", "No successful increments").detail("Before", nodeCount).detail("After", data.size()).detail("Version", v);
 			return false;
@@ -109,7 +109,7 @@ struct SpecialKeySpaceCorrectnessWorkload : TestWorkload {
 				return;
 			}
 			f = success(ryw.get(LiteralStringRef("\xff\xff/status/json")));
-			TEST(!f.isReady());
+			TEST(!f.isReady()); // status json not ready
 		}
 		ASSERT(f.isError());
 		ASSERT(f.getError().code() == error_code_transaction_cancelled);
@@ -317,7 +317,7 @@ struct SpecialKeySpaceCorrectnessWorkload : TestWorkload {
 				wait(success(tx->getRange(
 				    KeyRangeRef(LiteralStringRef("\xff\xff/transaction/"), LiteralStringRef("\xff\xff/transaction0")),
 				    CLIENT_KNOBS->TOO_MANY)));
-				TEST(true);
+				TEST(true); // read transaction special keyrange
 				tx->reset();
 			} catch (Error& e) {
 				throw;
@@ -341,7 +341,7 @@ struct SpecialKeySpaceCorrectnessWorkload : TestWorkload {
 				KeySelector begin = KeySelectorRef(readConflictRangeKeysRange.begin, false, 1);
 				KeySelector end = KeySelectorRef(LiteralStringRef("\xff\xff/transaction0"), false, 0);
 				wait(success(tx->getRange(begin, end, GetRangeLimits(CLIENT_KNOBS->TOO_MANY))));
-				TEST(true);
+				TEST(true); // end key selector inside module range
 				tx->reset();
 			} catch (Error& e) {
 				throw;
@@ -253,7 +253,7 @@ struct VersionStampWorkload : TestWorkload {
 		if (self->failIfDataLost) {
 			ASSERT(result.size() == self->versionStampKey_commit.size());
 		} else {
-			TEST(result.size() > 0); // Not all data should always be lost.
+			TEST(result.size() > 0); // Not all data should always be lost (2)
 		}
 
 		//TraceEvent("VST_Check1").detail("Size", result.size()).detail("VsKeyCommitSize", self->versionStampKey_commit.size());
@@ -233,7 +233,7 @@ static double getProcessorTimeGeneric(int who) {
 #endif
 
 double getProcessorTimeThread() {
-	INJECT_FAULT( platform_error, "getProcessorTimeThread" );
+	INJECT_FAULT( platform_error, "getProcessorTimeThread" ); // Get Thread CPU Time failed
 #if defined(_WIN32)
 	FILETIME ftCreate, ftExit, ftKernel, ftUser;
 	if (!GetThreadTimes(GetCurrentThread(), &ftCreate, &ftExit, &ftKernel, &ftUser)) {
@@ -260,7 +260,7 @@ double getProcessorTimeThread() {
 }
 
 double getProcessorTimeProcess() {
-	INJECT_FAULT( platform_error, "getProcessorTimeProcess" );
+	INJECT_FAULT( platform_error, "getProcessorTimeProcess" ); // Get CPU Process Time failed
 #if defined(_WIN32)
 	FILETIME ftCreate, ftExit, ftKernel, ftUser;
 	if (!GetProcessTimes(GetCurrentProcess(), &ftCreate, &ftExit, &ftKernel, &ftUser)) {
@@ -584,7 +584,7 @@ Error systemErrorCodeToError() {
 }
 
 void getDiskBytes(std::string const& directory, int64_t& free, int64_t& total) {
-	INJECT_FAULT( platform_error, "getDiskBytes" );
+	INJECT_FAULT( platform_error, "getDiskBytes" ); // Get disk bytes failed
 #if defined(__unixish__)
 #if defined (__linux__) || defined (__FreeBSD__)
 	struct statvfs buf;
@@ -634,7 +634,7 @@ void getDiskBytes(std::string const& directory, int64_t& free, int64_t& total) {
 
 #ifdef __unixish__
 const char* getInterfaceName(const IPAddress& _ip) {
-	INJECT_FAULT( platform_error, "getInterfaceName" );
+	INJECT_FAULT( platform_error, "getInterfaceName" ); // Get interface name failed
 	static char iname[20];
 
 	struct ifaddrs* interfaces = nullptr;
@@ -680,7 +680,7 @@ const char* getInterfaceName(const IPAddress& _ip) {
 #if defined(__linux__)
 void getNetworkTraffic(const IPAddress& ip, uint64_t& bytesSent, uint64_t& bytesReceived, uint64_t& outSegs,
                        uint64_t& retransSegs) {
-	INJECT_FAULT( platform_error, "getNetworkTraffic" ); // Even though this function doesn't throw errors, the equivalents for other platforms do, and since all of our simulation testing is on Linux...
+	INJECT_FAULT( platform_error, "getNetworkTraffic" ); // getNetworkTraffic: Even though this function doesn't throw errors, the equivalents for other platforms do, and since all of our simulation testing is on Linux...
 	const char* ifa_name = nullptr;
 	try {
 		ifa_name = getInterfaceName(ip);
@@ -748,7 +748,7 @@ void getNetworkTraffic(const IPAddress& ip, uint64_t& bytesSent, uint64_t& bytes
 }
 
 void getMachineLoad(uint64_t& idleTime, uint64_t& totalTime, bool logDetails) {
-	INJECT_FAULT( platform_error, "getMachineLoad" ); // Even though this function doesn't throw errors, the equivalents for other platforms do, and since all of our simulation testing is on Linux...
+	INJECT_FAULT( platform_error, "getMachineLoad" ); // getMachineLoad: Even though this function doesn't throw errors, the equivalents for other platforms do, and since all of our simulation testing is on Linux...
 	std::ifstream stat_stream("/proc/stat", std::ifstream::in);
 
 	std::string ignore;
@@ -765,7 +765,7 @@ void getMachineLoad(uint64_t& idleTime, uint64_t& totalTime, bool logDetails) {
 }
 
 void getDiskStatistics(std::string const& directory, uint64_t& currentIOs, uint64_t& busyTicks, uint64_t& reads, uint64_t& writes, uint64_t& writeSectors, uint64_t& readSectors) {
-	INJECT_FAULT( platform_error, "getDiskStatistics" );
+	INJECT_FAULT( platform_error, "getDiskStatistics" ); // Getting disks statistics failed
 	currentIOs = 0;
 
 	struct stat buf;
@@ -888,7 +888,7 @@ dev_t getDeviceId(std::string path) {
 #if defined(__FreeBSD__)
 void getNetworkTraffic(const IPAddress ip, uint64_t& bytesSent, uint64_t& bytesReceived,
                        uint64_t& outSegs, uint64_t& retransSegs) {
-	INJECT_FAULT( platform_error, "getNetworkTraffic" );
+	INJECT_FAULT( platform_error, "getNetworkTraffic" ); // Get Network traffic failed
 
 	const char* ifa_name = nullptr;
 	try {
@@ -955,7 +955,7 @@ void getNetworkTraffic(const IPAddress ip, uint64_t& bytesSent, uint64_t& bytesR
 }
 
 void getMachineLoad(uint64_t& idleTime, uint64_t& totalTime, bool logDetails) {
-	INJECT_FAULT( platform_error, "getMachineLoad" );
+	INJECT_FAULT( platform_error, "getMachineLoad" ); // Getting machine load failed
 
 	long cur[CPUSTATES], last[CPUSTATES];
 	size_t cur_sz = sizeof cur;
@@ -988,7 +988,7 @@ void getMachineLoad(uint64_t& idleTime, uint64_t& totalTime, bool logDetails) {
 }
 
 void getDiskStatistics(std::string const& directory, uint64_t& currentIOs, uint64_t& busyTicks, uint64_t& reads, uint64_t& writes, uint64_t& writeSectors, uint64_t& readSectors) {
-	INJECT_FAULT( platform_error, "getDiskStatistics" );
+	INJECT_FAULT( platform_error, "getDiskStatistics" ); // getting disk stats failed
 	currentIOs = 0;
 	busyTicks = 0;
 	reads = 0;
@@ -1078,7 +1078,7 @@ dev_t getDeviceId(std::string path) {
 #ifdef __APPLE__
 void getNetworkTraffic(const IPAddress& ip, uint64_t& bytesSent, uint64_t& bytesReceived, uint64_t& outSegs,
                        uint64_t& retransSegs) {
-	INJECT_FAULT( platform_error, "getNetworkTraffic" );
+	INJECT_FAULT( platform_error, "getNetworkTraffic" ); // Get network traffic failed (macOS)
 
 	const char* ifa_name = nullptr;
 	try {
@@ -1141,7 +1141,7 @@ void getNetworkTraffic(const IPAddress& ip, uint64_t& bytesSent, uint64_t& bytes
 }
 
 void getMachineLoad(uint64_t& idleTime, uint64_t& totalTime, bool logDetails) {
-	INJECT_FAULT( platform_error, "getMachineLoad" );
+	INJECT_FAULT( platform_error, "getMachineLoad" ); // Getting machine load filed (macOS)
 	mach_msg_type_number_t count = HOST_CPU_LOAD_INFO_COUNT;
 	host_cpu_load_info_data_t r_load;
 
@@ -1155,7 +1155,7 @@ void getMachineLoad(uint64_t& idleTime, uint64_t& totalTime, bool logDetails) {
 }
 
 void getDiskStatistics(std::string const& directory, uint64_t& currentIOs, uint64_t& busyTicks, uint64_t& reads, uint64_t& writes, uint64_t& writeSectors, uint64_t& readSectors) {
-	INJECT_FAULT( platform_error, "getDiskStatistics" );
+	INJECT_FAULT( platform_error, "getDiskStatistics" ); // Getting disk stats failed (macOS)
 	currentIOs = 0;
 	busyTicks = 0;
 	writeSectors = 0;
@@ -1716,7 +1716,7 @@ void setMemoryQuota( size_t limit ) {
 	// ASAN doesn't work with memory quotas: https://github.com/google/sanitizers/wiki/AddressSanitizer#ulimit--v
 	return;
 #endif
-	INJECT_FAULT( platform_error, "setMemoryQuota" );
+	INJECT_FAULT( platform_error, "setMemoryQuota" ); // setting memory quota failed
#if defined(_WIN32)
 	HANDLE job = CreateJobObject( nullptr, nullptr );
 	if (!job) {
@@ -1920,7 +1920,7 @@ void setAffinity(int proc) {
 namespace platform {
 
 int getRandomSeed() {
-	INJECT_FAULT( platform_error, "getRandomSeed" );
+	INJECT_FAULT( platform_error, "getRandomSeed" ); // getting a random seed failed
 	int randomSeed;
 	int retryCount = 0;
 
@@ -1963,11 +1963,11 @@ std::string joinPath( std::string const& directory, std::string const& filename
 }
 
 void renamedFile() {
-	INJECT_FAULT( io_error, "renameFile" );
+	INJECT_FAULT( io_error, "renameFile" ); // renaming file failed
 }
 
 void renameFile( std::string const& fromPath, std::string const& toPath ) {
-	INJECT_FAULT( io_error, "renameFile" );
+	INJECT_FAULT( io_error, "renameFile" ); // rename file failed
 #ifdef _WIN32
 	if (MoveFile( fromPath.c_str(), toPath.c_str() )) {
 		//renamedFile();
@@ -1997,7 +1997,7 @@ void renameFile( std::string const& fromPath, std::string const& toPath ) {
 void atomicReplace( std::string const& path, std::string const& content, bool textmode ) {
 	FILE* f = 0;
 	try {
-		INJECT_FAULT( io_error, "atomicReplace" );
+		INJECT_FAULT( io_error, "atomicReplace" ); // atomic rename failed
 
 		std::string tempfilename = joinPath(parentDirectory(path), deterministicRandom()->randomUniqueID().toString() + ".tmp");
 		f = textmode ? fopen( tempfilename.c_str(), "wt" FOPEN_CLOEXEC_MODE ) : fopen(tempfilename.c_str(), "wb");
@@ -2081,7 +2081,7 @@ void atomicReplace( std::string const& path, std::string const& content, bool te
 #error Port me!
 #endif
 
-		INJECT_FAULT( io_error, "atomicReplace" );
+		INJECT_FAULT( io_error, "atomicReplace" ); // io_error after atomic rename
 	}
 	catch(Error &e) {
 		TraceEvent(SevWarn, "AtomicReplace").error(e).detail("Path", path).GetLastError();
@@ -2091,12 +2091,12 @@ void atomicReplace( std::string const& path, std::string const& content, bool te
 }
 
 static bool deletedFile() {
-	INJECT_FAULT( platform_error, "deleteFile" );
+	INJECT_FAULT( platform_error, "deleteFile" ); // delete file failed
 	return true;
 }
 
 bool deleteFile( std::string const& filename ) {
-	INJECT_FAULT( platform_error, "deleteFile" );
+	INJECT_FAULT( platform_error, "deleteFile" ); // file deletion failed
 #ifdef _WIN32
 	if (DeleteFile(filename.c_str()))
 		return deletedFile();
@@ -2115,12 +2115,14 @@ bool deleteFile( std::string const& filename ) {
 	throw e;
 }
 
-static void createdDirectory() { INJECT_FAULT( platform_error, "createDirectory" ); }
+static void createdDirectory() {
+	INJECT_FAULT( platform_error, "createDirectory" ); // create dir (noargs) failed
+}
 
 namespace platform {
 
 bool createDirectory( std::string const& directory ) {
-	INJECT_FAULT( platform_error, "createDirectory" );
+	INJECT_FAULT( platform_error, "createDirectory" ); // create dir failed
 
 #ifdef _WIN32
 	if (CreateDirectory( directory.c_str(), nullptr )) {
@@ -2261,7 +2263,7 @@ std::string abspath( std::string const& path, bool resolveLinks, bool mustExist
 	}
 
 	// Returns an absolute path canonicalized to use only CANONICAL_PATH_SEPARATOR
-	INJECT_FAULT( platform_error, "abspath" );
+	INJECT_FAULT( platform_error, "abspath" ); // abspath failed
 
 	if(!resolveLinks) {
 		// TODO: Not resolving symbolic links does not yet behave well on Windows because of drive letters
@@ -2367,7 +2369,7 @@ bool acceptDirectory( FILE_ATTRIBUTE_DATA fileAttributes, std::string const& nam
 
 ACTOR Future<vector<std::string>> findFiles( std::string directory, std::string extension,
                                              bool directoryOnly, bool async) {
-	INJECT_FAULT( platform_error, "findFiles" );
+	INJECT_FAULT( platform_error, "findFiles" ); // findFiles failed (Win32)
 	state vector<std::string> result;
 	state int64_t tsc_begin = __rdtsc();
 
@@ -2417,7 +2419,7 @@ bool acceptDirectory( FILE_ATTRIBUTE_DATA fileAttributes, std::string const& nam
 
 ACTOR Future<vector<std::string>> findFiles( std::string directory, std::string extension,
                                              bool directoryOnly, bool async) {
-	INJECT_FAULT( platform_error, "findFiles" );
+	INJECT_FAULT( platform_error, "findFiles" ); // findFiles failed
 	state vector<std::string> result;
 	state int64_t tsc_begin = __rdtsc();
 
@@ -36,6 +36,9 @@ namespace coveragetool
 		public string Condition;
 	};
 
+	class ParseException : Exception {
+	}
+
 	class Program
 	{
 		public static int Main(string[] args)
@@ -82,10 +85,14 @@ namespace coveragetool
 				.Where( fi=>new FileInfo(fi).LastWriteTimeUtc > outputTime )
 				.ToLookup(n=>n);
 
-			cases = cases
+			try {
+				cases = cases
 				.Where(c => exists.Contains(c.File) && !changedFiles.Contains(c.File))
 				.Concat( changedFiles.SelectMany( f => ParseSource( f.Key ) ) )
 				.ToArray();
+			} catch (ParseException) {
+				return 1;
+			}
 
 			if (!quiet) {
 				Console.WriteLine(" {0}/{1} files scanned", changedFiles.Count, inputPaths.Length);
@@ -140,10 +147,10 @@ namespace coveragetool
 		}
 		public static CoverageCase[] ParseSource(string filename)
 		{
-			var regex = new Regex( @"^([^/]|/[^/])*(TEST|INJECT_FAULT|SHOULD_INJECT_FAULT)[ \t]*\(([^)]*)\)" );
+			var regex = new Regex( @"^([^/]|/[^/])*\s+(TEST|INJECT_FAULT|SHOULD_INJECT_FAULT)[ \t]*\(([^)]*)\)" );
 
 			var lines = File.ReadAllLines(filename);
-			return Enumerable.Range(0, lines.Length)
+			var res = Enumerable.Range(0, lines.Length)
 				.Where( i=>regex.IsMatch(lines[i]) && !lines[i].StartsWith("#define") )
 				.Select( i=>new CoverageCase {
 					File = filename,
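The regex change above tightens what counts as a coverage macro: requiring whitespace immediately before the macro name keeps identifiers that merely end in `TEST` from being parsed as coverage cases. A small C++ sketch of the effect (patterns simplified from the C# original, using `std::regex` ECMAScript syntax):

```cpp
#include <iostream>
#include <regex>
#include <string>

int main() {
	// Simplified from the coveragetool patterns; the new one requires
	// whitespace right before the macro name.
	std::regex oldRe(R"(^([^/]|/[^/])*(TEST|INJECT_FAULT)[ \t]*\(([^)]*)\))");
	std::regex newRe(R"(^([^/]|/[^/])*\s+(TEST|INJECT_FAULT)[ \t]*\(([^)]*)\))");

	std::string line = "    MY_TEST(x);"; // an identifier that merely ends in TEST
	std::cout << std::boolalpha
	          << std::regex_search(line, oldRe) << "\n"  // true: false positive
	          << std::regex_search(line, newRe) << "\n"; // false: correctly skipped
	return 0;
}
```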
@@ -152,6 +159,26 @@ namespace coveragetool
 					Condition = regex.Match(lines[i]).Groups[3].Value
 				} )
 				.ToArray();
+			var comments = new Dictionary<string, CoverageCase>();
+			bool failed = false;
+			foreach(var coverageCase in res) {
+				if (String.IsNullOrEmpty(coverageCase.Comment) || coverageCase.Comment.Trim() == "") {
+					failed = true;
+					Console.Error.WriteLine(String.Format("Error at {0}:{1}: Empty or missing comment", coverageCase.File, coverageCase.Line));
+				}
+				else if (comments.ContainsKey(coverageCase.Comment)) {
+					failed = true;
+					var prev = comments[coverageCase.Comment];
+					Console.Error.WriteLine(String.Format("Error at {0}:{1}: {2} is not a unique comment", coverageCase.File, coverageCase.Line, coverageCase.Comment));
+					Console.Error.WriteLine(String.Format("\tPreviously seen in {0} at {1}", prev.File, prev.Line));
+				} else {
+					comments.Add(coverageCase.Comment, coverageCase);
+				}
+			}
+			if (failed) {
+				throw new ParseException();
+			}
+			return res;
 		}
 		public static string FindComment(string line)
 		{
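The uniqueness check added to `ParseSource` above is the heart of the enforcement: each extracted comment becomes a dictionary key, and an empty or repeated key fails the build via `ParseException`. A rough C++ analogue of that loop (types and names here are illustrative, not from the repository):

```cpp
#include <cstdio>
#include <map>
#include <string>
#include <vector>

struct CoverageCase {
	std::string file;
	int line;
	std::string comment;
};

// Returns false (and prints diagnostics) if any coverage-case comment is
// empty or duplicated, mirroring the coveragetool check sketched above.
bool commentsAreUnique(const std::vector<CoverageCase>& cases) {
	std::map<std::string, CoverageCase> seen;
	bool ok = true;
	for (const auto& c : cases) {
		if (c.comment.empty()) {
			ok = false;
			std::fprintf(stderr, "Error at %s:%d: Empty or missing comment\n", c.file.c_str(), c.line);
		} else if (auto it = seen.find(c.comment); it != seen.end()) {
			ok = false;
			std::fprintf(stderr, "Error at %s:%d: %s is not a unique comment\n", c.file.c_str(), c.line, c.comment.c_str());
			std::fprintf(stderr, "\tPreviously seen in %s at %d\n", it->second.file.c_str(), it->second.line);
		} else {
			seen.emplace(c.comment, c);
		}
	}
	return ok;
}
```

Failing the build outright, rather than warning, is what forces the per-call-site comments added throughout this diff; the `(2)` suffixes and `Datacenter kill` prefixes exist purely to disambiguate previously identical messages.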