don't allow empty comments

Markus Pilman 2020-11-11 14:07:54 -07:00
parent bdd3dbfa7d
commit 1343f40117
12 changed files with 19 additions and 15 deletions


@@ -2278,7 +2278,7 @@ ACTOR Future<Standalone<RangeResultRef>> getExactRange( Database cx, Version ver
}
if (!more || locations[shard].first.empty()) {
TEST(true);
TEST(true); // getExactrange (!more || locations[shard].first.empty())
if(shard == locations.size()-1) {
const KeyRangeRef& range = locations[shard].first;
KeyRef begin = reverse ? keys.begin : range.end;


@@ -1228,7 +1228,7 @@ ACTOR Future<Standalone<RangeResultRef>> getWorkerInterfaces (Reference<ClusterC
}
Future< Optional<Value> > ReadYourWritesTransaction::get( const Key& key, bool snapshot ) {
TEST(true);
TEST(true); // ReadYourWritesTransaction::get
if (getDatabase()->apiVersionAtLeast(630)) {
if (specialKeys.contains(key)) {


@@ -628,7 +628,7 @@ private:
bool end_conflict = it.is_conflict_range();
bool end_unreadable = it.is_unreadable();
TEST( it.is_conflict_range() != lastConflicted );
TEST( it.is_conflict_range() != lastConflicted ); // not last conflicted
it.tree.clear();


@@ -547,7 +547,7 @@ ACTOR Future<Void> preresolutionProcessing(CommitBatchContext* self) {
}
// Pre-resolution the commits
TEST(pProxyCommitData->latestLocalCommitBatchResolving.get() < localBatchNumber - 1);
TEST(pProxyCommitData->latestLocalCommitBatchResolving.get() < localBatchNumber - 1); // Wait for local batch
wait(pProxyCommitData->latestLocalCommitBatchResolving.whenAtLeast(localBatchNumber - 1));
self->releaseDelay = delay(
std::min(SERVER_KNOBS->MAX_PROXY_COMPUTE,


@@ -239,7 +239,7 @@ struct MovableCoordinatedStateImpl {
}
// SOMEDAY: If moveState.mode == MovingFrom, read (without locking) old state and assert that it corresponds with our state and is ReallyTo(coordinators)
if (moveState.mode == MovableValue::MaybeTo) {
TEST(true);
TEST(true); // Maybe moveto state
ASSERT( moveState.other.present() );
wait( self->moveTo( self, &self->cs, ClusterConnectionString( moveState.other.get().toString() ), moveState.value ) );
}


@@ -3133,7 +3133,7 @@ ACTOR Future<Void> serverTeamRemover(DDTeamCollection* self) {
bool foundTeam = self->removeTeam(st);
ASSERT(foundTeam == true);
self->addTeam(st->getServers(), true, true);
TEST(true);
TEST(true); // Marked team as a bad team
self->doBuildTeams = true;


@@ -1196,7 +1196,7 @@ ACTOR Future<Void> fetchKeys( StorageCacheData *data, AddingCacheRange* cacheRan
lastAvailable = std::max(lastAvailable, r->value());
if (lastAvailable != invalidVersion && lastAvailable >= data->oldestVersion.get()) {
TEST(true);
TEST(true); // wait for oldest version
wait( data->oldestVersion.whenAtLeast(lastAvailable+1) );
}


@@ -232,7 +232,7 @@ struct StorageServerMetrics {
auto& v = waitMetricsMap[key];
for(int i=0; i<v.size(); i++) {
if (g_network->isSimulated()) {
TEST(true);
TEST(true); // shard notify metrics
}
// ShardNotifyMetrics
v[i].send( notifyMetrics );


@@ -2275,7 +2275,7 @@ ACTOR Future<Void> fetchKeys( StorageServer *data, AddingShard* shard ) {
splitMutations(data, data->shards, *u);
}
TEST( true );
TEST( true ); // fetchkeys has more
TEST( shard->updates.size() ); // Shard has updates
ASSERT( otherShard->updates.empty() );
}


@@ -212,7 +212,7 @@ struct DifferentClustersSameRVWorkload : TestWorkload {
state Future<Optional<Value>> val2 = tr2.get(self->keyToRead);
wait(success(val1) && success(val2));
// We're reading from different db's with the same read version. We can get a different value.
TEST(val1.get() != val2.get());
TEST(val1.get() != val2.get()); // reading from different dbs with the same version
} catch (Error& e) {
wait(tr1.onError(e) && tr2.onError(e));
}


@@ -107,7 +107,7 @@ struct Increment : TestWorkload {
}
}
bool incrementCheckData( const VectorRef<KeyValueRef>& data, Version v, Increment* self ) {
TEST( self->transactions.getValue() );
TEST( self->transactions.getValue() ); // incrementCheckData transaction has value
if (self->transactions.getValue() && data.size() == 0) {
TraceEvent(SevError, "TestFailure").detail("Reason", "No successful increments").detail("Before", nodeCount).detail("After", data.size()).detail("Version", v);
return false;


@@ -160,10 +160,14 @@ namespace coveragetool
} )
.ToArray();
var comments = new Dictionary<string, CoverageCase>();
bool isUnique = true;
bool failed = false;
foreach(var coverageCase in res) {
if (comments.ContainsKey(coverageCase.Comment)) {
isUnique = false;
if (String.IsNullOrEmpty(coverageCase.Comment) || coverageCase.Comment.Trim() == "") {
failed = true;
Console.Error.WriteLine(String.Format("Error at {0}:{1}: Empty or missing comment", coverageCase.File, coverageCase.Line));
}
else if (comments.ContainsKey(coverageCase.Comment)) {
failed = true;
var prev = comments[coverageCase.Comment];
Console.Error.WriteLine(String.Format("Error at {0}:{1}: {2} is not a unique comment", coverageCase.File, coverageCase.Line, coverageCase.Comment));
Console.Error.WriteLine(String.Format("\tPreviously seen in {0} at {1}", prev.File, prev.Line));
@@ -171,7 +175,7 @@ namespace coveragetool
comments.Add(coverageCase.Comment, coverageCase);
}
}
if (!isUnique) {
if (failed) {
throw new ParseException();
}
return res;
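
The hunk above shows only the changed fragment of the coveragetool parser. As a rough, self-contained sketch of the rule it enforces (every coverage case must carry a non-empty, unique comment), the following standalone C# program mirrors the same checks; the class, method, and file names here are illustrative, not part of the actual tool:

// Sketch only: rejects empty/missing coverage comments and duplicates,
// mirroring the error messages used in the coveragetool change above.
using System;
using System.Collections.Generic;

class CoverageCase
{
    public string File;
    public int Line;
    public string Comment;
}

static class CoverageCommentCheck
{
    // Returns true when every comment is present and unique; prints an error per offending case.
    public static bool Validate(IEnumerable<CoverageCase> cases)
    {
        var comments = new Dictionary<string, CoverageCase>();
        bool failed = false;
        foreach (var c in cases)
        {
            if (String.IsNullOrEmpty(c.Comment) || c.Comment.Trim() == "")
            {
                failed = true;
                Console.Error.WriteLine("Error at {0}:{1}: Empty or missing comment", c.File, c.Line);
            }
            else if (comments.ContainsKey(c.Comment))
            {
                failed = true;
                var prev = comments[c.Comment];
                Console.Error.WriteLine("Error at {0}:{1}: {2} is not a unique comment", c.File, c.Line, c.Comment);
                Console.Error.WriteLine("\tPreviously seen in {0} at {1}", prev.File, prev.Line);
            }
            else
            {
                comments.Add(c.Comment, c);
            }
        }
        return !failed;
    }

    static void Main()
    {
        // Hypothetical input: one valid case, one empty comment, one duplicate comment.
        var cases = new[]
        {
            new CoverageCase { File = "a.cpp", Line = 10, Comment = "fetchkeys has more" },
            new CoverageCase { File = "b.cpp", Line = 20, Comment = "" },
            new CoverageCase { File = "c.cpp", Line = 30, Comment = "fetchkeys has more" },
        };
        Console.WriteLine(Validate(cases) ? "coverage comments OK" : "coverage comment check failed");
    }
}

Like the change above, the sketch keeps scanning after the first problem so every offending TEST site is reported before the run is failed.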