[fdbserver] Use STL contains method and std::find for containment checks (#11702)

Syed Paymaan Raza 2024-10-15 11:40:02 -07:00 committed by GitHub
parent b4bad4c1d6
commit c146ee0869
68 changed files with 364 additions and 362 deletions
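
The pattern throughout the diff is mechanical: wherever a map or set was queried with count() purely to test membership, the call becomes the C++20 contains() member, and free-standing std::count checks over sequences become std::find comparisons against end(). Below is a minimal sketch of the associative-container half of the idiom; the container and key names are illustrative, not taken from the FoundationDB sources.

#include <cassert>
#include <map>
#include <set>

int main() {
    std::set<int> ids = { 1, 2, 3 };
    std::map<int, int> stats = { { 1, 10 } };

    // Before: membership expressed via count(), which returns a size_t.
    assert(ids.count(2) > 0);
    assert(stats.count(1) != 0);

    // After (C++20): contains() returns bool and states the intent directly.
    assert(ids.contains(2));
    assert(stats.contains(1));
    return 0;
}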

View File

@ -93,9 +93,9 @@ public:
acsBuilder(proxyCommitData_.acsBuilder), epoch(proxyCommitData_.epoch) {
if (encryptMode.isEncryptionEnabled()) {
ASSERT(cipherKeys != nullptr);
ASSERT(cipherKeys->count(SYSTEM_KEYSPACE_ENCRYPT_DOMAIN_ID) > 0);
ASSERT(cipherKeys->contains(SYSTEM_KEYSPACE_ENCRYPT_DOMAIN_ID));
if (FLOW_KNOBS->ENCRYPT_HEADER_AUTH_TOKEN_ENABLED) {
ASSERT(cipherKeys->count(ENCRYPT_HEADER_DOMAIN_ID));
ASSERT(cipherKeys->contains(ENCRYPT_HEADER_DOMAIN_ID));
}
}
// If commit proxy, epoch must be set
@ -115,9 +115,9 @@ public:
accumulativeChecksumIndex(resolverAccumulativeChecksumIndex), epoch(Optional<LogEpoch>()) {
if (encryptMode.isEncryptionEnabled()) {
ASSERT(cipherKeys != nullptr);
ASSERT(cipherKeys->count(SYSTEM_KEYSPACE_ENCRYPT_DOMAIN_ID) > 0);
ASSERT(cipherKeys->contains(SYSTEM_KEYSPACE_ENCRYPT_DOMAIN_ID));
if (FLOW_KNOBS->ENCRYPT_HEADER_AUTH_TOKEN_ENABLED) {
ASSERT(cipherKeys->count(ENCRYPT_HEADER_DOMAIN_ID));
ASSERT(cipherKeys->contains(ENCRYPT_HEADER_DOMAIN_ID));
}
}
}
@ -1184,7 +1184,7 @@ private:
bool foundKey = false;
for (auto& it : vecBackupKeys->intersectingRanges(normalKeys)) {
if (it.value().count(logDestination) > 0) {
if (it.value().contains(logDestination)) {
foundKey = true;
break;
}
@ -1192,7 +1192,7 @@ private:
auto& systemBackupRanges = getSystemBackupRanges();
for (auto r = systemBackupRanges.begin(); !foundKey && r != systemBackupRanges.end(); ++r) {
for (auto& it : vecBackupKeys->intersectingRanges(*r)) {
if (it.value().count(logDestination) > 0) {
if (it.value().contains(logDestination)) {
foundKey = true;
break;
}

View File

@ -92,7 +92,7 @@ std::map<std::tuple<LogEpoch, Version, int>, std::map<Tag, Version>> BackupProgr
auto prev = std::prev(current);
// Previous epoch is gone, consolidate the progress.
for (auto [tag, version] : prev->second) {
if (toCheck.count(tag) > 0) {
if (toCheck.contains(tag)) {
progressIt->second[tag] = std::max(version, progressIt->second[tag]);
toCheck.erase(tag);
}

View File

@ -630,7 +630,7 @@ ACTOR Future<Void> killBlobWorkers(Database cx) {
if (first) {
knownWorkers.insert(interf.id());
}
if (knownWorkers.count(interf.id())) {
if (knownWorkers.contains(interf.id())) {
haltIds.push_back(interf.id());
haltRequests.push_back(interf.haltBlobWorker.tryGetReply(HaltBlobWorkerRequest(1e6, UID())));
}

View File

@ -1000,7 +1000,7 @@ ACTOR Future<Void> doRangeAssignment(Reference<BlobManagerData> bmData,
// actor map, cancelling this actor before it got here
bmData->workerAssignments.insert(assignment.keyRange, workerID.get());
if (bmData->workerStats.count(workerID.get())) {
if (bmData->workerStats.contains(workerID.get())) {
bmData->workerStats[workerID.get()].numGranulesAssigned += 1;
}
@ -1040,7 +1040,7 @@ ACTOR Future<Void> doRangeAssignment(Reference<BlobManagerData> bmData,
req.type = assignment.assign.get().type;
// if that worker isn't alive anymore, add the range back into the stream
if (bmData->workersById.count(workerID.get()) == 0) {
if (!bmData->workersById.contains(workerID.get())) {
throw no_more_servers();
}
state Future<Void> assignFuture = bmData->workersById[workerID.get()].assignBlobRangeRequest.getReply(req);
@ -1073,7 +1073,7 @@ ACTOR Future<Void> doRangeAssignment(Reference<BlobManagerData> bmData,
req.dispose = assignment.revoke.get().dispose;
// if that worker isn't alive anymore, this is a noop
if (bmData->workersById.count(workerID.get())) {
if (bmData->workersById.contains(workerID.get())) {
wait(bmData->workersById[workerID.get()].revokeBlobRangeRequest.getReply(req));
} else {
return Void();
@ -1271,7 +1271,7 @@ static bool handleRangeIsAssign(Reference<BlobManagerData> bmData, RangeAssignme
bmData->assignsInProgress.insert(assignment.keyRange,
doRangeAssignment(bmData, assignment, workerId, bmData->epoch, seqNo));
}
if (bmData->workerStats.count(workerId)) {
if (bmData->workerStats.contains(workerId)) {
bmData->workerStats[workerId].numGranulesAssigned += 1;
}
}
@ -1291,7 +1291,7 @@ static bool handleRangeIsAssign(Reference<BlobManagerData> bmData, RangeAssignme
static bool handleRangeIsRevoke(Reference<BlobManagerData> bmData, RangeAssignment assignment, int64_t seqNo) {
if (assignment.worker.present()) {
// revoke this specific range from this specific worker. Either part of recovery or failing a worker
if (bmData->workerStats.count(assignment.worker.get())) {
if (bmData->workerStats.contains(assignment.worker.get())) {
bmData->workerStats[assignment.worker.get()].numGranulesAssigned -= 1;
}
// if this revoke matches the worker assignment state, mark the range as unassigned
@ -1333,7 +1333,7 @@ static bool handleRangeIsRevoke(Reference<BlobManagerData> bmData, RangeAssignme
// It is fine for multiple disjoint sub-ranges to have the same sequence number since they were part
// of the same logical change
if (bmData->workerStats.count(it.value())) {
if (bmData->workerStats.contains(it.value())) {
bmData->workerStats[it.value()].numGranulesAssigned -= 1;
}
@ -1407,7 +1407,7 @@ ACTOR Future<Void> writeInitialGranuleMapping(Reference<BlobManagerData> bmData,
KeyRangeRef(splitPoints.keys[i], splitPoints.keys[endIdx]),
blobGranuleMappingValueFor(UID())));
for (j = 0; i + j < endIdx; j++) {
if (splitPoints.boundaries.count(splitPoints.keys[i + j])) {
if (splitPoints.boundaries.contains(splitPoints.keys[i + j])) {
tr->set(blobGranuleMergeBoundaryKeyFor(splitPoints.keys[i + j]),
blobGranuleMergeBoundaryValueFor(splitPoints.boundaries[splitPoints.keys[i + j]]));
}
@ -1419,7 +1419,7 @@ ACTOR Future<Void> writeInitialGranuleMapping(Reference<BlobManagerData> bmData,
// Update BlobGranuleMergeBoundary in-memory state.
for (int k = i; k < i + j; k++) {
KeyRef beginKey = splitPoints.keys[k];
if (splitPoints.boundaries.count(beginKey)) {
if (splitPoints.boundaries.contains(beginKey)) {
bmData->mergeBoundaries[beginKey] = splitPoints.boundaries[beginKey];
}
}
@ -1809,7 +1809,7 @@ ACTOR Future<Void> reevaluateInitialSplit(Reference<BlobManagerData> bmData,
if (BM_DEBUG) {
fmt::print("Aligned split ({0}):\n", finalSplit.keys.size());
for (auto& it : finalSplit.keys) {
fmt::print(" {0}{1}\n", it.printable(), finalSplit.boundaries.count(it) ? " *" : "");
fmt::print(" {0}{1}\n", it.printable(), finalSplit.boundaries.contains(it) ? " *" : "");
}
}
@ -1934,7 +1934,7 @@ ACTOR Future<Void> reevaluateInitialSplit(Reference<BlobManagerData> bmData,
blobGranuleMappingKeys.begin,
KeyRangeRef(finalSplit.keys[i], finalSplit.keys[i + 1]),
blobGranuleMappingValueFor(UID())));
if (finalSplit.boundaries.count(finalSplit.keys[i])) {
if (finalSplit.boundaries.contains(finalSplit.keys[i])) {
tr->set(blobGranuleMergeBoundaryKeyFor(finalSplit.keys[i]),
blobGranuleMergeBoundaryValueFor(finalSplit.boundaries[finalSplit.keys[i]]));
}
@ -2063,7 +2063,7 @@ ACTOR Future<Void> maybeSplitRange(Reference<BlobManagerData> bmData,
fmt::print(" {0}:{1}{2}\n",
(i < newGranuleIDs.size() ? newGranuleIDs[i] : UID()).toString().substr(0, 6).c_str(),
splitPoints.keys[i].printable(),
splitPoints.boundaries.count(splitPoints.keys[i]) ? " *" : "");
splitPoints.boundaries.contains(splitPoints.keys[i]) ? " *" : "");
}
}
@ -2195,7 +2195,7 @@ ACTOR Future<Void> maybeSplitRange(Reference<BlobManagerData> bmData,
MutationRef::SetVersionstampedValue);
// Update BlobGranuleMergeBoundary.
if (splitPoints.boundaries.count(splitRange.begin)) {
if (splitPoints.boundaries.contains(splitRange.begin)) {
tr->set(blobGranuleMergeBoundaryKeyFor(splitRange.begin),
blobGranuleMergeBoundaryValueFor(splitPoints.boundaries[splitRange.begin]));
}
@ -2605,7 +2605,7 @@ ACTOR Future<Void> finishMergeGranules(Reference<BlobManagerData> bmData,
// Assert that none of the subsequent granules are hard boundaries.
if (g_network->isSimulated()) {
for (int i = 1; i < parentGranuleRanges.size() - 1; i++) {
ASSERT(!bmData->mergeHardBoundaries.count(parentGranuleRanges[i]));
ASSERT(!bmData->mergeHardBoundaries.contains(parentGranuleRanges[i]));
}
}
@ -2843,7 +2843,7 @@ ACTOR Future<Void> granuleMergeChecker(Reference<BlobManagerData> bmData) {
// 2. Hit the maximum in a merge evaluation window.
// 3. Hit a hard merge boundary meaning we should not merge across them.
if (!it->cvalue().mergeEligible() || currentCandidates.size() == maxRangeSize ||
bmData->mergeHardBoundaries.count(it->range().begin)) {
bmData->mergeHardBoundaries.contains(it->range().begin)) {
if (currentCandidates.size() >= 2) {
mergeChecks.push_back(attemptMerges(bmData, currentCandidates));
}
@ -2859,8 +2859,8 @@ ACTOR Future<Void> granuleMergeChecker(Reference<BlobManagerData> bmData) {
// Conditions:
// 1. Start a new soft merge range.
// 2. End a soft merge range.
if ((!mergeBoundaries.count(curRange.begin) && mergeBoundaries.count(curRange.end)) ||
(mergeBoundaries.count(lastRange.begin) && !mergeBoundaries.count(lastRange.end))) {
if ((!mergeBoundaries.contains(curRange.begin) && mergeBoundaries.contains(curRange.end)) ||
(mergeBoundaries.contains(lastRange.begin) && !mergeBoundaries.contains(lastRange.end))) {
if (currentCandidates.size() >= 2) {
mergeChecks.push_back(attemptMerges(bmData, currentCandidates));
}
@ -2972,10 +2972,10 @@ ACTOR Future<Void> killBlobWorker(Reference<BlobManagerData> bmData, BlobWorkerI
}
Optional<UID> successor = bwId;
while (bmData->workerAffinities.count(successor.get())) {
while (bmData->workerAffinities.contains(successor.get())) {
successor = bmData->workerAffinities[successor.get()];
}
if (successor.get() == bwId || !bmData->workersById.count(successor.get())) {
if (successor.get() == bwId || !bmData->workersById.contains(successor.get())) {
successor = Optional<UID>();
}
@ -3394,16 +3394,16 @@ ACTOR Future<Void> checkBlobWorkerList(Reference<BlobManagerData> bmData, Promis
// add all blob workers to this new blob manager's records and start monitoring it
bool foundAnyNew = false;
for (auto& worker : blobWorkers) {
if (!bmData->deadWorkers.count(worker.id())) {
if (!bmData->deadWorkers.contains(worker.id())) {
bool isFailedOrExcluded = bmData->exclusionTracker.isFailedOrExcluded(worker.stableAddress());
if (!bmData->workerAddresses.count(worker.stableAddress()) &&
if (!bmData->workerAddresses.contains(worker.stableAddress()) &&
worker.locality.dcId() == bmData->dcId && !isFailedOrExcluded) {
bmData->workerAddresses.insert(worker.stableAddress());
bmData->workersById[worker.id()] = worker;
bmData->workerStats[worker.id()] = BlobWorkerInfo();
bmData->addActor.send(monitorBlobWorker(bmData, worker));
foundAnyNew = true;
} else if (!bmData->workersById.count(worker.id())) {
} else if (!bmData->workersById.contains(worker.id())) {
TraceEvent("KillingExtraneousBlobWorker", bmData->id)
.detail("WorkerId", worker.id())
.detail("Addr", worker.stableAddress())
@ -3880,7 +3880,7 @@ ACTOR Future<Void> recoverBlobManager(Reference<BlobManagerData> bmData) {
assignment.seqnoAssigned,
outOfDateAssignments);
}
if (bmData->workerStats.count(workerId)) {
if (bmData->workerStats.contains(workerId)) {
bmData->workerStats[workerId].numGranulesAssigned = reply.get().assignments.size();
}
} else {
@ -4043,11 +4043,11 @@ ACTOR Future<Void> recoverBlobManager(Reference<BlobManagerData> bmData) {
// if worker id is already set to a known worker that replied with it in the mapping, range is already assigned
// there. If not, need to explicitly assign it to someone
if (workerId == UID() || epoch == 0 || !endingWorkers.count(workerId)) {
if (workerId == UID() || epoch == 0 || !endingWorkers.contains(workerId)) {
if (workerId == UID()) {
workerId = workerAffinity;
}
while (bmData->workerAffinities.count(workerId)) {
while (bmData->workerAffinities.contains(workerId)) {
workerId = bmData->workerAffinities[workerId];
CODE_PROBE(true, "Blob worker has affinity after reboot");
}
@ -4058,7 +4058,7 @@ ACTOR Future<Void> recoverBlobManager(Reference<BlobManagerData> bmData) {
RangeAssignment raAssign;
raAssign.isAssign = true;
if (bmData->workersById.count(workerId)) {
if (bmData->workersById.contains(workerId)) {
raAssign.worker = workerId;
}
raAssign.keyRange = range.range();
@ -4122,7 +4122,7 @@ ACTOR Future<Void> chaosRangeMover(Reference<BlobManagerData> bmData) {
while (tries > 0) {
tries--;
auto randomRange = bmData->workerAssignments.randomRange();
if (randomRange.value() != UID() && !alreadyMoved.count(randomRange.range().toString())) {
if (randomRange.value() != UID() && !alreadyMoved.contains(randomRange.range().toString())) {
if (BM_DEBUG) {
fmt::print("Range mover moving range [{0} - {1}): {2}\n",
randomRange.begin().printable().c_str(),
@ -4182,7 +4182,7 @@ ACTOR Future<Void> initializeBlobWorker(Reference<BlobManagerData> self,
// Ask the candidateWorker to initialize a BW only if the worker does not have a pending request
if (numExistingBWOnAddr(self, workerAddr) == 0 &&
self->recruitingLocalities.count(candidateWorker.worker.stableAddress()) == 0) {
!self->recruitingLocalities.contains(candidateWorker.worker.stableAddress())) {
state UID interfaceId = deterministicRandom()->randomUniqueID();
state InitializeBlobWorkerRequest initReq;
@ -4230,13 +4230,13 @@ ACTOR Future<Void> initializeBlobWorker(Reference<BlobManagerData> self,
if (newBlobWorker.present()) {
BlobWorkerInterface bwi = newBlobWorker.get().interf;
if (!self->deadWorkers.count(bwi.id())) {
if (!self->workerAddresses.count(bwi.stableAddress()) && bwi.locality.dcId() == self->dcId) {
if (!self->deadWorkers.contains(bwi.id())) {
if (!self->workerAddresses.contains(bwi.stableAddress()) && bwi.locality.dcId() == self->dcId) {
self->workerAddresses.insert(bwi.stableAddress());
self->workersById[bwi.id()] = bwi;
self->workerStats[bwi.id()] = BlobWorkerInfo();
self->addActor.send(monitorBlobWorker(self, bwi));
} else if (!self->workersById.count(bwi.id())) {
} else if (!self->workersById.contains(bwi.id())) {
self->addActor.send(killBlobWorker(self, bwi, false));
}
}
@ -5970,7 +5970,7 @@ ACTOR Future<Void> blobManager(BlobManagerInterface bmInterf,
if (g_network->isSimulated()) {
UID clusterId = wait(fetchClusterId(self->db));
auto clusterEpoc = std::make_pair(clusterId, epoch);
bool managerEpochAlreadySeen = managerEpochsSeen.count(clusterEpoc);
bool managerEpochAlreadySeen = managerEpochsSeen.contains(clusterEpoc);
if (managerEpochAlreadySeen) {
TraceEvent(SevError, "DuplicateBlobManagersAtEpoch")
.detail("ClusterId", clusterId)

View File

@ -558,7 +558,7 @@ bool isHealthySingleton(ClusterControllerData* self,
const Optional<UID> recruitingID) {
// A singleton is stable if it exists in cluster, has not been killed off of proc and is not being recruited
bool isStableSingleton = singleton.isPresent() &&
self->id_worker.count(singleton.getInterface().locality.processId()) &&
self->id_worker.contains(singleton.getInterface().locality.processId()) &&
(!recruitingID.present() || (recruitingID.get() == singleton.getInterface().id()));
if (!isStableSingleton) {
@ -1149,7 +1149,7 @@ void haltRegisteringOrCurrentSingleton(ClusterControllerData* self,
// if not currently recruiting, then halt previous one in favour of requesting one
TraceEvent(("CCRegister" + roleName).c_str(), self->id).detail(roleAbbr + "ID", registeringID);
if (currSingleton.isPresent() && currSingleton.getInterface().id() != registeringID &&
self->id_worker.count(currSingleton.getInterface().locality.processId())) {
self->id_worker.contains(currSingleton.getInterface().locality.processId())) {
TraceEvent(("CCHaltPrevious" + roleName).c_str(), self->id)
.detail(roleAbbr + "ID", currSingleton.getInterface().id())
.detail("DcID", printable(self->clusterControllerDcId))
@ -1713,7 +1713,7 @@ ACTOR Future<Void> monitorStorageMetadata(ClusterControllerData* self) {
idMetadata[id] = decodeServerMetadataValue(sm.value);
}
for (auto& s : servers) {
if (idMetadata.count(s.id())) {
if (idMetadata.contains(s.id())) {
s.metadata = idMetadata[s.id()];
} else {
TraceEvent(SevWarn, "StorageServerMetadataMissing", self->id).detail("ServerID", s.id());
@ -2236,7 +2236,7 @@ ACTOR Future<Void> startDataDistributor(ClusterControllerData* self, double wait
.detail("Addr", worker.interf.address())
.detail("DDID", ddInterf.get().id());
if (distributor.present() && distributor.get().id() != ddInterf.get().id() &&
self->id_worker.count(distributor.get().locality.processId())) {
self->id_worker.contains(distributor.get().locality.processId())) {
TraceEvent("CCHaltDataDistributorAfterRecruit", self->id)
.detail("DDID", distributor.get().id())
@ -2336,7 +2336,7 @@ ACTOR Future<Void> startRatekeeper(ClusterControllerData* self, double waitTime)
.detail("Addr", worker.interf.address())
.detail("RKID", interf.get().id());
if (ratekeeper.present() && ratekeeper.get().id() != interf.get().id() &&
self->id_worker.count(ratekeeper.get().locality.processId())) {
self->id_worker.contains(ratekeeper.get().locality.processId())) {
TraceEvent("CCHaltRatekeeperAfterRecruit", self->id)
.detail("RKID", ratekeeper.get().id())
.detail("DcID", printable(self->clusterControllerDcId));
@ -2426,7 +2426,7 @@ ACTOR Future<Void> startConsistencyScan(ClusterControllerData* self) {
.detail("Addr", worker.interf.address())
.detail("CKID", interf.get().id());
if (consistencyScan.present() && consistencyScan.get().id() != interf.get().id() &&
self->id_worker.count(consistencyScan.get().locality.processId())) {
self->id_worker.contains(consistencyScan.get().locality.processId())) {
TraceEvent("CCHaltConsistencyScanAfterRecruit", self->id)
.detail("CKID", consistencyScan.get().id())
.detail("DcID", printable(self->clusterControllerDcId));
@ -2528,7 +2528,7 @@ ACTOR Future<Void> startEncryptKeyProxy(ClusterControllerData* self, EncryptionA
.detail("Id", interf.get().id())
.detail("ProcessId", interf.get().locality.processId());
if (encryptKeyProxy.present() && encryptKeyProxy.get().id() != interf.get().id() &&
self->id_worker.count(encryptKeyProxy.get().locality.processId())) {
self->id_worker.contains(encryptKeyProxy.get().locality.processId())) {
TraceEvent("CCEKP_HaltAfterRecruit", self->id)
.detail("Id", encryptKeyProxy.get().id())
.detail("DcId", printable(self->clusterControllerDcId));
@ -2700,7 +2700,7 @@ ACTOR Future<Void> startBlobMigrator(ClusterControllerData* self, double waitTim
.detail("Addr", worker.interf.address())
.detail("MGID", interf.get().id());
if (blobMigrator.present() && blobMigrator.get().id() != interf.get().id() &&
self->id_worker.count(blobMigrator.get().locality.processId())) {
self->id_worker.contains(blobMigrator.get().locality.processId())) {
TraceEvent("CCHaltBlobMigratorAfterRecruit", self->id)
.detail("MGID", blobMigrator.get().id())
.detail("DcID", printable(self->clusterControllerDcId));
@ -2805,7 +2805,7 @@ ACTOR Future<Void> startBlobManager(ClusterControllerData* self, double waitTime
.detail("Addr", worker.interf.address())
.detail("BMID", interf.get().id());
if (blobManager.present() && blobManager.get().id() != interf.get().id() &&
self->id_worker.count(blobManager.get().locality.processId())) {
self->id_worker.contains(blobManager.get().locality.processId())) {
TraceEvent("CCHaltBlobManagerAfterRecruit", self->id)
.detail("BMID", blobManager.get().id())
.detail("DcID", printable(self->clusterControllerDcId));

View File

@ -270,7 +270,7 @@ ACTOR Future<Void> newTLogServers(Reference<ClusterRecoveryData> self,
std::vector<Standalone<CommitTransactionRef>>* initialConfChanges) {
if (self->configuration.usableRegions > 1) {
state Optional<Key> remoteDcId = self->remoteDcIds.size() ? self->remoteDcIds[0] : Optional<Key>();
if (!self->dcId_locality.count(recr.dcId)) {
if (!self->dcId_locality.contains(recr.dcId)) {
int8_t loc = self->getNextLocality();
Standalone<CommitTransactionRef> tr;
tr.set(tr.arena(), tagLocalityListKeyFor(recr.dcId), tagLocalityListValue(loc));
@ -279,7 +279,7 @@ ACTOR Future<Void> newTLogServers(Reference<ClusterRecoveryData> self,
TraceEvent(SevWarn, "UnknownPrimaryDCID", self->dbgid).detail("PrimaryId", recr.dcId).detail("Loc", loc);
}
if (!self->dcId_locality.count(remoteDcId)) {
if (!self->dcId_locality.contains(remoteDcId)) {
int8_t loc = self->getNextLocality();
Standalone<CommitTransactionRef> tr;
tr.set(tr.arena(), tagLocalityListKeyFor(remoteDcId), tagLocalityListValue(loc));
@ -357,7 +357,7 @@ ACTOR Future<Void> newSeedServers(Reference<ClusterRecoveryData> self,
.detail("CandidateWorker", recruits.storageServers[idx].locality.toString());
InitializeStorageRequest isr;
isr.seedTag = dcId_tags.count(recruits.storageServers[idx].locality.dcId())
isr.seedTag = dcId_tags.contains(recruits.storageServers[idx].locality.dcId())
? dcId_tags[recruits.storageServers[idx].locality.dcId()]
: Tag(nextLocality, 0);
isr.storeType = self->configuration.storageServerStoreType;
@ -376,7 +376,7 @@ ACTOR Future<Void> newSeedServers(Reference<ClusterRecoveryData> self,
CODE_PROBE(true, "initial storage recuitment loop failed to get new server");
wait(delay(SERVER_KNOBS->STORAGE_RECRUITMENT_DELAY));
} else {
if (!dcId_tags.count(recruits.storageServers[idx].locality.dcId())) {
if (!dcId_tags.contains(recruits.storageServers[idx].locality.dcId())) {
dcId_tags[recruits.storageServers[idx].locality.dcId()] = Tag(nextLocality, 0);
nextLocality++;
}
@ -758,7 +758,7 @@ ACTOR Future<Void> updateLogsValue(Reference<ClusterRecoveryData> self, Database
bool found = false;
for (auto& logSet : self->logSystem->getLogSystemConfig().tLogs) {
for (auto& log : logSet.tLogs) {
if (logIds.count(log.id())) {
if (logIds.contains(log.id())) {
found = true;
break;
}
@ -1832,7 +1832,7 @@ ACTOR Future<Void> cleanupRecoveryActorCollection(Reference<ClusterRecoveryData>
}
bool isNormalClusterRecoveryError(const Error& error) {
return normalClusterRecoveryErrors().count(error.code());
return normalClusterRecoveryErrors().contains(error.code());
}
std::string& getRecoveryEventName(ClusterRecoveryEventType type) {

View File

@ -427,7 +427,7 @@ ACTOR Future<Void> commitBatcher(ProxyCommitData* commitData,
if (SERVER_KNOBS->STORAGE_QUOTA_ENABLED && !req.bypassStorageQuota() &&
req.tenantInfo.hasTenant() &&
commitData->tenantsOverStorageQuota.count(req.tenantInfo.tenantId) > 0) {
commitData->tenantsOverStorageQuota.contains(req.tenantInfo.tenantId)) {
req.reply.sendError(storage_quota_exceeded());
continue;
}
@ -1056,7 +1056,7 @@ EncryptCipherDomainId getEncryptDetailsFromMutationRef(ProxyCommitData* commitDa
// Parse mutation key to determine mutation encryption domain
StringRef prefix = m.param1.substr(0, TenantAPI::PREFIX_SIZE);
int64_t tenantId = TenantAPI::prefixToId(prefix, EnforceValidTenantId::False);
if (commitData->tenantMap.count(tenantId)) {
if (commitData->tenantMap.contains(tenantId)) {
domainId = tenantId;
} else {
// Leverage 'default encryption domain'
@ -1194,7 +1194,7 @@ void assertResolutionStateMutationsSizeConsistent(const std::vector<ResolveTrans
bool validTenantAccess(MutationRef m, std::map<int64_t, TenantName> const& tenantMap, Optional<int64_t>& tenantId) {
if (isSingleKeyMutation((MutationRef::Type)m.type)) {
tenantId = TenantAPI::extractTenantIdFromMutation(m);
bool isLegalTenant = tenantMap.count(tenantId.get()) > 0;
bool isLegalTenant = tenantMap.contains(tenantId.get());
CODE_PROBE(!isLegalTenant, "Commit proxy access invalid tenant");
return isLegalTenant;
}
@ -1572,7 +1572,7 @@ Error validateAndProcessTenantAccess(CommitTransactionRequest& tr,
if (!isValid) {
return tenant_not_found();
}
if (!tr.isLockAware() && pProxyCommitData->lockedTenants.count(tr.tenantInfo.tenantId) > 0) {
if (!tr.isLockAware() && pProxyCommitData->lockedTenants.contains(tr.tenantInfo.tenantId)) {
CODE_PROBE(true, "Attempt access to locked tenant without lock awareness");
return tenant_locked();
}
@ -1626,7 +1626,7 @@ void applyMetadataEffect(CommitBatchContext* self) {
// check if all tenant ids are valid if committed == true
committed = committed &&
std::all_of(tenantIds.get().begin(), tenantIds.get().end(), [self](const int64_t& tid) {
return self->pProxyCommitData->tenantMap.count(tid);
return self->pProxyCommitData->tenantMap.contains(tid);
});
if (self->debugID.present()) {
@ -1805,7 +1805,7 @@ ACTOR Future<Void> applyMetadataToCommittedTransactions(CommitBatchContext* self
if (pProxyCommitData->encryptMode == EncryptionAtRestMode::DOMAIN_AWARE && !rawAccessTenantIds.empty()) {
std::unordered_set<EncryptCipherDomainId> extraDomainIds;
for (auto tenantId : rawAccessTenantIds) {
if (self->cipherKeys.count(tenantId) == 0) {
if (!self->cipherKeys.contains(tenantId)) {
extraDomainIds.insert(tenantId);
}
}
@ -1892,7 +1892,7 @@ Future<WriteMutationRefVar> writeMutation(CommitBatchContext* self,
CODE_PROBE(true, "Raw access mutation encryption", probe::decoration::rare);
}
ASSERT_NE(domainId, INVALID_ENCRYPT_DOMAIN_ID);
ASSERT(self->cipherKeys.count(domainId) > 0);
ASSERT(self->cipherKeys.contains(domainId));
encryptedMutation =
mutation->encrypt(self->cipherKeys, domainId, *arena, BlobCipherMetrics::TLOG, encryptTime);
}
@ -2827,7 +2827,7 @@ void maybeAddTssMapping(GetKeyServerLocationsReply& reply,
ProxyCommitData* commitData,
std::unordered_set<UID>& included,
UID ssId) {
if (!included.count(ssId)) {
if (!included.contains(ssId)) {
auto mappingItr = commitData->tssMapping.find(ssId);
if (mappingItr != commitData->tssMapping.end()) {
reply.resultsTssMapping.push_back(*mappingItr);
@ -3112,8 +3112,8 @@ ACTOR static Future<Void> doBlobGranuleLocationRequest(GetBlobGranuleLocationsRe
throw blob_granule_transaction_too_old();
}
if (!req.justGranules && !commitData->blobWorkerInterfCache.count(workerId) &&
!bwiLookedUp.count(workerId)) {
if (!req.justGranules && !commitData->blobWorkerInterfCache.contains(workerId) &&
!bwiLookedUp.contains(workerId)) {
bwiLookedUp.insert(workerId);
bwiLookupFutures.push_back(tr.get(blobWorkerListKeyFor(workerId)));
}
@ -3766,7 +3766,7 @@ ACTOR Future<Void> processTransactionStateRequestPart(TransactionStateResolveCon
ASSERT(pContext->pCommitData != nullptr);
ASSERT(pContext->pActors != nullptr);
if (pContext->receivedSequences.count(request.sequence)) {
if (pContext->receivedSequences.contains(request.sequence)) {
if (pContext->receivedSequences.size() == pContext->maxSequence) {
wait(pContext->txnRecovery);
}

View File

@ -464,7 +464,7 @@ class ConfigBroadcasterImpl {
state BroadcastClientDetails client(
watcher, std::move(configClassSet), lastSeenVersion, std::move(broadcastInterface));
if (impl->clients.count(broadcastInterface.id())) {
if (impl->clients.contains(broadcastInterface.id())) {
// Client already registered
return Void();
}

View File

@ -694,19 +694,19 @@ void DDQueue::validate() {
for (auto it = inFlightRanges.begin(); it != inFlightRanges.end(); ++it) {
for (int i = 0; i < it->value().src.size(); i++) {
// each server in the inFlight map is in the busymap
if (!busymap.count(it->value().src[i]))
if (!busymap.contains(it->value().src[i]))
TraceEvent(SevError, "DDQueueValidateError8")
.detail("Problem", "each server in the inFlight map is in the busymap");
// relocate data that is inFlight is not also in the queue
if (queue[it->value().src[i]].count(it->value()))
if (queue[it->value().src[i]].contains(it->value()))
TraceEvent(SevError, "DDQueueValidateError9")
.detail("Problem", "relocate data that is inFlight is not also in the queue");
}
for (int i = 0; i < it->value().completeDests.size(); i++) {
// each server in the inFlight map is in the dest busymap
if (!destBusymap.count(it->value().completeDests[i]))
if (!destBusymap.contains(it->value().completeDests[i]))
TraceEvent(SevError, "DDQueueValidateError10")
.detail("Problem", "each server in the inFlight map is in the destBusymap");
}
@ -853,7 +853,7 @@ void DDQueue::queueRelocation(RelocateShard rs, std::set<UID>& serversToLaunchFr
// ASSERT(queueMapItr->value() == queueMap.rangeContaining(affectedQueuedItems[r].begin)->value());
RelocateData& rrs = queueMapItr->value();
if (rrs.src.size() == 0 && (rrs.keys == rd.keys || fetchingSourcesQueue.count(rrs) > 0)) {
if (rrs.src.size() == 0 && (rrs.keys == rd.keys || fetchingSourcesQueue.contains(rrs))) {
if (rrs.keys != rd.keys) {
delayDelete.insert(rrs);
}
@ -927,7 +927,7 @@ void DDQueue::queueRelocation(RelocateShard rs, std::set<UID>& serversToLaunchFr
}
void DDQueue::completeSourceFetch(const RelocateData& results) {
ASSERT(fetchingSourcesQueue.count(results));
ASSERT(fetchingSourcesQueue.contains(results));
// logRelocation( results, "GotSourceServers" );
@ -960,7 +960,7 @@ void DDQueue::launchQueuedWork(KeyRange keys, const DDEnabledState* ddEnabledSta
std::set<RelocateData, std::greater<RelocateData>> combined;
auto f = queueMap.intersectingRanges(keys);
for (auto it = f.begin(); it != f.end(); ++it) {
if (it->value().src.size() && queue[it->value().src[0]].count(it->value()))
if (it->value().src.size() && queue[it->value().src[0]].contains(it->value()))
combined.insert(it->value());
}
launchQueuedWork(combined, ddEnabledState);
@ -1064,7 +1064,7 @@ void DDQueue::launchQueuedWork(std::set<RelocateData, std::greater<RelocateData>
bool overlappingInFlight = false;
auto intersectingInFlight = inFlight.intersectingRanges(rd.keys);
for (auto it = intersectingInFlight.begin(); it != intersectingInFlight.end(); ++it) {
if (fetchKeysComplete.count(it->value()) && inFlightActors.liveActorAt(it->range().begin) &&
if (fetchKeysComplete.contains(it->value()) && inFlightActors.liveActorAt(it->range().begin) &&
!rd.keys.contains(it->range()) && it->value().priority >= rd.priority &&
rd.healthPriority < SERVER_KNOBS->PRIORITY_TEAM_UNHEALTHY) {
@ -1235,7 +1235,7 @@ int DDQueue::getHighestPriorityRelocation() const {
// return true if the servers are throttled as source for read rebalance
bool DDQueue::timeThrottle(const std::vector<UID>& ids) const {
return std::any_of(ids.begin(), ids.end(), [this](const UID& id) {
if (this->lastAsSource.count(id)) {
if (this->lastAsSource.contains(id)) {
return (now() - this->lastAsSource.at(id)) * SERVER_KNOBS->READ_REBALANCE_SRC_PARALLELISM <
SERVER_KNOBS->STORAGE_METRICS_AVERAGE_INTERVAL;
}
@ -1394,7 +1394,7 @@ static int nonOverlappedServerCount(const std::vector<UID>& srcIds, const std::v
std::unordered_set<UID> srcSet{ srcIds.begin(), srcIds.end() };
int count = 0;
for (int i = 0; i < destIds.size(); i++) {
if (srcSet.count(destIds[i]) == 0) {
if (!srcSet.contains(destIds[i])) {
count++;
}
}
@ -2231,7 +2231,7 @@ ACTOR Future<Void> dataDistributionRelocator(DDQueue* self,
inline double getWorstCpu(const HealthMetrics& metrics, const std::vector<UID>& ids) {
double cpu = 0;
for (auto& id : ids) {
if (metrics.storageStats.count(id)) {
if (metrics.storageStats.contains(id)) {
cpu = std::max(cpu, metrics.storageStats.at(id).cpuUsage);
} else {
// assume the server is too busy to report its stats

View File

@ -343,7 +343,7 @@ ACTOR Future<Void> trackShardMetrics(DataDistributionTracker::SafeAccessor self,
if (e.code() != error_code_actor_cancelled && e.code() != error_code_dd_tracker_cancelled) {
DisabledTraceEvent(SevDebug, "TrackShardError", self()->distributorId).detail("Keys", keys);
// The above loop use Database cx, but those error should only be thrown in a code using transaction.
ASSERT(transactionRetryableErrors.count(e.code()) == 0);
ASSERT(!transactionRetryableErrors.contains(e.code()));
self()->output.sendError(e); // Propagate failure to dataDistributionTracker
}
throw e;
@ -368,7 +368,7 @@ ACTOR Future<Void> readHotDetector(DataDistributionTracker* self) {
} catch (Error& e) {
if (e.code() != error_code_actor_cancelled) {
// Those error should only be thrown in a code using transaction.
ASSERT(transactionRetryableErrors.count(e.code()) == 0);
ASSERT(!transactionRetryableErrors.contains(e.code()));
self->output.sendError(e); // Propagate failure to dataDistributionTracker
}
throw e;
@ -1837,7 +1837,7 @@ void PhysicalShardCollection::PhysicalShard::removeRange(const KeyRange& outRang
PhysicalShardAvailable PhysicalShardCollection::checkPhysicalShardAvailable(uint64_t physicalShardID,
StorageMetrics const& moveInMetrics) {
ASSERT(physicalShardID != UID().first() && physicalShardID != anonymousShardId.first());
ASSERT(physicalShardInstances.count(physicalShardID) > 0);
ASSERT(physicalShardInstances.contains(physicalShardID));
if (physicalShardInstances[physicalShardID].metrics.bytes + moveInMetrics.bytes >
SERVER_KNOBS->MAX_PHYSICAL_SHARD_BYTES) {
return PhysicalShardAvailable::False;
@ -1859,7 +1859,7 @@ void PhysicalShardCollection::updateTeamPhysicalShardIDsMap(uint64_t inputPhysic
ASSERT(inputTeams.size() <= 2);
ASSERT(inputPhysicalShardID != anonymousShardId.first() && inputPhysicalShardID != UID().first());
for (auto inputTeam : inputTeams) {
if (teamPhysicalShardIDs.count(inputTeam) == 0) {
if (!teamPhysicalShardIDs.contains(inputTeam)) {
std::set<uint64_t> physicalShardIDSet;
physicalShardIDSet.insert(inputPhysicalShardID);
teamPhysicalShardIDs.insert(std::make_pair(inputTeam, physicalShardIDSet));
@ -1876,7 +1876,7 @@ void PhysicalShardCollection::insertPhysicalShardToCollection(uint64_t physicalS
uint64_t debugID,
PhysicalShardCreationTime whenCreated) {
ASSERT(physicalShardID != anonymousShardId.first() && physicalShardID != UID().first());
ASSERT(physicalShardInstances.count(physicalShardID) == 0);
ASSERT(!physicalShardInstances.contains(physicalShardID));
physicalShardInstances.insert(
std::make_pair(physicalShardID, PhysicalShard(txnProcessor, physicalShardID, metrics, teams, whenCreated)));
return;
@ -1953,7 +1953,7 @@ Optional<uint64_t> PhysicalShardCollection::trySelectAvailablePhysicalShardFor(
uint64_t debugID) {
ASSERT(team.servers.size() > 0);
// Case: The team is not tracked in the mapping (teamPhysicalShardIDs)
if (teamPhysicalShardIDs.count(team) == 0) {
if (!teamPhysicalShardIDs.contains(team)) {
return Optional<uint64_t>();
}
ASSERT(teamPhysicalShardIDs[team].size() >= 1);
@ -1964,7 +1964,7 @@ Optional<uint64_t> PhysicalShardCollection::trySelectAvailablePhysicalShardFor(
if (physicalShardID == anonymousShardId.first() || physicalShardID == UID().first()) {
ASSERT(false);
}
ASSERT(physicalShardInstances.count(physicalShardID));
ASSERT(physicalShardInstances.contains(physicalShardID));
/*TraceEvent("TryGetPhysicalShardIDCandidates")
.detail("PhysicalShardID", physicalShardID)
.detail("Bytes", physicalShardInstances[physicalShardID].metrics.bytes)
@ -2005,14 +2005,14 @@ uint64_t PhysicalShardCollection::generateNewPhysicalShardID(uint64_t debugID) {
}
void PhysicalShardCollection::reduceMetricsForMoveOut(uint64_t physicalShardID, StorageMetrics const& moveOutMetrics) {
ASSERT(physicalShardInstances.count(physicalShardID) != 0);
ASSERT(physicalShardInstances.contains(physicalShardID));
ASSERT(physicalShardID != UID().first() && physicalShardID != anonymousShardId.first());
physicalShardInstances[physicalShardID].metrics = physicalShardInstances[physicalShardID].metrics - moveOutMetrics;
return;
}
void PhysicalShardCollection::increaseMetricsForMoveIn(uint64_t physicalShardID, StorageMetrics const& moveInMetrics) {
ASSERT(physicalShardInstances.count(physicalShardID) != 0);
ASSERT(physicalShardInstances.contains(physicalShardID));
ASSERT(physicalShardID != UID().first() && physicalShardID != anonymousShardId.first());
physicalShardInstances[physicalShardID].metrics = physicalShardInstances[physicalShardID].metrics + moveInMetrics;
return;
@ -2109,7 +2109,7 @@ std::pair<Optional<ShardsAffectedByTeamFailure::Team>, bool> PhysicalShardCollec
ASSERT(SERVER_KNOBS->SHARD_ENCODE_LOCATION_METADATA);
ASSERT(SERVER_KNOBS->ENABLE_DD_PHYSICAL_SHARD);
ASSERT(inputPhysicalShardID != anonymousShardId.first() && inputPhysicalShardID != UID().first());
if (physicalShardInstances.count(inputPhysicalShardID) == 0) {
if (!physicalShardInstances.contains(inputPhysicalShardID)) {
return { Optional<ShardsAffectedByTeamFailure::Team>(), true };
}
if (!checkPhysicalShardAvailable(inputPhysicalShardID, moveInMetrics)) {
@ -2141,7 +2141,7 @@ void PhysicalShardCollection::initPhysicalShardCollection(KeyRange keys,
ASSERT(physicalShardID != UID().first());
if (physicalShardID != anonymousShardId.first()) {
updateTeamPhysicalShardIDsMap(physicalShardID, selectedTeams, debugID);
if (physicalShardInstances.count(physicalShardID) == 0) {
if (!physicalShardInstances.contains(physicalShardID)) {
insertPhysicalShardToCollection(
physicalShardID, StorageMetrics(), selectedTeams, debugID, PhysicalShardCreationTime::DDInit);
} else {
@ -2181,7 +2181,7 @@ void PhysicalShardCollection::updatePhysicalShardCollection(
// Update physicalShardInstances
// Add the metrics to in-physicalShard
// e.detail("PhysicalShardIDIn", physicalShardID);
if (physicalShardInstances.count(physicalShardID) == 0) {
if (!physicalShardInstances.contains(physicalShardID)) {
// e.detail("Op", "Insert");
insertPhysicalShardToCollection(
physicalShardID, metrics, selectedTeams, debugID, PhysicalShardCreationTime::DDRelocator);
@ -2266,8 +2266,8 @@ void PhysicalShardCollection::cleanUpPhysicalShardCollection() {
}
for (auto it = physicalShardInstances.begin(); it != physicalShardInstances.end();) {
uint64_t physicalShardID = it->first;
ASSERT(physicalShardInstances.count(physicalShardID) > 0);
if (physicalShardsInUse.count(physicalShardID) == 0) {
ASSERT(physicalShardInstances.contains(physicalShardID));
if (!physicalShardsInUse.contains(physicalShardID)) {
/*TraceEvent("PhysicalShardisEmpty")
.detail("PhysicalShard", physicalShardID)
.detail("RemainBytes", physicalShardInstances[physicalShardID].metrics.bytes);*/
@ -2282,7 +2282,7 @@ void PhysicalShardCollection::cleanUpPhysicalShardCollection() {
for (auto [team, _] : teamPhysicalShardIDs) {
for (auto it = teamPhysicalShardIDs[team].begin(); it != teamPhysicalShardIDs[team].end();) {
uint64_t physicalShardID = *it;
if (physicalShardInstances.count(physicalShardID) == 0) {
if (!physicalShardInstances.contains(physicalShardID)) {
// physicalShardID has been removed from physicalShardInstances (see step 1)
// So, remove the physicalShard from teamPhysicalShardID[team]
it = teamPhysicalShardIDs[team].erase(it);
@ -2322,7 +2322,7 @@ void PhysicalShardCollection::logPhysicalShardCollection() {
uint64_t maxPhysicalShardID = 0;
uint64_t minPhysicalShardID = 0;
for (auto physicalShardID : physicalShardIDs) {
ASSERT(physicalShardInstances.count(physicalShardID) > 0);
ASSERT(physicalShardInstances.contains(physicalShardID));
uint64_t id = physicalShardInstances[physicalShardID].id;
int64_t bytes = physicalShardInstances[physicalShardID].metrics.bytes;
if (bytes > maxPhysicalShardBytes) {
@ -2352,14 +2352,14 @@ void PhysicalShardCollection::logPhysicalShardCollection() {
for (auto ssid : team.servers) {
for (auto it = teamPhysicalShardIDs[team].begin(); it != teamPhysicalShardIDs[team].end();) {
uint64_t physicalShardID = *it;
if (storageServerPhysicalShardStatus.count(ssid) != 0) {
if (storageServerPhysicalShardStatus[ssid].count(physicalShardID) == 0) {
ASSERT(physicalShardInstances.count(physicalShardID) > 0);
if (storageServerPhysicalShardStatus.contains(ssid)) {
if (!storageServerPhysicalShardStatus[ssid].contains(physicalShardID)) {
ASSERT(physicalShardInstances.contains(physicalShardID));
storageServerPhysicalShardStatus[ssid].insert(
std::make_pair(physicalShardID, physicalShardInstances[physicalShardID].metrics.bytes));
}
} else {
ASSERT(physicalShardInstances.count(physicalShardID) > 0);
ASSERT(physicalShardInstances.contains(physicalShardID));
std::map<uint64_t, int64_t> tmp;
tmp.insert(std::make_pair(physicalShardID, physicalShardInstances[physicalShardID].metrics.bytes));
storageServerPhysicalShardStatus.insert(std::make_pair(ssid, tmp));

View File

@ -91,7 +91,7 @@ class DDTeamCollectionImpl {
const ProcessData& workerData = workers[i];
AddressExclusion addr(workerData.address.ip, workerData.address.port);
existingAddrs.insert(addr);
if (self->invalidLocalityAddr.count(addr) &&
if (self->invalidLocalityAddr.contains(addr) &&
self->isValidLocality(self->configuration.storagePolicy, workerData.locality)) {
// The locality info on the addr has been corrected
self->invalidLocalityAddr.erase(addr);
@ -104,7 +104,7 @@ class DDTeamCollectionImpl {
// In case system operator permanently excludes workers on the address with invalid locality
for (auto addr = self->invalidLocalityAddr.begin(); addr != self->invalidLocalityAddr.end();) {
if (!existingAddrs.count(*addr)) {
if (!existingAddrs.contains(*addr)) {
// The address no longer has a worker
addr = self->invalidLocalityAddr.erase(addr);
hasCorrectedLocality = true;
@ -452,7 +452,7 @@ public:
bool foundSrc = false;
for (const auto& id : req.src) {
if (self->server_info.count(id)) {
if (self->server_info.contains(id)) {
foundSrc = true;
break;
}
@ -1224,7 +1224,7 @@ public:
}
ASSERT_EQ(tc->primary, t.primary);
// tc->traceAllInfo();
if (tc->server_info.count(t.servers[0])) {
if (tc->server_info.contains(t.servers[0])) {
auto& info = tc->server_info[t.servers[0]];
bool found = false;
@ -2173,14 +2173,14 @@ public:
// Do not retrigger and double-overwrite failed or wiggling servers
auto old = self->excludedServers.getKeys();
for (const auto& o : old) {
if (!exclusionTracker.excluded.count(o) && !exclusionTracker.failed.count(o) &&
if (!exclusionTracker.excluded.contains(o) && !exclusionTracker.failed.contains(o) &&
!(self->excludedServers.count(o) &&
self->excludedServers.get(o) == DDTeamCollection::Status::WIGGLING)) {
self->excludedServers.set(o, DDTeamCollection::Status::NONE);
}
}
for (const auto& n : exclusionTracker.excluded) {
if (!exclusionTracker.failed.count(n)) {
if (!exclusionTracker.failed.contains(n)) {
self->excludedServers.set(n, DDTeamCollection::Status::EXCLUDED);
}
}
@ -2783,7 +2783,7 @@ public:
if (newServer.present()) {
UID id = newServer.get().interf.id();
if (!self->server_and_tss_info.count(id)) {
if (!self->server_and_tss_info.contains(id)) {
if (!recruitTss || tssState->tssRecruitSuccess()) {
self->addServer(newServer.get().interf,
candidateWorker.processClass,
@ -3043,7 +3043,7 @@ public:
UID tssId = itr->second->getId();
StorageServerInterface tssi = itr->second->getLastKnownInterface();
if (self->shouldHandleServer(tssi) && self->server_and_tss_info.count(tssId)) {
if (self->shouldHandleServer(tssi) && self->server_and_tss_info.contains(tssId)) {
Promise<Void> killPromise = itr->second->killTss;
if (killPromise.canBeSet()) {
CODE_PROBE(tssToRecruit < 0, "Killing TSS due to too many TSS");
@ -3171,7 +3171,7 @@ public:
ProcessClass const& processClass = servers[i].second;
if (!self->shouldHandleServer(ssi)) {
continue;
} else if (self->server_and_tss_info.count(serverId)) {
} else if (self->server_and_tss_info.contains(serverId)) {
auto& serverInfo = self->server_and_tss_info[serverId];
if (ssi.getValue.getEndpoint() !=
serverInfo->getLastKnownInterface().getValue.getEndpoint() ||
@ -3185,7 +3185,7 @@ public:
serverInfo->interfaceChanged.getFuture());
currentInterfaceChanged.send(std::make_pair(ssi, processClass));
}
} else if (!self->recruitingIds.count(ssi.id())) {
} else if (!self->recruitingIds.contains(ssi.id())) {
self->addServer(ssi,
processClass,
self->serverTrackerErrorOut,
@ -3263,7 +3263,7 @@ public:
// if perpetual_storage_wiggle_locality has value and not 0(disabled).
if (!localityKeyValues.empty()) {
if (self->server_info.count(res.begin()->first)) {
if (self->server_info.contains(res.begin()->first)) {
auto server = self->server_info.at(res.begin()->first);
for (const auto& [localityKey, localityValue] : localityKeyValues) {
// Update the wigglingId only if it matches the locality.
@ -3975,14 +3975,14 @@ Optional<Reference<IDataDistributionTeam>> DDTeamCollection::findTeamFromServers
const std::set<UID> completeSources(servers.begin(), servers.end());
for (const auto& server : servers) {
if (!server_info.count(server)) {
if (!server_info.contains(server)) {
continue;
}
auto const& teamList = server_info[server]->getTeams();
for (const auto& team : teamList) {
bool found = true;
for (const UID& s : team->getServerIDs()) {
if (!completeSources.count(s)) {
if (!completeSources.contains(s)) {
found = false;
break;
}
@ -5688,7 +5688,7 @@ void DDTeamCollection::addServer(StorageServerInterface newServer,
if (newServer.isTss()) {
tss_info_by_pair[newServer.tssPairID.get()] = r;
if (server_info.count(newServer.tssPairID.get())) {
if (server_info.contains(newServer.tssPairID.get())) {
r->onTSSPairRemoved = server_info[newServer.tssPairID.get()]->onRemoved;
}
} else {
@ -5701,7 +5701,7 @@ void DDTeamCollection::addServer(StorageServerInterface newServer,
if (!newServer.isTss()) {
// link and wake up tss' tracker so it knows when this server gets removed
if (tss_info_by_pair.count(newServer.id())) {
if (tss_info_by_pair.contains(newServer.id())) {
tss_info_by_pair[newServer.id()]->onTSSPairRemoved = r->onRemoved;
if (tss_info_by_pair[newServer.id()]->wakeUpTracker.canBeSet()) {
auto p = tss_info_by_pair[newServer.id()]->wakeUpTracker;
@ -5987,7 +5987,7 @@ void DDTeamCollection::removeServer(UID removedServer) {
Future<Void> DDTeamCollection::excludeStorageServersForWiggle(const UID& id) {
Future<Void> moveFuture = Void();
if (this->server_info.count(id) != 0) {
if (this->server_info.contains(id)) {
auto& info = server_info.at(id);
AddressExclusion addr(info->getLastKnownInterface().address().ip, info->getLastKnownInterface().address().port);

View File

@ -207,7 +207,7 @@ Future<Void> StorageWiggler::onCheck() const {
void StorageWiggler::addServer(const UID& serverId, const StorageMetadataType& metadata) {
// std::cout << "size: " << pq_handles.size() << " add " << serverId.toString() << " DC: "
// << teamCollection->isPrimary() << std::endl;
ASSERT(!pq_handles.count(serverId));
ASSERT(!pq_handles.contains(serverId));
pq_handles[serverId] = wiggle_pq.emplace(metadata, serverId);
}
@ -1730,7 +1730,7 @@ ACTOR Future<std::map<NetworkAddress, std::pair<WorkerInterface, std::string>>>
.detail("SS", server.id());
++storageFailures;
} else {
if (result.count(server.address())) {
if (result.contains(server.address())) {
ASSERT(itr->second.id() == result[server.address()].first.id());
if (result[server.address()].second.find("storage") == std::string::npos)
result[server.address()].second.append(",storage");
@ -1755,7 +1755,7 @@ ACTOR Future<std::map<NetworkAddress, std::pair<WorkerInterface, std::string>>>
TraceEvent(SevWarn, "MissingTlogWorkerInterface").detail("TlogAddress", tlog.address());
throw snap_tlog_failed();
}
if (result.count(tlog.address())) {
if (result.contains(tlog.address())) {
ASSERT(workersMap[tlog.address()].id() == result[tlog.address()].first.id());
result[tlog.address()].second.append(",tlog");
} else {
@ -1779,7 +1779,7 @@ ACTOR Future<std::map<NetworkAddress, std::pair<WorkerInterface, std::string>>>
Optional<NetworkAddress> secondary = worker.interf.tLog.getEndpoint().addresses.secondaryAddress;
if (coordinatorsAddrSet.find(primary) != coordinatorsAddrSet.end() ||
(secondary.present() && (coordinatorsAddrSet.find(secondary.get()) != coordinatorsAddrSet.end()))) {
if (result.count(primary)) {
if (result.contains(primary)) {
ASSERT(workersMap[primary].id() == result[primary].first.id());
result[primary].second.append(",coord");
} else {
@ -1791,7 +1791,7 @@ ACTOR Future<std::map<NetworkAddress, std::pair<WorkerInterface, std::string>>>
for (const auto& worker : workers) {
const auto& processAddress = worker.interf.address();
// skip processes that are already included
if (result.count(processAddress))
if (result.contains(processAddress))
continue;
const auto& processClassType = worker.processClass.classType();
// coordinators are always configured to be recruited
@ -3784,7 +3784,7 @@ ACTOR Future<Void> dataDistributor_impl(DataDistributorInterface di,
}
when(DistributorSnapRequest snapReq = waitNext(di.distributorSnapReq.getFuture())) {
auto& snapUID = snapReq.snapUID;
if (ddSnapReqResultMap.count(snapUID)) {
if (ddSnapReqResultMap.contains(snapUID)) {
CODE_PROBE(true,
"Data distributor received a duplicate finished snapshot request",
probe::decoration::rare);
@ -3793,7 +3793,7 @@ ACTOR Future<Void> dataDistributor_impl(DataDistributorInterface di,
TraceEvent("RetryFinishedDistributorSnapRequest")
.detail("SnapUID", snapUID)
.detail("Result", result.isError() ? result.getError().code() : 0);
} else if (ddSnapReqMap.count(snapReq.snapUID)) {
} else if (ddSnapReqMap.contains(snapReq.snapUID)) {
CODE_PROBE(true, "Data distributor received a duplicate ongoing snapshot request");
TraceEvent("RetryOngoingDistributorSnapRequest").detail("SnapUID", snapUID);
ASSERT(snapReq.snapPayload == ddSnapReqMap[snapUID].snapPayload);
@ -3836,7 +3836,7 @@ ACTOR Future<Void> dataDistributor_impl(DataDistributorInterface di,
}
}
} catch (Error& err) {
if (normalDataDistributorErrors().count(err.code()) == 0) {
if (!(normalDataDistributorErrors().contains(err.code()))) {
TraceEvent("DataDistributorError", di.id()).errorUnsuppressed(err);
throw err;
}

View File

@ -1154,7 +1154,8 @@ ACTOR Future<Void> checkRemoved(Reference<AsyncVar<ServerDBInfo> const> db,
GrvProxyInterface myInterface) {
loop {
if (db->get().recoveryCount >= recoveryCount &&
!std::count(db->get().client.grvProxies.begin(), db->get().client.grvProxies.end(), myInterface)) {
std::find(db->get().client.grvProxies.begin(), db->get().client.grvProxies.end(), myInterface) ==
db->get().client.grvProxies.end()) {
throw worker_removed();
}
wait(db->onChange());
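
For sequences such as the grvProxies vector above, there is no contains() member, so the commit replaces the std::count truth test with std::find compared against end(), which can stop scanning at the first match. A short sketch of that half of the idiom, again with made-up names rather than the real interface types:

#include <algorithm>
#include <cassert>
#include <vector>

int main() {
    std::vector<int> proxies = { 4, 8, 15 };

    // Before: std::count traverses the whole range even after a match.
    bool presentOld = std::count(proxies.begin(), proxies.end(), 8) > 0;

    // After: std::find returns at the first match; compare against end().
    bool presentNew = std::find(proxies.begin(), proxies.end(), 8) != proxies.end();

    assert(presentOld == presentNew);
    return 0;
}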

View File

@ -494,7 +494,7 @@ private:
uint32_t opType = (uint32_t)op;
// Make sure the first bit of the optype is empty
ASSERT(opType >> ENCRYPTION_ENABLED_BIT == 0);
if (!enableEncryption || metaOps.count(op) > 0) {
if (!enableEncryption || metaOps.contains(op)) {
OpHeader h = { opType, v1.size(), v2.size() };
log->push(StringRef((const uint8_t*)&h, sizeof(h)));
log->push(v1);
@ -545,7 +545,7 @@ private:
ASSERT(!isOpEncrypted(&h));
// Metadata op types to be excluded from encryption.
static std::unordered_set<OpType> metaOps = { OpSnapshotEnd, OpSnapshotAbort, OpCommit, OpRollback };
if (metaOps.count((OpType)h.op) == 0) {
if (!metaOps.contains((OpType)h.op)) {
// It is not supported to open an encrypted store as unencrypted, or vice-versa.
ASSERT_EQ(encryptedOp, self->enableEncryption);
}

View File

@ -28,7 +28,7 @@
#include "fdbserver/Knobs.h"
void KnobKeyValuePairs::set(const std::string& name, const ParsedKnobValue value) {
ASSERT(knobs.count(name) == 0);
ASSERT(!knobs.contains(name));
knobs[name] = value;
}

View File

@ -44,7 +44,7 @@ LatencyBandsMap::ExpirableBands::ExpirableBands(LatencyBands&& bands)
: latencyBands(std::move(bands)), lastUpdated(now()) {}
Optional<LatencyBands*> LatencyBandsMap::getLatencyBands(TransactionTag tag) {
if (map.size() == maxSize && !map.count(tag)) {
if (map.size() == maxSize && !map.contains(tag)) {
CODE_PROBE(true, "LatencyBandsMap reached maxSize");
return {};
}

View File

@ -203,7 +203,8 @@ ACTOR Future<Void> tryBecomeLeaderInternal(ServerCoordinators coordinators,
// If more than 2*SERVER_KNOBS->POLLING_FREQUENCY elapses while we are nominated by some coordinator but
// there is no leader, we might be breaking the leader election process for someone with better
// communications but lower ID, so change IDs.
if ((!leader.present() || !leader.get().second) && std::count(nominees.begin(), nominees.end(), myInfo)) {
if ((!leader.present() || !leader.get().second) &&
std::find(nominees.begin(), nominees.end(), myInfo) != nominees.end()) {
if (!badCandidateTimeout.isValid())
badCandidateTimeout = delay(SERVER_KNOBS->POLLING_FREQUENCY * 2, TaskPriority::CoordinationReply);
} else

View File

@ -157,7 +157,7 @@ void LogSet::checkSatelliteTagLocations() {
std::set<Optional<Key>> zones;
std::set<Optional<Key>> dcs;
for (auto& loc : tLogLocalities) {
if (zones.count(loc.zoneId())) {
if (zones.contains(loc.zoneId())) {
foundDuplicate = true;
break;
}
@ -341,7 +341,7 @@ float LogPushData::getEmptyMessageRatio() const {
bool LogPushData::writeTransactionInfo(int location, uint32_t subseq) {
if (!FLOW_KNOBS->WRITE_TRACING_ENABLED || logSystem->getTLogVersion() < TLogVersion::V6 ||
writtenLocations.count(location) != 0) {
writtenLocations.contains(location)) {
return false;
}

View File

@ -287,13 +287,13 @@ bool LogSystemConfig::isNextGenerationOf(LogSystemConfig const& r) const {
bool LogSystemConfig::hasTLog(UID tid) const {
for (const auto& log : tLogs) {
if (std::count(log.tLogs.begin(), log.tLogs.end(), tid) > 0) {
if (std::find(log.tLogs.begin(), log.tLogs.end(), tid) != log.tLogs.end()) {
return true;
}
}
for (const auto& old : oldTLogs) {
for (const auto& log : old.tLogs) {
if (std::count(log.tLogs.begin(), log.tLogs.end(), tid) > 0) {
if (std::find(log.tLogs.begin(), log.tLogs.end(), tid) != log.tLogs.end()) {
return true;
}
}
@ -303,13 +303,13 @@ bool LogSystemConfig::hasTLog(UID tid) const {
bool LogSystemConfig::hasLogRouter(UID rid) const {
for (const auto& log : tLogs) {
if (std::count(log.logRouters.begin(), log.logRouters.end(), rid) > 0) {
if (std::find(log.logRouters.begin(), log.logRouters.end(), rid) != log.logRouters.end()) {
return true;
}
}
for (const auto& old : oldTLogs) {
for (const auto& log : old.tLogs) {
if (std::count(log.logRouters.begin(), log.logRouters.end(), rid) > 0) {
if (std::find(log.logRouters.begin(), log.logRouters.end(), rid) != log.logRouters.end()) {
return true;
}
}
@ -319,7 +319,7 @@ bool LogSystemConfig::hasLogRouter(UID rid) const {
bool LogSystemConfig::hasBackupWorker(UID bid) const {
for (const auto& log : tLogs) {
if (std::count(log.backupWorkers.begin(), log.backupWorkers.end(), bid) > 0) {
if (std::find(log.backupWorkers.begin(), log.backupWorkers.end(), bid) != log.backupWorkers.end()) {
return true;
}
}

View File

@ -227,7 +227,7 @@ bool MockStorageServer::allShardStatusIn(const KeyRangeRef& range, const std::se
for (auto it = ranges.begin(); it != ranges.end(); ++it) {
// fmt::print("allShardStatusIn: {}: {} \n", id.toString(), it->range().toString());
if (!status.count(it->cvalue().status))
if (!status.contains(it->cvalue().status))
return false;
}
return true;
@ -679,7 +679,7 @@ void MockGlobalState::addStoragePerProcess(uint64_t defaultDiskSpace) {
}
bool MockGlobalState::serverIsSourceForShard(const UID& serverId, KeyRangeRef shard, bool inFlightShard) {
if (!allServers.count(serverId))
if (!allServers.contains(serverId))
return false;
// check serverKeys
@ -703,9 +703,9 @@ bool MockGlobalState::serverIsDestForShard(const UID& serverId, KeyRangeRef shar
TraceEvent(SevDebug, "ServerIsDestForShard")
.detail("ServerId", serverId)
.detail("Keys", shard)
.detail("Contains", allServers.count(serverId));
.detail("Contains", allServers.contains(serverId));
if (!allServers.count(serverId))
if (!allServers.contains(serverId))
return false;
// check serverKeys
@ -723,7 +723,7 @@ bool MockGlobalState::serverIsDestForShard(const UID& serverId, KeyRangeRef shar
}
bool MockGlobalState::allShardsRemovedFromServer(const UID& serverId) {
return allServers.count(serverId) && shardMapping->getNumberOfShards(serverId) == 0;
return allServers.contains(serverId) && shardMapping->getNumberOfShards(serverId) == 0;
}
Future<std::pair<Optional<StorageMetrics>, int>> MockGlobalState::waitStorageMetrics(

View File

@ -892,14 +892,14 @@ ACTOR Future<std::vector<std::vector<UID>>> additionalSources(RangeResult shards
decodeKeyServersValue(UIDtoTagMap, shards[i].value, src, dest);
for (int s = 0; s < src.size(); s++) {
if (!fetching.count(src[s])) {
if (!fetching.contains(src[s])) {
fetching.insert(src[s]);
serverListEntries.push_back(tr->get(serverListKeyFor(src[s])));
}
}
for (int s = 0; s < dest.size(); s++) {
if (!fetching.count(dest[s])) {
if (!fetching.contains(dest[s])) {
fetching.insert(dest[s]);
serverListEntries.push_back(tr->get(serverListKeyFor(dest[s])));
}
@ -1350,7 +1350,7 @@ ACTOR static Future<Void> finishMoveKeys(Database occ,
completeSrc = src;
} else {
for (int i = 0; i < completeSrc.size(); i++) {
if (!srcSet.count(completeSrc[i])) {
if (!srcSet.contains(completeSrc[i])) {
swapAndPop(&completeSrc, i--);
}
}
@ -1405,7 +1405,7 @@ ACTOR static Future<Void> finishMoveKeys(Database occ,
srcSet.insert(src2[s]);
for (int i = 0; i < completeSrc.size(); i++) {
if (!srcSet.count(completeSrc[i])) {
if (!srcSet.contains(completeSrc[i])) {
swapAndPop(&completeSrc, i--);
}
}
@ -1452,7 +1452,7 @@ ACTOR static Future<Void> finishMoveKeys(Database occ,
state std::vector<UID> newDestinations;
std::set<UID> completeSrcSet(completeSrc.begin(), completeSrc.end());
for (auto& it : dest) {
if (!hasRemote || !completeSrcSet.count(it)) {
if (!hasRemote || !completeSrcSet.contains(it)) {
newDestinations.push_back(it);
}
}
@ -1491,7 +1491,7 @@ ACTOR static Future<Void> finishMoveKeys(Database occ,
auto tssPair = tssMapping.find(storageServerInterfaces[s].id());
if (tssPair != tssMapping.end() && waitForTSSCounter > 0 &&
!tssToIgnore.count(tssPair->second.id())) {
!tssToIgnore.contains(tssPair->second.id())) {
tssReadyInterfs.push_back(tssPair->second);
tssReady.push_back(waitForShardReady(
tssPair->second, keys, tr.getReadVersion().get(), GetShardStateRequest::READABLE));
@ -2171,7 +2171,7 @@ ACTOR static Future<Void> finishMoveShards(Database occ,
completeSrc = src;
} else {
for (int i = 0; i < completeSrc.size(); i++) {
if (!srcSet.count(completeSrc[i])) {
if (!srcSet.contains(completeSrc[i])) {
swapAndPop(&completeSrc, i--);
}
}
@ -2187,7 +2187,7 @@ ACTOR static Future<Void> finishMoveShards(Database occ,
state std::vector<UID> newDestinations;
std::set<UID> completeSrcSet(completeSrc.begin(), completeSrc.end());
for (const UID& id : destServers) {
if (!hasRemote || !completeSrcSet.count(id)) {
if (!hasRemote || !completeSrcSet.contains(id)) {
newDestinations.push_back(id);
}
}
@ -2692,7 +2692,7 @@ ACTOR Future<Void> removeStorageServer(Database cx,
allLocalities.insert(dcId_locality[decodeTLogDatacentersKey(it.key)]);
}
if (locality >= 0 && !allLocalities.count(locality)) {
if (locality >= 0 && !allLocalities.contains(locality)) {
for (auto& it : fTagLocalities.get()) {
if (locality == decodeTagLocalityListValue(it.value)) {
tr->clear(it.key);
@ -3316,7 +3316,7 @@ void seedShardServers(Arena& arena, CommitTransactionRef& tr, std::vector<Storag
std::map<UID, Tag> server_tag;
int8_t nextLocality = 0;
for (auto& s : servers) {
if (!dcId_locality.count(s.locality.dcId())) {
if (!dcId_locality.contains(s.locality.dcId())) {
tr.set(arena, tagLocalityListKeyFor(s.locality.dcId()), tagLocalityListValue(nextLocality));
dcId_locality[s.locality.dcId()] = Tag(nextLocality, 0);
nextLocality++;
@ -3398,7 +3398,7 @@ Future<Void> unassignServerKeys(UID traceId, TrType tr, KeyRangeRef keys, std::s
continue;
}
if (ignoreServers.count(id)) {
if (ignoreServers.contains(id)) {
dprint("Ignore un-assignment from {} .\n", id.toString());
continue;
}
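For the associative containers (std::set, std::map and their unordered variants), count(key) > 0 and count(key) == 0 become contains(key) and !contains(key). A minimal sketch of the equivalence, assuming C++20 and hypothetical names:

#include <map>
#include <set>
#include <string>

int main() {
    std::set<std::string> ignoreServers = { "ss-1", "ss-2" };
    std::map<std::string, int> dcIdLocality;

    // Pre-C++20 style: count() used purely as a boolean.
    bool ignoredOld = ignoreServers.count("ss-1") > 0;
    bool missingOld = dcIdLocality.count("dc-0") == 0;

    // C++20 style: contains() states the intent directly.
    bool ignoredNew = ignoreServers.contains("ss-1");
    bool missingNew = !dcIdLocality.contains("dc-0");

    return (ignoredOld == ignoredNew && missingOld == missingNew) ? 0 : 1;
}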

View File

@ -128,7 +128,7 @@ public:
const UID serverId = ssi.id();
newServers[serverId] = ssi;
if (oldServers.count(serverId)) {
if (oldServers.contains(serverId)) {
if (ssi.getValue.getEndpoint() != oldServers[serverId].getValue.getEndpoint() ||
ssi.isAcceptingRequests() != oldServers[serverId].isAcceptingRequests()) {
serverChanges.send(std::make_pair(serverId, Optional<StorageServerInterface>(ssi)));
@ -617,7 +617,7 @@ public:
self.maxVersion = std::max(self.maxVersion, req.version);
if (recoveryVersion == std::numeric_limits<Version>::max() &&
self.version_recovery.count(recoveryVersion)) {
self.version_recovery.contains(recoveryVersion)) {
recoveryVersion = self.maxVersion;
self.version_recovery[recoveryVersion] =
self.version_recovery[std::numeric_limits<Version>::max()];
@ -681,7 +681,7 @@ public:
if (recoveryVersion == 0) {
recoveryVersion = std::numeric_limits<Version>::max();
}
if (self.version_recovery.count(recoveryVersion)) {
if (self.version_recovery.contains(recoveryVersion)) {
auto& it = self.version_recovery[recoveryVersion];
double existingEnd = it.second.present() ? it.second.get() : now();
double existingDuration = existingEnd - it.first;
@ -999,7 +999,7 @@ void Ratekeeper::updateRate(RatekeeperLimits* limits) {
ignoredMachines.insert(ss->second->locality.zoneId());
continue;
}
if (ignoredMachines.count(ss->second->locality.zoneId()) > 0) {
if (ignoredMachines.contains(ss->second->locality.zoneId())) {
continue;
}
@ -1021,7 +1021,7 @@ void Ratekeeper::updateRate(RatekeeperLimits* limits) {
ignoredDurabilityLagMachines.insert(ss->second->locality.zoneId());
continue;
}
if (ignoredDurabilityLagMachines.count(ss->second->locality.zoneId()) > 0) {
if (ignoredDurabilityLagMachines.contains(ss->second->locality.zoneId())) {
continue;
}
@ -1215,7 +1215,7 @@ void Ratekeeper::updateRate(RatekeeperLimits* limits) {
minSSVer = std::min(minSSVer, ss.lastReply.version);
// Machines that ratekeeper isn't controlling can fall arbitrarily far behind
if (ignoredMachines.count(it.value.locality.zoneId()) == 0) {
if (!ignoredMachines.contains(it.value.locality.zoneId())) {
minLimitingSSVer = std::min(minLimitingSSVer, ss.lastReply.version);
}
}
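The ratekeeper hunks above follow a common "skip set" pattern: record a zone the first time it is ignored, then skip later servers from the same zone with a membership check. A hedged sketch of that loop shape, assuming C++20; ZoneInfo and countActive are invented for illustration and are not fdbserver types:

#include <set>
#include <string>
#include <vector>

struct ZoneInfo {
    std::string zoneId;
    bool ignored = false;
};

int countActive(const std::vector<ZoneInfo>& servers) {
    std::set<std::string> ignoredZones;
    int active = 0;
    for (const auto& s : servers) {
        if (s.ignored) {
            ignoredZones.insert(s.zoneId);
            continue;
        }
        if (ignoredZones.contains(s.zoneId)) // was: ignoredZones.count(...) > 0
            continue;
        ++active;
    }
    return active;
}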

View File

@ -34,7 +34,7 @@ void ResolutionBalancer::setResolvers(const std::vector<ResolverInterface>& v) {
}
void ResolutionBalancer::setChangesInReply(UID requestingProxy, GetCommitVersionReply& rep) {
if (resolverNeedingChanges.count(requestingProxy)) {
if (resolverNeedingChanges.contains(requestingProxy)) {
rep.resolverChanges = resolverChanges.get();
rep.resolverChangesVersion = resolverChangesVersion;
resolverNeedingChanges.erase(requestingProxy);
@ -86,12 +86,12 @@ static std::pair<KeyRangeRef, bool> findRange(CoalescedKeyRangeMap<int>& key_res
++it;
// If possible create a new boundary which doesn't exist yet
for (; it != ranges.end(); ++it) {
if (it->value() == src && !borders.count(prev->value()) &&
if (it->value() == src && !borders.contains(prev->value()) &&
std::find(movedRanges.begin(), movedRanges.end(), ResolverMoveRef(it->range(), dest)) ==
movedRanges.end()) {
return std::make_pair(it->range(), true);
}
if (prev->value() == src && !borders.count(it->value()) &&
if (prev->value() == src && !borders.contains(it->value()) &&
std::find(movedRanges.begin(), movedRanges.end(), ResolverMoveRef(prev->range(), dest)) ==
movedRanges.end()) {
return std::make_pair(prev->range(), false);

View File

@ -666,7 +666,7 @@ ACTOR Future<Void> processTransactionStateRequestPart(Reference<Resolver> self,
ASSERT(pContext->pResolverData.getPtr() != nullptr);
ASSERT(pContext->pActors != nullptr);
if (pContext->receivedSequences.count(request.sequence)) {
if (pContext->receivedSequences.contains(request.sequence)) {
// This part is already received. Still we will re-broadcast it to other CommitProxies & Resolvers
pContext->pActors->send(broadcastTxnRequest(request, SERVER_KNOBS->TXN_STATE_SEND_AMOUNT, true));
wait(yield());
@ -795,7 +795,7 @@ ACTOR Future<Void> checkRemoved(Reference<AsyncVar<ServerDBInfo> const> db,
ResolverInterface myInterface) {
loop {
if (db->get().recoveryCount >= recoveryCount &&
!std::count(db->get().resolvers.begin(), db->get().resolvers.end(), myInterface))
std::find(db->get().resolvers.begin(), db->get().resolvers.end(), myInterface) == db->get().resolvers.end())
throw worker_removed();
wait(db->onChange());
}

View File

@ -76,12 +76,12 @@ void ServerThroughputTracker::cleanupUnseenTags(TransactionTagMap<ThroughputCoun
while (it != tagToThroughputCounters.end()) {
auto& [tag, throughputCounters] = *it;
bool seen = false;
if (seenReadTags.count(tag)) {
if (seenReadTags.contains(tag)) {
seen = true;
} else {
throughputCounters.updateThroughput(0, OpType::READ);
}
if (seenWriteTags.count(tag)) {
if (seenWriteTags.contains(tag)) {
seen = true;
} else {
throughputCounters.updateThroughput(0, OpType::WRITE);
@ -102,7 +102,7 @@ void ServerThroughputTracker::cleanupUnseenStorageServers(std::unordered_set<UID
auto it1 = throughput.begin();
while (it1 != throughput.end()) {
auto& [ssId, tagToThroughputCounters] = *it1;
if (seen.count(ssId)) {
if (seen.contains(ssId)) {
++it1;
} else {
auto it2 = tagToThroughputCounters.begin();

View File

@ -202,14 +202,15 @@ void ShardsAffectedByTeamFailure::check() const {
if (EXPENSIVE_VALIDATION || checkMode == CheckMode::ForceCheck) {
for (auto t = team_shards.begin(); t != team_shards.end(); ++t) {
auto i = shard_teams.rangeContaining(t->second.begin);
if (i->range() != t->second || !std::count(i->value().first.begin(), i->value().first.end(), t->first)) {
if (i->range() != t->second ||
std::find(i->value().first.begin(), i->value().first.end(), t->first) == i->value().first.end()) {
ASSERT(false);
}
}
auto rs = shard_teams.ranges();
for (auto i = rs.begin(); i != rs.end(); ++i) {
for (auto t = i->value().first.begin(); t != i->value().first.end(); ++t) {
if (!team_shards.count(std::make_pair(*t, i->range()))) {
if (!team_shards.contains(std::make_pair(*t, i->range()))) {
std::string teamDesc, shards;
for (int k = 0; k < t->servers.size(); k++)
teamDesc += format("%llx ", t->servers[k].first());

View File

@ -332,14 +332,14 @@ JsonBuilderObject machineStatusFetcher(WorkerEvents mMetrics,
std::string machineId = event.getValue("MachineID");
// If this machine ID does not already exist in the machineMap, add it
if (machineJsonMap.count(machineId) == 0) {
if (!machineJsonMap.contains(machineId)) {
statusObj["machine_id"] = machineId;
if (dcIds.count(it->first)) {
if (dcIds.contains(it->first)) {
statusObj["datacenter_id"] = dcIds[it->first];
}
if (locality.count(it->first)) {
if (locality.contains(it->first)) {
statusObj["locality"] = locality[it->first].toJSON<JsonBuilderObject>();
}
@ -393,7 +393,7 @@ JsonBuilderObject machineStatusFetcher(WorkerEvents mMetrics,
tempList.address = it->first;
// Check if the locality data is present and if so, make use of it.
auto localityData = LocalityData();
if (locality.count(it->first)) {
if (locality.contains(it->first)) {
localityData = locality[it->first];
}
@ -819,7 +819,7 @@ ACTOR static Future<JsonBuilderObject> processStatusFetcher(
machineMemoryUsage.insert(std::make_pair(workerItr->interf.locality.machineId(), MachineMemoryInfo()))
.first;
try {
ASSERT(pMetrics.count(workerItr->interf.address()));
ASSERT(pMetrics.contains(workerItr->interf.address()));
const TraceEventFields& processMetrics = pMetrics[workerItr->interf.address()];
const TraceEventFields& programStart = programStarts[workerItr->interf.address()];
@ -947,7 +947,7 @@ ACTOR static Future<JsonBuilderObject> processStatusFetcher(
wait(yield());
state JsonBuilderObject statusObj;
try {
ASSERT(pMetrics.count(workerItr->interf.address()));
ASSERT(pMetrics.contains(workerItr->interf.address()));
NetworkAddress address = workerItr->interf.address();
const TraceEventFields& processMetrics = pMetrics[workerItr->interf.address()];
@ -1037,7 +1037,7 @@ ACTOR static Future<JsonBuilderObject> processStatusFetcher(
}
int64_t memoryLimit = 0;
if (programStarts.count(address)) {
if (programStarts.contains(address)) {
auto const& programStartEvent = programStarts.at(address);
if (programStartEvent.size() > 0) {
@ -1057,7 +1057,7 @@ ACTOR static Future<JsonBuilderObject> processStatusFetcher(
}
// if this process address is in the machine metrics
if (mMetrics.count(address) && mMetrics[address].size()) {
if (mMetrics.contains(address) && mMetrics[address].size()) {
double availableMemory;
availableMemory = mMetrics[address].getDouble("AvailableMemory");
@ -1074,7 +1074,7 @@ ACTOR static Future<JsonBuilderObject> processStatusFetcher(
JsonBuilderArray messages;
if (errors.count(address) && errors[address].size()) {
if (errors.contains(address) && errors[address].size()) {
// returns status object with type and time of error
messages.push_back(getError(errors.at(address)));
}
@ -1088,7 +1088,7 @@ ACTOR static Future<JsonBuilderObject> processStatusFetcher(
}
// If this process had a trace file open error, identified by strAddress, then add it to messages array
if (tracefileOpenErrorMap.count(strAddress)) {
if (tracefileOpenErrorMap.contains(strAddress)) {
messages.push_back(tracefileOpenErrorMap[strAddress]);
}
@ -1573,9 +1573,9 @@ ACTOR static Future<Void> logRangeWarningFetcher(Database cx,
KeyRange range = BinaryReader::fromStringRef<KeyRange>(it.key.removePrefix(destUidLookupPrefix),
IncludeVersion());
UID logUid = BinaryReader::fromStringRef<UID>(it.value, Unversioned());
if (loggingRanges.count(LogRangeAndUID(range, logUid))) {
if (loggingRanges.contains(LogRangeAndUID(range, logUid))) {
std::pair<Key, Key> rangePair = std::make_pair(range.begin, range.end);
if (existingRanges.count(rangePair)) {
if (existingRanges.contains(rangePair)) {
std::string rangeDescription = (range == getDefaultBackupSharedRange())
? "the default backup set"
: format("`%s` - `%s`",
@ -2547,7 +2547,7 @@ static JsonBuilderObject tlogFetcher(int* logFaultTolerance,
int failedLogs = 0;
for (auto& log : tLogSet.tLogs) {
JsonBuilderObject logObj;
bool failed = !log.present() || !address_workers.count(log.interf().address());
bool failed = !log.present() || !address_workers.contains(log.interf().address());
logObj["id"] = log.id().shortString();
logObj["healthy"] = !failed;
if (log.present()) {
@ -3590,7 +3590,7 @@ ACTOR Future<StatusReply> clusterGetStatus(
if (it.isTss()) {
activeTSSCount++;
}
if (wiggleServers.count(it.id())) {
if (wiggleServers.contains(it.id())) {
wiggleServerAddress.push_back(it.address().toString());
}
}

View File

@ -252,7 +252,7 @@ Future<Void> TCServerInfo::updateStoreType() {
void TCServerInfo::removeTeamsContainingServer(UID removedServer) {
for (int t = 0; t < teams.size(); t++) {
auto const& serverIds = teams[t]->getServerIDs();
if (std::count(serverIds.begin(), serverIds.end(), removedServer)) {
if (std::find(serverIds.begin(), serverIds.end(), removedServer) != serverIds.end()) {
teams[t--] = teams.back();
teams.pop_back();
}
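The removeTeamsContainingServer change pairs std::find with the existing swap-and-pop removal. A self-contained sketch of the idiom under simplified types; Team here is just a vector of ids, unlike the real TCTeamInfo:

#include <algorithm>
#include <vector>

using UID = int;               // stand-in for fdbserver's UID
using Team = std::vector<UID>; // stand-in for TCTeamInfo::getServerIDs()

// Remove every team that contains 'removed', using swap-and-pop so each
// erase is O(1); team order is not preserved.
void removeTeamsContainingServer(std::vector<Team>& teams, UID removed) {
    for (int t = 0; t < (int)teams.size(); t++) {
        const Team& ids = teams[t];
        if (std::find(ids.begin(), ids.end(), removed) != ids.end()) {
            teams[t--] = teams.back(); // revisit this slot on the next iteration
            teams.pop_back();
        }
    }
}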

View File

@ -555,7 +555,7 @@ struct LogData : NonCopyable, public ReferenceCounted<LogData> {
bool poppedRecently,
bool unpoppedRecovered) {
if (tag.locality != tagLocalityLogRouter && tag.locality != tagLocalityTxs && tag != txsTag && allTags.size() &&
!allTags.count(tag) && popped <= recoveredAt) {
!allTags.contains(tag) && popped <= recoveredAt) {
popped = recoveredAt + 1;
}
auto newTagData = makeReference<TagData>(tag, popped, 0, nothingPersistent, poppedRecently, unpoppedRecovered);
@ -1345,7 +1345,7 @@ ACTOR Future<Void> tLogPop(TLogData* self, TLogPopRequest req, Reference<LogData
// This actor is just a loop that calls updatePersistentData and popDiskQueue whenever
// (a) there's data to be spilled or (b) we should update metadata after some commits have been fully popped.
ACTOR Future<Void> updateStorage(TLogData* self) {
while (self->spillOrder.size() && !self->id_data.count(self->spillOrder.front())) {
while (self->spillOrder.size() && !self->id_data.contains(self->spillOrder.front())) {
self->spillOrder.pop_front();
}
@ -2532,7 +2532,8 @@ ACTOR Future<Void> rejoinClusterController(TLogData* self,
loop {
auto const& inf = self->dbInfo->get();
bool isDisplaced =
!std::count(inf.priorCommittedLogServers.begin(), inf.priorCommittedLogServers.end(), tli.id());
std::find(inf.priorCommittedLogServers.begin(), inf.priorCommittedLogServers.end(), tli.id()) ==
inf.priorCommittedLogServers.end();
if (isPrimary) {
isDisplaced =
isDisplaced && inf.recoveryCount >= recoveryCount && inf.recoveryState != RecoveryState::UNINITIALIZED;
@ -2791,7 +2792,7 @@ ACTOR Future<Void> serveTLogInterface(TLogData* self,
bool found = false;
if (self->dbInfo->get().recoveryState >= RecoveryState::ACCEPTING_COMMITS) {
for (auto& logs : self->dbInfo->get().logSystemConfig.tLogs) {
if (std::count(logs.tLogs.begin(), logs.tLogs.end(), logData->logId)) {
if (std::find(logs.tLogs.begin(), logs.tLogs.end(), logData->logId) != logs.tLogs.end()) {
found = true;
break;
}
@ -2895,7 +2896,7 @@ void removeLog(TLogData* self, Reference<LogData> logData) {
// actors threw an error immediately
self->id_data.erase(logData->logId);
while (self->popOrder.size() && !self->id_data.count(self->popOrder.front())) {
while (self->popOrder.size() && !self->id_data.contains(self->popOrder.front())) {
self->popOrder.pop_front();
}

View File

@ -219,14 +219,14 @@ Tag TagPartitionedLogSystem::getPseudoPopTag(Tag tag, ProcessClass::ClassType ty
switch (type) {
case ProcessClass::LogRouterClass:
if (tag.locality == tagLocalityLogRouter) {
ASSERT(pseudoLocalities.count(tagLocalityLogRouterMapped) > 0);
ASSERT(pseudoLocalities.contains(tagLocalityLogRouterMapped));
tag.locality = tagLocalityLogRouterMapped;
}
break;
case ProcessClass::BackupClass:
if (tag.locality == tagLocalityLogRouter) {
ASSERT(pseudoLocalities.count(tagLocalityBackup) > 0);
ASSERT(pseudoLocalities.contains(tagLocalityBackup));
tag.locality = tagLocalityBackup;
}
break;
@ -238,7 +238,7 @@ Tag TagPartitionedLogSystem::getPseudoPopTag(Tag tag, ProcessClass::ClassType ty
}
bool TagPartitionedLogSystem::hasPseudoLocality(int8_t locality) const {
return pseudoLocalities.count(locality) > 0;
return pseudoLocalities.contains(locality);
}
Version TagPartitionedLogSystem::popPseudoLocalityTag(Tag tag, Version upTo) {
@ -1856,7 +1856,7 @@ void TagPartitionedLogSystem::setBackupWorkers(const std::vector<InitializeBacku
LogEpoch logsetEpoch = this->epoch;
oldestBackupEpoch = this->epoch;
for (const auto& reply : replies) {
if (removedBackupWorkers.count(reply.interf.id()) > 0) {
if (removedBackupWorkers.contains(reply.interf.id())) {
removedBackupWorkers.erase(reply.interf.id());
continue;
}
@ -2372,7 +2372,7 @@ ACTOR Future<Void> TagPartitionedLogSystem::epochEnd(Reference<AsyncVar<Referenc
foundSpecial = true;
break;
}
if (!lockedLocalities.count(log->locality)) {
if (!lockedLocalities.contains(log->locality)) {
TraceEvent("EpochEndLockExtra").detail("Locality", log->locality);
CODE_PROBE(true, "locking old generations for version information");
lockedLocalities.insert(log->locality);

View File

@ -685,7 +685,7 @@ static int asyncFullPathname(sqlite3_vfs* pVfs, /* VFS */
** and false otherwise.
*/
bool vfsAsyncIsOpen(std::string filename) {
return SharedMemoryInfo::table.count(abspath(filename)) > 0;
return SharedMemoryInfo::table.contains(abspath(filename));
}
/*

View File

@ -3344,7 +3344,7 @@ public:
if (copyNewToOriginal) {
if (g_network->isSimulated()) {
ASSERT(self->remapDestinationsSimOnly.count(p.originalPageID) == 0);
ASSERT(!self->remapDestinationsSimOnly.contains(p.originalPageID));
self->remapDestinationsSimOnly.insert(p.originalPageID);
}
debug_printf("DWALPager(%s) remapCleanup copy %s\n", self->filename.c_str(), p.toString().c_str());
@ -9175,7 +9175,7 @@ TEST_CASE("Lredwood/correctness/unit/deltaTree/RedwoodRecordRef") {
if (deterministicRandom()->coinflip()) {
rec.value = StringRef(arena, v);
}
if (uniqueItems.count(rec) == 0) {
if (!uniqueItems.contains(rec)) {
uniqueItems.insert(rec);
}
}
@ -9352,7 +9352,7 @@ TEST_CASE("Lredwood/correctness/unit/deltaTree/RedwoodRecordRef2") {
if (deterministicRandom()->coinflip()) {
rec.value = StringRef(arena, v);
}
if (uniqueItems.count(rec) == 0) {
if (!uniqueItems.contains(rec)) {
uniqueItems.insert(rec);
}
}
@ -9533,7 +9533,7 @@ TEST_CASE("Lredwood/correctness/unit/deltaTree/IntIntPair") {
nextP.v++;
auto prevP = p;
prevP.v--;
if (uniqueItems.count(p) == 0 && uniqueItems.count(nextP) == 0 && uniqueItems.count(prevP) == 0) {
if (!uniqueItems.contains(p) && !uniqueItems.contains(nextP) && !uniqueItems.contains(prevP)) {
uniqueItems.insert(p);
}
}
@ -9676,8 +9676,8 @@ TEST_CASE("Lredwood/correctness/unit/deltaTree/IntIntPair") {
// Insert record if it, its predecessor, and its successor are not present.
// Test data is intentionally sparse to test finding each record with a directional
// seek from each adjacent possible but not present record.
if (uniqueItems.count(p) == 0 && uniqueItems.count(IntIntPair(p.k, p.v - 1)) == 0 &&
uniqueItems.count(IntIntPair(p.k, p.v + 1)) == 0) {
if (!uniqueItems.contains(p) && !uniqueItems.contains(IntIntPair(p.k, p.v - 1)) &&
!uniqueItems.contains(IntIntPair(p.k, p.v + 1))) {
if (!cur2.insert(p)) {
shouldBeFull = true;
break;

View File

@ -2229,7 +2229,7 @@ int main(int argc, char* argv[]) {
const std::set<std::string> allowedDirectories = { ".", "..", "backups", "unittests", "fdbblob" };
for (const auto& dir : directories) {
if (dir.size() != 32 && allowedDirectories.count(dir) == 0 && dir.find("snap") == std::string::npos) {
if (dir.size() != 32 && !allowedDirectories.contains(dir) && dir.find("snap") == std::string::npos) {
TraceEvent(SevError, "IncompatibleDirectoryFound")
.detail("DataFolder", dataFolder)

View File

@ -351,9 +351,9 @@ public:
.detail("Worker", it.second.details.interf.address())
.detail("WorkerAvailable", workerAvailable(it.second, false))
.detail("RecoverDiskFiles", it.second.details.recoveredDiskFiles)
.detail("NotExcludedMachine", !excludedMachines.count(it.second.details.interf.locality.zoneId()))
.detail("NotExcludedMachine", !excludedMachines.contains(it.second.details.interf.locality.zoneId()))
.detail("IncludeDC",
(includeDCs.size() == 0 || includeDCs.count(it.second.details.interf.locality.dcId())))
(includeDCs.size() == 0 || includeDCs.contains(it.second.details.interf.locality.dcId())))
.detail("NotExcludedAddress", !addressExcluded(excludedAddresses, it.second.details.interf.address()))
.detail("NotExcludedAddress2",
(!it.second.details.interf.secondaryAddress().present() ||
@ -363,8 +363,8 @@ public:
ProcessClass::UnsetFit)
.detail("MachineFitness", it.second.details.processClass.machineClassFitness(ProcessClass::Storage));
if (workerAvailable(it.second, false) && it.second.details.recoveredDiskFiles &&
!excludedMachines.count(it.second.details.interf.locality.zoneId()) &&
(includeDCs.size() == 0 || includeDCs.count(it.second.details.interf.locality.dcId())) &&
!excludedMachines.contains(it.second.details.interf.locality.zoneId()) &&
(includeDCs.size() == 0 || includeDCs.contains(it.second.details.interf.locality.dcId())) &&
!addressExcluded(excludedAddresses, it.second.details.interf.address()) &&
(!it.second.details.interf.secondaryAddress().present() ||
!addressExcluded(excludedAddresses, it.second.details.interf.secondaryAddress().get())) &&
@ -379,8 +379,8 @@ public:
for (auto& it : id_worker) {
ProcessClass::Fitness fit = it.second.details.processClass.machineClassFitness(ProcessClass::Storage);
if (workerAvailable(it.second, false) && it.second.details.recoveredDiskFiles &&
!excludedMachines.count(it.second.details.interf.locality.zoneId()) &&
(includeDCs.size() == 0 || includeDCs.count(it.second.details.interf.locality.dcId())) &&
!excludedMachines.contains(it.second.details.interf.locality.zoneId()) &&
(includeDCs.size() == 0 || includeDCs.contains(it.second.details.interf.locality.dcId())) &&
!addressExcluded(excludedAddresses, it.second.details.interf.address()) && fit < bestFit) {
bestFit = fit;
bestInfo = it.second.details;
@ -502,7 +502,7 @@ public:
auto thisField = worker.interf.locality.get(field);
auto thisZone = worker.interf.locality.zoneId();
if (field_count.count(thisField)) {
if (field_count.contains(thisField)) {
zone_workers[thisZone].push_back(worker);
zone_count[thisZone].second = thisField;
}
@ -528,7 +528,7 @@ public:
auto& zoneWorkers = zone_workers[lowestZone.second];
while (zoneWorkers.size() && !added) {
if (!resultSet.count(zoneWorkers.back())) {
if (!resultSet.contains(zoneWorkers.back())) {
resultSet.insert(zoneWorkers.back());
if (resultSet.size() == desired) {
return;
@ -583,7 +583,7 @@ public:
bool added = false;
while (zoneWorkers.size() && !added) {
if (!resultSet.count(zoneWorkers.back())) {
if (!resultSet.contains(zoneWorkers.back())) {
resultSet.insert(zoneWorkers.back());
if (resultSet.size() == desired) {
return;
@ -690,7 +690,7 @@ public:
SevDebug, id, "complex", "Worker's fitness is NeverAssign", worker_details, fitness, dcIds);
continue;
}
if (!dcIds.empty() && dcIds.count(worker_details.interf.locality.dcId()) == 0) {
if (!dcIds.empty() && !dcIds.contains(worker_details.interf.locality.dcId())) {
logWorkerUnavailable(
SevDebug, id, "complex", "Worker is not in the target DC", worker_details, fitness, dcIds);
continue;
@ -801,7 +801,7 @@ public:
}
if (workerIter->second.size() + resultSet.size() <= desired) {
for (auto& worker : workerIter->second) {
if (chosenFields.count(worker.interf.locality.get(field))) {
if (chosenFields.contains(worker.interf.locality.get(field))) {
resultSet.insert(worker);
}
}
@ -940,7 +940,7 @@ public:
SevDebug, id, "simple", "Worker's fitness is NeverAssign", worker_details, fitness, dcIds);
continue;
}
if (!dcIds.empty() && dcIds.count(worker_details.interf.locality.dcId()) == 0) {
if (!dcIds.empty() && !dcIds.contains(worker_details.interf.locality.dcId())) {
logWorkerUnavailable(
SevDebug, id, "simple", "Worker is not in the target DC", worker_details, fitness, dcIds);
continue;
@ -973,7 +973,7 @@ public:
auto used = std::get<1>(workerIter->first);
deterministicRandom()->randomShuffle(workerIter->second);
for (auto& worker : workerIter->second) {
if (!zones.count(worker.interf.locality.zoneId())) {
if (!zones.contains(worker.interf.locality.zoneId())) {
zones.insert(worker.interf.locality.zoneId());
resultSet.insert(worker);
if (resultSet.size() == required) {
@ -1092,7 +1092,7 @@ public:
SevDebug, id, "deprecated", "Worker's fitness is NeverAssign", worker_details, fitness, dcIds);
continue;
}
if (!dcIds.empty() && dcIds.count(worker_details.interf.locality.dcId()) == 0) {
if (!dcIds.empty() && !dcIds.contains(worker_details.interf.locality.dcId())) {
logWorkerUnavailable(
SevDebug, id, "deprecated", "Worker is not in the target DC", worker_details, fitness, dcIds);
continue;
@ -1312,7 +1312,7 @@ public:
std::map<Optional<Standalone<StringRef>>, int> field_count;
std::set<Optional<Standalone<StringRef>>> zones;
for (auto& worker : testWorkers) {
if (!zones.count(worker.interf.locality.zoneId())) {
if (!zones.contains(worker.interf.locality.zoneId())) {
field_count[worker.interf.locality.get(pa1->attributeKey())]++;
zones.insert(worker.interf.locality.zoneId());
}
@ -2478,7 +2478,7 @@ public:
.detail("ProcessID", it.interf().filteredLocality.processId());
return true;
}
if (!logRouterAddresses.count(tlogWorker->second.details.interf.address())) {
if (!logRouterAddresses.contains(tlogWorker->second.details.interf.address())) {
logRouterAddresses.insert(tlogWorker->second.details.interf.address());
log_routers.push_back(tlogWorker->second.details);
}
@ -2498,7 +2498,7 @@ public:
.detail("ProcessID", worker.interf().locality.processId());
return true;
}
if (backup_addresses.count(workerIt->second.details.interf.address()) == 0) {
if (!backup_addresses.contains(workerIt->second.details.interf.address())) {
backup_addresses.insert(workerIt->second.details.interf.address());
backup_workers.push_back(workerIt->second.details);
}
@ -2664,7 +2664,7 @@ public:
int32_t oldSatelliteRegionFit = std::numeric_limits<int32_t>::max();
for (auto& it : satellite_tlogs) {
if (satellite_priority.count(it.interf.locality.dcId())) {
if (satellite_priority.contains(it.interf.locality.dcId())) {
oldSatelliteRegionFit = std::min(oldSatelliteRegionFit, satellite_priority[it.interf.locality.dcId()]);
} else {
oldSatelliteRegionFit = -1;
@ -2673,7 +2673,7 @@ public:
int32_t newSatelliteRegionFit = std::numeric_limits<int32_t>::max();
for (auto& it : newSatelliteTLogs) {
if (satellite_priority.count(it.interf.locality.dcId())) {
if (satellite_priority.contains(it.interf.locality.dcId())) {
newSatelliteRegionFit = std::min(newSatelliteRegionFit, satellite_priority[it.interf.locality.dcId()]);
} else {
newSatelliteRegionFit = -1;

View File

@ -43,7 +43,7 @@ public:
}
}
bool contains(KeyRef configClass) const { return classes.count(configClass); }
bool contains(KeyRef configClass) const { return classes.contains(configClass); }
std::set<Key> const& getClasses() const { return classes; }
template <class Ar>

View File

@ -770,7 +770,7 @@ struct StorageWiggler : ReferenceCounted<StorageWiggler> {
void removeServer(const UID& serverId);
// update metadata and adjust priority_queue
void updateMetadata(const UID& serverId, const StorageMetadataType& metadata);
bool contains(const UID& serverId) const { return pq_handles.count(serverId) > 0; }
bool contains(const UID& serverId) const { return pq_handles.contains(serverId); }
bool empty() const { return wiggle_pq.empty(); }
// It's guaranteed that when a.metadata >= b.metadata, if !necessary(a) then !necessary(b)
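Several classes touched by this change expose their own contains() helper that simply forwards to the underlying container, as StorageWiggler does with pq_handles. A hedged sketch of the pattern, assuming C++20; the handle type is invented (the real StorageWiggler stores boost heap handles):

#include <string>
#include <unordered_map>

using UID = std::string; // stand-in for fdbserver's UID

struct WigglerSketch {
    std::unordered_map<UID, int> pq_handles; // invented handle payload

    // Forwarding helper: reads better at call sites than pq_handles.count(id) > 0.
    bool contains(const UID& serverId) const { return pq_handles.contains(serverId); }
};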

View File

@ -49,7 +49,7 @@ struct ExclusionTracker {
bool isFailedOrExcluded(NetworkAddress addr) {
AddressExclusion addrExclusion(addr.ip, addr.port);
return excluded.count(addrExclusion) || failed.count(addrExclusion);
return excluded.contains(addrExclusion) || failed.contains(addrExclusion);
}
ACTOR static Future<Void> tracker(ExclusionTracker* self) {

View File

@ -107,7 +107,8 @@ enum EncodingType : uint8_t {
static constexpr std::array EncryptedEncodingTypes = { AESEncryption, AESEncryptionWithAuth, XOREncryption_TestOnly };
inline bool isEncodingTypeEncrypted(EncodingType encoding) {
return std::count(EncryptedEncodingTypes.begin(), EncryptedEncodingTypes.end(), encoding) > 0;
return std::find(EncryptedEncodingTypes.begin(), EncryptedEncodingTypes.end(), encoding) !=
EncryptedEncodingTypes.end();
}
inline bool isEncodingTypeAESEncrypted(EncodingType encoding) {

View File

@ -57,7 +57,7 @@ struct RatekeeperSingleton : Singleton<RatekeeperInterface> {
}
}
void halt(ClusterControllerData& cc, Optional<Standalone<StringRef>> pid) const {
if (interface.present() && cc.id_worker.count(pid)) {
if (interface.present() && cc.id_worker.contains(pid)) {
cc.id_worker[pid].haltRatekeeper =
brokenPromiseToNever(interface.get().haltRatekeeper.getReply(HaltRatekeeperRequest(cc.id)));
}
@ -82,7 +82,7 @@ struct DataDistributorSingleton : Singleton<DataDistributorInterface> {
}
}
void halt(ClusterControllerData& cc, Optional<Standalone<StringRef>> pid) const {
if (interface.present() && cc.id_worker.count(pid)) {
if (interface.present() && cc.id_worker.contains(pid)) {
cc.id_worker[pid].haltDistributor =
brokenPromiseToNever(interface.get().haltDataDistributor.getReply(HaltDataDistributorRequest(cc.id)));
}
@ -132,7 +132,7 @@ struct BlobManagerSingleton : Singleton<BlobManagerInterface> {
}
}
void halt(ClusterControllerData& cc, Optional<Standalone<StringRef>> pid) const {
if (interface.present() && cc.id_worker.count(pid)) {
if (interface.present() && cc.id_worker.contains(pid)) {
cc.id_worker[pid].haltBlobManager =
brokenPromiseToNever(interface.get().haltBlobManager.getReply(HaltBlobManagerRequest(cc.id)));
}
@ -190,7 +190,7 @@ struct EncryptKeyProxySingleton : Singleton<EncryptKeyProxyInterface> {
}
}
void halt(ClusterControllerData& cc, Optional<Standalone<StringRef>> pid) const {
if (interface.present() && cc.id_worker.count(pid)) {
if (interface.present() && cc.id_worker.contains(pid)) {
cc.id_worker[pid].haltEncryptKeyProxy =
brokenPromiseToNever(interface.get().haltEncryptKeyProxy.getReply(HaltEncryptKeyProxyRequest(cc.id)));
}

View File

@ -171,7 +171,7 @@ public:
bool matches(std::vector<Standalone<StringRef>> const& sortedMachineIDs);
std::string getMachineIDsStr() const;
bool containsMachine(Standalone<StringRef> machineID) const {
return std::count(machineIDs.begin(), machineIDs.end(), machineID);
return std::find(machineIDs.begin(), machineIDs.end(), machineID) != machineIDs.end();
}
// Returns true iff team is found

View File

@ -314,9 +314,6 @@ public:
art_iterator insert_if_absent(KeyRef& key, void* value, int* replaced);
void erase(const art_iterator& it);
uint64_t count() { return size; }
}; // art_tree
struct art_iterator {

View File

@ -611,7 +611,7 @@ ACTOR Future<Void> masterServerCxx(MasterInterface mi,
"Master: terminated due to backup worker failure",
probe::decoration::rare);
if (normalMasterErrors().count(err.code())) {
if (normalMasterErrors().contains(err.code())) {
TraceEvent("MasterTerminated", mi.id()).error(err);
return Void();
}

View File

@ -53,7 +53,6 @@ struct LatencyStats {
}
void reset() { *this = LatencyStats(); }
double count() { return n; }
double mean() { return x / n; }
double stddev() { return sqrt(x2 / n - (x / n) * (x / n)); }
};

View File

@ -8072,7 +8072,7 @@ ACTOR Future<Version> fetchChangeFeed(StorageServer* data,
if (g_network->isSimulated() && !g_simulator->restarted) {
// verify that the feed was actually destroyed and it's not an error in this inference logic.
// Restarting tests produce false positives because the validation state isn't kept across tests
ASSERT(g_simulator->validationData.allDestroyedChangeFeedIDs.count(changeFeedInfo->id.toString()));
ASSERT(g_simulator->validationData.allDestroyedChangeFeedIDs.contains(changeFeedInfo->id.toString()));
}
Key beginClearKey = changeFeedInfo->id.withPrefix(persistChangeFeedKeys.begin);
@ -8089,7 +8089,7 @@ ACTOR Future<Version> fetchChangeFeed(StorageServer* data,
changeFeedInfo->destroy(cleanupVersion);
if (data->uidChangeFeed.count(changeFeedInfo->id)) {
if (data->uidChangeFeed.contains(changeFeedInfo->id)) {
// only register range for cleanup if it has not been already cleaned up
data->changeFeedCleanupDurable[changeFeedInfo->id] = cleanupVersion;
}
@ -8308,7 +8308,7 @@ ACTOR Future<std::vector<Key>> fetchChangeFeedMetadata(StorageServer* data,
if (g_network->isSimulated() && !g_simulator->restarted) {
// verify that the feed was actually destroyed and it's not an error in this inference logic. Restarting
// tests produce false positives because the validation state isn't kept across tests
ASSERT(g_simulator->validationData.allDestroyedChangeFeedIDs.count(feed.first.toString()));
ASSERT(g_simulator->validationData.allDestroyedChangeFeedIDs.contains(feed.first.toString()));
}
Key beginClearKey = feed.first.withPrefix(persistChangeFeedKeys.begin);
@ -12545,7 +12545,7 @@ ACTOR Future<Void> updateStorage(StorageServer* data) {
auto info = data->uidChangeFeed.find(feedFetchVersions[curFeed].first);
// Don't update if the feed is pending cleanup. Either it will get cleaned up and destroyed, or it will
// get fetched again, where the fetch version will get reset.
if (info != data->uidChangeFeed.end() && !data->changeFeedCleanupDurable.count(info->second->id)) {
if (info != data->uidChangeFeed.end() && !data->changeFeedCleanupDurable.contains(info->second->id)) {
if (feedFetchVersions[curFeed].second > info->second->durableFetchVersion.get()) {
info->second->durableFetchVersion.set(feedFetchVersions[curFeed].second);
}

View File

@ -418,19 +418,19 @@ void CompoundWorkload::addFailureInjection(WorkloadRequest& work) {
for (auto const& w : workloads) {
w->disableFailureInjectionWorkloads(disabledWorkloads);
}
if (disabledWorkloads.count("all") > 0) {
if (disabledWorkloads.contains("all")) {
return;
}
auto& factories = IFailureInjectorFactory::factories();
DeterministicRandom random(sharedRandomNumber);
for (auto& factory : factories) {
auto workload = factory->create(*this);
if (disabledWorkloads.count(workload->description()) > 0) {
if (disabledWorkloads.contains(workload->description())) {
continue;
}
if (std::count(work.disabledFailureInjectionWorkloads.begin(),
work.disabledFailureInjectionWorkloads.end(),
workload->description()) > 0) {
if (std::find(work.disabledFailureInjectionWorkloads.begin(),
work.disabledFailureInjectionWorkloads.end(),
workload->description()) != work.disabledFailureInjectionWorkloads.end()) {
continue;
}
while (shouldInjectFailure(random, work, workload)) {
@ -1646,7 +1646,7 @@ Optional<Key> getKeyFromString(const std::string& str) {
}
const char first = str.at(i + 2);
const char second = str.at(i + 3);
if (parseCharMap.count(first) == 0 || parseCharMap.count(second) == 0) {
if (!parseCharMap.contains(first) || !parseCharMap.contains(second)) {
TraceEvent(g_network->isSimulated() ? SevError : SevWarnAlways,
"ConsistencyCheckUrgent_GetKeyFromStringError")
.setMaxEventLength(-1)
@ -3150,12 +3150,12 @@ ACTOR Future<Void> testExpectedErrorImpl(Future<Void> test,
}
// Make sure that no duplicate details were provided
ASSERT(details.count("TestDescription") == 0);
ASSERT(details.count("ExpectedError") == 0);
ASSERT(details.count("ExpectedErrorCode") == 0);
ASSERT(details.count("ActualError") == 0);
ASSERT(details.count("ActualErrorCode") == 0);
ASSERT(details.count("Reason") == 0);
ASSERT(!details.contains("TestDescription"));
ASSERT(!details.contains("ExpectedError"));
ASSERT(!details.contains("ExpectedErrorCode"));
ASSERT(!details.contains("ActualError"));
ASSERT(!details.contains("ActualErrorCode"));
ASSERT(!details.contains("Reason"));
for (auto& p : details) {
evt.detail(p.first.c_str(), p.second);

View File

@ -1405,7 +1405,7 @@ std::set<std::thread::id> profiledThreads;
// Returns whether or not a given thread should be profiled
int filter_in_thread(void* arg) {
return profiledThreads.count(std::this_thread::get_id()) > 0 ? 1 : 0;
return profiledThreads.contains(std::this_thread::get_id()) ? 1 : 0;
}
#endif
@ -3329,7 +3329,7 @@ ACTOR Future<Void> workerServer(Reference<IClusterConnectionRecord> connRecord,
}
when(state WorkerSnapRequest snapReq = waitNext(interf.workerSnapReq.getFuture())) {
std::string snapReqKey = snapReq.snapUID.toString() + snapReq.role.toString();
if (snapReqResultMap.count(snapReqKey)) {
if (snapReqResultMap.contains(snapReqKey)) {
CODE_PROBE(true, "Worker received a duplicate finished snapshot request", probe::decoration::rare);
auto result = snapReqResultMap[snapReqKey];
result.isError() ? snapReq.reply.sendError(result.getError()) : snapReq.reply.send(result.get());
@ -3337,7 +3337,7 @@ ACTOR Future<Void> workerServer(Reference<IClusterConnectionRecord> connRecord,
.detail("SnapUID", snapReq.snapUID.toString())
.detail("Role", snapReq.role)
.detail("Result", result.isError() ? result.getError().code() : success().code());
} else if (snapReqMap.count(snapReqKey)) {
} else if (snapReqMap.contains(snapReqKey)) {
CODE_PROBE(true, "Worker received a duplicate ongoing snapshot request", probe::decoration::rare);
TraceEvent("RetryOngoingWorkerSnapRequest")
.detail("SnapUID", snapReq.snapUID.toString())

View File

@ -831,7 +831,7 @@ struct BlobGranuleRangesWorkload : TestWorkload {
int op = OP_COUNT;
loop {
op = deterministicRandom()->randomInt(0, OP_COUNT);
if (!excludedTypes.count((UnitTestTypes)op)) {
if (!excludedTypes.contains((UnitTestTypes)op)) {
break;
}
loopTries--;

View File

@ -148,7 +148,7 @@ struct BulkSetupWorkload : TestWorkload {
for (i = 0; i < workload->tenants.size(); i++) {
state Reference<Tenant> tenant = workload->tenants[i];
std::vector<KeyValueRef> keysForCurTenant = wait(getKVPairsForTenant(workload, tenant, cx));
if (tenantIdsToDrop.count(tenant->id())) {
if (tenantIdsToDrop.contains(tenant->id())) {
// Don't check the tenants that the EKP would throw errors for
continue;
}

View File

@ -432,9 +432,9 @@ struct ConfigureDatabaseWorkload : TestWorkload {
int storeType = 0;
while (true) {
storeType = deterministicRandom()->randomInt(0, 6);
if (std::count(self->storageEngineExcludeTypes.begin(),
self->storageEngineExcludeTypes.end(),
storeType) == 0) {
if (std::find(self->storageEngineExcludeTypes.begin(),
self->storageEngineExcludeTypes.end(),
storeType) == self->storageEngineExcludeTypes.end()) {
break;
}
}

View File

@ -197,7 +197,7 @@ struct ConflictRangeWorkload : TestWorkload {
if (randomSets) {
for (int j = 0; j < 5; j++) {
int proposedKey = deterministicRandom()->randomInt(0, self->maxKeySpace);
if (!insertedSet.count(proposedKey)) {
if (!insertedSet.contains(proposedKey)) {
TraceEvent("ConflictRangeSet").detail("Key", proposedKey);
insertedSet.insert(proposedKey);
tr2.set(StringRef(format("%010d", proposedKey)),
@ -208,7 +208,7 @@ struct ConflictRangeWorkload : TestWorkload {
} else {
for (int j = 0; j < 5; j++) {
int proposedKey = deterministicRandom()->randomInt(0, self->maxKeySpace);
if (insertedSet.count(proposedKey)) {
if (insertedSet.contains(proposedKey)) {
TraceEvent("ConflictRangeClear").detail("Key", proposedKey);
insertedSet.erase(proposedKey);
tr2.clear(StringRef(format("%010d", proposedKey)));

View File

@ -1035,7 +1035,7 @@ struct ConsistencyCheckWorkload : TestWorkload {
}
for (auto& ssi : servers) {
ASSERT(id_ssi.count(ssi.id()));
ASSERT(id_ssi.contains(ssi.id()));
}
return true;
}
@ -1180,7 +1180,7 @@ struct ConsistencyCheckWorkload : TestWorkload {
itr->interf.secondaryAddress().present() ? itr->interf.secondaryAddress().get().toString()
: "Unset");
for (const auto& id : stores.get()) {
if (statefulProcesses[itr->interf.address()].count(id)) {
if (statefulProcesses[itr->interf.address()].contains(id)) {
continue;
}
// For extra data store
@ -1200,7 +1200,7 @@ struct ConsistencyCheckWorkload : TestWorkload {
.detail("ProcessPrimaryAddress", p->address)
.detail("ProcessAddresses", p->addresses.toString())
.detail("DataStoreID", id)
.detail("Protected", g_simulator->protectedAddresses.count(itr->interf.address()))
.detail("Protected", g_simulator->protectedAddresses.contains(itr->interf.address()))
.detail("Reliable", p->isReliable())
.detail("ReliableInfo", p->getReliableInfo())
.detail("KillOrRebootProcess", p->address);
@ -1323,7 +1323,7 @@ struct ConsistencyCheckWorkload : TestWorkload {
all[i]->startingClass != ProcessClass::TesterClass &&
all[i]->startingClass != ProcessClass::SimHTTPServerClass &&
all[i]->protocolVersion == g_network->protocolVersion()) {
if (!workerAddresses.count(all[i]->address)) {
if (!workerAddresses.contains(all[i]->address)) {
TraceEvent("ConsistencyCheck_WorkerMissingFromList").detail("Addr", all[i]->address);
return false;
}
@ -1378,7 +1378,7 @@ struct ConsistencyCheckWorkload : TestWorkload {
for (const auto& addr : oldCoordinators) {
auto findResult = addr_locality.find(addr);
if (findResult != addr_locality.end()) {
if (checkDuplicates.count(findResult->second.zoneId())) {
if (checkDuplicates.contains(findResult->second.zoneId())) {
TraceEvent("ConsistencyCheck_BadCoordinator")
.detail("Addr", addr)
.detail("NotFound", findResult == addr_locality.end());
@ -1410,7 +1410,7 @@ struct ConsistencyCheckWorkload : TestWorkload {
for (const auto& worker : allWorkers) {
allWorkerProcessMap[worker.interf.address()] = worker;
Optional<Key> dc = worker.interf.locality.dcId();
if (!dcToAllClassTypes.count(dc))
if (!dcToAllClassTypes.contains(dc))
dcToAllClassTypes.insert({});
dcToAllClassTypes[dc].push_back(worker.processClass.classType());
}
@ -1420,17 +1420,17 @@ struct ConsistencyCheckWorkload : TestWorkload {
for (const auto& worker : nonExcludedWorkers) {
nonExcludedWorkerProcessMap[worker.interf.address()] = worker;
Optional<Key> dc = worker.interf.locality.dcId();
if (!dcToNonExcludedClassTypes.count(dc))
if (!dcToNonExcludedClassTypes.contains(dc))
dcToNonExcludedClassTypes.insert({});
dcToNonExcludedClassTypes[dc].push_back(worker.processClass.classType());
}
if (!allWorkerProcessMap.count(db.clusterInterface.clientInterface.address())) {
if (!allWorkerProcessMap.contains(db.clusterInterface.clientInterface.address())) {
TraceEvent("ConsistencyCheck_CCNotInWorkerList")
.detail("CCAddress", db.clusterInterface.clientInterface.address().toString());
return false;
}
if (!allWorkerProcessMap.count(db.master.address())) {
if (!allWorkerProcessMap.contains(db.master.address())) {
TraceEvent("ConsistencyCheck_MasterNotInWorkerList")
.detail("MasterAddress", db.master.address().toString());
return false;
@ -1478,13 +1478,13 @@ struct ConsistencyCheckWorkload : TestWorkload {
// Check CC
ProcessClass::Fitness bestClusterControllerFitness =
getBestAvailableFitness(dcToNonExcludedClassTypes[ccDcId], ProcessClass::ClusterController);
if (!nonExcludedWorkerProcessMap.count(db.clusterInterface.clientInterface.address()) ||
if (!nonExcludedWorkerProcessMap.contains(db.clusterInterface.clientInterface.address()) ||
nonExcludedWorkerProcessMap[db.clusterInterface.clientInterface.address()].processClass.machineClassFitness(
ProcessClass::ClusterController) != bestClusterControllerFitness) {
TraceEvent("ConsistencyCheck_ClusterControllerNotBest")
.detail("BestClusterControllerFitness", bestClusterControllerFitness)
.detail("ExistingClusterControllerFit",
nonExcludedWorkerProcessMap.count(db.clusterInterface.clientInterface.address())
nonExcludedWorkerProcessMap.contains(db.clusterInterface.clientInterface.address())
? nonExcludedWorkerProcessMap[db.clusterInterface.clientInterface.address()]
.processClass.machineClassFitness(ProcessClass::ClusterController)
: -1);
@ -1501,14 +1501,14 @@ struct ConsistencyCheckWorkload : TestWorkload {
}
}
if ((!nonExcludedWorkerProcessMap.count(db.master.address()) &&
if ((!nonExcludedWorkerProcessMap.contains(db.master.address()) &&
bestMasterFitness != ProcessClass::ExcludeFit) ||
nonExcludedWorkerProcessMap[db.master.address()].processClass.machineClassFitness(ProcessClass::Master) !=
bestMasterFitness) {
TraceEvent("ConsistencyCheck_MasterNotBest")
.detail("BestMasterFitness", bestMasterFitness)
.detail("ExistingMasterFit",
nonExcludedWorkerProcessMap.count(db.master.address())
nonExcludedWorkerProcessMap.contains(db.master.address())
? nonExcludedWorkerProcessMap[db.master.address()].processClass.machineClassFitness(
ProcessClass::Master)
: -1);
@ -1519,13 +1519,13 @@ struct ConsistencyCheckWorkload : TestWorkload {
ProcessClass::Fitness bestCommitProxyFitness =
getBestAvailableFitness(dcToNonExcludedClassTypes[masterDcId], ProcessClass::CommitProxy);
for (const auto& commitProxy : db.client.commitProxies) {
if (!nonExcludedWorkerProcessMap.count(commitProxy.address()) ||
if (!nonExcludedWorkerProcessMap.contains(commitProxy.address()) ||
nonExcludedWorkerProcessMap[commitProxy.address()].processClass.machineClassFitness(
ProcessClass::CommitProxy) != bestCommitProxyFitness) {
TraceEvent("ConsistencyCheck_CommitProxyNotBest")
.detail("BestCommitProxyFitness", bestCommitProxyFitness)
.detail("ExistingCommitProxyFitness",
nonExcludedWorkerProcessMap.count(commitProxy.address())
nonExcludedWorkerProcessMap.contains(commitProxy.address())
? nonExcludedWorkerProcessMap[commitProxy.address()].processClass.machineClassFitness(
ProcessClass::CommitProxy)
: -1);
@ -1537,13 +1537,13 @@ struct ConsistencyCheckWorkload : TestWorkload {
ProcessClass::Fitness bestGrvProxyFitness =
getBestAvailableFitness(dcToNonExcludedClassTypes[masterDcId], ProcessClass::GrvProxy);
for (const auto& grvProxy : db.client.grvProxies) {
if (!nonExcludedWorkerProcessMap.count(grvProxy.address()) ||
if (!nonExcludedWorkerProcessMap.contains(grvProxy.address()) ||
nonExcludedWorkerProcessMap[grvProxy.address()].processClass.machineClassFitness(
ProcessClass::GrvProxy) != bestGrvProxyFitness) {
TraceEvent("ConsistencyCheck_GrvProxyNotBest")
.detail("BestGrvProxyFitness", bestGrvProxyFitness)
.detail("ExistingGrvProxyFitness",
nonExcludedWorkerProcessMap.count(grvProxy.address())
nonExcludedWorkerProcessMap.contains(grvProxy.address())
? nonExcludedWorkerProcessMap[grvProxy.address()].processClass.machineClassFitness(
ProcessClass::GrvProxy)
: -1);
@ -1555,13 +1555,13 @@ struct ConsistencyCheckWorkload : TestWorkload {
ProcessClass::Fitness bestResolverFitness =
getBestAvailableFitness(dcToNonExcludedClassTypes[masterDcId], ProcessClass::Resolver);
for (const auto& resolver : db.resolvers) {
if (!nonExcludedWorkerProcessMap.count(resolver.address()) ||
if (!nonExcludedWorkerProcessMap.contains(resolver.address()) ||
nonExcludedWorkerProcessMap[resolver.address()].processClass.machineClassFitness(
ProcessClass::Resolver) != bestResolverFitness) {
TraceEvent("ConsistencyCheck_ResolverNotBest")
.detail("BestResolverFitness", bestResolverFitness)
.detail("ExistingResolverFitness",
nonExcludedWorkerProcessMap.count(resolver.address())
nonExcludedWorkerProcessMap.contains(resolver.address())
? nonExcludedWorkerProcessMap[resolver.address()].processClass.machineClassFitness(
ProcessClass::Resolver)
: -1);
@ -1576,7 +1576,7 @@ struct ConsistencyCheckWorkload : TestWorkload {
for (auto& tlogSet : db.logSystemConfig.tLogs) {
if (!tlogSet.isLocal && tlogSet.logRouters.size()) {
for (auto& logRouter : tlogSet.logRouters) {
if (!nonExcludedWorkerProcessMap.count(logRouter.interf().address())) {
if (!nonExcludedWorkerProcessMap.contains(logRouter.interf().address())) {
TraceEvent("ConsistencyCheck_LogRouterNotInNonExcludedWorkers")
.detail("Id", logRouter.id());
return false;
@ -1596,14 +1596,14 @@ struct ConsistencyCheckWorkload : TestWorkload {
ProcessClass::Fitness fitnessLowerBound =
allWorkerProcessMap[db.master.address()].processClass.machineClassFitness(ProcessClass::DataDistributor);
if (db.distributor.present() &&
(!nonExcludedWorkerProcessMap.count(db.distributor.get().address()) ||
(!nonExcludedWorkerProcessMap.contains(db.distributor.get().address()) ||
nonExcludedWorkerProcessMap[db.distributor.get().address()].processClass.machineClassFitness(
ProcessClass::DataDistributor) > fitnessLowerBound)) {
TraceEvent("ConsistencyCheck_DistributorNotBest")
.detail("DataDistributorFitnessLowerBound", fitnessLowerBound)
.detail(
"ExistingDistributorFitness",
nonExcludedWorkerProcessMap.count(db.distributor.get().address())
nonExcludedWorkerProcessMap.contains(db.distributor.get().address())
? nonExcludedWorkerProcessMap[db.distributor.get().address()].processClass.machineClassFitness(
ProcessClass::DataDistributor)
: -1);
@ -1612,14 +1612,14 @@ struct ConsistencyCheckWorkload : TestWorkload {
// Check Ratekeeper
if (db.ratekeeper.present() &&
(!nonExcludedWorkerProcessMap.count(db.ratekeeper.get().address()) ||
(!nonExcludedWorkerProcessMap.contains(db.ratekeeper.get().address()) ||
nonExcludedWorkerProcessMap[db.ratekeeper.get().address()].processClass.machineClassFitness(
ProcessClass::Ratekeeper) > fitnessLowerBound)) {
TraceEvent("ConsistencyCheck_RatekeeperNotBest")
.detail("BestRatekeeperFitness", fitnessLowerBound)
.detail(
"ExistingRatekeeperFitness",
nonExcludedWorkerProcessMap.count(db.ratekeeper.get().address())
nonExcludedWorkerProcessMap.contains(db.ratekeeper.get().address())
? nonExcludedWorkerProcessMap[db.ratekeeper.get().address()].processClass.machineClassFitness(
ProcessClass::Ratekeeper)
: -1);
@ -1628,14 +1628,14 @@ struct ConsistencyCheckWorkload : TestWorkload {
// Check BlobManager
if (config.blobGranulesEnabled && db.blobManager.present() &&
(!nonExcludedWorkerProcessMap.count(db.blobManager.get().address()) ||
(!nonExcludedWorkerProcessMap.contains(db.blobManager.get().address()) ||
nonExcludedWorkerProcessMap[db.blobManager.get().address()].processClass.machineClassFitness(
ProcessClass::BlobManager) > fitnessLowerBound)) {
TraceEvent("ConsistencyCheck_BlobManagerNotBest")
.detail("BestBlobManagerFitness", fitnessLowerBound)
.detail(
"ExistingBlobManagerFitness",
nonExcludedWorkerProcessMap.count(db.blobManager.get().address())
nonExcludedWorkerProcessMap.contains(db.blobManager.get().address())
? nonExcludedWorkerProcessMap[db.blobManager.get().address()].processClass.machineClassFitness(
ProcessClass::BlobManager)
: -1);
@ -1644,14 +1644,14 @@ struct ConsistencyCheckWorkload : TestWorkload {
// Check BlobMigrator
if (config.blobGranulesEnabled && db.blobMigrator.present() &&
(!nonExcludedWorkerProcessMap.count(db.blobMigrator.get().address()) ||
(!nonExcludedWorkerProcessMap.contains(db.blobMigrator.get().address()) ||
nonExcludedWorkerProcessMap[db.blobMigrator.get().address()].processClass.machineClassFitness(
ProcessClass::BlobMigrator) > fitnessLowerBound)) {
TraceEvent("ConsistencyCheck_BlobMigratorNotBest")
.detail("BestBlobMigratorFitness", fitnessLowerBound)
.detail(
"ExistingBlobMigratorFitness",
nonExcludedWorkerProcessMap.count(db.blobMigrator.get().address())
nonExcludedWorkerProcessMap.contains(db.blobMigrator.get().address())
? nonExcludedWorkerProcessMap[db.blobMigrator.get().address()].processClass.machineClassFitness(
ProcessClass::BlobMigrator)
: -1);
@ -1660,13 +1660,13 @@ struct ConsistencyCheckWorkload : TestWorkload {
// Check EncryptKeyProxy
if (config.encryptionAtRestMode.isEncryptionEnabled() && db.client.encryptKeyProxy.present() &&
(!nonExcludedWorkerProcessMap.count(db.client.encryptKeyProxy.get().address()) ||
(!nonExcludedWorkerProcessMap.contains(db.client.encryptKeyProxy.get().address()) ||
nonExcludedWorkerProcessMap[db.client.encryptKeyProxy.get().address()].processClass.machineClassFitness(
ProcessClass::EncryptKeyProxy) > fitnessLowerBound)) {
TraceEvent("ConsistencyCheck_EncryptKeyProxyNotBest")
.detail("BestEncryptKeyProxyFitness", fitnessLowerBound)
.detail("ExistingEncryptKeyProxyFitness",
nonExcludedWorkerProcessMap.count(db.client.encryptKeyProxy.get().address())
nonExcludedWorkerProcessMap.contains(db.client.encryptKeyProxy.get().address())
? nonExcludedWorkerProcessMap[db.client.encryptKeyProxy.get().address()]
.processClass.machineClassFitness(ProcessClass::EncryptKeyProxy)
: -1);
@ -1675,13 +1675,13 @@ struct ConsistencyCheckWorkload : TestWorkload {
// Check ConsistencyScan
if (db.consistencyScan.present() &&
(!nonExcludedWorkerProcessMap.count(db.consistencyScan.get().address()) ||
(!nonExcludedWorkerProcessMap.contains(db.consistencyScan.get().address()) ||
nonExcludedWorkerProcessMap[db.consistencyScan.get().address()].processClass.machineClassFitness(
ProcessClass::ConsistencyScan) > fitnessLowerBound)) {
TraceEvent("ConsistencyCheck_ConsistencyScanNotBest")
.detail("BestConsistencyScanFitness", fitnessLowerBound)
.detail("ExistingConsistencyScanFitness",
nonExcludedWorkerProcessMap.count(db.consistencyScan.get().address())
nonExcludedWorkerProcessMap.contains(db.consistencyScan.get().address())
? nonExcludedWorkerProcessMap[db.consistencyScan.get().address()]
.processClass.machineClassFitness(ProcessClass::ConsistencyScan)
: -1);

View File

@ -195,7 +195,7 @@ struct DataLossRecoveryWorkload : TestWorkload {
state std::vector<StorageServerInterface> interfs = wait(getStorageServers(cx));
if (!interfs.empty()) {
state StorageServerInterface interf = interfs[deterministicRandom()->randomInt(0, interfs.size())];
if (g_simulator->protectedAddresses.count(interf.address()) == 0) {
if (!g_simulator->protectedAddresses.contains(interf.address())) {
// We need to avoid selecting a storage server that is already dead at this point, otherwise
// the test will hang. This is achieved by sending a GetStorageMetrics RPC. This is a necessary
// check for this test because DD has been disabled and the proper mechanism that removes bad

View File

@ -123,7 +123,7 @@ struct DiskDurabilityTest : TestWorkload {
state std::vector<int64_t> targetPages;
for (int i = deterministicRandom()->randomInt(1, 100); i > 0 && targetPages.size() < size / 4096; i--) {
auto p = deterministicRandom()->randomInt(0, size / 4096);
if (!std::count(targetPages.begin(), targetPages.end(), p))
if (std::find(targetPages.begin(), targetPages.end(), p) == targetPages.end())
targetPages.push_back(p);
}
for (int i = deterministicRandom()->randomInt(1, 4); i > 0; i--) {

View File

@ -194,11 +194,10 @@ struct DiskFailureInjectionWorkload : FailureInjectionWorkload {
TraceEvent("ResendChaos")
.detail("ChosenWorkersSize", self->chosenWorkers.size())
.detail("FoundWorkers", workersMap.size())
.detail(
"ResendToNumber",
std::count_if(self->chosenWorkers.begin(),
self->chosenWorkers.end(),
[&map = std::as_const(workersMap)](auto const& addr) { return map.count(addr) > 0; }));
.detail("ResendToNumber",
std::count_if(self->chosenWorkers.begin(),
self->chosenWorkers.end(),
[&map = std::as_const(workersMap)](auto const& addr) { return map.contains(addr); }));
for (auto& workerAddress : self->chosenWorkers) {
auto itr = workersMap.find(workerAddress);
if (itr != workersMap.end()) {

View File

@ -102,7 +102,7 @@ struct ExcludeIncludeStorageServersWorkload : TestWorkload {
std::vector<std::pair<StorageServerInterface, ProcessClass>> results =
wait(NativeAPI::getServerListAndProcessClasses(&tr));
for (auto& [ssi, p] : results) {
if (g_simulator->protectedAddresses.count(ssi.address()) == 0) {
if (!g_simulator->protectedAddresses.contains(ssi.address())) {
servers.insert(AddressExclusion(ssi.address().ip, ssi.address().port));
}
}

View File

@ -225,7 +225,9 @@ struct FuzzApiCorrectnessWorkload : TestWorkload {
return TenantGroupNameRef(format("tenantgroup_%d", groupNum));
}
}
bool canUseTenant(Optional<TenantName> tenant) { return !tenant.present() || createdTenants.count(tenant.get()); }
bool canUseTenant(Optional<TenantName> tenant) {
return !tenant.present() || createdTenants.contains(tenant.get());
}
Future<Void> setup(Database const& cx) override {
if (clientId == 0) {

View File

@ -112,15 +112,15 @@ ACTOR Future<Void> httpKVRequestCallback(Reference<SimHTTPKVStore> kvStore,
// content-length and RequestID from http are already filled in
// ASSERT_EQ(req->data.headers.size(), 5);
ASSERT_EQ(req->data.headers.size(), 5);
ASSERT(req->data.headers.count("Key"));
ASSERT(req->data.headers.count("ClientID"));
ASSERT(req->data.headers.count("UID"));
ASSERT(req->data.headers.count("SeqNo"));
ASSERT(req->data.headers.contains("Key"));
ASSERT(req->data.headers.contains("ClientID"));
ASSERT(req->data.headers.contains("UID"));
ASSERT(req->data.headers.contains("SeqNo"));
int clientId = atoi(req->data.headers["ClientID"].c_str());
int seqNo = atoi(req->data.headers["SeqNo"].c_str());
ASSERT(req->data.headers.count("Content-Length"));
ASSERT(req->data.headers.contains("Content-Length"));
ASSERT_EQ(req->data.headers["Content-Length"], std::to_string(req->data.content.size()));
ASSERT_EQ(req->data.contentLen, req->data.content.size());
@ -291,11 +291,11 @@ struct HTTPKeyValueStoreWorkload : TestWorkload {
}
ASSERT_EQ(response->code, 200);
ASSERT(response->data.headers.count("ClientID"));
ASSERT(response->data.headers.contains("ClientID"));
ASSERT_EQ(response->data.headers["ClientID"], std::to_string(self->clientId));
ASSERT(response->data.headers.count("Key"));
ASSERT(response->data.headers.contains("Key"));
ASSERT_EQ(response->data.headers["Key"], key);
ASSERT(response->data.headers.count("UID"));
ASSERT(response->data.headers.contains("UID"));
ASSERT_EQ(response->data.headers["UID"], requestID.toString());
return response;
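
These header assertions use the contains() member that C++20 added to the standard (unordered) associative containers; it asks the membership question directly, whereas the older count(key) > 0 idiom reads as if duplicate keys were possible. A minimal hedged sketch, independent of the workload's request types:

#include <map>
#include <string>

// Membership check on a header map: the pre-C++20 idiom and its replacement.
bool hasHeader(const std::map<std::string, std::string>& headers, const std::string& name) {
    // return headers.count(name) > 0;  // older equivalent
    return headers.contains(name);      // C++20: same complexity, clearer intent
}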

View File

@ -219,7 +219,7 @@ struct MachineAttritionWorkload : FailureInjectionWorkload {
for (const auto& worker : workers) {
// kill all matching workers
if (idAccess(worker).present() &&
std::count(targets.begin(), targets.end(), idAccess(worker).get().toString())) {
std::find(targets.begin(), targets.end(), idAccess(worker).get().toString()) != targets.end()) {
TraceEvent("SendingRebootRequest").detail("TargetWorker", worker.interf.locality.toString());
worker.interf.clientInterface.reboot.send(rbReq);
}

View File

@ -117,8 +117,8 @@ struct MetaclusterManagementWorkload : TestWorkload {
for (int i = 0; i < 20; ++i) {
int64_t newPrefix = deterministicRandom()->randomInt(TenantAPI::TENANT_ID_PREFIX_MIN_VALUE,
TenantAPI::TENANT_ID_PREFIX_MAX_VALUE + 1);
if (allowTenantIdPrefixReuse || !usedPrefixes.count(newPrefix)) {
CODE_PROBE(usedPrefixes.count(newPrefix), "Reusing tenant ID prefix", probe::decoration::rare);
if (allowTenantIdPrefixReuse || !usedPrefixes.contains(newPrefix)) {
CODE_PROBE(usedPrefixes.contains(newPrefix), "Reusing tenant ID prefix", probe::decoration::rare);
return newPrefix;
}
}
@ -606,7 +606,7 @@ struct MetaclusterManagementWorkload : TestWorkload {
state bool foundTenantCollision = false;
for (auto t : dataDb->tenants) {
if (self->createdTenants.count(t.first)) {
if (self->createdTenants.contains(t.first)) {
foundTenantCollision = true;
tenantsToRemove.insert(t.first);
}
@ -614,7 +614,7 @@ struct MetaclusterManagementWorkload : TestWorkload {
state bool foundGroupCollision = false;
for (auto t : dataDb->tenantGroups) {
if (self->tenantGroups.count(t.first)) {
if (self->tenantGroups.contains(t.first)) {
foundGroupCollision = true;
tenantsToRemove.insert(t.second->tenants.begin(), t.second->tenants.end());
}
@ -1011,7 +1011,7 @@ struct MetaclusterManagementWorkload : TestWorkload {
auto itr = self->createdTenants.find(tenant);
state bool exists = itr != self->createdTenants.end();
state bool tenantGroupExists = tenantGroup.present() && self->tenantGroups.count(tenantGroup.get());
state bool tenantGroupExists = tenantGroup.present() && self->tenantGroups.contains(tenantGroup.get());
state bool hasCapacity = tenantGroupExists || self->ungroupedTenants.size() + self->tenantGroups.size() <
self->totalTenantGroupCapacity;
@ -1740,7 +1740,7 @@ struct MetaclusterManagementWorkload : TestWorkload {
ASSERT_EQ(tenants.size(), clusterData->tenants.size());
for (auto [tenantName, tenantEntry] : tenants) {
ASSERT(clusterData->tenants.count(tenantName));
ASSERT(clusterData->tenants.contains(tenantName));
auto tenantData = clusterData->tenants.find(tenantName);
ASSERT(tenantData != clusterData->tenants.end());
ASSERT(tenantData->second->cluster == clusterName);

View File

@ -139,7 +139,7 @@ struct MetaclusterRestoreWorkload : TestWorkload {
do {
tenantGroup = TenantGroupNameRef(
format("tenantgroup%08d", deterministicRandom()->randomInt(0, maxTenantGroups)));
} while (tenantGroups.count(tenantGroup.get()) > 0);
} while (tenantGroups.contains(tenantGroup.get()));
}
}
}
@ -368,7 +368,7 @@ struct MetaclusterRestoreWorkload : TestWorkload {
for (auto const& t : tenantCollisions) {
// If the data cluster tenant is expected, then remove the management tenant
// Note that the management tenant may also have been expected
if (self->createdTenants.count(t.second.first)) {
if (self->createdTenants.contains(t.second.first)) {
CODE_PROBE(true, "Remove management tenant in restore collision");
removeTrackedTenant(t.second.second);
deleteFutures.push_back(metacluster::deleteTenant(self->managementDb, t.second.second));
@ -527,7 +527,7 @@ struct MetaclusterRestoreWorkload : TestWorkload {
}
}
for (auto const& g : dataClusterGroups.results) {
if (managementGroups.count(g.first)) {
if (managementGroups.contains(g.first)) {
groupCollisions.insert(g.first);
}
}
@ -765,12 +765,12 @@ struct MetaclusterRestoreWorkload : TestWorkload {
state TenantName tenantName;
for (int i = 0; i < 10; ++i) {
tenantName = self->chooseTenantName();
if (self->tenantNameIndex.count(tenantName) == 0) {
if (!self->tenantNameIndex.contains(tenantName)) {
break;
}
}
if (self->tenantNameIndex.count(tenantName)) {
if (self->tenantNameIndex.contains(tenantName)) {
return Void();
}
@ -815,12 +815,12 @@ struct MetaclusterRestoreWorkload : TestWorkload {
state TenantName tenantName;
for (int i = 0; i < 10; ++i) {
tenantName = self->chooseTenantName();
if (self->tenantNameIndex.count(tenantName) != 0) {
if (self->tenantNameIndex.contains(tenantName)) {
break;
}
}
if (self->tenantNameIndex.count(tenantName) == 0) {
if (!self->tenantNameIndex.contains(tenantName)) {
return Void();
}
@ -856,12 +856,12 @@ struct MetaclusterRestoreWorkload : TestWorkload {
state TenantName tenantName;
for (int i = 0; i < 10; ++i) {
tenantName = self->chooseTenantName();
if (self->tenantNameIndex.count(tenantName) != 0) {
if (self->tenantNameIndex.contains(tenantName)) {
break;
}
}
if (self->tenantNameIndex.count(tenantName) == 0) {
if (!self->tenantNameIndex.contains(tenantName)) {
return Void();
}
@ -920,18 +920,18 @@ struct MetaclusterRestoreWorkload : TestWorkload {
state TenantName newTenantName;
for (int i = 0; i < 10; ++i) {
oldTenantName = self->chooseTenantName();
if (self->tenantNameIndex.count(oldTenantName) != 0) {
if (self->tenantNameIndex.contains(oldTenantName)) {
break;
}
}
for (int i = 0; i < 10; ++i) {
newTenantName = self->chooseTenantName();
if (self->tenantNameIndex.count(newTenantName) == 0) {
if (!self->tenantNameIndex.contains(newTenantName)) {
break;
}
}
if (self->tenantNameIndex.count(oldTenantName) == 0 || self->tenantNameIndex.count(newTenantName) != 0) {
if (!self->tenantNameIndex.contains(oldTenantName) || self->tenantNameIndex.contains(newTenantName)) {
return Void();
}
@ -1094,7 +1094,7 @@ struct MetaclusterRestoreWorkload : TestWorkload {
if (!clusterData.restored) {
ASSERT_EQ(tenants.results.size(), clusterData.tenants.size());
for (auto [tenantId, tenantEntry] : tenants.results) {
ASSERT(clusterData.tenants.count(tenantId));
ASSERT(clusterData.tenants.contains(tenantId));
auto tenantData = self->createdTenants[tenantId];
ASSERT(tenantData.cluster == clusterName);
ASSERT(tenantData.tenantGroup == tenantEntry.tenantGroup);
@ -1128,9 +1128,9 @@ struct MetaclusterRestoreWorkload : TestWorkload {
// Check for deleted tenants that reappeared
int unexpectedTenants = 0;
for (auto const& [tenantId, tenantEntry] : tenantMap) {
if (!clusterData.tenants.count(tenantId)) {
if (!clusterData.tenants.contains(tenantId)) {
ASSERT(self->recoverManagementCluster);
ASSERT(self->deletedTenants.count(tenantId));
ASSERT(self->deletedTenants.contains(tenantId));
++unexpectedTenants;
}
}
@ -1204,8 +1204,8 @@ struct MetaclusterRestoreWorkload : TestWorkload {
// If we recovered both the management and some data clusters, we might undelete a tenant
// Check that any unexpected tenants were deleted and that we had a potentially lossy recovery
for (auto const& [tenantId, tenantEntry] : tenantMap) {
if (!self->createdTenants.count(tenantId)) {
ASSERT(self->deletedTenants.count(tenantId));
if (!self->createdTenants.contains(tenantId)) {
ASSERT(self->deletedTenants.contains(tenantId));
ASSERT(self->recoverManagementCluster);
ASSERT(self->recoverDataClusters);
}

View File

@ -558,7 +558,7 @@ struct PhysicalShardMoveWorkLoad : TestWorkload {
ASSERT(interfs.size() > teamSize - includes.size());
while (includes.size() < teamSize) {
const auto& interf = interfs[deterministicRandom()->randomInt(0, interfs.size())];
if (excludes.count(interf.uniqueID) == 0 && includes.count(interf.uniqueID) == 0) {
if (!excludes.contains(interf.uniqueID) && !includes.contains(interf.uniqueID)) {
includes.insert(interf.uniqueID);
}
}

View File

@ -115,7 +115,7 @@ struct MoveKeysWorkload : FailureInjectionWorkload {
while (t.size() < teamSize && storageServers.size()) {
auto s = storageServers.back();
storageServers.pop_back();
if (!machines.count(s.locality.zoneId())) {
if (!machines.contains(s.locality.zoneId())) {
machines.insert(s.locality.zoneId());
t.insert(s);
}

View File

@ -140,7 +140,7 @@ struct RawTenantAccessWorkload : TestWorkload {
Key key = self->specialKeysTenantMapPrefix.withSuffix(self->indexToTenantName(*it));
Optional<Value> value = wait(tr->get(key));
// the commit proxies should have the same view of the tenant map
ASSERT_EQ(value.present(), lastCommitted || (self->idx2Tid.count(*it) > 0));
ASSERT_EQ(value.present(), lastCommitted || (self->idx2Tid.contains(*it)));
if (value.present()) {
auto id = self->extractTenantId(value.get());
@ -182,7 +182,7 @@ struct RawTenantAccessWorkload : TestWorkload {
ASSERT(hasNonexistentTenant());
int tenantIdx = deterministicRandom()->randomInt(0, tenantCount);
// find the nearest nonexistent tenant
while (idx2Tid.count(tenantIdx) || lastCreatedTenants.count(tenantIdx)) {
while (idx2Tid.contains(tenantIdx) || lastCreatedTenants.contains(tenantIdx)) {
tenantIdx++;
if (tenantIdx == tenantCount) {
tenantIdx = 0;
@ -201,7 +201,7 @@ struct RawTenantAccessWorkload : TestWorkload {
int tenantIdx = deterministicRandom()->randomInt(0, tenantCount);
// find the nearest existing tenant
while (true) {
if (idx2Tid.count(tenantIdx) && !lastDeletedTenants.count(tenantIdx)) {
if (idx2Tid.contains(tenantIdx) && !lastDeletedTenants.contains(tenantIdx)) {
break;
}
tenantIdx++;
@ -241,7 +241,7 @@ struct RawTenantAccessWorkload : TestWorkload {
// randomly generate a tenant id
do {
tenantId = deterministicRandom()->randomInt64(0, std::numeric_limits<int64_t>::max());
} while (tid2Idx.count(tenantId));
} while (tid2Idx.contains(tenantId));
}
ASSERT_GE(tenantId, 0);

View File

@ -82,12 +82,12 @@ struct RemoveServersSafelyWorkload : TestWorkload {
.detail("Zoneid", it->locality.zoneId().get().toString())
.detail("MachineId", it->locality.machineId().get().toString());
if (g_simulator->protectedAddresses.count(it->address) == 0)
if (!g_simulator->protectedAddresses.contains(it->address))
processAddrs.push_back(pAddr);
machineProcesses[machineIp].insert(pAddr);
// add only one entry for each machine
if (!machinesMap.count(it->locality.zoneId()))
if (!machinesMap.contains(it->locality.zoneId()))
machinesMap[it->locality.zoneId()] = machineIp;
machine_ids[machineIp] = it->locality.zoneId();
@ -107,7 +107,7 @@ struct RemoveServersSafelyWorkload : TestWorkload {
for (auto k1 : toKill1) {
AddressExclusion machineIp(k1.ip);
ASSERT(machineProcesses.count(machineIp));
ASSERT(machineProcesses.contains(machineIp));
// kill all processes on this machine even if it has a different ip address
std::copy(machineProcesses[machineIp].begin(),
machineProcesses[machineIp].end(),
@ -118,7 +118,7 @@ struct RemoveServersSafelyWorkload : TestWorkload {
processSet.clear();
for (auto k2 : toKill2) {
AddressExclusion machineIp(k2.ip);
ASSERT(machineProcesses.count(machineIp));
ASSERT(machineProcesses.contains(machineIp));
std::copy(machineProcesses[machineIp].begin(),
machineProcesses[machineIp].end(),
std::inserter(processSet, processSet.end()));
@ -128,13 +128,13 @@ struct RemoveServersSafelyWorkload : TestWorkload {
for (AddressExclusion ex : toKill1) {
AddressExclusion machineIp(ex.ip);
ASSERT(machine_ids.count(machineIp));
ASSERT(machine_ids.contains(machineIp));
g_simulator->disableSwapToMachine(machine_ids[machineIp]);
}
for (AddressExclusion ex : toKill2) {
AddressExclusion machineIp(ex.ip);
ASSERT(machine_ids.count(machineIp));
ASSERT(machine_ids.contains(machineIp));
g_simulator->disableSwapToMachine(machine_ids[machineIp]);
}
@ -191,7 +191,7 @@ struct RemoveServersSafelyWorkload : TestWorkload {
.detail("Failed", processInfo->failed)
.detail("Excluded", processInfo->excluded)
.detail("Rebooting", processInfo->rebooting)
.detail("Protected", g_simulator->protectedAddresses.count(processInfo->address));
.detail("Protected", g_simulator->protectedAddresses.contains(processInfo->address));
} else {
TraceEvent("RemoveAndKill", functionId)
.detail("Step", "ProcessNotToKill")
@ -200,7 +200,7 @@ struct RemoveServersSafelyWorkload : TestWorkload {
.detail("Failed", processInfo->failed)
.detail("Excluded", processInfo->excluded)
.detail("Rebooting", processInfo->rebooting)
.detail("Protected", g_simulator->protectedAddresses.count(processInfo->address));
.detail("Protected", g_simulator->protectedAddresses.contains(processInfo->address));
}
}
TraceEvent("RemoveAndKill", functionId)
@ -459,14 +459,14 @@ struct RemoveServersSafelyWorkload : TestWorkload {
.detail("ClusterAvailable", g_simulator->isAvailable())
.detail("RemoveViaClear", removeViaClear);
for (auto& killProcess : killProcArray) {
if (g_simulator->protectedAddresses.count(killProcess->address))
if (g_simulator->protectedAddresses.contains(killProcess->address))
TraceEvent("RemoveAndKill", functionId)
.detail("Step", "NoKill Process")
.detail("Process", describe(*killProcess))
.detail("Failed", killProcess->failed)
.detail("Rebooting", killProcess->rebooting)
.detail("ClusterAvailable", g_simulator->isAvailable())
.detail("Protected", g_simulator->protectedAddresses.count(killProcess->address));
.detail("Protected", g_simulator->protectedAddresses.contains(killProcess->address));
else if (removeViaClear) {
g_simulator->rebootProcess(killProcess, ISimulator::KillType::RebootProcessAndDelete);
TraceEvent("RemoveAndKill", functionId)
@ -475,12 +475,12 @@ struct RemoveServersSafelyWorkload : TestWorkload {
.detail("Failed", killProcess->failed)
.detail("Rebooting", killProcess->rebooting)
.detail("ClusterAvailable", g_simulator->isAvailable())
.detail("Protected", g_simulator->protectedAddresses.count(killProcess->address));
.detail("Protected", g_simulator->protectedAddresses.contains(killProcess->address));
}
/*
else {
g_simulator->killProcess( killProcess, ISimulator::KillType::KillInstantly );
TraceEvent("RemoveAndKill", functionId).detail("Step", "Kill Process").detail("Process", describe(*killProcess)).detail("Failed", killProcess->failed).detail("Rebooting", killProcess->rebooting).detail("ClusterAvailable", g_simulator->isAvailable()).detail("Protected", g_simulator->protectedAddresses.count(killProcess->address));
TraceEvent("RemoveAndKill", functionId).detail("Step", "Kill Process").detail("Process", describe(*killProcess)).detail("Failed", killProcess->failed).detail("Rebooting", killProcess->rebooting).detail("ClusterAvailable", g_simulator->isAvailable()).detail("Protected", g_simulator->protectedAddresses.contains(killProcess->address));
}
*/
}
@ -798,7 +798,7 @@ struct RemoveServersSafelyWorkload : TestWorkload {
bool killContainsProcess(AddressExclusion kill, NetworkAddress process) {
return kill.excludes(process) || (machineProcesses.find(kill) != machineProcesses.end() &&
machineProcesses[kill].count(AddressExclusion(process.ip, process.port)) > 0);
machineProcesses[kill].contains(AddressExclusion(process.ip, process.port)));
}
// Finds the localities list that can be excluded from the safe killable addresses list.
@ -836,8 +836,8 @@ struct RemoveServersSafelyWorkload : TestWorkload {
std::map<std::string, std::string> localityData = processInfo->locality.getAllData();
bool found = false;
for (const auto& l : localityData) {
if (toKillLocalities.count(LocalityData::ExcludeLocalityPrefix.toString() + l.first + ":" +
l.second)) {
if (toKillLocalities.contains(LocalityData::ExcludeLocalityPrefix.toString() + l.first + ":" +
l.second)) {
found = true;
break;
}

View File

@ -459,7 +459,7 @@ struct TenantManagementWorkload : TestWorkload {
state std::map<TenantName, TenantMapEntry> tenantsToCreate;
for (int i = 0; i < numTenants; ++i) {
TenantName tenant = self->chooseTenantName(true);
while (tenantsToCreate.count(tenant)) {
while (tenantsToCreate.contains(tenant)) {
tenant = self->chooseTenantName(true);
}
@ -467,9 +467,9 @@ struct TenantManagementWorkload : TestWorkload {
entry.tenantName = tenant;
entry.tenantGroup = self->chooseTenantGroup(true);
if (self->createdTenants.count(tenant)) {
if (self->createdTenants.contains(tenant)) {
alreadyExists = true;
} else if (!tenantsToCreate.count(tenant)) {
} else if (!tenantsToCreate.contains(tenant)) {
++newTenants;
}
@ -579,7 +579,7 @@ struct TenantManagementWorkload : TestWorkload {
state typename std::map<TenantName, TenantMapEntry>::iterator tenantItr;
for (tenantItr = tenantsToCreate.begin(); tenantItr != tenantsToCreate.end(); ++tenantItr) {
// Ignore any tenants that already existed
if (self->createdTenants.count(tenantItr->first)) {
if (self->createdTenants.contains(tenantItr->first)) {
continue;
}
@ -1452,7 +1452,7 @@ struct TenantManagementWorkload : TestWorkload {
TenantName oldTenant = self->chooseTenantName(false);
TenantName newTenant = self->chooseTenantName(false);
bool checkOverlap =
oldTenant == newTenant || allTenantNames.count(oldTenant) || allTenantNames.count(newTenant);
oldTenant == newTenant || allTenantNames.contains(oldTenant) || allTenantNames.contains(newTenant);
// These operation types do not handle rename collisions
// reject the rename here if it has overlap
if (checkOverlap && (operationType == OperationType::MANAGEMENT_TRANSACTION ||
@ -1464,10 +1464,10 @@ struct TenantManagementWorkload : TestWorkload {
tenantRenames[oldTenant] = newTenant;
allTenantNames.insert(oldTenant);
allTenantNames.insert(newTenant);
if (!self->createdTenants.count(oldTenant)) {
if (!self->createdTenants.contains(oldTenant)) {
tenantNotFound = true;
}
if (self->createdTenants.count(newTenant)) {
if (self->createdTenants.contains(newTenant)) {
tenantExists = true;
}
}
@ -1657,7 +1657,7 @@ struct TenantManagementWorkload : TestWorkload {
ASSERT_GT(currentVersionstamp.version, originalReadVersion);
}
if (tenantGroupChanging) {
ASSERT(configuration.count("tenant_group"_sr) > 0);
ASSERT(configuration.contains("tenant_group"_sr));
auto itr = self->createdTenants.find(tenant);
if (itr->second.tenantGroup.present()) {
auto tenantGroupItr = self->createdTenantGroups.find(itr->second.tenantGroup.get());

View File

@ -258,7 +258,7 @@ struct MeasurePeriodically : IMeasurer {
std::vector<PerfMetric> m;
msp.getMetrics(m);
for (auto i = m.begin(); i != m.end(); ++i)
if (includeMetrics.count(i->name())) {
if (includeMetrics.contains(i->name())) {
accumulatedMetrics.push_back(i->withPrefix(prefix));
}

View File

@ -195,7 +195,7 @@ struct VersionStampWorkload : TestWorkload {
RangeResult result_ = wait(tr.getRange(
KeyRangeRef(self->vsValuePrefix, endOfRange(self->vsValuePrefix)), self->nodeCount + 1));
result = result_;
if (self->allowMetadataVersionKey && self->key_commit.count(metadataVersionKey)) {
if (self->allowMetadataVersionKey && self->key_commit.contains(metadataVersionKey)) {
Optional<Value> mVal = wait(tr.get(metadataVersionKey));
if (mVal.present()) {
result.push_back_deep(result.arena(), KeyValueRef(metadataVersionKey, mVal.get()));

View File

@ -139,7 +139,7 @@ struct WriteTagThrottlingWorkload : KVWorkload {
.detail("BadActorThrottleRetries", badActorThrottleRetries)
.detail("GoodActorThrottleRetries", goodActorThrottleRetries);
}
if (!throttledTags.empty() && throttledTags.count(badTag.toString()) == 0) {
if (!throttledTags.empty() && !throttledTags.contains(badTag.toString())) {
TraceEvent(SevWarnAlways, "IncorrectThrottle")
.detail("ThrottledTagNumber", throttledTags.size())
.detail("ThrottledTags", setToString(throttledTags));