diff --git a/fdbserver/ApplyMetadataMutation.cpp b/fdbserver/ApplyMetadataMutation.cpp index db78dc278c..03c4b14aa4 100644 --- a/fdbserver/ApplyMetadataMutation.cpp +++ b/fdbserver/ApplyMetadataMutation.cpp @@ -93,9 +93,9 @@ public: acsBuilder(proxyCommitData_.acsBuilder), epoch(proxyCommitData_.epoch) { if (encryptMode.isEncryptionEnabled()) { ASSERT(cipherKeys != nullptr); - ASSERT(cipherKeys->count(SYSTEM_KEYSPACE_ENCRYPT_DOMAIN_ID) > 0); + ASSERT(cipherKeys->contains(SYSTEM_KEYSPACE_ENCRYPT_DOMAIN_ID)); if (FLOW_KNOBS->ENCRYPT_HEADER_AUTH_TOKEN_ENABLED) { - ASSERT(cipherKeys->count(ENCRYPT_HEADER_DOMAIN_ID)); + ASSERT(cipherKeys->contains(ENCRYPT_HEADER_DOMAIN_ID)); } } // If commit proxy, epoch must be set @@ -115,9 +115,9 @@ public: accumulativeChecksumIndex(resolverAccumulativeChecksumIndex), epoch(Optional()) { if (encryptMode.isEncryptionEnabled()) { ASSERT(cipherKeys != nullptr); - ASSERT(cipherKeys->count(SYSTEM_KEYSPACE_ENCRYPT_DOMAIN_ID) > 0); + ASSERT(cipherKeys->contains(SYSTEM_KEYSPACE_ENCRYPT_DOMAIN_ID)); if (FLOW_KNOBS->ENCRYPT_HEADER_AUTH_TOKEN_ENABLED) { - ASSERT(cipherKeys->count(ENCRYPT_HEADER_DOMAIN_ID)); + ASSERT(cipherKeys->contains(ENCRYPT_HEADER_DOMAIN_ID)); } } } @@ -1184,7 +1184,7 @@ private: bool foundKey = false; for (auto& it : vecBackupKeys->intersectingRanges(normalKeys)) { - if (it.value().count(logDestination) > 0) { + if (it.value().contains(logDestination)) { foundKey = true; break; } @@ -1192,7 +1192,7 @@ private: auto& systemBackupRanges = getSystemBackupRanges(); for (auto r = systemBackupRanges.begin(); !foundKey && r != systemBackupRanges.end(); ++r) { for (auto& it : vecBackupKeys->intersectingRanges(*r)) { - if (it.value().count(logDestination) > 0) { + if (it.value().contains(logDestination)) { foundKey = true; break; } diff --git a/fdbserver/BackupProgress.actor.cpp b/fdbserver/BackupProgress.actor.cpp index 1b3792ec90..594ad1e69b 100644 --- a/fdbserver/BackupProgress.actor.cpp +++ b/fdbserver/BackupProgress.actor.cpp @@ -92,7 +92,7 @@ std::map, std::map> BackupProgr auto prev = std::prev(current); // Previous epoch is gone, consolidate the progress. 
for (auto [tag, version] : prev->second) { - if (toCheck.count(tag) > 0) { + if (toCheck.contains(tag)) { progressIt->second[tag] = std::max(version, progressIt->second[tag]); toCheck.erase(tag); } diff --git a/fdbserver/BlobGranuleValidation.actor.cpp b/fdbserver/BlobGranuleValidation.actor.cpp index 620ea6d2f4..7e75b603f2 100644 --- a/fdbserver/BlobGranuleValidation.actor.cpp +++ b/fdbserver/BlobGranuleValidation.actor.cpp @@ -630,7 +630,7 @@ ACTOR Future killBlobWorkers(Database cx) { if (first) { knownWorkers.insert(interf.id()); } - if (knownWorkers.count(interf.id())) { + if (knownWorkers.contains(interf.id())) { haltIds.push_back(interf.id()); haltRequests.push_back(interf.haltBlobWorker.tryGetReply(HaltBlobWorkerRequest(1e6, UID()))); } diff --git a/fdbserver/BlobManager.actor.cpp b/fdbserver/BlobManager.actor.cpp index 6f44b7bb39..d41dea2278 100644 --- a/fdbserver/BlobManager.actor.cpp +++ b/fdbserver/BlobManager.actor.cpp @@ -1000,7 +1000,7 @@ ACTOR Future doRangeAssignment(Reference bmData, // actor map, cancelling this actor before it got here bmData->workerAssignments.insert(assignment.keyRange, workerID.get()); - if (bmData->workerStats.count(workerID.get())) { + if (bmData->workerStats.contains(workerID.get())) { bmData->workerStats[workerID.get()].numGranulesAssigned += 1; } @@ -1040,7 +1040,7 @@ ACTOR Future doRangeAssignment(Reference bmData, req.type = assignment.assign.get().type; // if that worker isn't alive anymore, add the range back into the stream - if (bmData->workersById.count(workerID.get()) == 0) { + if (!bmData->workersById.contains(workerID.get())) { throw no_more_servers(); } state Future assignFuture = bmData->workersById[workerID.get()].assignBlobRangeRequest.getReply(req); @@ -1073,7 +1073,7 @@ ACTOR Future doRangeAssignment(Reference bmData, req.dispose = assignment.revoke.get().dispose; // if that worker isn't alive anymore, this is a noop - if (bmData->workersById.count(workerID.get())) { + if (bmData->workersById.contains(workerID.get())) { wait(bmData->workersById[workerID.get()].revokeBlobRangeRequest.getReply(req)); } else { return Void(); @@ -1271,7 +1271,7 @@ static bool handleRangeIsAssign(Reference bmData, RangeAssignme bmData->assignsInProgress.insert(assignment.keyRange, doRangeAssignment(bmData, assignment, workerId, bmData->epoch, seqNo)); } - if (bmData->workerStats.count(workerId)) { + if (bmData->workerStats.contains(workerId)) { bmData->workerStats[workerId].numGranulesAssigned += 1; } } @@ -1291,7 +1291,7 @@ static bool handleRangeIsAssign(Reference bmData, RangeAssignme static bool handleRangeIsRevoke(Reference bmData, RangeAssignment assignment, int64_t seqNo) { if (assignment.worker.present()) { // revoke this specific range from this specific worker. 
Either part of recovery or failing a worker - if (bmData->workerStats.count(assignment.worker.get())) { + if (bmData->workerStats.contains(assignment.worker.get())) { bmData->workerStats[assignment.worker.get()].numGranulesAssigned -= 1; } // if this revoke matches the worker assignment state, mark the range as unassigned @@ -1333,7 +1333,7 @@ static bool handleRangeIsRevoke(Reference bmData, RangeAssignme // It is fine for multiple disjoint sub-ranges to have the same sequence number since they were part // of the same logical change - if (bmData->workerStats.count(it.value())) { + if (bmData->workerStats.contains(it.value())) { bmData->workerStats[it.value()].numGranulesAssigned -= 1; } @@ -1407,7 +1407,7 @@ ACTOR Future writeInitialGranuleMapping(Reference bmData, KeyRangeRef(splitPoints.keys[i], splitPoints.keys[endIdx]), blobGranuleMappingValueFor(UID()))); for (j = 0; i + j < endIdx; j++) { - if (splitPoints.boundaries.count(splitPoints.keys[i + j])) { + if (splitPoints.boundaries.contains(splitPoints.keys[i + j])) { tr->set(blobGranuleMergeBoundaryKeyFor(splitPoints.keys[i + j]), blobGranuleMergeBoundaryValueFor(splitPoints.boundaries[splitPoints.keys[i + j]])); } @@ -1419,7 +1419,7 @@ ACTOR Future writeInitialGranuleMapping(Reference bmData, // Update BlobGranuleMergeBoundary in-memory state. for (int k = i; k < i + j; k++) { KeyRef beginKey = splitPoints.keys[k]; - if (splitPoints.boundaries.count(beginKey)) { + if (splitPoints.boundaries.contains(beginKey)) { bmData->mergeBoundaries[beginKey] = splitPoints.boundaries[beginKey]; } } @@ -1809,7 +1809,7 @@ ACTOR Future reevaluateInitialSplit(Reference bmData, if (BM_DEBUG) { fmt::print("Aligned split ({0}):\n", finalSplit.keys.size()); for (auto& it : finalSplit.keys) { - fmt::print(" {0}{1}\n", it.printable(), finalSplit.boundaries.count(it) ? " *" : ""); + fmt::print(" {0}{1}\n", it.printable(), finalSplit.boundaries.contains(it) ? " *" : ""); } } @@ -1934,7 +1934,7 @@ ACTOR Future reevaluateInitialSplit(Reference bmData, blobGranuleMappingKeys.begin, KeyRangeRef(finalSplit.keys[i], finalSplit.keys[i + 1]), blobGranuleMappingValueFor(UID()))); - if (finalSplit.boundaries.count(finalSplit.keys[i])) { + if (finalSplit.boundaries.contains(finalSplit.keys[i])) { tr->set(blobGranuleMergeBoundaryKeyFor(finalSplit.keys[i]), blobGranuleMergeBoundaryValueFor(finalSplit.boundaries[finalSplit.keys[i]])); } @@ -2063,7 +2063,7 @@ ACTOR Future maybeSplitRange(Reference bmData, fmt::print(" {0}:{1}{2}\n", (i < newGranuleIDs.size() ? newGranuleIDs[i] : UID()).toString().substr(0, 6).c_str(), splitPoints.keys[i].printable(), - splitPoints.boundaries.count(splitPoints.keys[i]) ? " *" : ""); + splitPoints.boundaries.contains(splitPoints.keys[i]) ? " *" : ""); } } @@ -2195,7 +2195,7 @@ ACTOR Future maybeSplitRange(Reference bmData, MutationRef::SetVersionstampedValue); // Update BlobGranuleMergeBoundary. - if (splitPoints.boundaries.count(splitRange.begin)) { + if (splitPoints.boundaries.contains(splitRange.begin)) { tr->set(blobGranuleMergeBoundaryKeyFor(splitRange.begin), blobGranuleMergeBoundaryValueFor(splitPoints.boundaries[splitRange.begin])); } @@ -2605,7 +2605,7 @@ ACTOR Future finishMergeGranules(Reference bmData, // Assert that none of the subsequent granules are hard boundaries. 
if (g_network->isSimulated()) { for (int i = 1; i < parentGranuleRanges.size() - 1; i++) { - ASSERT(!bmData->mergeHardBoundaries.count(parentGranuleRanges[i])); + ASSERT(!bmData->mergeHardBoundaries.contains(parentGranuleRanges[i])); } } @@ -2843,7 +2843,7 @@ ACTOR Future granuleMergeChecker(Reference bmData) { // 2. Hit the maximum in a merge evaluation window. // 3. Hit a hard merge boundary meaning we should not merge across them. if (!it->cvalue().mergeEligible() || currentCandidates.size() == maxRangeSize || - bmData->mergeHardBoundaries.count(it->range().begin)) { + bmData->mergeHardBoundaries.contains(it->range().begin)) { if (currentCandidates.size() >= 2) { mergeChecks.push_back(attemptMerges(bmData, currentCandidates)); } @@ -2859,8 +2859,8 @@ ACTOR Future granuleMergeChecker(Reference bmData) { // Conditions: // 1. Start a new soft merge range. // 2. End a soft merge range. - if ((!mergeBoundaries.count(curRange.begin) && mergeBoundaries.count(curRange.end)) || - (mergeBoundaries.count(lastRange.begin) && !mergeBoundaries.count(lastRange.end))) { + if ((!mergeBoundaries.contains(curRange.begin) && mergeBoundaries.contains(curRange.end)) || + (mergeBoundaries.contains(lastRange.begin) && !mergeBoundaries.contains(lastRange.end))) { if (currentCandidates.size() >= 2) { mergeChecks.push_back(attemptMerges(bmData, currentCandidates)); } @@ -2972,10 +2972,10 @@ ACTOR Future killBlobWorker(Reference bmData, BlobWorkerI } Optional successor = bwId; - while (bmData->workerAffinities.count(successor.get())) { + while (bmData->workerAffinities.contains(successor.get())) { successor = bmData->workerAffinities[successor.get()]; } - if (successor.get() == bwId || !bmData->workersById.count(successor.get())) { + if (successor.get() == bwId || !bmData->workersById.contains(successor.get())) { successor = Optional(); } @@ -3394,16 +3394,16 @@ ACTOR Future checkBlobWorkerList(Reference bmData, Promis // add all blob workers to this new blob manager's records and start monitoring it bool foundAnyNew = false; for (auto& worker : blobWorkers) { - if (!bmData->deadWorkers.count(worker.id())) { + if (!bmData->deadWorkers.contains(worker.id())) { bool isFailedOrExcluded = bmData->exclusionTracker.isFailedOrExcluded(worker.stableAddress()); - if (!bmData->workerAddresses.count(worker.stableAddress()) && + if (!bmData->workerAddresses.contains(worker.stableAddress()) && worker.locality.dcId() == bmData->dcId && !isFailedOrExcluded) { bmData->workerAddresses.insert(worker.stableAddress()); bmData->workersById[worker.id()] = worker; bmData->workerStats[worker.id()] = BlobWorkerInfo(); bmData->addActor.send(monitorBlobWorker(bmData, worker)); foundAnyNew = true; - } else if (!bmData->workersById.count(worker.id())) { + } else if (!bmData->workersById.contains(worker.id())) { TraceEvent("KillingExtraneousBlobWorker", bmData->id) .detail("WorkerId", worker.id()) .detail("Addr", worker.stableAddress()) @@ -3880,7 +3880,7 @@ ACTOR Future recoverBlobManager(Reference bmData) { assignment.seqnoAssigned, outOfDateAssignments); } - if (bmData->workerStats.count(workerId)) { + if (bmData->workerStats.contains(workerId)) { bmData->workerStats[workerId].numGranulesAssigned = reply.get().assignments.size(); } } else { @@ -4043,11 +4043,11 @@ ACTOR Future recoverBlobManager(Reference bmData) { // if worker id is already set to a known worker that replied with it in the mapping, range is already assigned // there. 
If not, need to explicitly assign it to someone - if (workerId == UID() || epoch == 0 || !endingWorkers.count(workerId)) { + if (workerId == UID() || epoch == 0 || !endingWorkers.contains(workerId)) { if (workerId == UID()) { workerId = workerAffinity; } - while (bmData->workerAffinities.count(workerId)) { + while (bmData->workerAffinities.contains(workerId)) { workerId = bmData->workerAffinities[workerId]; CODE_PROBE(true, "Blob worker has affinity after reboot"); } @@ -4058,7 +4058,7 @@ ACTOR Future recoverBlobManager(Reference bmData) { RangeAssignment raAssign; raAssign.isAssign = true; - if (bmData->workersById.count(workerId)) { + if (bmData->workersById.contains(workerId)) { raAssign.worker = workerId; } raAssign.keyRange = range.range(); @@ -4122,7 +4122,7 @@ ACTOR Future chaosRangeMover(Reference bmData) { while (tries > 0) { tries--; auto randomRange = bmData->workerAssignments.randomRange(); - if (randomRange.value() != UID() && !alreadyMoved.count(randomRange.range().toString())) { + if (randomRange.value() != UID() && !alreadyMoved.contains(randomRange.range().toString())) { if (BM_DEBUG) { fmt::print("Range mover moving range [{0} - {1}): {2}\n", randomRange.begin().printable().c_str(), @@ -4182,7 +4182,7 @@ ACTOR Future initializeBlobWorker(Reference self, // Ask the candidateWorker to initialize a BW only if the worker does not have a pending request if (numExistingBWOnAddr(self, workerAddr) == 0 && - self->recruitingLocalities.count(candidateWorker.worker.stableAddress()) == 0) { + !self->recruitingLocalities.contains(candidateWorker.worker.stableAddress())) { state UID interfaceId = deterministicRandom()->randomUniqueID(); state InitializeBlobWorkerRequest initReq; @@ -4230,13 +4230,13 @@ ACTOR Future initializeBlobWorker(Reference self, if (newBlobWorker.present()) { BlobWorkerInterface bwi = newBlobWorker.get().interf; - if (!self->deadWorkers.count(bwi.id())) { - if (!self->workerAddresses.count(bwi.stableAddress()) && bwi.locality.dcId() == self->dcId) { + if (!self->deadWorkers.contains(bwi.id())) { + if (!self->workerAddresses.contains(bwi.stableAddress()) && bwi.locality.dcId() == self->dcId) { self->workerAddresses.insert(bwi.stableAddress()); self->workersById[bwi.id()] = bwi; self->workerStats[bwi.id()] = BlobWorkerInfo(); self->addActor.send(monitorBlobWorker(self, bwi)); - } else if (!self->workersById.count(bwi.id())) { + } else if (!self->workersById.contains(bwi.id())) { self->addActor.send(killBlobWorker(self, bwi, false)); } } @@ -5970,7 +5970,7 @@ ACTOR Future blobManager(BlobManagerInterface bmInterf, if (g_network->isSimulated()) { UID clusterId = wait(fetchClusterId(self->db)); auto clusterEpoc = std::make_pair(clusterId, epoch); - bool managerEpochAlreadySeen = managerEpochsSeen.count(clusterEpoc); + bool managerEpochAlreadySeen = managerEpochsSeen.contains(clusterEpoc); if (managerEpochAlreadySeen) { TraceEvent(SevError, "DuplicateBlobManagersAtEpoch") .detail("ClusterId", clusterId) diff --git a/fdbserver/ClusterController.actor.cpp b/fdbserver/ClusterController.actor.cpp index 9620f34ae8..dff0380f3e 100644 --- a/fdbserver/ClusterController.actor.cpp +++ b/fdbserver/ClusterController.actor.cpp @@ -558,7 +558,7 @@ bool isHealthySingleton(ClusterControllerData* self, const Optional recruitingID) { // A singleton is stable if it exists in cluster, has not been killed off of proc and is not being recruited bool isStableSingleton = singleton.isPresent() && - self->id_worker.count(singleton.getInterface().locality.processId()) && + 
self->id_worker.contains(singleton.getInterface().locality.processId()) && (!recruitingID.present() || (recruitingID.get() == singleton.getInterface().id())); if (!isStableSingleton) { @@ -1149,7 +1149,7 @@ void haltRegisteringOrCurrentSingleton(ClusterControllerData* self, // if not currently recruiting, then halt previous one in favour of requesting one TraceEvent(("CCRegister" + roleName).c_str(), self->id).detail(roleAbbr + "ID", registeringID); if (currSingleton.isPresent() && currSingleton.getInterface().id() != registeringID && - self->id_worker.count(currSingleton.getInterface().locality.processId())) { + self->id_worker.contains(currSingleton.getInterface().locality.processId())) { TraceEvent(("CCHaltPrevious" + roleName).c_str(), self->id) .detail(roleAbbr + "ID", currSingleton.getInterface().id()) .detail("DcID", printable(self->clusterControllerDcId)) @@ -1713,7 +1713,7 @@ ACTOR Future monitorStorageMetadata(ClusterControllerData* self) { idMetadata[id] = decodeServerMetadataValue(sm.value); } for (auto& s : servers) { - if (idMetadata.count(s.id())) { + if (idMetadata.contains(s.id())) { s.metadata = idMetadata[s.id()]; } else { TraceEvent(SevWarn, "StorageServerMetadataMissing", self->id).detail("ServerID", s.id()); @@ -2236,7 +2236,7 @@ ACTOR Future startDataDistributor(ClusterControllerData* self, double wait .detail("Addr", worker.interf.address()) .detail("DDID", ddInterf.get().id()); if (distributor.present() && distributor.get().id() != ddInterf.get().id() && - self->id_worker.count(distributor.get().locality.processId())) { + self->id_worker.contains(distributor.get().locality.processId())) { TraceEvent("CCHaltDataDistributorAfterRecruit", self->id) .detail("DDID", distributor.get().id()) @@ -2336,7 +2336,7 @@ ACTOR Future startRatekeeper(ClusterControllerData* self, double waitTime) .detail("Addr", worker.interf.address()) .detail("RKID", interf.get().id()); if (ratekeeper.present() && ratekeeper.get().id() != interf.get().id() && - self->id_worker.count(ratekeeper.get().locality.processId())) { + self->id_worker.contains(ratekeeper.get().locality.processId())) { TraceEvent("CCHaltRatekeeperAfterRecruit", self->id) .detail("RKID", ratekeeper.get().id()) .detail("DcID", printable(self->clusterControllerDcId)); @@ -2426,7 +2426,7 @@ ACTOR Future startConsistencyScan(ClusterControllerData* self) { .detail("Addr", worker.interf.address()) .detail("CKID", interf.get().id()); if (consistencyScan.present() && consistencyScan.get().id() != interf.get().id() && - self->id_worker.count(consistencyScan.get().locality.processId())) { + self->id_worker.contains(consistencyScan.get().locality.processId())) { TraceEvent("CCHaltConsistencyScanAfterRecruit", self->id) .detail("CKID", consistencyScan.get().id()) .detail("DcID", printable(self->clusterControllerDcId)); @@ -2528,7 +2528,7 @@ ACTOR Future startEncryptKeyProxy(ClusterControllerData* self, EncryptionA .detail("Id", interf.get().id()) .detail("ProcessId", interf.get().locality.processId()); if (encryptKeyProxy.present() && encryptKeyProxy.get().id() != interf.get().id() && - self->id_worker.count(encryptKeyProxy.get().locality.processId())) { + self->id_worker.contains(encryptKeyProxy.get().locality.processId())) { TraceEvent("CCEKP_HaltAfterRecruit", self->id) .detail("Id", encryptKeyProxy.get().id()) .detail("DcId", printable(self->clusterControllerDcId)); @@ -2700,7 +2700,7 @@ ACTOR Future startBlobMigrator(ClusterControllerData* self, double waitTim .detail("Addr", worker.interf.address()) .detail("MGID", 
interf.get().id()); if (blobMigrator.present() && blobMigrator.get().id() != interf.get().id() && - self->id_worker.count(blobMigrator.get().locality.processId())) { + self->id_worker.contains(blobMigrator.get().locality.processId())) { TraceEvent("CCHaltBlobMigratorAfterRecruit", self->id) .detail("MGID", blobMigrator.get().id()) .detail("DcID", printable(self->clusterControllerDcId)); @@ -2805,7 +2805,7 @@ ACTOR Future startBlobManager(ClusterControllerData* self, double waitTime .detail("Addr", worker.interf.address()) .detail("BMID", interf.get().id()); if (blobManager.present() && blobManager.get().id() != interf.get().id() && - self->id_worker.count(blobManager.get().locality.processId())) { + self->id_worker.contains(blobManager.get().locality.processId())) { TraceEvent("CCHaltBlobManagerAfterRecruit", self->id) .detail("BMID", blobManager.get().id()) .detail("DcID", printable(self->clusterControllerDcId)); diff --git a/fdbserver/ClusterRecovery.actor.cpp b/fdbserver/ClusterRecovery.actor.cpp index d28ddadba8..a55ee451b3 100644 --- a/fdbserver/ClusterRecovery.actor.cpp +++ b/fdbserver/ClusterRecovery.actor.cpp @@ -270,7 +270,7 @@ ACTOR Future newTLogServers(Reference self, std::vector>* initialConfChanges) { if (self->configuration.usableRegions > 1) { state Optional remoteDcId = self->remoteDcIds.size() ? self->remoteDcIds[0] : Optional(); - if (!self->dcId_locality.count(recr.dcId)) { + if (!self->dcId_locality.contains(recr.dcId)) { int8_t loc = self->getNextLocality(); Standalone tr; tr.set(tr.arena(), tagLocalityListKeyFor(recr.dcId), tagLocalityListValue(loc)); @@ -279,7 +279,7 @@ ACTOR Future newTLogServers(Reference self, TraceEvent(SevWarn, "UnknownPrimaryDCID", self->dbgid).detail("PrimaryId", recr.dcId).detail("Loc", loc); } - if (!self->dcId_locality.count(remoteDcId)) { + if (!self->dcId_locality.contains(remoteDcId)) { int8_t loc = self->getNextLocality(); Standalone tr; tr.set(tr.arena(), tagLocalityListKeyFor(remoteDcId), tagLocalityListValue(loc)); @@ -357,7 +357,7 @@ ACTOR Future newSeedServers(Reference self, .detail("CandidateWorker", recruits.storageServers[idx].locality.toString()); InitializeStorageRequest isr; - isr.seedTag = dcId_tags.count(recruits.storageServers[idx].locality.dcId()) + isr.seedTag = dcId_tags.contains(recruits.storageServers[idx].locality.dcId()) ? 
dcId_tags[recruits.storageServers[idx].locality.dcId()] : Tag(nextLocality, 0); isr.storeType = self->configuration.storageServerStoreType; @@ -376,7 +376,7 @@ ACTOR Future newSeedServers(Reference self, CODE_PROBE(true, "initial storage recuitment loop failed to get new server"); wait(delay(SERVER_KNOBS->STORAGE_RECRUITMENT_DELAY)); } else { - if (!dcId_tags.count(recruits.storageServers[idx].locality.dcId())) { + if (!dcId_tags.contains(recruits.storageServers[idx].locality.dcId())) { dcId_tags[recruits.storageServers[idx].locality.dcId()] = Tag(nextLocality, 0); nextLocality++; } @@ -758,7 +758,7 @@ ACTOR Future updateLogsValue(Reference self, Database bool found = false; for (auto& logSet : self->logSystem->getLogSystemConfig().tLogs) { for (auto& log : logSet.tLogs) { - if (logIds.count(log.id())) { + if (logIds.contains(log.id())) { found = true; break; } @@ -1832,7 +1832,7 @@ ACTOR Future cleanupRecoveryActorCollection(Reference } bool isNormalClusterRecoveryError(const Error& error) { - return normalClusterRecoveryErrors().count(error.code()); + return normalClusterRecoveryErrors().contains(error.code()); } std::string& getRecoveryEventName(ClusterRecoveryEventType type) { diff --git a/fdbserver/CommitProxyServer.actor.cpp b/fdbserver/CommitProxyServer.actor.cpp index d2a7d9ff13..0c381e721e 100644 --- a/fdbserver/CommitProxyServer.actor.cpp +++ b/fdbserver/CommitProxyServer.actor.cpp @@ -427,7 +427,7 @@ ACTOR Future commitBatcher(ProxyCommitData* commitData, if (SERVER_KNOBS->STORAGE_QUOTA_ENABLED && !req.bypassStorageQuota() && req.tenantInfo.hasTenant() && - commitData->tenantsOverStorageQuota.count(req.tenantInfo.tenantId) > 0) { + commitData->tenantsOverStorageQuota.contains(req.tenantInfo.tenantId)) { req.reply.sendError(storage_quota_exceeded()); continue; } @@ -1056,7 +1056,7 @@ EncryptCipherDomainId getEncryptDetailsFromMutationRef(ProxyCommitData* commitDa // Parse mutation key to determine mutation encryption domain StringRef prefix = m.param1.substr(0, TenantAPI::PREFIX_SIZE); int64_t tenantId = TenantAPI::prefixToId(prefix, EnforceValidTenantId::False); - if (commitData->tenantMap.count(tenantId)) { + if (commitData->tenantMap.contains(tenantId)) { domainId = tenantId; } else { // Leverage 'default encryption domain' @@ -1194,7 +1194,7 @@ void assertResolutionStateMutationsSizeConsistent(const std::vector const& tenantMap, Optional& tenantId) { if (isSingleKeyMutation((MutationRef::Type)m.type)) { tenantId = TenantAPI::extractTenantIdFromMutation(m); - bool isLegalTenant = tenantMap.count(tenantId.get()) > 0; + bool isLegalTenant = tenantMap.contains(tenantId.get()); CODE_PROBE(!isLegalTenant, "Commit proxy access invalid tenant"); return isLegalTenant; } @@ -1572,7 +1572,7 @@ Error validateAndProcessTenantAccess(CommitTransactionRequest& tr, if (!isValid) { return tenant_not_found(); } - if (!tr.isLockAware() && pProxyCommitData->lockedTenants.count(tr.tenantInfo.tenantId) > 0) { + if (!tr.isLockAware() && pProxyCommitData->lockedTenants.contains(tr.tenantInfo.tenantId)) { CODE_PROBE(true, "Attempt access to locked tenant without lock awareness"); return tenant_locked(); } @@ -1626,7 +1626,7 @@ void applyMetadataEffect(CommitBatchContext* self) { // check if all tenant ids are valid if committed == true committed = committed && std::all_of(tenantIds.get().begin(), tenantIds.get().end(), [self](const int64_t& tid) { - return self->pProxyCommitData->tenantMap.count(tid); + return self->pProxyCommitData->tenantMap.contains(tid); }); if (self->debugID.present()) { @@ 
-1805,7 +1805,7 @@ ACTOR Future applyMetadataToCommittedTransactions(CommitBatchContext* self if (pProxyCommitData->encryptMode == EncryptionAtRestMode::DOMAIN_AWARE && !rawAccessTenantIds.empty()) { std::unordered_set extraDomainIds; for (auto tenantId : rawAccessTenantIds) { - if (self->cipherKeys.count(tenantId) == 0) { + if (!self->cipherKeys.contains(tenantId)) { extraDomainIds.insert(tenantId); } } @@ -1892,7 +1892,7 @@ Future writeMutation(CommitBatchContext* self, CODE_PROBE(true, "Raw access mutation encryption", probe::decoration::rare); } ASSERT_NE(domainId, INVALID_ENCRYPT_DOMAIN_ID); - ASSERT(self->cipherKeys.count(domainId) > 0); + ASSERT(self->cipherKeys.contains(domainId)); encryptedMutation = mutation->encrypt(self->cipherKeys, domainId, *arena, BlobCipherMetrics::TLOG, encryptTime); } @@ -2827,7 +2827,7 @@ void maybeAddTssMapping(GetKeyServerLocationsReply& reply, ProxyCommitData* commitData, std::unordered_set& included, UID ssId) { - if (!included.count(ssId)) { + if (!included.contains(ssId)) { auto mappingItr = commitData->tssMapping.find(ssId); if (mappingItr != commitData->tssMapping.end()) { reply.resultsTssMapping.push_back(*mappingItr); @@ -3112,8 +3112,8 @@ ACTOR static Future doBlobGranuleLocationRequest(GetBlobGranuleLocationsRe throw blob_granule_transaction_too_old(); } - if (!req.justGranules && !commitData->blobWorkerInterfCache.count(workerId) && - !bwiLookedUp.count(workerId)) { + if (!req.justGranules && !commitData->blobWorkerInterfCache.contains(workerId) && + !bwiLookedUp.contains(workerId)) { bwiLookedUp.insert(workerId); bwiLookupFutures.push_back(tr.get(blobWorkerListKeyFor(workerId))); } @@ -3766,7 +3766,7 @@ ACTOR Future processTransactionStateRequestPart(TransactionStateResolveCon ASSERT(pContext->pCommitData != nullptr); ASSERT(pContext->pActors != nullptr); - if (pContext->receivedSequences.count(request.sequence)) { + if (pContext->receivedSequences.contains(request.sequence)) { if (pContext->receivedSequences.size() == pContext->maxSequence) { wait(pContext->txnRecovery); } diff --git a/fdbserver/ConfigBroadcaster.actor.cpp b/fdbserver/ConfigBroadcaster.actor.cpp index e1630bf43a..bdf5fbc8e0 100644 --- a/fdbserver/ConfigBroadcaster.actor.cpp +++ b/fdbserver/ConfigBroadcaster.actor.cpp @@ -464,7 +464,7 @@ class ConfigBroadcasterImpl { state BroadcastClientDetails client( watcher, std::move(configClassSet), lastSeenVersion, std::move(broadcastInterface)); - if (impl->clients.count(broadcastInterface.id())) { + if (impl->clients.contains(broadcastInterface.id())) { // Client already registered return Void(); } diff --git a/fdbserver/DDRelocationQueue.actor.cpp b/fdbserver/DDRelocationQueue.actor.cpp index 4335154131..9f3e1d9500 100644 --- a/fdbserver/DDRelocationQueue.actor.cpp +++ b/fdbserver/DDRelocationQueue.actor.cpp @@ -694,19 +694,19 @@ void DDQueue::validate() { for (auto it = inFlightRanges.begin(); it != inFlightRanges.end(); ++it) { for (int i = 0; i < it->value().src.size(); i++) { // each server in the inFlight map is in the busymap - if (!busymap.count(it->value().src[i])) + if (!busymap.contains(it->value().src[i])) TraceEvent(SevError, "DDQueueValidateError8") .detail("Problem", "each server in the inFlight map is in the busymap"); // relocate data that is inFlight is not also in the queue - if (queue[it->value().src[i]].count(it->value())) + if (queue[it->value().src[i]].contains(it->value())) TraceEvent(SevError, "DDQueueValidateError9") .detail("Problem", "relocate data that is inFlight is not also in the queue"); } for (int 
i = 0; i < it->value().completeDests.size(); i++) { // each server in the inFlight map is in the dest busymap - if (!destBusymap.count(it->value().completeDests[i])) + if (!destBusymap.contains(it->value().completeDests[i])) TraceEvent(SevError, "DDQueueValidateError10") .detail("Problem", "each server in the inFlight map is in the destBusymap"); } @@ -853,7 +853,7 @@ void DDQueue::queueRelocation(RelocateShard rs, std::set& serversToLaunchFr // ASSERT(queueMapItr->value() == queueMap.rangeContaining(affectedQueuedItems[r].begin)->value()); RelocateData& rrs = queueMapItr->value(); - if (rrs.src.size() == 0 && (rrs.keys == rd.keys || fetchingSourcesQueue.count(rrs) > 0)) { + if (rrs.src.size() == 0 && (rrs.keys == rd.keys || fetchingSourcesQueue.contains(rrs))) { if (rrs.keys != rd.keys) { delayDelete.insert(rrs); } @@ -927,7 +927,7 @@ void DDQueue::queueRelocation(RelocateShard rs, std::set& serversToLaunchFr } void DDQueue::completeSourceFetch(const RelocateData& results) { - ASSERT(fetchingSourcesQueue.count(results)); + ASSERT(fetchingSourcesQueue.contains(results)); // logRelocation( results, "GotSourceServers" ); @@ -960,7 +960,7 @@ void DDQueue::launchQueuedWork(KeyRange keys, const DDEnabledState* ddEnabledSta std::set> combined; auto f = queueMap.intersectingRanges(keys); for (auto it = f.begin(); it != f.end(); ++it) { - if (it->value().src.size() && queue[it->value().src[0]].count(it->value())) + if (it->value().src.size() && queue[it->value().src[0]].contains(it->value())) combined.insert(it->value()); } launchQueuedWork(combined, ddEnabledState); @@ -1064,7 +1064,7 @@ void DDQueue::launchQueuedWork(std::set bool overlappingInFlight = false; auto intersectingInFlight = inFlight.intersectingRanges(rd.keys); for (auto it = intersectingInFlight.begin(); it != intersectingInFlight.end(); ++it) { - if (fetchKeysComplete.count(it->value()) && inFlightActors.liveActorAt(it->range().begin) && + if (fetchKeysComplete.contains(it->value()) && inFlightActors.liveActorAt(it->range().begin) && !rd.keys.contains(it->range()) && it->value().priority >= rd.priority && rd.healthPriority < SERVER_KNOBS->PRIORITY_TEAM_UNHEALTHY) { @@ -1235,7 +1235,7 @@ int DDQueue::getHighestPriorityRelocation() const { // return true if the servers are throttled as source for read rebalance bool DDQueue::timeThrottle(const std::vector& ids) const { return std::any_of(ids.begin(), ids.end(), [this](const UID& id) { - if (this->lastAsSource.count(id)) { + if (this->lastAsSource.contains(id)) { return (now() - this->lastAsSource.at(id)) * SERVER_KNOBS->READ_REBALANCE_SRC_PARALLELISM < SERVER_KNOBS->STORAGE_METRICS_AVERAGE_INTERVAL; } @@ -1394,7 +1394,7 @@ static int nonOverlappedServerCount(const std::vector& srcIds, const std::v std::unordered_set srcSet{ srcIds.begin(), srcIds.end() }; int count = 0; for (int i = 0; i < destIds.size(); i++) { - if (srcSet.count(destIds[i]) == 0) { + if (!srcSet.contains(destIds[i])) { count++; } } @@ -2231,7 +2231,7 @@ ACTOR Future dataDistributionRelocator(DDQueue* self, inline double getWorstCpu(const HealthMetrics& metrics, const std::vector& ids) { double cpu = 0; for (auto& id : ids) { - if (metrics.storageStats.count(id)) { + if (metrics.storageStats.contains(id)) { cpu = std::max(cpu, metrics.storageStats.at(id).cpuUsage); } else { // assume the server is too busy to report its stats diff --git a/fdbserver/DDShardTracker.actor.cpp b/fdbserver/DDShardTracker.actor.cpp index 99634af9b8..ea3d620bbb 100644 --- a/fdbserver/DDShardTracker.actor.cpp +++ 
b/fdbserver/DDShardTracker.actor.cpp @@ -343,7 +343,7 @@ ACTOR Future trackShardMetrics(DataDistributionTracker::SafeAccessor self, if (e.code() != error_code_actor_cancelled && e.code() != error_code_dd_tracker_cancelled) { DisabledTraceEvent(SevDebug, "TrackShardError", self()->distributorId).detail("Keys", keys); // The above loop use Database cx, but those error should only be thrown in a code using transaction. - ASSERT(transactionRetryableErrors.count(e.code()) == 0); + ASSERT(!transactionRetryableErrors.contains(e.code())); self()->output.sendError(e); // Propagate failure to dataDistributionTracker } throw e; @@ -368,7 +368,7 @@ ACTOR Future readHotDetector(DataDistributionTracker* self) { } catch (Error& e) { if (e.code() != error_code_actor_cancelled) { // Those error should only be thrown in a code using transaction. - ASSERT(transactionRetryableErrors.count(e.code()) == 0); + ASSERT(!transactionRetryableErrors.contains(e.code())); self->output.sendError(e); // Propagate failure to dataDistributionTracker } throw e; @@ -1837,7 +1837,7 @@ void PhysicalShardCollection::PhysicalShard::removeRange(const KeyRange& outRang PhysicalShardAvailable PhysicalShardCollection::checkPhysicalShardAvailable(uint64_t physicalShardID, StorageMetrics const& moveInMetrics) { ASSERT(physicalShardID != UID().first() && physicalShardID != anonymousShardId.first()); - ASSERT(physicalShardInstances.count(physicalShardID) > 0); + ASSERT(physicalShardInstances.contains(physicalShardID)); if (physicalShardInstances[physicalShardID].metrics.bytes + moveInMetrics.bytes > SERVER_KNOBS->MAX_PHYSICAL_SHARD_BYTES) { return PhysicalShardAvailable::False; @@ -1859,7 +1859,7 @@ void PhysicalShardCollection::updateTeamPhysicalShardIDsMap(uint64_t inputPhysic ASSERT(inputTeams.size() <= 2); ASSERT(inputPhysicalShardID != anonymousShardId.first() && inputPhysicalShardID != UID().first()); for (auto inputTeam : inputTeams) { - if (teamPhysicalShardIDs.count(inputTeam) == 0) { + if (!teamPhysicalShardIDs.contains(inputTeam)) { std::set physicalShardIDSet; physicalShardIDSet.insert(inputPhysicalShardID); teamPhysicalShardIDs.insert(std::make_pair(inputTeam, physicalShardIDSet)); @@ -1876,7 +1876,7 @@ void PhysicalShardCollection::insertPhysicalShardToCollection(uint64_t physicalS uint64_t debugID, PhysicalShardCreationTime whenCreated) { ASSERT(physicalShardID != anonymousShardId.first() && physicalShardID != UID().first()); - ASSERT(physicalShardInstances.count(physicalShardID) == 0); + ASSERT(!physicalShardInstances.contains(physicalShardID)); physicalShardInstances.insert( std::make_pair(physicalShardID, PhysicalShard(txnProcessor, physicalShardID, metrics, teams, whenCreated))); return; @@ -1953,7 +1953,7 @@ Optional PhysicalShardCollection::trySelectAvailablePhysicalShardFor( uint64_t debugID) { ASSERT(team.servers.size() > 0); // Case: The team is not tracked in the mapping (teamPhysicalShardIDs) - if (teamPhysicalShardIDs.count(team) == 0) { + if (!teamPhysicalShardIDs.contains(team)) { return Optional(); } ASSERT(teamPhysicalShardIDs[team].size() >= 1); @@ -1964,7 +1964,7 @@ Optional PhysicalShardCollection::trySelectAvailablePhysicalShardFor( if (physicalShardID == anonymousShardId.first() || physicalShardID == UID().first()) { ASSERT(false); } - ASSERT(physicalShardInstances.count(physicalShardID)); + ASSERT(physicalShardInstances.contains(physicalShardID)); /*TraceEvent("TryGetPhysicalShardIDCandidates") .detail("PhysicalShardID", physicalShardID) .detail("Bytes", 
physicalShardInstances[physicalShardID].metrics.bytes) @@ -2005,14 +2005,14 @@ uint64_t PhysicalShardCollection::generateNewPhysicalShardID(uint64_t debugID) { } void PhysicalShardCollection::reduceMetricsForMoveOut(uint64_t physicalShardID, StorageMetrics const& moveOutMetrics) { - ASSERT(physicalShardInstances.count(physicalShardID) != 0); + ASSERT(physicalShardInstances.contains(physicalShardID)); ASSERT(physicalShardID != UID().first() && physicalShardID != anonymousShardId.first()); physicalShardInstances[physicalShardID].metrics = physicalShardInstances[physicalShardID].metrics - moveOutMetrics; return; } void PhysicalShardCollection::increaseMetricsForMoveIn(uint64_t physicalShardID, StorageMetrics const& moveInMetrics) { - ASSERT(physicalShardInstances.count(physicalShardID) != 0); + ASSERT(physicalShardInstances.contains(physicalShardID)); ASSERT(physicalShardID != UID().first() && physicalShardID != anonymousShardId.first()); physicalShardInstances[physicalShardID].metrics = physicalShardInstances[physicalShardID].metrics + moveInMetrics; return; @@ -2109,7 +2109,7 @@ std::pair, bool> PhysicalShardCollec ASSERT(SERVER_KNOBS->SHARD_ENCODE_LOCATION_METADATA); ASSERT(SERVER_KNOBS->ENABLE_DD_PHYSICAL_SHARD); ASSERT(inputPhysicalShardID != anonymousShardId.first() && inputPhysicalShardID != UID().first()); - if (physicalShardInstances.count(inputPhysicalShardID) == 0) { + if (!physicalShardInstances.contains(inputPhysicalShardID)) { return { Optional(), true }; } if (!checkPhysicalShardAvailable(inputPhysicalShardID, moveInMetrics)) { @@ -2141,7 +2141,7 @@ void PhysicalShardCollection::initPhysicalShardCollection(KeyRange keys, ASSERT(physicalShardID != UID().first()); if (physicalShardID != anonymousShardId.first()) { updateTeamPhysicalShardIDsMap(physicalShardID, selectedTeams, debugID); - if (physicalShardInstances.count(physicalShardID) == 0) { + if (!physicalShardInstances.contains(physicalShardID)) { insertPhysicalShardToCollection( physicalShardID, StorageMetrics(), selectedTeams, debugID, PhysicalShardCreationTime::DDInit); } else { @@ -2181,7 +2181,7 @@ void PhysicalShardCollection::updatePhysicalShardCollection( // Update physicalShardInstances // Add the metrics to in-physicalShard // e.detail("PhysicalShardIDIn", physicalShardID); - if (physicalShardInstances.count(physicalShardID) == 0) { + if (!physicalShardInstances.contains(physicalShardID)) { // e.detail("Op", "Insert"); insertPhysicalShardToCollection( physicalShardID, metrics, selectedTeams, debugID, PhysicalShardCreationTime::DDRelocator); @@ -2266,8 +2266,8 @@ void PhysicalShardCollection::cleanUpPhysicalShardCollection() { } for (auto it = physicalShardInstances.begin(); it != physicalShardInstances.end();) { uint64_t physicalShardID = it->first; - ASSERT(physicalShardInstances.count(physicalShardID) > 0); - if (physicalShardsInUse.count(physicalShardID) == 0) { + ASSERT(physicalShardInstances.contains(physicalShardID)); + if (!physicalShardsInUse.contains(physicalShardID)) { /*TraceEvent("PhysicalShardisEmpty") .detail("PhysicalShard", physicalShardID) .detail("RemainBytes", physicalShardInstances[physicalShardID].metrics.bytes);*/ @@ -2282,7 +2282,7 @@ void PhysicalShardCollection::cleanUpPhysicalShardCollection() { for (auto [team, _] : teamPhysicalShardIDs) { for (auto it = teamPhysicalShardIDs[team].begin(); it != teamPhysicalShardIDs[team].end();) { uint64_t physicalShardID = *it; - if (physicalShardInstances.count(physicalShardID) == 0) { + if (!physicalShardInstances.contains(physicalShardID)) { // 
physicalShardID has been removed from physicalShardInstances (see step 1) // So, remove the physicalShard from teamPhysicalShardID[team] it = teamPhysicalShardIDs[team].erase(it); @@ -2322,7 +2322,7 @@ void PhysicalShardCollection::logPhysicalShardCollection() { uint64_t maxPhysicalShardID = 0; uint64_t minPhysicalShardID = 0; for (auto physicalShardID : physicalShardIDs) { - ASSERT(physicalShardInstances.count(physicalShardID) > 0); + ASSERT(physicalShardInstances.contains(physicalShardID)); uint64_t id = physicalShardInstances[physicalShardID].id; int64_t bytes = physicalShardInstances[physicalShardID].metrics.bytes; if (bytes > maxPhysicalShardBytes) { @@ -2352,14 +2352,14 @@ void PhysicalShardCollection::logPhysicalShardCollection() { for (auto ssid : team.servers) { for (auto it = teamPhysicalShardIDs[team].begin(); it != teamPhysicalShardIDs[team].end();) { uint64_t physicalShardID = *it; - if (storageServerPhysicalShardStatus.count(ssid) != 0) { - if (storageServerPhysicalShardStatus[ssid].count(physicalShardID) == 0) { - ASSERT(physicalShardInstances.count(physicalShardID) > 0); + if (storageServerPhysicalShardStatus.contains(ssid)) { + if (!storageServerPhysicalShardStatus[ssid].contains(physicalShardID)) { + ASSERT(physicalShardInstances.contains(physicalShardID)); storageServerPhysicalShardStatus[ssid].insert( std::make_pair(physicalShardID, physicalShardInstances[physicalShardID].metrics.bytes)); } } else { - ASSERT(physicalShardInstances.count(physicalShardID) > 0); + ASSERT(physicalShardInstances.contains(physicalShardID)); std::map tmp; tmp.insert(std::make_pair(physicalShardID, physicalShardInstances[physicalShardID].metrics.bytes)); storageServerPhysicalShardStatus.insert(std::make_pair(ssid, tmp)); diff --git a/fdbserver/DDTeamCollection.actor.cpp b/fdbserver/DDTeamCollection.actor.cpp index 80cde7cbfa..74371df81c 100644 --- a/fdbserver/DDTeamCollection.actor.cpp +++ b/fdbserver/DDTeamCollection.actor.cpp @@ -91,7 +91,7 @@ class DDTeamCollectionImpl { const ProcessData& workerData = workers[i]; AddressExclusion addr(workerData.address.ip, workerData.address.port); existingAddrs.insert(addr); - if (self->invalidLocalityAddr.count(addr) && + if (self->invalidLocalityAddr.contains(addr) && self->isValidLocality(self->configuration.storagePolicy, workerData.locality)) { // The locality info on the addr has been corrected self->invalidLocalityAddr.erase(addr); @@ -104,7 +104,7 @@ class DDTeamCollectionImpl { // In case system operator permanently excludes workers on the address with invalid locality for (auto addr = self->invalidLocalityAddr.begin(); addr != self->invalidLocalityAddr.end();) { - if (!existingAddrs.count(*addr)) { + if (!existingAddrs.contains(*addr)) { // The address no longer has a worker addr = self->invalidLocalityAddr.erase(addr); hasCorrectedLocality = true; @@ -452,7 +452,7 @@ public: bool foundSrc = false; for (const auto& id : req.src) { - if (self->server_info.count(id)) { + if (self->server_info.contains(id)) { foundSrc = true; break; } @@ -1224,7 +1224,7 @@ public: } ASSERT_EQ(tc->primary, t.primary); // tc->traceAllInfo(); - if (tc->server_info.count(t.servers[0])) { + if (tc->server_info.contains(t.servers[0])) { auto& info = tc->server_info[t.servers[0]]; bool found = false; @@ -2173,14 +2173,14 @@ public: // Do not retrigger and double-overwrite failed or wiggling servers auto old = self->excludedServers.getKeys(); for (const auto& o : old) { - if (!exclusionTracker.excluded.count(o) && !exclusionTracker.failed.count(o) && + if 
(!exclusionTracker.excluded.contains(o) && !exclusionTracker.failed.contains(o) && !(self->excludedServers.count(o) && self->excludedServers.get(o) == DDTeamCollection::Status::WIGGLING)) { self->excludedServers.set(o, DDTeamCollection::Status::NONE); } } for (const auto& n : exclusionTracker.excluded) { - if (!exclusionTracker.failed.count(n)) { + if (!exclusionTracker.failed.contains(n)) { self->excludedServers.set(n, DDTeamCollection::Status::EXCLUDED); } } @@ -2783,7 +2783,7 @@ public: if (newServer.present()) { UID id = newServer.get().interf.id(); - if (!self->server_and_tss_info.count(id)) { + if (!self->server_and_tss_info.contains(id)) { if (!recruitTss || tssState->tssRecruitSuccess()) { self->addServer(newServer.get().interf, candidateWorker.processClass, @@ -3043,7 +3043,7 @@ public: UID tssId = itr->second->getId(); StorageServerInterface tssi = itr->second->getLastKnownInterface(); - if (self->shouldHandleServer(tssi) && self->server_and_tss_info.count(tssId)) { + if (self->shouldHandleServer(tssi) && self->server_and_tss_info.contains(tssId)) { Promise killPromise = itr->second->killTss; if (killPromise.canBeSet()) { CODE_PROBE(tssToRecruit < 0, "Killing TSS due to too many TSS"); @@ -3171,7 +3171,7 @@ public: ProcessClass const& processClass = servers[i].second; if (!self->shouldHandleServer(ssi)) { continue; - } else if (self->server_and_tss_info.count(serverId)) { + } else if (self->server_and_tss_info.contains(serverId)) { auto& serverInfo = self->server_and_tss_info[serverId]; if (ssi.getValue.getEndpoint() != serverInfo->getLastKnownInterface().getValue.getEndpoint() || @@ -3185,7 +3185,7 @@ public: serverInfo->interfaceChanged.getFuture()); currentInterfaceChanged.send(std::make_pair(ssi, processClass)); } - } else if (!self->recruitingIds.count(ssi.id())) { + } else if (!self->recruitingIds.contains(ssi.id())) { self->addServer(ssi, processClass, self->serverTrackerErrorOut, @@ -3263,7 +3263,7 @@ public: // if perpetual_storage_wiggle_locality has value and not 0(disabled). if (!localityKeyValues.empty()) { - if (self->server_info.count(res.begin()->first)) { + if (self->server_info.contains(res.begin()->first)) { auto server = self->server_info.at(res.begin()->first); for (const auto& [localityKey, localityValue] : localityKeyValues) { // Update the wigglingId only if it matches the locality. 
@@ -3975,14 +3975,14 @@ Optional> DDTeamCollection::findTeamFromServers const std::set completeSources(servers.begin(), servers.end()); for (const auto& server : servers) { - if (!server_info.count(server)) { + if (!server_info.contains(server)) { continue; } auto const& teamList = server_info[server]->getTeams(); for (const auto& team : teamList) { bool found = true; for (const UID& s : team->getServerIDs()) { - if (!completeSources.count(s)) { + if (!completeSources.contains(s)) { found = false; break; } @@ -5688,7 +5688,7 @@ void DDTeamCollection::addServer(StorageServerInterface newServer, if (newServer.isTss()) { tss_info_by_pair[newServer.tssPairID.get()] = r; - if (server_info.count(newServer.tssPairID.get())) { + if (server_info.contains(newServer.tssPairID.get())) { r->onTSSPairRemoved = server_info[newServer.tssPairID.get()]->onRemoved; } } else { @@ -5701,7 +5701,7 @@ void DDTeamCollection::addServer(StorageServerInterface newServer, if (!newServer.isTss()) { // link and wake up tss' tracker so it knows when this server gets removed - if (tss_info_by_pair.count(newServer.id())) { + if (tss_info_by_pair.contains(newServer.id())) { tss_info_by_pair[newServer.id()]->onTSSPairRemoved = r->onRemoved; if (tss_info_by_pair[newServer.id()]->wakeUpTracker.canBeSet()) { auto p = tss_info_by_pair[newServer.id()]->wakeUpTracker; @@ -5987,7 +5987,7 @@ void DDTeamCollection::removeServer(UID removedServer) { Future DDTeamCollection::excludeStorageServersForWiggle(const UID& id) { Future moveFuture = Void(); - if (this->server_info.count(id) != 0) { + if (this->server_info.contains(id)) { auto& info = server_info.at(id); AddressExclusion addr(info->getLastKnownInterface().address().ip, info->getLastKnownInterface().address().port); diff --git a/fdbserver/DataDistribution.actor.cpp b/fdbserver/DataDistribution.actor.cpp index d5483b17a0..0f3389ab46 100644 --- a/fdbserver/DataDistribution.actor.cpp +++ b/fdbserver/DataDistribution.actor.cpp @@ -207,7 +207,7 @@ Future StorageWiggler::onCheck() const { void StorageWiggler::addServer(const UID& serverId, const StorageMetadataType& metadata) { // std::cout << "size: " << pq_handles.size() << " add " << serverId.toString() << " DC: " // << teamCollection->isPrimary() << std::endl; - ASSERT(!pq_handles.count(serverId)); + ASSERT(!pq_handles.contains(serverId)); pq_handles[serverId] = wiggle_pq.emplace(metadata, serverId); } @@ -1730,7 +1730,7 @@ ACTOR Future>> .detail("SS", server.id()); ++storageFailures; } else { - if (result.count(server.address())) { + if (result.contains(server.address())) { ASSERT(itr->second.id() == result[server.address()].first.id()); if (result[server.address()].second.find("storage") == std::string::npos) result[server.address()].second.append(",storage"); @@ -1755,7 +1755,7 @@ ACTOR Future>> TraceEvent(SevWarn, "MissingTlogWorkerInterface").detail("TlogAddress", tlog.address()); throw snap_tlog_failed(); } - if (result.count(tlog.address())) { + if (result.contains(tlog.address())) { ASSERT(workersMap[tlog.address()].id() == result[tlog.address()].first.id()); result[tlog.address()].second.append(",tlog"); } else { @@ -1779,7 +1779,7 @@ ACTOR Future>> Optional secondary = worker.interf.tLog.getEndpoint().addresses.secondaryAddress; if (coordinatorsAddrSet.find(primary) != coordinatorsAddrSet.end() || (secondary.present() && (coordinatorsAddrSet.find(secondary.get()) != coordinatorsAddrSet.end()))) { - if (result.count(primary)) { + if (result.contains(primary)) { ASSERT(workersMap[primary].id() == 
result[primary].first.id()); result[primary].second.append(",coord"); } else { @@ -1791,7 +1791,7 @@ ACTOR Future>> for (const auto& worker : workers) { const auto& processAddress = worker.interf.address(); // skip processes that are already included - if (result.count(processAddress)) + if (result.contains(processAddress)) continue; const auto& processClassType = worker.processClass.classType(); // coordinators are always configured to be recruited @@ -3784,7 +3784,7 @@ ACTOR Future dataDistributor_impl(DataDistributorInterface di, } when(DistributorSnapRequest snapReq = waitNext(di.distributorSnapReq.getFuture())) { auto& snapUID = snapReq.snapUID; - if (ddSnapReqResultMap.count(snapUID)) { + if (ddSnapReqResultMap.contains(snapUID)) { CODE_PROBE(true, "Data distributor received a duplicate finished snapshot request", probe::decoration::rare); @@ -3793,7 +3793,7 @@ ACTOR Future dataDistributor_impl(DataDistributorInterface di, TraceEvent("RetryFinishedDistributorSnapRequest") .detail("SnapUID", snapUID) .detail("Result", result.isError() ? result.getError().code() : 0); - } else if (ddSnapReqMap.count(snapReq.snapUID)) { + } else if (ddSnapReqMap.contains(snapReq.snapUID)) { CODE_PROBE(true, "Data distributor received a duplicate ongoing snapshot request"); TraceEvent("RetryOngoingDistributorSnapRequest").detail("SnapUID", snapUID); ASSERT(snapReq.snapPayload == ddSnapReqMap[snapUID].snapPayload); @@ -3836,7 +3836,7 @@ ACTOR Future dataDistributor_impl(DataDistributorInterface di, } } } catch (Error& err) { - if (normalDataDistributorErrors().count(err.code()) == 0) { + if (!(normalDataDistributorErrors().contains(err.code()))) { TraceEvent("DataDistributorError", di.id()).errorUnsuppressed(err); throw err; } diff --git a/fdbserver/GrvProxyServer.actor.cpp b/fdbserver/GrvProxyServer.actor.cpp index f55789b3cf..3235039446 100644 --- a/fdbserver/GrvProxyServer.actor.cpp +++ b/fdbserver/GrvProxyServer.actor.cpp @@ -1154,7 +1154,8 @@ ACTOR Future checkRemoved(Reference const> db, GrvProxyInterface myInterface) { loop { if (db->get().recoveryCount >= recoveryCount && - !std::count(db->get().client.grvProxies.begin(), db->get().client.grvProxies.end(), myInterface)) { + std::find(db->get().client.grvProxies.begin(), db->get().client.grvProxies.end(), myInterface) == + db->get().client.grvProxies.end()) { throw worker_removed(); } wait(db->onChange()); diff --git a/fdbserver/KeyValueStoreMemory.actor.cpp b/fdbserver/KeyValueStoreMemory.actor.cpp index ec07810066..db53ec2a69 100644 --- a/fdbserver/KeyValueStoreMemory.actor.cpp +++ b/fdbserver/KeyValueStoreMemory.actor.cpp @@ -494,7 +494,7 @@ private: uint32_t opType = (uint32_t)op; // Make sure the first bit of the optype is empty ASSERT(opType >> ENCRYPTION_ENABLED_BIT == 0); - if (!enableEncryption || metaOps.count(op) > 0) { + if (!enableEncryption || metaOps.contains(op)) { OpHeader h = { opType, v1.size(), v2.size() }; log->push(StringRef((const uint8_t*)&h, sizeof(h))); log->push(v1); @@ -545,7 +545,7 @@ private: ASSERT(!isOpEncrypted(&h)); // Metadata op types to be excluded from encryption. static std::unordered_set metaOps = { OpSnapshotEnd, OpSnapshotAbort, OpCommit, OpRollback }; - if (metaOps.count((OpType)h.op) == 0) { + if (!metaOps.contains((OpType)h.op)) { // It is not supported to open an encrypted store as unencrypted, or vice-versa. 
ASSERT_EQ(encryptedOp, self->enableEncryption); } diff --git a/fdbserver/KnobProtectiveGroups.cpp b/fdbserver/KnobProtectiveGroups.cpp index 6365da03b2..5af7991591 100644 --- a/fdbserver/KnobProtectiveGroups.cpp +++ b/fdbserver/KnobProtectiveGroups.cpp @@ -28,7 +28,7 @@ #include "fdbserver/Knobs.h" void KnobKeyValuePairs::set(const std::string& name, const ParsedKnobValue value) { - ASSERT(knobs.count(name) == 0); + ASSERT(!knobs.contains(name)); knobs[name] = value; } diff --git a/fdbserver/LatencyBandsMap.actor.cpp b/fdbserver/LatencyBandsMap.actor.cpp index 431cb730a2..d72a3bbbed 100644 --- a/fdbserver/LatencyBandsMap.actor.cpp +++ b/fdbserver/LatencyBandsMap.actor.cpp @@ -44,7 +44,7 @@ LatencyBandsMap::ExpirableBands::ExpirableBands(LatencyBands&& bands) : latencyBands(std::move(bands)), lastUpdated(now()) {} Optional LatencyBandsMap::getLatencyBands(TransactionTag tag) { - if (map.size() == maxSize && !map.count(tag)) { + if (map.size() == maxSize && !map.contains(tag)) { CODE_PROBE(true, "LatencyBandsMap reached maxSize"); return {}; } diff --git a/fdbserver/LeaderElection.actor.cpp b/fdbserver/LeaderElection.actor.cpp index 8240e8eff3..4acb339f68 100644 --- a/fdbserver/LeaderElection.actor.cpp +++ b/fdbserver/LeaderElection.actor.cpp @@ -203,7 +203,8 @@ ACTOR Future tryBecomeLeaderInternal(ServerCoordinators coordinators, // If more than 2*SERVER_KNOBS->POLLING_FREQUENCY elapses while we are nominated by some coordinator but // there is no leader, we might be breaking the leader election process for someone with better // communications but lower ID, so change IDs. - if ((!leader.present() || !leader.get().second) && std::count(nominees.begin(), nominees.end(), myInfo)) { + if ((!leader.present() || !leader.get().second) && + std::find(nominees.begin(), nominees.end(), myInfo) != nominees.end()) { if (!badCandidateTimeout.isValid()) badCandidateTimeout = delay(SERVER_KNOBS->POLLING_FREQUENCY * 2, TaskPriority::CoordinationReply); } else diff --git a/fdbserver/LogSystem.cpp b/fdbserver/LogSystem.cpp index d201a404d8..1eba4ce778 100644 --- a/fdbserver/LogSystem.cpp +++ b/fdbserver/LogSystem.cpp @@ -157,7 +157,7 @@ void LogSet::checkSatelliteTagLocations() { std::set> zones; std::set> dcs; for (auto& loc : tLogLocalities) { - if (zones.count(loc.zoneId())) { + if (zones.contains(loc.zoneId())) { foundDuplicate = true; break; } @@ -341,7 +341,7 @@ float LogPushData::getEmptyMessageRatio() const { bool LogPushData::writeTransactionInfo(int location, uint32_t subseq) { if (!FLOW_KNOBS->WRITE_TRACING_ENABLED || logSystem->getTLogVersion() < TLogVersion::V6 || - writtenLocations.count(location) != 0) { + writtenLocations.contains(location)) { return false; } diff --git a/fdbserver/LogSystemConfig.cpp b/fdbserver/LogSystemConfig.cpp index dd3a306e93..70f6c1c470 100644 --- a/fdbserver/LogSystemConfig.cpp +++ b/fdbserver/LogSystemConfig.cpp @@ -287,13 +287,13 @@ bool LogSystemConfig::isNextGenerationOf(LogSystemConfig const& r) const { bool LogSystemConfig::hasTLog(UID tid) const { for (const auto& log : tLogs) { - if (std::count(log.tLogs.begin(), log.tLogs.end(), tid) > 0) { + if (std::find(log.tLogs.begin(), log.tLogs.end(), tid) != log.tLogs.end()) { return true; } } for (const auto& old : oldTLogs) { for (const auto& log : old.tLogs) { - if (std::count(log.tLogs.begin(), log.tLogs.end(), tid) > 0) { + if (std::find(log.tLogs.begin(), log.tLogs.end(), tid) != log.tLogs.end()) { return true; } } @@ -303,13 +303,13 @@ bool LogSystemConfig::hasTLog(UID tid) const { bool 
LogSystemConfig::hasLogRouter(UID rid) const { for (const auto& log : tLogs) { - if (std::count(log.logRouters.begin(), log.logRouters.end(), rid) > 0) { + if (std::find(log.logRouters.begin(), log.logRouters.end(), rid) != log.logRouters.end()) { return true; } } for (const auto& old : oldTLogs) { for (const auto& log : old.tLogs) { - if (std::count(log.logRouters.begin(), log.logRouters.end(), rid) > 0) { + if (std::find(log.logRouters.begin(), log.logRouters.end(), rid) != log.logRouters.end()) { return true; } } @@ -319,7 +319,7 @@ bool LogSystemConfig::hasLogRouter(UID rid) const { bool LogSystemConfig::hasBackupWorker(UID bid) const { for (const auto& log : tLogs) { - if (std::count(log.backupWorkers.begin(), log.backupWorkers.end(), bid) > 0) { + if (std::find(log.backupWorkers.begin(), log.backupWorkers.end(), bid) != log.backupWorkers.end()) { return true; } } diff --git a/fdbserver/MockGlobalState.actor.cpp b/fdbserver/MockGlobalState.actor.cpp index 20a208c803..a59d2becb8 100644 --- a/fdbserver/MockGlobalState.actor.cpp +++ b/fdbserver/MockGlobalState.actor.cpp @@ -227,7 +227,7 @@ bool MockStorageServer::allShardStatusIn(const KeyRangeRef& range, const std::se for (auto it = ranges.begin(); it != ranges.end(); ++it) { // fmt::print("allShardStatusIn: {}: {} \n", id.toString(), it->range().toString()); - if (!status.count(it->cvalue().status)) + if (!status.contains(it->cvalue().status)) return false; } return true; @@ -679,7 +679,7 @@ void MockGlobalState::addStoragePerProcess(uint64_t defaultDiskSpace) { } bool MockGlobalState::serverIsSourceForShard(const UID& serverId, KeyRangeRef shard, bool inFlightShard) { - if (!allServers.count(serverId)) + if (!allServers.contains(serverId)) return false; // check serverKeys @@ -703,9 +703,9 @@ bool MockGlobalState::serverIsDestForShard(const UID& serverId, KeyRangeRef shar TraceEvent(SevDebug, "ServerIsDestForShard") .detail("ServerId", serverId) .detail("Keys", shard) - .detail("Contains", allServers.count(serverId)); + .detail("Contains", allServers.contains(serverId)); - if (!allServers.count(serverId)) + if (!allServers.contains(serverId)) return false; // check serverKeys @@ -723,7 +723,7 @@ bool MockGlobalState::serverIsDestForShard(const UID& serverId, KeyRangeRef shar } bool MockGlobalState::allShardsRemovedFromServer(const UID& serverId) { - return allServers.count(serverId) && shardMapping->getNumberOfShards(serverId) == 0; + return allServers.contains(serverId) && shardMapping->getNumberOfShards(serverId) == 0; } Future, int>> MockGlobalState::waitStorageMetrics( diff --git a/fdbserver/MoveKeys.actor.cpp b/fdbserver/MoveKeys.actor.cpp index ee33cb1462..20419898d9 100644 --- a/fdbserver/MoveKeys.actor.cpp +++ b/fdbserver/MoveKeys.actor.cpp @@ -892,14 +892,14 @@ ACTOR Future>> additionalSources(RangeResult shards decodeKeyServersValue(UIDtoTagMap, shards[i].value, src, dest); for (int s = 0; s < src.size(); s++) { - if (!fetching.count(src[s])) { + if (!fetching.contains(src[s])) { fetching.insert(src[s]); serverListEntries.push_back(tr->get(serverListKeyFor(src[s]))); } } for (int s = 0; s < dest.size(); s++) { - if (!fetching.count(dest[s])) { + if (!fetching.contains(dest[s])) { fetching.insert(dest[s]); serverListEntries.push_back(tr->get(serverListKeyFor(dest[s]))); } @@ -1350,7 +1350,7 @@ ACTOR static Future finishMoveKeys(Database occ, completeSrc = src; } else { for (int i = 0; i < completeSrc.size(); i++) { - if (!srcSet.count(completeSrc[i])) { + if (!srcSet.contains(completeSrc[i])) { swapAndPop(&completeSrc, i--); 
} } @@ -1405,7 +1405,7 @@ ACTOR static Future finishMoveKeys(Database occ, srcSet.insert(src2[s]); for (int i = 0; i < completeSrc.size(); i++) { - if (!srcSet.count(completeSrc[i])) { + if (!srcSet.contains(completeSrc[i])) { swapAndPop(&completeSrc, i--); } } @@ -1452,7 +1452,7 @@ ACTOR static Future finishMoveKeys(Database occ, state std::vector newDestinations; std::set completeSrcSet(completeSrc.begin(), completeSrc.end()); for (auto& it : dest) { - if (!hasRemote || !completeSrcSet.count(it)) { + if (!hasRemote || !completeSrcSet.contains(it)) { newDestinations.push_back(it); } } @@ -1491,7 +1491,7 @@ ACTOR static Future finishMoveKeys(Database occ, auto tssPair = tssMapping.find(storageServerInterfaces[s].id()); if (tssPair != tssMapping.end() && waitForTSSCounter > 0 && - !tssToIgnore.count(tssPair->second.id())) { + !tssToIgnore.contains(tssPair->second.id())) { tssReadyInterfs.push_back(tssPair->second); tssReady.push_back(waitForShardReady( tssPair->second, keys, tr.getReadVersion().get(), GetShardStateRequest::READABLE)); @@ -2171,7 +2171,7 @@ ACTOR static Future finishMoveShards(Database occ, completeSrc = src; } else { for (int i = 0; i < completeSrc.size(); i++) { - if (!srcSet.count(completeSrc[i])) { + if (!srcSet.contains(completeSrc[i])) { swapAndPop(&completeSrc, i--); } } @@ -2187,7 +2187,7 @@ ACTOR static Future finishMoveShards(Database occ, state std::vector newDestinations; std::set completeSrcSet(completeSrc.begin(), completeSrc.end()); for (const UID& id : destServers) { - if (!hasRemote || !completeSrcSet.count(id)) { + if (!hasRemote || !completeSrcSet.contains(id)) { newDestinations.push_back(id); } } @@ -2692,7 +2692,7 @@ ACTOR Future removeStorageServer(Database cx, allLocalities.insert(dcId_locality[decodeTLogDatacentersKey(it.key)]); } - if (locality >= 0 && !allLocalities.count(locality)) { + if (locality >= 0 && !allLocalities.contains(locality)) { for (auto& it : fTagLocalities.get()) { if (locality == decodeTagLocalityListValue(it.value)) { tr->clear(it.key); @@ -3316,7 +3316,7 @@ void seedShardServers(Arena& arena, CommitTransactionRef& tr, std::vector server_tag; int8_t nextLocality = 0; for (auto& s : servers) { - if (!dcId_locality.count(s.locality.dcId())) { + if (!dcId_locality.contains(s.locality.dcId())) { tr.set(arena, tagLocalityListKeyFor(s.locality.dcId()), tagLocalityListValue(nextLocality)); dcId_locality[s.locality.dcId()] = Tag(nextLocality, 0); nextLocality++; @@ -3398,7 +3398,7 @@ Future unassignServerKeys(UID traceId, TrType tr, KeyRangeRef keys, std::s continue; } - if (ignoreServers.count(id)) { + if (ignoreServers.contains(id)) { dprint("Ignore un-assignment from {} .\n", id.toString()); continue; } diff --git a/fdbserver/Ratekeeper.actor.cpp b/fdbserver/Ratekeeper.actor.cpp index 3f0e796153..0b88ad37e0 100644 --- a/fdbserver/Ratekeeper.actor.cpp +++ b/fdbserver/Ratekeeper.actor.cpp @@ -128,7 +128,7 @@ public: const UID serverId = ssi.id(); newServers[serverId] = ssi; - if (oldServers.count(serverId)) { + if (oldServers.contains(serverId)) { if (ssi.getValue.getEndpoint() != oldServers[serverId].getValue.getEndpoint() || ssi.isAcceptingRequests() != oldServers[serverId].isAcceptingRequests()) { serverChanges.send(std::make_pair(serverId, Optional(ssi))); @@ -617,7 +617,7 @@ public: self.maxVersion = std::max(self.maxVersion, req.version); if (recoveryVersion == std::numeric_limits::max() && - self.version_recovery.count(recoveryVersion)) { + self.version_recovery.contains(recoveryVersion)) { recoveryVersion = self.maxVersion; 
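// --- Illustrative sketch, not part of the patch: the count() -> contains() idiom used in the
// surrounding hunks (e.g. the Ratekeeper version_recovery checks), shown on standalone
// standard containers. Requires C++20; all names below are hypothetical.
#include <map>
#include <set>
#include <string>

bool exampleMembership() {
    std::set<int> ids{ 1, 2, 3 };
    std::map<std::string, int> counts{ { "a", 1 } };
    // Before: ids.count(2) > 0 && counts.count("a") > 0
    // contains() states the intent directly and reads the same for unique and multi containers.
    return ids.contains(2) && counts.contains("a");
}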
self.version_recovery[recoveryVersion] = self.version_recovery[std::numeric_limits::max()]; @@ -681,7 +681,7 @@ public: if (recoveryVersion == 0) { recoveryVersion = std::numeric_limits::max(); } - if (self.version_recovery.count(recoveryVersion)) { + if (self.version_recovery.contains(recoveryVersion)) { auto& it = self.version_recovery[recoveryVersion]; double existingEnd = it.second.present() ? it.second.get() : now(); double existingDuration = existingEnd - it.first; @@ -999,7 +999,7 @@ void Ratekeeper::updateRate(RatekeeperLimits* limits) { ignoredMachines.insert(ss->second->locality.zoneId()); continue; } - if (ignoredMachines.count(ss->second->locality.zoneId()) > 0) { + if (ignoredMachines.contains(ss->second->locality.zoneId())) { continue; } @@ -1021,7 +1021,7 @@ void Ratekeeper::updateRate(RatekeeperLimits* limits) { ignoredDurabilityLagMachines.insert(ss->second->locality.zoneId()); continue; } - if (ignoredDurabilityLagMachines.count(ss->second->locality.zoneId()) > 0) { + if (ignoredDurabilityLagMachines.contains(ss->second->locality.zoneId())) { continue; } @@ -1215,7 +1215,7 @@ void Ratekeeper::updateRate(RatekeeperLimits* limits) { minSSVer = std::min(minSSVer, ss.lastReply.version); // Machines that ratekeeper isn't controlling can fall arbitrarily far behind - if (ignoredMachines.count(it.value.locality.zoneId()) == 0) { + if (!ignoredMachines.contains(it.value.locality.zoneId())) { minLimitingSSVer = std::min(minLimitingSSVer, ss.lastReply.version); } } diff --git a/fdbserver/ResolutionBalancer.actor.cpp b/fdbserver/ResolutionBalancer.actor.cpp index b44519e3fa..a124271226 100644 --- a/fdbserver/ResolutionBalancer.actor.cpp +++ b/fdbserver/ResolutionBalancer.actor.cpp @@ -34,7 +34,7 @@ void ResolutionBalancer::setResolvers(const std::vector& v) { } void ResolutionBalancer::setChangesInReply(UID requestingProxy, GetCommitVersionReply& rep) { - if (resolverNeedingChanges.count(requestingProxy)) { + if (resolverNeedingChanges.contains(requestingProxy)) { rep.resolverChanges = resolverChanges.get(); rep.resolverChangesVersion = resolverChangesVersion; resolverNeedingChanges.erase(requestingProxy); @@ -86,12 +86,12 @@ static std::pair findRange(CoalescedKeyRangeMap& key_res ++it; // If possible create a new boundary which doesn't exist yet for (; it != ranges.end(); ++it) { - if (it->value() == src && !borders.count(prev->value()) && + if (it->value() == src && !borders.contains(prev->value()) && std::find(movedRanges.begin(), movedRanges.end(), ResolverMoveRef(it->range(), dest)) == movedRanges.end()) { return std::make_pair(it->range(), true); } - if (prev->value() == src && !borders.count(it->value()) && + if (prev->value() == src && !borders.contains(it->value()) && std::find(movedRanges.begin(), movedRanges.end(), ResolverMoveRef(prev->range(), dest)) == movedRanges.end()) { return std::make_pair(prev->range(), false); diff --git a/fdbserver/Resolver.actor.cpp b/fdbserver/Resolver.actor.cpp index 683a09d2ce..b1c9c241ee 100644 --- a/fdbserver/Resolver.actor.cpp +++ b/fdbserver/Resolver.actor.cpp @@ -666,7 +666,7 @@ ACTOR Future processTransactionStateRequestPart(Reference self, ASSERT(pContext->pResolverData.getPtr() != nullptr); ASSERT(pContext->pActors != nullptr); - if (pContext->receivedSequences.count(request.sequence)) { + if (pContext->receivedSequences.contains(request.sequence)) { // This part is already received. 
Still we will re-broadcast it to other CommitProxies & Resolvers pContext->pActors->send(broadcastTxnRequest(request, SERVER_KNOBS->TXN_STATE_SEND_AMOUNT, true)); wait(yield()); @@ -795,7 +795,7 @@ ACTOR Future checkRemoved(Reference const> db, ResolverInterface myInterface) { loop { if (db->get().recoveryCount >= recoveryCount && - !std::count(db->get().resolvers.begin(), db->get().resolvers.end(), myInterface)) + std::find(db->get().resolvers.begin(), db->get().resolvers.end(), myInterface) == db->get().resolvers.end()) throw worker_removed(); wait(db->onChange()); } diff --git a/fdbserver/ServerThroughputTracker.cpp b/fdbserver/ServerThroughputTracker.cpp index 3a8809c0e5..ae35a5adb5 100644 --- a/fdbserver/ServerThroughputTracker.cpp +++ b/fdbserver/ServerThroughputTracker.cpp @@ -76,12 +76,12 @@ void ServerThroughputTracker::cleanupUnseenTags(TransactionTagMapsecond.begin); - if (i->range() != t->second || !std::count(i->value().first.begin(), i->value().first.end(), t->first)) { + if (i->range() != t->second || + std::find(i->value().first.begin(), i->value().first.end(), t->first) == i->value().first.end()) { ASSERT(false); } } auto rs = shard_teams.ranges(); for (auto i = rs.begin(); i != rs.end(); ++i) { for (auto t = i->value().first.begin(); t != i->value().first.end(); ++t) { - if (!team_shards.count(std::make_pair(*t, i->range()))) { + if (!team_shards.contains(std::make_pair(*t, i->range()))) { std::string teamDesc, shards; for (int k = 0; k < t->servers.size(); k++) teamDesc += format("%llx ", t->servers[k].first()); diff --git a/fdbserver/Status.actor.cpp b/fdbserver/Status.actor.cpp index ed096e1eb6..c206e3f9cb 100644 --- a/fdbserver/Status.actor.cpp +++ b/fdbserver/Status.actor.cpp @@ -332,14 +332,14 @@ JsonBuilderObject machineStatusFetcher(WorkerEvents mMetrics, std::string machineId = event.getValue("MachineID"); // If this machine ID does not already exist in the machineMap, add it - if (machineJsonMap.count(machineId) == 0) { + if (!machineJsonMap.contains(machineId)) { statusObj["machine_id"] = machineId; - if (dcIds.count(it->first)) { + if (dcIds.contains(it->first)) { statusObj["datacenter_id"] = dcIds[it->first]; } - if (locality.count(it->first)) { + if (locality.contains(it->first)) { statusObj["locality"] = locality[it->first].toJSON(); } @@ -393,7 +393,7 @@ JsonBuilderObject machineStatusFetcher(WorkerEvents mMetrics, tempList.address = it->first; // Check if the locality data is present and if so, make use of it. 
auto localityData = LocalityData(); - if (locality.count(it->first)) { + if (locality.contains(it->first)) { localityData = locality[it->first]; } @@ -819,7 +819,7 @@ ACTOR static Future processStatusFetcher( machineMemoryUsage.insert(std::make_pair(workerItr->interf.locality.machineId(), MachineMemoryInfo())) .first; try { - ASSERT(pMetrics.count(workerItr->interf.address())); + ASSERT(pMetrics.contains(workerItr->interf.address())); const TraceEventFields& processMetrics = pMetrics[workerItr->interf.address()]; const TraceEventFields& programStart = programStarts[workerItr->interf.address()]; @@ -947,7 +947,7 @@ ACTOR static Future processStatusFetcher( wait(yield()); state JsonBuilderObject statusObj; try { - ASSERT(pMetrics.count(workerItr->interf.address())); + ASSERT(pMetrics.contains(workerItr->interf.address())); NetworkAddress address = workerItr->interf.address(); const TraceEventFields& processMetrics = pMetrics[workerItr->interf.address()]; @@ -1037,7 +1037,7 @@ ACTOR static Future processStatusFetcher( } int64_t memoryLimit = 0; - if (programStarts.count(address)) { + if (programStarts.contains(address)) { auto const& programStartEvent = programStarts.at(address); if (programStartEvent.size() > 0) { @@ -1057,7 +1057,7 @@ ACTOR static Future processStatusFetcher( } // if this process address is in the machine metrics - if (mMetrics.count(address) && mMetrics[address].size()) { + if (mMetrics.contains(address) && mMetrics[address].size()) { double availableMemory; availableMemory = mMetrics[address].getDouble("AvailableMemory"); @@ -1074,7 +1074,7 @@ ACTOR static Future processStatusFetcher( JsonBuilderArray messages; - if (errors.count(address) && errors[address].size()) { + if (errors.contains(address) && errors[address].size()) { // returns status object with type and time of error messages.push_back(getError(errors.at(address))); } @@ -1088,7 +1088,7 @@ ACTOR static Future processStatusFetcher( } // If this process had a trace file open error, identified by strAddress, then add it to messages array - if (tracefileOpenErrorMap.count(strAddress)) { + if (tracefileOpenErrorMap.contains(strAddress)) { messages.push_back(tracefileOpenErrorMap[strAddress]); } @@ -1573,9 +1573,9 @@ ACTOR static Future logRangeWarningFetcher(Database cx, KeyRange range = BinaryReader::fromStringRef(it.key.removePrefix(destUidLookupPrefix), IncludeVersion()); UID logUid = BinaryReader::fromStringRef(it.value, Unversioned()); - if (loggingRanges.count(LogRangeAndUID(range, logUid))) { + if (loggingRanges.contains(LogRangeAndUID(range, logUid))) { std::pair rangePair = std::make_pair(range.begin, range.end); - if (existingRanges.count(rangePair)) { + if (existingRanges.contains(rangePair)) { std::string rangeDescription = (range == getDefaultBackupSharedRange()) ? 
"the default backup set" : format("`%s` - `%s`", @@ -2547,7 +2547,7 @@ static JsonBuilderObject tlogFetcher(int* logFaultTolerance, int failedLogs = 0; for (auto& log : tLogSet.tLogs) { JsonBuilderObject logObj; - bool failed = !log.present() || !address_workers.count(log.interf().address()); + bool failed = !log.present() || !address_workers.contains(log.interf().address()); logObj["id"] = log.id().shortString(); logObj["healthy"] = !failed; if (log.present()) { @@ -3590,7 +3590,7 @@ ACTOR Future clusterGetStatus( if (it.isTss()) { activeTSSCount++; } - if (wiggleServers.count(it.id())) { + if (wiggleServers.contains(it.id())) { wiggleServerAddress.push_back(it.address().toString()); } } diff --git a/fdbserver/TCInfo.actor.cpp b/fdbserver/TCInfo.actor.cpp index d6822e7282..2c759c46bf 100644 --- a/fdbserver/TCInfo.actor.cpp +++ b/fdbserver/TCInfo.actor.cpp @@ -252,7 +252,7 @@ Future TCServerInfo::updateStoreType() { void TCServerInfo::removeTeamsContainingServer(UID removedServer) { for (int t = 0; t < teams.size(); t++) { auto const& serverIds = teams[t]->getServerIDs(); - if (std::count(serverIds.begin(), serverIds.end(), removedServer)) { + if (std::find(serverIds.begin(), serverIds.end(), removedServer) != serverIds.end()) { teams[t--] = teams.back(); teams.pop_back(); } diff --git a/fdbserver/TLogServer.actor.cpp b/fdbserver/TLogServer.actor.cpp index defeb7a73a..ed7409f5f2 100644 --- a/fdbserver/TLogServer.actor.cpp +++ b/fdbserver/TLogServer.actor.cpp @@ -555,7 +555,7 @@ struct LogData : NonCopyable, public ReferenceCounted { bool poppedRecently, bool unpoppedRecovered) { if (tag.locality != tagLocalityLogRouter && tag.locality != tagLocalityTxs && tag != txsTag && allTags.size() && - !allTags.count(tag) && popped <= recoveredAt) { + !allTags.contains(tag) && popped <= recoveredAt) { popped = recoveredAt + 1; } auto newTagData = makeReference(tag, popped, 0, nothingPersistent, poppedRecently, unpoppedRecovered); @@ -1345,7 +1345,7 @@ ACTOR Future tLogPop(TLogData* self, TLogPopRequest req, Reference updateStorage(TLogData* self) { - while (self->spillOrder.size() && !self->id_data.count(self->spillOrder.front())) { + while (self->spillOrder.size() && !self->id_data.contains(self->spillOrder.front())) { self->spillOrder.pop_front(); } @@ -2532,7 +2532,8 @@ ACTOR Future rejoinClusterController(TLogData* self, loop { auto const& inf = self->dbInfo->get(); bool isDisplaced = - !std::count(inf.priorCommittedLogServers.begin(), inf.priorCommittedLogServers.end(), tli.id()); + std::find(inf.priorCommittedLogServers.begin(), inf.priorCommittedLogServers.end(), tli.id()) == + inf.priorCommittedLogServers.end(); if (isPrimary) { isDisplaced = isDisplaced && inf.recoveryCount >= recoveryCount && inf.recoveryState != RecoveryState::UNINITIALIZED; @@ -2791,7 +2792,7 @@ ACTOR Future serveTLogInterface(TLogData* self, bool found = false; if (self->dbInfo->get().recoveryState >= RecoveryState::ACCEPTING_COMMITS) { for (auto& logs : self->dbInfo->get().logSystemConfig.tLogs) { - if (std::count(logs.tLogs.begin(), logs.tLogs.end(), logData->logId)) { + if (std::find(logs.tLogs.begin(), logs.tLogs.end(), logData->logId) != logs.tLogs.end()) { found = true; break; } @@ -2895,7 +2896,7 @@ void removeLog(TLogData* self, Reference logData) { // actors threw an error immediately self->id_data.erase(logData->logId); - while (self->popOrder.size() && !self->id_data.count(self->popOrder.front())) { + while (self->popOrder.size() && !self->id_data.contains(self->popOrder.front())) { self->popOrder.pop_front(); 
} diff --git a/fdbserver/TagPartitionedLogSystem.actor.cpp b/fdbserver/TagPartitionedLogSystem.actor.cpp index 145a6a09be..6da141fc32 100644 --- a/fdbserver/TagPartitionedLogSystem.actor.cpp +++ b/fdbserver/TagPartitionedLogSystem.actor.cpp @@ -219,14 +219,14 @@ Tag TagPartitionedLogSystem::getPseudoPopTag(Tag tag, ProcessClass::ClassType ty switch (type) { case ProcessClass::LogRouterClass: if (tag.locality == tagLocalityLogRouter) { - ASSERT(pseudoLocalities.count(tagLocalityLogRouterMapped) > 0); + ASSERT(pseudoLocalities.contains(tagLocalityLogRouterMapped)); tag.locality = tagLocalityLogRouterMapped; } break; case ProcessClass::BackupClass: if (tag.locality == tagLocalityLogRouter) { - ASSERT(pseudoLocalities.count(tagLocalityBackup) > 0); + ASSERT(pseudoLocalities.contains(tagLocalityBackup)); tag.locality = tagLocalityBackup; } break; @@ -238,7 +238,7 @@ Tag TagPartitionedLogSystem::getPseudoPopTag(Tag tag, ProcessClass::ClassType ty } bool TagPartitionedLogSystem::hasPseudoLocality(int8_t locality) const { - return pseudoLocalities.count(locality) > 0; + return pseudoLocalities.contains(locality); } Version TagPartitionedLogSystem::popPseudoLocalityTag(Tag tag, Version upTo) { @@ -1856,7 +1856,7 @@ void TagPartitionedLogSystem::setBackupWorkers(const std::vectorepoch; oldestBackupEpoch = this->epoch; for (const auto& reply : replies) { - if (removedBackupWorkers.count(reply.interf.id()) > 0) { + if (removedBackupWorkers.contains(reply.interf.id())) { removedBackupWorkers.erase(reply.interf.id()); continue; } @@ -2372,7 +2372,7 @@ ACTOR Future TagPartitionedLogSystem::epochEnd(Referencelocality)) { + if (!lockedLocalities.contains(log->locality)) { TraceEvent("EpochEndLockExtra").detail("Locality", log->locality); CODE_PROBE(true, "locking old generations for version information"); lockedLocalities.insert(log->locality); diff --git a/fdbserver/VFSAsync.cpp b/fdbserver/VFSAsync.cpp index e6b93b72aa..34e80933a1 100644 --- a/fdbserver/VFSAsync.cpp +++ b/fdbserver/VFSAsync.cpp @@ -685,7 +685,7 @@ static int asyncFullPathname(sqlite3_vfs* pVfs, /* VFS */ ** and false otherwise. 
*/ bool vfsAsyncIsOpen(std::string filename) { - return SharedMemoryInfo::table.count(abspath(filename)) > 0; + return SharedMemoryInfo::table.contains(abspath(filename)); } /* diff --git a/fdbserver/VersionedBTree.actor.cpp b/fdbserver/VersionedBTree.actor.cpp index 605a7e0eac..98d0f9b4b7 100644 --- a/fdbserver/VersionedBTree.actor.cpp +++ b/fdbserver/VersionedBTree.actor.cpp @@ -3344,7 +3344,7 @@ public: if (copyNewToOriginal) { if (g_network->isSimulated()) { - ASSERT(self->remapDestinationsSimOnly.count(p.originalPageID) == 0); + ASSERT(!self->remapDestinationsSimOnly.contains(p.originalPageID)); self->remapDestinationsSimOnly.insert(p.originalPageID); } debug_printf("DWALPager(%s) remapCleanup copy %s\n", self->filename.c_str(), p.toString().c_str()); @@ -9175,7 +9175,7 @@ TEST_CASE("Lredwood/correctness/unit/deltaTree/RedwoodRecordRef") { if (deterministicRandom()->coinflip()) { rec.value = StringRef(arena, v); } - if (uniqueItems.count(rec) == 0) { + if (!uniqueItems.contains(rec)) { uniqueItems.insert(rec); } } @@ -9352,7 +9352,7 @@ TEST_CASE("Lredwood/correctness/unit/deltaTree/RedwoodRecordRef2") { if (deterministicRandom()->coinflip()) { rec.value = StringRef(arena, v); } - if (uniqueItems.count(rec) == 0) { + if (!uniqueItems.contains(rec)) { uniqueItems.insert(rec); } } @@ -9533,7 +9533,7 @@ TEST_CASE("Lredwood/correctness/unit/deltaTree/IntIntPair") { nextP.v++; auto prevP = p; prevP.v--; - if (uniqueItems.count(p) == 0 && uniqueItems.count(nextP) == 0 && uniqueItems.count(prevP) == 0) { + if (!uniqueItems.contains(p) && !uniqueItems.contains(nextP) && !uniqueItems.contains(prevP)) { uniqueItems.insert(p); } } @@ -9676,8 +9676,8 @@ TEST_CASE("Lredwood/correctness/unit/deltaTree/IntIntPair") { // Insert record if it, its predecessor, and its successor are not present. // Test data is intentionally sparse to test finding each record with a directional // seek from each adjacent possible but not present record. 
- if (uniqueItems.count(p) == 0 && uniqueItems.count(IntIntPair(p.k, p.v - 1)) == 0 && - uniqueItems.count(IntIntPair(p.k, p.v + 1)) == 0) { + if (!uniqueItems.contains(p) && !uniqueItems.contains(IntIntPair(p.k, p.v - 1)) && + !uniqueItems.contains(IntIntPair(p.k, p.v + 1))) { if (!cur2.insert(p)) { shouldBeFull = true; break; diff --git a/fdbserver/fdbserver.actor.cpp b/fdbserver/fdbserver.actor.cpp index 8e2b2413f9..f6f9843e93 100644 --- a/fdbserver/fdbserver.actor.cpp +++ b/fdbserver/fdbserver.actor.cpp @@ -2229,7 +2229,7 @@ int main(int argc, char* argv[]) { const std::set allowedDirectories = { ".", "..", "backups", "unittests", "fdbblob" }; for (const auto& dir : directories) { - if (dir.size() != 32 && allowedDirectories.count(dir) == 0 && dir.find("snap") == std::string::npos) { + if (dir.size() != 32 && !allowedDirectories.contains(dir) && dir.find("snap") == std::string::npos) { TraceEvent(SevError, "IncompatibleDirectoryFound") .detail("DataFolder", dataFolder) diff --git a/fdbserver/include/fdbserver/ClusterController.actor.h b/fdbserver/include/fdbserver/ClusterController.actor.h index 988d4f4efb..43475ba8d8 100644 --- a/fdbserver/include/fdbserver/ClusterController.actor.h +++ b/fdbserver/include/fdbserver/ClusterController.actor.h @@ -351,9 +351,9 @@ public: .detail("Worker", it.second.details.interf.address()) .detail("WorkerAvailable", workerAvailable(it.second, false)) .detail("RecoverDiskFiles", it.second.details.recoveredDiskFiles) - .detail("NotExcludedMachine", !excludedMachines.count(it.second.details.interf.locality.zoneId())) + .detail("NotExcludedMachine", !excludedMachines.contains(it.second.details.interf.locality.zoneId())) .detail("IncludeDC", - (includeDCs.size() == 0 || includeDCs.count(it.second.details.interf.locality.dcId()))) + (includeDCs.size() == 0 || includeDCs.contains(it.second.details.interf.locality.dcId()))) .detail("NotExcludedAddress", !addressExcluded(excludedAddresses, it.second.details.interf.address())) .detail("NotExcludedAddress2", (!it.second.details.interf.secondaryAddress().present() || @@ -363,8 +363,8 @@ public: ProcessClass::UnsetFit) .detail("MachineFitness", it.second.details.processClass.machineClassFitness(ProcessClass::Storage)); if (workerAvailable(it.second, false) && it.second.details.recoveredDiskFiles && - !excludedMachines.count(it.second.details.interf.locality.zoneId()) && - (includeDCs.size() == 0 || includeDCs.count(it.second.details.interf.locality.dcId())) && + !excludedMachines.contains(it.second.details.interf.locality.zoneId()) && + (includeDCs.size() == 0 || includeDCs.contains(it.second.details.interf.locality.dcId())) && !addressExcluded(excludedAddresses, it.second.details.interf.address()) && (!it.second.details.interf.secondaryAddress().present() || !addressExcluded(excludedAddresses, it.second.details.interf.secondaryAddress().get())) && @@ -379,8 +379,8 @@ public: for (auto& it : id_worker) { ProcessClass::Fitness fit = it.second.details.processClass.machineClassFitness(ProcessClass::Storage); if (workerAvailable(it.second, false) && it.second.details.recoveredDiskFiles && - !excludedMachines.count(it.second.details.interf.locality.zoneId()) && - (includeDCs.size() == 0 || includeDCs.count(it.second.details.interf.locality.dcId())) && + !excludedMachines.contains(it.second.details.interf.locality.zoneId()) && + (includeDCs.size() == 0 || includeDCs.contains(it.second.details.interf.locality.dcId())) && !addressExcluded(excludedAddresses, it.second.details.interf.address()) && fit < bestFit) { bestFit = 
fit; bestInfo = it.second.details; @@ -502,7 +502,7 @@ public: auto thisField = worker.interf.locality.get(field); auto thisZone = worker.interf.locality.zoneId(); - if (field_count.count(thisField)) { + if (field_count.contains(thisField)) { zone_workers[thisZone].push_back(worker); zone_count[thisZone].second = thisField; } @@ -528,7 +528,7 @@ public: auto& zoneWorkers = zone_workers[lowestZone.second]; while (zoneWorkers.size() && !added) { - if (!resultSet.count(zoneWorkers.back())) { + if (!resultSet.contains(zoneWorkers.back())) { resultSet.insert(zoneWorkers.back()); if (resultSet.size() == desired) { return; @@ -583,7 +583,7 @@ public: bool added = false; while (zoneWorkers.size() && !added) { - if (!resultSet.count(zoneWorkers.back())) { + if (!resultSet.contains(zoneWorkers.back())) { resultSet.insert(zoneWorkers.back()); if (resultSet.size() == desired) { return; @@ -690,7 +690,7 @@ public: SevDebug, id, "complex", "Worker's fitness is NeverAssign", worker_details, fitness, dcIds); continue; } - if (!dcIds.empty() && dcIds.count(worker_details.interf.locality.dcId()) == 0) { + if (!dcIds.empty() && !dcIds.contains(worker_details.interf.locality.dcId())) { logWorkerUnavailable( SevDebug, id, "complex", "Worker is not in the target DC", worker_details, fitness, dcIds); continue; @@ -801,7 +801,7 @@ public: } if (workerIter->second.size() + resultSet.size() <= desired) { for (auto& worker : workerIter->second) { - if (chosenFields.count(worker.interf.locality.get(field))) { + if (chosenFields.contains(worker.interf.locality.get(field))) { resultSet.insert(worker); } } @@ -940,7 +940,7 @@ public: SevDebug, id, "simple", "Worker's fitness is NeverAssign", worker_details, fitness, dcIds); continue; } - if (!dcIds.empty() && dcIds.count(worker_details.interf.locality.dcId()) == 0) { + if (!dcIds.empty() && !dcIds.contains(worker_details.interf.locality.dcId())) { logWorkerUnavailable( SevDebug, id, "simple", "Worker is not in the target DC", worker_details, fitness, dcIds); continue; @@ -973,7 +973,7 @@ public: auto used = std::get<1>(workerIter->first); deterministicRandom()->randomShuffle(workerIter->second); for (auto& worker : workerIter->second) { - if (!zones.count(worker.interf.locality.zoneId())) { + if (!zones.contains(worker.interf.locality.zoneId())) { zones.insert(worker.interf.locality.zoneId()); resultSet.insert(worker); if (resultSet.size() == required) { @@ -1092,7 +1092,7 @@ public: SevDebug, id, "deprecated", "Worker's fitness is NeverAssign", worker_details, fitness, dcIds); continue; } - if (!dcIds.empty() && dcIds.count(worker_details.interf.locality.dcId()) == 0) { + if (!dcIds.empty() && !dcIds.contains(worker_details.interf.locality.dcId())) { logWorkerUnavailable( SevDebug, id, "deprecated", "Worker is not in the target DC", worker_details, fitness, dcIds); continue; @@ -1312,7 +1312,7 @@ public: std::map>, int> field_count; std::set>> zones; for (auto& worker : testWorkers) { - if (!zones.count(worker.interf.locality.zoneId())) { + if (!zones.contains(worker.interf.locality.zoneId())) { field_count[worker.interf.locality.get(pa1->attributeKey())]++; zones.insert(worker.interf.locality.zoneId()); } @@ -2478,7 +2478,7 @@ public: .detail("ProcessID", it.interf().filteredLocality.processId()); return true; } - if (!logRouterAddresses.count(tlogWorker->second.details.interf.address())) { + if (!logRouterAddresses.contains(tlogWorker->second.details.interf.address())) { logRouterAddresses.insert(tlogWorker->second.details.interf.address()); 
log_routers.push_back(tlogWorker->second.details); } @@ -2498,7 +2498,7 @@ public: .detail("ProcessID", worker.interf().locality.processId()); return true; } - if (backup_addresses.count(workerIt->second.details.interf.address()) == 0) { + if (!backup_addresses.contains(workerIt->second.details.interf.address())) { backup_addresses.insert(workerIt->second.details.interf.address()); backup_workers.push_back(workerIt->second.details); } @@ -2664,7 +2664,7 @@ public: int32_t oldSatelliteRegionFit = std::numeric_limits::max(); for (auto& it : satellite_tlogs) { - if (satellite_priority.count(it.interf.locality.dcId())) { + if (satellite_priority.contains(it.interf.locality.dcId())) { oldSatelliteRegionFit = std::min(oldSatelliteRegionFit, satellite_priority[it.interf.locality.dcId()]); } else { oldSatelliteRegionFit = -1; @@ -2673,7 +2673,7 @@ public: int32_t newSatelliteRegionFit = std::numeric_limits::max(); for (auto& it : newSatelliteTLogs) { - if (satellite_priority.count(it.interf.locality.dcId())) { + if (satellite_priority.contains(it.interf.locality.dcId())) { newSatelliteRegionFit = std::min(newSatelliteRegionFit, satellite_priority[it.interf.locality.dcId()]); } else { newSatelliteRegionFit = -1; diff --git a/fdbserver/include/fdbserver/ConfigBroadcastInterface.h b/fdbserver/include/fdbserver/ConfigBroadcastInterface.h index 85f5feb01c..b32528a045 100644 --- a/fdbserver/include/fdbserver/ConfigBroadcastInterface.h +++ b/fdbserver/include/fdbserver/ConfigBroadcastInterface.h @@ -43,7 +43,7 @@ public: } } - bool contains(KeyRef configClass) const { return classes.count(configClass); } + bool contains(KeyRef configClass) const { return classes.contains(configClass); } std::set const& getClasses() const { return classes; } template diff --git a/fdbserver/include/fdbserver/DataDistribution.actor.h b/fdbserver/include/fdbserver/DataDistribution.actor.h index e67af902f4..be8709dc11 100644 --- a/fdbserver/include/fdbserver/DataDistribution.actor.h +++ b/fdbserver/include/fdbserver/DataDistribution.actor.h @@ -770,7 +770,7 @@ struct StorageWiggler : ReferenceCounted { void removeServer(const UID& serverId); // update metadata and adjust priority_queue void updateMetadata(const UID& serverId, const StorageMetadataType& metadata); - bool contains(const UID& serverId) const { return pq_handles.count(serverId) > 0; } + bool contains(const UID& serverId) const { return pq_handles.contains(serverId); } bool empty() const { return wiggle_pq.empty(); } // It's guarantee that When a.metadata >= b.metadata, if !necessary(a) then !necessary(b) diff --git a/fdbserver/include/fdbserver/ExclusionTracker.actor.h b/fdbserver/include/fdbserver/ExclusionTracker.actor.h index 11a579cc1e..0cc9845778 100644 --- a/fdbserver/include/fdbserver/ExclusionTracker.actor.h +++ b/fdbserver/include/fdbserver/ExclusionTracker.actor.h @@ -49,7 +49,7 @@ struct ExclusionTracker { bool isFailedOrExcluded(NetworkAddress addr) { AddressExclusion addrExclusion(addr.ip, addr.port); - return excluded.count(addrExclusion) || failed.count(addrExclusion); + return excluded.contains(addrExclusion) || failed.contains(addrExclusion); } ACTOR static Future tracker(ExclusionTracker* self) { diff --git a/fdbserver/include/fdbserver/IPager.h b/fdbserver/include/fdbserver/IPager.h index 46610a701c..fd9d949d8b 100644 --- a/fdbserver/include/fdbserver/IPager.h +++ b/fdbserver/include/fdbserver/IPager.h @@ -107,7 +107,8 @@ enum EncodingType : uint8_t { static constexpr std::array EncryptedEncodingTypes = { AESEncryption, AESEncryptionWithAuth, 
XOREncryption_TestOnly }; inline bool isEncodingTypeEncrypted(EncodingType encoding) { - return std::count(EncryptedEncodingTypes.begin(), EncryptedEncodingTypes.end(), encoding) > 0; + return std::find(EncryptedEncodingTypes.begin(), EncryptedEncodingTypes.end(), encoding) != + EncryptedEncodingTypes.end(); } inline bool isEncodingTypeAESEncrypted(EncodingType encoding) { diff --git a/fdbserver/include/fdbserver/SingletonRoles.h b/fdbserver/include/fdbserver/SingletonRoles.h index 6fe1ca3d5d..902160252d 100644 --- a/fdbserver/include/fdbserver/SingletonRoles.h +++ b/fdbserver/include/fdbserver/SingletonRoles.h @@ -57,7 +57,7 @@ struct RatekeeperSingleton : Singleton { } } void halt(ClusterControllerData& cc, Optional> pid) const { - if (interface.present() && cc.id_worker.count(pid)) { + if (interface.present() && cc.id_worker.contains(pid)) { cc.id_worker[pid].haltRatekeeper = brokenPromiseToNever(interface.get().haltRatekeeper.getReply(HaltRatekeeperRequest(cc.id))); } @@ -82,7 +82,7 @@ struct DataDistributorSingleton : Singleton { } } void halt(ClusterControllerData& cc, Optional> pid) const { - if (interface.present() && cc.id_worker.count(pid)) { + if (interface.present() && cc.id_worker.contains(pid)) { cc.id_worker[pid].haltDistributor = brokenPromiseToNever(interface.get().haltDataDistributor.getReply(HaltDataDistributorRequest(cc.id))); } @@ -132,7 +132,7 @@ struct BlobManagerSingleton : Singleton { } } void halt(ClusterControllerData& cc, Optional> pid) const { - if (interface.present() && cc.id_worker.count(pid)) { + if (interface.present() && cc.id_worker.contains(pid)) { cc.id_worker[pid].haltBlobManager = brokenPromiseToNever(interface.get().haltBlobManager.getReply(HaltBlobManagerRequest(cc.id))); } @@ -190,7 +190,7 @@ struct EncryptKeyProxySingleton : Singleton { } } void halt(ClusterControllerData& cc, Optional> pid) const { - if (interface.present() && cc.id_worker.count(pid)) { + if (interface.present() && cc.id_worker.contains(pid)) { cc.id_worker[pid].haltEncryptKeyProxy = brokenPromiseToNever(interface.get().haltEncryptKeyProxy.getReply(HaltEncryptKeyProxyRequest(cc.id))); } diff --git a/fdbserver/include/fdbserver/TCInfo.h b/fdbserver/include/fdbserver/TCInfo.h index aeb91b86bf..77e9fa046e 100644 --- a/fdbserver/include/fdbserver/TCInfo.h +++ b/fdbserver/include/fdbserver/TCInfo.h @@ -171,7 +171,7 @@ public: bool matches(std::vector> const& sortedMachineIDs); std::string getMachineIDsStr() const; bool containsMachine(Standalone machineID) const { - return std::count(machineIDs.begin(), machineIDs.end(), machineID); + return std::find(machineIDs.begin(), machineIDs.end(), machineID) != machineIDs.end(); } // Returns true iff team is found diff --git a/fdbserver/include/fdbserver/art.h b/fdbserver/include/fdbserver/art.h index 7356bc82ea..d1872b29be 100644 --- a/fdbserver/include/fdbserver/art.h +++ b/fdbserver/include/fdbserver/art.h @@ -314,9 +314,6 @@ public: art_iterator insert_if_absent(KeyRef& key, void* value, int* replaced); void erase(const art_iterator& it); - - uint64_t count() { return size; } - }; // art_tree struct art_iterator { diff --git a/fdbserver/masterserver.actor.cpp b/fdbserver/masterserver.actor.cpp index a947cb3b99..cef709990f 100644 --- a/fdbserver/masterserver.actor.cpp +++ b/fdbserver/masterserver.actor.cpp @@ -611,7 +611,7 @@ ACTOR Future masterServerCxx(MasterInterface mi, "Master: terminated due to backup worker failure", probe::decoration::rare); - if (normalMasterErrors().count(err.code())) { + if 
(normalMasterErrors().contains(err.code())) { TraceEvent("MasterTerminated", mi.id()).error(err); return Void(); } diff --git a/fdbserver/networktest.actor.cpp b/fdbserver/networktest.actor.cpp index 280e552370..f18ed85626 100644 --- a/fdbserver/networktest.actor.cpp +++ b/fdbserver/networktest.actor.cpp @@ -53,7 +53,6 @@ struct LatencyStats { } void reset() { *this = LatencyStats(); } - double count() { return n; } double mean() { return x / n; } double stddev() { return sqrt(x2 / n - (x / n) * (x / n)); } }; diff --git a/fdbserver/storageserver.actor.cpp b/fdbserver/storageserver.actor.cpp index 4e22c88eb5..002af62a40 100644 --- a/fdbserver/storageserver.actor.cpp +++ b/fdbserver/storageserver.actor.cpp @@ -8072,7 +8072,7 @@ ACTOR Future fetchChangeFeed(StorageServer* data, if (g_network->isSimulated() && !g_simulator->restarted) { // verify that the feed was actually destroyed and it's not an error in this inference logic. // Restarting tests produce false positives because the validation state isn't kept across tests - ASSERT(g_simulator->validationData.allDestroyedChangeFeedIDs.count(changeFeedInfo->id.toString())); + ASSERT(g_simulator->validationData.allDestroyedChangeFeedIDs.contains(changeFeedInfo->id.toString())); } Key beginClearKey = changeFeedInfo->id.withPrefix(persistChangeFeedKeys.begin); @@ -8089,7 +8089,7 @@ ACTOR Future fetchChangeFeed(StorageServer* data, changeFeedInfo->destroy(cleanupVersion); - if (data->uidChangeFeed.count(changeFeedInfo->id)) { + if (data->uidChangeFeed.contains(changeFeedInfo->id)) { // only register range for cleanup if it has not been already cleaned up data->changeFeedCleanupDurable[changeFeedInfo->id] = cleanupVersion; } @@ -8308,7 +8308,7 @@ ACTOR Future> fetchChangeFeedMetadata(StorageServer* data, if (g_network->isSimulated() && !g_simulator->restarted) { // verify that the feed was actually destroyed and it's not an error in this inference logic. Restarting // tests produce false positives because the validation state isn't kept across tests - ASSERT(g_simulator->validationData.allDestroyedChangeFeedIDs.count(feed.first.toString())); + ASSERT(g_simulator->validationData.allDestroyedChangeFeedIDs.contains(feed.first.toString())); } Key beginClearKey = feed.first.withPrefix(persistChangeFeedKeys.begin); @@ -12545,7 +12545,7 @@ ACTOR Future updateStorage(StorageServer* data) { auto info = data->uidChangeFeed.find(feedFetchVersions[curFeed].first); // Don't update if the feed is pending cleanup. Either it will get cleaned up and destroyed, or it will // get fetched again, where the fetch version will get reset. 
- if (info != data->uidChangeFeed.end() && !data->changeFeedCleanupDurable.count(info->second->id)) { + if (info != data->uidChangeFeed.end() && !data->changeFeedCleanupDurable.contains(info->second->id)) { if (feedFetchVersions[curFeed].second > info->second->durableFetchVersion.get()) { info->second->durableFetchVersion.set(feedFetchVersions[curFeed].second); } diff --git a/fdbserver/tester.actor.cpp b/fdbserver/tester.actor.cpp index 542be9b0e4..7589db7600 100644 --- a/fdbserver/tester.actor.cpp +++ b/fdbserver/tester.actor.cpp @@ -418,19 +418,19 @@ void CompoundWorkload::addFailureInjection(WorkloadRequest& work) { for (auto const& w : workloads) { w->disableFailureInjectionWorkloads(disabledWorkloads); } - if (disabledWorkloads.count("all") > 0) { + if (disabledWorkloads.contains("all")) { return; } auto& factories = IFailureInjectorFactory::factories(); DeterministicRandom random(sharedRandomNumber); for (auto& factory : factories) { auto workload = factory->create(*this); - if (disabledWorkloads.count(workload->description()) > 0) { + if (disabledWorkloads.contains(workload->description())) { continue; } - if (std::count(work.disabledFailureInjectionWorkloads.begin(), - work.disabledFailureInjectionWorkloads.end(), - workload->description()) > 0) { + if (std::find(work.disabledFailureInjectionWorkloads.begin(), + work.disabledFailureInjectionWorkloads.end(), + workload->description()) != work.disabledFailureInjectionWorkloads.end()) { continue; } while (shouldInjectFailure(random, work, workload)) { @@ -1646,7 +1646,7 @@ Optional getKeyFromString(const std::string& str) { } const char first = str.at(i + 2); const char second = str.at(i + 3); - if (parseCharMap.count(first) == 0 || parseCharMap.count(second) == 0) { + if (!parseCharMap.contains(first) || !parseCharMap.contains(second)) { TraceEvent(g_network->isSimulated() ? SevError : SevWarnAlways, "ConsistencyCheckUrgent_GetKeyFromStringError") .setMaxEventLength(-1) @@ -3150,12 +3150,12 @@ ACTOR Future testExpectedErrorImpl(Future test, } // Make sure that no duplicate details were provided - ASSERT(details.count("TestDescription") == 0); - ASSERT(details.count("ExpectedError") == 0); - ASSERT(details.count("ExpectedErrorCode") == 0); - ASSERT(details.count("ActualError") == 0); - ASSERT(details.count("ActualErrorCode") == 0); - ASSERT(details.count("Reason") == 0); + ASSERT(!details.contains("TestDescription")); + ASSERT(!details.contains("ExpectedError")); + ASSERT(!details.contains("ExpectedErrorCode")); + ASSERT(!details.contains("ActualError")); + ASSERT(!details.contains("ActualErrorCode")); + ASSERT(!details.contains("Reason")); for (auto& p : details) { evt.detail(p.first.c_str(), p.second); diff --git a/fdbserver/worker.actor.cpp b/fdbserver/worker.actor.cpp index f682e6b489..1e596883ef 100644 --- a/fdbserver/worker.actor.cpp +++ b/fdbserver/worker.actor.cpp @@ -1405,7 +1405,7 @@ std::set profiledThreads; // Returns whether or not a given thread should be profiled int filter_in_thread(void* arg) { - return profiledThreads.count(std::this_thread::get_id()) > 0 ? 1 : 0; + return profiledThreads.contains(std::this_thread::get_id()) ? 
1 : 0; } #endif @@ -3329,7 +3329,7 @@ ACTOR Future workerServer(Reference connRecord, } when(state WorkerSnapRequest snapReq = waitNext(interf.workerSnapReq.getFuture())) { std::string snapReqKey = snapReq.snapUID.toString() + snapReq.role.toString(); - if (snapReqResultMap.count(snapReqKey)) { + if (snapReqResultMap.contains(snapReqKey)) { CODE_PROBE(true, "Worker received a duplicate finished snapshot request", probe::decoration::rare); auto result = snapReqResultMap[snapReqKey]; result.isError() ? snapReq.reply.sendError(result.getError()) : snapReq.reply.send(result.get()); @@ -3337,7 +3337,7 @@ ACTOR Future workerServer(Reference connRecord, .detail("SnapUID", snapReq.snapUID.toString()) .detail("Role", snapReq.role) .detail("Result", result.isError() ? result.getError().code() : success().code()); - } else if (snapReqMap.count(snapReqKey)) { + } else if (snapReqMap.contains(snapReqKey)) { CODE_PROBE(true, "Worker received a duplicate ongoing snapshot request", probe::decoration::rare); TraceEvent("RetryOngoingWorkerSnapRequest") .detail("SnapUID", snapReq.snapUID.toString()) diff --git a/fdbserver/workloads/BlobGranuleRangesWorkload.actor.cpp b/fdbserver/workloads/BlobGranuleRangesWorkload.actor.cpp index 0bfa7c422c..4800c7dba0 100644 --- a/fdbserver/workloads/BlobGranuleRangesWorkload.actor.cpp +++ b/fdbserver/workloads/BlobGranuleRangesWorkload.actor.cpp @@ -831,7 +831,7 @@ struct BlobGranuleRangesWorkload : TestWorkload { int op = OP_COUNT; loop { op = deterministicRandom()->randomInt(0, OP_COUNT); - if (!excludedTypes.count((UnitTestTypes)op)) { + if (!excludedTypes.contains((UnitTestTypes)op)) { break; } loopTries--; diff --git a/fdbserver/workloads/BulkLoadWithTenants.actor.cpp b/fdbserver/workloads/BulkLoadWithTenants.actor.cpp index c346cc0e2a..00771ce1f6 100644 --- a/fdbserver/workloads/BulkLoadWithTenants.actor.cpp +++ b/fdbserver/workloads/BulkLoadWithTenants.actor.cpp @@ -148,7 +148,7 @@ struct BulkSetupWorkload : TestWorkload { for (i = 0; i < workload->tenants.size(); i++) { state Reference tenant = workload->tenants[i]; std::vector keysForCurTenant = wait(getKVPairsForTenant(workload, tenant, cx)); - if (tenantIdsToDrop.count(tenant->id())) { + if (tenantIdsToDrop.contains(tenant->id())) { // Don't check the tenants that the EKP would throw errors for continue; } diff --git a/fdbserver/workloads/ConfigureDatabase.actor.cpp b/fdbserver/workloads/ConfigureDatabase.actor.cpp index 40e02adf22..9f9c667ab5 100644 --- a/fdbserver/workloads/ConfigureDatabase.actor.cpp +++ b/fdbserver/workloads/ConfigureDatabase.actor.cpp @@ -432,9 +432,9 @@ struct ConfigureDatabaseWorkload : TestWorkload { int storeType = 0; while (true) { storeType = deterministicRandom()->randomInt(0, 6); - if (std::count(self->storageEngineExcludeTypes.begin(), - self->storageEngineExcludeTypes.end(), - storeType) == 0) { + if (std::find(self->storageEngineExcludeTypes.begin(), + self->storageEngineExcludeTypes.end(), + storeType) == self->storageEngineExcludeTypes.end()) { break; } } diff --git a/fdbserver/workloads/ConflictRange.actor.cpp b/fdbserver/workloads/ConflictRange.actor.cpp index a083f473b5..a55e6109ee 100644 --- a/fdbserver/workloads/ConflictRange.actor.cpp +++ b/fdbserver/workloads/ConflictRange.actor.cpp @@ -197,7 +197,7 @@ struct ConflictRangeWorkload : TestWorkload { if (randomSets) { for (int j = 0; j < 5; j++) { int proposedKey = deterministicRandom()->randomInt(0, self->maxKeySpace); - if (!insertedSet.count(proposedKey)) { + if (!insertedSet.contains(proposedKey)) { 
TraceEvent("ConflictRangeSet").detail("Key", proposedKey); insertedSet.insert(proposedKey); tr2.set(StringRef(format("%010d", proposedKey)), @@ -208,7 +208,7 @@ struct ConflictRangeWorkload : TestWorkload { } else { for (int j = 0; j < 5; j++) { int proposedKey = deterministicRandom()->randomInt(0, self->maxKeySpace); - if (insertedSet.count(proposedKey)) { + if (insertedSet.contains(proposedKey)) { TraceEvent("ConflictRangeClear").detail("Key", proposedKey); insertedSet.erase(proposedKey); tr2.clear(StringRef(format("%010d", proposedKey))); diff --git a/fdbserver/workloads/ConsistencyCheck.actor.cpp b/fdbserver/workloads/ConsistencyCheck.actor.cpp index 8b7725cd5f..b1d60c8abc 100644 --- a/fdbserver/workloads/ConsistencyCheck.actor.cpp +++ b/fdbserver/workloads/ConsistencyCheck.actor.cpp @@ -1035,7 +1035,7 @@ struct ConsistencyCheckWorkload : TestWorkload { } for (auto& ssi : servers) { - ASSERT(id_ssi.count(ssi.id())); + ASSERT(id_ssi.contains(ssi.id())); } return true; } @@ -1180,7 +1180,7 @@ struct ConsistencyCheckWorkload : TestWorkload { itr->interf.secondaryAddress().present() ? itr->interf.secondaryAddress().get().toString() : "Unset"); for (const auto& id : stores.get()) { - if (statefulProcesses[itr->interf.address()].count(id)) { + if (statefulProcesses[itr->interf.address()].contains(id)) { continue; } // For extra data store @@ -1200,7 +1200,7 @@ struct ConsistencyCheckWorkload : TestWorkload { .detail("ProcessPrimaryAddress", p->address) .detail("ProcessAddresses", p->addresses.toString()) .detail("DataStoreID", id) - .detail("Protected", g_simulator->protectedAddresses.count(itr->interf.address())) + .detail("Protected", g_simulator->protectedAddresses.contains(itr->interf.address())) .detail("Reliable", p->isReliable()) .detail("ReliableInfo", p->getReliableInfo()) .detail("KillOrRebootProcess", p->address); @@ -1323,7 +1323,7 @@ struct ConsistencyCheckWorkload : TestWorkload { all[i]->startingClass != ProcessClass::TesterClass && all[i]->startingClass != ProcessClass::SimHTTPServerClass && all[i]->protocolVersion == g_network->protocolVersion()) { - if (!workerAddresses.count(all[i]->address)) { + if (!workerAddresses.contains(all[i]->address)) { TraceEvent("ConsistencyCheck_WorkerMissingFromList").detail("Addr", all[i]->address); return false; } @@ -1378,7 +1378,7 @@ struct ConsistencyCheckWorkload : TestWorkload { for (const auto& addr : oldCoordinators) { auto findResult = addr_locality.find(addr); if (findResult != addr_locality.end()) { - if (checkDuplicates.count(findResult->second.zoneId())) { + if (checkDuplicates.contains(findResult->second.zoneId())) { TraceEvent("ConsistencyCheck_BadCoordinator") .detail("Addr", addr) .detail("NotFound", findResult == addr_locality.end()); @@ -1410,7 +1410,7 @@ struct ConsistencyCheckWorkload : TestWorkload { for (const auto& worker : allWorkers) { allWorkerProcessMap[worker.interf.address()] = worker; Optional dc = worker.interf.locality.dcId(); - if (!dcToAllClassTypes.count(dc)) + if (!dcToAllClassTypes.contains(dc)) dcToAllClassTypes.insert({}); dcToAllClassTypes[dc].push_back(worker.processClass.classType()); } @@ -1420,17 +1420,17 @@ struct ConsistencyCheckWorkload : TestWorkload { for (const auto& worker : nonExcludedWorkers) { nonExcludedWorkerProcessMap[worker.interf.address()] = worker; Optional dc = worker.interf.locality.dcId(); - if (!dcToNonExcludedClassTypes.count(dc)) + if (!dcToNonExcludedClassTypes.contains(dc)) dcToNonExcludedClassTypes.insert({}); 
dcToNonExcludedClassTypes[dc].push_back(worker.processClass.classType()); } - if (!allWorkerProcessMap.count(db.clusterInterface.clientInterface.address())) { + if (!allWorkerProcessMap.contains(db.clusterInterface.clientInterface.address())) { TraceEvent("ConsistencyCheck_CCNotInWorkerList") .detail("CCAddress", db.clusterInterface.clientInterface.address().toString()); return false; } - if (!allWorkerProcessMap.count(db.master.address())) { + if (!allWorkerProcessMap.contains(db.master.address())) { TraceEvent("ConsistencyCheck_MasterNotInWorkerList") .detail("MasterAddress", db.master.address().toString()); return false; @@ -1478,13 +1478,13 @@ struct ConsistencyCheckWorkload : TestWorkload { // Check CC ProcessClass::Fitness bestClusterControllerFitness = getBestAvailableFitness(dcToNonExcludedClassTypes[ccDcId], ProcessClass::ClusterController); - if (!nonExcludedWorkerProcessMap.count(db.clusterInterface.clientInterface.address()) || + if (!nonExcludedWorkerProcessMap.contains(db.clusterInterface.clientInterface.address()) || nonExcludedWorkerProcessMap[db.clusterInterface.clientInterface.address()].processClass.machineClassFitness( ProcessClass::ClusterController) != bestClusterControllerFitness) { TraceEvent("ConsistencyCheck_ClusterControllerNotBest") .detail("BestClusterControllerFitness", bestClusterControllerFitness) .detail("ExistingClusterControllerFit", - nonExcludedWorkerProcessMap.count(db.clusterInterface.clientInterface.address()) + nonExcludedWorkerProcessMap.contains(db.clusterInterface.clientInterface.address()) ? nonExcludedWorkerProcessMap[db.clusterInterface.clientInterface.address()] .processClass.machineClassFitness(ProcessClass::ClusterController) : -1); @@ -1501,14 +1501,14 @@ struct ConsistencyCheckWorkload : TestWorkload { } } - if ((!nonExcludedWorkerProcessMap.count(db.master.address()) && + if ((!nonExcludedWorkerProcessMap.contains(db.master.address()) && bestMasterFitness != ProcessClass::ExcludeFit) || nonExcludedWorkerProcessMap[db.master.address()].processClass.machineClassFitness(ProcessClass::Master) != bestMasterFitness) { TraceEvent("ConsistencyCheck_MasterNotBest") .detail("BestMasterFitness", bestMasterFitness) .detail("ExistingMasterFit", - nonExcludedWorkerProcessMap.count(db.master.address()) + nonExcludedWorkerProcessMap.contains(db.master.address()) ? nonExcludedWorkerProcessMap[db.master.address()].processClass.machineClassFitness( ProcessClass::Master) : -1); @@ -1519,13 +1519,13 @@ struct ConsistencyCheckWorkload : TestWorkload { ProcessClass::Fitness bestCommitProxyFitness = getBestAvailableFitness(dcToNonExcludedClassTypes[masterDcId], ProcessClass::CommitProxy); for (const auto& commitProxy : db.client.commitProxies) { - if (!nonExcludedWorkerProcessMap.count(commitProxy.address()) || + if (!nonExcludedWorkerProcessMap.contains(commitProxy.address()) || nonExcludedWorkerProcessMap[commitProxy.address()].processClass.machineClassFitness( ProcessClass::CommitProxy) != bestCommitProxyFitness) { TraceEvent("ConsistencyCheck_CommitProxyNotBest") .detail("BestCommitProxyFitness", bestCommitProxyFitness) .detail("ExistingCommitProxyFitness", - nonExcludedWorkerProcessMap.count(commitProxy.address()) + nonExcludedWorkerProcessMap.contains(commitProxy.address()) ? 
nonExcludedWorkerProcessMap[commitProxy.address()].processClass.machineClassFitness( ProcessClass::CommitProxy) : -1); @@ -1537,13 +1537,13 @@ struct ConsistencyCheckWorkload : TestWorkload { ProcessClass::Fitness bestGrvProxyFitness = getBestAvailableFitness(dcToNonExcludedClassTypes[masterDcId], ProcessClass::GrvProxy); for (const auto& grvProxy : db.client.grvProxies) { - if (!nonExcludedWorkerProcessMap.count(grvProxy.address()) || + if (!nonExcludedWorkerProcessMap.contains(grvProxy.address()) || nonExcludedWorkerProcessMap[grvProxy.address()].processClass.machineClassFitness( ProcessClass::GrvProxy) != bestGrvProxyFitness) { TraceEvent("ConsistencyCheck_GrvProxyNotBest") .detail("BestGrvProxyFitness", bestGrvProxyFitness) .detail("ExistingGrvProxyFitness", - nonExcludedWorkerProcessMap.count(grvProxy.address()) + nonExcludedWorkerProcessMap.contains(grvProxy.address()) ? nonExcludedWorkerProcessMap[grvProxy.address()].processClass.machineClassFitness( ProcessClass::GrvProxy) : -1); @@ -1555,13 +1555,13 @@ struct ConsistencyCheckWorkload : TestWorkload { ProcessClass::Fitness bestResolverFitness = getBestAvailableFitness(dcToNonExcludedClassTypes[masterDcId], ProcessClass::Resolver); for (const auto& resolver : db.resolvers) { - if (!nonExcludedWorkerProcessMap.count(resolver.address()) || + if (!nonExcludedWorkerProcessMap.contains(resolver.address()) || nonExcludedWorkerProcessMap[resolver.address()].processClass.machineClassFitness( ProcessClass::Resolver) != bestResolverFitness) { TraceEvent("ConsistencyCheck_ResolverNotBest") .detail("BestResolverFitness", bestResolverFitness) .detail("ExistingResolverFitness", - nonExcludedWorkerProcessMap.count(resolver.address()) + nonExcludedWorkerProcessMap.contains(resolver.address()) ? nonExcludedWorkerProcessMap[resolver.address()].processClass.machineClassFitness( ProcessClass::Resolver) : -1); @@ -1576,7 +1576,7 @@ struct ConsistencyCheckWorkload : TestWorkload { for (auto& tlogSet : db.logSystemConfig.tLogs) { if (!tlogSet.isLocal && tlogSet.logRouters.size()) { for (auto& logRouter : tlogSet.logRouters) { - if (!nonExcludedWorkerProcessMap.count(logRouter.interf().address())) { + if (!nonExcludedWorkerProcessMap.contains(logRouter.interf().address())) { TraceEvent("ConsistencyCheck_LogRouterNotInNonExcludedWorkers") .detail("Id", logRouter.id()); return false; @@ -1596,14 +1596,14 @@ struct ConsistencyCheckWorkload : TestWorkload { ProcessClass::Fitness fitnessLowerBound = allWorkerProcessMap[db.master.address()].processClass.machineClassFitness(ProcessClass::DataDistributor); if (db.distributor.present() && - (!nonExcludedWorkerProcessMap.count(db.distributor.get().address()) || + (!nonExcludedWorkerProcessMap.contains(db.distributor.get().address()) || nonExcludedWorkerProcessMap[db.distributor.get().address()].processClass.machineClassFitness( ProcessClass::DataDistributor) > fitnessLowerBound)) { TraceEvent("ConsistencyCheck_DistributorNotBest") .detail("DataDistributorFitnessLowerBound", fitnessLowerBound) .detail( "ExistingDistributorFitness", - nonExcludedWorkerProcessMap.count(db.distributor.get().address()) + nonExcludedWorkerProcessMap.contains(db.distributor.get().address()) ? 
                 nonExcludedWorkerProcessMap[db.distributor.get().address()].processClass.machineClassFitness(
                     ProcessClass::DataDistributor)
               : -1);
@@ -1612,14 +1612,14 @@ struct ConsistencyCheckWorkload : TestWorkload {
         // Check Ratekeeper
         if (db.ratekeeper.present() &&
-            (!nonExcludedWorkerProcessMap.count(db.ratekeeper.get().address()) ||
+            (!nonExcludedWorkerProcessMap.contains(db.ratekeeper.get().address()) ||
              nonExcludedWorkerProcessMap[db.ratekeeper.get().address()].processClass.machineClassFitness(
                  ProcessClass::Ratekeeper) > fitnessLowerBound)) {
             TraceEvent("ConsistencyCheck_RatekeeperNotBest")
                 .detail("BestRatekeeperFitness", fitnessLowerBound)
                 .detail(
                     "ExistingRatekeeperFitness",
-                    nonExcludedWorkerProcessMap.count(db.ratekeeper.get().address())
+                    nonExcludedWorkerProcessMap.contains(db.ratekeeper.get().address())
                         ? nonExcludedWorkerProcessMap[db.ratekeeper.get().address()].processClass.machineClassFitness(
                               ProcessClass::Ratekeeper)
                         : -1);
@@ -1628,14 +1628,14 @@ struct ConsistencyCheckWorkload : TestWorkload {
         // Check BlobManager
         if (config.blobGranulesEnabled && db.blobManager.present() &&
-            (!nonExcludedWorkerProcessMap.count(db.blobManager.get().address()) ||
+            (!nonExcludedWorkerProcessMap.contains(db.blobManager.get().address()) ||
              nonExcludedWorkerProcessMap[db.blobManager.get().address()].processClass.machineClassFitness(
                  ProcessClass::BlobManager) > fitnessLowerBound)) {
             TraceEvent("ConsistencyCheck_BlobManagerNotBest")
                 .detail("BestBlobManagerFitness", fitnessLowerBound)
                 .detail(
                     "ExistingBlobManagerFitness",
-                    nonExcludedWorkerProcessMap.count(db.blobManager.get().address())
+                    nonExcludedWorkerProcessMap.contains(db.blobManager.get().address())
                         ? nonExcludedWorkerProcessMap[db.blobManager.get().address()].processClass.machineClassFitness(
                               ProcessClass::BlobManager)
                         : -1);
@@ -1644,14 +1644,14 @@ struct ConsistencyCheckWorkload : TestWorkload {
         // Check BlobMigrator
         if (config.blobGranulesEnabled && db.blobMigrator.present() &&
-            (!nonExcludedWorkerProcessMap.count(db.blobMigrator.get().address()) ||
+            (!nonExcludedWorkerProcessMap.contains(db.blobMigrator.get().address()) ||
              nonExcludedWorkerProcessMap[db.blobMigrator.get().address()].processClass.machineClassFitness(
                  ProcessClass::BlobMigrator) > fitnessLowerBound)) {
             TraceEvent("ConsistencyCheck_BlobMigratorNotBest")
                 .detail("BestBlobMigratorFitness", fitnessLowerBound)
                 .detail(
                     "ExistingBlobMigratorFitness",
-                    nonExcludedWorkerProcessMap.count(db.blobMigrator.get().address())
+                    nonExcludedWorkerProcessMap.contains(db.blobMigrator.get().address())
                         ? nonExcludedWorkerProcessMap[db.blobMigrator.get().address()].processClass.machineClassFitness(
                               ProcessClass::BlobMigrator)
                         : -1);
@@ -1660,13 +1660,13 @@ struct ConsistencyCheckWorkload : TestWorkload {
         // Check EncryptKeyProxy
         if (config.encryptionAtRestMode.isEncryptionEnabled() && db.client.encryptKeyProxy.present() &&
-            (!nonExcludedWorkerProcessMap.count(db.client.encryptKeyProxy.get().address()) ||
+            (!nonExcludedWorkerProcessMap.contains(db.client.encryptKeyProxy.get().address()) ||
              nonExcludedWorkerProcessMap[db.client.encryptKeyProxy.get().address()].processClass.machineClassFitness(
                  ProcessClass::EncryptKeyProxy) > fitnessLowerBound)) {
             TraceEvent("ConsistencyCheck_EncryptKeyProxyNotBest")
                 .detail("BestEncryptKeyProxyFitness", fitnessLowerBound)
                 .detail("ExistingEncryptKeyProxyFitness",
-                        nonExcludedWorkerProcessMap.count(db.client.encryptKeyProxy.get().address())
+                        nonExcludedWorkerProcessMap.contains(db.client.encryptKeyProxy.get().address())
                             ? nonExcludedWorkerProcessMap[db.client.encryptKeyProxy.get().address()]
                                   .processClass.machineClassFitness(ProcessClass::EncryptKeyProxy)
                             : -1);
@@ -1675,13 +1675,13 @@ struct ConsistencyCheckWorkload : TestWorkload {
         // Check ConsistencyScan
         if (db.consistencyScan.present() &&
-            (!nonExcludedWorkerProcessMap.count(db.consistencyScan.get().address()) ||
+            (!nonExcludedWorkerProcessMap.contains(db.consistencyScan.get().address()) ||
              nonExcludedWorkerProcessMap[db.consistencyScan.get().address()].processClass.machineClassFitness(
                  ProcessClass::ConsistencyScan) > fitnessLowerBound)) {
             TraceEvent("ConsistencyCheck_ConsistencyScanNotBest")
                 .detail("BestConsistencyScanFitness", fitnessLowerBound)
                 .detail("ExistingConsistencyScanFitness",
-                        nonExcludedWorkerProcessMap.count(db.consistencyScan.get().address())
+                        nonExcludedWorkerProcessMap.contains(db.consistencyScan.get().address())
                             ? nonExcludedWorkerProcessMap[db.consistencyScan.get().address()]
                                   .processClass.machineClassFitness(ProcessClass::ConsistencyScan)
                             : -1);
diff --git a/fdbserver/workloads/DataLossRecovery.actor.cpp b/fdbserver/workloads/DataLossRecovery.actor.cpp
index 460072e3c4..6f053a6b62 100644
--- a/fdbserver/workloads/DataLossRecovery.actor.cpp
+++ b/fdbserver/workloads/DataLossRecovery.actor.cpp
@@ -195,7 +195,7 @@ struct DataLossRecoveryWorkload : TestWorkload {
         state std::vector interfs = wait(getStorageServers(cx));
         if (!interfs.empty()) {
             state StorageServerInterface interf = interfs[deterministicRandom()->randomInt(0, interfs.size())];
-            if (g_simulator->protectedAddresses.count(interf.address()) == 0) {
+            if (!g_simulator->protectedAddresses.contains(interf.address())) {
                 // We need to avoid selecting a storage server that is already dead at this point, otherwise
                 // the test will hang. This is achieved by sending a GetStorageMetrics RPC. This is a necessary
                 // check for this test because DD has been disabled and the proper mechanism that removes bad
diff --git a/fdbserver/workloads/DiskDurabilityTest.actor.cpp b/fdbserver/workloads/DiskDurabilityTest.actor.cpp
index b09bfecb23..a3f9bb81f2 100644
--- a/fdbserver/workloads/DiskDurabilityTest.actor.cpp
+++ b/fdbserver/workloads/DiskDurabilityTest.actor.cpp
@@ -123,7 +123,7 @@ struct DiskDurabilityTest : TestWorkload {
             state std::vector targetPages;
             for (int i = deterministicRandom()->randomInt(1, 100); i > 0 && targetPages.size() < size / 4096; i--) {
                 auto p = deterministicRandom()->randomInt(0, size / 4096);
-                if (!std::count(targetPages.begin(), targetPages.end(), p))
+                if (std::find(targetPages.begin(), targetPages.end(), p) == targetPages.end())
                     targetPages.push_back(p);
             }
             for (int i = deterministicRandom()->randomInt(1, 4); i > 0; i--) {
diff --git a/fdbserver/workloads/DiskFailureInjection.actor.cpp b/fdbserver/workloads/DiskFailureInjection.actor.cpp
index 2e28b1e6f3..c867b37f3c 100644
--- a/fdbserver/workloads/DiskFailureInjection.actor.cpp
+++ b/fdbserver/workloads/DiskFailureInjection.actor.cpp
@@ -194,11 +194,10 @@ struct DiskFailureInjectionWorkload : FailureInjectionWorkload {
             TraceEvent("ResendChaos")
                 .detail("ChosenWorkersSize", self->chosenWorkers.size())
                 .detail("FoundWorkers", workersMap.size())
-                .detail(
-                    "ResendToNumber",
-                    std::count_if(self->chosenWorkers.begin(),
-                                  self->chosenWorkers.end(),
-                                  [&map = std::as_const(workersMap)](auto const& addr) { return map.count(addr) > 0; }));
+                .detail("ResendToNumber",
+                        std::count_if(self->chosenWorkers.begin(),
+                                      self->chosenWorkers.end(),
+                                      [&map = std::as_const(workersMap)](auto const& addr) { return map.contains(addr); }));
             for (auto& workerAddress : self->chosenWorkers) {
                 auto itr = workersMap.find(workerAddress);
                 if (itr != workersMap.end()) {
diff --git a/fdbserver/workloads/ExcludeIncludeStorageServersWorkload.actor.cpp b/fdbserver/workloads/ExcludeIncludeStorageServersWorkload.actor.cpp
index 91c2db07b8..f82cde2192 100644
--- a/fdbserver/workloads/ExcludeIncludeStorageServersWorkload.actor.cpp
+++ b/fdbserver/workloads/ExcludeIncludeStorageServersWorkload.actor.cpp
@@ -102,7 +102,7 @@ struct ExcludeIncludeStorageServersWorkload : TestWorkload {
         std::vector> results = wait(NativeAPI::getServerListAndProcessClasses(&tr));
         for (auto& [ssi, p] : results) {
-            if (g_simulator->protectedAddresses.count(ssi.address()) == 0) {
+            if (!g_simulator->protectedAddresses.contains(ssi.address())) {
                 servers.insert(AddressExclusion(ssi.address().ip, ssi.address().port));
             }
         }
diff --git a/fdbserver/workloads/FuzzApiCorrectness.actor.cpp b/fdbserver/workloads/FuzzApiCorrectness.actor.cpp
index 6d944d9098..ed605da567 100644
--- a/fdbserver/workloads/FuzzApiCorrectness.actor.cpp
+++ b/fdbserver/workloads/FuzzApiCorrectness.actor.cpp
@@ -225,7 +225,9 @@ struct FuzzApiCorrectnessWorkload : TestWorkload {
             return TenantGroupNameRef(format("tenantgroup_%d", groupNum));
         }
     }
-    bool canUseTenant(Optional tenant) { return !tenant.present() || createdTenants.count(tenant.get()); }
+    bool canUseTenant(Optional tenant) {
+        return !tenant.present() || createdTenants.contains(tenant.get());
+    }
     Future setup(Database const& cx) override {
         if (clientId == 0) {
diff --git a/fdbserver/workloads/HTTPKeyValueStore.actor.cpp b/fdbserver/workloads/HTTPKeyValueStore.actor.cpp
index d4b6b51d81..3f3da2946e 100644
--- a/fdbserver/workloads/HTTPKeyValueStore.actor.cpp
+++ b/fdbserver/workloads/HTTPKeyValueStore.actor.cpp
@@ -112,15 +112,15 @@ ACTOR Future httpKVRequestCallback(Reference kvStore,
     // content-length and RequestID from http are already filled in
     // ASSERT_EQ(req->data.headers.size(), 5);
     ASSERT_EQ(req->data.headers.size(), 5);
-    ASSERT(req->data.headers.count("Key"));
-    ASSERT(req->data.headers.count("ClientID"));
-    ASSERT(req->data.headers.count("UID"));
-    ASSERT(req->data.headers.count("SeqNo"));
+    ASSERT(req->data.headers.contains("Key"));
+    ASSERT(req->data.headers.contains("ClientID"));
+    ASSERT(req->data.headers.contains("UID"));
+    ASSERT(req->data.headers.contains("SeqNo"));
     int clientId = atoi(req->data.headers["ClientID"].c_str());
     int seqNo = atoi(req->data.headers["SeqNo"].c_str());
-    ASSERT(req->data.headers.count("Content-Length"));
+    ASSERT(req->data.headers.contains("Content-Length"));
     ASSERT_EQ(req->data.headers["Content-Length"], std::to_string(req->data.content.size()));
     ASSERT_EQ(req->data.contentLen, req->data.content.size());
@@ -291,11 +291,11 @@ struct HTTPKeyValueStoreWorkload : TestWorkload {
         }
         ASSERT_EQ(response->code, 200);
-        ASSERT(response->data.headers.count("ClientID"));
+        ASSERT(response->data.headers.contains("ClientID"));
         ASSERT_EQ(response->data.headers["ClientID"], std::to_string(self->clientId));
-        ASSERT(response->data.headers.count("Key"));
+        ASSERT(response->data.headers.contains("Key"));
         ASSERT_EQ(response->data.headers["Key"], key);
-        ASSERT(response->data.headers.count("UID"));
+        ASSERT(response->data.headers.contains("UID"));
         ASSERT_EQ(response->data.headers["UID"], requestID.toString());
         return response;
diff --git a/fdbserver/workloads/MachineAttrition.actor.cpp b/fdbserver/workloads/MachineAttrition.actor.cpp
index 8dd839bd4b..5542b1b590 100644
--- a/fdbserver/workloads/MachineAttrition.actor.cpp
+++ b/fdbserver/workloads/MachineAttrition.actor.cpp
@@ -219,7 +219,7 @@ struct MachineAttritionWorkload : FailureInjectionWorkload {
         for (const auto& worker : workers) {
             // kill all matching workers
             if (idAccess(worker).present() &&
-                std::count(targets.begin(), targets.end(), idAccess(worker).get().toString())) {
+                std::find(targets.begin(), targets.end(), idAccess(worker).get().toString()) != targets.end()) {
                 TraceEvent("SendingRebootRequest").detail("TargetWorker", worker.interf.locality.toString());
                 worker.interf.clientInterface.reboot.send(rbReq);
             }
diff --git a/fdbserver/workloads/MetaclusterManagementWorkload.actor.cpp b/fdbserver/workloads/MetaclusterManagementWorkload.actor.cpp
index d2738120ac..a5fc7e03f1 100644
--- a/fdbserver/workloads/MetaclusterManagementWorkload.actor.cpp
+++ b/fdbserver/workloads/MetaclusterManagementWorkload.actor.cpp
@@ -117,8 +117,8 @@ struct MetaclusterManagementWorkload : TestWorkload {
         for (int i = 0; i < 20; ++i) {
             int64_t newPrefix = deterministicRandom()->randomInt(TenantAPI::TENANT_ID_PREFIX_MIN_VALUE,
                                                                  TenantAPI::TENANT_ID_PREFIX_MAX_VALUE + 1);
-            if (allowTenantIdPrefixReuse || !usedPrefixes.count(newPrefix)) {
-                CODE_PROBE(usedPrefixes.count(newPrefix), "Reusing tenant ID prefix", probe::decoration::rare);
+            if (allowTenantIdPrefixReuse || !usedPrefixes.contains(newPrefix)) {
+                CODE_PROBE(usedPrefixes.contains(newPrefix), "Reusing tenant ID prefix", probe::decoration::rare);
                 return newPrefix;
             }
         }
@@ -606,7 +606,7 @@ struct MetaclusterManagementWorkload : TestWorkload {
         state bool foundTenantCollision = false;
         for (auto t : dataDb->tenants) {
-            if (self->createdTenants.count(t.first)) {
+            if (self->createdTenants.contains(t.first)) {
                 foundTenantCollision = true;
                 tenantsToRemove.insert(t.first);
             }
@@ -614,7 +614,7 @@ struct MetaclusterManagementWorkload : TestWorkload {
         state bool foundGroupCollision = false;
         for (auto t : dataDb->tenantGroups) {
-            if (self->tenantGroups.count(t.first)) {
+            if (self->tenantGroups.contains(t.first)) {
                 foundGroupCollision = true;
                 tenantsToRemove.insert(t.second->tenants.begin(), t.second->tenants.end());
             }
@@ -1011,7 +1011,7 @@ struct MetaclusterManagementWorkload : TestWorkload {
         auto itr = self->createdTenants.find(tenant);
         state bool exists = itr != self->createdTenants.end();
-        state bool tenantGroupExists = tenantGroup.present() && self->tenantGroups.count(tenantGroup.get());
+        state bool tenantGroupExists = tenantGroup.present() && self->tenantGroups.contains(tenantGroup.get());
         state bool hasCapacity = tenantGroupExists ||
                                  self->ungroupedTenants.size() + self->tenantGroups.size() < self->totalTenantGroupCapacity;
@@ -1740,7 +1740,7 @@ struct MetaclusterManagementWorkload : TestWorkload {
         ASSERT_EQ(tenants.size(), clusterData->tenants.size());
         for (auto [tenantName, tenantEntry] : tenants) {
-            ASSERT(clusterData->tenants.count(tenantName));
+            ASSERT(clusterData->tenants.contains(tenantName));
             auto tenantData = clusterData->tenants.find(tenantName);
             ASSERT(tenantData != clusterData->tenants.end());
             ASSERT(tenantData->second->cluster == clusterName);
diff --git a/fdbserver/workloads/MetaclusterRestoreWorkload.actor.cpp b/fdbserver/workloads/MetaclusterRestoreWorkload.actor.cpp
index e5cae3d5a0..fc2ca2050e 100644
--- a/fdbserver/workloads/MetaclusterRestoreWorkload.actor.cpp
+++ b/fdbserver/workloads/MetaclusterRestoreWorkload.actor.cpp
@@ -139,7 +139,7 @@ struct MetaclusterRestoreWorkload : TestWorkload {
             do {
                 tenantGroup = TenantGroupNameRef(
                     format("tenantgroup%08d", deterministicRandom()->randomInt(0, maxTenantGroups)));
-            } while (tenantGroups.count(tenantGroup.get()) > 0);
+            } while (tenantGroups.contains(tenantGroup.get()));
         }
     }
 }
@@ -368,7 +368,7 @@ struct MetaclusterRestoreWorkload : TestWorkload {
         for (auto const& t : tenantCollisions) {
             // If the data cluster tenant is expected, then remove the management tenant
             // Note that the management tenant may also have been expected
-            if (self->createdTenants.count(t.second.first)) {
+            if (self->createdTenants.contains(t.second.first)) {
                 CODE_PROBE(true, "Remove management tenant in restore collision");
                 removeTrackedTenant(t.second.second);
                 deleteFutures.push_back(metacluster::deleteTenant(self->managementDb, t.second.second));
@@ -527,7 +527,7 @@ struct MetaclusterRestoreWorkload : TestWorkload {
             }
         }
         for (auto const& g : dataClusterGroups.results) {
-            if (managementGroups.count(g.first)) {
+            if (managementGroups.contains(g.first)) {
                 groupCollisions.insert(g.first);
             }
         }
@@ -765,12 +765,12 @@ struct MetaclusterRestoreWorkload : TestWorkload {
         state TenantName tenantName;
         for (int i = 0; i < 10; ++i) {
             tenantName = self->chooseTenantName();
-            if (self->tenantNameIndex.count(tenantName) == 0) {
+            if (!self->tenantNameIndex.contains(tenantName)) {
                 break;
             }
         }
-        if (self->tenantNameIndex.count(tenantName)) {
+        if (self->tenantNameIndex.contains(tenantName)) {
             return Void();
         }
@@ -815,12 +815,12 @@ struct MetaclusterRestoreWorkload : TestWorkload {
         state TenantName tenantName;
         for (int i = 0; i < 10; ++i) {
            tenantName = self->chooseTenantName();
-            if (self->tenantNameIndex.count(tenantName) != 0) {
+            if (self->tenantNameIndex.contains(tenantName)) {
                 break;
             }
         }
-        if (self->tenantNameIndex.count(tenantName) == 0) {
+        if (!self->tenantNameIndex.contains(tenantName)) {
            return Void();
         }
@@ -856,12 +856,12 @@ struct MetaclusterRestoreWorkload : TestWorkload {
         state TenantName tenantName;
         for (int i = 0; i < 10; ++i) {
             tenantName = self->chooseTenantName();
-            if (self->tenantNameIndex.count(tenantName) != 0) {
+            if (self->tenantNameIndex.contains(tenantName)) {
                 break;
             }
         }
-        if (self->tenantNameIndex.count(tenantName) == 0) {
+        if (!self->tenantNameIndex.contains(tenantName)) {
             return Void();
         }
@@ -920,18 +920,18 @@ struct MetaclusterRestoreWorkload : TestWorkload {
         state TenantName newTenantName;
         for (int i = 0; i < 10; ++i) {
             oldTenantName = self->chooseTenantName();
-            if (self->tenantNameIndex.count(oldTenantName) != 0) {
+            if (self->tenantNameIndex.contains(oldTenantName)) {
                 break;
             }
         }
         for (int i = 0; i < 10; ++i) {
             newTenantName = self->chooseTenantName();
-            if (self->tenantNameIndex.count(newTenantName) == 0) {
+            if (!self->tenantNameIndex.contains(newTenantName)) {
                 break;
             }
         }
-        if (self->tenantNameIndex.count(oldTenantName) == 0 || self->tenantNameIndex.count(newTenantName) != 0) {
+        if (!self->tenantNameIndex.contains(oldTenantName) || self->tenantNameIndex.contains(newTenantName)) {
             return Void();
         }
@@ -1094,7 +1094,7 @@ struct MetaclusterRestoreWorkload : TestWorkload {
         if (!clusterData.restored) {
             ASSERT_EQ(tenants.results.size(), clusterData.tenants.size());
             for (auto [tenantId, tenantEntry] : tenants.results) {
-                ASSERT(clusterData.tenants.count(tenantId));
+                ASSERT(clusterData.tenants.contains(tenantId));
                 auto tenantData = self->createdTenants[tenantId];
                 ASSERT(tenantData.cluster == clusterName);
                 ASSERT(tenantData.tenantGroup == tenantEntry.tenantGroup);
@@ -1128,9 +1128,9 @@ struct MetaclusterRestoreWorkload : TestWorkload {
             // Check for deleted tenants that reappeared
             int unexpectedTenants = 0;
             for (auto const& [tenantId, tenantEntry] : tenantMap) {
-                if (!clusterData.tenants.count(tenantId)) {
+                if (!clusterData.tenants.contains(tenantId)) {
                     ASSERT(self->recoverManagementCluster);
-                    ASSERT(self->deletedTenants.count(tenantId));
+                    ASSERT(self->deletedTenants.contains(tenantId));
                     ++unexpectedTenants;
                 }
             }
@@ -1204,8 +1204,8 @@ struct MetaclusterRestoreWorkload : TestWorkload {
             // If we recovered both the management and some data clusters, we might undelete a tenant
             // Check that any unexpected tenants were deleted and that we had a potentially lossy recovery
             for (auto const& [tenantId, tenantEntry] : tenantMap) {
-                if (!self->createdTenants.count(tenantId)) {
-                    ASSERT(self->deletedTenants.count(tenantId));
+                if (!self->createdTenants.contains(tenantId)) {
+                    ASSERT(self->deletedTenants.contains(tenantId));
                     ASSERT(self->recoverManagementCluster);
                     ASSERT(self->recoverDataClusters);
                 }
diff --git a/fdbserver/workloads/PhysicalShardMove.actor.cpp b/fdbserver/workloads/PhysicalShardMove.actor.cpp
index 6d7b5658b7..c915fb7970 100644
--- a/fdbserver/workloads/PhysicalShardMove.actor.cpp
+++ b/fdbserver/workloads/PhysicalShardMove.actor.cpp
@@ -558,7 +558,7 @@ struct PhysicalShardMoveWorkLoad : TestWorkload {
         ASSERT(interfs.size() > teamSize - includes.size());
         while (includes.size() < teamSize) {
             const auto& interf = interfs[deterministicRandom()->randomInt(0, interfs.size())];
-            if (excludes.count(interf.uniqueID) == 0 && includes.count(interf.uniqueID) == 0) {
+            if (!excludes.contains(interf.uniqueID) && !includes.contains(interf.uniqueID)) {
                 includes.insert(interf.uniqueID);
             }
         }
diff --git a/fdbserver/workloads/RandomMoveKeys.actor.cpp b/fdbserver/workloads/RandomMoveKeys.actor.cpp
index 21e73d283b..8f47c781f9 100644
--- a/fdbserver/workloads/RandomMoveKeys.actor.cpp
+++ b/fdbserver/workloads/RandomMoveKeys.actor.cpp
@@ -115,7 +115,7 @@ struct MoveKeysWorkload : FailureInjectionWorkload {
         while (t.size() < teamSize && storageServers.size()) {
             auto s = storageServers.back();
             storageServers.pop_back();
-            if (!machines.count(s.locality.zoneId())) {
+            if (!machines.contains(s.locality.zoneId())) {
                 machines.insert(s.locality.zoneId());
                 t.insert(s);
             }
         }
diff --git a/fdbserver/workloads/RawTenantAccessWorkload.actor.cpp b/fdbserver/workloads/RawTenantAccessWorkload.actor.cpp
index 14de2a1e87..7e4eb91dd0 100644
--- a/fdbserver/workloads/RawTenantAccessWorkload.actor.cpp
+++ b/fdbserver/workloads/RawTenantAccessWorkload.actor.cpp
@@ -140,7 +140,7 @@ struct RawTenantAccessWorkload : TestWorkload {
             Key key = self->specialKeysTenantMapPrefix.withSuffix(self->indexToTenantName(*it));
             Optional value = wait(tr->get(key));
             // the commit proxies should have the same view of tenant map
-            ASSERT_EQ(value.present(), lastCommitted || (self->idx2Tid.count(*it) > 0));
+            ASSERT_EQ(value.present(), lastCommitted || (self->idx2Tid.contains(*it)));
             if (value.present()) {
                 auto id = self->extractTenantId(value.get());
@@ -182,7 +182,7 @@ struct RawTenantAccessWorkload : TestWorkload {
         ASSERT(hasNonexistentTenant());
         int tenantIdx = deterministicRandom()->randomInt(0, tenantCount);
         // find the nearest nonexistent tenant
-        while (idx2Tid.count(tenantIdx) || lastCreatedTenants.count(tenantIdx)) {
+        while (idx2Tid.contains(tenantIdx) || lastCreatedTenants.contains(tenantIdx)) {
             tenantIdx++;
             if (tenantIdx == tenantCount) {
                 tenantIdx = 0;
@@ -201,7 +201,7 @@ struct RawTenantAccessWorkload : TestWorkload {
         int tenantIdx = deterministicRandom()->randomInt(0, tenantCount);
         // find the nearest existing tenant
         while (true) {
-            if (idx2Tid.count(tenantIdx) && !lastDeletedTenants.count(tenantIdx)) {
+            if (idx2Tid.contains(tenantIdx) && !lastDeletedTenants.contains(tenantIdx)) {
                 break;
             }
             tenantIdx++;
@@ -241,7 +241,7 @@ struct RawTenantAccessWorkload : TestWorkload {
             // randomly generate a tenant id
             do {
                 tenantId = deterministicRandom()->randomInt64(0, std::numeric_limits::max());
-            } while (tid2Idx.count(tenantId));
+            } while (tid2Idx.contains(tenantId));
         }
         ASSERT_GE(tenantId, 0);
diff --git a/fdbserver/workloads/RemoveServersSafely.actor.cpp b/fdbserver/workloads/RemoveServersSafely.actor.cpp
index 7efdecf0be..ac66a396cd 100644
--- a/fdbserver/workloads/RemoveServersSafely.actor.cpp
+++ b/fdbserver/workloads/RemoveServersSafely.actor.cpp
@@ -82,12 +82,12 @@ struct RemoveServersSafelyWorkload : TestWorkload {
             .detail("Zoneid", it->locality.zoneId().get().toString())
             .detail("MachineId", it->locality.machineId().get().toString());
-        if (g_simulator->protectedAddresses.count(it->address) == 0)
+        if (!g_simulator->protectedAddresses.contains(it->address))
             processAddrs.push_back(pAddr);
         machineProcesses[machineIp].insert(pAddr);
         // add only one entry for each machine
-        if (!machinesMap.count(it->locality.zoneId()))
+        if (!machinesMap.contains(it->locality.zoneId()))
            machinesMap[it->locality.zoneId()] = machineIp;
         machine_ids[machineIp] = it->locality.zoneId();
@@ -107,7 +107,7 @@ struct RemoveServersSafelyWorkload : TestWorkload {
         for (auto k1 : toKill1) {
             AddressExclusion machineIp(k1.ip);
-            ASSERT(machineProcesses.count(machineIp));
+            ASSERT(machineProcesses.contains(machineIp));
             // kill all processes on this machine even if it has a different ip address
             std::copy(machineProcesses[machineIp].begin(),
                       machineProcesses[machineIp].end(),
@@ -118,7 +118,7 @@ struct RemoveServersSafelyWorkload : TestWorkload {
         processSet.clear();
         for (auto k2 : toKill2) {
             AddressExclusion machineIp(k2.ip);
-            ASSERT(machineProcesses.count(machineIp));
+            ASSERT(machineProcesses.contains(machineIp));
             std::copy(machineProcesses[machineIp].begin(),
                       machineProcesses[machineIp].end(),
                       std::inserter(processSet, processSet.end()));
@@ -128,13 +128,13 @@ struct RemoveServersSafelyWorkload : TestWorkload {
         for (AddressExclusion ex : toKill1) {
             AddressExclusion machineIp(ex.ip);
-            ASSERT(machine_ids.count(machineIp));
+            ASSERT(machine_ids.contains(machineIp));
             g_simulator->disableSwapToMachine(machine_ids[machineIp]);
         }
         for (AddressExclusion ex : toKill2) {
             AddressExclusion machineIp(ex.ip);
-            ASSERT(machine_ids.count(machineIp));
+            ASSERT(machine_ids.contains(machineIp));
             g_simulator->disableSwapToMachine(machine_ids[machineIp]);
         }
@@ -191,7 +191,7 @@ struct RemoveServersSafelyWorkload : TestWorkload {
                 .detail("Failed", processInfo->failed)
                 .detail("Excluded", processInfo->excluded)
                 .detail("Rebooting", processInfo->rebooting)
-                .detail("Protected", g_simulator->protectedAddresses.count(processInfo->address));
+                .detail("Protected", g_simulator->protectedAddresses.contains(processInfo->address));
         } else {
             TraceEvent("RemoveAndKill", functionId)
                 .detail("Step", "ProcessNotToKill")
@@ -200,7 +200,7 @@ struct RemoveServersSafelyWorkload : TestWorkload {
                 .detail("Failed", processInfo->failed)
                 .detail("Excluded", processInfo->excluded)
                 .detail("Rebooting", processInfo->rebooting)
-                .detail("Protected", g_simulator->protectedAddresses.count(processInfo->address));
+                .detail("Protected", g_simulator->protectedAddresses.contains(processInfo->address));
         }
     }
     TraceEvent("RemoveAndKill", functionId)
@@ -459,14 +459,14 @@ struct RemoveServersSafelyWorkload : TestWorkload {
         .detail("ClusterAvailable", g_simulator->isAvailable())
         .detail("RemoveViaClear", removeViaClear);
     for (auto& killProcess : killProcArray) {
-        if (g_simulator->protectedAddresses.count(killProcess->address))
+        if (g_simulator->protectedAddresses.contains(killProcess->address))
             TraceEvent("RemoveAndKill", functionId)
                 .detail("Step", "NoKill Process")
                 .detail("Process", describe(*killProcess))
                 .detail("Failed", killProcess->failed)
                 .detail("Rebooting", killProcess->rebooting)
                 .detail("ClusterAvailable", g_simulator->isAvailable())
-                .detail("Protected", g_simulator->protectedAddresses.count(killProcess->address));
+                .detail("Protected", g_simulator->protectedAddresses.contains(killProcess->address));
         else if (removeViaClear) {
             g_simulator->rebootProcess(killProcess, ISimulator::KillType::RebootProcessAndDelete);
             TraceEvent("RemoveAndKill", functionId)
@@ -475,12 +475,12 @@ struct RemoveServersSafelyWorkload : TestWorkload {
                 .detail("Failed", killProcess->failed)
                 .detail("Rebooting", killProcess->rebooting)
                 .detail("ClusterAvailable", g_simulator->isAvailable())
-                .detail("Protected", g_simulator->protectedAddresses.count(killProcess->address));
+                .detail("Protected", g_simulator->protectedAddresses.contains(killProcess->address));
         } /* else {
             g_simulator->killProcess( killProcess, ISimulator::KillType::KillInstantly );
-            TraceEvent("RemoveAndKill", functionId).detail("Step", "Kill Process").detail("Process", describe(*killProcess)).detail("Failed", killProcess->failed).detail("Rebooting", killProcess->rebooting).detail("ClusterAvailable", g_simulator->isAvailable()).detail("Protected", g_simulator->protectedAddresses.count(killProcess->address));
+            TraceEvent("RemoveAndKill", functionId).detail("Step", "Kill Process").detail("Process", describe(*killProcess)).detail("Failed", killProcess->failed).detail("Rebooting", killProcess->rebooting).detail("ClusterAvailable", g_simulator->isAvailable()).detail("Protected", g_simulator->protectedAddresses.contains(killProcess->address));
         } */
     }
@@ -798,7 +798,7 @@ struct RemoveServersSafelyWorkload : TestWorkload {
     bool killContainsProcess(AddressExclusion kill, NetworkAddress process) {
         return kill.excludes(process) ||
                (machineProcesses.find(kill) != machineProcesses.end() &&
-                machineProcesses[kill].count(AddressExclusion(process.ip, process.port)) > 0);
+                machineProcesses[kill].contains(AddressExclusion(process.ip, process.port)));
     }
     // Finds the localities list that can be excluded from the safe killable addresses list.
@@ -836,8 +836,8 @@ struct RemoveServersSafelyWorkload : TestWorkload {
         std::map localityData = processInfo->locality.getAllData();
         bool found = false;
         for (const auto& l : localityData) {
-            if (toKillLocalities.count(LocalityData::ExcludeLocalityPrefix.toString() + l.first + ":" +
-                                       l.second)) {
+            if (toKillLocalities.contains(LocalityData::ExcludeLocalityPrefix.toString() + l.first + ":" +
+                                          l.second)) {
                 found = true;
                 break;
             }
diff --git a/fdbserver/workloads/TenantManagementWorkload.actor.cpp b/fdbserver/workloads/TenantManagementWorkload.actor.cpp
index a4ff334993..6d9efdb558 100644
--- a/fdbserver/workloads/TenantManagementWorkload.actor.cpp
+++ b/fdbserver/workloads/TenantManagementWorkload.actor.cpp
@@ -459,7 +459,7 @@ struct TenantManagementWorkload : TestWorkload {
         state std::map tenantsToCreate;
         for (int i = 0; i < numTenants; ++i) {
             TenantName tenant = self->chooseTenantName(true);
-            while (tenantsToCreate.count(tenant)) {
+            while (tenantsToCreate.contains(tenant)) {
                 tenant = self->chooseTenantName(true);
             }
@@ -467,9 +467,9 @@ struct TenantManagementWorkload : TestWorkload {
             entry.tenantName = tenant;
             entry.tenantGroup = self->chooseTenantGroup(true);
-            if (self->createdTenants.count(tenant)) {
+            if (self->createdTenants.contains(tenant)) {
                 alreadyExists = true;
-            } else if (!tenantsToCreate.count(tenant)) {
+            } else if (!tenantsToCreate.contains(tenant)) {
                 ++newTenants;
             }
@@ -579,7 +579,7 @@ struct TenantManagementWorkload : TestWorkload {
         state typename std::map::iterator tenantItr;
         for (tenantItr = tenantsToCreate.begin(); tenantItr != tenantsToCreate.end(); ++tenantItr) {
             // Ignore any tenants that already existed
-            if (self->createdTenants.count(tenantItr->first)) {
+            if (self->createdTenants.contains(tenantItr->first)) {
                 continue;
             }
@@ -1452,7 +1452,7 @@ struct TenantManagementWorkload : TestWorkload {
             TenantName oldTenant = self->chooseTenantName(false);
             TenantName newTenant = self->chooseTenantName(false);
             bool checkOverlap =
-                oldTenant == newTenant || allTenantNames.count(oldTenant) || allTenantNames.count(newTenant);
+                oldTenant == newTenant || allTenantNames.contains(oldTenant) || allTenantNames.contains(newTenant);
             // These operation types do not handle rename collisions
             // reject the rename here if it has overlap
             if (checkOverlap && (operationType == OperationType::MANAGEMENT_TRANSACTION ||
@@ -1464,10 +1464,10 @@ struct TenantManagementWorkload : TestWorkload {
             tenantRenames[oldTenant] = newTenant;
             allTenantNames.insert(oldTenant);
             allTenantNames.insert(newTenant);
-            if (!self->createdTenants.count(oldTenant)) {
+            if (!self->createdTenants.contains(oldTenant)) {
                 tenantNotFound = true;
             }
-            if (self->createdTenants.count(newTenant)) {
+            if (self->createdTenants.contains(newTenant)) {
                 tenantExists = true;
             }
         }
@@ -1657,7 +1657,7 @@ struct TenantManagementWorkload : TestWorkload {
             ASSERT_GT(currentVersionstamp.version, originalReadVersion);
         }
         if (tenantGroupChanging) {
-            ASSERT(configuration.count("tenant_group"_sr) > 0);
+            ASSERT(configuration.contains("tenant_group"_sr));
             auto itr = self->createdTenants.find(tenant);
             if (itr->second.tenantGroup.present()) {
                 auto tenantGroupItr = self->createdTenantGroups.find(itr->second.tenantGroup.get());
diff --git a/fdbserver/workloads/Throughput.actor.cpp b/fdbserver/workloads/Throughput.actor.cpp
index 703ba05d68..537bd71efb 100644
--- a/fdbserver/workloads/Throughput.actor.cpp
+++ b/fdbserver/workloads/Throughput.actor.cpp
@@ -258,7 +258,7 @@ struct MeasurePeriodically : IMeasurer {
         std::vector m;
         msp.getMetrics(m);
         for (auto i = m.begin(); i != m.end(); ++i)
-            if (includeMetrics.count(i->name())) {
+            if (includeMetrics.contains(i->name())) {
                 accumulatedMetrics.push_back(i->withPrefix(prefix));
             }
diff --git a/fdbserver/workloads/VersionStamp.actor.cpp b/fdbserver/workloads/VersionStamp.actor.cpp
index e75190b43b..c7030ad6db 100644
--- a/fdbserver/workloads/VersionStamp.actor.cpp
+++ b/fdbserver/workloads/VersionStamp.actor.cpp
@@ -195,7 +195,7 @@ struct VersionStampWorkload : TestWorkload {
             RangeResult result_ = wait(tr.getRange(
                 KeyRangeRef(self->vsValuePrefix, endOfRange(self->vsValuePrefix)), self->nodeCount + 1));
             result = result_;
-            if (self->allowMetadataVersionKey && self->key_commit.count(metadataVersionKey)) {
+            if (self->allowMetadataVersionKey && self->key_commit.contains(metadataVersionKey)) {
                 Optional mVal = wait(tr.get(metadataVersionKey));
                 if (mVal.present()) {
                     result.push_back_deep(result.arena(), KeyValueRef(metadataVersionKey, mVal.get()));
diff --git a/fdbserver/workloads/WriteTagThrottling.actor.cpp b/fdbserver/workloads/WriteTagThrottling.actor.cpp
index 51839f6e8f..20a7b8d1f0 100644
--- a/fdbserver/workloads/WriteTagThrottling.actor.cpp
+++ b/fdbserver/workloads/WriteTagThrottling.actor.cpp
@@ -139,7 +139,7 @@ struct WriteTagThrottlingWorkload : KVWorkload {
                 .detail("BadActorThrottleRetries", badActorThrottleRetries)
                 .detail("GoodActorThrottleRetries", goodActorThrottleRetries);
         }
-        if (!throttledTags.empty() && throttledTags.count(badTag.toString()) == 0) {
+        if (!throttledTags.empty() && !throttledTags.contains(badTag.toString())) {
            TraceEvent(SevWarnAlways, "IncorrectThrottle")
                .detail("ThrottledTagNumber", throttledTags.size())
                .detail("ThrottledTags", setToString(throttledTags));
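Reviewer note (not part of the patch): every hunk above applies one of two mechanical rewrites. Membership tests on associative containers move from `count(x) > 0` / `count(x) == 0` to the C++20 `contains(x)` member, and linear scans over a `std::vector` move from `std::count(...) != 0` to `std::find(...) != end()`, which can stop at the first match instead of walking the whole range. A minimal standalone sketch of both idioms, using hypothetical names and assuming a C++20 toolchain:

    #include <algorithm>
    #include <cassert>
    #include <set>
    #include <vector>

    int main() {
        std::set<int> workers{ 1, 2, 3 };
        assert(workers.contains(2));  // same truth value as workers.count(2) > 0
        assert(!workers.contains(7)); // same truth value as workers.count(7) == 0

        std::vector<int> pages{ 4, 8, 15 };
        // std::find stops at the first match; std::count always scans the whole vector.
        bool seen = std::find(pages.begin(), pages.end(), 8) != pages.end();
        assert(seen);
        return 0;
    }

For the unique-key sets and maps touched here the replacements are drop-in; only multiset/multimap code that relied on the actual count would behave differently.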