[llvm] Qualify auto in range-based for loops (NFC)

Identified with the clang-tidy check readability-qualified-auto.
Kazu Hirata 2022-08-13 12:55:42 -07:00
parent 64f0f7e646
commit 109df7f9a4
33 changed files with 46 additions and 46 deletions
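
For context, readability-qualified-auto is a clang-tidy check (runnable as, e.g., clang-tidy -checks='-*,readability-qualified-auto' <file> -- <compile flags>) that flags deduced auto variables that are really pointers and suggests spelling out the pointer and its constness. The following is a minimal sketch of the before/after pattern, using made-up names rather than code from this patch:

#include <vector>

struct GlobalValue { int ID = 0; };

void visitAll(const std::vector<const GlobalValue *> &Values) {
  // Before: auto deduces to const GlobalValue *, but the pointer is hidden
  // at the use site.
  for (auto GV : Values)
    (void)GV->ID;

  // After: const auto * makes the indirection and the constness explicit,
  // which is the rewrite this commit applies across the affected files.
  for (const auto *GV : Values)
    (void)GV->ID;
}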

View File

@ -253,7 +253,7 @@ void DWARFTypePrinter::appendUnqualifiedNameAfter(
if (getValOrNull(DW_AT_LLVM_ptrauth_authenticates_null_values))
optionsVec.push_back("authenticates-null-values");
std::string options;
-for (auto option : optionsVec) {
+for (const auto *option : optionsVec) {
if (options.size())
options += ",";
options += option;

View File

@ -237,7 +237,7 @@ void CompileOnDemandLayer::expandPartition(GlobalValueSet &Partition) {
bool ContainsGlobalVariables = false;
std::vector<const GlobalValue *> GVsToAdd;
-for (auto *GV : Partition)
+for (const auto *GV : Partition)
if (isa<GlobalAlias>(GV))
GVsToAdd.push_back(
cast<GlobalValue>(cast<GlobalAlias>(GV)->getAliasee()));
@ -252,7 +252,7 @@ void CompileOnDemandLayer::expandPartition(GlobalValueSet &Partition) {
for (auto &G : M.globals())
GVsToAdd.push_back(&G);
-for (auto *GV : GVsToAdd)
+for (const auto *GV : GVsToAdd)
Partition.insert(GV);
}
@ -336,13 +336,13 @@ void CompileOnDemandLayer::emitPartition(
{
std::vector<const GlobalValue*> HashGVs;
HashGVs.reserve(GVsToExtract->size());
-for (auto *GV : *GVsToExtract)
+for (const auto *GV : *GVsToExtract)
HashGVs.push_back(GV);
llvm::sort(HashGVs, [](const GlobalValue *LHS, const GlobalValue *RHS) {
return LHS->getName() < RHS->getName();
});
hash_code HC(0);
-for (auto *GV : HashGVs) {
+for (const auto *GV : HashGVs) {
assert(GV->hasName() && "All GVs to extract should be named by now");
auto GVName = GV->getName();
HC = hash_combine(HC, hash_combine_range(GVName.begin(), GVName.end()));

View File

@ -295,7 +295,7 @@ SpeculateQuery::ResultTy SequenceBBQuery::operator()(Function &F) {
else
SequencedBlocks = queryCFG(F, CallerBlocks);
-for (auto BB : SequencedBlocks)
+for (const auto *BB : SequencedBlocks)
findCalles(BB, Calles);
CallerAndCalles.insert({F.getName(), std::move(Calles)});

View File

@ -6075,7 +6075,7 @@ void Verifier::verifyCompileUnits() {
SmallPtrSet<const Metadata *, 2> Listed;
if (CUs)
Listed.insert(CUs->op_begin(), CUs->op_end());
-for (auto *CU : CUVisited)
+for (const auto *CU : CUVisited)
CheckDI(Listed.count(CU), "DICompileUnit not listed in llvm.dbg.cu", CU);
CUVisited.clear();
}
@ -6085,7 +6085,7 @@ void Verifier::verifyDeoptimizeCallingConvs() {
return;
const Function *First = DeoptimizeDeclarations[0];
-for (auto *F : makeArrayRef(DeoptimizeDeclarations).slice(1)) {
+for (const auto *F : makeArrayRef(DeoptimizeDeclarations).slice(1)) {
Check(First->getCallingConv() == F->getCallingConv(),
"All llvm.experimental.deoptimize declarations must have the same "
"calling convention",

View File

@ -1734,7 +1734,7 @@ IRMover::IRMover(Module &M) : Composite(M) {
// Self-map metadatas in the destination module. This is needed when
// DebugTypeODRUniquing is enabled on the LLVMContext, since metadata in the
// destination module may be reached from the source module.
-for (auto *MD : StructTypes.getVisitedMetadata()) {
+for (const auto *MD : StructTypes.getVisitedMetadata()) {
SharedMDs[MD].reset(const_cast<MDNode *>(MD));
}
}

View File

@ -3760,7 +3760,7 @@ bool MasmParser::emitIntegralValues(unsigned Size, unsigned *Count) {
if (checkForValidSection() || parseScalarInstList(Size, Values))
return true;
-for (auto Value : Values) {
+for (const auto *Value : Values) {
emitIntValue(Value, Size);
}
if (Count)

View File

@ -938,7 +938,7 @@ void WindowsResourceCOFFWriter::writeDirectoryTree() {
RelocationAddresses.resize(Data.size());
// Now write all the resource data entries.
-for (auto DataNodes : DataEntriesTreeOrder) {
+for (const auto *DataNodes : DataEntriesTreeOrder) {
auto *Entry = reinterpret_cast<coff_resource_data_entry *>(BufferStart +
CurrentOffset);
RelocationAddresses[DataNodes->getDataIndex()] = CurrentRelativeOffset;

View File

@ -878,7 +878,7 @@ PreservedCFGCheckerInstrumentation::CFG::CFG(const Function *F,
for (const auto &BB : *F) {
if (BBGuards)
BBGuards->try_emplace(intptr_t(&BB), &BB);
-for (auto *Succ : successors(&BB)) {
+for (const auto *Succ : successors(&BB)) {
Graph[&BB][Succ]++;
if (BBGuards)
BBGuards->try_emplace(intptr_t(Succ), Succ);

View File

@ -491,12 +491,12 @@ uint64_t GCOVBlock::getCyclesCount(const BlockVector &blocks) {
uint64_t count = 0, d;
for (;;) {
// Make blocks on the line traversable and try finding a cycle.
-for (auto b : blocks) {
+for (const auto *b : blocks) {
const_cast<GCOVBlock *>(b)->traversable = true;
const_cast<GCOVBlock *>(b)->incoming = nullptr;
}
d = 0;
-for (auto block : blocks) {
+for (const auto *block : blocks) {
auto *b = const_cast<GCOVBlock *>(block);
if (b->traversable && (d = augmentOneCycle(b, stack)) > 0)
break;
@ -507,7 +507,7 @@ uint64_t GCOVBlock::getCyclesCount(const BlockVector &blocks) {
}
// If there is no more loop, all traversable bits should have been cleared.
// This property is needed by subsequent calls.
-for (auto b : blocks) {
+for (const auto *b : blocks) {
assert(!b->traversable);
(void)b;
}

View File

@ -547,7 +547,7 @@ RISCVISAInfo::parseArchString(StringRef Arch, bool EnableExperimentalExtension,
// No matter which version is given to `g`, we always set imafd to default
// version since the we don't have clear version scheme for that on
// ISA spec.
-for (auto Ext : {"i", "m", "a", "f", "d"})
+for (const auto *Ext : {"i", "m", "a", "f", "d"})
if (auto Version = findDefaultVersion(Ext))
ISAInfo->addExtension(Ext, Version->Major, Version->Minor);
else

View File

@ -2503,7 +2503,7 @@ bool AArch64FastISel::selectIndirectBr(const Instruction *I) {
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II).addReg(AddrReg);
// Make sure the CFG is up-to-date.
-for (auto *Succ : BI->successors())
+for (const auto *Succ : BI->successors())
FuncInfo.MBB->addSuccessor(FuncInfo.MBBMap[Succ]);
return true;

View File

@ -237,7 +237,7 @@ shouldReplaceInst(MachineFunction *MF, const MCInstrDesc *InstDesc,
SIMDInstrTable[InstID] = false;
return false;
}
-for (auto IDesc : InstDescRepl)
+for (const auto *IDesc : InstDescRepl)
{
SCDescRepl = SchedModel.getMCSchedModel()->getSchedClassDesc(
IDesc->getSchedClass());
@ -250,7 +250,7 @@ shouldReplaceInst(MachineFunction *MF, const MCInstrDesc *InstDesc,
// Replacement cost.
unsigned ReplCost = 0;
-for (auto IDesc :InstDescRepl)
+for (const auto *IDesc :InstDescRepl)
ReplCost += SchedModel.computeInstrLatency(IDesc->getOpcode());
if (SchedModel.computeInstrLatency(InstDesc->getOpcode()) > ReplCost)

View File

@ -306,7 +306,7 @@ DecodeStatus AArch64Disassembler::getInstruction(MCInst &MI, uint64_t &Size,
const uint8_t *Tables[] = {DecoderTable32, DecoderTableFallback32};
-for (auto Table : Tables) {
+for (const auto *Table : Tables) {
DecodeStatus Result =
decodeInstruction(Table, MI, Insn, Address, this, STI);

View File

@ -303,7 +303,7 @@ GCNILPScheduler::schedule(ArrayRef<const SUnit*> BotRoots,
for (const SUnit &SU : SUnits)
CalcNodeSethiUllmanNumber(&SU, SUNumbers);
-for (auto SU : BotRoots) {
+for (const auto *SU : BotRoots) {
AvailQueue.push_back(
*new (Alloc.Allocate()) Candidate(const_cast<SUnit*>(SU)));
}

View File

@ -326,7 +326,7 @@ GCNIterativeScheduler::detachSchedule(ScheduleRef Schedule) const {
Res.push_back(FirstDbgValue);
const auto DbgB = DbgValues.begin(), DbgE = DbgValues.end();
-for (auto SU : Schedule) {
+for (const auto *SU : Schedule) {
Res.push_back(SU->getInstr());
const auto &D = std::find_if(DbgB, DbgE, [SU](decltype(*DbgB) &P) {
return P.second == SU->getInstr();

View File

@ -232,7 +232,7 @@ GCNMinRegScheduler::schedule(ArrayRef<const SUnit*> TopRoots,
int StepNo = 0;
-for (auto SU : TopRoots) {
+for (const auto *SU : TopRoots) {
RQ.push_back(*new (Alloc.Allocate()) Candidate(SU, StepNo));
}
releaseSuccessors(&DAG.EntrySU, StepNo);

View File

@ -12886,7 +12886,7 @@ static bool hasCFUser(const Value *V, SmallPtrSet<const Value *, 16> &Visited,
if (!Visited.insert(V).second)
return false;
bool Result = false;
-for (auto U : V->users()) {
+for (const auto *U : V->users()) {
if (const IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(U)) {
if (V == U->getOperand(1)) {
switch (Intrinsic->getIntrinsicID()) {

View File

@ -124,7 +124,7 @@ bool ARMAsmPrinter::runOnMachineFunction(MachineFunction &MF) {
// Collect all globals that had their storage promoted to a constant pool.
// Functions are emitted before variables, so this accumulates promoted
// globals from all functions in PromotedGlobals.
-for (auto *GV : AFI->getGlobalsPromotedToConstantPool())
+for (const auto *GV : AFI->getGlobalsPromotedToConstantPool())
PromotedGlobals.insert(GV);
// Calculate this function's optimization goal.

View File

@ -2156,7 +2156,7 @@ void HexagonFrameLowering::determineCalleeSaves(MachineFunction &MF,
for (unsigned VR : NewRegs)
SpillRCs.insert(MRI.getRegClass(VR));
-for (auto *RC : SpillRCs) {
+for (const auto *RC : SpillRCs) {
if (!needToReserveScavengingSpillSlots(MF, HRI, RC))
continue;
unsigned Num = 1;

View File

@ -430,7 +430,7 @@ MCSection *HexagonTargetObjectFile::selectSmallSectionForGlobal(
const Function *
HexagonTargetObjectFile::getLutUsedFunction(const GlobalObject *GO) const {
const Function *ReturnFn = nullptr;
-for (auto U : GO->users()) {
+for (const auto *U : GO->users()) {
// validate each instance of user to be a live function.
auto *I = dyn_cast<Instruction>(U);
if (!I)

View File

@ -71,7 +71,7 @@ static bool isNullOrUndef(const Constant *C) {
return true;
if (!isa<ConstantAggregate>(C))
return false;
-for (auto Operand : C->operand_values()) {
+for (const auto *Operand : C->operand_values()) {
if (!isNullOrUndef(cast<Constant>(Operand)))
return false;
}

View File

@ -297,7 +297,7 @@ bool VZeroUpperInserter::runOnMachineFunction(MachineFunction &MF) {
// need to insert any VZEROUPPER instructions. This is constant-time, so it
// is cheap in the common case of no ymm/zmm use.
bool YmmOrZmmUsed = FnHasLiveInYmmOrZmm;
-for (auto *RC : {&X86::VR256RegClass, &X86::VR512_0_15RegClass}) {
+for (const auto *RC : {&X86::VR256RegClass, &X86::VR512_0_15RegClass}) {
if (!YmmOrZmmUsed) {
for (MCPhysReg R : *RC) {
if (!MRI.reg_nodbg_empty(R)) {

View File

@ -473,7 +473,7 @@ static bool getPotentialCopiesOfMemoryValue(
// Only if we were successful collection all potential copies we record
// dependences (on non-fix AAPointerInfo AAs). We also only then modify the
// given PotentialCopies container.
-for (auto *PI : PIs) {
+for (const auto *PI : PIs) {
if (!PI->getState().isAtFixpoint())
UsedAssumedInformation = true;
A.recordDependence(*PI, QueryingAA, DepClassTy::OPTIONAL);

View File

@ -9515,7 +9515,7 @@ private:
ArrayRef<const AACallEdges *> AAEdgesList) {
ChangeStatus Change = ChangeStatus::UNCHANGED;
-for (auto *AAEdges : AAEdgesList) {
+for (const auto *AAEdges : AAEdgesList) {
if (AAEdges->hasUnknownCallee()) {
if (!CanReachUnknownCallee) {
LLVM_DEBUG(dbgs()
@ -9563,7 +9563,7 @@ private:
const Function &Fn) const {
// Handle the most trivial case first.
-for (auto *AAEdges : AAEdgesList) {
+for (const auto *AAEdges : AAEdgesList) {
const SetVector<Function *> &Edges = AAEdges->getOptimisticEdges();
if (Edges.count(const_cast<Function *>(&Fn)))
@ -9591,7 +9591,7 @@ private:
}
// The result is false for now, set dependencies and leave.
-for (auto *Dep : Deps)
+for (const auto *Dep : Deps)
A.recordDependence(*Dep, AA, DepClassTy::REQUIRED);
return false;

View File

@ -650,7 +650,7 @@ static bool allUsesOfLoadedValueWillTrapIfNull(const GlobalVariable *GV) {
Worklist.push_back(GV);
while (!Worklist.empty()) {
const Value *P = Worklist.pop_back_val();
-for (auto *U : P->users()) {
+for (const auto *U : P->users()) {
if (auto *LI = dyn_cast<LoadInst>(U)) {
SmallPtrSet<const PHINode *, 8> PHIs;
if (!AllUsesOfValueWillTrapIfNull(LI, PHIs))

View File

@ -1541,7 +1541,7 @@ bool ObjCARCOpt::VisitInstructionTopDown(
if (const SmallPtrSet<const Value *, 2> *Roots =
getRCIdentityRootsFromReleaseInsertPt(
Inst, ReleaseInsertPtToRCIdentityRoots))
-for (auto *Root : *Roots) {
+for (const auto *Root : *Roots) {
TopDownPtrState &S = MyStates.getPtrTopDownState(Root);
// Disable code motion if the current position is S_Retain to prevent
// moving the objc_retain call past objc_release calls. If it's

View File

@ -1699,7 +1699,7 @@ bool NewGVN::isCycleFree(const Instruction *I) const {
return isa<PHINode>(V) || isCopyOfAPHI(V);
});
ICS = AllPhis ? ICS_CycleFree : ICS_Cycle;
-for (auto *Member : SCC)
+for (const auto *Member : SCC)
if (auto *MemberPhi = dyn_cast<PHINode>(Member))
InstCycleState.insert({MemberPhi, ICS});
}
@ -2090,7 +2090,7 @@ void NewGVN::markMemoryDefTouched(const MemoryAccess *MA) {
void NewGVN::markMemoryUsersTouched(const MemoryAccess *MA) {
if (isa<MemoryUse>(MA))
return;
-for (auto U : MA->users())
+for (const auto *U : MA->users())
TouchedInstructions.set(MemoryToDFSNum(U));
touchAndErase(MemoryToUsers, MA);
}
@ -2102,7 +2102,7 @@ void NewGVN::markPredicateUsersTouched(Instruction *I) {
// Mark users affected by a memory leader change.
void NewGVN::markMemoryLeaderChangeTouched(CongruenceClass *CC) {
-for (auto M : CC->memory())
+for (const auto *M : CC->memory())
markMemoryDefTouched(M);
}
@ -3151,7 +3151,7 @@ bool NewGVN::singleReachablePHIPath(
return true;
const auto *EndDef = First;
-for (auto *ChainDef : optimized_def_chain(First)) {
+for (const auto *ChainDef : optimized_def_chain(First)) {
if (ChainDef == Second)
return true;
if (MSSA->isLiveOnEntryDef(ChainDef))
@ -3196,7 +3196,7 @@ void NewGVN::verifyMemoryCongruency() const {
assert(MemoryAccessToClass.lookup(CC->getMemoryLeader()) == CC &&
"Representative MemoryAccess does not appear to be reverse "
"mapped properly");
-for (auto M : CC->memory())
+for (const auto *M : CC->memory())
assert(MemoryAccessToClass.lookup(M) == CC &&
"Memory member does not appear to be reverse mapped properly");
}

View File

@ -138,7 +138,7 @@ static bool isBlockValidForExtraction(const BasicBlock &BB,
if (auto *UBB = CSI->getUnwindDest())
if (!Result.count(UBB))
return false;
-for (auto *HBB : CSI->handlers())
+for (const auto *HBB : CSI->handlers())
if (!Result.count(const_cast<BasicBlock*>(HBB)))
return false;
continue;

View File

@ -455,7 +455,7 @@ bool llvm::nonStrictlyPostDominate(const BasicBlock *ThisBlock,
if (PDT->dominates(CurBlock, OtherBlock))
return true;
-for (auto *Pred : predecessors(CurBlock)) {
+for (const auto *Pred : predecessors(CurBlock)) {
if (Pred == CommonDominator || Visited.count(Pred))
continue;
WorkList.push_back(Pred);

View File

@ -1166,7 +1166,7 @@ static bool hasHardUserWithinLoop(const Loop *L, const Instruction *I) {
if (Curr->mayHaveSideEffects())
return true;
// Otherwise, add all its users to worklist.
-for (auto U : Curr->users()) {
+for (const auto *U : Curr->users()) {
auto *UI = cast<Instruction>(U);
if (Visited.insert(UI).second)
WorkList.push_back(UI);

View File

@ -2519,7 +2519,7 @@ Value *SCEVExpander::expandUnionPredicate(const SCEVUnionPredicate *Union,
Instruction *IP) {
// Loop over all checks in this set.
SmallVector<Value *> Checks;
-for (auto Pred : Union->getPredicates()) {
+for (const auto *Pred : Union->getPredicates()) {
Checks.push_back(expandCodeForPredicate(Pred, IP));
Builder.SetInsertPoint(IP);
}

View File

@ -74,7 +74,7 @@ static void addNonConstUser(ClusterMapType &GVtoClusterMap,
// Adds all GlobalValue users of V to the same cluster as GV.
static void addAllGlobalValueUsers(ClusterMapType &GVtoClusterMap,
const GlobalValue *GV, const Value *V) {
-for (auto *U : V->users()) {
+for (const auto *U : V->users()) {
SmallVector<const User *, 4> Worklist;
Worklist.push_back(U);
while (!Worklist.empty()) {

View File

@ -9008,7 +9008,7 @@ VPlanPtr LoopVectorizationPlanner::buildVPlanWithVPRecipes(
// Interleave memory: for each Interleave Group we marked earlier as relevant
// for this VPlan, replace the Recipes widening its memory instructions with a
// single VPInterleaveRecipe at its insertion point.
-for (auto IG : InterleaveGroups) {
+for (const auto *IG : InterleaveGroups) {
auto *Recipe = cast<VPWidenMemoryInstructionRecipe>(
RecipeBuilder.getRecipe(IG->getInsertPos()));
SmallVector<VPValue *, 4> StoredValues;