[CodeGen] Qualify auto variables in for loops (NFC)

Kazu Hirata 2022-07-17 01:33:28 -07:00
parent 6f32e71b54
commit 9e6d1f4b5d
68 changed files with 167 additions and 166 deletions
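
The change is mechanical and behavior-preserving: wherever a range-based for loop binds its element as a bare "auto", the declaration is qualified, using "auto *" when the element is a pointer and adding "const" when the loop body only reads it. This matches the LLVM Coding Standards' advice that "auto" should not hide whether a variable is a pointer or a copy, and it is the kind of rewrite clang-tidy's readability-qualified-auto check suggests. A minimal sketch of the before/after pattern (illustrative names, not code from this commit):

    #include <vector>

    struct Node { int Value = 0; };

    int sumValues(const std::vector<Node *> &Nodes) {
      int Total = 0;
      // Before: "for (auto N : Nodes)" compiles, but the loop header
      // hides the fact that N is a pointer.
      // After: the * is explicit, and const documents that the loop
      // never writes through N.
      for (const auto *N : Nodes)
        Total += N->Value;
      return Total;
    }

For non-pointer elements the same rewrite turns "for (auto &X : Range)" into "for (const auto &X : Range)" where X is only read; hence the diff below touches many loops while leaving the generated code unchanged.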


@ -245,8 +245,8 @@ public:
void AccelTableWriter::emitHashes() const {
uint64_t PrevHash = std::numeric_limits<uint64_t>::max();
unsigned BucketIdx = 0;
- for (auto &Bucket : Contents.getBuckets()) {
- for (auto &Hash : Bucket) {
+ for (const auto &Bucket : Contents.getBuckets()) {
+ for (const auto &Hash : Bucket) {
uint32_t HashValue = Hash->HashValue;
if (SkipIdenticalHashes && PrevHash == HashValue)
continue;
@ -327,7 +327,7 @@ void AppleAccelTableWriter::emitData() const {
const auto &Buckets = Contents.getBuckets();
for (const AccelTableBase::HashList &Bucket : Buckets) {
uint64_t PrevHash = std::numeric_limits<uint64_t>::max();
- for (auto &Hash : Bucket) {
+ for (const auto &Hash : Bucket) {
// Terminate the previous entry if there is no hash collision with the
// current one.
if (PrevHash != std::numeric_limits<uint64_t>::max() &&
@ -667,12 +667,12 @@ void AccelTableBase::print(raw_ostream &OS) const {
}
OS << "Buckets and Hashes: \n";
- for (auto &Bucket : Buckets)
- for (auto &Hash : Bucket)
+ for (const auto &Bucket : Buckets)
+ for (const auto &Hash : Bucket)
Hash->print(OS);
OS << "Data: \n";
- for (auto &E : Entries)
+ for (const auto &E : Entries)
E.second.print(OS);
}


@ -488,7 +488,7 @@ bool AsmPrinter::doInitialization(Module &M) {
GCModuleInfo *MI = getAnalysisIfAvailable<GCModuleInfo>();
assert(MI && "AsmPrinter didn't require GCModuleInfo?");
- for (auto &I : *MI)
+ for (const auto &I : *MI)
if (GCMetadataPrinter *MP = GetOrCreateGCPrinter(*I))
MP->beginAssembly(M, *MI, *this);
@ -1731,7 +1731,7 @@ static unsigned getNumGlobalVariableUses(const Constant *C) {
return 1;
unsigned NumUses = 0;
- for (auto *CU : C->users())
+ for (const auto *CU : C->users())
NumUses += getNumGlobalVariableUses(dyn_cast<Constant>(CU));
return NumUses;
@ -1754,7 +1754,7 @@ static bool isGOTEquivalentCandidate(const GlobalVariable *GV,
// To be a got equivalent, at least one of its users need to be a constant
// expression used by another global variable.
- for (auto *U : GV->users())
+ for (const auto *U : GV->users())
NumGOTEquivUsers += getNumGlobalVariableUses(dyn_cast<Constant>(U));
return NumGOTEquivUsers > 0;
@ -1797,7 +1797,7 @@ void AsmPrinter::emitGlobalGOTEquivs() {
}
GlobalGOTEquivs.clear();
- for (auto *GV : FailedCandidates)
+ for (const auto *GV : FailedCandidates)
emitGlobalVariable(GV);
}
@ -3717,7 +3717,7 @@ void AsmPrinter::emitStackMaps(StackMaps &SM) {
// No GC strategy, use the default format.
NeedsDefault = true;
else
- for (auto &I : *MI) {
+ for (const auto &I : *MI) {
if (GCMetadataPrinter *MP = GetOrCreateGCPrinter(*I))
if (MP->emitStackMaps(SM, *this))
continue;


@ -309,7 +309,7 @@ void AsmPrinter::emitDwarfDIE(const DIE &Die) const {
// Emit the DIE children if any.
if (Die.hasChildren()) {
- for (auto &Child : Die.children())
+ for (const auto &Child : Die.children())
emitDwarfDIE(Child);
OutStreamer->AddComment("End Of Children Mark");


@ -374,7 +374,7 @@ void DIEHash::computeHash(const DIE &Die) {
addAttributes(Die);
// Then hash each of the children of the DIE.
- for (auto &C : Die.children()) {
+ for (const auto &C : Die.children()) {
// 7.27 Step 7
// If C is a nested type entry or a member function entry, ...
if (isType(C.getTag()) || (C.getTag() == dwarf::DW_TAG_subprogram && isType(C.getParent()->getTag()))) {


@ -340,11 +340,11 @@ static void clobberRegEntries(InlinedEntity Var, unsigned RegNo,
if (Entry.getInstr()->hasDebugOperandForReg(RegNo)) {
IndicesToErase.push_back(Index);
Entry.endEntry(ClobberIndex);
- for (auto &MO : Entry.getInstr()->debug_operands())
+ for (const auto &MO : Entry.getInstr()->debug_operands())
if (MO.isReg() && MO.getReg() && MO.getReg() != RegNo)
MaybeRemovedRegisters.insert(MO.getReg());
} else {
- for (auto &MO : Entry.getInstr()->debug_operands())
+ for (const auto &MO : Entry.getInstr()->debug_operands())
if (MO.isReg() && MO.getReg())
KeepRegisters.insert(MO.getReg());
}


@ -304,7 +304,7 @@ void DebugHandlerBase::beginFunction(const MachineFunction *MF) {
LabelsBeforeInsn[Entries.front().getInstr()] = Asm->getFunctionBegin();
if (Entries.front().getInstr()->getDebugExpression()->isFragment()) {
// Mark all non-overlapping initial fragments.
- for (auto I = Entries.begin(); I != Entries.end(); ++I) {
+ for (const auto *I = Entries.begin(); I != Entries.end(); ++I) {
if (!I->isDbgValue())
continue;
const DIExpression *Fragment = I->getInstr()->getDebugExpression();


@ -848,7 +848,7 @@ DIE *DwarfCompileUnit::constructVariableDIEImpl(const DbgVariable &DV,
Optional<unsigned> NVPTXAddressSpace;
DIELoc *Loc = new (DIEValueAllocator) DIELoc;
DIEDwarfExpression DwarfExpr(*Asm, *this, *Loc);
- for (auto &Fragment : DV.getFrameIndexExprs()) {
+ for (const auto &Fragment : DV.getFrameIndexExprs()) {
Register FrameReg;
const DIExpression *Expr = Fragment.Expr;
const TargetFrameLowering *TFI = Asm->MF->getSubtarget().getFrameLowering();
@ -970,7 +970,7 @@ sortLocalVars(SmallVectorImpl<DbgVariable *> &Input) {
SmallDenseSet<DbgVariable *, 8> Visiting;
// Initialize the worklist and the DIVariable lookup table.
- for (auto Var : reverse(Input)) {
+ for (auto *Var : reverse(Input)) {
DbgVar.insert({Var->getVariable(), Var});
WorkList.push_back({Var, 0});
}
@ -1005,7 +1005,7 @@ sortLocalVars(SmallVectorImpl<DbgVariable *> &Input) {
// Push dependencies and this node onto the worklist, so that this node is
// visited again after all of its dependencies are handled.
WorkList.push_back({Var, 1});
- for (auto *Dependency : dependencies(Var)) {
+ for (const auto *Dependency : dependencies(Var)) {
// Don't add dependency if it is in a different lexical scope or a global.
if (const auto *Dep = dyn_cast<const DILocalVariable>(Dependency))
if (DbgVariable *Var = DbgVar.lookup(Dep))


@ -819,7 +819,7 @@ static void collectCallSiteParameters(const MachineInstr *CallMI,
}
// Do not emit CSInfo for undef forwarding registers.
- for (auto &MO : CallMI->uses())
+ for (const auto &MO : CallMI->uses())
if (MO.isReg() && MO.isUndef())
ForwardedRegWorklist.erase(MO.getReg());
@ -2235,7 +2235,7 @@ void DwarfDebug::endFunctionImpl(const MachineFunction *MF) {
#endif
// Construct abstract scopes.
for (LexicalScope *AScope : LScopes.getAbstractScopesList()) {
- auto *SP = cast<DISubprogram>(AScope->getScopeNode());
+ const auto *SP = cast<DISubprogram>(AScope->getScopeNode());
for (const DINode *DN : SP->getRetainedNodes()) {
if (!Processed.insert(InlinedEntity(DN, nullptr)).second)
continue;
@ -2527,7 +2527,7 @@ void DwarfDebug::emitDebugLocEntry(ByteStreamer &Streamer,
using Encoding = DWARFExpression::Operation::Encoding;
uint64_t Offset = 0;
- for (auto &Op : Expr) {
+ for (const auto &Op : Expr) {
assert(Op.getCode() != dwarf::DW_OP_const_type &&
"3 operand ops not yet supported");
Streamer.emitInt8(Op.getCode(), Comment != End ? *(Comment++) : "");


@ -187,7 +187,7 @@ bool AtomicExpand::runOnFunction(Function &F) {
AtomicInsts.push_back(&I);
bool MadeChange = false;
- for (auto I : AtomicInsts) {
+ for (auto *I : AtomicInsts) {
auto LI = dyn_cast<LoadInst>(I);
auto SI = dyn_cast<StoreInst>(I);
auto RMWI = dyn_cast<AtomicRMWInst>(I);
@ -1371,7 +1371,7 @@ bool AtomicExpand::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
// Look for any users of the cmpxchg that are just comparing the loaded value
// against the desired one, and replace them with the CFG-derived version.
SmallVector<ExtractValueInst *, 2> PrunedInsts;
- for (auto User : CI->users()) {
+ for (auto *User : CI->users()) {
ExtractValueInst *EV = dyn_cast<ExtractValueInst>(User);
if (!EV)
continue;
@ -1388,7 +1388,7 @@ bool AtomicExpand::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
}
// We can remove the instructions now we're no longer iterating through them.
- for (auto EV : PrunedInsts)
+ for (auto *EV : PrunedInsts)
EV->eraseFromParent();
if (!CI->use_empty()) {


@ -297,7 +297,7 @@ static bool hasInstrProfHashMismatch(MachineFunction &MF) {
auto *Existing = MF.getFunction().getMetadata(LLVMContext::MD_annotation);
if (Existing) {
MDTuple *Tuple = cast<MDTuple>(Existing);
- for (auto &N : Tuple->operands())
+ for (const auto &N : Tuple->operands())
if (cast<MDString>(N.get())->getString() == MetadataName)
return true;
}


@ -279,7 +279,7 @@ float VirtRegAuxInfo::weightCalcHelper(LiveInterval &LI, SlotIndex *Start,
MRI.clearSimpleHint(LI.reg());
std::set<Register> HintedRegs;
- for (auto &Hint : CopyHints) {
+ for (const auto &Hint : CopyHints) {
if (!HintedRegs.insert(Hint.Reg).second ||
(TargetHint.first != 0 && Hint.Reg == TargetHint.second))
// Don't add the same reg twice or the target-type hint again.


@ -730,7 +730,7 @@ bool CodeGenPrepare::eliminateFallThrough(Function &F) {
// (Repeatedly) merging blocks into their predecessors can create redundant
// debug intrinsics.
- for (auto &Pred : Preds)
+ for (const auto &Pred : Preds)
if (auto *BB = cast_or_null<BasicBlock>(Pred))
RemoveRedundantDbgInstrs(BB);
@ -3684,7 +3684,7 @@ private:
// Phi we added (subject to match) and both of them is in the same basic
// block then we can match our pair if values match. So we state that
// these values match and add it to work list to verify that.
- for (auto B : Item.first->blocks()) {
+ for (auto *B : Item.first->blocks()) {
Value *FirstValue = Item.first->getIncomingValueForBlock(B);
Value *SecondValue = Item.second->getIncomingValueForBlock(B);
if (FirstValue == SecondValue)


@ -239,7 +239,7 @@ void VLIWPacketizerList::PacketizeMIs(MachineBasicBlock *MBB,
});
if (ResourceAvail && shouldAddToPacket(MI)) {
// Dependency check for MI with instructions in CurrentPacketMIs.
- for (auto MJ : CurrentPacketMIs) {
+ for (auto *MJ : CurrentPacketMIs) {
SUnit *SUJ = MIToSUnit[MJ];
assert(SUJ && "Missing SUnit Info!");


@ -808,7 +808,7 @@ void updateDomTree(MachineDominatorTree *DomTree, const SSAIfConv &IfConv,
// TBB and FBB should not dominate any blocks.
// Tail children should be transferred to Head.
MachineDomTreeNode *HeadNode = DomTree->getNode(IfConv.Head);
- for (auto B : Removed) {
+ for (auto *B : Removed) {
MachineDomTreeNode *Node = DomTree->getNode(B);
assert(Node != HeadNode && "Cannot erase the head node");
while (Node->getNumChildren()) {
@ -826,7 +826,7 @@ void updateLoops(MachineLoopInfo *Loops,
return;
// If-conversion doesn't change loop structure, and it doesn't mess with back
// edges, so updating LoopInfo is simply removing the dead blocks.
- for (auto B : Removed)
+ for (auto *B : Removed)
Loops->removeBlock(B);
}
} // namespace
@ -1065,7 +1065,7 @@ bool EarlyIfConverter::runOnMachineFunction(MachineFunction &MF) {
// if-conversion in a single pass. The tryConvertIf() function may erase
// blocks, but only blocks dominated by the head block. This makes it safe to
// update the dominator tree while the post-order iterator is still active.
- for (auto DomNode : post_order(DomTree))
+ for (auto *DomNode : post_order(DomTree))
if (tryConvertIf(DomNode->getBlock()))
Changed = true;
@ -1198,7 +1198,7 @@ bool EarlyIfPredicator::runOnMachineFunction(MachineFunction &MF) {
// if-conversion in a single pass. The tryConvertIf() function may erase
// blocks, but only blocks dominated by the head block. This makes it safe to
// update the dominator tree while the post-order iterator is still active.
- for (auto DomNode : post_order(DomTree))
+ for (auto *DomNode : post_order(DomTree))
if (tryConvertIf(DomNode->getBlock()))
Changed = true;


@ -85,7 +85,7 @@ void FaultMaps::emitFunctionInfo(const MCSymbol *FnLabel,
OS.emitInt32(0); // Reserved
- for (auto &Fault : FFI) {
+ for (const auto &Fault : FFI) {
LLVM_DEBUG(dbgs() << WFMP << " fault type: "
<< faultTypeToString(Fault.Kind) << "\n");
OS.emitInt32(Fault.Kind);


@ -319,7 +319,7 @@ const GISelInstProfileBuilder &
GISelInstProfileBuilder::addNodeID(const MachineInstr *MI) const {
addNodeIDMBB(MI->getParent());
addNodeIDOpcode(MI->getOpcode());
- for (auto &Op : MI->operands())
+ for (const auto &Op : MI->operands())
addNodeIDMachineOperand(Op);
addNodeIDFlag(MI->getFlags());
return *this;


@ -116,7 +116,7 @@ bool CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, const CallBase &CB,
// we'll pass to the assigner function.
unsigned i = 0;
unsigned NumFixedArgs = CB.getFunctionType()->getNumParams();
- for (auto &Arg : CB.args()) {
+ for (const auto &Arg : CB.args()) {
ArgInfo OrigArg{ArgRegs[i], *Arg.get(), i, getAttributesForArgIdx(CB, i),
i < NumFixedArgs};
setArgFlags(OrigArg, i + AttributeList::FirstArgIndex, DL, CB);
@ -960,7 +960,7 @@ bool CallLowering::parametersInCSRMatch(
const SmallVectorImpl<CCValAssign> &OutLocs,
const SmallVectorImpl<ArgInfo> &OutArgs) const {
for (unsigned i = 0; i < OutLocs.size(); ++i) {
- auto &ArgLoc = OutLocs[i];
+ const auto &ArgLoc = OutLocs[i];
// If it's not a register, it's fine.
if (!ArgLoc.isRegLoc())
continue;


@ -684,7 +684,7 @@ bool IRTranslator::translateSwitch(const User &U, MachineIRBuilder &MIB) {
BranchProbabilityInfo *BPI = FuncInfo.BPI;
CaseClusterVector Clusters;
Clusters.reserve(SI.getNumCases());
- for (auto &I : SI.cases()) {
+ for (const auto &I : SI.cases()) {
MachineBasicBlock *Succ = &getMBB(*I.getCaseSuccessor());
assert(Succ && "Could not find successor mbb in mapping");
const ConstantInt *CaseVal = I.getCaseValue();
@ -1400,7 +1400,7 @@ bool IRTranslator::translateInsertValue(const User &U,
ArrayRef<uint64_t> DstOffsets = *VMap.getOffsets(U);
ArrayRef<Register> SrcRegs = getOrCreateVRegs(*Src);
ArrayRef<Register> InsertedRegs = getOrCreateVRegs(*U.getOperand(1));
- auto InsertedIt = InsertedRegs.begin();
+ auto *InsertedIt = InsertedRegs.begin();
for (unsigned i = 0; i < DstRegs.size(); ++i) {
if (DstOffsets[i] >= Offset && InsertedIt != InsertedRegs.end())
@ -1785,7 +1785,7 @@ bool IRTranslator::translateSimpleIntrinsic(const CallInst &CI,
// Yes. Let's translate it.
SmallVector<llvm::SrcOp, 4> VRegs;
- for (auto &Arg : CI.args())
+ for (const auto &Arg : CI.args())
VRegs.push_back(getOrCreateVReg(*Arg));
MIRBuilder.buildInstr(Op, {getOrCreateVReg(CI)}, VRegs,
@ -2305,7 +2305,7 @@ bool IRTranslator::translateCallBase(const CallBase &CB,
SmallVector<ArrayRef<Register>, 8> Args;
Register SwiftInVReg = 0;
Register SwiftErrorVReg = 0;
- for (auto &Arg : CB.args()) {
+ for (const auto &Arg : CB.args()) {
if (CLI->supportSwiftError() && isSwiftError(Arg)) {
assert(SwiftInVReg == 0 && "Expected only one swift error argument");
LLT Ty = getLLTForType(*Arg->getType(), *DL);
@ -2394,7 +2394,7 @@ bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
if (isa<FPMathOperator>(CI))
MIB->copyIRFlags(CI);
- for (auto &Arg : enumerate(CI.args())) {
+ for (const auto &Arg : enumerate(CI.args())) {
// If this is required to be an immediate, don't materialize it in a
// register.
if (CI.paramHasAttr(Arg.index(), Attribute::ImmArg)) {
@ -2947,7 +2947,7 @@ void IRTranslator::finishPendingPhis() {
for (unsigned i = 0; i < PI->getNumIncomingValues(); ++i) {
auto IRPred = PI->getIncomingBlock(i);
ArrayRef<Register> ValRegs = getOrCreateVRegs(*PI->getIncomingValue(i));
- for (auto Pred : getMachinePredBBs({IRPred, PI->getParent()})) {
+ for (auto *Pred : getMachinePredBBs({IRPred, PI->getParent()})) {
if (SeenPreds.count(Pred) || !PhiMBB->isPredecessor(Pred))
continue;
SeenPreds.insert(Pred);


@ -298,7 +298,7 @@ bool LoadStoreOpt::mergeStores(SmallVectorImpl<GStore *> &StoresToMerge) {
const auto &LegalSizes = LegalStoreSizes[AS];
#ifndef NDEBUG
- for (auto StoreMI : StoresToMerge)
+ for (auto *StoreMI : StoresToMerge)
assert(MRI->getType(StoreMI->getValueReg()) == OrigTy);
#endif
@ -366,7 +366,7 @@ bool LoadStoreOpt::doSingleStoreMerge(SmallVectorImpl<GStore *> &Stores) {
// directly. Otherwise, we need to generate some instructions to merge the
// existing values together into a wider type.
SmallVector<APInt, 8> ConstantVals;
- for (auto Store : Stores) {
+ for (auto *Store : Stores) {
auto MaybeCst =
getIConstantVRegValWithLookThrough(Store->getValueReg(), *MRI);
if (!MaybeCst) {
@ -415,7 +415,7 @@ bool LoadStoreOpt::doSingleStoreMerge(SmallVectorImpl<GStore *> &Stores) {
return R;
});
- for (auto MI : Stores)
+ for (auto *MI : Stores)
InstsToErase.insert(MI);
return true;
}


@ -646,7 +646,7 @@ MachineIRBuilder::buildBuildVectorConstant(const DstOp &Res,
SmallVector<SrcOp> TmpVec;
TmpVec.reserve(Ops.size());
LLT EltTy = Res.getLLTTy(*getMRI()).getElementType();
- for (auto &Op : Ops)
+ for (const auto &Op : Ops)
TmpVec.push_back(buildConstant(EltTy, Op));
return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}


@ -228,7 +228,7 @@ bool llvm::isTriviallyDead(const MachineInstr &MI,
return false;
// Instructions without side-effects are dead iff they only define dead vregs.
- for (auto &MO : MI.operands()) {
+ for (const auto &MO : MI.operands()) {
if (!MO.isReg() || !MO.isDef())
continue;


@ -332,7 +332,7 @@ void HardwareLoop::Create() {
// Run through the basic blocks of the loop and see if any of them have dead
// PHIs that can be removed.
- for (auto I : L->blocks())
+ for (auto *I : L->blocks())
DeleteDeadPHIs(I);
}


@ -758,7 +758,7 @@ void ImplicitNullChecks::rewriteNullChecks(
ArrayRef<ImplicitNullChecks::NullCheck> NullCheckList) {
DebugLoc DL;
- for (auto &NC : NullCheckList) {
+ for (const auto &NC : NullCheckList) {
// Remove the conditional branch dependent on the null check.
unsigned BranchesRemoved = TII->removeBranch(*NC.getCheckBlock());
(void)BranchesRemoved;


@ -1298,7 +1298,7 @@ void HoistSpillHelper::rmRedundantSpills(
// For each spill seen, check SpillBBToSpill[] and see if its BB already has
// another spill inside. If a BB contains more than one spill, only keep the
// earlier spill with smaller SlotIndex.
- for (const auto CurrentSpill : Spills) {
+ for (auto *const CurrentSpill : Spills) {
MachineBasicBlock *Block = CurrentSpill->getParent();
MachineDomTreeNode *Node = MDT.getBase().getNode(Block);
MachineInstr *PrevSpill = SpillBBToSpill[Node];
@ -1313,7 +1313,7 @@ void HoistSpillHelper::rmRedundantSpills(
SpillBBToSpill[MDT.getBase().getNode(Block)] = CurrentSpill;
}
}
- for (const auto SpillToRm : SpillsToRm)
+ for (auto *const SpillToRm : SpillsToRm)
Spills.erase(SpillToRm);
}
@ -1347,7 +1347,7 @@ void HoistSpillHelper::getVisitOrders(
// the path starting from the first node with non-redundant spill to the Root
// node will be added to the WorkSet, which will contain all the possible
// locations where spills may be hoisted to after the loop below is done.
- for (const auto Spill : Spills) {
+ for (auto *const Spill : Spills) {
MachineBasicBlock *Block = Spill->getParent();
MachineDomTreeNode *Node = MDT[Block];
MachineInstr *SpillToRm = nullptr;
@ -1492,7 +1492,7 @@ void HoistSpillHelper::runHoistSpills(
: BranchProbability(1, 1);
if (SubTreeCost > MBFI.getBlockFreq(Block) * MarginProb) {
// Hoist: Move spills to current Block.
- for (const auto SpillBB : SpillsInSubTree) {
+ for (auto *const SpillBB : SpillsInSubTree) {
// When SpillBB is a BB contains original spill, insert the spill
// to SpillsToRm.
if (SpillsToKeep.find(SpillBB) != SpillsToKeep.end() &&
@ -1609,7 +1609,7 @@ void HoistSpillHelper::hoistAllSpills() {
// Remove redundant spills or change them to dead instructions.
NumSpills -= SpillsToRm.size();
- for (auto const RMEnt : SpillsToRm) {
+ for (auto *const RMEnt : SpillsToRm) {
RMEnt->setDesc(TII.get(TargetOpcode::KILL));
for (unsigned i = RMEnt->getNumOperands(); i; --i) {
MachineOperand &MO = RMEnt->getOperand(i - 1);


@ -541,7 +541,7 @@ bool InterleavedAccess::runOnFunction(Function &F) {
Changed |= lowerInterleavedStore(SI, DeadInsts);
}
- for (auto I : DeadInsts)
+ for (auto *I : DeadInsts)
I->eraseFromParent();
return Changed;


@ -528,8 +528,8 @@ public:
if (B.size() != o.B.size())
return false;
- auto ob = o.B.begin();
- for (auto &b : B) {
+ auto *ob = o.B.begin();
+ for (const auto &b : B) {
if (b != *ob)
return false;
ob++;
@ -1154,7 +1154,7 @@ bool InterleavedLoadCombineImpl::combine(std::list<VectorInfo> &InterleavedLoad,
// Test if all participating instructions will be dead after the
// transformation. If intermediate results are used, no performance gain can
// be expected. Also sum the cost of the instructions being left dead.
- for (auto &I : Is) {
+ for (const auto &I : Is) {
// Compute the old cost
InstructionCost += TTI.getInstructionCost(I, CostKind);
@ -1182,7 +1182,7 @@ bool InterleavedLoadCombineImpl::combine(std::list<VectorInfo> &InterleavedLoad,
// that the corresponding defining access dominates first LI. This guarantees
// that there are no aliasing stores in between the loads.
auto FMA = MSSA.getMemoryAccess(First);
- for (auto LI : LIs) {
+ for (auto *LI : LIs) {
auto MADef = MSSA.getMemoryAccess(LI)->getDefiningAccess();
if (!MSSA.dominates(MADef, FMA))
return false;


@ -284,7 +284,7 @@ public:
// Initialized the preferred-location map with illegal locations, to be
// filled in later.
- for (auto &VLoc : VLocs)
+ for (const auto &VLoc : VLocs)
if (VLoc.second.Kind == DbgValue::Def)
ValueToLoc.insert({VLoc.second.ID, LocIdx::MakeIllegalLoc()});
@ -507,7 +507,7 @@ public:
// date. Wipe old tracking data for the location if it's been clobbered in
// the meantime.
if (MTracker->readMLoc(NewLoc) != VarLocs[NewLoc.asU64()]) {
- for (auto &P : ActiveMLocs[NewLoc]) {
+ for (const auto &P : ActiveMLocs[NewLoc]) {
ActiveVLocs.erase(P);
}
ActiveMLocs[NewLoc.asU64()].clear();
@ -560,7 +560,7 @@ public:
// explicitly undef, then stop here.
if (!NewLoc && !MakeUndef) {
// Try and recover a few more locations with entry values.
- for (auto &Var : ActiveMLocIt->second) {
+ for (const auto &Var : ActiveMLocIt->second) {
auto &Prop = ActiveVLocs.find(Var)->second.Properties;
recoverAsEntryValue(Var, Prop, OldValue);
}
@ -570,7 +570,7 @@ public:
// Examine all the variables based on this location.
DenseSet<DebugVariable> NewMLocs;
- for (auto &Var : ActiveMLocIt->second) {
+ for (const auto &Var : ActiveMLocIt->second) {
auto ActiveVLocIt = ActiveVLocs.find(Var);
// Re-state the variable location: if there's no replacement then NewLoc
// is None and a $noreg DBG_VALUE will be created. Otherwise, a DBG_VALUE
@ -623,7 +623,7 @@ public:
VarLocs[Dst.asU64()] = VarLocs[Src.asU64()];
// For each variable based on Src; create a location at Dst.
- for (auto &Var : MovingVars) {
+ for (const auto &Var : MovingVars) {
auto ActiveVLocIt = ActiveVLocs.find(Var);
assert(ActiveVLocIt != ActiveVLocs.end());
ActiveVLocIt->second.Loc = Dst;
@ -1224,7 +1224,7 @@ bool InstrRefBasedLDV::transferDebugInstrRef(MachineInstr &MI,
// FIXME: no index for this?
Register Reg = MTracker->LocIdxToLocID[L];
const TargetRegisterClass *TRC = nullptr;
- for (auto *TRCI : TRI->regclasses())
+ for (const auto *TRCI : TRI->regclasses())
if (TRCI->contains(Reg))
TRC = TRCI;
assert(TRC && "Couldn't find target register class?");
@ -1454,7 +1454,7 @@ void InstrRefBasedLDV::transferRegisterDef(MachineInstr &MI) {
for (uint32_t DeadReg : DeadRegs)
MTracker->defReg(DeadReg, CurBB, CurInst);
- for (auto *MO : RegMaskPtrs)
+ for (const auto *MO : RegMaskPtrs)
MTracker->writeRegMask(MO, CurBB, CurInst);
// If this instruction writes to a spill slot, def that slot.
@ -1493,7 +1493,7 @@ void InstrRefBasedLDV::transferRegisterDef(MachineInstr &MI) {
if (IgnoreSPAlias(Reg))
continue;
- for (auto *MO : RegMaskPtrs)
+ for (const auto *MO : RegMaskPtrs)
if (MO->clobbersPhysReg(Reg))
TTracker->clobberMloc(L.Idx, MI.getIterator(), false);
}
@ -1822,7 +1822,7 @@ void InstrRefBasedLDV::accumulateFragmentMap(MachineInstr &MI) {
// Otherwise, examine all other seen fragments for this variable, with "this"
// fragment being a previously unseen fragment. Record any pair of
// overlapping fragments.
- for (auto &ASeenFragment : AllSeenFragments) {
+ for (const auto &ASeenFragment : AllSeenFragments) {
// Does this previously seen fragment overlap?
if (DIExpression::fragmentsOverlap(ThisFragment, ASeenFragment)) {
// Yes: Mark the current fragment as being overlapped.
@ -1993,7 +1993,7 @@ bool InstrRefBasedLDV::mlocJoin(
// redundant PHI that we can eliminate.
SmallVector<const MachineBasicBlock *, 8> BlockOrders;
- for (auto Pred : MBB.predecessors())
+ for (auto *Pred : MBB.predecessors())
BlockOrders.push_back(Pred);
// Visit predecessors in RPOT order.
@ -2313,7 +2313,7 @@ void InstrRefBasedLDV::buildMLocValueMap(
// All successors should be visited: put any back-edges on the pending
// list for the next pass-through, and any other successors to be
// visited this pass, if they're not going to be already.
- for (auto s : MBB->successors()) {
+ for (auto *s : MBB->successors()) {
// Does branching to this successor represent a back-edge?
if (BBToOrder[s] > BBToOrder[MBB]) {
// No: visit it during this dataflow iteration.
@ -2367,7 +2367,7 @@ Optional<ValueIDNum> InstrRefBasedLDV::pickVPHILoc(
if (BlockOrders.empty())
return None;
- for (auto p : BlockOrders) {
+ for (const auto *p : BlockOrders) {
unsigned ThisBBNum = p->getNumber();
auto OutValIt = LiveOuts.find(p);
if (OutValIt == LiveOuts.end())
@ -2422,7 +2422,7 @@ Optional<ValueIDNum> InstrRefBasedLDV::pickVPHILoc(
// Check that all properties are the same. We can't pick a location if they're
// not.
const DbgValueProperties *Properties0 = Properties[0];
- for (auto *Prop : Properties)
+ for (const auto *Prop : Properties)
if (*Prop != *Properties0)
return None;
@ -2472,7 +2472,7 @@ bool InstrRefBasedLDV::vlocJoin(
SmallVector<InValueT, 8> Values;
bool Bail = false;
int BackEdgesStart = 0;
- for (auto p : BlockOrders) {
+ for (auto *p : BlockOrders) {
// If the predecessor isn't in scope / to be explored, we'll never be
// able to join any locations.
if (!BlocksToExplore.contains(p)) {
@ -2577,7 +2577,7 @@ void InstrRefBasedLDV::getBlocksForScope(
// instructions in scope at all. To accurately replicate VarLoc
// LiveDebugValues, this means exploring all artificial successors too.
// Perform a depth-first-search to enumerate those blocks.
- for (auto *MBB : BlocksToExplore) {
+ for (const auto *MBB : BlocksToExplore) {
// Depth-first-search state: each node is a block and which successor
// we're currently exploring.
SmallVector<std::pair<const MachineBasicBlock *,
@ -2662,7 +2662,7 @@ void InstrRefBasedLDV::buildVLocValueMap(
MutBlocksToExplore.insert(const_cast<MachineBasicBlock *>(MBB));
// Picks out relevant blocks in RPO order and sorts them.
- for (auto *MBB : BlocksToExplore)
+ for (const auto *MBB : BlocksToExplore)
BlockOrders.push_back(const_cast<MachineBasicBlock *>(MBB));
llvm::sort(BlockOrders, Cmp);
@ -2696,7 +2696,7 @@ void InstrRefBasedLDV::buildVLocValueMap(
// between blocks. This keeps the locality of working on one lexical scope at
// a time, but avoids re-processing variable values because some other
// variable has been assigned.
- for (auto &Var : VarsWeCareAbout) {
+ for (const auto &Var : VarsWeCareAbout) {
// Re-initialize live-ins and live-outs, to clear the remains of previous
// variables live-ins / live-outs.
for (unsigned int I = 0; I < NumBlocks; ++I) {
@ -2823,7 +2823,7 @@ void InstrRefBasedLDV::buildVLocValueMap(
// We should visit all successors. Ensure we'll visit any non-backedge
// successors during this dataflow iteration; book backedge successors
// to be visited next time around.
- for (auto s : MBB->successors()) {
+ for (auto *s : MBB->successors()) {
// Ignore out of scope / not-to-be-explored successors.
if (LiveInIdx.find(s) == LiveInIdx.end())
continue;
@ -2906,7 +2906,7 @@ void InstrRefBasedLDV::placePHIsForSingleVarDefinition(
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void InstrRefBasedLDV::dump_mloc_transfer(
const MLocTransferMap &mloc_transfer) const {
- for (auto &P : mloc_transfer) {
+ for (const auto &P : mloc_transfer) {
std::string foo = MTracker->LocIdxToName(P.first);
std::string bar = MTracker->IDAsString(P.second);
dbgs() << "Loc " << foo << " --> " << bar << "\n";
@ -2993,7 +2993,7 @@ void InstrRefBasedLDV::makeDepthFirstEjectionMap(
if (DILocationIt != ScopeToDILocation.end()) {
getBlocksForScope(DILocationIt->second, BlocksToExplore,
ScopeToAssignBlocks.find(WS)->second);
- for (auto *MBB : BlocksToExplore) {
+ for (const auto *MBB : BlocksToExplore) {
unsigned BBNum = MBB->getNumber();
if (EjectionMap[BBNum] == 0)
EjectionMap[BBNum] = WS->getDFSOut();
@ -3100,7 +3100,7 @@ bool InstrRefBasedLDV::depthFirstVLocAndEmit(
getBlocksForScope(DILocationIt->second, BlocksToExplore,
ScopeToAssignBlocks.find(WS)->second);
- for (auto *MBB : BlocksToExplore)
+ for (const auto *MBB : BlocksToExplore)
if (WS->getDFSOut() == EjectionMap[MBB->getNumber()])
EjectBlock(const_cast<MachineBasicBlock &>(*MBB));


@ -1874,7 +1874,7 @@ void VarLocBasedLDV::accumulateFragmentMap(MachineInstr &MI,
// Otherwise, examine all other seen fragments for this variable, with "this"
// fragment being a previously unseen fragment. Record any pair of
// overlapping fragments.
- for (auto &ASeenFragment : AllSeenFragments) {
+ for (const auto &ASeenFragment : AllSeenFragments) {
// Does this previously seen fragment overlap?
if (DIExpression::fragmentsOverlap(ThisFragment, ASeenFragment)) {
// Yes: Mark the current fragment as being overlapped.
@ -1922,7 +1922,7 @@ bool VarLocBasedLDV::join(
// For all predecessors of this MBB, find the set of VarLocs that
// can be joined.
int NumVisited = 0;
- for (auto p : MBB.predecessors()) {
+ for (auto *p : MBB.predecessors()) {
// Ignore backedges if we have not visited the predecessor yet. As the
// predecessor hasn't yet had locations propagated into it, most locations
// will not yet be valid, so treat them as all being uninitialized and
@ -2246,7 +2246,7 @@ bool VarLocBasedLDV::ExtendRanges(MachineFunction &MF,
if (OLChanged) {
OLChanged = false;
- for (auto s : MBB->successors())
+ for (auto *s : MBB->successors())
if (OnPending.insert(s).second) {
Pending.push(BBToOrder[s]);
}


@ -1891,7 +1891,7 @@ void LDVImpl::emitDebugValues(VirtRegMap *VRM) {
// insert position, insert all instructions at the same SlotIdx. They are
// guaranteed to appear in-sequence in StashedDebugInstrs because we insert
// them in order.
- for (auto StashIt = StashedDebugInstrs.begin();
+ for (auto *StashIt = StashedDebugInstrs.begin();
StashIt != StashedDebugInstrs.end(); ++StashIt) {
SlotIndex Idx = StashIt->Idx;
MachineBasicBlock *MBB = StashIt->MBB;


@ -1417,7 +1417,7 @@ private:
NewIdxDef.getRegSlot(), (NewIdxOut + 1)->end, OldIdxVNI);
OldIdxVNI->def = NewIdxDef;
// Modify subsequent segments to be defined by the moved def OldIdxVNI.
- for (auto Idx = NewIdxOut + 2; Idx <= OldIdxOut; ++Idx)
+ for (auto *Idx = NewIdxOut + 2; Idx <= OldIdxOut; ++Idx)
Idx->valno = OldIdxVNI;
// Aggressively remove all dead flags from the former dead definition.
// Kill/dead flags shouldn't be used while live intervals exist; they


@ -78,7 +78,7 @@ bool LowerEmuTLS::runOnModule(Module &M) {
if (G.isThreadLocal())
TlsVars.append({&G});
}
- for (const auto G : TlsVars)
+ for (const auto *const G : TlsVars)
Changed |= addEmuTlsVar(M, G);
return Changed;
}


@ -129,7 +129,7 @@ static bool rescheduleCanonically(unsigned &PseudoIdempotentInstCount,
// Calculates the distance of MI from the beginning of its parent BB.
auto getInstrIdx = [](const MachineInstr &MI) {
unsigned i = 0;
- for (auto &CurMI : *MI.getParent()) {
+ for (const auto &CurMI : *MI.getParent()) {
if (&CurMI == &MI)
return i;
i++;
@ -416,7 +416,7 @@ bool MIRCanonicalizer::runOnMachineFunction(MachineFunction &MF) {
bool Changed = false;
MachineRegisterInfo &MRI = MF.getRegInfo();
VRegRenamer Renamer(MRI);
- for (auto MBB : RPOList)
+ for (auto *MBB : RPOList)
Changed |= runOnBasicBlock(MBB, BBNum++, Renamer);
return Changed;


@ -3383,7 +3383,7 @@ static void initSlots2BasicBlocks(
DenseMap<unsigned, const BasicBlock *> &Slots2BasicBlocks) {
ModuleSlotTracker MST(F.getParent(), /*ShouldInitializeAllMetadata=*/false);
MST.incorporateFunction(F);
- for (auto &BB : F) {
+ for (const auto &BB : F) {
if (BB.hasName())
continue;
int Slot = MST.getLocalSlot(&BB);


@ -437,7 +437,7 @@ void MIRParserImpl::setupDebugValueTracking(
MF.setDebugInstrNumberingCount(MaxInstrNum);
// Load any substitutions.
- for (auto &Sub : YamlMF.DebugValueSubstitutions) {
+ for (const auto &Sub : YamlMF.DebugValueSubstitutions) {
MF.makeDebugValueSubstitution({Sub.SrcInst, Sub.SrcOp},
{Sub.DstInst, Sub.DstOp}, Sub.Subreg);
}
@ -975,7 +975,7 @@ bool MIRParserImpl::parseMachineMetadata(PerFunctionMIParsingState &PFS,
bool MIRParserImpl::parseMachineMetadataNodes(
PerFunctionMIParsingState &PFS, MachineFunction &MF,
const yaml::MachineFunction &YMF) {
- for (auto &MDS : YMF.MachineMetadataNodes) {
+ for (const auto &MDS : YMF.MachineMetadataNodes) {
if (parseMachineMetadata(PFS, MDS))
return true;
}


@ -1436,7 +1436,7 @@ MachineBasicBlock::getSuccProbability(const_succ_iterator Succ) const {
// distribute the complement of the sum to each unknown probability.
unsigned KnownProbNum = 0;
auto Sum = BranchProbability::getZero();
- for (auto &P : Probs) {
+ for (const auto &P : Probs) {
if (!P.isUnknown()) {
Sum += P;
KnownProbNum++;


@ -965,7 +965,7 @@ bool MachineBlockPlacement::isTrellis(
for (MachineBasicBlock *Succ : ViableSuccs) {
int PredCount = 0;
- for (auto SuccPred : Succ->predecessors()) {
+ for (auto *SuccPred : Succ->predecessors()) {
// Allow triangle successors, but don't count them.
if (Successors.count(SuccPred)) {
// Make sure that it is actually a triangle.
@ -1063,7 +1063,7 @@ MachineBlockPlacement::getBestTrellisSuccessor(
// Collect the edge frequencies of all edges that form the trellis.
SmallVector<WeightedEdge, 8> Edges[2];
int SuccIndex = 0;
- for (auto Succ : ViableSuccs) {
+ for (auto *Succ : ViableSuccs) {
for (MachineBasicBlock *SuccPred : Succ->predecessors()) {
// Skip any placed predecessors that are not BB
if (SuccPred != BB)
@ -2451,7 +2451,7 @@ void MachineBlockPlacement::rotateLoopWithProfile(
// as the sum of frequencies of exit edges we collect here, excluding the exit
// edge from the tail of the loop chain.
SmallVector<std::pair<MachineBasicBlock *, BlockFrequency>, 4> ExitsWithFreq;
- for (auto BB : LoopChain) {
+ for (auto *BB : LoopChain) {
auto LargestExitEdgeProb = BranchProbability::getZero();
for (auto *Succ : BB->successors()) {
BlockChain *SuccChain = BlockToChain[Succ];
@ -2561,7 +2561,7 @@ MachineBlockPlacement::collectLoopBlockSet(const MachineLoop &L) {
// profile data is available.
if (F->getFunction().hasProfileData() || ForceLoopColdBlock) {
BlockFrequency LoopFreq(0);
- for (auto LoopPred : L.getHeader()->predecessors())
+ for (auto *LoopPred : L.getHeader()->predecessors())
if (!L.contains(LoopPred))
LoopFreq += MBFI->getBlockFreq(LoopPred) *
MBPI->getEdgeProbability(LoopPred, L.getHeader());


@ -344,7 +344,7 @@ std::pair<unsigned, unsigned> MachineCombiner::getLatenciesForInstrSequences(
NewRootLatency += getLatency(&MI, NewRoot, BlockTrace);
unsigned RootLatency = 0;
- for (auto I : DelInstrs)
+ for (auto *I : DelInstrs)
RootLatency += TSchedModel.computeInstrLatency(I);
return {NewRootLatency, RootLatency};
@ -527,7 +527,7 @@ static void insertDeleteInstructions(MachineBasicBlock *MBB, MachineInstr &MI,
for (auto *InstrPtr : DelInstrs) {
InstrPtr->eraseFromParent();
// Erase all LiveRegs defined by the removed instruction
- for (auto I = RegUnits.begin(); I != RegUnits.end(); ) {
+ for (auto *I = RegUnits.begin(); I != RegUnits.end();) {
if (I->MI == InstrPtr)
I = RegUnits.erase(I);
else


@ -127,7 +127,7 @@ BitVector MachineFrameInfo::getPristineRegs(const MachineFunction &MF) const {
BV.set(*CSR);
// Saved CSRs are not pristine.
- for (auto &I : getCalleeSavedInfo())
+ for (const auto &I : getCalleeSavedInfo())
for (MCSubRegIterator S(I.getReg(), TRI, true); S.isValid(); ++S)
BV.reset(*S);


@ -911,8 +911,8 @@ static const MachineInstr *getCallInstr(const MachineInstr *MI) {
if (!MI->isBundle())
return MI;
- for (auto &BMI : make_range(getBundleStart(MI->getIterator()),
- getBundleEnd(MI->getIterator())))
+ for (const auto &BMI : make_range(getBundleStart(MI->getIterator()),
+ getBundleEnd(MI->getIterator())))
if (BMI.isCandidateForCallSiteEntry())
return &BMI;


@ -2273,7 +2273,7 @@ using MMOList = SmallVector<const MachineMemOperand *, 2>;
static unsigned getSpillSlotSize(const MMOList &Accesses,
const MachineFrameInfo &MFI) {
unsigned Size = 0;
- for (auto A : Accesses)
+ for (const auto *A : Accesses)
if (MFI.isSpillSlotObjectIndex(
cast<FixedStackPseudoSourceValue>(A->getPseudoValue())
->getFrameIndex()))


@ -219,7 +219,7 @@ bool MachinePipeliner::runOnMachineFunction(MachineFunction &mf) {
TII = MF->getSubtarget().getInstrInfo();
RegClassInfo.runOnMachineFunction(*MF);
- for (auto &L : *MLI)
+ for (const auto &L : *MLI)
scheduleLoop(*L);
return false;
@ -231,7 +231,7 @@ bool MachinePipeliner::runOnMachineFunction(MachineFunction &mf) {
/// the loop.
bool MachinePipeliner::scheduleLoop(MachineLoop &L) {
bool Changed = false;
- for (auto &InnerLoop : L)
+ for (const auto &InnerLoop : L)
Changed |= scheduleLoop(*InnerLoop);
#ifndef NDEBUG
@ -689,7 +689,7 @@ static bool isSuccOrder(SUnit *SUa, SUnit *SUb) {
Worklist.push_back(SUa);
while (!Worklist.empty()) {
const SUnit *SU = Worklist.pop_back_val();
- for (auto &SI : SU->Succs) {
+ for (const auto &SI : SU->Succs) {
SUnit *SuccSU = SI.getSUnit();
if (SI.getKind() == SDep::Order) {
if (Visited.count(SuccSU))
@ -750,7 +750,7 @@ void SwingSchedulerDAG::addLoopCarriedDependences(AliasAnalysis *AA) {
::getUnderlyingObjects(&MI, Objs);
if (Objs.empty())
Objs.push_back(UnknownValue);
- for (auto V : Objs) {
+ for (const auto *V : Objs) {
SmallVector<SUnit *, 4> &SUs = PendingLoads[V];
SUs.push_back(&SU);
}
@ -759,12 +759,12 @@ void SwingSchedulerDAG::addLoopCarriedDependences(AliasAnalysis *AA) {
::getUnderlyingObjects(&MI, Objs);
if (Objs.empty())
Objs.push_back(UnknownValue);
- for (auto V : Objs) {
+ for (const auto *V : Objs) {
MapVector<const Value *, SmallVector<SUnit *, 4>>::iterator I =
PendingLoads.find(V);
if (I == PendingLoads.end())
continue;
- for (auto Load : I->second) {
+ for (auto *Load : I->second) {
if (isSuccOrder(Load, &SU))
continue;
MachineInstr &LdMI = *Load->getInstr();
@ -1407,8 +1407,8 @@ void SwingSchedulerDAG::CopyToPhiMutation::apply(ScheduleDAGInstrs *DAG) {
SwingSchedulerDAG *SDAG = cast<SwingSchedulerDAG>(DAG);
// Add the artificial dependencies if it does not form a cycle.
- for (auto I : UseSUs) {
- for (auto Src : SrcSUs) {
+ for (auto *I : UseSUs) {
+ for (auto *Src : SrcSUs) {
if (!SDAG->Topo.IsReachable(I, Src) && Src != I) {
Src->addPred(SDep(I, SDep::Artificial));
SDAG->Topo.AddPred(Src, I);
@ -1878,7 +1878,7 @@ void SwingSchedulerDAG::computeNodeOrder(NodeSetType &NodeSets) {
Order = TopDown;
LLVM_DEBUG(dbgs() << " Top down (intersect) ");
} else if (NodeSets.size() == 1) {
- for (auto &N : Nodes)
+ for (const auto &N : Nodes)
if (N->Succs.size() == 0)
R.insert(N);
Order = BottomUp;


@ -1698,7 +1698,7 @@ void BaseMemOpClusterMutation::collectMemOpRecords(
<< ", Width: " << Width << "\n");
}
#ifndef NDEBUG
- for (auto *Op : BaseOps)
+ for (const auto *Op : BaseOps)
assert(Op);
#endif
}


@ -446,7 +446,7 @@ bool MachineSinking::runOnMachineFunction(MachineFunction &MF) {
MadeChange |= ProcessBlock(MBB);
// If we have anything we marked as toSplit, split it now.
- for (auto &Pair : ToSplit) {
+ for (const auto &Pair : ToSplit) {
auto NewSucc = Pair.first->SplitCriticalEdge(Pair.second, *this);
if (NewSucc != nullptr) {
LLVM_DEBUG(dbgs() << " *** Splitting critical edge: "


@ -200,7 +200,7 @@ stable_hash llvm::stableHashValue(const MachineInstr &MI, bool HashVRegs,
stable_hash llvm::stableHashValue(const MachineBasicBlock &MBB) {
SmallVector<stable_hash> HashComponents;
// TODO: Hash more stuff like block alignment and branch probabilities.
- for (auto &MI : MBB)
+ for (const auto &MI : MBB)
HashComponents.push_back(stableHashValue(MI));
return stable_hash_combine_range(HashComponents.begin(),
HashComponents.end());
@ -209,7 +209,7 @@ stable_hash llvm::stableHashValue(const MachineBasicBlock &MBB) {
stable_hash llvm::stableHashValue(const MachineFunction &MF) {
SmallVector<stable_hash> HashComponents;
// TODO: Hash lots more stuff like function alignment and stack objects.
- for (auto &MBB : MF)
+ for (const auto &MBB : MF)
HashComponents.push_back(stableHashValue(MBB));
return stable_hash_combine_range(HashComponents.begin(),
HashComponents.end());


@ -484,7 +484,7 @@ void MachineTraceMetrics::Ensemble::computeTrace(const MachineBasicBlock *MBB) {
// Run an upwards post-order search for the trace start.
Bounds.Downward = false;
Bounds.Visited.clear();
- for (auto I : inverse_post_order_ext(MBB, Bounds)) {
+ for (const auto *I : inverse_post_order_ext(MBB, Bounds)) {
LLVM_DEBUG(dbgs() << " pred for " << printMBBReference(*I) << ": ");
TraceBlockInfo &TBI = BlockInfo[I->getNumber()];
// All the predecessors have been visited, pick the preferred one.
@ -502,7 +502,7 @@ void MachineTraceMetrics::Ensemble::computeTrace(const MachineBasicBlock *MBB) {
// Run a downwards post-order search for the trace end.
Bounds.Downward = true;
Bounds.Visited.clear();
- for (auto I : post_order_ext(MBB, Bounds)) {
+ for (const auto *I : post_order_ext(MBB, Bounds)) {
LLVM_DEBUG(dbgs() << " succ for " << printMBBReference(*I) << ": ");
TraceBlockInfo &TBI = BlockInfo[I->getNumber()];
// All the successors have been visited, pick the preferred one.


@ -2802,8 +2802,8 @@ void MachineVerifier::visitMachineFunctionAfter() {
// tracking numbers.
if (MF->getFunction().getSubprogram()) {
DenseSet<unsigned> SeenNumbers;
- for (auto &MBB : *MF) {
- for (auto &MI : MBB) {
+ for (const auto &MBB : *MF) {
+ for (const auto &MI : MBB) {
if (auto Num = MI.peekDebugInstrNum()) {
auto Result = SeenNumbers.insert((unsigned)Num);
if (!Result.second)


@ -1395,7 +1395,7 @@ void DataFlowGraph::recordDefsForDF(BlockRefsMap &PhiM,
// Finally, add the set of defs to each block in the iterated dominance
// frontier.
- for (auto DB : IDF) {
+ for (auto *DB : IDF) {
NodeAddr<BlockNode*> DBA = findBlock(DB);
PhiM[DBA.Id].insert(Defs.begin(), Defs.end());
}
@ -1657,7 +1657,7 @@ void DataFlowGraph::linkBlockRefs(DefStackMap &DefM, NodeAddr<BlockNode*> BA) {
// Recursively process all children in the dominator tree.
MachineDomTreeNode *N = MDT.getNode(BA.Addr->getCode());
- for (auto I : *N) {
+ for (auto *I : *N) {
MachineBasicBlock *SB = I->getBlock();
NodeAddr<BlockNode*> SBA = findBlock(SB);
linkBlockRefs(DefM, SBA);


@ -61,7 +61,7 @@ namespace rdf {
raw_ostream &operator<< (raw_ostream &OS, const Print<Liveness::RefMap> &P) {
OS << '{';
- for (auto &I : P.Obj) {
+ for (const auto &I : P.Obj) {
OS << ' ' << printReg(I.first, &P.G.getTRI()) << '{';
for (auto J = I.second.begin(), E = I.second.end(); J != E; ) {
OS << Print<NodeId>(J->first, P.G) << PrintLaneMaskOpt(J->second);
@ -767,7 +767,7 @@ void Liveness::computeLiveIns() {
}
for (auto I : IDF)
- for (auto S : I.second)
+ for (auto *S : I.second)
IIDF[S].insert(I.first);
computePhiInfo();
@ -926,7 +926,7 @@ void Liveness::resetKills(MachineBasicBlock *B) {
BitVector LiveIn(TRI.getNumRegs()), Live(TRI.getNumRegs());
CopyLiveIns(B, LiveIn);
- for (auto SI : B->successors())
+ for (auto *SI : B->successors())
CopyLiveIns(SI, Live);
for (MachineInstr &MI : llvm::reverse(*B)) {
@ -1003,7 +1003,7 @@ void Liveness::traverse(MachineBasicBlock *B, RefMap &LiveIn) {
// Go up the dominator tree (depth-first).
MachineDomTreeNode *N = MDT.getNode(B);
- for (auto I : *N) {
+ for (auto *I : *N) {
RefMap L;
MachineBasicBlock *SB = I->getBlock();
traverse(SB, L);
@ -1015,7 +1015,7 @@ void Liveness::traverse(MachineBasicBlock *B, RefMap &LiveIn) {
if (Trace) {
dbgs() << "\n-- " << printMBBReference(*B) << ": " << __func__
<< " after recursion into: {";
- for (auto I : *N)
+ for (auto *I : *N)
dbgs() << ' ' << I->getBlock()->getNumber();
dbgs() << " }\n";
dbgs() << " LiveIn: " << Print<RefMap>(LiveIn, DFG) << '\n';
@ -1155,7 +1155,7 @@ void Liveness::traverse(MachineBasicBlock *B, RefMap &LiveIn) {
dbgs() << " Local: " << Print<RegisterAggr>(Local, DFG) << '\n';
}
- for (auto C : IIDF[B]) {
+ for (auto *C : IIDF[B]) {
RegisterAggr &LiveC = LiveMap[C];
for (const std::pair<const RegisterId, NodeRefSet> &S : LiveIn)
for (auto R : S.second)


@ -635,7 +635,7 @@ ReachingDefAnalysis::isSafeToRemove(MachineInstr *MI, InstSet &Visited,
SmallPtrSet<MachineInstr*, 4> Uses;
getGlobalUses(MI, MO.getReg(), Uses);
- for (auto I : Uses) {
+ for (auto *I : Uses) {
if (Ignore.count(I) || ToRemove.count(I))
continue;
if (!isSafeToRemove(I, Visited, ToRemove, Ignore))


@ -166,7 +166,7 @@ void RegAllocBase::allocatePhysRegs() {
void RegAllocBase::postOptimization() {
spiller().postOptimization();
- for (auto DeadInst : DeadRemats) {
+ for (auto *DeadInst : DeadRemats) {
LIS->RemoveMachineInstrFromMaps(*DeadInst);
DeadInst->eraseFromParent();
}


@ -1478,7 +1478,7 @@ void RegAllocFast::allocateBasicBlock(MachineBasicBlock &MBB) {
RegUnitStates.assign(TRI->getNumRegUnits(), regFree);
assert(LiveVirtRegs.empty() && "Mapping not cleared from last block?");
- for (auto &LiveReg : MBB.liveouts())
+ for (const auto &LiveReg : MBB.liveouts())
setPhysRegState(LiveReg.PhysReg, regPreAssigned);
Coalesced.clear();


@ -783,7 +783,7 @@ void RegAllocPBQP::finalizeAlloc(MachineFunction &MF,
void RegAllocPBQP::postOptimization(Spiller &VRegSpiller, LiveIntervals &LIS) {
VRegSpiller.postOptimization();
/// Remove dead defs because of rematerialization.
- for (auto DeadInst : DeadRemats) {
+ for (auto *DeadInst : DeadRemats) {
LIS.RemoveMachineInstrFromMaps(*DeadInst);
DeadInst->eraseFromParent();
}


@ -1148,7 +1148,7 @@ bool RegisterCoalescer::removePartialRedundancy(const CoalescerPair &CP,
// we need to keep the copy of B = A at the end of Pred if we remove
// B = A from MBB.
bool ValB_Changed = false;
- for (auto VNI : IntB.valnos) {
+ for (auto *VNI : IntB.valnos) {
if (VNI->isUnused())
continue;
if (PVal->def < VNI->def && VNI->def < LIS->getMBBEndIdx(Pred)) {


@ -581,7 +581,7 @@ void RegisterOperands::collect(const MachineInstr &MI,
void RegisterOperands::detectDeadDefs(const MachineInstr &MI,
const LiveIntervals &LIS) {
SlotIndex SlotIdx = LIS.getInstructionIndex(MI);
- for (auto RI = Defs.begin(); RI != Defs.end(); /*empty*/) {
+ for (auto *RI = Defs.begin(); RI != Defs.end(); /*empty*/) {
Register Reg = RI->RegUnit;
const LiveRange *LR = getLiveRange(LIS, Reg);
if (LR != nullptr) {
@ -602,7 +602,7 @@ void RegisterOperands::adjustLaneLiveness(const LiveIntervals &LIS,
const MachineRegisterInfo &MRI,
SlotIndex Pos,
MachineInstr *AddFlagsMI) {
- for (auto I = Defs.begin(); I != Defs.end(); ) {
+ for (auto *I = Defs.begin(); I != Defs.end();) {
LaneBitmask LiveAfter = getLiveLanesAt(LIS, MRI, true, I->RegUnit,
Pos.getDeadSlot());
// If the def is all that is live after the instruction, then in case
@ -620,7 +620,7 @@ void RegisterOperands::adjustLaneLiveness(const LiveIntervals &LIS,
++I;
}
}
- for (auto I = Uses.begin(); I != Uses.end(); ) {
+ for (auto *I = Uses.begin(); I != Uses.end();) {
LaneBitmask LiveBefore = getLiveLanesAt(LIS, MRI, true, I->RegUnit,
Pos.getBaseIndex());
LaneBitmask LaneMask = I->LaneMask & LiveBefore;


@ -340,7 +340,7 @@ bool SafeStack::IsSafeStackAlloca(const Value *AllocaPtr, uint64_t AllocaSize) {
// analysis here, which would look at all uses of an argument inside
// the function being called.
auto B = CS.arg_begin(), E = CS.arg_end();
- for (auto A = B; A != E; ++A)
+ for (const auto *A = B; A != E; ++A)
if (A->get() == V)
if (!(CS.doesNotCapture(A - B) && (CS.doesNotAccessMemory(A - B) ||
CS.doesNotAccessMemory()))) {
@ -498,7 +498,7 @@ Value *SafeStack::moveStaticAllocasToUnsafeStack(
if (ClColoring)
SSC.run();
- for (auto *I : SSC.getMarkers()) {
+ for (const auto *I : SSC.getMarkers()) {
auto *Op = dyn_cast<Instruction>(I->getOperand(1));
const_cast<IntrinsicInst *>(I)->eraseFromParent();
// Remove the operand bitcast, too, if it has no more uses left.


@ -433,7 +433,7 @@ void SelectOptimize::convertProfitableSIGroups(SelectGroups &ProfSIGroups) {
DebugPseudoINS.push_back(&*DIt);
DIt++;
}
- for (auto DI : DebugPseudoINS) {
+ for (auto *DI : DebugPseudoINS) {
DI->moveBefore(&*EndBlock->getFirstInsertionPt());
}


@ -7935,7 +7935,7 @@ SDValue DAGCombiner::mergeTruncStores(StoreSDNode *N) {
int64_t FirstOffset = INT64_MAX;
StoreSDNode *FirstStore = nullptr;
Optional<BaseIndexOffset> Base;
- for (auto Store : Stores) {
+ for (auto *Store : Stores) {
// All the stores store different parts of the CombinedValue. A truncate is
// required to get the partial value.
SDValue Trunc = Store->getValue();


@ -793,7 +793,7 @@ ScheduleDAGLinearize::EmitSchedule(MachineBasicBlock::iterator &InsertPos) {
// Emit any debug values associated with the node.
if (N->getHasDebugValue()) {
MachineBasicBlock::iterator InsertPos = Emitter.getInsertPos();
- for (auto DV : DAG->GetDbgValues(N)) {
+ for (auto *DV : DAG->GetDbgValues(N)) {
if (!DV->isEmitted())
if (auto *DbgMI = Emitter.EmitDbgValue(DV, VRBaseMap))
BB->insert(InsertPos, DbgMI);


@ -749,7 +749,7 @@ ProcessSDDbgValues(SDNode *N, SelectionDAG *DAG, InstrEmitter &Emitter,
// source order number as N.
MachineBasicBlock *BB = Emitter.getBlock();
MachineBasicBlock::iterator InsertPos = Emitter.getInsertPos();
- for (auto DV : DAG->GetDbgValues(N)) {
+ for (auto *DV : DAG->GetDbgValues(N)) {
if (DV->isEmitted())
continue;
unsigned DVOrder = DV->getOrder();


@ -602,7 +602,7 @@ static void AddNodeIDValueTypes(FoldingSetNodeID &ID, SDVTList VTList) {
/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
static void AddNodeIDOperands(FoldingSetNodeID &ID,
ArrayRef<SDValue> Ops) {
- for (auto& Op : Ops) {
+ for (const auto &Op : Ops) {
ID.AddPointer(Op.getNode());
ID.AddInteger(Op.getResNo());
}
@ -611,7 +611,7 @@ static void AddNodeIDOperands(FoldingSetNodeID &ID,
/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
static void AddNodeIDOperands(FoldingSetNodeID &ID,
ArrayRef<SDUse> Ops) {
- for (auto& Op : Ops) {
+ for (const auto &Op : Ops) {
ID.AddPointer(Op.getNode());
ID.AddInteger(Op.getResNo());
}
@ -8904,7 +8904,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
}
#ifndef NDEBUG
- for (auto &Op : Ops)
+ for (const auto &Op : Ops)
assert(Op.getOpcode() != ISD::DELETED_NODE &&
"Operand is DELETED_NODE!");
#endif
@ -9018,7 +9018,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
return getNode(Opcode, DL, VTList.VTs[0], Ops, Flags);
#ifndef NDEBUG
- for (auto &Op : Ops)
+ for (const auto &Op : Ops)
assert(Op.getOpcode() != ISD::DELETED_NODE &&
"Operand is DELETED_NODE!");
#endif
@ -9914,7 +9914,7 @@ void SelectionDAG::salvageDebugInfo(SDNode &N) {
return;
SmallVector<SDDbgValue *, 2> ClonedDVs;
- for (auto DV : GetDbgValues(&N)) {
+ for (auto *DV : GetDbgValues(&N)) {
if (DV->isInvalidated())
continue;
switch (N.getOpcode()) {
@ -10268,7 +10268,7 @@ bool SelectionDAG::calculateDivergence(SDNode *N) {
}
if (TLI->isSDNodeSourceOfDivergence(N, FLI, DA))
return true;
- for (auto &Op : N->ops()) {
+ for (const auto &Op : N->ops()) {
if (Op.Val.getValueType() != MVT::Other && Op.getNode()->isDivergent())
return true;
}
@ -10298,7 +10298,7 @@ void SelectionDAG::CreateTopologicalOrder(std::vector<SDNode *> &Order) {
}
for (size_t I = 0; I != Order.size(); ++I) {
SDNode *N = Order[I];
- for (auto U : N->uses()) {
+ for (auto *U : N->uses()) {
unsigned &UnsortedOps = Degree[U];
if (0 == --UnsortedOps)
Order.push_back(U);


@ -1789,7 +1789,7 @@ static void findWasmUnwindDestinations(
UnwindDests.emplace_back(FuncInfo.MBBMap[EHPadBB], Prob);
UnwindDests.back().first->setIsEHScopeEntry();
break;
- } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
+ } else if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
// Add the catchpad handlers to the possible destinations. We don't
// continue to the unwind destination of the catchswitch for wasm.
for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
@ -1844,7 +1844,7 @@ static void findUnwindDestinations(
UnwindDests.back().first->setIsEHScopeEntry();
UnwindDests.back().first->setIsEHFuncletEntry();
break;
- } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
+ } else if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
// Add the catchpad handlers to the possible destinations.
for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
UnwindDests.emplace_back(FuncInfo.MBBMap[CatchPadBB], Prob);
@ -5772,7 +5772,7 @@ static const CallBase *FindPreallocatedCall(const Value *PreallocatedSetup) {
->getCalledFunction()
->getIntrinsicID() == Intrinsic::call_preallocated_setup &&
"expected call_preallocated_setup Value");
- for (auto *U : PreallocatedSetup->users()) {
+ for (const auto *U : PreallocatedSetup->users()) {
auto *UseCall = cast<CallBase>(U);
const Function *Fn = UseCall->getCalledFunction();
if (!Fn || Fn->getIntrinsicID() != Intrinsic::call_preallocated_arg) {


@ -193,7 +193,7 @@ static Optional<int> findPreviousSpillSlot(const Value *Val,
if (const PHINode *Phi = dyn_cast<PHINode>(Val)) {
Optional<int> MergedResult = None;
for (auto &IncomingValue : Phi->incoming_values()) {
for (const auto &IncomingValue : Phi->incoming_values()) {
Optional<int> SpillSlot =
findPreviousSpillSlot(IncomingValue, Builder, LookUpDepth - 1);
if (!SpillSlot)
@ -569,9 +569,10 @@ lowerStatepointMetaArgs(SmallVectorImpl<SDValue> &Ops,
// We cannot assign them to VRegs.
SmallSet<SDValue, 8> LPadPointers;
if (!UseRegistersForGCPointersInLandingPad)
- if (auto *StInvoke = dyn_cast_or_null<InvokeInst>(SI.StatepointInstr)) {
+ if (const auto *StInvoke =
+ dyn_cast_or_null<InvokeInst>(SI.StatepointInstr)) {
LandingPadInst *LPI = StInvoke->getLandingPadInst();
- for (auto *Relocate : SI.GCRelocates)
+ for (const auto *Relocate : SI.GCRelocates)
if (Relocate->getOperand(0) == LPI) {
LPadPointers.insert(Builder.getValue(Relocate->getBasePtr()));
LPadPointers.insert(Builder.getValue(Relocate->getDerivedPtr()));
@ -739,7 +740,7 @@ SDValue SelectionDAGBuilder::LowerAsSTATEPOINT(
LLVM_DEBUG(dbgs() << "Lowering statepoint " << *SI.StatepointInstr << "\n");
#ifndef NDEBUG
- for (auto *Reloc : SI.GCRelocates)
+ for (const auto *Reloc : SI.GCRelocates)
if (Reloc->getParent() == SI.StatepointInstr->getParent())
StatepointLowering.scheduleRelocCall(*Reloc);
#endif
@ -1017,7 +1018,7 @@ SDValue SelectionDAGBuilder::LowerAsSTATEPOINT(
static std::pair<const GCResultInst*, const GCResultInst*>
getGCResultLocality(const GCStatepointInst &S) {
std::pair<const GCResultInst *, const GCResultInst*> Res(nullptr, nullptr);
- for (auto *U : S.users()) {
+ for (const auto *U : S.users()) {
auto *GRI = dyn_cast<GCResultInst>(U);
if (!GRI)
continue;


@ -365,7 +365,7 @@ StackMaps::parseRegisterLiveOutMask(const uint32_t *Mask) const {
});
for (auto I = LiveOuts.begin(), E = LiveOuts.end(); I != E; ++I) {
- for (auto II = std::next(I); II != E; ++II) {
+ for (auto *II = std::next(I); II != E; ++II) {
if (I->DwarfRegNum != II->DwarfRegNum) {
// Skip all the now invalid entries.
I = --II;


@ -267,7 +267,7 @@ void SwiftErrorValueTracking::preassignVRegs(
if (auto *CB = dyn_cast<CallBase>(&*It)) {
// A call-site with a swifterror argument is both use and def.
const Value *SwiftErrorAddr = nullptr;
- for (auto &Arg : CB->args()) {
+ for (const auto &Arg : CB->args()) {
if (!Arg->isSwiftError())
continue;
// Use of swifterror.


@ -653,7 +653,7 @@ bool TailDuplicator::shouldTailDuplicate(bool IsSimple,
// demonstrated by test/CodeGen/Hexagon/tail-dup-subreg-abort.ll.
// Disable tail duplication for this case for now, until the problem is
// fixed.
- for (auto SB : TailBB.successors()) {
+ for (auto *SB : TailBB.successors()) {
for (auto &I : *SB) {
if (!I.isPHI())
break;


@ -1145,7 +1145,7 @@ static unsigned getVectorTypeBreakdownMVT(MVT VT, MVT &IntermediateVT,
/// specified register class are all legal.
bool TargetLoweringBase::isLegalRC(const TargetRegisterInfo &TRI,
const TargetRegisterClass &RC) const {
- for (auto I = TRI.legalclasstypes_begin(RC); *I != MVT::Other; ++I)
+ for (const auto *I = TRI.legalclasstypes_begin(RC); *I != MVT::Other; ++I)
if (isTypeLegal(*I))
return true;
return false;


@ -446,7 +446,7 @@ void IRPromoter::ExtendSources() {
// Now, insert extending instructions between the sources and their users.
LLVM_DEBUG(dbgs() << "IR Promotion: Promoting sources:\n");
- for (auto V : Sources) {
+ for (auto *V : Sources) {
LLVM_DEBUG(dbgs() << " - " << *V << "\n");
if (auto *I = dyn_cast<Instruction>(V))
InsertZExt(I, I);
@ -524,7 +524,7 @@ void IRPromoter::TruncateSinks() {
// Fix up any stores or returns that use the results of the promoted
// chain.
- for (auto I : Sinks) {
+ for (auto *I : Sinks) {
LLVM_DEBUG(dbgs() << "IR Promotion: For Sink: " << *I << "\n");
// Handle calls separately as we need to iterate over arg operands.
@ -570,7 +570,7 @@ void IRPromoter::Cleanup() {
LLVM_DEBUG(dbgs() << "IR Promotion: Cleanup..\n");
// Some zexts will now have become redundant, along with their trunc
// operands, so remove them
- for (auto V : Visited) {
+ for (auto *V : Visited) {
if (!isa<ZExtInst>(V))
continue;


@ -579,7 +579,7 @@ static inline bool isSingleUnscheduledSucc(SUnit *SU, SUnit *SU2) {
/// pressure, then return 0.
int ConvergingVLIWScheduler::pressureChange(const SUnit *SU, bool isBotUp) {
PressureDiff &PD = DAG->getPressureDiff(SU);
- for (auto &P : PD) {
+ for (const auto &P : PD) {
if (!P.isValid())
continue;
// The pressure differences are computed bottom-up, so the comparison for