llvm::sort(C.begin(), C.end(), ...) -> llvm::sort(C, ...)

Summary: The convenience wrapper in STLExtras is available since rL342102.

Reviewers: dblaikie, javed.absar, JDevlieghere, andreadb

Subscribers: MatzeB, sanjoy, arsenm, dschuff, mehdi_amini, sdardis, nemanjai, jvesely, nhaehnle, sbc100, jgravelle-google, eraman, aheejin, kbarton, JDevlieghere, javed.absar, gbedwell, jrtc27, mgrang, atanasyan, steven_wu, george.burgess.iv, dexonsmith, kristina, jsji, llvm-commits

Differential Revision: https://reviews.llvm.org/D52573

llvm-svn: 343163
Fangrui Song 2018-09-27 02:13:45 +00:00
parent f1c96490d4
commit 0cac726a00
124 changed files with 395 additions and 443 deletions
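For reference, the rewrite is mechanical: every call that previously passed an explicit begin()/end() iterator pair now passes the container (or other range) directly. Below is a minimal, self-contained sketch of the kind of range overload that STLExtras.h provides; the demo namespace and the main() driver are illustrative only, and the in-tree llvm::sort is more involved (ADL begin/end lookup, optional shuffling in expensive-checks builds).

#include <algorithm>
#include <functional>
#include <iterator>
#include <vector>

// Simplified stand-ins for the range overloads in llvm/ADT/STLExtras.h.
namespace demo {
template <typename Range> void sort(Range &&R) {
  std::sort(std::begin(R), std::end(R));
}
template <typename Range, typename Compare> void sort(Range &&R, Compare C) {
  std::sort(std::begin(R), std::end(R), C);
}
} // namespace demo

int main() {
  std::vector<int> V = {3, 1, 2};
  // Old style, equivalent to what this commit removes:
  //   llvm::sort(V.begin(), V.end(), std::greater<int>());
  // New style, using the range overload:
  demo::sort(V, std::greater<int>());
  demo::sort(V); // default operator< ordering
  return 0;
}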

View File

@@ -640,8 +640,8 @@ void LoopInfoBase<BlockT, LoopT>::print(raw_ostream &OS) const {
 template <typename T>
 bool compareVectors(std::vector<T> &BB1, std::vector<T> &BB2) {
-  llvm::sort(BB1.begin(), BB1.end());
-  llvm::sort(BB2.begin(), BB2.end());
+  llvm::sort(BB1);
+  llvm::sort(BB2);
   return BB1 == BB2;
 }

View File

@@ -676,7 +676,7 @@ class raw_ostream;
     idx2MBBMap.push_back(IdxMBBPair(startIdx, mbb));
     renumberIndexes(newItr);
-    llvm::sort(idx2MBBMap.begin(), idx2MBBMap.end(), Idx2MBBCompare());
+    llvm::sort(idx2MBBMap, Idx2MBBCompare());
   }
   /// Free the resources that were required to maintain a SlotIndex.

View File

@@ -544,9 +544,9 @@ Error InstrProfSymtab::create(const NameIterRange &IterRange) {
 void InstrProfSymtab::finalizeSymtab() {
   if (Sorted)
     return;
-  llvm::sort(MD5NameMap.begin(), MD5NameMap.end(), less_first());
-  llvm::sort(MD5FuncMap.begin(), MD5FuncMap.end(), less_first());
-  llvm::sort(AddrToMD5Map.begin(), AddrToMD5Map.end(), less_first());
+  llvm::sort(MD5NameMap, less_first());
+  llvm::sort(MD5FuncMap, less_first());
+  llvm::sort(AddrToMD5Map, less_first());
   AddrToMD5Map.erase(std::unique(AddrToMD5Map.begin(), AddrToMD5Map.end()),
                      AddrToMD5Map.end());
   Sorted = true;

View File

@@ -105,7 +105,7 @@ void LegalizeUpdates(ArrayRef<Update<NodePtr>> AllUpdates,
     Operations[{U.getTo(), U.getFrom()}] = int(i);
   }
-  llvm::sort(Result.begin(), Result.end(),
+  llvm::sort(Result,
              [&Operations](const Update<NodePtr> &A, const Update<NodePtr> &B) {
                return Operations[{A.getFrom(), A.getTo()}] >
                       Operations[{B.getFrom(), B.getTo()}];

View File

@@ -1386,8 +1386,7 @@ struct SemiNCAInfo {
     // Make a copy and sort it such that it is possible to check if there are
     // no gaps between DFS numbers of adjacent children.
     SmallVector<TreeNodePtr, 8> Children(Node->begin(), Node->end());
-    llvm::sort(Children.begin(), Children.end(),
-               [](const TreeNodePtr Ch1, const TreeNodePtr Ch2) {
+    llvm::sort(Children, [](const TreeNodePtr Ch1, const TreeNodePtr Ch2) {
       return Ch1->getDFSNumIn() < Ch2->getDFSNumIn();
     });

View File

@@ -138,7 +138,7 @@ public:
       }
     }
-    llvm::sort(SetFlags.begin(), SetFlags.end(), &flagName<TFlag>);
+    llvm::sort(SetFlags, &flagName<TFlag>);
     startLine() << Label << " [ (" << hex(Value) << ")\n";
     for (const auto &Flag : SetFlags) {

View File

@@ -156,9 +156,9 @@ static void combineWeight(Weight &W, const Weight &OtherW) {
 static void combineWeightsBySorting(WeightList &Weights) {
   // Sort so edges to the same node are adjacent.
-  llvm::sort(Weights.begin(), Weights.end(),
-             [](const Weight &L,
-                const Weight &R) { return L.TargetNode < R.TargetNode; });
+  llvm::sort(Weights, [](const Weight &L, const Weight &R) {
+    return L.TargetNode < R.TargetNode;
+  });
   // Combine adjacent edges.
   WeightList::iterator O = Weights.begin();
@@ -707,7 +707,7 @@ static void findIrreducibleHeaders(
          "Expected irreducible CFG; -loop-info is likely invalid");
   if (Headers.size() == InSCC.size()) {
     // Every block is a header.
-    llvm::sort(Headers.begin(), Headers.end());
+    llvm::sort(Headers);
     return;
   }
@@ -742,8 +742,8 @@ static void findIrreducibleHeaders(
     Others.push_back(Irr.Node);
     LLVM_DEBUG(dbgs() << " => other = " << BFI.getBlockName(Irr.Node) << "\n");
   }
-  llvm::sort(Headers.begin(), Headers.end());
-  llvm::sort(Others.begin(), Others.end());
+  llvm::sort(Headers);
+  llvm::sort(Others);
 }
 static void createIrreducibleLoop(

View File

@@ -395,7 +395,7 @@ populateAliasMap(DenseMap<const Value *, std::vector<OffsetValue>> &AliasMap,
     }
     // Sort AliasList for faster lookup
-    llvm::sort(AliasList.begin(), AliasList.end());
+    llvm::sort(AliasList);
   }
 }
@@ -479,7 +479,7 @@ static void populateExternalRelations(
   }
   // Remove duplicates in ExtRelations
-  llvm::sort(ExtRelations.begin(), ExtRelations.end());
+  llvm::sort(ExtRelations);
   ExtRelations.erase(std::unique(ExtRelations.begin(), ExtRelations.end()),
                      ExtRelations.end());
 }

View File

@@ -97,8 +97,7 @@ void CallGraph::print(raw_ostream &OS) const {
   for (const auto &I : *this)
     Nodes.push_back(I.second.get());
-  llvm::sort(Nodes.begin(), Nodes.end(),
-             [](CallGraphNode *LHS, CallGraphNode *RHS) {
+  llvm::sort(Nodes, [](CallGraphNode *LHS, CallGraphNode *RHS) {
     if (Function *LF = LHS->getFunction())
       if (Function *RF = RHS->getFunction())
         return LF->getName() < RF->getName();

View File

@@ -807,7 +807,7 @@ MemoryDependenceResults::getNonLocalCallDependency(CallSite QueryCS) {
       DirtyBlocks.push_back(Entry.getBB());
     // Sort the cache so that we can do fast binary search lookups below.
-    llvm::sort(Cache.begin(), Cache.end());
+    llvm::sort(Cache);
     ++NumCacheDirtyNonLocal;
     // cerr << "CACHED CASE: " << DirtyBlocks.size() << " dirty: "
@@ -1070,7 +1070,7 @@ SortNonLocalDepInfoCache(MemoryDependenceResults::NonLocalDepInfo &Cache,
     break;
   default:
     // Added many values, do a full scale sort.
-    llvm::sort(Cache.begin(), Cache.end());
+    llvm::sort(Cache);
     break;
   }
 }
@@ -1662,7 +1662,7 @@ void MemoryDependenceResults::removeInstruction(Instruction *RemInst) {
       // Re-sort the NonLocalDepInfo. Changing the dirty entry to its
       // subsequent value may invalidate the sortedness.
-      llvm::sort(NLPDI.begin(), NLPDI.end());
+      llvm::sort(NLPDI);
     }
     ReverseNonLocalPtrDeps.erase(ReversePtrDepIt);

View File

@@ -11064,7 +11064,7 @@ void ScalarEvolution::findArrayDimensions(SmallVectorImpl<const SCEV *> &Terms,
   Terms.erase(std::unique(Terms.begin(), Terms.end()), Terms.end());
   // Put larger terms first.
-  llvm::sort(Terms.begin(), Terms.end(), [](const SCEV *LHS, const SCEV *RHS) {
+  llvm::sort(Terms, [](const SCEV *LHS, const SCEV *RHS) {
     return numberOfTerms(LHS) > numberOfTerms(RHS);
   });

View File

@@ -1867,7 +1867,7 @@ SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
       Phis.push_back(&PN);
   if (TTI)
-    llvm::sort(Phis.begin(), Phis.end(), [](Value *LHS, Value *RHS) {
+    llvm::sort(Phis, [](Value *LHS, Value *RHS) {
       // Put pointers at the back and make sure pointer < pointer = false.
       if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
         return RHS->getType()->isIntegerTy() && !LHS->getType()->isIntegerTy();

View File

@@ -1399,10 +1399,10 @@ static bool compareWithVectorFnName(const VecDesc &LHS, StringRef S) {
 void TargetLibraryInfoImpl::addVectorizableFunctions(ArrayRef<VecDesc> Fns) {
   VectorDescs.insert(VectorDescs.end(), Fns.begin(), Fns.end());
-  llvm::sort(VectorDescs.begin(), VectorDescs.end(), compareByScalarFnName);
+  llvm::sort(VectorDescs, compareByScalarFnName);
   ScalarDescs.insert(ScalarDescs.end(), Fns.begin(), Fns.end());
-  llvm::sort(ScalarDescs.begin(), ScalarDescs.end(), compareByVectorFnName);
+  llvm::sort(ScalarDescs, compareByVectorFnName);
 }
 void TargetLibraryInfoImpl::addVectorizableFunctionsFromVecLib(

View File

@@ -144,7 +144,7 @@ Value *BitcodeReaderValueList::getValueFwdRef(unsigned Idx, Type *Ty) {
 void BitcodeReaderValueList::resolveConstantForwardRefs() {
   // Sort the values by-pointer so that they are efficient to look up with a
   // binary search.
-  llvm::sort(ResolveConstants.begin(), ResolveConstants.end());
+  llvm::sort(ResolveConstants);
   SmallVector<Constant *, 64> NewOps;

View File

@@ -184,7 +184,7 @@ static void predictValueUseListOrderImpl(const Value *V, const Function *F,
     return;
   bool IsGlobalValue = OM.isGlobalValue(ID);
-  llvm::sort(List.begin(), List.end(), [&](const Entry &L, const Entry &R) {
+  llvm::sort(List, [&](const Entry &L, const Entry &R) {
     const Use *LU = L.first;
     const Use *RU = R.first;
     if (LU == RU)
@@ -745,7 +745,7 @@ void ValueEnumerator::organizeMetadata() {
   // and then sort by the original/current ID. Since the IDs are guaranteed to
   // be unique, the result of std::sort will be deterministic. There's no need
   // for std::stable_sort.
-  llvm::sort(Order.begin(), Order.end(), [this](MDIndex LHS, MDIndex RHS) {
+  llvm::sort(Order, [this](MDIndex LHS, MDIndex RHS) {
     return std::make_tuple(LHS.F, getMetadataTypeOrder(LHS.get(MDs)), LHS.ID) <
            std::make_tuple(RHS.F, getMetadataTypeOrder(RHS.get(MDs)), RHS.ID);
   });

View File

@@ -2353,8 +2353,7 @@ void CodeViewDebug::emitLocalVariableList(ArrayRef<LocalVariable> Locals) {
   for (const LocalVariable &L : Locals)
     if (L.DIVar->isParameter())
       Params.push_back(&L);
-  llvm::sort(Params.begin(), Params.end(),
-             [](const LocalVariable *L, const LocalVariable *R) {
+  llvm::sort(Params, [](const LocalVariable *L, const LocalVariable *R) {
     return L->DIVar->getArg() < R->DIVar->getArg();
   });
   for (const LocalVariable *L : Params)

View File

@@ -139,7 +139,7 @@ public:
   // Sort the pieces by offset.
   // Remove any duplicate entries by dropping all but the first.
   void sortUniqueValues() {
-    llvm::sort(Values.begin(), Values.end());
+    llvm::sort(Values);
     Values.erase(
         std::unique(
             Values.begin(), Values.end(), [](const Value &A, const Value &B) {

View File

@@ -241,7 +241,7 @@ ArrayRef<DbgVariable::FrameIndexExpr> DbgVariable::getFrameIndexExprs() const {
                       return A.Expr->isFragment();
                     }) &&
          "multiple FI expressions without DW_OP_LLVM_fragment");
-  llvm::sort(FrameIndexExprs.begin(), FrameIndexExprs.end(),
+  llvm::sort(FrameIndexExprs,
              [](const FrameIndexExpr &A, const FrameIndexExpr &B) -> bool {
                return A.Expr->getFragmentInfo()->OffsetInBits <
                       B.Expr->getFragmentInfo()->OffsetInBits;
@@ -612,9 +612,8 @@ void DwarfDebug::constructAndAddImportedEntityDIE(DwarfCompileUnit &TheCU,
 /// Sort and unique GVEs by comparing their fragment offset.
 static SmallVectorImpl<DwarfCompileUnit::GlobalExpr> &
 sortGlobalExprs(SmallVectorImpl<DwarfCompileUnit::GlobalExpr> &GVEs) {
-  llvm::sort(GVEs.begin(), GVEs.end(),
-             [](DwarfCompileUnit::GlobalExpr A,
-                DwarfCompileUnit::GlobalExpr B) {
+  llvm::sort(
+      GVEs, [](DwarfCompileUnit::GlobalExpr A, DwarfCompileUnit::GlobalExpr B) {
         // Sort order: first null exprs, then exprs without fragment
         // info, then sort by fragment offset in bits.
         // FIXME: Come up with a more comprehensive comparator so
@@ -2000,8 +1999,7 @@ void DwarfDebug::emitDebugARanges() {
   }
   // Sort the CU list (again, to ensure consistent output order).
-  llvm::sort(CUs.begin(), CUs.end(),
-             [](const DwarfCompileUnit *A, const DwarfCompileUnit *B) {
+  llvm::sort(CUs, [](const DwarfCompileUnit *A, const DwarfCompileUnit *B) {
     return A->getUniqueID() < B->getUniqueID();
   });

View File

@@ -359,9 +359,9 @@ void EHStreamer::emitExceptionTable() {
       LandingPads.push_back(&PadInfos[i]);
   // Order landing pads lexicographically by type id.
-  llvm::sort(LandingPads.begin(), LandingPads.end(),
-             [](const LandingPadInfo *L,
-                const LandingPadInfo *R) { return L->TypeIds < R->TypeIds; });
+  llvm::sort(LandingPads, [](const LandingPadInfo *L, const LandingPadInfo *R) {
+    return L->TypeIds < R->TypeIds;
+  });
   // Compute the actions table and gather the first action index for each
   // landing pad site.

View File

@@ -4989,8 +4989,7 @@ bool CodeGenPrepare::splitLargeGEPOffsets() {
       return LargeOffsetGEPID[LHS.first] < LargeOffsetGEPID[RHS.first];
     };
     // Sorting all the GEPs of the same data structures based on the offsets.
-    llvm::sort(LargeOffsetGEPs.begin(), LargeOffsetGEPs.end(),
-               compareGEPOffset);
+    llvm::sort(LargeOffsetGEPs, compareGEPOffset);
     LargeOffsetGEPs.erase(
         std::unique(LargeOffsetGEPs.begin(), LargeOffsetGEPs.end()),
         LargeOffsetGEPs.end());

View File

@@ -219,7 +219,7 @@ void LegalizerInfo::computeTables() {
             Opcode, TypeIdx, ElementSize,
             moreToWiderTypesAndLessToWidest(NumElementsActions));
       }
-      llvm::sort(ElementSizesSeen.begin(), ElementSizesSeen.end());
+      llvm::sort(ElementSizesSeen);
       SizeChangeStrategy VectorElementSizeChangeStrategy =
           &unsupportedForDifferentSizes;
       if (TypeIdx < VectorElementSizeChangeStrategies[OpcodeIdx].size() &&

View File

@@ -328,7 +328,7 @@ bool LocalStackSlotPass::insertFrameReferenceRegisters(MachineFunction &Fn) {
   // Sort the frame references by local offset.
   // Use frame index as a tie-breaker in case MI's have the same offset.
-  llvm::sort(FrameReferenceInsns.begin(), FrameReferenceInsns.end());
+  llvm::sort(FrameReferenceInsns);
   MachineBasicBlock *Entry = &Fn.front();

View File

@@ -134,7 +134,7 @@ rescheduleLexographically(std::vector<MachineInstr *> instructions,
     StringInstrMap.push_back({(i == std::string::npos) ? S : S.substr(i), II});
   }
-  llvm::sort(StringInstrMap.begin(), StringInstrMap.end(),
+  llvm::sort(StringInstrMap,
              [](const StringInstrPair &a, const StringInstrPair &b) -> bool {
                return (a.first < b.first);
              });

View File

@@ -461,7 +461,7 @@ bool MachineBasicBlock::isLiveIn(MCPhysReg Reg, LaneBitmask LaneMask) const {
 }
 void MachineBasicBlock::sortUniqueLiveIns() {
-  llvm::sort(LiveIns.begin(), LiveIns.end(),
+  llvm::sort(LiveIns,
              [](const RegisterMaskPair &LI0, const RegisterMaskPair &LI1) {
                return LI0.PhysReg < LI1.PhysReg;
              });

View File

@@ -1861,8 +1861,7 @@ void SwingSchedulerDAG::registerPressureFilter(NodeSetType &NodeSets) {
     RecRPTracker.closeBottom();
     std::vector<SUnit *> SUnits(NS.begin(), NS.end());
-    llvm::sort(SUnits.begin(), SUnits.end(),
-               [](const SUnit *A, const SUnit *B) {
+    llvm::sort(SUnits, [](const SUnit *A, const SUnit *B) {
       return A->NodeNum > B->NodeNum;
     });
@@ -3981,7 +3980,7 @@ void SwingSchedulerDAG::checkValidNodeOrder(const NodeSetType &Circuits) const {
   };
   // sort, so that we can perform a binary search
-  llvm::sort(Indices.begin(), Indices.end(), CompareKey);
+  llvm::sort(Indices, CompareKey);
   bool Valid = true;
   (void)Valid;

View File

@@ -1554,7 +1554,7 @@ void BaseMemOpClusterMutation::clusterNeighboringMemOps(
   if (MemOpRecords.size() < 2)
     return;
-  llvm::sort(MemOpRecords.begin(), MemOpRecords.end());
+  llvm::sort(MemOpRecords);
   unsigned ClusterLength = 1;
   for (unsigned Idx = 0, End = MemOpRecords.size(); Idx < (End - 1); ++Idx) {
     SUnit *SUa = MemOpRecords[Idx].SU;

View File

@@ -157,7 +157,7 @@ bool ReachingDefAnalysis::runOnMachineFunction(MachineFunction &mf) {
   // Sorting all reaching defs found for a ceartin reg unit in a given BB.
   for (MBBDefsInfo &MBBDefs : MBBReachingDefs) {
     for (MBBRegUnitDefs &RegUnitDefs : MBBDefs)
-      llvm::sort(RegUnitDefs.begin(), RegUnitDefs.end());
+      llvm::sort(RegUnitDefs);
   }
   return false;

View File

@@ -996,7 +996,7 @@ void ScheduleDAGInstrs::reduceHugeMemNodeMaps(Value2SUsMap &stores,
   for (auto &I : loads)
     for (auto *SU : I.second)
       NodeNums.push_back(SU->NodeNum);
-  llvm::sort(NodeNums.begin(), NodeNums.end());
+  llvm::sort(NodeNums);
   // The N last elements in NodeNums will be removed, and the SU with
   // the lowest NodeNum of them will become the new BarrierChain to

View File

@@ -13250,8 +13250,7 @@ static void adjustCostForPairing(SmallVectorImpl<LoadedSlice> &LoadedSlices,
   // Sort the slices so that elements that are likely to be next to each
   // other in memory are next to each other in the list.
-  llvm::sort(LoadedSlices.begin(), LoadedSlices.end(),
-             [](const LoadedSlice &LHS, const LoadedSlice &RHS) {
+  llvm::sort(LoadedSlices, [](const LoadedSlice &LHS, const LoadedSlice &RHS) {
     assert(LHS.Origin == RHS.Origin && "Different bases not implemented.");
     return LHS.getOffsetFromBase() < RHS.getOffsetFromBase();
   });
@@ -14247,8 +14246,7 @@ bool DAGCombiner::MergeConsecutiveStores(StoreSDNode *St) {
     // Sort the memory operands according to their distance from the
     // base pointer.
-    llvm::sort(StoreNodes.begin(), StoreNodes.end(),
-               [](MemOpLink LHS, MemOpLink RHS) {
+    llvm::sort(StoreNodes, [](MemOpLink LHS, MemOpLink RHS) {
      return LHS.OffsetFromBase < RHS.OffsetFromBase;
    });

View File

@@ -242,7 +242,7 @@ void ScheduleDAGSDNodes::ClusterNeighboringLoads(SDNode *Node) {
     return;
   // Sort them in increasing order.
-  llvm::sort(Offsets.begin(), Offsets.end());
+  llvm::sort(Offsets);
   // Check if the loads are close enough.
   SmallVector<SDNode*, 4> Loads;

View File

@@ -8016,7 +8016,7 @@ void SelectionDAG::ReplaceAllUsesOfValuesWith(const SDValue *From,
   }
   // Sort the uses, so that all the uses from a given User are together.
-  llvm::sort(Uses.begin(), Uses.end());
+  llvm::sort(Uses);
   for (unsigned UseIndex = 0, UseIndexEnd = Uses.size();
        UseIndex != UseIndexEnd; ) {

View File

@@ -2580,8 +2580,7 @@ void SelectionDAGBuilder::sortAndRangeify(CaseClusterVector &Clusters) {
     assert(CC.Low == CC.High && "Input clusters must be single-case");
 #endif
-  llvm::sort(Clusters.begin(), Clusters.end(),
-             [](const CaseCluster &a, const CaseCluster &b) {
+  llvm::sort(Clusters, [](const CaseCluster &a, const CaseCluster &b) {
     return a.Low->getValue().slt(b.Low->getValue());
   });
@@ -6252,7 +6251,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
                            GA->getGlobal(), getCurSDLoc(),
                            Val.getValueType(), GA->getOffset())});
   }
-  llvm::sort(Targets.begin(), Targets.end(),
+  llvm::sort(Targets,
              [](const BranchFunnelTarget &T1, const BranchFunnelTarget &T2) {
                return T1.Offset < T2.Offset;
              });
@@ -9670,7 +9669,7 @@ bool SelectionDAGBuilder::buildBitTests(CaseClusterVector &Clusters,
   }
   BitTestInfo BTI;
-  llvm::sort(CBV.begin(), CBV.end(), [](const CaseBits &a, const CaseBits &b) {
+  llvm::sort(CBV, [](const CaseBits &a, const CaseBits &b) {
     // Sort by probability first, number of bits second, bit mask third.
     if (a.ExtraProb != b.ExtraProb)
       return a.ExtraProb > b.ExtraProb;

View File

@@ -95,7 +95,7 @@ bool SlotIndexes::runOnMachineFunction(MachineFunction &fn) {
   }
   // Sort the Idx2MBBMap
-  llvm::sort(idx2MBBMap.begin(), idx2MBBMap.end(), Idx2MBBCompare());
+  llvm::sort(idx2MBBMap, Idx2MBBCompare());
   LLVM_DEBUG(mf->print(dbgs(), this));

View File

@@ -1231,7 +1231,7 @@ bool StackColoring::runOnMachineFunction(MachineFunction &Func) {
     });
   for (auto &s : LiveStarts)
-    llvm::sort(s.begin(), s.end());
+    llvm::sort(s);
   bool Changed = true;
   while (Changed) {

View File

@@ -268,8 +268,7 @@ StackMaps::parseRegisterLiveOutMask(const uint32_t *Mask) const {
   // in the list. Merge entries that refer to the same dwarf register and use
   // the maximum size that needs to be spilled.
-  llvm::sort(LiveOuts.begin(), LiveOuts.end(),
-             [](const LiveOutReg &LHS, const LiveOutReg &RHS) {
+  llvm::sort(LiveOuts, [](const LiveOutReg &LHS, const LiveOutReg &RHS) {
     // Only sort by the dwarf register number.
     return LHS.DwarfRegNum < RHS.DwarfRegNum;
   });

View File

@@ -214,7 +214,7 @@ void StackSlotColoring::InitializeSlots() {
   Intervals.reserve(LS->getNumIntervals());
   for (auto &I : *LS)
     Intervals.push_back(&I);
-  llvm::sort(Intervals.begin(), Intervals.end(),
+  llvm::sort(Intervals,
              [](Pair *LHS, Pair *RHS) { return LHS->first < RHS->first; });
   // Gather all spill slots into a list.

View File

@@ -79,7 +79,7 @@ Error DebugCrossModuleImportsSubsection::commit(
   for (const auto &M : Mappings)
     Ids.push_back(&M);
-  llvm::sort(Ids.begin(), Ids.end(), [this](const T &L1, const T &L2) {
+  llvm::sort(Ids, [this](const T &L1, const T &L2) {
     return Strings.getIdForString(L1->getKey()) <
            Strings.getIdForString(L2->getKey());
   });

View File

@@ -91,7 +91,7 @@ std::vector<uint32_t> DebugStringTableSubsection::sortedIds() const {
   Result.reserve(IdToString.size());
   for (const auto &Entry : IdToString)
     Result.push_back(Entry.first);
-  llvm::sort(Result.begin(), Result.end());
+  llvm::sort(Result);
   return Result;
 }

View File

@@ -106,10 +106,11 @@ collectContributionData(DWARFContext::unit_iterator_range Units) {
   // Sort the contributions so that any invalid ones are placed at
   // the start of the contributions vector. This way they are reported
   // first.
-  llvm::sort(Contributions.begin(), Contributions.end(),
+  llvm::sort(Contributions,
              [](const Optional<StrOffsetsContributionDescriptor> &L,
                 const Optional<StrOffsetsContributionDescriptor> &R) {
-               if (L && R) return L->Base < R->Base;
+               if (L && R)
+                 return L->Base < R->Base;
                return R.hasValue();
              });

View File

@@ -80,7 +80,7 @@ void DWARFDebugAranges::appendRange(uint32_t CUOffset, uint64_t LowPC,
 void DWARFDebugAranges::construct() {
   std::multiset<uint32_t> ValidCUs; // Maintain the set of CUs describing
                                     // a current address range.
-  llvm::sort(Endpoints.begin(), Endpoints.end());
+  llvm::sort(Endpoints);
   uint64_t PrevAddress = -1ULL;
   for (const auto &E : Endpoints) {
     if (PrevAddress < E.Address && !ValidCUs.empty()) {

View File

@@ -839,7 +839,7 @@ Error DWARFDebugLine::LineTable::parse(
   // Sort all sequences so that address lookup will work faster.
   if (!Sequences.empty()) {
-    llvm::sort(Sequences.begin(), Sequences.end(), Sequence::orderByLowPC);
+    llvm::sort(Sequences, Sequence::orderByLowPC);
     // Note: actually, instruction address ranges of sequences should not
     // overlap (in shared objects and executables). If they do, the address
    // lookup would still work, though, but result would be ambiguous.

View File

@@ -144,8 +144,7 @@ void GSIHashStreamBuilder::finalizeBuckets(uint32_t RecordZeroOffset) {
     // can properly early-out when it detects the record won't be found. The
     // algorithm used here corredsponds to the function
     // caseInsensitiveComparePchPchCchCch in the reference implementation.
-    llvm::sort(Bucket.begin(), Bucket.end(),
-               [](const std::pair<StringRef, PSHashRecord> &Left,
+    llvm::sort(Bucket, [](const std::pair<StringRef, PSHashRecord> &Left,
                   const std::pair<StringRef, PSHashRecord> &Right) {
       return gsiRecordLess(Left.first, Right.first);
     });

View File

@@ -199,7 +199,7 @@ static void predictValueUseListOrderImpl(const Value *V, const Function *F,
       !isa<GlobalVariable>(V) && !isa<Function>(V) && !isa<BasicBlock>(V);
   if (auto *BA = dyn_cast<BlockAddress>(V))
     ID = OM.lookup(BA->getBasicBlock()).first;
-  llvm::sort(List.begin(), List.end(), [&](const Entry &L, const Entry &R) {
+  llvm::sort(List, [&](const Entry &L, const Entry &R) {
     const Use *LU = L.first;
     const Use *RU = R.first;
     if (LU == RU)

View File

@@ -658,7 +658,7 @@ AttributeSetNode *AttributeSetNode::get(LLVMContext &C,
   FoldingSetNodeID ID;
   SmallVector<Attribute, 8> SortedAttrs(Attrs.begin(), Attrs.end());
-  llvm::sort(SortedAttrs.begin(), SortedAttrs.end());
+  llvm::sort(SortedAttrs);
   for (const auto Attr : SortedAttrs)
     Attr.Profile(ID);

View File

@@ -237,7 +237,7 @@ void ReplaceableMetadataImpl::replaceAllUsesWith(Metadata *MD) {
   // Copy out uses since UseMap will get touched below.
   using UseTy = std::pair<void *, std::pair<OwnerTy, uint64_t>>;
   SmallVector<UseTy, 8> Uses(UseMap.begin(), UseMap.end());
-  llvm::sort(Uses.begin(), Uses.end(), [](const UseTy &L, const UseTy &R) {
+  llvm::sort(Uses, [](const UseTy &L, const UseTy &R) {
     return L.second.second < R.second.second;
   });
   for (const auto &Pair : Uses) {
@@ -290,7 +290,7 @@ void ReplaceableMetadataImpl::resolveAllUses(bool ResolveUsers) {
   // Copy out uses since UseMap could get touched below.
   using UseTy = std::pair<void *, std::pair<OwnerTy, uint64_t>>;
   SmallVector<UseTy, 8> Uses(UseMap.begin(), UseMap.end());
-  llvm::sort(Uses.begin(), Uses.end(), [](const UseTy &L, const UseTy &R) {
+  llvm::sort(Uses, [](const UseTy &L, const UseTy &R) {
     return L.second.second < R.second.second;
   });
   UseMap.clear();

View File

@@ -2300,7 +2300,7 @@ void Verifier::visitBasicBlock(BasicBlock &BB) {
   if (isa<PHINode>(BB.front())) {
     SmallVector<BasicBlock*, 8> Preds(pred_begin(&BB), pred_end(&BB));
     SmallVector<std::pair<BasicBlock*, Value*>, 8> Values;
-    llvm::sort(Preds.begin(), Preds.end());
+    llvm::sort(Preds);
     for (const PHINode &PN : BB.phis()) {
       // Ensure that PHI nodes have at least one entry!
       Assert(PN.getNumIncomingValues() != 0,
@@ -2318,7 +2318,7 @@ void Verifier::visitBasicBlock(BasicBlock &BB) {
       for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
         Values.push_back(
            std::make_pair(PN.getIncomingBlock(i), PN.getIncomingValue(i)));
-      llvm::sort(Values.begin(), Values.end());
+      llvm::sort(Values);
       for (unsigned i = 0, e = Values.size(); i != e; ++i) {
         // Check to make sure that if there is more than one entry for a

View File

@@ -968,8 +968,7 @@ void ThinLTOCodeGenerator::run() {
   std::vector<int> ModulesOrdering;
   ModulesOrdering.resize(Modules.size());
   std::iota(ModulesOrdering.begin(), ModulesOrdering.end(), 0);
-  llvm::sort(ModulesOrdering.begin(), ModulesOrdering.end(),
-             [&](int LeftIndex, int RightIndex) {
+  llvm::sort(ModulesOrdering, [&](int LeftIndex, int RightIndex) {
     auto LSize = Modules[LeftIndex].getBuffer().size();
     auto RSize = Modules[RightIndex].getBuffer().size();
     return LSize > RSize;

View File

@@ -597,8 +597,8 @@ void MachObjectWriter::computeSymbolTable(
   }
   // External and undefined symbols are required to be in lexicographic order.
-  llvm::sort(ExternalSymbolData.begin(), ExternalSymbolData.end());
-  llvm::sort(UndefinedSymbolData.begin(), UndefinedSymbolData.end());
+  llvm::sort(ExternalSymbolData);
+  llvm::sort(UndefinedSymbolData);
   // Set the symbol indices.
   Index = 0;

View File

@@ -558,8 +558,7 @@ void WinCOFFObjectWriter::writeSectionHeaders() {
   std::vector<COFFSection *> Arr;
   for (auto &Section : Sections)
     Arr.push_back(Section.get());
-  llvm::sort(Arr.begin(), Arr.end(),
-             [](const COFFSection *A, const COFFSection *B) {
+  llvm::sort(Arr, [](const COFFSection *A, const COFFSection *B) {
     return A->Number < B->Number;
   });

View File

@@ -83,7 +83,7 @@ Counter CounterExpressionBuilder::simplify(Counter ExpressionTree) {
     return Counter::getZero();
   // Group the terms by counter ID.
-  llvm::sort(Terms.begin(), Terms.end(), [](const Term &LHS, const Term &RHS) {
+  llvm::sort(Terms, [](const Term &LHS, const Term &RHS) {
     return LHS.CounterID < RHS.CounterID;
   });
@@ -463,8 +463,7 @@ class SegmentBuilder {
   /// Sort a nested sequence of regions from a single file.
   static void sortNestedRegions(MutableArrayRef<CountedRegion> Regions) {
-    llvm::sort(Regions.begin(), Regions.end(), [](const CountedRegion &LHS,
-                                                  const CountedRegion &RHS) {
+    llvm::sort(Regions, [](const CountedRegion &LHS, const CountedRegion &RHS) {
      if (LHS.startLoc() != RHS.startLoc())
        return LHS.startLoc() < RHS.startLoc();
      if (LHS.endLoc() != RHS.endLoc())
@@ -561,7 +560,7 @@ std::vector<StringRef> CoverageMapping::getUniqueSourceFiles() const {
   for (const auto &Function : getCoveredFunctions())
     Filenames.insert(Filenames.end(), Function.Filenames.begin(),
                      Function.Filenames.end());
-  llvm::sort(Filenames.begin(), Filenames.end());
+  llvm::sort(Filenames);
   auto Last = std::unique(Filenames.begin(), Filenames.end());
   Filenames.erase(Last, Filenames.end());
   return Filenames;

View File

@@ -712,7 +712,7 @@ void FileInfo::print(raw_ostream &InfoOS, StringRef MainFilename,
   SmallVector<StringRef, 4> Filenames;
   for (const auto &LI : LineInfo)
     Filenames.push_back(LI.first());
-  llvm::sort(Filenames.begin(), Filenames.end());
+  llvm::sort(Filenames);
   for (StringRef Filename : Filenames) {
     auto AllLines = LineConsumer(Filename);

View File

@@ -58,7 +58,7 @@ void SampleProfileSummaryBuilder::addRecord(
 void ProfileSummaryBuilder::computeDetailedSummary() {
   if (DetailedSummaryCutoffs.empty())
     return;
-  llvm::sort(DetailedSummaryCutoffs.begin(), DetailedSummaryCutoffs.end());
+  llvm::sort(DetailedSummaryCutoffs);
   auto Iter = CountFrequencies.begin();
   const auto End = CountFrequencies.end();

View File

@@ -517,7 +517,7 @@ static std::vector<const Object::value_type *> sortedElements(const Object &O) {
   std::vector<const Object::value_type *> Elements;
   for (const auto &E : O)
     Elements.push_back(&E);
-  llvm::sort(Elements.begin(), Elements.end(),
+  llvm::sort(Elements,
              [](const Object::value_type *L, const Object::value_type *R) {
                return L->first < R->first;
              });

View File

@@ -269,7 +269,7 @@ SMDiagnostic::SMDiagnostic(const SourceMgr &sm, SMLoc L, StringRef FN,
     : SM(&sm), Loc(L), Filename(FN), LineNo(Line), ColumnNo(Col), Kind(Kind),
       Message(Msg), LineContents(LineStr), Ranges(Ranges.vec()),
       FixIts(Hints.begin(), Hints.end()) {
-  llvm::sort(FixIts.begin(), FixIts.end());
+  llvm::sort(FixIts);
 }
 static void buildFixItLine(std::string &CaretLine, std::string &FixItLine,

View File

@@ -295,7 +295,7 @@ void TimerGroup::addTimer(Timer &T) {
 void TimerGroup::PrintQueuedTimers(raw_ostream &OS) {
   // Sort the timers in descending order by amount of time taken.
-  llvm::sort(TimersToPrint.begin(), TimersToPrint.end());
+  llvm::sort(TimersToPrint);
   TimeRecord Total;
   for (const PrintRecord &Record : TimersToPrint)

View File

@@ -158,8 +158,7 @@ RecordRecTy *RecordRecTy::get(ArrayRef<Record *> UnsortedClasses) {
   SmallVector<Record *, 4> Classes(UnsortedClasses.begin(),
                                    UnsortedClasses.end());
-  llvm::sort(Classes.begin(), Classes.end(),
-             [](Record *LHS, Record *RHS) {
+  llvm::sort(Classes, [](Record *LHS, Record *RHS) {
     return LHS->getNameInitAsString() < RHS->getNameInitAsString();
   });

View File

@@ -377,9 +377,8 @@ bool AArch64A57FPLoadBalancing::runOnBasicBlock(MachineBasicBlock &MBB) {
   // Now we have a set of sets, order them by start address so
   // we can iterate over them sequentially.
-  llvm::sort(V.begin(), V.end(),
-             [](const std::vector<Chain*> &A,
-                const std::vector<Chain*> &B) {
+  llvm::sort(V,
+             [](const std::vector<Chain *> &A, const std::vector<Chain *> &B) {
       return A.front()->startsBefore(B.front());
     });
@@ -453,7 +452,7 @@ bool AArch64A57FPLoadBalancing::colorChainSet(std::vector<Chain*> GV,
   // change them to!
   // Final tie-break with instruction order so pass output is stable (i.e. not
   // dependent on malloc'd pointer values).
-  llvm::sort(GV.begin(), GV.end(), [](const Chain *G1, const Chain *G2) {
+  llvm::sort(GV, [](const Chain *G1, const Chain *G2) {
     if (G1->size() != G2->size())
       return G1->size() > G2->size();
     if (G1->requiresFixup() != G2->requiresFixup())

View File

@@ -434,8 +434,7 @@ void GCNIterativeScheduler::scheduleRegion(Region &R, Range &&Schedule,
 // Sort recorded regions by pressure - highest at the front
 void GCNIterativeScheduler::sortRegionsByPressure(unsigned TargetOcc) {
   const auto &ST = MF.getSubtarget<GCNSubtarget>();
-  llvm::sort(Regions.begin(), Regions.end(),
-             [&ST, TargetOcc](const Region *R1, const Region *R2) {
+  llvm::sort(Regions, [&ST, TargetOcc](const Region *R1, const Region *R2) {
     return R2->MaxPressure.less(ST, R1->MaxPressure, TargetOcc);
   });
 }

View File

@@ -168,8 +168,7 @@ void SIFormMemoryClauses::forAllLanes(unsigned Reg, LaneBitmask LaneMask,
     CoveringSubregs.push_back(Idx);
   }
-  llvm::sort(CoveringSubregs.begin(), CoveringSubregs.end(),
-             [this](unsigned A, unsigned B) {
+  llvm::sort(CoveringSubregs, [this](unsigned A, unsigned B) {
     LaneBitmask MaskA = TRI->getSubRegIndexLaneMask(A);
     LaneBitmask MaskB = TRI->getSubRegIndexLaneMask(B);
     unsigned NA = MaskA.getNumLanes();

View File

@@ -1444,9 +1444,8 @@ void ARMBaseInstrInfo::expandMEMCPY(MachineBasicBlock::iterator MI) const {
   SmallVector<unsigned, 6> ScratchRegs;
   for(unsigned I = 5; I < MI->getNumOperands(); ++I)
     ScratchRegs.push_back(MI->getOperand(I).getReg());
-  llvm::sort(ScratchRegs.begin(), ScratchRegs.end(),
-             [&TRI](const unsigned &Reg1,
-                    const unsigned &Reg2) -> bool {
+  llvm::sort(ScratchRegs,
+             [&TRI](const unsigned &Reg1, const unsigned &Reg2) -> bool {
               return TRI.getEncodingValue(Reg1) <
                      TRI.getEncodingValue(Reg2);
             });

View File

@@ -1008,8 +1008,7 @@ void ARMFrameLowering::emitPushInst(MachineBasicBlock &MBB,
     if (Regs.empty())
       continue;
-    llvm::sort(Regs.begin(), Regs.end(), [&](const RegAndKill &LHS,
-                                             const RegAndKill &RHS) {
+    llvm::sort(Regs, [&](const RegAndKill &LHS, const RegAndKill &RHS) {
       return TRI.getEncodingValue(LHS.first) < TRI.getEncodingValue(RHS.first);
     });
@@ -1105,7 +1104,7 @@ void ARMFrameLowering::emitPopInst(MachineBasicBlock &MBB,
     if (Regs.empty())
      continue;
-    llvm::sort(Regs.begin(), Regs.end(), [&](unsigned LHS, unsigned RHS) {
+    llvm::sort(Regs, [&](unsigned LHS, unsigned RHS) {
      return TRI.getEncodingValue(LHS) < TRI.getEncodingValue(RHS);
    });

View File

@@ -1848,7 +1848,7 @@ bool ARMLoadStoreOpt::LoadStoreMultipleOpti(MachineBasicBlock &MBB) {
   auto LessThan = [](const MergeCandidate* M0, const MergeCandidate *M1) {
     return M0->InsertPos < M1->InsertPos;
   };
-  llvm::sort(Candidates.begin(), Candidates.end(), LessThan);
+  llvm::sort(Candidates, LessThan);
   // Go through list of candidates and merge.
   bool Changed = false;
@@ -2186,8 +2186,7 @@ bool ARMPreAllocLoadStoreOpt::RescheduleOps(MachineBasicBlock *MBB,
   bool RetVal = false;
   // Sort by offset (in reverse order).
-  llvm::sort(Ops.begin(), Ops.end(),
-             [](const MachineInstr *LHS, const MachineInstr *RHS) {
+  llvm::sort(Ops, [](const MachineInstr *LHS, const MachineInstr *RHS) {
     int LOffset = getMemoryOpOffset(*LHS);
     int ROffset = getMemoryOpOffset(*RHS);
     assert(LHS == RHS || LOffset != ROffset);

View File

@@ -1072,7 +1072,7 @@ void ARMTargetELFStreamer::finishAttributeSection() {
   if (Contents.empty())
     return;
-  llvm::sort(Contents.begin(), Contents.end(), AttributeItem::LessTag);
+  llvm::sort(Contents, AttributeItem::LessTag);
   ARMELFStreamer &Streamer = getStreamer();

View File

@@ -1947,8 +1947,7 @@ bool HCE::runOnMachineFunction(MachineFunction &MF) {
   AssignmentMap IMap;
   collect(MF);
-  llvm::sort(Extenders.begin(), Extenders.end(),
-             [](const ExtDesc &A, const ExtDesc &B) {
+  llvm::sort(Extenders, [](const ExtDesc &A, const ExtDesc &B) {
     return ExtValue(A) < ExtValue(B);
   });

View File

@@ -632,7 +632,7 @@ void HexagonGenInsert::buildOrderingBT(RegisterOrdering &RB,
   SortableVectorType VRs;
   for (RegisterOrdering::iterator I = RB.begin(), E = RB.end(); I != E; ++I)
     VRs.push_back(I->first);
-  llvm::sort(VRs.begin(), VRs.end(), LexCmp);
+  llvm::sort(VRs, LexCmp);
   // Transfer the results to the outgoing register ordering.
   for (unsigned i = 0, n = VRs.size(); i < n; ++i)
     RO.insert(std::make_pair(VRs[i], i));

View File

@@ -578,7 +578,7 @@ bool HexagonStoreWidening::processBasicBlock(MachineBasicBlock &MBB) {
   };
   for (auto &G : SGs) {
     assert(G.size() > 1 && "Store group with fewer than 2 elements");
-    llvm::sort(G.begin(), G.end(), Less);
+    llvm::sort(G, Less);
     Changed |= processStoreGroup(G);
   }

View File

@@ -214,7 +214,7 @@ bool DeadCodeElimination::erase(const SetVector<NodeId> &Nodes) {
       return false;
     return A.Id < B.Id;
   };
-  llvm::sort(DRNs.begin(), DRNs.end(), UsesFirst);
+  llvm::sort(DRNs, UsesFirst);
   if (trace())
     dbgs() << "Removing dead ref nodes:\n";

View File

@@ -1471,7 +1471,7 @@ void DataFlowGraph::buildPhis(BlockRefsMap &PhiM, RegisterSet &AllRefs,
   // and add a def for each S in the closure.
   // Sort the refs so that the phis will be created in a deterministic order.
-  llvm::sort(MaxRefs.begin(), MaxRefs.end());
+  llvm::sort(MaxRefs);
   // Remove duplicates.
   auto NewEnd = std::unique(MaxRefs.begin(), MaxRefs.end());
   MaxRefs.erase(NewEnd, MaxRefs.end());

View File

@@ -207,7 +207,7 @@ NodeList Liveness::getAllReachingDefs(RegisterRef RefRR,
   };
   std::vector<NodeId> Tmp(Owners.begin(), Owners.end());
-  llvm::sort(Tmp.begin(), Tmp.end(), Less);
+  llvm::sort(Tmp, Less);
   // The vector is a list of instructions, so that defs coming from
   // the same instruction don't need to be artificially ordered.
@@ -813,7 +813,7 @@ void Liveness::computeLiveIns() {
       std::vector<RegisterRef> LV;
       for (auto I = B.livein_begin(), E = B.livein_end(); I != E; ++I)
         LV.push_back(RegisterRef(I->PhysReg, I->LaneMask));
-      llvm::sort(LV.begin(), LV.end());
+      llvm::sort(LV);
       dbgs() << printMBBReference(B) << "\t rec = {";
       for (auto I : LV)
         dbgs() << ' ' << Print<RegisterRef>(I, DFG);
@@ -824,7 +824,7 @@ void Liveness::computeLiveIns() {
      const RegisterAggr &LG = LiveMap[&B];
      for (auto I = LG.rr_begin(), E = LG.rr_end(); I != E; ++I)
        LV.push_back(*I);
-      llvm::sort(LV.begin(), LV.end());
+      llvm::sort(LV);
      dbgs() << "\tcomp = {";
      for (auto I : LV)
        dbgs() << ' ' << Print<RegisterRef>(I, DFG);

View File

@@ -453,7 +453,7 @@ void MipsELFObjectWriter::sortRelocs(const MCAssembler &Asm,
     return;
   // Sort relocations by the address they are applied to.
-  llvm::sort(Relocs.begin(), Relocs.end(),
+  llvm::sort(Relocs,
              [](const ELFRelocationEntry &A, const ELFRelocationEntry &B) {
                return A.Offset < B.Offset;
              });

View File

@@ -1401,7 +1401,7 @@ class BitPermutationSelector {
     for (auto &I : ValueRots) {
       ValueRotsVec.push_back(I.second);
     }
-    llvm::sort(ValueRotsVec.begin(), ValueRotsVec.end());
+    llvm::sort(ValueRotsVec);
   }
   // In 64-bit mode, rlwinm and friends have a rotation operator that

View File

@@ -118,8 +118,7 @@ bool WebAssemblyRegColoring::runOnMachineFunction(MachineFunction &MF) {
   // registers), by weight next, and then by position.
   // TODO: Investigate more intelligent sorting heuristics. For starters, we
   // should try to coalesce adjacent live intervals before non-adjacent ones.
-  llvm::sort(SortedIntervals.begin(), SortedIntervals.end(),
-             [MRI](LiveInterval *LHS, LiveInterval *RHS) {
+  llvm::sort(SortedIntervals, [MRI](LiveInterval *LHS, LiveInterval *RHS) {
     if (MRI->isLiveIn(LHS->reg) != MRI->isLiveIn(RHS->reg))
       return MRI->isLiveIn(LHS->reg);
     if (LHS->weight != RHS->weight)

View File

@@ -828,7 +828,7 @@ X86SpeculativeLoadHardeningPass::tracePredStateThroughCFG(
              "split above!");
       // Sort and unique the codes to minimize them.
-      llvm::sort(UncondCodeSeq.begin(), UncondCodeSeq.end());
+      llvm::sort(UncondCodeSeq);
       UncondCodeSeq.erase(std::unique(UncondCodeSeq.begin(), UncondCodeSeq.end()),
                           UncondCodeSeq.end());

View File

@@ -151,7 +151,7 @@ static void GetSpillList(SmallVectorImpl<StackSlotInfo> &SpillList,
                                     Offset,
                                     FramePtr));
   }
-  llvm::sort(SpillList.begin(), SpillList.end(), CompareSSIOffset);
+  llvm::sort(SpillList, CompareSSIOffset);
 }
 /// Creates an ordered list of EH info register 'spills'.
@@ -170,7 +170,7 @@ static void GetEHSpillList(SmallVectorImpl<StackSlotInfo> &SpillList,
     SpillList.push_back(
         StackSlotInfo(EHSlot[0], MFI.getObjectOffset(EHSlot[1]),
                       TL->getExceptionSelectorRegister(PersonalityFn)));
-  llvm::sort(SpillList.begin(), SpillList.end(), CompareSSIOffset);
+  llvm::sort(SpillList, CompareSSIOffset);
 }
 static MachineMemOperand *getFrameIndexMMO(MachineBasicBlock &MBB,

View File

@@ -129,7 +129,7 @@ createReplacementInstr(ConstantExpr *CE, Instruction *Instr) {
 static bool replaceConstantExprOp(ConstantExpr *CE, Pass *P) {
   do {
     SmallVector<WeakTrackingVH, 8> WUsers(CE->user_begin(), CE->user_end());
-    llvm::sort(WUsers.begin(), WUsers.end());
+    llvm::sort(WUsers);
     WUsers.erase(std::unique(WUsers.begin(), WUsers.end()), WUsers.end());
     while (!WUsers.empty())
       if (WeakTrackingVH WU = WUsers.pop_back_val()) {

View File

@@ -49,7 +49,7 @@ public:
   BlockToIndexMapping(Function &F) {
     for (BasicBlock &BB : F)
       V.push_back(&BB);
-    llvm::sort(V.begin(), V.end());
+    llvm::sort(V);
   }
   size_t blockToIndex(BasicBlock *BB) const {

View File

@ -1997,7 +1997,7 @@ bool LowerTypeTestsModule::lower() {
} }
Sets.emplace_back(I, MaxUniqueId); Sets.emplace_back(I, MaxUniqueId);
} }
llvm::sort(Sets.begin(), Sets.end(), llvm::sort(Sets,
[](const std::pair<GlobalClassesTy::iterator, unsigned> &S1, [](const std::pair<GlobalClassesTy::iterator, unsigned> &S1,
const std::pair<GlobalClassesTy::iterator, unsigned> &S2) { const std::pair<GlobalClassesTy::iterator, unsigned> &S2) {
return S1.second < S2.second; return S1.second < S2.second;
@ -2022,12 +2022,12 @@ bool LowerTypeTestsModule::lower() {
// Order type identifiers by unique ID for determinism. This ordering is // Order type identifiers by unique ID for determinism. This ordering is
// stable as there is a one-to-one mapping between metadata and unique IDs. // stable as there is a one-to-one mapping between metadata and unique IDs.
llvm::sort(TypeIds.begin(), TypeIds.end(), [&](Metadata *M1, Metadata *M2) { llvm::sort(TypeIds, [&](Metadata *M1, Metadata *M2) {
return TypeIdInfo[M1].UniqueId < TypeIdInfo[M2].UniqueId; return TypeIdInfo[M1].UniqueId < TypeIdInfo[M2].UniqueId;
}); });
// Same for the branch funnels. // Same for the branch funnels.
llvm::sort(ICallBranchFunnels.begin(), ICallBranchFunnels.end(), llvm::sort(ICallBranchFunnels,
[&](ICallBranchFunnel *F1, ICallBranchFunnel *F2) { [&](ICallBranchFunnel *F1, ICallBranchFunnel *F2) {
return F1->UniqueId < F2->UniqueId; return F1->UniqueId < F2->UniqueId;
}); });
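The two sorts above exist purely for determinism: Metadata pointers vary from run to run, so LowerTypeTests orders them by the unique ID it assigned earlier. A standalone sketch of sorting opaque handles by a side-table key; the map and variable names are made up for this sketch.

    #include <algorithm>
    #include <cassert>
    #include <map>
    #include <vector>

    int main() {
      // Stand-ins for opaque handles (e.g. Metadata*) whose addresses differ per run.
      int A = 0, B = 0, C = 0;
      std::map<const int *, unsigned> UniqueId = {{&A, 2}, {&B, 0}, {&C, 1}};

      std::vector<const int *> Handles = {&A, &B, &C};
      // Order by the assigned ID, not by pointer value, so the result is reproducible.
      std::sort(Handles.begin(), Handles.end(), [&](const int *L, const int *R) {
        return UniqueId[L] < UniqueId[R];
      });
      assert((Handles == std::vector<const int *>{&B, &C, &A}));
    }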

View File

@ -681,8 +681,7 @@ SampleProfileLoader::findIndirectCallFunctionSamples(
Sum += NameFS.second.getEntrySamples(); Sum += NameFS.second.getEntrySamples();
R.push_back(&NameFS.second); R.push_back(&NameFS.second);
} }
llvm::sort(R.begin(), R.end(), llvm::sort(R, [](const FunctionSamples *L, const FunctionSamples *R) {
[](const FunctionSamples *L, const FunctionSamples *R) {
if (L->getEntrySamples() != R->getEntrySamples()) if (L->getEntrySamples() != R->getEntrySamples())
return L->getEntrySamples() > R->getEntrySamples(); return L->getEntrySamples() > R->getEntrySamples();
return FunctionSamples::getGUID(L->getName()) < return FunctionSamples::getGUID(L->getName()) <
@ -1174,8 +1173,7 @@ static SmallVector<InstrProfValueData, 2> SortCallTargets(
SmallVector<InstrProfValueData, 2> R; SmallVector<InstrProfValueData, 2> R;
for (auto I = M.begin(); I != M.end(); ++I) for (auto I = M.begin(); I != M.end(); ++I)
R.push_back({FunctionSamples::getGUID(I->getKey()), I->getValue()}); R.push_back({FunctionSamples::getGUID(I->getKey()), I->getValue()});
llvm::sort(R.begin(), R.end(), llvm::sort(R, [](const InstrProfValueData &L, const InstrProfValueData &R) {
[](const InstrProfValueData &L, const InstrProfValueData &R) {
if (L.Count == R.Count) if (L.Count == R.Count)
return L.Value > R.Value; return L.Value > R.Value;
else else
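The comparator above is a common two-key sort: primary key Count, descending, with Value as a tie-break so the order is total and deterministic even when two targets have equal counts. A standalone sketch with a stand-in struct (InstrProfValueData's real layout is not reproduced here):

    #include <algorithm>
    #include <cassert>
    #include <vector>

    struct ValueCount { // stand-in for InstrProfValueData
      unsigned Value;
      unsigned Count;
      bool operator==(const ValueCount &O) const {
        return Value == O.Value && Count == O.Count;
      }
    };

    int main() {
      std::vector<ValueCount> Targets = {{7, 10}, {3, 40}, {5, 10}};
      // Highest count first; ties broken by value (descending, mirroring the hunk above).
      std::sort(Targets.begin(), Targets.end(),
                [](const ValueCount &L, const ValueCount &R) {
                  if (L.Count == R.Count)
                    return L.Value > R.Value;
                  return L.Count > R.Count;
                });
      assert((Targets == std::vector<ValueCount>{{3, 40}, {7, 10}, {5, 10}}));
    }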

View File

@ -748,10 +748,8 @@ private:
// TODO: Remove fully-redundant expressions. // TODO: Remove fully-redundant expressions.
// Get instruction from the Map, assume that all the Instructions // Get instruction from the Map, assume that all the Instructions
// with same VNs have same rank (this is an approximation). // with same VNs have same rank (this is an approximation).
llvm::sort(Ranks.begin(), Ranks.end(), llvm::sort(Ranks, [this, &Map](const VNType &r1, const VNType &r2) {
[this, &Map](const VNType &r1, const VNType &r2) { return (rank(*Map.lookup(r1).begin()) < rank(*Map.lookup(r2).begin()));
return (rank(*Map.lookup(r1).begin()) <
rank(*Map.lookup(r2).begin()));
}); });
// - Sort VNs according to their rank, and start with lowest ranked VN // - Sort VNs according to their rank, and start with lowest ranked VN

View File

@ -239,7 +239,7 @@ public:
SmallVector<std::pair<BasicBlock *, Value *>, 4> Ops; SmallVector<std::pair<BasicBlock *, Value *>, 4> Ops;
for (unsigned I = 0, E = PN->getNumIncomingValues(); I != E; ++I) for (unsigned I = 0, E = PN->getNumIncomingValues(); I != E; ++I)
Ops.push_back({PN->getIncomingBlock(I), PN->getIncomingValue(I)}); Ops.push_back({PN->getIncomingBlock(I), PN->getIncomingValue(I)});
llvm::sort(Ops.begin(), Ops.end()); llvm::sort(Ops);
for (auto &P : Ops) { for (auto &P : Ops) {
Blocks.push_back(P.first); Blocks.push_back(P.first);
Values.push_back(P.second); Values.push_back(P.second);
@ -762,7 +762,7 @@ unsigned GVNSink::sinkBB(BasicBlock *BBEnd) {
} }
if (Preds.size() < 2) if (Preds.size() < 2)
return 0; return 0;
llvm::sort(Preds.begin(), Preds.end()); llvm::sort(Preds);
unsigned NumOrigPreds = Preds.size(); unsigned NumOrigPreds = Preds.size();
// We can only sink instructions through unconditional branches. // We can only sink instructions through unconditional branches.

View File

@ -698,8 +698,7 @@ bool GuardWideningImpl::combineRangeChecks(
// CurrentChecks.size() will typically be 3 here, but so far there has been // CurrentChecks.size() will typically be 3 here, but so far there has been
// no need to hard-code that fact. // no need to hard-code that fact.
llvm::sort(CurrentChecks.begin(), CurrentChecks.end(), llvm::sort(CurrentChecks, [&](const GuardWideningImpl::RangeCheck &LHS,
[&](const GuardWideningImpl::RangeCheck &LHS,
const GuardWideningImpl::RangeCheck &RHS) { const GuardWideningImpl::RangeCheck &RHS) {
return LHS.getOffsetValue().slt(RHS.getOffsetValue()); return LHS.getOffsetValue().slt(RHS.getOffsetValue());
}); });

View File

@ -208,10 +208,8 @@ static bool sinkInstruction(Loop &L, Instruction &I,
SmallVector<BasicBlock *, 2> SortedBBsToSinkInto; SmallVector<BasicBlock *, 2> SortedBBsToSinkInto;
SortedBBsToSinkInto.insert(SortedBBsToSinkInto.begin(), BBsToSinkInto.begin(), SortedBBsToSinkInto.insert(SortedBBsToSinkInto.begin(), BBsToSinkInto.begin(),
BBsToSinkInto.end()); BBsToSinkInto.end());
llvm::sort(SortedBBsToSinkInto.begin(), SortedBBsToSinkInto.end(), llvm::sort(SortedBBsToSinkInto, [&](BasicBlock *A, BasicBlock *B) {
[&](BasicBlock *A, BasicBlock *B) { return LoopBlockNumber.find(A)->second < LoopBlockNumber.find(B)->second;
return LoopBlockNumber.find(A)->second <
LoopBlockNumber.find(B)->second;
}); });
BasicBlock *MoveBB = *SortedBBsToSinkInto.begin(); BasicBlock *MoveBB = *SortedBBsToSinkInto.begin();

View File

@ -1487,7 +1487,7 @@ bool LSRUse::HasFormulaWithSameRegs(const Formula &F) const {
SmallVector<const SCEV *, 4> Key = F.BaseRegs; SmallVector<const SCEV *, 4> Key = F.BaseRegs;
if (F.ScaledReg) Key.push_back(F.ScaledReg); if (F.ScaledReg) Key.push_back(F.ScaledReg);
// Unstable sort by host order ok, because this is only used for uniquifying. // Unstable sort by host order ok, because this is only used for uniquifying.
llvm::sort(Key.begin(), Key.end()); llvm::sort(Key);
return Uniquifier.count(Key); return Uniquifier.count(Key);
} }
@ -1511,7 +1511,7 @@ bool LSRUse::InsertFormula(const Formula &F, const Loop &L) {
SmallVector<const SCEV *, 4> Key = F.BaseRegs; SmallVector<const SCEV *, 4> Key = F.BaseRegs;
if (F.ScaledReg) Key.push_back(F.ScaledReg); if (F.ScaledReg) Key.push_back(F.ScaledReg);
// Unstable sort by host order ok, because this is only used for uniquifying. // Unstable sort by host order ok, because this is only used for uniquifying.
llvm::sort(Key.begin(), Key.end()); llvm::sort(Key);
if (!Uniquifier.insert(Key).second) if (!Uniquifier.insert(Key).second)
return false; return false;
@ -4238,7 +4238,7 @@ void LSRInstance::FilterOutUndesirableDedicatedRegisters() {
Key.push_back(F.ScaledReg); Key.push_back(F.ScaledReg);
// Unstable sort by host order ok, because this is only used for // Unstable sort by host order ok, because this is only used for
// uniquifying. // uniquifying.
llvm::sort(Key.begin(), Key.end()); llvm::sort(Key);
std::pair<BestFormulaeTy::const_iterator, bool> P = std::pair<BestFormulaeTy::const_iterator, bool> P =
BestFormulae.insert(std::make_pair(Key, FIdx)); BestFormulae.insert(std::make_pair(Key, FIdx));

View File

@ -465,8 +465,7 @@ BCECmpChain::BCECmpChain(const std::vector<BasicBlock *> &Blocks, PHINode &Phi,
#endif // MERGEICMPS_DOT_ON #endif // MERGEICMPS_DOT_ON
// Reorder blocks by LHS. We can do that without changing the // Reorder blocks by LHS. We can do that without changing the
// semantics because we are only accessing dereferencable memory. // semantics because we are only accessing dereferencable memory.
llvm::sort(Comparisons_.begin(), Comparisons_.end(), llvm::sort(Comparisons_, [](const BCECmpBlock &a, const BCECmpBlock &b) {
[](const BCECmpBlock &a, const BCECmpBlock &b) {
return a.Lhs() < b.Lhs(); return a.Lhs() < b.Lhs();
}); });
#ifdef MERGEICMPS_DOT_ON #ifdef MERGEICMPS_DOT_ON

View File

@ -959,8 +959,7 @@ static bool isCopyOfAPHI(const Value *V) {
// order. The BlockInstRange numbers are generated in an RPO walk of the basic // order. The BlockInstRange numbers are generated in an RPO walk of the basic
// blocks. // blocks.
void NewGVN::sortPHIOps(MutableArrayRef<ValPair> Ops) const { void NewGVN::sortPHIOps(MutableArrayRef<ValPair> Ops) const {
llvm::sort(Ops.begin(), Ops.end(), llvm::sort(Ops, [&](const ValPair &P1, const ValPair &P2) {
[&](const ValPair &P1, const ValPair &P2) {
return BlockInstRange.lookup(P1.second).first < return BlockInstRange.lookup(P1.second).first <
BlockInstRange.lookup(P2.second).first; BlockInstRange.lookup(P2.second).first;
}); });
@ -3955,7 +3954,7 @@ bool NewGVN::eliminateInstructions(Function &F) {
convertClassToDFSOrdered(*CC, DFSOrderedSet, UseCounts, ProbablyDead); convertClassToDFSOrdered(*CC, DFSOrderedSet, UseCounts, ProbablyDead);
// Sort the whole thing. // Sort the whole thing.
llvm::sort(DFSOrderedSet.begin(), DFSOrderedSet.end()); llvm::sort(DFSOrderedSet);
for (auto &VD : DFSOrderedSet) { for (auto &VD : DFSOrderedSet) {
int MemberDFSIn = VD.DFSIn; int MemberDFSIn = VD.DFSIn;
int MemberDFSOut = VD.DFSOut; int MemberDFSOut = VD.DFSOut;
@ -4118,7 +4117,7 @@ bool NewGVN::eliminateInstructions(Function &F) {
// If we have possible dead stores to look at, try to eliminate them. // If we have possible dead stores to look at, try to eliminate them.
if (CC->getStoreCount() > 0) { if (CC->getStoreCount() > 0) {
convertClassToLoadsAndStores(*CC, PossibleDeadStores); convertClassToLoadsAndStores(*CC, PossibleDeadStores);
llvm::sort(PossibleDeadStores.begin(), PossibleDeadStores.end()); llvm::sort(PossibleDeadStores);
ValueDFSStack EliminationStack; ValueDFSStack EliminationStack;
for (auto &VD : PossibleDeadStores) { for (auto &VD : PossibleDeadStores) {
int MemberDFSIn = VD.DFSIn; int MemberDFSIn = VD.DFSIn;

View File

@ -524,7 +524,7 @@ bool PlaceSafepoints::runOnFunction(Function &F) {
}; };
// We need the order of list to be stable so that naming ends up stable // We need the order of list to be stable so that naming ends up stable
// when we split edges. This makes test cases much easier to write. // when we split edges. This makes test cases much easier to write.
llvm::sort(PollLocations.begin(), PollLocations.end(), OrderByBBName); llvm::sort(PollLocations, OrderByBBName);
// We can sometimes end up with duplicate poll locations. This happens if // We can sometimes end up with duplicate poll locations. This happens if
// a single loop is visited more than once. The fact this happens seems // a single loop is visited more than once. The fact this happens seems

View File

@ -1825,7 +1825,7 @@ static void relocationViaAlloca(
} }
} }
llvm::sort(Uses.begin(), Uses.end()); llvm::sort(Uses);
auto Last = std::unique(Uses.begin(), Uses.end()); auto Last = std::unique(Uses.begin(), Uses.end());
Uses.erase(Last, Uses.end()); Uses.erase(Last, Uses.end());

View File

@ -1060,7 +1060,7 @@ AllocaSlices::AllocaSlices(const DataLayout &DL, AllocaInst &AI)
// Sort the uses. This arranges for the offsets to be in ascending order, // Sort the uses. This arranges for the offsets to be in ascending order,
// and the sizes to be in descending order. // and the sizes to be in descending order.
llvm::sort(Slices.begin(), Slices.end()); llvm::sort(Slices);
} }
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
@ -1906,7 +1906,7 @@ static VectorType *isVectorPromotionViable(Partition &P, const DataLayout &DL) {
"All non-integer types eliminated!"); "All non-integer types eliminated!");
return RHSTy->getNumElements() < LHSTy->getNumElements(); return RHSTy->getNumElements() < LHSTy->getNumElements();
}; };
llvm::sort(CandidateTys.begin(), CandidateTys.end(), RankVectorTypes); llvm::sort(CandidateTys, RankVectorTypes);
CandidateTys.erase( CandidateTys.erase(
std::unique(CandidateTys.begin(), CandidateTys.end(), RankVectorTypes), std::unique(CandidateTys.begin(), CandidateTys.end(), RankVectorTypes),
CandidateTys.end()); CandidateTys.end());
@ -4221,7 +4221,7 @@ bool SROA::splitAlloca(AllocaInst &AI, AllocaSlices &AS) {
} }
if (!IsSorted) if (!IsSorted)
llvm::sort(AS.begin(), AS.end()); llvm::sort(AS);
/// Describes the allocas introduced by rewritePartition in order to migrate /// Describes the allocas introduced by rewritePartition in order to migrate
/// the debug info. /// the debug info.

View File

@ -1265,8 +1265,7 @@ static void buildClonedLoops(Loop &OrigL, ArrayRef<BasicBlock *> ExitBlocks,
// matter as we're just trying to build up the map from inside-out; we use // matter as we're just trying to build up the map from inside-out; we use
// the map in a more stably ordered way below. // the map in a more stably ordered way below.
auto OrderedClonedExitsInLoops = ClonedExitsInLoops; auto OrderedClonedExitsInLoops = ClonedExitsInLoops;
llvm::sort(OrderedClonedExitsInLoops.begin(), OrderedClonedExitsInLoops.end(), llvm::sort(OrderedClonedExitsInLoops, [&](BasicBlock *LHS, BasicBlock *RHS) {
[&](BasicBlock *LHS, BasicBlock *RHS) {
return ExitLoopMap.lookup(LHS)->getLoopDepth() < return ExitLoopMap.lookup(LHS)->getLoopDepth() <
ExitLoopMap.lookup(RHS)->getLoopDepth(); ExitLoopMap.lookup(RHS)->getLoopDepth();
}); });

View File

@ -161,7 +161,7 @@ void ImportedFunctionsInliningStatistics::dump(const bool Verbose) {
void ImportedFunctionsInliningStatistics::calculateRealInlines() { void ImportedFunctionsInliningStatistics::calculateRealInlines() {
// Removing duplicated Callers. // Removing duplicated Callers.
llvm::sort(NonImportedCallers.begin(), NonImportedCallers.end()); llvm::sort(NonImportedCallers);
NonImportedCallers.erase( NonImportedCallers.erase(
std::unique(NonImportedCallers.begin(), NonImportedCallers.end()), std::unique(NonImportedCallers.begin(), NonImportedCallers.end()),
NonImportedCallers.end()); NonImportedCallers.end());

View File

@ -372,7 +372,7 @@ unsigned LowerSwitch::Clusterify(CaseVector& Cases, SwitchInst *SI) {
Cases.push_back(CaseRange(Case.getCaseValue(), Case.getCaseValue(), Cases.push_back(CaseRange(Case.getCaseValue(), Case.getCaseValue(),
Case.getCaseSuccessor())); Case.getCaseSuccessor()));
llvm::sort(Cases.begin(), Cases.end(), CaseCmp()); llvm::sort(Cases, CaseCmp());
// Merge case into clusters // Merge case into clusters
if (Cases.size() >= 2) { if (Cases.size() >= 2) {

View File

@ -569,7 +569,7 @@ void PredicateInfo::renameUses(SmallPtrSetImpl<Value *> &OpSet) {
auto Comparator = [&](const Value *A, const Value *B) { auto Comparator = [&](const Value *A, const Value *B) {
return valueComesBefore(OI, A, B); return valueComesBefore(OI, A, B);
}; };
llvm::sort(OpsToRename.begin(), OpsToRename.end(), Comparator); llvm::sort(OpsToRename, Comparator);
ValueDFS_Compare Compare(OI); ValueDFS_Compare Compare(OI);
// Compute liveness, and rename in O(uses) per Op. // Compute liveness, and rename in O(uses) per Op.
for (auto *Op : OpsToRename) { for (auto *Op : OpsToRename) {

View File

@ -477,7 +477,7 @@ static bool promoteSingleBlockAlloca(AllocaInst *AI, const AllocaInfo &Info,
// Sort the stores by their index, making it efficient to do a lookup with a // Sort the stores by their index, making it efficient to do a lookup with a
// binary search. // binary search.
llvm::sort(StoresByIndex.begin(), StoresByIndex.end(), less_first()); llvm::sort(StoresByIndex, less_first());
// Walk all of the loads from this alloca, replacing them with the nearest // Walk all of the loads from this alloca, replacing them with the nearest
// store above them, if any. // store above them, if any.
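less_first() (also from STLExtras) orders pairs by their first element only, which is what lets promoteSingleBlockAlloca binary-search the (index, store) pairs afterwards. A sketch of the same pattern, with a hand-written less-by-first comparator and hypothetical data:

    #include <algorithm>
    #include <cassert>
    #include <iterator>
    #include <string>
    #include <utility>
    #include <vector>

    // Stand-in for llvm::less_first(): compare pairs by .first only.
    struct LessFirst {
      template <typename T> bool operator()(const T &L, const T &R) const {
        return L.first < R.first;
      }
    };

    int main() {
      // (instruction index, store name) pairs, discovered in arbitrary order.
      std::vector<std::pair<unsigned, std::string>> StoresByIndex = {
          {42, "store.c"}, {7, "store.a"}, {19, "store.b"}};
      std::sort(StoresByIndex.begin(), StoresByIndex.end(), LessFirst());

      // For a load at index 20, find the nearest preceding store by binary search.
      unsigned LoadIdx = 20;
      auto It = std::lower_bound(StoresByIndex.begin(), StoresByIndex.end(),
                                 std::make_pair(LoadIdx, std::string()), LessFirst());
      assert(It != StoresByIndex.begin());
      assert(std::prev(It)->second == "store.b"); // the store at index 19
    }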
@ -638,8 +638,7 @@ void PromoteMem2Reg::run() {
SmallVector<BasicBlock *, 32> PHIBlocks; SmallVector<BasicBlock *, 32> PHIBlocks;
IDF.calculate(PHIBlocks); IDF.calculate(PHIBlocks);
if (PHIBlocks.size() > 1) if (PHIBlocks.size() > 1)
llvm::sort(PHIBlocks.begin(), PHIBlocks.end(), llvm::sort(PHIBlocks, [this](BasicBlock *A, BasicBlock *B) {
[this](BasicBlock *A, BasicBlock *B) {
return BBNumbers.lookup(A) < BBNumbers.lookup(B); return BBNumbers.lookup(A) < BBNumbers.lookup(B);
}); });
@ -752,7 +751,7 @@ void PromoteMem2Reg::run() {
// Ok, now we know that all of the PHI nodes are missing entries for some // Ok, now we know that all of the PHI nodes are missing entries for some
// basic blocks. Start by sorting the incoming predecessors for efficient // basic blocks. Start by sorting the incoming predecessors for efficient
// access. // access.
llvm::sort(Preds.begin(), Preds.end()); llvm::sort(Preds);
// Now we loop through all BB's which have entries in SomePHI and remove // Now we loop through all BB's which have entries in SomePHI and remove
// them from the Preds list. // them from the Preds list.

View File

@ -5521,7 +5521,7 @@ static bool ReduceSwitchRange(SwitchInst *SI, IRBuilder<> &Builder,
SmallVector<int64_t,4> Values; SmallVector<int64_t,4> Values;
for (auto &C : SI->cases()) for (auto &C : SI->cases())
Values.push_back(C.getCaseValue()->getValue().getSExtValue()); Values.push_back(C.getCaseValue()->getValue().getSExtValue());
llvm::sort(Values.begin(), Values.end()); llvm::sort(Values);
// If the switch is already dense, there's nothing useful to do here. // If the switch is already dense, there's nothing useful to do here.
if (isSwitchDense(Values)) if (isSwitchDense(Values))
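ReduceSwitchRange only does its rewrite when the sorted case values are spread out; the density test itself is not shown in this hunk. Below is a sketch of one plausible density check on sorted values — the 40% threshold is an assumption for illustration, not necessarily what isSwitchDense actually uses.

    #include <algorithm>
    #include <cassert>
    #include <cstdint>
    #include <vector>

    // "Dense enough" when the case count covers a sizeable fraction of the span
    // between the smallest and largest value (assumed threshold: 40%).
    static bool isDenseEnough(const std::vector<int64_t> &Values) {
      if (Values.size() < 2)
        return true;
      uint64_t Range = uint64_t(Values.back()) - uint64_t(Values.front()) + 1;
      return Values.size() * 100 >= Range * 40;
    }

    int main() {
      std::vector<int64_t> Values = {100, 1, 3, 2, 5};
      std::sort(Values.begin(), Values.end()); // llvm::sort(Values) in the hunk above
      assert(!isDenseEnough(Values));          // 5 cases spread over a span of 100
      Values = {1, 2, 3, 5};
      assert(isDenseEnough(Values));           // 4 cases over a span of 5
    }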

View File

@ -181,11 +181,9 @@ static void findPartitions(Module *M, ClusterIDMapType &ClusterIDMap,
std::make_pair(std::distance(GVtoClusterMap.member_begin(I), std::make_pair(std::distance(GVtoClusterMap.member_begin(I),
GVtoClusterMap.member_end()), I)); GVtoClusterMap.member_end()), I));
llvm::sort(Sets.begin(), Sets.end(), llvm::sort(Sets, [](const SortType &a, const SortType &b) {
[](const SortType &a, const SortType &b) {
if (a.first == b.first) if (a.first == b.first)
return a.second->getData()->getName() > return a.second->getData()->getName() > b.second->getData()->getName();
b.second->getData()->getName();
else else
return a.first > b.first; return a.first > b.first;
}); });

View File

@ -484,7 +484,7 @@ bool DwarfLinker::RelocationManager::findValidRelocs(
// the file, this allows us to just keep an index in the relocation // the file, this allows us to just keep an index in the relocation
// array that we advance during our walk, rather than resorting to // array that we advance during our walk, rather than resorting to
// some associative container. See DwarfLinker::NextValidReloc. // some associative container. See DwarfLinker::NextValidReloc.
llvm::sort(ValidRelocs.begin(), ValidRelocs.end()); llvm::sort(ValidRelocs);
return true; return true;
} }

View File

@ -320,7 +320,7 @@ void DwarfStreamer::emitUnitRangesEntries(CompileUnit &Unit,
// The object addresses were sorted, but again, the linked // The object addresses were sorted, but again, the linked
// addresses might end up in a different order. // addresses might end up in a different order.
llvm::sort(Ranges.begin(), Ranges.end()); llvm::sort(Ranges);
if (!Ranges.empty()) { if (!Ranges.empty()) {
MS->SwitchSection(MC->getObjectFileInfo()->getDwarfARangesSection()); MS->SwitchSection(MC->getObjectFileInfo()->getDwarfARangesSection());

View File

@ -523,7 +523,7 @@ int main(int argc, char **argv) {
if (DyLibExists && !sys::fs::exists(path)) { if (DyLibExists && !sys::fs::exists(path)) {
Components = Components =
GetAllDyLibComponents(IsInDevelopmentTree, true, DirSep); GetAllDyLibComponents(IsInDevelopmentTree, true, DirSep);
llvm::sort(Components.begin(), Components.end()); llvm::sort(Components);
break; break;
} }
} }

View File

@ -358,7 +358,7 @@ static void filterByAccelName(ArrayRef<std::string> Names, DWARFContext &DICtx,
getDies(DICtx, DICtx.getAppleNamespaces(), Name, Dies); getDies(DICtx, DICtx.getAppleNamespaces(), Name, Dies);
getDies(DICtx, DICtx.getDebugNames(), Name, Dies); getDies(DICtx, DICtx.getDebugNames(), Name, Dies);
} }
llvm::sort(Dies.begin(), Dies.end()); llvm::sort(Dies);
Dies.erase(std::unique(Dies.begin(), Dies.end()), Dies.end()); Dies.erase(std::unique(Dies.begin(), Dies.end()), Dies.end());
for (DWARFDie Die : Dies) for (DWARFDie Die : Dies)

View File

@ -668,8 +668,7 @@ void distributePressure(float RemainingPressure,
llvm::SmallVector<float, 32> &DensePressure) { llvm::SmallVector<float, 32> &DensePressure) {
// Find the number of subunits with minimal pressure (they are at the // Find the number of subunits with minimal pressure (they are at the
// front). // front).
llvm::sort(Subunits.begin(), Subunits.end(), llvm::sort(Subunits, [&DensePressure](const uint16_t A, const uint16_t B) {
[&DensePressure](const uint16_t A, const uint16_t B) {
return DensePressure[A] < DensePressure[B]; return DensePressure[A] < DensePressure[B];
}); });
const auto getPressureForSubunit = [&DensePressure, const auto getPressureForSubunit = [&DensePressure,
@ -718,8 +717,7 @@ std::vector<std::pair<uint16_t, float>> computeIdealizedProcResPressure(
llvm::SmallVector<llvm::MCWriteProcResEntry, 8> WPRS) { llvm::SmallVector<llvm::MCWriteProcResEntry, 8> WPRS) {
// DensePressure[I] is the port pressure for Proc Resource I. // DensePressure[I] is the port pressure for Proc Resource I.
llvm::SmallVector<float, 32> DensePressure(SM.getNumProcResourceKinds()); llvm::SmallVector<float, 32> DensePressure(SM.getNumProcResourceKinds());
llvm::sort(WPRS.begin(), WPRS.end(), llvm::sort(WPRS, [](const llvm::MCWriteProcResEntry &A,
[](const llvm::MCWriteProcResEntry &A,
const llvm::MCWriteProcResEntry &B) { const llvm::MCWriteProcResEntry &B) {
return A.ProcResourceIdx < B.ProcResourceIdx; return A.ProcResourceIdx < B.ProcResourceIdx;
}); });

Some files were not shown because too many files have changed in this diff.