Use llvm::stable_sort

While touching the code, simplify if feasible.

llvm-svn: 358996
parent 99cf58339f
commit efd94c56ba
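
Every hunk below makes the same mechanical change: llvm::stable_sort from llvm/include/llvm/ADT/STLExtras.h replaces an iterator-pair call to std::stable_sort. A minimal sketch of the two overloads involved, simplified from the in-tree definitions (the real ones forward through ADL-aware adl_begin/adl_end rather than std::begin/std::end):

    #include <algorithm>
    #include <iterator>

    namespace llvm {
    // Sort a whole range using the element type's operator<.
    template <typename R> void stable_sort(R &&Range) {
      std::stable_sort(std::begin(Range), std::end(Range));
    }
    // Sort a whole range with an explicit comparator.
    template <typename R, typename Compare>
    void stable_sort(R &&Range, Compare C) {
      std::stable_sort(std::begin(Range), std::end(Range), C);
    }
    } // namespace llvm

This is why each call site can drop the paired .begin()/.end() arguments, and why call sites whose comparator was just operator< can drop the comparator too.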
@@ -318,12 +318,6 @@ class GCOVBlock {
     uint64_t Count = 0;
   };
 
-  struct SortDstEdgesFunctor {
-    bool operator()(const GCOVEdge *E1, const GCOVEdge *E2) {
-      return E1->Dst.Number < E2->Dst.Number;
-    }
-  };
-
 public:
   using EdgeIterator = SmallVectorImpl<GCOVEdge *>::const_iterator;
   using BlockVector = SmallVector<const GCOVBlock *, 4>;
@@ -572,10 +572,9 @@ public:
   SampleSorter(const std::map<LocationT, SampleT> &Samples) {
     for (const auto &I : Samples)
       V.push_back(&I);
-    std::stable_sort(V.begin(), V.end(),
-                     [](const SamplesWithLoc *A, const SamplesWithLoc *B) {
-                       return A->first < B->first;
-                     });
+    llvm::stable_sort(V, [](const SamplesWithLoc *A, const SamplesWithLoc *B) {
+      return A->first < B->first;
+    });
   }
 
   const SamplesWithLocList &get() const { return V; }
@@ -1144,10 +1144,9 @@ bool llvm::sortPtrAccesses(ArrayRef<Value *> VL, const DataLayout &DL,
   std::iota(SortedIndices.begin(), SortedIndices.end(), 0);
 
   // Sort the memory accesses and keep the order of their uses in UseOrder.
-  std::stable_sort(SortedIndices.begin(), SortedIndices.end(),
-                   [&OffValPairs](unsigned Left, unsigned Right) {
-                     return OffValPairs[Left].first < OffValPairs[Right].first;
-                   });
+  llvm::stable_sort(SortedIndices, [&](unsigned Left, unsigned Right) {
+    return OffValPairs[Left].first < OffValPairs[Right].first;
+  });
 
   // Check if the order is consecutive already.
   if (llvm::all_of(SortedIndices, [&SortedIndices](const unsigned I) {
@@ -799,11 +799,10 @@ static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
   }
 
   // Do the rough sort by complexity.
-  std::stable_sort(Ops.begin(), Ops.end(),
-                   [&](const SCEV *LHS, const SCEV *RHS) {
-                     return CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
-                                                  LHS, RHS, DT) < 0;
-                   });
+  llvm::stable_sort(Ops, [&](const SCEV *LHS, const SCEV *RHS) {
+    return CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LHS, RHS, DT) <
+           0;
+  });
 
   // Now that we are sorted by complexity, group elements of the same
   // complexity. Note that this is, at worst, N^2, but the vector is likely to
@@ -694,7 +694,7 @@ Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
 
   // Sort by loop. Use a stable sort so that constants follow non-constants and
   // pointer operands precede non-pointer operands.
-  std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(SE.DT));
+  llvm::stable_sort(OpsAndLoops, LoopCompare(SE.DT));
 
   // Emit instructions to add all the operands. Hoist as much as possible
   // out of loops, and form meaningful getelementptrs where possible.
@@ -761,7 +761,7 @@ Value *SCEVExpander::visitMulExpr(const SCEVMulExpr *S) {
     OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I));
 
   // Sort by loop. Use a stable sort so that constants follow non-constants.
-  std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(SE.DT));
+  llvm::stable_sort(OpsAndLoops, LoopCompare(SE.DT));
 
   // Emit instructions to mul all the operands. Hoist as much as possible
   // out of loops.
@@ -55,10 +55,10 @@ void AccelTableBase::finalize(AsmPrinter *Asm, StringRef Prefix) {
   // Create the individual hash data outputs.
   for (auto &E : Entries) {
     // Unique the entries.
-    std::stable_sort(E.second.Values.begin(), E.second.Values.end(),
-                     [](const AccelTableData *A, const AccelTableData *B) {
-                       return *A < *B;
-                     });
+    llvm::stable_sort(E.second.Values,
+                      [](const AccelTableData *A, const AccelTableData *B) {
+                        return *A < *B;
+                      });
     E.second.Values.erase(
         std::unique(E.second.Values.begin(), E.second.Values.end()),
         E.second.Values.end());
@@ -81,10 +81,9 @@ void AccelTableBase::finalize(AsmPrinter *Asm, StringRef Prefix) {
   // Sort the contents of the buckets by hash value so that hash collisions end
   // up together. Stable sort makes testing easier and doesn't cost much more.
   for (auto &Bucket : Buckets)
-    std::stable_sort(Bucket.begin(), Bucket.end(),
-                     [](HashData *LHS, HashData *RHS) {
-                       return LHS->HashValue < RHS->HashValue;
-                     });
+    llvm::stable_sort(Bucket, [](HashData *LHS, HashData *RHS) {
+      return LHS->HashValue < RHS->HashValue;
+    });
 }
 
 namespace {
@@ -1979,9 +1979,9 @@ void AsmPrinter::EmitXXStructorList(const DataLayout &DL, const Constant *List,
 
   // Emit the function pointers in the target-specific order
   unsigned Align = Log2_32(DL.getPointerPrefAlignment());
-  std::stable_sort(Structors.begin(), Structors.end(),
-                   [](const Structor &L,
-                      const Structor &R) { return L.Priority < R.Priority; });
+  llvm::stable_sort(Structors, [](const Structor &L, const Structor &R) {
+    return L.Priority < R.Priority;
+  });
   for (Structor &S : Structors) {
     const TargetLoweringObjectFile &Obj = getObjFileLowering();
     const MCSymbol *KeySym = nullptr;
@@ -2233,19 +2233,18 @@ void DwarfDebug::emitDebugARanges() {
   }
 
   // Sort the symbols by offset within the section.
-  std::stable_sort(
-      List.begin(), List.end(), [&](const SymbolCU &A, const SymbolCU &B) {
-        unsigned IA = A.Sym ? Asm->OutStreamer->GetSymbolOrder(A.Sym) : 0;
-        unsigned IB = B.Sym ? Asm->OutStreamer->GetSymbolOrder(B.Sym) : 0;
+  llvm::stable_sort(List, [&](const SymbolCU &A, const SymbolCU &B) {
+    unsigned IA = A.Sym ? Asm->OutStreamer->GetSymbolOrder(A.Sym) : 0;
+    unsigned IB = B.Sym ? Asm->OutStreamer->GetSymbolOrder(B.Sym) : 0;
 
-        // Symbols with no order assigned should be placed at the end.
-        // (e.g. section end labels)
-        if (IA == 0)
-          return false;
-        if (IB == 0)
-          return true;
-        return IA < IB;
-      });
+    // Symbols with no order assigned should be placed at the end.
+    // (e.g. section end labels)
+    if (IA == 0)
+      return false;
+    if (IB == 0)
+      return true;
+    return IA < IB;
+  });
 
   // Insert a final terminator.
   List.push_back(SymbolCU(nullptr, Asm->OutStreamer->endSection(Section)));
@@ -219,11 +219,11 @@ bool GlobalMerge::doMerge(SmallVectorImpl<GlobalVariable*> &Globals,
                           Module &M, bool isConst, unsigned AddrSpace) const {
   auto &DL = M.getDataLayout();
   // FIXME: Find better heuristics
-  std::stable_sort(Globals.begin(), Globals.end(),
-                   [&DL](const GlobalVariable *GV1, const GlobalVariable *GV2) {
-                     return DL.getTypeAllocSize(GV1->getValueType()) <
-                            DL.getTypeAllocSize(GV2->getValueType());
-                   });
+  llvm::stable_sort(
+      Globals, [&DL](const GlobalVariable *GV1, const GlobalVariable *GV2) {
+        return DL.getTypeAllocSize(GV1->getValueType()) <
+               DL.getTypeAllocSize(GV2->getValueType());
+      });
 
   // If we want to just blindly group all globals together, do so.
   if (!GlobalMergeGroupByUse) {
@@ -385,11 +385,11 @@ bool GlobalMerge::doMerge(SmallVectorImpl<GlobalVariable*> &Globals,
   //
   // Multiply that by the size of the set to give us a crude profitability
   // metric.
-  std::stable_sort(UsedGlobalSets.begin(), UsedGlobalSets.end(),
-                   [](const UsedGlobalSet &UGS1, const UsedGlobalSet &UGS2) {
-                     return UGS1.Globals.count() * UGS1.UsageCount <
-                            UGS2.Globals.count() * UGS2.UsageCount;
-                   });
+  llvm::stable_sort(UsedGlobalSets,
+                    [](const UsedGlobalSet &UGS1, const UsedGlobalSet &UGS2) {
+                      return UGS1.Globals.count() * UGS1.UsageCount <
+                             UGS2.Globals.count() * UGS2.UsageCount;
+                    });
 
   // We can choose to merge all globals together, but ignore globals never used
   // with another global. This catches the obviously non-profitable cases of
@@ -1316,7 +1316,7 @@ void IfConverter::AnalyzeBlocks(
     AnalyzeBlock(MBB, Tokens);
 
   // Sort to favor more complex ifcvt scheme.
-  std::stable_sort(Tokens.begin(), Tokens.end(), IfcvtTokenCmp);
+  llvm::stable_sort(Tokens, IfcvtTokenCmp);
 }
 
 /// Returns true either if ToMBB is the next block after MBB or that all the
@@ -941,8 +941,8 @@ MachineBlockPlacement::getBestNonConflictingEdges(
   // Sort for highest frequency.
   auto Cmp = [](WeightedEdge A, WeightedEdge B) { return A.Weight > B.Weight; };
 
-  std::stable_sort(Edges[0].begin(), Edges[0].end(), Cmp);
-  std::stable_sort(Edges[1].begin(), Edges[1].end(), Cmp);
+  llvm::stable_sort(Edges[0], Cmp);
+  llvm::stable_sort(Edges[1], Cmp);
   auto BestA = Edges[0].begin();
   auto BestB = Edges[1].begin();
   // Arrange for the correct answer to be in BestA and BestB
@@ -1530,15 +1530,12 @@ MachineBlockPlacement::selectBestSuccessor(
   // profitable than BestSucc. Position is important because we preserve it and
   // prefer first best match. Here we aren't comparing in order, so we capture
   // the position instead.
-  if (DupCandidates.size() != 0) {
-    auto cmp =
-        [](const std::tuple<BranchProbability, MachineBasicBlock *> &a,
-           const std::tuple<BranchProbability, MachineBasicBlock *> &b) {
-          return std::get<0>(a) > std::get<0>(b);
-        };
-    std::stable_sort(DupCandidates.begin(), DupCandidates.end(), cmp);
-  }
-  for(auto &Tup : DupCandidates) {
+  llvm::stable_sort(DupCandidates,
+                    [](std::tuple<BranchProbability, MachineBasicBlock *> L,
+                       std::tuple<BranchProbability, MachineBasicBlock *> R) {
+                      return std::get<0>(L) > std::get<0>(R);
+                    });
+  for (auto &Tup : DupCandidates) {
     BranchProbability DupProb;
     MachineBasicBlock *Succ;
     std::tie(DupProb, Succ) = Tup;
@@ -1198,11 +1198,10 @@ bool MachineOutliner::outline(Module &M,
   unsigned OutlinedFunctionNum = 0;
 
   // Sort by benefit. The most beneficial functions should be outlined first.
-  std::stable_sort(
-      FunctionList.begin(), FunctionList.end(),
-      [](const OutlinedFunction &LHS, const OutlinedFunction &RHS) {
-        return LHS.getBenefit() > RHS.getBenefit();
-      });
+  llvm::stable_sort(FunctionList, [](const OutlinedFunction &LHS,
+                                     const OutlinedFunction &RHS) {
+    return LHS.getBenefit() > RHS.getBenefit();
+  });
 
   // Walk over each function, outlining them as we go along. Functions are
   // outlined greedily, based off the sort above.
@@ -428,7 +428,7 @@ void SwingSchedulerDAG::schedule() {
     }
   });
 
-  std::stable_sort(NodeSets.begin(), NodeSets.end(), std::greater<NodeSet>());
+  llvm::stable_sort(NodeSets, std::greater<NodeSet>());
 
   groupRemainingNodes(NodeSets);
 
@@ -584,9 +584,8 @@ MachineSinking::GetAllSortedSuccessors(MachineInstr &MI, MachineBasicBlock *MBB,
       AllSuccs.push_back(DTChild->getBlock());
 
   // Sort Successors according to their loop depth or block frequency info.
-  std::stable_sort(
-      AllSuccs.begin(), AllSuccs.end(),
-      [this](const MachineBasicBlock *L, const MachineBasicBlock *R) {
+  llvm::stable_sort(
+      AllSuccs, [this](const MachineBasicBlock *L, const MachineBasicBlock *R) {
         uint64_t LHSFreq = MBFI ? MBFI->getBlockFreq(L).getFrequency() : 0;
         uint64_t RHSFreq = MBFI ? MBFI->getBlockFreq(R).getFrequency() : 0;
         bool HasBlockFreq = LHSFreq != 0 && RHSFreq != 0;
@@ -921,7 +921,7 @@ EmitSchedule(MachineBasicBlock::iterator &InsertPos) {
   // Sort the source order instructions and use the order to insert debug
   // values. Use stable_sort so that DBG_VALUEs are inserted in the same order
   // regardless of the host's implementation fo std::sort.
-  std::stable_sort(Orders.begin(), Orders.end(), less_first());
+  llvm::stable_sort(Orders, less_first());
   std::stable_sort(DAG->DbgBegin(), DAG->DbgEnd(),
                    [](const SDDbgValue *LHS, const SDDbgValue *RHS) {
                      return LHS->getOrder() < RHS->getOrder();
@@ -1220,11 +1220,12 @@ bool StackColoring::runOnMachineFunction(MachineFunction &Func) {
 
   // Sort the slots according to their size. Place unused slots at the end.
   // Use stable sort to guarantee deterministic code generation.
-  std::stable_sort(SortedSlots.begin(), SortedSlots.end(),
-                   [this](int LHS, int RHS) {
+  llvm::stable_sort(SortedSlots, [this](int LHS, int RHS) {
     // We use -1 to denote a uninteresting slot. Place these slots at the end.
-    if (LHS == -1) return false;
-    if (RHS == -1) return true;
+    if (LHS == -1)
+      return false;
+    if (RHS == -1)
+      return true;
     // Sort according to size.
     return MFI->getObjectSize(LHS) > MFI->getObjectSize(RHS);
   });
@@ -242,7 +242,7 @@ void StackSlotColoring::InitializeSlots() {
   LLVM_DEBUG(dbgs() << '\n');
 
   // Sort them by weight.
-  std::stable_sort(SSIntervals.begin(), SSIntervals.end(), IntervalSorter());
+  llvm::stable_sort(SSIntervals, IntervalSorter());
 
   NextColors.resize(AllColors.size());
 
@@ -347,7 +347,7 @@ bool StackSlotColoring::ColorSlots(MachineFunction &MF) {
     li->weight = SlotWeights[SS];
   }
   // Sort them by new weight.
-  std::stable_sort(SSIntervals.begin(), SSIntervals.end(), IntervalSorter());
+  llvm::stable_sort(SSIntervals, IntervalSorter());
 
 #ifndef NDEBUG
   for (unsigned i = 0, e = SSIntervals.size(); i != e; ++i)
@@ -262,8 +262,7 @@ static std::vector<ulittle32_t> computeAddrMap(ArrayRef<CVSymbol> Records) {
     SymOffsets.push_back(SymOffset);
     SymOffset += Sym.length();
   }
-  std::stable_sort(PublicsByAddr.begin(), PublicsByAddr.end(),
-                   comparePubSymByAddrAndName);
+  llvm::stable_sort(PublicsByAddr, comparePubSymByAddrAndName);
 
   // Fill in the symbol offsets in the appropriate order.
   std::vector<ulittle32_t> AddrMap;
@@ -68,9 +68,7 @@ MDNode *MDBuilder::createFunctionEntryCount(
   Ops.push_back(createConstant(ConstantInt::get(Int64Ty, Count)));
   if (Imports) {
     SmallVector<GlobalValue::GUID, 2> OrderID(Imports->begin(), Imports->end());
-    std::stable_sort(OrderID.begin(), OrderID.end(),
-                     [] (GlobalValue::GUID A, GlobalValue::GUID B) {
-                       return A < B;});
+    llvm::stable_sort(OrderID);
     for (auto ID : OrderID)
      Ops.push_back(createConstant(ConstantInt::get(Int64Ty, ID)));
   }
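
The MDBuilder hunk above is one of the places where the comparator disappears entirely: a lambda that returns A < B adds nothing over the default ordering, so the single-argument overload suffices. A small sketch of the pattern (sortGUIDs is a hypothetical helper, not an LLVM function; GlobalValue::GUID is a plain 64-bit integer, so uint64_t stands in for it here):

    #include "llvm/ADT/STLExtras.h"
    #include "llvm/ADT/SmallVector.h"
    #include <cstdint>

    // Hypothetical helper: plain integers sort correctly with no comparator.
    void sortGUIDs(llvm::SmallVector<uint64_t, 2> &OrderID) {
      llvm::stable_sort(OrderID); // same effect as passing A < B explicitly
    }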
@@ -1179,10 +1179,7 @@ void MDGlobalAttachmentMap::getAll(
 
   // Sort the resulting array so it is stable with respect to metadata IDs. We
   // need to preserve the original insertion order though.
-  std::stable_sort(
-      Result.begin(), Result.end(),
-      [](const std::pair<unsigned, MDNode *> &A,
-         const std::pair<unsigned, MDNode *> &B) { return A.first < B.first; });
+  llvm::stable_sort(Result, less_first());
 }
 
 void Instruction::setMetadata(StringRef Kind, MDNode *Node) {
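
The hunk above collapses a pair-comparing lambda into less_first, a small function object from llvm/ADT/STLExtras.h that the EmitSchedule and MergeFunctions hunks were already using before this patch. A sketch of it, simplified from the in-tree definition:

    // Orders any two pair-like values by their .first members, replacing
    // hand-written "A.first < B.first" lambdas.
    struct less_first {
      template <typename T>
      bool operator()(const T &lhs, const T &rhs) const {
        return lhs.first < rhs.first;
      }
    };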
@@ -1865,11 +1865,10 @@ void MCDwarfFrameEmitter::Emit(MCObjectStreamer &Streamer, MCAsmBackend *MAB,
   // but the Android libunwindstack rejects eh_frame sections where
   // an FDE refers to a CIE other than the closest previous CIE.
   std::vector<MCDwarfFrameInfo> FrameArrayX(FrameArray.begin(), FrameArray.end());
-  std::stable_sort(
-      FrameArrayX.begin(), FrameArrayX.end(),
-      [&](const MCDwarfFrameInfo &X, const MCDwarfFrameInfo &Y) -> bool {
-        return CIEKey(X) < CIEKey(Y);
-      });
+  llvm::stable_sort(FrameArrayX,
+                    [](const MCDwarfFrameInfo &X, const MCDwarfFrameInfo &Y) {
+                      return CIEKey(X) < CIEKey(Y);
+                    });
   for (auto I = FrameArrayX.begin(), E = FrameArrayX.end(); I != E;) {
     const MCDwarfFrameInfo &Frame = *I;
     ++I;
@@ -934,9 +934,8 @@ void WasmObjectWriter::writeRelocSection(
   // order, but for the code section we combine many MC sections into single
   // wasm section, and this order is determined by the order of Asm.Symbols()
   // not the sections order.
-  std::stable_sort(
-      Relocs.begin(), Relocs.end(),
-      [](const WasmRelocationEntry &A, const WasmRelocationEntry &B) {
+  llvm::stable_sort(
+      Relocs, [](const WasmRelocationEntry &A, const WasmRelocationEntry &B) {
        return (A.Offset + A.FixupSection->getSectionOffset()) <
               (B.Offset + B.FixupSection->getSectionOffset());
      });
@@ -124,15 +124,14 @@ void CoverageMappingWriter::write(raw_ostream &OS) {
 
   // Sort the regions in an ascending order by the file id and the starting
   // location. Sort by region kinds to ensure stable order for tests.
-  std::stable_sort(
-      MappingRegions.begin(), MappingRegions.end(),
-      [](const CounterMappingRegion &LHS, const CounterMappingRegion &RHS) {
-        if (LHS.FileID != RHS.FileID)
-          return LHS.FileID < RHS.FileID;
-        if (LHS.startLoc() != RHS.startLoc())
-          return LHS.startLoc() < RHS.startLoc();
-        return LHS.Kind < RHS.Kind;
-      });
+  llvm::stable_sort(MappingRegions, [](const CounterMappingRegion &LHS,
+                                       const CounterMappingRegion &RHS) {
+    if (LHS.FileID != RHS.FileID)
+      return LHS.FileID < RHS.FileID;
+    if (LHS.startLoc() != RHS.startLoc())
+      return LHS.startLoc() < RHS.startLoc();
+    return LHS.Kind < RHS.Kind;
+  });
 
   // Write out the fileid -> filename mapping.
   encodeULEB128(VirtualFileMapping.size(), OS);
@@ -396,10 +396,10 @@ void GCOVBlock::addCount(size_t DstEdgeNo, uint64_t N) {
 /// sortDstEdges - Sort destination edges by block number, nop if already
 /// sorted. This is required for printing branch info in the correct order.
 void GCOVBlock::sortDstEdges() {
-  if (!DstEdgesAreSorted) {
-    SortDstEdgesFunctor SortEdges;
-    std::stable_sort(DstEdges.begin(), DstEdges.end(), SortEdges);
-  }
+  if (!DstEdgesAreSorted)
+    llvm::stable_sort(DstEdges, [](const GCOVEdge *E1, const GCOVEdge *E2) {
+      return E1->Dst.Number < E2->Dst.Number;
+    });
 }
 
 /// collectLineCounts - Collect line counts. This must be used after
@@ -50,9 +50,8 @@ SampleProfileWriter::write(const StringMap<FunctionSamples> &ProfileMap) {
   for (const auto &I : ProfileMap)
     V.push_back(std::make_pair(I.getKey(), &I.second));
 
-  std::stable_sort(
-      V.begin(), V.end(),
-      [](const NameFunctionSamples &A, const NameFunctionSamples &B) {
+  llvm::stable_sort(
+      V, [](const NameFunctionSamples &A, const NameFunctionSamples &B) {
        if (A.second->getTotalSamples() == B.second->getTotalSamples())
          return A.first > B.first;
        return A.second->getTotalSamples() > B.second->getTotalSamples();
@@ -135,8 +135,7 @@ bool llvm::AreStatisticsEnabled() {
 }
 
 void StatisticInfo::sort() {
-  std::stable_sort(Stats.begin(), Stats.end(),
-                   [](const Statistic *LHS, const Statistic *RHS) {
+  llvm::stable_sort(Stats, [](const Statistic *LHS, const Statistic *RHS) {
     if (int Cmp = std::strcmp(LHS->getDebugType(), RHS->getDebugType()))
       return Cmp < 0;
 
@@ -3088,8 +3088,7 @@ void X86FrameLowering::orderFrameObjects(
 
   // Sort the objects using X86FrameSortingAlgorithm (see its comment for
   // info).
-  std::stable_sort(SortingObjects.begin(), SortingObjects.end(),
-                   X86FrameSortingComparator());
+  llvm::stable_sort(SortingObjects, X86FrameSortingComparator());
 
   // Now modify the original list to represent the final order that
   // we want. The order will depend on whether we're going to access them
@@ -548,10 +548,10 @@ ByteArrayInfo *LowerTypeTestsModule::createByteArray(BitSetInfo &BSI) {
 }
 
 void LowerTypeTestsModule::allocateByteArrays() {
-  std::stable_sort(ByteArrayInfos.begin(), ByteArrayInfos.end(),
-                   [](const ByteArrayInfo &BAI1, const ByteArrayInfo &BAI2) {
-                     return BAI1.BitSize > BAI2.BitSize;
-                   });
+  llvm::stable_sort(ByteArrayInfos,
+                    [](const ByteArrayInfo &BAI1, const ByteArrayInfo &BAI2) {
+                      return BAI1.BitSize > BAI2.BitSize;
+                    });
 
   std::vector<uint64_t> ByteArrayOffsets(ByteArrayInfos.size());
 
@@ -1552,11 +1552,10 @@ void LowerTypeTestsModule::buildBitSetsFromDisjointSet(
 
   // Order the sets of indices by size. The GlobalLayoutBuilder works best
   // when given small index sets first.
-  std::stable_sort(
-      TypeMembers.begin(), TypeMembers.end(),
-      [](const std::set<uint64_t> &O1, const std::set<uint64_t> &O2) {
-        return O1.size() < O2.size();
-      });
+  llvm::stable_sort(TypeMembers, [](const std::set<uint64_t> &O1,
+                                    const std::set<uint64_t> &O2) {
+    return O1.size() < O2.size();
+  });
 
   // Create a GlobalLayoutBuilder and provide it with index sets as layout
   // fragments. The GlobalLayoutBuilder tries to lay out members of fragments as
@@ -401,7 +401,7 @@ bool MergeFunctions::runOnModule(Module &M) {
     }
   }
 
-  std::stable_sort(HashedFuncs.begin(), HashedFuncs.end(), less_first());
+  llvm::stable_sort(HashedFuncs, less_first());
 
   auto S = HashedFuncs.begin();
   for (auto I = HashedFuncs.begin(), IE = HashedFuncs.end(); I != IE; ++I) {
@@ -195,11 +195,10 @@ public:
 
   // Sort CFG edges based on its weight.
   void sortEdgesByWeight() {
-    std::stable_sort(AllEdges.begin(), AllEdges.end(),
-                     [](const std::unique_ptr<Edge> &Edge1,
-                        const std::unique_ptr<Edge> &Edge2) {
-                       return Edge1->Weight > Edge2->Weight;
-                     });
+    llvm::stable_sort(AllEdges, [](const std::unique_ptr<Edge> &Edge1,
+                                   const std::unique_ptr<Edge> &Edge2) {
+      return Edge1->Weight > Edge2->Weight;
+    });
   }
 
   // Traverse all the edges and compute the Minimum Weight Spanning Tree
@@ -1416,7 +1416,7 @@ void CHR::sortScopes(SmallVectorImpl<CHRScope *> &Input,
                      SmallVectorImpl<CHRScope *> &Output) {
   Output.resize(Input.size());
   llvm::copy(Input, Output.begin());
-  std::stable_sort(Output.begin(), Output.end(), CHRScopeSorter);
+  llvm::stable_sort(Output, CHRScopeSorter);
 }
 
 // Return true if V is already hoisted or was hoisted (along with its operands)
@@ -67,8 +67,7 @@ namespace llvm {
     /// MaximumSpanningTree() - Takes a vector of weighted edges and returns a
     /// spanning tree.
     MaximumSpanningTree(EdgeWeights &EdgeVector) {
-
-      std::stable_sort(EdgeVector.begin(), EdgeVector.end(), EdgeWeightCompare());
+      llvm::stable_sort(EdgeVector, EdgeWeightCompare());
 
       // Create spanning tree, Forest contains a special data structure
       // that makes checking if two nodes are already in a common (sub-)tree
@@ -647,8 +647,8 @@ void ConstantHoistingPass::findBaseConstants(GlobalVariable *BaseGV) {
                                            ConstGEPInfoMap[BaseGV] : ConstIntInfoVec;
 
   // Sort the constants by value and type. This invalidates the mapping!
-  std::stable_sort(ConstCandVec.begin(), ConstCandVec.end(),
-                   [](const ConstantCandidate &LHS, const ConstantCandidate &RHS) {
+  llvm::stable_sort(ConstCandVec, [](const ConstantCandidate &LHS,
+                                     const ConstantCandidate &RHS) {
     if (LHS.ConstInt->getType() != RHS.ConstInt->getType())
       return LHS.ConstInt->getType()->getBitWidth() <
              RHS.ConstInt->getType()->getBitWidth();
@@ -702,7 +702,7 @@ private:
     // Vector of PHIs contains PHIs for different instructions.
     // Sort the args according to their VNs, such that identical
     // instructions are together.
-    std::stable_sort(CHIs.begin(), CHIs.end(), cmpVN);
+    llvm::stable_sort(CHIs, cmpVN);
     auto TI = BB->getTerminator();
     auto B = CHIs.begin();
     // [PreIt, PHIIt) form a range of CHIs which have identical VNs.
@@ -790,10 +790,7 @@ unsigned GVNSink::sinkBB(BasicBlock *BBEnd) {
     --LRI;
   }
 
-  std::stable_sort(
-      Candidates.begin(), Candidates.end(),
-      [](const SinkingInstructionCandidate &A,
-         const SinkingInstructionCandidate &B) { return A > B; });
+  llvm::stable_sort(Candidates, std::greater<SinkingInstructionCandidate>());
   LLVM_DEBUG(dbgs() << " -- Sinking candidates:\n"; for (auto &C
                                                          : Candidates) dbgs()
                                                    << " " << C << "\n";);
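
In the GVNSink hunk above, a "return A > B" lambda becomes std::greater<SinkingInstructionCandidate>(), which works because the candidate type already defines operator>. A self-contained illustration of the pattern; Candidate is a hypothetical stand-in, not the LLVM type:

    #include <algorithm>
    #include <functional>
    #include <vector>

    struct Candidate {
      int Cost;
      // Any type with operator> can be sorted descending via std::greater.
      bool operator>(const Candidate &Other) const { return Cost > Other.Cost; }
    };

    int main() {
      std::vector<Candidate> Candidates = {{1}, {3}, {2}};
      // Equivalent to the removed lambda comparing A > B.
      std::stable_sort(Candidates.begin(), Candidates.end(),
                       std::greater<Candidate>());
    }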
@@ -290,10 +290,9 @@ static bool sinkLoopInvariantInstructions(Loop &L, AAResults &AA, LoopInfo &LI,
     ColdLoopBBs.push_back(B);
     LoopBlockNumber[B] = ++i;
   }
-  std::stable_sort(ColdLoopBBs.begin(), ColdLoopBBs.end(),
-                   [&](BasicBlock *A, BasicBlock *B) {
-                     return BFI.getBlockFreq(A) < BFI.getBlockFreq(B);
-                   });
+  llvm::stable_sort(ColdLoopBBs, [&](BasicBlock *A, BasicBlock *B) {
+    return BFI.getBlockFreq(A) < BFI.getBlockFreq(B);
+  });
 
   // Traverse preheader's instructions in reverse order becaue if A depends
   // on B (A appears after B), A needs to be sinked first before B can be
@@ -1328,8 +1328,7 @@ Value *ReassociatePass::OptimizeXor(Instruction *I,
   // So, if Rank(X) < Rank(Y) < Rank(Z), it means X is defined earlier
   // than Y which is defined earlier than Z. Permute "x | 1", "Y & 2",
   // "z" in the order of X-Y-Z is better than any other orders.
-  std::stable_sort(OpndPtrs.begin(), OpndPtrs.end(),
-                   [](XorOpnd *LHS, XorOpnd *RHS) {
+  llvm::stable_sort(OpndPtrs, [](XorOpnd *LHS, XorOpnd *RHS) {
     return LHS->getSymbolicRank() < RHS->getSymbolicRank();
   });
 
@@ -1686,8 +1685,7 @@ static bool collectMultiplyFactors(SmallVectorImpl<ValueEntry> &Ops,
   // below our mininum of '4'.
   assert(FactorPowerSum >= 4);
 
-  std::stable_sort(Factors.begin(), Factors.end(),
-                   [](const Factor &LHS, const Factor &RHS) {
+  llvm::stable_sort(Factors, [](const Factor &LHS, const Factor &RHS) {
     return LHS.Power > RHS.Power;
   });
   return true;
@@ -2141,7 +2139,7 @@ void ReassociatePass::ReassociateExpression(BinaryOperator *I) {
   // positions maintained (and so the compiler is deterministic). Note that
   // this sorts so that the highest ranking values end up at the beginning of
   // the vector.
-  std::stable_sort(Ops.begin(), Ops.end());
+  llvm::stable_sort(Ops);
 
   // Now that we have the expression tree in a convenient
   // sorted form, optimize it globally if possible.
@@ -1722,10 +1722,9 @@ static bool rebuildLoopAfterUnswitch(Loop &L, ArrayRef<BasicBlock *> ExitBlocks,
 
   // Sort the exits in ascending loop depth, we'll work backwards across these
   // to process them inside out.
-  std::stable_sort(ExitsInLoops.begin(), ExitsInLoops.end(),
-                   [&](BasicBlock *LHS, BasicBlock *RHS) {
-                     return LI.getLoopDepth(LHS) < LI.getLoopDepth(RHS);
-                   });
+  llvm::stable_sort(ExitsInLoops, [&](BasicBlock *LHS, BasicBlock *RHS) {
+    return LI.getLoopDepth(LHS) < LI.getLoopDepth(RHS);
+  });
 
   // We'll build up a set for each exit loop.
   SmallPtrSet<BasicBlock *, 16> NewExitLoopBlocks;
@@ -62,7 +62,7 @@ ComputeASanStackFrameLayout(SmallVectorImpl<ASanStackVariableDescription> &Vars,
   for (size_t i = 0; i < NumVars; i++)
     Vars[i].Alignment = std::max(Vars[i].Alignment, kMinAlignment);
 
-  std::stable_sort(Vars.begin(), Vars.end(), CompareVars);
+  llvm::stable_sort(Vars, CompareVars);
 
   ASanStackFrameLayout Layout;
   Layout.Granularity = Granularity;
@@ -634,7 +634,7 @@ void PredicateInfo::renameUses(SmallPtrSetImpl<Value *> &OpSet) {
   // uses in the same instruction do not have a strict sort order
   // currently and will be considered equal. We could get rid of the
   // stable sort by creating one if we wanted.
-  std::stable_sort(OrderedUses.begin(), OrderedUses.end(), Compare);
+  llvm::stable_sort(OrderedUses, Compare);
   SmallVector<ValueDFS, 8> RenameStack;
   // For each use, sorted into dfs order, push values and replaces uses with
   // top of stack, which will represent the reaching def.
@@ -4120,10 +4120,10 @@ void BoUpSLP::optimizeGatherSequence() {
 
   // Sort blocks by domination. This ensures we visit a block after all blocks
   // dominating it are visited.
-  std::stable_sort(CSEWorkList.begin(), CSEWorkList.end(),
-                   [this](const DomTreeNode *A, const DomTreeNode *B) {
-                     return DT->properlyDominates(A, B);
-                   });
+  llvm::stable_sort(CSEWorkList,
+                    [this](const DomTreeNode *A, const DomTreeNode *B) {
+                      return DT->properlyDominates(A, B);
+                    });
 
   // Perform O(N^2) search over the gather sequences and merge identical
   // instructions. TODO: We can further optimize this scan if we split the
@@ -6601,7 +6601,7 @@ bool SLPVectorizerPass::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) {
     }
 
     // Sort by type.
-    std::stable_sort(Incoming.begin(), Incoming.end(), PhiTypeSorterFunc);
+    llvm::stable_sort(Incoming, PhiTypeSorterFunc);
 
     // Try to vectorize elements base on their type.
    for (SmallVector<Value *, 4>::iterator IncIt = Incoming.begin(),
@@ -461,10 +461,9 @@ Expected<Trace> llvm::xray::loadTrace(const DataExtractor &DE, bool Sort) {
   }
 
   if (Sort)
-    std::stable_sort(T.Records.begin(), T.Records.end(),
-                     [&](const XRayRecord &L, const XRayRecord &R) {
-                       return L.TSC < R.TSC;
-                     });
+    llvm::stable_sort(T.Records, [&](const XRayRecord &L, const XRayRecord &R) {
+      return L.TSC < R.TSC;
+    });
 
   return std::move(T);
 }
@@ -3131,10 +3131,10 @@ void AsmMatcherEmitter::run(raw_ostream &OS) {
   // Sort the instruction table using the partial order on classes. We use
   // stable_sort to ensure that ambiguous instructions are still
   // deterministically ordered.
-  std::stable_sort(Info.Matchables.begin(), Info.Matchables.end(),
-                   [](const std::unique_ptr<MatchableInfo> &a,
-                      const std::unique_ptr<MatchableInfo> &b){
-                     return *a < *b;});
+  llvm::stable_sort(
+      Info.Matchables,
+      [](const std::unique_ptr<MatchableInfo> &a,
+         const std::unique_ptr<MatchableInfo> &b) { return *a < *b; });
 
 #ifdef EXPENSIVE_CHECKS
   // Verify that the table is sorted and operator < works transitively.
@@ -2101,8 +2101,7 @@ void CodeGenRegBank::computeDerivedInfo() {
   for (unsigned Idx = 0, EndIdx = RegUnitSets.size(); Idx != EndIdx; ++Idx)
     RegUnitSetOrder.push_back(Idx);
 
-  std::stable_sort(RegUnitSetOrder.begin(), RegUnitSetOrder.end(),
-                   [this](unsigned ID1, unsigned ID2) {
+  llvm::stable_sort(RegUnitSetOrder, [this](unsigned ID1, unsigned ID2) {
     return getRegPressureSet(ID1).Units.size() <
            getRegPressureSet(ID2).Units.size();
   });
@@ -4501,8 +4501,7 @@ void GlobalISelEmitter::run(raw_ostream &OS) {
        << ", // " << Record->getName() << "\n";
   OS << "};\n\n";
 
-  std::stable_sort(Rules.begin(), Rules.end(), [&](const RuleMatcher &A,
-                                                   const RuleMatcher &B) {
+  llvm::stable_sort(Rules, [&](const RuleMatcher &A, const RuleMatcher &B) {
     int ScoreA = RuleMatcherScores[A.getRuleID()];
     int ScoreB = RuleMatcherScores[B.getRuleID()];
     if (ScoreA > ScoreB)
@@ -543,15 +543,15 @@ void RISCVCompressInstEmitter::emitCompressInstEmitter(raw_ostream &o,
   // transformed to a C_ADD or a C_MV. When emitting 'uncompress()' function the
   // source and destination are flipped and the sort key needs to change
   // accordingly.
-  std::stable_sort(CompressPatterns.begin(), CompressPatterns.end(),
-                   [Compress](const CompressPat &LHS, const CompressPat &RHS) {
-                     if (Compress)
-                       return (LHS.Source.TheDef->getName().str() <
-                               RHS.Source.TheDef->getName().str());
-                     else
-                       return (LHS.Dest.TheDef->getName().str() <
-                               RHS.Dest.TheDef->getName().str());
-                   });
+  llvm::stable_sort(CompressPatterns,
+                    [Compress](const CompressPat &LHS, const CompressPat &RHS) {
+                      if (Compress)
+                        return (LHS.Source.TheDef->getName().str() <
+                                RHS.Source.TheDef->getName().str());
+                      else
+                        return (LHS.Dest.TheDef->getName().str() <
+                                RHS.Dest.TheDef->getName().str());
+                    });
 
   // A list of MCOperandPredicates for all operands in use, and the reverse map.
   std::vector<const Record *> MCOpPredicates;