forked from OSchip/llvm-project
Use SmallVectorImpl& instead of SmallVector to avoid repeating small vector size.
llvm-svn: 186274
parent aa8ceba833
commit b94011fd28
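Before the per-file hunks, a note on the idiom being applied. SmallVector<T, N> bakes the inline element count N into the type, so an interface that takes SmallVector<T, N>& forces every caller to match N exactly and repeats the size in each signature. SmallVectorImpl<T> is the size-erased base class common to all SmallVector<T, N> instantiations, so a SmallVectorImpl<T>& parameter accepts a small vector of any inline size. A minimal sketch of the pattern, assuming only the LLVM ADT header (collectEvens and demo are hypothetical names for illustration, not part of this commit):

#include "llvm/ADT/SmallVector.h"

// Taking SmallVectorImpl<int>& instead of SmallVector<int, 8>& means the
// callee no longer hard-codes the caller's inline capacity.
static void collectEvens(const int *Begin, const int *End,
                         llvm::SmallVectorImpl<int> &Out) {
  for (const int *I = Begin; I != End; ++I)
    if (*I % 2 == 0)
      Out.push_back(*I); // spills to the heap if the inline buffer fills up

}

void demo() {
  int Data[] = {1, 2, 3, 4, 5, 6};
  llvm::SmallVector<int, 8> Big;   // inline capacity chosen at the call site
  llvm::SmallVector<int, 2> Small; // a different capacity, same callee
  collectEvens(Data, Data + 6, Big);
  collectEvens(Data, Data + 6, Small);
}

The hunks below apply exactly this substitution to function signatures and reference bindings.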

@@ -849,7 +849,7 @@ namespace llvm {
     bool propagate(const SCEV *&Src,
                    const SCEV *&Dst,
                    SmallBitVector &Loops,
-                   SmallVector<Constraint, 4> &Constraints,
+                   SmallVectorImpl<Constraint> &Constraints,
                    bool &Consistent);

     /// propagateDistance - Attempt to propagate a distance

@@ -158,7 +158,7 @@ private:
   MachineFunction &MF;
   const TargetMachine &TM;
   const TargetRegisterInfo &TRI;
-  SmallVector<CCValAssign, 16> &Locs;
+  SmallVectorImpl<CCValAssign> &Locs;
   LLVMContext &Context;

   unsigned StackOffset;

@@ -219,7 +219,7 @@ protected:

 public:
   CCState(CallingConv::ID CC, bool isVarArg, MachineFunction &MF,
-          const TargetMachine &TM, SmallVector<CCValAssign, 16> &locs,
+          const TargetMachine &TM, SmallVectorImpl<CCValAssign> &locs,
           LLVMContext &C);

   void addLoc(const CCValAssign &V) {

@@ -539,7 +539,7 @@ public:
   /// NOTE: This is still very expensive. Use carefully.
   bool hasPredecessorHelper(const SDNode *N,
                             SmallPtrSet<const SDNode *, 32> &Visited,
-                            SmallVector<const SDNode *, 16> &Worklist) const;
+                            SmallVectorImpl<const SDNode *> &Worklist) const;

   /// getNumOperands - Return the number of values used by this operation.
   ///

@@ -857,8 +857,8 @@ BasicAliasAnalysis::getModRefInfo(ImmutableCallSite CS,
   return ModRefResult(AliasAnalysis::getModRefInfo(CS, Loc) & Min);
 }

-static bool areVarIndicesEqual(SmallVector<VariableGEPIndex, 4> &Indices1,
-                               SmallVector<VariableGEPIndex, 4> &Indices2) {
+static bool areVarIndicesEqual(SmallVectorImpl<VariableGEPIndex> &Indices1,
+                               SmallVectorImpl<VariableGEPIndex> &Indices2) {
   unsigned Size1 = Indices1.size();
   unsigned Size2 = Indices2.size();

@@ -2977,7 +2977,7 @@ const SCEV *DependenceAnalysis::addToCoefficient(const SCEV *Expr,
 bool DependenceAnalysis::propagate(const SCEV *&Src,
                                    const SCEV *&Dst,
                                    SmallBitVector &Loops,
-                                   SmallVector<Constraint, 4> &Constraints,
+                                   SmallVectorImpl<Constraint> &Constraints,
                                    bool &Consistent) {
   bool Result = false;
   for (int LI = Loops.find_first(); LI >= 0; LI = Loops.find_next(LI)) {

@@ -76,7 +76,7 @@ static unsigned ReadProfilingNumEntries(const char *ToolName, FILE *F,
 /// packet and then accumulate the entries into 'Data'.
 static void ReadProfilingBlock(const char *ToolName, FILE *F,
                                bool ShouldByteSwap,
-                               SmallVector<unsigned, 32> &Data) {
+                               SmallVectorImpl<unsigned> &Data) {
   // Read the number of entries...
   unsigned NumEntries = ReadProfilingNumEntries(ToolName, F, ShouldByteSwap);

@@ -99,7 +99,7 @@ static void ReadProfilingBlock(const char *ToolName, FILE *F,
 /// run with when the current profiling data packet(s) were generated.
 static void ReadProfilingArgBlock(const char *ToolName, FILE *F,
                                   bool ShouldByteSwap,
-                                  SmallVector<std::string, 1> &CommandLines) {
+                                  SmallVectorImpl<std::string> &CommandLines) {
   // Read the number of bytes ...
   unsigned ArgLength = ReadProfilingNumEntries(ToolName, F, ShouldByteSwap);

@@ -24,7 +24,7 @@
 using namespace llvm;

 CCState::CCState(CallingConv::ID CC, bool isVarArg, MachineFunction &mf,
-                 const TargetMachine &tm, SmallVector<CCValAssign, 16> &locs,
+                 const TargetMachine &tm, SmallVectorImpl<CCValAssign> &locs,
                  LLVMContext &C)
   : CallingConv(CC), IsVarArg(isVarArg), MF(mf), TM(tm),
     TRI(*TM.getRegisterInfo()), Locs(locs), Context(C),

@@ -84,11 +84,11 @@ namespace {
     bool hasLivePhysRegDefUses(const MachineInstr *MI,
                                const MachineBasicBlock *MBB,
                                SmallSet<unsigned,8> &PhysRefs,
-                               SmallVector<unsigned,2> &PhysDefs,
+                               SmallVectorImpl<unsigned> &PhysDefs,
                                bool &PhysUseDef) const;
     bool PhysRegDefsReach(MachineInstr *CSMI, MachineInstr *MI,
                           SmallSet<unsigned,8> &PhysRefs,
-                          SmallVector<unsigned,2> &PhysDefs,
+                          SmallVectorImpl<unsigned> &PhysDefs,
                           bool &NonLocal) const;
     bool isCSECandidate(MachineInstr *MI);
     bool isProfitableToCSE(unsigned CSReg, unsigned Reg,

@@ -193,7 +193,7 @@ MachineCSE::isPhysDefTriviallyDead(unsigned Reg,
 bool MachineCSE::hasLivePhysRegDefUses(const MachineInstr *MI,
                                        const MachineBasicBlock *MBB,
                                        SmallSet<unsigned,8> &PhysRefs,
-                                       SmallVector<unsigned,2> &PhysDefs,
+                                       SmallVectorImpl<unsigned> &PhysDefs,
                                        bool &PhysUseDef) const{
   // First, add all uses to PhysRefs.
   for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {

@@ -244,7 +244,7 @@ bool MachineCSE::hasLivePhysRegDefUses(const MachineInstr *MI,

 bool MachineCSE::PhysRegDefsReach(MachineInstr *CSMI, MachineInstr *MI,
                                   SmallSet<unsigned,8> &PhysRefs,
-                                  SmallVector<unsigned,2> &PhysDefs,
+                                  SmallVectorImpl<unsigned> &PhysDefs,
                                   bool &NonLocal) const {
   // For now conservatively returns false if the common subexpression is
   // not in the same basic block as the given instruction. The only exception

@@ -77,7 +77,7 @@ unsigned MachineSSAUpdater::GetValueAtEndOfBlock(MachineBasicBlock *BB) {

 static
 unsigned LookForIdenticalPHI(MachineBasicBlock *BB,
-         SmallVector<std::pair<MachineBasicBlock*, unsigned>, 8> &PredValues) {
+         SmallVectorImpl<std::pair<MachineBasicBlock*, unsigned> > &PredValues) {
   if (BB->empty())
     return 0;

@@ -394,7 +394,7 @@ static bool AvoidsSinking(MachineInstr *MI, MachineRegisterInfo *MRI) {
 /// collectDebgValues - Scan instructions following MI and collect any
 /// matching DBG_VALUEs.
 static void collectDebugValues(MachineInstr *MI,
-                               SmallVector<MachineInstr *, 2> & DbgValues) {
+                               SmallVectorImpl<MachineInstr *> &DbgValues) {
   DbgValues.clear();
   if (!MI->getOperand(0).isReg())
     return;

@@ -112,13 +112,13 @@ namespace llvm {
     bool calcAvailInOut(MachineBasicBlock* MBB);
     void calculateAnticAvail(MachineFunction &Fn);
     bool addUsesForMEMERegion(MachineBasicBlock* MBB,
-                              SmallVector<MachineBasicBlock*, 4>& blks);
-    bool addUsesForTopLevelLoops(SmallVector<MachineBasicBlock*, 4>& blks);
+                              SmallVectorImpl<MachineBasicBlock *> &blks);
+    bool addUsesForTopLevelLoops(SmallVectorImpl<MachineBasicBlock *> &blks);
     bool calcSpillPlacements(MachineBasicBlock* MBB,
-                             SmallVector<MachineBasicBlock*, 4> &blks,
+                             SmallVectorImpl<MachineBasicBlock *> &blks,
                              CSRegBlockMap &prevSpills);
     bool calcRestorePlacements(MachineBasicBlock* MBB,
-                               SmallVector<MachineBasicBlock*, 4> &blks,
+                               SmallVectorImpl<MachineBasicBlock *> &blks,
                                CSRegBlockMap &prevRestores);
     void placeSpillsAndRestores(MachineFunction &Fn);
     void placeCSRSpillsAndRestores(MachineFunction &Fn);

@@ -293,7 +293,7 @@ void RAFast::spillVirtReg(MachineBasicBlock::iterator MI,
     // If this register is used by DBG_VALUE then insert new DBG_VALUE to
     // identify spilled location as the place to find corresponding variable's
     // value.
-    SmallVector<MachineInstr *, 4> &LRIDbgValues =
+    SmallVectorImpl<MachineInstr *> &LRIDbgValues =
       LiveDbgValueMap[LRI->VirtReg];
     for (unsigned li = 0, le = LRIDbgValues.size(); li != le; ++li) {
       MachineInstr *DBG = LRIDbgValues[li];

@@ -279,7 +279,7 @@ namespace {
     /// GatherAllAliases - Walk up chain skipping non-aliasing memory nodes,
     /// looking for aliasing nodes and adding them to the Aliases vector.
     void GatherAllAliases(SDNode *N, SDValue OriginalChain,
-                          SmallVector<SDValue, 8> &Aliases);
+                          SmallVectorImpl<SDValue> &Aliases);

     /// isAlias - Return true if there is any possibility that the two addresses
     /// overlap.

@@ -2950,7 +2950,7 @@ SDValue DAGCombiner::MatchBSwapHWordLow(SDNode *N, SDValue N0, SDValue N1,
 /// isBSwapHWordElement - Return true if the specified node is an element
 /// that makes up a 32-bit packed halfword byteswap. i.e.
 /// ((x&0xff)<<8)|((x&0xff00)>>8)|((x&0x00ff0000)<<8)|((x&0xff000000)>>8)
-static bool isBSwapHWordElement(SDValue N, SmallVector<SDNode*,4> &Parts) {
+static bool isBSwapHWordElement(SDValue N, SmallVectorImpl<SDNode *> &Parts) {
   if (!N.getNode()->hasOneUse())
     return false;

@@ -4309,7 +4309,7 @@ SDValue DAGCombiner::visitSETCC(SDNode *N) {
 // mentioned transformation is profitable.
 static bool ExtendUsesToFormExtLoad(SDNode *N, SDValue N0,
                                     unsigned ExtOpc,
-                                    SmallVector<SDNode*, 4> &ExtendNodes,
+                                    SmallVectorImpl<SDNode *> &ExtendNodes,
                                     const TargetLowering &TLI) {
   bool HasCopyToRegUses = false;
   bool isTruncFree = TLI.isTruncateFree(N->getValueType(0), N0.getValueType());

@@ -10240,7 +10240,7 @@ bool DAGCombiner::FindAliasInfo(SDNode *N,
 /// GatherAllAliases - Walk up chain skipping non-aliasing memory nodes,
 /// looking for aliasing nodes and adding them to the Aliases vector.
 void DAGCombiner::GatherAllAliases(SDNode *N, SDValue OriginalChain,
-                                   SmallVector<SDValue, 8> &Aliases) {
+                                   SmallVectorImpl<SDValue> &Aliases) {
   SmallVector<SDValue, 8> Chains;     // List of chains to visit.
   SmallPtrSet<SDNode *, 16> Visited;  // Visited node set.

@@ -653,7 +653,7 @@ private:
   /// loads to load a vector with a resulting wider type. It takes
   /// LdChain: list of chains for the load to be generated.
   /// Ld: load to widen
-  SDValue GenWidenVectorLoads(SmallVector<SDValue, 16>& LdChain,
+  SDValue GenWidenVectorLoads(SmallVectorImpl<SDValue> &LdChain,
                               LoadSDNode *LD);

   /// GenWidenVectorExtLoads - Helper function to generate a set of extension

@@ -661,20 +661,20 @@ private:
   /// LdChain: list of chains for the load to be generated.
   /// Ld: load to widen
   /// ExtType: extension element type
-  SDValue GenWidenVectorExtLoads(SmallVector<SDValue, 16>& LdChain,
+  SDValue GenWidenVectorExtLoads(SmallVectorImpl<SDValue> &LdChain,
                                  LoadSDNode *LD, ISD::LoadExtType ExtType);

   /// Helper genWidenVectorStores - Helper function to generate a set of
   /// stores to store a widen vector into non widen memory
   /// StChain: list of chains for the stores we have generated
   /// ST: store of a widen value
-  void GenWidenVectorStores(SmallVector<SDValue, 16>& StChain, StoreSDNode *ST);
+  void GenWidenVectorStores(SmallVectorImpl<SDValue> &StChain, StoreSDNode *ST);

   /// Helper genWidenVectorTruncStores - Helper function to generate a set of
   /// stores to store a truncate widen vector into non widen memory
   /// StChain: list of chains for the stores we have generated
   /// ST: store of a widen value
-  void GenWidenVectorTruncStores(SmallVector<SDValue, 16>& StChain,
+  void GenWidenVectorTruncStores(SmallVectorImpl<SDValue> &StChain,
                                  StoreSDNode *ST);

   /// Modifies a vector input (widen or narrows) to a vector of NVT. The

@@ -2468,7 +2468,7 @@ static EVT FindMemType(SelectionDAG& DAG, const TargetLowering &TLI,
 // LDOps: Load operators to build a vector type
 // [Start,End) the list of loads to use.
 static SDValue BuildVectorFromScalar(SelectionDAG& DAG, EVT VecTy,
-                                     SmallVector<SDValue, 16>& LdOps,
+                                     SmallVectorImpl<SDValue> &LdOps,
                                      unsigned Start, unsigned End) {
   SDLoc dl(LdOps[Start]);
   EVT LdTy = LdOps[Start].getValueType();

@@ -2495,7 +2495,7 @@ static SDValue BuildVectorFromScalar(SelectionDAG& DAG, EVT VecTy,
   return DAG.getNode(ISD::BITCAST, dl, VecTy, VecOp);
 }

-SDValue DAGTypeLegalizer::GenWidenVectorLoads(SmallVector<SDValue, 16> &LdChain,
+SDValue DAGTypeLegalizer::GenWidenVectorLoads(SmallVectorImpl<SDValue> &LdChain,
                                               LoadSDNode *LD) {
   // The strategy assumes that we can efficiently load powers of two widths.
   // The routines chops the vector into the largest vector loads with the same

@@ -2649,8 +2649,8 @@ SDValue DAGTypeLegalizer::GenWidenVectorLoads(SmallVector<SDValue, 16> &LdChain,
 }

 SDValue
-DAGTypeLegalizer::GenWidenVectorExtLoads(SmallVector<SDValue, 16>& LdChain,
-                                         LoadSDNode * LD,
+DAGTypeLegalizer::GenWidenVectorExtLoads(SmallVectorImpl<SDValue> &LdChain,
+                                         LoadSDNode *LD,
                                          ISD::LoadExtType ExtType) {
   // For extension loads, it may not be more efficient to chop up the vector
   // and then extended it. Instead, we unroll the load and build a new vector.

@@ -2697,7 +2697,7 @@ DAGTypeLegalizer::GenWidenVectorExtLoads(SmallVectorImpl<SDValue> &LdChain,
 }


-void DAGTypeLegalizer::GenWidenVectorStores(SmallVector<SDValue, 16>& StChain,
+void DAGTypeLegalizer::GenWidenVectorStores(SmallVectorImpl<SDValue> &StChain,
                                             StoreSDNode *ST) {
   // The strategy assumes that we can efficiently store powers of two widths.
   // The routines chops the vector into the largest vector stores with the same

@@ -2766,7 +2766,7 @@ void DAGTypeLegalizer::GenWidenVectorStores(SmallVector<SDValue, 16>& StChain,
 }

 void
-DAGTypeLegalizer::GenWidenVectorTruncStores(SmallVector<SDValue, 16>& StChain,
+DAGTypeLegalizer::GenWidenVectorTruncStores(SmallVectorImpl<SDValue> &StChain,
                                             StoreSDNode *ST) {
   // For extension loads, it may not be more efficient to truncate the vector
   // and then store it. Instead, we extract each element and then store it.

@@ -102,8 +102,8 @@ private:
     void InsertCopiesAndMoveSuccs(SUnit*, unsigned,
                                   const TargetRegisterClass*,
                                   const TargetRegisterClass*,
-                                  SmallVector<SUnit*, 2>&);
-    bool DelayForLiveRegsBottomUp(SUnit*, SmallVector<unsigned, 4>&);
+                                  SmallVectorImpl<SUnit*>&);
+    bool DelayForLiveRegsBottomUp(SUnit*, SmallVectorImpl<unsigned>&);
     void ListScheduleBottomUp();

     /// forceUnitLatencies - The fast scheduler doesn't care about real latencies.

@@ -387,7 +387,7 @@ SUnit *ScheduleDAGFast::CopyAndMoveSuccessors(SUnit *SU) {
 void ScheduleDAGFast::InsertCopiesAndMoveSuccs(SUnit *SU, unsigned Reg,
                                                const TargetRegisterClass *DestRC,
                                                const TargetRegisterClass *SrcRC,
-                                               SmallVector<SUnit*, 2> &Copies) {
+                                               SmallVectorImpl<SUnit*> &Copies) {
   SUnit *CopyFromSU = newSUnit(static_cast<SDNode *>(NULL));
   CopyFromSU->CopySrcRC = SrcRC;
   CopyFromSU->CopyDstRC = DestRC;

@@ -448,7 +448,7 @@ static EVT getPhysicalRegisterVT(SDNode *N, unsigned Reg,
 static bool CheckForLiveRegDef(SUnit *SU, unsigned Reg,
                                std::vector<SUnit*> &LiveRegDefs,
                                SmallSet<unsigned, 4> &RegAdded,
-                               SmallVector<unsigned, 4> &LRegs,
+                               SmallVectorImpl<unsigned> &LRegs,
                                const TargetRegisterInfo *TRI) {
   bool Added = false;
   for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI) {

@@ -467,7 +467,7 @@ static bool CheckForLiveRegDef(SUnit *SU, unsigned Reg,
 /// If the specific node is the last one that's available to schedule, do
 /// whatever is necessary (i.e. backtracking or cloning) to make it possible.
 bool ScheduleDAGFast::DelayForLiveRegsBottomUp(SUnit *SU,
-                                              SmallVector<unsigned, 4> &LRegs){
+                                              SmallVectorImpl<unsigned> &LRegs){
   if (NumLiveRegs == 0)
     return false;

@@ -567,7 +567,7 @@ void ScheduleDAGFast::ListScheduleBottomUp() {
       // "expensive to copy" values to break the dependency. In case even
       // that doesn't work, insert cross class copies.
       SUnit *TrySU = NotReady[0];
-      SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU];
+      SmallVectorImpl<unsigned> &LRegs = LRegsMap[TrySU];
      assert(LRegs.size() == 1 && "Can't handle this yet!");
      unsigned Reg = LRegs[0];
      SUnit *LRDef = LiveRegDefs[Reg];

@@ -229,8 +229,8 @@ private:
   void InsertCopiesAndMoveSuccs(SUnit*, unsigned,
                                 const TargetRegisterClass*,
                                 const TargetRegisterClass*,
-                                SmallVector<SUnit*, 2>&);
-  bool DelayForLiveRegsBottomUp(SUnit*, SmallVector<unsigned, 4>&);
+                                SmallVectorImpl<SUnit*>&);
+  bool DelayForLiveRegsBottomUp(SUnit*, SmallVectorImpl<unsigned>&);

   void releaseInterferences(unsigned Reg = 0);

@@ -1133,9 +1133,9 @@ SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
 /// InsertCopiesAndMoveSuccs - Insert register copies and move all
 /// scheduled successors of the given SUnit to the last copy.
 void ScheduleDAGRRList::InsertCopiesAndMoveSuccs(SUnit *SU, unsigned Reg,
-                                              const TargetRegisterClass *DestRC,
-                                              const TargetRegisterClass *SrcRC,
-                                              SmallVector<SUnit*, 2> &Copies) {
+                                                 const TargetRegisterClass *DestRC,
+                                                 const TargetRegisterClass *SrcRC,
+                                                 SmallVectorImpl<SUnit*> &Copies) {
   SUnit *CopyFromSU = CreateNewSUnit(NULL);
   CopyFromSU->CopySrcRC = SrcRC;
   CopyFromSU->CopyDstRC = DestRC;

@@ -1205,7 +1205,7 @@ static EVT getPhysicalRegisterVT(SDNode *N, unsigned Reg,
 static void CheckForLiveRegDef(SUnit *SU, unsigned Reg,
                                std::vector<SUnit*> &LiveRegDefs,
                                SmallSet<unsigned, 4> &RegAdded,
-                               SmallVector<unsigned, 4> &LRegs,
+                               SmallVectorImpl<unsigned> &LRegs,
                                const TargetRegisterInfo *TRI) {
   for (MCRegAliasIterator AliasI(Reg, TRI, true); AliasI.isValid(); ++AliasI) {

@@ -1227,7 +1227,7 @@ static void CheckForLiveRegDef(SUnit *SU, unsigned Reg,
 static void CheckForLiveRegDefMasked(SUnit *SU, const uint32_t *RegMask,
                                      std::vector<SUnit*> &LiveRegDefs,
                                      SmallSet<unsigned, 4> &RegAdded,
-                                     SmallVector<unsigned, 4> &LRegs) {
+                                     SmallVectorImpl<unsigned> &LRegs) {
   // Look at all live registers. Skip Reg0 and the special CallResource.
   for (unsigned i = 1, e = LiveRegDefs.size()-1; i != e; ++i) {
     if (!LiveRegDefs[i]) continue;
|
|||
/// If the specific node is the last one that's available to schedule, do
|
||||
/// whatever is necessary (i.e. backtracking or cloning) to make it possible.
|
||||
bool ScheduleDAGRRList::
|
||||
DelayForLiveRegsBottomUp(SUnit *SU, SmallVector<unsigned, 4> &LRegs) {
|
||||
DelayForLiveRegsBottomUp(SUnit *SU, SmallVectorImpl<unsigned> &LRegs) {
|
||||
if (NumLiveRegs == 0)
|
||||
return false;
|
||||
|
||||
|

@@ -1331,7 +1331,7 @@ void ScheduleDAGRRList::releaseInterferences(unsigned Reg) {
     SUnit *SU = Interferences[i-1];
     LRegsMapT::iterator LRegsPos = LRegsMap.find(SU);
     if (Reg) {
-      SmallVector<unsigned, 4> &LRegs = LRegsPos->second;
+      SmallVectorImpl<unsigned> &LRegs = LRegsPos->second;
       if (std::find(LRegs.begin(), LRegs.end(), Reg) == LRegs.end())
         continue;
     }

@@ -1385,7 +1385,7 @@ SUnit *ScheduleDAGRRList::PickNodeToScheduleBottomUp() {
     // to resolve it.
     for (unsigned i = 0, e = Interferences.size(); i != e; ++i) {
       SUnit *TrySU = Interferences[i];
-      SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU];
+      SmallVectorImpl<unsigned> &LRegs = LRegsMap[TrySU];

       // Try unscheduling up to the point where it's safe to schedule
       // this node.

@@ -1433,7 +1433,7 @@ SUnit *ScheduleDAGRRList::PickNodeToScheduleBottomUp() {
     // insert cross class copies.
     // If it's not too expensive, i.e. cost != -1, issue copies.
     SUnit *TrySU = Interferences[0];
-    SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU];
+    SmallVectorImpl<unsigned> &LRegs = LRegsMap[TrySU];
    assert(LRegs.size() == 1 && "Can't handle this yet!");
    unsigned Reg = LRegs[0];
    SUnit *LRDef = LiveRegDefs[Reg];

@@ -700,11 +700,10 @@ namespace {
 }

 /// ProcessSDDbgValues - Process SDDbgValues associated with this node.
-static void ProcessSDDbgValues(SDNode *N, SelectionDAG *DAG,
-                               InstrEmitter &Emitter,
-                    SmallVector<std::pair<unsigned, MachineInstr*>, 32> &Orders,
-                               DenseMap<SDValue, unsigned> &VRBaseMap,
-                               unsigned Order) {
+static void
+ProcessSDDbgValues(SDNode *N, SelectionDAG *DAG, InstrEmitter &Emitter,
+                   SmallVectorImpl<std::pair<unsigned, MachineInstr*> > &Orders,
+                   DenseMap<SDValue, unsigned> &VRBaseMap, unsigned Order) {
   if (!N->getHasDebugValue())
     return;

@@ -731,11 +730,11 @@ static void ProcessSDDbgValues(SDNode *N, SelectionDAG *DAG,
 // ProcessSourceNode - Process nodes with source order numbers. These are added
 // to a vector which EmitSchedule uses to determine how to insert dbg_value
 // instructions in the right order.
-static void ProcessSourceNode(SDNode *N, SelectionDAG *DAG,
-                              InstrEmitter &Emitter,
-                              DenseMap<SDValue, unsigned> &VRBaseMap,
-                    SmallVector<std::pair<unsigned, MachineInstr*>, 32> &Orders,
-                              SmallSet<unsigned, 8> &Seen) {
+static void
+ProcessSourceNode(SDNode *N, SelectionDAG *DAG, InstrEmitter &Emitter,
+                  DenseMap<SDValue, unsigned> &VRBaseMap,
+                  SmallVectorImpl<std::pair<unsigned, MachineInstr*> > &Orders,
+                  SmallSet<unsigned, 8> &Seen) {
   unsigned Order = N->getIROrder();
   if (!Order || !Seen.insert(Order)) {
     // Process any valid SDDbgValues even if node does not have any order

@@ -6077,9 +6077,10 @@ bool SDNode::hasPredecessor(const SDNode *N) const {
   return hasPredecessorHelper(N, Visited, Worklist);
 }

-bool SDNode::hasPredecessorHelper(const SDNode *N,
-                                  SmallPtrSet<const SDNode *, 32> &Visited,
-                                  SmallVector<const SDNode *, 16> &Worklist) const {
+bool
+SDNode::hasPredecessorHelper(const SDNode *N,
+                             SmallPtrSet<const SDNode *, 32> &Visited,
+                             SmallVectorImpl<const SDNode *> &Worklist) const {
   if (Visited.empty()) {
     Worklist.push_back(this);
   } else {

@@ -554,7 +554,7 @@ bool PEI::calculateSets(MachineFunction &Fn) {
 /// _outside_ the computed minimal placement regions have been covered.
 ///
 bool PEI::addUsesForMEMERegion(MachineBasicBlock* MBB,
-                               SmallVector<MachineBasicBlock*, 4>& blks) {
+                               SmallVectorImpl<MachineBasicBlock *> &blks) {
   if (MBB->succ_size() < 2 && MBB->pred_size() < 2) {
     bool processThisBlock = false;
     for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),

@@ -629,7 +629,7 @@ bool PEI::addUsesForMEMERegion(MachineBasicBlock* MBB,
 /// addUsesForTopLevelLoops - add uses for CSRs used inside top
 /// level loops to the exit blocks of those loops.
 ///
-bool PEI::addUsesForTopLevelLoops(SmallVector<MachineBasicBlock*, 4>& blks) {
+bool PEI::addUsesForTopLevelLoops(SmallVectorImpl<MachineBasicBlock *> &blks) {
   bool addedUses = false;

   // Place restores for top level loops where needed.

@@ -674,7 +674,7 @@ bool PEI::addUsesForTopLevelLoops(SmallVectorImpl<MachineBasicBlock *> &blks) {
 /// multi-entry/exit regions.
 ///
 bool PEI::calcSpillPlacements(MachineBasicBlock* MBB,
-                              SmallVector<MachineBasicBlock*, 4> &blks,
+                              SmallVectorImpl<MachineBasicBlock *> &blks,
                               CSRegBlockMap &prevSpills) {
   bool placedSpills = false;
   // Intersect (CSRegs - AnticIn[P]) for P in Predecessors(MBB)

@@ -736,7 +736,7 @@ bool PEI::calcSpillPlacements(MachineBasicBlock* MBB,
 /// multi-entry/exit regions.
 ///
 bool PEI::calcRestorePlacements(MachineBasicBlock* MBB,
-                                SmallVector<MachineBasicBlock*, 4> &blks,
+                                SmallVectorImpl<MachineBasicBlock *> &blks,
                                 CSRegBlockMap &prevRestores) {
   bool placedRestores = false;
   // Intersect (CSRegs - AvailOut[S]) for S in Successors(MBB)

@@ -106,7 +106,7 @@ namespace {
     bool OverlapWithAssignments(LiveInterval *li, int Color) const;
     int ColorSlot(LiveInterval *li);
     bool ColorSlots(MachineFunction &MF);
-    void RewriteInstruction(MachineInstr *MI, SmallVector<int, 16> &SlotMapping,
+    void RewriteInstruction(MachineInstr *MI, SmallVectorImpl<int> &SlotMapping,
                             MachineFunction &MF);
     bool RemoveDeadStores(MachineBasicBlock* MBB);
   };

@@ -340,7 +340,7 @@ bool StackSlotColoring::ColorSlots(MachineFunction &MF) {
 /// RewriteInstruction - Rewrite specified instruction by replacing references
 /// to old frame index with new one.
 void StackSlotColoring::RewriteInstruction(MachineInstr *MI,
-                                           SmallVector<int, 16> &SlotMapping,
+                                           SmallVectorImpl<int> &SlotMapping,
                                            MachineFunction &MF) {
   // Update the operands.
   for (unsigned i = 0, ee = MI->getNumOperands(); i != ee; ++i) {

@@ -86,7 +86,7 @@ namespace {
     void ProcessPHI(MachineInstr *MI, MachineBasicBlock *TailBB,
                     MachineBasicBlock *PredBB,
                     DenseMap<unsigned, unsigned> &LocalVRMap,
-                    SmallVector<std::pair<unsigned,unsigned>, 4> &Copies,
+                    SmallVectorImpl<std::pair<unsigned,unsigned> > &Copies,
                     const DenseSet<unsigned> &UsedByPhi,
                     bool Remove);
     void DuplicateInstruction(MachineInstr *MI,

@@ -96,7 +96,7 @@ namespace {
                               DenseMap<unsigned, unsigned> &LocalVRMap,
                               const DenseSet<unsigned> &UsedByPhi);
     void UpdateSuccessorsPHIs(MachineBasicBlock *FromBB, bool isDead,
-                              SmallVector<MachineBasicBlock*, 8> &TDBBs,
+                              SmallVectorImpl<MachineBasicBlock *> &TDBBs,
                               SmallSetVector<MachineBasicBlock*, 8> &Succs);
     bool TailDuplicateBlocks(MachineFunction &MF);
     bool shouldTailDuplicate(const MachineFunction &MF,

@@ -104,14 +104,14 @@ namespace {
     bool isSimpleBB(MachineBasicBlock *TailBB);
     bool canCompletelyDuplicateBB(MachineBasicBlock &BB);
     bool duplicateSimpleBB(MachineBasicBlock *TailBB,
-                           SmallVector<MachineBasicBlock*, 8> &TDBBs,
+                           SmallVectorImpl<MachineBasicBlock *> &TDBBs,
                            const DenseSet<unsigned> &RegsUsedByPhi,
-                           SmallVector<MachineInstr*, 16> &Copies);
+                           SmallVectorImpl<MachineInstr *> &Copies);
     bool TailDuplicate(MachineBasicBlock *TailBB,
                        bool IsSimple,
                        MachineFunction &MF,
-                       SmallVector<MachineBasicBlock*, 8> &TDBBs,
-                       SmallVector<MachineInstr*, 16> &Copies);
+                       SmallVectorImpl<MachineBasicBlock *> &TDBBs,
+                       SmallVectorImpl<MachineInstr *> &Copies);
     bool TailDuplicateAndUpdate(MachineBasicBlock *MBB,
                                 bool IsSimple,
                                 MachineFunction &MF);

@@ -386,7 +386,7 @@ void TailDuplicatePass::ProcessPHI(MachineInstr *MI,
                                    MachineBasicBlock *TailBB,
                                    MachineBasicBlock *PredBB,
                                    DenseMap<unsigned, unsigned> &LocalVRMap,
-                                   SmallVector<std::pair<unsigned,unsigned>, 4> &Copies,
+                                   SmallVectorImpl<std::pair<unsigned,unsigned> > &Copies,
                                    const DenseSet<unsigned> &RegsUsedByPhi,
                                    bool Remove) {
   unsigned DefReg = MI->getOperand(0).getReg();

@@ -452,7 +452,7 @@ void TailDuplicatePass::DuplicateInstruction(MachineInstr *MI,
 /// instructions in them accordingly.
 void
 TailDuplicatePass::UpdateSuccessorsPHIs(MachineBasicBlock *FromBB, bool isDead,
-                                  SmallVector<MachineBasicBlock*, 8> &TDBBs,
+                                  SmallVectorImpl<MachineBasicBlock *> &TDBBs,
                                   SmallSetVector<MachineBasicBlock*,8> &Succs) {
   for (SmallSetVector<MachineBasicBlock*, 8>::iterator SI = Succs.begin(),
        SE = Succs.end(); SI != SE; ++SI) {

@@ -662,9 +662,9 @@ TailDuplicatePass::canCompletelyDuplicateBB(MachineBasicBlock &BB) {

 bool
 TailDuplicatePass::duplicateSimpleBB(MachineBasicBlock *TailBB,
-                                     SmallVector<MachineBasicBlock*, 8> &TDBBs,
-                                     const DenseSet<unsigned> &UsedByPhi,
-                                     SmallVector<MachineInstr*, 16> &Copies) {
+                                     SmallVectorImpl<MachineBasicBlock *> &TDBBs,
+                                     const DenseSet<unsigned> &UsedByPhi,
+                                     SmallVectorImpl<MachineInstr *> &Copies) {
   SmallPtrSet<MachineBasicBlock*, 8> Succs(TailBB->succ_begin(),
                                            TailBB->succ_end());
   SmallVector<MachineBasicBlock*, 8> Preds(TailBB->pred_begin(),

@@ -742,8 +742,8 @@ bool
 TailDuplicatePass::TailDuplicate(MachineBasicBlock *TailBB,
                                  bool IsSimple,
                                  MachineFunction &MF,
-                                 SmallVector<MachineBasicBlock*, 8> &TDBBs,
-                                 SmallVector<MachineInstr*, 16> &Copies) {
+                                 SmallVectorImpl<MachineBasicBlock *> &TDBBs,
+                                 SmallVectorImpl<MachineInstr *> &Copies) {
   DEBUG(dbgs() << "\n*** Tail-duplicating BB#" << TailBB->getNumber() << '\n');

   DenseSet<unsigned> UsedByPhi;

@@ -1539,7 +1539,7 @@ bool TwoAddressInstructionPass::runOnMachineFunction(MachineFunction &Func) {
       // transformations that may either eliminate the tied operands or
       // improve the opportunities for coalescing away the register copy.
       if (TiedOperands.size() == 1) {
-        SmallVector<std::pair<unsigned, unsigned>, 4> &TiedPairs
+        SmallVectorImpl<std::pair<unsigned, unsigned> > &TiedPairs
           = TiedOperands.begin()->second;
         if (TiedPairs.size() == 1) {
           unsigned SrcIdx = TiedPairs[0].first;

@@ -422,7 +422,7 @@ static bool canBeMerged(const ConstantRange &A, const ConstantRange &B) {
   return !A.intersectWith(B).isEmptySet() || isContiguous(A, B);
 }

-static bool tryMergeRange(SmallVector<Value*, 4> &EndPoints, ConstantInt *Low,
+static bool tryMergeRange(SmallVectorImpl<Value *> &EndPoints, ConstantInt *Low,
                           ConstantInt *High) {
   ConstantRange NewRange(Low->getValue(), High->getValue());
   unsigned Size = EndPoints.size();

@@ -439,7 +439,7 @@ static bool tryMergeRange(SmallVector<Value*, 4> &EndPoints, ConstantInt *Low,
   return false;
 }

-static void addRange(SmallVector<Value*, 4> &EndPoints, ConstantInt *Low,
+static void addRange(SmallVectorImpl<Value *> &EndPoints, ConstantInt *Low,
                      ConstantInt *High) {
   if (!EndPoints.empty())
     if (tryMergeRange(EndPoints, Low, High))

@@ -1079,9 +1079,9 @@ AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
                                  SmallVectorImpl<SDValue> &InVals) const {
   SelectionDAG &DAG = CLI.DAG;
   SDLoc &dl = CLI.DL;
-  SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
-  SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
-  SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
+  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
+  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
+  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
   SDValue Chain = CLI.Chain;
   SDValue Callee = CLI.Callee;
   bool &IsTailCall = CLI.IsTailCall;

@@ -74,7 +74,7 @@ namespace {
   class ARMCCState : public CCState {
   public:
     ARMCCState(CallingConv::ID CC, bool isVarArg, MachineFunction &MF,
-               const TargetMachine &TM, SmallVector<CCValAssign, 16> &locs,
+               const TargetMachine &TM, SmallVectorImpl<CCValAssign> &locs,
                LLVMContext &C, ParmContext PC)
         : CCState(CC, isVarArg, MF, TM, locs, C) {
       assert(((PC == Call) || (PC == Prologue)) &&

@@ -1330,7 +1330,7 @@ void ARMTargetLowering::PassF64ArgInRegs(SDLoc dl, SelectionDAG &DAG,
                                          RegsToPassVector &RegsToPass,
                                          CCValAssign &VA, CCValAssign &NextVA,
                                          SDValue &StackPtr,
-                                         SmallVector<SDValue, 8> &MemOpChains,
+                                         SmallVectorImpl<SDValue> &MemOpChains,
                                          ISD::ArgFlagsTy Flags) const {

   SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,

@@ -1358,9 +1358,9 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                              SmallVectorImpl<SDValue> &InVals) const {
   SelectionDAG &DAG = CLI.DAG;
   SDLoc &dl = CLI.DL;
-  SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
-  SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
-  SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
+  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
+  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
+  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
   SDValue Chain = CLI.Chain;
   SDValue Callee = CLI.Callee;
   bool &isTailCall = CLI.IsTailCall;

@@ -417,7 +417,7 @@ namespace llvm {
                             RegsToPassVector &RegsToPass,
                             CCValAssign &VA, CCValAssign &NextVA,
                             SDValue &StackPtr,
-                            SmallVector<SDValue, 8> &MemOpChains,
+                            SmallVectorImpl<SDValue> &MemOpChains,
                             ISD::ArgFlagsTy Flags) const;
     SDValue GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
                                  SDValue &Root, SelectionDAG &DAG,

@@ -109,12 +109,12 @@ namespace {
                       unsigned PredReg,
                       unsigned Scratch,
                       DebugLoc dl,
-                      SmallVector<MachineBasicBlock::iterator, 4> &Merges);
+                      SmallVectorImpl<MachineBasicBlock::iterator> &Merges);
     void MergeLDR_STR(MachineBasicBlock &MBB, unsigned SIndex, unsigned Base,
                       int Opcode, unsigned Size,
                       ARMCC::CondCodes Pred, unsigned PredReg,
                       unsigned Scratch, MemOpQueue &MemOps,
-                      SmallVector<MachineBasicBlock::iterator, 4> &Merges);
+                      SmallVectorImpl<MachineBasicBlock::iterator> &Merges);

     void AdvanceRS(MachineBasicBlock &MBB, MemOpQueue &MemOps);
     bool FixInvalidRegPairOp(MachineBasicBlock &MBB,

@@ -371,7 +371,7 @@ void ARMLoadStoreOpt::MergeOpsUpdate(MachineBasicBlock &MBB,
                                      ARMCC::CondCodes Pred, unsigned PredReg,
                                      unsigned Scratch,
                                      DebugLoc dl,
-                          SmallVector<MachineBasicBlock::iterator, 4> &Merges) {
+                          SmallVectorImpl<MachineBasicBlock::iterator> &Merges) {
   // First calculate which of the registers should be killed by the merged
   // instruction.
   const unsigned insertPos = memOps[insertAfter].Position;

@@ -444,10 +444,10 @@ void ARMLoadStoreOpt::MergeOpsUpdate(MachineBasicBlock &MBB,
 /// load / store multiple instructions.
 void
 ARMLoadStoreOpt::MergeLDR_STR(MachineBasicBlock &MBB, unsigned SIndex,
-                          unsigned Base, int Opcode, unsigned Size,
-                          ARMCC::CondCodes Pred, unsigned PredReg,
-                          unsigned Scratch, MemOpQueue &MemOps,
-                          SmallVector<MachineBasicBlock::iterator, 4> &Merges) {
+                              unsigned Base, int Opcode, unsigned Size,
+                              ARMCC::CondCodes Pred, unsigned PredReg,
+                              unsigned Scratch, MemOpQueue &MemOps,
+                              SmallVectorImpl<MachineBasicBlock::iterator> &Merges) {
   bool isNotVFP = isi32Load(Opcode) || isi32Store(Opcode);
   int Offset = MemOps[SIndex].Offset;
   int SOffset = Offset;

@@ -25,7 +25,7 @@ using namespace llvm;

 Hexagon_CCState::Hexagon_CCState(CallingConv::ID CC, bool isVarArg,
                                  const TargetMachine &tm,
-                                 SmallVector<CCValAssign, 16> &locs,
+                                 SmallVectorImpl<CCValAssign> &locs,
                                  LLVMContext &c)
   : CallingConv(CC), IsVarArg(isVarArg), TM(tm), Locs(locs), Context(c) {
   // No stack is used.

@@ -48,14 +48,14 @@ class Hexagon_CCState {
   CallingConv::ID CallingConv;
   bool IsVarArg;
   const TargetMachine &TM;
-  SmallVector<CCValAssign, 16> &Locs;
+  SmallVectorImpl<CCValAssign> &Locs;
   LLVMContext &Context;

   unsigned StackOffset;
   SmallVector<uint32_t, 16> UsedRegs;
 public:
   Hexagon_CCState(CallingConv::ID CC, bool isVarArg, const TargetMachine &TM,
-                  SmallVector<CCValAssign, 16> &locs, LLVMContext &c);
+                  SmallVectorImpl<CCValAssign> &locs, LLVMContext &c);

   void addLoc(const CCValAssign &V) {
     Locs.push_back(V);

@@ -134,7 +134,7 @@ namespace {
     /// has a computable trip count and, if so, return a value that represents
     /// the trip count expression.
     CountValue *getLoopTripCount(MachineLoop *L,
-                                 SmallVector<MachineInstr*, 2> &OldInsts);
+                                 SmallVectorImpl<MachineInstr *> &OldInsts);

     /// \brief Return the expression that represents the number of times
     /// a loop iterates. The function takes the operands that represent the

@@ -164,7 +164,7 @@ namespace {

     /// \brief Return true if the instruction is now dead.
     bool isDead(const MachineInstr *MI,
-                SmallVector<MachineInstr*, 1> &DeadPhis) const;
+                SmallVectorImpl<MachineInstr *> &DeadPhis) const;

     /// \brief Remove the instruction if it is now dead.
     void removeIfDead(MachineInstr *MI);

@@ -428,7 +428,7 @@ bool HexagonHardwareLoops::findInductionRegister(MachineLoop *L,
 /// induction variable patterns that are used in the calculation for
 /// the number of time the loop is executed.
 CountValue *HexagonHardwareLoops::getLoopTripCount(MachineLoop *L,
-    SmallVector<MachineInstr*, 2> &OldInsts) {
+    SmallVectorImpl<MachineInstr *> &OldInsts) {
   MachineBasicBlock *TopMBB = L->getTopBlock();
   MachineBasicBlock::pred_iterator PI = TopMBB->pred_begin();
   assert(PI != TopMBB->pred_end() &&

@@ -890,7 +890,7 @@ bool HexagonHardwareLoops::containsInvalidInstruction(MachineLoop *L) const {
 /// for inline asm, physical registers and instructions with side effects
 /// removed.
 bool HexagonHardwareLoops::isDead(const MachineInstr *MI,
-                              SmallVector<MachineInstr*, 1> &DeadPhis) const {
+                              SmallVectorImpl<MachineInstr *> &DeadPhis) const {
   // Examine each operand.
   for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
     const MachineOperand &MO = MI->getOperand(i);

@@ -382,10 +382,10 @@ SDValue
 HexagonTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                                  SmallVectorImpl<SDValue> &InVals) const {
   SelectionDAG &DAG = CLI.DAG;
-  SDLoc &dl = CLI.DL;
-  SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
-  SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
-  SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
+  SDLoc &dl = CLI.DL;
+  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
+  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
+  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
   SDValue Chain = CLI.Chain;
   SDValue Callee = CLI.Callee;
   bool &isTailCall = CLI.IsTailCall;

@@ -38,8 +38,8 @@ static cl::opt<bool> MBDisableStackAdjust(
   cl::desc("Disable MBlaze stack layout adjustment."),
   cl::Hidden);

-static void replaceFrameIndexes(MachineFunction &MF,
-                                SmallVector<std::pair<int,int64_t>, 16> &FR) {
+static void replaceFrameIndexes(MachineFunction &MF,
+                                SmallVectorImpl<std::pair<int,int64_t> > &FR) {
   MachineFrameInfo *MFI = MF.getFrameInfo();
   MBlazeFunctionInfo *MBlazeFI = MF.getInfo<MBlazeFunctionInfo>();
   const SmallVectorImpl<std::pair<int,int64_t> >::iterator FRB = FR.begin();

@@ -687,9 +687,9 @@ LowerCall(TargetLowering::CallLoweringInfo &CLI,
           SmallVectorImpl<SDValue> &InVals) const {
   SelectionDAG &DAG = CLI.DAG;
   SDLoc dl = CLI.DL;
-  SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
-  SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
-  SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
+  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
+  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
+  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
   SDValue Chain = CLI.Chain;
   SDValue Callee = CLI.Callee;
   bool &isTailCall = CLI.IsTailCall;

@@ -118,7 +118,7 @@ public:
     return false;
   }

-  const SmallVector<int, 16>& getLiveIn() const { return LiveInFI; }
+  const SmallVectorImpl<int> &getLiveIn() const { return LiveInFI; }

   void recordReplacement(int OFI, int NFI) {
     FIReplacements.insert(std::make_pair(OFI,NFI));

@@ -279,9 +279,9 @@ MSP430TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                                 SmallVectorImpl<SDValue> &InVals) const {
   SelectionDAG &DAG = CLI.DAG;
   SDLoc &dl = CLI.DL;
-  SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
-  SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
-  SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
+  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
+  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
+  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
   SDValue Chain = CLI.Chain;
   SDValue Callee = CLI.Callee;
   bool &isTailCall = CLI.IsTailCall;

@@ -2323,9 +2323,9 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                               SmallVectorImpl<SDValue> &InVals) const {
   SelectionDAG &DAG = CLI.DAG;
   SDLoc DL = CLI.DL;
-  SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
-  SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
-  SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
+  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
+  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
+  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
   SDValue Chain = CLI.Chain;
   SDValue Callee = CLI.Callee;
   bool &IsTailCall = CLI.IsTailCall;

@@ -3383,7 +3383,7 @@ copyByValRegs(SDValue Chain, SDLoc DL, std::vector<SDValue> &OutChains,
 void MipsTargetLowering::
 passByValArg(SDValue Chain, SDLoc DL,
              std::deque< std::pair<unsigned, SDValue> > &RegsToPass,
-             SmallVector<SDValue, 8> &MemOpChains, SDValue StackPtr,
+             SmallVectorImpl<SDValue> &MemOpChains, SDValue StackPtr,
              MachineFrameInfo *MFI, SelectionDAG &DAG, SDValue Arg,
              const MipsCC &CC, const ByValArgInfo &ByVal,
              const ISD::ArgFlagsTy &Flags, bool isLittle) const {

@@ -386,7 +386,7 @@ namespace llvm {
     /// passByValArg - Pass a byval argument in registers or on stack.
     void passByValArg(SDValue Chain, SDLoc DL,
                       std::deque< std::pair<unsigned, SDValue> > &RegsToPass,
-                      SmallVector<SDValue, 8> &MemOpChains, SDValue StackPtr,
+                      SmallVectorImpl<SDValue> &MemOpChains, SDValue StackPtr,
                       MachineFrameInfo *MFI, SelectionDAG &DAG, SDValue Arg,
                       const MipsCC &CC, const ByValArgInfo &ByVal,
                       const ISD::ArgFlagsTy &Flags, bool isLittle) const;

@@ -493,9 +493,9 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                                        SmallVectorImpl<SDValue> &InVals) const {
   SelectionDAG &DAG = CLI.DAG;
   SDLoc dl = CLI.DL;
-  SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
-  SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
-  SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
+  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
+  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
+  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
   SDValue Chain = CLI.Chain;
   SDValue Callee = CLI.Callee;
   bool &isTailCall = CLI.IsTailCall;

@@ -369,7 +369,7 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF) const {
   // Check if the link register (LR) must be saved.
   PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
   bool MustSaveLR = FI->mustSaveLR();
-  const SmallVector<unsigned, 3> &MustSaveCRs = FI->getMustSaveCRs();
+  const SmallVectorImpl<unsigned> &MustSaveCRs = FI->getMustSaveCRs();
   // Do we have a frame pointer for this function?
   bool HasFP = hasFP(MF);

@@ -642,7 +642,7 @@ void PPCFrameLowering::emitEpilogue(MachineFunction &MF,
   // Check if the link register (LR) has been saved.
   PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
   bool MustSaveLR = FI->mustSaveLR();
-  const SmallVector<unsigned, 3> &MustSaveCRs = FI->getMustSaveCRs();
+  const SmallVectorImpl<unsigned> &MustSaveCRs = FI->getMustSaveCRs();
   // Do we have a frame pointer for this function?
   bool HasFP = hasFP(MF);

@@ -2957,8 +2957,8 @@ struct TailCallArgumentInfo {
 static void
 StoreTailCallArgumentsToStackSlot(SelectionDAG &DAG,
                                   SDValue Chain,
-                   const SmallVector<TailCallArgumentInfo, 8> &TailCallArgs,
-                   SmallVector<SDValue, 8> &MemOpChains,
+                   const SmallVectorImpl<TailCallArgumentInfo> &TailCallArgs,
+                   SmallVectorImpl<SDValue> &MemOpChains,
                    SDLoc dl) {
   for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) {
     SDValue Arg = TailCallArgs[i].Arg;

@@ -3016,7 +3016,7 @@ static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG,
 static void
 CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64,
                          SDValue Arg, int SPDiff, unsigned ArgOffset,
-                      SmallVector<TailCallArgumentInfo, 8>& TailCallArguments) {
+                      SmallVectorImpl<TailCallArgumentInfo>& TailCallArguments) {
   int Offset = ArgOffset + SPDiff;
   uint32_t OpSize = (Arg.getValueType().getSizeInBits()+7)/8;
   int FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true);

@@ -3081,8 +3081,8 @@ static void
 LowerMemOpCallTo(SelectionDAG &DAG, MachineFunction &MF, SDValue Chain,
                  SDValue Arg, SDValue PtrOff, int SPDiff,
                  unsigned ArgOffset, bool isPPC64, bool isTailCall,
-                 bool isVector, SmallVector<SDValue, 8> &MemOpChains,
-                 SmallVector<TailCallArgumentInfo, 8> &TailCallArguments,
+                 bool isVector, SmallVectorImpl<SDValue> &MemOpChains,
+                 SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments,
                  SDLoc dl) {
   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
   if (!isTailCall) {

@@ -3106,7 +3106,7 @@ static
 void PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain,
                      SDLoc dl, bool isPPC64, int SPDiff, unsigned NumBytes,
                      SDValue LROp, SDValue FPOp, bool isDarwinABI,
-                     SmallVector<TailCallArgumentInfo, 8> &TailCallArguments) {
+                     SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments) {
   MachineFunction &MF = DAG.getMachineFunction();

   // Emit a sequence of copyto/copyfrom virtual registers for arguments that

@@ -3133,8 +3133,8 @@ void PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain,
 static
 unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag,
                      SDValue &Chain, SDLoc dl, int SPDiff, bool isTailCall,
-                     SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass,
-                     SmallVector<SDValue, 8> &Ops, std::vector<EVT> &NodeTys,
+                     SmallVectorImpl<std::pair<unsigned, SDValue> > &RegsToPass,
+                     SmallVectorImpl<SDValue> &Ops, std::vector<EVT> &NodeTys,
                      const PPCSubtarget &PPCSubTarget) {

   bool isPPC64 = PPCSubTarget.isPPC64();

@@ -3460,10 +3460,10 @@ SDValue
 PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                              SmallVectorImpl<SDValue> &InVals) const {
   SelectionDAG &DAG = CLI.DAG;
-  SDLoc &dl = CLI.DL;
-  SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
-  SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
-  SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
+  SDLoc &dl = CLI.DL;
+  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
+  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
+  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
   SDValue Chain = CLI.Chain;
   SDValue Callee = CLI.Callee;
   bool &isTailCall = CLI.IsTailCall;

@@ -160,7 +160,7 @@ public:
   int getCRSpillFrameIndex() const { return CRSpillFrameIndex; }
   void setCRSpillFrameIndex(int idx) { CRSpillFrameIndex = idx; }

-  const SmallVector<unsigned, 3> &
+  const SmallVectorImpl<unsigned> &
     getMustSaveCRs() const { return MustSaveCRs; }
   void addMustSaveCR(unsigned Reg) { MustSaveCRs.push_back(Reg); }
 };

@@ -92,7 +92,7 @@ void PrintLoopinfo(const LoopinfoT &LoopInfo, llvm::raw_ostream &OS) {
 }

 template<class NodeT>
-void ReverseVector(SmallVector<NodeT *, DEFAULT_VEC_SLOTS> &Src) {
+void ReverseVector(SmallVectorImpl<NodeT *> &Src) {
   size_t sz = Src.size();
   for (size_t i = 0; i < sz/2; ++i) {
     NodeT *t = Src[i];

@@ -258,7 +258,7 @@ private:
   BlockT *normalizeInfiniteLoopExit(LoopT *LoopRep);
   void removeUnconditionalBranch(BlockT *SrcBlock);
   void removeRedundantConditionalBranch(BlockT *SrcBlock);
-  void addDummyExitBlock(SmallVector<BlockT *, DEFAULT_VEC_SLOTS> &RetBlocks);
+  void addDummyExitBlock(SmallVectorImpl<BlockT *> &RetBlocks);

   void removeSuccessor(BlockT *SrcBlock);
   BlockT *cloneBlockForPredecessor(BlockT *CurBlock, BlockT *PredBlock);

@@ -2076,8 +2076,8 @@ void CFGStructurizer<PassT>::removeRedundantConditionalBranch(BlockT *srcBlk) {
 } //removeRedundantConditionalBranch

 template<class PassT>
-void CFGStructurizer<PassT>::addDummyExitBlock(SmallVector<BlockT*,
-                                               DEFAULT_VEC_SLOTS> &retBlks) {
+void CFGStructurizer<PassT>::addDummyExitBlock(SmallVectorImpl<BlockT *>
+                                               &retBlks) {
   BlockT *dummyExitBlk = funcRep->CreateMachineBasicBlock();
   funcRep->push_back(dummyExitBlk);  //insert to function
   CFGTraits::insertInstrEnd(dummyExitBlk, AMDGPU::RETURN, passRep);

@@ -107,7 +107,7 @@ private:
   bool SubstituteKCacheBank(MachineInstr *MI,
       std::vector<std::pair<unsigned, unsigned> > &CachedConsts) const {
     std::vector<std::pair<unsigned, unsigned> > UsedKCache;
-    const SmallVector<std::pair<MachineOperand *, int64_t>, 3> &Consts =
+    const SmallVectorImpl<std::pair<MachineOperand *, int64_t> > &Consts =
         TII->getSrcs(MI);
     assert((TII->isALUInstr(MI->getOpcode()) ||
             MI->getOpcode() == AMDGPU::DOT_4) && "Can't assign Const");

@@ -519,7 +519,7 @@ R600InstrInfo::fitsConstReadLimitations(const std::vector<MachineInstr *> &MIs)
     if (!isALUInstr(MI->getOpcode()))
       continue;

-    const SmallVector<std::pair<MachineOperand *, int64_t>, 3> &Srcs =
+    const SmallVectorImpl<std::pair<MachineOperand *, int64_t> > &Srcs =
         getSrcs(MI);

     for (unsigned j = 0, e = Srcs.size(); j < e; j++) {

@@ -654,9 +654,9 @@ SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI,
                                   SmallVectorImpl<SDValue> &InVals) const {
   SelectionDAG &DAG = CLI.DAG;
   SDLoc &dl = CLI.DL;
-  SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
-  SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
-  SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
+  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
+  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
+  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
   SDValue Chain = CLI.Chain;
   SDValue Callee = CLI.Callee;
   bool &isTailCall = CLI.IsTailCall;

@@ -679,9 +679,9 @@ SystemZTargetLowering::LowerCall(CallLoweringInfo &CLI,
                                  SmallVectorImpl<SDValue> &InVals) const {
   SelectionDAG &DAG = CLI.DAG;
   SDLoc &DL = CLI.DL;
-  SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
-  SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
-  SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
+  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
+  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
+  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
   SDValue Chain = CLI.Chain;
   SDValue Callee = CLI.Callee;
   bool &isTailCall = CLI.IsTailCall;

@@ -2280,10 +2280,10 @@ SDValue
 X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                              SmallVectorImpl<SDValue> &InVals) const {
   SelectionDAG &DAG = CLI.DAG;
-  SDLoc &dl = CLI.DL;
-  SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
-  SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
-  SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
+  SDLoc &dl = CLI.DL;
+  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
+  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
+  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
   SDValue Chain = CLI.Chain;
   SDValue Callee = CLI.Callee;
   CallingConv::ID CallConv = CLI.CallConv;

@@ -847,10 +847,10 @@ SDValue
 XCoreTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                                SmallVectorImpl<SDValue> &InVals) const {
   SelectionDAG &DAG = CLI.DAG;
-  SDLoc &dl = CLI.DL;
-  SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
-  SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
-  SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
+  SDLoc &dl = CLI.DL;
+  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
+  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
+  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
   SDValue Chain = CLI.Chain;
   SDValue Callee = CLI.Callee;
   bool &isTailCall = CLI.IsTailCall;

@@ -1299,7 +1299,7 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
 /// always in the local (OverallLeftShift) coordinate space.
 ///
 static bool CollectBSwapParts(Value *V, int OverallLeftShift, uint32_t ByteMask,
                               SmallVector<Value*, 8> &ByteValues) {
-                              SmallVector<Value*, 8> &ByteValues) {
+                              SmallVectorImpl<Value *> &ByteValues) {
   if (Instruction *I = dyn_cast<Instruction>(V)) {
     // If this is an or instruction, it may be an inner node of the bswap.
     if (I->getOpcode() == Instruction::Or) {

@@ -1198,9 +1198,9 @@ namespace {
                                MapVector<Value *, RRInfo> &Retains,
                                DenseMap<Value *, RRInfo> &Releases,
                                Module *M,
-                               SmallVector<Instruction *, 4> &NewRetains,
-                               SmallVector<Instruction *, 4> &NewReleases,
-                               SmallVector<Instruction *, 8> &DeadInsts,
+                               SmallVectorImpl<Instruction *> &NewRetains,
+                               SmallVectorImpl<Instruction *> &NewReleases,
+                               SmallVectorImpl<Instruction *> &DeadInsts,
                                RRInfo &RetainsToMove,
                                RRInfo &ReleasesToMove,
                                Value *Arg,

@@ -2477,9 +2477,9 @@ ObjCARCOpt::ConnectTDBUTraversals(DenseMap<const BasicBlock *, BBState>
                                   MapVector<Value *, RRInfo> &Retains,
                                   DenseMap<Value *, RRInfo> &Releases,
                                   Module *M,
-                                  SmallVector<Instruction *, 4> &NewRetains,
-                                  SmallVector<Instruction *, 4> &NewReleases,
-                                  SmallVector<Instruction *, 8> &DeadInsts,
+                                  SmallVectorImpl<Instruction *> &NewRetains,
+                                  SmallVectorImpl<Instruction *> &NewReleases,
+                                  SmallVectorImpl<Instruction *> &DeadInsts,
                                   RRInfo &RetainsToMove,
                                   RRInfo &ReleasesToMove,
                                   Value *Arg,

@@ -51,8 +51,8 @@ namespace {
     }

   private:
-    bool isLoopDead(Loop *L, SmallVector<BasicBlock*, 4> &exitingBlocks,
-                    SmallVector<BasicBlock*, 4> &exitBlocks,
+    bool isLoopDead(Loop *L, SmallVectorImpl<BasicBlock *> &exitingBlocks,
+                    SmallVectorImpl<BasicBlock *> &exitBlocks,
                     bool &Changed, BasicBlock *Preheader);

   };

@@ -77,8 +77,8 @@ Pass *llvm::createLoopDeletionPass() {
 /// checked for unique exit and exiting blocks, and that the code is in LCSSA
 /// form.
 bool LoopDeletion::isLoopDead(Loop *L,
-                              SmallVector<BasicBlock*, 4> &exitingBlocks,
-                              SmallVector<BasicBlock*, 4> &exitBlocks,
+                              SmallVectorImpl<BasicBlock *> &exitingBlocks,
+                              SmallVectorImpl<BasicBlock *> &exitBlocks,
                               bool &Changed, BasicBlock *Preheader) {
   BasicBlock *exitBlock = exitBlocks[0];

@@ -196,7 +196,7 @@ namespace {

     /// Split all of the edges from inside the loop to their exit blocks.
     /// Update the appropriate Phi nodes as we do so.
-    void SplitExitEdges(Loop *L, const SmallVector<BasicBlock *, 8> &ExitBlocks);
+    void SplitExitEdges(Loop *L, const SmallVectorImpl<BasicBlock *> &ExitBlocks);

     bool UnswitchIfProfitable(Value *LoopCond, Constant *Val);
     void UnswitchTrivialCondition(Loop *L, Value *Cond, Constant *Val,

@@ -752,7 +752,7 @@ void LoopUnswitch::UnswitchTrivialCondition(Loop *L, Value *Cond,
 /// SplitExitEdges - Split all of the edges from inside the loop to their exit
 /// blocks.  Update the appropriate Phi nodes as we do so.
 void LoopUnswitch::SplitExitEdges(Loop *L,
-                                const SmallVector<BasicBlock *, 8> &ExitBlocks){
+                                const SmallVectorImpl<BasicBlock *> &ExitBlocks){

   for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i) {
     BasicBlock *ExitBlock = ExitBlocks[i];
@@ -439,7 +439,7 @@ private:
// getFeasibleSuccessors - Return a vector of booleans to indicate which
// successors are reachable from a given terminator instruction.
//
void getFeasibleSuccessors(TerminatorInst &TI, SmallVector<bool, 16> &Succs);
void getFeasibleSuccessors(TerminatorInst &TI, SmallVectorImpl<bool> &Succs);

// isEdgeFeasible - Return true if the control flow edge from the 'From' basic
// block to the 'To' basic block is currently feasible.

@@ -513,7 +513,7 @@ private:
// successors are reachable from a given terminator instruction.
//
void SCCPSolver::getFeasibleSuccessors(TerminatorInst &TI,
SmallVector<bool, 16> &Succs) {
SmallVectorImpl<bool> &Succs) {
Succs.resize(TI.getNumSuccessors());
if (BranchInst *BI = dyn_cast<BranchInst>(&TI)) {
if (BI->isUnconditional()) {

@@ -166,21 +166,21 @@ namespace {
void DeleteDeadInstructions();

void RewriteForScalarRepl(Instruction *I, AllocaInst *AI, uint64_t Offset,
SmallVector<AllocaInst*, 32> &NewElts);
SmallVectorImpl<AllocaInst *> &NewElts);
void RewriteBitCast(BitCastInst *BC, AllocaInst *AI, uint64_t Offset,
SmallVector<AllocaInst*, 32> &NewElts);
SmallVectorImpl<AllocaInst *> &NewElts);
void RewriteGEP(GetElementPtrInst *GEPI, AllocaInst *AI, uint64_t Offset,
SmallVector<AllocaInst*, 32> &NewElts);
SmallVectorImpl<AllocaInst *> &NewElts);
void RewriteLifetimeIntrinsic(IntrinsicInst *II, AllocaInst *AI,
uint64_t Offset,
SmallVector<AllocaInst*, 32> &NewElts);
SmallVectorImpl<AllocaInst *> &NewElts);
void RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
AllocaInst *AI,
SmallVector<AllocaInst*, 32> &NewElts);
SmallVectorImpl<AllocaInst *> &NewElts);
void RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocaInst *AI,
SmallVector<AllocaInst*, 32> &NewElts);
SmallVectorImpl<AllocaInst *> &NewElts);
void RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI,
SmallVector<AllocaInst*, 32> &NewElts);
SmallVectorImpl<AllocaInst *> &NewElts);
bool ShouldAttemptScalarRepl(AllocaInst *AI);
};

@@ -1865,7 +1865,7 @@ bool SROA::TypeHasComponent(Type *T, uint64_t Offset, uint64_t Size) {
/// Offset indicates the position within AI that is referenced by this
/// instruction.
void SROA::RewriteForScalarRepl(Instruction *I, AllocaInst *AI, uint64_t Offset,
SmallVector<AllocaInst*, 32> &NewElts) {
SmallVectorImpl<AllocaInst *> &NewElts) {
for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI!=E;) {
Use &TheUse = UI.getUse();
Instruction *User = cast<Instruction>(*UI++);

@@ -1979,7 +1979,7 @@ void SROA::RewriteForScalarRepl(Instruction *I, AllocaInst *AI, uint64_t Offset,
/// RewriteBitCast - Update a bitcast reference to the alloca being replaced
/// and recursively continue updating all of its uses.
void SROA::RewriteBitCast(BitCastInst *BC, AllocaInst *AI, uint64_t Offset,
SmallVector<AllocaInst*, 32> &NewElts) {
SmallVectorImpl<AllocaInst *> &NewElts) {
RewriteForScalarRepl(BC, AI, Offset, NewElts);
if (BC->getOperand(0) != AI)
return;

@@ -2037,7 +2037,7 @@ uint64_t SROA::FindElementAndOffset(Type *&T, uint64_t &Offset,
/// elements of the alloca that are being split apart, and if so, rewrite
/// the GEP to be relative to the new element.
void SROA::RewriteGEP(GetElementPtrInst *GEPI, AllocaInst *AI, uint64_t Offset,
SmallVector<AllocaInst*, 32> &NewElts) {
SmallVectorImpl<AllocaInst *> &NewElts) {
uint64_t OldOffset = Offset;
SmallVector<Value*, 8> Indices(GEPI->op_begin() + 1, GEPI->op_end());
// If the GEP was dynamic then it must have been a dynamic vector lookup.

@@ -2099,7 +2099,7 @@ void SROA::RewriteGEP(GetElementPtrInst *GEPI, AllocaInst *AI, uint64_t Offset,
/// to mark the lifetime of the scalarized memory.
void SROA::RewriteLifetimeIntrinsic(IntrinsicInst *II, AllocaInst *AI,
uint64_t Offset,
SmallVector<AllocaInst*, 32> &NewElts) {
SmallVectorImpl<AllocaInst *> &NewElts) {
ConstantInt *OldSize = cast<ConstantInt>(II->getArgOperand(0));
// Put matching lifetime markers on everything from Offset up to
// Offset+OldSize.

@@ -2153,9 +2153,10 @@ void SROA::RewriteLifetimeIntrinsic(IntrinsicInst *II, AllocaInst *AI,

/// RewriteMemIntrinUserOfAlloca - MI is a memcpy/memset/memmove from or to AI.
/// Rewrite it to copy or set the elements of the scalarized memory.
void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
AllocaInst *AI,
SmallVector<AllocaInst*, 32> &NewElts) {
void
SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
AllocaInst *AI,
SmallVectorImpl<AllocaInst *> &NewElts) {
// If this is a memcpy/memmove, construct the other pointer as the
// appropriate type. The "Other" pointer is the pointer that goes to memory
// that doesn't have anything to do with the alloca that we are promoting. For

@@ -2326,8 +2327,9 @@ void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
/// RewriteStoreUserOfWholeAlloca - We found a store of an integer that
/// overwrites the entire allocation. Extract out the pieces of the stored
/// integer and store them individually.
void SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocaInst *AI,
SmallVector<AllocaInst*, 32> &NewElts){
void
SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocaInst *AI,
SmallVectorImpl<AllocaInst *> &NewElts) {
// Extract each element out of the integer according to its structure offset
// and store the element value to the individual alloca.
Value *SrcVal = SI->getOperand(0);

@@ -2440,8 +2442,9 @@ void SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocaInst *AI,

/// RewriteLoadUserOfWholeAlloca - We found a load of the entire allocation to
/// an integer. Load the individual pieces to form the aggregate value.
void SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI,
SmallVector<AllocaInst*, 32> &NewElts) {
void
SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI,
SmallVectorImpl<AllocaInst *> &NewElts) {
// Extract each element out of the NewElts according to its structure offset
// and form the result value.
Type *AllocaEltTy = AI->getAllocatedType();

@@ -99,16 +99,16 @@ namespace {
bool EliminateRecursiveTailCall(CallInst *CI, ReturnInst *Ret,
BasicBlock *&OldEntry,
bool &TailCallsAreMarkedTail,
SmallVector<PHINode*, 8> &ArgumentPHIs,
SmallVectorImpl<PHINode *> &ArgumentPHIs,
bool CannotTailCallElimCallsMarkedTail);
bool FoldReturnAndProcessPred(BasicBlock *BB,
ReturnInst *Ret, BasicBlock *&OldEntry,
bool &TailCallsAreMarkedTail,
SmallVector<PHINode*, 8> &ArgumentPHIs,
SmallVectorImpl<PHINode *> &ArgumentPHIs,
bool CannotTailCallElimCallsMarkedTail);
bool ProcessReturningBlock(ReturnInst *RI, BasicBlock *&OldEntry,
bool &TailCallsAreMarkedTail,
SmallVector<PHINode*, 8> &ArgumentPHIs,
SmallVectorImpl<PHINode *> &ArgumentPHIs,
bool CannotTailCallElimCallsMarkedTail);
bool CanMoveAboveCall(Instruction *I, CallInst *CI);
Value *CanTransformAccumulatorRecursion(Instruction *I, CallInst *CI);

@@ -445,7 +445,7 @@ TailCallElim::FindTRECandidate(Instruction *TI,
bool TailCallElim::EliminateRecursiveTailCall(CallInst *CI, ReturnInst *Ret,
BasicBlock *&OldEntry,
bool &TailCallsAreMarkedTail,
SmallVector<PHINode*, 8> &ArgumentPHIs,
SmallVectorImpl<PHINode *> &ArgumentPHIs,
bool CannotTailCallElimCallsMarkedTail) {
// If we are introducing accumulator recursion to eliminate operations after
// the call instruction that are both associative and commutative, the initial

@@ -621,7 +621,7 @@ bool TailCallElim::EliminateRecursiveTailCall(CallInst *CI, ReturnInst *Ret,
bool TailCallElim::FoldReturnAndProcessPred(BasicBlock *BB,
ReturnInst *Ret, BasicBlock *&OldEntry,
bool &TailCallsAreMarkedTail,
SmallVector<PHINode*, 8> &ArgumentPHIs,
SmallVectorImpl<PHINode *> &ArgumentPHIs,
bool CannotTailCallElimCallsMarkedTail) {
bool Change = false;

@@ -655,10 +655,11 @@ bool TailCallElim::FoldReturnAndProcessPred(BasicBlock *BB,
return Change;
}

bool TailCallElim::ProcessReturningBlock(ReturnInst *Ret, BasicBlock *&OldEntry,
bool &TailCallsAreMarkedTail,
SmallVector<PHINode*, 8> &ArgumentPHIs,
bool CannotTailCallElimCallsMarkedTail) {
bool
TailCallElim::ProcessReturningBlock(ReturnInst *Ret, BasicBlock *&OldEntry,
bool &TailCallsAreMarkedTail,
SmallVectorImpl<PHINode *> &ArgumentPHIs,
bool CannotTailCallElimCallsMarkedTail) {
CallInst *CI = FindTRECandidate(Ret, CannotTailCallElimCallsMarkedTail);
if (!CI)
return false;

@@ -3353,7 +3353,7 @@ static bool ForwardSwitchConditionToPHI(SwitchInst *SI) {
for (ForwardingNodesMap::iterator I = ForwardingNodes.begin(),
E = ForwardingNodes.end(); I != E; ++I) {
PHINode *Phi = I->first;
SmallVector<int,4> &Indexes = I->second;
SmallVectorImpl<int> &Indexes = I->second;

if (Indexes.size() < 2) continue;

@@ -3438,11 +3438,12 @@ static Constant *ConstantFold(Instruction *I,
/// at the common destination basic block, *CommonDest, for one of the case
/// destionations CaseDest corresponding to value CaseVal (0 for the default
/// case), of a switch instruction SI.
static bool GetCaseResults(SwitchInst *SI,
ConstantInt *CaseVal,
BasicBlock *CaseDest,
BasicBlock **CommonDest,
SmallVector<std::pair<PHINode*,Constant*>, 4> &Res) {
static bool
GetCaseResults(SwitchInst *SI,
ConstantInt *CaseVal,
BasicBlock *CaseDest,
BasicBlock **CommonDest,
SmallVectorImpl<std::pair<PHINode*,Constant*> > &Res) {
// The block from which we enter the common destination.
BasicBlock *Pred = SI->getParent();

@@ -3515,7 +3516,7 @@ namespace {
SwitchLookupTable(Module &M,
uint64_t TableSize,
ConstantInt *Offset,
const SmallVector<std::pair<ConstantInt*, Constant*>, 4>& Values,
const SmallVectorImpl<std::pair<ConstantInt*, Constant*> >& Values,
Constant *DefaultValue,
const DataLayout *TD);

@@ -3562,7 +3563,7 @@ namespace {
SwitchLookupTable::SwitchLookupTable(Module &M,
uint64_t TableSize,
ConstantInt *Offset,
const SmallVector<std::pair<ConstantInt*, Constant*>, 4>& Values,
const SmallVectorImpl<std::pair<ConstantInt*, Constant*> >& Values,
Constant *DefaultValue,
const DataLayout *TD)
: SingleValue(0), BitMap(0), BitMapElementTy(0), Array(0) {

@@ -356,7 +356,7 @@ namespace {
Instruction *J, unsigned o, bool IBeforeJ);

void getReplacementInputsForPair(LLVMContext& Context, Instruction *I,
Instruction *J, SmallVector<Value *, 3> &ReplacedOperands,
Instruction *J, SmallVectorImpl<Value *> &ReplacedOperands,
bool IBeforeJ);

void replaceOutputsOfPair(LLVMContext& Context, Instruction *I,

@@ -2687,7 +2687,7 @@ namespace {
// to the vector instruction that fuses I with J.
void BBVectorize::getReplacementInputsForPair(LLVMContext& Context,
Instruction *I, Instruction *J,
SmallVector<Value *, 3> &ReplacedOperands,
SmallVectorImpl<Value *> &ReplacedOperands,
bool IBeforeJ) {
unsigned NumOperands = I->getNumOperands();

@@ -1088,7 +1088,7 @@ CodeGenRegBank::getCompositeSubRegIndex(CodeGenSubRegIndex *A,
}

CodeGenSubRegIndex *CodeGenRegBank::
getConcatSubRegIndex(const SmallVector<CodeGenSubRegIndex*, 8> &Parts) {
getConcatSubRegIndex(const SmallVector<CodeGenSubRegIndex *, 8> &Parts) {
assert(Parts.size() > 1 && "Need two parts to concatenate");

// Look for an existing entry.

@@ -534,10 +534,10 @@ namespace llvm {
// Find or create a sub-register index representing the concatenation of
// non-overlapping sibling indices.
CodeGenSubRegIndex *
getConcatSubRegIndex(const SmallVector<CodeGenSubRegIndex*, 8>&);
getConcatSubRegIndex(const SmallVector<CodeGenSubRegIndex *, 8>&);

void
addConcatSubRegIndex(const SmallVector<CodeGenSubRegIndex*, 8> &Parts,
addConcatSubRegIndex(const SmallVector<CodeGenSubRegIndex *, 8> &Parts,
CodeGenSubRegIndex *Idx) {
ConcatIdx.insert(std::make_pair(Parts, Idx));
}

@@ -1102,7 +1102,7 @@ void PredTransitions::getIntersectingVariants(
TransVariant &Variant = Variants[VIdx];
// Don't expand variants if the processor models don't intersect.
// A zero processor index means any processor.
SmallVector<unsigned, 4> &ProcIndices = TransVec[TransIdx].ProcIndices;
SmallVectorImpl<unsigned> &ProcIndices = TransVec[TransIdx].ProcIndices;
if (ProcIndices[0] && Variants[VIdx].ProcIdx) {
unsigned Cnt = std::count(ProcIndices.begin(), ProcIndices.end(),
Variant.ProcIdx);
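
Every hunk above applies the same pattern: a function that only reads or appends to a caller-owned buffer takes SmallVectorImpl<T>& instead of SmallVector<T, N>&, so the inline element count N is no longer baked into the signature. A minimal sketch of the idiom follows; it is illustrative only, and the names (collectWorklist, Worklist, caller) are hypothetical, not taken from this commit.

    #include "llvm/ADT/SmallVector.h"

    using namespace llvm;

    // Hypothetical helper: taking SmallVectorImpl<int>& lets callers pass a
    // SmallVector of any inline size, since SmallVector<T, N> derives from
    // SmallVectorImpl<T>. Growth past the inline size spills to the heap.
    static void collectWorklist(SmallVectorImpl<int> &Worklist) {
      Worklist.push_back(42); // appends exactly as it would on a SmallVector
    }

    void caller() {
      SmallVector<int, 4> A;  // inline size chosen at the call site
      SmallVector<int, 16> B; // a different inline size also works
      collectWorklist(A);
      collectWorklist(B);
    }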