forked from OSchip/llvm-project

CodeGen: Convert some TII hooks to use Register

This commit is contained in:
parent 178050c3ba
commit 30ebafaa56
@@ -235,8 +235,8 @@ public:
   /// destination. e.g. X86::MOVSX64rr32. If this returns true, then it's
   /// expected the pre-extension value is available as a subreg of the result
   /// register. This also returns the sub-register index in SubIdx.
-  virtual bool isCoalescableExtInstr(const MachineInstr &MI, unsigned &SrcReg,
-                                     unsigned &DstReg, unsigned &SubIdx) const {
+  virtual bool isCoalescableExtInstr(const MachineInstr &MI, Register &SrcReg,
+                                     Register &DstReg, unsigned &SubIdx) const {
     return false;
   }
 
@@ -368,7 +368,7 @@ public:
   /// DestReg:SubIdx. Any existing subreg index is preserved or composed with
   /// SubIdx.
   virtual void reMaterialize(MachineBasicBlock &MBB,
-                             MachineBasicBlock::iterator MI, unsigned DestReg,
+                             MachineBasicBlock::iterator MI, Register DestReg,
                              unsigned SubIdx, const MachineInstr &Orig,
                              const TargetRegisterInfo &TRI) const;
 
@@ -448,10 +448,10 @@ public:
   /// A pair composed of a register and a sub-register index.
   /// Used to give some type checking when modeling Reg:SubReg.
   struct RegSubRegPair {
-    unsigned Reg;
+    Register Reg;
     unsigned SubReg;
 
-    RegSubRegPair(unsigned Reg = 0, unsigned SubReg = 0)
+    RegSubRegPair(Register Reg = Register(), unsigned SubReg = 0)
         : Reg(Reg), SubReg(SubReg) {}
 
     bool operator==(const RegSubRegPair& P) const {
@@ -468,7 +468,7 @@ public:
   struct RegSubRegPairAndIdx : RegSubRegPair {
     unsigned SubIdx;
 
-    RegSubRegPairAndIdx(unsigned Reg = 0, unsigned SubReg = 0,
+    RegSubRegPairAndIdx(Register Reg = Register(), unsigned SubReg = 0,
                         unsigned SubIdx = 0)
         : RegSubRegPair(Reg, SubReg), SubIdx(SubIdx) {}
   };
@@ -845,8 +845,8 @@ public:
   /// @param TrueCycles Latency from TrueReg to select output.
   /// @param FalseCycles Latency from FalseReg to select output.
   virtual bool canInsertSelect(const MachineBasicBlock &MBB,
-                               ArrayRef<MachineOperand> Cond, unsigned DstReg,
-                               unsigned TrueReg, unsigned FalseReg,
+                               ArrayRef<MachineOperand> Cond, Register DstReg,
+                               Register TrueReg, Register FalseReg,
                                int &CondCycles, int &TrueCycles,
                                int &FalseCycles) const {
     return false;
@@ -869,8 +869,8 @@ public:
   /// @param FalseReg Virtual register to copy when Cons is false.
   virtual void insertSelect(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator I, const DebugLoc &DL,
-                            unsigned DstReg, ArrayRef<MachineOperand> Cond,
-                            unsigned TrueReg, unsigned FalseReg) const {
+                            Register DstReg, ArrayRef<MachineOperand> Cond,
+                            Register TrueReg, Register FalseReg) const {
     llvm_unreachable("Target didn't implement TargetInstrInfo::insertSelect!");
   }
 
@@ -1416,16 +1416,16 @@ public:
   /// in SrcReg and SrcReg2 if having two register operands, and the value it
   /// compares against in CmpValue. Return true if the comparison instruction
   /// can be analyzed.
-  virtual bool analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
-                              unsigned &SrcReg2, int &Mask, int &Value) const {
+  virtual bool analyzeCompare(const MachineInstr &MI, Register &SrcReg,
+                              Register &SrcReg2, int &Mask, int &Value) const {
     return false;
   }
 
   /// See if the comparison instruction can be converted
   /// into something more efficient. E.g., on ARM most instructions can set the
   /// flags register, obviating the need for a separate CMP.
-  virtual bool optimizeCompareInstr(MachineInstr &CmpInstr, unsigned SrcReg,
-                                    unsigned SrcReg2, int Mask, int Value,
+  virtual bool optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
+                                    Register SrcReg2, int Mask, int Value,
                                     const MachineRegisterInfo *MRI) const {
     return false;
   }
 
@@ -1452,7 +1452,7 @@ public:
   /// block. The caller may assume that it will not be erased by this
   /// function otherwise.
   virtual bool FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
-                             unsigned Reg, MachineRegisterInfo *MRI) const {
+                             Register Reg, MachineRegisterInfo *MRI) const {
     return false;
   }
 
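For context, this is roughly the caller-side contract of the retyped hooks. The sketch below is modeled on the PeepholeOptimizer change further down; the wrapper function name and the surrounding includes are assumptions for illustration, not code from this commit.

#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Register.h"
#include "llvm/CodeGen/TargetInstrInfo.h"

using namespace llvm;

// Hypothetical helper: try to fold a comparison into an earlier flag-setting
// instruction, mirroring PeepholeOptimizer::optimizeCmpInstr below.
static bool tryOptimizeCompare(MachineInstr &MI, const TargetInstrInfo *TII,
                               const MachineRegisterInfo *MRI) {
  Register SrcReg, SrcReg2; // default-constructed Register means "no register"
  int CmpMask, CmpValue;
  if (!TII->analyzeCompare(MI, SrcReg, SrcReg2, CmpMask, CmpValue) ||
      SrcReg.isPhysical() || SrcReg2.isPhysical())
    return false;
  // With Register out-parameters the isVirtual()/isPhysical() member helpers
  // can be used directly instead of the static Register::is*Register(unsigned)
  // forms that the pre-change code needed.
  return TII->optimizeCompareInstr(MI, SrcReg, SrcReg2, CmpMask, CmpValue, MRI);
}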
@@ -457,12 +457,12 @@ INITIALIZE_PASS_END(PeepholeOptimizer, DEBUG_TYPE,
 bool PeepholeOptimizer::
 optimizeExtInstr(MachineInstr &MI, MachineBasicBlock &MBB,
                  SmallPtrSetImpl<MachineInstr*> &LocalMIs) {
-  unsigned SrcReg, DstReg, SubIdx;
+  Register SrcReg, DstReg;
+  unsigned SubIdx;
   if (!TII->isCoalescableExtInstr(MI, SrcReg, DstReg, SubIdx))
     return false;
 
-  if (Register::isPhysicalRegister(DstReg) ||
-      Register::isPhysicalRegister(SrcReg))
+  if (DstReg.isPhysical() || SrcReg.isPhysical())
     return false;
 
   if (MRI->hasOneNonDBGUse(SrcReg))
@@ -607,11 +607,10 @@ optimizeExtInstr(MachineInstr &MI, MachineBasicBlock &MBB,
 bool PeepholeOptimizer::optimizeCmpInstr(MachineInstr &MI) {
   // If this instruction is a comparison against zero and isn't comparing a
   // physical register, we can try to optimize it.
-  unsigned SrcReg, SrcReg2;
+  Register SrcReg, SrcReg2;
   int CmpMask, CmpValue;
   if (!TII->analyzeCompare(MI, SrcReg, SrcReg2, CmpMask, CmpValue) ||
-      Register::isPhysicalRegister(SrcReg) ||
-      (SrcReg2 != 0 && Register::isPhysicalRegister(SrcReg2)))
+      SrcReg.isPhysical() || SrcReg2.isPhysical())
     return false;
 
   // Attempt to optimize the comparison instruction.
@@ -663,8 +662,8 @@ bool PeepholeOptimizer::findNextSource(RegSubRegPair RegSubReg,
   // So far we do not have any motivating example for doing that.
   // Thus, instead of maintaining untested code, we will revisit that if
   // that changes at some point.
-  unsigned Reg = RegSubReg.Reg;
-  if (Register::isPhysicalRegister(Reg))
+  Register Reg = RegSubReg.Reg;
+  if (Reg.isPhysical())
     return false;
   const TargetRegisterClass *DefRC = MRI->getRegClass(Reg);
 
@@ -486,8 +486,8 @@ void InstrEmitter::EmitSubregNode(SDNode *Node,
   for (SDNode *User : Node->uses()) {
     if (User->getOpcode() == ISD::CopyToReg &&
         User->getOperand(2).getNode() == Node) {
-      unsigned DestReg = cast<RegisterSDNode>(User->getOperand(1))->getReg();
-      if (Register::isVirtualRegister(DestReg)) {
+      Register DestReg = cast<RegisterSDNode>(User->getOperand(1))->getReg();
+      if (DestReg.isVirtual()) {
         VRBase = DestReg;
         break;
       }
@@ -502,7 +502,7 @@ void InstrEmitter::EmitSubregNode(SDNode *Node,
     const TargetRegisterClass *TRC =
       TLI->getRegClassFor(Node->getSimpleValueType(0), Node->isDivergent());
 
-    unsigned Reg;
+    Register Reg;
     MachineInstr *DefMI;
     RegisterSDNode *R = dyn_cast<RegisterSDNode>(Node->getOperand(0));
     if (R && Register::isPhysicalRegister(R->getReg())) {
@@ -513,7 +513,8 @@ void InstrEmitter::EmitSubregNode(SDNode *Node,
       DefMI = MRI->getVRegDef(Reg);
     }
 
-    unsigned SrcReg, DstReg, DefSubIdx;
+    Register SrcReg, DstReg;
+    unsigned DefSubIdx;
     if (DefMI &&
         TII->isCoalescableExtInstr(*DefMI, SrcReg, DstReg, DefSubIdx) &&
         SubIdx == DefSubIdx &&
@@ -531,7 +532,7 @@ void InstrEmitter::EmitSubregNode(SDNode *Node,
       // Reg may not support a SubIdx sub-register, and we may need to
       // constrain its register class or issue a COPY to a compatible register
       // class.
-      if (Register::isVirtualRegister(Reg))
+      if (Reg.isVirtual())
         Reg = ConstrainForSubReg(Reg, SubIdx,
                                  Node->getOperand(0).getSimpleValueType(),
                                  Node->isDivergent(), Node->getDebugLoc());
@@ -543,7 +544,7 @@ void InstrEmitter::EmitSubregNode(SDNode *Node,
     MachineInstrBuilder CopyMI =
         BuildMI(*MBB, InsertPos, Node->getDebugLoc(),
                 TII->get(TargetOpcode::COPY), VRBase);
-    if (Register::isVirtualRegister(Reg))
+    if (Reg.isVirtual())
       CopyMI.addReg(Reg, 0, SubIdx);
     else
       CopyMI.addReg(TRI->getSubReg(Reg, SubIdx));
 
@@ -408,7 +408,7 @@ bool TargetInstrInfo::getStackSlotRange(const TargetRegisterClass *RC,
 
 void TargetInstrInfo::reMaterialize(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator I,
-                                    unsigned DestReg, unsigned SubIdx,
+                                    Register DestReg, unsigned SubIdx,
                                     const MachineInstr &Orig,
                                     const TargetRegisterInfo &TRI) const {
   MachineInstr *MI = MBB.getParent()->CloneMachineInstr(&Orig);
 
@@ -510,8 +510,8 @@ static unsigned canFoldIntoCSel(const MachineRegisterInfo &MRI, unsigned VReg,
 
 bool AArch64InstrInfo::canInsertSelect(const MachineBasicBlock &MBB,
                                        ArrayRef<MachineOperand> Cond,
-                                       unsigned DstReg, unsigned TrueReg,
-                                       unsigned FalseReg, int &CondCycles,
+                                       Register DstReg, Register TrueReg,
+                                       Register FalseReg, int &CondCycles,
                                        int &TrueCycles,
                                        int &FalseCycles) const {
   // Check register classes.
@@ -559,9 +559,9 @@ bool AArch64InstrInfo::canInsertSelect(const MachineBasicBlock &MBB,
 
 void AArch64InstrInfo::insertSelect(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator I,
-                                    const DebugLoc &DL, unsigned DstReg,
+                                    const DebugLoc &DL, Register DstReg,
                                     ArrayRef<MachineOperand> Cond,
-                                    unsigned TrueReg, unsigned FalseReg) const {
+                                    Register TrueReg, Register FalseReg) const {
   MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
 
   // Parse the condition code, see parseCondBranch() above.
@@ -931,7 +931,7 @@ bool AArch64InstrInfo::isSEHInstruction(const MachineInstr &MI) {
 }
 
 bool AArch64InstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
-                                             unsigned &SrcReg, unsigned &DstReg,
+                                             Register &SrcReg, Register &DstReg,
                                              unsigned &SubIdx) const {
   switch (MI.getOpcode()) {
   default:
@@ -1011,8 +1011,8 @@ bool AArch64InstrInfo::isSchedulingBoundary(const MachineInstr &MI,
 /// analyzeCompare - For a comparison instruction, return the source registers
 /// in SrcReg and SrcReg2, and the value it compares against in CmpValue.
 /// Return true if the comparison instruction can be analyzed.
-bool AArch64InstrInfo::analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
-                                      unsigned &SrcReg2, int &CmpMask,
+bool AArch64InstrInfo::analyzeCompare(const MachineInstr &MI, Register &SrcReg,
+                                      Register &SrcReg2, int &CmpMask,
                                       int &CmpValue) const {
   // The first operand can be a frame index where we'd normally expect a
   // register.
@@ -1207,7 +1207,7 @@ static bool areCFlagsAccessedBetweenInstrs(
 /// instruction.
 /// Only comparison with zero is supported.
 bool AArch64InstrInfo::optimizeCompareInstr(
-    MachineInstr &CmpInstr, unsigned SrcReg, unsigned SrcReg2, int CmpMask,
+    MachineInstr &CmpInstr, Register SrcReg, Register SrcReg2, int CmpMask,
     int CmpValue, const MachineRegisterInfo *MRI) const {
   assert(CmpInstr.getParent());
   assert(MRI);
 
@@ -52,8 +52,8 @@ public:
 
   bool isAsCheapAsAMove(const MachineInstr &MI) const override;
 
-  bool isCoalescableExtInstr(const MachineInstr &MI, unsigned &SrcReg,
-                             unsigned &DstReg, unsigned &SubIdx) const override;
+  bool isCoalescableExtInstr(const MachineInstr &MI, Register &SrcReg,
+                             Register &DstReg, unsigned &SubIdx) const override;
 
   bool
   areMemAccessesTriviallyDisjoint(const MachineInstr &MIa,
@@ -197,12 +197,12 @@ public:
   bool
   reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const override;
   bool canInsertSelect(const MachineBasicBlock &, ArrayRef<MachineOperand> Cond,
-                       unsigned, unsigned, unsigned, int &, int &,
+                       Register, Register, Register, int &, int &,
                        int &) const override;
   void insertSelect(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
-                    const DebugLoc &DL, unsigned DstReg,
-                    ArrayRef<MachineOperand> Cond, unsigned TrueReg,
-                    unsigned FalseReg) const override;
+                    const DebugLoc &DL, Register DstReg,
+                    ArrayRef<MachineOperand> Cond, Register TrueReg,
+                    Register FalseReg) const override;
   void getNoop(MCInst &NopInst) const override;
 
   bool isSchedulingBoundary(const MachineInstr &MI,
@@ -212,13 +212,13 @@ public:
   /// analyzeCompare - For a comparison instruction, return the source registers
   /// in SrcReg and SrcReg2, and the value it compares against in CmpValue.
   /// Return true if the comparison instruction can be analyzed.
-  bool analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
-                      unsigned &SrcReg2, int &CmpMask,
+  bool analyzeCompare(const MachineInstr &MI, Register &SrcReg,
+                      Register &SrcReg2, int &CmpMask,
                       int &CmpValue) const override;
   /// optimizeCompareInstr - Convert the instruction supplying the argument to
   /// the comparison into one that sets the zero bit in the flags register.
-  bool optimizeCompareInstr(MachineInstr &CmpInstr, unsigned SrcReg,
-                            unsigned SrcReg2, int CmpMask, int CmpValue,
+  bool optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
+                            Register SrcReg2, int CmpMask, int CmpValue,
                             const MachineRegisterInfo *MRI) const override;
   bool optimizeCondBranch(MachineInstr &MI) const override;
 
@@ -812,10 +812,10 @@ SIInstrInfo::getPreferredSelectRegClass(unsigned Size) const {
 
 void SIInstrInfo::insertVectorSelect(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator I,
-                                     const DebugLoc &DL, unsigned DstReg,
+                                     const DebugLoc &DL, Register DstReg,
                                      ArrayRef<MachineOperand> Cond,
-                                     unsigned TrueReg,
-                                     unsigned FalseReg) const {
+                                     Register TrueReg,
+                                     Register FalseReg) const {
   MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
   MachineFunction *MF = MBB.getParent();
   const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
@@ -2205,8 +2205,8 @@ bool SIInstrInfo::reverseBranchCondition(
 
 bool SIInstrInfo::canInsertSelect(const MachineBasicBlock &MBB,
                                   ArrayRef<MachineOperand> Cond,
-                                  unsigned DstReg, unsigned TrueReg,
-                                  unsigned FalseReg, int &CondCycles,
+                                  Register DstReg, Register TrueReg,
+                                  Register FalseReg, int &CondCycles,
                                   int &TrueCycles, int &FalseCycles) const {
   switch (Cond[0].getImm()) {
   case VCCNZ:
@@ -2245,8 +2245,8 @@ bool SIInstrInfo::canInsertSelect(const MachineBasicBlock &MBB,
 
 void SIInstrInfo::insertSelect(MachineBasicBlock &MBB,
                                MachineBasicBlock::iterator I, const DebugLoc &DL,
-                               unsigned DstReg, ArrayRef<MachineOperand> Cond,
-                               unsigned TrueReg, unsigned FalseReg) const {
+                               Register DstReg, ArrayRef<MachineOperand> Cond,
+                               Register TrueReg, Register FalseReg) const {
   BranchPredicate Pred = static_cast<BranchPredicate>(Cond[0].getImm());
   if (Pred == VCCZ || Pred == SCC_FALSE) {
     Pred = static_cast<BranchPredicate>(-Pred);
@@ -2393,7 +2393,7 @@ static void removeModOperands(MachineInstr &MI) {
 }
 
 bool SIInstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
-                                unsigned Reg, MachineRegisterInfo *MRI) const {
+                                Register Reg, MachineRegisterInfo *MRI) const {
   if (!MRI->hasOneNonDBGUse(Reg))
     return false;
 
@@ -297,19 +297,19 @@ public:
                               SmallVectorImpl<MachineOperand> &Cond) const override;
 
   bool canInsertSelect(const MachineBasicBlock &MBB,
-                       ArrayRef<MachineOperand> Cond, unsigned DstReg,
-                       unsigned TrueReg, unsigned FalseReg, int &CondCycles,
+                       ArrayRef<MachineOperand> Cond, Register DstReg,
+                       Register TrueReg, Register FalseReg, int &CondCycles,
                        int &TrueCycles, int &FalseCycles) const override;
 
   void insertSelect(MachineBasicBlock &MBB,
                     MachineBasicBlock::iterator I, const DebugLoc &DL,
-                    unsigned DstReg, ArrayRef<MachineOperand> Cond,
-                    unsigned TrueReg, unsigned FalseReg) const override;
+                    Register DstReg, ArrayRef<MachineOperand> Cond,
+                    Register TrueReg, Register FalseReg) const override;
 
   void insertVectorSelect(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator I, const DebugLoc &DL,
-                          unsigned DstReg, ArrayRef<MachineOperand> Cond,
-                          unsigned TrueReg, unsigned FalseReg) const;
+                          Register DstReg, ArrayRef<MachineOperand> Cond,
+                          Register TrueReg, Register FalseReg) const;
 
   unsigned getAddressSpaceForPseudoSourceKind(
     unsigned Kind) const override;
@@ -320,7 +320,7 @@ public:
 
   bool isFoldableCopy(const MachineInstr &MI) const;
 
-  bool FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, unsigned Reg,
+  bool FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, Register Reg,
                      MachineRegisterInfo *MRI) const final;
 
   unsigned getMachineCSELookAheadLimit() const override { return 500; }
 
@@ -1736,7 +1736,7 @@ static unsigned duplicateCPV(MachineFunction &MF, unsigned &CPI) {
 
 void ARMBaseInstrInfo::reMaterialize(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator I,
-                                     unsigned DestReg, unsigned SubIdx,
+                                     Register DestReg, unsigned SubIdx,
                                      const MachineInstr &Orig,
                                      const TargetRegisterInfo &TRI) const {
   unsigned Opcode = Orig.getOpcode();
@@ -2719,8 +2719,8 @@ bool llvm::rewriteARMFrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
 /// in SrcReg and SrcReg2 if having two register operands, and the value it
 /// compares against in CmpValue. Return true if the comparison instruction
 /// can be analyzed.
-bool ARMBaseInstrInfo::analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
-                                      unsigned &SrcReg2, int &CmpMask,
+bool ARMBaseInstrInfo::analyzeCompare(const MachineInstr &MI, Register &SrcReg,
+                                      Register &SrcReg2, int &CmpMask,
                                       int &CmpValue) const {
   switch (MI.getOpcode()) {
   default: break;
@@ -2756,7 +2756,7 @@ bool ARMBaseInstrInfo::analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
 /// operates on the given source register and applies the same mask
 /// as a 'tst' instruction. Provide a limited look-through for copies.
 /// When successful, MI will hold the found instruction.
-static bool isSuitableForMask(MachineInstr *&MI, unsigned SrcReg,
+static bool isSuitableForMask(MachineInstr *&MI, Register SrcReg,
                               int CmpMask, bool CommonUse) {
   switch (MI->getOpcode()) {
   case ARM::ANDri:
@@ -2791,7 +2791,7 @@ inline static ARMCC::CondCodes getCmpToAddCondition(ARMCC::CondCodes CC) {
 /// CMPrr(r0, r1) can be made redundant by ADDr[ri](r0, r1, X).
 /// This function can be extended later on.
 inline static bool isRedundantFlagInstr(const MachineInstr *CmpI,
-                                        unsigned SrcReg, unsigned SrcReg2,
+                                        Register SrcReg, Register SrcReg2,
                                         int ImmValue, const MachineInstr *OI,
                                         bool &IsThumb1) {
   if ((CmpI->getOpcode() == ARM::CMPrr || CmpI->getOpcode() == ARM::t2CMPrr) &&
@@ -2927,7 +2927,7 @@ static bool isOptimizeCompareCandidate(MachineInstr *MI, bool &IsThumb1) {
 /// operands are swapped: SUBrr(r1,r2) and CMPrr(r2,r1), by updating the
 /// condition code of instructions which use the flags.
 bool ARMBaseInstrInfo::optimizeCompareInstr(
-    MachineInstr &CmpInstr, unsigned SrcReg, unsigned SrcReg2, int CmpMask,
+    MachineInstr &CmpInstr, Register SrcReg, Register SrcReg2, int CmpMask,
     int CmpValue, const MachineRegisterInfo *MRI) const {
   // Get the unique definition of SrcReg.
   MachineInstr *MI = MRI->getUniqueVRegDef(SrcReg);
@@ -3214,7 +3214,7 @@ bool ARMBaseInstrInfo::shouldSink(const MachineInstr &MI) const {
     return true;
   MachineBasicBlock::const_iterator Next = &MI;
   ++Next;
-  unsigned SrcReg, SrcReg2;
+  Register SrcReg, SrcReg2;
   int CmpMask, CmpValue;
   bool IsThumb1;
   if (Next != MI.getParent()->end() &&
@@ -3225,7 +3225,7 @@ bool ARMBaseInstrInfo::shouldSink(const MachineInstr &MI) const {
 }
 
 bool ARMBaseInstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
-                                     unsigned Reg,
+                                     Register Reg,
                                      MachineRegisterInfo *MRI) const {
   // Fold large immediates into add, sub, or, xor.
   unsigned DefOpc = DefMI.getOpcode();
 
@@ -232,7 +232,7 @@ public:
   bool shouldSink(const MachineInstr &MI) const override;
 
   void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
-                     unsigned DestReg, unsigned SubIdx,
+                     Register DestReg, unsigned SubIdx,
                      const MachineInstr &Orig,
                      const TargetRegisterInfo &TRI) const override;
 
@@ -296,16 +296,16 @@ public:
   /// in SrcReg and SrcReg2 if having two register operands, and the value it
   /// compares against in CmpValue. Return true if the comparison instruction
   /// can be analyzed.
-  bool analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
-                      unsigned &SrcReg2, int &CmpMask,
+  bool analyzeCompare(const MachineInstr &MI, Register &SrcReg,
+                      Register &SrcReg2, int &CmpMask,
                       int &CmpValue) const override;
 
   /// optimizeCompareInstr - Convert the instruction to set the zero flag so
   /// that we can remove a "comparison with zero"; Remove a redundant CMP
   /// instruction if the flags can be updated in the same way by an earlier
   /// instruction such as SUB.
-  bool optimizeCompareInstr(MachineInstr &CmpInstr, unsigned SrcReg,
-                            unsigned SrcReg2, int CmpMask, int CmpValue,
+  bool optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
+                            Register SrcReg2, int CmpMask, int CmpValue,
                             const MachineRegisterInfo *MRI) const override;
 
   bool analyzeSelect(const MachineInstr &MI,
@@ -318,7 +318,7 @@ public:
 
   /// FoldImmediate - 'Reg' is known to be defined by a move immediate
   /// instruction, try to fold the immediate into the use instruction.
-  bool FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, unsigned Reg,
+  bool FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, Register Reg,
                      MachineRegisterInfo *MRI) const override;
 
   unsigned getNumMicroOps(const InstrItineraryData *ItinData,
 
@@ -467,7 +467,7 @@ bool HexagonHardwareLoops::findInductionRegister(MachineLoop *L,
   if (!PredI->isCompare())
     return false;
 
-  unsigned CmpReg1 = 0, CmpReg2 = 0;
+  Register CmpReg1, CmpReg2;
   int CmpImm = 0, CmpMask = 0;
   bool CmpAnalyzed =
       TII->analyzeCompare(*PredI, CmpReg1, CmpReg2, CmpMask, CmpImm);
@@ -651,7 +651,7 @@ CountValue *HexagonHardwareLoops::getLoopTripCount(MachineLoop *L,
   MachineInstr *CondI = MRI->getVRegDef(PredReg);
   unsigned CondOpc = CondI->getOpcode();
 
-  unsigned CmpReg1 = 0, CmpReg2 = 0;
+  Register CmpReg1, CmpReg2;
   int Mask = 0, ImmValue = 0;
   bool AnalyzedCmp =
       TII->analyzeCompare(*CondI, CmpReg1, CmpReg2, Mask, ImmValue);
@@ -1455,7 +1455,7 @@ bool HexagonHardwareLoops::loopCountMayWrapOrUnderFlow(
   for (MachineRegisterInfo::use_instr_nodbg_iterator I = MRI->use_instr_nodbg_begin(Reg),
        E = MRI->use_instr_nodbg_end(); I != E; ++I) {
     MachineInstr *MI = &*I;
-    unsigned CmpReg1 = 0, CmpReg2 = 0;
+    Register CmpReg1, CmpReg2;
     int CmpMask = 0, CmpValue = 0;
 
     if (!TII->analyzeCompare(*MI, CmpReg1, CmpReg2, CmpMask, CmpValue))
 
@@ -1785,8 +1785,8 @@ HexagonInstrInfo::CreateTargetPostRAHazardRecognizer(
 /// \p SrcReg and \p SrcReg2 if having two register operands, and the value it
 /// compares against in CmpValue. Return true if the comparison instruction
 /// can be analyzed.
-bool HexagonInstrInfo::analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
-                                      unsigned &SrcReg2, int &Mask,
+bool HexagonInstrInfo::analyzeCompare(const MachineInstr &MI, Register &SrcReg,
+                                      Register &SrcReg2, int &Mask,
                                       int &Value) const {
   unsigned Opc = MI.getOpcode();
 
@@ -269,8 +269,8 @@ public:
   /// in SrcReg and SrcReg2 if having two register operands, and the value it
   /// compares against in CmpValue. Return true if the comparison instruction
   /// can be analyzed.
-  bool analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
-                      unsigned &SrcReg2, int &Mask, int &Value) const override;
+  bool analyzeCompare(const MachineInstr &MI, Register &SrcReg,
+                      Register &SrcReg2, int &Mask, int &Value) const override;
 
   /// Compute the instruction latency of a given instruction.
   /// If the instruction has higher cost when predicated, it's returned via
 
@@ -504,7 +504,7 @@ void HexagonSplitDoubleRegs::collectIndRegsForLoop(const MachineLoop *L,
 
   // Get the registers on which the loop controlling compare instruction
   // depends.
-  unsigned CmpR1 = 0, CmpR2 = 0;
+  Register CmpR1, CmpR2;
   const MachineInstr *CmpI = MRI->getVRegDef(PR);
   while (CmpI->getOpcode() == Hexagon::C2_not)
     CmpI = MRI->getVRegDef(CmpI->getOperand(1).getReg());
 
@@ -174,8 +174,8 @@ LanaiInstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
   return makeArrayRef(TargetFlags);
 }
 
-bool LanaiInstrInfo::analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
-                                    unsigned &SrcReg2, int &CmpMask,
+bool LanaiInstrInfo::analyzeCompare(const MachineInstr &MI, Register &SrcReg,
+                                    Register &SrcReg2, int &CmpMask,
                                     int &CmpValue) const {
   switch (MI.getOpcode()) {
   default:
@@ -183,7 +183,7 @@ bool LanaiInstrInfo::analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
   case Lanai::SFSUB_F_RI_LO:
   case Lanai::SFSUB_F_RI_HI:
     SrcReg = MI.getOperand(0).getReg();
-    SrcReg2 = 0;
+    SrcReg2 = Register();
     CmpMask = ~0;
     CmpValue = MI.getOperand(1).getImm();
     return true;
@@ -281,7 +281,7 @@ inline static unsigned flagSettingOpcodeVariant(unsigned OldOpcode) {
 }
 
 bool LanaiInstrInfo::optimizeCompareInstr(
-    MachineInstr &CmpInstr, unsigned SrcReg, unsigned SrcReg2, int /*CmpMask*/,
+    MachineInstr &CmpInstr, Register SrcReg, Register SrcReg2, int /*CmpMask*/,
     int CmpValue, const MachineRegisterInfo *MRI) const {
   // Get the unique definition of SrcReg.
   MachineInstr *MI = MRI->getUniqueVRegDef(SrcReg);
@@ -454,9 +454,9 @@ bool LanaiInstrInfo::analyzeSelect(const MachineInstr &MI,
 
 // Identify instructions that can be folded into a SELECT instruction, and
 // return the defining instruction.
-static MachineInstr *canFoldIntoSelect(unsigned Reg,
+static MachineInstr *canFoldIntoSelect(Register Reg,
                                        const MachineRegisterInfo &MRI) {
-  if (!Register::isVirtualRegister(Reg))
+  if (!Reg.isVirtual())
     return nullptr;
   if (!MRI.hasOneNonDBGUse(Reg))
     return nullptr;
 
@@ -95,15 +95,15 @@ public:
   // For a comparison instruction, return the source registers in SrcReg and
   // SrcReg2 if having two register operands, and the value it compares against
   // in CmpValue. Return true if the comparison instruction can be analyzed.
-  bool analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
-                      unsigned &SrcReg2, int &CmpMask,
+  bool analyzeCompare(const MachineInstr &MI, Register &SrcReg,
+                      Register &SrcReg2, int &CmpMask,
                       int &CmpValue) const override;
 
   // See if the comparison instruction can be converted into something more
   // efficient. E.g., on Lanai register-register instructions can set the flag
   // register, obviating the need for a separate compare.
-  bool optimizeCompareInstr(MachineInstr &CmpInstr, unsigned SrcReg,
-                            unsigned SrcReg2, int CmpMask, int CmpValue,
+  bool optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
+                            Register SrcReg2, int CmpMask, int CmpValue,
                             const MachineRegisterInfo *MRI) const override;
 
   // Analyze the given select instruction, returning true if it cannot be
 
@@ -281,7 +281,7 @@ bool PPCInstrInfo::getMachineCombinerPatterns(
 
 // Detect 32 -> 64-bit extensions where we may reuse the low sub-register.
 bool PPCInstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
-                                         unsigned &SrcReg, unsigned &DstReg,
+                                         Register &SrcReg, Register &DstReg,
                                          unsigned &SubIdx) const {
   switch (MI.getOpcode()) {
   default: return false;
@@ -754,8 +754,8 @@ unsigned PPCInstrInfo::insertBranch(MachineBasicBlock &MBB,
 // Select analysis.
 bool PPCInstrInfo::canInsertSelect(const MachineBasicBlock &MBB,
                                    ArrayRef<MachineOperand> Cond,
-                                   unsigned DstReg, unsigned TrueReg,
-                                   unsigned FalseReg, int &CondCycles,
+                                   Register DstReg, Register TrueReg,
+                                   Register FalseReg, int &CondCycles,
                                    int &TrueCycles, int &FalseCycles) const {
   if (Cond.size() != 2)
     return false;
@@ -792,9 +792,9 @@ bool PPCInstrInfo::canInsertSelect(const MachineBasicBlock &MBB,
 
 void PPCInstrInfo::insertSelect(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MI,
-                                const DebugLoc &dl, unsigned DestReg,
-                                ArrayRef<MachineOperand> Cond, unsigned TrueReg,
-                                unsigned FalseReg) const {
+                                const DebugLoc &dl, Register DestReg,
+                                ArrayRef<MachineOperand> Cond, Register TrueReg,
+                                Register FalseReg) const {
   assert(Cond.size() == 2 &&
          "PPC branch conditions have two components!");
 
@@ -853,7 +853,7 @@ void PPCInstrInfo::insertSelect(MachineBasicBlock &MBB,
   case PPC::PRED_BIT_UNSET: SubIdx = 0; SwapOps = true; break;
   }
 
-  unsigned FirstReg = SwapOps ? FalseReg : TrueReg,
+  Register FirstReg = SwapOps ? FalseReg : TrueReg,
            SecondReg = SwapOps ? TrueReg : FalseReg;
 
   // The first input register of isel cannot be r0. If it is a member
@@ -864,7 +864,7 @@ void PPCInstrInfo::insertSelect(MachineBasicBlock &MBB,
     const TargetRegisterClass *FirstRC =
       MRI.getRegClass(FirstReg)->contains(PPC::X0) ?
         &PPC::G8RC_NOX0RegClass : &PPC::GPRC_NOR0RegClass;
-    unsigned OldFirstReg = FirstReg;
+    Register OldFirstReg = FirstReg;
     FirstReg = MRI.createVirtualRegister(FirstRC);
     BuildMI(MBB, MI, dl, get(TargetOpcode::COPY), FirstReg)
       .addReg(OldFirstReg);
@@ -1334,7 +1334,7 @@ reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
 }
 
 bool PPCInstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
-                                 unsigned Reg, MachineRegisterInfo *MRI) const {
+                                 Register Reg, MachineRegisterInfo *MRI) const {
   // For some instructions, it is legal to fold ZERO into the RA register field.
   // A zero immediate should always be loaded with a single li.
   unsigned DefOpc = DefMI.getOpcode();
@@ -1383,7 +1383,7 @@ bool PPCInstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
   if (UseInfo->Constraints != 0)
     return false;
 
-  unsigned ZeroReg;
+  MCRegister ZeroReg;
   if (UseInfo->isLookupPtrRegClass()) {
     bool isPPC64 = Subtarget.isPPC64();
     ZeroReg = isPPC64 ? PPC::ZERO8 : PPC::ZERO;
@@ -1599,8 +1599,8 @@ bool PPCInstrInfo::DefinesPredicate(MachineInstr &MI,
   return Found;
 }
 
-bool PPCInstrInfo::analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
-                                  unsigned &SrcReg2, int &Mask,
+bool PPCInstrInfo::analyzeCompare(const MachineInstr &MI, Register &SrcReg,
+                                  Register &SrcReg2, int &Mask,
                                   int &Value) const {
   unsigned Opc = MI.getOpcode();
 
@@ -1629,8 +1629,8 @@ bool PPCInstrInfo::analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
   }
 }
 
-bool PPCInstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, unsigned SrcReg,
-                                        unsigned SrcReg2, int Mask, int Value,
+bool PPCInstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
+                                        Register SrcReg2, int Mask, int Value,
                                         const MachineRegisterInfo *MRI) const {
   if (DisableCmpOpt)
     return false;
@@ -1658,8 +1658,8 @@ bool PPCInstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, unsigned SrcReg,
   bool is64BitUnsignedCompare = OpC == PPC::CMPLDI || OpC == PPC::CMPLD;
 
   // Look through copies unless that gets us to a physical register.
-  unsigned ActualSrc = TRI->lookThruCopyLike(SrcReg, MRI);
-  if (Register::isVirtualRegister(ActualSrc))
+  Register ActualSrc = TRI->lookThruCopyLike(SrcReg, MRI);
+  if (ActualSrc.isVirtual())
     SrcReg = ActualSrc;
 
   // Get the unique definition of SrcReg.
 
@@ -249,7 +249,7 @@ public:
   bool isAssociativeAndCommutative(const MachineInstr &Inst) const override;
 
   bool isCoalescableExtInstr(const MachineInstr &MI,
-                             unsigned &SrcReg, unsigned &DstReg,
+                             Register &SrcReg, Register &DstReg,
                              unsigned &SubIdx) const override;
   unsigned isLoadFromStackSlot(const MachineInstr &MI,
                                int &FrameIndex) const override;
@@ -279,12 +279,12 @@ public:
 
   // Select analysis.
   bool canInsertSelect(const MachineBasicBlock &, ArrayRef<MachineOperand> Cond,
-                       unsigned, unsigned, unsigned, int &, int &,
+                       Register, Register, Register, int &, int &,
                        int &) const override;
   void insertSelect(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
-                    const DebugLoc &DL, unsigned DstReg,
-                    ArrayRef<MachineOperand> Cond, unsigned TrueReg,
-                    unsigned FalseReg) const override;
+                    const DebugLoc &DL, Register DstReg,
+                    ArrayRef<MachineOperand> Cond, Register TrueReg,
+                    Register FalseReg) const override;
 
   void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                    const DebugLoc &DL, MCRegister DestReg, MCRegister SrcReg,
@@ -329,7 +329,7 @@ public:
   bool
   reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const override;
 
-  bool FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, unsigned Reg,
+  bool FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, Register Reg,
                      MachineRegisterInfo *MRI) const override;
 
   // If conversion by predication (only supported by some branch instructions).
@@ -373,11 +373,11 @@ public:
 
   // Comparison optimization.
 
-  bool analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
-                      unsigned &SrcReg2, int &Mask, int &Value) const override;
+  bool analyzeCompare(const MachineInstr &MI, Register &SrcReg,
+                      Register &SrcReg2, int &Mask, int &Value) const override;
 
-  bool optimizeCompareInstr(MachineInstr &CmpInstr, unsigned SrcReg,
-                            unsigned SrcReg2, int Mask, int Value,
+  bool optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
+                            Register SrcReg2, int Mask, int Value,
                             const MachineRegisterInfo *MRI) const override;
 
 
@@ -513,8 +513,8 @@ unsigned SystemZInstrInfo::insertBranch(MachineBasicBlock &MBB,
   return Count;
 }
 
-bool SystemZInstrInfo::analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
-                                      unsigned &SrcReg2, int &Mask,
+bool SystemZInstrInfo::analyzeCompare(const MachineInstr &MI, Register &SrcReg,
+                                      Register &SrcReg2, int &Mask,
                                       int &Value) const {
   assert(MI.isCompare() && "Caller should have checked for a comparison");
 
@@ -532,8 +532,8 @@ bool SystemZInstrInfo::analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
 
 bool SystemZInstrInfo::canInsertSelect(const MachineBasicBlock &MBB,
                                        ArrayRef<MachineOperand> Pred,
-                                       unsigned DstReg, unsigned TrueReg,
-                                       unsigned FalseReg, int &CondCycles,
+                                       Register DstReg, Register TrueReg,
+                                       Register FalseReg, int &CondCycles,
                                        int &TrueCycles,
                                        int &FalseCycles) const {
   // Not all subtargets have LOCR instructions.
@@ -566,10 +566,10 @@ bool SystemZInstrInfo::canInsertSelect(const MachineBasicBlock &MBB,
 
 void SystemZInstrInfo::insertSelect(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator I,
-                                    const DebugLoc &DL, unsigned DstReg,
+                                    const DebugLoc &DL, Register DstReg,
                                     ArrayRef<MachineOperand> Pred,
-                                    unsigned TrueReg,
-                                    unsigned FalseReg) const {
+                                    Register TrueReg,
+                                    Register FalseReg) const {
   MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
   const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
 
@@ -607,7 +607,7 @@ void SystemZInstrInfo::insertSelect(MachineBasicBlock &MBB,
 }
 
 bool SystemZInstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
-                                     unsigned Reg,
+                                     Register Reg,
                                      MachineRegisterInfo *MRI) const {
   unsigned DefOpc = DefMI.getOpcode();
   if (DefOpc != SystemZ::LHIMux && DefOpc != SystemZ::LHI &&
 
@@ -223,16 +223,16 @@ public:
                         MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
                         const DebugLoc &DL,
                         int *BytesAdded = nullptr) const override;
-  bool analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
-                      unsigned &SrcReg2, int &Mask, int &Value) const override;
+  bool analyzeCompare(const MachineInstr &MI, Register &SrcReg,
+                      Register &SrcReg2, int &Mask, int &Value) const override;
   bool canInsertSelect(const MachineBasicBlock &, ArrayRef<MachineOperand> Cond,
-                       unsigned, unsigned, unsigned, int &, int &,
+                       Register, Register, Register, int &, int &,
                        int &) const override;
   void insertSelect(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
-                    const DebugLoc &DL, unsigned DstReg,
-                    ArrayRef<MachineOperand> Cond, unsigned TrueReg,
-                    unsigned FalseReg) const override;
-  bool FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, unsigned Reg,
+                    const DebugLoc &DL, Register DstReg,
+                    ArrayRef<MachineOperand> Cond, Register TrueReg,
+                    Register FalseReg) const override;
+  bool FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, Register Reg,
                      MachineRegisterInfo *MRI) const override;
   bool isPredicable(const MachineInstr &MI) const override;
   bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCycles,
 
@@ -88,7 +88,7 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
 
 bool
 X86InstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
-                                    unsigned &SrcReg, unsigned &DstReg,
+                                    Register &SrcReg, Register &DstReg,
                                     unsigned &SubIdx) const {
   switch (MI.getOpcode()) {
   default: break;
@@ -1123,7 +1123,7 @@ bool X86InstrInfo::isReallyTriviallyReMaterializable(const MachineInstr &MI,
 
 void X86InstrInfo::reMaterialize(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator I,
                                  unsigned DestReg, unsigned SubIdx,
-                                 unsigned DestReg, unsigned SubIdx,
+                                 Register DestReg, unsigned SubIdx,
                                  const MachineInstr &Orig,
                                  const TargetRegisterInfo &TRI) const {
   bool ClobbersEFLAGS = Orig.modifiesRegister(X86::EFLAGS, &TRI);
@@ -3312,8 +3312,8 @@ unsigned X86InstrInfo::insertBranch(MachineBasicBlock &MBB,
 
 bool X86InstrInfo::canInsertSelect(const MachineBasicBlock &MBB,
                                    ArrayRef<MachineOperand> Cond,
-                                   unsigned DstReg, unsigned TrueReg,
-                                   unsigned FalseReg, int &CondCycles,
+                                   Register DstReg, Register TrueReg,
+                                   Register FalseReg, int &CondCycles,
                                    int &TrueCycles, int &FalseCycles) const {
   // Not all subtargets have cmov instructions.
   if (!Subtarget.hasCMov())
@@ -3349,9 +3349,9 @@ bool X86InstrInfo::canInsertSelect(const MachineBasicBlock &MBB,
 
 void X86InstrInfo::insertSelect(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator I,
-                                const DebugLoc &DL, unsigned DstReg,
-                                ArrayRef<MachineOperand> Cond, unsigned TrueReg,
-                                unsigned FalseReg) const {
+                                const DebugLoc &DL, Register DstReg,
+                                ArrayRef<MachineOperand> Cond, Register TrueReg,
+                                Register FalseReg) const {
   MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
   const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo();
   const TargetRegisterClass &RC = *MRI.getRegClass(DstReg);
@@ -3757,8 +3757,8 @@ void X86InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
   addFrameReference(BuildMI(MBB, MI, DebugLoc(), get(Opc), DestReg), FrameIdx);
 }
 
-bool X86InstrInfo::analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
-                                  unsigned &SrcReg2, int &CmpMask,
+bool X86InstrInfo::analyzeCompare(const MachineInstr &MI, Register &SrcReg,
+                                  Register &SrcReg2, int &CmpMask,
                                   int &CmpValue) const {
   switch (MI.getOpcode()) {
   default: break;
@@ -3845,7 +3845,7 @@ bool X86InstrInfo::analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
 /// SrcReg, SrcRegs: register operands for FlagI.
 /// ImmValue: immediate for FlagI if it takes an immediate.
 inline static bool isRedundantFlagInstr(const MachineInstr &FlagI,
-                                        unsigned SrcReg, unsigned SrcReg2,
+                                        Register SrcReg, Register SrcReg2,
                                         int ImmMask, int ImmValue,
                                         const MachineInstr &OI) {
   if (((FlagI.getOpcode() == X86::CMP64rr && OI.getOpcode() == X86::SUB64rr) ||
@@ -4034,8 +4034,8 @@ static X86::CondCode isUseDefConvertible(const MachineInstr &MI) {
 /// Check if there exists an earlier instruction that
 /// operates on the same source operands and sets flags in the same way as
 /// Compare; remove Compare if possible.
-bool X86InstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, unsigned SrcReg,
-                                        unsigned SrcReg2, int CmpMask,
+bool X86InstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
+                                        Register SrcReg2, int CmpMask,
                                         int CmpValue,
                                         const MachineRegisterInfo *MRI) const {
   // Check whether we can replace SUB with CMP.
 
@@ -180,8 +180,8 @@ public:
   /// true, then it's expected the pre-extension value is available as a subreg
   /// of the result register. This also returns the sub-register index in
   /// SubIdx.
-  bool isCoalescableExtInstr(const MachineInstr &MI, unsigned &SrcReg,
-                             unsigned &DstReg, unsigned &SubIdx) const override;
+  bool isCoalescableExtInstr(const MachineInstr &MI, Register &SrcReg,
+                             Register &DstReg, unsigned &SubIdx) const override;
 
   /// Returns true if the instruction has no behavior (specified or otherwise)
   /// that is based on the value of any of its register operands
@@ -237,7 +237,7 @@ public:
   bool isReallyTriviallyReMaterializable(const MachineInstr &MI,
                                          AAResults *AA) const override;
   void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
-                     unsigned DestReg, unsigned SubIdx,
+                     Register DestReg, unsigned SubIdx,
                      const MachineInstr &Orig,
                      const TargetRegisterInfo &TRI) const override;
 
@@ -336,12 +336,12 @@ public:
                         const DebugLoc &DL,
                         int *BytesAdded = nullptr) const override;
   bool canInsertSelect(const MachineBasicBlock &, ArrayRef<MachineOperand> Cond,
-                       unsigned, unsigned, unsigned, int &, int &,
+                       Register, Register, Register, int &, int &,
                        int &) const override;
   void insertSelect(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
-                    const DebugLoc &DL, unsigned DstReg,
-                    ArrayRef<MachineOperand> Cond, unsigned TrueReg,
-                    unsigned FalseReg) const override;
+                    const DebugLoc &DL, Register DstReg,
+                    ArrayRef<MachineOperand> Cond, Register TrueReg,
+                    Register FalseReg) const override;
   void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
                    const DebugLoc &DL, MCRegister DestReg, MCRegister SrcReg,
                    bool KillSrc) const override;
@@ -500,15 +500,15 @@ public:
   /// in SrcReg and SrcReg2 if having two register operands, and the value it
   /// compares against in CmpValue. Return true if the comparison instruction
   /// can be analyzed.
-  bool analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
-                      unsigned &SrcReg2, int &CmpMask,
+  bool analyzeCompare(const MachineInstr &MI, Register &SrcReg,
+                      Register &SrcReg2, int &CmpMask,
                       int &CmpValue) const override;
 
   /// optimizeCompareInstr - Check if there exists an earlier instruction that
   /// operates on the same source operands and sets flags in the same way as
   /// Compare; remove Compare if possible.
-  bool optimizeCompareInstr(MachineInstr &CmpInstr, unsigned SrcReg,
-                            unsigned SrcReg2, int CmpMask, int CmpValue,
+  bool optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
+                            Register SrcReg2, int CmpMask, int CmpValue,
                             const MachineRegisterInfo *MRI) const override;
 
   /// optimizeLoadInstr - Try to remove the load by folding it to a register
 
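Taken together, the change is largely mechanical because Register wraps the old unsigned register id and converts implicitly in both directions. The following is a minimal sketch of that interop, assuming the Register interface as of this commit; the helper name is invented for illustration.

#include "llvm/CodeGen/Register.h"

using namespace llvm;

// Register converts implicitly to and from unsigned, so a hook retyped from
// unsigned to Register keeps accepting the values existing callers pass.
static void registerInterop(Register R) {
  unsigned Id = R;          // implicit Register -> unsigned
  Register Again(Id);       // and back again
  // Member helpers replace the static Register::isVirtualRegister(Id) /
  // Register::isPhysicalRegister(Id) forms used before this commit.
  bool IsVirt = Again.isVirtual();
  bool IsPhys = Again.isPhysical();
  (void)IsVirt;
  (void)IsPhys;
}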