diff --git a/llvm/include/llvm/Target/TargetInstrInfo.h b/llvm/include/llvm/Target/TargetInstrInfo.h
index c57a2d4c236f..6172fcfa64c2 100644
--- a/llvm/include/llvm/Target/TargetInstrInfo.h
+++ b/llvm/include/llvm/Target/TargetInstrInfo.h
@@ -149,6 +149,19 @@ public:
     return false;
   }
 
+  /// isCoalescableInstr - Return true if the instruction is "coalescable". That
+  /// is, it's like a copy where it's legal for the source to overlap the
+  /// destination. e.g. X86::MOVSX64rr32.
+  virtual bool isCoalescableInstr(const MachineInstr &MI, bool &isCopy,
+                                  unsigned &SrcReg, unsigned &DstReg,
+                                  unsigned &SrcSubIdx, unsigned &DstSubIdx) const {
+    if (isMoveInstr(MI, SrcReg, DstReg, SrcSubIdx, DstSubIdx)) {
+      isCopy = true;
+      return true;
+    }
+    return false;
+  }
+
   /// isIdentityCopy - Return true if the instruction is a copy (or
   /// extract_subreg, insert_subreg, subreg_to_reg) where the source and
   /// destination registers are the same.
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index 9600cffa91fd..52077cfd79d2 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -712,6 +712,61 @@
   }
 }
 
+bool
+X86InstrInfo::isCoalescableInstr(const MachineInstr &MI, bool &isCopy,
+                                 unsigned &SrcReg, unsigned &DstReg,
+                                 unsigned &SrcSubIdx, unsigned &DstSubIdx) const {
+  switch (MI.getOpcode()) {
+  default: break;
+  case X86::MOVSX16rr8:
+  case X86::MOVZX16rr8:
+  case X86::MOVSX32rr8:
+  case X86::MOVZX32rr8:
+  case X86::MOVSX64rr8:
+  case X86::MOVZX64rr8:
+  case X86::MOVSX32rr16:
+  case X86::MOVZX32rr16:
+  case X86::MOVSX64rr16:
+  case X86::MOVZX64rr16:
+  case X86::MOVSX64rr32:
+  case X86::MOVZX64rr32: {
+    if (MI.getOperand(0).getSubReg() || MI.getOperand(1).getSubReg())
+      // Be conservative.
+      return false;
+    isCopy = false;
+    SrcReg = MI.getOperand(1).getReg();
+    DstReg = MI.getOperand(0).getReg();
+    DstSubIdx = 0;
+    switch (MI.getOpcode()) {
+    default:
+      llvm_unreachable(0);
+      break;
+    case X86::MOVSX16rr8:
+    case X86::MOVZX16rr8:
+    case X86::MOVSX32rr8:
+    case X86::MOVZX32rr8:
+    case X86::MOVSX64rr8:
+    case X86::MOVZX64rr8:
+      SrcSubIdx = 1;
+      break;
+    case X86::MOVSX32rr16:
+    case X86::MOVZX32rr16:
+    case X86::MOVSX64rr16:
+    case X86::MOVZX64rr16:
+      SrcSubIdx = 3;
+      break;
+    case X86::MOVSX64rr32:
+    case X86::MOVZX64rr32:
+      SrcSubIdx = 4;
+      break;
+    }
+    return true;
+  }
+  }
+  isCopy = true;
+  return isMoveInstr(MI, SrcReg, DstReg, SrcSubIdx, DstSubIdx);
+}
+
 /// isFrameOperand - Return true and the FrameIndex if the specified
 /// operand and follow operands form a reference to the stack frame.
 bool X86InstrInfo::isFrameOperand(const MachineInstr *MI, unsigned int Op,
diff --git a/llvm/lib/Target/X86/X86InstrInfo.h b/llvm/lib/Target/X86/X86InstrInfo.h
index b83441d89eff..6ae7808e2dd8 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.h
+++ b/llvm/lib/Target/X86/X86InstrInfo.h
@@ -448,6 +448,14 @@ public:
                            unsigned &SrcReg, unsigned &DstReg,
                            unsigned &SrcSubIdx, unsigned &DstSubIdx) const;
 
+  /// isCoalescableInstr - Return true if the instruction is "coalescable". That
+  /// is, it's like a copy where it's legal for the source to overlap the
+  /// destination. e.g. X86::MOVSX64rr32.
+  virtual bool isCoalescableInstr(const MachineInstr &MI, bool &isCopy,
+                                  unsigned &SrcReg, unsigned &DstReg,
+                                  unsigned &SrcSubIdx, unsigned &DstSubIdx) const;
+
+
   unsigned isLoadFromStackSlot(const MachineInstr *MI, int &FrameIndex) const;
   /// isLoadFromStackSlotPostFE - Check for post-frame ptr elimination
   /// stack locations as well. This uses a heuristic so it isn't
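
Usage note (not part of the patch): a minimal sketch of how a pass that already holds a TargetInstrInfo pointer might consume the new hook. The helper name getCoalescablePair and its wrapper are assumptions for illustration; only TargetInstrInfo::isCoalescableInstr itself comes from the patch above. For the X86 extensions handled here, DstSubIdx comes back as 0 and SrcSubIdx as 1, 3 or 4 (X86's 8-, 16- and 32-bit sub-register indices), relating the narrow source register to the wider destination.

// Hypothetical caller sketch; not part of the patch. Assumes an LLVM tree of
// the same vintage as the patch (unsigned register numbers, pre-MC layout).
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/Target/TargetInstrInfo.h"
using namespace llvm;

/// getCoalescablePair - Hypothetical helper. Returns true if MI either copies
/// (isCopy == true) or coalescably extends (isCopy == false) SrcReg into
/// DstReg, zero-initializing all outputs so callers never see stale values.
static bool getCoalescablePair(const TargetInstrInfo *TII,
                               const MachineInstr &MI, bool &isCopy,
                               unsigned &SrcReg, unsigned &DstReg,
                               unsigned &SrcSubIdx, unsigned &DstSubIdx) {
  isCopy = false;
  SrcReg = DstReg = SrcSubIdx = DstSubIdx = 0;
  if (!TII->isCoalescableInstr(MI, isCopy, SrcReg, DstReg,
                               SrcSubIdx, DstSubIdx))
    return false;
  // For the X86 extensions above, isCopy is false and SrcSubIdx (1, 3 or 4)
  // presumably names the sub-register of the wider DstReg that the source
  // value occupies, so a coalescer can judge whether joining the two live
  // ranges is safe.
  return true;
}

The isCopy output lets one hook cover both plain register moves and the sign/zero extensions, so a caller can distinguish a full copy from an extension without a second query.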