[X86FixupLEAs] Transform the sequence LEA/SUB to SUB/SUB

This patch transforms the sequence

    lea (reg1, reg2), reg3
    sub reg3, reg4

to two sub instructions

    sub reg1, reg4
    sub reg2, reg4

A similar optimization can also be applied to the LEA/ADD sequence.
The modification to TwoAddressInstructionPass ensures that the operands of the
ADD instruction are in the expected order (the dest register of the LEA should
be a source register of the ADD).
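
For illustration, a minimal C-level sketch (hypothetical code, not part of the
patch; function names are made up) of source that lowers to these patterns.
Before this change each expression compiles to an LEA feeding a SUB/ADD;
afterwards it becomes two SUBs/ADDs:

    int f1(int a, int b, int c) { return c - (a + b); } // lea + sub  ->  sub + sub
    int f2(int a, int b, int c) { return c + (a + b); } // lea + add  ->  add + add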

Differential Revision: https://reviews.llvm.org/D101970
Guozhi Wei 2021-05-18 18:02:36 -07:00
parent 58369fce30
commit 528bc10e95
8 changed files with 308 additions and 76 deletions


@@ -459,6 +459,13 @@ public:
unsigned &SrcOpIdx1,
unsigned &SrcOpIdx2) const;
/// Returns true if the target has a preference on the operand order of the
/// given machine instruction, and sets \p Commute to indicate whether
/// commuting is required to get the desired order.
virtual bool hasCommutePreference(MachineInstr &MI, bool &Commute) const {
return false;
}
/// A pair composed of a register and a sub-register index.
/// Used to give some type checking when modeling Reg:SubReg.
struct RegSubRegPair {


@@ -527,6 +527,11 @@ bool TwoAddressInstructionPass::isProfitableToCommute(Register RegA,
if (isRevCopyChain(RegB, RegA, MaxDataFlowEdge))
return false;
// Look for other target specific commute preference.
bool Commute;
if (TII->hasCommutePreference(*MI, Commute))
return Commute;
// Since there are no intervening uses for both registers, then commute
// if the def of RegC is closer. Its live interval is shorter.
return LastDefB && LastDefC && LastDefC > LastDefB;


@@ -79,6 +79,27 @@ class FixupLEAPass : public MachineFunctionPass {
MachineBasicBlock &MBB, bool OptIncDec,
bool UseLEAForSP) const;
/// Look for and transform the sequence
/// lea (reg1, reg2), reg3
/// sub reg3, reg4
/// to
/// sub reg1, reg4
/// sub reg2, reg4
/// It can also optimize the sequence lea/add similarly.
bool optLEAALU(MachineBasicBlock::iterator &I, MachineBasicBlock &MBB) const;
/// Step forwards in MBB, looking for an ADD/SUB instruction which uses
/// the dest register of LEA instruction I.
MachineBasicBlock::iterator searchALUInst(MachineBasicBlock::iterator &I,
MachineBasicBlock &MBB) const;
/// Check instructions between LeaI and AluI (exclusively).
/// Set BaseIndexDef to true if base or index register from LeaI is defined.
/// Set AluDestRef to true if the dest register of AluI is used or defined.
void checkRegUsage(MachineBasicBlock::iterator &LeaI,
MachineBasicBlock::iterator &AluI, bool &BaseIndexDef,
bool &AluDestRef) const;
/// Determine if an instruction references a machine register
/// and, if so, whether it reads or writes the register.
RegUsageState usesRegister(MachineOperand &p, MachineBasicBlock::iterator I);
@@ -338,6 +359,18 @@ static inline unsigned getADDrrFromLEA(unsigned LEAOpcode) {
}
}
static inline unsigned getSUBrrFromLEA(unsigned LEAOpcode) {
switch (LEAOpcode) {
default:
llvm_unreachable("Unexpected LEA instruction");
case X86::LEA32r:
case X86::LEA64_32r:
return X86::SUB32rr;
case X86::LEA64r:
return X86::SUB64rr;
}
}
static inline unsigned getADDriFromLEA(unsigned LEAOpcode,
const MachineOperand &Offset) {
bool IsInt8 = Offset.isImm() && isInt<8>(Offset.getImm());
@@ -364,6 +397,133 @@ static inline unsigned getINCDECFromLEA(unsigned LEAOpcode, bool IsINC) {
}
}
MachineBasicBlock::iterator
FixupLEAPass::searchALUInst(MachineBasicBlock::iterator &I,
MachineBasicBlock &MBB) const {
const int InstrDistanceThreshold = 5;
int InstrDistance = 1;
MachineBasicBlock::iterator CurInst = std::next(I);
unsigned LEAOpcode = I->getOpcode();
unsigned AddOpcode = getADDrrFromLEA(LEAOpcode);
unsigned SubOpcode = getSUBrrFromLEA(LEAOpcode);
Register DestReg = I->getOperand(0).getReg();
while (CurInst != MBB.end()) {
if (CurInst->isCall() || CurInst->isInlineAsm())
break;
if (InstrDistance > InstrDistanceThreshold)
break;
// Check that the lea's dest register is used only by an add/sub instruction.
for (unsigned I = 0, E = CurInst->getNumOperands(); I != E; ++I) {
MachineOperand &Opnd = CurInst->getOperand(I);
if (Opnd.isReg() && Opnd.getReg() == DestReg) {
if (Opnd.isDef() || !Opnd.isKill())
return MachineBasicBlock::iterator();
unsigned AluOpcode = CurInst->getOpcode();
if (AluOpcode != AddOpcode && AluOpcode != SubOpcode)
return MachineBasicBlock::iterator();
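// ADD/SUB rr operands are (dst, src1, src2) with dst tied to src1, so
// operand 3 - I is the source other than the lea's dest register; it must
// be the tied destination, i.e. the ALU must have the form
// AluDest = AluDest op DestReg.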
MachineOperand &Opnd2 = CurInst->getOperand(3 - I);
MachineOperand AluDest = CurInst->getOperand(0);
if (Opnd2.getReg() != AluDest.getReg())
return MachineBasicBlock::iterator();
return CurInst;
}
}
InstrDistance++;
++CurInst;
}
return MachineBasicBlock::iterator();
}
void FixupLEAPass::checkRegUsage(MachineBasicBlock::iterator &LeaI,
MachineBasicBlock::iterator &AluI,
bool &BaseIndexDef, bool &AluDestRef) const {
BaseIndexDef = AluDestRef = false;
Register BaseReg = LeaI->getOperand(1 + X86::AddrBaseReg).getReg();
Register IndexReg = LeaI->getOperand(1 + X86::AddrIndexReg).getReg();
Register AluDestReg = AluI->getOperand(0).getReg();
MachineBasicBlock::iterator CurInst = std::next(LeaI);
while (CurInst != AluI) {
for (unsigned I = 0, E = CurInst->getNumOperands(); I != E; ++I) {
MachineOperand &Opnd = CurInst->getOperand(I);
if (!Opnd.isReg())
continue;
Register Reg = Opnd.getReg();
if (TRI->regsOverlap(Reg, AluDestReg))
AluDestRef = true;
if (Opnd.isDef() &&
(TRI->regsOverlap(Reg, BaseReg) || TRI->regsOverlap(Reg, IndexReg))) {
BaseIndexDef = true;
}
}
++CurInst;
}
}
bool FixupLEAPass::optLEAALU(MachineBasicBlock::iterator &I,
MachineBasicBlock &MBB) const {
// Look for an add/sub instruction which uses the result of lea.
MachineBasicBlock::iterator AluI = searchALUInst(I, MBB);
if (AluI == MachineBasicBlock::iterator())
return false;
// Check for any related register usage between the lea and the alu.
bool BaseIndexDef, AluDestRef;
checkRegUsage(I, AluI, BaseIndexDef, AluDestRef);
MachineBasicBlock::iterator InsertPos = AluI;
if (BaseIndexDef) {
if (AluDestRef)
return false;
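// The replacement ADD/SUBs will be emitted at the lea's position and thus
// define EFLAGS earlier than the original ALU instruction did, so this is
// only safe when EFLAGS is dead after AluI.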
MachineBasicBlock::iterator AfterAluI = std::next(AluI);
if (MBB.computeRegisterLiveness(TRI, X86::EFLAGS, AfterAluI) !=
MachineBasicBlock::LQR_Dead)
return false;
InsertPos = I;
}
// Check whether any of the registers coincide.
Register AluDestReg = AluI->getOperand(0).getReg();
Register BaseReg = I->getOperand(1 + X86::AddrBaseReg).getReg();
Register IndexReg = I->getOperand(1 + X86::AddrIndexReg).getReg();
if (I->getOpcode() == X86::LEA64_32r) {
BaseReg = TRI->getSubReg(BaseReg, X86::sub_32bit);
IndexReg = TRI->getSubReg(IndexReg, X86::sub_32bit);
}
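// If the ALU destination is also the index register, swap base and index so
// the first new instruction consumes that register before it is overwritten;
// if the base register coincides as well, the transform is not possible.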
if (AluDestReg == IndexReg) {
if (BaseReg == IndexReg)
return false;
std::swap(BaseReg, IndexReg);
}
// Now it's safe to change instructions.
MachineInstr *NewMI1, *NewMI2;
unsigned NewOpcode = AluI->getOpcode();
NewMI1 = BuildMI(MBB, InsertPos, AluI->getDebugLoc(), TII->get(NewOpcode),
AluDestReg)
.addReg(AluDestReg)
.addReg(BaseReg);
NewMI2 = BuildMI(MBB, InsertPos, AluI->getDebugLoc(), TII->get(NewOpcode),
AluDestReg)
.addReg(AluDestReg)
.addReg(IndexReg);
MBB.getParent()->substituteDebugValuesForInst(*AluI, *NewMI1, 1);
MBB.getParent()->substituteDebugValuesForInst(*AluI, *NewMI2, 1);
MBB.erase(I);
MBB.erase(AluI);
I = NewMI1;
return true;
}
bool FixupLEAPass::optTwoAddrLEA(MachineBasicBlock::iterator &I,
MachineBasicBlock &MBB, bool OptIncDec,
bool UseLEAForSP) const {
@@ -398,6 +558,7 @@ bool FixupLEAPass::optTwoAddrLEA(MachineBasicBlock::iterator &I,
MachineInstr *NewMI = nullptr;
// Case 1.
// Look for lea(%reg1, %reg2), %reg1 or lea(%reg2, %reg1), %reg1
// which can be turned into add %reg2, %reg1
if (BaseReg != 0 && IndexReg != 0 && Disp.getImm() == 0 &&
@@ -417,6 +578,7 @@ bool FixupLEAPass::optTwoAddrLEA(MachineBasicBlock::iterator &I,
.addReg(BaseReg).addReg(IndexReg);
}
} else if (DestReg == BaseReg && IndexReg == 0) {
// Case 2.
// This is an LEA with only a base register and a displacement,
// We can use ADDri or INC/DEC.
@@ -447,6 +609,12 @@ bool FixupLEAPass::optTwoAddrLEA(MachineBasicBlock::iterator &I,
.addReg(BaseReg).addImm(Disp.getImm());
}
}
} else if (BaseReg != 0 && IndexReg != 0 && Disp.getImm() == 0) {
// Case 3.
// Look for and transform the sequence
// lea (reg1, reg2), reg3
// sub reg3, reg4
return optLEAALU(I, MBB);
} else
return false;

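To illustrate the two insertion points used by optLEAALU above (a sketch in the
commit message's notation; it assumes reg4 is untouched between the two
instructions and that EFLAGS is dead after the sub): if reg1 or reg2 is
redefined between the lea and the sub, the replacement instructions are emitted
at the lea's position rather than at the sub's:

    lea (reg1, reg2), reg3            sub reg1, reg4
    mov ..., reg1              =>     sub reg2, reg4
    sub reg3, reg4                    mov ..., reg1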

@@ -2670,6 +2670,60 @@ bool X86InstrInfo::findCommutedOpIndices(const MachineInstr &MI,
return false;
}
static bool isConvertibleLEA(MachineInstr *MI) {
unsigned Opcode = MI->getOpcode();
if (Opcode != X86::LEA32r && Opcode != X86::LEA64r &&
Opcode != X86::LEA64_32r)
return false;
const MachineOperand &Scale = MI->getOperand(1 + X86::AddrScaleAmt);
const MachineOperand &Disp = MI->getOperand(1 + X86::AddrDisp);
const MachineOperand &Segment = MI->getOperand(1 + X86::AddrSegmentReg);
if (Segment.getReg() != 0 || !Disp.isImm() || Disp.getImm() != 0 ||
Scale.getImm() > 1)
return false;
return true;
}
bool X86InstrInfo::hasCommutePreference(MachineInstr &MI, bool &Commute) const {
// Currently we are interested in the following sequence only:
// r3 = lea r1, r2
// r5 = add r3, r4
// Both r3 and r4 are killed in the add; we want the add instruction to have
// the operand order
// r5 = add r4, r3
// so that later in X86FixupLEAs the lea instruction can be rewritten as add.
unsigned Opcode = MI.getOpcode();
if (Opcode != X86::ADD32rr && Opcode != X86::ADD64rr)
return false;
const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
Register Reg1 = MI.getOperand(1).getReg();
Register Reg2 = MI.getOperand(2).getReg();
// Check if Reg1 comes from LEA in the same MBB.
if (MachineOperand *Op = MRI.getOneDef(Reg1)) {
MachineInstr *Inst = Op->getParent();
if (isConvertibleLEA(Inst) && Inst->getParent() == MI.getParent()) {
Commute = true;
return true;
}
}
// Check if Reg2 comes from LEA in the same MBB.
if (MachineOperand *Op = MRI.getOneDef(Reg2)) {
MachineInstr *Inst = Op->getParent();
if (isConvertibleLEA(Inst) && Inst->getParent() == MI.getParent()) {
Commute = false;
return true;
}
}
return false;
}
X86::CondCode X86::getCondFromBranch(const MachineInstr &MI) {
switch (MI.getOpcode()) {
default: return X86::COND_INVALID;


@@ -284,6 +284,10 @@ public:
bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx1,
unsigned &SrcOpIdx2) const override;
/// Returns true if we have a preference on the operand order of \p MI; the
/// commute decision is returned in \p Commute.
bool hasCommutePreference(MachineInstr &MI, bool &Commute) const override;
/// Returns an adjusted FMA opcode that must be used in FMA instruction that
/// performs the same computations as the given \p MI but which has the
/// operands \p SrcOpIdx1 and \p SrcOpIdx2 commuted.


@@ -29,9 +29,9 @@ define fastcc i64 @foo() nounwind {
; CHECK-NEXT: addq %r15, %rbx
; CHECK-NEXT: addq %rdx, %rbx
; CHECK-NEXT: addq %rsi, %rbx
; CHECK-NEXT: leaq (%r9,%r10), %rsi
; CHECK-NEXT: leaq (%rsi,%r8), %rdx
; CHECK-NEXT: addq %rsi, %rdx
; CHECK-NEXT: leaq (%r9,%r10), %rdx
; CHECK-NEXT: addq %rdx, %rdx
; CHECK-NEXT: addq %r8, %rdx
; CHECK-NEXT: movq {{.*}}(%rip), %rdi
; CHECK-NEXT: addq %rbx, %r12
; CHECK-NEXT: addq %r8, %rdx
@@ -41,9 +41,9 @@ define fastcc i64 @foo() nounwind {
; CHECK-NEXT: addq %r12, %rsi
; CHECK-NEXT: addq %r11, %rdi
; CHECK-NEXT: addq %rsi, %rdi
; CHECK-NEXT: leaq (%r10,%r8), %rbx
; CHECK-NEXT: leaq (%rdx,%rbx), %rsi
; CHECK-NEXT: addq %rbx, %rsi
; CHECK-NEXT: leaq (%r10,%r8), %rsi
; CHECK-NEXT: addq %rsi, %rsi
; CHECK-NEXT: addq %rdx, %rsi
; CHECK-NEXT: movq {{.*}}(%rip), %rbx
; CHECK-NEXT: addq %r12, %rdi
; CHECK-NEXT: addq %rdi, %r9
@@ -54,9 +54,9 @@ define fastcc i64 @foo() nounwind {
; CHECK-NEXT: addq %r9, %rdi
; CHECK-NEXT: addq %r14, %rbx
; CHECK-NEXT: addq %rdi, %rbx
; CHECK-NEXT: leaq (%rdx,%r8), %rax
; CHECK-NEXT: leaq (%rsi,%rax), %rdi
; CHECK-NEXT: addq %rax, %rdi
; CHECK-NEXT: leaq (%rdx,%r8), %rdi
; CHECK-NEXT: addq %rdi, %rdi
; CHECK-NEXT: addq %rsi, %rdi
; CHECK-NEXT: movq {{.*}}(%rip), %rcx
; CHECK-NEXT: addq %r9, %rbx
; CHECK-NEXT: addq %rbx, %r10
@@ -67,9 +67,9 @@ define fastcc i64 @foo() nounwind {
; CHECK-NEXT: addq %r10, %rax
; CHECK-NEXT: addq %r15, %rcx
; CHECK-NEXT: addq %rax, %rcx
; CHECK-NEXT: leaq (%rsi,%rdx), %rbx
; CHECK-NEXT: leaq (%rdi,%rbx), %r11
; CHECK-NEXT: addq %rbx, %r11
; CHECK-NEXT: leaq (%rsi,%rdx), %r11
; CHECK-NEXT: addq %r11, %r11
; CHECK-NEXT: addq %rdi, %r11
; CHECK-NEXT: movq {{.*}}(%rip), %rbx
; CHECK-NEXT: addq %r10, %rcx
; CHECK-NEXT: addq %rcx, %r8
@@ -80,9 +80,9 @@ define fastcc i64 @foo() nounwind {
; CHECK-NEXT: addq %r8, %rcx
; CHECK-NEXT: addq %r12, %rbx
; CHECK-NEXT: addq %rcx, %rbx
; CHECK-NEXT: leaq (%rdi,%rsi), %rax
; CHECK-NEXT: leaq (%r11,%rax), %r14
; CHECK-NEXT: addq %rax, %r14
; CHECK-NEXT: leaq (%rdi,%rsi), %r14
; CHECK-NEXT: addq %r14, %r14
; CHECK-NEXT: addq %r11, %r14
; CHECK-NEXT: movq {{.*}}(%rip), %rax
; CHECK-NEXT: addq %r8, %rbx
; CHECK-NEXT: addq %rbx, %rdx
@@ -93,9 +93,9 @@ define fastcc i64 @foo() nounwind {
; CHECK-NEXT: addq %rdx, %rbx
; CHECK-NEXT: addq %r9, %rax
; CHECK-NEXT: addq %rbx, %rax
; CHECK-NEXT: leaq (%r11,%rdi), %rbx
; CHECK-NEXT: leaq (%r14,%rbx), %r9
; CHECK-NEXT: addq %rbx, %r9
; CHECK-NEXT: leaq (%r11,%rdi), %r9
; CHECK-NEXT: addq %r9, %r9
; CHECK-NEXT: addq %r14, %r9
; CHECK-NEXT: movq {{.*}}(%rip), %rbx
; CHECK-NEXT: addq %rdx, %rax
; CHECK-NEXT: addq %rax, %rsi
@@ -106,9 +106,9 @@ define fastcc i64 @foo() nounwind {
; CHECK-NEXT: addq %rsi, %rax
; CHECK-NEXT: addq %r10, %rbx
; CHECK-NEXT: addq %rax, %rbx
; CHECK-NEXT: leaq (%r14,%r11), %rax
; CHECK-NEXT: leaq (%r9,%rax), %r10
; CHECK-NEXT: addq %rax, %r10
; CHECK-NEXT: leaq (%r14,%r11), %r10
; CHECK-NEXT: addq %r10, %r10
; CHECK-NEXT: addq %r9, %r10
; CHECK-NEXT: movq {{.*}}(%rip), %rax
; CHECK-NEXT: addq %rsi, %rbx
; CHECK-NEXT: addq %rbx, %rdi
@@ -119,9 +119,9 @@ define fastcc i64 @foo() nounwind {
; CHECK-NEXT: addq %rdi, %rbx
; CHECK-NEXT: addq %r8, %rax
; CHECK-NEXT: addq %rbx, %rax
; CHECK-NEXT: leaq (%r9,%r14), %rbx
; CHECK-NEXT: leaq (%r10,%rbx), %r8
; CHECK-NEXT: addq %rbx, %r8
; CHECK-NEXT: leaq (%r9,%r14), %r8
; CHECK-NEXT: addq %r8, %r8
; CHECK-NEXT: addq %r10, %r8
; CHECK-NEXT: movq {{.*}}(%rip), %rbx
; CHECK-NEXT: addq %rdi, %rax
; CHECK-NEXT: addq %rax, %r11
@@ -132,9 +132,9 @@ define fastcc i64 @foo() nounwind {
; CHECK-NEXT: addq %r11, %rax
; CHECK-NEXT: addq %rdx, %rbx
; CHECK-NEXT: addq %rax, %rbx
; CHECK-NEXT: leaq (%r10,%r9), %rax
; CHECK-NEXT: leaq (%r8,%rax), %r15
; CHECK-NEXT: addq %rax, %r15
; CHECK-NEXT: leaq (%r10,%r9), %r15
; CHECK-NEXT: addq %r15, %r15
; CHECK-NEXT: addq %r8, %r15
; CHECK-NEXT: movq {{.*}}(%rip), %rax
; CHECK-NEXT: addq %r11, %rbx
; CHECK-NEXT: addq %rbx, %r14
@@ -145,9 +145,9 @@ define fastcc i64 @foo() nounwind {
; CHECK-NEXT: addq %r14, %rbx
; CHECK-NEXT: addq %rsi, %rax
; CHECK-NEXT: addq %rbx, %rax
; CHECK-NEXT: leaq (%r8,%r10), %rbx
; CHECK-NEXT: leaq (%r15,%rbx), %rsi
; CHECK-NEXT: addq %rbx, %rsi
; CHECK-NEXT: leaq (%r8,%r10), %rsi
; CHECK-NEXT: addq %rsi, %rsi
; CHECK-NEXT: addq %r15, %rsi
; CHECK-NEXT: movq {{.*}}(%rip), %rbx
; CHECK-NEXT: addq %r14, %rax
; CHECK-NEXT: addq %rax, %r9
@@ -158,9 +158,9 @@ define fastcc i64 @foo() nounwind {
; CHECK-NEXT: addq %r9, %rax
; CHECK-NEXT: addq %rdi, %rbx
; CHECK-NEXT: addq %rax, %rbx
; CHECK-NEXT: leaq (%r15,%r8), %rax
; CHECK-NEXT: leaq (%rsi,%rax), %r12
; CHECK-NEXT: addq %rax, %r12
; CHECK-NEXT: leaq (%r15,%r8), %r12
; CHECK-NEXT: addq %r12, %r12
; CHECK-NEXT: addq %rsi, %r12
; CHECK-NEXT: movq {{.*}}(%rip), %rcx
; CHECK-NEXT: addq %r9, %rbx
; CHECK-NEXT: addq %rbx, %r10
@@ -171,9 +171,9 @@ define fastcc i64 @foo() nounwind {
; CHECK-NEXT: addq %r10, %rax
; CHECK-NEXT: addq %r11, %rcx
; CHECK-NEXT: addq %rax, %rcx
; CHECK-NEXT: leaq (%rsi,%r15), %rbx
; CHECK-NEXT: leaq (%r12,%rbx), %rax
; CHECK-NEXT: addq %rbx, %rax
; CHECK-NEXT: leaq (%rsi,%r15), %rax
; CHECK-NEXT: addq %rax, %rax
; CHECK-NEXT: addq %r12, %rax
; CHECK-NEXT: movq {{.*}}(%rip), %rbx
; CHECK-NEXT: addq %r10, %rcx
; CHECK-NEXT: addq %rcx, %r8
@@ -184,9 +184,9 @@ define fastcc i64 @foo() nounwind {
; CHECK-NEXT: addq %r8, %rcx
; CHECK-NEXT: addq %r14, %rbx
; CHECK-NEXT: addq %rcx, %rbx
; CHECK-NEXT: leaq (%r12,%rsi), %rdx
; CHECK-NEXT: leaq (%rax,%rdx), %rcx
; CHECK-NEXT: addq %rdx, %rcx
; CHECK-NEXT: leaq (%r12,%rsi), %rcx
; CHECK-NEXT: addq %rcx, %rcx
; CHECK-NEXT: addq %rax, %rcx
; CHECK-NEXT: movq {{.*}}(%rip), %rdx
; CHECK-NEXT: addq %r8, %rbx
; CHECK-NEXT: addq %rbx, %r15
@@ -197,9 +197,9 @@ define fastcc i64 @foo() nounwind {
; CHECK-NEXT: addq %r15, %rbx
; CHECK-NEXT: addq %r9, %rdx
; CHECK-NEXT: addq %rbx, %rdx
; CHECK-NEXT: leaq (%rax,%r12), %r9
; CHECK-NEXT: leaq (%rcx,%r9), %rbx
; CHECK-NEXT: addq %r9, %rbx
; CHECK-NEXT: leaq (%rax,%r12), %rbx
; CHECK-NEXT: addq %rbx, %rbx
; CHECK-NEXT: addq %rcx, %rbx
; CHECK-NEXT: addq %r15, %rdx
; CHECK-NEXT: addq %rdx, %rsi
; CHECK-NEXT: addq %rcx, %rbx
@@ -211,12 +211,12 @@ define fastcc i64 @foo() nounwind {
; CHECK-NEXT: addq %rsi, %rdi
; CHECK-NEXT: addq %rdi, %rdx
; CHECK-NEXT: addq %rax, %rcx
; CHECK-NEXT: leaq (%rbx,%rcx), %rdi
; CHECK-NEXT: addq %rcx, %rdi
; CHECK-NEXT: addq %rbx, %rdi
; CHECK-NEXT: addq %rcx, %rcx
; CHECK-NEXT: addq %rbx, %rcx
; CHECK-NEXT: addq %rbx, %rcx
; CHECK-NEXT: addq %rsi, %rdx
; CHECK-NEXT: addq %rdx, %r12
; CHECK-NEXT: addq %rdx, %rdi
; CHECK-NEXT: addq %rdx, %rcx
; CHECK-NEXT: addq %r15, %rsi
; CHECK-NEXT: movq {{.*}}(%rip), %rax
; CHECK-NEXT: bswapq %rax
@@ -225,7 +225,7 @@ define fastcc i64 @foo() nounwind {
; CHECK-NEXT: addq %r12, %rsi
; CHECK-NEXT: addq %rsi, %rax
; CHECK-NEXT: addq %r12, %rax
; CHECK-NEXT: addq %rdi, %rax
; CHECK-NEXT: addq %rcx, %rax
; CHECK-NEXT: popq %rbx
; CHECK-NEXT: popq %r12
; CHECK-NEXT: popq %r14


@@ -11,15 +11,14 @@
; subl %edx, %ecx
; subl %eax, %ecx
; TODO: replace lea with sub.
; C - (A + B) --> C - A - B
define i32 @test1(i32* %p, i32 %a, i32 %b, i32 %c) {
; CHECK-LABEL: test1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: # kill: def $edx killed $edx def $rdx
; CHECK-NEXT: movl %esi, %eax
; CHECK-NEXT: leal (%rdx,%rax), %esi
; CHECK-NEXT: subl %esi, %ecx
; CHECK-NEXT: subl %edx, %ecx
; CHECK-NEXT: subl %eax, %ecx
; CHECK-NEXT: movl %ecx, (%rdi)
; CHECK-NEXT: subl %edx, %eax
; CHECK-NEXT: # kill: def $eax killed $eax killed $rax
@@ -32,16 +31,15 @@ entry:
ret i32 %sub1
}
; TODO: replace lea with add.
; (A + B) + C --> C + A + B
define i32 @test2(i32* %p, i32 %a, i32 %b, i32 %c) {
; CHECK-LABEL: test2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: # kill: def $edx killed $edx def $rdx
; CHECK-NEXT: movl %esi, %eax
; CHECK-NEXT: leal (%rax,%rdx), %esi
; CHECK-NEXT: addl %ecx, %esi
; CHECK-NEXT: movl %esi, (%rdi)
; CHECK-NEXT: addl %eax, %ecx
; CHECK-NEXT: addl %edx, %ecx
; CHECK-NEXT: movl %ecx, (%rdi)
; CHECK-NEXT: subl %edx, %eax
; CHECK-NEXT: # kill: def $eax killed $eax killed $rax
; CHECK-NEXT: retq
@@ -53,16 +51,15 @@ entry:
ret i32 %sub1
}
; TODO: replace lea with add.
; C + (A + B) --> C + A + B
define i32 @test3(i32* %p, i32 %a, i32 %b, i32 %c) {
; CHECK-LABEL: test3:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: # kill: def $edx killed $edx def $rdx
; CHECK-NEXT: movl %esi, %eax
; CHECK-NEXT: leal (%rax,%rdx), %esi
; CHECK-NEXT: addl %ecx, %esi
; CHECK-NEXT: movl %esi, (%rdi)
; CHECK-NEXT: addl %eax, %ecx
; CHECK-NEXT: addl %edx, %ecx
; CHECK-NEXT: movl %ecx, (%rdi)
; CHECK-NEXT: subl %edx, %eax
; CHECK-NEXT: # kill: def $eax killed $eax killed $rax
; CHECK-NEXT: retq
@@ -95,13 +92,12 @@ entry:
ret i32 %sub1
}
; TODO: replace lea with sub.
define i64 @test5(i64* %p, i64 %a, i64 %b, i64 %c) {
; CHECK-LABEL: test5:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq (%rdi), %rax
; CHECK-NEXT: leaq (%rdx,%rax), %rsi
; CHECK-NEXT: subq %rsi, %rcx
; CHECK-NEXT: subq %rdx, %rcx
; CHECK-NEXT: subq %rax, %rcx
; CHECK-NEXT: movq %rcx, (%rdi)
; CHECK-NEXT: subq %rdx, %rax
; CHECK-NEXT: retq
@@ -114,14 +110,13 @@ entry:
ret i64 %sub1
}
; TODO: replace lea with add.
define i64 @test6(i64* %p, i64 %a, i64 %b, i64 %c) {
; CHECK-LABEL: test6:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq (%rdi), %rax
; CHECK-NEXT: leaq (%rdx,%rax), %rsi
; CHECK-NEXT: addq %rcx, %rsi
; CHECK-NEXT: movq %rsi, (%rdi)
; CHECK-NEXT: addq %rdx, %rcx
; CHECK-NEXT: addq %rax, %rcx
; CHECK-NEXT: movq %rcx, (%rdi)
; CHECK-NEXT: subq %rdx, %rax
; CHECK-NEXT: retq
entry:
@@ -133,14 +128,13 @@ entry:
ret i64 %sub1
}
; TODO: replace lea with add.
define i64 @test7(i64* %p, i64 %a, i64 %b, i64 %c) {
; CHECK-LABEL: test7:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movq (%rdi), %rax
; CHECK-NEXT: leaq (%rdx,%rax), %rsi
; CHECK-NEXT: addq %rcx, %rsi
; CHECK-NEXT: movq %rsi, (%rdi)
; CHECK-NEXT: addq %rdx, %rcx
; CHECK-NEXT: addq %rax, %rcx
; CHECK-NEXT: movq %rcx, (%rdi)
; CHECK-NEXT: subq %rdx, %rax
; CHECK-NEXT: retq
entry:


@@ -53,9 +53,9 @@ define void @test(<16 x i32> %a0, <16 x i32> %b0, <16 x i32> %a1, <16 x i32> %b1
; X86-NEXT: addl %ecx, %edx
; X86-NEXT: kmovw %k1, %ecx
; X86-NEXT: addl %edi, %ecx
; X86-NEXT: addl %eax, %ecx
; X86-NEXT: addl %edx, %ecx
; X86-NEXT: movw %cx, (%esi)
; X86-NEXT: addl %ecx, %eax
; X86-NEXT: addl %edx, %eax
; X86-NEXT: movw %ax, (%esi)
; X86-NEXT: leal -8(%ebp), %esp
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
@@ -107,10 +107,10 @@ define void @test(<16 x i32> %a0, <16 x i32> %b0, <16 x i32> %a1, <16 x i32> %b1
; X64-NEXT: kmovw %k1, %ebx
; X64-NEXT: addl %edi, %eax
; X64-NEXT: addl %ecx, %edx
; X64-NEXT: leal (%rbx,%rsi), %ecx
; X64-NEXT: addl %eax, %ecx
; X64-NEXT: addl %edx, %ecx
; X64-NEXT: movw %cx, (%r14)
; X64-NEXT: addl %ebx, %eax
; X64-NEXT: addl %esi, %eax
; X64-NEXT: addl %edx, %eax
; X64-NEXT: movw %ax, (%r14)
; X64-NEXT: leaq -16(%rbp), %rsp
; X64-NEXT: popq %rbx
; X64-NEXT: popq %r14