diff --git a/llvm/lib/Target/X86/X86FixupLEAs.cpp b/llvm/lib/Target/X86/X86FixupLEAs.cpp
index c8ef92f35c22..91ab4f8ab24a 100644
--- a/llvm/lib/Target/X86/X86FixupLEAs.cpp
+++ b/llvm/lib/Target/X86/X86FixupLEAs.cpp
@@ -8,7 +8,7 @@
 //
 // This file defines the pass that finds instructions that can be
 // re-written as LEA instructions in order to reduce pipeline delays.
-// When optimizing for size it replaces suitable LEAs with INC or DEC.
+// It replaces LEAs with ADD/INC/DEC when that is better for size or speed.
 //
 //===----------------------------------------------------------------------===//
 
@@ -70,10 +70,11 @@ class FixupLEAPass : public MachineFunctionPass {
   MachineInstr *processInstrForSlow3OpLEA(MachineInstr &MI,
                                           MachineBasicBlock &MBB);
 
-  /// Look for LEAs that add 1 to reg or subtract 1 from reg
-  /// and convert them to INC or DEC respectively.
-  bool fixupIncDec(MachineBasicBlock::iterator &I,
-                   MachineBasicBlock &MBB) const;
+  /// Look for LEAs that are really two-address LEAs that we might be able to
+  /// turn into regular ADD instructions.
+  bool optTwoAddrLEA(MachineBasicBlock::iterator &I,
+                     MachineBasicBlock &MBB, bool OptIncDec,
+                     bool UseLEAForSP) const;
 
   /// Determine if an instruction references a machine register
   /// and, if so, whether it reads or writes the register.
@@ -114,7 +115,8 @@ public:
 
 private:
   TargetSchedModel TSM;
-  const X86InstrInfo *TII; // Machine instruction info.
+  const X86InstrInfo *TII;
+  const X86RegisterInfo *TRI;
 };
 }
 
@@ -197,13 +199,11 @@ bool FixupLEAPass::runOnMachineFunction(MachineFunction &MF) {
   bool LEAUsesAG = ST.LEAusesAG();
 
   bool OptIncDec = !ST.slowIncDec() || MF.getFunction().hasOptSize();
-  bool OptLEA = LEAUsesAG || IsSlowLEA || IsSlow3OpsLEA;
-
-  if (!OptLEA && !OptIncDec)
-    return false;
+  bool UseLEAForSP = ST.useLeaForSP();
 
   TSM.init(&ST);
   TII = ST.getInstrInfo();
+  TRI = ST.getRegisterInfo();
 
   LLVM_DEBUG(dbgs() << "Start X86FixupLEAs\n";);
   for (MachineBasicBlock &MBB : MF) {
@@ -212,7 +212,7 @@ bool FixupLEAPass::runOnMachineFunction(MachineFunction &MF) {
       if (!isLEA(I->getOpcode()))
         continue;
 
-      if (OptIncDec && fixupIncDec(I, MBB))
+      if (optTwoAddrLEA(I, MBB, OptIncDec, UseLEAForSP))
         continue;
 
       if (IsSlowLEA) {
@@ -323,8 +323,8 @@ static inline unsigned getADDrrFromLEA(unsigned LEAOpcode) {
   default:
     llvm_unreachable("Unexpected LEA instruction");
   case X86::LEA32r:
-    return X86::ADD32rr;
   case X86::LEA64_32r:
+    return X86::ADD32rr;
   case X86::LEA64r:
     return X86::ADD64rr;
   }
 }
@@ -344,48 +344,106 @@ static inline unsigned getADDriFromLEA(unsigned LEAOpcode,
   }
 }
 
-/// isLEASimpleIncOrDec - Does this LEA have one these forms:
-/// lea %reg, 1(%reg)
-/// lea %reg, -1(%reg)
-static inline bool isLEASimpleIncOrDec(MachineInstr &LEA) {
-  unsigned SrcReg = LEA.getOperand(1 + X86::AddrBaseReg).getReg();
-  unsigned DstReg = LEA.getOperand(0).getReg();
-  const MachineOperand &AddrDisp = LEA.getOperand(1 + X86::AddrDisp);
-  return SrcReg == DstReg &&
-         LEA.getOperand(1 + X86::AddrIndexReg).getReg() == 0 &&
-         LEA.getOperand(1 + X86::AddrSegmentReg).getReg() == 0 &&
-         AddrDisp.isImm() &&
-         (AddrDisp.getImm() == 1 || AddrDisp.getImm() == -1);
+static inline unsigned getINCDECFromLEA(unsigned LEAOpcode, bool IsINC) {
+  switch (LEAOpcode) {
+  default:
+    llvm_unreachable("Unexpected LEA instruction");
+  case X86::LEA32r:
+  case X86::LEA64_32r:
+    return IsINC ? X86::INC32r : X86::DEC32r;
+  case X86::LEA64r:
+    return IsINC ? X86::INC64r : X86::DEC64r;
+  }
 }
 
-bool FixupLEAPass::fixupIncDec(MachineBasicBlock::iterator &I,
-                               MachineBasicBlock &MBB) const {
+bool FixupLEAPass::optTwoAddrLEA(MachineBasicBlock::iterator &I,
+                                 MachineBasicBlock &MBB, bool OptIncDec,
+                                 bool UseLEAForSP) const {
   MachineInstr &MI = *I;
-  if (isLEASimpleIncOrDec(MI) && TII->isSafeToClobberEFLAGS(MBB, I)) {
-    unsigned NewOpcode;
-    bool isINC = MI.getOperand(1 + X86::AddrDisp).getImm() == 1;
-    switch (MI.getOpcode()) {
-    default:
-      llvm_unreachable("Unexpected LEA instruction");
-    case X86::LEA32r:
-    case X86::LEA64_32r:
-      NewOpcode = isINC ? X86::INC32r : X86::DEC32r;
-      break;
-    case X86::LEA64r:
-      NewOpcode = isINC ? X86::INC64r : X86::DEC64r;
-      break;
-    }
+  const MachineOperand &Base = MI.getOperand(1 + X86::AddrBaseReg);
+  const MachineOperand &Scale = MI.getOperand(1 + X86::AddrScaleAmt);
+  const MachineOperand &Index = MI.getOperand(1 + X86::AddrIndexReg);
+  const MachineOperand &Disp = MI.getOperand(1 + X86::AddrDisp);
+  const MachineOperand &Segment = MI.getOperand(1 + X86::AddrSegmentReg);
 
-    MachineInstr *NewMI =
-        BuildMI(MBB, I, MI.getDebugLoc(), TII->get(NewOpcode))
-            .add(MI.getOperand(0))
-            .add(MI.getOperand(1 + X86::AddrBaseReg));
-    MBB.erase(I);
-    I = static_cast<MachineBasicBlock::iterator>(NewMI);
-    return true;
+  if (Segment.getReg() != 0 || !Disp.isImm() || Scale.getImm() > 1 ||
+      !TII->isSafeToClobberEFLAGS(MBB, I))
+    return false;
+
+  unsigned DestReg = MI.getOperand(0).getReg();
+  unsigned BaseReg = Base.getReg();
+  unsigned IndexReg = Index.getReg();
+
+  // Don't change stack adjustment LEAs.
+  if (UseLEAForSP && (DestReg == X86::ESP || DestReg == X86::RSP))
+    return false;
+
+  // LEA64_32 has 64-bit operands but 32-bit result.
+  if (MI.getOpcode() == X86::LEA64_32r) {
+    if (BaseReg != 0)
+      BaseReg = TRI->getSubReg(BaseReg, X86::sub_32bit);
+    if (IndexReg != 0)
+      IndexReg = TRI->getSubReg(IndexReg, X86::sub_32bit);
   }
-  return false;
+
+  MachineInstr *NewMI = nullptr;
+
+  // Look for lea(%reg1, %reg2), %reg1 or lea(%reg2, %reg1), %reg1
+  // which can be turned into add %reg2, %reg1
+  if (BaseReg != 0 && IndexReg != 0 && Disp.getImm() == 0 &&
+      (DestReg == BaseReg || DestReg == IndexReg)) {
+    unsigned NewOpcode = getADDrrFromLEA(MI.getOpcode());
+    if (DestReg != BaseReg)
+      std::swap(BaseReg, IndexReg);
+
+    if (MI.getOpcode() == X86::LEA64_32r) {
+      // TODO: Do we need the super register implicit use?
+      NewMI = BuildMI(MBB, I, MI.getDebugLoc(), TII->get(NewOpcode), DestReg)
+        .addReg(BaseReg).addReg(IndexReg)
+        .addReg(Base.getReg(), RegState::Implicit)
+        .addReg(Index.getReg(), RegState::Implicit);
+    } else {
+      NewMI = BuildMI(MBB, I, MI.getDebugLoc(), TII->get(NewOpcode), DestReg)
+        .addReg(BaseReg).addReg(IndexReg);
+    }
+  } else if (DestReg == BaseReg && IndexReg == 0) {
+    // This is an LEA with only a base register and a displacement;
+    // we can use ADDri or INC/DEC.
+
+    // Does this LEA have one of these forms:
+    // lea %reg, 1(%reg)
+    // lea %reg, -1(%reg)
+    if (OptIncDec && (Disp.getImm() == 1 || Disp.getImm() == -1)) {
+      bool IsINC = Disp.getImm() == 1;
+      unsigned NewOpcode = getINCDECFromLEA(MI.getOpcode(), IsINC);
+
+      if (MI.getOpcode() == X86::LEA64_32r) {
+        // TODO: Do we need the super register implicit use?
+        NewMI = BuildMI(MBB, I, MI.getDebugLoc(), TII->get(NewOpcode), DestReg)
+          .addReg(BaseReg).addReg(Base.getReg(), RegState::Implicit);
+      } else {
+        NewMI = BuildMI(MBB, I, MI.getDebugLoc(), TII->get(NewOpcode), DestReg)
+          .addReg(BaseReg);
+      }
+    } else {
+      unsigned NewOpcode = getADDriFromLEA(MI.getOpcode(), Disp);
+      if (MI.getOpcode() == X86::LEA64_32r) {
+        // TODO: Do we need the super register implicit use?
+        NewMI = BuildMI(MBB, I, MI.getDebugLoc(), TII->get(NewOpcode), DestReg)
+          .addReg(BaseReg).addImm(Disp.getImm())
+          .addReg(Base.getReg(), RegState::Implicit);
+      } else {
+        NewMI = BuildMI(MBB, I, MI.getDebugLoc(), TII->get(NewOpcode), DestReg)
+          .addReg(BaseReg).addImm(Disp.getImm());
+      }
+    }
+  } else
+    return false;
+
+  MBB.erase(I);
+  I = NewMI;
+  return true;
 }
 
 void FixupLEAPass::processInstruction(MachineBasicBlock::iterator &I,
diff --git a/llvm/test/CodeGen/X86/GlobalISel/add-ext.ll b/llvm/test/CodeGen/X86/GlobalISel/add-ext.ll
index da5a6f182ccf..bf081b355ad4 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/add-ext.ll
+++ b/llvm/test/CodeGen/X86/GlobalISel/add-ext.ll
@@ -79,7 +79,7 @@ define i8* @gep8(i32 %i, i8* %x) {
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: addl $5, %edi
 ; CHECK-NEXT: movslq %edi, %rax
-; CHECK-NEXT: leaq (%rsi,%rax), %rax
+; CHECK-NEXT: addq %rsi, %rax
 ; CHECK-NEXT: retq
 
   %add = add nsw i32 %i, 5
@@ -166,16 +166,16 @@ define void @PR20134(i32* %a, i32 %i) {
 ; CHECK-NEXT: cltq
 ; CHECK-NEXT: movq $4, %rcx
 ; CHECK-NEXT: imulq %rcx, %rax
-; CHECK-NEXT: leaq (%rdi,%rax), %rax
+; CHECK-NEXT: addq %rdi, %rax
 ; CHECK-NEXT: leal 2(%rsi), %edx
 ; CHECK-NEXT: movslq %edx, %rdx
 ; CHECK-NEXT: imulq %rcx, %rdx
-; CHECK-NEXT: leaq (%rdi,%rdx), %rdx
+; CHECK-NEXT: addq %rdi, %rdx
 ; CHECK-NEXT: movl (%rdx), %edx
 ; CHECK-NEXT: addl (%rax), %edx
 ; CHECK-NEXT: movslq %esi, %rax
 ; CHECK-NEXT: imulq %rcx, %rax
-; CHECK-NEXT: leaq (%rdi,%rax), %rax
+; CHECK-NEXT: addq %rdi, %rax
 ; CHECK-NEXT: movl %edx, (%rax)
 ; CHECK-NEXT: retq
 
@@ -204,10 +204,10 @@ define void @PR20134_zext(i32* %a, i32 %i) {
 ; CHECK-NEXT: leal 1(%rsi), %eax
 ; CHECK-NEXT: movq $4, %rcx
 ; CHECK-NEXT: imulq %rcx, %rax
-; CHECK-NEXT: leaq (%rdi,%rax), %rax
+; CHECK-NEXT: addq %rdi, %rax
 ; CHECK-NEXT: leal 2(%rsi), %edx
 ; CHECK-NEXT: imulq %rcx, %rdx
-; CHECK-NEXT: leaq (%rdi,%rdx), %rdx
+; CHECK-NEXT: addq %rdi, %rdx
 ; CHECK-NEXT: movl (%rdx), %edx
 ; CHECK-NEXT: addl (%rax), %edx
 ; CHECK-NEXT: imulq %rcx, %rsi
diff --git a/llvm/test/CodeGen/X86/GlobalISel/callingconv.ll b/llvm/test/CodeGen/X86/GlobalISel/callingconv.ll
index 33e16893473c..b8deacdb19b3 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/callingconv.ll
+++ b/llvm/test/CodeGen/X86/GlobalISel/callingconv.ll
@@ -409,7 +409,7 @@ define void @test_variadic_call_2(i8** %addr_ptr, double* %val_ptr) {
 ; X32-NEXT: movl 4(%ecx), %ecx
 ; X32-NEXT: movl %eax, (%esp)
 ; X32-NEXT: movl $4, %eax
-; X32-NEXT: leal (%esp,%eax), %eax
+; X32-NEXT: addl %esp, %eax
 ; X32-NEXT: movl %edx, {{[0-9]+}}(%esp)
 ; X32-NEXT: movl %ecx, 4(%eax)
 ; X32-NEXT: calll variadic_callee
diff --git a/llvm/test/CodeGen/X86/GlobalISel/gep.ll b/llvm/test/CodeGen/X86/GlobalISel/gep.ll
index e0e40810af20..20047fd7b081 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/gep.ll
+++ b/llvm/test/CodeGen/X86/GlobalISel/gep.ll
@@ -12,7 +12,7 @@ define i32* @test_gep_i8(i32 *%arr, i8 %ind) {
 ; X64_GISEL-NEXT: sarq %cl, %rsi
 ; X64_GISEL-NEXT: movq $4, %rax
 ; X64_GISEL-NEXT: imulq %rsi, %rax
-; X64_GISEL-NEXT: leaq (%rdi,%rax), %rax
+; X64_GISEL-NEXT: addq %rdi, %rax
 ; X64_GISEL-NEXT: retq
 ;
 ; X64-LABEL: test_gep_i8:
@@ -29,7 +29,7 @@ define i32* @test_gep_i8_const(i32 *%arr) {
 ; X64_GISEL-LABEL: test_gep_i8_const:
 ; X64_GISEL: # %bb.0:
 ; X64_GISEL-NEXT: movq $80, %rax
-; X64_GISEL-NEXT: leaq (%rdi,%rax), %rax
+; X64_GISEL-NEXT: addq %rdi, %rax
 ; X64_GISEL-NEXT: retq
 ;
 ; X64-LABEL: test_gep_i8_const:
@@ -50,7 +50,7 @@ define i32* @test_gep_i16(i32 *%arr, i16 %ind) {
 ; X64_GISEL-NEXT: sarq %cl, %rsi
 ; X64_GISEL-NEXT: movq $4, %rax
 ; X64_GISEL-NEXT: imulq %rsi, %rax
-; X64_GISEL-NEXT: leaq (%rdi,%rax), %rax
+; X64_GISEL-NEXT: addq %rdi, %rax
 ; X64_GISEL-NEXT: retq
 ;
 ; X64-LABEL: test_gep_i16:
@@ -67,7 +67,7 @@ define i32* @test_gep_i16_const(i32 *%arr) {
 ; X64_GISEL-LABEL: test_gep_i16_const:
 ; X64_GISEL: # %bb.0:
 ; X64_GISEL-NEXT: movq $80, %rax
-; X64_GISEL-NEXT: leaq (%rdi,%rax), %rax
+; X64_GISEL-NEXT: addq %rdi, %rax
 ; X64_GISEL-NEXT: retq
 ;
 ; X64-LABEL: test_gep_i16_const:
@@ -100,7 +100,7 @@ define i32* @test_gep_i32_const(i32 *%arr) {
 ; X64_GISEL-LABEL: test_gep_i32_const:
 ; X64_GISEL: # %bb.0:
 ; X64_GISEL-NEXT: movq $20, %rax
-; X64_GISEL-NEXT: leaq (%rdi,%rax), %rax
+; X64_GISEL-NEXT: addq %rdi, %rax
 ; X64_GISEL-NEXT: retq
 ;
 ; X64-LABEL: test_gep_i32_const:
@@ -116,7 +116,7 @@ define i32* @test_gep_i64(i32 *%arr, i64 %ind) {
 ; X64_GISEL: # %bb.0:
 ; X64_GISEL-NEXT: movq $4, %rax
 ; X64_GISEL-NEXT: imulq %rsi, %rax
-; X64_GISEL-NEXT: leaq (%rdi,%rax), %rax
+; X64_GISEL-NEXT: addq %rdi, %rax
 ; X64_GISEL-NEXT: retq
 ;
 ; X64-LABEL: test_gep_i64:
@@ -131,7 +131,7 @@ define i32* @test_gep_i64_const(i32 *%arr) {
 ; X64_GISEL-LABEL: test_gep_i64_const:
 ; X64_GISEL: # %bb.0:
 ; X64_GISEL-NEXT: movq $20, %rax
-; X64_GISEL-NEXT: leaq (%rdi,%rax), %rax
+; X64_GISEL-NEXT: addq %rdi, %rax
 ; X64_GISEL-NEXT: retq
 ;
 ; X64-LABEL: test_gep_i64_const:
diff --git a/llvm/test/CodeGen/X86/GlobalISel/memop-scalar.ll b/llvm/test/CodeGen/X86/GlobalISel/memop-scalar.ll
index 089263359ce9..b98d7ca38f9c 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/memop-scalar.ll
+++ b/llvm/test/CodeGen/X86/GlobalISel/memop-scalar.ll
@@ -181,7 +181,7 @@ define i32 @test_gep_folding_largeGepIndex(i32* %arr, i32 %val) {
 ; ALL-LABEL: test_gep_folding_largeGepIndex:
 ; ALL: # %bb.0:
 ; ALL-NEXT: movabsq $228719476720, %rax # imm = 0x3540BE3FF0
-; ALL-NEXT: leaq (%rdi,%rax), %rax
+; ALL-NEXT: addq %rdi, %rax
 ; ALL-NEXT: movl %esi, (%rax)
 ; ALL-NEXT: movl (%rax), %eax
 ; ALL-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/MergeConsecutiveStores.ll b/llvm/test/CodeGen/X86/MergeConsecutiveStores.ll
index edec3fdd7f5a..de74c8055834 100644
--- a/llvm/test/CodeGen/X86/MergeConsecutiveStores.ll
+++ b/llvm/test/CodeGen/X86/MergeConsecutiveStores.ll
@@ -632,7 +632,7 @@ define void @loadStoreBaseIndexOffsetSextNoSex(i8* %a, i8* %b, i8* %c, i32 %n) {
 ; BWON-NEXT: .LBB12_1: # =>This Inner Loop Header: Depth=1
 ; BWON-NEXT: movsbq (%rdi,%rcx), %rax
 ; BWON-NEXT: movzbl (%rdx,%rax), %r9d
-; BWON-NEXT: leal 1(%rax), %eax
+; BWON-NEXT: incl %eax
 ; BWON-NEXT: movsbq %al, %rax
 ; BWON-NEXT: movzbl (%rdx,%rax), %eax
 ; BWON-NEXT: movb %r9b, (%rsi,%rcx,2)
@@ -651,7 +651,7 @@ define void @loadStoreBaseIndexOffsetSextNoSex(i8* %a, i8* %b, i8* %c, i32 %n) {
 ; BWOFF-NEXT: .LBB12_1: # =>This Inner Loop Header: Depth=1
 ; BWOFF-NEXT: movsbq (%rdi,%rcx), %rax
 ; BWOFF-NEXT: movb (%rdx,%rax), %r9b
-; BWOFF-NEXT: leal 1(%rax), %eax
+; BWOFF-NEXT: incl %eax
 ; BWOFF-NEXT: movsbq %al, %rax
 ; BWOFF-NEXT: movb (%rdx,%rax), %al
 ; BWOFF-NEXT: movb %r9b, (%rsi,%rcx,2)
diff --git a/llvm/test/CodeGen/X86/atomic-unordered.ll b/llvm/test/CodeGen/X86/atomic-unordered.ll
index 316f124c79b5..567e8b47c4ba 100644
--- a/llvm/test/CodeGen/X86/atomic-unordered.ll
+++ b/llvm/test/CodeGen/X86/atomic-unordered.ll
@@ -772,7 +772,7 @@ define i64 @load_fold_sdiv1(i64* %p) {
 ; CHECK-O3-NEXT: movq %rdx, %rax
 ; CHECK-O3-NEXT: shrq $63, %rax
 ; CHECK-O3-NEXT: sarq $3, %rdx
-; CHECK-O3-NEXT: leaq (%rdx,%rax), %rax
+; CHECK-O3-NEXT: addq %rdx, %rax
 ; CHECK-O3-NEXT: retq
   %v = load atomic i64, i64* %p unordered, align 8
   %ret = sdiv i64 %v, 15
diff --git a/llvm/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll b/llvm/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll
index 0159d9196daa..fa82fe6a9c1c 100644
--- a/llvm/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll
+++ b/llvm/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll
@@ -9268,7 +9268,7 @@ define i8@test_int_x86_avx512_ptestm_d_128(<4 x i32> %x0, <4 x i32> %x1, i8 %x2)
 ; X64-NEXT: kandw %k1, %k0, %k1 # encoding: [0xc5,0xfc,0x41,0xc9]
 ; X64-NEXT: kmovw %k1, %eax # encoding: [0xc5,0xf8,0x93,0xc1]
 ; X64-NEXT: kmovw %k0, %ecx # encoding: [0xc5,0xf8,0x93,0xc8]
-; X64-NEXT: leal (%rcx,%rax), %eax # encoding: [0x8d,0x04,0x01]
+; X64-NEXT: addl %ecx, %eax # encoding: [0x01,0xc8]
 ; X64-NEXT: # kill: def $al killed $al killed $eax
 ; X64-NEXT: retq # encoding: [0xc3]
   %res = call i8 @llvm.x86.avx512.ptestm.d.128(<4 x i32> %x0, <4 x i32> %x1, i8 %x2)
@@ -9327,7 +9327,7 @@ define i8@test_int_x86_avx512_ptestm_q_128(<2 x i64> %x0, <2 x i64> %x1, i8 %x2)
 ; X64-NEXT: kandw %k1, %k0, %k1 # encoding: [0xc5,0xfc,0x41,0xc9]
 ; X64-NEXT: kmovw %k1, %eax # encoding: [0xc5,0xf8,0x93,0xc1]
 ; X64-NEXT: kmovw %k0, %ecx # encoding: [0xc5,0xf8,0x93,0xc8]
-; X64-NEXT: leal (%rcx,%rax), %eax # encoding: [0x8d,0x04,0x01]
+; X64-NEXT: addl %ecx, %eax # encoding: [0x01,0xc8]
 ; X64-NEXT: # kill: def $al killed $al killed $eax
 ; X64-NEXT: retq # encoding: [0xc3]
   %res = call i8 @llvm.x86.avx512.ptestm.q.128(<2 x i64> %x0, <2 x i64> %x1, i8 %x2)
@@ -9359,7 +9359,7 @@ define i8@test_int_x86_avx512_ptestm_q_256(<4 x i64> %x0, <4 x i64> %x1, i8 %x2)
 ; X64-NEXT: kandw %k1, %k0, %k1 # encoding: [0xc5,0xfc,0x41,0xc9]
 ; X64-NEXT: kmovw %k1, %eax # encoding: [0xc5,0xf8,0x93,0xc1]
 ; X64-NEXT: kmovw %k0, %ecx # encoding: [0xc5,0xf8,0x93,0xc8]
-; X64-NEXT: leal (%rcx,%rax), %eax # encoding: [0x8d,0x04,0x01]
+; X64-NEXT: addl %ecx, %eax # encoding: [0x01,0xc8]
 ; X64-NEXT: # kill: def $al killed $al killed $eax
 ; X64-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
 ; X64-NEXT: retq # encoding: [0xc3]
@@ -9391,7 +9391,7 @@ define i8@test_int_x86_avx512_ptestnm_d_128(<4 x i32> %x0, <4 x i32> %x1, i8 %x2
 ; X64-NEXT: kandw %k1, %k0, %k1 # encoding: [0xc5,0xfc,0x41,0xc9]
 ; X64-NEXT: kmovw %k1, %eax # encoding: [0xc5,0xf8,0x93,0xc1]
 ; X64-NEXT: kmovw %k0, %ecx # encoding: [0xc5,0xf8,0x93,0xc8]
-; X64-NEXT: leal (%rcx,%rax), %eax # encoding: [0x8d,0x04,0x01]
+; X64-NEXT: addl %ecx, %eax # encoding: [0x01,0xc8]
 ; X64-NEXT: # kill: def $al killed $al killed $eax
 ; X64-NEXT: retq # encoding: [0xc3]
   %res = call i8 @llvm.x86.avx512.ptestnm.d.128(<4 x i32> %x0, <4 x i32> %x1, i8 %x2)
@@ -9450,7 +9450,7 @@ define i8@test_int_x86_avx512_ptestnm_q_128(<2 x i64> %x0, <2 x i64> %x1, i8 %x2
 ; X64-NEXT: kandw %k1, %k0, %k1 # encoding: [0xc5,0xfc,0x41,0xc9]
 ; X64-NEXT: kmovw %k1, %eax # encoding: [0xc5,0xf8,0x93,0xc1]
 ; X64-NEXT: kmovw %k0, %ecx # encoding: [0xc5,0xf8,0x93,0xc8]
-; X64-NEXT: leal (%rcx,%rax), %eax # encoding: [0x8d,0x04,0x01]
+; X64-NEXT: addl %ecx, %eax # encoding: [0x01,0xc8]
 ; X64-NEXT: # kill: def $al killed $al killed $eax
 ; X64-NEXT: retq # encoding: [0xc3]
   %res = call i8 @llvm.x86.avx512.ptestnm.q.128(<2 x i64> %x0, <2 x i64> %x1, i8 %x2)
@@ -9482,7 +9482,7 @@ define i8@test_int_x86_avx512_ptestnm_q_256(<4 x i64> %x0, <4 x i64> %x1, i8 %x2
 ; X64-NEXT: kandw %k1, %k0, %k1 # encoding: [0xc5,0xfc,0x41,0xc9]
 ; X64-NEXT: kmovw %k1, %eax # encoding: [0xc5,0xf8,0x93,0xc1]
 ; X64-NEXT: kmovw %k0, %ecx # encoding: [0xc5,0xf8,0x93,0xc8]
-; X64-NEXT: leal (%rcx,%rax), %eax # encoding: [0x8d,0x04,0x01]
+; X64-NEXT: addl %ecx, %eax # encoding: [0x01,0xc8]
 ; X64-NEXT: # kill: def $al killed $al killed $eax
 ; X64-NEXT: vzeroupper # encoding: [0xc5,0xf8,0x77]
 ; X64-NEXT: retq # encoding: [0xc3]
diff --git a/llvm/test/CodeGen/X86/bitreverse.ll b/llvm/test/CodeGen/X86/bitreverse.ll
index 23056f9e802a..5dd2b36bebd0 100644
--- a/llvm/test/CodeGen/X86/bitreverse.ll
+++ b/llvm/test/CodeGen/X86/bitreverse.ll
@@ -347,7 +347,7 @@ define i8 @test_bitreverse_i8(i8 %a) {
 ; X64-NEXT: addb %al, %al
 ; X64-NEXT: andb $-86, %dil
 ; X64-NEXT: shrb %dil
-; X64-NEXT: leal (%rdi,%rax), %eax
+; X64-NEXT: addl %edi, %eax
 ; X64-NEXT: # kill: def $al killed $al killed $eax
 ; X64-NEXT: retq
   %b = call i8 @llvm.bitreverse.i8(i8 %a)
@@ -391,7 +391,7 @@ define i4 @test_bitreverse_i4(i4 %a) {
 ; X64-NEXT: addb %al, %al
 ; X64-NEXT: andb $-96, %dil
 ; X64-NEXT: shrb %dil
-; X64-NEXT: leal (%rdi,%rax), %eax
+; X64-NEXT: addl %edi, %eax
 ; X64-NEXT: shrb $4, %al
 ; X64-NEXT: # kill: def $al killed $al killed $eax
 ; X64-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/bswap_tree2.ll b/llvm/test/CodeGen/X86/bswap_tree2.ll
index 3f64d6f94d80..7e06c8a11b81 100644
--- a/llvm/test/CodeGen/X86/bswap_tree2.ll
+++ b/llvm/test/CodeGen/X86/bswap_tree2.ll
@@ -81,7 +81,7 @@ define i32 @test2(i32 %x) nounwind {
 ; CHECK64-NEXT: andl $-16777216, %edi # imm = 0xFF000000
 ; CHECK64-NEXT: andl $16711680, %eax # imm = 0xFF0000
 ; CHECK64-NEXT: orl %edi, %eax
-; CHECK64-NEXT: leal (%rax,%rcx), %eax
+; CHECK64-NEXT: addl %ecx, %eax
 ; CHECK64-NEXT: retq
   %byte1 = lshr i32 %x, 8
   %byte0 = shl i32 %x, 8
diff --git a/llvm/test/CodeGen/X86/bypass-slow-division-32.ll b/llvm/test/CodeGen/X86/bypass-slow-division-32.ll
index 1533a393cfbf..66aacf19cb8d 100644
--- a/llvm/test/CodeGen/X86/bypass-slow-division-32.ll
+++ b/llvm/test/CodeGen/X86/bypass-slow-division-32.ll
@@ -143,7 +143,7 @@ define i32 @Test_use_div_reg_imm(i32 %a) nounwind {
 ; CHECK-NEXT: movl %edx, %eax
 ; CHECK-NEXT: shrl $31, %eax
 ; CHECK-NEXT: sarl $3, %edx
-; CHECK-NEXT: leal (%edx,%eax), %eax
+; CHECK-NEXT: addl %edx, %eax
 ; CHECK-NEXT: retl
   %resultdiv = sdiv i32 %a, 33
   ret i32 %resultdiv
diff --git a/llvm/test/CodeGen/X86/combine-srem.ll b/llvm/test/CodeGen/X86/combine-srem.ll
index 4878d708e481..ef338b371091 100644
--- a/llvm/test/CodeGen/X86/combine-srem.ll
+++ b/llvm/test/CodeGen/X86/combine-srem.ll
@@ -61,7 +61,7 @@ define i32 @combine_srem_by_minsigned(i32 %x) {
 ; CHECK-NEXT: shrl %eax
 ; CHECK-NEXT: addl %edi, %eax
 ; CHECK-NEXT: andl $-2147483648, %eax # imm = 0x80000000
-; CHECK-NEXT: leal (%rax,%rdi), %eax
+; CHECK-NEXT: addl %edi, %eax
 ; CHECK-NEXT: retq
   %1 = srem i32 %x, -2147483648
   ret i32 %1
diff --git a/llvm/test/CodeGen/X86/dagcombine-shifts.ll b/llvm/test/CodeGen/X86/dagcombine-shifts.ll
index d650bd18eafd..d8996251e9aa 100644
--- a/llvm/test/CodeGen/X86/dagcombine-shifts.ll
+++ b/llvm/test/CodeGen/X86/dagcombine-shifts.ll
@@ -161,7 +161,7 @@ define i64 @fun11(i16 zeroext %v) {
 ; CHECK-NEXT: shrl $4, %edi
 ; CHECK-NEXT: movq %rdi, %rax
 ; CHECK-NEXT: shlq $4, %rax
-; CHECK-NEXT: leaq (%rax,%rdi), %rax
+; CHECK-NEXT: addq %rdi, %rax
 ; CHECK-NEXT: retq
 entry:
   %shr = lshr i16 %v, 4
@@ -178,7 +178,7 @@ define i64 @fun12(i32 zeroext %v) {
 ; CHECK-NEXT: shrl $4, %edi
 ; CHECK-NEXT: movq %rdi, %rax
 ; CHECK-NEXT: shlq $4, %rax
-; CHECK-NEXT: leaq (%rax,%rdi), %rax
+; CHECK-NEXT: addq %rdi, %rax
 ; CHECK-NEXT: retq
 entry:
   %shr = lshr i32 %v, 4
diff --git a/llvm/test/CodeGen/X86/fixup-bw-copy.ll b/llvm/test/CodeGen/X86/fixup-bw-copy.ll
index 9e434ef7333b..ed15ec3b8a92 100644
--- a/llvm/test/CodeGen/X86/fixup-bw-copy.ll
+++ b/llvm/test/CodeGen/X86/fixup-bw-copy.ll
@@ -46,7 +46,7 @@ define i8 @test_movb_hreg(i16 %a0) {
 ; X64-NEXT: # kill: def $edi killed $edi def $rdi
 ; X64-NEXT: movl %edi, %eax
 ; X64-NEXT: shrl $8, %eax
-; X64-NEXT: leal (%rax,%rdi), %eax
+; X64-NEXT: addl %edi, %eax
 ; X64-NEXT: # kill: def $al killed $al killed $eax
 ; X64-NEXT: retq
 ;
diff --git a/llvm/test/CodeGen/X86/fixup-lea.ll b/llvm/test/CodeGen/X86/fixup-lea.ll
index da9a1613fef8..8d8a1cd19f05 100644
--- a/llvm/test/CodeGen/X86/fixup-lea.ll
+++ b/llvm/test/CodeGen/X86/fixup-lea.ll
@@ -129,7 +129,7 @@ define void @foo_nosize(i32 inreg %dns) {
 ; FAST-NEXT: .LBB4_1: # %for.body
 ; FAST-NEXT: # =>This Inner Loop Header: Depth=1
 ; FAST-NEXT: movzwl %cx, %edx
-; FAST-NEXT: leal -1(%ecx), %ecx
+; FAST-NEXT: addl $-1, %ecx
 ; FAST-NEXT: cmpl %eax, %edx
 ; FAST-NEXT: jl .LBB4_1
 ; FAST-NEXT: # %bb.2: # %for.end
@@ -169,7 +169,7 @@ define void @bar_nosize(i32 inreg %dns) {
 ; FAST-NEXT: .LBB5_1: # %for.body
 ; FAST-NEXT: # =>This Inner Loop Header: Depth=1
 ; FAST-NEXT: movzwl %cx, %edx
-; FAST-NEXT: leal 1(%ecx), %ecx
+; FAST-NEXT: addl $1, %ecx
 ; FAST-NEXT: cmpl %eax, %edx
 ; FAST-NEXT: jl .LBB5_1
 ; FAST-NEXT: # %bb.2: # %for.end
diff --git a/llvm/test/CodeGen/X86/imul.ll b/llvm/test/CodeGen/X86/imul.ll
index d3ec8e975a1d..450b19142dab 100644
--- a/llvm/test/CodeGen/X86/imul.ll
+++ b/llvm/test/CodeGen/X86/imul.ll
@@ -220,7 +220,7 @@ define i32 @mul33_32(i32 %A) {
 ; X64-NEXT: # kill: def $edi killed $edi def $rdi
 ; X64-NEXT: movl %edi, %eax
 ; X64-NEXT: shll $5, %eax
-; X64-NEXT: leal (%rax,%rdi), %eax
+; X64-NEXT: addl %edi, %eax
 ; X64-NEXT: retq
 ;
 ; X86-LABEL: mul33_32:
@@ -349,7 +349,7 @@ define i32 @test2(i32 %a) {
 ; X64-NEXT: # kill: def $edi killed $edi def $rdi
 ; X64-NEXT: movl %edi, %eax
 ; X64-NEXT: shll $5, %eax
-; X64-NEXT: leal (%rax,%rdi), %eax
+; X64-NEXT: addl %edi, %eax
 ; X64-NEXT: retq
 ;
 ; X86-LABEL: test2:
@@ -370,7 +370,7 @@ define i32 @test3(i32 %a) {
 ; X64-NEXT: # kill: def $edi killed $edi def $rdi
 ; X64-NEXT: movl %edi, %eax
 ; X64-NEXT: shll $5, %eax
-; X64-NEXT: leal (%rax,%rdi), %eax
+; X64-NEXT: addl %edi, %eax
 ; X64-NEXT: negl %eax
 ; X64-NEXT: retq
 ;
@@ -448,7 +448,7 @@ define i64 @test6(i64 %a) {
 ; X64: # %bb.0: # %entry
 ; X64-NEXT: movq %rdi, %rax
 ; X64-NEXT: shlq $5, %rax
-; X64-NEXT: leaq (%rax,%rdi), %rax
+; X64-NEXT: addq %rdi, %rax
 ; X64-NEXT: retq
 ;
 ; X86-LABEL: test6:
@@ -471,7 +471,7 @@ define i64 @test7(i64 %a) {
 ; X64: # %bb.0: # %entry
 ; X64-NEXT: movq %rdi, %rax
 ; X64-NEXT: shlq $5, %rax
-; X64-NEXT: leaq (%rax,%rdi), %rax
+; X64-NEXT: addq %rdi, %rax
 ; X64-NEXT: negq %rax
 ; X64-NEXT: retq
 ;
diff --git a/llvm/test/CodeGen/X86/leaFixup32.mir b/llvm/test/CodeGen/X86/leaFixup32.mir
index 5928575306fa..6d57cf2d9776 100644
--- a/llvm/test/CodeGen/X86/leaFixup32.mir
+++ b/llvm/test/CodeGen/X86/leaFixup32.mir
@@ -174,7 +174,7 @@ frameInfo:
 body:             |
   bb.0 (%ir-block.0):
     liveins: $eax, $ebp
-    ; CHECK: $ebp = ADD32rr $ebp, killed $eax
+    ; CHECK: $ebp = ADD32rr $ebp, $eax
     $ebp = LEA32r killed $ebp, 1, killed $eax, 0, $noreg
     RETQ $ebp
diff --git a/llvm/test/CodeGen/X86/leaFixup64.mir b/llvm/test/CodeGen/X86/leaFixup64.mir
index dccb99661f0c..fa738adfd065 100644
--- a/llvm/test/CodeGen/X86/leaFixup64.mir
+++ b/llvm/test/CodeGen/X86/leaFixup64.mir
@@ -247,7 +247,7 @@ frameInfo:
 body:             |
   bb.0 (%ir-block.0):
     liveins: $rax, $rbp
-    ; CHECK: $ebp = LEA64_32r killed $rax, 1, killed $rbp, 0
+    ; CHECK: $ebp = ADD32rr $ebp, $eax, implicit-def $eflags, implicit $rbp, implicit $rax
    $ebp = LEA64_32r killed $rbp, 1, killed $rax, 0, $noreg
    RETQ $ebp
@@ -351,7 +351,7 @@ frameInfo:
 body:             |
   bb.0 (%ir-block.0):
     liveins: $rax, $rbp
-    ; CHECK: $rbp = ADD64rr $rbp, killed $rax
+    ; CHECK: $rbp = ADD64rr $rbp, $rax
    $rbp = LEA64r killed $rbp, 1, killed $rax, 0, $noreg
    RETQ $ebp
diff --git a/llvm/test/CodeGen/X86/mul-constant-i16.ll b/llvm/test/CodeGen/X86/mul-constant-i16.ll
index c2950cda510e..f127a0936e5e 100644
--- a/llvm/test/CodeGen/X86/mul-constant-i16.ll
+++ b/llvm/test/CodeGen/X86/mul-constant-i16.ll
@@ -321,7 +321,7 @@ define i16 @test_mul_by_17(i16 %x) {
 ; X64-NEXT: # kill: def $edi killed $edi def $rdi
 ; X64-NEXT: movl %edi, %eax
 ; X64-NEXT: shll $4, %eax
-; X64-NEXT: leal (%rax,%rdi), %eax
+; X64-NEXT: addl %edi, %eax
 ; X64-NEXT: # kill: def $ax killed $ax killed $eax
 ; X64-NEXT: retq
   %mul = mul nsw i16 %x, 17
diff --git a/llvm/test/CodeGen/X86/mul-constant-i32.ll b/llvm/test/CodeGen/X86/mul-constant-i32.ll
index 3b2abf8c1f35..cd23c6424efb 100644
--- a/llvm/test/CodeGen/X86/mul-constant-i32.ll
+++ b/llvm/test/CodeGen/X86/mul-constant-i32.ll
@@ -490,7 +490,7 @@ define i32 @test_mul_by_17(i32 %x) {
 ; X64-NEXT: # kill: def $edi killed $edi def $rdi
 ; X64-NEXT: movl %edi, %eax
 ; X64-NEXT: shll $4, %eax
-; X64-NEXT: leal (%rax,%rdi), %eax
+; X64-NEXT: addl %edi, %eax
 ; X64-NEXT: retq
 ;
 ; X86-NOOPT-LABEL: test_mul_by_17:
@@ -1183,7 +1183,7 @@ define i32 @test_mul_by_66(i32 %x) {
 ; X64-SLM-NEXT: # kill: def $edi killed $edi def $rdi
 ; X64-SLM-NEXT: movl %edi, %eax
 ; X64-SLM-NEXT: shll $6, %eax
-; X64-SLM-NEXT: leal (%rax,%rdi), %eax
+; X64-SLM-NEXT: addl %edi, %eax
 ; X64-SLM-NEXT: addl %edi, %eax
 ; X64-SLM-NEXT: retq
   %mul = mul nsw i32 %x, 66
diff --git a/llvm/test/CodeGen/X86/mul-constant-i64.ll b/llvm/test/CodeGen/X86/mul-constant-i64.ll
index 4dd94de46367..c875e084f486 100644
--- a/llvm/test/CodeGen/X86/mul-constant-i64.ll
+++ b/llvm/test/CodeGen/X86/mul-constant-i64.ll
@@ -515,26 +515,12 @@ define i64 @test_mul_by_17(i64 %x) {
 ; X86-NOOPT-NEXT: addl %ecx, %edx
 ; X86-NOOPT-NEXT: retl
 ;
-; X64-HSW-LABEL: test_mul_by_17:
-; X64-HSW: # %bb.0:
-; X64-HSW-NEXT: movq %rdi, %rax
-; X64-HSW-NEXT: shlq $4, %rax
-; X64-HSW-NEXT: leaq (%rax,%rdi), %rax
-; X64-HSW-NEXT: retq
-;
-; X64-JAG-LABEL: test_mul_by_17:
-; X64-JAG: # %bb.0:
-; X64-JAG-NEXT: movq %rdi, %rax
-; X64-JAG-NEXT: shlq $4, %rax
-; X64-JAG-NEXT: leaq (%rax,%rdi), %rax
-; X64-JAG-NEXT: retq
-;
-; X64-SLM-LABEL: test_mul_by_17:
-; X64-SLM: # %bb.0:
-; X64-SLM-NEXT: movq %rdi, %rax
-; X64-SLM-NEXT: shlq $4, %rax
-; X64-SLM-NEXT: addq %rdi, %rax
-; X64-SLM-NEXT: retq
+; X64-OPT-LABEL: test_mul_by_17:
+; X64-OPT: # %bb.0:
+; X64-OPT-NEXT: movq %rdi, %rax
+; X64-OPT-NEXT: shlq $4, %rax
+; X64-OPT-NEXT: addq %rdi, %rax
+; X64-OPT-NEXT: retq
 ;
 ; X64-NOOPT-LABEL: test_mul_by_17:
 ; X64-NOOPT: # %bb.0:
diff --git a/llvm/test/CodeGen/X86/mul-constant-i8.ll b/llvm/test/CodeGen/X86/mul-constant-i8.ll
index 5a33888e0a5a..7cb245a2eeef 100644
--- a/llvm/test/CodeGen/X86/mul-constant-i8.ll
+++ b/llvm/test/CodeGen/X86/mul-constant-i8.ll
@@ -191,7 +191,7 @@ define i8 @test_mul_by_17(i8 %x) {
 ; X64-NEXT: # kill: def $edi killed $edi def $rdi
 ; X64-NEXT: movl %edi, %eax
 ; X64-NEXT: shll $4, %eax
-; X64-NEXT: leal (%rax,%rdi), %eax
+; X64-NEXT: addl %edi, %eax
 ; X64-NEXT: # kill: def $al killed $al killed $eax
 ; X64-NEXT: retq
   %m = mul i8 %x, 17
diff --git a/llvm/test/CodeGen/X86/popcnt.ll b/llvm/test/CodeGen/X86/popcnt.ll
index 46a56e93a701..5c4f07e782c7 100644
--- a/llvm/test/CodeGen/X86/popcnt.ll
+++ b/llvm/test/CodeGen/X86/popcnt.ll
@@ -39,7 +39,7 @@ define i8 @cnt8(i8 %x) nounwind readnone {
 ; X64-NEXT: addb %al, %dil
 ; X64-NEXT: movl %edi, %eax
 ; X64-NEXT: shrb $4, %al
-; X64-NEXT: leal (%rax,%rdi), %eax
+; X64-NEXT: addl %edi, %eax
 ; X64-NEXT: andb $15, %al
 ; X64-NEXT: # kill: def $al killed $al killed $eax
 ; X64-NEXT: retq
@@ -227,7 +227,7 @@ define i64 @cnt64(i64 %x) nounwind readnone {
 ; X64-NEXT: addq %rcx, %rdi
 ; X64-NEXT: movq %rdi, %rax
 ; X64-NEXT: shrq $4, %rax
-; X64-NEXT: leaq (%rax,%rdi), %rax
+; X64-NEXT: addq %rdi, %rax
 ; X64-NEXT: movabsq $1085102592571150095, %rcx # imm = 0xF0F0F0F0F0F0F0F
 ; X64-NEXT: andq %rax, %rcx
 ; X64-NEXT: movabsq $72340172838076673, %rax # imm = 0x101010101010101
@@ -347,7 +347,7 @@ define i64 @cnt64_noimplicitfloat(i64 %x) nounwind readnone noimplicitfloat {
 ; X64-NEXT: addq %rcx, %rdi
 ; X64-NEXT: movq %rdi, %rax
 ; X64-NEXT: shrq $4, %rax
-; X64-NEXT: leaq (%rax,%rdi), %rax
+; X64-NEXT: addq %rdi, %rax
 ; X64-NEXT: movabsq $1085102592571150095, %rcx # imm = 0xF0F0F0F0F0F0F0F
 ; X64-NEXT: andq %rax, %rcx
 ; X64-NEXT: movabsq $72340172838076673, %rax # imm = 0x101010101010101
diff --git a/llvm/test/CodeGen/X86/ragreedy-hoist-spill.ll b/llvm/test/CodeGen/X86/ragreedy-hoist-spill.ll
index 449d23204f43..c9a577dbaa92 100644
--- a/llvm/test/CodeGen/X86/ragreedy-hoist-spill.ll
+++ b/llvm/test/CodeGen/X86/ragreedy-hoist-spill.ll
@@ -166,7 +166,7 @@ define i8* @SyFgets(i8* %line, i64 %length, i64 %fid) {
 ; CHECK-NEXT: LBB0_34: ## %if.end517
 ; CHECK-NEXT: ## in Loop: Header=BB0_13 Depth=1
 ; CHECK-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax ## 8-byte Reload
-; CHECK-NEXT: leal -324(%rax), %eax
+; CHECK-NEXT: addl $-324, %eax ## imm = 0xFEBC
 ; CHECK-NEXT: cmpl $59, %eax
 ; CHECK-NEXT: ja LBB0_35
 ; CHECK-NEXT: ## %bb.57: ## %if.end517
diff --git a/llvm/test/CodeGen/X86/reverse_branches.ll b/llvm/test/CodeGen/X86/reverse_branches.ll
index a0ca990415c4..9f51a6313cca 100644
--- a/llvm/test/CodeGen/X86/reverse_branches.ll
+++ b/llvm/test/CodeGen/X86/reverse_branches.ll
@@ -62,7 +62,7 @@ define i32 @test_branches_order() uwtable ssp {
 ; CHECK-NEXT: ## in Loop: Header=BB0_3 Depth=2
 ; CHECK-NEXT: addq $1002, %rbp ## imm = 0x3EA
 ; CHECK-NEXT: movq %rbx, %rdi
-; CHECK-NEXT: leaq 1001(%rbx), %rbx
+; CHECK-NEXT: addq $1001, %rbx ## imm = 0x3E9
 ; CHECK-NEXT: movl $1000, %edx ## imm = 0x3E8
 ; CHECK-NEXT: movl $120, %esi
 ; CHECK-NEXT: callq _memchr
diff --git a/llvm/test/CodeGen/X86/rotate-extract.ll b/llvm/test/CodeGen/X86/rotate-extract.ll
index a1babd1d3cc3..e5228d271e94 100644
--- a/llvm/test/CodeGen/X86/rotate-extract.ll
+++ b/llvm/test/CodeGen/X86/rotate-extract.ll
@@ -156,7 +156,7 @@ define i64 @no_extract_shl(i64 %i) nounwind {
 ; X64-NEXT: shlq $5, %rax
 ; X64-NEXT: shlq $10, %rdi
 ; X64-NEXT: shrq $57, %rax
-; X64-NEXT: leaq (%rax,%rdi), %rax
+; X64-NEXT: addq %rdi, %rax
 ; X64-NEXT: retq
   %lhs_mul = shl i64 %i, 5
   %rhs_mul = shl i64 %i, 10
@@ -184,7 +184,7 @@ define i32 @no_extract_shrl(i32 %i) nounwind {
 ; X64-NEXT: andl $-8, %eax
 ; X64-NEXT: shll $25, %eax
 ; X64-NEXT: shrl $9, %edi
-; X64-NEXT: leal (%rdi,%rax), %eax
+; X64-NEXT: addl %edi, %eax
 ; X64-NEXT: retq
   %lhs_div = lshr i32 %i, 3
   %rhs_div = lshr i32 %i, 9
diff --git a/llvm/test/CodeGen/X86/sat-add.ll b/llvm/test/CodeGen/X86/sat-add.ll
index 63c78fa7c72c..70d102667ff6 100644
--- a/llvm/test/CodeGen/X86/sat-add.ll
+++ b/llvm/test/CodeGen/X86/sat-add.ll
@@ -236,7 +236,7 @@ define i16 @unsigned_sat_variable_i16_using_min(i16 %x, i16 %y) {
 ; ANY-NEXT: notl %eax
 ; ANY-NEXT: cmpw %ax, %di
 ; ANY-NEXT: cmovbl %edi, %eax
-; ANY-NEXT: leal (%rax,%rsi), %eax
+; ANY-NEXT: addl %esi, %eax
 ; ANY-NEXT: # kill: def $ax killed $ax killed $eax
 ; ANY-NEXT: retq
   %noty = xor i16 %y, -1
@@ -287,7 +287,7 @@ define i32 @unsigned_sat_variable_i32_using_min(i32 %x, i32 %y) {
 ; ANY-NEXT: notl %eax
 ; ANY-NEXT: cmpl %eax, %edi
 ; ANY-NEXT: cmovbl %edi, %eax
-; ANY-NEXT: leal (%rax,%rsi), %eax
+; ANY-NEXT: addl %esi, %eax
 ; ANY-NEXT: retq
   %noty = xor i32 %y, -1
   %c = icmp ult i32 %x, %noty
@@ -334,7 +334,7 @@ define i64 @unsigned_sat_variable_i64_using_min(i64 %x, i64 %y) {
 ; ANY-NEXT: notq %rax
 ; ANY-NEXT: cmpq %rax, %rdi
 ; ANY-NEXT: cmovbq %rdi, %rax
-; ANY-NEXT: leaq (%rax,%rsi), %rax
+; ANY-NEXT: addq %rsi, %rax
 ; ANY-NEXT: retq
   %noty = xor i64 %y, -1
   %c = icmp ult i64 %x, %noty
diff --git a/llvm/test/CodeGen/X86/twoaddr-lea.ll b/llvm/test/CodeGen/X86/twoaddr-lea.ll
index fdcd99adef53..077cf805bcb1 100644
--- a/llvm/test/CodeGen/X86/twoaddr-lea.ll
+++ b/llvm/test/CodeGen/X86/twoaddr-lea.ll
@@ -69,7 +69,7 @@ bb2:
 
 bb3:
 ; CHECK: subl %e[[REG0:[a-z0-9]+]],
-; CHECK: leaq 4({{%[a-z0-9]+}}), %r[[REG0]]
+; CHECK: addq $4, %r[[REG0]]
   %tmp14 = phi i64 [ %tmp15, %bb5 ], [ 0, %bb1 ]
   %tmp15 = add nuw i64 %tmp14, 4
   %tmp16 = trunc i64 %tmp14 to i32
diff --git a/llvm/test/CodeGen/X86/vector-bitreverse.ll b/llvm/test/CodeGen/X86/vector-bitreverse.ll
index bfbfda08cd0e..e152785b08f6 100644
--- a/llvm/test/CodeGen/X86/vector-bitreverse.ll
+++ b/llvm/test/CodeGen/X86/vector-bitreverse.ll
@@ -27,7 +27,7 @@ define i8 @test_bitreverse_i8(i8 %a) nounwind {
 ; SSE-NEXT: addb %al, %al
 ; SSE-NEXT: andb $-86, %dil
 ; SSE-NEXT: shrb %dil
-; SSE-NEXT: leal (%rdi,%rax), %eax
+; SSE-NEXT: addl %edi, %eax
 ; SSE-NEXT: # kill: def $al killed $al killed $eax
 ; SSE-NEXT: retq
 ;
@@ -46,7 +46,7 @@ define i8 @test_bitreverse_i8(i8 %a) nounwind {
 ; AVX-NEXT: addb %al, %al
 ; AVX-NEXT: andb $-86, %dil
 ; AVX-NEXT: shrb %dil
-; AVX-NEXT: leal (%rdi,%rax), %eax
+; AVX-NEXT: addl %edi, %eax
 ; AVX-NEXT: # kill: def $al killed $al killed $eax
 ; AVX-NEXT: retq
 ;
diff --git a/llvm/test/CodeGen/X86/win_coreclr_chkstk.ll b/llvm/test/CodeGen/X86/win_coreclr_chkstk.ll
index 54789dc32d25..86aa295b7c87 100644
--- a/llvm/test/CodeGen/X86/win_coreclr_chkstk.ll
+++ b/llvm/test/CodeGen/X86/win_coreclr_chkstk.ll
@@ -21,7 +21,7 @@ entry:
 ; WIN_X64:# %bb.1:
 ; WIN_X64: andq $-4096, %rdx
 ; WIN_X64:.LBB0_2:
-; WIN_X64: leaq -4096(%rcx), %rcx
+; WIN_X64: addq $-4096, %rcx
 ; WIN_X64: movb $0, (%rcx)
 ; WIN_X64: cmpq %rcx, %rdx
 ; WIN_X64: jne .LBB0_2
diff --git a/llvm/test/Transforms/LoopStrengthReduce/X86/ivchain-X86.ll b/llvm/test/Transforms/LoopStrengthReduce/X86/ivchain-X86.ll
index 0be39d3814a9..c17f4a8a233b 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/X86/ivchain-X86.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/X86/ivchain-X86.ll
@@ -243,14 +243,14 @@ define void @extrastride(i8* nocapture %main, i32 %main_stride, i32* nocapture %
 ; X32-NEXT: # =>This Inner Loop Header: Depth=1
 ; X32-NEXT: movl (%ebx,%esi), %ebp
 ; X32-NEXT: addl (%ebx), %ebp
-; X32-NEXT: leal (%ebx,%esi), %ebx
+; X32-NEXT: addl %esi, %ebx
 ; X32-NEXT: addl (%esi,%ebx), %ebp
-; X32-NEXT: leal (%ebx,%esi), %ebx
+; X32-NEXT: addl %esi, %ebx
 ; X32-NEXT: addl (%esi,%ebx), %ebp
-; X32-NEXT: leal (%ebx,%esi), %ebx
+; X32-NEXT: addl %esi, %ebx
 ; X32-NEXT: addl (%esi,%ebx), %ebp
 ; X32-NEXT: movl %ebp, (%edx)
-; X32-NEXT: leal (%ebx,%esi), %ebx
+; X32-NEXT: addl %esi, %ebx
 ; X32-NEXT: addl %edi, %ebx
 ; X32-NEXT: addl %ecx, %edx
 ; X32-NEXT: decl %eax