; llvm-project/llvm/test/CodeGen/X86/select.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-apple-darwin10 -mcpu=generic | FileCheck %s --check-prefix=CHECK --check-prefix=GENERIC
; RUN: llc < %s -mtriple=x86_64-apple-darwin10 -mcpu=atom | FileCheck %s --check-prefix=CHECK --check-prefix=ATOM
; RUN: llc < %s -mtriple=i386-apple-darwin10 -mcpu=athlon | FileCheck %s --check-prefix=ATHLON
; RUN: llc < %s -mtriple=i386-intel-elfiamcu | FileCheck %s --check-prefix=MCU
; PR5757
%0 = type { i64, i32 }
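; test1: select between two loaded aggregates, then extract field 1. The 64-bit
; checks expect the select to become a cmov of the two field addresses followed
; by a single 32-bit load; the MCU checks (no cmov) branch instead.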
define i32 @test1(%0* %p, %0* %q, i1 %r) nounwind {
; CHECK-LABEL: test1:
; CHECK: ## %bb.0:
; CHECK-NEXT: addq $8, %rdi
; CHECK-NEXT: addq $8, %rsi
; CHECK-NEXT: testb $1, %dl
; CHECK-NEXT: cmovneq %rdi, %rsi
; CHECK-NEXT: movl (%rsi), %eax
; CHECK-NEXT: retq
;
; ATHLON-LABEL: test1:
; ATHLON: ## %bb.0:
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %ecx
; ATHLON-NEXT: addl $8, %ecx
; ATHLON-NEXT: addl $8, %eax
; ATHLON-NEXT: testb $1, {{[0-9]+}}(%esp)
; ATHLON-NEXT: cmovnel %ecx, %eax
; ATHLON-NEXT: movl (%eax), %eax
; ATHLON-NEXT: retl
;
; MCU-LABEL: test1:
; MCU: # %bb.0:
; MCU-NEXT: testb $1, %cl
; MCU-NEXT: jne .LBB0_1
; MCU-NEXT: # %bb.2:
; MCU-NEXT: addl $8, %edx
; MCU-NEXT: movl (%edx), %eax
; MCU-NEXT: retl
; MCU-NEXT: .LBB0_1:
; MCU-NEXT: addl $8, %eax
; MCU-NEXT: movl (%eax), %eax
; MCU-NEXT: retl
%t0 = load %0, %0* %p
%t1 = load %0, %0* %q
%t4 = select i1 %r, %0 %t0, %0 %t1
%t5 = extractvalue %0 %t4, 1
ret i32 %t5
}
; PR2139
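; test2: a select between the constants 0 and -480 feeds a sext/shl/compare.
; Targets with cmov fold the constant select into a cmovne; the MCU checks
; branch over the constant load instead.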
define i32 @test2() nounwind {
; GENERIC-LABEL: test2:
; GENERIC: ## %bb.0: ## %entry
; GENERIC-NEXT: pushq %rax
; GENERIC-NEXT: callq _return_false
; GENERIC-NEXT: xorl %ecx, %ecx
; GENERIC-NEXT: testb $1, %al
; GENERIC-NEXT: movl $-480, %eax ## imm = 0xFE20
; GENERIC-NEXT: cmovnel %ecx, %eax
; GENERIC-NEXT: shll $3, %eax
; GENERIC-NEXT: cmpl $32768, %eax ## imm = 0x8000
; GENERIC-NEXT: jge LBB1_1
; GENERIC-NEXT: ## %bb.2: ## %bb91
; GENERIC-NEXT: xorl %eax, %eax
; GENERIC-NEXT: popq %rcx
; GENERIC-NEXT: retq
; GENERIC-NEXT: LBB1_1: ## %bb90
; GENERIC-NEXT: ud2
;
; ATOM-LABEL: test2:
; ATOM: ## %bb.0: ## %entry
; ATOM-NEXT: pushq %rax
; ATOM-NEXT: callq _return_false
; ATOM-NEXT: xorl %ecx, %ecx
; ATOM-NEXT: movl $-480, %edx ## imm = 0xFE20
; ATOM-NEXT: testb $1, %al
; ATOM-NEXT: cmovnel %ecx, %edx
; ATOM-NEXT: shll $3, %edx
; ATOM-NEXT: cmpl $32768, %edx ## imm = 0x8000
; ATOM-NEXT: jge LBB1_1
; ATOM-NEXT: ## %bb.2: ## %bb91
; ATOM-NEXT: xorl %eax, %eax
; ATOM-NEXT: popq %rcx
; ATOM-NEXT: retq
; ATOM-NEXT: LBB1_1: ## %bb90
; ATOM-NEXT: ud2
;
; ATHLON-LABEL: test2:
; ATHLON: ## %bb.0: ## %entry
; ATHLON-NEXT: subl $12, %esp
; ATHLON-NEXT: calll _return_false
; ATHLON-NEXT: xorl %ecx, %ecx
; ATHLON-NEXT: testb $1, %al
; ATHLON-NEXT: movl $-480, %eax ## imm = 0xFE20
; ATHLON-NEXT: cmovnel %ecx, %eax
; ATHLON-NEXT: shll $3, %eax
; ATHLON-NEXT: cmpl $32768, %eax ## imm = 0x8000
; ATHLON-NEXT: jge LBB1_1
; ATHLON-NEXT: ## %bb.2: ## %bb91
; ATHLON-NEXT: xorl %eax, %eax
; ATHLON-NEXT: addl $12, %esp
; ATHLON-NEXT: retl
; ATHLON-NEXT: LBB1_1: ## %bb90
; ATHLON-NEXT: ud2
;
; MCU-LABEL: test2:
; MCU: # %bb.0: # %entry
; MCU-NEXT: calll return_false
; MCU-NEXT: xorl %ecx, %ecx
; MCU-NEXT: testb $1, %al
; MCU-NEXT: jne .LBB1_2
; MCU-NEXT: # %bb.1: # %entry
; MCU-NEXT: movl $-480, %ecx # imm = 0xFE20
; MCU-NEXT: .LBB1_2: # %entry
; MCU-NEXT: shll $3, %ecx
; MCU-NEXT: cmpl $32768, %ecx # imm = 0x8000
; MCU-NEXT: jge .LBB1_3
; MCU-NEXT: # %bb.4: # %bb91
; MCU-NEXT: xorl %eax, %eax
; MCU-NEXT: retl
; MCU-NEXT: .LBB1_3: # %bb90
entry:
%tmp73 = tail call i1 @return_false()
%g.0 = select i1 %tmp73, i16 0, i16 -480
%tmp7778 = sext i16 %g.0 to i32
%tmp80 = shl i32 %tmp7778, 3
%tmp87 = icmp sgt i32 %tmp80, 32767
br i1 %tmp87, label %bb90, label %bb91
bb90:
unreachable
bb91:
ret i32 0
}
declare i1 @return_false()
;; Select between two floating point constants.
define float @test3(i32 %x) nounwind readnone {
; GENERIC-LABEL: test3:
; GENERIC: ## %bb.0: ## %entry
; GENERIC-NEXT: xorl %eax, %eax
; GENERIC-NEXT: testl %edi, %edi
; GENERIC-NEXT: sete %al
; GENERIC-NEXT: leaq {{.*}}(%rip), %rcx
; GENERIC-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; GENERIC-NEXT: retq
;
; ATOM-LABEL: test3:
; ATOM: ## %bb.0: ## %entry
; ATOM-NEXT: xorl %eax, %eax
; ATOM-NEXT: leaq {{.*}}(%rip), %rcx
; ATOM-NEXT: testl %edi, %edi
; ATOM-NEXT: sete %al
; ATOM-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; ATOM-NEXT: retq
;
; ATHLON-LABEL: test3:
; ATHLON: ## %bb.0: ## %entry
; ATHLON-NEXT: xorl %eax, %eax
; ATHLON-NEXT: cmpl $0, {{[0-9]+}}(%esp)
; ATHLON-NEXT: sete %al
; ATHLON-NEXT: flds LCPI2_0(,%eax,4)
; ATHLON-NEXT: retl
;
; MCU-LABEL: test3:
; MCU: # %bb.0: # %entry
; MCU-NEXT: xorl %ecx, %ecx
; MCU-NEXT: testl %eax, %eax
; MCU-NEXT: sete %cl
; MCU-NEXT: flds {{\.LCPI.*}}(,%ecx,4)
; MCU-NEXT: retl
entry:
%0 = icmp eq i32 %x, 0
%iftmp.0.0 = select i1 %0, float 4.200000e+01, float 2.300000e+01
ret float %iftmp.0.0
}
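; test4: an fcmp-driven select between GEP offsets 4 and 0. The checks fold the
; compare result (seta) straight into the load address as a scaled index
; instead of branching.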
define signext i8 @test4(i8* nocapture %P, double %F) nounwind readonly {
; CHECK-LABEL: test4:
; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: ucomisd %xmm0, %xmm1
; CHECK-NEXT: seta %al
; CHECK-NEXT: movsbl (%rdi,%rax,4), %eax
; CHECK-NEXT: retq
;
; ATHLON-LABEL: test4:
; ATHLON: ## %bb.0: ## %entry
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: fldl {{[0-9]+}}(%esp)
; ATHLON-NEXT: flds LCPI3_0
; ATHLON-NEXT: xorl %ecx, %ecx
; ATHLON-NEXT: fucompi %st(1)
; ATHLON-NEXT: fstp %st(0)
; ATHLON-NEXT: seta %cl
; ATHLON-NEXT: movsbl (%eax,%ecx,4), %eax
; ATHLON-NEXT: retl
;
; MCU-LABEL: test4:
; MCU: # %bb.0: # %entry
; MCU-NEXT: movl %eax, %ecx
; MCU-NEXT: fldl {{[0-9]+}}(%esp)
; MCU-NEXT: flds {{\.LCPI.*}}
; MCU-NEXT: fucompp
; MCU-NEXT: fnstsw %ax
; MCU-NEXT: xorl %edx, %edx
; MCU-NEXT: # kill: def $ah killed $ah killed $ax
; MCU-NEXT: sahf
; MCU-NEXT: seta %dl
; MCU-NEXT: movb (%ecx,%edx,4), %al
; MCU-NEXT: retl
entry:
%0 = fcmp olt double %F, 4.200000e+01
%iftmp.0.0 = select i1 %0, i32 4, i32 0
%1 = getelementptr i8, i8* %P, i32 %iftmp.0.0
%2 = load i8, i8* %1, align 1
ret i8 %2
}
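; test5: select between two <2 x i16> arguments. The x86-64 checks keep the
; value in an XMM register behind a branch; the 32-bit targets handle the two
; i16 elements separately.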
define void @test5(i1 %c, <2 x i16> %a, <2 x i16> %b, <2 x i16>* %p) nounwind {
; CHECK-LABEL: test5:
; CHECK: ## %bb.0:
; CHECK-NEXT: testb $1, %dil
; CHECK-NEXT: jne LBB4_2
; CHECK-NEXT: ## %bb.1:
; CHECK-NEXT: movdqa %xmm1, %xmm0
; CHECK-NEXT: LBB4_2:
; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; CHECK-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; CHECK-NEXT: movd %xmm0, (%rsi)
; CHECK-NEXT: retq
;
; ATHLON-LABEL: test5:
; ATHLON: ## %bb.0:
; ATHLON-NEXT: pushl %esi
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: testb $1, {{[0-9]+}}(%esp)
; ATHLON-NEXT: leal {{[0-9]+}}(%esp), %ecx
; ATHLON-NEXT: leal {{[0-9]+}}(%esp), %edx
; ATHLON-NEXT: cmovnel %ecx, %edx
; ATHLON-NEXT: movzwl (%edx), %ecx
; ATHLON-NEXT: leal {{[0-9]+}}(%esp), %edx
; ATHLON-NEXT: leal {{[0-9]+}}(%esp), %esi
; ATHLON-NEXT: cmovnel %edx, %esi
; ATHLON-NEXT: movzwl (%esi), %edx
; ATHLON-NEXT: movw %dx, 2(%eax)
; ATHLON-NEXT: movw %cx, (%eax)
; ATHLON-NEXT: popl %esi
; ATHLON-NEXT: retl
;
; MCU-LABEL: test5:
; MCU: # %bb.0:
; MCU-NEXT: pushl %esi
; MCU-NEXT: movl {{[0-9]+}}(%esp), %esi
; MCU-NEXT: testb $1, %al
; MCU-NEXT: jne .LBB4_2
; MCU-NEXT: # %bb.1:
; MCU-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
; MCU-NEXT: movzwl {{[0-9]+}}(%esp), %edx
; MCU-NEXT: .LBB4_2:
; MCU-NEXT: movw %cx, 2(%esi)
; MCU-NEXT: movw %dx, (%esi)
; MCU-NEXT: popl %esi
; MCU-NEXT: retl
%x = select i1 %c, <2 x i16> %a, <2 x i16> %b
store <2 x i16> %x, <2 x i16>* %p
ret void
}
; Verify that the fmul gets sunk into the one part of the diamond where it is needed.
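; In the x86-64 checks this shows up as a branch diamond: only the path that
; needs the squared value performs the mulps, the other path just copies the
; loaded vector.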
define void @test6(i32 %C, <4 x float>* %A, <4 x float>* %B) nounwind {
; CHECK-LABEL: test6:
; CHECK: ## %bb.0:
; CHECK-NEXT: testl %edi, %edi
; CHECK-NEXT: je LBB5_1
; CHECK-NEXT: ## %bb.2:
; CHECK-NEXT: movaps (%rsi), %xmm0
; CHECK-NEXT: movaps %xmm0, (%rsi)
; CHECK-NEXT: retq
; CHECK-NEXT: LBB5_1:
; CHECK-NEXT: movaps (%rdx), %xmm0
; CHECK-NEXT: mulps %xmm0, %xmm0
; CHECK-NEXT: movaps %xmm0, (%rsi)
; CHECK-NEXT: retq
;
; ATHLON-LABEL: test6:
; ATHLON: ## %bb.0:
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %ecx
; ATHLON-NEXT: flds 12(%ecx)
; ATHLON-NEXT: flds 8(%ecx)
; ATHLON-NEXT: flds 4(%ecx)
; ATHLON-NEXT: flds (%ecx)
; ATHLON-NEXT: flds (%eax)
; ATHLON-NEXT: fmul %st(0), %st(0)
; ATHLON-NEXT: cmpl $0, {{[0-9]+}}(%esp)
; ATHLON-NEXT: fxch %st(1)
; ATHLON-NEXT: fcmove %st(1), %st(0)
; ATHLON-NEXT: fstp %st(1)
; ATHLON-NEXT: flds 4(%eax)
; ATHLON-NEXT: fmul %st(0), %st(0)
; ATHLON-NEXT: fxch %st(2)
; ATHLON-NEXT: fcmove %st(2), %st(0)
; ATHLON-NEXT: fstp %st(2)
; ATHLON-NEXT: flds 8(%eax)
; ATHLON-NEXT: fmul %st(0), %st(0)
; ATHLON-NEXT: fxch %st(3)
; ATHLON-NEXT: fcmove %st(3), %st(0)
; ATHLON-NEXT: fstp %st(3)
; ATHLON-NEXT: flds 12(%eax)
; ATHLON-NEXT: fmul %st(0), %st(0)
; ATHLON-NEXT: fxch %st(4)
; ATHLON-NEXT: fcmove %st(4), %st(0)
; ATHLON-NEXT: fstp %st(4)
; ATHLON-NEXT: fxch %st(3)
; ATHLON-NEXT: fstps 12(%ecx)
; ATHLON-NEXT: fxch %st(1)
; ATHLON-NEXT: fstps 8(%ecx)
; ATHLON-NEXT: fstps 4(%ecx)
; ATHLON-NEXT: fstps (%ecx)
; ATHLON-NEXT: retl
;
; MCU-LABEL: test6:
; MCU: # %bb.0:
; MCU-NEXT: pushl %eax
; MCU-NEXT: flds 12(%edx)
; MCU-NEXT: fstps (%esp) # 4-byte Folded Spill
; MCU-NEXT: flds 8(%edx)
; MCU-NEXT: flds 4(%edx)
; MCU-NEXT: flds (%ecx)
; MCU-NEXT: flds 4(%ecx)
; MCU-NEXT: flds 8(%ecx)
; MCU-NEXT: flds 12(%ecx)
; MCU-NEXT: fmul %st(0), %st(0)
; MCU-NEXT: fxch %st(1)
; MCU-NEXT: fmul %st(0), %st(0)
; MCU-NEXT: fxch %st(2)
; MCU-NEXT: fmul %st(0), %st(0)
; MCU-NEXT: fxch %st(3)
; MCU-NEXT: fmul %st(0), %st(0)
; MCU-NEXT: testl %eax, %eax
; MCU-NEXT: flds (%edx)
; MCU-NEXT: je .LBB5_2
; MCU-NEXT: # %bb.1:
; MCU-NEXT: fstp %st(1)
; MCU-NEXT: fstp %st(3)
; MCU-NEXT: fstp %st(1)
; MCU-NEXT: fstp %st(0)
; MCU-NEXT: flds (%esp) # 4-byte Folded Reload
; MCU-NEXT: fldz
; MCU-NEXT: fldz
; MCU-NEXT: fldz
; MCU-NEXT: fxch %st(1)
; MCU-NEXT: fxch %st(6)
; MCU-NEXT: fxch %st(1)
; MCU-NEXT: fxch %st(5)
; MCU-NEXT: fxch %st(4)
; MCU-NEXT: fxch %st(1)
; MCU-NEXT: fxch %st(3)
; MCU-NEXT: fxch %st(2)
; MCU-NEXT: .LBB5_2:
; MCU-NEXT: fstp %st(0)
; MCU-NEXT: fstp %st(5)
; MCU-NEXT: fstp %st(3)
; MCU-NEXT: fxch %st(2)
; MCU-NEXT: fstps 12(%edx)
; MCU-NEXT: fxch %st(1)
; MCU-NEXT: fstps 8(%edx)
; MCU-NEXT: fstps 4(%edx)
; MCU-NEXT: fstps (%edx)
; MCU-NEXT: popl %eax
; MCU-NEXT: retl
%tmp = load <4 x float>, <4 x float>* %A
%tmp3 = load <4 x float>, <4 x float>* %B
%tmp9 = fmul <4 x float> %tmp3, %tmp3
%tmp.upgrd.1 = icmp eq i32 %C, 0
%iftmp.38.0 = select i1 %tmp.upgrd.1, <4 x float> %tmp9, <4 x float> %tmp
store <4 x float> %iftmp.38.0, <4 x float>* %A
ret void
}
; Select with fp80s
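; The checks expect the sign test to become a 0-or-16 byte offset (setns +
; shlq $4, or notl + shrl $27 + andl $-16) that indexes the constant pool for a
; single fldt, rather than a branch.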
define x86_fp80 @test7(i32 %tmp8) nounwind {
; GENERIC-LABEL: test7:
; GENERIC: ## %bb.0:
; GENERIC-NEXT: xorl %eax, %eax
; GENERIC-NEXT: testl %edi, %edi
; GENERIC-NEXT: setns %al
; GENERIC-NEXT: shlq $4, %rax
; GENERIC-NEXT: leaq {{.*}}(%rip), %rcx
; GENERIC-NEXT: fldt (%rax,%rcx)
; GENERIC-NEXT: retq
;
; ATOM-LABEL: test7:
; ATOM: ## %bb.0:
; ATOM-NEXT: xorl %eax, %eax
; ATOM-NEXT: leaq {{.*}}(%rip), %rcx
; ATOM-NEXT: testl %edi, %edi
; ATOM-NEXT: setns %al
; ATOM-NEXT: shlq $4, %rax
; ATOM-NEXT: fldt (%rax,%rcx)
; ATOM-NEXT: retq
;
; ATHLON-LABEL: test7:
; ATHLON: ## %bb.0:
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: notl %eax
; ATHLON-NEXT: shrl $27, %eax
; ATHLON-NEXT: andl $-16, %eax
; ATHLON-NEXT: fldt LCPI6_0(%eax)
; ATHLON-NEXT: retl
;
; MCU-LABEL: test7:
; MCU: # %bb.0:
; MCU-NEXT: notl %eax
; MCU-NEXT: shrl $27, %eax
; MCU-NEXT: andl $-16, %eax
; MCU-NEXT: fldt {{\.LCPI.*}}(%eax)
; MCU-NEXT: retl
%tmp9 = icmp sgt i32 %tmp8, -1
%retval = select i1 %tmp9, x86_fp80 0xK4005B400000000000000, x86_fp80 0xK40078700000000000000
ret x86_fp80 %retval
}
; Widening select of v6i32 and then a sub
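; The sub of the splat-1 vector is expected to become an add of an all-ones
; vector: the GENERIC/ATOM checks materialize -1 with pcmpeqd and use paddd
; instead of loading a constant.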
define void @test8(i1 %c, <6 x i32>* %dst.addr, <6 x i32> %src1,<6 x i32> %src2) nounwind {
; GENERIC-LABEL: test8:
; GENERIC: ## %bb.0:
; GENERIC-NEXT: testb $1, %dil
; GENERIC-NEXT: jne LBB7_1
; GENERIC-NEXT: ## %bb.2:
; GENERIC-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; GENERIC-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; GENERIC-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; GENERIC-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
; GENERIC-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; GENERIC-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; GENERIC-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; GENERIC-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
; GENERIC-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; GENERIC-NEXT: jmp LBB7_3
; GENERIC-NEXT: LBB7_1:
; GENERIC-NEXT: movd %r9d, %xmm0
; GENERIC-NEXT: movd %r8d, %xmm1
; GENERIC-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; GENERIC-NEXT: movd %ecx, %xmm2
; GENERIC-NEXT: movd %edx, %xmm0
; GENERIC-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; GENERIC-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; GENERIC-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
; GENERIC-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; GENERIC-NEXT: LBB7_3:
; GENERIC-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; GENERIC-NEXT: pcmpeqd %xmm2, %xmm2
; GENERIC-NEXT: paddd %xmm2, %xmm0
; GENERIC-NEXT: paddd %xmm2, %xmm1
; GENERIC-NEXT: movq %xmm1, 16(%rsi)
; GENERIC-NEXT: movdqa %xmm0, (%rsi)
; GENERIC-NEXT: retq
;
; ATOM-LABEL: test8:
; ATOM: ## %bb.0:
; ATOM-NEXT: testb $1, %dil
; ATOM-NEXT: jne LBB7_1
; ATOM-NEXT: ## %bb.2:
; ATOM-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; ATOM-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
; ATOM-NEXT: movd {{.*#+}} xmm3 = mem[0],zero,zero,zero
; ATOM-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; ATOM-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; ATOM-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
; ATOM-NEXT: movd {{.*#+}} xmm3 = mem[0],zero,zero,zero
; ATOM-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; ATOM-NEXT: jmp LBB7_3
; ATOM-NEXT: LBB7_1:
; ATOM-NEXT: movd %r9d, %xmm1
; ATOM-NEXT: movd %r8d, %xmm2
; ATOM-NEXT: movd %ecx, %xmm3
; ATOM-NEXT: movd %edx, %xmm0
; ATOM-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; ATOM-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
; ATOM-NEXT: movd {{.*#+}} xmm3 = mem[0],zero,zero,zero
; ATOM-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; ATOM-NEXT: LBB7_3:
; ATOM-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; ATOM-NEXT: pcmpeqd %xmm2, %xmm2
; ATOM-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
; ATOM-NEXT: paddd %xmm2, %xmm0
; ATOM-NEXT: paddd %xmm2, %xmm1
; ATOM-NEXT: movq %xmm1, 16(%rsi)
; ATOM-NEXT: movdqa %xmm0, (%rsi)
; ATOM-NEXT: retq
;
; ATHLON-LABEL: test8:
; ATHLON: ## %bb.0:
; ATHLON-NEXT: pushl %ebp
; ATHLON-NEXT: pushl %ebx
; ATHLON-NEXT: pushl %edi
; ATHLON-NEXT: pushl %esi
; ATHLON-NEXT: testb $1, {{[0-9]+}}(%esp)
; ATHLON-NEXT: leal {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: leal {{[0-9]+}}(%esp), %ecx
; ATHLON-NEXT: cmovnel %eax, %ecx
; ATHLON-NEXT: leal {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: leal {{[0-9]+}}(%esp), %edx
; ATHLON-NEXT: cmovnel %eax, %edx
; ATHLON-NEXT: leal {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: leal {{[0-9]+}}(%esp), %esi
; ATHLON-NEXT: cmovnel %eax, %esi
; ATHLON-NEXT: leal {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: leal {{[0-9]+}}(%esp), %edi
; ATHLON-NEXT: cmovnel %eax, %edi
; ATHLON-NEXT: leal {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: leal {{[0-9]+}}(%esp), %ebx
; ATHLON-NEXT: cmovnel %eax, %ebx
; ATHLON-NEXT: leal {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: leal {{[0-9]+}}(%esp), %ebp
; ATHLON-NEXT: cmovnel %eax, %ebp
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: movl (%ecx), %ecx
; ATHLON-NEXT: movl (%edx), %edx
; ATHLON-NEXT: movl (%esi), %esi
; ATHLON-NEXT: movl (%edi), %edi
; ATHLON-NEXT: movl (%ebx), %ebx
; ATHLON-NEXT: movl (%ebp), %ebp
; ATHLON-NEXT: decl %ecx
; ATHLON-NEXT: movl %ecx, 20(%eax)
; ATHLON-NEXT: decl %edx
; ATHLON-NEXT: movl %edx, 16(%eax)
; ATHLON-NEXT: decl %esi
; ATHLON-NEXT: movl %esi, 12(%eax)
; ATHLON-NEXT: decl %edi
; ATHLON-NEXT: movl %edi, 8(%eax)
; ATHLON-NEXT: decl %ebx
; ATHLON-NEXT: movl %ebx, 4(%eax)
; ATHLON-NEXT: decl %ebp
; ATHLON-NEXT: movl %ebp, (%eax)
; ATHLON-NEXT: popl %esi
; ATHLON-NEXT: popl %edi
; ATHLON-NEXT: popl %ebx
; ATHLON-NEXT: popl %ebp
; ATHLON-NEXT: retl
;
; MCU-LABEL: test8:
; MCU: # %bb.0:
; MCU-NEXT: pushl %ebp
; MCU-NEXT: pushl %ebx
; MCU-NEXT: pushl %edi
; MCU-NEXT: pushl %esi
; MCU-NEXT: testb $1, %al
; MCU-NEXT: jne .LBB7_1
; MCU-NEXT: # %bb.2:
; MCU-NEXT: leal {{[0-9]+}}(%esp), %eax
; MCU-NEXT: movl (%eax), %eax
; MCU-NEXT: je .LBB7_5
; MCU-NEXT: .LBB7_4:
; MCU-NEXT: leal {{[0-9]+}}(%esp), %ecx
; MCU-NEXT: movl (%ecx), %ecx
; MCU-NEXT: je .LBB7_8
; MCU-NEXT: .LBB7_7:
; MCU-NEXT: leal {{[0-9]+}}(%esp), %esi
; MCU-NEXT: movl (%esi), %esi
; MCU-NEXT: je .LBB7_11
; MCU-NEXT: .LBB7_10:
; MCU-NEXT: leal {{[0-9]+}}(%esp), %edi
; MCU-NEXT: movl (%edi), %edi
; MCU-NEXT: je .LBB7_14
; MCU-NEXT: .LBB7_13:
; MCU-NEXT: leal {{[0-9]+}}(%esp), %ebx
; MCU-NEXT: movl (%ebx), %ebx
; MCU-NEXT: je .LBB7_17
; MCU-NEXT: .LBB7_16:
; MCU-NEXT: leal {{[0-9]+}}(%esp), %ebp
; MCU-NEXT: jmp .LBB7_18
; MCU-NEXT: .LBB7_1:
; MCU-NEXT: leal {{[0-9]+}}(%esp), %eax
; MCU-NEXT: movl (%eax), %eax
; MCU-NEXT: jne .LBB7_4
; MCU-NEXT: .LBB7_5:
; MCU-NEXT: leal {{[0-9]+}}(%esp), %ecx
; MCU-NEXT: movl (%ecx), %ecx
; MCU-NEXT: jne .LBB7_7
; MCU-NEXT: .LBB7_8:
; MCU-NEXT: leal {{[0-9]+}}(%esp), %esi
; MCU-NEXT: movl (%esi), %esi
; MCU-NEXT: jne .LBB7_10
; MCU-NEXT: .LBB7_11:
; MCU-NEXT: leal {{[0-9]+}}(%esp), %edi
; MCU-NEXT: movl (%edi), %edi
; MCU-NEXT: jne .LBB7_13
; MCU-NEXT: .LBB7_14:
; MCU-NEXT: leal {{[0-9]+}}(%esp), %ebx
; MCU-NEXT: movl (%ebx), %ebx
; MCU-NEXT: jne .LBB7_16
; MCU-NEXT: .LBB7_17:
; MCU-NEXT: leal {{[0-9]+}}(%esp), %ebp
; MCU-NEXT: .LBB7_18:
; MCU-NEXT: movl (%ebp), %ebp
; MCU-NEXT: decl %ebp
; MCU-NEXT: decl %ebx
; MCU-NEXT: decl %edi
; MCU-NEXT: decl %esi
; MCU-NEXT: decl %ecx
; MCU-NEXT: decl %eax
; MCU-NEXT: movl %eax, 20(%edx)
; MCU-NEXT: movl %ecx, 16(%edx)
; MCU-NEXT: movl %esi, 12(%edx)
; MCU-NEXT: movl %edi, 8(%edx)
; MCU-NEXT: movl %ebx, 4(%edx)
; MCU-NEXT: movl %ebp, (%edx)
; MCU-NEXT: popl %esi
; MCU-NEXT: popl %edi
; MCU-NEXT: popl %ebx
; MCU-NEXT: popl %ebp
; MCU-NEXT: retl
%x = select i1 %c, <6 x i32> %src1, <6 x i32> %src2
%val = sub <6 x i32> %x, < i32 1, i32 1, i32 1, i32 1, i32 1, i32 1 >
store <6 x i32> %val, <6 x i32>* %dst.addr
ret void
}
;; Test integer select between values and constants.
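; test9: select of %y or -1 depending on %x != 0. cmpq $1 + sbbq produces an
; all-ones mask exactly when %x is zero, and or'ing it with %y yields -1 or %y
; without a branch or cmov.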
define i64 @test9(i64 %x, i64 %y) nounwind readnone ssp noredzone {
; CHECK-LABEL: test9:
; CHECK: ## %bb.0:
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: cmpq $1, %rdi
; CHECK-NEXT: sbbq %rax, %rax
; CHECK-NEXT: orq %rsi, %rax
; CHECK-NEXT: retq
;
; ATHLON-LABEL: test9:
; ATHLON: ## %bb.0:
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: orl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: movl $-1, %eax
; ATHLON-NEXT: movl $-1, %edx
; ATHLON-NEXT: je LBB8_2
; ATHLON-NEXT: ## %bb.1:
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %edx
; ATHLON-NEXT: LBB8_2:
; ATHLON-NEXT: retl
;
; MCU-LABEL: test9:
; MCU: # %bb.0:
; MCU-NEXT: orl %edx, %eax
; MCU-NEXT: jne .LBB8_1
; MCU-NEXT: # %bb.2:
; MCU-NEXT: movl $-1, %eax
; MCU-NEXT: movl $-1, %edx
; MCU-NEXT: retl
; MCU-NEXT: .LBB8_1:
; MCU-NEXT: movl {{[0-9]+}}(%esp), %eax
; MCU-NEXT: movl {{[0-9]+}}(%esp), %edx
; MCU-NEXT: retl
%cmp = icmp ne i64 %x, 0
%cond = select i1 %cmp, i64 %y, i64 -1
ret i64 %cond
}
;; Same as test9
define i64 @test9a(i64 %x, i64 %y) nounwind readnone ssp noredzone {
; CHECK-LABEL: test9a:
; CHECK: ## %bb.0:
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: cmpq $1, %rdi
; CHECK-NEXT: sbbq %rax, %rax
; CHECK-NEXT: orq %rsi, %rax
; CHECK-NEXT: retq
;
; ATHLON-LABEL: test9a:
; ATHLON: ## %bb.0:
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: orl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: movl $-1, %eax
; ATHLON-NEXT: movl $-1, %edx
; ATHLON-NEXT: je LBB9_2
; ATHLON-NEXT: ## %bb.1:
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %edx
; ATHLON-NEXT: LBB9_2:
; ATHLON-NEXT: retl
;
; MCU-LABEL: test9a:
; MCU: # %bb.0:
; MCU-NEXT: orl %edx, %eax
; MCU-NEXT: movl $-1, %eax
; MCU-NEXT: movl $-1, %edx
; MCU-NEXT: je .LBB9_2
; MCU-NEXT: # %bb.1:
; MCU-NEXT: movl {{[0-9]+}}(%esp), %eax
; MCU-NEXT: movl {{[0-9]+}}(%esp), %edx
; MCU-NEXT: .LBB9_2:
; MCU-NEXT: retl
%cmp = icmp eq i64 %x, 0
%cond = select i1 %cmp, i64 -1, i64 %y
ret i64 %cond
}
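; test9b: the same select expressed as an explicit sext-of-i1 or'd with %y; the
; x86-64 checks expect essentially the same cmp/sbb/or sequence as test9.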
define i64 @test9b(i64 %x, i64 %y) nounwind readnone ssp noredzone {
; GENERIC-LABEL: test9b:
; GENERIC: ## %bb.0:
; GENERIC-NEXT: cmpq $1, %rdi
; GENERIC-NEXT: sbbq %rax, %rax
; GENERIC-NEXT: orq %rsi, %rax
; GENERIC-NEXT: retq
;
; ATOM-LABEL: test9b:
; ATOM: ## %bb.0:
; ATOM-NEXT: cmpq $1, %rdi
; ATOM-NEXT: sbbq %rax, %rax
; ATOM-NEXT: orq %rsi, %rax
; ATOM-NEXT: nop
; ATOM-NEXT: nop
; ATOM-NEXT: retq
;
; ATHLON-LABEL: test9b:
; ATHLON: ## %bb.0:
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: xorl %edx, %edx
; ATHLON-NEXT: orl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: sete %dl
; ATHLON-NEXT: negl %edx
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: orl %edx, %eax
; ATHLON-NEXT: orl {{[0-9]+}}(%esp), %edx
; ATHLON-NEXT: retl
;
; MCU-LABEL: test9b:
; MCU: # %bb.0:
; MCU-NEXT: movl %edx, %ecx
; MCU-NEXT: xorl %edx, %edx
; MCU-NEXT: orl %ecx, %eax
; MCU-NEXT: sete %dl
; MCU-NEXT: negl %edx
; MCU-NEXT: movl {{[0-9]+}}(%esp), %eax
; MCU-NEXT: orl %edx, %eax
; MCU-NEXT: orl {{[0-9]+}}(%esp), %edx
; MCU-NEXT: retl
%cmp = icmp eq i64 %x, 0
%A = sext i1 %cmp to i64
%cond = or i64 %y, %A
ret i64 %cond
}
;; Select between -1 and 1.
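; setne gives 0 or 1, and leaq -1(%rax,%rax) computes 2*cond - 1, i.e. 1 or -1,
; with no cmov or branch on the 64-bit targets.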
define i64 @test10(i64 %x, i64 %y) nounwind readnone ssp noredzone {
; CHECK-LABEL: test10:
; CHECK: ## %bb.0:
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: testq %rdi, %rdi
; CHECK-NEXT: setne %al
; CHECK-NEXT: leaq -1(%rax,%rax), %rax
; CHECK-NEXT: retq
;
; ATHLON-LABEL: test10:
; ATHLON: ## %bb.0:
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: xorl %edx, %edx
; ATHLON-NEXT: orl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: movl $-1, %ecx
; ATHLON-NEXT: movl $1, %eax
; ATHLON-NEXT: cmovel %ecx, %eax
; ATHLON-NEXT: cmovel %ecx, %edx
; ATHLON-NEXT: retl
;
; MCU-LABEL: test10:
; MCU: # %bb.0:
; MCU-NEXT: orl %edx, %eax
; MCU-NEXT: movl $-1, %eax
; MCU-NEXT: movl $-1, %edx
; MCU-NEXT: je .LBB11_2
; MCU-NEXT: # %bb.1:
; MCU-NEXT: xorl %edx, %edx
; MCU-NEXT: movl $1, %eax
; MCU-NEXT: .LBB11_2:
; MCU-NEXT: retl
%cmp = icmp eq i64 %x, 0
%cond = select i1 %cmp, i64 -1, i64 1
ret i64 %cond
}
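; test11/test11a: like test9/test9a but with the select condition inverted, so
; the sbb mask is complemented with notq before the or.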
define i64 @test11(i64 %x, i64 %y) nounwind readnone ssp noredzone {
; CHECK-LABEL: test11:
; CHECK: ## %bb.0:
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: cmpq $1, %rdi
; CHECK-NEXT: sbbq %rax, %rax
; CHECK-NEXT: notq %rax
; CHECK-NEXT: orq %rsi, %rax
; CHECK-NEXT: retq
;
; ATHLON-LABEL: test11:
; ATHLON: ## %bb.0:
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: orl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: movl $-1, %eax
; ATHLON-NEXT: movl $-1, %edx
; ATHLON-NEXT: jne LBB12_2
; ATHLON-NEXT: ## %bb.1:
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %edx
; ATHLON-NEXT: LBB12_2:
; ATHLON-NEXT: retl
;
; MCU-LABEL: test11:
; MCU: # %bb.0:
; MCU-NEXT: orl %edx, %eax
; MCU-NEXT: je .LBB12_1
; MCU-NEXT: # %bb.2:
; MCU-NEXT: movl $-1, %eax
; MCU-NEXT: movl $-1, %edx
; MCU-NEXT: retl
; MCU-NEXT: .LBB12_1:
; MCU-NEXT: movl {{[0-9]+}}(%esp), %eax
; MCU-NEXT: movl {{[0-9]+}}(%esp), %edx
; MCU-NEXT: retl
%cmp = icmp eq i64 %x, 0
%cond = select i1 %cmp, i64 %y, i64 -1
ret i64 %cond
}
define i64 @test11a(i64 %x, i64 %y) nounwind readnone ssp noredzone {
; CHECK-LABEL: test11a:
; CHECK: ## %bb.0:
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: cmpq $1, %rdi
; CHECK-NEXT: sbbq %rax, %rax
; CHECK-NEXT: notq %rax
; CHECK-NEXT: orq %rsi, %rax
; CHECK-NEXT: retq
;
; ATHLON-LABEL: test11a:
; ATHLON: ## %bb.0:
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: orl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: movl $-1, %eax
; ATHLON-NEXT: movl $-1, %edx
; ATHLON-NEXT: jne LBB13_2
; ATHLON-NEXT: ## %bb.1:
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %edx
; ATHLON-NEXT: LBB13_2:
; ATHLON-NEXT: retl
;
; MCU-LABEL: test11a:
; MCU: # %bb.0:
; MCU-NEXT: orl %edx, %eax
; MCU-NEXT: movl $-1, %eax
; MCU-NEXT: movl $-1, %edx
; MCU-NEXT: jne .LBB13_2
; MCU-NEXT: # %bb.1:
; MCU-NEXT: movl {{[0-9]+}}(%esp), %eax
; MCU-NEXT: movl {{[0-9]+}}(%esp), %edx
; MCU-NEXT: .LBB13_2:
; MCU-NEXT: retl
%cmp = icmp ne i64 %x, 0
%cond = select i1 %cmp, i64 -1, i64 %y
ret i64 %cond
}
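; test13: sext of an unsigned ult compare; cmp + sbb materializes the 0/-1
; result directly from the carry flag.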
define i32 @test13(i32 %a, i32 %b) nounwind {
; GENERIC-LABEL: test13:
; GENERIC: ## %bb.0:
; GENERIC-NEXT: cmpl %esi, %edi
; GENERIC-NEXT: sbbl %eax, %eax
; GENERIC-NEXT: retq
;
; ATOM-LABEL: test13:
; ATOM: ## %bb.0:
; ATOM-NEXT: cmpl %esi, %edi
; ATOM-NEXT: sbbl %eax, %eax
; ATOM-NEXT: nop
; ATOM-NEXT: nop
; ATOM-NEXT: nop
; ATOM-NEXT: nop
; ATOM-NEXT: retq
;
; ATHLON-LABEL: test13:
; ATHLON: ## %bb.0:
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: sbbl %eax, %eax
; ATHLON-NEXT: retl
;
; MCU-LABEL: test13:
; MCU: # %bb.0:
; MCU-NEXT: cmpl %edx, %eax
; MCU-NEXT: sbbl %eax, %eax
; MCU-NEXT: retl
%c = icmp ult i32 %a, %b
%d = sext i1 %c to i32
ret i32 %d
}
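; test14: the inverted (uge) compare, so the checks expect setae + negl rather
; than a plain sbb.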
define i32 @test14(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: test14:
; CHECK: ## %bb.0:
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: cmpl %esi, %edi
; CHECK-NEXT: setae %al
; CHECK-NEXT: negl %eax
; CHECK-NEXT: retq
;
; ATHLON-LABEL: test14:
; ATHLON: ## %bb.0:
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %ecx
; ATHLON-NEXT: xorl %eax, %eax
; ATHLON-NEXT: cmpl {{[0-9]+}}(%esp), %ecx
; ATHLON-NEXT: setae %al
; ATHLON-NEXT: negl %eax
; ATHLON-NEXT: retl
;
; MCU-LABEL: test14:
; MCU: # %bb.0:
; MCU-NEXT: xorl %ecx, %ecx
; MCU-NEXT: cmpl %edx, %eax
; MCU-NEXT: setae %cl
; MCU-NEXT: negl %ecx
; MCU-NEXT: movl %ecx, %eax
; MCU-NEXT: retl
%c = icmp uge i32 %a, %b
%d = sext i1 %c to i32
ret i32 %d
}
; rdar://10961709
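; test15: sext of (%x != 0). neg sets the carry flag exactly when %x is
; nonzero, and sbb %eax, %eax turns that into 0 or -1 without a setcc.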
define i32 @test15(i32 %x) nounwind {
; GENERIC-LABEL: test15:
; GENERIC: ## %bb.0: ## %entry
; GENERIC-NEXT: negl %edi
; GENERIC-NEXT: sbbl %eax, %eax
; GENERIC-NEXT: retq
;
; ATOM-LABEL: test15:
; ATOM: ## %bb.0: ## %entry
; ATOM-NEXT: negl %edi
; ATOM-NEXT: sbbl %eax, %eax
; ATOM-NEXT: nop
; ATOM-NEXT: nop
; ATOM-NEXT: nop
; ATOM-NEXT: nop
; ATOM-NEXT: retq
;
; ATHLON-LABEL: test15:
; ATHLON: ## %bb.0: ## %entry
; ATHLON-NEXT: xorl %eax, %eax
; ATHLON-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: sbbl %eax, %eax
; ATHLON-NEXT: retl
;
; MCU-LABEL: test15:
; MCU: # %bb.0: # %entry
; MCU-NEXT: negl %eax
; MCU-NEXT: sbbl %eax, %eax
; MCU-NEXT: retl
entry:
%cmp = icmp ne i32 %x, 0
%sub = sext i1 %cmp to i32
ret i32 %sub
}
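; test16: the 64-bit version of test15 (negq + sbbq); the 32-bit targets or the
; two halves together and use setne + negl instead.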
define i64 @test16(i64 %x) nounwind uwtable readnone ssp {
; GENERIC-LABEL: test16:
; GENERIC: ## %bb.0: ## %entry
; GENERIC-NEXT: negq %rdi
; GENERIC-NEXT: sbbq %rax, %rax
; GENERIC-NEXT: retq
;
; ATOM-LABEL: test16:
; ATOM: ## %bb.0: ## %entry
; ATOM-NEXT: negq %rdi
; ATOM-NEXT: sbbq %rax, %rax
; ATOM-NEXT: nop
; ATOM-NEXT: nop
; ATOM-NEXT: nop
; ATOM-NEXT: nop
; ATOM-NEXT: retq
;
; ATHLON-LABEL: test16:
; ATHLON: ## %bb.0: ## %entry
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %ecx
; ATHLON-NEXT: xorl %eax, %eax
; ATHLON-NEXT: orl {{[0-9]+}}(%esp), %ecx
; ATHLON-NEXT: setne %al
; ATHLON-NEXT: negl %eax
; ATHLON-NEXT: movl %eax, %edx
; ATHLON-NEXT: retl
;
; MCU-LABEL: test16:
; MCU: # %bb.0: # %entry
; MCU-NEXT: movl %eax, %ecx
; MCU-NEXT: xorl %eax, %eax
; MCU-NEXT: orl %edx, %ecx
; MCU-NEXT: setne %al
; MCU-NEXT: negl %eax
; MCU-NEXT: movl %eax, %edx
; MCU-NEXT: retl
entry:
%cmp = icmp ne i64 %x, 0
%conv1 = sext i1 %cmp to i64
ret i64 %conv1
}
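; The sext of (x != 0) from i1 to i16 is a select of 0 or -1; the checks below
; expect it to lower to a neg/cmp + sbb sequence rather than a cmov or branch.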
define i16 @test17(i16 %x) nounwind {
; GENERIC-LABEL: test17:
; GENERIC: ## %bb.0: ## %entry
; GENERIC-NEXT: negw %di
; GENERIC-NEXT: sbbl %eax, %eax
; GENERIC-NEXT: ## kill: def $ax killed $ax killed $eax
; GENERIC-NEXT: retq
;
; ATOM-LABEL: test17:
; ATOM: ## %bb.0: ## %entry
; ATOM-NEXT: negw %di
; ATOM-NEXT: sbbl %eax, %eax
; ATOM-NEXT: ## kill: def $ax killed $ax killed $eax
; ATOM-NEXT: nop
; ATOM-NEXT: nop
; ATOM-NEXT: nop
; ATOM-NEXT: nop
; ATOM-NEXT: retq
;
; ATHLON-LABEL: test17:
; ATHLON: ## %bb.0: ## %entry
; ATHLON-NEXT: xorl %eax, %eax
; ATHLON-NEXT: cmpw {{[0-9]+}}(%esp), %ax
; ATHLON-NEXT: sbbl %eax, %eax
; ATHLON-NEXT: ## kill: def $ax killed $ax killed $eax
; ATHLON-NEXT: retl
;
; MCU-LABEL: test17:
; MCU: # %bb.0: # %entry
; MCU-NEXT: negw %ax
; MCU-NEXT: sbbl %eax, %eax
; MCU-NEXT: # kill: def $ax killed $ax killed $eax
; MCU-NEXT: retl
entry:
%cmp = icmp ne i16 %x, 0
%sub = sext i1 %cmp to i16
ret i16 %sub
}
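; Select between two zero-extended i8 arguments on a signed compare with 15;
; targets with cmov use it directly, while MCU falls back to a compare-and-branch.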
define i8 @test18(i32 %x, i8 zeroext %a, i8 zeroext %b) nounwind {
; GENERIC-LABEL: test18:
; GENERIC: ## %bb.0:
; GENERIC-NEXT: movl %esi, %eax
; GENERIC-NEXT: cmpl $15, %edi
; GENERIC-NEXT: cmovgel %edx, %eax
; GENERIC-NEXT: ## kill: def $al killed $al killed $eax
; GENERIC-NEXT: retq
;
; ATOM-LABEL: test18:
; ATOM: ## %bb.0:
; ATOM-NEXT: movl %esi, %eax
; ATOM-NEXT: cmpl $15, %edi
; ATOM-NEXT: cmovgel %edx, %eax
; ATOM-NEXT: ## kill: def $al killed $al killed $eax
; ATOM-NEXT: nop
; ATOM-NEXT: nop
; ATOM-NEXT: retq
;
; ATHLON-LABEL: test18:
; ATHLON: ## %bb.0:
; ATHLON-NEXT: cmpl $15, {{[0-9]+}}(%esp)
; ATHLON-NEXT: leal {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: leal {{[0-9]+}}(%esp), %ecx
; ATHLON-NEXT: cmovll %eax, %ecx
; ATHLON-NEXT: movb (%ecx), %al
; ATHLON-NEXT: retl
;
; MCU-LABEL: test18:
; MCU: # %bb.0:
; MCU-NEXT: cmpl $15, %eax
; MCU-NEXT: jl .LBB19_2
; MCU-NEXT: # %bb.1:
; MCU-NEXT: movl %ecx, %edx
; MCU-NEXT: .LBB19_2:
; MCU-NEXT: movl %edx, %eax
; MCU-NEXT: retl
%cmp = icmp slt i32 %x, 15
%sel = select i1 %cmp, i8 %a, i8 %b
ret i8 %sel
}
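; The shift amount is a select of the constants 3 and 2; with a zero-extended i1
; condition this folds to 'or $2' feeding the shl, so no cmov or branch is emitted.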
define i32 @trunc_select_miscompile(i32 %a, i1 zeroext %cc) {
; CHECK-LABEL: trunc_select_miscompile:
; CHECK: ## %bb.0:
; CHECK-NEXT: movl %esi, %ecx
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: orb $2, %cl
; CHECK-NEXT: ## kill: def $cl killed $cl killed $ecx
; CHECK-NEXT: shll %cl, %eax
; CHECK-NEXT: retq
;
; ATHLON-LABEL: trunc_select_miscompile:
; ATHLON: ## %bb.0:
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: movb {{[0-9]+}}(%esp), %cl
; ATHLON-NEXT: orb $2, %cl
; ATHLON-NEXT: shll %cl, %eax
; ATHLON-NEXT: retl
;
; MCU-LABEL: trunc_select_miscompile:
; MCU: # %bb.0:
; MCU-NEXT: movl %edx, %ecx
; MCU-NEXT: orb $2, %cl
; MCU-NEXT: # kill: def $cl killed $cl killed $ecx
; MCU-NEXT: shll %cl, %eax
; MCU-NEXT: retl
%tmp1 = select i1 %cc, i32 3, i32 2
%tmp2 = shl i32 %a, %tmp1
ret i32 %tmp2
}
; reproducer for pr29002
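; Clamp a signed i32 into the i8 range [-128, 127] and store the truncated byte.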
define void @clamp_i8(i32 %src, i8* %dst) {
; GENERIC-LABEL: clamp_i8:
; GENERIC: ## %bb.0:
; GENERIC-NEXT: cmpl $127, %edi
; GENERIC-NEXT: movl $127, %eax
; GENERIC-NEXT: cmovlel %edi, %eax
; GENERIC-NEXT: cmpl $-128, %eax
; GENERIC-NEXT: movb $-128, %cl
; GENERIC-NEXT: jl LBB21_2
; GENERIC-NEXT: ## %bb.1:
; GENERIC-NEXT: movl %eax, %ecx
; GENERIC-NEXT: LBB21_2:
; GENERIC-NEXT: movb %cl, (%rsi)
; GENERIC-NEXT: retq
;
; ATOM-LABEL: clamp_i8:
; ATOM: ## %bb.0:
; ATOM-NEXT: cmpl $127, %edi
; ATOM-NEXT: movl $127, %eax
; ATOM-NEXT: movb $-128, %cl
; ATOM-NEXT: cmovlel %edi, %eax
; ATOM-NEXT: cmpl $-128, %eax
; ATOM-NEXT: jl LBB21_2
; ATOM-NEXT: ## %bb.1:
; ATOM-NEXT: movl %eax, %ecx
; ATOM-NEXT: LBB21_2:
; ATOM-NEXT: movb %cl, (%rsi)
; ATOM-NEXT: retq
;
; ATHLON-LABEL: clamp_i8:
; ATHLON: ## %bb.0:
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %edx
; ATHLON-NEXT: cmpl $127, %edx
; ATHLON-NEXT: movl $127, %ecx
; ATHLON-NEXT: cmovlel %edx, %ecx
; ATHLON-NEXT: cmpl $-128, %ecx
; ATHLON-NEXT: movb $-128, %dl
; ATHLON-NEXT: jl LBB21_2
; ATHLON-NEXT: ## %bb.1:
; ATHLON-NEXT: movl %ecx, %edx
; ATHLON-NEXT: LBB21_2:
; ATHLON-NEXT: movb %dl, (%eax)
; ATHLON-NEXT: retl
;
; MCU-LABEL: clamp_i8:
; MCU: # %bb.0:
; MCU-NEXT: cmpl $127, %eax
; MCU-NEXT: movl $127, %ecx
; MCU-NEXT: jg .LBB21_2
; MCU-NEXT: # %bb.1:
; MCU-NEXT: movl %eax, %ecx
; MCU-NEXT: .LBB21_2:
; MCU-NEXT: cmpl $-128, %ecx
; MCU-NEXT: movb $-128, %al
; MCU-NEXT: jl .LBB21_4
; MCU-NEXT: # %bb.3:
; MCU-NEXT: movl %ecx, %eax
; MCU-NEXT: .LBB21_4:
; MCU-NEXT: movb %al, (%edx)
; MCU-NEXT: retl
%cmp = icmp sgt i32 %src, 127
%sel1 = select i1 %cmp, i32 127, i32 %src
%cmp1 = icmp slt i32 %sel1, -128
%sel2 = select i1 %cmp1, i32 -128, i32 %sel1
%conv = trunc i32 %sel2 to i8
store i8 %conv, i8* %dst, align 2
ret void
}
; reproducer for pr29002
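; Same pattern for i16: clamp the input into [-32768, 32767] before the truncating store.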
define void @clamp(i32 %src, i16* %dst) {
; GENERIC-LABEL: clamp:
; GENERIC: ## %bb.0:
; GENERIC-NEXT: cmpl $32767, %edi ## imm = 0x7FFF
; GENERIC-NEXT: movl $32767, %eax ## imm = 0x7FFF
; GENERIC-NEXT: cmovlel %edi, %eax
; GENERIC-NEXT: cmpl $-32768, %eax ## imm = 0x8000
; GENERIC-NEXT: movl $32768, %ecx ## imm = 0x8000
; GENERIC-NEXT: cmovgel %eax, %ecx
; GENERIC-NEXT: movw %cx, (%rsi)
; GENERIC-NEXT: retq
;
; ATOM-LABEL: clamp:
; ATOM: ## %bb.0:
; ATOM-NEXT: cmpl $32767, %edi ## imm = 0x7FFF
; ATOM-NEXT: movl $32767, %eax ## imm = 0x7FFF
; ATOM-NEXT: movl $32768, %ecx ## imm = 0x8000
; ATOM-NEXT: cmovlel %edi, %eax
; ATOM-NEXT: cmpl $-32768, %eax ## imm = 0x8000
; ATOM-NEXT: cmovgel %eax, %ecx
; ATOM-NEXT: movw %cx, (%rsi)
; ATOM-NEXT: retq
;
; ATHLON-LABEL: clamp:
; ATHLON: ## %bb.0:
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %ecx
; ATHLON-NEXT: cmpl $32767, %ecx ## imm = 0x7FFF
; ATHLON-NEXT: movl $32767, %edx ## imm = 0x7FFF
; ATHLON-NEXT: cmovlel %ecx, %edx
; ATHLON-NEXT: cmpl $-32768, %edx ## imm = 0x8000
; ATHLON-NEXT: movl $32768, %ecx ## imm = 0x8000
; ATHLON-NEXT: cmovgel %edx, %ecx
; ATHLON-NEXT: movw %cx, (%eax)
; ATHLON-NEXT: retl
;
; MCU-LABEL: clamp:
; MCU: # %bb.0:
; MCU-NEXT: cmpl $32767, %eax # imm = 0x7FFF
; MCU-NEXT: movl $32767, %ecx # imm = 0x7FFF
; MCU-NEXT: jg .LBB22_2
; MCU-NEXT: # %bb.1:
; MCU-NEXT: movl %eax, %ecx
; MCU-NEXT: .LBB22_2:
; MCU-NEXT: cmpl $-32768, %ecx # imm = 0x8000
; MCU-NEXT: movl $32768, %eax # imm = 0x8000
; MCU-NEXT: jl .LBB22_4
; MCU-NEXT: # %bb.3:
; MCU-NEXT: movl %ecx, %eax
; MCU-NEXT: .LBB22_4:
; MCU-NEXT: movw %ax, (%edx)
; MCU-NEXT: retl
%cmp = icmp sgt i32 %src, 32767
%sel1 = select i1 %cmp, i32 32767, i32 %src
%cmp1 = icmp slt i32 %sel1, -32768
%sel2 = select i1 %cmp1, i32 -32768, i32 %sel1
%conv = trunc i32 %sel2 to i16
store i16 %conv, i16* %dst, align 2
ret void
}
define void @test19() {
; This is a massive reduction of an llvm-stress test case that generates
; interesting chains feeding setcc and eventually an f32 select operation. This
; is intended to exercise the SELECT formation in the DAG combine simplifying
; a simplified select_cc node. If it regresses and is no longer triggering
; that code path, it can be deleted.
;
; CHECK-LABEL: test19:
; CHECK: ## %bb.0: ## %BB
; CHECK-NEXT: movl $-1, %eax
; CHECK-NEXT: movb $1, %cl
; CHECK-NEXT: .p2align 4, 0x90
; CHECK-NEXT: LBB23_1: ## %CF
; CHECK-NEXT: ## =>This Inner Loop Header: Depth=1
; CHECK-NEXT: testb %cl, %cl
; CHECK-NEXT: jne LBB23_1
; CHECK-NEXT: ## %bb.2: ## %CF250
; CHECK-NEXT: ## in Loop: Header=BB23_1 Depth=1
; CHECK-NEXT: jne LBB23_1
; CHECK-NEXT: .p2align 4, 0x90
; CHECK-NEXT: LBB23_3: ## %CF242
; CHECK-NEXT: ## =>This Inner Loop Header: Depth=1
; CHECK-NEXT: cmpl %eax, %eax
; CHECK-NEXT: ucomiss %xmm0, %xmm0
; CHECK-NEXT: jp LBB23_3
; CHECK-NEXT: ## %bb.4: ## %CF244
; CHECK-NEXT: retq
;
; ATHLON-LABEL: test19:
; ATHLON: ## %bb.0: ## %BB
; ATHLON-NEXT: movb $1, %al
; ATHLON-NEXT: .p2align 4, 0x90
; ATHLON-NEXT: LBB23_1: ## %CF
; ATHLON-NEXT: ## =>This Inner Loop Header: Depth=1
; ATHLON-NEXT: testb %al, %al
; ATHLON-NEXT: jne LBB23_1
; ATHLON-NEXT: ## %bb.2: ## %CF250
; ATHLON-NEXT: ## in Loop: Header=BB23_1 Depth=1
; ATHLON-NEXT: jne LBB23_1
; ATHLON-NEXT: ## %bb.3: ## %CF242.preheader
; ATHLON-NEXT: fldz
; ATHLON-NEXT: .p2align 4, 0x90
; ATHLON-NEXT: LBB23_4: ## %CF242
; ATHLON-NEXT: ## =>This Inner Loop Header: Depth=1
; ATHLON-NEXT: fucomi %st(0)
; ATHLON-NEXT: jp LBB23_4
; ATHLON-NEXT: ## %bb.5: ## %CF244
; ATHLON-NEXT: fstp %st(0)
; ATHLON-NEXT: retl
;
; MCU-LABEL: test19:
; MCU: # %bb.0: # %BB
; MCU-NEXT: movl $-1, %ecx
; MCU-NEXT: movb $1, %al
; MCU-NEXT: .p2align 4, 0x90
; MCU-NEXT: .LBB23_1: # %CF
; MCU-NEXT: # =>This Inner Loop Header: Depth=1
; MCU-NEXT: testb %al, %al
; MCU-NEXT: jne .LBB23_1
; MCU-NEXT: # %bb.2: # %CF250
; MCU-NEXT: # in Loop: Header=BB23_1 Depth=1
; MCU-NEXT: jne .LBB23_1
; MCU-NEXT: # %bb.3: # %CF242.preheader
; MCU-NEXT: fldz
; MCU-NEXT: .p2align 4, 0x90
; MCU-NEXT: .LBB23_4: # %CF242
; MCU-NEXT: # =>This Inner Loop Header: Depth=1
; MCU-NEXT: cmpl %eax, %ecx
; MCU-NEXT: fucom %st(0)
; MCU-NEXT: fnstsw %ax
; MCU-NEXT: # kill: def $ah killed $ah killed $ax
; MCU-NEXT: sahf
; MCU-NEXT: jp .LBB23_4
; MCU-NEXT: # %bb.5: # %CF244
; MCU-NEXT: fstp %st(0)
; MCU-NEXT: retl
BB:
br label %CF
CF:
%Cmp10 = icmp ule i8 undef, undef
br i1 %Cmp10, label %CF, label %CF250
CF250:
%E12 = extractelement <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, i32 2
%Cmp32 = icmp ugt i1 %Cmp10, false
br i1 %Cmp32, label %CF, label %CF242
CF242:
%Cmp38 = icmp uge i32 %E12, undef
%FC = uitofp i1 %Cmp38 to float
%Sl59 = select i1 %Cmp32, float %FC, float undef
%Cmp60 = fcmp ugt float undef, undef
br i1 %Cmp60, label %CF242, label %CF244
CF244:
%B122 = fadd float %Sl59, undef
ret void
}
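
; The select_xor/select_or tests below check that a select between %A and
; (%A xor/or %B) guarded by the low bit of %cond is lowered without a
; separate select: the x86-64 and Athlon runs fold it into a cmov of the
; combined value, while the MCU run either branches around the xor/or or
; turns the condition bit into an all-ones/all-zero mask via and/neg.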
define i16 @select_xor_1(i16 %A, i8 %cond) {
; CHECK-LABEL: select_xor_1:
; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: xorl $43, %eax
; CHECK-NEXT: testb $1, %sil
; CHECK-NEXT: cmovel %edi, %eax
; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
;
; ATHLON-LABEL: select_xor_1:
; ATHLON: ## %bb.0: ## %entry
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %ecx
; ATHLON-NEXT: movl %ecx, %eax
; ATHLON-NEXT: xorl $43, %eax
; ATHLON-NEXT: testb $1, {{[0-9]+}}(%esp)
; ATHLON-NEXT: cmovel %ecx, %eax
; ATHLON-NEXT: ## kill: def $ax killed $ax killed $eax
; ATHLON-NEXT: retl
;
; MCU-LABEL: select_xor_1:
; MCU: # %bb.0: # %entry
; MCU-NEXT: andl $1, %edx
; MCU-NEXT: negl %edx
; MCU-NEXT: andl $43, %edx
; MCU-NEXT: xorl %edx, %eax
; MCU-NEXT: # kill: def $ax killed $ax killed $eax
; MCU-NEXT: retl
entry:
%and = and i8 %cond, 1
%cmp10 = icmp eq i8 %and, 0
%0 = xor i16 %A, 43
%1 = select i1 %cmp10, i16 %A, i16 %0
ret i16 %1
}
; Equivalent to above, but with icmp ne (and %cond, 1), 1 instead of
; icmp eq (and %cond, 1), 0
define i16 @select_xor_1b(i16 %A, i8 %cond) {
; CHECK-LABEL: select_xor_1b:
; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: xorl $43, %eax
; CHECK-NEXT: testb $1, %sil
; CHECK-NEXT: cmovel %edi, %eax
; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
;
; ATHLON-LABEL: select_xor_1b:
; ATHLON: ## %bb.0: ## %entry
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %ecx
; ATHLON-NEXT: movl %ecx, %eax
; ATHLON-NEXT: xorl $43, %eax
; ATHLON-NEXT: testb $1, {{[0-9]+}}(%esp)
; ATHLON-NEXT: cmovel %ecx, %eax
; ATHLON-NEXT: ## kill: def $ax killed $ax killed $eax
; ATHLON-NEXT: retl
;
; MCU-LABEL: select_xor_1b:
; MCU: # %bb.0: # %entry
; MCU-NEXT: testb $1, %dl
; MCU-NEXT: je .LBB25_2
; MCU-NEXT: # %bb.1:
; MCU-NEXT: xorl $43, %eax
; MCU-NEXT: .LBB25_2: # %entry
; MCU-NEXT: # kill: def $ax killed $ax killed $eax
; MCU-NEXT: retl
entry:
%and = and i8 %cond, 1
%cmp10 = icmp ne i8 %and, 1
%0 = xor i16 %A, 43
%1 = select i1 %cmp10, i16 %A, i16 %0
ret i16 %1
}
define i32 @select_xor_2(i32 %A, i32 %B, i8 %cond) {
; CHECK-LABEL: select_xor_2:
; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: movl %esi, %eax
; CHECK-NEXT: xorl %edi, %eax
; CHECK-NEXT: testb $1, %dl
; CHECK-NEXT: cmovel %edi, %eax
; CHECK-NEXT: retq
;
; ATHLON-LABEL: select_xor_2:
; ATHLON: ## %bb.0: ## %entry
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %ecx
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: xorl %ecx, %eax
; ATHLON-NEXT: testb $1, {{[0-9]+}}(%esp)
; ATHLON-NEXT: cmovel %ecx, %eax
; ATHLON-NEXT: retl
;
; MCU-LABEL: select_xor_2:
; MCU: # %bb.0: # %entry
; MCU-NEXT: andl $1, %ecx
; MCU-NEXT: negl %ecx
; MCU-NEXT: andl %edx, %ecx
; MCU-NEXT: xorl %ecx, %eax
; MCU-NEXT: retl
entry:
%and = and i8 %cond, 1
%cmp10 = icmp eq i8 %and, 0
%0 = xor i32 %B, %A
%1 = select i1 %cmp10, i32 %A, i32 %0
ret i32 %1
}
; Equivalent to above, but with icmp ne (and %cond, 1), 1 instead of
; icmp eq (and %cond, 1), 0
define i32 @select_xor_2b(i32 %A, i32 %B, i8 %cond) {
; CHECK-LABEL: select_xor_2b:
; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: movl %esi, %eax
; CHECK-NEXT: xorl %edi, %eax
; CHECK-NEXT: testb $1, %dl
; CHECK-NEXT: cmovel %edi, %eax
; CHECK-NEXT: retq
;
; ATHLON-LABEL: select_xor_2b:
; ATHLON: ## %bb.0: ## %entry
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %ecx
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: xorl %ecx, %eax
; ATHLON-NEXT: testb $1, {{[0-9]+}}(%esp)
; ATHLON-NEXT: cmovel %ecx, %eax
; ATHLON-NEXT: retl
;
; MCU-LABEL: select_xor_2b:
; MCU: # %bb.0: # %entry
; MCU-NEXT: testb $1, %cl
; MCU-NEXT: je .LBB27_2
; MCU-NEXT: # %bb.1:
; MCU-NEXT: xorl %edx, %eax
; MCU-NEXT: .LBB27_2: # %entry
; MCU-NEXT: retl
entry:
%and = and i8 %cond, 1
%cmp10 = icmp ne i8 %and, 1
%0 = xor i32 %B, %A
%1 = select i1 %cmp10, i32 %A, i32 %0
ret i32 %1
}
define i32 @select_or(i32 %A, i32 %B, i8 %cond) {
; CHECK-LABEL: select_or:
; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: movl %esi, %eax
; CHECK-NEXT: orl %edi, %eax
; CHECK-NEXT: testb $1, %dl
; CHECK-NEXT: cmovel %edi, %eax
; CHECK-NEXT: retq
;
; ATHLON-LABEL: select_or:
; ATHLON: ## %bb.0: ## %entry
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %ecx
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: orl %ecx, %eax
; ATHLON-NEXT: testb $1, {{[0-9]+}}(%esp)
; ATHLON-NEXT: cmovel %ecx, %eax
; ATHLON-NEXT: retl
;
; MCU-LABEL: select_or:
; MCU: # %bb.0: # %entry
; MCU-NEXT: andl $1, %ecx
; MCU-NEXT: negl %ecx
; MCU-NEXT: andl %edx, %ecx
; MCU-NEXT: orl %ecx, %eax
; MCU-NEXT: retl
entry:
%and = and i8 %cond, 1
%cmp10 = icmp eq i8 %and, 0
%0 = or i32 %B, %A
%1 = select i1 %cmp10, i32 %A, i32 %0
ret i32 %1
}
; Equivalent to above, but with icmp ne (and %cond, 1), 1 instead of
; icmp eq (and %cond, 1), 0
define i32 @select_or_b(i32 %A, i32 %B, i8 %cond) {
; CHECK-LABEL: select_or_b:
; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: movl %esi, %eax
; CHECK-NEXT: orl %edi, %eax
; CHECK-NEXT: testb $1, %dl
; CHECK-NEXT: cmovel %edi, %eax
; CHECK-NEXT: retq
;
; ATHLON-LABEL: select_or_b:
; ATHLON: ## %bb.0: ## %entry
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %ecx
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: orl %ecx, %eax
; ATHLON-NEXT: testb $1, {{[0-9]+}}(%esp)
; ATHLON-NEXT: cmovel %ecx, %eax
; ATHLON-NEXT: retl
;
; MCU-LABEL: select_or_b:
; MCU: # %bb.0: # %entry
; MCU-NEXT: testb $1, %cl
; MCU-NEXT: je .LBB29_2
; MCU-NEXT: # %bb.1:
; MCU-NEXT: orl %edx, %eax
; MCU-NEXT: .LBB29_2: # %entry
; MCU-NEXT: retl
entry:
%and = and i8 %cond, 1
%cmp10 = icmp ne i8 %and, 1
%0 = or i32 %B, %A
%1 = select i1 %cmp10, i32 %A, i32 %0
ret i32 %1
}
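
; Same as select_or/select_or_b above, but with an i32 condition instead of i8.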
define i32 @select_or_1(i32 %A, i32 %B, i32 %cond) {
; CHECK-LABEL: select_or_1:
; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: movl %esi, %eax
; CHECK-NEXT: orl %edi, %eax
; CHECK-NEXT: testb $1, %dl
; CHECK-NEXT: cmovel %edi, %eax
; CHECK-NEXT: retq
;
; ATHLON-LABEL: select_or_1:
; ATHLON: ## %bb.0: ## %entry
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %ecx
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: orl %ecx, %eax
; ATHLON-NEXT: testb $1, {{[0-9]+}}(%esp)
; ATHLON-NEXT: cmovel %ecx, %eax
; ATHLON-NEXT: retl
;
; MCU-LABEL: select_or_1:
; MCU: # %bb.0: # %entry
; MCU-NEXT: andl $1, %ecx
; MCU-NEXT: negl %ecx
; MCU-NEXT: andl %edx, %ecx
; MCU-NEXT: orl %ecx, %eax
; MCU-NEXT: retl
entry:
%and = and i32 %cond, 1
%cmp10 = icmp eq i32 %and, 0
%0 = or i32 %B, %A
%1 = select i1 %cmp10, i32 %A, i32 %0
ret i32 %1
}
; Equivalent to above, but with icmp ne (and %cond, 1), 1 instead of
; icmp eq (and %cond, 1), 0
define i32 @select_or_1b(i32 %A, i32 %B, i32 %cond) {
; CHECK-LABEL: select_or_1b:
; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: movl %esi, %eax
; CHECK-NEXT: orl %edi, %eax
; CHECK-NEXT: testb $1, %dl
; CHECK-NEXT: cmovel %edi, %eax
; CHECK-NEXT: retq
;
; ATHLON-LABEL: select_or_1b:
; ATHLON: ## %bb.0: ## %entry
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %ecx
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: orl %ecx, %eax
; ATHLON-NEXT: testb $1, {{[0-9]+}}(%esp)
; ATHLON-NEXT: cmovel %ecx, %eax
; ATHLON-NEXT: retl
;
; MCU-LABEL: select_or_1b:
; MCU: # %bb.0: # %entry
; MCU-NEXT: testb $1, %cl
; MCU-NEXT: je .LBB31_2
; MCU-NEXT: # %bb.1:
; MCU-NEXT: orl %edx, %eax
; MCU-NEXT: .LBB31_2: # %entry
; MCU-NEXT: retl
entry:
%and = and i32 %cond, 1
%cmp10 = icmp ne i32 %and, 1
%0 = or i32 %B, %A
%1 = select i1 %cmp10, i32 %A, i32 %0
ret i32 %1
}