; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-apple-darwin10 -mcpu=generic | FileCheck %s --check-prefix=CHECK --check-prefix=GENERIC
; RUN: llc < %s -mtriple=x86_64-apple-darwin10 -mcpu=atom | FileCheck %s --check-prefix=CHECK --check-prefix=ATOM
; RUN: llc < %s -mtriple=i386-apple-darwin10 -mcpu=athlon | FileCheck %s --check-prefix=ATHLON
; RUN: llc < %s -mtriple=i386-intel-elfiamcu | FileCheck %s --check-prefix=MCU

; PR5757
%0 = type { i64, i32 }

define i32 @test1(%0* %p, %0* %q, i1 %r) nounwind {
; CHECK-LABEL: test1:
; CHECK: ## %bb.0:
; CHECK-NEXT: addq $8, %rdi
; CHECK-NEXT: addq $8, %rsi
; CHECK-NEXT: testb $1, %dl
; CHECK-NEXT: cmovneq %rdi, %rsi
; CHECK-NEXT: movl (%rsi), %eax
; CHECK-NEXT: retq
;
; ATHLON-LABEL: test1:
; ATHLON: ## %bb.0:
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %ecx
; ATHLON-NEXT: addl $8, %ecx
; ATHLON-NEXT: addl $8, %eax
; ATHLON-NEXT: testb $1, {{[0-9]+}}(%esp)
; ATHLON-NEXT: cmovnel %ecx, %eax
; ATHLON-NEXT: movl (%eax), %eax
; ATHLON-NEXT: retl
;
; MCU-LABEL: test1:
; MCU: # %bb.0:
; MCU-NEXT: testb $1, %cl
; MCU-NEXT: jne .LBB0_1
; MCU-NEXT: # %bb.2:
; MCU-NEXT: addl $8, %edx
; MCU-NEXT: movl (%edx), %eax
; MCU-NEXT: retl
; MCU-NEXT: .LBB0_1:
; MCU-NEXT: addl $8, %eax
; MCU-NEXT: movl (%eax), %eax
; MCU-NEXT: retl
  %t0 = load %0, %0* %p
  %t1 = load %0, %0* %q
  %t4 = select i1 %r, %0 %t0, %0 %t1
  %t5 = extractvalue %0 %t4, 1
  ret i32 %t5
}

; PR2139
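; The i16 select of constants is combined with its sign extension, so the cmov below operates directly on 32-bit registers.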
define i32 @test2() nounwind {
; GENERIC-LABEL: test2:
; GENERIC: ## %bb.0: ## %entry
; GENERIC-NEXT: pushq %rax
; GENERIC-NEXT: callq _return_false
; GENERIC-NEXT: xorl %ecx, %ecx
; GENERIC-NEXT: testb $1, %al
; GENERIC-NEXT: movl $-480, %eax ## imm = 0xFE20
; GENERIC-NEXT: cmovnel %ecx, %eax
; GENERIC-NEXT: shll $3, %eax
; GENERIC-NEXT: cmpl $32768, %eax ## imm = 0x8000
; GENERIC-NEXT: jge LBB1_1
; GENERIC-NEXT: ## %bb.2: ## %bb91
; GENERIC-NEXT: xorl %eax, %eax
; GENERIC-NEXT: popq %rcx
; GENERIC-NEXT: retq
; GENERIC-NEXT: LBB1_1: ## %bb90
; GENERIC-NEXT: ud2
;
; ATOM-LABEL: test2:
; ATOM: ## %bb.0: ## %entry
; ATOM-NEXT: pushq %rax
; ATOM-NEXT: callq _return_false
; ATOM-NEXT: xorl %ecx, %ecx
; ATOM-NEXT: movl $-480, %edx ## imm = 0xFE20
; ATOM-NEXT: testb $1, %al
; ATOM-NEXT: cmovnel %ecx, %edx
; ATOM-NEXT: shll $3, %edx
; ATOM-NEXT: cmpl $32768, %edx ## imm = 0x8000
; ATOM-NEXT: jge LBB1_1
; ATOM-NEXT: ## %bb.2: ## %bb91
; ATOM-NEXT: xorl %eax, %eax
; ATOM-NEXT: popq %rcx
; ATOM-NEXT: retq
; ATOM-NEXT: LBB1_1: ## %bb90
; ATOM-NEXT: ud2
;
; ATHLON-LABEL: test2:
; ATHLON: ## %bb.0: ## %entry
; ATHLON-NEXT: subl $12, %esp
; ATHLON-NEXT: calll _return_false
; ATHLON-NEXT: xorl %ecx, %ecx
; ATHLON-NEXT: testb $1, %al
; ATHLON-NEXT: movl $-480, %eax ## imm = 0xFE20
; ATHLON-NEXT: cmovnel %ecx, %eax
; ATHLON-NEXT: shll $3, %eax
; ATHLON-NEXT: cmpl $32768, %eax ## imm = 0x8000
; ATHLON-NEXT: jge LBB1_1
; ATHLON-NEXT: ## %bb.2: ## %bb91
; ATHLON-NEXT: xorl %eax, %eax
; ATHLON-NEXT: addl $12, %esp
; ATHLON-NEXT: retl
; ATHLON-NEXT: LBB1_1: ## %bb90
; ATHLON-NEXT: ud2
;
; MCU-LABEL: test2:
; MCU: # %bb.0: # %entry
; MCU-NEXT: calll return_false
; MCU-NEXT: xorl %ecx, %ecx
; MCU-NEXT: testb $1, %al
; MCU-NEXT: jne .LBB1_2
; MCU-NEXT: # %bb.1: # %entry
; MCU-NEXT: movl $-480, %ecx # imm = 0xFE20
; MCU-NEXT: .LBB1_2: # %entry
; MCU-NEXT: shll $3, %ecx
; MCU-NEXT: cmpl $32768, %ecx # imm = 0x8000
; MCU-NEXT: jge .LBB1_3
; MCU-NEXT: # %bb.4: # %bb91
; MCU-NEXT: xorl %eax, %eax
; MCU-NEXT: retl
; MCU-NEXT: .LBB1_3: # %bb90
entry:
  %tmp73 = tail call i1 @return_false()
  %g.0 = select i1 %tmp73, i16 0, i16 -480
  %tmp7778 = sext i16 %g.0 to i32
  %tmp80 = shl i32 %tmp7778, 3
  %tmp87 = icmp sgt i32 %tmp80, 32767
  br i1 %tmp87, label %bb90, label %bb91
bb90:
  unreachable
bb91:
  ret i32 0
}

declare i1 @return_false()

;; Select between two floating point constants.
define float @test3(i32 %x) nounwind readnone {
; GENERIC-LABEL: test3:
; GENERIC: ## %bb.0: ## %entry
; GENERIC-NEXT: xorl %eax, %eax
; GENERIC-NEXT: testl %edi, %edi
; GENERIC-NEXT: sete %al
; GENERIC-NEXT: leaq {{.*}}(%rip), %rcx
; GENERIC-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; GENERIC-NEXT: retq
;
; ATOM-LABEL: test3:
; ATOM: ## %bb.0: ## %entry
; ATOM-NEXT: xorl %eax, %eax
; ATOM-NEXT: leaq {{.*}}(%rip), %rcx
; ATOM-NEXT: testl %edi, %edi
; ATOM-NEXT: sete %al
; ATOM-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; ATOM-NEXT: retq
;
; ATHLON-LABEL: test3:
; ATHLON: ## %bb.0: ## %entry
; ATHLON-NEXT: xorl %eax, %eax
; ATHLON-NEXT: cmpl $0, {{[0-9]+}}(%esp)
; ATHLON-NEXT: sete %al
; ATHLON-NEXT: flds LCPI2_0(,%eax,4)
; ATHLON-NEXT: retl
;
; MCU-LABEL: test3:
; MCU: # %bb.0: # %entry
; MCU-NEXT: xorl %ecx, %ecx
; MCU-NEXT: testl %eax, %eax
; MCU-NEXT: sete %cl
; MCU-NEXT: flds {{\.LCPI.*}}(,%ecx,4)
; MCU-NEXT: retl
entry:
  %0 = icmp eq i32 %x, 0
  %iftmp.0.0 = select i1 %0, float 4.200000e+01, float 2.300000e+01
  ret float %iftmp.0.0
}
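
; Select of a 0-or-4 byte offset from an fcmp result, folded into a single scaled-index load.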
define signext i8 @test4(i8* nocapture %P, double %F) nounwind readonly {
; CHECK-LABEL: test4:
; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: ucomisd %xmm0, %xmm1
; CHECK-NEXT: seta %al
; CHECK-NEXT: movsbl (%rdi,%rax,4), %eax
; CHECK-NEXT: retq
;
; ATHLON-LABEL: test4:
; ATHLON: ## %bb.0: ## %entry
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: fldl {{[0-9]+}}(%esp)
; ATHLON-NEXT: flds LCPI3_0
; ATHLON-NEXT: xorl %ecx, %ecx
; ATHLON-NEXT: fucompi %st(1)
; ATHLON-NEXT: fstp %st(0)
; ATHLON-NEXT: seta %cl
; ATHLON-NEXT: movsbl (%eax,%ecx,4), %eax
; ATHLON-NEXT: retl
;
; MCU-LABEL: test4:
; MCU: # %bb.0: # %entry
; MCU-NEXT: movl %eax, %ecx
; MCU-NEXT: fldl {{[0-9]+}}(%esp)
; MCU-NEXT: flds {{\.LCPI.*}}
; MCU-NEXT: fucompp
; MCU-NEXT: fnstsw %ax
; MCU-NEXT: xorl %edx, %edx
; MCU-NEXT: # kill: def $ah killed $ah killed $ax
; MCU-NEXT: sahf
; MCU-NEXT: seta %dl
; MCU-NEXT: movb (%ecx,%edx,4), %al
; MCU-NEXT: retl
entry:
  %0 = fcmp olt double %F, 4.200000e+01
  %iftmp.0.0 = select i1 %0, i32 4, i32 0
  %1 = getelementptr i8, i8* %P, i32 %iftmp.0.0
  %2 = load i8, i8* %1, align 1
  ret i8 %2
}
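
; Select between two <2 x i16> arguments and store the result to memory.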
define void @test5(i1 %c, <2 x i16> %a, <2 x i16> %b, <2 x i16>* %p) nounwind {
; CHECK-LABEL: test5:
; CHECK: ## %bb.0:
; CHECK-NEXT: testb $1, %dil
; CHECK-NEXT: jne LBB4_2
; CHECK-NEXT: ## %bb.1:
; CHECK-NEXT: movdqa %xmm1, %xmm0
; CHECK-NEXT: LBB4_2:
; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; CHECK-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; CHECK-NEXT: movd %xmm0, (%rsi)
; CHECK-NEXT: retq
;
; ATHLON-LABEL: test5:
; ATHLON: ## %bb.0:
; ATHLON-NEXT: pushl %esi
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: testb $1, {{[0-9]+}}(%esp)
; ATHLON-NEXT: leal {{[0-9]+}}(%esp), %ecx
; ATHLON-NEXT: leal {{[0-9]+}}(%esp), %edx
; ATHLON-NEXT: cmovnel %ecx, %edx
; ATHLON-NEXT: movzwl (%edx), %ecx
; ATHLON-NEXT: leal {{[0-9]+}}(%esp), %edx
; ATHLON-NEXT: leal {{[0-9]+}}(%esp), %esi
; ATHLON-NEXT: cmovnel %edx, %esi
; ATHLON-NEXT: movzwl (%esi), %edx
; ATHLON-NEXT: movw %dx, 2(%eax)
; ATHLON-NEXT: movw %cx, (%eax)
; ATHLON-NEXT: popl %esi
; ATHLON-NEXT: retl
;
; MCU-LABEL: test5:
; MCU: # %bb.0:
; MCU-NEXT: pushl %esi
; MCU-NEXT: movl {{[0-9]+}}(%esp), %esi
; MCU-NEXT: testb $1, %al
; MCU-NEXT: jne .LBB4_2
; MCU-NEXT: # %bb.1:
; MCU-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
; MCU-NEXT: movzwl {{[0-9]+}}(%esp), %edx
; MCU-NEXT: .LBB4_2:
; MCU-NEXT: movw %cx, 2(%esi)
; MCU-NEXT: movw %dx, (%esi)
; MCU-NEXT: popl %esi
; MCU-NEXT: retl
  %x = select i1 %c, <2 x i16> %a, <2 x i16> %b
  store <2 x i16> %x, <2 x i16>* %p
  ret void
}

; Verify that the fmul gets sunk into the one part of the diamond where it is needed.
define void @test6(i32 %C, <4 x float>* %A, <4 x float>* %B) nounwind {
; CHECK-LABEL: test6:
; CHECK: ## %bb.0:
; CHECK-NEXT: testl %edi, %edi
; CHECK-NEXT: je LBB5_1
; CHECK-NEXT: ## %bb.2:
; CHECK-NEXT: movaps (%rsi), %xmm0
; CHECK-NEXT: movaps %xmm0, (%rsi)
; CHECK-NEXT: retq
; CHECK-NEXT: LBB5_1:
; CHECK-NEXT: movaps (%rdx), %xmm0
; CHECK-NEXT: mulps %xmm0, %xmm0
; CHECK-NEXT: movaps %xmm0, (%rsi)
; CHECK-NEXT: retq
;
; ATHLON-LABEL: test6:
; ATHLON: ## %bb.0:
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %ecx
; ATHLON-NEXT: flds 12(%ecx)
; ATHLON-NEXT: flds 8(%ecx)
; ATHLON-NEXT: flds 4(%ecx)
; ATHLON-NEXT: flds (%ecx)
; ATHLON-NEXT: flds (%eax)
; ATHLON-NEXT: fmul %st(0), %st(0)
; ATHLON-NEXT: cmpl $0, {{[0-9]+}}(%esp)
; ATHLON-NEXT: fxch %st(1)
; ATHLON-NEXT: fcmove %st(1), %st(0)
; ATHLON-NEXT: fstp %st(1)
; ATHLON-NEXT: flds 4(%eax)
; ATHLON-NEXT: fmul %st(0), %st(0)
; ATHLON-NEXT: fxch %st(2)
; ATHLON-NEXT: fcmove %st(2), %st(0)
; ATHLON-NEXT: fstp %st(2)
; ATHLON-NEXT: flds 8(%eax)
; ATHLON-NEXT: fmul %st(0), %st(0)
; ATHLON-NEXT: fxch %st(3)
; ATHLON-NEXT: fcmove %st(3), %st(0)
; ATHLON-NEXT: fstp %st(3)
; ATHLON-NEXT: flds 12(%eax)
; ATHLON-NEXT: fmul %st(0), %st(0)
; ATHLON-NEXT: fxch %st(4)
; ATHLON-NEXT: fcmove %st(4), %st(0)
; ATHLON-NEXT: fstp %st(4)
; ATHLON-NEXT: fxch %st(3)
; ATHLON-NEXT: fstps 12(%ecx)
; ATHLON-NEXT: fxch %st(1)
; ATHLON-NEXT: fstps 8(%ecx)
; ATHLON-NEXT: fstps 4(%ecx)
; ATHLON-NEXT: fstps (%ecx)
; ATHLON-NEXT: retl
;
; MCU-LABEL: test6:
; MCU: # %bb.0:
; MCU-NEXT: pushl %eax
; MCU-NEXT: flds 12(%edx)
; MCU-NEXT: fstps (%esp) # 4-byte Folded Spill
; MCU-NEXT: flds 8(%edx)
; MCU-NEXT: flds 4(%edx)
; MCU-NEXT: flds (%ecx)
; MCU-NEXT: flds 4(%ecx)
; MCU-NEXT: flds 8(%ecx)
; MCU-NEXT: flds 12(%ecx)
; MCU-NEXT: fmul %st(0), %st(0)
; MCU-NEXT: fxch %st(1)
; MCU-NEXT: fmul %st(0), %st(0)
; MCU-NEXT: fxch %st(2)
; MCU-NEXT: fmul %st(0), %st(0)
; MCU-NEXT: fxch %st(3)
; MCU-NEXT: fmul %st(0), %st(0)
; MCU-NEXT: testl %eax, %eax
; MCU-NEXT: flds (%edx)
; MCU-NEXT: je .LBB5_2
; MCU-NEXT: # %bb.1:
; MCU-NEXT: fstp %st(1)
; MCU-NEXT: fstp %st(3)
; MCU-NEXT: fstp %st(1)
; MCU-NEXT: fstp %st(0)
; MCU-NEXT: flds (%esp) # 4-byte Folded Reload
; MCU-NEXT: fldz
; MCU-NEXT: fldz
; MCU-NEXT: fldz
; MCU-NEXT: fxch %st(1)
; MCU-NEXT: fxch %st(6)
; MCU-NEXT: fxch %st(1)
; MCU-NEXT: fxch %st(5)
; MCU-NEXT: fxch %st(4)
; MCU-NEXT: fxch %st(1)
; MCU-NEXT: fxch %st(3)
; MCU-NEXT: fxch %st(2)
; MCU-NEXT: .LBB5_2:
; MCU-NEXT: fstp %st(0)
; MCU-NEXT: fstp %st(5)
; MCU-NEXT: fstp %st(3)
; MCU-NEXT: fxch %st(2)
; MCU-NEXT: fstps 12(%edx)
; MCU-NEXT: fxch %st(1)
; MCU-NEXT: fstps 8(%edx)
; MCU-NEXT: fstps 4(%edx)
; MCU-NEXT: fstps (%edx)
; MCU-NEXT: popl %eax
; MCU-NEXT: retl
  %tmp = load <4 x float>, <4 x float>* %A
  %tmp3 = load <4 x float>, <4 x float>* %B
  %tmp9 = fmul <4 x float> %tmp3, %tmp3
  %tmp.upgrd.1 = icmp eq i32 %C, 0
  %iftmp.38.0 = select i1 %tmp.upgrd.1, <4 x float> %tmp9, <4 x float> %tmp
  store <4 x float> %iftmp.38.0, <4 x float>* %A
  ret void
}

; Select with fp80's
define x86_fp80 @test7(i32 %tmp8) nounwind {
; GENERIC-LABEL: test7:
; GENERIC: ## %bb.0:
; GENERIC-NEXT: xorl %eax, %eax
; GENERIC-NEXT: testl %edi, %edi
; GENERIC-NEXT: setns %al
; GENERIC-NEXT: shlq $4, %rax
; GENERIC-NEXT: leaq {{.*}}(%rip), %rcx
; GENERIC-NEXT: fldt (%rax,%rcx)
; GENERIC-NEXT: retq
;
; ATOM-LABEL: test7:
; ATOM: ## %bb.0:
; ATOM-NEXT: xorl %eax, %eax
; ATOM-NEXT: leaq {{.*}}(%rip), %rcx
; ATOM-NEXT: testl %edi, %edi
; ATOM-NEXT: setns %al
; ATOM-NEXT: shlq $4, %rax
; ATOM-NEXT: fldt (%rax,%rcx)
; ATOM-NEXT: retq
;
; ATHLON-LABEL: test7:
; ATHLON: ## %bb.0:
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: notl %eax
; ATHLON-NEXT: shrl $27, %eax
; ATHLON-NEXT: andl $-16, %eax
; ATHLON-NEXT: fldt LCPI6_0(%eax)
; ATHLON-NEXT: retl
;
; MCU-LABEL: test7:
; MCU: # %bb.0:
; MCU-NEXT: notl %eax
; MCU-NEXT: shrl $27, %eax
; MCU-NEXT: andl $-16, %eax
; MCU-NEXT: fldt {{\.LCPI.*}}(%eax)
; MCU-NEXT: retl
  %tmp9 = icmp sgt i32 %tmp8, -1
  %retval = select i1 %tmp9, x86_fp80 0xK4005B400000000000000, x86_fp80 0xK40078700000000000000
  ret x86_fp80 %retval
}

; widening select v6i32 and then a sub
define void @test8(i1 %c, <6 x i32>* %dst.addr, <6 x i32> %src1,<6 x i32> %src2) nounwind {
; GENERIC-LABEL: test8:
; GENERIC: ## %bb.0:
; GENERIC-NEXT: testb $1, %dil
; GENERIC-NEXT: jne LBB7_1
; GENERIC-NEXT: ## %bb.2:
; GENERIC-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; GENERIC-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; GENERIC-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; GENERIC-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
; GENERIC-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; GENERIC-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; GENERIC-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; GENERIC-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
; GENERIC-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; GENERIC-NEXT: jmp LBB7_3
; GENERIC-NEXT: LBB7_1:
; GENERIC-NEXT: movd %r9d, %xmm0
; GENERIC-NEXT: movd %r8d, %xmm1
; GENERIC-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; GENERIC-NEXT: movd %ecx, %xmm2
; GENERIC-NEXT: movd %edx, %xmm0
; GENERIC-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; GENERIC-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; GENERIC-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
; GENERIC-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; GENERIC-NEXT: LBB7_3:
; GENERIC-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; GENERIC-NEXT: pcmpeqd %xmm2, %xmm2
; GENERIC-NEXT: paddd %xmm2, %xmm0
; GENERIC-NEXT: paddd %xmm2, %xmm1
; GENERIC-NEXT: movq %xmm1, 16(%rsi)
; GENERIC-NEXT: movdqa %xmm0, (%rsi)
; GENERIC-NEXT: retq
;
; ATOM-LABEL: test8:
; ATOM: ## %bb.0:
; ATOM-NEXT: testb $1, %dil
; ATOM-NEXT: jne LBB7_1
; ATOM-NEXT: ## %bb.2:
; ATOM-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; ATOM-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
; ATOM-NEXT: movd {{.*#+}} xmm3 = mem[0],zero,zero,zero
; ATOM-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; ATOM-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; ATOM-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
; ATOM-NEXT: movd {{.*#+}} xmm3 = mem[0],zero,zero,zero
; ATOM-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; ATOM-NEXT: jmp LBB7_3
; ATOM-NEXT: LBB7_1:
; ATOM-NEXT: movd %r9d, %xmm1
; ATOM-NEXT: movd %r8d, %xmm2
; ATOM-NEXT: movd %ecx, %xmm3
; ATOM-NEXT: movd %edx, %xmm0
; ATOM-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; ATOM-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
; ATOM-NEXT: movd {{.*#+}} xmm3 = mem[0],zero,zero,zero
; ATOM-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; ATOM-NEXT: LBB7_3:
; ATOM-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; ATOM-NEXT: pcmpeqd %xmm2, %xmm2
; ATOM-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
; ATOM-NEXT: paddd %xmm2, %xmm0
; ATOM-NEXT: paddd %xmm2, %xmm1
; ATOM-NEXT: movq %xmm1, 16(%rsi)
; ATOM-NEXT: movdqa %xmm0, (%rsi)
; ATOM-NEXT: retq
|
2017-02-26 00:46:47 +08:00
|
|
|
;
|
2018-08-27 02:29:27 +08:00
|
|
|
; ATHLON-LABEL: test8:
|
|
|
|
; ATHLON: ## %bb.0:
|
2018-08-30 14:01:03 +08:00
|
|
|
; ATHLON-NEXT: pushl %ebp
|
|
|
|
; ATHLON-NEXT: pushl %ebx
|
|
|
|
; ATHLON-NEXT: pushl %edi
|
|
|
|
; ATHLON-NEXT: pushl %esi
|
|
|
|
; ATHLON-NEXT: testb $1, {{[0-9]+}}(%esp)
|
|
|
|
; ATHLON-NEXT: leal {{[0-9]+}}(%esp), %eax
|
|
|
|
; ATHLON-NEXT: leal {{[0-9]+}}(%esp), %ecx
|
|
|
|
; ATHLON-NEXT: cmovnel %eax, %ecx
|
|
|
|
; ATHLON-NEXT: leal {{[0-9]+}}(%esp), %eax
|
|
|
|
; ATHLON-NEXT: leal {{[0-9]+}}(%esp), %edx
|
|
|
|
; ATHLON-NEXT: cmovnel %eax, %edx
|
|
|
|
; ATHLON-NEXT: leal {{[0-9]+}}(%esp), %eax
|
|
|
|
; ATHLON-NEXT: leal {{[0-9]+}}(%esp), %esi
|
|
|
|
; ATHLON-NEXT: cmovnel %eax, %esi
|
|
|
|
; ATHLON-NEXT: leal {{[0-9]+}}(%esp), %eax
|
|
|
|
; ATHLON-NEXT: leal {{[0-9]+}}(%esp), %edi
|
|
|
|
; ATHLON-NEXT: cmovnel %eax, %edi
|
|
|
|
; ATHLON-NEXT: leal {{[0-9]+}}(%esp), %eax
|
|
|
|
; ATHLON-NEXT: leal {{[0-9]+}}(%esp), %ebx
|
|
|
|
; ATHLON-NEXT: cmovnel %eax, %ebx
|
|
|
|
; ATHLON-NEXT: leal {{[0-9]+}}(%esp), %eax
|
|
|
|
; ATHLON-NEXT: leal {{[0-9]+}}(%esp), %ebp
|
|
|
|
; ATHLON-NEXT: cmovnel %eax, %ebp
|
|
|
|
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
|
|
; ATHLON-NEXT: movl (%ecx), %ecx
|
|
|
|
; ATHLON-NEXT: movl (%edx), %edx
|
|
|
|
; ATHLON-NEXT: movl (%esi), %esi
|
|
|
|
; ATHLON-NEXT: movl (%edi), %edi
|
|
|
|
; ATHLON-NEXT: movl (%ebx), %ebx
|
|
|
|
; ATHLON-NEXT: movl (%ebp), %ebp
|
|
|
|
; ATHLON-NEXT: decl %ecx
|
|
|
|
; ATHLON-NEXT: movl %ecx, 20(%eax)
|
|
|
|
; ATHLON-NEXT: decl %edx
|
|
|
|
; ATHLON-NEXT: movl %edx, 16(%eax)
|
|
|
|
; ATHLON-NEXT: decl %esi
|
|
|
|
; ATHLON-NEXT: movl %esi, 12(%eax)
|
|
|
|
; ATHLON-NEXT: decl %edi
|
|
|
|
; ATHLON-NEXT: movl %edi, 8(%eax)
|
|
|
|
; ATHLON-NEXT: decl %ebx
|
|
|
|
; ATHLON-NEXT: movl %ebx, 4(%eax)
|
|
|
|
; ATHLON-NEXT: decl %ebp
|
|
|
|
; ATHLON-NEXT: movl %ebp, (%eax)
|
|
|
|
; ATHLON-NEXT: popl %esi
|
|
|
|
; ATHLON-NEXT: popl %edi
|
|
|
|
; ATHLON-NEXT: popl %ebx
|
|
|
|
; ATHLON-NEXT: popl %ebp
|
|
|
|
; ATHLON-NEXT: retl
|
2018-08-27 02:29:27 +08:00
|
|
|
;
|
2017-02-26 00:46:47 +08:00
|
|
|
; MCU-LABEL: test8:
|
2017-12-05 01:18:51 +08:00
|
|
|
; MCU: # %bb.0:
|
2017-02-26 00:46:47 +08:00
|
|
|
; MCU-NEXT: pushl %ebp
|
|
|
|
; MCU-NEXT: pushl %ebx
|
|
|
|
; MCU-NEXT: pushl %edi
|
|
|
|
; MCU-NEXT: pushl %esi
|
[x86] Fix an amazing goof in the handling of sub, or, and xor lowering.
The comment for this code indicated that it should work similar to our
handling of add lowering above: if we see uses of an instruction other
than flag usage and store usage, it tries to avoid the specialized
X86ISD::* nodes that are designed for flag+op modeling and emits an
explicit test.
Problem is, only the add case actually did this. In all the other cases,
the logic was incomplete and inverted. Any time the value was used by
a store, we bailed on the specialized X86ISD node. All of this appears
to have been historical where we had different logic here. =/
Turns out, we have quite a few patterns designed around these nodes. We
should actually form them. I fixed the code to match what we do for add,
and it has quite a positive effect just within some of our test cases.
The only thing close to a regression I see is using:
notl %r
testl %r, %r
instead of:
xorl -1, %r
But we can add a pattern or something to fold that back out. The
improvements seem more than worth this.
I've also worked with Craig to update the comments to no longer be
actively contradicted by the code. =[ Some of this still remains
a mystery to both Craig and myself, but this seems like a large step in
the direction of consistency and slightly more accurate comments.
Many thanks to Craig for help figuring out this nasty stuff.
Differential Revision: https://reviews.llvm.org/D37096
llvm-svn: 311737
2017-08-25 08:34:07 +08:00
|
|
|
; MCU-NEXT: testb $1, %al
|
2017-02-26 00:46:47 +08:00
|
|
|
; MCU-NEXT: jne .LBB7_1
|
2017-12-05 01:18:51 +08:00
|
|
|
; MCU-NEXT: # %bb.2:
|
2017-08-25 08:34:07 +08:00
|
|
|
; MCU-NEXT: leal {{[0-9]+}}(%esp), %eax
|
|
|
|
; MCU-NEXT: movl (%eax), %eax
|
2017-02-26 00:46:47 +08:00
|
|
|
; MCU-NEXT: je .LBB7_5
|
|
|
|
; MCU-NEXT: .LBB7_4:
|
2017-08-25 08:34:07 +08:00
|
|
|
; MCU-NEXT: leal {{[0-9]+}}(%esp), %ecx
|
|
|
|
; MCU-NEXT: movl (%ecx), %ecx
|
2017-02-26 00:46:47 +08:00
|
|
|
; MCU-NEXT: je .LBB7_8
|
|
|
|
; MCU-NEXT: .LBB7_7:
|
2017-08-25 08:34:07 +08:00
|
|
|
; MCU-NEXT: leal {{[0-9]+}}(%esp), %esi
|
|
|
|
; MCU-NEXT: movl (%esi), %esi
|
2017-02-26 00:46:47 +08:00
|
|
|
; MCU-NEXT: je .LBB7_11
|
|
|
|
; MCU-NEXT: .LBB7_10:
|
2017-08-25 08:34:07 +08:00
|
|
|
; MCU-NEXT: leal {{[0-9]+}}(%esp), %edi
|
|
|
|
; MCU-NEXT: movl (%edi), %edi
|
2017-02-26 00:46:47 +08:00
|
|
|
; MCU-NEXT: je .LBB7_14
|
|
|
|
; MCU-NEXT: .LBB7_13:
|
2017-08-25 08:34:07 +08:00
|
|
|
; MCU-NEXT: leal {{[0-9]+}}(%esp), %ebx
|
|
|
|
; MCU-NEXT: movl (%ebx), %ebx
|
|
|
|
; MCU-NEXT: je .LBB7_17
|
|
|
|
; MCU-NEXT: .LBB7_16:
|
2017-02-26 00:46:47 +08:00
|
|
|
; MCU-NEXT: leal {{[0-9]+}}(%esp), %ebp
|
2017-08-25 08:34:07 +08:00
|
|
|
; MCU-NEXT: jmp .LBB7_18
|
2017-02-26 00:46:47 +08:00
|
|
|
; MCU-NEXT: .LBB7_1:
|
2017-08-25 08:34:07 +08:00
|
|
|
; MCU-NEXT: leal {{[0-9]+}}(%esp), %eax
|
|
|
|
; MCU-NEXT: movl (%eax), %eax
|
2017-02-26 00:46:47 +08:00
|
|
|
; MCU-NEXT: jne .LBB7_4
|
|
|
|
; MCU-NEXT: .LBB7_5:
|
2017-08-25 08:34:07 +08:00
|
|
|
; MCU-NEXT: leal {{[0-9]+}}(%esp), %ecx
|
|
|
|
; MCU-NEXT: movl (%ecx), %ecx
|
2017-02-26 00:46:47 +08:00
|
|
|
; MCU-NEXT: jne .LBB7_7
|
|
|
|
; MCU-NEXT: .LBB7_8:
|
2017-08-25 08:34:07 +08:00
|
|
|
; MCU-NEXT: leal {{[0-9]+}}(%esp), %esi
|
|
|
|
; MCU-NEXT: movl (%esi), %esi
|
2017-02-26 00:46:47 +08:00
|
|
|
; MCU-NEXT: jne .LBB7_10
|
|
|
|
; MCU-NEXT: .LBB7_11:
|
2017-08-25 08:34:07 +08:00
|
|
|
; MCU-NEXT: leal {{[0-9]+}}(%esp), %edi
|
|
|
|
; MCU-NEXT: movl (%edi), %edi
|
2017-02-26 00:46:47 +08:00
|
|
|
; MCU-NEXT: jne .LBB7_13
|
|
|
|
; MCU-NEXT: .LBB7_14:
|
2017-08-25 08:34:07 +08:00
|
|
|
; MCU-NEXT: leal {{[0-9]+}}(%esp), %ebx
|
|
|
|
; MCU-NEXT: movl (%ebx), %ebx
|
2017-02-26 00:46:47 +08:00
|
|
|
; MCU-NEXT: jne .LBB7_16
|
2017-08-25 08:34:07 +08:00
|
|
|
; MCU-NEXT: .LBB7_17:
|
|
|
|
; MCU-NEXT: leal {{[0-9]+}}(%esp), %ebp
|
2017-02-26 00:46:47 +08:00
|
|
|
; MCU-NEXT: .LBB7_18:
|
2017-08-25 08:34:07 +08:00
|
|
|
; MCU-NEXT: movl (%ebp), %ebp
|
2017-02-26 00:46:47 +08:00
|
|
|
; MCU-NEXT: decl %ebp
|
|
|
|
; MCU-NEXT: decl %ebx
|
|
|
|
; MCU-NEXT: decl %edi
|
|
|
|
; MCU-NEXT: decl %esi
|
|
|
|
; MCU-NEXT: decl %ecx
|
2017-08-25 08:34:07 +08:00
|
|
|
; MCU-NEXT: decl %eax
|
|
|
|
; MCU-NEXT: movl %eax, 20(%edx)
|
|
|
|
; MCU-NEXT: movl %ecx, 16(%edx)
|
|
|
|
; MCU-NEXT: movl %esi, 12(%edx)
|
|
|
|
; MCU-NEXT: movl %edi, 8(%edx)
|
|
|
|
; MCU-NEXT: movl %ebx, 4(%edx)
|
|
|
|
; MCU-NEXT: movl %ebp, (%edx)
|
2017-02-26 00:46:47 +08:00
|
|
|
; MCU-NEXT: popl %esi
|
|
|
|
; MCU-NEXT: popl %edi
|
|
|
|
; MCU-NEXT: popl %ebx
|
|
|
|
; MCU-NEXT: popl %ebp
|
|
|
|
; MCU-NEXT: retl
|
2016-09-15 04:16:24 +08:00
|
|
|
%x = select i1 %c, <6 x i32> %src1, <6 x i32> %src2
|
|
|
|
%val = sub <6 x i32> %x, < i32 1, i32 1, i32 1, i32 1, i32 1, i32 1 >
|
|
|
|
store <6 x i32> %val, <6 x i32>* %dst.addr
|
|
|
|
ret void
|
2009-12-12 03:50:50 +08:00
|
|
|
}
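The "[x86] Fix an amazing goof in the handling of sub, or, and xor lowering" commit quoted in the annotations above is easiest to see on a small example. The following is a minimal IR sketch (an assumed illustration, not one of the autogenerated tests in this file): the xor result feeds both a store and a flag-producing compare, which is exactly the case where the old logic bailed on the flag-producing X86ISD node and emitted a separate test instruction.

define i1 @xor_store_flags_sketch(i32 %a, i32* %p) nounwind {
  ; The xor value is stored ...
  %x = xor i32 %a, -1
  store i32 %x, i32* %p
  ; ... and its zero-ness is also consumed, so forming the flag-producing
  ; X86ISD::XOR node avoids a separate testl of the result.
  %c = icmp eq i32 %x, 0
  ret i1 %c
}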
|
Improve an integer select optimization in two ways:
1. generalize
(select (x == 0), -1, 0) -> (sign_bit (x - 1))
to:
(select (x == 0), -1, y) -> (sign_bit (x - 1)) | y
2. Handle the identical pattern that happens with !=:
(select (x != 0), y, -1) -> (sign_bit (x - 1)) | y
cmov is often high latency and can't fold immediates or
memory operands. For example for (x == 0) ? -1 : 1, before
we got:
< testb %sil, %sil
< movl $-1, %ecx
< movl $1, %eax
< cmovel %ecx, %eax
now we get:
> cmpb $1, %sil
> sbbl %eax, %eax
> orl $1, %eax
llvm-svn: 120929
2010-12-05 09:23:24 +08:00
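As a concrete illustration of the rewrite described above, here is a minimal IR sketch (an assumed example mirroring the (x == 0) ? -1 : 1 case in the asm diff; it is not one of the numbered tests in this file):

define i32 @select_eq_const_sketch(i8 %x) nounwind {
  ; (x == 0) ? -1 : 1 -- lowered as cmp/sbb/or instead of materializing both
  ; constants and using a cmov.
  %cmp = icmp eq i8 %x, 0
  %sel = select i1 %cmp, i32 -1, i32 1
  ret i32 %sel
}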
|
|
|
|
|
|
|
|
|
|
|
;; Test integer select between values and constants.
|
|
|
|
|
|
|
|
define i64 @test9(i64 %x, i64 %y) nounwind readnone ssp noredzone {
|
2018-12-13 03:20:21 +08:00
|
|
|
; CHECK-LABEL: test9:
|
|
|
|
; CHECK: ## %bb.0:
|
|
|
|
; CHECK-NEXT: xorl %eax, %eax
|
|
|
|
; CHECK-NEXT: cmpq $1, %rdi
|
|
|
|
; CHECK-NEXT: sbbq %rax, %rax
|
|
|
|
; CHECK-NEXT: orq %rsi, %rax
|
|
|
|
; CHECK-NEXT: retq
|
2017-02-26 00:46:47 +08:00
|
|
|
;
|
2018-08-27 02:29:27 +08:00
|
|
|
; ATHLON-LABEL: test9:
|
|
|
|
; ATHLON: ## %bb.0:
|
2018-08-30 14:01:03 +08:00
|
|
|
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
|
|
; ATHLON-NEXT: orl {{[0-9]+}}(%esp), %eax
|
|
|
|
; ATHLON-NEXT: movl $-1, %eax
|
|
|
|
; ATHLON-NEXT: movl $-1, %edx
|
|
|
|
; ATHLON-NEXT: je LBB8_2
|
|
|
|
; ATHLON-NEXT: ## %bb.1:
|
|
|
|
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
|
|
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %edx
|
|
|
|
; ATHLON-NEXT: LBB8_2:
|
|
|
|
; ATHLON-NEXT: retl
|
2018-08-27 02:29:27 +08:00
|
|
|
;
|
2017-02-26 00:46:47 +08:00
|
|
|
; MCU-LABEL: test9:
|
2017-12-05 01:18:51 +08:00
|
|
|
; MCU: # %bb.0:
|
2017-02-26 00:46:47 +08:00
|
|
|
; MCU-NEXT: orl %edx, %eax
|
|
|
|
; MCU-NEXT: jne .LBB8_1
|
2017-12-05 01:18:51 +08:00
|
|
|
; MCU-NEXT: # %bb.2:
|
2017-02-26 00:46:47 +08:00
|
|
|
; MCU-NEXT: movl $-1, %eax
|
|
|
|
; MCU-NEXT: movl $-1, %edx
|
|
|
|
; MCU-NEXT: retl
|
|
|
|
; MCU-NEXT: .LBB8_1:
|
|
|
|
; MCU-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
|
|
; MCU-NEXT: movl {{[0-9]+}}(%esp), %edx
|
|
|
|
; MCU-NEXT: retl
|
2010-12-05 09:23:24 +08:00
|
|
|
%cmp = icmp ne i64 %x, 0
|
|
|
|
%cond = select i1 %cmp, i64 %y, i64 -1
|
|
|
|
ret i64 %cond
|
|
|
|
}
|
|
|
|
|
|
|
|
;; Same as test9
|
|
|
|
define i64 @test9a(i64 %x, i64 %y) nounwind readnone ssp noredzone {
|
2018-12-13 03:20:21 +08:00
|
|
|
; CHECK-LABEL: test9a:
|
|
|
|
; CHECK: ## %bb.0:
|
|
|
|
; CHECK-NEXT: xorl %eax, %eax
|
|
|
|
; CHECK-NEXT: cmpq $1, %rdi
|
|
|
|
; CHECK-NEXT: sbbq %rax, %rax
|
|
|
|
; CHECK-NEXT: orq %rsi, %rax
|
|
|
|
; CHECK-NEXT: retq
|
2017-02-26 00:46:47 +08:00
|
|
|
;
|
2018-08-27 02:29:27 +08:00
|
|
|
; ATHLON-LABEL: test9a:
|
|
|
|
; ATHLON: ## %bb.0:
|
2018-08-30 14:01:03 +08:00
|
|
|
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
|
|
; ATHLON-NEXT: orl {{[0-9]+}}(%esp), %eax
|
|
|
|
; ATHLON-NEXT: movl $-1, %eax
|
|
|
|
; ATHLON-NEXT: movl $-1, %edx
|
|
|
|
; ATHLON-NEXT: je LBB9_2
|
|
|
|
; ATHLON-NEXT: ## %bb.1:
|
|
|
|
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
|
|
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %edx
|
|
|
|
; ATHLON-NEXT: LBB9_2:
|
|
|
|
; ATHLON-NEXT: retl
|
2018-08-27 02:29:27 +08:00
|
|
|
;
|
2017-02-26 00:46:47 +08:00
|
|
|
; MCU-LABEL: test9a:
|
2017-12-05 01:18:51 +08:00
|
|
|
; MCU: # %bb.0:
|
2017-02-26 00:46:47 +08:00
|
|
|
; MCU-NEXT: orl %edx, %eax
|
|
|
|
; MCU-NEXT: movl $-1, %eax
|
|
|
|
; MCU-NEXT: movl $-1, %edx
|
|
|
|
; MCU-NEXT: je .LBB9_2
|
2017-12-05 01:18:51 +08:00
|
|
|
; MCU-NEXT: # %bb.1:
|
2017-02-26 00:46:47 +08:00
|
|
|
; MCU-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
|
|
; MCU-NEXT: movl {{[0-9]+}}(%esp), %edx
|
|
|
|
; MCU-NEXT: .LBB9_2:
|
|
|
|
; MCU-NEXT: retl
|
2010-12-05 09:23:24 +08:00
|
|
|
%cmp = icmp eq i64 %x, 0
|
|
|
|
%cond = select i1 %cmp, i64 -1, i64 %y
|
|
|
|
ret i64 %cond
|
|
|
|
}
|
|
|
|
|
|
|
|
define i64 @test9b(i64 %x, i64 %y) nounwind readnone ssp noredzone {
|
2016-09-15 04:16:24 +08:00
|
|
|
; GENERIC-LABEL: test9b:
|
2017-12-05 01:18:51 +08:00
|
|
|
; GENERIC: ## %bb.0:
|
2016-09-15 04:16:24 +08:00
|
|
|
; GENERIC-NEXT: cmpq $1, %rdi
|
|
|
|
; GENERIC-NEXT: sbbq %rax, %rax
|
|
|
|
; GENERIC-NEXT: orq %rsi, %rax
|
|
|
|
; GENERIC-NEXT: retq
|
|
|
|
;
|
|
|
|
; ATOM-LABEL: test9b:
|
2017-12-05 01:18:51 +08:00
|
|
|
; ATOM: ## %bb.0:
|
2016-09-15 04:16:24 +08:00
|
|
|
; ATOM-NEXT: cmpq $1, %rdi
|
|
|
|
; ATOM-NEXT: sbbq %rax, %rax
|
|
|
|
; ATOM-NEXT: orq %rsi, %rax
|
|
|
|
; ATOM-NEXT: nop
|
|
|
|
; ATOM-NEXT: nop
|
|
|
|
; ATOM-NEXT: retq
|
2017-02-26 00:46:47 +08:00
|
|
|
;
|
2018-08-27 02:29:27 +08:00
|
|
|
; ATHLON-LABEL: test9b:
|
|
|
|
; ATHLON: ## %bb.0:
|
2018-08-30 14:01:03 +08:00
|
|
|
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
|
|
; ATHLON-NEXT: xorl %edx, %edx
|
|
|
|
; ATHLON-NEXT: orl {{[0-9]+}}(%esp), %eax
|
|
|
|
; ATHLON-NEXT: sete %dl
|
|
|
|
; ATHLON-NEXT: negl %edx
|
|
|
|
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
|
|
; ATHLON-NEXT: orl %edx, %eax
|
|
|
|
; ATHLON-NEXT: orl {{[0-9]+}}(%esp), %edx
|
|
|
|
; ATHLON-NEXT: retl
|
2018-08-27 02:29:27 +08:00
|
|
|
;
|
2017-02-26 00:46:47 +08:00
|
|
|
; MCU-LABEL: test9b:
|
2017-12-05 01:18:51 +08:00
|
|
|
; MCU: # %bb.0:
|
2017-08-11 23:44:14 +08:00
|
|
|
; MCU-NEXT: movl %edx, %ecx
|
2017-02-26 00:46:47 +08:00
|
|
|
; MCU-NEXT: xorl %edx, %edx
|
2017-08-11 23:44:14 +08:00
|
|
|
; MCU-NEXT: orl %ecx, %eax
|
|
|
|
; MCU-NEXT: sete %dl
|
|
|
|
; MCU-NEXT: negl %edx
|
2017-02-26 00:46:47 +08:00
|
|
|
; MCU-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
|
|
; MCU-NEXT: orl %edx, %eax
|
|
|
|
; MCU-NEXT: orl {{[0-9]+}}(%esp), %edx
|
|
|
|
; MCU-NEXT: retl
|
2010-12-05 09:23:24 +08:00
|
|
|
%cmp = icmp eq i64 %x, 0
|
|
|
|
%A = sext i1 %cmp to i64
|
|
|
|
%cond = or i64 %y, %A
|
|
|
|
ret i64 %cond
|
|
|
|
}
|
|
|
|
|
|
|
|
;; Select between -1 and 1.
|
|
|
|
define i64 @test10(i64 %x, i64 %y) nounwind readnone ssp noredzone {
|
2017-08-11 23:44:14 +08:00
|
|
|
; CHECK-LABEL: test10:
|
2017-12-05 01:18:51 +08:00
|
|
|
; CHECK: ## %bb.0:
|
2017-08-11 23:44:14 +08:00
|
|
|
; CHECK-NEXT: xorl %eax, %eax
|
|
|
|
; CHECK-NEXT: testq %rdi, %rdi
|
|
|
|
; CHECK-NEXT: setne %al
|
|
|
|
; CHECK-NEXT: leaq -1(%rax,%rax), %rax
|
|
|
|
; CHECK-NEXT: retq
|
2017-02-26 00:46:47 +08:00
|
|
|
;
|
2018-08-27 02:29:27 +08:00
|
|
|
; ATHLON-LABEL: test10:
|
|
|
|
; ATHLON: ## %bb.0:
|
2018-08-30 14:01:03 +08:00
|
|
|
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
|
|
; ATHLON-NEXT: xorl %edx, %edx
|
|
|
|
; ATHLON-NEXT: orl {{[0-9]+}}(%esp), %eax
|
|
|
|
; ATHLON-NEXT: movl $-1, %ecx
|
|
|
|
; ATHLON-NEXT: movl $1, %eax
|
|
|
|
; ATHLON-NEXT: cmovel %ecx, %eax
|
|
|
|
; ATHLON-NEXT: cmovel %ecx, %edx
|
|
|
|
; ATHLON-NEXT: retl
|
2018-08-27 02:29:27 +08:00
|
|
|
;
|
2017-02-26 00:46:47 +08:00
|
|
|
; MCU-LABEL: test10:
|
2017-12-05 01:18:51 +08:00
|
|
|
; MCU: # %bb.0:
|
2017-02-26 00:46:47 +08:00
|
|
|
; MCU-NEXT: orl %edx, %eax
|
|
|
|
; MCU-NEXT: movl $-1, %eax
|
|
|
|
; MCU-NEXT: movl $-1, %edx
|
|
|
|
; MCU-NEXT: je .LBB11_2
|
2017-12-05 01:18:51 +08:00
|
|
|
; MCU-NEXT: # %bb.1:
|
2017-02-26 00:46:47 +08:00
|
|
|
; MCU-NEXT: xorl %edx, %edx
|
|
|
|
; MCU-NEXT: movl $1, %eax
|
|
|
|
; MCU-NEXT: .LBB11_2:
|
|
|
|
; MCU-NEXT: retl
|
2010-12-05 09:23:24 +08:00
|
|
|
%cmp = icmp eq i64 %x, 0
|
|
|
|
%cond = select i1 %cmp, i64 -1, i64 1
|
|
|
|
ret i64 %cond
|
|
|
|
}
|
|
|
|
|
generalize the previous check to handle -1 on either side of the
select, inserting a not to compensate. Add a missing isZero check
that I lost somehow.
This improves codegen of:
void *func(long count) {
return new int[count];
}
from:
__Z4funcl: ## @_Z4funcl
movl $4, %ecx ## encoding: [0xb9,0x04,0x00,0x00,0x00]
movq %rdi, %rax ## encoding: [0x48,0x89,0xf8]
mulq %rcx ## encoding: [0x48,0xf7,0xe1]
testq %rdx, %rdx ## encoding: [0x48,0x85,0xd2]
movq $-1, %rdi ## encoding: [0x48,0xc7,0xc7,0xff,0xff,0xff,0xff]
cmoveq %rax, %rdi ## encoding: [0x48,0x0f,0x44,0xf8]
jmp __Znam ## TAILCALL
## encoding: [0xeb,A]
to:
__Z4funcl: ## @_Z4funcl
movl $4, %ecx ## encoding: [0xb9,0x04,0x00,0x00,0x00]
movq %rdi, %rax ## encoding: [0x48,0x89,0xf8]
mulq %rcx ## encoding: [0x48,0xf7,0xe1]
cmpq $1, %rdx ## encoding: [0x48,0x83,0xfa,0x01]
sbbq %rdi, %rdi ## encoding: [0x48,0x19,0xff]
notq %rdi ## encoding: [0x48,0xf7,0xd7]
orq %rax, %rdi ## encoding: [0x48,0x09,0xc7]
jmp __Znam ## TAILCALL
## encoding: [0xeb,A]
llvm-svn: 120932
2010-12-05 10:00:51 +08:00
|
|
|
define i64 @test11(i64 %x, i64 %y) nounwind readnone ssp noredzone {
|
2016-09-15 04:16:24 +08:00
|
|
|
; CHECK-LABEL: test11:
|
2017-12-05 01:18:51 +08:00
|
|
|
; CHECK: ## %bb.0:
|
2018-12-13 03:20:21 +08:00
|
|
|
; CHECK-NEXT: xorl %eax, %eax
|
2016-09-15 04:16:24 +08:00
|
|
|
; CHECK-NEXT: cmpq $1, %rdi
|
|
|
|
; CHECK-NEXT: sbbq %rax, %rax
|
|
|
|
; CHECK-NEXT: notq %rax
|
|
|
|
; CHECK-NEXT: orq %rsi, %rax
|
|
|
|
; CHECK-NEXT: retq
|
2017-02-26 00:46:47 +08:00
|
|
|
;
|
2018-08-27 02:29:27 +08:00
|
|
|
; ATHLON-LABEL: test11:
|
|
|
|
; ATHLON: ## %bb.0:
|
2018-08-30 14:01:03 +08:00
|
|
|
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
|
|
; ATHLON-NEXT: orl {{[0-9]+}}(%esp), %eax
|
|
|
|
; ATHLON-NEXT: movl $-1, %eax
|
|
|
|
; ATHLON-NEXT: movl $-1, %edx
|
|
|
|
; ATHLON-NEXT: jne LBB12_2
|
|
|
|
; ATHLON-NEXT: ## %bb.1:
|
|
|
|
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
|
|
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %edx
|
|
|
|
; ATHLON-NEXT: LBB12_2:
|
|
|
|
; ATHLON-NEXT: retl
|
2018-08-27 02:29:27 +08:00
|
|
|
;
|
2017-02-26 00:46:47 +08:00
|
|
|
; MCU-LABEL: test11:
|
2017-12-05 01:18:51 +08:00
|
|
|
; MCU: # %bb.0:
|
2017-02-26 00:46:47 +08:00
|
|
|
; MCU-NEXT: orl %edx, %eax
|
|
|
|
; MCU-NEXT: je .LBB12_1
|
2017-12-05 01:18:51 +08:00
|
|
|
; MCU-NEXT: # %bb.2:
|
2017-02-26 00:46:47 +08:00
|
|
|
; MCU-NEXT: movl $-1, %eax
|
|
|
|
; MCU-NEXT: movl $-1, %edx
|
|
|
|
; MCU-NEXT: retl
|
|
|
|
; MCU-NEXT: .LBB12_1:
|
|
|
|
; MCU-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
|
|
; MCU-NEXT: movl {{[0-9]+}}(%esp), %edx
|
|
|
|
; MCU-NEXT: retl
|
2010-12-05 10:00:51 +08:00
|
|
|
%cmp = icmp eq i64 %x, 0
|
|
|
|
%cond = select i1 %cmp, i64 %y, i64 -1
|
|
|
|
ret i64 %cond
|
|
|
|
}
|
|
|
|
|
|
|
|
define i64 @test11a(i64 %x, i64 %y) nounwind readnone ssp noredzone {
|
2016-09-15 04:16:24 +08:00
|
|
|
; CHECK-LABEL: test11a:
|
2017-12-05 01:18:51 +08:00
|
|
|
; CHECK: ## %bb.0:
|
2018-12-13 03:20:21 +08:00
|
|
|
; CHECK-NEXT: xorl %eax, %eax
|
2016-09-15 04:16:24 +08:00
|
|
|
; CHECK-NEXT: cmpq $1, %rdi
|
|
|
|
; CHECK-NEXT: sbbq %rax, %rax
|
|
|
|
; CHECK-NEXT: notq %rax
|
|
|
|
; CHECK-NEXT: orq %rsi, %rax
|
|
|
|
; CHECK-NEXT: retq
|
2017-02-26 00:46:47 +08:00
|
|
|
;
|
2018-08-27 02:29:27 +08:00
|
|
|
; ATHLON-LABEL: test11a:
|
|
|
|
; ATHLON: ## %bb.0:
|
2018-08-30 14:01:03 +08:00
|
|
|
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
|
|
; ATHLON-NEXT: orl {{[0-9]+}}(%esp), %eax
|
|
|
|
; ATHLON-NEXT: movl $-1, %eax
|
|
|
|
; ATHLON-NEXT: movl $-1, %edx
|
|
|
|
; ATHLON-NEXT: jne LBB13_2
|
|
|
|
; ATHLON-NEXT: ## %bb.1:
|
|
|
|
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
|
|
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %edx
|
|
|
|
; ATHLON-NEXT: LBB13_2:
|
|
|
|
; ATHLON-NEXT: retl
|
2018-08-27 02:29:27 +08:00
|
|
|
;
|
2017-02-26 00:46:47 +08:00
|
|
|
; MCU-LABEL: test11a:
|
2017-12-05 01:18:51 +08:00
|
|
|
; MCU: # %bb.0:
|
2017-02-26 00:46:47 +08:00
|
|
|
; MCU-NEXT: orl %edx, %eax
|
|
|
|
; MCU-NEXT: movl $-1, %eax
|
|
|
|
; MCU-NEXT: movl $-1, %edx
|
|
|
|
; MCU-NEXT: jne .LBB13_2
|
2017-12-05 01:18:51 +08:00
|
|
|
; MCU-NEXT: # %bb.1:
|
2017-02-26 00:46:47 +08:00
|
|
|
; MCU-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
|
|
; MCU-NEXT: movl {{[0-9]+}}(%esp), %edx
|
|
|
|
; MCU-NEXT: .LBB13_2:
|
|
|
|
; MCU-NEXT: retl
|
2010-12-05 10:00:51 +08:00
|
|
|
%cmp = icmp ne i64 %x, 0
|
|
|
|
%cond = select i1 %cmp, i64 -1, i64 %y
|
|
|
|
ret i64 %cond
|
|
|
|
}
|
|
|
|
|
X86: Lower a select directly to a setcc_carry if possible.
int test(unsigned long a, unsigned long b) { return -(a < b); }
compiles to
_test: ## @test
cmpq %rsi, %rdi ## encoding: [0x48,0x39,0xf7]
sbbl %eax, %eax ## encoding: [0x19,0xc0]
ret ## encoding: [0xc3]
instead of
_test: ## @test
xorl %ecx, %ecx ## encoding: [0x31,0xc9]
cmpq %rsi, %rdi ## encoding: [0x48,0x39,0xf7]
movl $-1, %eax ## encoding: [0xb8,0xff,0xff,0xff,0xff]
cmovael %ecx, %eax ## encoding: [0x0f,0x43,0xc1]
ret ## encoding: [0xc3]
llvm-svn: 122451
2010-12-23 07:09:28 +08:00
|
|
|
define i32 @test13(i32 %a, i32 %b) nounwind {
|
2016-09-15 04:16:24 +08:00
|
|
|
; GENERIC-LABEL: test13:
|
2017-12-05 01:18:51 +08:00
|
|
|
; GENERIC: ## %bb.0:
|
2016-09-15 04:16:24 +08:00
|
|
|
; GENERIC-NEXT: cmpl %esi, %edi
|
|
|
|
; GENERIC-NEXT: sbbl %eax, %eax
|
|
|
|
; GENERIC-NEXT: retq
|
|
|
|
;
|
|
|
|
; ATOM-LABEL: test13:
|
2017-12-05 01:18:51 +08:00
|
|
|
; ATOM: ## %bb.0:
|
2016-09-15 04:16:24 +08:00
|
|
|
; ATOM-NEXT: cmpl %esi, %edi
|
|
|
|
; ATOM-NEXT: sbbl %eax, %eax
|
|
|
|
; ATOM-NEXT: nop
|
|
|
|
; ATOM-NEXT: nop
|
|
|
|
; ATOM-NEXT: nop
|
|
|
|
; ATOM-NEXT: nop
|
|
|
|
; ATOM-NEXT: retq
|
2017-02-26 00:46:47 +08:00
|
|
|
;
|
2018-08-27 02:29:27 +08:00
|
|
|
; ATHLON-LABEL: test13:
|
|
|
|
; ATHLON: ## %bb.0:
|
2018-08-30 14:01:03 +08:00
|
|
|
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
|
|
; ATHLON-NEXT: cmpl {{[0-9]+}}(%esp), %eax
|
2018-08-27 02:29:27 +08:00
|
|
|
; ATHLON-NEXT: sbbl %eax, %eax
|
2018-08-30 14:01:03 +08:00
|
|
|
; ATHLON-NEXT: retl
|
2018-08-27 02:29:27 +08:00
|
|
|
;
|
2017-02-26 00:46:47 +08:00
|
|
|
; MCU-LABEL: test13:
|
2017-12-05 01:18:51 +08:00
|
|
|
; MCU: # %bb.0:
|
2017-02-26 00:46:47 +08:00
|
|
|
; MCU-NEXT: cmpl %edx, %eax
|
|
|
|
; MCU-NEXT: sbbl %eax, %eax
|
|
|
|
; MCU-NEXT: retl
|
2010-12-23 07:09:28 +08:00
|
|
|
%c = icmp ult i32 %a, %b
|
|
|
|
%d = sext i1 %c to i32
|
|
|
|
ret i32 %d
|
|
|
|
}
|
Teach X86ISelLowering that the second result of X86ISD::UMUL is a flags
result. This allows us to compile:
void *test12(long count) {
return new int[count];
}
into:
test12:
movl $4, %ecx
movq %rdi, %rax
mulq %rcx
movq $-1, %rdi
cmovnoq %rax, %rdi
jmp __Znam ## TAILCALL
instead of:
test12:
movl $4, %ecx
movq %rdi, %rax
mulq %rcx
seto %cl
testb %cl, %cl
movq $-1, %rdi
cmoveq %rax, %rdi
jmp __Znam
Of course it would be even better if the regalloc inverted the cmov to 'cmovoq',
which would eliminate the need for the 'movq %rdi, %rax'.
llvm-svn: 120936
2010-12-05 15:49:54 +08:00
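The new int[count] example in the commit above boils down to the following IR shape; this is a minimal sketch (an assumed illustration, not one of the tests in this file) of how the overflow bit of the widening multiply reaches the select, so the second (flags) result of X86ISD::UMUL can drive the cmov directly instead of going through seto/testb.

define i8* @umul_overflow_sketch(i64 %count) nounwind {
  ; Overflow-checked count * 4, as emitted for operator new[].
  %mul = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 %count, i64 4)
  %size = extractvalue { i64, i1 } %mul, 0
  %ovf = extractvalue { i64, i1 } %mul, 1
  ; On overflow request a huge size so the allocation fails.
  %arg = select i1 %ovf, i64 -1, i64 %size
  %ptr = call i8* @_Znam(i64 %arg)
  ret i8* %ptr
}

declare { i64, i1 } @llvm.umul.with.overflow.i64(i64, i64)
declare i8* @_Znam(i64)

(The @_Znam declaration stands in for operator new[](unsigned long), as in the commit's example.)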
|
|
|
|
2010-12-23 07:09:28 +08:00
|
|
|
define i32 @test14(i32 %a, i32 %b) nounwind {
|
2017-08-11 23:44:14 +08:00
|
|
|
; CHECK-LABEL: test14:
|
2017-12-05 01:18:51 +08:00
|
|
|
; CHECK: ## %bb.0:
|
2017-08-11 23:44:14 +08:00
|
|
|
; CHECK-NEXT: xorl %eax, %eax
|
|
|
|
; CHECK-NEXT: cmpl %esi, %edi
|
|
|
|
; CHECK-NEXT: setae %al
|
|
|
|
; CHECK-NEXT: negl %eax
|
|
|
|
; CHECK-NEXT: retq
|
2017-02-26 00:46:47 +08:00
|
|
|
;
|
2018-08-27 02:29:27 +08:00
|
|
|
; ATHLON-LABEL: test14:
|
|
|
|
; ATHLON: ## %bb.0:
|
2018-08-30 14:01:03 +08:00
|
|
|
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %ecx
|
2018-08-27 02:29:27 +08:00
|
|
|
; ATHLON-NEXT: xorl %eax, %eax
|
2018-08-30 14:01:03 +08:00
|
|
|
; ATHLON-NEXT: cmpl {{[0-9]+}}(%esp), %ecx
|
2018-08-27 02:29:27 +08:00
|
|
|
; ATHLON-NEXT: setae %al
|
|
|
|
; ATHLON-NEXT: negl %eax
|
2018-08-30 14:01:03 +08:00
|
|
|
; ATHLON-NEXT: retl
|
2018-08-27 02:29:27 +08:00
|
|
|
;
|
2017-02-26 00:46:47 +08:00
|
|
|
; MCU-LABEL: test14:
|
2017-12-05 01:18:51 +08:00
|
|
|
; MCU: # %bb.0:
|
2017-08-11 23:44:14 +08:00
|
|
|
; MCU-NEXT: xorl %ecx, %ecx
|
2017-02-26 00:46:47 +08:00
|
|
|
; MCU-NEXT: cmpl %edx, %eax
|
2017-08-11 23:44:14 +08:00
|
|
|
; MCU-NEXT: setae %cl
|
|
|
|
; MCU-NEXT: negl %ecx
|
|
|
|
; MCU-NEXT: movl %ecx, %eax
|
2017-02-26 00:46:47 +08:00
|
|
|
; MCU-NEXT: retl
|
2010-12-23 07:09:28 +08:00
|
|
|
%c = icmp uge i32 %a, %b
|
|
|
|
%d = sext i1 %c to i32
|
|
|
|
ret i32 %d
|
|
|
|
}
|
2010-12-05 09:23:24 +08:00
|
|
|
|
2012-05-08 02:06:23 +08:00
|
|
|
; rdar://10961709
|
|
|
|
define i32 @test15(i32 %x) nounwind {
|
2016-09-15 04:16:24 +08:00
|
|
|
; GENERIC-LABEL: test15:
|
2017-12-05 01:18:51 +08:00
|
|
|
; GENERIC: ## %bb.0: ## %entry
|
2016-09-15 04:16:24 +08:00
|
|
|
; GENERIC-NEXT: negl %edi
|
|
|
|
; GENERIC-NEXT: sbbl %eax, %eax
|
|
|
|
; GENERIC-NEXT: retq
|
|
|
|
;
|
|
|
|
; ATOM-LABEL: test15:
|
2017-12-05 01:18:51 +08:00
|
|
|
; ATOM: ## %bb.0: ## %entry
|
2016-09-15 04:16:24 +08:00
|
|
|
; ATOM-NEXT: negl %edi
|
|
|
|
; ATOM-NEXT: sbbl %eax, %eax
|
|
|
|
; ATOM-NEXT: nop
|
|
|
|
; ATOM-NEXT: nop
|
|
|
|
; ATOM-NEXT: nop
|
|
|
|
; ATOM-NEXT: nop
|
|
|
|
; ATOM-NEXT: retq
|
2017-02-26 00:46:47 +08:00
|
|
|
;
|
2018-08-27 02:29:27 +08:00
|
|
|
; ATHLON-LABEL: test15:
|
|
|
|
; ATHLON: ## %bb.0: ## %entry
|
2018-08-30 14:01:03 +08:00
|
|
|
; ATHLON-NEXT: xorl %eax, %eax
|
|
|
|
; ATHLON-NEXT: cmpl {{[0-9]+}}(%esp), %eax
|
2018-08-27 02:29:27 +08:00
|
|
|
; ATHLON-NEXT: sbbl %eax, %eax
|
2018-08-30 14:01:03 +08:00
|
|
|
; ATHLON-NEXT: retl
|
2018-08-27 02:29:27 +08:00
|
|
|
;
|
2017-02-26 00:46:47 +08:00
|
|
|
; MCU-LABEL: test15:
|
2017-12-05 01:18:51 +08:00
|
|
|
; MCU: # %bb.0: # %entry
|
2017-02-26 00:46:47 +08:00
|
|
|
; MCU-NEXT: negl %eax
|
|
|
|
; MCU-NEXT: sbbl %eax, %eax
|
|
|
|
; MCU-NEXT: retl
|
2012-05-08 02:06:23 +08:00
|
|
|
entry:
|
|
|
|
%cmp = icmp ne i32 %x, 0
|
|
|
|
%sub = sext i1 %cmp to i32
|
|
|
|
ret i32 %sub
|
|
|
|
}
|
|
|
|
|
|
|
|
define i64 @test16(i64 %x) nounwind uwtable readnone ssp {
|
2016-09-15 04:16:24 +08:00
|
|
|
; GENERIC-LABEL: test16:
|
2017-12-05 01:18:51 +08:00
|
|
|
; GENERIC: ## %bb.0: ## %entry
|
2016-09-15 04:16:24 +08:00
|
|
|
; GENERIC-NEXT: negq %rdi
|
|
|
|
; GENERIC-NEXT: sbbq %rax, %rax
|
|
|
|
; GENERIC-NEXT: retq
|
|
|
|
;
|
|
|
|
; ATOM-LABEL: test16:
|
2017-12-05 01:18:51 +08:00
|
|
|
; ATOM: ## %bb.0: ## %entry
|
2016-09-15 04:16:24 +08:00
|
|
|
; ATOM-NEXT: negq %rdi
|
|
|
|
; ATOM-NEXT: sbbq %rax, %rax
|
|
|
|
; ATOM-NEXT: nop
|
|
|
|
; ATOM-NEXT: nop
|
|
|
|
; ATOM-NEXT: nop
|
|
|
|
; ATOM-NEXT: nop
|
|
|
|
; ATOM-NEXT: retq
|
2017-02-26 00:46:47 +08:00
|
|
|
;
|
2018-08-27 02:29:27 +08:00
|
|
|
; ATHLON-LABEL: test16:
|
|
|
|
; ATHLON: ## %bb.0: ## %entry
|
2018-08-30 14:01:03 +08:00
|
|
|
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %ecx
|
|
|
|
; ATHLON-NEXT: xorl %eax, %eax
|
|
|
|
; ATHLON-NEXT: orl {{[0-9]+}}(%esp), %ecx
|
|
|
|
; ATHLON-NEXT: setne %al
|
|
|
|
; ATHLON-NEXT: negl %eax
|
|
|
|
; ATHLON-NEXT: movl %eax, %edx
|
|
|
|
; ATHLON-NEXT: retl
|
2018-08-27 02:29:27 +08:00
|
|
|
;
|
2017-02-26 00:46:47 +08:00
|
|
|
; MCU-LABEL: test16:
|
2017-12-05 01:18:51 +08:00
|
|
|
; MCU: # %bb.0: # %entry
|
2017-08-11 23:44:14 +08:00
|
|
|
; MCU-NEXT: movl %eax, %ecx
|
2017-02-26 00:46:47 +08:00
|
|
|
; MCU-NEXT: xorl %eax, %eax
|
2017-08-11 23:44:14 +08:00
|
|
|
; MCU-NEXT: orl %edx, %ecx
|
|
|
|
; MCU-NEXT: setne %al
|
|
|
|
; MCU-NEXT: negl %eax
|
2017-02-26 00:46:47 +08:00
|
|
|
; MCU-NEXT: movl %eax, %edx
|
|
|
|
; MCU-NEXT: retl
|
2012-05-08 02:06:23 +08:00
|
|
|
entry:
|
|
|
|
%cmp = icmp ne i64 %x, 0
|
|
|
|
%conv1 = sext i1 %cmp to i64
|
|
|
|
ret i64 %conv1
|
|
|
|
}
|
|
|
|
|
|
|
|
define i16 @test17(i16 %x) nounwind {
|
2016-09-15 04:16:24 +08:00
|
|
|
; GENERIC-LABEL: test17:
|
2017-12-05 01:18:51 +08:00
|
|
|
; GENERIC: ## %bb.0: ## %entry
|
2016-09-15 04:16:24 +08:00
|
|
|
; GENERIC-NEXT: negw %di
|
2017-08-11 23:44:14 +08:00
|
|
|
; GENERIC-NEXT: sbbl %eax, %eax
|
2018-02-01 06:04:26 +08:00
|
|
|
; GENERIC-NEXT: ## kill: def $ax killed $ax killed $eax
|
2016-09-15 04:16:24 +08:00
|
|
|
; GENERIC-NEXT: retq
|
|
|
|
;
|
|
|
|
; ATOM-LABEL: test17:
|
2017-12-05 01:18:51 +08:00
|
|
|
; ATOM: ## %bb.0: ## %entry
|
2016-09-15 04:16:24 +08:00
|
|
|
; ATOM-NEXT: negw %di
|
2017-08-11 23:44:14 +08:00
|
|
|
; ATOM-NEXT: sbbl %eax, %eax
|
2018-02-01 06:04:26 +08:00
|
|
|
; ATOM-NEXT: ## kill: def $ax killed $ax killed $eax
|
2016-09-15 04:16:24 +08:00
|
|
|
; ATOM-NEXT: nop
|
|
|
|
; ATOM-NEXT: nop
|
|
|
|
; ATOM-NEXT: nop
|
|
|
|
; ATOM-NEXT: nop
|
|
|
|
; ATOM-NEXT: retq
|
2017-02-26 00:46:47 +08:00
|
|
|
;
|
2018-08-27 02:29:27 +08:00
|
|
|
; ATHLON-LABEL: test17:
|
|
|
|
; ATHLON: ## %bb.0: ## %entry
|
2018-08-30 14:01:03 +08:00
|
|
|
; ATHLON-NEXT: xorl %eax, %eax
|
|
|
|
; ATHLON-NEXT: cmpw {{[0-9]+}}(%esp), %ax
|
2018-08-27 02:29:27 +08:00
|
|
|
; ATHLON-NEXT: sbbl %eax, %eax
|
|
|
|
; ATHLON-NEXT: ## kill: def $ax killed $ax killed $eax
|
2018-08-30 14:01:03 +08:00
|
|
|
; ATHLON-NEXT: retl
|
2018-08-27 02:29:27 +08:00
|
|
|
;
|
2017-02-26 00:46:47 +08:00
|
|
|
; MCU-LABEL: test17:
|
2017-12-05 01:18:51 +08:00
|
|
|
; MCU: # %bb.0: # %entry
|
2017-02-26 00:46:47 +08:00
|
|
|
; MCU-NEXT: negw %ax
|
2017-08-11 23:44:14 +08:00
|
|
|
; MCU-NEXT: sbbl %eax, %eax
|
2018-02-01 06:04:26 +08:00
|
|
|
; MCU-NEXT: # kill: def $ax killed $ax killed $eax
|
2017-02-26 00:46:47 +08:00
|
|
|
; MCU-NEXT: retl
|
2012-05-08 02:06:23 +08:00
|
|
|
entry:
|
|
|
|
%cmp = icmp ne i16 %x, 0
|
|
|
|
%sub = sext i1 %cmp to i16
|
|
|
|
ret i16 %sub
|
|
|
|
}
|
2012-10-13 18:39:49 +08:00
|
|
|
|
|
|
|
define i8 @test18(i32 %x, i8 zeroext %a, i8 zeroext %b) nounwind {
; GENERIC-LABEL: test18:
; GENERIC: ## %bb.0:
; GENERIC-NEXT: movl %esi, %eax
; GENERIC-NEXT: cmpl $15, %edi
; GENERIC-NEXT: cmovgel %edx, %eax
; GENERIC-NEXT: ## kill: def $al killed $al killed $eax
; GENERIC-NEXT: retq
;
; ATOM-LABEL: test18:
; ATOM: ## %bb.0:
; ATOM-NEXT: movl %esi, %eax
; ATOM-NEXT: cmpl $15, %edi
; ATOM-NEXT: cmovgel %edx, %eax
; ATOM-NEXT: ## kill: def $al killed $al killed $eax
; ATOM-NEXT: nop
; ATOM-NEXT: nop
; ATOM-NEXT: retq
;
; ATHLON-LABEL: test18:
; ATHLON: ## %bb.0:
; ATHLON-NEXT: cmpl $15, {{[0-9]+}}(%esp)
; ATHLON-NEXT: leal {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: leal {{[0-9]+}}(%esp), %ecx
; ATHLON-NEXT: cmovll %eax, %ecx
; ATHLON-NEXT: movb (%ecx), %al
; ATHLON-NEXT: retl
;
; MCU-LABEL: test18:
; MCU: # %bb.0:
; MCU-NEXT: cmpl $15, %eax
; MCU-NEXT: jl .LBB19_2
; MCU-NEXT: # %bb.1:
; MCU-NEXT: movl %ecx, %edx
; MCU-NEXT: .LBB19_2:
; MCU-NEXT: movl %edx, %eax
; MCU-NEXT: retl
  %cmp = icmp slt i32 %x, 15
  %sel = select i1 %cmp, i8 %a, i8 %b
  ret i8 %sel
}

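; Editor's note (not part of the autogenerated checks): the next test is named
; after a miscompile it guards against, where a select of the constants 3 and 2
; feeds a shift amount; roughly, in C,
;   int trunc_select(int a, _Bool cc) { return a << (cc ? 3 : 2); }
; and because %cc is a zero-extended 0/1 value, the shift amount can be formed
; as (cc | 2).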
define i32 @trunc_select_miscompile(i32 %a, i1 zeroext %cc) {
; CHECK-LABEL: trunc_select_miscompile:
; CHECK: ## %bb.0:
; CHECK-NEXT: movl %esi, %ecx
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: orb $2, %cl
; CHECK-NEXT: ## kill: def $cl killed $cl killed $ecx
; CHECK-NEXT: shll %cl, %eax
; CHECK-NEXT: retq
;
; ATHLON-LABEL: trunc_select_miscompile:
; ATHLON: ## %bb.0:
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: movb {{[0-9]+}}(%esp), %cl
; ATHLON-NEXT: orb $2, %cl
; ATHLON-NEXT: shll %cl, %eax
; ATHLON-NEXT: retl
;
; MCU-LABEL: trunc_select_miscompile:
; MCU: # %bb.0:
; MCU-NEXT: movl %edx, %ecx
; MCU-NEXT: orb $2, %cl
; MCU-NEXT: # kill: def $cl killed $cl killed $ecx
; MCU-NEXT: shll %cl, %eax
; MCU-NEXT: retl
  %tmp1 = select i1 %cc, i32 3, i32 2
  %tmp2 = shl i32 %a, %tmp1
  ret i32 %tmp2
}

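; Editor's note (not part of the autogenerated checks): the two pr29002
; reproducers below clamp a 32-bit value into a narrower signed range before
; truncating and storing it; for the i8 case, roughly,
;   *dst = src > 127 ? 127 : (src < -128 ? -128 : src);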
; reproducer for pr29002
define void @clamp_i8(i32 %src, i8* %dst) {
; GENERIC-LABEL: clamp_i8:
; GENERIC: ## %bb.0:
; GENERIC-NEXT: cmpl $127, %edi
; GENERIC-NEXT: movl $127, %eax
; GENERIC-NEXT: cmovlel %edi, %eax
; GENERIC-NEXT: cmpl $-128, %eax
; GENERIC-NEXT: movb $-128, %cl
; GENERIC-NEXT: jl LBB21_2
; GENERIC-NEXT: ## %bb.1:
; GENERIC-NEXT: movl %eax, %ecx
; GENERIC-NEXT: LBB21_2:
; GENERIC-NEXT: movb %cl, (%rsi)
; GENERIC-NEXT: retq
;
; ATOM-LABEL: clamp_i8:
; ATOM: ## %bb.0:
; ATOM-NEXT: cmpl $127, %edi
; ATOM-NEXT: movl $127, %eax
; ATOM-NEXT: movb $-128, %cl
; ATOM-NEXT: cmovlel %edi, %eax
; ATOM-NEXT: cmpl $-128, %eax
; ATOM-NEXT: jl LBB21_2
; ATOM-NEXT: ## %bb.1:
; ATOM-NEXT: movl %eax, %ecx
; ATOM-NEXT: LBB21_2:
; ATOM-NEXT: movb %cl, (%rsi)
; ATOM-NEXT: retq
;
; ATHLON-LABEL: clamp_i8:
; ATHLON: ## %bb.0:
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %edx
; ATHLON-NEXT: cmpl $127, %edx
; ATHLON-NEXT: movl $127, %ecx
; ATHLON-NEXT: cmovlel %edx, %ecx
; ATHLON-NEXT: cmpl $-128, %ecx
; ATHLON-NEXT: movb $-128, %dl
; ATHLON-NEXT: jl LBB21_2
; ATHLON-NEXT: ## %bb.1:
; ATHLON-NEXT: movl %ecx, %edx
; ATHLON-NEXT: LBB21_2:
; ATHLON-NEXT: movb %dl, (%eax)
; ATHLON-NEXT: retl
;
; MCU-LABEL: clamp_i8:
; MCU: # %bb.0:
; MCU-NEXT: cmpl $127, %eax
; MCU-NEXT: movl $127, %ecx
; MCU-NEXT: jg .LBB21_2
; MCU-NEXT: # %bb.1:
; MCU-NEXT: movl %eax, %ecx
; MCU-NEXT: .LBB21_2:
; MCU-NEXT: cmpl $-128, %ecx
; MCU-NEXT: movb $-128, %al
; MCU-NEXT: jl .LBB21_4
; MCU-NEXT: # %bb.3:
; MCU-NEXT: movl %ecx, %eax
; MCU-NEXT: .LBB21_4:
; MCU-NEXT: movb %al, (%edx)
; MCU-NEXT: retl
  %cmp = icmp sgt i32 %src, 127
  %sel1 = select i1 %cmp, i32 127, i32 %src
  %cmp1 = icmp slt i32 %sel1, -128
  %sel2 = select i1 %cmp1, i32 -128, i32 %sel1
  %conv = trunc i32 %sel2 to i8
  store i8 %conv, i8* %dst, align 2
  ret void
}

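; Editor's note (not part of the autogenerated checks): the next test is the
; same clamp pattern, but into the signed 16-bit range [-32768, 32767] with an
; i16 store.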
; reproducer for pr29002
define void @clamp(i32 %src, i16* %dst) {
; GENERIC-LABEL: clamp:
; GENERIC: ## %bb.0:
; GENERIC-NEXT: cmpl $32767, %edi ## imm = 0x7FFF
; GENERIC-NEXT: movl $32767, %eax ## imm = 0x7FFF
; GENERIC-NEXT: cmovlel %edi, %eax
; GENERIC-NEXT: cmpl $-32768, %eax ## imm = 0x8000
; GENERIC-NEXT: movl $32768, %ecx ## imm = 0x8000
; GENERIC-NEXT: cmovgel %eax, %ecx
; GENERIC-NEXT: movw %cx, (%rsi)
; GENERIC-NEXT: retq
;
; ATOM-LABEL: clamp:
; ATOM: ## %bb.0:
; ATOM-NEXT: cmpl $32767, %edi ## imm = 0x7FFF
; ATOM-NEXT: movl $32767, %eax ## imm = 0x7FFF
; ATOM-NEXT: movl $32768, %ecx ## imm = 0x8000
; ATOM-NEXT: cmovlel %edi, %eax
; ATOM-NEXT: cmpl $-32768, %eax ## imm = 0x8000
; ATOM-NEXT: cmovgel %eax, %ecx
; ATOM-NEXT: movw %cx, (%rsi)
; ATOM-NEXT: retq
;
; ATHLON-LABEL: clamp:
; ATHLON: ## %bb.0:
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %ecx
; ATHLON-NEXT: cmpl $32767, %ecx ## imm = 0x7FFF
; ATHLON-NEXT: movl $32767, %edx ## imm = 0x7FFF
; ATHLON-NEXT: cmovlel %ecx, %edx
; ATHLON-NEXT: cmpl $-32768, %edx ## imm = 0x8000
; ATHLON-NEXT: movl $32768, %ecx ## imm = 0x8000
; ATHLON-NEXT: cmovgel %edx, %ecx
; ATHLON-NEXT: movw %cx, (%eax)
; ATHLON-NEXT: retl
;
; MCU-LABEL: clamp:
; MCU: # %bb.0:
; MCU-NEXT: cmpl $32767, %eax # imm = 0x7FFF
; MCU-NEXT: movl $32767, %ecx # imm = 0x7FFF
; MCU-NEXT: jg .LBB22_2
; MCU-NEXT: # %bb.1:
; MCU-NEXT: movl %eax, %ecx
; MCU-NEXT: .LBB22_2:
; MCU-NEXT: cmpl $-32768, %ecx # imm = 0x8000
; MCU-NEXT: movl $32768, %eax # imm = 0x8000
; MCU-NEXT: jl .LBB22_4
; MCU-NEXT: # %bb.3:
; MCU-NEXT: movl %ecx, %eax
; MCU-NEXT: .LBB22_4:
; MCU-NEXT: movw %ax, (%edx)
; MCU-NEXT: retl
  %cmp = icmp sgt i32 %src, 32767
  %sel1 = select i1 %cmp, i32 32767, i32 %src
  %cmp1 = icmp slt i32 %sel1, -32768
  %sel2 = select i1 %cmp1, i32 -32768, i32 %sel1
  %conv = trunc i32 %sel2 to i16
  store i16 %conv, i16* %dst, align 2
  ret void
}

define void @test19() {
; This is a massive reduction of an llvm-stress test case that generates
; interesting chains feeding setcc and eventually a f32 select operation. This
; is intended to exercise the SELECT formation in the DAG combine simplifying
; a simplified select_cc node. If it regresses and is no longer triggering
; that code path, it can be deleted.
;
; CHECK-LABEL: test19:
; CHECK: ## %bb.0: ## %BB
; CHECK-NEXT: movl $-1, %eax
; CHECK-NEXT: movb $1, %cl
; CHECK-NEXT: .p2align 4, 0x90
; CHECK-NEXT: LBB23_1: ## %CF
; CHECK-NEXT: ## =>This Inner Loop Header: Depth=1
; CHECK-NEXT: testb %cl, %cl
; CHECK-NEXT: jne LBB23_1
; CHECK-NEXT: ## %bb.2: ## %CF250
; CHECK-NEXT: ## in Loop: Header=BB23_1 Depth=1
; CHECK-NEXT: jne LBB23_1
; CHECK-NEXT: .p2align 4, 0x90
; CHECK-NEXT: LBB23_3: ## %CF242
; CHECK-NEXT: ## =>This Inner Loop Header: Depth=1
; CHECK-NEXT: cmpl %eax, %eax
; CHECK-NEXT: ucomiss %xmm0, %xmm0
; CHECK-NEXT: jp LBB23_3
; CHECK-NEXT: ## %bb.4: ## %CF244
; CHECK-NEXT: retq
;
; ATHLON-LABEL: test19:
; ATHLON: ## %bb.0: ## %BB
; ATHLON-NEXT: movb $1, %al
; ATHLON-NEXT: .p2align 4, 0x90
; ATHLON-NEXT: LBB23_1: ## %CF
; ATHLON-NEXT: ## =>This Inner Loop Header: Depth=1
; ATHLON-NEXT: testb %al, %al
; ATHLON-NEXT: jne LBB23_1
; ATHLON-NEXT: ## %bb.2: ## %CF250
; ATHLON-NEXT: ## in Loop: Header=BB23_1 Depth=1
; ATHLON-NEXT: jne LBB23_1
; ATHLON-NEXT: ## %bb.3: ## %CF242.preheader
; ATHLON-NEXT: fldz
; ATHLON-NEXT: .p2align 4, 0x90
; ATHLON-NEXT: LBB23_4: ## %CF242
; ATHLON-NEXT: ## =>This Inner Loop Header: Depth=1
; ATHLON-NEXT: fucomi %st(0)
; ATHLON-NEXT: jp LBB23_4
; ATHLON-NEXT: ## %bb.5: ## %CF244
; ATHLON-NEXT: fstp %st(0)
; ATHLON-NEXT: retl
;
; MCU-LABEL: test19:
; MCU: # %bb.0: # %BB
; MCU-NEXT: movl $-1, %ecx
; MCU-NEXT: movb $1, %al
; MCU-NEXT: .p2align 4, 0x90
; MCU-NEXT: .LBB23_1: # %CF
; MCU-NEXT: # =>This Inner Loop Header: Depth=1
; MCU-NEXT: testb %al, %al
; MCU-NEXT: jne .LBB23_1
; MCU-NEXT: # %bb.2: # %CF250
; MCU-NEXT: # in Loop: Header=BB23_1 Depth=1
; MCU-NEXT: jne .LBB23_1
; MCU-NEXT: # %bb.3: # %CF242.preheader
; MCU-NEXT: fldz
; MCU-NEXT: .p2align 4, 0x90
; MCU-NEXT: .LBB23_4: # %CF242
; MCU-NEXT: # =>This Inner Loop Header: Depth=1
; MCU-NEXT: cmpl %eax, %ecx
; MCU-NEXT: fucom %st(0)
; MCU-NEXT: fnstsw %ax
; MCU-NEXT: # kill: def $ah killed $ah killed $ax
; MCU-NEXT: sahf
; MCU-NEXT: jp .LBB23_4
; MCU-NEXT: # %bb.5: # %CF244
; MCU-NEXT: fstp %st(0)
; MCU-NEXT: retl
BB:
  br label %CF

CF:
  %Cmp10 = icmp ule i8 undef, undef
  br i1 %Cmp10, label %CF, label %CF250

CF250:
  %E12 = extractelement <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, i32 2
  %Cmp32 = icmp ugt i1 %Cmp10, false
  br i1 %Cmp32, label %CF, label %CF242

CF242:
  %Cmp38 = icmp uge i32 %E12, undef
  %FC = uitofp i1 %Cmp38 to float
  %Sl59 = select i1 %Cmp32, float %FC, float undef
  %Cmp60 = fcmp ugt float undef, undef
  br i1 %Cmp60, label %CF242, label %CF244

CF244:
  %B122 = fadd float %Sl59, undef
  ret void
}

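; Editor's note (not part of the autogenerated checks): the select_xor_* and
; select_or_* tests below all have the shape "bit of %cond set ? (A op B) : A".
; An illustrative C equivalent of select_xor_1 is
;   short select_xor_1(short A, unsigned char cond) { return (cond & 1) ? A ^ 43 : A; }
; and on MCU the select is expected to lower branchlessly to
; A ^ ((0 - (cond & 1)) & 43).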
define i16 @select_xor_1(i16 %A, i8 %cond) {
; CHECK-LABEL: select_xor_1:
; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: xorl $43, %eax
; CHECK-NEXT: testb $1, %sil
; CHECK-NEXT: cmovel %edi, %eax
; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
;
; ATHLON-LABEL: select_xor_1:
; ATHLON: ## %bb.0: ## %entry
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %ecx
; ATHLON-NEXT: movl %ecx, %eax
; ATHLON-NEXT: xorl $43, %eax
; ATHLON-NEXT: testb $1, {{[0-9]+}}(%esp)
; ATHLON-NEXT: cmovel %ecx, %eax
; ATHLON-NEXT: ## kill: def $ax killed $ax killed $eax
; ATHLON-NEXT: retl
;
; MCU-LABEL: select_xor_1:
; MCU: # %bb.0: # %entry
; MCU-NEXT: andl $1, %edx
; MCU-NEXT: negl %edx
; MCU-NEXT: andl $43, %edx
; MCU-NEXT: xorl %edx, %eax
; MCU-NEXT: # kill: def $ax killed $ax killed $eax
; MCU-NEXT: retl
entry:
  %and = and i8 %cond, 1
  %cmp10 = icmp eq i8 %and, 0
  %0 = xor i16 %A, 43
  %1 = select i1 %cmp10, i16 %A, i16 %0
  ret i16 %1
}

; Equivalent to above, but with icmp ne (and %cond, 1), 1 instead of
; icmp eq (and %cond, 1), 0
define i16 @select_xor_1b(i16 %A, i8 %cond) {
; CHECK-LABEL: select_xor_1b:
; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: xorl $43, %eax
; CHECK-NEXT: testb $1, %sil
; CHECK-NEXT: cmovel %edi, %eax
; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
;
; ATHLON-LABEL: select_xor_1b:
; ATHLON: ## %bb.0: ## %entry
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %ecx
; ATHLON-NEXT: movl %ecx, %eax
; ATHLON-NEXT: xorl $43, %eax
; ATHLON-NEXT: testb $1, {{[0-9]+}}(%esp)
; ATHLON-NEXT: cmovel %ecx, %eax
; ATHLON-NEXT: ## kill: def $ax killed $ax killed $eax
; ATHLON-NEXT: retl
;
; MCU-LABEL: select_xor_1b:
; MCU: # %bb.0: # %entry
; MCU-NEXT: testb $1, %dl
; MCU-NEXT: je .LBB25_2
; MCU-NEXT: # %bb.1:
; MCU-NEXT: xorl $43, %eax
; MCU-NEXT: .LBB25_2: # %entry
; MCU-NEXT: # kill: def $ax killed $ax killed $eax
; MCU-NEXT: retl
entry:
  %and = and i8 %cond, 1
  %cmp10 = icmp ne i8 %and, 1
  %0 = xor i16 %A, 43
  %1 = select i1 %cmp10, i16 %A, i16 %0
  ret i16 %1
}

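; Editor's note (not part of the autogenerated checks): select_xor_2 is the i32
; form with a variable xor operand; roughly, in C,
;   int select_xor_2(int A, int B, unsigned char cond) { return (cond & 1) ? A ^ B : A; }
; and on MCU it becomes A ^ (B & (0 - (cond & 1))).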
define i32 @select_xor_2(i32 %A, i32 %B, i8 %cond) {
; CHECK-LABEL: select_xor_2:
; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: movl %esi, %eax
; CHECK-NEXT: xorl %edi, %eax
; CHECK-NEXT: testb $1, %dl
; CHECK-NEXT: cmovel %edi, %eax
; CHECK-NEXT: retq
;
; ATHLON-LABEL: select_xor_2:
; ATHLON: ## %bb.0: ## %entry
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %ecx
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: xorl %ecx, %eax
; ATHLON-NEXT: testb $1, {{[0-9]+}}(%esp)
; ATHLON-NEXT: cmovel %ecx, %eax
; ATHLON-NEXT: retl
;
; MCU-LABEL: select_xor_2:
; MCU: # %bb.0: # %entry
; MCU-NEXT: andl $1, %ecx
; MCU-NEXT: negl %ecx
; MCU-NEXT: andl %edx, %ecx
; MCU-NEXT: xorl %ecx, %eax
; MCU-NEXT: retl
entry:
  %and = and i8 %cond, 1
  %cmp10 = icmp eq i8 %and, 0
  %0 = xor i32 %B, %A
  %1 = select i1 %cmp10, i32 %A, i32 %0
  ret i32 %1
}

; Equivalent to above, but with icmp ne (and %cond, 1), 1 instead of
; icmp eq (and %cond, 1), 0
define i32 @select_xor_2b(i32 %A, i32 %B, i8 %cond) {
; CHECK-LABEL: select_xor_2b:
; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: movl %esi, %eax
; CHECK-NEXT: xorl %edi, %eax
; CHECK-NEXT: testb $1, %dl
; CHECK-NEXT: cmovel %edi, %eax
; CHECK-NEXT: retq
;
; ATHLON-LABEL: select_xor_2b:
; ATHLON: ## %bb.0: ## %entry
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %ecx
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: xorl %ecx, %eax
; ATHLON-NEXT: testb $1, {{[0-9]+}}(%esp)
; ATHLON-NEXT: cmovel %ecx, %eax
; ATHLON-NEXT: retl
;
; MCU-LABEL: select_xor_2b:
; MCU: # %bb.0: # %entry
; MCU-NEXT: testb $1, %cl
; MCU-NEXT: je .LBB27_2
; MCU-NEXT: # %bb.1:
; MCU-NEXT: xorl %edx, %eax
; MCU-NEXT: .LBB27_2: # %entry
; MCU-NEXT: retl
entry:
  %and = and i8 %cond, 1
  %cmp10 = icmp ne i8 %and, 1
  %0 = xor i32 %B, %A
  %1 = select i1 %cmp10, i32 %A, i32 %0
  ret i32 %1
}

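; Editor's note (not part of the autogenerated checks): the select_or_* tests
; are the same pattern with or instead of xor, e.g.
;   int select_or(int A, int B, unsigned char cond) { return (cond & 1) ? A | B : A; }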
define i32 @select_or(i32 %A, i32 %B, i8 %cond) {
; CHECK-LABEL: select_or:
; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: movl %esi, %eax
; CHECK-NEXT: orl %edi, %eax
; CHECK-NEXT: testb $1, %dl
; CHECK-NEXT: cmovel %edi, %eax
; CHECK-NEXT: retq
;
; ATHLON-LABEL: select_or:
; ATHLON: ## %bb.0: ## %entry
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %ecx
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: orl %ecx, %eax
; ATHLON-NEXT: testb $1, {{[0-9]+}}(%esp)
; ATHLON-NEXT: cmovel %ecx, %eax
; ATHLON-NEXT: retl
;
; MCU-LABEL: select_or:
; MCU: # %bb.0: # %entry
; MCU-NEXT: andl $1, %ecx
; MCU-NEXT: negl %ecx
; MCU-NEXT: andl %edx, %ecx
; MCU-NEXT: orl %ecx, %eax
; MCU-NEXT: retl
entry:
  %and = and i8 %cond, 1
  %cmp10 = icmp eq i8 %and, 0
  %0 = or i32 %B, %A
  %1 = select i1 %cmp10, i32 %A, i32 %0
  ret i32 %1
}

; Equivalent to above, but with icmp ne (and %cond, 1), 1 instead of
; icmp eq (and %cond, 1), 0
define i32 @select_or_b(i32 %A, i32 %B, i8 %cond) {
; CHECK-LABEL: select_or_b:
; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: movl %esi, %eax
; CHECK-NEXT: orl %edi, %eax
; CHECK-NEXT: testb $1, %dl
; CHECK-NEXT: cmovel %edi, %eax
; CHECK-NEXT: retq
;
; ATHLON-LABEL: select_or_b:
; ATHLON: ## %bb.0: ## %entry
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %ecx
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: orl %ecx, %eax
; ATHLON-NEXT: testb $1, {{[0-9]+}}(%esp)
; ATHLON-NEXT: cmovel %ecx, %eax
; ATHLON-NEXT: retl
;
; MCU-LABEL: select_or_b:
; MCU: # %bb.0: # %entry
; MCU-NEXT: testb $1, %cl
; MCU-NEXT: je .LBB29_2
; MCU-NEXT: # %bb.1:
; MCU-NEXT: orl %edx, %eax
; MCU-NEXT: .LBB29_2: # %entry
; MCU-NEXT: retl
entry:
  %and = and i8 %cond, 1
  %cmp10 = icmp ne i8 %and, 1
  %0 = or i32 %B, %A
  %1 = select i1 %cmp10, i32 %A, i32 %0
  ret i32 %1
}

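; Editor's note (not part of the autogenerated checks): select_or_1 repeats the
; or pattern with an i32 condition, i.e. roughly
;   int select_or_1(int A, int B, int cond) { return (cond & 1) ? A | B : A; }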
define i32 @select_or_1(i32 %A, i32 %B, i32 %cond) {
; CHECK-LABEL: select_or_1:
; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: movl %esi, %eax
; CHECK-NEXT: orl %edi, %eax
; CHECK-NEXT: testb $1, %dl
; CHECK-NEXT: cmovel %edi, %eax
; CHECK-NEXT: retq
;
; ATHLON-LABEL: select_or_1:
; ATHLON: ## %bb.0: ## %entry
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %ecx
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: orl %ecx, %eax
; ATHLON-NEXT: testb $1, {{[0-9]+}}(%esp)
; ATHLON-NEXT: cmovel %ecx, %eax
; ATHLON-NEXT: retl
;
; MCU-LABEL: select_or_1:
; MCU: # %bb.0: # %entry
; MCU-NEXT: andl $1, %ecx
; MCU-NEXT: negl %ecx
; MCU-NEXT: andl %edx, %ecx
; MCU-NEXT: orl %ecx, %eax
; MCU-NEXT: retl
entry:
  %and = and i32 %cond, 1
  %cmp10 = icmp eq i32 %and, 0
  %0 = or i32 %B, %A
  %1 = select i1 %cmp10, i32 %A, i32 %0
  ret i32 %1
}

; Equivalent to above, but with icmp ne (and %cond, 1), 1 instead of
; icmp eq (and %cond, 1), 0
define i32 @select_or_1b(i32 %A, i32 %B, i32 %cond) {
; CHECK-LABEL: select_or_1b:
; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: movl %esi, %eax
; CHECK-NEXT: orl %edi, %eax
; CHECK-NEXT: testb $1, %dl
; CHECK-NEXT: cmovel %edi, %eax
; CHECK-NEXT: retq
;
; ATHLON-LABEL: select_or_1b:
; ATHLON: ## %bb.0: ## %entry
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %ecx
; ATHLON-NEXT: movl {{[0-9]+}}(%esp), %eax
; ATHLON-NEXT: orl %ecx, %eax
; ATHLON-NEXT: testb $1, {{[0-9]+}}(%esp)
; ATHLON-NEXT: cmovel %ecx, %eax
; ATHLON-NEXT: retl
;
; MCU-LABEL: select_or_1b:
; MCU: # %bb.0: # %entry
; MCU-NEXT: testb $1, %cl
; MCU-NEXT: je .LBB31_2
; MCU-NEXT: # %bb.1:
; MCU-NEXT: orl %edx, %eax
; MCU-NEXT: .LBB31_2: # %entry
; MCU-NEXT: retl
entry:
  %and = and i32 %cond, 1
  %cmp10 = icmp ne i32 %and, 1
  %0 = or i32 %B, %A
  %1 = select i1 %cmp10, i32 %A, i32 %0
  ret i32 %1
}