; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=i686-linux-gnu %s -o - | FileCheck %s

; 64-bit equality compare on a 32-bit target: should lower to xor/xor/or of
; the two halves and a single branch, not two separate compares with a join.
define i32 @branch_eq(i64 %a, i64 %b) {
; CHECK-LABEL: branch_eq:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT:    xorl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT:    xorl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT:    orl %ecx, %eax
; CHECK-NEXT:    jne .LBB0_2
; CHECK-NEXT:  # %bb.1: # %bb1
; CHECK-NEXT:    movl $1, %eax
; CHECK-NEXT:    retl
; CHECK-NEXT:  .LBB0_2: # %bb2
; CHECK-NEXT:    movl $2, %eax
; CHECK-NEXT:    retl
entry:
  %cmp = icmp eq i64 %a, %b
  br i1 %cmp, label %bb1, label %bb2

bb1:
  ret i32 1

bb2:
  ret i32 2
}

; Signed less-than on i64: should use the efficient cmpl/sbbl sequence
; (subtract-with-borrow sets SF/OF for the full 64-bit compare) and branch
; once on jge, instead of two compares plus setcc merging.
define i32 @branch_slt(i64 %a, i64 %b) {
; CHECK-LABEL: branch_slt:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT:    cmpl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT:    sbbl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT:    jge .LBB1_2
; CHECK-NEXT:  # %bb.1: # %bb1
; CHECK-NEXT:    movl $1, %eax
; CHECK-NEXT:    retl
; CHECK-NEXT:  .LBB1_2: # %bb2
; CHECK-NEXT:    movl $2, %eax
; CHECK-NEXT:    retl
entry:
  %cmp = icmp slt i64 %a, %b
  br i1 %cmp, label %bb1, label %bb2

bb1:
  ret i32 1

bb2:
  ret i32 2
}

; Unsigned less-or-equal on i64: also lowered via cmpl/sbbl, branching on the
; carry flag (jb) — note the unsigned condition code, unlike branch_slt.
define i32 @branch_ule(i64 %a, i64 %b) {
; CHECK-LABEL: branch_ule:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT:    cmpl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT:    sbbl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT:    jb .LBB2_2
; CHECK-NEXT:  # %bb.1: # %bb1
; CHECK-NEXT:    movl $1, %eax
; CHECK-NEXT:    retl
; CHECK-NEXT:  .LBB2_2: # %bb2
; CHECK-NEXT:    movl $2, %eax
; CHECK-NEXT:    retl
entry:
  %cmp = icmp ule i64 %a, %b
  br i1 %cmp, label %bb1, label %bb2

bb1:
  ret i32 1

bb2:
  ret i32 2
}

; Materializing the i64 signed-greater-than result as an i32 (via select):
; expect cmpl/sbbl followed by setl + movzbl rather than a branch.
define i32 @set_gt(i64 %a, i64 %b) {
; CHECK-LABEL: set_gt:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT:    cmpl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT:    sbbl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT:    setl %al
; CHECK-NEXT:    movzbl %al, %eax
; CHECK-NEXT:    retl
entry:
  %cmp = icmp sgt i64 %a, %b
  %res = select i1 %cmp, i32 1, i32 0
  ret i32 %res
}

; Wider-than-64-bit compare: an i128 signed less-than on a 32-bit target
; should chain one cmpl and three sbbl instructions across the four 32-bit
; words, then branch once on jge.
define i32 @test_wide(i128 %a, i128 %b) {
; CHECK-LABEL: test_wide:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    pushl %esi
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    .cfi_offset %esi, -8
; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %edx
; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %esi
; CHECK-NEXT:    cmpl {{[0-9]+}}(%esp), %edx
; CHECK-NEXT:    sbbl {{[0-9]+}}(%esp), %esi
; CHECK-NEXT:    sbbl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT:    sbbl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT:    jge .LBB4_2
; CHECK-NEXT:  # %bb.1: # %bb1
; CHECK-NEXT:    movl $1, %eax
; CHECK-NEXT:    popl %esi
; CHECK-NEXT:    .cfi_def_cfa_offset 4
; CHECK-NEXT:    retl
; CHECK-NEXT:  .LBB4_2: # %bb2
; CHECK-NEXT:    .cfi_def_cfa_offset 8
; CHECK-NEXT:    movl $2, %eax
; CHECK-NEXT:    popl %esi
; CHECK-NEXT:    .cfi_def_cfa_offset 4
; CHECK-NEXT:    retl
entry:
  %cmp = icmp slt i128 %a, %b
  br i1 %cmp, label %bb1, label %bb2

bb1:
  ret i32 1

bb2:
  ret i32 2
}

; The comparison of the low bits will be folded to a CARRY_FALSE node. Make
; sure the code can handle that.
; Both operands have their low 32 bits masked to zero, so the low-word
; compare folds away (CARRY_FALSE) and only a single cmpl on the high words
; plus one branch should remain.
define i32 @test_carry_false(i64 %a, i64 %b) {
; CHECK-LABEL: test_carry_false:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT:    cmpl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT:    jge .LBB5_2
; CHECK-NEXT:  # %bb.1: # %bb1
; CHECK-NEXT:    movl $1, %eax
; CHECK-NEXT:    retl
; CHECK-NEXT:  .LBB5_2: # %bb2
; CHECK-NEXT:    movl $2, %eax
; CHECK-NEXT:    retl
entry:
  %x = and i64 %a, -4294967296 ;0xffffffff00000000
  %y = and i64 %b, -4294967296
  %cmp = icmp slt i64 %x, %y
  br i1 %cmp, label %bb1, label %bb2

bb1:
  ret i32 1

bb2:
  ret i32 2
}