; RUN: llc < %s -relocation-model=pic | FileCheck %s
; CHECK:      calll L0$pb
; CHECK-NEXT: .cfi_adjust_cfa_offset 4
; CHECK-NEXT: L0$pb:
; CHECK-NEXT: popl %eax
; CHECK-NEXT: .cfi_adjust_cfa_offset -4
; CHECK-NEXT: addl LJTI0_0(,%ecx,4), %eax
; CHECK-NEXT: jmpl *%eax

; CHECK: LJTI0_0:
; CHECK-NEXT: .long LBB0_2-L0$pb
; CHECK-NEXT: .long LBB0_3-L0$pb
; CHECK-NEXT: .long LBB0_4-L0$pb
; CHECK-NEXT: .long LBB0_5-L0$pb
target triple = "i686--windows-itanium"

; A four-way switch on an i64 that llc lowers to a position-independent
; jump table (the RUN line passes -relocation-model=pic). The CHECK lines
; above verify the picbase sequence (calll/popl around L0$pb, with matching
; .cfi_adjust_cfa_offset 4/-4), the indexed load from the jump table
; LJTI0_0, and the table's four picbase-relative entries.
define i32 @f(i64 %x) {
bb0:
  switch i64 %x, label %bb5 [
    i64 1, label %bb1
    i64 2, label %bb2
    i64 3, label %bb3
    i64 4, label %bb4
  ]

bb1:
  br label %bb5

bb2:
  br label %bb5

bb3:
  br label %bb5

bb4:
  br label %bb5

bb5:
  ; Each predecessor contributes a distinct constant, so the return value
  ; identifies which switch arm (or the default, 0) was taken.
  %y = phi i32 [ 0, %bb0 ], [ 1, %bb1 ], [ 2, %bb2 ], [ 3, %bb3 ], [ 4, %bb4 ]
  ret i32 %y
}
|