; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown | FileCheck %s -check-prefix=X64
; RUN: llc < %s -mtriple=i686-unknown | FileCheck %s -check-prefix=X86
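; Both functions build long chains of adds over the fields of %struct.SA so the
; backend can fold them into LEAs: @foo is straight-line code, @foo_loop repeats
; the same arithmetic inside a loop. Related LEA selection/factoring work:
; https://reviews.llvm.org/D35014 (PR32755).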
%struct.SA = type { i32, i32, i32, i32, i32 }
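; Five i32 fields, so %h3 and %h4 below address byte offsets 12 and 16; those
; are the offsets the stores in the CHECK lines are matched against.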
define void @foo(%struct.SA* nocapture %ctx, i32 %n) local_unnamed_addr #0 {
; X64-LABEL: foo:
; X64: # BB#0: # %entry
; X64-NEXT: movl 16(%rdi), %eax
; X64-NEXT: movl (%rdi), %ecx
; X64-NEXT: addl %eax, %ecx
; X64-NEXT: addl %eax, %ecx
; X64-NEXT: addl %eax, %ecx
; X64-NEXT: leal (%rcx,%rax), %edx
; X64-NEXT: leal 1(%rax,%rcx), %ecx
; X64-NEXT: movl %ecx, 12(%rdi)
; X64-NEXT: leal 1(%rax,%rdx), %eax
; X64-NEXT: movl %eax, 16(%rdi)
; X64-NEXT: retq
;
; X86-LABEL: foo:
; X86: # BB#0: # %entry
; X86-NEXT: pushl %esi
; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: .cfi_offset %esi, -8
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl 16(%eax), %ecx
; X86-NEXT: movl (%eax), %edx
; X86-NEXT: addl %ecx, %edx
; X86-NEXT: addl %ecx, %edx
; X86-NEXT: addl %ecx, %edx
; X86-NEXT: leal 1(%ecx,%edx), %esi
; X86-NEXT: addl %ecx, %edx
; X86-NEXT: movl %esi, 12(%eax)
; X86-NEXT: leal 1(%ecx,%edx), %ecx
; X86-NEXT: movl %ecx, 16(%eax)
; X86-NEXT: popl %esi
; X86-NEXT: retl
entry:
%h0 = getelementptr inbounds %struct.SA, %struct.SA* %ctx, i64 0, i32 0
%0 = load i32, i32* %h0, align 8
%h3 = getelementptr inbounds %struct.SA, %struct.SA* %ctx, i64 0, i32 3
%h4 = getelementptr inbounds %struct.SA, %struct.SA* %ctx, i64 0, i32 4
%1 = load i32, i32* %h4, align 8
%add = add i32 %0, 1
%add1 = add i32 %add, %1
%add2 = add i32 %add1, %1
%add3 = add i32 %add2, %1
%add4 = add i32 %add3, %1
store i32 %add4, i32* %h3, align 4
%add29 = add i32 %add4, %1
store i32 %add29, i32* %h4, align 8
ret void
}
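; Same field arithmetic as @foo, repeated %n times: the store to offset 12 stays
; inside the loop body, while the accumulated sum is stored to offset 16 only in
; the exit block.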
define void @foo_loop(%struct.SA* nocapture %ctx, i32 %n) local_unnamed_addr #0 {
; X64-LABEL: foo_loop:
; X64: # BB#0: # %entry
; X64-NEXT: .p2align 4, 0x90
; X64-NEXT: .LBB1_1: # %loop
; X64-NEXT: # =>This Inner Loop Header: Depth=1
; X64-NEXT: movl (%rdi), %ecx
; X64-NEXT: movl 16(%rdi), %eax
; X64-NEXT: leal 1(%rcx,%rax), %edx
; X64-NEXT: movl %edx, 12(%rdi)
; X64-NEXT: decl %esi
; X64-NEXT: jne .LBB1_1
; X64-NEXT: # BB#2: # %exit
; X64-NEXT: addl %eax, %ecx
; X64-NEXT: leal 1(%rax,%rcx), %ecx
; X64-NEXT: addl %eax, %ecx
; X64-NEXT: addl %eax, %ecx
; X64-NEXT: addl %eax, %ecx
; X64-NEXT: addl %eax, %ecx
; X64-NEXT: addl %eax, %ecx
; X64-NEXT: addl %eax, %ecx
; X64-NEXT: movl %ecx, 16(%rdi)
; X64-NEXT: retq
;
; X86-LABEL: foo_loop:
; X86: # BB#0: # %entry
; X86-NEXT: pushl %edi
; X86-NEXT: .cfi_def_cfa_offset 8
; X86-NEXT: pushl %esi
; X86-NEXT: .cfi_def_cfa_offset 12
; X86-NEXT: .cfi_offset %esi, -12
; X86-NEXT: .cfi_offset %edi, -8
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: .p2align 4, 0x90
; X86-NEXT: .LBB1_1: # %loop
; X86-NEXT: # =>This Inner Loop Header: Depth=1
; X86-NEXT: movl (%eax), %esi
; X86-NEXT: movl 16(%eax), %ecx
; X86-NEXT: leal 1(%esi,%ecx), %edi
; X86-NEXT: movl %edi, 12(%eax)
; X86-NEXT: decl %edx
; X86-NEXT: jne .LBB1_1
; X86-NEXT: # BB#2: # %exit
; X86-NEXT: addl %ecx, %esi
; X86-NEXT: leal 1(%ecx,%esi), %edx
; X86-NEXT: addl %ecx, %edx
; X86-NEXT: addl %ecx, %edx
; X86-NEXT: addl %ecx, %edx
; X86-NEXT: addl %ecx, %edx
; X86-NEXT: addl %ecx, %edx
; X86-NEXT: addl %ecx, %edx
; X86-NEXT: movl %edx, 16(%eax)
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
; X86-NEXT: retl
entry:
br label %loop
loop:
%iter = phi i32 [ %n, %entry ], [ %iter.ctr, %loop ]
%h0 = getelementptr inbounds %struct.SA, %struct.SA* %ctx, i64 0, i32 0
%0 = load i32, i32* %h0, align 8
%h3 = getelementptr inbounds %struct.SA, %struct.SA* %ctx, i64 0, i32 3
%h4 = getelementptr inbounds %struct.SA, %struct.SA* %ctx, i64 0, i32 4
%1 = load i32, i32* %h4, align 8
%add = add i32 %0, 1
%add4 = add i32 %add, %1
store i32 %add4, i32* %h3, align 4
%add291 = add i32 %add4, %1
%add292 = add i32 %add291, %1
%add293 = add i32 %add292, %1
%add294 = add i32 %add293, %1
%add295 = add i32 %add294, %1
%add296 = add i32 %add295, %1
%add29 = add i32 %add296, %1
%iter.ctr = sub i32 %iter, 1
%res = icmp ne i32 %iter.ctr, 0
br i1 %res, label %loop, label %exit
exit:
store i32 %add29, i32* %h4, align 8
ret void
}