; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=SSE_FAST
; RUN: llc -mtriple=x86_64-linux-gnu -regbankselect-greedy -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=SSE_GREEDY
; An i1 load is selected as a plain 8-bit move from memory.
define i1 @test_load_i1(i1 * %p1) {
; ALL-LABEL: test_load_i1:
; ALL:       # BB#0:
; ALL-NEXT:    movb (%rdi), %al
; ALL-NEXT:    retq
  %r = load i1, i1* %p1
  ret i1 %r
}

; An i8 load is selected as an 8-bit move from memory.
define i8 @test_load_i8(i8 * %p1) {
; ALL-LABEL: test_load_i8:
; ALL:       # BB#0:
; ALL-NEXT:    movb (%rdi), %al
; ALL-NEXT:    retq
  %r = load i8, i8* %p1
  ret i8 %r
}

; An i16 load is selected as a zero-extending 16->32-bit load.
define i16 @test_load_i16(i16 * %p1) {
; ALL-LABEL: test_load_i16:
; ALL:       # BB#0:
; ALL-NEXT:    movzwl (%rdi), %eax
; ALL-NEXT:    retq
  %r = load i16, i16* %p1
  ret i16 %r
}

; An i32 load is selected as a 32-bit move from memory.
define i32 @test_load_i32(i32 * %p1) {
; ALL-LABEL: test_load_i32:
; ALL:       # BB#0:
; ALL-NEXT:    movl (%rdi), %eax
; ALL-NEXT:    retq
  %r = load i32, i32* %p1
  ret i32 %r
}

; An i64 load is selected as a 64-bit move from memory.
define i64 @test_load_i64(i64 * %p1) {
; ALL-LABEL: test_load_i64:
; ALL:       # BB#0:
; ALL-NEXT:    movq (%rdi), %rax
; ALL-NEXT:    retq
  %r = load i64, i64* %p1
  ret i64 %r
}

; A float load goes through the GPR bank, then is moved into an XMM register.
; FIXME: the SSE-prefixed check group below matches no --check-prefix in the
; RUN lines (only ALL/SSE_FAST/SSE_GREEDY exist) and so is inert; it looks
; like stale output from an earlier prefix scheme — confirm and drop.
define float @test_load_float(float * %p1) {
; SSE-LABEL: test_load_float:
; SSE:       # BB#0:
; SSE-NEXT:    movl (%rdi), %eax
; SSE-NEXT:    movd %eax, %xmm0
; SSE-NEXT:    retq
;
; ALL-LABEL: test_load_float:
; ALL:       # BB#0:
; ALL-NEXT:    movl (%rdi), %eax
; ALL-NEXT:    movd %eax, %xmm0
; ALL-NEXT:    retq
  %r = load float, float* %p1
  ret float %r
}

; A double load goes through the GPR bank, then is moved into an XMM register.
; FIXME: the SSE-prefixed check group below matches no --check-prefix in the
; RUN lines and so is inert; likely stale — confirm and drop.
define double @test_load_double(double * %p1) {
; SSE-LABEL: test_load_double:
; SSE:       # BB#0:
; SSE-NEXT:    movq (%rdi), %rax
; SSE-NEXT:    movq %rax, %xmm0
; SSE-NEXT:    retq
;
; ALL-LABEL: test_load_double:
; ALL:       # BB#0:
; ALL-NEXT:    movq (%rdi), %rax
; ALL-NEXT:    movq %rax, %xmm0
; ALL-NEXT:    retq
  %r = load double, double* %p1
  ret double %r
}

; An i1 store masks the value down to one bit before the 8-bit store.
define i1 * @test_store_i1(i1 %val, i1 * %p1) {
; ALL-LABEL: test_store_i1:
; ALL:       # BB#0:
; ALL-NEXT:    andb $1, %dil
; ALL-NEXT:    movb %dil, (%rsi)
; ALL-NEXT:    movq %rsi, %rax
; ALL-NEXT:    retq
  store i1 %val, i1* %p1
  ret i1 * %p1;
}

; An i32 store is selected as a 32-bit move to memory.
define i32 * @test_store_i32(i32 %val, i32 * %p1) {
; ALL-LABEL: test_store_i32:
; ALL:       # BB#0:
; ALL-NEXT:    movl %edi, (%rsi)
; ALL-NEXT:    movq %rsi, %rax
; ALL-NEXT:    retq
  store i32 %val, i32* %p1
  ret i32 * %p1;
}

; An i64 store is selected as a 64-bit move to memory.
define i64 * @test_store_i64(i64 %val, i64 * %p1) {
; ALL-LABEL: test_store_i64:
; ALL:       # BB#0:
; ALL-NEXT:    movq %rdi, (%rsi)
; ALL-NEXT:    movq %rsi, %rax
; ALL-NEXT:    retq
  store i64 %val, i64* %p1
  ret i64 * %p1;
}

define float * @test_store_float(float %val, float * %p1) {
|
|
|
|
;
|
[GlobalISel][X86] support G_FRAME_INDEX instruction selection.
Summary:
G_LOAD/G_STORE, add alternative RegisterBank mapping.
For G_LOAD, Fast and Greedy mode choose the same RegisterBank mapping (GprRegBank ) for the G_GLOAD + G_FADD , can't get rid of cross register bank copy GprRegBank->VecRegBank.
Reviewers: zvi, rovka, qcolombet, ab
Reviewed By: zvi
Subscribers: llvm-commits, dberris, kristof.beyls, eladcohen, guyblank
Differential Revision: https://reviews.llvm.org/D30979
llvm-svn: 298907
2017-03-28 17:35:06 +08:00
|
|
|
; SSE_FAST-LABEL: test_store_float:
|
|
|
|
; SSE_FAST: # BB#0:
|
|
|
|
; SSE_FAST-NEXT: movd %xmm0, %eax
|
|
|
|
; SSE_FAST-NEXT: movl %eax, (%rdi)
|
|
|
|
; SSE_FAST-NEXT: movq %rdi, %rax
|
|
|
|
; SSE_FAST-NEXT: retq
|
|
|
|
;
|
|
|
|
; SSE_GREEDY-LABEL: test_store_float:
|
|
|
|
; SSE_GREEDY: # BB#0:
|
|
|
|
; SSE_GREEDY-NEXT: movss %xmm0, (%rdi)
|
|
|
|
; SSE_GREEDY-NEXT: movq %rdi, %rax
|
|
|
|
; SSE_GREEDY-NEXT: retq
|
2017-03-23 23:25:57 +08:00
|
|
|
store float %val, float* %p1
|
|
|
|
ret float * %p1;
|
|
|
|
}
|
|
|
|
|
|
|
|
; A double store: fast mode bounces through a GPR (cross-bank copy), while
; greedy register-bank selection stores straight from the XMM register.
define double * @test_store_double(double %val, double * %p1) {
; SSE_FAST-LABEL: test_store_double:
; SSE_FAST:       # BB#0:
; SSE_FAST-NEXT:    movq %xmm0, %rax
; SSE_FAST-NEXT:    movq %rax, (%rdi)
; SSE_FAST-NEXT:    movq %rdi, %rax
; SSE_FAST-NEXT:    retq
;
; SSE_GREEDY-LABEL: test_store_double:
; SSE_GREEDY:       # BB#0:
; SSE_GREEDY-NEXT:    movsd %xmm0, (%rdi)
; SSE_GREEDY-NEXT:    movq %rdi, %rax
; SSE_GREEDY-NEXT:    retq
  store double %val, double* %p1
  ret double * %p1;
}

; A pointer load is selected as a 64-bit move from memory.
define i32* @test_load_ptr(i32** %ptr1) {
; ALL-LABEL: test_load_ptr:
; ALL:       # BB#0:
; ALL-NEXT:    movq (%rdi), %rax
; ALL-NEXT:    retq
  %p = load i32*, i32** %ptr1
  ret i32* %p
}

; A pointer store is selected as a 64-bit move to memory.
define void @test_store_ptr(i32** %ptr1, i32* %a) {
; ALL-LABEL: test_store_ptr:
; ALL:       # BB#0:
; ALL-NEXT:    movq %rsi, (%rdi)
; ALL-NEXT:    retq
  store i32* %a, i32** %ptr1
  ret void
}

; A small GEP index (5 * 4 bytes = disp 20) folds into the memory operands.
define i32 @test_gep_folding(i32* %arr, i32 %val) {
; ALL-LABEL: test_gep_folding:
; ALL:       # BB#0:
; ALL-NEXT:    movl %esi, 20(%rdi)
; ALL-NEXT:    movl 20(%rdi), %eax
; ALL-NEXT:    retq
  %arrayidx = getelementptr i32, i32* %arr, i32 5
  store i32 %val, i32* %arrayidx
  %r = load i32, i32* %arrayidx
  ret i32 %r
}

; Check that a gep index too large for a 32-bit displacement
; (57179869180 * 4 = 228719476720) is not folded into the memory operand:
; the offset is materialized with movabsq and added via leaq instead.
define i32 @test_gep_folding_largeGepIndex(i32* %arr, i32 %val) {
; ALL-LABEL: test_gep_folding_largeGepIndex:
; ALL:       # BB#0:
; ALL-NEXT:    movabsq $228719476720, %rax # imm = 0x3540BE3FF0
; ALL-NEXT:    leaq (%rdi,%rax), %rax
; ALL-NEXT:    movl %esi, (%rax)
; ALL-NEXT:    movl (%rax), %eax
; ALL-NEXT:    retq
  %arrayidx = getelementptr i32, i32* %arr, i64 57179869180
  store i32 %val, i32* %arrayidx
  %r = load i32, i32* %arrayidx
  ret i32 %r
}