; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-linux -mattr=+sse4.2 | FileCheck %s --check-prefix=X86 --check-prefix=X86-SSE
; RUN: llc < %s -mtriple=i686-linux -mattr=+avx | FileCheck %s --check-prefix=X86 --check-prefix=X86-AVX --check-prefix=X86-AVX1
; RUN: llc < %s -mtriple=i686-linux -mattr=+avx2 | FileCheck %s --check-prefix=X86 --check-prefix=X86-AVX --check-prefix=X86-AVX2
; RUN: llc < %s -mtriple=x86_64-linux -mattr=+sse4.2 | FileCheck %s --check-prefix=X64 --check-prefix=X64-SSE
; RUN: llc < %s -mtriple=x86_64-linux -mattr=+avx | FileCheck %s --check-prefix=X64 --check-prefix=X64-AVX --check-prefix=X64-AVX1
; RUN: llc < %s -mtriple=x86_64-linux -mattr=+avx2 | FileCheck %s --check-prefix=X64 --check-prefix=X64-AVX --check-prefix=X64-AVX2

; PR27708 - widening a <7 x i64> load must not read past the end of the object.
define <7 x i64> @load7_aligned(<7 x i64>* %x) {
; X86-SSE-LABEL: load7_aligned:
; X86-SSE: # BB#0:
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-SSE-NEXT: movaps (%ecx), %xmm0
; X86-SSE-NEXT: movaps 16(%ecx), %xmm1
; X86-SSE-NEXT: movaps 32(%ecx), %xmm2
; X86-SSE-NEXT: movl 48(%ecx), %edx
; X86-SSE-NEXT: movl 52(%ecx), %ecx
; X86-SSE-NEXT: movl %ecx, 52(%eax)
; X86-SSE-NEXT: movl %edx, 48(%eax)
; X86-SSE-NEXT: movaps %xmm2, 32(%eax)
; X86-SSE-NEXT: movaps %xmm1, 16(%eax)
; X86-SSE-NEXT: movaps %xmm0, (%eax)
; X86-SSE-NEXT: retl $4
;
; X86-AVX-LABEL: load7_aligned:
; X86-AVX: # BB#0:
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-AVX-NEXT: vmovaps (%ecx), %ymm0
; X86-AVX-NEXT: vmovaps 32(%ecx), %ymm1
; X86-AVX-NEXT: vmovaps %ymm0, (%eax)
; X86-AVX-NEXT: vextractf128 $1, %ymm1, %xmm0
; X86-AVX-NEXT: vextractps $1, %xmm0, 52(%eax)
; X86-AVX-NEXT: vmovss %xmm0, 48(%eax)
; X86-AVX-NEXT: vmovaps %xmm1, 32(%eax)
; X86-AVX-NEXT: vzeroupper
; X86-AVX-NEXT: retl $4
;
; X64-SSE-LABEL: load7_aligned:
; X64-SSE: # BB#0:
; X64-SSE-NEXT: movaps (%rsi), %xmm0
; X64-SSE-NEXT: movaps 16(%rsi), %xmm1
; X64-SSE-NEXT: movaps 32(%rsi), %xmm2
; X64-SSE-NEXT: movq 48(%rsi), %rax
; X64-SSE-NEXT: movq %rax, 48(%rdi)
; X64-SSE-NEXT: movaps %xmm2, 32(%rdi)
; X64-SSE-NEXT: movaps %xmm1, 16(%rdi)
; X64-SSE-NEXT: movaps %xmm0, (%rdi)
; X64-SSE-NEXT: movq %rdi, %rax
; X64-SSE-NEXT: retq
;
; X64-AVX-LABEL: load7_aligned:
; X64-AVX: # BB#0:
; X64-AVX-NEXT: vmovaps (%rsi), %ymm0
; X64-AVX-NEXT: vmovaps 32(%rsi), %ymm1
; X64-AVX-NEXT: vmovaps %ymm0, (%rdi)
; X64-AVX-NEXT: vextractf128 $1, %ymm1, %xmm0
; X64-AVX-NEXT: vmovlps %xmm0, 48(%rdi)
; X64-AVX-NEXT: vmovaps %xmm1, 32(%rdi)
; X64-AVX-NEXT: movq %rdi, %rax
; X64-AVX-NEXT: vzeroupper
; X64-AVX-NEXT: retq
  ; Naturally-aligned 56-byte load: the last 8 bytes must be loaded as a
  ; scalar (or extracted lane), never as a full 16-byte vector load that
  ; would read past the end of the <7 x i64> object.
  %x1 = load <7 x i64>, <7 x i64>* %x
  ret <7 x i64> %x1
}
define <7 x i64> @load7_unaligned(<7 x i64>* %x) {
; X86-SSE-LABEL: load7_unaligned:
; X86-SSE: # BB#0:
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-SSE-NEXT: movups (%ecx), %xmm0
; X86-SSE-NEXT: movups 16(%ecx), %xmm1
; X86-SSE-NEXT: movups 32(%ecx), %xmm2
; X86-SSE-NEXT: movl 48(%ecx), %edx
; X86-SSE-NEXT: movl 52(%ecx), %ecx
; X86-SSE-NEXT: movl %ecx, 52(%eax)
; X86-SSE-NEXT: movl %edx, 48(%eax)
; X86-SSE-NEXT: movaps %xmm2, 32(%eax)
; X86-SSE-NEXT: movaps %xmm1, 16(%eax)
; X86-SSE-NEXT: movaps %xmm0, (%eax)
; X86-SSE-NEXT: retl $4
;
; X86-AVX-LABEL: load7_unaligned:
; X86-AVX: # BB#0:
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-AVX-NEXT: vmovups (%ecx), %ymm0
; X86-AVX-NEXT: vmovups 32(%ecx), %xmm1
; X86-AVX-NEXT: movl 48(%ecx), %edx
; X86-AVX-NEXT: movl 52(%ecx), %ecx
; X86-AVX-NEXT: movl %ecx, 52(%eax)
; X86-AVX-NEXT: movl %edx, 48(%eax)
; X86-AVX-NEXT: vmovaps %xmm1, 32(%eax)
; X86-AVX-NEXT: vmovaps %ymm0, (%eax)
; X86-AVX-NEXT: vzeroupper
; X86-AVX-NEXT: retl $4
;
; X64-SSE-LABEL: load7_unaligned:
; X64-SSE: # BB#0:
; X64-SSE-NEXT: movups (%rsi), %xmm0
; X64-SSE-NEXT: movups 16(%rsi), %xmm1
; X64-SSE-NEXT: movups 32(%rsi), %xmm2
; X64-SSE-NEXT: movq 48(%rsi), %rax
; X64-SSE-NEXT: movq %rax, 48(%rdi)
; X64-SSE-NEXT: movaps %xmm2, 32(%rdi)
; X64-SSE-NEXT: movaps %xmm1, 16(%rdi)
; X64-SSE-NEXT: movaps %xmm0, (%rdi)
; X64-SSE-NEXT: movq %rdi, %rax
; X64-SSE-NEXT: retq
;
; X64-AVX-LABEL: load7_unaligned:
; X64-AVX: # BB#0:
; X64-AVX-NEXT: vmovups (%rsi), %ymm0
; X64-AVX-NEXT: vmovups 32(%rsi), %xmm1
; X64-AVX-NEXT: movq 48(%rsi), %rax
; X64-AVX-NEXT: movq %rax, 48(%rdi)
; X64-AVX-NEXT: vmovaps %xmm1, 32(%rdi)
; X64-AVX-NEXT: vmovaps %ymm0, (%rdi)
; X64-AVX-NEXT: movq %rdi, %rax
; X64-AVX-NEXT: vzeroupper
; X64-AVX-NEXT: retq
  ; Same 56-byte load with align 1: codegen must use unaligned loads
  ; (movups/vmovups) and again must not over-read the trailing 8 bytes.
  %x1 = load <7 x i64>, <7 x i64>* %x, align 1
  ret <7 x i64> %x1
}