; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-apple-darwin -mcpu=core-avx2 -mattr=+avx2 | FileCheck %s --check-prefix=X32
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=core-avx2 -mattr=+avx2 | FileCheck %s --check-prefix=X64

; 128-bit masked gather of 4 floats via 32-bit indices (VGATHERDPS xmm form).
; Args: passthru, base pointer, indices, mask, immediate scale.
declare <4 x float> @llvm.x86.avx2.gather.d.ps(<4 x float>, i8*,
                      <4 x i32>, <4 x float>, i8) nounwind readonly
; Check that the xmm gather intrinsic selects vgatherdps with the scale (2)
; folded into the memory operand, on both 32- and 64-bit darwin targets.
define <4 x float> @test_x86_avx2_gather_d_ps(i8* %a1, <4 x i32> %idx, <4 x float> %mask) {
; X32-LABEL: test_x86_avx2_gather_d_ps:
; X32:       ## BB#0:
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    vgatherdps %xmm1, (%eax,%xmm0,2), %xmm2
; X32-NEXT:    vmovaps %xmm2, %xmm0
; X32-NEXT:    retl
;
; X64-LABEL: test_x86_avx2_gather_d_ps:
; X64:       ## BB#0:
; X64-NEXT:    vgatherdps %xmm1, (%rdi,%xmm0,2), %xmm2
; X64-NEXT:    vmovaps %xmm2, %xmm0
; X64-NEXT:    retq
  %res = call <4 x float> @llvm.x86.avx2.gather.d.ps(<4 x float> undef,
                            i8* %a1, <4 x i32> %idx, <4 x float> %mask, i8 2) ;
  ret <4 x float> %res
}
; 128-bit masked gather of 2 doubles via 32-bit indices (VGATHERDPD xmm form).
declare <2 x double> @llvm.x86.avx2.gather.d.pd(<2 x double>, i8*,
                      <4 x i32>, <2 x double>, i8) nounwind readonly
; Check vgatherdpd xmm selection with scale 2 folded into the address.
define <2 x double> @test_x86_avx2_gather_d_pd(i8* %a1, <4 x i32> %idx, <2 x double> %mask) {
; X32-LABEL: test_x86_avx2_gather_d_pd:
; X32:       ## BB#0:
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    vgatherdpd %xmm1, (%eax,%xmm0,2), %xmm2
; X32-NEXT:    vmovapd %xmm2, %xmm0
; X32-NEXT:    retl
;
; X64-LABEL: test_x86_avx2_gather_d_pd:
; X64:       ## BB#0:
; X64-NEXT:    vgatherdpd %xmm1, (%rdi,%xmm0,2), %xmm2
; X64-NEXT:    vmovapd %xmm2, %xmm0
; X64-NEXT:    retq
  %res = call <2 x double> @llvm.x86.avx2.gather.d.pd(<2 x double> undef,
                            i8* %a1, <4 x i32> %idx, <2 x double> %mask, i8 2) ;
  ret <2 x double> %res
}
; 256-bit masked gather of 8 floats via 32-bit indices (VGATHERDPS ymm form).
declare <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float>, i8*,
                      <8 x i32>, <8 x float>, i8) nounwind readonly
; Check vgatherdps ymm selection with scale 4 folded into the address.
define <8 x float> @test_x86_avx2_gather_d_ps_256(i8* %a1, <8 x i32> %idx, <8 x float> %mask) {
; X32-LABEL: test_x86_avx2_gather_d_ps_256:
; X32:       ## BB#0:
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    vgatherdps %ymm1, (%eax,%ymm0,4), %ymm2
; X32-NEXT:    vmovaps %ymm2, %ymm0
; X32-NEXT:    retl
;
; X64-LABEL: test_x86_avx2_gather_d_ps_256:
; X64:       ## BB#0:
; X64-NEXT:    vgatherdps %ymm1, (%rdi,%ymm0,4), %ymm2
; X64-NEXT:    vmovaps %ymm2, %ymm0
; X64-NEXT:    retq
  %res = call <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> undef,
                            i8* %a1, <8 x i32> %idx, <8 x float> %mask, i8 4) ;
  ret <8 x float> %res
}
; 256-bit masked gather of 4 doubles via 32-bit indices (VGATHERDPD ymm form).
declare <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double>, i8*,
                      <4 x i32>, <4 x double>, i8) nounwind readonly
; Check vgatherdpd ymm selection with scale 8 folded into the address.
; Note the index operand stays xmm (4 x i32 indices) while data is ymm.
define <4 x double> @test_x86_avx2_gather_d_pd_256(i8* %a1, <4 x i32> %idx, <4 x double> %mask) {
; X32-LABEL: test_x86_avx2_gather_d_pd_256:
; X32:       ## BB#0:
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    vgatherdpd %ymm1, (%eax,%xmm0,8), %ymm2
; X32-NEXT:    vmovapd %ymm2, %ymm0
; X32-NEXT:    retl
;
; X64-LABEL: test_x86_avx2_gather_d_pd_256:
; X64:       ## BB#0:
; X64-NEXT:    vgatherdpd %ymm1, (%rdi,%xmm0,8), %ymm2
; X64-NEXT:    vmovapd %ymm2, %ymm0
; X64-NEXT:    retq
  %res = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> undef,
                            i8* %a1, <4 x i32> %idx, <4 x double> %mask, i8 8) ;
  ret <4 x double> %res
}