; llvm-project/llvm/test/CodeGen/X86/vector-half-conversions.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx,+f16c | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2,+f16c | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+f16c | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX512
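;
; The CHECK lines in this file were produced by the script named in the NOTE
; above. If the expected lowering changes, they are meant to be regenerated
; rather than edited by hand; a sketch of the invocation, assuming llc is on
; PATH and the command is run from the llvm source directory:
;
;   python utils/update_llc_test_checks.py test/CodeGen/X86/vector-half-conversions.ll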
;
; Half to Float
;
define float @cvt_i16_to_f32(i16 %a0) {
; ALL-LABEL: cvt_i16_to_f32:
; ALL: # BB#0:
; ALL-NEXT: movswl %di, %eax
; ALL-NEXT: vmovd %eax, %xmm0
; ALL-NEXT: vcvtph2ps %xmm0, %xmm0
; ALL-NEXT: retq
%1 = bitcast i16 %a0 to half
%2 = fpext half %1 to float
ret float %2
}
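; A hedged C sketch of what the scalar test above exercises, using the F16C
; intrinsic from <immintrin.h> (compile with -mf16c); the function name is
; illustrative only. The i16 argument carries the raw IEEE-754 binary16
; encoding, mirroring the bitcast-to-half in the IR:
;
;   #include <immintrin.h>
;   float half_to_float(unsigned short bits) {
;     return _cvtsh_ss(bits); /* corresponds to the vmovd + vcvtph2ps above */
;   }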
define <4 x float> @cvt_4i16_to_4f32(<4 x i16> %a0) {
; ALL-LABEL: cvt_4i16_to_4f32:
; ALL: # BB#0:
; ALL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; ALL-NEXT: vmovq %xmm0, %rax
; ALL-NEXT: movq %rax, %rcx
; ALL-NEXT: movq %rax, %rdx
; ALL-NEXT: movswl %ax, %esi
; ALL-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<kill>
; ALL-NEXT: shrl $16, %eax
; ALL-NEXT: shrq $32, %rcx
; ALL-NEXT: shrq $48, %rdx
; ALL-NEXT: movswl %dx, %edx
; ALL-NEXT: vmovd %edx, %xmm0
; ALL-NEXT: vcvtph2ps %xmm0, %xmm0
; ALL-NEXT: movswl %cx, %ecx
; ALL-NEXT: vmovd %ecx, %xmm1
; ALL-NEXT: vcvtph2ps %xmm1, %xmm1
; ALL-NEXT: cwtl
; ALL-NEXT: vmovd %eax, %xmm2
; ALL-NEXT: vcvtph2ps %xmm2, %xmm2
; ALL-NEXT: vmovd %esi, %xmm3
; ALL-NEXT: vcvtph2ps %xmm3, %xmm3
; ALL-NEXT: vinsertps {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[2,3]
; ALL-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3]
; ALL-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
; ALL-NEXT: retq
%1 = bitcast <4 x i16> %a0 to <4 x half>
%2 = fpext <4 x half> %1 to <4 x float>
ret <4 x float> %2
}
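; With only F16C available there is no packed half-to-float lowering for these
; sub-128-bit cases: each 16-bit lane is extracted into a GPR (vmovq/vpextrq
; plus shifts), sign-extended, converted individually with vcvtph2ps, and the
; four floats are reassembled with vinsertps, as checked above.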
define <4 x float> @cvt_8i16_to_4f32(<8 x i16> %a0) {
; ALL-LABEL: cvt_8i16_to_4f32:
; ALL: # BB#0:
; ALL-NEXT: vmovq %xmm0, %rax
; ALL-NEXT: movq %rax, %rcx
; ALL-NEXT: movq %rax, %rdx
; ALL-NEXT: movswl %ax, %esi
; ALL-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<kill>
; ALL-NEXT: shrl $16, %eax
; ALL-NEXT: shrq $32, %rcx
; ALL-NEXT: shrq $48, %rdx
; ALL-NEXT: movswl %dx, %edx
; ALL-NEXT: vmovd %edx, %xmm0
; ALL-NEXT: vcvtph2ps %xmm0, %xmm0
; ALL-NEXT: movswl %cx, %ecx
; ALL-NEXT: vmovd %ecx, %xmm1
; ALL-NEXT: vcvtph2ps %xmm1, %xmm1
; ALL-NEXT: cwtl
; ALL-NEXT: vmovd %eax, %xmm2
; ALL-NEXT: vcvtph2ps %xmm2, %xmm2
; ALL-NEXT: vmovd %esi, %xmm3
; ALL-NEXT: vcvtph2ps %xmm3, %xmm3
; ALL-NEXT: vinsertps {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[2,3]
; ALL-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3]
; ALL-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
; ALL-NEXT: retq
%1 = shufflevector <8 x i16> %a0, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
%2 = bitcast <4 x i16> %1 to <4 x half>
%3 = fpext <4 x half> %2 to <4 x float>
ret <4 x float> %3
}
define <8 x float> @cvt_8i16_to_8f32(<8 x i16> %a0) {
; AVX1-LABEL: cvt_8i16_to_8f32:
; AVX1: # BB#0:
; AVX1-NEXT: vpextrq $1, %xmm0, %rdx
; AVX1-NEXT: movq %rdx, %r8
; AVX1-NEXT: movq %rdx, %r10
; AVX1-NEXT: movswl %dx, %r9d
; AVX1-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<kill>
; AVX1-NEXT: shrl $16, %edx
; AVX1-NEXT: shrq $32, %r8
; AVX1-NEXT: shrq $48, %r10
; AVX1-NEXT: vmovq %xmm0, %rdi
; AVX1-NEXT: movq %rdi, %rax
; AVX1-NEXT: movq %rdi, %rsi
; AVX1-NEXT: movswl %di, %ecx
; AVX1-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<kill>
; AVX1-NEXT: shrl $16, %edi
; AVX1-NEXT: shrq $32, %rax
; AVX1-NEXT: shrq $48, %rsi
; AVX1-NEXT: movswl %si, %esi
; AVX1-NEXT: vmovd %esi, %xmm0
; AVX1-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX1-NEXT: cwtl
; AVX1-NEXT: vmovd %eax, %xmm1
; AVX1-NEXT: vcvtph2ps %xmm1, %xmm1
; AVX1-NEXT: movswl %di, %eax
; AVX1-NEXT: vmovd %eax, %xmm2
; AVX1-NEXT: vcvtph2ps %xmm2, %xmm2
; AVX1-NEXT: vmovd %ecx, %xmm3
; AVX1-NEXT: vcvtph2ps %xmm3, %xmm3
; AVX1-NEXT: movswl %r10w, %eax
; AVX1-NEXT: vmovd %eax, %xmm4
; AVX1-NEXT: vcvtph2ps %xmm4, %xmm4
; AVX1-NEXT: movswl %r8w, %eax
; AVX1-NEXT: vmovd %eax, %xmm5
; AVX1-NEXT: vcvtph2ps %xmm5, %xmm5
; AVX1-NEXT: movswl %dx, %eax
; AVX1-NEXT: vmovd %eax, %xmm6
; AVX1-NEXT: vcvtph2ps %xmm6, %xmm6
; AVX1-NEXT: vmovd %r9d, %xmm7
; AVX1-NEXT: vcvtph2ps %xmm7, %xmm7
; AVX1-NEXT: vinsertps {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[2,3]
; AVX1-NEXT: vinsertps {{.*#+}} xmm5 = xmm6[0,1],xmm5[0],xmm6[3]
; AVX1-NEXT: vinsertps {{.*#+}} xmm4 = xmm5[0,1,2],xmm4[0]
; AVX1-NEXT: vinsertps {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[2,3]
; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3]
; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: cvt_8i16_to_8f32:
; AVX2: # BB#0:
; AVX2-NEXT: vpextrq $1, %xmm0, %rdx
; AVX2-NEXT: movq %rdx, %r8
; AVX2-NEXT: movq %rdx, %r10
; AVX2-NEXT: movswl %dx, %r9d
; AVX2-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<kill>
; AVX2-NEXT: shrl $16, %edx
; AVX2-NEXT: shrq $32, %r8
; AVX2-NEXT: shrq $48, %r10
; AVX2-NEXT: vmovq %xmm0, %rdi
; AVX2-NEXT: movq %rdi, %rax
; AVX2-NEXT: movq %rdi, %rsi
; AVX2-NEXT: movswl %di, %ecx
; AVX2-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<kill>
; AVX2-NEXT: shrl $16, %edi
; AVX2-NEXT: shrq $32, %rax
; AVX2-NEXT: shrq $48, %rsi
; AVX2-NEXT: movswl %si, %esi
; AVX2-NEXT: vmovd %esi, %xmm0
; AVX2-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX2-NEXT: cwtl
; AVX2-NEXT: vmovd %eax, %xmm1
; AVX2-NEXT: vcvtph2ps %xmm1, %xmm1
; AVX2-NEXT: movswl %di, %eax
; AVX2-NEXT: vmovd %eax, %xmm2
; AVX2-NEXT: vcvtph2ps %xmm2, %xmm2
; AVX2-NEXT: vmovd %ecx, %xmm3
; AVX2-NEXT: vcvtph2ps %xmm3, %xmm3
; AVX2-NEXT: movswl %r10w, %eax
; AVX2-NEXT: vmovd %eax, %xmm4
; AVX2-NEXT: vcvtph2ps %xmm4, %xmm4
; AVX2-NEXT: movswl %r8w, %eax
; AVX2-NEXT: vmovd %eax, %xmm5
; AVX2-NEXT: vcvtph2ps %xmm5, %xmm5
; AVX2-NEXT: movswl %dx, %eax
; AVX2-NEXT: vmovd %eax, %xmm6
; AVX2-NEXT: vcvtph2ps %xmm6, %xmm6
; AVX2-NEXT: vmovd %r9d, %xmm7
; AVX2-NEXT: vcvtph2ps %xmm7, %xmm7
; AVX2-NEXT: vinsertps {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[2,3]
; AVX2-NEXT: vinsertps {{.*#+}} xmm5 = xmm6[0,1],xmm5[0],xmm6[3]
; AVX2-NEXT: vinsertps {{.*#+}} xmm4 = xmm5[0,1,2],xmm4[0]
; AVX2-NEXT: vinsertps {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[2,3]
; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3]
; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
; AVX2-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: cvt_8i16_to_8f32:
; AVX512: # BB#0:
; AVX512-NEXT: vpextrq $1, %xmm0, %rdx
; AVX512-NEXT: movq %rdx, %r8
; AVX512-NEXT: movq %rdx, %r10
; AVX512-NEXT: movswl %dx, %r9d
; AVX512-NEXT: # kill: %EDX<def> %EDX<kill> %RDX<kill>
; AVX512-NEXT: shrl $16, %edx
; AVX512-NEXT: shrq $32, %r8
; AVX512-NEXT: shrq $48, %r10
; AVX512-NEXT: vmovq %xmm0, %rdi
; AVX512-NEXT: movq %rdi, %rax
; AVX512-NEXT: movq %rdi, %rsi
; AVX512-NEXT: movswl %di, %ecx
; AVX512-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<kill>
; AVX512-NEXT: shrl $16, %edi
; AVX512-NEXT: shrq $32, %rax
; AVX512-NEXT: shrq $48, %rsi
; AVX512-NEXT: movswl %si, %esi
; AVX512-NEXT: vmovd %esi, %xmm0
; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX512-NEXT: cwtl
; AVX512-NEXT: vmovd %eax, %xmm1
; AVX512-NEXT: vcvtph2ps %xmm1, %xmm1
; AVX512-NEXT: movswl %di, %eax
; AVX512-NEXT: vmovd %eax, %xmm2
; AVX512-NEXT: vcvtph2ps %xmm2, %xmm2
; AVX512-NEXT: vmovd %ecx, %xmm3
; AVX512-NEXT: vcvtph2ps %xmm3, %xmm3
; AVX512-NEXT: movswl %r10w, %eax
; AVX512-NEXT: vmovd %eax, %xmm4
; AVX512-NEXT: vcvtph2ps %xmm4, %xmm4
; AVX512-NEXT: movswl %r8w, %eax
; AVX512-NEXT: vmovd %eax, %xmm5
; AVX512-NEXT: vcvtph2ps %xmm5, %xmm5
; AVX512-NEXT: movswl %dx, %eax
; AVX512-NEXT: vmovd %eax, %xmm6
; AVX512-NEXT: vcvtph2ps %xmm6, %xmm6
; AVX512-NEXT: vmovd %r9d, %xmm7
; AVX512-NEXT: vcvtph2ps %xmm7, %xmm7
; AVX512-NEXT: vinsertps {{.*#+}} xmm6 = xmm7[0],xmm6[0],xmm7[2,3]
; AVX512-NEXT: vinsertps {{.*#+}} xmm5 = xmm6[0,1],xmm5[0],xmm6[3]
; AVX512-NEXT: vinsertps {{.*#+}} xmm4 = xmm5[0,1,2],xmm4[0]
; AVX512-NEXT: vinsertps {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[2,3]
; AVX512-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3]
; AVX512-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
; AVX512-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm0
; AVX512-NEXT: retq
%1 = bitcast <8 x i16> %a0 to <8 x half>
%2 = fpext <8 x half> %1 to <8 x float>
ret <8 x float> %2
}
define <16 x float> @cvt_16i16_to_16f32(<16 x i16> %a0) {
; AVX1-LABEL: cvt_16i16_to_16f32:
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
; AVX1-NEXT: vmovq %xmm4, %rax
; AVX1-NEXT: movq %rax, %rcx
; AVX1-NEXT: shrq $48, %rcx
; AVX1-NEXT: movswl %cx, %ecx
; AVX1-NEXT: vmovd %ecx, %xmm8
; AVX1-NEXT: movq %rax, %rcx
; AVX1-NEXT: shrq $32, %rcx
; AVX1-NEXT: movswl %cx, %ecx
; AVX1-NEXT: vmovd %ecx, %xmm9
; AVX1-NEXT: movswl %ax, %ecx
; AVX1-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<kill>
; AVX1-NEXT: shrl $16, %eax
; AVX1-NEXT: cwtl
; AVX1-NEXT: vmovd %eax, %xmm10
; AVX1-NEXT: vpextrq $1, %xmm4, %rax
; AVX1-NEXT: vmovd %ecx, %xmm11
; AVX1-NEXT: movq %rax, %rcx
; AVX1-NEXT: shrq $48, %rcx
; AVX1-NEXT: movswl %cx, %ecx
; AVX1-NEXT: vmovd %ecx, %xmm12
; AVX1-NEXT: movq %rax, %rcx
; AVX1-NEXT: shrq $32, %rcx
; AVX1-NEXT: movswl %cx, %ecx
; AVX1-NEXT: vmovd %ecx, %xmm13
; AVX1-NEXT: movswl %ax, %ecx
; AVX1-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<kill>
; AVX1-NEXT: shrl $16, %eax
; AVX1-NEXT: cwtl
; AVX1-NEXT: vmovd %eax, %xmm14
; AVX1-NEXT: vmovq %xmm0, %rax
; AVX1-NEXT: vmovd %ecx, %xmm15
; AVX1-NEXT: movq %rax, %rcx
; AVX1-NEXT: shrq $48, %rcx
; AVX1-NEXT: movswl %cx, %ecx
; AVX1-NEXT: vmovd %ecx, %xmm2
; AVX1-NEXT: movq %rax, %rcx
; AVX1-NEXT: shrq $32, %rcx
; AVX1-NEXT: movswl %cx, %ecx
; AVX1-NEXT: vmovd %ecx, %xmm3
; AVX1-NEXT: movswl %ax, %ecx
; AVX1-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<kill>
; AVX1-NEXT: shrl $16, %eax
; AVX1-NEXT: cwtl
; AVX1-NEXT: vmovd %eax, %xmm4
; AVX1-NEXT: vpextrq $1, %xmm0, %rax
; AVX1-NEXT: vmovd %ecx, %xmm0
; AVX1-NEXT: movq %rax, %rcx
; AVX1-NEXT: shrq $48, %rcx
; AVX1-NEXT: movswl %cx, %ecx
; AVX1-NEXT: vmovd %ecx, %xmm5
; AVX1-NEXT: movq %rax, %rcx
; AVX1-NEXT: shrq $32, %rcx
; AVX1-NEXT: movswl %cx, %ecx
; AVX1-NEXT: vmovd %ecx, %xmm6
; AVX1-NEXT: movl %eax, %ecx
; AVX1-NEXT: shrl $16, %ecx
; AVX1-NEXT: movswl %cx, %ecx
; AVX1-NEXT: vmovd %ecx, %xmm7
; AVX1-NEXT: cwtl
; AVX1-NEXT: vmovd %eax, %xmm1
; AVX1-NEXT: vcvtph2ps %xmm8, %xmm8
; AVX1-NEXT: vcvtph2ps %xmm9, %xmm9
; AVX1-NEXT: vcvtph2ps %xmm10, %xmm10
; AVX1-NEXT: vcvtph2ps %xmm11, %xmm11
; AVX1-NEXT: vcvtph2ps %xmm12, %xmm12
; AVX1-NEXT: vcvtph2ps %xmm13, %xmm13
; AVX1-NEXT: vcvtph2ps %xmm14, %xmm14
; AVX1-NEXT: vcvtph2ps %xmm15, %xmm15
; AVX1-NEXT: vcvtph2ps %xmm2, %xmm2
; AVX1-NEXT: vcvtph2ps %xmm3, %xmm3
; AVX1-NEXT: vcvtph2ps %xmm4, %xmm4
; AVX1-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX1-NEXT: vcvtph2ps %xmm5, %xmm5
; AVX1-NEXT: vcvtph2ps %xmm6, %xmm6
; AVX1-NEXT: vcvtph2ps %xmm7, %xmm7
; AVX1-NEXT: vcvtph2ps %xmm1, %xmm1
; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0],xmm7[0],xmm1[2,3]
; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm6[0],xmm1[3]
; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm5[0]
; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[2,3]
; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm3[0],xmm0[3]
; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm2[0]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm15[0],xmm14[0],xmm15[2,3]
; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm13[0],xmm1[3]
; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm12[0]
; AVX1-NEXT: vinsertps {{.*#+}} xmm2 = xmm11[0],xmm10[0],xmm11[2,3]
; AVX1-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm9[0],xmm2[3]
; AVX1-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm8[0]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: cvt_16i16_to_16f32:
; AVX2: # BB#0:
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm4
; AVX2-NEXT: vmovq %xmm4, %rax
; AVX2-NEXT: movq %rax, %rcx
; AVX2-NEXT: shrq $48, %rcx
; AVX2-NEXT: movswl %cx, %ecx
; AVX2-NEXT: vmovd %ecx, %xmm8
; AVX2-NEXT: movq %rax, %rcx
; AVX2-NEXT: shrq $32, %rcx
; AVX2-NEXT: movswl %cx, %ecx
; AVX2-NEXT: vmovd %ecx, %xmm9
; AVX2-NEXT: movswl %ax, %ecx
; AVX2-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<kill>
; AVX2-NEXT: shrl $16, %eax
; AVX2-NEXT: cwtl
; AVX2-NEXT: vmovd %eax, %xmm10
; AVX2-NEXT: vpextrq $1, %xmm4, %rax
; AVX2-NEXT: vmovd %ecx, %xmm11
; AVX2-NEXT: movq %rax, %rcx
; AVX2-NEXT: shrq $48, %rcx
; AVX2-NEXT: movswl %cx, %ecx
; AVX2-NEXT: vmovd %ecx, %xmm12
; AVX2-NEXT: movq %rax, %rcx
; AVX2-NEXT: shrq $32, %rcx
; AVX2-NEXT: movswl %cx, %ecx
; AVX2-NEXT: vmovd %ecx, %xmm13
; AVX2-NEXT: movswl %ax, %ecx
; AVX2-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<kill>
; AVX2-NEXT: shrl $16, %eax
; AVX2-NEXT: cwtl
; AVX2-NEXT: vmovd %eax, %xmm14
; AVX2-NEXT: vmovq %xmm0, %rax
; AVX2-NEXT: vmovd %ecx, %xmm15
; AVX2-NEXT: movq %rax, %rcx
; AVX2-NEXT: shrq $48, %rcx
; AVX2-NEXT: movswl %cx, %ecx
; AVX2-NEXT: vmovd %ecx, %xmm2
; AVX2-NEXT: movq %rax, %rcx
; AVX2-NEXT: shrq $32, %rcx
; AVX2-NEXT: movswl %cx, %ecx
; AVX2-NEXT: vmovd %ecx, %xmm3
; AVX2-NEXT: movswl %ax, %ecx
; AVX2-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<kill>
; AVX2-NEXT: shrl $16, %eax
; AVX2-NEXT: cwtl
; AVX2-NEXT: vmovd %eax, %xmm4
; AVX2-NEXT: vpextrq $1, %xmm0, %rax
; AVX2-NEXT: vmovd %ecx, %xmm0
; AVX2-NEXT: movq %rax, %rcx
; AVX2-NEXT: shrq $48, %rcx
; AVX2-NEXT: movswl %cx, %ecx
; AVX2-NEXT: vmovd %ecx, %xmm5
; AVX2-NEXT: movq %rax, %rcx
; AVX2-NEXT: shrq $32, %rcx
; AVX2-NEXT: movswl %cx, %ecx
; AVX2-NEXT: vmovd %ecx, %xmm6
; AVX2-NEXT: movl %eax, %ecx
; AVX2-NEXT: shrl $16, %ecx
; AVX2-NEXT: movswl %cx, %ecx
; AVX2-NEXT: vmovd %ecx, %xmm7
; AVX2-NEXT: cwtl
; AVX2-NEXT: vmovd %eax, %xmm1
; AVX2-NEXT: vcvtph2ps %xmm8, %xmm8
; AVX2-NEXT: vcvtph2ps %xmm9, %xmm9
; AVX2-NEXT: vcvtph2ps %xmm10, %xmm10
; AVX2-NEXT: vcvtph2ps %xmm11, %xmm11
; AVX2-NEXT: vcvtph2ps %xmm12, %xmm12
; AVX2-NEXT: vcvtph2ps %xmm13, %xmm13
; AVX2-NEXT: vcvtph2ps %xmm14, %xmm14
; AVX2-NEXT: vcvtph2ps %xmm15, %xmm15
; AVX2-NEXT: vcvtph2ps %xmm2, %xmm2
; AVX2-NEXT: vcvtph2ps %xmm3, %xmm3
; AVX2-NEXT: vcvtph2ps %xmm4, %xmm4
; AVX2-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX2-NEXT: vcvtph2ps %xmm5, %xmm5
; AVX2-NEXT: vcvtph2ps %xmm6, %xmm6
; AVX2-NEXT: vcvtph2ps %xmm7, %xmm7
; AVX2-NEXT: vcvtph2ps %xmm1, %xmm1
; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0],xmm7[0],xmm1[2,3]
; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm6[0],xmm1[3]
; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm5[0]
; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[2,3]
; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm3[0],xmm0[3]
; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm2[0]
; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm15[0],xmm14[0],xmm15[2,3]
; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm13[0],xmm1[3]
; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm12[0]
; AVX2-NEXT: vinsertps {{.*#+}} xmm2 = xmm11[0],xmm10[0],xmm11[2,3]
; AVX2-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm9[0],xmm2[3]
; AVX2-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm8[0]
; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: cvt_16i16_to_16f32:
; AVX512: # BB#0:
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm10
; AVX512-NEXT: vmovq %xmm0, %rax
; AVX512-NEXT: movq %rax, %rcx
; AVX512-NEXT: shrq $48, %rcx
; AVX512-NEXT: movswl %cx, %ecx
; AVX512-NEXT: vmovd %ecx, %xmm8
; AVX512-NEXT: movq %rax, %rcx
; AVX512-NEXT: shrq $32, %rcx
; AVX512-NEXT: movswl %cx, %ecx
; AVX512-NEXT: vmovd %ecx, %xmm9
; AVX512-NEXT: movswl %ax, %ecx
; AVX512-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<kill>
; AVX512-NEXT: shrl $16, %eax
; AVX512-NEXT: cwtl
; AVX512-NEXT: vmovd %eax, %xmm11
; AVX512-NEXT: vpextrq $1, %xmm0, %rax
; AVX512-NEXT: vmovd %ecx, %xmm12
; AVX512-NEXT: movq %rax, %rcx
; AVX512-NEXT: shrq $48, %rcx
; AVX512-NEXT: movswl %cx, %ecx
; AVX512-NEXT: vmovd %ecx, %xmm13
; AVX512-NEXT: movq %rax, %rcx
; AVX512-NEXT: shrq $32, %rcx
; AVX512-NEXT: movswl %cx, %ecx
; AVX512-NEXT: vmovd %ecx, %xmm14
; AVX512-NEXT: movswl %ax, %ecx
; AVX512-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<kill>
; AVX512-NEXT: shrl $16, %eax
; AVX512-NEXT: cwtl
; AVX512-NEXT: vmovd %eax, %xmm15
; AVX512-NEXT: vmovq %xmm10, %rax
; AVX512-NEXT: vmovd %ecx, %xmm2
; AVX512-NEXT: movq %rax, %rcx
; AVX512-NEXT: shrq $48, %rcx
; AVX512-NEXT: movswl %cx, %ecx
; AVX512-NEXT: vmovd %ecx, %xmm3
; AVX512-NEXT: movq %rax, %rcx
; AVX512-NEXT: shrq $32, %rcx
; AVX512-NEXT: movswl %cx, %ecx
; AVX512-NEXT: vmovd %ecx, %xmm1
; AVX512-NEXT: movswl %ax, %ecx
; AVX512-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<kill>
; AVX512-NEXT: shrl $16, %eax
; AVX512-NEXT: cwtl
; AVX512-NEXT: vmovd %eax, %xmm4
; AVX512-NEXT: vpextrq $1, %xmm10, %rax
; AVX512-NEXT: vmovd %ecx, %xmm10
; AVX512-NEXT: movq %rax, %rcx
; AVX512-NEXT: shrq $48, %rcx
; AVX512-NEXT: movswl %cx, %ecx
; AVX512-NEXT: vmovd %ecx, %xmm5
; AVX512-NEXT: movq %rax, %rcx
; AVX512-NEXT: shrq $32, %rcx
; AVX512-NEXT: movswl %cx, %ecx
; AVX512-NEXT: vmovd %ecx, %xmm6
; AVX512-NEXT: movl %eax, %ecx
; AVX512-NEXT: shrl $16, %ecx
; AVX512-NEXT: movswl %cx, %ecx
; AVX512-NEXT: vmovd %ecx, %xmm7
; AVX512-NEXT: cwtl
; AVX512-NEXT: vmovd %eax, %xmm0
; AVX512-NEXT: vcvtph2ps %xmm8, %xmm8
; AVX512-NEXT: vcvtph2ps %xmm9, %xmm9
; AVX512-NEXT: vcvtph2ps %xmm11, %xmm11
; AVX512-NEXT: vcvtph2ps %xmm12, %xmm12
; AVX512-NEXT: vcvtph2ps %xmm13, %xmm13
; AVX512-NEXT: vcvtph2ps %xmm14, %xmm14
; AVX512-NEXT: vcvtph2ps %xmm15, %xmm15
; AVX512-NEXT: vcvtph2ps %xmm2, %xmm2
; AVX512-NEXT: vcvtph2ps %xmm3, %xmm3
; AVX512-NEXT: vcvtph2ps %xmm1, %xmm1
; AVX512-NEXT: vcvtph2ps %xmm4, %xmm4
; AVX512-NEXT: vcvtph2ps %xmm10, %xmm10
; AVX512-NEXT: vcvtph2ps %xmm5, %xmm5
; AVX512-NEXT: vcvtph2ps %xmm6, %xmm6
; AVX512-NEXT: vcvtph2ps %xmm7, %xmm7
; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX512-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[2,3]
; AVX512-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm6[0],xmm0[3]
; AVX512-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm5[0]
; AVX512-NEXT: vinsertps {{.*#+}} xmm4 = xmm10[0],xmm4[0],xmm10[2,3]
; AVX512-NEXT: vinsertps {{.*#+}} xmm1 = xmm4[0,1],xmm1[0],xmm4[3]
; AVX512-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm3[0]
; AVX512-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
; AVX512-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0],xmm15[0],xmm2[2,3]
; AVX512-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm14[0],xmm1[3]
; AVX512-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm13[0]
; AVX512-NEXT: vinsertps {{.*#+}} xmm2 = xmm12[0],xmm11[0],xmm12[2,3]
; AVX512-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm9[0],xmm2[3]
; AVX512-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm8[0]
; AVX512-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1
; AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm1, %zmm0
; AVX512-NEXT: retq
%1 = bitcast <16 x i16> %a0 to <16 x half>
%2 = fpext <16 x half> %1 to <16 x float>
ret <16 x float> %2
}
;
; Half to Float (Load)
;
define float @load_cvt_i16_to_f32(i16* %a0) {
; ALL-LABEL: load_cvt_i16_to_f32:
; ALL: # BB#0:
; ALL-NEXT: movswl (%rdi), %eax
; ALL-NEXT: vmovd %eax, %xmm0
; ALL-NEXT: vcvtph2ps %xmm0, %xmm0
; ALL-NEXT: retq
%1 = load i16, i16* %a0
%2 = bitcast i16 %1 to half
%3 = fpext half %2 to float
ret float %3
}
define <4 x float> @load_cvt_4i16_to_4f32(<4 x i16>* %a0) {
; ALL-LABEL: load_cvt_4i16_to_4f32:
; ALL: # BB#0:
; ALL-NEXT: movswl 6(%rdi), %eax
; ALL-NEXT: vmovd %eax, %xmm0
; ALL-NEXT: vcvtph2ps %xmm0, %xmm0
; ALL-NEXT: movswl 4(%rdi), %eax
; ALL-NEXT: vmovd %eax, %xmm1
; ALL-NEXT: vcvtph2ps %xmm1, %xmm1
; ALL-NEXT: movswl (%rdi), %eax
; ALL-NEXT: vmovd %eax, %xmm2
; ALL-NEXT: vcvtph2ps %xmm2, %xmm2
; ALL-NEXT: movswl 2(%rdi), %eax
; ALL-NEXT: vmovd %eax, %xmm3
; ALL-NEXT: vcvtph2ps %xmm3, %xmm3
; ALL-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[2,3]
; ALL-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3]
; ALL-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
; ALL-NEXT: retq
%1 = load <4 x i16>, <4 x i16>* %a0
%2 = bitcast <4 x i16> %1 to <4 x half>
%3 = fpext <4 x half> %2 to <4 x float>
ret <4 x float> %3
}
define <4 x float> @load_cvt_8i16_to_4f32(<8 x i16>* %a0) {
; ALL-LABEL: load_cvt_8i16_to_4f32:
; ALL: # BB#0:
; ALL-NEXT: movq (%rdi), %rax
; ALL-NEXT: movq %rax, %rcx
; ALL-NEXT: movq %rax, %rdx
; ALL-NEXT: movswl %ax, %esi
; ALL-NEXT: # kill: %EAX<def> %EAX<kill> %RAX<kill>
; ALL-NEXT: shrl $16, %eax
; ALL-NEXT: shrq $32, %rcx
; ALL-NEXT: shrq $48, %rdx
; ALL-NEXT: movswl %dx, %edx
; ALL-NEXT: vmovd %edx, %xmm0
; ALL-NEXT: vcvtph2ps %xmm0, %xmm0
; ALL-NEXT: movswl %cx, %ecx
; ALL-NEXT: vmovd %ecx, %xmm1
; ALL-NEXT: vcvtph2ps %xmm1, %xmm1
; ALL-NEXT: cwtl
; ALL-NEXT: vmovd %eax, %xmm2
; ALL-NEXT: vcvtph2ps %xmm2, %xmm2
; ALL-NEXT: vmovd %esi, %xmm3
; ALL-NEXT: vcvtph2ps %xmm3, %xmm3
; ALL-NEXT: vinsertps {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[2,3]
; ALL-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3]
; ALL-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
; ALL-NEXT: retq
%1 = load <8 x i16>, <8 x i16>* %a0
%2 = shufflevector <8 x i16> %1, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
%3 = bitcast <4 x i16> %2 to <4 x half>
%4 = fpext <4 x half> %3 to <4 x float>
ret <4 x float> %4
}
define <8 x float> @load_cvt_8i16_to_8f32(<8 x i16>* %a0) {
; AVX1-LABEL: load_cvt_8i16_to_8f32:
; AVX1: # BB#0:
; AVX1-NEXT: movswl 6(%rdi), %eax
; AVX1-NEXT: vmovd %eax, %xmm0
; AVX1-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX1-NEXT: movswl 4(%rdi), %eax
; AVX1-NEXT: vmovd %eax, %xmm1
; AVX1-NEXT: vcvtph2ps %xmm1, %xmm1
; AVX1-NEXT: movswl (%rdi), %eax
; AVX1-NEXT: vmovd %eax, %xmm2
; AVX1-NEXT: vcvtph2ps %xmm2, %xmm2
; AVX1-NEXT: movswl 2(%rdi), %eax
; AVX1-NEXT: vmovd %eax, %xmm3
; AVX1-NEXT: vcvtph2ps %xmm3, %xmm3
; AVX1-NEXT: movswl 14(%rdi), %eax
; AVX1-NEXT: vmovd %eax, %xmm4
; AVX1-NEXT: vcvtph2ps %xmm4, %xmm4
; AVX1-NEXT: movswl 12(%rdi), %eax
; AVX1-NEXT: vmovd %eax, %xmm5
; AVX1-NEXT: vcvtph2ps %xmm5, %xmm5
; AVX1-NEXT: movswl 8(%rdi), %eax
; AVX1-NEXT: vmovd %eax, %xmm6
; AVX1-NEXT: vcvtph2ps %xmm6, %xmm6
; AVX1-NEXT: movswl 10(%rdi), %eax
; AVX1-NEXT: vmovd %eax, %xmm7
; AVX1-NEXT: vcvtph2ps %xmm7, %xmm7
; AVX1-NEXT: vinsertps {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[2,3]
; AVX1-NEXT: vinsertps {{.*#+}} xmm5 = xmm6[0,1],xmm5[0],xmm6[3]
; AVX1-NEXT: vinsertps {{.*#+}} xmm4 = xmm5[0,1,2],xmm4[0]
; AVX1-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[2,3]
; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3]
; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_cvt_8i16_to_8f32:
; AVX2: # BB#0:
; AVX2-NEXT: movswl 6(%rdi), %eax
; AVX2-NEXT: vmovd %eax, %xmm0
; AVX2-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX2-NEXT: movswl 4(%rdi), %eax
; AVX2-NEXT: vmovd %eax, %xmm1
; AVX2-NEXT: vcvtph2ps %xmm1, %xmm1
; AVX2-NEXT: movswl (%rdi), %eax
; AVX2-NEXT: vmovd %eax, %xmm2
; AVX2-NEXT: vcvtph2ps %xmm2, %xmm2
; AVX2-NEXT: movswl 2(%rdi), %eax
; AVX2-NEXT: vmovd %eax, %xmm3
; AVX2-NEXT: vcvtph2ps %xmm3, %xmm3
; AVX2-NEXT: movswl 14(%rdi), %eax
; AVX2-NEXT: vmovd %eax, %xmm4
; AVX2-NEXT: vcvtph2ps %xmm4, %xmm4
; AVX2-NEXT: movswl 12(%rdi), %eax
; AVX2-NEXT: vmovd %eax, %xmm5
; AVX2-NEXT: vcvtph2ps %xmm5, %xmm5
; AVX2-NEXT: movswl 8(%rdi), %eax
; AVX2-NEXT: vmovd %eax, %xmm6
; AVX2-NEXT: vcvtph2ps %xmm6, %xmm6
; AVX2-NEXT: movswl 10(%rdi), %eax
; AVX2-NEXT: vmovd %eax, %xmm7
; AVX2-NEXT: vcvtph2ps %xmm7, %xmm7
; AVX2-NEXT: vinsertps {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[2,3]
; AVX2-NEXT: vinsertps {{.*#+}} xmm5 = xmm6[0,1],xmm5[0],xmm6[3]
; AVX2-NEXT: vinsertps {{.*#+}} xmm4 = xmm5[0,1,2],xmm4[0]
; AVX2-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[2,3]
; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3]
; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
; AVX2-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: load_cvt_8i16_to_8f32:
; AVX512: # BB#0:
; AVX512-NEXT: movswl 6(%rdi), %eax
; AVX512-NEXT: vmovd %eax, %xmm0
; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX512-NEXT: movswl 4(%rdi), %eax
; AVX512-NEXT: vmovd %eax, %xmm1
; AVX512-NEXT: vcvtph2ps %xmm1, %xmm1
; AVX512-NEXT: movswl (%rdi), %eax
; AVX512-NEXT: vmovd %eax, %xmm2
; AVX512-NEXT: vcvtph2ps %xmm2, %xmm2
; AVX512-NEXT: movswl 2(%rdi), %eax
; AVX512-NEXT: vmovd %eax, %xmm3
; AVX512-NEXT: vcvtph2ps %xmm3, %xmm3
; AVX512-NEXT: movswl 14(%rdi), %eax
; AVX512-NEXT: vmovd %eax, %xmm4
; AVX512-NEXT: vcvtph2ps %xmm4, %xmm4
; AVX512-NEXT: movswl 12(%rdi), %eax
; AVX512-NEXT: vmovd %eax, %xmm5
; AVX512-NEXT: vcvtph2ps %xmm5, %xmm5
; AVX512-NEXT: movswl 8(%rdi), %eax
; AVX512-NEXT: vmovd %eax, %xmm6
; AVX512-NEXT: vcvtph2ps %xmm6, %xmm6
; AVX512-NEXT: movswl 10(%rdi), %eax
; AVX512-NEXT: vmovd %eax, %xmm7
; AVX512-NEXT: vcvtph2ps %xmm7, %xmm7
; AVX512-NEXT: vinsertps {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[2,3]
; AVX512-NEXT: vinsertps {{.*#+}} xmm5 = xmm6[0,1],xmm5[0],xmm6[3]
; AVX512-NEXT: vinsertps {{.*#+}} xmm4 = xmm5[0,1,2],xmm4[0]
; AVX512-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[2,3]
; AVX512-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3]
; AVX512-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
; AVX512-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm0
; AVX512-NEXT: retq
%1 = load <8 x i16>, <8 x i16>* %a0
%2 = bitcast <8 x i16> %1 to <8 x half>
%3 = fpext <8 x half> %2 to <8 x float>
ret <8 x float> %3
}
define <16 x float> @load_cvt_16i16_to_16f32(<16 x i16>* %a0) {
; AVX1-LABEL: load_cvt_16i16_to_16f32:
; AVX1: # BB#0:
; AVX1-NEXT: movswl 22(%rdi), %eax
; AVX1-NEXT: vmovd %eax, %xmm0
; AVX1-NEXT: vcvtph2ps %xmm0, %xmm8
; AVX1-NEXT: movswl 20(%rdi), %eax
; AVX1-NEXT: vmovd %eax, %xmm0
; AVX1-NEXT: vcvtph2ps %xmm0, %xmm9
; AVX1-NEXT: movswl 16(%rdi), %eax
; AVX1-NEXT: vmovd %eax, %xmm0
; AVX1-NEXT: vcvtph2ps %xmm0, %xmm10
; AVX1-NEXT: movswl 18(%rdi), %eax
; AVX1-NEXT: vmovd %eax, %xmm0
; AVX1-NEXT: vcvtph2ps %xmm0, %xmm11
; AVX1-NEXT: movswl 30(%rdi), %eax
; AVX1-NEXT: vmovd %eax, %xmm0
; AVX1-NEXT: vcvtph2ps %xmm0, %xmm12
; AVX1-NEXT: movswl 28(%rdi), %eax
; AVX1-NEXT: vmovd %eax, %xmm0
; AVX1-NEXT: vcvtph2ps %xmm0, %xmm13
; AVX1-NEXT: movswl 24(%rdi), %eax
; AVX1-NEXT: vmovd %eax, %xmm0
; AVX1-NEXT: vcvtph2ps %xmm0, %xmm14
; AVX1-NEXT: movswl 26(%rdi), %eax
; AVX1-NEXT: vmovd %eax, %xmm0
; AVX1-NEXT: vcvtph2ps %xmm0, %xmm15
; AVX1-NEXT: movswl 6(%rdi), %eax
; AVX1-NEXT: vmovd %eax, %xmm0
; AVX1-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX1-NEXT: movswl 4(%rdi), %eax
; AVX1-NEXT: vmovd %eax, %xmm2
; AVX1-NEXT: vcvtph2ps %xmm2, %xmm2
; AVX1-NEXT: movswl (%rdi), %eax
; AVX1-NEXT: vmovd %eax, %xmm3
; AVX1-NEXT: vcvtph2ps %xmm3, %xmm3
; AVX1-NEXT: movswl 2(%rdi), %eax
; AVX1-NEXT: vmovd %eax, %xmm4
; AVX1-NEXT: vcvtph2ps %xmm4, %xmm4
; AVX1-NEXT: movswl 14(%rdi), %eax
; AVX1-NEXT: vmovd %eax, %xmm5
; AVX1-NEXT: vcvtph2ps %xmm5, %xmm5
; AVX1-NEXT: movswl 12(%rdi), %eax
; AVX1-NEXT: vmovd %eax, %xmm6
; AVX1-NEXT: vcvtph2ps %xmm6, %xmm6
; AVX1-NEXT: movswl 8(%rdi), %eax
; AVX1-NEXT: vmovd %eax, %xmm7
; AVX1-NEXT: vcvtph2ps %xmm7, %xmm7
; AVX1-NEXT: movswl 10(%rdi), %eax
; AVX1-NEXT: vmovd %eax, %xmm1
; AVX1-NEXT: vcvtph2ps %xmm1, %xmm1
; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm7[0],xmm1[0],xmm7[2,3]
; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm6[0],xmm1[3]
; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm5[0]
; AVX1-NEXT: vinsertps {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[2,3]
; AVX1-NEXT: vinsertps {{.*#+}} xmm2 = xmm3[0,1],xmm2[0],xmm3[3]
; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm2[0,1,2],xmm0[0]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm14[0],xmm15[0],xmm14[2,3]
; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm13[0],xmm1[3]
; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm12[0]
; AVX1-NEXT: vinsertps {{.*#+}} xmm2 = xmm10[0],xmm11[0],xmm10[2,3]
; AVX1-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm9[0],xmm2[3]
; AVX1-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm8[0]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_cvt_16i16_to_16f32:
; AVX2: # BB#0:
; AVX2-NEXT: movswl 22(%rdi), %eax
; AVX2-NEXT: vmovd %eax, %xmm0
; AVX2-NEXT: vcvtph2ps %xmm0, %xmm8
; AVX2-NEXT: movswl 20(%rdi), %eax
; AVX2-NEXT: vmovd %eax, %xmm0
; AVX2-NEXT: vcvtph2ps %xmm0, %xmm9
; AVX2-NEXT: movswl 16(%rdi), %eax
; AVX2-NEXT: vmovd %eax, %xmm0
; AVX2-NEXT: vcvtph2ps %xmm0, %xmm10
; AVX2-NEXT: movswl 18(%rdi), %eax
; AVX2-NEXT: vmovd %eax, %xmm0
; AVX2-NEXT: vcvtph2ps %xmm0, %xmm11
; AVX2-NEXT: movswl 30(%rdi), %eax
; AVX2-NEXT: vmovd %eax, %xmm0
; AVX2-NEXT: vcvtph2ps %xmm0, %xmm12
; AVX2-NEXT: movswl 28(%rdi), %eax
; AVX2-NEXT: vmovd %eax, %xmm0
; AVX2-NEXT: vcvtph2ps %xmm0, %xmm13
; AVX2-NEXT: movswl 24(%rdi), %eax
; AVX2-NEXT: vmovd %eax, %xmm0
; AVX2-NEXT: vcvtph2ps %xmm0, %xmm14
; AVX2-NEXT: movswl 26(%rdi), %eax
; AVX2-NEXT: vmovd %eax, %xmm0
; AVX2-NEXT: vcvtph2ps %xmm0, %xmm15
; AVX2-NEXT: movswl 6(%rdi), %eax
; AVX2-NEXT: vmovd %eax, %xmm0
; AVX2-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX2-NEXT: movswl 4(%rdi), %eax
; AVX2-NEXT: vmovd %eax, %xmm2
; AVX2-NEXT: vcvtph2ps %xmm2, %xmm2
; AVX2-NEXT: movswl (%rdi), %eax
; AVX2-NEXT: vmovd %eax, %xmm3
; AVX2-NEXT: vcvtph2ps %xmm3, %xmm3
; AVX2-NEXT: movswl 2(%rdi), %eax
; AVX2-NEXT: vmovd %eax, %xmm4
; AVX2-NEXT: vcvtph2ps %xmm4, %xmm4
; AVX2-NEXT: movswl 14(%rdi), %eax
; AVX2-NEXT: vmovd %eax, %xmm5
; AVX2-NEXT: vcvtph2ps %xmm5, %xmm5
; AVX2-NEXT: movswl 12(%rdi), %eax
; AVX2-NEXT: vmovd %eax, %xmm6
; AVX2-NEXT: vcvtph2ps %xmm6, %xmm6
; AVX2-NEXT: movswl 8(%rdi), %eax
; AVX2-NEXT: vmovd %eax, %xmm7
; AVX2-NEXT: vcvtph2ps %xmm7, %xmm7
; AVX2-NEXT: movswl 10(%rdi), %eax
; AVX2-NEXT: vmovd %eax, %xmm1
; AVX2-NEXT: vcvtph2ps %xmm1, %xmm1
; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm7[0],xmm1[0],xmm7[2,3]
; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm6[0],xmm1[3]
; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm5[0]
; AVX2-NEXT: vinsertps {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[2,3]
; AVX2-NEXT: vinsertps {{.*#+}} xmm2 = xmm3[0,1],xmm2[0],xmm3[3]
; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm2[0,1,2],xmm0[0]
; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm14[0],xmm15[0],xmm14[2,3]
; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm13[0],xmm1[3]
; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm12[0]
; AVX2-NEXT: vinsertps {{.*#+}} xmm2 = xmm10[0],xmm11[0],xmm10[2,3]
; AVX2-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm9[0],xmm2[3]
; AVX2-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm8[0]
; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: load_cvt_16i16_to_16f32:
; AVX512: # BB#0:
; AVX512-NEXT: movswl 6(%rdi), %eax
; AVX512-NEXT: vmovd %eax, %xmm0
; AVX512-NEXT: vcvtph2ps %xmm0, %xmm8
; AVX512-NEXT: movswl 4(%rdi), %eax
; AVX512-NEXT: vmovd %eax, %xmm0
; AVX512-NEXT: vcvtph2ps %xmm0, %xmm9
; AVX512-NEXT: movswl (%rdi), %eax
; AVX512-NEXT: vmovd %eax, %xmm0
; AVX512-NEXT: vcvtph2ps %xmm0, %xmm10
; AVX512-NEXT: movswl 2(%rdi), %eax
; AVX512-NEXT: vmovd %eax, %xmm0
; AVX512-NEXT: vcvtph2ps %xmm0, %xmm11
; AVX512-NEXT: movswl 14(%rdi), %eax
; AVX512-NEXT: vmovd %eax, %xmm0
; AVX512-NEXT: vcvtph2ps %xmm0, %xmm12
; AVX512-NEXT: movswl 12(%rdi), %eax
; AVX512-NEXT: vmovd %eax, %xmm0
; AVX512-NEXT: vcvtph2ps %xmm0, %xmm13
; AVX512-NEXT: movswl 8(%rdi), %eax
; AVX512-NEXT: vmovd %eax, %xmm0
; AVX512-NEXT: vcvtph2ps %xmm0, %xmm14
; AVX512-NEXT: movswl 10(%rdi), %eax
; AVX512-NEXT: vmovd %eax, %xmm0
; AVX512-NEXT: vcvtph2ps %xmm0, %xmm15
; AVX512-NEXT: movswl 22(%rdi), %eax
; AVX512-NEXT: vmovd %eax, %xmm0
; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX512-NEXT: movswl 20(%rdi), %eax
; AVX512-NEXT: vmovd %eax, %xmm1
; AVX512-NEXT: vcvtph2ps %xmm1, %xmm1
; AVX512-NEXT: movswl 16(%rdi), %eax
; AVX512-NEXT: vmovd %eax, %xmm2
; AVX512-NEXT: vcvtph2ps %xmm2, %xmm2
; AVX512-NEXT: movswl 18(%rdi), %eax
; AVX512-NEXT: vmovd %eax, %xmm3
; AVX512-NEXT: vcvtph2ps %xmm3, %xmm3
; AVX512-NEXT: movswl 30(%rdi), %eax
; AVX512-NEXT: vmovd %eax, %xmm4
; AVX512-NEXT: vcvtph2ps %xmm4, %xmm4
; AVX512-NEXT: movswl 28(%rdi), %eax
; AVX512-NEXT: vmovd %eax, %xmm5
; AVX512-NEXT: vcvtph2ps %xmm5, %xmm5
; AVX512-NEXT: movswl 24(%rdi), %eax
; AVX512-NEXT: vmovd %eax, %xmm6
; AVX512-NEXT: vcvtph2ps %xmm6, %xmm6
; AVX512-NEXT: movswl 26(%rdi), %eax
; AVX512-NEXT: vmovd %eax, %xmm7
; AVX512-NEXT: vcvtph2ps %xmm7, %xmm7
; AVX512-NEXT: vinsertps {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[2,3]
; AVX512-NEXT: vinsertps {{.*#+}} xmm5 = xmm6[0,1],xmm5[0],xmm6[3]
; AVX512-NEXT: vinsertps {{.*#+}} xmm4 = xmm5[0,1,2],xmm4[0]
; AVX512-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[2,3]
; AVX512-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[0],xmm2[3]
; AVX512-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
; AVX512-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm0
; AVX512-NEXT: vinsertps {{.*#+}} xmm1 = xmm14[0],xmm15[0],xmm14[2,3]
; AVX512-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm13[0],xmm1[3]
; AVX512-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm12[0]
; AVX512-NEXT: vinsertps {{.*#+}} xmm2 = xmm10[0],xmm11[0],xmm10[2,3]
; AVX512-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm9[0],xmm2[3]
; AVX512-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm8[0]
; AVX512-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1
; AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm1, %zmm0
; AVX512-NEXT: retq
%1 = load <16 x i16>, <16 x i16>* %a0
%2 = bitcast <16 x i16> %1 to <16 x half>
%3 = fpext <16 x half> %2 to <16 x float>
ret <16 x float> %3
}
;
; Half to Double
;
define double @cvt_i16_to_f64(i16 %a0) {
; ALL-LABEL: cvt_i16_to_f64:
; ALL: # BB#0:
; ALL-NEXT: movswl %di, %eax
; ALL-NEXT: vmovd %eax, %xmm0
; ALL-NEXT: vcvtph2ps %xmm0, %xmm0
; ALL-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; ALL-NEXT: retq
%1 = bitcast i16 %a0 to half
%2 = fpext half %1 to double
ret double %2
}
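; F16C has no direct half-to-double instruction, so the extension above goes
; through float in two steps: vcvtph2ps (half to float) followed by vcvtss2sd
; (float to double). A hedged C sketch of the same idea, assuming the F16C
; intrinsic from <immintrin.h>; the function name is illustrative only:
;
;   #include <immintrin.h>
;   double half_to_double(unsigned short bits) {
;     return (double)_cvtsh_ss(bits); /* vcvtph2ps, then vcvtss2sd */
;   }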
define <2 x double> @cvt_2i16_to_2f64(<2 x i16> %a0) {
; ALL-LABEL: cvt_2i16_to_2f64:
; ALL: # BB#0:
; ALL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; ALL-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; ALL-NEXT: vmovd %xmm0, %eax
; ALL-NEXT: movswl %ax, %ecx
; ALL-NEXT: shrl $16, %eax
; ALL-NEXT: cwtl
; ALL-NEXT: vmovd %eax, %xmm0
; ALL-NEXT: vcvtph2ps %xmm0, %xmm0
; ALL-NEXT: vmovd %ecx, %xmm1
; ALL-NEXT: vcvtph2ps %xmm1, %xmm1
; ALL-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1
; ALL-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; ALL-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; ALL-NEXT: retq
%1 = bitcast <2 x i16> %a0 to <2 x half>
%2 = fpext <2 x half> %1 to <2 x double>
ret <2 x double> %2
}
define <4 x double> @cvt_4i16_to_4f64(<4 x i16> %a0) {
; ALL-LABEL: cvt_4i16_to_4f64:
; ALL: # BB#0:
; ALL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; ALL-NEXT: vmovq %xmm0, %rax
; ALL-NEXT: movq %rax, %rcx
; ALL-NEXT: movl %eax, %edx
; ALL-NEXT: movswl %ax, %esi
; ALL-NEXT: shrq $48, %rax
; ALL-NEXT: shrq $32, %rcx
; ALL-NEXT: shrl $16, %edx
; ALL-NEXT: movswl %dx, %edx
; ALL-NEXT: vmovd %edx, %xmm0
; ALL-NEXT: vcvtph2ps %xmm0, %xmm0
; ALL-NEXT: vmovd %esi, %xmm1
; ALL-NEXT: vcvtph2ps %xmm1, %xmm1
; ALL-NEXT: movswl %cx, %ecx
; ALL-NEXT: vmovd %ecx, %xmm2
; ALL-NEXT: vcvtph2ps %xmm2, %xmm2
; ALL-NEXT: cwtl
; ALL-NEXT: vmovd %eax, %xmm3
; ALL-NEXT: vcvtph2ps %xmm3, %xmm3
; ALL-NEXT: vcvtss2sd %xmm3, %xmm3, %xmm3
; ALL-NEXT: vcvtss2sd %xmm2, %xmm2, %xmm2
; ALL-NEXT: vunpcklpd {{.*#+}} xmm2 = xmm2[0],xmm3[0]
; ALL-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1
; ALL-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; ALL-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; ALL-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; ALL-NEXT: retq
%1 = bitcast <4 x i16> %a0 to <4 x half>
%2 = fpext <4 x half> %1 to <4 x double>
ret <4 x double> %2
}
define <2 x double> @cvt_8i16_to_2f64(<8 x i16> %a0) {
; ALL-LABEL: cvt_8i16_to_2f64:
; ALL: # BB#0:
; ALL-NEXT: vmovd %xmm0, %eax
; ALL-NEXT: movswl %ax, %ecx
; ALL-NEXT: shrl $16, %eax
; ALL-NEXT: cwtl
; ALL-NEXT: vmovd %eax, %xmm0
; ALL-NEXT: vcvtph2ps %xmm0, %xmm0
; ALL-NEXT: vmovd %ecx, %xmm1
; ALL-NEXT: vcvtph2ps %xmm1, %xmm1
; ALL-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1
; ALL-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; ALL-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; ALL-NEXT: retq
%1 = shufflevector <8 x i16> %a0, <8 x i16> undef, <2 x i32> <i32 0, i32 1>
%2 = bitcast <2 x i16> %1 to <2 x half>
%3 = fpext <2 x half> %2 to <2 x double>
ret <2 x double> %3
}
define <4 x double> @cvt_8i16_to_4f64(<8 x i16> %a0) {
; ALL-LABEL: cvt_8i16_to_4f64:
; ALL: # BB#0:
; ALL-NEXT: vmovq %xmm0, %rax
; ALL-NEXT: movq %rax, %rcx
; ALL-NEXT: movl %eax, %edx
; ALL-NEXT: movswl %ax, %esi
; ALL-NEXT: shrq $48, %rax
; ALL-NEXT: shrq $32, %rcx
; ALL-NEXT: shrl $16, %edx
; ALL-NEXT: movswl %dx, %edx
; ALL-NEXT: vmovd %edx, %xmm0
; ALL-NEXT: vcvtph2ps %xmm0, %xmm0
; ALL-NEXT: vmovd %esi, %xmm1
; ALL-NEXT: vcvtph2ps %xmm1, %xmm1
; ALL-NEXT: movswl %cx, %ecx
; ALL-NEXT: vmovd %ecx, %xmm2
; ALL-NEXT: vcvtph2ps %xmm2, %xmm2
; ALL-NEXT: cwtl
; ALL-NEXT: vmovd %eax, %xmm3
; ALL-NEXT: vcvtph2ps %xmm3, %xmm3
; ALL-NEXT: vcvtss2sd %xmm3, %xmm3, %xmm3
; ALL-NEXT: vcvtss2sd %xmm2, %xmm2, %xmm2
; ALL-NEXT: vunpcklpd {{.*#+}} xmm2 = xmm2[0],xmm3[0]
; ALL-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1
; ALL-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; ALL-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; ALL-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; ALL-NEXT: retq
%1 = shufflevector <8 x i16> %a0, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
%2 = bitcast <4 x i16> %1 to <4 x half>
%3 = fpext <4 x half> %2 to <4 x double>
ret <4 x double> %3
}
define <8 x double> @cvt_8i16_to_8f64(<8 x i16> %a0) {
; AVX1-LABEL: cvt_8i16_to_8f64:
; AVX1: # BB#0:
; AVX1-NEXT: vmovq %xmm0, %rdx
; AVX1-NEXT: movq %rdx, %r9
; AVX1-NEXT: movl %edx, %r10d
; AVX1-NEXT: movswl %dx, %r8d
; AVX1-NEXT: shrq $48, %rdx
; AVX1-NEXT: shrq $32, %r9
; AVX1-NEXT: shrl $16, %r10d
; AVX1-NEXT: vpextrq $1, %xmm0, %rdi
; AVX1-NEXT: movq %rdi, %rsi
; AVX1-NEXT: movl %edi, %eax
; AVX1-NEXT: movswl %di, %ecx
; AVX1-NEXT: shrq $48, %rdi
; AVX1-NEXT: shrq $32, %rsi
; AVX1-NEXT: shrl $16, %eax
; AVX1-NEXT: cwtl
; AVX1-NEXT: vmovd %eax, %xmm0
; AVX1-NEXT: vcvtph2ps %xmm0, %xmm1
; AVX1-NEXT: vmovd %ecx, %xmm0
; AVX1-NEXT: vcvtph2ps %xmm0, %xmm2
; AVX1-NEXT: movswl %si, %eax
; AVX1-NEXT: vmovd %eax, %xmm0
; AVX1-NEXT: vcvtph2ps %xmm0, %xmm3
; AVX1-NEXT: movswl %di, %eax
; AVX1-NEXT: vmovd %eax, %xmm0
; AVX1-NEXT: vcvtph2ps %xmm0, %xmm4
; AVX1-NEXT: movswl %r10w, %eax
; AVX1-NEXT: vmovd %eax, %xmm0
; AVX1-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX1-NEXT: vmovd %r8d, %xmm5
; AVX1-NEXT: vcvtph2ps %xmm5, %xmm5
; AVX1-NEXT: movswl %r9w, %eax
; AVX1-NEXT: vmovd %eax, %xmm6
; AVX1-NEXT: vcvtph2ps %xmm6, %xmm6
; AVX1-NEXT: movswl %dx, %eax
; AVX1-NEXT: vmovd %eax, %xmm7
; AVX1-NEXT: vcvtph2ps %xmm7, %xmm7
; AVX1-NEXT: vcvtss2sd %xmm7, %xmm7, %xmm7
; AVX1-NEXT: vcvtss2sd %xmm6, %xmm6, %xmm6
; AVX1-NEXT: vunpcklpd {{.*#+}} xmm6 = xmm6[0],xmm7[0]
; AVX1-NEXT: vcvtss2sd %xmm5, %xmm5, %xmm5
; AVX1-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX1-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm5[0],xmm0[0]
; AVX1-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm0
; AVX1-NEXT: vcvtss2sd %xmm4, %xmm4, %xmm4
; AVX1-NEXT: vcvtss2sd %xmm3, %xmm3, %xmm3
; AVX1-NEXT: vunpcklpd {{.*#+}} xmm3 = xmm3[0],xmm4[0]
; AVX1-NEXT: vcvtss2sd %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vunpcklpd {{.*#+}} xmm1 = xmm2[0],xmm1[0]
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: cvt_8i16_to_8f64:
; AVX2: # BB#0:
; AVX2-NEXT: vmovq %xmm0, %rdx
; AVX2-NEXT: movq %rdx, %r9
; AVX2-NEXT: movl %edx, %r10d
; AVX2-NEXT: movswl %dx, %r8d
; AVX2-NEXT: shrq $48, %rdx
; AVX2-NEXT: shrq $32, %r9
; AVX2-NEXT: shrl $16, %r10d
; AVX2-NEXT: vpextrq $1, %xmm0, %rdi
; AVX2-NEXT: movq %rdi, %rsi
; AVX2-NEXT: movl %edi, %eax
; AVX2-NEXT: movswl %di, %ecx
; AVX2-NEXT: shrq $48, %rdi
; AVX2-NEXT: shrq $32, %rsi
; AVX2-NEXT: shrl $16, %eax
; AVX2-NEXT: cwtl
; AVX2-NEXT: vmovd %eax, %xmm0
; AVX2-NEXT: vcvtph2ps %xmm0, %xmm1
; AVX2-NEXT: vmovd %ecx, %xmm0
; AVX2-NEXT: vcvtph2ps %xmm0, %xmm2
; AVX2-NEXT: movswl %si, %eax
; AVX2-NEXT: vmovd %eax, %xmm0
; AVX2-NEXT: vcvtph2ps %xmm0, %xmm3
; AVX2-NEXT: movswl %di, %eax
; AVX2-NEXT: vmovd %eax, %xmm0
; AVX2-NEXT: vcvtph2ps %xmm0, %xmm4
; AVX2-NEXT: movswl %r10w, %eax
; AVX2-NEXT: vmovd %eax, %xmm0
; AVX2-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX2-NEXT: vmovd %r8d, %xmm5
; AVX2-NEXT: vcvtph2ps %xmm5, %xmm5
; AVX2-NEXT: movswl %r9w, %eax
; AVX2-NEXT: vmovd %eax, %xmm6
; AVX2-NEXT: vcvtph2ps %xmm6, %xmm6
; AVX2-NEXT: movswl %dx, %eax
; AVX2-NEXT: vmovd %eax, %xmm7
; AVX2-NEXT: vcvtph2ps %xmm7, %xmm7
; AVX2-NEXT: vcvtss2sd %xmm7, %xmm7, %xmm7
; AVX2-NEXT: vcvtss2sd %xmm6, %xmm6, %xmm6
; AVX2-NEXT: vunpcklpd {{.*#+}} xmm6 = xmm6[0],xmm7[0]
; AVX2-NEXT: vcvtss2sd %xmm5, %xmm5, %xmm5
; AVX2-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX2-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm5[0],xmm0[0]
; AVX2-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm0
; AVX2-NEXT: vcvtss2sd %xmm4, %xmm4, %xmm4
; AVX2-NEXT: vcvtss2sd %xmm3, %xmm3, %xmm3
; AVX2-NEXT: vunpcklpd {{.*#+}} xmm3 = xmm3[0],xmm4[0]
; AVX2-NEXT: vcvtss2sd %xmm2, %xmm2, %xmm2
; AVX2-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vunpcklpd {{.*#+}} xmm1 = xmm2[0],xmm1[0]
; AVX2-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: cvt_8i16_to_8f64:
; AVX512: # BB#0:
; AVX512-NEXT: vpextrq $1, %xmm0, %rdx
; AVX512-NEXT: movq %rdx, %r8
; AVX512-NEXT: movl %edx, %r10d
; AVX512-NEXT: movswl %dx, %r9d
; AVX512-NEXT: shrq $48, %rdx
; AVX512-NEXT: shrq $32, %r8
; AVX512-NEXT: shrl $16, %r10d
; AVX512-NEXT: vmovq %xmm0, %rdi
; AVX512-NEXT: movq %rdi, %rax
; AVX512-NEXT: movl %edi, %esi
; AVX512-NEXT: movswl %di, %ecx
; AVX512-NEXT: shrq $48, %rdi
; AVX512-NEXT: shrq $32, %rax
; AVX512-NEXT: shrl $16, %esi
; AVX512-NEXT: movswl %si, %esi
; AVX512-NEXT: vmovd %esi, %xmm0
; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX512-NEXT: vmovd %ecx, %xmm1
; AVX512-NEXT: vcvtph2ps %xmm1, %xmm1
; AVX512-NEXT: cwtl
; AVX512-NEXT: vmovd %eax, %xmm2
; AVX512-NEXT: vcvtph2ps %xmm2, %xmm2
; AVX512-NEXT: movswl %di, %eax
; AVX512-NEXT: vmovd %eax, %xmm3
; AVX512-NEXT: vcvtph2ps %xmm3, %xmm3
; AVX512-NEXT: movswl %r10w, %eax
; AVX512-NEXT: vmovd %eax, %xmm4
; AVX512-NEXT: vcvtph2ps %xmm4, %xmm4
; AVX512-NEXT: vmovd %r9d, %xmm5
; AVX512-NEXT: vcvtph2ps %xmm5, %xmm5
; AVX512-NEXT: movswl %r8w, %eax
; AVX512-NEXT: vmovd %eax, %xmm6
; AVX512-NEXT: vcvtph2ps %xmm6, %xmm6
; AVX512-NEXT: movswl %dx, %eax
; AVX512-NEXT: vmovd %eax, %xmm7
; AVX512-NEXT: vcvtph2ps %xmm7, %xmm7
; AVX512-NEXT: vcvtss2sd %xmm7, %xmm7, %xmm7
; AVX512-NEXT: vcvtss2sd %xmm6, %xmm6, %xmm6
; AVX512-NEXT: vunpcklpd {{.*#+}} xmm6 = xmm6[0],xmm7[0]
; AVX512-NEXT: vcvtss2sd %xmm5, %xmm5, %xmm5
; AVX512-NEXT: vcvtss2sd %xmm4, %xmm4, %xmm4
; AVX512-NEXT: vunpcklpd {{.*#+}} xmm4 = xmm5[0],xmm4[0]
; AVX512-NEXT: vinsertf128 $1, %xmm6, %ymm4, %ymm4
; AVX512-NEXT: vcvtss2sd %xmm3, %xmm3, %xmm3
; AVX512-NEXT: vcvtss2sd %xmm2, %xmm2, %xmm2
; AVX512-NEXT: vunpcklpd {{.*#+}} xmm2 = xmm2[0],xmm3[0]
; AVX512-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1
; AVX512-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX512-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX512-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX512-NEXT: vinsertf64x4 $1, %ymm4, %zmm0, %zmm0
; AVX512-NEXT: retq
%1 = bitcast <8 x i16> %a0 to <8 x half>
%2 = fpext <8 x half> %1 to <8 x double>
ret <8 x double> %2
}
;
; Half to Double (Load)
;
define double @load_cvt_i16_to_f64(i16* %a0) {
; ALL-LABEL: load_cvt_i16_to_f64:
; ALL: # BB#0:
; ALL-NEXT: movswl (%rdi), %eax
; ALL-NEXT: vmovd %eax, %xmm0
; ALL-NEXT: vcvtph2ps %xmm0, %xmm0
; ALL-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; ALL-NEXT: retq
%1 = load i16, i16* %a0
%2 = bitcast i16 %1 to half
%3 = fpext half %2 to double
ret double %3
}
define <2 x double> @load_cvt_2i16_to_2f64(<2 x i16>* %a0) {
; ALL-LABEL: load_cvt_2i16_to_2f64:
; ALL: # BB#0:
; ALL-NEXT: movswl (%rdi), %eax
; ALL-NEXT: vmovd %eax, %xmm0
; ALL-NEXT: vcvtph2ps %xmm0, %xmm0
; ALL-NEXT: movswl 2(%rdi), %eax
; ALL-NEXT: vmovd %eax, %xmm1
; ALL-NEXT: vcvtph2ps %xmm1, %xmm1
; ALL-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1
; ALL-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; ALL-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; ALL-NEXT: retq
%1 = load <2 x i16>, <2 x i16>* %a0
%2 = bitcast <2 x i16> %1 to <2 x half>
%3 = fpext <2 x half> %2 to <2 x double>
ret <2 x double> %3
}
define <4 x double> @load_cvt_4i16_to_4f64(<4 x i16>* %a0) {
; ALL-LABEL: load_cvt_4i16_to_4f64:
; ALL: # BB#0:
; ALL-NEXT: movswl (%rdi), %eax
; ALL-NEXT: vmovd %eax, %xmm0
; ALL-NEXT: vcvtph2ps %xmm0, %xmm0
; ALL-NEXT: movswl 2(%rdi), %eax
; ALL-NEXT: vmovd %eax, %xmm1
; ALL-NEXT: vcvtph2ps %xmm1, %xmm1
; ALL-NEXT: movswl 4(%rdi), %eax
; ALL-NEXT: vmovd %eax, %xmm2
; ALL-NEXT: vcvtph2ps %xmm2, %xmm2
; ALL-NEXT: movswl 6(%rdi), %eax
; ALL-NEXT: vmovd %eax, %xmm3
; ALL-NEXT: vcvtph2ps %xmm3, %xmm3
; ALL-NEXT: vcvtss2sd %xmm3, %xmm3, %xmm3
; ALL-NEXT: vcvtss2sd %xmm2, %xmm2, %xmm2
; ALL-NEXT: vunpcklpd {{.*#+}} xmm2 = xmm2[0],xmm3[0]
; ALL-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1
; ALL-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; ALL-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; ALL-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; ALL-NEXT: retq
%1 = load <4 x i16>, <4 x i16>* %a0
%2 = bitcast <4 x i16> %1 to <4 x half>
%3 = fpext <4 x half> %2 to <4 x double>
ret <4 x double> %3
}
define <4 x double> @load_cvt_8i16_to_4f64(<8 x i16>* %a0) {
; ALL-LABEL: load_cvt_8i16_to_4f64:
; ALL: # BB#0:
; ALL-NEXT: movq (%rdi), %rax
; ALL-NEXT: movq %rax, %rcx
; ALL-NEXT: movl %eax, %edx
; ALL-NEXT: movswl %ax, %esi
; ALL-NEXT: shrq $48, %rax
; ALL-NEXT: shrq $32, %rcx
; ALL-NEXT: shrl $16, %edx
; ALL-NEXT: movswl %dx, %edx
; ALL-NEXT: vmovd %edx, %xmm0
; ALL-NEXT: vcvtph2ps %xmm0, %xmm0
; ALL-NEXT: vmovd %esi, %xmm1
; ALL-NEXT: vcvtph2ps %xmm1, %xmm1
; ALL-NEXT: movswl %cx, %ecx
; ALL-NEXT: vmovd %ecx, %xmm2
; ALL-NEXT: vcvtph2ps %xmm2, %xmm2
; ALL-NEXT: cwtl
; ALL-NEXT: vmovd %eax, %xmm3
; ALL-NEXT: vcvtph2ps %xmm3, %xmm3
; ALL-NEXT: vcvtss2sd %xmm3, %xmm3, %xmm3
; ALL-NEXT: vcvtss2sd %xmm2, %xmm2, %xmm2
; ALL-NEXT: vunpcklpd {{.*#+}} xmm2 = xmm2[0],xmm3[0]
; ALL-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1
; ALL-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; ALL-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; ALL-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; ALL-NEXT: retq
%1 = load <8 x i16>, <8 x i16>* %a0
%2 = shufflevector <8 x i16> %1, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
%3 = bitcast <4 x i16> %2 to <4 x half>
%4 = fpext <4 x half> %3 to <4 x double>
ret <4 x double> %4
}
define <8 x double> @load_cvt_8i16_to_8f64(<8 x i16>* %a0) {
; AVX1-LABEL: load_cvt_8i16_to_8f64:
; AVX1: # BB#0:
; AVX1-NEXT: movswl 8(%rdi), %eax
; AVX1-NEXT: vmovd %eax, %xmm0
; AVX1-NEXT: vcvtph2ps %xmm0, %xmm1
; AVX1-NEXT: movswl 10(%rdi), %eax
; AVX1-NEXT: vmovd %eax, %xmm0
; AVX1-NEXT: vcvtph2ps %xmm0, %xmm2
; AVX1-NEXT: movswl 12(%rdi), %eax
; AVX1-NEXT: vmovd %eax, %xmm0
; AVX1-NEXT: vcvtph2ps %xmm0, %xmm3
; AVX1-NEXT: movswl 14(%rdi), %eax
; AVX1-NEXT: vmovd %eax, %xmm0
; AVX1-NEXT: vcvtph2ps %xmm0, %xmm4
; AVX1-NEXT: movswl (%rdi), %eax
; AVX1-NEXT: vmovd %eax, %xmm0
; AVX1-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX1-NEXT: movswl 2(%rdi), %eax
; AVX1-NEXT: vmovd %eax, %xmm5
; AVX1-NEXT: vcvtph2ps %xmm5, %xmm5
; AVX1-NEXT: movswl 4(%rdi), %eax
; AVX1-NEXT: vmovd %eax, %xmm6
; AVX1-NEXT: vcvtph2ps %xmm6, %xmm6
; AVX1-NEXT: movswl 6(%rdi), %eax
; AVX1-NEXT: vmovd %eax, %xmm7
; AVX1-NEXT: vcvtph2ps %xmm7, %xmm7
; AVX1-NEXT: vcvtss2sd %xmm7, %xmm7, %xmm7
; AVX1-NEXT: vcvtss2sd %xmm6, %xmm6, %xmm6
; AVX1-NEXT: vunpcklpd {{.*#+}} xmm6 = xmm6[0],xmm7[0]
; AVX1-NEXT: vcvtss2sd %xmm5, %xmm5, %xmm5
; AVX1-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX1-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm5[0]
; AVX1-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm0
; AVX1-NEXT: vcvtss2sd %xmm4, %xmm4, %xmm4
; AVX1-NEXT: vcvtss2sd %xmm3, %xmm3, %xmm3
; AVX1-NEXT: vunpcklpd {{.*#+}} xmm3 = xmm3[0],xmm4[0]
; AVX1-NEXT: vcvtss2sd %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vunpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_cvt_8i16_to_8f64:
; AVX2: # BB#0:
; AVX2-NEXT: movswl 8(%rdi), %eax
; AVX2-NEXT: vmovd %eax, %xmm0
; AVX2-NEXT: vcvtph2ps %xmm0, %xmm1
; AVX2-NEXT: movswl 10(%rdi), %eax
; AVX2-NEXT: vmovd %eax, %xmm0
; AVX2-NEXT: vcvtph2ps %xmm0, %xmm2
; AVX2-NEXT: movswl 12(%rdi), %eax
; AVX2-NEXT: vmovd %eax, %xmm0
; AVX2-NEXT: vcvtph2ps %xmm0, %xmm3
; AVX2-NEXT: movswl 14(%rdi), %eax
; AVX2-NEXT: vmovd %eax, %xmm0
; AVX2-NEXT: vcvtph2ps %xmm0, %xmm4
; AVX2-NEXT: movswl (%rdi), %eax
; AVX2-NEXT: vmovd %eax, %xmm0
; AVX2-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX2-NEXT: movswl 2(%rdi), %eax
; AVX2-NEXT: vmovd %eax, %xmm5
; AVX2-NEXT: vcvtph2ps %xmm5, %xmm5
; AVX2-NEXT: movswl 4(%rdi), %eax
; AVX2-NEXT: vmovd %eax, %xmm6
; AVX2-NEXT: vcvtph2ps %xmm6, %xmm6
; AVX2-NEXT: movswl 6(%rdi), %eax
; AVX2-NEXT: vmovd %eax, %xmm7
; AVX2-NEXT: vcvtph2ps %xmm7, %xmm7
; AVX2-NEXT: vcvtss2sd %xmm7, %xmm7, %xmm7
; AVX2-NEXT: vcvtss2sd %xmm6, %xmm6, %xmm6
; AVX2-NEXT: vunpcklpd {{.*#+}} xmm6 = xmm6[0],xmm7[0]
; AVX2-NEXT: vcvtss2sd %xmm5, %xmm5, %xmm5
; AVX2-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX2-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm5[0]
; AVX2-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm0
; AVX2-NEXT: vcvtss2sd %xmm4, %xmm4, %xmm4
; AVX2-NEXT: vcvtss2sd %xmm3, %xmm3, %xmm3
; AVX2-NEXT: vunpcklpd {{.*#+}} xmm3 = xmm3[0],xmm4[0]
; AVX2-NEXT: vcvtss2sd %xmm2, %xmm2, %xmm2
; AVX2-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vunpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; AVX2-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: load_cvt_8i16_to_8f64:
; AVX512: # BB#0:
; AVX512-NEXT: movswl (%rdi), %eax
; AVX512-NEXT: vmovd %eax, %xmm0
; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX512-NEXT: movswl 2(%rdi), %eax
; AVX512-NEXT: vmovd %eax, %xmm1
; AVX512-NEXT: vcvtph2ps %xmm1, %xmm1
; AVX512-NEXT: movswl 4(%rdi), %eax
; AVX512-NEXT: vmovd %eax, %xmm2
; AVX512-NEXT: vcvtph2ps %xmm2, %xmm2
; AVX512-NEXT: movswl 6(%rdi), %eax
; AVX512-NEXT: vmovd %eax, %xmm3
; AVX512-NEXT: vcvtph2ps %xmm3, %xmm3
; AVX512-NEXT: movswl 8(%rdi), %eax
; AVX512-NEXT: vmovd %eax, %xmm4
; AVX512-NEXT: vcvtph2ps %xmm4, %xmm4
; AVX512-NEXT: movswl 10(%rdi), %eax
; AVX512-NEXT: vmovd %eax, %xmm5
; AVX512-NEXT: vcvtph2ps %xmm5, %xmm5
; AVX512-NEXT: movswl 12(%rdi), %eax
; AVX512-NEXT: vmovd %eax, %xmm6
; AVX512-NEXT: vcvtph2ps %xmm6, %xmm6
; AVX512-NEXT: movswl 14(%rdi), %eax
; AVX512-NEXT: vmovd %eax, %xmm7
; AVX512-NEXT: vcvtph2ps %xmm7, %xmm7
; AVX512-NEXT: vcvtss2sd %xmm7, %xmm7, %xmm7
; AVX512-NEXT: vcvtss2sd %xmm6, %xmm6, %xmm6
; AVX512-NEXT: vunpcklpd {{.*#+}} xmm6 = xmm6[0],xmm7[0]
; AVX512-NEXT: vcvtss2sd %xmm5, %xmm5, %xmm5
; AVX512-NEXT: vcvtss2sd %xmm4, %xmm4, %xmm4
; AVX512-NEXT: vunpcklpd {{.*#+}} xmm4 = xmm4[0],xmm5[0]
; AVX512-NEXT: vinsertf128 $1, %xmm6, %ymm4, %ymm4
; AVX512-NEXT: vcvtss2sd %xmm3, %xmm3, %xmm3
; AVX512-NEXT: vcvtss2sd %xmm2, %xmm2, %xmm2
; AVX512-NEXT: vunpcklpd {{.*#+}} xmm2 = xmm2[0],xmm3[0]
; AVX512-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1
; AVX512-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX512-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX512-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX512-NEXT: vinsertf64x4 $1, %ymm4, %zmm0, %zmm0
; AVX512-NEXT: retq
%1 = load <8 x i16>, <8 x i16>* %a0
%2 = bitcast <8 x i16> %1 to <8 x half>
%3 = fpext <8 x half> %2 to <8 x double>
ret <8 x double> %3
}
;
; Float to Half
;
define i16 @cvt_f32_to_i16(float %a0) {
; ALL-LABEL: cvt_f32_to_i16:
; ALL: # BB#0:
; ALL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; ALL-NEXT: vmovd %xmm0, %eax
; ALL-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
; ALL-NEXT: retq
%1 = fptrunc float %a0 to half
%2 = bitcast half %1 to i16
ret i16 %2
}
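; The $4 immediate on vcvtps2ph selects rounding under the current MXCSR mode
; (_MM_FROUND_CUR_DIRECTION). A hedged C sketch of the scalar truncation,
; assuming the F16C intrinsic from <immintrin.h>; the function name is
; illustrative only:
;
;   #include <immintrin.h>
;   unsigned short float_to_half(float f) {
;     return _cvtss_sh(f, _MM_FROUND_CUR_DIRECTION); /* vcvtps2ph $4 */
;   }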
define <4 x i16> @cvt_4f32_to_4i16(<4 x float> %a0) {
; ALL-LABEL: cvt_4f32_to_4i16:
; ALL: # BB#0:
; ALL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; ALL-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; ALL-NEXT: vmovd %xmm1, %eax
; ALL-NEXT: shll $16, %eax
; ALL-NEXT: vcvtps2ph $4, %xmm0, %xmm1
; ALL-NEXT: vmovd %xmm1, %ecx
; ALL-NEXT: movzwl %cx, %ecx
; ALL-NEXT: orl %eax, %ecx
; ALL-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3]
; ALL-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; ALL-NEXT: vmovd %xmm1, %eax
; ALL-NEXT: shll $16, %eax
; ALL-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; ALL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; ALL-NEXT: vmovd %xmm0, %edx
; ALL-NEXT: movzwl %dx, %edx
; ALL-NEXT: orl %eax, %edx
; ALL-NEXT: shlq $32, %rdx
; ALL-NEXT: orq %rcx, %rdx
; ALL-NEXT: vmovq %rdx, %xmm0
; ALL-NEXT: retq
%1 = fptrunc <4 x float> %a0 to <4 x half>
%2 = bitcast <4 x half> %1 to <4 x i16>
ret <4 x i16> %2
}
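; In the vector float-to-half cases, each lane is truncated separately with
; vcvtps2ph; the 16-bit results are then packed in GPRs (shll $16, shlq $32,
; or) and the packed 64-bit value is moved back into an XMM register with
; vmovq, as checked above and in the tests that follow.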
define <8 x i16> @cvt_4f32_to_8i16_undef(<4 x float> %a0) {
; ALL-LABEL: cvt_4f32_to_8i16_undef:
; ALL: # BB#0:
; ALL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; ALL-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; ALL-NEXT: vmovd %xmm1, %eax
; ALL-NEXT: shll $16, %eax
; ALL-NEXT: vcvtps2ph $4, %xmm0, %xmm1
; ALL-NEXT: vmovd %xmm1, %ecx
; ALL-NEXT: movzwl %cx, %ecx
; ALL-NEXT: orl %eax, %ecx
; ALL-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3]
; ALL-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; ALL-NEXT: vmovd %xmm1, %eax
; ALL-NEXT: shll $16, %eax
; ALL-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; ALL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; ALL-NEXT: vmovd %xmm0, %edx
; ALL-NEXT: movzwl %dx, %edx
; ALL-NEXT: orl %eax, %edx
; ALL-NEXT: shlq $32, %rdx
; ALL-NEXT: orq %rcx, %rdx
; ALL-NEXT: vmovq %rdx, %xmm0
; ALL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; ALL-NEXT: retq
%1 = fptrunc <4 x float> %a0 to <4 x half>
%2 = bitcast <4 x half> %1 to <4 x i16>
%3 = shufflevector <4 x i16> %2, <4 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
ret <8 x i16> %3
}
define <8 x i16> @cvt_4f32_to_8i16_zero(<4 x float> %a0) {
; ALL-LABEL: cvt_4f32_to_8i16_zero:
; ALL: # BB#0:
; ALL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; ALL-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; ALL-NEXT: vmovd %xmm1, %eax
; ALL-NEXT: shll $16, %eax
; ALL-NEXT: vcvtps2ph $4, %xmm0, %xmm1
; ALL-NEXT: vmovd %xmm1, %ecx
; ALL-NEXT: movzwl %cx, %ecx
; ALL-NEXT: orl %eax, %ecx
; ALL-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3]
; ALL-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; ALL-NEXT: vmovd %xmm1, %eax
; ALL-NEXT: shll $16, %eax
; ALL-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; ALL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; ALL-NEXT: vmovd %xmm0, %edx
; ALL-NEXT: movzwl %dx, %edx
; ALL-NEXT: orl %eax, %edx
; ALL-NEXT: shlq $32, %rdx
; ALL-NEXT: orq %rcx, %rdx
; ALL-NEXT: vmovq %rdx, %xmm0
; ALL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero
; ALL-NEXT: retq
%1 = fptrunc <4 x float> %a0 to <4 x half>
%2 = bitcast <4 x half> %1 to <4 x i16>
%3 = shufflevector <4 x i16> %2, <4 x i16> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
ret <8 x i16> %3
}
define <8 x i16> @cvt_8f32_to_8i16(<8 x float> %a0) {
; AVX1-LABEL: cvt_8f32_to_8i16:
; AVX1: # BB#0:
; AVX1-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX1-NEXT: vmovd %xmm1, %eax
; AVX1-NEXT: shll $16, %eax
; AVX1-NEXT: vcvtps2ph $4, %xmm0, %xmm1
; AVX1-NEXT: vmovd %xmm1, %ecx
; AVX1-NEXT: movzwl %cx, %ecx
; AVX1-NEXT: orl %eax, %ecx
; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3]
; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX1-NEXT: vmovd %xmm1, %edx
; AVX1-NEXT: shll $16, %edx
; AVX1-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX1-NEXT: vmovd %xmm1, %eax
; AVX1-NEXT: movzwl %ax, %eax
; AVX1-NEXT: orl %edx, %eax
; AVX1-NEXT: shlq $32, %rax
; AVX1-NEXT: orq %rcx, %rax
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX1-NEXT: vmovd %xmm1, %ecx
; AVX1-NEXT: shll $16, %ecx
; AVX1-NEXT: vcvtps2ph $4, %xmm0, %xmm1
; AVX1-NEXT: vmovd %xmm1, %edx
; AVX1-NEXT: movzwl %dx, %edx
; AVX1-NEXT: orl %ecx, %edx
; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3]
; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX1-NEXT: vmovd %xmm1, %ecx
; AVX1-NEXT: shll $16, %ecx
; AVX1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX1-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; AVX1-NEXT: vmovd %xmm0, %esi
; AVX1-NEXT: movzwl %si, %esi
; AVX1-NEXT: orl %ecx, %esi
; AVX1-NEXT: shlq $32, %rsi
; AVX1-NEXT: orq %rdx, %rsi
; AVX1-NEXT: vmovq %rsi, %xmm0
; AVX1-NEXT: vmovq %rax, %xmm1
; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: cvt_8f32_to_8i16:
; AVX2: # BB#0:
; AVX2-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX2-NEXT: vmovd %xmm1, %eax
; AVX2-NEXT: shll $16, %eax
; AVX2-NEXT: vcvtps2ph $4, %xmm0, %xmm1
; AVX2-NEXT: vmovd %xmm1, %ecx
; AVX2-NEXT: movzwl %cx, %ecx
; AVX2-NEXT: orl %eax, %ecx
; AVX2-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3]
; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX2-NEXT: vmovd %xmm1, %edx
; AVX2-NEXT: shll $16, %edx
; AVX2-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX2-NEXT: vmovd %xmm1, %eax
; AVX2-NEXT: movzwl %ax, %eax
; AVX2-NEXT: orl %edx, %eax
; AVX2-NEXT: shlq $32, %rax
; AVX2-NEXT: orq %rcx, %rax
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX2-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX2-NEXT: vmovd %xmm1, %ecx
; AVX2-NEXT: shll $16, %ecx
; AVX2-NEXT: vcvtps2ph $4, %xmm0, %xmm1
; AVX2-NEXT: vmovd %xmm1, %edx
; AVX2-NEXT: movzwl %dx, %edx
; AVX2-NEXT: orl %ecx, %edx
; AVX2-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3]
; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX2-NEXT: vmovd %xmm1, %ecx
; AVX2-NEXT: shll $16, %ecx
; AVX2-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX2-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; AVX2-NEXT: vmovd %xmm0, %esi
; AVX2-NEXT: movzwl %si, %esi
; AVX2-NEXT: orl %ecx, %esi
; AVX2-NEXT: shlq $32, %rsi
; AVX2-NEXT: orq %rdx, %rsi
; AVX2-NEXT: vmovq %rsi, %xmm0
; AVX2-NEXT: vmovq %rax, %xmm1
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: cvt_8f32_to_8i16:
; AVX512: # BB#0:
; AVX512-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX512-NEXT: vmovd %xmm1, %eax
; AVX512-NEXT: shll $16, %eax
; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm1
; AVX512-NEXT: vmovd %xmm1, %ecx
; AVX512-NEXT: movzwl %cx, %ecx
; AVX512-NEXT: orl %eax, %ecx
; AVX512-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3]
; AVX512-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX512-NEXT: vmovd %xmm1, %edx
; AVX512-NEXT: shll $16, %edx
; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX512-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX512-NEXT: vmovd %xmm1, %eax
; AVX512-NEXT: movzwl %ax, %eax
; AVX512-NEXT: orl %edx, %eax
; AVX512-NEXT: shlq $32, %rax
; AVX512-NEXT: orq %rcx, %rax
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX512-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX512-NEXT: vmovd %xmm1, %ecx
; AVX512-NEXT: shll $16, %ecx
; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm1
; AVX512-NEXT: vmovd %xmm1, %edx
; AVX512-NEXT: movzwl %dx, %edx
; AVX512-NEXT: orl %ecx, %edx
; AVX512-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3]
; AVX512-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX512-NEXT: vmovd %xmm1, %ecx
; AVX512-NEXT: shll $16, %ecx
; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; AVX512-NEXT: vmovd %xmm0, %esi
; AVX512-NEXT: movzwl %si, %esi
; AVX512-NEXT: orl %ecx, %esi
; AVX512-NEXT: shlq $32, %rsi
; AVX512-NEXT: orq %rdx, %rsi
; AVX512-NEXT: vmovq %rsi, %xmm0
; AVX512-NEXT: vmovq %rax, %xmm1
; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX512-NEXT: retq
%1 = fptrunc <8 x float> %a0 to <8 x half>
%2 = bitcast <8 x half> %1 to <8 x i16>
ret <8 x i16> %2
}
define <16 x i16> @cvt_16f32_to_16i16(<16 x float> %a0) {
; AVX1-LABEL: cvt_16f32_to_16i16:
; AVX1: # BB#0:
; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm2
; AVX1-NEXT: vmovd %xmm2, %eax
; AVX1-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX1-NEXT: vcvtps2ph $4, %xmm2, %xmm2
; AVX1-NEXT: vmovd %eax, %xmm3
; AVX1-NEXT: vmovd %xmm2, %eax
; AVX1-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX1-NEXT: vcvtps2ph $4, %xmm2, %xmm2
; AVX1-NEXT: vpinsrw $1, %eax, %xmm3, %xmm3
; AVX1-NEXT: vmovd %xmm2, %eax
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX1-NEXT: vpinsrw $2, %eax, %xmm3, %xmm3
; AVX1-NEXT: vmovd %xmm1, %eax
; AVX1-NEXT: vcvtps2ph $4, %xmm2, %xmm1
; AVX1-NEXT: vpinsrw $3, %eax, %xmm3, %xmm3
; AVX1-NEXT: vmovd %xmm1, %eax
; AVX1-NEXT: vmovshdup {{.*#+}} xmm1 = xmm2[1,1,3,3]
; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX1-NEXT: vpinsrw $4, %eax, %xmm3, %xmm3
; AVX1-NEXT: vmovd %xmm1, %eax
; AVX1-NEXT: vpermilpd {{.*#+}} xmm1 = xmm2[1,0]
; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX1-NEXT: vpinsrw $5, %eax, %xmm3, %xmm3
; AVX1-NEXT: vmovd %xmm1, %eax
; AVX1-NEXT: vcvtps2ph $4, %xmm0, %xmm1
; AVX1-NEXT: vpermilps {{.*#+}} xmm2 = xmm2[3,1,2,3]
; AVX1-NEXT: vcvtps2ph $4, %xmm2, %xmm2
; AVX1-NEXT: vpinsrw $6, %eax, %xmm3, %xmm3
; AVX1-NEXT: vmovd %xmm2, %eax
; AVX1-NEXT: vpinsrw $7, %eax, %xmm3, %xmm2
; AVX1-NEXT: vmovd %xmm1, %eax
; AVX1-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX1-NEXT: vmovd %eax, %xmm3
; AVX1-NEXT: vmovd %xmm1, %eax
; AVX1-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX1-NEXT: vpinsrw $1, %eax, %xmm3, %xmm3
; AVX1-NEXT: vmovd %xmm1, %eax
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; AVX1-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; AVX1-NEXT: vpinsrw $2, %eax, %xmm3, %xmm3
; AVX1-NEXT: vmovd %xmm0, %eax
; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm0
; AVX1-NEXT: vpinsrw $3, %eax, %xmm3, %xmm3
; AVX1-NEXT: vmovd %xmm0, %eax
; AVX1-NEXT: vmovshdup {{.*#+}} xmm0 = xmm1[1,1,3,3]
; AVX1-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; AVX1-NEXT: vpinsrw $4, %eax, %xmm3, %xmm3
; AVX1-NEXT: vmovd %xmm0, %eax
; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm1[3,1,2,3]
; AVX1-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; AVX1-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX1-NEXT: vpinsrw $5, %eax, %xmm3, %xmm3
; AVX1-NEXT: vmovd %xmm1, %eax
; AVX1-NEXT: vpinsrw $6, %eax, %xmm3, %xmm1
; AVX1-NEXT: vmovd %xmm0, %eax
; AVX1-NEXT: vpinsrw $7, %eax, %xmm1, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: cvt_16f32_to_16i16:
; AVX2: # BB#0:
; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm2
; AVX2-NEXT: vmovd %xmm2, %eax
; AVX2-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX2-NEXT: vcvtps2ph $4, %xmm2, %xmm2
; AVX2-NEXT: vmovd %eax, %xmm3
; AVX2-NEXT: vmovd %xmm2, %eax
; AVX2-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX2-NEXT: vcvtps2ph $4, %xmm2, %xmm2
; AVX2-NEXT: vpinsrw $1, %eax, %xmm3, %xmm3
; AVX2-NEXT: vmovd %xmm2, %eax
; AVX2-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX2-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX2-NEXT: vpinsrw $2, %eax, %xmm3, %xmm3
; AVX2-NEXT: vmovd %xmm1, %eax
; AVX2-NEXT: vcvtps2ph $4, %xmm2, %xmm1
; AVX2-NEXT: vpinsrw $3, %eax, %xmm3, %xmm3
; AVX2-NEXT: vmovd %xmm1, %eax
; AVX2-NEXT: vmovshdup {{.*#+}} xmm1 = xmm2[1,1,3,3]
; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX2-NEXT: vpinsrw $4, %eax, %xmm3, %xmm3
; AVX2-NEXT: vmovd %xmm1, %eax
; AVX2-NEXT: vpermilpd {{.*#+}} xmm1 = xmm2[1,0]
; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX2-NEXT: vpinsrw $5, %eax, %xmm3, %xmm3
; AVX2-NEXT: vmovd %xmm1, %eax
; AVX2-NEXT: vcvtps2ph $4, %xmm0, %xmm1
; AVX2-NEXT: vpermilps {{.*#+}} xmm2 = xmm2[3,1,2,3]
; AVX2-NEXT: vcvtps2ph $4, %xmm2, %xmm2
; AVX2-NEXT: vpinsrw $6, %eax, %xmm3, %xmm3
; AVX2-NEXT: vmovd %xmm2, %eax
; AVX2-NEXT: vpinsrw $7, %eax, %xmm3, %xmm2
; AVX2-NEXT: vmovd %xmm1, %eax
; AVX2-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX2-NEXT: vmovd %eax, %xmm3
; AVX2-NEXT: vmovd %xmm1, %eax
; AVX2-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX2-NEXT: vpinsrw $1, %eax, %xmm3, %xmm3
; AVX2-NEXT: vmovd %xmm1, %eax
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; AVX2-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; AVX2-NEXT: vpinsrw $2, %eax, %xmm3, %xmm3
; AVX2-NEXT: vmovd %xmm0, %eax
; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm0
; AVX2-NEXT: vpinsrw $3, %eax, %xmm3, %xmm3
; AVX2-NEXT: vmovd %xmm0, %eax
; AVX2-NEXT: vmovshdup {{.*#+}} xmm0 = xmm1[1,1,3,3]
; AVX2-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; AVX2-NEXT: vpinsrw $4, %eax, %xmm3, %xmm3
; AVX2-NEXT: vmovd %xmm0, %eax
; AVX2-NEXT: vpermilps {{.*#+}} xmm0 = xmm1[3,1,2,3]
; AVX2-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; AVX2-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX2-NEXT: vpinsrw $5, %eax, %xmm3, %xmm3
; AVX2-NEXT: vmovd %xmm1, %eax
; AVX2-NEXT: vpinsrw $6, %eax, %xmm3, %xmm1
; AVX2-NEXT: vmovd %xmm0, %eax
; AVX2-NEXT: vpinsrw $7, %eax, %xmm1, %xmm0
; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: cvt_16f32_to_16i16:
; AVX512: # BB#0:
; AVX512-NEXT: vextractf64x4 $1, %zmm0, %ymm1
; AVX512-NEXT: vcvtps2ph $4, %xmm1, %xmm2
; AVX512-NEXT: vmovd %xmm2, %eax
; AVX512-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX512-NEXT: vcvtps2ph $4, %xmm2, %xmm2
; AVX512-NEXT: vmovd %eax, %xmm3
; AVX512-NEXT: vmovd %xmm2, %eax
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX512-NEXT: vcvtps2ph $4, %xmm2, %xmm2
; AVX512-NEXT: vpinsrw $1, %eax, %xmm3, %xmm3
; AVX512-NEXT: vmovd %xmm2, %eax
; AVX512-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX512-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; AVX512-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX512-NEXT: vpinsrw $2, %eax, %xmm3, %xmm3
; AVX512-NEXT: vmovd %xmm1, %eax
; AVX512-NEXT: vcvtps2ph $4, %xmm2, %xmm1
; AVX512-NEXT: vpinsrw $3, %eax, %xmm3, %xmm3
; AVX512-NEXT: vmovd %xmm1, %eax
; AVX512-NEXT: vmovshdup {{.*#+}} xmm1 = xmm2[1,1,3,3]
; AVX512-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX512-NEXT: vpinsrw $4, %eax, %xmm3, %xmm3
; AVX512-NEXT: vmovd %xmm1, %eax
; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm2[1,0]
; AVX512-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX512-NEXT: vpinsrw $5, %eax, %xmm3, %xmm3
; AVX512-NEXT: vmovd %xmm1, %eax
; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm1
; AVX512-NEXT: vpermilps {{.*#+}} xmm2 = xmm2[3,1,2,3]
; AVX512-NEXT: vcvtps2ph $4, %xmm2, %xmm2
; AVX512-NEXT: vpinsrw $6, %eax, %xmm3, %xmm3
; AVX512-NEXT: vmovd %xmm2, %eax
; AVX512-NEXT: vpinsrw $7, %eax, %xmm3, %xmm2
; AVX512-NEXT: vmovd %xmm1, %eax
; AVX512-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX512-NEXT: vmovd %eax, %xmm3
; AVX512-NEXT: vmovd %xmm1, %eax
; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX512-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX512-NEXT: vpinsrw $1, %eax, %xmm3, %xmm3
; AVX512-NEXT: vmovd %xmm1, %eax
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX512-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; AVX512-NEXT: vpinsrw $2, %eax, %xmm3, %xmm3
; AVX512-NEXT: vmovd %xmm0, %eax
; AVX512-NEXT: vcvtps2ph $4, %xmm1, %xmm0
; AVX512-NEXT: vpinsrw $3, %eax, %xmm3, %xmm3
; AVX512-NEXT: vmovd %xmm0, %eax
; AVX512-NEXT: vmovshdup {{.*#+}} xmm0 = xmm1[1,1,3,3]
; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; AVX512-NEXT: vpinsrw $4, %eax, %xmm3, %xmm3
; AVX512-NEXT: vmovd %xmm0, %eax
; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm1[1,0]
; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; AVX512-NEXT: vpinsrw $5, %eax, %xmm3, %xmm3
; AVX512-NEXT: vmovd %xmm0, %eax
; AVX512-NEXT: vpermilps {{.*#+}} xmm0 = xmm1[3,1,2,3]
; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; AVX512-NEXT: vpinsrw $6, %eax, %xmm3, %xmm1
; AVX512-NEXT: vmovd %xmm0, %eax
; AVX512-NEXT: vpinsrw $7, %eax, %xmm1, %xmm0
; AVX512-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
; AVX512-NEXT: retq
%1 = fptrunc <16 x float> %a0 to <16 x half>
%2 = bitcast <16 x half> %1 to <16 x i16>
ret <16 x i16> %2
}
;
; Float to Half (Store)
;
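; Note: the store variants below write the truncated results straight to memory:
; each f32 lane is converted with vcvtps2ph and the low 16 bits are written back
; with scalar movw stores (or a single vmovdqa for the widened 8i16 forms).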
define void @store_cvt_f32_to_i16(float %a0, i16* %a1) {
; ALL-LABEL: store_cvt_f32_to_i16:
; ALL: # BB#0:
; ALL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; ALL-NEXT: vmovd %xmm0, %eax
; ALL-NEXT: movw %ax, (%rdi)
; ALL-NEXT: retq
%1 = fptrunc float %a0 to half
%2 = bitcast half %1 to i16
store i16 %2, i16* %a1
ret void
}
define void @store_cvt_4f32_to_4i16(<4 x float> %a0, <4 x i16>* %a1) {
; ALL-LABEL: store_cvt_4f32_to_4i16:
; ALL: # BB#0:
; ALL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; ALL-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; ALL-NEXT: vmovd %xmm1, %eax
; ALL-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; ALL-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; ALL-NEXT: vmovd %xmm1, %ecx
; ALL-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3]
; ALL-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; ALL-NEXT: vmovd %xmm1, %edx
; ALL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; ALL-NEXT: vmovd %xmm0, %esi
; ALL-NEXT: movw %si, (%rdi)
; ALL-NEXT: movw %dx, 6(%rdi)
; ALL-NEXT: movw %cx, 4(%rdi)
; ALL-NEXT: movw %ax, 2(%rdi)
; ALL-NEXT: retq
%1 = fptrunc <4 x float> %a0 to <4 x half>
%2 = bitcast <4 x half> %1 to <4 x i16>
store <4 x i16> %2, <4 x i16>* %a1
ret void
}
define void @store_cvt_4f32_to_8i16_undef(<4 x float> %a0, <8 x i16>* %a1) {
; ALL-LABEL: store_cvt_4f32_to_8i16_undef:
; ALL: # BB#0:
; ALL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; ALL-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; ALL-NEXT: vmovd %xmm1, %eax
; ALL-NEXT: shll $16, %eax
; ALL-NEXT: vcvtps2ph $4, %xmm0, %xmm1
; ALL-NEXT: vmovd %xmm1, %ecx
; ALL-NEXT: movzwl %cx, %ecx
; ALL-NEXT: orl %eax, %ecx
; ALL-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3]
; ALL-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; ALL-NEXT: vmovd %xmm1, %eax
; ALL-NEXT: shll $16, %eax
; ALL-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; ALL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; ALL-NEXT: vmovd %xmm0, %edx
; ALL-NEXT: movzwl %dx, %edx
; ALL-NEXT: orl %eax, %edx
; ALL-NEXT: shlq $32, %rdx
; ALL-NEXT: orq %rcx, %rdx
; ALL-NEXT: vmovq %rdx, %xmm0
; ALL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; ALL-NEXT: vmovdqa %xmm0, (%rdi)
; ALL-NEXT: retq
%1 = fptrunc <4 x float> %a0 to <4 x half>
%2 = bitcast <4 x half> %1 to <4 x i16>
%3 = shufflevector <4 x i16> %2, <4 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
store <8 x i16> %3, <8 x i16>* %a1
ret void
}
define void @store_cvt_4f32_to_8i16_zero(<4 x float> %a0, <8 x i16>* %a1) {
; ALL-LABEL: store_cvt_4f32_to_8i16_zero:
; ALL: # BB#0:
; ALL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; ALL-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; ALL-NEXT: vmovd %xmm1, %eax
; ALL-NEXT: shll $16, %eax
; ALL-NEXT: vcvtps2ph $4, %xmm0, %xmm1
; ALL-NEXT: vmovd %xmm1, %ecx
; ALL-NEXT: movzwl %cx, %ecx
; ALL-NEXT: orl %eax, %ecx
; ALL-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3]
; ALL-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; ALL-NEXT: vmovd %xmm1, %eax
; ALL-NEXT: shll $16, %eax
; ALL-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; ALL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; ALL-NEXT: vmovd %xmm0, %edx
; ALL-NEXT: movzwl %dx, %edx
; ALL-NEXT: orl %eax, %edx
; ALL-NEXT: shlq $32, %rdx
; ALL-NEXT: orq %rcx, %rdx
; ALL-NEXT: vmovq %rdx, %xmm0
; ALL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero
; ALL-NEXT: vmovdqa %xmm0, (%rdi)
; ALL-NEXT: retq
%1 = fptrunc <4 x float> %a0 to <4 x half>
%2 = bitcast <4 x half> %1 to <4 x i16>
%3 = shufflevector <4 x i16> %2, <4 x i16> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
store <8 x i16> %3, <8 x i16>* %a1
ret void
}
define void @store_cvt_8f32_to_8i16(<8 x float> %a0, <8 x i16>* %a1) {
; AVX1-LABEL: store_cvt_8f32_to_8i16:
; AVX1: # BB#0:
; AVX1-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX1-NEXT: vmovd %xmm1, %r8d
; AVX1-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX1-NEXT: vmovd %xmm1, %r9d
; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3]
; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX1-NEXT: vmovd %xmm1, %r10d
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX1-NEXT: vcvtps2ph $4, %xmm2, %xmm2
; AVX1-NEXT: vmovd %xmm2, %r11d
; AVX1-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX1-NEXT: vcvtps2ph $4, %xmm2, %xmm2
; AVX1-NEXT: vmovd %xmm2, %eax
; AVX1-NEXT: vpermilps {{.*#+}} xmm2 = xmm1[3,1,2,3]
; AVX1-NEXT: vcvtps2ph $4, %xmm2, %xmm2
; AVX1-NEXT: vmovd %xmm2, %ecx
; AVX1-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; AVX1-NEXT: vmovd %xmm0, %edx
; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm0
; AVX1-NEXT: vmovd %xmm0, %esi
; AVX1-NEXT: movw %si, 8(%rdi)
; AVX1-NEXT: movw %dx, (%rdi)
; AVX1-NEXT: movw %cx, 14(%rdi)
; AVX1-NEXT: movw %ax, 12(%rdi)
; AVX1-NEXT: movw %r11w, 10(%rdi)
; AVX1-NEXT: movw %r10w, 6(%rdi)
; AVX1-NEXT: movw %r9w, 4(%rdi)
; AVX1-NEXT: movw %r8w, 2(%rdi)
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: store_cvt_8f32_to_8i16:
; AVX2: # BB#0:
; AVX2-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX2-NEXT: vmovd %xmm1, %r8d
; AVX2-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX2-NEXT: vmovd %xmm1, %r9d
; AVX2-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3]
; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX2-NEXT: vmovd %xmm1, %r10d
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX2-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX2-NEXT: vcvtps2ph $4, %xmm2, %xmm2
; AVX2-NEXT: vmovd %xmm2, %r11d
; AVX2-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX2-NEXT: vcvtps2ph $4, %xmm2, %xmm2
; AVX2-NEXT: vmovd %xmm2, %eax
; AVX2-NEXT: vpermilps {{.*#+}} xmm2 = xmm1[3,1,2,3]
; AVX2-NEXT: vcvtps2ph $4, %xmm2, %xmm2
; AVX2-NEXT: vmovd %xmm2, %ecx
; AVX2-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; AVX2-NEXT: vmovd %xmm0, %edx
; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm0
; AVX2-NEXT: vmovd %xmm0, %esi
; AVX2-NEXT: movw %si, 8(%rdi)
; AVX2-NEXT: movw %dx, (%rdi)
; AVX2-NEXT: movw %cx, 14(%rdi)
; AVX2-NEXT: movw %ax, 12(%rdi)
; AVX2-NEXT: movw %r11w, 10(%rdi)
; AVX2-NEXT: movw %r10w, 6(%rdi)
; AVX2-NEXT: movw %r9w, 4(%rdi)
; AVX2-NEXT: movw %r8w, 2(%rdi)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: store_cvt_8f32_to_8i16:
; AVX512: # BB#0:
; AVX512-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX512-NEXT: vmovd %xmm1, %r8d
; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX512-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX512-NEXT: vmovd %xmm1, %r9d
; AVX512-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[3,1,2,3]
; AVX512-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX512-NEXT: vmovd %xmm1, %r10d
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX512-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX512-NEXT: vcvtps2ph $4, %xmm2, %xmm2
; AVX512-NEXT: vmovd %xmm2, %r11d
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX512-NEXT: vcvtps2ph $4, %xmm2, %xmm2
; AVX512-NEXT: vmovd %xmm2, %eax
; AVX512-NEXT: vpermilps {{.*#+}} xmm2 = xmm1[3,1,2,3]
; AVX512-NEXT: vcvtps2ph $4, %xmm2, %xmm2
; AVX512-NEXT: vmovd %xmm2, %ecx
; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; AVX512-NEXT: vmovd %xmm0, %edx
; AVX512-NEXT: vcvtps2ph $4, %xmm1, %xmm0
; AVX512-NEXT: vmovd %xmm0, %esi
; AVX512-NEXT: movw %si, 8(%rdi)
; AVX512-NEXT: movw %dx, (%rdi)
; AVX512-NEXT: movw %cx, 14(%rdi)
; AVX512-NEXT: movw %ax, 12(%rdi)
; AVX512-NEXT: movw %r11w, 10(%rdi)
; AVX512-NEXT: movw %r10w, 6(%rdi)
; AVX512-NEXT: movw %r9w, 4(%rdi)
; AVX512-NEXT: movw %r8w, 2(%rdi)
; AVX512-NEXT: retq
%1 = fptrunc <8 x float> %a0 to <8 x half>
%2 = bitcast <8 x half> %1 to <8 x i16>
store <8 x i16> %2, <8 x i16>* %a1
ret void
}
define void @store_cvt_16f32_to_16i16(<16 x float> %a0, <16 x i16>* %a1) {
; AVX1-LABEL: store_cvt_16f32_to_16i16:
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vcvtps2ph $4, %xmm3, %xmm4
; AVX1-NEXT: vmovd %xmm4, %eax
; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm4
; AVX1-NEXT: movw %ax, 24(%rdi)
; AVX1-NEXT: vmovd %xmm4, %eax
; AVX1-NEXT: vcvtps2ph $4, %xmm2, %xmm4
; AVX1-NEXT: movw %ax, 16(%rdi)
; AVX1-NEXT: vmovd %xmm4, %eax
; AVX1-NEXT: vcvtps2ph $4, %xmm0, %xmm4
; AVX1-NEXT: movw %ax, 8(%rdi)
; AVX1-NEXT: vmovd %xmm4, %eax
; AVX1-NEXT: vpermilps {{.*#+}} xmm4 = xmm3[3,1,2,3]
; AVX1-NEXT: vcvtps2ph $4, %xmm4, %xmm4
; AVX1-NEXT: movw %ax, (%rdi)
; AVX1-NEXT: vmovd %xmm4, %eax
; AVX1-NEXT: vpermilpd {{.*#+}} xmm4 = xmm3[1,0]
; AVX1-NEXT: vcvtps2ph $4, %xmm4, %xmm4
; AVX1-NEXT: movw %ax, 30(%rdi)
; AVX1-NEXT: vmovd %xmm4, %eax
; AVX1-NEXT: vmovshdup {{.*#+}} xmm4 = xmm0[1,1,3,3]
; AVX1-NEXT: vcvtps2ph $4, %xmm4, %xmm4
; AVX1-NEXT: vmovshdup {{.*#+}} xmm3 = xmm3[1,1,3,3]
; AVX1-NEXT: vcvtps2ph $4, %xmm3, %xmm3
; AVX1-NEXT: movw %ax, 28(%rdi)
; AVX1-NEXT: vmovd %xmm3, %eax
; AVX1-NEXT: vpermilps {{.*#+}} xmm3 = xmm1[3,1,2,3]
; AVX1-NEXT: vcvtps2ph $4, %xmm3, %xmm3
; AVX1-NEXT: movw %ax, 26(%rdi)
; AVX1-NEXT: vmovd %xmm3, %eax
; AVX1-NEXT: vpermilpd {{.*#+}} xmm3 = xmm1[1,0]
; AVX1-NEXT: vcvtps2ph $4, %xmm3, %xmm3
; AVX1-NEXT: movw %ax, 22(%rdi)
; AVX1-NEXT: vmovd %xmm3, %eax
; AVX1-NEXT: vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
; AVX1-NEXT: vcvtps2ph $4, %xmm3, %xmm3
; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; AVX1-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; AVX1-NEXT: vmovshdup {{.*#+}} xmm1 = xmm1[1,1,3,3]
; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX1-NEXT: movw %ax, 20(%rdi)
; AVX1-NEXT: vmovd %xmm1, %eax
; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm2[3,1,2,3]
; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX1-NEXT: movw %ax, 18(%rdi)
; AVX1-NEXT: vmovd %xmm1, %eax
; AVX1-NEXT: vmovshdup {{.*#+}} xmm1 = xmm2[1,1,3,3]
; AVX1-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX1-NEXT: vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
; AVX1-NEXT: vcvtps2ph $4, %xmm2, %xmm2
; AVX1-NEXT: movw %ax, 14(%rdi)
; AVX1-NEXT: vmovd %xmm2, %eax
; AVX1-NEXT: movw %ax, 12(%rdi)
; AVX1-NEXT: vmovd %xmm1, %eax
; AVX1-NEXT: movw %ax, 10(%rdi)
; AVX1-NEXT: vmovd %xmm0, %eax
; AVX1-NEXT: movw %ax, 6(%rdi)
; AVX1-NEXT: vmovd %xmm3, %eax
; AVX1-NEXT: movw %ax, 4(%rdi)
; AVX1-NEXT: vmovd %xmm4, %eax
; AVX1-NEXT: movw %ax, 2(%rdi)
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: store_cvt_16f32_to_16i16:
; AVX2: # BB#0:
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX2-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX2-NEXT: vcvtps2ph $4, %xmm3, %xmm4
; AVX2-NEXT: vmovd %xmm4, %eax
; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm4
; AVX2-NEXT: movw %ax, 24(%rdi)
; AVX2-NEXT: vmovd %xmm4, %eax
; AVX2-NEXT: vcvtps2ph $4, %xmm2, %xmm4
; AVX2-NEXT: movw %ax, 16(%rdi)
; AVX2-NEXT: vmovd %xmm4, %eax
; AVX2-NEXT: vcvtps2ph $4, %xmm0, %xmm4
; AVX2-NEXT: movw %ax, 8(%rdi)
; AVX2-NEXT: vmovd %xmm4, %eax
; AVX2-NEXT: vpermilps {{.*#+}} xmm4 = xmm3[3,1,2,3]
; AVX2-NEXT: vcvtps2ph $4, %xmm4, %xmm4
; AVX2-NEXT: movw %ax, (%rdi)
; AVX2-NEXT: vmovd %xmm4, %eax
; AVX2-NEXT: vpermilpd {{.*#+}} xmm4 = xmm3[1,0]
; AVX2-NEXT: vcvtps2ph $4, %xmm4, %xmm4
; AVX2-NEXT: movw %ax, 30(%rdi)
; AVX2-NEXT: vmovd %xmm4, %eax
; AVX2-NEXT: vmovshdup {{.*#+}} xmm4 = xmm0[1,1,3,3]
; AVX2-NEXT: vcvtps2ph $4, %xmm4, %xmm4
; AVX2-NEXT: vmovshdup {{.*#+}} xmm3 = xmm3[1,1,3,3]
; AVX2-NEXT: vcvtps2ph $4, %xmm3, %xmm3
; AVX2-NEXT: movw %ax, 28(%rdi)
; AVX2-NEXT: vmovd %xmm3, %eax
; AVX2-NEXT: vpermilps {{.*#+}} xmm3 = xmm1[3,1,2,3]
; AVX2-NEXT: vcvtps2ph $4, %xmm3, %xmm3
; AVX2-NEXT: movw %ax, 26(%rdi)
; AVX2-NEXT: vmovd %xmm3, %eax
; AVX2-NEXT: vpermilpd {{.*#+}} xmm3 = xmm1[1,0]
; AVX2-NEXT: vcvtps2ph $4, %xmm3, %xmm3
; AVX2-NEXT: movw %ax, 22(%rdi)
; AVX2-NEXT: vmovd %xmm3, %eax
; AVX2-NEXT: vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
; AVX2-NEXT: vcvtps2ph $4, %xmm3, %xmm3
; AVX2-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; AVX2-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; AVX2-NEXT: vmovshdup {{.*#+}} xmm1 = xmm1[1,1,3,3]
; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX2-NEXT: movw %ax, 20(%rdi)
; AVX2-NEXT: vmovd %xmm1, %eax
; AVX2-NEXT: vpermilps {{.*#+}} xmm1 = xmm2[3,1,2,3]
; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX2-NEXT: movw %ax, 18(%rdi)
; AVX2-NEXT: vmovd %xmm1, %eax
; AVX2-NEXT: vmovshdup {{.*#+}} xmm1 = xmm2[1,1,3,3]
; AVX2-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX2-NEXT: vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
; AVX2-NEXT: vcvtps2ph $4, %xmm2, %xmm2
; AVX2-NEXT: movw %ax, 14(%rdi)
; AVX2-NEXT: vmovd %xmm2, %eax
; AVX2-NEXT: movw %ax, 12(%rdi)
; AVX2-NEXT: vmovd %xmm1, %eax
; AVX2-NEXT: movw %ax, 10(%rdi)
; AVX2-NEXT: vmovd %xmm0, %eax
; AVX2-NEXT: movw %ax, 6(%rdi)
; AVX2-NEXT: vmovd %xmm3, %eax
; AVX2-NEXT: movw %ax, 4(%rdi)
; AVX2-NEXT: vmovd %xmm4, %eax
; AVX2-NEXT: movw %ax, 2(%rdi)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: store_cvt_16f32_to_16i16:
; AVX512: # BB#0:
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX512-NEXT: vextractf64x4 $1, %zmm0, %ymm2
; AVX512-NEXT: vextractf128 $1, %ymm2, %xmm3
; AVX512-NEXT: vcvtps2ph $4, %xmm3, %xmm4
; AVX512-NEXT: vmovd %xmm4, %eax
; AVX512-NEXT: vcvtps2ph $4, %xmm2, %xmm4
; AVX512-NEXT: movw %ax, 24(%rdi)
; AVX512-NEXT: vmovd %xmm4, %eax
; AVX512-NEXT: vcvtps2ph $4, %xmm1, %xmm4
; AVX512-NEXT: movw %ax, 16(%rdi)
; AVX512-NEXT: vmovd %xmm4, %eax
; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm4
; AVX512-NEXT: movw %ax, 8(%rdi)
; AVX512-NEXT: vmovd %xmm4, %eax
; AVX512-NEXT: vpermilps {{.*#+}} xmm4 = xmm3[3,1,2,3]
; AVX512-NEXT: vcvtps2ph $4, %xmm4, %xmm4
; AVX512-NEXT: movw %ax, (%rdi)
; AVX512-NEXT: vmovd %xmm4, %eax
; AVX512-NEXT: vpermilpd {{.*#+}} xmm4 = xmm3[1,0]
; AVX512-NEXT: vcvtps2ph $4, %xmm4, %xmm4
; AVX512-NEXT: movw %ax, 30(%rdi)
; AVX512-NEXT: vmovd %xmm4, %eax
; AVX512-NEXT: vmovshdup {{.*#+}} xmm4 = xmm0[1,1,3,3]
; AVX512-NEXT: vcvtps2ph $4, %xmm4, %xmm4
; AVX512-NEXT: vmovshdup {{.*#+}} xmm3 = xmm3[1,1,3,3]
; AVX512-NEXT: vcvtps2ph $4, %xmm3, %xmm3
; AVX512-NEXT: movw %ax, 28(%rdi)
; AVX512-NEXT: vmovd %xmm3, %eax
; AVX512-NEXT: vpermilps {{.*#+}} xmm3 = xmm2[3,1,2,3]
; AVX512-NEXT: vcvtps2ph $4, %xmm3, %xmm3
; AVX512-NEXT: movw %ax, 26(%rdi)
; AVX512-NEXT: vmovd %xmm3, %eax
; AVX512-NEXT: vpermilpd {{.*#+}} xmm3 = xmm2[1,0]
; AVX512-NEXT: vcvtps2ph $4, %xmm3, %xmm3
; AVX512-NEXT: movw %ax, 22(%rdi)
; AVX512-NEXT: vmovd %xmm3, %eax
; AVX512-NEXT: vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
; AVX512-NEXT: vcvtps2ph $4, %xmm3, %xmm3
; AVX512-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; AVX512-NEXT: vmovshdup {{.*#+}} xmm2 = xmm2[1,1,3,3]
; AVX512-NEXT: vcvtps2ph $4, %xmm2, %xmm2
; AVX512-NEXT: movw %ax, 20(%rdi)
; AVX512-NEXT: vmovd %xmm2, %eax
; AVX512-NEXT: vpermilps {{.*#+}} xmm2 = xmm1[3,1,2,3]
; AVX512-NEXT: vcvtps2ph $4, %xmm2, %xmm2
; AVX512-NEXT: movw %ax, 18(%rdi)
; AVX512-NEXT: vmovd %xmm2, %eax
; AVX512-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX512-NEXT: vcvtps2ph $4, %xmm2, %xmm2
; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX512-NEXT: vcvtps2ph $4, %xmm1, %xmm1
; AVX512-NEXT: movw %ax, 14(%rdi)
; AVX512-NEXT: vmovd %xmm1, %eax
; AVX512-NEXT: movw %ax, 12(%rdi)
; AVX512-NEXT: vmovd %xmm2, %eax
; AVX512-NEXT: movw %ax, 10(%rdi)
; AVX512-NEXT: vmovd %xmm0, %eax
; AVX512-NEXT: movw %ax, 6(%rdi)
; AVX512-NEXT: vmovd %xmm3, %eax
; AVX512-NEXT: movw %ax, 4(%rdi)
; AVX512-NEXT: vmovd %xmm4, %eax
; AVX512-NEXT: movw %ax, 2(%rdi)
; AVX512-NEXT: retq
%1 = fptrunc <16 x float> %a0 to <16 x half>
%2 = bitcast <16 x half> %1 to <16 x i16>
store <16 x i16> %2, <16 x i16>* %a1
ret void
}
;
; Double to Half
;
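; Note: F16C has no double-to-half conversion instruction, so each f64 lane is
; truncated through the __truncdfhf2 libcall; the vector operand is spilled and
; reloaded around each call and the 16-bit results are repacked with shifts/ors.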
define i16 @cvt_f64_to_i16(double %a0) {
; ALL-LABEL: cvt_f64_to_i16:
; ALL: # BB#0:
; ALL-NEXT: jmp __truncdfhf2 # TAILCALL
%1 = fptrunc double %a0 to half
%2 = bitcast half %1 to i16
ret i16 %2
}
define <2 x i16> @cvt_2f64_to_2i16(<2 x double> %a0) {
; ALL-LABEL: cvt_2f64_to_2i16:
; ALL: # BB#0:
; ALL-NEXT: pushq %rbx
; ALL-NEXT: .Ltmp0:
; ALL-NEXT: .cfi_def_cfa_offset 16
; ALL-NEXT: subq $16, %rsp
; ALL-NEXT: .Ltmp1:
; ALL-NEXT: .cfi_def_cfa_offset 32
; ALL-NEXT: .Ltmp2:
; ALL-NEXT: .cfi_offset %rbx, -16
; ALL-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; ALL-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; ALL-NEXT: callq __truncdfhf2
; ALL-NEXT: movw %ax, %bx
; ALL-NEXT: shll $16, %ebx
; ALL-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; ALL-NEXT: callq __truncdfhf2
; ALL-NEXT: movzwl %ax, %eax
; ALL-NEXT: orl %ebx, %eax
; ALL-NEXT: vmovd %eax, %xmm0
; ALL-NEXT: addq $16, %rsp
; ALL-NEXT: popq %rbx
; ALL-NEXT: retq
%1 = fptrunc <2 x double> %a0 to <2 x half>
%2 = bitcast <2 x half> %1 to <2 x i16>
ret <2 x i16> %2
}
define <4 x i16> @cvt_4f64_to_4i16(<4 x double> %a0) {
; AVX1-LABEL: cvt_4f64_to_4i16:
; AVX1: # BB#0:
; AVX1-NEXT: pushq %r14
; AVX1-NEXT: .Ltmp3:
; AVX1-NEXT: .cfi_def_cfa_offset 16
; AVX1-NEXT: pushq %rbx
; AVX1-NEXT: .Ltmp4:
; AVX1-NEXT: .cfi_def_cfa_offset 24
; AVX1-NEXT: subq $40, %rsp
; AVX1-NEXT: .Ltmp5:
; AVX1-NEXT: .cfi_def_cfa_offset 64
; AVX1-NEXT: .Ltmp6:
; AVX1-NEXT: .cfi_offset %rbx, -24
; AVX1-NEXT: .Ltmp7:
; AVX1-NEXT: .cfi_offset %r14, -16
; AVX1-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
; AVX1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movw %ax, %bx
; AVX1-NEXT: shll $16, %ebx
; AVX1-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX1-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movzwl %ax, %r14d
; AVX1-NEXT: orl %ebx, %r14d
; AVX1-NEXT: vmovupd (%rsp), %ymm0 # 32-byte Reload
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; AVX1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movw %ax, %bx
; AVX1-NEXT: shll $16, %ebx
; AVX1-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movzwl %ax, %eax
; AVX1-NEXT: orl %ebx, %eax
; AVX1-NEXT: shlq $32, %rax
; AVX1-NEXT: orq %r14, %rax
; AVX1-NEXT: vmovq %rax, %xmm0
; AVX1-NEXT: addq $40, %rsp
; AVX1-NEXT: popq %rbx
; AVX1-NEXT: popq %r14
; AVX1-NEXT: retq
;
; AVX2-LABEL: cvt_4f64_to_4i16:
; AVX2: # BB#0:
; AVX2-NEXT: pushq %r14
; AVX2-NEXT: .Ltmp3:
; AVX2-NEXT: .cfi_def_cfa_offset 16
; AVX2-NEXT: pushq %rbx
; AVX2-NEXT: .Ltmp4:
; AVX2-NEXT: .cfi_def_cfa_offset 24
; AVX2-NEXT: subq $40, %rsp
; AVX2-NEXT: .Ltmp5:
; AVX2-NEXT: .cfi_def_cfa_offset 64
; AVX2-NEXT: .Ltmp6:
; AVX2-NEXT: .cfi_offset %rbx, -24
; AVX2-NEXT: .Ltmp7:
; AVX2-NEXT: .cfi_offset %r14, -16
; AVX2-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
; AVX2-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movw %ax, %bx
; AVX2-NEXT: shll $16, %ebx
; AVX2-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movzwl %ax, %r14d
; AVX2-NEXT: orl %ebx, %r14d
; AVX2-NEXT: vmovupd (%rsp), %ymm0 # 32-byte Reload
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX2-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; AVX2-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movw %ax, %bx
; AVX2-NEXT: shll $16, %ebx
; AVX2-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movzwl %ax, %eax
; AVX2-NEXT: orl %ebx, %eax
; AVX2-NEXT: shlq $32, %rax
; AVX2-NEXT: orq %r14, %rax
; AVX2-NEXT: vmovq %rax, %xmm0
; AVX2-NEXT: addq $40, %rsp
; AVX2-NEXT: popq %rbx
; AVX2-NEXT: popq %r14
; AVX2-NEXT: retq
;
; AVX512-LABEL: cvt_4f64_to_4i16:
; AVX512: # BB#0:
; AVX512-NEXT: pushq %r14
; AVX512-NEXT: .Ltmp3:
; AVX512-NEXT: .cfi_def_cfa_offset 16
; AVX512-NEXT: pushq %rbx
; AVX512-NEXT: .Ltmp4:
; AVX512-NEXT: .cfi_def_cfa_offset 24
; AVX512-NEXT: subq $40, %rsp
; AVX512-NEXT: .Ltmp5:
; AVX512-NEXT: .cfi_def_cfa_offset 64
; AVX512-NEXT: .Ltmp6:
; AVX512-NEXT: .cfi_offset %rbx, -24
; AVX512-NEXT: .Ltmp7:
; AVX512-NEXT: .cfi_offset %r14, -16
; AVX512-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movw %ax, %bx
; AVX512-NEXT: shll $16, %ebx
; AVX512-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movzwl %ax, %r14d
; AVX512-NEXT: orl %ebx, %r14d
; AVX512-NEXT: vmovupd (%rsp), %ymm0 # 32-byte Reload
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX512-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movw %ax, %bx
; AVX512-NEXT: shll $16, %ebx
; AVX512-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movzwl %ax, %eax
; AVX512-NEXT: orl %ebx, %eax
; AVX512-NEXT: shlq $32, %rax
; AVX512-NEXT: orq %r14, %rax
; AVX512-NEXT: vmovq %rax, %xmm0
; AVX512-NEXT: addq $40, %rsp
; AVX512-NEXT: popq %rbx
; AVX512-NEXT: popq %r14
; AVX512-NEXT: retq
%1 = fptrunc <4 x double> %a0 to <4 x half>
%2 = bitcast <4 x half> %1 to <4 x i16>
ret <4 x i16> %2
}
define <8 x i16> @cvt_4f64_to_8i16_undef(<4 x double> %a0) {
; AVX1-LABEL: cvt_4f64_to_8i16_undef:
; AVX1: # BB#0:
; AVX1-NEXT: pushq %r14
; AVX1-NEXT: .Ltmp8:
; AVX1-NEXT: .cfi_def_cfa_offset 16
; AVX1-NEXT: pushq %rbx
; AVX1-NEXT: .Ltmp9:
; AVX1-NEXT: .cfi_def_cfa_offset 24
; AVX1-NEXT: subq $40, %rsp
; AVX1-NEXT: .Ltmp10:
; AVX1-NEXT: .cfi_def_cfa_offset 64
; AVX1-NEXT: .Ltmp11:
; AVX1-NEXT: .cfi_offset %rbx, -24
; AVX1-NEXT: .Ltmp12:
; AVX1-NEXT: .cfi_offset %r14, -16
; AVX1-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
; AVX1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movw %ax, %bx
; AVX1-NEXT: shll $16, %ebx
; AVX1-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX1-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movzwl %ax, %r14d
; AVX1-NEXT: orl %ebx, %r14d
; AVX1-NEXT: vmovupd (%rsp), %ymm0 # 32-byte Reload
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; AVX1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movw %ax, %bx
; AVX1-NEXT: shll $16, %ebx
; AVX1-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movzwl %ax, %eax
; AVX1-NEXT: orl %ebx, %eax
; AVX1-NEXT: shlq $32, %rax
; AVX1-NEXT: orq %r14, %rax
; AVX1-NEXT: vmovq %rax, %xmm0
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX1-NEXT: addq $40, %rsp
; AVX1-NEXT: popq %rbx
; AVX1-NEXT: popq %r14
; AVX1-NEXT: retq
;
; AVX2-LABEL: cvt_4f64_to_8i16_undef:
; AVX2: # BB#0:
; AVX2-NEXT: pushq %r14
; AVX2-NEXT: .Ltmp8:
; AVX2-NEXT: .cfi_def_cfa_offset 16
; AVX2-NEXT: pushq %rbx
; AVX2-NEXT: .Ltmp9:
; AVX2-NEXT: .cfi_def_cfa_offset 24
; AVX2-NEXT: subq $40, %rsp
; AVX2-NEXT: .Ltmp10:
; AVX2-NEXT: .cfi_def_cfa_offset 64
; AVX2-NEXT: .Ltmp11:
; AVX2-NEXT: .cfi_offset %rbx, -24
; AVX2-NEXT: .Ltmp12:
; AVX2-NEXT: .cfi_offset %r14, -16
; AVX2-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
; AVX2-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movw %ax, %bx
; AVX2-NEXT: shll $16, %ebx
; AVX2-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movzwl %ax, %r14d
; AVX2-NEXT: orl %ebx, %r14d
; AVX2-NEXT: vmovupd (%rsp), %ymm0 # 32-byte Reload
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX2-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; AVX2-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movw %ax, %bx
; AVX2-NEXT: shll $16, %ebx
; AVX2-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movzwl %ax, %eax
; AVX2-NEXT: orl %ebx, %eax
; AVX2-NEXT: shlq $32, %rax
; AVX2-NEXT: orq %r14, %rax
; AVX2-NEXT: vmovq %rax, %xmm0
; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX2-NEXT: addq $40, %rsp
; AVX2-NEXT: popq %rbx
; AVX2-NEXT: popq %r14
; AVX2-NEXT: retq
;
; AVX512-LABEL: cvt_4f64_to_8i16_undef:
; AVX512: # BB#0:
; AVX512-NEXT: pushq %r14
; AVX512-NEXT: .Ltmp8:
; AVX512-NEXT: .cfi_def_cfa_offset 16
; AVX512-NEXT: pushq %rbx
; AVX512-NEXT: .Ltmp9:
; AVX512-NEXT: .cfi_def_cfa_offset 24
; AVX512-NEXT: subq $40, %rsp
; AVX512-NEXT: .Ltmp10:
; AVX512-NEXT: .cfi_def_cfa_offset 64
; AVX512-NEXT: .Ltmp11:
; AVX512-NEXT: .cfi_offset %rbx, -24
; AVX512-NEXT: .Ltmp12:
; AVX512-NEXT: .cfi_offset %r14, -16
; AVX512-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movw %ax, %bx
; AVX512-NEXT: shll $16, %ebx
; AVX512-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movzwl %ax, %r14d
; AVX512-NEXT: orl %ebx, %r14d
; AVX512-NEXT: vmovupd (%rsp), %ymm0 # 32-byte Reload
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX512-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movw %ax, %bx
; AVX512-NEXT: shll $16, %ebx
; AVX512-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movzwl %ax, %eax
; AVX512-NEXT: orl %ebx, %eax
; AVX512-NEXT: shlq $32, %rax
; AVX512-NEXT: orq %r14, %rax
; AVX512-NEXT: vmovq %rax, %xmm0
; AVX512-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX512-NEXT: addq $40, %rsp
; AVX512-NEXT: popq %rbx
; AVX512-NEXT: popq %r14
; AVX512-NEXT: retq
%1 = fptrunc <4 x double> %a0 to <4 x half>
%2 = bitcast <4 x half> %1 to <4 x i16>
%3 = shufflevector <4 x i16> %2, <4 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
ret <8 x i16> %3
}
define <8 x i16> @cvt_4f64_to_8i16_zero(<4 x double> %a0) {
; AVX1-LABEL: cvt_4f64_to_8i16_zero:
; AVX1: # BB#0:
; AVX1-NEXT: pushq %r14
; AVX1-NEXT: .Ltmp13:
; AVX1-NEXT: .cfi_def_cfa_offset 16
; AVX1-NEXT: pushq %rbx
; AVX1-NEXT: .Ltmp14:
; AVX1-NEXT: .cfi_def_cfa_offset 24
; AVX1-NEXT: subq $40, %rsp
; AVX1-NEXT: .Ltmp15:
; AVX1-NEXT: .cfi_def_cfa_offset 64
; AVX1-NEXT: .Ltmp16:
; AVX1-NEXT: .cfi_offset %rbx, -24
; AVX1-NEXT: .Ltmp17:
; AVX1-NEXT: .cfi_offset %r14, -16
; AVX1-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
; AVX1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movw %ax, %bx
; AVX1-NEXT: shll $16, %ebx
; AVX1-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX1-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movzwl %ax, %r14d
; AVX1-NEXT: orl %ebx, %r14d
; AVX1-NEXT: vmovupd (%rsp), %ymm0 # 32-byte Reload
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; AVX1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movw %ax, %bx
; AVX1-NEXT: shll $16, %ebx
; AVX1-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movzwl %ax, %eax
; AVX1-NEXT: orl %ebx, %eax
; AVX1-NEXT: shlq $32, %rax
; AVX1-NEXT: orq %r14, %rax
; AVX1-NEXT: vmovq %rax, %xmm0
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero
; AVX1-NEXT: addq $40, %rsp
; AVX1-NEXT: popq %rbx
; AVX1-NEXT: popq %r14
; AVX1-NEXT: retq
;
; AVX2-LABEL: cvt_4f64_to_8i16_zero:
; AVX2: # BB#0:
; AVX2-NEXT: pushq %r14
; AVX2-NEXT: .Ltmp13:
; AVX2-NEXT: .cfi_def_cfa_offset 16
; AVX2-NEXT: pushq %rbx
; AVX2-NEXT: .Ltmp14:
; AVX2-NEXT: .cfi_def_cfa_offset 24
; AVX2-NEXT: subq $40, %rsp
; AVX2-NEXT: .Ltmp15:
; AVX2-NEXT: .cfi_def_cfa_offset 64
; AVX2-NEXT: .Ltmp16:
; AVX2-NEXT: .cfi_offset %rbx, -24
; AVX2-NEXT: .Ltmp17:
; AVX2-NEXT: .cfi_offset %r14, -16
; AVX2-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
; AVX2-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movw %ax, %bx
; AVX2-NEXT: shll $16, %ebx
; AVX2-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movzwl %ax, %r14d
; AVX2-NEXT: orl %ebx, %r14d
; AVX2-NEXT: vmovupd (%rsp), %ymm0 # 32-byte Reload
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX2-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; AVX2-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movw %ax, %bx
; AVX2-NEXT: shll $16, %ebx
; AVX2-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movzwl %ax, %eax
; AVX2-NEXT: orl %ebx, %eax
; AVX2-NEXT: shlq $32, %rax
; AVX2-NEXT: orq %r14, %rax
; AVX2-NEXT: vmovq %rax, %xmm0
; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero
; AVX2-NEXT: addq $40, %rsp
; AVX2-NEXT: popq %rbx
; AVX2-NEXT: popq %r14
; AVX2-NEXT: retq
;
; AVX512-LABEL: cvt_4f64_to_8i16_zero:
; AVX512: # BB#0:
; AVX512-NEXT: pushq %r14
; AVX512-NEXT: .Ltmp13:
; AVX512-NEXT: .cfi_def_cfa_offset 16
; AVX512-NEXT: pushq %rbx
; AVX512-NEXT: .Ltmp14:
; AVX512-NEXT: .cfi_def_cfa_offset 24
; AVX512-NEXT: subq $40, %rsp
; AVX512-NEXT: .Ltmp15:
; AVX512-NEXT: .cfi_def_cfa_offset 64
; AVX512-NEXT: .Ltmp16:
; AVX512-NEXT: .cfi_offset %rbx, -24
; AVX512-NEXT: .Ltmp17:
; AVX512-NEXT: .cfi_offset %r14, -16
; AVX512-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movw %ax, %bx
; AVX512-NEXT: shll $16, %ebx
; AVX512-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movzwl %ax, %r14d
; AVX512-NEXT: orl %ebx, %r14d
; AVX512-NEXT: vmovupd (%rsp), %ymm0 # 32-byte Reload
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX512-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movw %ax, %bx
; AVX512-NEXT: shll $16, %ebx
; AVX512-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movzwl %ax, %eax
; AVX512-NEXT: orl %ebx, %eax
; AVX512-NEXT: shlq $32, %rax
; AVX512-NEXT: orq %r14, %rax
; AVX512-NEXT: vmovq %rax, %xmm0
; AVX512-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero
; AVX512-NEXT: addq $40, %rsp
; AVX512-NEXT: popq %rbx
; AVX512-NEXT: popq %r14
; AVX512-NEXT: retq
%1 = fptrunc <4 x double> %a0 to <4 x half>
%2 = bitcast <4 x half> %1 to <4 x i16>
%3 = shufflevector <4 x i16> %2, <4 x i16> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
ret <8 x i16> %3
}
define <8 x i16> @cvt_8f64_to_8i16(<8 x double> %a0) {
; AVX1-LABEL: cvt_8f64_to_8i16:
; AVX1: # BB#0:
; AVX1-NEXT: pushq %r15
; AVX1-NEXT: .Ltmp18:
; AVX1-NEXT: .cfi_def_cfa_offset 16
; AVX1-NEXT: pushq %r14
; AVX1-NEXT: .Ltmp19:
; AVX1-NEXT: .cfi_def_cfa_offset 24
; AVX1-NEXT: pushq %rbx
; AVX1-NEXT: .Ltmp20:
; AVX1-NEXT: .cfi_def_cfa_offset 32
; AVX1-NEXT: subq $64, %rsp
; AVX1-NEXT: .Ltmp21:
; AVX1-NEXT: .cfi_def_cfa_offset 96
; AVX1-NEXT: .Ltmp22:
; AVX1-NEXT: .cfi_offset %rbx, -32
; AVX1-NEXT: .Ltmp23:
; AVX1-NEXT: .cfi_offset %r14, -24
; AVX1-NEXT: .Ltmp24:
; AVX1-NEXT: .cfi_offset %r15, -16
; AVX1-NEXT: vmovups %ymm1, (%rsp) # 32-byte Spill
; AVX1-NEXT: vmovupd %ymm0, {{[0-9]+}}(%rsp) # 32-byte Spill
; AVX1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movw %ax, %bx
; AVX1-NEXT: shll $16, %ebx
; AVX1-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
; AVX1-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movzwl %ax, %r15d
; AVX1-NEXT: orl %ebx, %r15d
; AVX1-NEXT: vmovupd {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vmovapd %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
; AVX1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movw %ax, %bx
; AVX1-NEXT: shll $16, %ebx
; AVX1-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movzwl %ax, %r14d
; AVX1-NEXT: orl %ebx, %r14d
; AVX1-NEXT: shlq $32, %r14
; AVX1-NEXT: orq %r15, %r14
; AVX1-NEXT: vmovupd (%rsp), %ymm0 # 32-byte Reload
; AVX1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movw %ax, %bx
; AVX1-NEXT: shll $16, %ebx
; AVX1-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX1-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movzwl %ax, %r15d
; AVX1-NEXT: orl %ebx, %r15d
; AVX1-NEXT: vmovupd (%rsp), %ymm0 # 32-byte Reload
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; AVX1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movw %ax, %bx
; AVX1-NEXT: shll $16, %ebx
; AVX1-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movzwl %ax, %eax
; AVX1-NEXT: orl %ebx, %eax
; AVX1-NEXT: shlq $32, %rax
; AVX1-NEXT: orq %r15, %rax
; AVX1-NEXT: vmovq %rax, %xmm0
; AVX1-NEXT: vmovq %r14, %xmm1
; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX1-NEXT: addq $64, %rsp
; AVX1-NEXT: popq %rbx
; AVX1-NEXT: popq %r14
; AVX1-NEXT: popq %r15
; AVX1-NEXT: retq
;
; AVX2-LABEL: cvt_8f64_to_8i16:
; AVX2: # BB#0:
; AVX2-NEXT: pushq %r15
; AVX2-NEXT: .Ltmp18:
; AVX2-NEXT: .cfi_def_cfa_offset 16
; AVX2-NEXT: pushq %r14
; AVX2-NEXT: .Ltmp19:
; AVX2-NEXT: .cfi_def_cfa_offset 24
; AVX2-NEXT: pushq %rbx
; AVX2-NEXT: .Ltmp20:
; AVX2-NEXT: .cfi_def_cfa_offset 32
; AVX2-NEXT: subq $64, %rsp
; AVX2-NEXT: .Ltmp21:
; AVX2-NEXT: .cfi_def_cfa_offset 96
; AVX2-NEXT: .Ltmp22:
; AVX2-NEXT: .cfi_offset %rbx, -32
; AVX2-NEXT: .Ltmp23:
; AVX2-NEXT: .cfi_offset %r14, -24
; AVX2-NEXT: .Ltmp24:
; AVX2-NEXT: .cfi_offset %r15, -16
; AVX2-NEXT: vmovups %ymm1, (%rsp) # 32-byte Spill
; AVX2-NEXT: vmovupd %ymm0, {{[0-9]+}}(%rsp) # 32-byte Spill
; AVX2-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movw %ax, %bx
; AVX2-NEXT: shll $16, %ebx
; AVX2-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movzwl %ax, %r15d
; AVX2-NEXT: orl %ebx, %r15d
; AVX2-NEXT: vmovupd {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX2-NEXT: vmovapd %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
; AVX2-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movw %ax, %bx
; AVX2-NEXT: shll $16, %ebx
; AVX2-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movzwl %ax, %r14d
; AVX2-NEXT: orl %ebx, %r14d
; AVX2-NEXT: shlq $32, %r14
; AVX2-NEXT: orq %r15, %r14
; AVX2-NEXT: vmovupd (%rsp), %ymm0 # 32-byte Reload
; AVX2-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movw %ax, %bx
; AVX2-NEXT: shll $16, %ebx
; AVX2-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movzwl %ax, %r15d
; AVX2-NEXT: orl %ebx, %r15d
; AVX2-NEXT: vmovupd (%rsp), %ymm0 # 32-byte Reload
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX2-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; AVX2-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movw %ax, %bx
; AVX2-NEXT: shll $16, %ebx
; AVX2-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movzwl %ax, %eax
; AVX2-NEXT: orl %ebx, %eax
; AVX2-NEXT: shlq $32, %rax
; AVX2-NEXT: orq %r15, %rax
; AVX2-NEXT: vmovq %rax, %xmm0
; AVX2-NEXT: vmovq %r14, %xmm1
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX2-NEXT: addq $64, %rsp
; AVX2-NEXT: popq %rbx
; AVX2-NEXT: popq %r14
; AVX2-NEXT: popq %r15
; AVX2-NEXT: retq
;
; AVX512-LABEL: cvt_8f64_to_8i16:
; AVX512: # BB#0:
; AVX512-NEXT: pushq %r15
; AVX512-NEXT: .Ltmp18:
; AVX512-NEXT: .cfi_def_cfa_offset 16
; AVX512-NEXT: pushq %r14
; AVX512-NEXT: .Ltmp19:
; AVX512-NEXT: .cfi_def_cfa_offset 24
; AVX512-NEXT: pushq %rbx
; AVX512-NEXT: .Ltmp20:
; AVX512-NEXT: .cfi_def_cfa_offset 32
; AVX512-NEXT: subq $96, %rsp
; AVX512-NEXT: .Ltmp21:
; AVX512-NEXT: .cfi_def_cfa_offset 128
; AVX512-NEXT: .Ltmp22:
; AVX512-NEXT: .cfi_offset %rbx, -32
; AVX512-NEXT: .Ltmp23:
; AVX512-NEXT: .cfi_offset %r14, -24
; AVX512-NEXT: .Ltmp24:
; AVX512-NEXT: .cfi_offset %r15, -16
; AVX512-NEXT: vmovups %zmm0, (%rsp) # 64-byte Spill
; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movw %ax, %bx
; AVX512-NEXT: shll $16, %ebx
; AVX512-NEXT: vmovups (%rsp), %zmm0 # 64-byte Reload
; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movzwl %ax, %r15d
; AVX512-NEXT: orl %ebx, %r15d
; AVX512-NEXT: vmovups (%rsp), %zmm0 # 64-byte Reload
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX512-NEXT: vmovaps %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movw %ax, %bx
; AVX512-NEXT: shll $16, %ebx
; AVX512-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movzwl %ax, %r14d
; AVX512-NEXT: orl %ebx, %r14d
; AVX512-NEXT: shlq $32, %r14
; AVX512-NEXT: orq %r15, %r14
; AVX512-NEXT: vmovups (%rsp), %zmm0 # 64-byte Reload
; AVX512-NEXT: vextractf64x4 $1, %zmm0, %ymm0
; AVX512-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movw %ax, %bx
; AVX512-NEXT: shll $16, %ebx
; AVX512-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movzwl %ax, %r15d
; AVX512-NEXT: orl %ebx, %r15d
; AVX512-NEXT: vmovupd (%rsp), %ymm0 # 32-byte Reload
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX512-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movw %ax, %bx
; AVX512-NEXT: shll $16, %ebx
; AVX512-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movzwl %ax, %eax
; AVX512-NEXT: orl %ebx, %eax
; AVX512-NEXT: shlq $32, %rax
; AVX512-NEXT: orq %r15, %rax
; AVX512-NEXT: vmovq %rax, %xmm0
; AVX512-NEXT: vmovq %r14, %xmm1
; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX512-NEXT: addq $96, %rsp
; AVX512-NEXT: popq %rbx
; AVX512-NEXT: popq %r14
; AVX512-NEXT: popq %r15
; AVX512-NEXT: retq
%1 = fptrunc <8 x double> %a0 to <8 x half>
%2 = bitcast <8 x half> %1 to <8 x i16>
ret <8 x i16> %2
}
;
; Double to Half (Store)
;
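; Note: as above, the store forms call __truncdfhf2 per element, but write each
; 16-bit result directly to the destination with movw rather than packing a
; vector result in a register first.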
define void @store_cvt_f64_to_i16(double %a0, i16* %a1) {
; ALL-LABEL: store_cvt_f64_to_i16:
; ALL: # BB#0:
; ALL-NEXT: pushq %rbx
; ALL-NEXT: .Ltmp25:
; ALL-NEXT: .cfi_def_cfa_offset 16
; ALL-NEXT: .Ltmp26:
; ALL-NEXT: .cfi_offset %rbx, -16
; ALL-NEXT: movq %rdi, %rbx
; ALL-NEXT: callq __truncdfhf2
; ALL-NEXT: movw %ax, (%rbx)
; ALL-NEXT: popq %rbx
; ALL-NEXT: retq
%1 = fptrunc double %a0 to half
%2 = bitcast half %1 to i16
store i16 %2, i16* %a1
ret void
}
define void @store_cvt_2f64_to_2i16(<2 x double> %a0, <2 x i16>* %a1) {
; ALL-LABEL: store_cvt_2f64_to_2i16:
; ALL: # BB#0:
; ALL-NEXT: pushq %rbp
; ALL-NEXT: .Ltmp27:
; ALL-NEXT: .cfi_def_cfa_offset 16
; ALL-NEXT: pushq %rbx
; ALL-NEXT: .Ltmp28:
; ALL-NEXT: .cfi_def_cfa_offset 24
; ALL-NEXT: subq $24, %rsp
; ALL-NEXT: .Ltmp29:
; ALL-NEXT: .cfi_def_cfa_offset 48
; ALL-NEXT: .Ltmp30:
; ALL-NEXT: .cfi_offset %rbx, -24
; ALL-NEXT: .Ltmp31:
; ALL-NEXT: .cfi_offset %rbp, -16
; ALL-NEXT: movq %rdi, %rbx
; ALL-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; ALL-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; ALL-NEXT: callq __truncdfhf2
; ALL-NEXT: movl %eax, %ebp
; ALL-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; ALL-NEXT: callq __truncdfhf2
; ALL-NEXT: movw %ax, (%rbx)
; ALL-NEXT: movw %bp, 2(%rbx)
; ALL-NEXT: addq $24, %rsp
; ALL-NEXT: popq %rbx
; ALL-NEXT: popq %rbp
; ALL-NEXT: retq
%1 = fptrunc <2 x double> %a0 to <2 x half>
%2 = bitcast <2 x half> %1 to <2 x i16>
store <2 x i16> %2, <2 x i16>* %a1
ret void
}
define void @store_cvt_4f64_to_4i16(<4 x double> %a0, <4 x i16>* %a1) {
; AVX1-LABEL: store_cvt_4f64_to_4i16:
; AVX1: # BB#0:
; AVX1-NEXT: pushq %rbp
; AVX1-NEXT: .Ltmp32:
; AVX1-NEXT: .cfi_def_cfa_offset 16
; AVX1-NEXT: pushq %r15
; AVX1-NEXT: .Ltmp33:
; AVX1-NEXT: .cfi_def_cfa_offset 24
; AVX1-NEXT: pushq %r14
; AVX1-NEXT: .Ltmp34:
; AVX1-NEXT: .cfi_def_cfa_offset 32
; AVX1-NEXT: pushq %rbx
; AVX1-NEXT: .Ltmp35:
; AVX1-NEXT: .cfi_def_cfa_offset 40
; AVX1-NEXT: subq $88, %rsp
; AVX1-NEXT: .Ltmp36:
; AVX1-NEXT: .cfi_def_cfa_offset 128
; AVX1-NEXT: .Ltmp37:
; AVX1-NEXT: .cfi_offset %rbx, -40
; AVX1-NEXT: .Ltmp38:
; AVX1-NEXT: .cfi_offset %r14, -32
; AVX1-NEXT: .Ltmp39:
; AVX1-NEXT: .cfi_offset %r15, -24
; AVX1-NEXT: .Ltmp40:
; AVX1-NEXT: .cfi_offset %rbp, -16
; AVX1-NEXT: movq %rdi, %rbx
; AVX1-NEXT: vmovupd %ymm0, {{[0-9]+}}(%rsp) # 32-byte Spill
; AVX1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movl %eax, %r14d
; AVX1-NEXT: vmovupd {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vmovapd %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
; AVX1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movl %eax, %r15d
; AVX1-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
; AVX1-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movl %eax, %ebp
; AVX1-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movw %ax, 4(%rbx)
; AVX1-NEXT: movw %bp, (%rbx)
; AVX1-NEXT: movw %r15w, 6(%rbx)
; AVX1-NEXT: movw %r14w, 2(%rbx)
; AVX1-NEXT: addq $88, %rsp
; AVX1-NEXT: popq %rbx
; AVX1-NEXT: popq %r14
; AVX1-NEXT: popq %r15
; AVX1-NEXT: popq %rbp
; AVX1-NEXT: retq
;
; AVX2-LABEL: store_cvt_4f64_to_4i16:
; AVX2: # BB#0:
; AVX2-NEXT: pushq %rbp
; AVX2-NEXT: .Ltmp32:
; AVX2-NEXT: .cfi_def_cfa_offset 16
; AVX2-NEXT: pushq %r15
; AVX2-NEXT: .Ltmp33:
; AVX2-NEXT: .cfi_def_cfa_offset 24
; AVX2-NEXT: pushq %r14
; AVX2-NEXT: .Ltmp34:
; AVX2-NEXT: .cfi_def_cfa_offset 32
; AVX2-NEXT: pushq %rbx
; AVX2-NEXT: .Ltmp35:
; AVX2-NEXT: .cfi_def_cfa_offset 40
; AVX2-NEXT: subq $88, %rsp
; AVX2-NEXT: .Ltmp36:
; AVX2-NEXT: .cfi_def_cfa_offset 128
; AVX2-NEXT: .Ltmp37:
; AVX2-NEXT: .cfi_offset %rbx, -40
; AVX2-NEXT: .Ltmp38:
; AVX2-NEXT: .cfi_offset %r14, -32
; AVX2-NEXT: .Ltmp39:
; AVX2-NEXT: .cfi_offset %r15, -24
; AVX2-NEXT: .Ltmp40:
; AVX2-NEXT: .cfi_offset %rbp, -16
; AVX2-NEXT: movq %rdi, %rbx
; AVX2-NEXT: vmovupd %ymm0, {{[0-9]+}}(%rsp) # 32-byte Spill
; AVX2-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movl %eax, %r14d
; AVX2-NEXT: vmovupd {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX2-NEXT: vmovapd %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
; AVX2-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movl %eax, %r15d
; AVX2-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movl %eax, %ebp
; AVX2-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movw %ax, 4(%rbx)
; AVX2-NEXT: movw %bp, (%rbx)
; AVX2-NEXT: movw %r15w, 6(%rbx)
; AVX2-NEXT: movw %r14w, 2(%rbx)
; AVX2-NEXT: addq $88, %rsp
; AVX2-NEXT: popq %rbx
; AVX2-NEXT: popq %r14
; AVX2-NEXT: popq %r15
; AVX2-NEXT: popq %rbp
; AVX2-NEXT: retq
;
; AVX512-LABEL: store_cvt_4f64_to_4i16:
; AVX512: # BB#0:
; AVX512-NEXT: pushq %rbp
; AVX512-NEXT: .Ltmp32:
; AVX512-NEXT: .cfi_def_cfa_offset 16
; AVX512-NEXT: pushq %r15
; AVX512-NEXT: .Ltmp33:
; AVX512-NEXT: .cfi_def_cfa_offset 24
; AVX512-NEXT: pushq %r14
; AVX512-NEXT: .Ltmp34:
; AVX512-NEXT: .cfi_def_cfa_offset 32
; AVX512-NEXT: pushq %rbx
; AVX512-NEXT: .Ltmp35:
; AVX512-NEXT: .cfi_def_cfa_offset 40
; AVX512-NEXT: subq $88, %rsp
; AVX512-NEXT: .Ltmp36:
; AVX512-NEXT: .cfi_def_cfa_offset 128
; AVX512-NEXT: .Ltmp37:
; AVX512-NEXT: .cfi_offset %rbx, -40
; AVX512-NEXT: .Ltmp38:
; AVX512-NEXT: .cfi_offset %r14, -32
; AVX512-NEXT: .Ltmp39:
; AVX512-NEXT: .cfi_offset %r15, -24
; AVX512-NEXT: .Ltmp40:
; AVX512-NEXT: .cfi_offset %rbp, -16
; AVX512-NEXT: movq %rdi, %rbx
; AVX512-NEXT: vmovupd %ymm0, {{[0-9]+}}(%rsp) # 32-byte Spill
; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movl %eax, %r14d
; AVX512-NEXT: vmovupd {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX512-NEXT: vmovapd %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movl %eax, %r15d
; AVX512-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movl %eax, %ebp
; AVX512-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movw %ax, 4(%rbx)
; AVX512-NEXT: movw %bp, (%rbx)
; AVX512-NEXT: movw %r15w, 6(%rbx)
; AVX512-NEXT: movw %r14w, 2(%rbx)
; AVX512-NEXT: addq $88, %rsp
; AVX512-NEXT: popq %rbx
; AVX512-NEXT: popq %r14
; AVX512-NEXT: popq %r15
; AVX512-NEXT: popq %rbp
; AVX512-NEXT: retq
%1 = fptrunc <4 x double> %a0 to <4 x half>
%2 = bitcast <4 x half> %1 to <4 x i16>
store <4 x i16> %2, <4 x i16>* %a1
ret void
}

define void @store_cvt_4f64_to_8i16_undef(<4 x double> %a0, <8 x i16>* %a1) {
; AVX1-LABEL: store_cvt_4f64_to_8i16_undef:
; AVX1: # BB#0:
; AVX1-NEXT: pushq %rbp
; AVX1-NEXT: .Ltmp41:
; AVX1-NEXT: .cfi_def_cfa_offset 16
; AVX1-NEXT: pushq %r14
; AVX1-NEXT: .Ltmp42:
; AVX1-NEXT: .cfi_def_cfa_offset 24
; AVX1-NEXT: pushq %rbx
; AVX1-NEXT: .Ltmp43:
; AVX1-NEXT: .cfi_def_cfa_offset 32
; AVX1-NEXT: subq $32, %rsp
; AVX1-NEXT: .Ltmp44:
; AVX1-NEXT: .cfi_def_cfa_offset 64
; AVX1-NEXT: .Ltmp45:
; AVX1-NEXT: .cfi_offset %rbx, -32
; AVX1-NEXT: .Ltmp46:
; AVX1-NEXT: .cfi_offset %r14, -24
; AVX1-NEXT: .Ltmp47:
; AVX1-NEXT: .cfi_offset %rbp, -16
; AVX1-NEXT: movq %rdi, %r14
; AVX1-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
; AVX1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movw %ax, %bp
; AVX1-NEXT: shll $16, %ebp
; AVX1-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX1-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movzwl %ax, %ebx
; AVX1-NEXT: orl %ebp, %ebx
; AVX1-NEXT: vmovupd (%rsp), %ymm0 # 32-byte Reload
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; AVX1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movw %ax, %bp
; AVX1-NEXT: shll $16, %ebp
; AVX1-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movzwl %ax, %eax
; AVX1-NEXT: orl %ebp, %eax
; AVX1-NEXT: shlq $32, %rax
; AVX1-NEXT: orq %rbx, %rax
; AVX1-NEXT: vmovq %rax, %xmm0
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX1-NEXT: vmovdqa %xmm0, (%r14)
; AVX1-NEXT: addq $32, %rsp
; AVX1-NEXT: popq %rbx
; AVX1-NEXT: popq %r14
; AVX1-NEXT: popq %rbp
; AVX1-NEXT: retq
;
; AVX2-LABEL: store_cvt_4f64_to_8i16_undef:
; AVX2: # BB#0:
; AVX2-NEXT: pushq %rbp
; AVX2-NEXT: .Ltmp41:
; AVX2-NEXT: .cfi_def_cfa_offset 16
; AVX2-NEXT: pushq %r14
; AVX2-NEXT: .Ltmp42:
; AVX2-NEXT: .cfi_def_cfa_offset 24
; AVX2-NEXT: pushq %rbx
; AVX2-NEXT: .Ltmp43:
; AVX2-NEXT: .cfi_def_cfa_offset 32
; AVX2-NEXT: subq $32, %rsp
; AVX2-NEXT: .Ltmp44:
; AVX2-NEXT: .cfi_def_cfa_offset 64
; AVX2-NEXT: .Ltmp45:
; AVX2-NEXT: .cfi_offset %rbx, -32
; AVX2-NEXT: .Ltmp46:
; AVX2-NEXT: .cfi_offset %r14, -24
; AVX2-NEXT: .Ltmp47:
; AVX2-NEXT: .cfi_offset %rbp, -16
; AVX2-NEXT: movq %rdi, %r14
; AVX2-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
; AVX2-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movw %ax, %bp
; AVX2-NEXT: shll $16, %ebp
; AVX2-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movzwl %ax, %ebx
; AVX2-NEXT: orl %ebp, %ebx
; AVX2-NEXT: vmovupd (%rsp), %ymm0 # 32-byte Reload
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX2-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; AVX2-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movw %ax, %bp
; AVX2-NEXT: shll $16, %ebp
; AVX2-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movzwl %ax, %eax
; AVX2-NEXT: orl %ebp, %eax
; AVX2-NEXT: shlq $32, %rax
; AVX2-NEXT: orq %rbx, %rax
; AVX2-NEXT: vmovq %rax, %xmm0
; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX2-NEXT: vmovdqa %xmm0, (%r14)
; AVX2-NEXT: addq $32, %rsp
; AVX2-NEXT: popq %rbx
; AVX2-NEXT: popq %r14
; AVX2-NEXT: popq %rbp
; AVX2-NEXT: retq
;
; AVX512-LABEL: store_cvt_4f64_to_8i16_undef:
; AVX512: # BB#0:
; AVX512-NEXT: pushq %rbp
; AVX512-NEXT: .Ltmp41:
; AVX512-NEXT: .cfi_def_cfa_offset 16
; AVX512-NEXT: pushq %r14
; AVX512-NEXT: .Ltmp42:
; AVX512-NEXT: .cfi_def_cfa_offset 24
; AVX512-NEXT: pushq %rbx
; AVX512-NEXT: .Ltmp43:
; AVX512-NEXT: .cfi_def_cfa_offset 32
; AVX512-NEXT: subq $32, %rsp
; AVX512-NEXT: .Ltmp44:
; AVX512-NEXT: .cfi_def_cfa_offset 64
; AVX512-NEXT: .Ltmp45:
; AVX512-NEXT: .cfi_offset %rbx, -32
; AVX512-NEXT: .Ltmp46:
; AVX512-NEXT: .cfi_offset %r14, -24
; AVX512-NEXT: .Ltmp47:
; AVX512-NEXT: .cfi_offset %rbp, -16
; AVX512-NEXT: movq %rdi, %r14
; AVX512-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movw %ax, %bp
; AVX512-NEXT: shll $16, %ebp
; AVX512-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movzwl %ax, %ebx
; AVX512-NEXT: orl %ebp, %ebx
; AVX512-NEXT: vmovupd (%rsp), %ymm0 # 32-byte Reload
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX512-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movw %ax, %bp
; AVX512-NEXT: shll $16, %ebp
; AVX512-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movzwl %ax, %eax
; AVX512-NEXT: orl %ebp, %eax
; AVX512-NEXT: shlq $32, %rax
; AVX512-NEXT: orq %rbx, %rax
; AVX512-NEXT: vmovq %rax, %xmm0
; AVX512-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX512-NEXT: vmovdqa %xmm0, (%r14)
; AVX512-NEXT: addq $32, %rsp
; AVX512-NEXT: popq %rbx
; AVX512-NEXT: popq %r14
; AVX512-NEXT: popq %rbp
; AVX512-NEXT: retq
%1 = fptrunc <4 x double> %a0 to <4 x half>
%2 = bitcast <4 x half> %1 to <4 x i16>
%3 = shufflevector <4 x i16> %2, <4 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
store <8 x i16> %3, <8 x i16>* %a1
ret void
}

define void @store_cvt_4f64_to_8i16_zero(<4 x double> %a0, <8 x i16>* %a1) {
; AVX1-LABEL: store_cvt_4f64_to_8i16_zero:
; AVX1: # BB#0:
; AVX1-NEXT: pushq %rbp
; AVX1-NEXT: .Ltmp48:
; AVX1-NEXT: .cfi_def_cfa_offset 16
; AVX1-NEXT: pushq %r14
; AVX1-NEXT: .Ltmp49:
; AVX1-NEXT: .cfi_def_cfa_offset 24
; AVX1-NEXT: pushq %rbx
; AVX1-NEXT: .Ltmp50:
; AVX1-NEXT: .cfi_def_cfa_offset 32
; AVX1-NEXT: subq $32, %rsp
; AVX1-NEXT: .Ltmp51:
; AVX1-NEXT: .cfi_def_cfa_offset 64
; AVX1-NEXT: .Ltmp52:
; AVX1-NEXT: .cfi_offset %rbx, -32
; AVX1-NEXT: .Ltmp53:
; AVX1-NEXT: .cfi_offset %r14, -24
; AVX1-NEXT: .Ltmp54:
; AVX1-NEXT: .cfi_offset %rbp, -16
; AVX1-NEXT: movq %rdi, %r14
; AVX1-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
; AVX1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movw %ax, %bp
; AVX1-NEXT: shll $16, %ebp
; AVX1-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX1-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movzwl %ax, %ebx
; AVX1-NEXT: orl %ebp, %ebx
; AVX1-NEXT: vmovupd (%rsp), %ymm0 # 32-byte Reload
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; AVX1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movw %ax, %bp
; AVX1-NEXT: shll $16, %ebp
; AVX1-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movzwl %ax, %eax
; AVX1-NEXT: orl %ebp, %eax
; AVX1-NEXT: shlq $32, %rax
; AVX1-NEXT: orq %rbx, %rax
; AVX1-NEXT: vmovq %rax, %xmm0
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero
; AVX1-NEXT: vmovdqa %xmm0, (%r14)
; AVX1-NEXT: addq $32, %rsp
; AVX1-NEXT: popq %rbx
; AVX1-NEXT: popq %r14
; AVX1-NEXT: popq %rbp
; AVX1-NEXT: retq
;
; AVX2-LABEL: store_cvt_4f64_to_8i16_zero:
; AVX2: # BB#0:
; AVX2-NEXT: pushq %rbp
; AVX2-NEXT: .Ltmp48:
; AVX2-NEXT: .cfi_def_cfa_offset 16
; AVX2-NEXT: pushq %r14
; AVX2-NEXT: .Ltmp49:
; AVX2-NEXT: .cfi_def_cfa_offset 24
; AVX2-NEXT: pushq %rbx
; AVX2-NEXT: .Ltmp50:
; AVX2-NEXT: .cfi_def_cfa_offset 32
; AVX2-NEXT: subq $32, %rsp
; AVX2-NEXT: .Ltmp51:
; AVX2-NEXT: .cfi_def_cfa_offset 64
; AVX2-NEXT: .Ltmp52:
; AVX2-NEXT: .cfi_offset %rbx, -32
; AVX2-NEXT: .Ltmp53:
; AVX2-NEXT: .cfi_offset %r14, -24
; AVX2-NEXT: .Ltmp54:
; AVX2-NEXT: .cfi_offset %rbp, -16
; AVX2-NEXT: movq %rdi, %r14
; AVX2-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
; AVX2-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movw %ax, %bp
; AVX2-NEXT: shll $16, %ebp
; AVX2-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movzwl %ax, %ebx
; AVX2-NEXT: orl %ebp, %ebx
; AVX2-NEXT: vmovupd (%rsp), %ymm0 # 32-byte Reload
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX2-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; AVX2-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movw %ax, %bp
; AVX2-NEXT: shll $16, %ebp
; AVX2-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movzwl %ax, %eax
; AVX2-NEXT: orl %ebp, %eax
; AVX2-NEXT: shlq $32, %rax
; AVX2-NEXT: orq %rbx, %rax
; AVX2-NEXT: vmovq %rax, %xmm0
; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero
; AVX2-NEXT: vmovdqa %xmm0, (%r14)
; AVX2-NEXT: addq $32, %rsp
; AVX2-NEXT: popq %rbx
; AVX2-NEXT: popq %r14
; AVX2-NEXT: popq %rbp
; AVX2-NEXT: retq
;
; AVX512-LABEL: store_cvt_4f64_to_8i16_zero:
; AVX512: # BB#0:
; AVX512-NEXT: pushq %rbp
; AVX512-NEXT: .Ltmp48:
; AVX512-NEXT: .cfi_def_cfa_offset 16
; AVX512-NEXT: pushq %r14
; AVX512-NEXT: .Ltmp49:
; AVX512-NEXT: .cfi_def_cfa_offset 24
; AVX512-NEXT: pushq %rbx
; AVX512-NEXT: .Ltmp50:
; AVX512-NEXT: .cfi_def_cfa_offset 32
; AVX512-NEXT: subq $32, %rsp
; AVX512-NEXT: .Ltmp51:
; AVX512-NEXT: .cfi_def_cfa_offset 64
; AVX512-NEXT: .Ltmp52:
; AVX512-NEXT: .cfi_offset %rbx, -32
; AVX512-NEXT: .Ltmp53:
; AVX512-NEXT: .cfi_offset %r14, -24
; AVX512-NEXT: .Ltmp54:
; AVX512-NEXT: .cfi_offset %rbp, -16
; AVX512-NEXT: movq %rdi, %r14
; AVX512-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movw %ax, %bp
; AVX512-NEXT: shll $16, %ebp
; AVX512-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movzwl %ax, %ebx
; AVX512-NEXT: orl %ebp, %ebx
; AVX512-NEXT: vmovupd (%rsp), %ymm0 # 32-byte Reload
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX512-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movw %ax, %bp
; AVX512-NEXT: shll $16, %ebp
; AVX512-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movzwl %ax, %eax
; AVX512-NEXT: orl %ebp, %eax
; AVX512-NEXT: shlq $32, %rax
; AVX512-NEXT: orq %rbx, %rax
; AVX512-NEXT: vmovq %rax, %xmm0
; AVX512-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero
; AVX512-NEXT: vmovdqa %xmm0, (%r14)
; AVX512-NEXT: addq $32, %rsp
; AVX512-NEXT: popq %rbx
; AVX512-NEXT: popq %r14
; AVX512-NEXT: popq %rbp
; AVX512-NEXT: retq
%1 = fptrunc <4 x double> %a0 to <4 x half>
%2 = bitcast <4 x half> %1 to <4 x i16>
%3 = shufflevector <4 x i16> %2, <4 x i16> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
store <8 x i16> %3, <8 x i16>* %a1
ret void
}

define void @store_cvt_8f64_to_8i16(<8 x double> %a0, <8 x i16>* %a1) {
; AVX1-LABEL: store_cvt_8f64_to_8i16:
; AVX1: # BB#0:
; AVX1-NEXT: pushq %rbp
; AVX1-NEXT: .Ltmp55:
; AVX1-NEXT: .cfi_def_cfa_offset 16
; AVX1-NEXT: pushq %r15
; AVX1-NEXT: .Ltmp56:
; AVX1-NEXT: .cfi_def_cfa_offset 24
; AVX1-NEXT: pushq %r14
; AVX1-NEXT: .Ltmp57:
; AVX1-NEXT: .cfi_def_cfa_offset 32
; AVX1-NEXT: pushq %r13
; AVX1-NEXT: .Ltmp58:
; AVX1-NEXT: .cfi_def_cfa_offset 40
; AVX1-NEXT: pushq %r12
; AVX1-NEXT: .Ltmp59:
; AVX1-NEXT: .cfi_def_cfa_offset 48
; AVX1-NEXT: pushq %rbx
; AVX1-NEXT: .Ltmp60:
; AVX1-NEXT: .cfi_def_cfa_offset 56
; AVX1-NEXT: subq $136, %rsp
; AVX1-NEXT: .Ltmp61:
; AVX1-NEXT: .cfi_def_cfa_offset 192
; AVX1-NEXT: .Ltmp62:
; AVX1-NEXT: .cfi_offset %rbx, -56
; AVX1-NEXT: .Ltmp63:
; AVX1-NEXT: .cfi_offset %r12, -48
; AVX1-NEXT: .Ltmp64:
; AVX1-NEXT: .cfi_offset %r13, -40
; AVX1-NEXT: .Ltmp65:
; AVX1-NEXT: .cfi_offset %r14, -32
; AVX1-NEXT: .Ltmp66:
; AVX1-NEXT: .cfi_offset %r15, -24
; AVX1-NEXT: .Ltmp67:
; AVX1-NEXT: .cfi_offset %rbp, -16
; AVX1-NEXT: movq %rdi, %rbx
; AVX1-NEXT: vmovups %ymm1, {{[0-9]+}}(%rsp) # 32-byte Spill
; AVX1-NEXT: vmovupd %ymm0, {{[0-9]+}}(%rsp) # 32-byte Spill
; AVX1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movw %ax, {{[0-9]+}}(%rsp) # 2-byte Spill
; AVX1-NEXT: vmovupd {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vmovapd %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
; AVX1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movw %ax, {{[0-9]+}}(%rsp) # 2-byte Spill
; AVX1-NEXT: vmovupd {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
; AVX1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movl %eax, %r12d
; AVX1-NEXT: vmovupd {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vmovapd %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
; AVX1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movl %eax, %r13d
; AVX1-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
; AVX1-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movl %eax, %ebp
; AVX1-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movl %eax, %r14d
; AVX1-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
; AVX1-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movl %eax, %r15d
; AVX1-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movw %ax, 12(%rbx)
; AVX1-NEXT: movw %r15w, 8(%rbx)
; AVX1-NEXT: movw %r14w, 4(%rbx)
; AVX1-NEXT: movw %bp, (%rbx)
; AVX1-NEXT: movw %r13w, 14(%rbx)
; AVX1-NEXT: movw %r12w, 10(%rbx)
; AVX1-NEXT: movzwl {{[0-9]+}}(%rsp), %eax # 2-byte Folded Reload
; AVX1-NEXT: movw %ax, 6(%rbx)
; AVX1-NEXT: movzwl {{[0-9]+}}(%rsp), %eax # 2-byte Folded Reload
; AVX1-NEXT: movw %ax, 2(%rbx)
; AVX1-NEXT: addq $136, %rsp
; AVX1-NEXT: popq %rbx
; AVX1-NEXT: popq %r12
; AVX1-NEXT: popq %r13
; AVX1-NEXT: popq %r14
; AVX1-NEXT: popq %r15
; AVX1-NEXT: popq %rbp
; AVX1-NEXT: retq
;
; AVX2-LABEL: store_cvt_8f64_to_8i16:
; AVX2: # BB#0:
; AVX2-NEXT: pushq %rbp
; AVX2-NEXT: .Ltmp55:
; AVX2-NEXT: .cfi_def_cfa_offset 16
; AVX2-NEXT: pushq %r15
; AVX2-NEXT: .Ltmp56:
; AVX2-NEXT: .cfi_def_cfa_offset 24
; AVX2-NEXT: pushq %r14
; AVX2-NEXT: .Ltmp57:
; AVX2-NEXT: .cfi_def_cfa_offset 32
; AVX2-NEXT: pushq %r13
; AVX2-NEXT: .Ltmp58:
; AVX2-NEXT: .cfi_def_cfa_offset 40
; AVX2-NEXT: pushq %r12
; AVX2-NEXT: .Ltmp59:
; AVX2-NEXT: .cfi_def_cfa_offset 48
; AVX2-NEXT: pushq %rbx
; AVX2-NEXT: .Ltmp60:
; AVX2-NEXT: .cfi_def_cfa_offset 56
; AVX2-NEXT: subq $136, %rsp
; AVX2-NEXT: .Ltmp61:
; AVX2-NEXT: .cfi_def_cfa_offset 192
; AVX2-NEXT: .Ltmp62:
; AVX2-NEXT: .cfi_offset %rbx, -56
; AVX2-NEXT: .Ltmp63:
; AVX2-NEXT: .cfi_offset %r12, -48
; AVX2-NEXT: .Ltmp64:
; AVX2-NEXT: .cfi_offset %r13, -40
; AVX2-NEXT: .Ltmp65:
; AVX2-NEXT: .cfi_offset %r14, -32
; AVX2-NEXT: .Ltmp66:
; AVX2-NEXT: .cfi_offset %r15, -24
; AVX2-NEXT: .Ltmp67:
; AVX2-NEXT: .cfi_offset %rbp, -16
; AVX2-NEXT: movq %rdi, %rbx
; AVX2-NEXT: vmovups %ymm1, {{[0-9]+}}(%rsp) # 32-byte Spill
; AVX2-NEXT: vmovupd %ymm0, {{[0-9]+}}(%rsp) # 32-byte Spill
; AVX2-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movw %ax, {{[0-9]+}}(%rsp) # 2-byte Spill
; AVX2-NEXT: vmovupd {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX2-NEXT: vmovapd %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
; AVX2-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movw %ax, {{[0-9]+}}(%rsp) # 2-byte Spill
; AVX2-NEXT: vmovupd {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
; AVX2-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movl %eax, %r12d
; AVX2-NEXT: vmovupd {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX2-NEXT: vmovapd %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
; AVX2-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movl %eax, %r13d
; AVX2-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movl %eax, %ebp
; AVX2-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movl %eax, %r14d
; AVX2-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
; AVX2-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movl %eax, %r15d
; AVX2-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movw %ax, 12(%rbx)
; AVX2-NEXT: movw %r15w, 8(%rbx)
; AVX2-NEXT: movw %r14w, 4(%rbx)
; AVX2-NEXT: movw %bp, (%rbx)
; AVX2-NEXT: movw %r13w, 14(%rbx)
; AVX2-NEXT: movw %r12w, 10(%rbx)
; AVX2-NEXT: movzwl {{[0-9]+}}(%rsp), %eax # 2-byte Folded Reload
; AVX2-NEXT: movw %ax, 6(%rbx)
; AVX2-NEXT: movzwl {{[0-9]+}}(%rsp), %eax # 2-byte Folded Reload
; AVX2-NEXT: movw %ax, 2(%rbx)
; AVX2-NEXT: addq $136, %rsp
; AVX2-NEXT: popq %rbx
; AVX2-NEXT: popq %r12
; AVX2-NEXT: popq %r13
; AVX2-NEXT: popq %r14
; AVX2-NEXT: popq %r15
; AVX2-NEXT: popq %rbp
; AVX2-NEXT: retq
;
; AVX512-LABEL: store_cvt_8f64_to_8i16:
; AVX512: # BB#0:
; AVX512-NEXT: pushq %rbp
; AVX512-NEXT: .Ltmp55:
; AVX512-NEXT: .cfi_def_cfa_offset 16
; AVX512-NEXT: pushq %r15
; AVX512-NEXT: .Ltmp56:
; AVX512-NEXT: .cfi_def_cfa_offset 24
; AVX512-NEXT: pushq %r14
; AVX512-NEXT: .Ltmp57:
; AVX512-NEXT: .cfi_def_cfa_offset 32
; AVX512-NEXT: pushq %r13
; AVX512-NEXT: .Ltmp58:
; AVX512-NEXT: .cfi_def_cfa_offset 40
; AVX512-NEXT: pushq %r12
; AVX512-NEXT: .Ltmp59:
; AVX512-NEXT: .cfi_def_cfa_offset 48
; AVX512-NEXT: pushq %rbx
; AVX512-NEXT: .Ltmp60:
; AVX512-NEXT: .cfi_def_cfa_offset 56
; AVX512-NEXT: subq $200, %rsp
; AVX512-NEXT: .Ltmp61:
; AVX512-NEXT: .cfi_def_cfa_offset 256
; AVX512-NEXT: .Ltmp62:
; AVX512-NEXT: .cfi_offset %rbx, -56
; AVX512-NEXT: .Ltmp63:
; AVX512-NEXT: .cfi_offset %r12, -48
; AVX512-NEXT: .Ltmp64:
; AVX512-NEXT: .cfi_offset %r13, -40
; AVX512-NEXT: .Ltmp65:
; AVX512-NEXT: .cfi_offset %r14, -32
; AVX512-NEXT: .Ltmp66:
; AVX512-NEXT: .cfi_offset %r15, -24
; AVX512-NEXT: .Ltmp67:
; AVX512-NEXT: .cfi_offset %rbp, -16
; AVX512-NEXT: movq %rdi, %rbx
; AVX512-NEXT: vmovups %zmm0, {{[0-9]+}}(%rsp) # 64-byte Spill
; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movw %ax, {{[0-9]+}}(%rsp) # 2-byte Spill
; AVX512-NEXT: vmovups {{[0-9]+}}(%rsp), %zmm0 # 64-byte Reload
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX512-NEXT: vmovaps %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movw %ax, {{[0-9]+}}(%rsp) # 2-byte Spill
; AVX512-NEXT: vmovups {{[0-9]+}}(%rsp), %zmm0 # 64-byte Reload
; AVX512-NEXT: vextractf64x4 $1, %zmm0, %ymm0
; AVX512-NEXT: vmovupd %ymm0, {{[0-9]+}}(%rsp) # 32-byte Spill
; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movl %eax, %r12d
; AVX512-NEXT: vmovupd {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX512-NEXT: vmovapd %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movl %eax, %r13d
; AVX512-NEXT: vmovups {{[0-9]+}}(%rsp), %zmm0 # 64-byte Reload
; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movl %eax, %ebp
; AVX512-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movl %eax, %r14d
; AVX512-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
; AVX512-NEXT: # kill: %XMM0<def> %XMM0<kill> %YMM0<kill>
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movl %eax, %r15d
; AVX512-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movw %ax, 12(%rbx)
; AVX512-NEXT: movw %r15w, 8(%rbx)
; AVX512-NEXT: movw %r14w, 4(%rbx)
; AVX512-NEXT: movw %bp, (%rbx)
; AVX512-NEXT: movw %r13w, 14(%rbx)
; AVX512-NEXT: movw %r12w, 10(%rbx)
; AVX512-NEXT: movzwl {{[0-9]+}}(%rsp), %eax # 2-byte Folded Reload
; AVX512-NEXT: movw %ax, 6(%rbx)
; AVX512-NEXT: movzwl {{[0-9]+}}(%rsp), %eax # 2-byte Folded Reload
; AVX512-NEXT: movw %ax, 2(%rbx)
; AVX512-NEXT: addq $200, %rsp
; AVX512-NEXT: popq %rbx
; AVX512-NEXT: popq %r12
; AVX512-NEXT: popq %r13
; AVX512-NEXT: popq %r14
; AVX512-NEXT: popq %r15
; AVX512-NEXT: popq %rbp
; AVX512-NEXT: retq
%1 = fptrunc <8 x double> %a0 to <8 x half>
%2 = bitcast <8 x half> %1 to <8 x i16>
store <8 x i16> %2, <8 x i16>* %a1
ret void
}