; llvm-project/llvm/test/CodeGen/X86/compress_expand.ll
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mcpu=skylake-avx512 < %s | FileCheck %s --check-prefix=ALL --check-prefix=SKX
; RUN: llc -mcpu=knl < %s | FileCheck %s --check-prefix=ALL --check-prefix=KNL
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
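
; Tests lowering of the llvm.masked.expandload.* and
; llvm.masked.compressstore.* intrinsics to the AVX-512 expand/compress
; instructions. SKX (AVX512VL/BW/DQ) can operate at subvector widths; KNL
; (no VL/BW/DQ) widens narrow operations to zmm and trims the mask.

; A constant mask folds to an immediate: element 11 disabled gives 0xF7FF
; (-2049), and the undef passthru selects zeroing masking ({z}).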
define <16 x float> @expandload_v16f32_const_undef(float* %base) {
; SKX-LABEL: expandload_v16f32_const_undef:
; SKX: # %bb.0:
; SKX-NEXT: movw $-2049, %ax # imm = 0xF7FF
; SKX-NEXT: kmovd %eax, %k1
; SKX-NEXT: vexpandps (%rdi), %zmm0 {%k1} {z}
; SKX-NEXT: retq
;
; KNL-LABEL: expandload_v16f32_const_undef:
; KNL: # %bb.0:
; KNL-NEXT: movw $-2049, %ax # imm = 0xF7FF
; KNL-NEXT: kmovw %eax, %k1
; KNL-NEXT: vexpandps (%rdi), %zmm0 {%k1} {z}
; KNL-NEXT: retq
%res = call <16 x float> @llvm.masked.expandload.v16f32(float* %base, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 true, i1 true, i1 true, i1 true>, <16 x float> undef)
ret <16 x float> %res
}
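
; Merge variant: elements 11 and 15 disabled give 0x77FF, and the disabled
; lanes keep their %src0 values (no {z} modifier).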
define <16 x float> @expandload_v16f32_const(float* %base, <16 x float> %src0) {
; SKX-LABEL: expandload_v16f32_const:
; SKX: # %bb.0:
; SKX-NEXT: movw $30719, %ax # imm = 0x77FF
; SKX-NEXT: kmovd %eax, %k1
; SKX-NEXT: vexpandps (%rdi), %zmm0 {%k1}
; SKX-NEXT: retq
;
; KNL-LABEL: expandload_v16f32_const:
; KNL: # %bb.0:
; KNL-NEXT: movw $30719, %ax # imm = 0x77FF
; KNL-NEXT: kmovw %eax, %k1
; KNL-NEXT: vexpandps (%rdi), %zmm0 {%k1}
; KNL-NEXT: retq
%res = call <16 x float> @llvm.masked.expandload.v16f32(float* %base, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 true, i1 true, i1 true, i1 false>, <16 x float> %src0)
ret <16 x float> %res
}
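
; Variable <8 x i1> mask: SKX shifts the bits to the sign position and uses
; vpmovw2m; KNL has no vpmovw2m, so it zero-extends to qwords and uses
; vptestmq to build the k-register.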
define <8 x double> @expandload_v8f64_v8i1(double* %base, <8 x double> %src0, <8 x i1> %mask) {
; SKX-LABEL: expandload_v8f64_v8i1:
; SKX: # %bb.0:
; SKX-NEXT: vpsllw $15, %xmm1, %xmm1
; SKX-NEXT: vpmovw2m %xmm1, %k1
; SKX-NEXT: vexpandpd (%rdi), %zmm0 {%k1}
; SKX-NEXT: retq
;
; KNL-LABEL: expandload_v8f64_v8i1:
; KNL: # %bb.0:
; KNL-NEXT: vpmovzxwq {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero
; KNL-NEXT: vpsllq $63, %zmm1, %zmm1
; KNL-NEXT: vptestmq %zmm1, %zmm1, %k1
; KNL-NEXT: vexpandpd (%rdi), %zmm0 {%k1}
; KNL-NEXT: retq
%res = call <8 x double> @llvm.masked.expandload.v8f64(double* %base, <8 x i1> %mask, <8 x double> %src0)
ret <8 x double> %res
}
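
; 128-bit expand: KNL lacks AVX512VL, so the operation is widened to zmm;
; the "# kill" annotations track the xmm/zmm register aliasing.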
define <4 x float> @expandload_v4f32_const(float* %base, <4 x float> %src0) {
; SKX-LABEL: expandload_v4f32_const:
; SKX: # %bb.0:
; SKX-NEXT: movb $7, %al
; SKX-NEXT: kmovd %eax, %k1
; SKX-NEXT: vexpandps (%rdi), %xmm0 {%k1}
; SKX-NEXT: retq
;
; KNL-LABEL: expandload_v4f32_const:
; KNL: # %bb.0:
; KNL-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; KNL-NEXT: movw $7, %ax
; KNL-NEXT: kmovw %eax, %k1
; KNL-NEXT: vexpandps (%rdi), %zmm0 {%k1}
; KNL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; KNL-NEXT: retq
%res = call <4 x float> @llvm.masked.expandload.v4f32(float* %base, <4 x i1> <i1 true, i1 true, i1 true, i1 false>, <4 x float> %src0)
ret <4 x float> %res
}
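
; A single enabled lane (mask bit 1) still lowers to a masked expand.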
define <2 x i64> @expandload_v2i64_const(i64* %base, <2 x i64> %src0) {
; SKX-LABEL: expandload_v2i64_const:
; SKX: # %bb.0:
; SKX-NEXT: movb $2, %al
; SKX-NEXT: kmovd %eax, %k1
; SKX-NEXT: vpexpandq (%rdi), %xmm0 {%k1}
; SKX-NEXT: retq
;
; KNL-LABEL: expandload_v2i64_const:
; KNL: # %bb.0:
; KNL-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; KNL-NEXT: movb $2, %al
; KNL-NEXT: kmovw %eax, %k1
; KNL-NEXT: vpexpandq (%rdi), %zmm0 {%k1}
; KNL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; KNL-NEXT: retq
%res = call <2 x i64> @llvm.masked.expandload.v2i64(i64* %base, <2 x i1> <i1 false, i1 true>, <2 x i64> %src0)
ret <2 x i64> %res
}

declare <16 x float> @llvm.masked.expandload.v16f32(float*, <16 x i1>, <16 x float>)
declare <8 x double> @llvm.masked.expandload.v8f64(double*, <8 x i1>, <8 x double>)
declare <4 x float> @llvm.masked.expandload.v4f32(float*, <4 x i1>, <4 x float>)
declare <2 x i64> @llvm.masked.expandload.v2i64(i64*, <2 x i1>, <2 x i64>)
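
; Compress-store counterpart of the first test, with the same 0xF7FF mask.
; SKX ends with vzeroupper; the KNL tuning omits it.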
define void @compressstore_v16f32_const(float* %base, <16 x float> %V) {
; SKX-LABEL: compressstore_v16f32_const:
; SKX: # %bb.0:
; SKX-NEXT: movw $-2049, %ax # imm = 0xF7FF
; SKX-NEXT: kmovd %eax, %k1
; SKX-NEXT: vcompressps %zmm0, (%rdi) {%k1}
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
;
; KNL-LABEL: compressstore_v16f32_const:
; KNL: # %bb.0:
; KNL-NEXT: movw $-2049, %ax # imm = 0xF7FF
; KNL-NEXT: kmovw %eax, %k1
; KNL-NEXT: vcompressps %zmm0, (%rdi) {%k1}
; KNL-NEXT: retq
call void @llvm.masked.compressstore.v16f32(<16 x float> %V, float* %base, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 true, i1 true, i1 true, i1 true>)
ret void
}
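
; 256-bit compress with a variable mask; KNL widens ymm to zmm as above.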
define void @compressstore_v8f32_v8i1(float* %base, <8 x float> %V, <8 x i1> %mask) {
; SKX-LABEL: compressstore_v8f32_v8i1:
; SKX: # %bb.0:
; SKX-NEXT: vpsllw $15, %xmm1, %xmm1
; SKX-NEXT: vpmovw2m %xmm1, %k1
; SKX-NEXT: vcompressps %ymm0, (%rdi) {%k1}
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
;
; KNL-LABEL: compressstore_v8f32_v8i1:
; KNL: # %bb.0:
; KNL-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; KNL-NEXT: vpmovzxwq {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero
; KNL-NEXT: vpsllq $63, %zmm1, %zmm1
; KNL-NEXT: vptestmq %zmm1, %zmm1, %k1
; KNL-NEXT: vcompressps %zmm0, (%rdi) {%k1}
; KNL-NEXT: retq
call void @llvm.masked.compressstore.v8f32(<8 x float> %V, float* %base, <8 x i1> %mask)
ret void
}
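
; Double-precision variant of the same <8 x i1> pattern (vcompresspd).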
define void @compressstore_v8f64_v8i1(double* %base, <8 x double> %V, <8 x i1> %mask) {
; SKX-LABEL: compressstore_v8f64_v8i1:
; SKX: # %bb.0:
; SKX-NEXT: vpsllw $15, %xmm1, %xmm1
; SKX-NEXT: vpmovw2m %xmm1, %k1
; SKX-NEXT: vcompresspd %zmm0, (%rdi) {%k1}
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
;
; KNL-LABEL: compressstore_v8f64_v8i1:
; KNL: # %bb.0:
; KNL-NEXT: vpmovzxwq {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero
; KNL-NEXT: vpsllq $63, %zmm1, %zmm1
; KNL-NEXT: vptestmq %zmm1, %zmm1, %k1
; KNL-NEXT: vcompresspd %zmm0, (%rdi) {%k1}
; KNL-NEXT: retq
call void @llvm.masked.compressstore.v8f64(<8 x double> %V, double* %base, <8 x i1> %mask)
ret void
}
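
; Integer qword variant (vpcompressq); the mask handling is identical.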
define void @compressstore_v8i64_v8i1(i64* %base, <8 x i64> %V, <8 x i1> %mask) {
; SKX-LABEL: compressstore_v8i64_v8i1:
; SKX: # %bb.0:
; SKX-NEXT: vpsllw $15, %xmm1, %xmm1
; SKX-NEXT: vpmovw2m %xmm1, %k1
; SKX-NEXT: vpcompressq %zmm0, (%rdi) {%k1}
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
;
; KNL-LABEL: compressstore_v8i64_v8i1:
; KNL: # %bb.0:
; KNL-NEXT: vpmovzxwq {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero
; KNL-NEXT: vpsllq $63, %zmm1, %zmm1
; KNL-NEXT: vptestmq %zmm1, %zmm1, %k1
; KNL-NEXT: vpcompressq %zmm0, (%rdi) {%k1}
; KNL-NEXT: retq
call void @llvm.masked.compressstore.v8i64(<8 x i64> %V, i64* %base, <8 x i1> %mask)
ret void
}
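
; <4 x i1> mask: on KNL the kshiftlw/kshiftrw $12 pair clears the k-register
; bits beyond the four real elements.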
define void @compressstore_v4i64_v4i1(i64* %base, <4 x i64> %V, <4 x i1> %mask) {
; SKX-LABEL: compressstore_v4i64_v4i1:
; SKX: # %bb.0:
; SKX-NEXT: vpslld $31, %xmm1, %xmm1
; SKX-NEXT: vpmovd2m %xmm1, %k1
; SKX-NEXT: vpcompressq %ymm0, (%rdi) {%k1}
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
;
; KNL-LABEL: compressstore_v4i64_v4i1:
; KNL: # %bb.0:
; KNL-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; KNL-NEXT: vpslld $31, %xmm1, %xmm1
; KNL-NEXT: vptestmd %zmm1, %zmm1, %k0
; KNL-NEXT: kshiftlw $12, %k0, %k0
; KNL-NEXT: kshiftrw $12, %k0, %k1
; KNL-NEXT: vpcompressq %zmm0, (%rdi) {%k1}
; KNL-NEXT: retq
call void @llvm.masked.compressstore.v4i64(<4 x i64> %V, i64* %base, <4 x i1> %mask)
ret void
}
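
; Two-lane version: the kshift pair by $14 keeps only mask bits 0 and 1.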
define void @compressstore_v2i64_v2i1(i64* %base, <2 x i64> %V, <2 x i1> %mask) {
; SKX-LABEL: compressstore_v2i64_v2i1:
; SKX: # %bb.0:
; SKX-NEXT: vpsllq $63, %xmm1, %xmm1
; SKX-NEXT: vpmovq2m %xmm1, %k1
; SKX-NEXT: vpcompressq %xmm0, (%rdi) {%k1}
; SKX-NEXT: retq
;
; KNL-LABEL: compressstore_v2i64_v2i1:
; KNL: # %bb.0:
; KNL-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; KNL-NEXT: vpsllq $63, %xmm1, %xmm1
; KNL-NEXT: vptestmq %zmm1, %zmm1, %k0
; KNL-NEXT: kshiftlw $14, %k0, %k0
; KNL-NEXT: kshiftrw $14, %k0, %k1
; KNL-NEXT: vpcompressq %zmm0, (%rdi) {%k1}
; KNL-NEXT: retq
call void @llvm.masked.compressstore.v2i64(<2 x i64> %V, i64* %base, <2 x i1> %mask)
ret void
}
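
; Dword-float version of the <4 x i1> pattern above.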
define void @compressstore_v4f32_v4i1(float* %base, <4 x float> %V, <4 x i1> %mask) {
; SKX-LABEL: compressstore_v4f32_v4i1:
; SKX: # %bb.0:
; SKX-NEXT: vpslld $31, %xmm1, %xmm1
; SKX-NEXT: vpmovd2m %xmm1, %k1
; SKX-NEXT: vcompressps %xmm0, (%rdi) {%k1}
; SKX-NEXT: retq
;
; KNL-LABEL: compressstore_v4f32_v4i1:
; KNL: # %bb.0:
; KNL-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; KNL-NEXT: vpslld $31, %xmm1, %xmm1
; KNL-NEXT: vptestmd %zmm1, %zmm1, %k0
; KNL-NEXT: kshiftlw $12, %k0, %k0
; KNL-NEXT: kshiftrw $12, %k0, %k1
; KNL-NEXT: vcompressps %zmm0, (%rdi) {%k1}
; KNL-NEXT: retq
call void @llvm.masked.compressstore.v4f32(<4 x float> %V, float* %base, <4 x i1> %mask)
ret void
}
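
; Mask from a <2 x i32> zero compare: vpblendd zeroes the odd dwords so each
; trigger value occupies a qword, and vptestnmq sets a mask bit where the
; trigger is zero.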
define <2 x float> @expandload_v2f32_v2i1(float* %base, <2 x float> %src0, <2 x i32> %trigger) {
; SKX-LABEL: expandload_v2f32_v2i1:
; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2
; SKX-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
; SKX-NEXT: vptestnmq %xmm1, %xmm1, %k1
; SKX-NEXT: vexpandps (%rdi), %xmm0 {%k1}
; SKX-NEXT: retq
;
; KNL-LABEL: expandload_v2f32_v2i1:
; KNL: # %bb.0:
; KNL-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; KNL-NEXT: vpxor %xmm2, %xmm2, %xmm2
; KNL-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
; KNL-NEXT: vptestnmq %zmm1, %zmm1, %k0
; KNL-NEXT: kshiftlw $14, %k0, %k0
; KNL-NEXT: kshiftrw $14, %k0, %k1
; KNL-NEXT: vexpandps (%rdi), %zmm0 {%k1}
; KNL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; KNL-NEXT: retq
%mask = icmp eq <2 x i32> %trigger, zeroinitializer
%res = call <2 x float> @llvm.masked.expandload.v2f32(float* %base, <2 x i1> %mask, <2 x float> %src0)
ret <2 x float> %res
}
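
; Same zero-trigger mask computation feeding a compress-store.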
define void @compressstore_v2f32_v2i32(float* %base, <2 x float> %V, <2 x i32> %trigger) {
; SKX-LABEL: compressstore_v2f32_v2i32:
; SKX: # %bb.0:
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2
; SKX-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
; SKX-NEXT: vptestnmq %xmm1, %xmm1, %k1
; SKX-NEXT: vcompressps %xmm0, (%rdi) {%k1}
; SKX-NEXT: retq
;
; KNL-LABEL: compressstore_v2f32_v2i32:
; KNL: # %bb.0:
; KNL-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; KNL-NEXT: vpxor %xmm2, %xmm2, %xmm2
; KNL-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
; KNL-NEXT: vptestnmq %zmm1, %zmm1, %k0
; KNL-NEXT: kshiftlw $14, %k0, %k0
; KNL-NEXT: kshiftrw $14, %k0, %k1
; KNL-NEXT: vcompressps %zmm0, (%rdi) {%k1}
; KNL-NEXT: retq
%mask = icmp eq <2 x i32> %trigger, zeroinitializer
call void @llvm.masked.compressstore.v2f32(<2 x float> %V, float* %base, <2 x i1> %mask)
ret void
}
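
; A 32-element expandload is split into two zmm halves. The second half loads
; from base + 4 * popcnt(low-half mask), i.e. just past the elements the
; first expand consumed. SKX and KNL emit identical code (ALL prefix).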
define <32 x float> @expandload_v32f32_v32i32(float* %base, <32 x float> %src0, <32 x i32> %trigger) {
; ALL-LABEL: expandload_v32f32_v32i32:
; ALL: # %bb.0:
; ALL-NEXT: vptestnmd %zmm3, %zmm3, %k1
; ALL-NEXT: vptestnmd %zmm2, %zmm2, %k2
; ALL-NEXT: kmovw %k2, %eax
; ALL-NEXT: popcntl %eax, %eax
; ALL-NEXT: vexpandps (%rdi,%rax,4), %zmm1 {%k1}
; ALL-NEXT: vexpandps (%rdi), %zmm0 {%k2}
; ALL-NEXT: retq
%mask = icmp eq <32 x i32> %trigger, zeroinitializer
%res = call <32 x float> @llvm.masked.expandload.v32f32(float* %base, <32 x i1> %mask, <32 x float> %src0)
ret <32 x float> %res
}
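
; Split <16 x double> expandload (8-byte scale). KNL masks the k-register
; copy down to 8 bits (movzbl) before taking the popcnt.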
define <16 x double> @expandload_v16f64_v16i32(double* %base, <16 x double> %src0, <16 x i32> %trigger) {
; SKX-LABEL: expandload_v16f64_v16i32:
; SKX: # %bb.0:
; SKX-NEXT: vextracti64x4 $1, %zmm2, %ymm3
; SKX-NEXT: vptestnmd %ymm3, %ymm3, %k1
; SKX-NEXT: vptestnmd %ymm2, %ymm2, %k2
; SKX-NEXT: kmovb %k2, %eax
; SKX-NEXT: popcntl %eax, %eax
; SKX-NEXT: vexpandpd (%rdi,%rax,8), %zmm1 {%k1}
; SKX-NEXT: vexpandpd (%rdi), %zmm0 {%k2}
; SKX-NEXT: retq
;
; KNL-LABEL: expandload_v16f64_v16i32:
; KNL: # %bb.0:
; KNL-NEXT: vextracti64x4 $1, %zmm2, %ymm3
; KNL-NEXT: vptestnmd %zmm3, %zmm3, %k1
; KNL-NEXT: vptestnmd %zmm2, %zmm2, %k2
; KNL-NEXT: vexpandpd (%rdi), %zmm0 {%k2}
; KNL-NEXT: kmovw %k2, %eax
; KNL-NEXT: movzbl %al, %eax
; KNL-NEXT: popcntl %eax, %eax
; KNL-NEXT: vexpandpd (%rdi,%rax,8), %zmm1 {%k1}
; KNL-NEXT: retq
%mask = icmp eq <16 x i32> %trigger, zeroinitializer
%res = call <16 x double> @llvm.masked.expandload.v16f64(double* %base, <16 x i1> %mask, <16 x double> %src0)
ret <16 x double> %res
}
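
; Split compress-store: the high half stores at base + 4 * popcnt(low mask).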
define void @compressstore_v32f32_v32i32(float* %base, <32 x float> %V, <32 x i32> %trigger) {
; SKX-LABEL: compressstore_v32f32_v32i32:
; SKX: # %bb.0:
; SKX-NEXT: vptestnmd %zmm3, %zmm3, %k1
; SKX-NEXT: vptestnmd %zmm2, %zmm2, %k2
; SKX-NEXT: kmovw %k2, %eax
; SKX-NEXT: popcntl %eax, %eax
; SKX-NEXT: vcompressps %zmm1, (%rdi,%rax,4) {%k1}
; SKX-NEXT: vcompressps %zmm0, (%rdi) {%k2}
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
;
; KNL-LABEL: compressstore_v32f32_v32i32:
; KNL: # %bb.0:
; KNL-NEXT: vptestnmd %zmm3, %zmm3, %k1
; KNL-NEXT: vptestnmd %zmm2, %zmm2, %k2
; KNL-NEXT: kmovw %k2, %eax
; KNL-NEXT: popcntl %eax, %eax
; KNL-NEXT: vcompressps %zmm1, (%rdi,%rax,4) {%k1}
; KNL-NEXT: vcompressps %zmm0, (%rdi) {%k2}
; KNL-NEXT: retq
%mask = icmp eq <32 x i32> %trigger, zeroinitializer
call void @llvm.masked.compressstore.v32f32(<32 x float> %V, float* %base, <32 x i1> %mask)
ret void
}
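
; <16 x i1> mask passed as a byte vector: SKX builds the 16-bit k-register
; with vpmovb2m, takes the upper half with kshiftrw $8, and offsets the
; second store by popcnt of the low byte.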
define void @compressstore_v16f64_v16i1(double* %base, <16 x double> %V, <16 x i1> %mask) {
; SKX-LABEL: compressstore_v16f64_v16i1:
; SKX: # %bb.0:
; SKX-NEXT: vpsllw $7, %xmm2, %xmm2
; SKX-NEXT: vpmovb2m %xmm2, %k1
; SKX-NEXT: kshiftrw $8, %k1, %k2
; SKX-NEXT: kmovb %k1, %eax
; SKX-NEXT: popcntl %eax, %eax
; SKX-NEXT: vcompresspd %zmm1, (%rdi,%rax,8) {%k2}
; SKX-NEXT: vcompresspd %zmm0, (%rdi) {%k1}
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
;
; KNL-LABEL: compressstore_v16f64_v16i1:
; KNL: # %bb.0:
; KNL-NEXT: vpmovzxbd {{.*#+}} zmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4],zero,zero,zero,xmm2[5],zero,zero,zero,xmm2[6],zero,zero,zero,xmm2[7],zero,zero,zero,xmm2[8],zero,zero,zero,xmm2[9],zero,zero,zero,xmm2[10],zero,zero,zero,xmm2[11],zero,zero,zero,xmm2[12],zero,zero,zero,xmm2[13],zero,zero,zero,xmm2[14],zero,zero,zero,xmm2[15],zero,zero,zero
; KNL-NEXT: vpslld $31, %zmm2, %zmm2
; KNL-NEXT: vptestmd %zmm2, %zmm2, %k1
; KNL-NEXT: kshiftrw $8, %k1, %k2
; KNL-NEXT: vcompresspd %zmm0, (%rdi) {%k1}
; KNL-NEXT: kmovw %k1, %eax
; KNL-NEXT: movzbl %al, %eax
; KNL-NEXT: popcntl %eax, %eax
; KNL-NEXT: vcompresspd %zmm1, (%rdi,%rax,8) {%k2}
; KNL-NEXT: retq
call void @llvm.masked.compressstore.v16f64(<16 x double> %V, double* %base, <16 x i1> %mask)
ret void
}

declare void @llvm.masked.compressstore.v16f32(<16 x float>, float*, <16 x i1>)
declare void @llvm.masked.compressstore.v8f32(<8 x float>, float*, <8 x i1>)
declare void @llvm.masked.compressstore.v8f64(<8 x double>, double*, <8 x i1>)
declare void @llvm.masked.compressstore.v16i32(<16 x i32>, i32*, <16 x i1>)
declare void @llvm.masked.compressstore.v8i32(<8 x i32>, i32*, <8 x i1>)
declare void @llvm.masked.compressstore.v8i64(<8 x i64>, i64*, <8 x i1>)
declare void @llvm.masked.compressstore.v4i32(<4 x i32>, i32*, <4 x i1>)
declare void @llvm.masked.compressstore.v4f32(<4 x float>, float*, <4 x i1>)
declare void @llvm.masked.compressstore.v4i64(<4 x i64>, i64*, <4 x i1>)
declare void @llvm.masked.compressstore.v2i64(<2 x i64>, i64*, <2 x i1>)
declare void @llvm.masked.compressstore.v2f32(<2 x float>, float*, <2 x i1>)
declare void @llvm.masked.compressstore.v32f32(<32 x float>, float*, <32 x i1>)
declare void @llvm.masked.compressstore.v16f64(<16 x double>, double*, <16 x i1>)
declare void @llvm.masked.compressstore.v32f64(<32 x double>, double*, <32 x i1>)
declare <2 x float> @llvm.masked.expandload.v2f32(float*, <2 x i1>, <2 x float>)
declare <32 x float> @llvm.masked.expandload.v32f32(float*, <32 x i1>, <32 x float>)
declare <16 x double> @llvm.masked.expandload.v16f64(double*, <16 x i1>, <16 x double>)