; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl | FileCheck %s

declare i16 @llvm.x86.avx512.kunpck.bw(i16, i16) nounwind readnone

define i16 @unpckbw_test(i16 %a0, i16 %a1) {
; CHECK-LABEL: unpckbw_test:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    kmovw %edi, %k0
; CHECK-NEXT:    kmovw %esi, %k1
; CHECK-NEXT:    kunpckbw %k0, %k1, %k0
; CHECK-NEXT:    kmovw %k0, %eax
; CHECK-NEXT:    ## kill: def %ax killed %ax killed %eax
; CHECK-NEXT:    retq
  %res = call i16 @llvm.x86.avx512.kunpck.bw(i16 %a0, i16 %a1)
  ret i16 %res
}

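; Broadcast intrinsics: each test exercises the unmasked, merge-masked ({%k1})
; and zero-masked ({%k1} {z}) forms and sums the three results.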
define <16 x i32>@test_int_x86_avx512_mask_pbroadcastd_gpr_512(i32 %x0, <16 x i32> %x1, i16 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_pbroadcastd_gpr_512:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vpbroadcastd %edi, %zmm1
; CHECK-NEXT:    kmovw %esi, %k1
; CHECK-NEXT:    vpbroadcastd %edi, %zmm0 {%k1}
; CHECK-NEXT:    vpaddd %zmm0, %zmm1, %zmm0
; CHECK-NEXT:    vpbroadcastd %edi, %zmm1 {%k1} {z}
; CHECK-NEXT:    vpaddd %zmm0, %zmm1, %zmm0
; CHECK-NEXT:    retq
  %res = call <16 x i32> @llvm.x86.avx512.mask.pbroadcast.d.gpr.512(i32 %x0, <16 x i32> %x1, i16 -1)
  %res1 = call <16 x i32> @llvm.x86.avx512.mask.pbroadcast.d.gpr.512(i32 %x0, <16 x i32> %x1, i16 %mask)
  %res2 = call <16 x i32> @llvm.x86.avx512.mask.pbroadcast.d.gpr.512(i32 %x0, <16 x i32> zeroinitializer, i16 %mask)
  %res3 = add <16 x i32> %res, %res1
  %res4 = add <16 x i32> %res2, %res3
  ret <16 x i32> %res4
}
declare <16 x i32> @llvm.x86.avx512.mask.pbroadcast.d.gpr.512(i32, <16 x i32>, i16)

define <8 x i64>@test_int_x86_avx512_mask_pbroadcastq_gpr_512(i64 %x0, <8 x i64> %x1, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_pbroadcastq_gpr_512:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vpbroadcastq %rdi, %zmm1
; CHECK-NEXT:    kmovw %esi, %k1
; CHECK-NEXT:    vpbroadcastq %rdi, %zmm0 {%k1}
; CHECK-NEXT:    vpaddq %zmm0, %zmm1, %zmm0
; CHECK-NEXT:    vpbroadcastq %rdi, %zmm1 {%k1} {z}
; CHECK-NEXT:    vpaddq %zmm0, %zmm1, %zmm0
; CHECK-NEXT:    retq
  %res = call <8 x i64> @llvm.x86.avx512.mask.pbroadcast.q.gpr.512(i64 %x0, <8 x i64> %x1,i8 -1)
  %res1 = call <8 x i64> @llvm.x86.avx512.mask.pbroadcast.q.gpr.512(i64 %x0, <8 x i64> %x1,i8 %mask)
  %res2 = call <8 x i64> @llvm.x86.avx512.mask.pbroadcast.q.gpr.512(i64 %x0, <8 x i64> zeroinitializer,i8 %mask)
  %res3 = add <8 x i64> %res, %res1
  %res4 = add <8 x i64> %res2, %res3
  ret <8 x i64> %res4
}
declare <8 x i64> @llvm.x86.avx512.mask.pbroadcast.q.gpr.512(i64, <8 x i64>, i8)

declare <16 x float> @llvm.x86.avx512.mask.broadcast.ss.ps.512(<4 x float>, <16 x float>, i16) nounwind readonly

define <16 x float> @test_x86_vbroadcast_ss_ps_512(<4 x float> %a0, <16 x float> %a1, i16 %mask ) {
; CHECK-LABEL: test_x86_vbroadcast_ss_ps_512:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vbroadcastss %xmm0, %zmm2
; CHECK-NEXT:    kmovw %edi, %k1
; CHECK-NEXT:    vbroadcastss %xmm0, %zmm1 {%k1}
; CHECK-NEXT:    vaddps %zmm1, %zmm2, %zmm1
; CHECK-NEXT:    vbroadcastss %xmm0, %zmm0 {%k1} {z}
; CHECK-NEXT:    vaddps %zmm1, %zmm0, %zmm0
; CHECK-NEXT:    retq

  %res = call <16 x float> @llvm.x86.avx512.mask.broadcast.ss.ps.512(<4 x float> %a0, <16 x float> zeroinitializer, i16 -1)
  %res1 = call <16 x float> @llvm.x86.avx512.mask.broadcast.ss.ps.512(<4 x float> %a0, <16 x float> %a1, i16 %mask)
  %res2 = call <16 x float> @llvm.x86.avx512.mask.broadcast.ss.ps.512(<4 x float> %a0, <16 x float> zeroinitializer, i16 %mask)
  %res3 = fadd <16 x float> %res, %res1
  %res4 = fadd <16 x float> %res2, %res3
  ret <16 x float> %res4
}

declare <8 x double> @llvm.x86.avx512.mask.broadcast.sd.pd.512(<2 x double>, <8 x double>, i8) nounwind readonly

define <8 x double> @test_x86_vbroadcast_sd_pd_512(<2 x double> %a0, <8 x double> %a1, i8 %mask ) {
; CHECK-LABEL: test_x86_vbroadcast_sd_pd_512:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vbroadcastsd %xmm0, %zmm2
; CHECK-NEXT:    kmovw %edi, %k1
; CHECK-NEXT:    vbroadcastsd %xmm0, %zmm1 {%k1}
; CHECK-NEXT:    vaddpd %zmm1, %zmm2, %zmm1
; CHECK-NEXT:    vbroadcastsd %xmm0, %zmm0 {%k1} {z}
; CHECK-NEXT:    vaddpd %zmm1, %zmm0, %zmm0
; CHECK-NEXT:    retq

  %res = call <8 x double> @llvm.x86.avx512.mask.broadcast.sd.pd.512(<2 x double> %a0, <8 x double> zeroinitializer, i8 -1)
  %res1 = call <8 x double> @llvm.x86.avx512.mask.broadcast.sd.pd.512(<2 x double> %a0, <8 x double> %a1, i8 %mask)
  %res2 = call <8 x double> @llvm.x86.avx512.mask.broadcast.sd.pd.512(<2 x double> %a0, <8 x double> zeroinitializer, i8 %mask)
  %res3 = fadd <8 x double> %res, %res1
  %res4 = fadd <8 x double> %res2, %res3
  ret <8 x double> %res4
}

declare <16 x i32> @llvm.x86.avx512.pbroadcastd.512(<4 x i32>, <16 x i32>, i16)

define <16 x i32>@test_int_x86_avx512_pbroadcastd_512(<4 x i32> %x0, <16 x i32> %x1, i16 %mask) {
; CHECK-LABEL: test_int_x86_avx512_pbroadcastd_512:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vpbroadcastd %xmm0, %zmm2
; CHECK-NEXT:    kmovw %edi, %k1
; CHECK-NEXT:    vpbroadcastd %xmm0, %zmm1 {%k1}
; CHECK-NEXT:    vpaddd %zmm1, %zmm2, %zmm1
; CHECK-NEXT:    vpbroadcastd %xmm0, %zmm0 {%k1} {z}
; CHECK-NEXT:    vpaddd %zmm1, %zmm0, %zmm0
; CHECK-NEXT:    retq
  %res = call <16 x i32> @llvm.x86.avx512.pbroadcastd.512(<4 x i32> %x0, <16 x i32> %x1, i16 -1)
  %res1 = call <16 x i32> @llvm.x86.avx512.pbroadcastd.512(<4 x i32> %x0, <16 x i32> %x1, i16 %mask)
  %res2 = call <16 x i32> @llvm.x86.avx512.pbroadcastd.512(<4 x i32> %x0, <16 x i32> zeroinitializer, i16 %mask)
  %res3 = add <16 x i32> %res, %res1
  %res4 = add <16 x i32> %res2, %res3
  ret <16 x i32> %res4
}

declare <8 x i64> @llvm.x86.avx512.pbroadcastq.512(<2 x i64>, <8 x i64>, i8)

define <8 x i64>@test_int_x86_avx512_pbroadcastq_512(<2 x i64> %x0, <8 x i64> %x1, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_pbroadcastq_512:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vpbroadcastq %xmm0, %zmm2
; CHECK-NEXT:    kmovw %edi, %k1
; CHECK-NEXT:    vpbroadcastq %xmm0, %zmm1 {%k1}
; CHECK-NEXT:    vpaddq %zmm1, %zmm2, %zmm1
; CHECK-NEXT:    vpbroadcastq %xmm0, %zmm0 {%k1} {z}
; CHECK-NEXT:    vpaddq %zmm1, %zmm0, %zmm0
; CHECK-NEXT:    retq
  %res = call <8 x i64> @llvm.x86.avx512.pbroadcastq.512(<2 x i64> %x0, <8 x i64> %x1,i8 -1)
  %res1 = call <8 x i64> @llvm.x86.avx512.pbroadcastq.512(<2 x i64> %x0, <8 x i64> %x1,i8 %mask)
  %res2 = call <8 x i64> @llvm.x86.avx512.pbroadcastq.512(<2 x i64> %x0, <8 x i64> zeroinitializer,i8 %mask)
  %res3 = add <8 x i64> %res, %res1
  %res4 = add <8 x i64> %res2, %res3
  ret <8 x i64> %res4
}

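; Duplicate shuffles (vmovsldup/vmovshdup/vmovddup) with merge and zero masking.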
declare <16 x float> @llvm.x86.avx512.mask.movsldup.512(<16 x float>, <16 x float>, i16)

define <16 x float>@test_int_x86_avx512_mask_movsldup_512(<16 x float> %x0, <16 x float> %x1, i16 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_movsldup_512:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vmovsldup {{.*#+}} zmm2 = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
; CHECK-NEXT:    kmovw %edi, %k1
; CHECK-NEXT:    vmovsldup {{.*#+}} zmm1 {%k1} = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
; CHECK-NEXT:    vaddps %zmm2, %zmm1, %zmm1
; CHECK-NEXT:    vmovsldup {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
; CHECK-NEXT:    vaddps %zmm1, %zmm0, %zmm0
; CHECK-NEXT:    retq
  %res = call <16 x float> @llvm.x86.avx512.mask.movsldup.512(<16 x float> %x0, <16 x float> %x1, i16 %x2)
  %res1 = call <16 x float> @llvm.x86.avx512.mask.movsldup.512(<16 x float> %x0, <16 x float> %x1, i16 -1)
  %res2 = call <16 x float> @llvm.x86.avx512.mask.movsldup.512(<16 x float> %x0, <16 x float> zeroinitializer, i16 %x2)
  %res3 = fadd <16 x float> %res, %res1
  %res4 = fadd <16 x float> %res2, %res3
  ret <16 x float> %res4
}

declare <16 x float> @llvm.x86.avx512.mask.movshdup.512(<16 x float>, <16 x float>, i16)

define <16 x float>@test_int_x86_avx512_mask_movshdup_512(<16 x float> %x0, <16 x float> %x1, i16 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_movshdup_512:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vmovshdup {{.*#+}} zmm2 = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
; CHECK-NEXT:    kmovw %edi, %k1
; CHECK-NEXT:    vmovshdup {{.*#+}} zmm1 {%k1} = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
; CHECK-NEXT:    vaddps %zmm2, %zmm1, %zmm1
; CHECK-NEXT:    vmovshdup {{.*#+}} zmm0 {%k1} {z} = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
; CHECK-NEXT:    vaddps %zmm1, %zmm0, %zmm0
; CHECK-NEXT:    retq
  %res = call <16 x float> @llvm.x86.avx512.mask.movshdup.512(<16 x float> %x0, <16 x float> %x1, i16 %x2)
  %res1 = call <16 x float> @llvm.x86.avx512.mask.movshdup.512(<16 x float> %x0, <16 x float> %x1, i16 -1)
  %res2 = call <16 x float> @llvm.x86.avx512.mask.movshdup.512(<16 x float> %x0, <16 x float> zeroinitializer, i16 %x2)
  %res3 = fadd <16 x float> %res, %res1
  %res4 = fadd <16 x float> %res2, %res3
  ret <16 x float> %res4
}

declare <8 x double> @llvm.x86.avx512.mask.movddup.512(<8 x double>, <8 x double>, i8)

define <8 x double>@test_int_x86_avx512_mask_movddup_512(<8 x double> %x0, <8 x double> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_movddup_512:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vmovddup {{.*#+}} zmm2 = zmm0[0,0,2,2,4,4,6,6]
; CHECK-NEXT:    kmovw %edi, %k1
; CHECK-NEXT:    vmovddup {{.*#+}} zmm1 {%k1} = zmm0[0,0,2,2,4,4,6,6]
; CHECK-NEXT:    vaddpd %zmm2, %zmm1, %zmm1
; CHECK-NEXT:    vmovddup {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,2,2,4,4,6,6]
; CHECK-NEXT:    vaddpd %zmm1, %zmm0, %zmm0
; CHECK-NEXT:    retq
  %res = call <8 x double> @llvm.x86.avx512.mask.movddup.512(<8 x double> %x0, <8 x double> %x1, i8 %x2)
  %res1 = call <8 x double> @llvm.x86.avx512.mask.movddup.512(<8 x double> %x0, <8 x double> %x1, i8 -1)
  %res2 = call <8 x double> @llvm.x86.avx512.mask.movddup.512(<8 x double> %x0, <8 x double> zeroinitializer, i8 %x2)
  %res3 = fadd <8 x double> %res, %res1
  %res4 = fadd <8 x double> %res2, %res3
  ret <8 x double> %res4
}

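; Permutes of 64-bit elements by immediate (vpermpd/vpermq).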
declare <8 x double> @llvm.x86.avx512.mask.perm.df.512(<8 x double>, i32, <8 x double>, i8)

define <8 x double>@test_int_x86_avx512_mask_perm_df_512(<8 x double> %x0, i32 %x1, <8 x double> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_perm_df_512:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vpermpd {{.*#+}} zmm2 = zmm0[3,0,0,0,7,4,4,4]
; CHECK-NEXT:    kmovw %esi, %k1
; CHECK-NEXT:    vpermpd {{.*#+}} zmm1 {%k1} = zmm0[3,0,0,0,7,4,4,4]
; CHECK-NEXT:    vpermpd {{.*#+}} zmm0 {%k1} {z} = zmm0[3,0,0,0,7,4,4,4]
; CHECK-NEXT:    vaddpd %zmm0, %zmm1, %zmm0
; CHECK-NEXT:    vaddpd %zmm2, %zmm0, %zmm0
; CHECK-NEXT:    retq
  %res = call <8 x double> @llvm.x86.avx512.mask.perm.df.512(<8 x double> %x0, i32 3, <8 x double> %x2, i8 %x3)
  %res1 = call <8 x double> @llvm.x86.avx512.mask.perm.df.512(<8 x double> %x0, i32 3, <8 x double> zeroinitializer, i8 %x3)
  %res2 = call <8 x double> @llvm.x86.avx512.mask.perm.df.512(<8 x double> %x0, i32 3, <8 x double> %x2, i8 -1)
  %res3 = fadd <8 x double> %res, %res1
  %res4 = fadd <8 x double> %res3, %res2
  ret <8 x double> %res4
}

declare <8 x i64> @llvm.x86.avx512.mask.perm.di.512(<8 x i64>, i32, <8 x i64>, i8)

define <8 x i64>@test_int_x86_avx512_mask_perm_di_512(<8 x i64> %x0, i32 %x1, <8 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_perm_di_512:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vpermq {{.*#+}} zmm2 = zmm0[3,0,0,0,7,4,4,4]
; CHECK-NEXT:    kmovw %esi, %k1
; CHECK-NEXT:    vpermq {{.*#+}} zmm1 {%k1} = zmm0[3,0,0,0,7,4,4,4]
; CHECK-NEXT:    vpermq {{.*#+}} zmm0 {%k1} {z} = zmm0[3,0,0,0,7,4,4,4]
; CHECK-NEXT:    vpaddq %zmm0, %zmm1, %zmm0
; CHECK-NEXT:    vpaddq %zmm2, %zmm0, %zmm0
; CHECK-NEXT:    retq
  %res = call <8 x i64> @llvm.x86.avx512.mask.perm.di.512(<8 x i64> %x0, i32 3, <8 x i64> %x2, i8 %x3)
  %res1 = call <8 x i64> @llvm.x86.avx512.mask.perm.di.512(<8 x i64> %x0, i32 3, <8 x i64> zeroinitializer, i8 %x3)
  %res2 = call <8 x i64> @llvm.x86.avx512.mask.perm.di.512(<8 x i64> %x0, i32 3, <8 x i64> %x2, i8 -1)
  %res3 = add <8 x i64> %res, %res1
  %res4 = add <8 x i64> %res3, %res2
  ret <8 x i64> %res4
}

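; Masked stores: the first store uses the supplied mask, the second an all-ones
; mask that folds to a plain store.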
define void @test_store1(<16 x float> %data, i8* %ptr, i8* %ptr2, i16 %mask) {
; CHECK-LABEL: test_store1:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    kmovw %edx, %k1
; CHECK-NEXT:    vmovups %zmm0, (%rdi) {%k1}
; CHECK-NEXT:    vmovups %zmm0, (%rsi)
; CHECK-NEXT:    retq
  call void @llvm.x86.avx512.mask.storeu.ps.512(i8* %ptr, <16 x float> %data, i16 %mask)
  call void @llvm.x86.avx512.mask.storeu.ps.512(i8* %ptr2, <16 x float> %data, i16 -1)
  ret void
}

declare void @llvm.x86.avx512.mask.storeu.ps.512(i8*, <16 x float>, i16 )

define void @test_store2(<8 x double> %data, i8* %ptr, i8* %ptr2, i8 %mask) {
; CHECK-LABEL: test_store2:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    kmovw %edx, %k1
; CHECK-NEXT:    vmovupd %zmm0, (%rdi) {%k1}
; CHECK-NEXT:    vmovupd %zmm0, (%rsi)
; CHECK-NEXT:    retq
  call void @llvm.x86.avx512.mask.storeu.pd.512(i8* %ptr, <8 x double> %data, i8 %mask)
  call void @llvm.x86.avx512.mask.storeu.pd.512(i8* %ptr2, <8 x double> %data, i8 -1)
  ret void
}

declare void @llvm.x86.avx512.mask.storeu.pd.512(i8*, <8 x double>, i8)

define void @test_mask_store_aligned_ps(<16 x float> %data, i8* %ptr, i8* %ptr2, i16 %mask) {
; CHECK-LABEL: test_mask_store_aligned_ps:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    kmovw %edx, %k1
; CHECK-NEXT:    vmovaps %zmm0, (%rdi) {%k1}
; CHECK-NEXT:    vmovaps %zmm0, (%rsi)
; CHECK-NEXT:    retq
  call void @llvm.x86.avx512.mask.store.ps.512(i8* %ptr, <16 x float> %data, i16 %mask)
  call void @llvm.x86.avx512.mask.store.ps.512(i8* %ptr2, <16 x float> %data, i16 -1)
  ret void
}

declare void @llvm.x86.avx512.mask.store.ps.512(i8*, <16 x float>, i16 )

define void @test_mask_store_aligned_pd(<8 x double> %data, i8* %ptr, i8* %ptr2, i8 %mask) {
; CHECK-LABEL: test_mask_store_aligned_pd:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    kmovw %edx, %k1
; CHECK-NEXT:    vmovapd %zmm0, (%rdi) {%k1}
; CHECK-NEXT:    vmovapd %zmm0, (%rsi)
; CHECK-NEXT:    retq
  call void @llvm.x86.avx512.mask.store.pd.512(i8* %ptr, <8 x double> %data, i8 %mask)
  call void @llvm.x86.avx512.mask.store.pd.512(i8* %ptr2, <8 x double> %data, i8 -1)
  ret void
}

declare void @llvm.x86.avx512.mask.store.pd.512(i8*, <8 x double>, i8)

define void@test_int_x86_avx512_mask_storeu_q_512(i8* %ptr1, i8* %ptr2, <8 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_storeu_q_512:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    kmovw %edx, %k1
; CHECK-NEXT:    vmovdqu64 %zmm0, (%rdi) {%k1}
; CHECK-NEXT:    vmovdqu64 %zmm0, (%rsi)
; CHECK-NEXT:    retq
  call void @llvm.x86.avx512.mask.storeu.q.512(i8* %ptr1, <8 x i64> %x1, i8 %x2)
  call void @llvm.x86.avx512.mask.storeu.q.512(i8* %ptr2, <8 x i64> %x1, i8 -1)
  ret void
}

declare void @llvm.x86.avx512.mask.storeu.q.512(i8*, <8 x i64>, i8)

define void@test_int_x86_avx512_mask_storeu_d_512(i8* %ptr1, i8* %ptr2, <16 x i32> %x1, i16 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_storeu_d_512:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    kmovw %edx, %k1
; CHECK-NEXT:    vmovdqu32 %zmm0, (%rdi) {%k1}
; CHECK-NEXT:    vmovdqu64 %zmm0, (%rsi)
; CHECK-NEXT:    retq
  call void @llvm.x86.avx512.mask.storeu.d.512(i8* %ptr1, <16 x i32> %x1, i16 %x2)
  call void @llvm.x86.avx512.mask.storeu.d.512(i8* %ptr2, <16 x i32> %x1, i16 -1)
  ret void
}

declare void @llvm.x86.avx512.mask.storeu.d.512(i8*, <16 x i32>, i16)

define void@test_int_x86_avx512_mask_store_q_512(i8* %ptr1, i8* %ptr2, <8 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_store_q_512:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    kmovw %edx, %k1
; CHECK-NEXT:    vmovdqa64 %zmm0, (%rdi) {%k1}
; CHECK-NEXT:    vmovdqa64 %zmm0, (%rsi)
; CHECK-NEXT:    retq
  call void @llvm.x86.avx512.mask.store.q.512(i8* %ptr1, <8 x i64> %x1, i8 %x2)
  call void @llvm.x86.avx512.mask.store.q.512(i8* %ptr2, <8 x i64> %x1, i8 -1)
  ret void
}

declare void @llvm.x86.avx512.mask.store.q.512(i8*, <8 x i64>, i8)

define void@test_int_x86_avx512_mask_store_d_512(i8* %ptr1, i8* %ptr2, <16 x i32> %x1, i16 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_store_d_512:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    kmovw %edx, %k1
; CHECK-NEXT:    vmovdqa32 %zmm0, (%rdi) {%k1}
; CHECK-NEXT:    vmovdqa32 %zmm0, (%rsi)
; CHECK-NEXT:    retq
  call void @llvm.x86.avx512.mask.store.d.512(i8* %ptr1, <16 x i32> %x1, i16 %x2)
  call void @llvm.x86.avx512.mask.store.d.512(i8* %ptr2, <16 x i32> %x1, i16 -1)
  ret void
}

declare void @llvm.x86.avx512.mask.store.d.512(i8*, <16 x i32>, i16)

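; Masked loads: an unmasked load, a merge-masked reload of the same address,
; and a zero-masked load, with the results combined.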
define <16 x float> @test_mask_load_aligned_ps(<16 x float> %data, i8* %ptr, i16 %mask) {
; CHECK-LABEL: test_mask_load_aligned_ps:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vmovaps (%rdi), %zmm0
; CHECK-NEXT:    kmovw %esi, %k1
; CHECK-NEXT:    vmovaps (%rdi), %zmm0 {%k1}
; CHECK-NEXT:    vmovaps (%rdi), %zmm1 {%k1} {z}
; CHECK-NEXT:    vaddps %zmm0, %zmm1, %zmm0
; CHECK-NEXT:    retq
  %res = call <16 x float> @llvm.x86.avx512.mask.load.ps.512(i8* %ptr, <16 x float> zeroinitializer, i16 -1)
  %res1 = call <16 x float> @llvm.x86.avx512.mask.load.ps.512(i8* %ptr, <16 x float> %res, i16 %mask)
  %res2 = call <16 x float> @llvm.x86.avx512.mask.load.ps.512(i8* %ptr, <16 x float> zeroinitializer, i16 %mask)
  %res4 = fadd <16 x float> %res2, %res1
  ret <16 x float> %res4
}

declare <16 x float> @llvm.x86.avx512.mask.load.ps.512(i8*, <16 x float>, i16)

define <16 x float> @test_mask_load_unaligned_ps(<16 x float> %data, i8* %ptr, i16 %mask) {
; CHECK-LABEL: test_mask_load_unaligned_ps:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vmovups (%rdi), %zmm0
; CHECK-NEXT:    kmovw %esi, %k1
; CHECK-NEXT:    vmovups (%rdi), %zmm0 {%k1}
; CHECK-NEXT:    vmovups (%rdi), %zmm1 {%k1} {z}
; CHECK-NEXT:    vaddps %zmm0, %zmm1, %zmm0
; CHECK-NEXT:    retq
  %res = call <16 x float> @llvm.x86.avx512.mask.loadu.ps.512(i8* %ptr, <16 x float> zeroinitializer, i16 -1)
  %res1 = call <16 x float> @llvm.x86.avx512.mask.loadu.ps.512(i8* %ptr, <16 x float> %res, i16 %mask)
  %res2 = call <16 x float> @llvm.x86.avx512.mask.loadu.ps.512(i8* %ptr, <16 x float> zeroinitializer, i16 %mask)
  %res4 = fadd <16 x float> %res2, %res1
  ret <16 x float> %res4
}

declare <16 x float> @llvm.x86.avx512.mask.loadu.ps.512(i8*, <16 x float>, i16)

define <8 x double> @test_mask_load_aligned_pd(<8 x double> %data, i8* %ptr, i8 %mask) {
; CHECK-LABEL: test_mask_load_aligned_pd:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vmovapd (%rdi), %zmm0
; CHECK-NEXT:    kmovw %esi, %k1
; CHECK-NEXT:    vmovapd (%rdi), %zmm0 {%k1}
; CHECK-NEXT:    vmovapd (%rdi), %zmm1 {%k1} {z}
; CHECK-NEXT:    vaddpd %zmm0, %zmm1, %zmm0
; CHECK-NEXT:    retq
  %res = call <8 x double> @llvm.x86.avx512.mask.load.pd.512(i8* %ptr, <8 x double> zeroinitializer, i8 -1)
  %res1 = call <8 x double> @llvm.x86.avx512.mask.load.pd.512(i8* %ptr, <8 x double> %res, i8 %mask)
  %res2 = call <8 x double> @llvm.x86.avx512.mask.load.pd.512(i8* %ptr, <8 x double> zeroinitializer, i8 %mask)
  %res4 = fadd <8 x double> %res2, %res1
  ret <8 x double> %res4
}

declare <8 x double> @llvm.x86.avx512.mask.load.pd.512(i8*, <8 x double>, i8)

define <8 x double> @test_mask_load_unaligned_pd(<8 x double> %data, i8* %ptr, i8 %mask) {
; CHECK-LABEL: test_mask_load_unaligned_pd:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vmovupd (%rdi), %zmm0
; CHECK-NEXT:    kmovw %esi, %k1
; CHECK-NEXT:    vmovupd (%rdi), %zmm0 {%k1}
; CHECK-NEXT:    vmovupd (%rdi), %zmm1 {%k1} {z}
; CHECK-NEXT:    vaddpd %zmm0, %zmm1, %zmm0
; CHECK-NEXT:    retq
  %res = call <8 x double> @llvm.x86.avx512.mask.loadu.pd.512(i8* %ptr, <8 x double> zeroinitializer, i8 -1)
  %res1 = call <8 x double> @llvm.x86.avx512.mask.loadu.pd.512(i8* %ptr, <8 x double> %res, i8 %mask)
  %res2 = call <8 x double> @llvm.x86.avx512.mask.loadu.pd.512(i8* %ptr, <8 x double> zeroinitializer, i8 %mask)
  %res4 = fadd <8 x double> %res2, %res1
  ret <8 x double> %res4
}

declare <8 x double> @llvm.x86.avx512.mask.loadu.pd.512(i8*, <8 x double>, i8)

declare <16 x i32> @llvm.x86.avx512.mask.loadu.d.512(i8*, <16 x i32>, i16)

define <16 x i32> @test_mask_load_unaligned_d(i8* %ptr, i8* %ptr2, <16 x i32> %data, i16 %mask) {
; CHECK-LABEL: test_mask_load_unaligned_d:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vmovdqu64 (%rdi), %zmm0
; CHECK-NEXT:    kmovw %edx, %k1
; CHECK-NEXT:    vmovdqu32 (%rsi), %zmm0 {%k1}
; CHECK-NEXT:    vmovdqu32 (%rdi), %zmm1 {%k1} {z}
; CHECK-NEXT:    vpaddd %zmm0, %zmm1, %zmm0
; CHECK-NEXT:    retq
  %res = call <16 x i32> @llvm.x86.avx512.mask.loadu.d.512(i8* %ptr, <16 x i32> zeroinitializer, i16 -1)
  %res1 = call <16 x i32> @llvm.x86.avx512.mask.loadu.d.512(i8* %ptr2, <16 x i32> %res, i16 %mask)
  %res2 = call <16 x i32> @llvm.x86.avx512.mask.loadu.d.512(i8* %ptr, <16 x i32> zeroinitializer, i16 %mask)
  %res4 = add <16 x i32> %res2, %res1
  ret <16 x i32> %res4
}

declare <8 x i64> @llvm.x86.avx512.mask.loadu.q.512(i8*, <8 x i64>, i8)

define <8 x i64> @test_mask_load_unaligned_q(i8* %ptr, i8* %ptr2, <8 x i64> %data, i8 %mask) {
; CHECK-LABEL: test_mask_load_unaligned_q:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vmovdqu64 (%rdi), %zmm0
; CHECK-NEXT:    kmovw %edx, %k1
; CHECK-NEXT:    vmovdqu64 (%rsi), %zmm0 {%k1}
; CHECK-NEXT:    vmovdqu64 (%rdi), %zmm1 {%k1} {z}
; CHECK-NEXT:    vpaddq %zmm0, %zmm1, %zmm0
; CHECK-NEXT:    retq
  %res = call <8 x i64> @llvm.x86.avx512.mask.loadu.q.512(i8* %ptr, <8 x i64> zeroinitializer, i8 -1)
  %res1 = call <8 x i64> @llvm.x86.avx512.mask.loadu.q.512(i8* %ptr2, <8 x i64> %res, i8 %mask)
  %res2 = call <8 x i64> @llvm.x86.avx512.mask.loadu.q.512(i8* %ptr, <8 x i64> zeroinitializer, i8 %mask)
  %res4 = add <8 x i64> %res2, %res1
  ret <8 x i64> %res4
}

declare <16 x i32> @llvm.x86.avx512.mask.load.d.512(i8*, <16 x i32>, i16)

define <16 x i32> @test_mask_load_aligned_d(<16 x i32> %data, i8* %ptr, i16 %mask) {
; CHECK-LABEL: test_mask_load_aligned_d:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vmovdqa64 (%rdi), %zmm0
; CHECK-NEXT:    kmovw %esi, %k1
; CHECK-NEXT:    vmovdqa32 (%rdi), %zmm0 {%k1}
; CHECK-NEXT:    vmovdqa32 (%rdi), %zmm1 {%k1} {z}
; CHECK-NEXT:    vpaddd %zmm0, %zmm1, %zmm0
; CHECK-NEXT:    retq
  %res = call <16 x i32> @llvm.x86.avx512.mask.load.d.512(i8* %ptr, <16 x i32> zeroinitializer, i16 -1)
  %res1 = call <16 x i32> @llvm.x86.avx512.mask.load.d.512(i8* %ptr, <16 x i32> %res, i16 %mask)
  %res2 = call <16 x i32> @llvm.x86.avx512.mask.load.d.512(i8* %ptr, <16 x i32> zeroinitializer, i16 %mask)
  %res4 = add <16 x i32> %res2, %res1
  ret <16 x i32> %res4
}

declare <8 x i64> @llvm.x86.avx512.mask.load.q.512(i8*, <8 x i64>, i8)

define <8 x i64> @test_mask_load_aligned_q(<8 x i64> %data, i8* %ptr, i8 %mask) {
; CHECK-LABEL: test_mask_load_aligned_q:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vmovdqa64 (%rdi), %zmm0
; CHECK-NEXT:    kmovw %esi, %k1
; CHECK-NEXT:    vmovdqa64 (%rdi), %zmm0 {%k1}
; CHECK-NEXT:    vmovdqa64 (%rdi), %zmm1 {%k1} {z}
; CHECK-NEXT:    vpaddq %zmm0, %zmm1, %zmm0
; CHECK-NEXT:    retq
  %res = call <8 x i64> @llvm.x86.avx512.mask.load.q.512(i8* %ptr, <8 x i64> zeroinitializer, i8 -1)
  %res1 = call <8 x i64> @llvm.x86.avx512.mask.load.q.512(i8* %ptr, <8 x i64> %res, i8 %mask)
  %res2 = call <8 x i64> @llvm.x86.avx512.mask.load.q.512(i8* %ptr, <8 x i64> zeroinitializer, i8 %mask)
  %res4 = add <8 x i64> %res2, %res1
  ret <8 x i64> %res4
}

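; In-lane permutes by immediate (vpermilpd/vpermilps) and the dword shuffle (vpshufd).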
declare <8 x double> @llvm.x86.avx512.mask.vpermil.pd.512(<8 x double>, i32, <8 x double>, i8)

define <8 x double>@test_int_x86_avx512_mask_vpermil_pd_512(<8 x double> %x0, <8 x double> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpermil_pd_512:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vpermilpd {{.*#+}} zmm2 = zmm0[0,1,3,2,5,4,6,6]
; CHECK-NEXT:    kmovw %edi, %k1
; CHECK-NEXT:    vpermilpd {{.*#+}} zmm1 {%k1} = zmm0[0,1,3,2,5,4,6,6]
; CHECK-NEXT:    vpermilpd {{.*#+}} zmm0 {%k1} {z} = zmm0[0,1,3,2,5,4,6,6]
; CHECK-NEXT:    vaddpd %zmm0, %zmm1, %zmm0
; CHECK-NEXT:    vaddpd %zmm2, %zmm0, %zmm0
; CHECK-NEXT:    retq
  %res = call <8 x double> @llvm.x86.avx512.mask.vpermil.pd.512(<8 x double> %x0, i32 22, <8 x double> %x2, i8 %x3)
  %res1 = call <8 x double> @llvm.x86.avx512.mask.vpermil.pd.512(<8 x double> %x0, i32 22, <8 x double> zeroinitializer, i8 %x3)
  %res2 = call <8 x double> @llvm.x86.avx512.mask.vpermil.pd.512(<8 x double> %x0, i32 22, <8 x double> %x2, i8 -1)
  %res3 = fadd <8 x double> %res, %res1
  %res4 = fadd <8 x double> %res3, %res2
  ret <8 x double> %res4
}

declare <16 x float> @llvm.x86.avx512.mask.vpermil.ps.512(<16 x float>, i32, <16 x float>, i16)

define <16 x float>@test_int_x86_avx512_mask_vpermil_ps_512(<16 x float> %x0, <16 x float> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpermil_ps_512:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vpermilps {{.*#+}} zmm2 = zmm0[2,1,1,0,6,5,5,4,10,9,9,8,14,13,13,12]
; CHECK-NEXT:    kmovw %edi, %k1
; CHECK-NEXT:    vpermilps {{.*#+}} zmm1 {%k1} = zmm0[2,1,1,0,6,5,5,4,10,9,9,8,14,13,13,12]
; CHECK-NEXT:    vpermilps {{.*#+}} zmm0 {%k1} {z} = zmm0[2,1,1,0,6,5,5,4,10,9,9,8,14,13,13,12]
; CHECK-NEXT:    vaddps %zmm0, %zmm1, %zmm0
; CHECK-NEXT:    vaddps %zmm2, %zmm0, %zmm0
; CHECK-NEXT:    retq
  %res = call <16 x float> @llvm.x86.avx512.mask.vpermil.ps.512(<16 x float> %x0, i32 22, <16 x float> %x2, i16 %x3)
  %res1 = call <16 x float> @llvm.x86.avx512.mask.vpermil.ps.512(<16 x float> %x0, i32 22, <16 x float> zeroinitializer, i16 %x3)
  %res2 = call <16 x float> @llvm.x86.avx512.mask.vpermil.ps.512(<16 x float> %x0, i32 22, <16 x float> %x2, i16 -1)
  %res3 = fadd <16 x float> %res, %res1
  %res4 = fadd <16 x float> %res3, %res2
  ret <16 x float> %res4
}

declare <16 x i32> @llvm.x86.avx512.mask.pshuf.d.512(<16 x i32>, i32, <16 x i32>, i16)

define <16 x i32>@test_int_x86_avx512_mask_pshuf_d_512(<16 x i32> %x0, i32 %x1, <16 x i32> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_pshuf_d_512:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vpshufd {{.*#+}} zmm2 = zmm0[3,0,0,0,7,4,4,4,11,8,8,8,15,12,12,12]
; CHECK-NEXT:    kmovw %esi, %k1
; CHECK-NEXT:    vpshufd {{.*#+}} zmm1 {%k1} = zmm0[3,0,0,0,7,4,4,4,11,8,8,8,15,12,12,12]
; CHECK-NEXT:    vpshufd {{.*#+}} zmm0 {%k1} {z} = zmm0[3,0,0,0,7,4,4,4,11,8,8,8,15,12,12,12]
; CHECK-NEXT:    vpaddd %zmm0, %zmm1, %zmm0
; CHECK-NEXT:    vpaddd %zmm2, %zmm0, %zmm0
; CHECK-NEXT:    retq
  %res = call <16 x i32> @llvm.x86.avx512.mask.pshuf.d.512(<16 x i32> %x0, i32 3, <16 x i32> %x2, i16 %x3)
  %res1 = call <16 x i32> @llvm.x86.avx512.mask.pshuf.d.512(<16 x i32> %x0, i32 3, <16 x i32> zeroinitializer, i16 %x3)
  %res2 = call <16 x i32> @llvm.x86.avx512.mask.pshuf.d.512(<16 x i32> %x0, i32 3, <16 x i32> %x2, i16 -1)
  %res3 = add <16 x i32> %res, %res1
  %res4 = add <16 x i32> %res3, %res2
  ret <16 x i32> %res4
}

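; Vector integer compares producing a k-mask; the result is copied to a GPR
; and truncated to the i16/i8 return type (the "kill" annotations below).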
define i16 @test_pcmpeq_d(<16 x i32> %a, <16 x i32> %b) {
; CHECK-LABEL: test_pcmpeq_d:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vpcmpeqd %zmm1, %zmm0, %k0
; CHECK-NEXT:    kmovw %k0, %eax
; CHECK-NEXT:    ## kill: def %ax killed %ax killed %eax
; CHECK-NEXT:    retq
  %res = call i16 @llvm.x86.avx512.mask.pcmpeq.d.512(<16 x i32> %a, <16 x i32> %b, i16 -1)
  ret i16 %res
}

define i16 @test_mask_pcmpeq_d(<16 x i32> %a, <16 x i32> %b, i16 %mask) {
; CHECK-LABEL: test_mask_pcmpeq_d:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    kmovw %edi, %k1
; CHECK-NEXT:    vpcmpeqd %zmm1, %zmm0, %k0 {%k1}
; CHECK-NEXT:    kmovw %k0, %eax
; CHECK-NEXT:    ## kill: def %ax killed %ax killed %eax
; CHECK-NEXT:    retq
  %res = call i16 @llvm.x86.avx512.mask.pcmpeq.d.512(<16 x i32> %a, <16 x i32> %b, i16 %mask)
  ret i16 %res
}

declare i16 @llvm.x86.avx512.mask.pcmpeq.d.512(<16 x i32>, <16 x i32>, i16)

define i8 @test_pcmpeq_q(<8 x i64> %a, <8 x i64> %b) {
; CHECK-LABEL: test_pcmpeq_q:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vpcmpeqq %zmm1, %zmm0, %k0
; CHECK-NEXT:    kmovw %k0, %eax
; CHECK-NEXT:    ## kill: def %al killed %al killed %eax
; CHECK-NEXT:    retq
  %res = call i8 @llvm.x86.avx512.mask.pcmpeq.q.512(<8 x i64> %a, <8 x i64> %b, i8 -1)
  ret i8 %res
}

define i8 @test_mask_pcmpeq_q(<8 x i64> %a, <8 x i64> %b, i8 %mask) {
; CHECK-LABEL: test_mask_pcmpeq_q:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    kmovw %edi, %k1
; CHECK-NEXT:    vpcmpeqq %zmm1, %zmm0, %k0 {%k1}
; CHECK-NEXT:    kmovw %k0, %eax
; CHECK-NEXT:    ## kill: def %al killed %al killed %eax
; CHECK-NEXT:    retq
  %res = call i8 @llvm.x86.avx512.mask.pcmpeq.q.512(<8 x i64> %a, <8 x i64> %b, i8 %mask)
  ret i8 %res
}

declare i8 @llvm.x86.avx512.mask.pcmpeq.q.512(<8 x i64>, <8 x i64>, i8)

define i16 @test_pcmpgt_d(<16 x i32> %a, <16 x i32> %b) {
; CHECK-LABEL: test_pcmpgt_d:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vpcmpgtd %zmm1, %zmm0, %k0
; CHECK-NEXT:    kmovw %k0, %eax
; CHECK-NEXT:    ## kill: def %ax killed %ax killed %eax
; CHECK-NEXT:    retq
  %res = call i16 @llvm.x86.avx512.mask.pcmpgt.d.512(<16 x i32> %a, <16 x i32> %b, i16 -1)
  ret i16 %res
}

define i16 @test_mask_pcmpgt_d(<16 x i32> %a, <16 x i32> %b, i16 %mask) {
; CHECK-LABEL: test_mask_pcmpgt_d:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    kmovw %edi, %k1
; CHECK-NEXT:    vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
; CHECK-NEXT:    kmovw %k0, %eax
; CHECK-NEXT:    ## kill: def %ax killed %ax killed %eax
; CHECK-NEXT:    retq
  %res = call i16 @llvm.x86.avx512.mask.pcmpgt.d.512(<16 x i32> %a, <16 x i32> %b, i16 %mask)
  ret i16 %res
}

declare i16 @llvm.x86.avx512.mask.pcmpgt.d.512(<16 x i32>, <16 x i32>, i16)

define i8 @test_pcmpgt_q(<8 x i64> %a, <8 x i64> %b) {
; CHECK-LABEL: test_pcmpgt_q:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vpcmpgtq %zmm1, %zmm0, %k0
; CHECK-NEXT:    kmovw %k0, %eax
; CHECK-NEXT:    ## kill: def %al killed %al killed %eax
; CHECK-NEXT:    retq
  %res = call i8 @llvm.x86.avx512.mask.pcmpgt.q.512(<8 x i64> %a, <8 x i64> %b, i8 -1)
  ret i8 %res
}

define i8 @test_mask_pcmpgt_q(<8 x i64> %a, <8 x i64> %b, i8 %mask) {
; CHECK-LABEL: test_mask_pcmpgt_q:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    kmovw %edi, %k1
; CHECK-NEXT:    vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
; CHECK-NEXT:    kmovw %k0, %eax
; CHECK-NEXT:    ## kill: def %al killed %al killed %eax
; CHECK-NEXT:    retq
  %res = call i8 @llvm.x86.avx512.mask.pcmpgt.q.512(<8 x i64> %a, <8 x i64> %b, i8 %mask)
  ret i8 %res
}

declare i8 @llvm.x86.avx512.mask.pcmpgt.q.512(<8 x i64>, <8 x i64>, i8)

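; Interleave (unpack) intrinsics for double, float and integer elements.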
declare <8 x double> @llvm.x86.avx512.mask.unpckh.pd.512(<8 x double>, <8 x double>, <8 x double>, i8)

define <8 x double>@test_int_x86_avx512_mask_unpckh_pd_512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_unpckh_pd_512:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vunpckhpd {{.*#+}} zmm3 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
; CHECK-NEXT:    kmovw %edi, %k1
; CHECK-NEXT:    vunpckhpd {{.*#+}} zmm2 {%k1} = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
; CHECK-NEXT:    vaddpd %zmm3, %zmm2, %zmm0
; CHECK-NEXT:    retq
  %res = call <8 x double> @llvm.x86.avx512.mask.unpckh.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3)
  %res1 = call <8 x double> @llvm.x86.avx512.mask.unpckh.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 -1)
  %res2 = fadd <8 x double> %res, %res1
  ret <8 x double> %res2
}

declare <16 x float> @llvm.x86.avx512.mask.unpckh.ps.512(<16 x float>, <16 x float>, <16 x float>, i16)

define <16 x float>@test_int_x86_avx512_mask_unpckh_ps_512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_unpckh_ps_512:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vunpckhps {{.*#+}} zmm3 = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15]
; CHECK-NEXT:    kmovw %edi, %k1
; CHECK-NEXT:    vunpckhps {{.*#+}} zmm2 {%k1} = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15]
; CHECK-NEXT:    vaddps %zmm3, %zmm2, %zmm0
; CHECK-NEXT:    retq
  %res = call <16 x float> @llvm.x86.avx512.mask.unpckh.ps.512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3)
  %res1 = call <16 x float> @llvm.x86.avx512.mask.unpckh.ps.512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 -1)
  %res2 = fadd <16 x float> %res, %res1
  ret <16 x float> %res2
}

declare <8 x double> @llvm.x86.avx512.mask.unpckl.pd.512(<8 x double>, <8 x double>, <8 x double>, i8)

define <8 x double>@test_int_x86_avx512_mask_unpckl_pd_512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_unpckl_pd_512:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vunpcklpd {{.*#+}} zmm3 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
; CHECK-NEXT:    kmovw %edi, %k1
; CHECK-NEXT:    vunpcklpd {{.*#+}} zmm2 {%k1} = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
; CHECK-NEXT:    vaddpd %zmm3, %zmm2, %zmm0
; CHECK-NEXT:    retq
  %res = call <8 x double> @llvm.x86.avx512.mask.unpckl.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3)
  %res1 = call <8 x double> @llvm.x86.avx512.mask.unpckl.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 -1)
  %res2 = fadd <8 x double> %res, %res1
  ret <8 x double> %res2
}

declare <16 x float> @llvm.x86.avx512.mask.unpckl.ps.512(<16 x float>, <16 x float>, <16 x float>, i16)

define <16 x float>@test_int_x86_avx512_mask_unpckl_ps_512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_unpckl_ps_512:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vunpcklps {{.*#+}} zmm3 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13]
; CHECK-NEXT:    kmovw %edi, %k1
; CHECK-NEXT:    vunpcklps {{.*#+}} zmm2 {%k1} = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13]
; CHECK-NEXT:    vaddps %zmm3, %zmm2, %zmm0
; CHECK-NEXT:    retq
  %res = call <16 x float> @llvm.x86.avx512.mask.unpckl.ps.512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3)
  %res1 = call <16 x float> @llvm.x86.avx512.mask.unpckl.ps.512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 -1)
  %res2 = fadd <16 x float> %res, %res1
  ret <16 x float> %res2
}

declare <8 x i64> @llvm.x86.avx512.mask.punpcklqd.q.512(<8 x i64>, <8 x i64>, <8 x i64>, i8)

define <8 x i64>@test_int_x86_avx512_mask_punpcklqd_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_punpcklqd_q_512:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vpunpcklqdq {{.*#+}} zmm3 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
; CHECK-NEXT:    kmovw %edi, %k1
; CHECK-NEXT:    vpunpcklqdq {{.*#+}} zmm2 {%k1} = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
; CHECK-NEXT:    vpaddq %zmm3, %zmm2, %zmm2
; CHECK-NEXT:    vpunpcklqdq {{.*#+}} zmm0 {%k1} {z} = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
; CHECK-NEXT:    vpaddq %zmm2, %zmm0, %zmm0
; CHECK-NEXT:    retq
  %res = call <8 x i64> @llvm.x86.avx512.mask.punpcklqd.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3)
  %res1 = call <8 x i64> @llvm.x86.avx512.mask.punpcklqd.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 -1)
  %res2 = call <8 x i64> @llvm.x86.avx512.mask.punpcklqd.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> zeroinitializer,i8 %x3)
  %res3 = add <8 x i64> %res, %res1
  %res4 = add <8 x i64> %res2, %res3
  ret <8 x i64> %res4
}

declare <8 x i64> @llvm.x86.avx512.mask.punpckhqd.q.512(<8 x i64>, <8 x i64>, <8 x i64>, i8)

define <8 x i64>@test_int_x86_avx512_mask_punpckhqd_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_punpckhqd_q_512:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vpunpckhqdq {{.*#+}} zmm3 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
; CHECK-NEXT:    kmovw %edi, %k1
; CHECK-NEXT:    vpunpckhqdq {{.*#+}} zmm2 {%k1} = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
; CHECK-NEXT:    vpaddq %zmm3, %zmm2, %zmm0
; CHECK-NEXT:    retq
  %res = call <8 x i64> @llvm.x86.avx512.mask.punpckhqd.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3)
  %res1 = call <8 x i64> @llvm.x86.avx512.mask.punpckhqd.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 -1)
  %res2 = add <8 x i64> %res, %res1
  ret <8 x i64> %res2
}

declare <16 x i32> @llvm.x86.avx512.mask.punpckhd.q.512(<16 x i32>, <16 x i32>, <16 x i32>, i16)

define <16 x i32>@test_int_x86_avx512_mask_punpckhd_q_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_punpckhd_q_512:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vpunpckhdq {{.*#+}} zmm3 = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15]
; CHECK-NEXT:    kmovw %edi, %k1
; CHECK-NEXT:    vpunpckhdq {{.*#+}} zmm2 {%k1} = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15]
; CHECK-NEXT:    vpaddd %zmm3, %zmm2, %zmm0
; CHECK-NEXT:    retq
  %res = call <16 x i32> @llvm.x86.avx512.mask.punpckhd.q.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x3)
  %res1 = call <16 x i32> @llvm.x86.avx512.mask.punpckhd.q.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 -1)
  %res2 = add <16 x i32> %res, %res1
  ret <16 x i32> %res2
}

declare <16 x i32> @llvm.x86.avx512.mask.punpckld.q.512(<16 x i32>, <16 x i32>, <16 x i32>, i16)

define <16 x i32>@test_int_x86_avx512_mask_punpckld_q_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_punpckld_q_512:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vpunpckldq {{.*#+}} zmm3 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13]
; CHECK-NEXT:    kmovw %edi, %k1
; CHECK-NEXT:    vpunpckldq {{.*#+}} zmm2 {%k1} = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13]
; CHECK-NEXT:    vpaddd %zmm3, %zmm2, %zmm0
; CHECK-NEXT:    retq
  %res = call <16 x i32> @llvm.x86.avx512.mask.punpckld.q.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x3)
  %res1 = call <16 x i32> @llvm.x86.avx512.mask.punpckld.q.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 -1)
  %res2 = add <16 x i32> %res, %res1
  ret <16 x i32> %res2
}

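; Shift-by-immediate intrinsics (logical left/right and arithmetic right) in
; unmasked, merge-masked and zero-masked forms.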
define <16 x i32> @test_x86_avx512_pslli_d(<16 x i32> %a0) {
; CHECK-LABEL: test_x86_avx512_pslli_d:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vpslld $7, %zmm0, %zmm0
; CHECK-NEXT:    retq
  %res = call <16 x i32> @llvm.x86.avx512.mask.pslli.d(<16 x i32> %a0, i32 7, <16 x i32> zeroinitializer, i16 -1)
  ret <16 x i32> %res
}

define <16 x i32> @test_x86_avx512_mask_pslli_d(<16 x i32> %a0, <16 x i32> %a1, i16 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_pslli_d:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    kmovw %edi, %k1
; CHECK-NEXT:    vpslld $7, %zmm0, %zmm1 {%k1}
; CHECK-NEXT:    vmovdqa64 %zmm1, %zmm0
; CHECK-NEXT:    retq
  %res = call <16 x i32> @llvm.x86.avx512.mask.pslli.d(<16 x i32> %a0, i32 7, <16 x i32> %a1, i16 %mask)
  ret <16 x i32> %res
}

define <16 x i32> @test_x86_avx512_maskz_pslli_d(<16 x i32> %a0, i16 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_pslli_d:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    kmovw %edi, %k1
; CHECK-NEXT:    vpslld $7, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT:    retq
  %res = call <16 x i32> @llvm.x86.avx512.mask.pslli.d(<16 x i32> %a0, i32 7, <16 x i32> zeroinitializer, i16 %mask)
  ret <16 x i32> %res
}

declare <16 x i32> @llvm.x86.avx512.mask.pslli.d(<16 x i32>, i32, <16 x i32>, i16) nounwind readnone

define <8 x i64> @test_x86_avx512_pslli_q(<8 x i64> %a0) {
; CHECK-LABEL: test_x86_avx512_pslli_q:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vpsllq $7, %zmm0, %zmm0
; CHECK-NEXT:    retq
  %res = call <8 x i64> @llvm.x86.avx512.mask.pslli.q(<8 x i64> %a0, i32 7, <8 x i64> zeroinitializer, i8 -1)
  ret <8 x i64> %res
}

define <8 x i64> @test_x86_avx512_mask_pslli_q(<8 x i64> %a0, <8 x i64> %a1, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_pslli_q:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    kmovw %edi, %k1
; CHECK-NEXT:    vpsllq $7, %zmm0, %zmm1 {%k1}
; CHECK-NEXT:    vmovdqa64 %zmm1, %zmm0
; CHECK-NEXT:    retq
  %res = call <8 x i64> @llvm.x86.avx512.mask.pslli.q(<8 x i64> %a0, i32 7, <8 x i64> %a1, i8 %mask)
  ret <8 x i64> %res
}

define <8 x i64> @test_x86_avx512_maskz_pslli_q(<8 x i64> %a0, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_pslli_q:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    kmovw %edi, %k1
; CHECK-NEXT:    vpsllq $7, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT:    retq
  %res = call <8 x i64> @llvm.x86.avx512.mask.pslli.q(<8 x i64> %a0, i32 7, <8 x i64> zeroinitializer, i8 %mask)
  ret <8 x i64> %res
}

declare <8 x i64> @llvm.x86.avx512.mask.pslli.q(<8 x i64>, i32, <8 x i64>, i8) nounwind readnone

define <16 x i32> @test_x86_avx512_psrli_d(<16 x i32> %a0) {
; CHECK-LABEL: test_x86_avx512_psrli_d:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vpsrld $7, %zmm0, %zmm0
; CHECK-NEXT:    retq
  %res = call <16 x i32> @llvm.x86.avx512.mask.psrli.d(<16 x i32> %a0, i32 7, <16 x i32> zeroinitializer, i16 -1)
  ret <16 x i32> %res
}

define <16 x i32> @test_x86_avx512_mask_psrli_d(<16 x i32> %a0, <16 x i32> %a1, i16 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_psrli_d:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    kmovw %edi, %k1
; CHECK-NEXT:    vpsrld $7, %zmm0, %zmm1 {%k1}
; CHECK-NEXT:    vmovdqa64 %zmm1, %zmm0
; CHECK-NEXT:    retq
  %res = call <16 x i32> @llvm.x86.avx512.mask.psrli.d(<16 x i32> %a0, i32 7, <16 x i32> %a1, i16 %mask)
  ret <16 x i32> %res
}

define <16 x i32> @test_x86_avx512_maskz_psrli_d(<16 x i32> %a0, i16 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_psrli_d:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    kmovw %edi, %k1
; CHECK-NEXT:    vpsrld $7, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT:    retq
  %res = call <16 x i32> @llvm.x86.avx512.mask.psrli.d(<16 x i32> %a0, i32 7, <16 x i32> zeroinitializer, i16 %mask)
  ret <16 x i32> %res
}

declare <16 x i32> @llvm.x86.avx512.mask.psrli.d(<16 x i32>, i32, <16 x i32>, i16) nounwind readnone

define <8 x i64> @test_x86_avx512_psrli_q(<8 x i64> %a0) {
; CHECK-LABEL: test_x86_avx512_psrli_q:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vpsrlq $7, %zmm0, %zmm0
; CHECK-NEXT:    retq
  %res = call <8 x i64> @llvm.x86.avx512.mask.psrli.q(<8 x i64> %a0, i32 7, <8 x i64> zeroinitializer, i8 -1)
  ret <8 x i64> %res
}

define <8 x i64> @test_x86_avx512_mask_psrli_q(<8 x i64> %a0, <8 x i64> %a1, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_psrli_q:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    kmovw %edi, %k1
; CHECK-NEXT:    vpsrlq $7, %zmm0, %zmm1 {%k1}
; CHECK-NEXT:    vmovdqa64 %zmm1, %zmm0
; CHECK-NEXT:    retq
  %res = call <8 x i64> @llvm.x86.avx512.mask.psrli.q(<8 x i64> %a0, i32 7, <8 x i64> %a1, i8 %mask)
  ret <8 x i64> %res
}

define <8 x i64> @test_x86_avx512_maskz_psrli_q(<8 x i64> %a0, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_psrli_q:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    kmovw %edi, %k1
; CHECK-NEXT:    vpsrlq $7, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT:    retq
  %res = call <8 x i64> @llvm.x86.avx512.mask.psrli.q(<8 x i64> %a0, i32 7, <8 x i64> zeroinitializer, i8 %mask)
  ret <8 x i64> %res
}

declare <8 x i64> @llvm.x86.avx512.mask.psrli.q(<8 x i64>, i32, <8 x i64>, i8) nounwind readnone

define <16 x i32> @test_x86_avx512_psrai_d(<16 x i32> %a0) {
; CHECK-LABEL: test_x86_avx512_psrai_d:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vpsrad $7, %zmm0, %zmm0
; CHECK-NEXT:    retq
  %res = call <16 x i32> @llvm.x86.avx512.mask.psrai.d(<16 x i32> %a0, i32 7, <16 x i32> zeroinitializer, i16 -1)
  ret <16 x i32> %res
}

define <16 x i32> @test_x86_avx512_mask_psrai_d(<16 x i32> %a0, <16 x i32> %a1, i16 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_psrai_d:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    kmovw %edi, %k1
; CHECK-NEXT:    vpsrad $7, %zmm0, %zmm1 {%k1}
; CHECK-NEXT:    vmovdqa64 %zmm1, %zmm0
; CHECK-NEXT:    retq
  %res = call <16 x i32> @llvm.x86.avx512.mask.psrai.d(<16 x i32> %a0, i32 7, <16 x i32> %a1, i16 %mask)
  ret <16 x i32> %res
}

define <16 x i32> @test_x86_avx512_maskz_psrai_d(<16 x i32> %a0, i16 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_psrai_d:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    kmovw %edi, %k1
; CHECK-NEXT:    vpsrad $7, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT:    retq
  %res = call <16 x i32> @llvm.x86.avx512.mask.psrai.d(<16 x i32> %a0, i32 7, <16 x i32> zeroinitializer, i16 %mask)
  ret <16 x i32> %res
}

declare <16 x i32> @llvm.x86.avx512.mask.psrai.d(<16 x i32>, i32, <16 x i32>, i16) nounwind readnone

define <8 x i64> @test_x86_avx512_psrai_q(<8 x i64> %a0) {
; CHECK-LABEL: test_x86_avx512_psrai_q:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vpsraq $7, %zmm0, %zmm0
; CHECK-NEXT:    retq
  %res = call <8 x i64> @llvm.x86.avx512.mask.psrai.q(<8 x i64> %a0, i32 7, <8 x i64> zeroinitializer, i8 -1)
  ret <8 x i64> %res
}

define <8 x i64> @test_x86_avx512_mask_psrai_q(<8 x i64> %a0, <8 x i64> %a1, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_psrai_q:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    kmovw %edi, %k1
; CHECK-NEXT:    vpsraq $7, %zmm0, %zmm1 {%k1}
; CHECK-NEXT:    vmovdqa64 %zmm1, %zmm0
; CHECK-NEXT:    retq
  %res = call <8 x i64> @llvm.x86.avx512.mask.psrai.q(<8 x i64> %a0, i32 7, <8 x i64> %a1, i8 %mask)
  ret <8 x i64> %res
}

define <8 x i64> @test_x86_avx512_maskz_psrai_q(<8 x i64> %a0, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_psrai_q:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    kmovw %edi, %k1
; CHECK-NEXT:    vpsraq $7, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT:    retq
  %res = call <8 x i64> @llvm.x86.avx512.mask.psrai.q(<8 x i64> %a0, i32 7, <8 x i64> zeroinitializer, i8 %mask)
  ret <8 x i64> %res
}

declare <8 x i64> @llvm.x86.avx512.mask.psrai.q(<8 x i64>, i32, <8 x i64>, i8) nounwind readnone

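; Non-temporal stores; all three element types lower to vmovntps here.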
declare void @llvm.x86.avx512.storent.q.512(i8*, <8 x i64>)

define void@test_storent_q_512(<8 x i64> %data, i8* %ptr) {
; CHECK-LABEL: test_storent_q_512:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vmovntps %zmm0, (%rdi)
; CHECK-NEXT:    retq
  call void @llvm.x86.avx512.storent.q.512(i8* %ptr, <8 x i64> %data)
  ret void
}

declare void @llvm.x86.avx512.storent.pd.512(i8*, <8 x double>)

define void @test_storent_pd_512(<8 x double> %data, i8* %ptr) {
; CHECK-LABEL: test_storent_pd_512:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vmovntps %zmm0, (%rdi)
; CHECK-NEXT:    retq
  call void @llvm.x86.avx512.storent.pd.512(i8* %ptr, <8 x double> %data)
  ret void
}

declare void @llvm.x86.avx512.storent.ps.512(i8*, <16 x float>)

define void @test_storent_ps_512(<16 x float> %data, i8* %ptr) {
; CHECK-LABEL: test_storent_ps_512:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vmovntps %zmm0, (%rdi)
; CHECK-NEXT:    retq
  call void @llvm.x86.avx512.storent.ps.512(i8* %ptr, <16 x float> %data)
  ret void
}

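; Bitwise logic intrinsics; the unmasked dword forms lower to the quadword
; encodings (vpxorq/vporq/vpandq) here, while the masked forms keep the dword ones.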
define <16 x i32> @test_xor_epi32(<16 x i32> %a, <16 x i32> %b) {
; CHECK-LABEL: test_xor_epi32:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vpxorq %zmm1, %zmm0, %zmm0
; CHECK-NEXT:    retq
  %res = call <16 x i32> @llvm.x86.avx512.mask.pxor.d.512(<16 x i32> %a,<16 x i32> %b, <16 x i32>zeroinitializer, i16 -1)
  ret < 16 x i32> %res
}

define <16 x i32> @test_mask_xor_epi32(<16 x i32> %a,<16 x i32> %b, <16 x i32> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_xor_epi32:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    kmovw %edi, %k1
; CHECK-NEXT:    vpxord %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT:    vmovdqa64 %zmm2, %zmm0
; CHECK-NEXT:    retq
  %res = call <16 x i32> @llvm.x86.avx512.mask.pxor.d.512(<16 x i32> %a, <16 x i32> %b, <16 x i32> %passThru, i16 %mask)
  ret < 16 x i32> %res
}

declare <16 x i32> @llvm.x86.avx512.mask.pxor.d.512(<16 x i32>, <16 x i32>, <16 x i32>, i16)

define <16 x i32> @test_or_epi32(<16 x i32> %a, <16 x i32> %b) {
; CHECK-LABEL: test_or_epi32:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vporq %zmm1, %zmm0, %zmm0
; CHECK-NEXT:    retq
  %res = call <16 x i32> @llvm.x86.avx512.mask.por.d.512(<16 x i32> %a,<16 x i32> %b, <16 x i32>zeroinitializer, i16 -1)
  ret < 16 x i32> %res
}

define <16 x i32> @test_mask_or_epi32(<16 x i32> %a,<16 x i32> %b, <16 x i32> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_or_epi32:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    kmovw %edi, %k1
; CHECK-NEXT:    vpord %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT:    vmovdqa64 %zmm2, %zmm0
; CHECK-NEXT:    retq
  %res = call <16 x i32> @llvm.x86.avx512.mask.por.d.512(<16 x i32> %a, <16 x i32> %b, <16 x i32> %passThru, i16 %mask)
  ret < 16 x i32> %res
}

declare <16 x i32> @llvm.x86.avx512.mask.por.d.512(<16 x i32>, <16 x i32>, <16 x i32>, i16)

define <16 x i32> @test_and_epi32(<16 x i32> %a, <16 x i32> %b) {
; CHECK-LABEL: test_and_epi32:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vpandq %zmm1, %zmm0, %zmm0
; CHECK-NEXT:    retq
  %res = call <16 x i32> @llvm.x86.avx512.mask.pand.d.512(<16 x i32> %a,<16 x i32> %b, <16 x i32>zeroinitializer, i16 -1)
  ret < 16 x i32> %res
}

define <16 x i32> @test_mask_and_epi32(<16 x i32> %a,<16 x i32> %b, <16 x i32> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_and_epi32:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    kmovw %edi, %k1
; CHECK-NEXT:    vpandd %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT:    vmovdqa64 %zmm2, %zmm0
; CHECK-NEXT:    retq
  %res = call <16 x i32> @llvm.x86.avx512.mask.pand.d.512(<16 x i32> %a, <16 x i32> %b, <16 x i32> %passThru, i16 %mask)
  ret < 16 x i32> %res
}

declare <16 x i32> @llvm.x86.avx512.mask.pand.d.512(<16 x i32>, <16 x i32>, <16 x i32>, i16)

define <8 x i64> @test_xor_epi64(<8 x i64> %a, <8 x i64> %b) {
; CHECK-LABEL: test_xor_epi64:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vpxorq %zmm1, %zmm0, %zmm0
; CHECK-NEXT:    retq
  %res = call <8 x i64> @llvm.x86.avx512.mask.pxor.q.512(<8 x i64> %a,<8 x i64> %b, <8 x i64>zeroinitializer, i8 -1)
  ret < 8 x i64> %res
}

define <8 x i64> @test_mask_xor_epi64(<8 x i64> %a,<8 x i64> %b, <8 x i64> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_xor_epi64:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    kmovw %edi, %k1
; CHECK-NEXT:    vpxorq %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT:    vmovdqa64 %zmm2, %zmm0
; CHECK-NEXT:    retq
  %res = call <8 x i64> @llvm.x86.avx512.mask.pxor.q.512(<8 x i64> %a, <8 x i64> %b, <8 x i64> %passThru, i8 %mask)
  ret < 8 x i64> %res
}

declare <8 x i64> @llvm.x86.avx512.mask.pxor.q.512(<8 x i64>, <8 x i64>, <8 x i64>, i8)

define <8 x i64> @test_or_epi64(<8 x i64> %a, <8 x i64> %b) {
; CHECK-LABEL: test_or_epi64:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vporq %zmm1, %zmm0, %zmm0
; CHECK-NEXT:    retq
  %res = call <8 x i64> @llvm.x86.avx512.mask.por.q.512(<8 x i64> %a,<8 x i64> %b, <8 x i64>zeroinitializer, i8 -1)
  ret < 8 x i64> %res
}

define <8 x i64> @test_mask_or_epi64(<8 x i64> %a,<8 x i64> %b, <8 x i64> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_or_epi64:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    kmovw %edi, %k1
; CHECK-NEXT:    vporq %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT:    vmovdqa64 %zmm2, %zmm0
; CHECK-NEXT:    retq
  %res = call <8 x i64> @llvm.x86.avx512.mask.por.q.512(<8 x i64> %a, <8 x i64> %b, <8 x i64> %passThru, i8 %mask)
  ret < 8 x i64> %res
}

declare <8 x i64> @llvm.x86.avx512.mask.por.q.512(<8 x i64>, <8 x i64>, <8 x i64>, i8)

define <8 x i64> @test_and_epi64(<8 x i64> %a, <8 x i64> %b) {
; CHECK-LABEL: test_and_epi64:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vpandq %zmm1, %zmm0, %zmm0
; CHECK-NEXT:    retq
  %res = call <8 x i64> @llvm.x86.avx512.mask.pand.q.512(<8 x i64> %a,<8 x i64> %b, <8 x i64>zeroinitializer, i8 -1)
  ret < 8 x i64> %res
}

define <8 x i64> @test_mask_and_epi64(<8 x i64> %a,<8 x i64> %b, <8 x i64> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_and_epi64:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    kmovw %edi, %k1
; CHECK-NEXT:    vpandq %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT:    vmovdqa64 %zmm2, %zmm0
; CHECK-NEXT:    retq
  %res = call <8 x i64> @llvm.x86.avx512.mask.pand.q.512(<8 x i64> %a, <8 x i64> %b, <8 x i64> %passThru, i8 %mask)
  ret < 8 x i64> %res
}

declare <8 x i64> @llvm.x86.avx512.mask.pand.q.512(<8 x i64>, <8 x i64>, <8 x i64>, i8)

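; Masked add/sub with register (rr), memory (rm) and broadcast (rmb) operands,
; each in merge (k) and zeroing (kz) variants.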
define <16 x i32> @test_mask_add_epi32_rr(<16 x i32> %a, <16 x i32> %b) {
; CHECK-LABEL: test_mask_add_epi32_rr:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vpaddd %zmm1, %zmm0, %zmm0
; CHECK-NEXT:    retq
  %res = call <16 x i32> @llvm.x86.avx512.mask.padd.d.512(<16 x i32> %a, <16 x i32> %b, <16 x i32> zeroinitializer, i16 -1)
  ret < 16 x i32> %res
}

define <16 x i32> @test_mask_add_epi32_rrk(<16 x i32> %a, <16 x i32> %b, <16 x i32> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_add_epi32_rrk:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    kmovw %edi, %k1
; CHECK-NEXT:    vpaddd %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT:    vmovdqa64 %zmm2, %zmm0
; CHECK-NEXT:    retq
  %res = call <16 x i32> @llvm.x86.avx512.mask.padd.d.512(<16 x i32> %a, <16 x i32> %b, <16 x i32> %passThru, i16 %mask)
  ret < 16 x i32> %res
}

define <16 x i32> @test_mask_add_epi32_rrkz(<16 x i32> %a, <16 x i32> %b, i16 %mask) {
; CHECK-LABEL: test_mask_add_epi32_rrkz:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    kmovw %edi, %k1
; CHECK-NEXT:    vpaddd %zmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT:    retq
  %res = call <16 x i32> @llvm.x86.avx512.mask.padd.d.512(<16 x i32> %a, <16 x i32> %b, <16 x i32> zeroinitializer, i16 %mask)
  ret < 16 x i32> %res
}

define <16 x i32> @test_mask_add_epi32_rm(<16 x i32> %a, <16 x i32>* %ptr_b) {
; CHECK-LABEL: test_mask_add_epi32_rm:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vpaddd (%rdi), %zmm0, %zmm0
; CHECK-NEXT:    retq
  %b = load <16 x i32>, <16 x i32>* %ptr_b
  %res = call <16 x i32> @llvm.x86.avx512.mask.padd.d.512(<16 x i32> %a, <16 x i32> %b, <16 x i32> zeroinitializer, i16 -1)
  ret < 16 x i32> %res
}

define <16 x i32> @test_mask_add_epi32_rmk(<16 x i32> %a, <16 x i32>* %ptr_b, <16 x i32> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_add_epi32_rmk:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    kmovw %esi, %k1
; CHECK-NEXT:    vpaddd (%rdi), %zmm0, %zmm1 {%k1}
; CHECK-NEXT:    vmovdqa64 %zmm1, %zmm0
; CHECK-NEXT:    retq
  %b = load <16 x i32>, <16 x i32>* %ptr_b
  %res = call <16 x i32> @llvm.x86.avx512.mask.padd.d.512(<16 x i32> %a, <16 x i32> %b, <16 x i32> %passThru, i16 %mask)
  ret < 16 x i32> %res
}

define <16 x i32> @test_mask_add_epi32_rmkz(<16 x i32> %a, <16 x i32>* %ptr_b, i16 %mask) {
; CHECK-LABEL: test_mask_add_epi32_rmkz:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    kmovw %esi, %k1
; CHECK-NEXT:    vpaddd (%rdi), %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT:    retq
  %b = load <16 x i32>, <16 x i32>* %ptr_b
  %res = call <16 x i32> @llvm.x86.avx512.mask.padd.d.512(<16 x i32> %a, <16 x i32> %b, <16 x i32> zeroinitializer, i16 %mask)
  ret < 16 x i32> %res
}

define <16 x i32> @test_mask_add_epi32_rmb(<16 x i32> %a, i32* %ptr_b) {
|
|
; CHECK-LABEL: test_mask_add_epi32_rmb:
|
|
; CHECK: ## %bb.0:
|
|
; CHECK-NEXT: vpaddd (%rdi){1to16}, %zmm0, %zmm0
|
|
; CHECK-NEXT: retq
|
|
%q = load i32, i32* %ptr_b
|
|
%vecinit.i = insertelement <16 x i32> undef, i32 %q, i32 0
|
|
%b = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
|
|
%res = call <16 x i32> @llvm.x86.avx512.mask.padd.d.512(<16 x i32> %a, <16 x i32> %b, <16 x i32> zeroinitializer, i16 -1)
|
|
ret < 16 x i32> %res
|
|
}
|
|
|
|
define <16 x i32> @test_mask_add_epi32_rmbk(<16 x i32> %a, i32* %ptr_b, <16 x i32> %passThru, i16 %mask) {
|
|
; CHECK-LABEL: test_mask_add_epi32_rmbk:
|
|
; CHECK: ## %bb.0:
|
|
; CHECK-NEXT: kmovw %esi, %k1
|
|
; CHECK-NEXT: vpaddd (%rdi){1to16}, %zmm0, %zmm1 {%k1}
|
|
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
|
|
; CHECK-NEXT: retq
|
|
%q = load i32, i32* %ptr_b
|
|
%vecinit.i = insertelement <16 x i32> undef, i32 %q, i32 0
|
|
%b = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
|
|
%res = call <16 x i32> @llvm.x86.avx512.mask.padd.d.512(<16 x i32> %a, <16 x i32> %b, <16 x i32> %passThru, i16 %mask)
|
|
ret < 16 x i32> %res
|
|
}
|
|
|
|
define <16 x i32> @test_mask_add_epi32_rmbkz(<16 x i32> %a, i32* %ptr_b, i16 %mask) {
|
|
; CHECK-LABEL: test_mask_add_epi32_rmbkz:
|
|
; CHECK: ## %bb.0:
|
|
; CHECK-NEXT: kmovw %esi, %k1
|
|
; CHECK-NEXT: vpaddd (%rdi){1to16}, %zmm0, %zmm0 {%k1} {z}
|
|
; CHECK-NEXT: retq
|
|
%q = load i32, i32* %ptr_b
|
|
%vecinit.i = insertelement <16 x i32> undef, i32 %q, i32 0
|
|
%b = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
|
|
%res = call <16 x i32> @llvm.x86.avx512.mask.padd.d.512(<16 x i32> %a, <16 x i32> %b, <16 x i32> zeroinitializer, i16 %mask)
|
|
ret < 16 x i32> %res
|
|
}
|
|
|
|
declare <16 x i32> @llvm.x86.avx512.mask.padd.d.512(<16 x i32>, <16 x i32>, <16 x i32>, i16)
|
|
|
|
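
; Masked 512-bit i32 subtract, same rr/rm/rmb operand-form matrix as the add tests above.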
define <16 x i32> @test_mask_sub_epi32_rr(<16 x i32> %a, <16 x i32> %b) {
; CHECK-LABEL: test_mask_sub_epi32_rr:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpsubd %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.mask.psub.d.512(<16 x i32> %a, <16 x i32> %b, <16 x i32> zeroinitializer, i16 -1)
ret <16 x i32> %res
}

define <16 x i32> @test_mask_sub_epi32_rrk(<16 x i32> %a, <16 x i32> %b, <16 x i32> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_sub_epi32_rrk:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsubd %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
; CHECK-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.mask.psub.d.512(<16 x i32> %a, <16 x i32> %b, <16 x i32> %passThru, i16 %mask)
ret <16 x i32> %res
}

define <16 x i32> @test_mask_sub_epi32_rrkz(<16 x i32> %a, <16 x i32> %b, i16 %mask) {
; CHECK-LABEL: test_mask_sub_epi32_rrkz:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsubd %zmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.mask.psub.d.512(<16 x i32> %a, <16 x i32> %b, <16 x i32> zeroinitializer, i16 %mask)
ret <16 x i32> %res
}

define <16 x i32> @test_mask_sub_epi32_rm(<16 x i32> %a, <16 x i32>* %ptr_b) {
; CHECK-LABEL: test_mask_sub_epi32_rm:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpsubd (%rdi), %zmm0, %zmm0
; CHECK-NEXT: retq
%b = load <16 x i32>, <16 x i32>* %ptr_b
%res = call <16 x i32> @llvm.x86.avx512.mask.psub.d.512(<16 x i32> %a, <16 x i32> %b, <16 x i32> zeroinitializer, i16 -1)
ret <16 x i32> %res
}

define <16 x i32> @test_mask_sub_epi32_rmk(<16 x i32> %a, <16 x i32>* %ptr_b, <16 x i32> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_sub_epi32_rmk:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpsubd (%rdi), %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
; CHECK-NEXT: retq
%b = load <16 x i32>, <16 x i32>* %ptr_b
%res = call <16 x i32> @llvm.x86.avx512.mask.psub.d.512(<16 x i32> %a, <16 x i32> %b, <16 x i32> %passThru, i16 %mask)
ret <16 x i32> %res
}

define <16 x i32> @test_mask_sub_epi32_rmkz(<16 x i32> %a, <16 x i32>* %ptr_b, i16 %mask) {
; CHECK-LABEL: test_mask_sub_epi32_rmkz:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpsubd (%rdi), %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
%b = load <16 x i32>, <16 x i32>* %ptr_b
%res = call <16 x i32> @llvm.x86.avx512.mask.psub.d.512(<16 x i32> %a, <16 x i32> %b, <16 x i32> zeroinitializer, i16 %mask)
ret <16 x i32> %res
}

define <16 x i32> @test_mask_sub_epi32_rmb(<16 x i32> %a, i32* %ptr_b) {
; CHECK-LABEL: test_mask_sub_epi32_rmb:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpsubd (%rdi){1to16}, %zmm0, %zmm0
; CHECK-NEXT: retq
%q = load i32, i32* %ptr_b
%vecinit.i = insertelement <16 x i32> undef, i32 %q, i32 0
%b = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
%res = call <16 x i32> @llvm.x86.avx512.mask.psub.d.512(<16 x i32> %a, <16 x i32> %b, <16 x i32> zeroinitializer, i16 -1)
ret <16 x i32> %res
}

define <16 x i32> @test_mask_sub_epi32_rmbk(<16 x i32> %a, i32* %ptr_b, <16 x i32> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_sub_epi32_rmbk:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpsubd (%rdi){1to16}, %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
; CHECK-NEXT: retq
%q = load i32, i32* %ptr_b
%vecinit.i = insertelement <16 x i32> undef, i32 %q, i32 0
%b = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
%res = call <16 x i32> @llvm.x86.avx512.mask.psub.d.512(<16 x i32> %a, <16 x i32> %b, <16 x i32> %passThru, i16 %mask)
ret <16 x i32> %res
}

define <16 x i32> @test_mask_sub_epi32_rmbkz(<16 x i32> %a, i32* %ptr_b, i16 %mask) {
; CHECK-LABEL: test_mask_sub_epi32_rmbkz:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpsubd (%rdi){1to16}, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
%q = load i32, i32* %ptr_b
%vecinit.i = insertelement <16 x i32> undef, i32 %q, i32 0
%b = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
%res = call <16 x i32> @llvm.x86.avx512.mask.psub.d.512(<16 x i32> %a, <16 x i32> %b, <16 x i32> zeroinitializer, i16 %mask)
ret <16 x i32> %res
}

declare <16 x i32> @llvm.x86.avx512.mask.psub.d.512(<16 x i32>, <16 x i32>, <16 x i32>, i16)
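
; Masked 512-bit i64 add with an i8 mask; the broadcast forms use the {1to8} embedded-broadcast memory operand.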
define <8 x i64> @test_mask_add_epi64_rr(<8 x i64> %a, <8 x i64> %b) {
; CHECK-LABEL: test_mask_add_epi64_rr:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpaddq %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.mask.padd.q.512(<8 x i64> %a, <8 x i64> %b, <8 x i64> zeroinitializer, i8 -1)
ret <8 x i64> %res
}

define <8 x i64> @test_mask_add_epi64_rrk(<8 x i64> %a, <8 x i64> %b, <8 x i64> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_add_epi64_rrk:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpaddq %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
; CHECK-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.mask.padd.q.512(<8 x i64> %a, <8 x i64> %b, <8 x i64> %passThru, i8 %mask)
ret <8 x i64> %res
}

define <8 x i64> @test_mask_add_epi64_rrkz(<8 x i64> %a, <8 x i64> %b, i8 %mask) {
; CHECK-LABEL: test_mask_add_epi64_rrkz:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpaddq %zmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.mask.padd.q.512(<8 x i64> %a, <8 x i64> %b, <8 x i64> zeroinitializer, i8 %mask)
ret <8 x i64> %res
}

define <8 x i64> @test_mask_add_epi64_rm(<8 x i64> %a, <8 x i64>* %ptr_b) {
; CHECK-LABEL: test_mask_add_epi64_rm:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpaddq (%rdi), %zmm0, %zmm0
; CHECK-NEXT: retq
%b = load <8 x i64>, <8 x i64>* %ptr_b
%res = call <8 x i64> @llvm.x86.avx512.mask.padd.q.512(<8 x i64> %a, <8 x i64> %b, <8 x i64> zeroinitializer, i8 -1)
ret <8 x i64> %res
}

define <8 x i64> @test_mask_add_epi64_rmk(<8 x i64> %a, <8 x i64>* %ptr_b, <8 x i64> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_add_epi64_rmk:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpaddq (%rdi), %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
; CHECK-NEXT: retq
%b = load <8 x i64>, <8 x i64>* %ptr_b
%res = call <8 x i64> @llvm.x86.avx512.mask.padd.q.512(<8 x i64> %a, <8 x i64> %b, <8 x i64> %passThru, i8 %mask)
ret <8 x i64> %res
}

define <8 x i64> @test_mask_add_epi64_rmkz(<8 x i64> %a, <8 x i64>* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_add_epi64_rmkz:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpaddq (%rdi), %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
%b = load <8 x i64>, <8 x i64>* %ptr_b
%res = call <8 x i64> @llvm.x86.avx512.mask.padd.q.512(<8 x i64> %a, <8 x i64> %b, <8 x i64> zeroinitializer, i8 %mask)
ret <8 x i64> %res
}

define <8 x i64> @test_mask_add_epi64_rmb(<8 x i64> %a, i64* %ptr_b) {
; CHECK-LABEL: test_mask_add_epi64_rmb:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpaddq (%rdi){1to8}, %zmm0, %zmm0
; CHECK-NEXT: retq
%q = load i64, i64* %ptr_b
%vecinit.i = insertelement <8 x i64> undef, i64 %q, i32 0
%b = shufflevector <8 x i64> %vecinit.i, <8 x i64> undef, <8 x i32> zeroinitializer
%res = call <8 x i64> @llvm.x86.avx512.mask.padd.q.512(<8 x i64> %a, <8 x i64> %b, <8 x i64> zeroinitializer, i8 -1)
ret <8 x i64> %res
}

define <8 x i64> @test_mask_add_epi64_rmbk(<8 x i64> %a, i64* %ptr_b, <8 x i64> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_add_epi64_rmbk:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpaddq (%rdi){1to8}, %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
; CHECK-NEXT: retq
%q = load i64, i64* %ptr_b
%vecinit.i = insertelement <8 x i64> undef, i64 %q, i32 0
%b = shufflevector <8 x i64> %vecinit.i, <8 x i64> undef, <8 x i32> zeroinitializer
%res = call <8 x i64> @llvm.x86.avx512.mask.padd.q.512(<8 x i64> %a, <8 x i64> %b, <8 x i64> %passThru, i8 %mask)
ret <8 x i64> %res
}

define <8 x i64> @test_mask_add_epi64_rmbkz(<8 x i64> %a, i64* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_add_epi64_rmbkz:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpaddq (%rdi){1to8}, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
%q = load i64, i64* %ptr_b
%vecinit.i = insertelement <8 x i64> undef, i64 %q, i32 0
%b = shufflevector <8 x i64> %vecinit.i, <8 x i64> undef, <8 x i32> zeroinitializer
%res = call <8 x i64> @llvm.x86.avx512.mask.padd.q.512(<8 x i64> %a, <8 x i64> %b, <8 x i64> zeroinitializer, i8 %mask)
ret <8 x i64> %res
}

declare <8 x i64> @llvm.x86.avx512.mask.padd.q.512(<8 x i64>, <8 x i64>, <8 x i64>, i8)
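
; Masked 512-bit i64 subtract, same operand-form matrix.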
define <8 x i64> @test_mask_sub_epi64_rr(<8 x i64> %a, <8 x i64> %b) {
; CHECK-LABEL: test_mask_sub_epi64_rr:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpsubq %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.mask.psub.q.512(<8 x i64> %a, <8 x i64> %b, <8 x i64> zeroinitializer, i8 -1)
ret <8 x i64> %res
}

define <8 x i64> @test_mask_sub_epi64_rrk(<8 x i64> %a, <8 x i64> %b, <8 x i64> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_sub_epi64_rrk:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsubq %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
; CHECK-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.mask.psub.q.512(<8 x i64> %a, <8 x i64> %b, <8 x i64> %passThru, i8 %mask)
ret <8 x i64> %res
}

define <8 x i64> @test_mask_sub_epi64_rrkz(<8 x i64> %a, <8 x i64> %b, i8 %mask) {
; CHECK-LABEL: test_mask_sub_epi64_rrkz:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsubq %zmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.mask.psub.q.512(<8 x i64> %a, <8 x i64> %b, <8 x i64> zeroinitializer, i8 %mask)
ret <8 x i64> %res
}

define <8 x i64> @test_mask_sub_epi64_rm(<8 x i64> %a, <8 x i64>* %ptr_b) {
; CHECK-LABEL: test_mask_sub_epi64_rm:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpsubq (%rdi), %zmm0, %zmm0
; CHECK-NEXT: retq
%b = load <8 x i64>, <8 x i64>* %ptr_b
%res = call <8 x i64> @llvm.x86.avx512.mask.psub.q.512(<8 x i64> %a, <8 x i64> %b, <8 x i64> zeroinitializer, i8 -1)
ret <8 x i64> %res
}

define <8 x i64> @test_mask_sub_epi64_rmk(<8 x i64> %a, <8 x i64>* %ptr_b, <8 x i64> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_sub_epi64_rmk:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpsubq (%rdi), %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
; CHECK-NEXT: retq
%b = load <8 x i64>, <8 x i64>* %ptr_b
%res = call <8 x i64> @llvm.x86.avx512.mask.psub.q.512(<8 x i64> %a, <8 x i64> %b, <8 x i64> %passThru, i8 %mask)
ret <8 x i64> %res
}

define <8 x i64> @test_mask_sub_epi64_rmkz(<8 x i64> %a, <8 x i64>* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_sub_epi64_rmkz:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpsubq (%rdi), %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
%b = load <8 x i64>, <8 x i64>* %ptr_b
%res = call <8 x i64> @llvm.x86.avx512.mask.psub.q.512(<8 x i64> %a, <8 x i64> %b, <8 x i64> zeroinitializer, i8 %mask)
ret <8 x i64> %res
}

define <8 x i64> @test_mask_sub_epi64_rmb(<8 x i64> %a, i64* %ptr_b) {
; CHECK-LABEL: test_mask_sub_epi64_rmb:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpsubq (%rdi){1to8}, %zmm0, %zmm0
; CHECK-NEXT: retq
%q = load i64, i64* %ptr_b
%vecinit.i = insertelement <8 x i64> undef, i64 %q, i32 0
%b = shufflevector <8 x i64> %vecinit.i, <8 x i64> undef, <8 x i32> zeroinitializer
%res = call <8 x i64> @llvm.x86.avx512.mask.psub.q.512(<8 x i64> %a, <8 x i64> %b, <8 x i64> zeroinitializer, i8 -1)
ret <8 x i64> %res
}

define <8 x i64> @test_mask_sub_epi64_rmbk(<8 x i64> %a, i64* %ptr_b, <8 x i64> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_sub_epi64_rmbk:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpsubq (%rdi){1to8}, %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
; CHECK-NEXT: retq
%q = load i64, i64* %ptr_b
%vecinit.i = insertelement <8 x i64> undef, i64 %q, i32 0
%b = shufflevector <8 x i64> %vecinit.i, <8 x i64> undef, <8 x i32> zeroinitializer
%res = call <8 x i64> @llvm.x86.avx512.mask.psub.q.512(<8 x i64> %a, <8 x i64> %b, <8 x i64> %passThru, i8 %mask)
ret <8 x i64> %res
}

define <8 x i64> @test_mask_sub_epi64_rmbkz(<8 x i64> %a, i64* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_sub_epi64_rmbkz:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpsubq (%rdi){1to8}, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
%q = load i64, i64* %ptr_b
%vecinit.i = insertelement <8 x i64> undef, i64 %q, i32 0
%b = shufflevector <8 x i64> %vecinit.i, <8 x i64> undef, <8 x i32> zeroinitializer
%res = call <8 x i64> @llvm.x86.avx512.mask.psub.q.512(<8 x i64> %a, <8 x i64> %b, <8 x i64> zeroinitializer, i8 %mask)
ret <8 x i64> %res
}

declare <8 x i64> @llvm.x86.avx512.mask.psub.q.512(<8 x i64>, <8 x i64>, <8 x i64>, i8)
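
; Masked 512-bit low i32 multiply (vpmulld).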
define <16 x i32> @test_mask_mullo_epi32_rr_512(<16 x i32> %a, <16 x i32> %b) {
; CHECK-LABEL: test_mask_mullo_epi32_rr_512:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpmulld %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.mask.pmull.d.512(<16 x i32> %a, <16 x i32> %b, <16 x i32> zeroinitializer, i16 -1)
ret <16 x i32> %res
}

define <16 x i32> @test_mask_mullo_epi32_rrk_512(<16 x i32> %a, <16 x i32> %b, <16 x i32> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_mullo_epi32_rrk_512:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpmulld %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
; CHECK-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.mask.pmull.d.512(<16 x i32> %a, <16 x i32> %b, <16 x i32> %passThru, i16 %mask)
ret <16 x i32> %res
}

define <16 x i32> @test_mask_mullo_epi32_rrkz_512(<16 x i32> %a, <16 x i32> %b, i16 %mask) {
; CHECK-LABEL: test_mask_mullo_epi32_rrkz_512:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpmulld %zmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.mask.pmull.d.512(<16 x i32> %a, <16 x i32> %b, <16 x i32> zeroinitializer, i16 %mask)
ret <16 x i32> %res
}

define <16 x i32> @test_mask_mullo_epi32_rm_512(<16 x i32> %a, <16 x i32>* %ptr_b) {
; CHECK-LABEL: test_mask_mullo_epi32_rm_512:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpmulld (%rdi), %zmm0, %zmm0
; CHECK-NEXT: retq
%b = load <16 x i32>, <16 x i32>* %ptr_b
%res = call <16 x i32> @llvm.x86.avx512.mask.pmull.d.512(<16 x i32> %a, <16 x i32> %b, <16 x i32> zeroinitializer, i16 -1)
ret <16 x i32> %res
}

define <16 x i32> @test_mask_mullo_epi32_rmk_512(<16 x i32> %a, <16 x i32>* %ptr_b, <16 x i32> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_mullo_epi32_rmk_512:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpmulld (%rdi), %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
; CHECK-NEXT: retq
%b = load <16 x i32>, <16 x i32>* %ptr_b
%res = call <16 x i32> @llvm.x86.avx512.mask.pmull.d.512(<16 x i32> %a, <16 x i32> %b, <16 x i32> %passThru, i16 %mask)
ret <16 x i32> %res
}

define <16 x i32> @test_mask_mullo_epi32_rmkz_512(<16 x i32> %a, <16 x i32>* %ptr_b, i16 %mask) {
; CHECK-LABEL: test_mask_mullo_epi32_rmkz_512:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpmulld (%rdi), %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
%b = load <16 x i32>, <16 x i32>* %ptr_b
%res = call <16 x i32> @llvm.x86.avx512.mask.pmull.d.512(<16 x i32> %a, <16 x i32> %b, <16 x i32> zeroinitializer, i16 %mask)
ret <16 x i32> %res
}

define <16 x i32> @test_mask_mullo_epi32_rmb_512(<16 x i32> %a, i32* %ptr_b) {
; CHECK-LABEL: test_mask_mullo_epi32_rmb_512:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpmulld (%rdi){1to16}, %zmm0, %zmm0
; CHECK-NEXT: retq
%q = load i32, i32* %ptr_b
%vecinit.i = insertelement <16 x i32> undef, i32 %q, i32 0
%b = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
%res = call <16 x i32> @llvm.x86.avx512.mask.pmull.d.512(<16 x i32> %a, <16 x i32> %b, <16 x i32> zeroinitializer, i16 -1)
ret <16 x i32> %res
}

define <16 x i32> @test_mask_mullo_epi32_rmbk_512(<16 x i32> %a, i32* %ptr_b, <16 x i32> %passThru, i16 %mask) {
; CHECK-LABEL: test_mask_mullo_epi32_rmbk_512:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpmulld (%rdi){1to16}, %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
; CHECK-NEXT: retq
%q = load i32, i32* %ptr_b
%vecinit.i = insertelement <16 x i32> undef, i32 %q, i32 0
%b = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
%res = call <16 x i32> @llvm.x86.avx512.mask.pmull.d.512(<16 x i32> %a, <16 x i32> %b, <16 x i32> %passThru, i16 %mask)
ret <16 x i32> %res
}

define <16 x i32> @test_mask_mullo_epi32_rmbkz_512(<16 x i32> %a, i32* %ptr_b, i16 %mask) {
; CHECK-LABEL: test_mask_mullo_epi32_rmbkz_512:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpmulld (%rdi){1to16}, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
%q = load i32, i32* %ptr_b
%vecinit.i = insertelement <16 x i32> undef, i32 %q, i32 0
%b = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
%res = call <16 x i32> @llvm.x86.avx512.mask.pmull.d.512(<16 x i32> %a, <16 x i32> %b, <16 x i32> zeroinitializer, i16 %mask)
ret <16 x i32> %res
}

declare <16 x i32> @llvm.x86.avx512.mask.pmull.d.512(<16 x i32>, <16 x i32>, <16 x i32>, i16)
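
; Lane and element shuffles (vshuff32x4, vshuff64x2, vshufi32x4, vshufi64x2, vshufpd, vshufps) with immediate 22; masked and unmasked results are combined with add/fadd so every call stays live.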
declare <16 x float> @llvm.x86.avx512.mask.shuf.f32x4(<16 x float>, <16 x float>, i32, <16 x float>, i16)

define <16 x float>@test_int_x86_avx512_mask_shuf_f32x4(<16 x float> %x0, <16 x float> %x1, <16 x float> %x3, i16 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_shuf_f32x4:
; CHECK: ## %bb.0:
; CHECK-NEXT: vshuff32x4 {{.*#+}} zmm3 = zmm0[8,9,10,11,4,5,6,7],zmm1[4,5,6,7,0,1,2,3]
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vshuff32x4 {{.*#+}} zmm2 {%k1} = zmm0[8,9,10,11,4,5,6,7],zmm1[4,5,6,7,0,1,2,3]
; CHECK-NEXT: vaddps %zmm3, %zmm2, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.shuf.f32x4(<16 x float> %x0, <16 x float> %x1, i32 22, <16 x float> %x3, i16 %x4)
%res1 = call <16 x float> @llvm.x86.avx512.mask.shuf.f32x4(<16 x float> %x0, <16 x float> %x1, i32 22, <16 x float> %x3, i16 -1)
%res2 = fadd <16 x float> %res, %res1
ret <16 x float> %res2
}

declare <8 x double> @llvm.x86.avx512.mask.shuf.f64x2(<8 x double>, <8 x double>, i32, <8 x double>, i8)

define <8 x double>@test_int_x86_avx512_mask_shuf_f64x2(<8 x double> %x0, <8 x double> %x1, <8 x double> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_shuf_f64x2:
; CHECK: ## %bb.0:
; CHECK-NEXT: vshuff64x2 {{.*#+}} zmm3 = zmm0[4,5,2,3],zmm1[2,3,0,1]
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vshuff64x2 {{.*#+}} zmm2 {%k1} = zmm0[4,5,2,3],zmm1[2,3,0,1]
; CHECK-NEXT: vaddpd %zmm3, %zmm2, %zmm2
; CHECK-NEXT: vshuff64x2 {{.*#+}} zmm0 {%k1} {z} = zmm0[4,5,2,3],zmm1[2,3,0,1]
; CHECK-NEXT: vaddpd %zmm0, %zmm2, %zmm0
; CHECK-NEXT: retq
%res = call <8 x double> @llvm.x86.avx512.mask.shuf.f64x2(<8 x double> %x0, <8 x double> %x1, i32 22, <8 x double> %x3, i8 %x4)
%res1 = call <8 x double> @llvm.x86.avx512.mask.shuf.f64x2(<8 x double> %x0, <8 x double> %x1, i32 22, <8 x double> %x3, i8 -1)
%res2 = call <8 x double> @llvm.x86.avx512.mask.shuf.f64x2(<8 x double> %x0, <8 x double> %x1, i32 22, <8 x double> zeroinitializer, i8 %x4)

%res3 = fadd <8 x double> %res, %res1
%res4 = fadd <8 x double> %res3, %res2
ret <8 x double> %res4
}

declare <16 x i32> @llvm.x86.avx512.mask.shuf.i32x4(<16 x i32>, <16 x i32>, i32, <16 x i32>, i16)

define <16 x i32>@test_int_x86_avx512_mask_shuf_i32x4(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x3, i16 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_shuf_i32x4:
; CHECK: ## %bb.0:
; CHECK-NEXT: vshufi32x4 {{.*#+}} zmm3 = zmm0[8,9,10,11,4,5,6,7],zmm1[4,5,6,7,0,1,2,3]
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vshufi32x4 {{.*#+}} zmm2 {%k1} = zmm0[8,9,10,11,4,5,6,7],zmm1[4,5,6,7,0,1,2,3]
; CHECK-NEXT: vpaddd %zmm3, %zmm2, %zmm0
; CHECK-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.mask.shuf.i32x4(<16 x i32> %x0, <16 x i32> %x1, i32 22, <16 x i32> %x3, i16 %x4)
%res1 = call <16 x i32> @llvm.x86.avx512.mask.shuf.i32x4(<16 x i32> %x0, <16 x i32> %x1, i32 22, <16 x i32> %x3, i16 -1)
%res2 = add <16 x i32> %res, %res1
ret <16 x i32> %res2
}

declare <8 x i64> @llvm.x86.avx512.mask.shuf.i64x2(<8 x i64>, <8 x i64>, i32, <8 x i64>, i8)

define <8 x i64>@test_int_x86_avx512_mask_shuf_i64x2(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_shuf_i64x2:
; CHECK: ## %bb.0:
; CHECK-NEXT: vshufi64x2 {{.*#+}} zmm3 = zmm0[4,5,2,3],zmm1[2,3,0,1]
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vshufi64x2 {{.*#+}} zmm2 {%k1} = zmm0[4,5,2,3],zmm1[2,3,0,1]
; CHECK-NEXT: vpaddq %zmm3, %zmm2, %zmm0
; CHECK-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.mask.shuf.i64x2(<8 x i64> %x0, <8 x i64> %x1, i32 22, <8 x i64> %x3, i8 %x4)
%res1 = call <8 x i64> @llvm.x86.avx512.mask.shuf.i64x2(<8 x i64> %x0, <8 x i64> %x1, i32 22, <8 x i64> %x3, i8 -1)
%res2 = add <8 x i64> %res, %res1
ret <8 x i64> %res2
}

declare <8 x double> @llvm.x86.avx512.mask.shuf.pd.512(<8 x double>, <8 x double>, i32, <8 x double>, i8)

define <8 x double>@test_int_x86_avx512_mask_shuf_pd_512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_shuf_pd_512:
; CHECK: ## %bb.0:
; CHECK-NEXT: vshufpd {{.*#+}} zmm3 = zmm0[0],zmm1[1],zmm0[3],zmm1[2],zmm0[5],zmm1[4],zmm0[6],zmm1[6]
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vshufpd {{.*#+}} zmm2 {%k1} = zmm0[0],zmm1[1],zmm0[3],zmm1[2],zmm0[5],zmm1[4],zmm0[6],zmm1[6]
; CHECK-NEXT: vaddpd %zmm3, %zmm2, %zmm2
; CHECK-NEXT: vshufpd {{.*#+}} zmm0 {%k1} {z} = zmm0[0],zmm1[1],zmm0[3],zmm1[2],zmm0[5],zmm1[4],zmm0[6],zmm1[6]
; CHECK-NEXT: vaddpd %zmm0, %zmm2, %zmm0
; CHECK-NEXT: retq
%res = call <8 x double> @llvm.x86.avx512.mask.shuf.pd.512(<8 x double> %x0, <8 x double> %x1, i32 22, <8 x double> %x3, i8 %x4)
%res1 = call <8 x double> @llvm.x86.avx512.mask.shuf.pd.512(<8 x double> %x0, <8 x double> %x1, i32 22, <8 x double> %x3, i8 -1)
%res2 = call <8 x double> @llvm.x86.avx512.mask.shuf.pd.512(<8 x double> %x0, <8 x double> %x1, i32 22, <8 x double> zeroinitializer, i8 %x4)

%res3 = fadd <8 x double> %res, %res1
%res4 = fadd <8 x double> %res3, %res2
ret <8 x double> %res4
}

declare <16 x float> @llvm.x86.avx512.mask.shuf.ps.512(<16 x float>, <16 x float>, i32, <16 x float>, i16)

define <16 x float>@test_int_x86_avx512_mask_shuf_ps_512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x3, i16 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_shuf_ps_512:
; CHECK: ## %bb.0:
; CHECK-NEXT: vshufps {{.*#+}} zmm3 = zmm0[2,1],zmm1[1,0],zmm0[6,5],zmm1[5,4],zmm0[10,9],zmm1[9,8],zmm0[14,13],zmm1[13,12]
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vshufps {{.*#+}} zmm2 {%k1} = zmm0[2,1],zmm1[1,0],zmm0[6,5],zmm1[5,4],zmm0[10,9],zmm1[9,8],zmm0[14,13],zmm1[13,12]
; CHECK-NEXT: vaddps %zmm3, %zmm2, %zmm0
; CHECK-NEXT: retq
%res = call <16 x float> @llvm.x86.avx512.mask.shuf.ps.512(<16 x float> %x0, <16 x float> %x1, i32 22, <16 x float> %x3, i16 %x4)
%res1 = call <16 x float> @llvm.x86.avx512.mask.shuf.ps.512(<16 x float> %x0, <16 x float> %x1, i32 22, <16 x float> %x3, i16 -1)
%res2 = fadd <16 x float> %res, %res1
ret <16 x float> %res2
}
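
; Masked signed/unsigned packed min and max on i32 and i64 elements.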
declare <16 x i32> @llvm.x86.avx512.mask.pmaxs.d.512(<16 x i32>, <16 x i32>, <16 x i32>, i16)

define <16 x i32>@test_int_x86_avx512_mask_pmaxs_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmaxs_d_512:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpmaxsd %zmm1, %zmm0, %zmm3
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpmaxsd %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vpaddd %zmm3, %zmm2, %zmm0
; CHECK-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.mask.pmaxs.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x3)
%res1 = call <16 x i32> @llvm.x86.avx512.mask.pmaxs.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 -1)
%res2 = add <16 x i32> %res, %res1
ret <16 x i32> %res2
}

declare <8 x i64> @llvm.x86.avx512.mask.pmaxs.q.512(<8 x i64>, <8 x i64>, <8 x i64>, i8)

define <8 x i64>@test_int_x86_avx512_mask_pmaxs_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmaxs_q_512:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpmaxsq %zmm1, %zmm0, %zmm3
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpmaxsq %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vpaddq %zmm3, %zmm2, %zmm0
; CHECK-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.mask.pmaxs.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3)
%res1 = call <8 x i64> @llvm.x86.avx512.mask.pmaxs.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 -1)
%res2 = add <8 x i64> %res, %res1
ret <8 x i64> %res2
}

declare <16 x i32> @llvm.x86.avx512.mask.pmaxu.d.512(<16 x i32>, <16 x i32>, <16 x i32>, i16)

define <16 x i32>@test_int_x86_avx512_mask_pmaxu_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmaxu_d_512:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpmaxud %zmm1, %zmm0, %zmm3
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpmaxud %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vpaddd %zmm3, %zmm2, %zmm0
; CHECK-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.mask.pmaxu.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x3)
%res1 = call <16 x i32> @llvm.x86.avx512.mask.pmaxu.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 -1)
%res2 = add <16 x i32> %res, %res1
ret <16 x i32> %res2
}

declare <8 x i64> @llvm.x86.avx512.mask.pmaxu.q.512(<8 x i64>, <8 x i64>, <8 x i64>, i8)

define <8 x i64>@test_int_x86_avx512_mask_pmaxu_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmaxu_q_512:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpmaxuq %zmm1, %zmm0, %zmm3
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpmaxuq %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vpaddq %zmm3, %zmm2, %zmm0
; CHECK-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.mask.pmaxu.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3)
%res1 = call <8 x i64> @llvm.x86.avx512.mask.pmaxu.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 -1)
%res2 = add <8 x i64> %res, %res1
ret <8 x i64> %res2
}

declare <16 x i32> @llvm.x86.avx512.mask.pmins.d.512(<16 x i32>, <16 x i32>, <16 x i32>, i16)

define <16 x i32>@test_int_x86_avx512_mask_pmins_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmins_d_512:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpminsd %zmm1, %zmm0, %zmm3
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpminsd %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vpaddd %zmm3, %zmm2, %zmm0
; CHECK-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.mask.pmins.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x3)
%res1 = call <16 x i32> @llvm.x86.avx512.mask.pmins.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 -1)
%res2 = add <16 x i32> %res, %res1
ret <16 x i32> %res2
}

declare <8 x i64> @llvm.x86.avx512.mask.pmins.q.512(<8 x i64>, <8 x i64>, <8 x i64>, i8)

define <8 x i64>@test_int_x86_avx512_mask_pmins_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmins_q_512:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpminsq %zmm1, %zmm0, %zmm3
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpminsq %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vpaddq %zmm3, %zmm2, %zmm0
; CHECK-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.mask.pmins.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3)
%res1 = call <8 x i64> @llvm.x86.avx512.mask.pmins.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 -1)
%res2 = add <8 x i64> %res, %res1
ret <8 x i64> %res2
}

declare <16 x i32> @llvm.x86.avx512.mask.pminu.d.512(<16 x i32>, <16 x i32>, <16 x i32>, i16)

define <16 x i32>@test_int_x86_avx512_mask_pminu_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_pminu_d_512:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpminud %zmm1, %zmm0, %zmm3
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpminud %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vpaddd %zmm3, %zmm2, %zmm0
; CHECK-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.mask.pminu.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x3)
%res1 = call <16 x i32> @llvm.x86.avx512.mask.pminu.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 -1)
%res2 = add <16 x i32> %res, %res1
ret <16 x i32> %res2
}

declare <8 x i64> @llvm.x86.avx512.mask.pminu.q.512(<8 x i64>, <8 x i64>, <8 x i64>, i8)

define <8 x i64>@test_int_x86_avx512_mask_pminu_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_pminu_q_512:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpminuq %zmm1, %zmm0, %zmm3
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpminuq %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vpaddq %zmm3, %zmm2, %zmm0
; CHECK-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.mask.pminu.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3)
%res1 = call <8 x i64> @llvm.x86.avx512.mask.pminu.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 -1)
%res2 = add <8 x i64> %res, %res1
ret <8 x i64> %res2
}
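
; Masked scalar moves (vmovss/vmovsd): merge and zeroing semantics on the low element.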
define <4 x float> @test_mm_mask_move_ss(<4 x float> %__W, i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B) {
; CHECK-LABEL: test_mm_mask_move_ss:
; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovss %xmm2, %xmm1, %xmm0 {%k1}
; CHECK-NEXT: retq
entry:
%res = call <4 x float> @llvm.x86.avx512.mask.move.ss(<4 x float> %__A, <4 x float> %__B, <4 x float> %__W, i8 %__U)
ret <4 x float> %res
}


define <4 x float> @test_mm_maskz_move_ss(i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B) {
; CHECK-LABEL: test_mm_maskz_move_ss:
; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
entry:
%res = call <4 x float> @llvm.x86.avx512.mask.move.ss(<4 x float> %__A, <4 x float> %__B, <4 x float> zeroinitializer, i8 %__U)
ret <4 x float> %res
}

define <2 x double> @test_mm_mask_move_sd(<2 x double> %__W, i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B) {
; CHECK-LABEL: test_mm_mask_move_sd:
; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovsd %xmm2, %xmm1, %xmm0 {%k1}
; CHECK-NEXT: retq
entry:
%res = call <2 x double> @llvm.x86.avx512.mask.move.sd(<2 x double> %__A, <2 x double> %__B, <2 x double> %__W, i8 %__U)
ret <2 x double> %res
}

define <2 x double> @test_mm_maskz_move_sd(i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B) {
; CHECK-LABEL: test_mm_maskz_move_sd:
; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovsd %xmm1, %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
entry:
%res = call <2 x double> @llvm.x86.avx512.mask.move.sd(<2 x double> %__A, <2 x double> %__B, <2 x double> zeroinitializer, i8 %__U)
ret <2 x double> %res
}

declare <4 x float> @llvm.x86.avx512.mask.move.ss(<4 x float>, <4 x float>, <4 x float>, i8)
declare <2 x double> @llvm.x86.avx512.mask.move.sd(<2 x double>, <2 x double>, <2 x double>, i8)
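
; Masked zero extension (vpmovzx*) and sign extension (vpmovsx*) from byte, word, and dword sources to i32/i64 elements.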
declare <16 x i32> @llvm.x86.avx512.mask.pmovzxb.d.512(<16 x i8>, <16 x i32>, i16)

define <16 x i32>@test_int_x86_avx512_mask_pmovzxb_d_512(<16 x i8> %x0, <16 x i32> %x1, i16 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovzxb_d_512:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovzxbd {{.*#+}} zmm2 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpmovzxbd {{.*#+}} zmm1 {%k1} = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; CHECK-NEXT: vpmovzxbd {{.*#+}} zmm0 {%k1} {z} = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; CHECK-NEXT: vpaddd %zmm0, %zmm1, %zmm0
; CHECK-NEXT: vpaddd %zmm2, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.mask.pmovzxb.d.512(<16 x i8> %x0, <16 x i32> %x1, i16 %x2)
%res1 = call <16 x i32> @llvm.x86.avx512.mask.pmovzxb.d.512(<16 x i8> %x0, <16 x i32> zeroinitializer, i16 %x2)
%res2 = call <16 x i32> @llvm.x86.avx512.mask.pmovzxb.d.512(<16 x i8> %x0, <16 x i32> %x1, i16 -1)
%res3 = add <16 x i32> %res, %res1
%res4 = add <16 x i32> %res3, %res2
ret <16 x i32> %res4
}

declare <8 x i64> @llvm.x86.avx512.mask.pmovzxb.q.512(<16 x i8>, <8 x i64>, i8)

define <8 x i64>@test_int_x86_avx512_mask_pmovzxb_q_512(<16 x i8> %x0, <8 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovzxb_q_512:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovzxbq {{.*#+}} zmm2 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero,xmm0[4],zero,zero,zero,zero,zero,zero,zero,xmm0[5],zero,zero,zero,zero,zero,zero,zero,xmm0[6],zero,zero,zero,zero,zero,zero,zero,xmm0[7],zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpmovzxbq {{.*#+}} zmm1 {%k1} = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero,xmm0[4],zero,zero,zero,zero,zero,zero,zero,xmm0[5],zero,zero,zero,zero,zero,zero,zero,xmm0[6],zero,zero,zero,zero,zero,zero,zero,xmm0[7],zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT: vpmovzxbq {{.*#+}} zmm0 {%k1} {z} = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero,xmm0[4],zero,zero,zero,zero,zero,zero,zero,xmm0[5],zero,zero,zero,zero,zero,zero,zero,xmm0[6],zero,zero,zero,zero,zero,zero,zero,xmm0[7],zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT: vpaddq %zmm0, %zmm1, %zmm0
; CHECK-NEXT: vpaddq %zmm2, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.mask.pmovzxb.q.512(<16 x i8> %x0, <8 x i64> %x1, i8 %x2)
%res1 = call <8 x i64> @llvm.x86.avx512.mask.pmovzxb.q.512(<16 x i8> %x0, <8 x i64> zeroinitializer, i8 %x2)
%res2 = call <8 x i64> @llvm.x86.avx512.mask.pmovzxb.q.512(<16 x i8> %x0, <8 x i64> %x1, i8 -1)
%res3 = add <8 x i64> %res, %res1
%res4 = add <8 x i64> %res3, %res2
ret <8 x i64> %res4
}

declare <8 x i64> @llvm.x86.avx512.mask.pmovzxd.q.512(<8 x i32>, <8 x i64>, i8)

define <8 x i64>@test_int_x86_avx512_mask_pmovzxd_q_512(<8 x i32> %x0, <8 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovzxd_q_512:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovzxdq {{.*#+}} zmm2 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpmovzxdq {{.*#+}} zmm1 {%k1} = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
; CHECK-NEXT: vpmovzxdq {{.*#+}} zmm0 {%k1} {z} = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
; CHECK-NEXT: vpaddq %zmm0, %zmm1, %zmm0
; CHECK-NEXT: vpaddq %zmm2, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.mask.pmovzxd.q.512(<8 x i32> %x0, <8 x i64> %x1, i8 %x2)
%res1 = call <8 x i64> @llvm.x86.avx512.mask.pmovzxd.q.512(<8 x i32> %x0, <8 x i64> zeroinitializer, i8 %x2)
%res2 = call <8 x i64> @llvm.x86.avx512.mask.pmovzxd.q.512(<8 x i32> %x0, <8 x i64> %x1, i8 -1)
%res3 = add <8 x i64> %res, %res1
%res4 = add <8 x i64> %res3, %res2
ret <8 x i64> %res4
}

declare <16 x i32> @llvm.x86.avx512.mask.pmovzxw.d.512(<16 x i16>, <16 x i32>, i16)

define <16 x i32>@test_int_x86_avx512_mask_pmovzxw_d_512(<16 x i16> %x0, <16 x i32> %x1, i16 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovzxw_d_512:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovzxwd {{.*#+}} zmm2 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpmovzxwd {{.*#+}} zmm1 {%k1} = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; CHECK-NEXT: vpmovzxwd {{.*#+}} zmm0 {%k1} {z} = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; CHECK-NEXT: vpaddd %zmm0, %zmm1, %zmm0
; CHECK-NEXT: vpaddd %zmm2, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.mask.pmovzxw.d.512(<16 x i16> %x0, <16 x i32> %x1, i16 %x2)
%res1 = call <16 x i32> @llvm.x86.avx512.mask.pmovzxw.d.512(<16 x i16> %x0, <16 x i32> zeroinitializer, i16 %x2)
%res2 = call <16 x i32> @llvm.x86.avx512.mask.pmovzxw.d.512(<16 x i16> %x0, <16 x i32> %x1, i16 -1)
%res3 = add <16 x i32> %res, %res1
%res4 = add <16 x i32> %res3, %res2
ret <16 x i32> %res4
}

declare <8 x i64> @llvm.x86.avx512.mask.pmovzxw.q.512(<8 x i16>, <8 x i64>, i8)

define <8 x i64>@test_int_x86_avx512_mask_pmovzxw_q_512(<8 x i16> %x0, <8 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovzxw_q_512:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovzxwq {{.*#+}} zmm2 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpmovzxwq {{.*#+}} zmm1 {%k1} = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
; CHECK-NEXT: vpmovzxwq {{.*#+}} zmm0 {%k1} {z} = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
; CHECK-NEXT: vpaddq %zmm0, %zmm1, %zmm0
; CHECK-NEXT: vpaddq %zmm2, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.mask.pmovzxw.q.512(<8 x i16> %x0, <8 x i64> %x1, i8 %x2)
%res1 = call <8 x i64> @llvm.x86.avx512.mask.pmovzxw.q.512(<8 x i16> %x0, <8 x i64> zeroinitializer, i8 %x2)
%res2 = call <8 x i64> @llvm.x86.avx512.mask.pmovzxw.q.512(<8 x i16> %x0, <8 x i64> %x1, i8 -1)
%res3 = add <8 x i64> %res, %res1
%res4 = add <8 x i64> %res3, %res2
ret <8 x i64> %res4
}

declare <16 x i32> @llvm.x86.avx512.mask.pmovsxb.d.512(<16 x i8>, <16 x i32>, i16)

define <16 x i32>@test_int_x86_avx512_mask_pmovsxb_d_512(<16 x i8> %x0, <16 x i32> %x1, i16 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovsxb_d_512:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovsxbd %xmm0, %zmm2
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpmovsxbd %xmm0, %zmm1 {%k1}
; CHECK-NEXT: vpmovsxbd %xmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: vpaddd %zmm0, %zmm1, %zmm0
; CHECK-NEXT: vpaddd %zmm2, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.mask.pmovsxb.d.512(<16 x i8> %x0, <16 x i32> %x1, i16 %x2)
%res1 = call <16 x i32> @llvm.x86.avx512.mask.pmovsxb.d.512(<16 x i8> %x0, <16 x i32> zeroinitializer, i16 %x2)
%res2 = call <16 x i32> @llvm.x86.avx512.mask.pmovsxb.d.512(<16 x i8> %x0, <16 x i32> %x1, i16 -1)
%res3 = add <16 x i32> %res, %res1
%res4 = add <16 x i32> %res3, %res2
ret <16 x i32> %res4
}

declare <8 x i64> @llvm.x86.avx512.mask.pmovsxb.q.512(<16 x i8>, <8 x i64>, i8)

define <8 x i64>@test_int_x86_avx512_mask_pmovsxb_q_512(<16 x i8> %x0, <8 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovsxb_q_512:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovsxbq %xmm0, %zmm2
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpmovsxbq %xmm0, %zmm1 {%k1}
; CHECK-NEXT: vpmovsxbq %xmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: vpaddq %zmm0, %zmm1, %zmm0
; CHECK-NEXT: vpaddq %zmm2, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.mask.pmovsxb.q.512(<16 x i8> %x0, <8 x i64> %x1, i8 %x2)
%res1 = call <8 x i64> @llvm.x86.avx512.mask.pmovsxb.q.512(<16 x i8> %x0, <8 x i64> zeroinitializer, i8 %x2)
%res2 = call <8 x i64> @llvm.x86.avx512.mask.pmovsxb.q.512(<16 x i8> %x0, <8 x i64> %x1, i8 -1)
%res3 = add <8 x i64> %res, %res1
%res4 = add <8 x i64> %res3, %res2
ret <8 x i64> %res4
}

declare <8 x i64> @llvm.x86.avx512.mask.pmovsxd.q.512(<8 x i32>, <8 x i64>, i8)

define <8 x i64>@test_int_x86_avx512_mask_pmovsxd_q_512(<8 x i32> %x0, <8 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovsxd_q_512:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovsxdq %ymm0, %zmm2
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpmovsxdq %ymm0, %zmm1 {%k1}
; CHECK-NEXT: vpmovsxdq %ymm0, %zmm0 {%k1} {z}
; CHECK-NEXT: vpaddq %zmm0, %zmm1, %zmm0
; CHECK-NEXT: vpaddq %zmm2, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.mask.pmovsxd.q.512(<8 x i32> %x0, <8 x i64> %x1, i8 %x2)
%res1 = call <8 x i64> @llvm.x86.avx512.mask.pmovsxd.q.512(<8 x i32> %x0, <8 x i64> zeroinitializer, i8 %x2)
%res2 = call <8 x i64> @llvm.x86.avx512.mask.pmovsxd.q.512(<8 x i32> %x0, <8 x i64> %x1, i8 -1)
%res3 = add <8 x i64> %res, %res1
%res4 = add <8 x i64> %res3, %res2
ret <8 x i64> %res4
}


declare <16 x i32> @llvm.x86.avx512.mask.pmovsxw.d.512(<16 x i16>, <16 x i32>, i16)

define <16 x i32>@test_int_x86_avx512_mask_pmovsxw_d_512(<16 x i16> %x0, <16 x i32> %x1, i16 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovsxw_d_512:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovsxwd %ymm0, %zmm2
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpmovsxwd %ymm0, %zmm1 {%k1}
; CHECK-NEXT: vpmovsxwd %ymm0, %zmm0 {%k1} {z}
; CHECK-NEXT: vpaddd %zmm0, %zmm1, %zmm0
; CHECK-NEXT: vpaddd %zmm2, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.mask.pmovsxw.d.512(<16 x i16> %x0, <16 x i32> %x1, i16 %x2)
%res1 = call <16 x i32> @llvm.x86.avx512.mask.pmovsxw.d.512(<16 x i16> %x0, <16 x i32> zeroinitializer, i16 %x2)
%res2 = call <16 x i32> @llvm.x86.avx512.mask.pmovsxw.d.512(<16 x i16> %x0, <16 x i32> %x1, i16 -1)
%res3 = add <16 x i32> %res, %res1
%res4 = add <16 x i32> %res3, %res2
ret <16 x i32> %res4
}


declare <8 x i64> @llvm.x86.avx512.mask.pmovsxw.q.512(<8 x i16>, <8 x i64>, i8)

define <8 x i64>@test_int_x86_avx512_mask_pmovsxw_q_512(<8 x i16> %x0, <8 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pmovsxw_q_512:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovsxwq %xmm0, %zmm2
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpmovsxwq %xmm0, %zmm1 {%k1}
; CHECK-NEXT: vpmovsxwq %xmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: vpaddq %zmm0, %zmm1, %zmm0
; CHECK-NEXT: vpaddq %zmm2, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.mask.pmovsxw.q.512(<8 x i16> %x0, <8 x i64> %x1, i8 %x2)
%res1 = call <8 x i64> @llvm.x86.avx512.mask.pmovsxw.q.512(<8 x i16> %x0, <8 x i64> zeroinitializer, i8 %x2)
%res2 = call <8 x i64> @llvm.x86.avx512.mask.pmovsxw.q.512(<8 x i16> %x0, <8 x i64> %x1, i8 -1)
%res3 = add <8 x i64> %res, %res1
%res4 = add <8 x i64> %res3, %res2
ret <8 x i64> %res4
}
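
; Masked shift-by-immediate: logical right (vpsrld/vpsrlq), arithmetic right (vpsrad/vpsraq), and left (vpslld/vpsllq).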
declare <8 x i64> @llvm.x86.avx512.mask.psrl.qi.512(<8 x i64>, i32, <8 x i64>, i8)

define <8 x i64>@test_int_x86_avx512_mask_psrl_qi_512(<8 x i64> %x0, i32 %x1, <8 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_psrl_qi_512:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpsrlq $4, %zmm0, %zmm2
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpsrlq $4, %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vpaddq %zmm2, %zmm1, %zmm1
; CHECK-NEXT: vpsrlq $4, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: vpaddq %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.mask.psrl.qi.512(<8 x i64> %x0, i32 4, <8 x i64> %x2, i8 %x3)
%res1 = call <8 x i64> @llvm.x86.avx512.mask.psrl.qi.512(<8 x i64> %x0, i32 4, <8 x i64> %x2, i8 -1)
%res2 = call <8 x i64> @llvm.x86.avx512.mask.psrl.qi.512(<8 x i64> %x0, i32 4, <8 x i64> zeroinitializer, i8 %x3)
%res3 = add <8 x i64> %res, %res1
%res4 = add <8 x i64> %res3, %res2
ret <8 x i64> %res4
}

declare <16 x i32> @llvm.x86.avx512.mask.psrl.di.512(<16 x i32>, i32, <16 x i32>, i16)

define <16 x i32>@test_int_x86_avx512_mask_psrl_di_512(<16 x i32> %x0, i32 %x1, <16 x i32> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_psrl_di_512:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpsrld $4, %zmm0, %zmm2
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpsrld $4, %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vpaddd %zmm2, %zmm1, %zmm1
; CHECK-NEXT: vpsrld $4, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: vpaddd %zmm0, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.mask.psrl.di.512(<16 x i32> %x0, i32 4, <16 x i32> %x2, i16 %x3)
%res1 = call <16 x i32> @llvm.x86.avx512.mask.psrl.di.512(<16 x i32> %x0, i32 4, <16 x i32> %x2, i16 -1)
%res2 = call <16 x i32> @llvm.x86.avx512.mask.psrl.di.512(<16 x i32> %x0, i32 4, <16 x i32> zeroinitializer, i16 %x3)
%res3 = add <16 x i32> %res, %res1
%res4 = add <16 x i32> %res3, %res2
ret <16 x i32> %res4
}

declare <16 x i32> @llvm.x86.avx512.mask.psra.di.512(<16 x i32>, i32, <16 x i32>, i16)

define <16 x i32>@test_int_x86_avx512_mask_psra_di_512(<16 x i32> %x0, i32 %x1, <16 x i32> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_psra_di_512:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpsrad $3, %zmm0, %zmm2
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpsrad $3, %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vpsrad $3, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: vpaddd %zmm0, %zmm1, %zmm0
; CHECK-NEXT: vpaddd %zmm2, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.mask.psra.di.512(<16 x i32> %x0, i32 3, <16 x i32> %x2, i16 %x3)
%res1 = call <16 x i32> @llvm.x86.avx512.mask.psra.di.512(<16 x i32> %x0, i32 3, <16 x i32> zeroinitializer, i16 %x3)
%res2 = call <16 x i32> @llvm.x86.avx512.mask.psra.di.512(<16 x i32> %x0, i32 3, <16 x i32> %x2, i16 -1)
%res3 = add <16 x i32> %res, %res1
%res4 = add <16 x i32> %res3, %res2
ret <16 x i32> %res4
}

declare <8 x i64> @llvm.x86.avx512.mask.psra.qi.512(<8 x i64>, i32, <8 x i64>, i8)

define <8 x i64>@test_int_x86_avx512_mask_psra_qi_512(<8 x i64> %x0, i32 %x1, <8 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_psra_qi_512:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpsraq $3, %zmm0, %zmm2
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpsraq $3, %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vpsraq $3, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: vpaddq %zmm0, %zmm1, %zmm0
; CHECK-NEXT: vpaddq %zmm2, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.mask.psra.qi.512(<8 x i64> %x0, i32 3, <8 x i64> %x2, i8 %x3)
%res1 = call <8 x i64> @llvm.x86.avx512.mask.psra.qi.512(<8 x i64> %x0, i32 3, <8 x i64> zeroinitializer, i8 %x3)
%res2 = call <8 x i64> @llvm.x86.avx512.mask.psra.qi.512(<8 x i64> %x0, i32 3, <8 x i64> %x2, i8 -1)
%res3 = add <8 x i64> %res, %res1
%res4 = add <8 x i64> %res3, %res2
ret <8 x i64> %res4
}

declare <16 x i32> @llvm.x86.avx512.mask.psll.di.512(<16 x i32>, i32, <16 x i32>, i16)

define <16 x i32>@test_int_x86_avx512_mask_psll_di_512(<16 x i32> %x0, i32 %x1, <16 x i32> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_psll_di_512:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpslld $3, %zmm0, %zmm2
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpslld $3, %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vpslld $3, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: vpaddd %zmm0, %zmm1, %zmm0
; CHECK-NEXT: vpaddd %zmm2, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.mask.psll.di.512(<16 x i32> %x0, i32 3, <16 x i32> %x2, i16 %x3)
%res1 = call <16 x i32> @llvm.x86.avx512.mask.psll.di.512(<16 x i32> %x0, i32 3, <16 x i32> zeroinitializer, i16 %x3)
%res2 = call <16 x i32> @llvm.x86.avx512.mask.psll.di.512(<16 x i32> %x0, i32 3, <16 x i32> %x2, i16 -1)
%res3 = add <16 x i32> %res, %res1
%res4 = add <16 x i32> %res3, %res2
ret <16 x i32> %res4
}

declare <8 x i64> @llvm.x86.avx512.mask.psll.qi.512(<8 x i64>, i32, <8 x i64>, i8)

define <8 x i64>@test_int_x86_avx512_mask_psll_qi_512(<8 x i64> %x0, i32 %x1, <8 x i64> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_psll_qi_512:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllq $3, %zmm0, %zmm2
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpsllq $3, %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vpsllq $3, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: vpaddq %zmm0, %zmm1, %zmm0
; CHECK-NEXT: vpaddq %zmm2, %zmm0, %zmm0
; CHECK-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.mask.psll.qi.512(<8 x i64> %x0, i32 3, <8 x i64> %x2, i8 %x3)
%res1 = call <8 x i64> @llvm.x86.avx512.mask.psll.qi.512(<8 x i64> %x0, i32 3, <8 x i64> zeroinitializer, i8 %x3)
%res2 = call <8 x i64> @llvm.x86.avx512.mask.psll.qi.512(<8 x i64> %x0, i32 3, <8 x i64> %x2, i8 -1)
%res3 = add <8 x i64> %res, %res1
%res4 = add <8 x i64> %res3, %res2
ret <8 x i64> %res4
}
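
; Masked shifts where the count comes from an XMM register rather than an immediate.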
define <16 x i32> @test_x86_avx512_psll_d(<16 x i32> %a0, <4 x i32> %a1) {
; CHECK-LABEL: test_x86_avx512_psll_d:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpslld %xmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
  %res = call <16 x i32> @llvm.x86.avx512.mask.psll.d(<16 x i32> %a0, <4 x i32> %a1, <16 x i32> zeroinitializer, i16 -1)
  ret <16 x i32> %res
}

define <16 x i32> @test_x86_avx512_mask_psll_d(<16 x i32> %a0, <4 x i32> %a1, <16 x i32> %a2, i16 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_psll_d:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpslld %xmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
; CHECK-NEXT: retq
  %res = call <16 x i32> @llvm.x86.avx512.mask.psll.d(<16 x i32> %a0, <4 x i32> %a1, <16 x i32> %a2, i16 %mask)
  ret <16 x i32> %res
}

define <16 x i32> @test_x86_avx512_maskz_psll_d(<16 x i32> %a0, <4 x i32> %a1, i16 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_psll_d:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpslld %xmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
  %res = call <16 x i32> @llvm.x86.avx512.mask.psll.d(<16 x i32> %a0, <4 x i32> %a1, <16 x i32> zeroinitializer, i16 %mask)
  ret <16 x i32> %res
}

declare <16 x i32> @llvm.x86.avx512.mask.psll.d(<16 x i32>, <4 x i32>, <16 x i32>, i16) nounwind readnone

define <8 x i64> @test_x86_avx512_psll_q(<8 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: test_x86_avx512_psll_q:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllq %xmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
  %res = call <8 x i64> @llvm.x86.avx512.mask.psll.q(<8 x i64> %a0, <2 x i64> %a1, <8 x i64> zeroinitializer, i8 -1)
  ret <8 x i64> %res
}

define <8 x i64> @test_x86_avx512_mask_psll_q(<8 x i64> %a0, <2 x i64> %a1, <8 x i64> %a2, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_psll_q:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsllq %xmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
; CHECK-NEXT: retq
  %res = call <8 x i64> @llvm.x86.avx512.mask.psll.q(<8 x i64> %a0, <2 x i64> %a1, <8 x i64> %a2, i8 %mask)
  ret <8 x i64> %res
}

define <8 x i64> @test_x86_avx512_maskz_psll_q(<8 x i64> %a0, <2 x i64> %a1, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_psll_q:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsllq %xmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
  %res = call <8 x i64> @llvm.x86.avx512.mask.psll.q(<8 x i64> %a0, <2 x i64> %a1, <8 x i64> zeroinitializer, i8 %mask)
  ret <8 x i64> %res
}

declare <8 x i64> @llvm.x86.avx512.mask.psll.q(<8 x i64>, <2 x i64>, <8 x i64>, i8) nounwind readnone

define <16 x i32> @test_x86_avx512_psrl_d(<16 x i32> %a0, <4 x i32> %a1) {
; CHECK-LABEL: test_x86_avx512_psrl_d:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpsrld %xmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
  %res = call <16 x i32> @llvm.x86.avx512.mask.psrl.d(<16 x i32> %a0, <4 x i32> %a1, <16 x i32> zeroinitializer, i16 -1)
  ret <16 x i32> %res
}

define <16 x i32> @test_x86_avx512_mask_psrl_d(<16 x i32> %a0, <4 x i32> %a1, <16 x i32> %a2, i16 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_psrl_d:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsrld %xmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
; CHECK-NEXT: retq
  %res = call <16 x i32> @llvm.x86.avx512.mask.psrl.d(<16 x i32> %a0, <4 x i32> %a1, <16 x i32> %a2, i16 %mask)
  ret <16 x i32> %res
}

define <16 x i32> @test_x86_avx512_maskz_psrl_d(<16 x i32> %a0, <4 x i32> %a1, i16 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_psrl_d:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsrld %xmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
  %res = call <16 x i32> @llvm.x86.avx512.mask.psrl.d(<16 x i32> %a0, <4 x i32> %a1, <16 x i32> zeroinitializer, i16 %mask)
  ret <16 x i32> %res
}

declare <16 x i32> @llvm.x86.avx512.mask.psrl.d(<16 x i32>, <4 x i32>, <16 x i32>, i16) nounwind readnone

define <8 x i64> @test_x86_avx512_psrl_q(<8 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: test_x86_avx512_psrl_q:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpsrlq %xmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
  %res = call <8 x i64> @llvm.x86.avx512.mask.psrl.q(<8 x i64> %a0, <2 x i64> %a1, <8 x i64> zeroinitializer, i8 -1)
  ret <8 x i64> %res
}

define <8 x i64> @test_x86_avx512_mask_psrl_q(<8 x i64> %a0, <2 x i64> %a1, <8 x i64> %a2, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_psrl_q:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsrlq %xmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
; CHECK-NEXT: retq
  %res = call <8 x i64> @llvm.x86.avx512.mask.psrl.q(<8 x i64> %a0, <2 x i64> %a1, <8 x i64> %a2, i8 %mask)
  ret <8 x i64> %res
}

define <8 x i64> @test_x86_avx512_maskz_psrl_q(<8 x i64> %a0, <2 x i64> %a1, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_psrl_q:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsrlq %xmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
  %res = call <8 x i64> @llvm.x86.avx512.mask.psrl.q(<8 x i64> %a0, <2 x i64> %a1, <8 x i64> zeroinitializer, i8 %mask)
  ret <8 x i64> %res
}

declare <8 x i64> @llvm.x86.avx512.mask.psrl.q(<8 x i64>, <2 x i64>, <8 x i64>, i8) nounwind readnone

define <16 x i32> @test_x86_avx512_psra_d(<16 x i32> %a0, <4 x i32> %a1) {
; CHECK-LABEL: test_x86_avx512_psra_d:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpsrad %xmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
  %res = call <16 x i32> @llvm.x86.avx512.mask.psra.d(<16 x i32> %a0, <4 x i32> %a1, <16 x i32> zeroinitializer, i16 -1)
  ret <16 x i32> %res
}

define <16 x i32> @test_x86_avx512_mask_psra_d(<16 x i32> %a0, <4 x i32> %a1, <16 x i32> %a2, i16 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_psra_d:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsrad %xmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
; CHECK-NEXT: retq
  %res = call <16 x i32> @llvm.x86.avx512.mask.psra.d(<16 x i32> %a0, <4 x i32> %a1, <16 x i32> %a2, i16 %mask)
  ret <16 x i32> %res
}

define <16 x i32> @test_x86_avx512_maskz_psra_d(<16 x i32> %a0, <4 x i32> %a1, i16 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_psra_d:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsrad %xmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
  %res = call <16 x i32> @llvm.x86.avx512.mask.psra.d(<16 x i32> %a0, <4 x i32> %a1, <16 x i32> zeroinitializer, i16 %mask)
  ret <16 x i32> %res
}

declare <16 x i32> @llvm.x86.avx512.mask.psra.d(<16 x i32>, <4 x i32>, <16 x i32>, i16) nounwind readnone

define <8 x i64> @test_x86_avx512_psra_q(<8 x i64> %a0, <2 x i64> %a1) {
; CHECK-LABEL: test_x86_avx512_psra_q:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpsraq %xmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
  %res = call <8 x i64> @llvm.x86.avx512.mask.psra.q(<8 x i64> %a0, <2 x i64> %a1, <8 x i64> zeroinitializer, i8 -1)
  ret <8 x i64> %res
}

define <8 x i64> @test_x86_avx512_mask_psra_q(<8 x i64> %a0, <2 x i64> %a1, <8 x i64> %a2, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_psra_q:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsraq %xmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
; CHECK-NEXT: retq
  %res = call <8 x i64> @llvm.x86.avx512.mask.psra.q(<8 x i64> %a0, <2 x i64> %a1, <8 x i64> %a2, i8 %mask)
  ret <8 x i64> %res
}

define <8 x i64> @test_x86_avx512_maskz_psra_q(<8 x i64> %a0, <2 x i64> %a1, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_psra_q:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsraq %xmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
  %res = call <8 x i64> @llvm.x86.avx512.mask.psra.q(<8 x i64> %a0, <2 x i64> %a1, <8 x i64> zeroinitializer, i8 %mask)
  ret <8 x i64> %res
}

declare <8 x i64> @llvm.x86.avx512.mask.psra.q(<8 x i64>, <2 x i64>, <8 x i64>, i8) nounwind readnone

define <16 x i32> @test_x86_avx512_psllv_d(<16 x i32> %a0, <16 x i32> %a1) {
; CHECK-LABEL: test_x86_avx512_psllv_d:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllvd %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
  %res = call <16 x i32> @llvm.x86.avx512.mask.psllv.d(<16 x i32> %a0, <16 x i32> %a1, <16 x i32> zeroinitializer, i16 -1)
  ret <16 x i32> %res
}

define <16 x i32> @test_x86_avx512_mask_psllv_d(<16 x i32> %a0, <16 x i32> %a1, <16 x i32> %a2, i16 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_psllv_d:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsllvd %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
; CHECK-NEXT: retq
  %res = call <16 x i32> @llvm.x86.avx512.mask.psllv.d(<16 x i32> %a0, <16 x i32> %a1, <16 x i32> %a2, i16 %mask)
  ret <16 x i32> %res
}

define <16 x i32> @test_x86_avx512_maskz_psllv_d(<16 x i32> %a0, <16 x i32> %a1, i16 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_psllv_d:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsllvd %zmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
  %res = call <16 x i32> @llvm.x86.avx512.mask.psllv.d(<16 x i32> %a0, <16 x i32> %a1, <16 x i32> zeroinitializer, i16 %mask)
  ret <16 x i32> %res
}

declare <16 x i32> @llvm.x86.avx512.mask.psllv.d(<16 x i32>, <16 x i32>, <16 x i32>, i16) nounwind readnone

define <8 x i64> @test_x86_avx512_psllv_q(<8 x i64> %a0, <8 x i64> %a1) {
; CHECK-LABEL: test_x86_avx512_psllv_q:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllvq %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
  %res = call <8 x i64> @llvm.x86.avx512.mask.psllv.q(<8 x i64> %a0, <8 x i64> %a1, <8 x i64> zeroinitializer, i8 -1)
  ret <8 x i64> %res
}

define <8 x i64> @test_x86_avx512_mask_psllv_q(<8 x i64> %a0, <8 x i64> %a1, <8 x i64> %a2, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_psllv_q:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsllvq %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
; CHECK-NEXT: retq
  %res = call <8 x i64> @llvm.x86.avx512.mask.psllv.q(<8 x i64> %a0, <8 x i64> %a1, <8 x i64> %a2, i8 %mask)
  ret <8 x i64> %res
}

define <8 x i64> @test_x86_avx512_maskz_psllv_q(<8 x i64> %a0, <8 x i64> %a1, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_psllv_q:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsllvq %zmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
  %res = call <8 x i64> @llvm.x86.avx512.mask.psllv.q(<8 x i64> %a0, <8 x i64> %a1, <8 x i64> zeroinitializer, i8 %mask)
  ret <8 x i64> %res
}

declare <8 x i64> @llvm.x86.avx512.mask.psllv.q(<8 x i64>, <8 x i64>, <8 x i64>, i8) nounwind readnone


define <16 x i32> @test_x86_avx512_psrav_d(<16 x i32> %a0, <16 x i32> %a1) {
; CHECK-LABEL: test_x86_avx512_psrav_d:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpsravd %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
  %res = call <16 x i32> @llvm.x86.avx512.mask.psrav.d(<16 x i32> %a0, <16 x i32> %a1, <16 x i32> zeroinitializer, i16 -1)
  ret <16 x i32> %res
}

define <16 x i32> @test_x86_avx512_mask_psrav_d(<16 x i32> %a0, <16 x i32> %a1, <16 x i32> %a2, i16 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_psrav_d:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsravd %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
; CHECK-NEXT: retq
  %res = call <16 x i32> @llvm.x86.avx512.mask.psrav.d(<16 x i32> %a0, <16 x i32> %a1, <16 x i32> %a2, i16 %mask)
  ret <16 x i32> %res
}

define <16 x i32> @test_x86_avx512_maskz_psrav_d(<16 x i32> %a0, <16 x i32> %a1, i16 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_psrav_d:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsravd %zmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
  %res = call <16 x i32> @llvm.x86.avx512.mask.psrav.d(<16 x i32> %a0, <16 x i32> %a1, <16 x i32> zeroinitializer, i16 %mask)
  ret <16 x i32> %res
}

declare <16 x i32> @llvm.x86.avx512.mask.psrav.d(<16 x i32>, <16 x i32>, <16 x i32>, i16) nounwind readnone

define <8 x i64> @test_x86_avx512_psrav_q(<8 x i64> %a0, <8 x i64> %a1) {
; CHECK-LABEL: test_x86_avx512_psrav_q:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpsravq %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
  %res = call <8 x i64> @llvm.x86.avx512.mask.psrav.q(<8 x i64> %a0, <8 x i64> %a1, <8 x i64> zeroinitializer, i8 -1)
  ret <8 x i64> %res
}

define <8 x i64> @test_x86_avx512_mask_psrav_q(<8 x i64> %a0, <8 x i64> %a1, <8 x i64> %a2, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_psrav_q:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsravq %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
; CHECK-NEXT: retq
  %res = call <8 x i64> @llvm.x86.avx512.mask.psrav.q(<8 x i64> %a0, <8 x i64> %a1, <8 x i64> %a2, i8 %mask)
  ret <8 x i64> %res
}

define <8 x i64> @test_x86_avx512_maskz_psrav_q(<8 x i64> %a0, <8 x i64> %a1, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_psrav_q:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsravq %zmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
  %res = call <8 x i64> @llvm.x86.avx512.mask.psrav.q(<8 x i64> %a0, <8 x i64> %a1, <8 x i64> zeroinitializer, i8 %mask)
  ret <8 x i64> %res
}

declare <8 x i64> @llvm.x86.avx512.mask.psrav.q(<8 x i64>, <8 x i64>, <8 x i64>, i8) nounwind readnone

define <16 x i32> @test_x86_avx512_psrlv_d(<16 x i32> %a0, <16 x i32> %a1) {
; CHECK-LABEL: test_x86_avx512_psrlv_d:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpsrlvd %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
  %res = call <16 x i32> @llvm.x86.avx512.mask.psrlv.d(<16 x i32> %a0, <16 x i32> %a1, <16 x i32> zeroinitializer, i16 -1)
  ret <16 x i32> %res
}

define <16 x i32> @test_x86_avx512_mask_psrlv_d(<16 x i32> %a0, <16 x i32> %a1, <16 x i32> %a2, i16 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_psrlv_d:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsrlvd %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
; CHECK-NEXT: retq
  %res = call <16 x i32> @llvm.x86.avx512.mask.psrlv.d(<16 x i32> %a0, <16 x i32> %a1, <16 x i32> %a2, i16 %mask)
  ret <16 x i32> %res
}

define <16 x i32> @test_x86_avx512_maskz_psrlv_d(<16 x i32> %a0, <16 x i32> %a1, i16 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_psrlv_d:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsrlvd %zmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
  %res = call <16 x i32> @llvm.x86.avx512.mask.psrlv.d(<16 x i32> %a0, <16 x i32> %a1, <16 x i32> zeroinitializer, i16 %mask)
  ret <16 x i32> %res
}

declare <16 x i32> @llvm.x86.avx512.mask.psrlv.d(<16 x i32>, <16 x i32>, <16 x i32>, i16) nounwind readnone

define <8 x i64> @test_x86_avx512_psrlv_q(<8 x i64> %a0, <8 x i64> %a1) {
; CHECK-LABEL: test_x86_avx512_psrlv_q:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpsrlvq %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
  %res = call <8 x i64> @llvm.x86.avx512.mask.psrlv.q(<8 x i64> %a0, <8 x i64> %a1, <8 x i64> zeroinitializer, i8 -1)
  ret <8 x i64> %res
}

define <8 x i64> @test_x86_avx512_mask_psrlv_q(<8 x i64> %a0, <8 x i64> %a1, <8 x i64> %a2, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_mask_psrlv_q:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsrlvq %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
; CHECK-NEXT: retq
  %res = call <8 x i64> @llvm.x86.avx512.mask.psrlv.q(<8 x i64> %a0, <8 x i64> %a1, <8 x i64> %a2, i8 %mask)
  ret <8 x i64> %res
}

define <8 x i64> @test_x86_avx512_maskz_psrlv_q(<8 x i64> %a0, <8 x i64> %a1, i8 %mask) {
; CHECK-LABEL: test_x86_avx512_maskz_psrlv_q:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpsrlvq %zmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
  %res = call <8 x i64> @llvm.x86.avx512.mask.psrlv.q(<8 x i64> %a0, <8 x i64> %a1, <8 x i64> zeroinitializer, i8 %mask)
  ret <8 x i64> %res
}

declare <8 x i64> @llvm.x86.avx512.mask.psrlv.q(<8 x i64>, <8 x i64>, <8 x i64>, i8) nounwind readnone

define <8 x i64> @test_x86_avx512_psrlv_q_memop(<8 x i64> %a0, <8 x i64>* %ptr) {
; CHECK-LABEL: test_x86_avx512_psrlv_q_memop:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpsrlvq (%rdi), %zmm0, %zmm0
; CHECK-NEXT: retq
  %b = load <8 x i64>, <8 x i64>* %ptr
  %res = call <8 x i64> @llvm.x86.avx512.mask.psrlv.q(<8 x i64> %a0, <8 x i64> %b, <8 x i64> zeroinitializer, i8 -1)
  ret <8 x i64> %res
}

declare <8 x double> @llvm.x86.avx512.mask.cvtdq2pd.512(<8 x i32>, <8 x double>, i8)

define <8 x double>@test_int_x86_avx512_mask_cvt_dq2pd_512(<8 x i32> %x0, <8 x double> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_dq2pd_512:
; CHECK: ## %bb.0:
; CHECK-NEXT: vcvtdq2pd %ymm0, %zmm2
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vcvtdq2pd %ymm0, %zmm1 {%k1}
; CHECK-NEXT: vaddpd %zmm2, %zmm1, %zmm0
; CHECK-NEXT: retq
  %res = call <8 x double> @llvm.x86.avx512.mask.cvtdq2pd.512(<8 x i32> %x0, <8 x double> %x1, i8 %x2)
  %res1 = call <8 x double> @llvm.x86.avx512.mask.cvtdq2pd.512(<8 x i32> %x0, <8 x double> %x1, i8 -1)
  %res2 = fadd <8 x double> %res, %res1
  ret <8 x double> %res2
}

declare <8 x double> @llvm.x86.avx512.mask.cvtudq2pd.512(<8 x i32>, <8 x double>, i8)

define <8 x double>@test_int_x86_avx512_mask_cvt_udq2pd_512(<8 x i32> %x0, <8 x double> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_cvt_udq2pd_512:
; CHECK: ## %bb.0:
; CHECK-NEXT: vcvtudq2pd %ymm0, %zmm2
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vcvtudq2pd %ymm0, %zmm1 {%k1}
; CHECK-NEXT: vaddpd %zmm2, %zmm1, %zmm0
; CHECK-NEXT: retq
  %res = call <8 x double> @llvm.x86.avx512.mask.cvtudq2pd.512(<8 x i32> %x0, <8 x double> %x1, i8 %x2)
  %res1 = call <8 x double> @llvm.x86.avx512.mask.cvtudq2pd.512(<8 x i32> %x0, <8 x double> %x1, i8 -1)
  %res2 = fadd <8 x double> %res, %res1
  ret <8 x double> %res2
}

define <8 x i64> @test_valign_q(<8 x i64> %a, <8 x i64> %b) {
; CHECK-LABEL: test_valign_q:
; CHECK: ## %bb.0:
; CHECK-NEXT: valignq {{.*#+}} zmm0 = zmm1[2,3,4,5,6,7],zmm0[0,1]
; CHECK-NEXT: retq
  %res = call <8 x i64> @llvm.x86.avx512.mask.valign.q.512(<8 x i64> %a, <8 x i64> %b, i32 2, <8 x i64> zeroinitializer, i8 -1)
  ret <8 x i64> %res
}

define <8 x i64> @test_mask_valign_q(<8 x i64> %a, <8 x i64> %b, <8 x i64> %src, i8 %mask) {
; CHECK-LABEL: test_mask_valign_q:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: valignq {{.*#+}} zmm2 {%k1} = zmm1[2,3,4,5,6,7],zmm0[0,1]
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
; CHECK-NEXT: retq
  %res = call <8 x i64> @llvm.x86.avx512.mask.valign.q.512(<8 x i64> %a, <8 x i64> %b, i32 2, <8 x i64> %src, i8 %mask)
  ret <8 x i64> %res
}

declare <8 x i64> @llvm.x86.avx512.mask.valign.q.512(<8 x i64>, <8 x i64>, i32, <8 x i64>, i8)

define <16 x i32> @test_maskz_valign_d(<16 x i32> %a, <16 x i32> %b, i16 %mask) {
; CHECK-LABEL: test_maskz_valign_d:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: valignd {{.*#+}} zmm0 {%k1} {z} = zmm1[5,6,7,8,9,10,11,12,13,14,15],zmm0[0,1,2,3,4]
; CHECK-NEXT: retq
  %res = call <16 x i32> @llvm.x86.avx512.mask.valign.d.512(<16 x i32> %a, <16 x i32> %b, i32 5, <16 x i32> zeroinitializer, i16 %mask)
  ret <16 x i32> %res
}

declare <16 x i32> @llvm.x86.avx512.mask.valign.d.512(<16 x i32>, <16 x i32>, i32, <16 x i32>, i16)

declare <8 x double> @llvm.x86.avx512.mask.vpermilvar.pd.512(<8 x double>, <8 x i64>, <8 x double>, i8)

define <8 x double>@test_int_x86_avx512_mask_vpermilvar_pd_512(<8 x double> %x0, <8 x i64> %x1, <8 x double> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpermilvar_pd_512:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpermilpd %zmm1, %zmm0, %zmm3
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpermilpd %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vpermilpd %zmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: vaddpd %zmm0, %zmm2, %zmm0
; CHECK-NEXT: vaddpd %zmm0, %zmm3, %zmm0
; CHECK-NEXT: retq
  %res = call <8 x double> @llvm.x86.avx512.mask.vpermilvar.pd.512(<8 x double> %x0, <8 x i64> %x1, <8 x double> %x2, i8 %x3)
  %res1 = call <8 x double> @llvm.x86.avx512.mask.vpermilvar.pd.512(<8 x double> %x0, <8 x i64> %x1, <8 x double> zeroinitializer, i8 %x3)
  %res2 = call <8 x double> @llvm.x86.avx512.mask.vpermilvar.pd.512(<8 x double> %x0, <8 x i64> %x1, <8 x double> %x2, i8 -1)
  %res3 = fadd <8 x double> %res, %res1
  %res4 = fadd <8 x double> %res2, %res3
  ret <8 x double> %res4
}

declare <16 x float> @llvm.x86.avx512.mask.vpermilvar.ps.512(<16 x float>, <16 x i32>, <16 x float>, i16)

define <16 x float>@test_int_x86_avx512_mask_vpermilvar_ps_512(<16 x float> %x0, <16 x i32> %x1, <16 x float> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpermilvar_ps_512:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpermilps %zmm1, %zmm0, %zmm3
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpermilps %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vpermilps %zmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: vaddps %zmm0, %zmm2, %zmm0
; CHECK-NEXT: vaddps %zmm0, %zmm3, %zmm0
; CHECK-NEXT: retq
  %res = call <16 x float> @llvm.x86.avx512.mask.vpermilvar.ps.512(<16 x float> %x0, <16 x i32> %x1, <16 x float> %x2, i16 %x3)
  %res1 = call <16 x float> @llvm.x86.avx512.mask.vpermilvar.ps.512(<16 x float> %x0, <16 x i32> %x1, <16 x float> zeroinitializer, i16 %x3)
  %res2 = call <16 x float> @llvm.x86.avx512.mask.vpermilvar.ps.512(<16 x float> %x0, <16 x i32> %x1, <16 x float> %x2, i16 -1)
  %res3 = fadd <16 x float> %res, %res1
  %res4 = fadd <16 x float> %res2, %res3
  ret <16 x float> %res4
}

; Test case to make sure we can print shuffle decode comments for constant pool loads.
define <16 x float>@test_int_x86_avx512_mask_vpermilvar_ps_512_constant_pool(<16 x float> %x0, <16 x i32> %x1, <16 x float> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpermilvar_ps_512_constant_pool:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpermilps {{.*#+}} zmm2 {%k1} = zmm0[2,3,0,1,7,6,5,4,9,8,11,10,12,13,14,15]
; CHECK-NEXT: vpermilps {{.*#+}} zmm1 {%k1} {z} = zmm0[1,0,3,2,4,5,6,7,9,8,11,10,12,13,14,15]
; CHECK-NEXT: vaddps %zmm1, %zmm2, %zmm1
; CHECK-NEXT: vpermilps {{.*#+}} zmm0 = zmm0[1,0,3,2,4,5,6,7,10,11,8,9,14,15,13,12]
; CHECK-NEXT: vaddps %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
  %res = call <16 x float> @llvm.x86.avx512.mask.vpermilvar.ps.512(<16 x float> %x0, <16 x i32> <i32 2, i32 3, i32 0, i32 1, i32 3, i32 2, i32 1, i32 0, i32 1, i32 0, i32 3, i32 2, i32 0, i32 1, i32 2, i32 3>, <16 x float> %x2, i16 %x3)
  %res1 = call <16 x float> @llvm.x86.avx512.mask.vpermilvar.ps.512(<16 x float> %x0, <16 x i32> <i32 1, i32 0, i32 3, i32 2, i32 0, i32 1, i32 2, i32 3, i32 1, i32 0, i32 3, i32 2, i32 0, i32 1, i32 2, i32 3>, <16 x float> zeroinitializer, i16 %x3)
  %res2 = call <16 x float> @llvm.x86.avx512.mask.vpermilvar.ps.512(<16 x float> %x0, <16 x i32> <i32 1, i32 0, i32 3, i32 2, i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 1, i32 0>, <16 x float> %x2, i16 -1)
  %res3 = fadd <16 x float> %res, %res1
  %res4 = fadd <16 x float> %res2, %res3
  ret <16 x float> %res4
}

define <8 x i64> @test_mask_mul_epi32_rr(<16 x i32> %a, <16 x i32> %b) {
; CHECK-LABEL: test_mask_mul_epi32_rr:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpmuldq %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
  %res = call <8 x i64> @llvm.x86.avx512.mask.pmul.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> zeroinitializer, i8 -1)
  ret < 8 x i64> %res
}

define <8 x i64> @test_mask_mul_epi32_rrk(<16 x i32> %a, <16 x i32> %b, <8 x i64> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_mul_epi32_rrk:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpmuldq %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
; CHECK-NEXT: retq
  %res = call <8 x i64> @llvm.x86.avx512.mask.pmul.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> %passThru, i8 %mask)
  ret < 8 x i64> %res
}

define <8 x i64> @test_mask_mul_epi32_rrkz(<16 x i32> %a, <16 x i32> %b, i8 %mask) {
; CHECK-LABEL: test_mask_mul_epi32_rrkz:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpmuldq %zmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
  %res = call <8 x i64> @llvm.x86.avx512.mask.pmul.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> zeroinitializer, i8 %mask)
  ret < 8 x i64> %res
}

define <8 x i64> @test_mask_mul_epi32_rm(<16 x i32> %a, <16 x i32>* %ptr_b) {
; CHECK-LABEL: test_mask_mul_epi32_rm:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpmuldq (%rdi), %zmm0, %zmm0
; CHECK-NEXT: retq
  %b = load <16 x i32>, <16 x i32>* %ptr_b
  %res = call <8 x i64> @llvm.x86.avx512.mask.pmul.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> zeroinitializer, i8 -1)
  ret < 8 x i64> %res
}

define <8 x i64> @test_mask_mul_epi32_rmk(<16 x i32> %a, <16 x i32>* %ptr_b, <8 x i64> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_mul_epi32_rmk:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpmuldq (%rdi), %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
; CHECK-NEXT: retq
  %b = load <16 x i32>, <16 x i32>* %ptr_b
  %res = call <8 x i64> @llvm.x86.avx512.mask.pmul.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> %passThru, i8 %mask)
  ret < 8 x i64> %res
}

define <8 x i64> @test_mask_mul_epi32_rmkz(<16 x i32> %a, <16 x i32>* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_mul_epi32_rmkz:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpmuldq (%rdi), %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
  %b = load <16 x i32>, <16 x i32>* %ptr_b
  %res = call <8 x i64> @llvm.x86.avx512.mask.pmul.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> zeroinitializer, i8 %mask)
  ret < 8 x i64> %res
}

define <8 x i64> @test_mask_mul_epi32_rmb(<16 x i32> %a, i64* %ptr_b) {
; CHECK-LABEL: test_mask_mul_epi32_rmb:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpmuldq (%rdi){1to8}, %zmm0, %zmm0
; CHECK-NEXT: retq
  %q = load i64, i64* %ptr_b
  %vecinit.i = insertelement <8 x i64> undef, i64 %q, i32 0
  %b64 = shufflevector <8 x i64> %vecinit.i, <8 x i64> undef, <8 x i32> zeroinitializer
  %b = bitcast <8 x i64> %b64 to <16 x i32>
  %res = call <8 x i64> @llvm.x86.avx512.mask.pmul.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> zeroinitializer, i8 -1)
  ret < 8 x i64> %res
}

define <8 x i64> @test_mask_mul_epi32_rmbk(<16 x i32> %a, i64* %ptr_b, <8 x i64> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_mul_epi32_rmbk:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpmuldq (%rdi){1to8}, %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
; CHECK-NEXT: retq
  %q = load i64, i64* %ptr_b
  %vecinit.i = insertelement <8 x i64> undef, i64 %q, i32 0
  %b64 = shufflevector <8 x i64> %vecinit.i, <8 x i64> undef, <8 x i32> zeroinitializer
  %b = bitcast <8 x i64> %b64 to <16 x i32>
  %res = call <8 x i64> @llvm.x86.avx512.mask.pmul.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> %passThru, i8 %mask)
  ret < 8 x i64> %res
}

define <8 x i64> @test_mask_mul_epi32_rmbkz(<16 x i32> %a, i64* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_mul_epi32_rmbkz:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpmuldq (%rdi){1to8}, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
  %q = load i64, i64* %ptr_b
  %vecinit.i = insertelement <8 x i64> undef, i64 %q, i32 0
  %b64 = shufflevector <8 x i64> %vecinit.i, <8 x i64> undef, <8 x i32> zeroinitializer
  %b = bitcast <8 x i64> %b64 to <16 x i32>
  %res = call <8 x i64> @llvm.x86.avx512.mask.pmul.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> zeroinitializer, i8 %mask)
  ret < 8 x i64> %res
}

declare <8 x i64> @llvm.x86.avx512.mask.pmul.dq.512(<16 x i32>, <16 x i32>, <8 x i64>, i8)

define <8 x i64> @test_mask_mul_epu32_rr(<16 x i32> %a, <16 x i32> %b) {
; CHECK-LABEL: test_mask_mul_epu32_rr:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpmuludq %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq
  %res = call <8 x i64> @llvm.x86.avx512.mask.pmulu.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> zeroinitializer, i8 -1)
  ret < 8 x i64> %res
}

define <8 x i64> @test_mask_mul_epu32_rrk(<16 x i32> %a, <16 x i32> %b, <8 x i64> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_mul_epu32_rrk:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpmuludq %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
; CHECK-NEXT: retq
  %res = call <8 x i64> @llvm.x86.avx512.mask.pmulu.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> %passThru, i8 %mask)
  ret < 8 x i64> %res
}

define <8 x i64> @test_mask_mul_epu32_rrkz(<16 x i32> %a, <16 x i32> %b, i8 %mask) {
; CHECK-LABEL: test_mask_mul_epu32_rrkz:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpmuludq %zmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
  %res = call <8 x i64> @llvm.x86.avx512.mask.pmulu.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> zeroinitializer, i8 %mask)
  ret < 8 x i64> %res
}

define <8 x i64> @test_mask_mul_epu32_rm(<16 x i32> %a, <16 x i32>* %ptr_b) {
; CHECK-LABEL: test_mask_mul_epu32_rm:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpmuludq (%rdi), %zmm0, %zmm0
; CHECK-NEXT: retq
  %b = load <16 x i32>, <16 x i32>* %ptr_b
  %res = call <8 x i64> @llvm.x86.avx512.mask.pmulu.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> zeroinitializer, i8 -1)
  ret < 8 x i64> %res
}

define <8 x i64> @test_mask_mul_epu32_rmk(<16 x i32> %a, <16 x i32>* %ptr_b, <8 x i64> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_mul_epu32_rmk:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpmuludq (%rdi), %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
; CHECK-NEXT: retq
  %b = load <16 x i32>, <16 x i32>* %ptr_b
  %res = call <8 x i64> @llvm.x86.avx512.mask.pmulu.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> %passThru, i8 %mask)
  ret < 8 x i64> %res
}

define <8 x i64> @test_mask_mul_epu32_rmkz(<16 x i32> %a, <16 x i32>* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_mul_epu32_rmkz:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpmuludq (%rdi), %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
  %b = load <16 x i32>, <16 x i32>* %ptr_b
  %res = call <8 x i64> @llvm.x86.avx512.mask.pmulu.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> zeroinitializer, i8 %mask)
  ret < 8 x i64> %res
}

define <8 x i64> @test_mask_mul_epu32_rmb(<16 x i32> %a, i64* %ptr_b) {
; CHECK-LABEL: test_mask_mul_epu32_rmb:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpmuludq (%rdi){1to8}, %zmm0, %zmm0
; CHECK-NEXT: retq
  %q = load i64, i64* %ptr_b
  %vecinit.i = insertelement <8 x i64> undef, i64 %q, i32 0
  %b64 = shufflevector <8 x i64> %vecinit.i, <8 x i64> undef, <8 x i32> zeroinitializer
  %b = bitcast <8 x i64> %b64 to <16 x i32>
  %res = call <8 x i64> @llvm.x86.avx512.mask.pmulu.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> zeroinitializer, i8 -1)
  ret < 8 x i64> %res
}

define <8 x i64> @test_mask_mul_epu32_rmbk(<16 x i32> %a, i64* %ptr_b, <8 x i64> %passThru, i8 %mask) {
; CHECK-LABEL: test_mask_mul_epu32_rmbk:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpmuludq (%rdi){1to8}, %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
; CHECK-NEXT: retq
  %q = load i64, i64* %ptr_b
  %vecinit.i = insertelement <8 x i64> undef, i64 %q, i32 0
  %b64 = shufflevector <8 x i64> %vecinit.i, <8 x i64> undef, <8 x i32> zeroinitializer
  %b = bitcast <8 x i64> %b64 to <16 x i32>
  %res = call <8 x i64> @llvm.x86.avx512.mask.pmulu.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> %passThru, i8 %mask)
  ret < 8 x i64> %res
}

define <8 x i64> @test_mask_mul_epu32_rmbkz(<16 x i32> %a, i64* %ptr_b, i8 %mask) {
; CHECK-LABEL: test_mask_mul_epu32_rmbkz:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vpmuludq (%rdi){1to8}, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
  %q = load i64, i64* %ptr_b
  %vecinit.i = insertelement <8 x i64> undef, i64 %q, i32 0
  %b64 = shufflevector <8 x i64> %vecinit.i, <8 x i64> undef, <8 x i32> zeroinitializer
  %b = bitcast <8 x i64> %b64 to <16 x i32>
  %res = call <8 x i64> @llvm.x86.avx512.mask.pmulu.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> zeroinitializer, i8 %mask)
  ret < 8 x i64> %res
}

declare <8 x i64> @llvm.x86.avx512.mask.pmulu.dq.512(<16 x i32>, <16 x i32>, <8 x i64>, i8)

define <4 x float> @test_mask_vextractf32x4(<4 x float> %b, <16 x float> %a, i8 %mask) {
; CHECK-LABEL: test_mask_vextractf32x4:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vextractf32x4 $2, %zmm1, %xmm0 {%k1}
; CHECK-NEXT: retq
  %res = call <4 x float> @llvm.x86.avx512.mask.vextractf32x4.512(<16 x float> %a, i32 2, <4 x float> %b, i8 %mask)
  ret <4 x float> %res
}

declare <4 x float> @llvm.x86.avx512.mask.vextractf32x4.512(<16 x float>, i32, <4 x float>, i8)

define <4 x i64> @test_mask_vextracti64x4(<4 x i64> %b, <8 x i64> %a, i8 %mask) {
; CHECK-LABEL: test_mask_vextracti64x4:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vextracti64x4 $1, %zmm1, %ymm0 {%k1}
; CHECK-NEXT: retq
  %res = call <4 x i64> @llvm.x86.avx512.mask.vextracti64x4.512(<8 x i64> %a, i32 1, <4 x i64> %b, i8 %mask)
  ret <4 x i64> %res
}

declare <4 x i64> @llvm.x86.avx512.mask.vextracti64x4.512(<8 x i64>, i32, <4 x i64>, i8)

define <4 x i32> @test_maskz_vextracti32x4(<16 x i32> %a, i8 %mask) {
; CHECK-LABEL: test_maskz_vextracti32x4:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vextracti32x4 $2, %zmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
  %res = call <4 x i32> @llvm.x86.avx512.mask.vextracti32x4.512(<16 x i32> %a, i32 2, <4 x i32> zeroinitializer, i8 %mask)
  ret <4 x i32> %res
}

declare <4 x i32> @llvm.x86.avx512.mask.vextracti32x4.512(<16 x i32>, i32, <4 x i32>, i8)

define <4 x double> @test_vextractf64x4(<8 x double> %a) {
; CHECK-LABEL: test_vextractf64x4:
; CHECK: ## %bb.0:
; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm0
; CHECK-NEXT: retq
  %res = call <4 x double> @llvm.x86.avx512.mask.vextractf64x4.512(<8 x double> %a, i32 1, <4 x double> zeroinitializer, i8 -1)
  ret <4 x double> %res
}

declare <4 x double> @llvm.x86.avx512.mask.vextractf64x4.512(<8 x double>, i32, <4 x double>, i8)

declare <16 x float> @llvm.x86.avx512.mask.insertf32x4.512(<16 x float>, <4 x float>, i32, <16 x float>, i16)

define <16 x float>@test_int_x86_avx512_mask_insertf32x4_512(<16 x float> %x0, <4 x float> %x1, <16 x float> %x3, i16 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_insertf32x4_512:
; CHECK: ## %bb.0:
; CHECK-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1
; CHECK-NEXT: vinsertf32x4 $1, %xmm1, %zmm0, %zmm3
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vinsertf32x4 $1, %xmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vaddps %zmm3, %zmm2, %zmm2
; CHECK-NEXT: vinsertf32x4 $1, %xmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: vaddps %zmm2, %zmm0, %zmm0
; CHECK-NEXT: retq
  %res = call <16 x float> @llvm.x86.avx512.mask.insertf32x4.512(<16 x float> %x0, <4 x float> %x1, i32 1, <16 x float> %x3, i16 %x4)
  %res1 = call <16 x float> @llvm.x86.avx512.mask.insertf32x4.512(<16 x float> %x0, <4 x float> %x1, i32 1, <16 x float> %x3, i16 -1)
  %res2 = call <16 x float> @llvm.x86.avx512.mask.insertf32x4.512(<16 x float> %x0, <4 x float> %x1, i32 1, <16 x float> zeroinitializer, i16 %x4)
  %res3 = fadd <16 x float> %res, %res1
  %res4 = fadd <16 x float> %res2, %res3
  ret <16 x float> %res4
}

declare <16 x i32> @llvm.x86.avx512.mask.inserti32x4.512(<16 x i32>, <4 x i32>, i32, <16 x i32>, i16)

define <16 x i32>@test_int_x86_avx512_mask_inserti32x4_512(<16 x i32> %x0, <4 x i32> %x1, <16 x i32> %x3, i16 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_inserti32x4_512:
; CHECK: ## %bb.0:
; CHECK-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1
; CHECK-NEXT: vinserti32x4 $1, %xmm1, %zmm0, %zmm3
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vinserti32x4 $1, %xmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vpaddd %zmm3, %zmm2, %zmm2
; CHECK-NEXT: vinserti32x4 $1, %xmm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: vpaddd %zmm2, %zmm0, %zmm0
; CHECK-NEXT: retq
  %res = call <16 x i32> @llvm.x86.avx512.mask.inserti32x4.512(<16 x i32> %x0, <4 x i32> %x1, i32 1, <16 x i32> %x3, i16 %x4)
  %res1 = call <16 x i32> @llvm.x86.avx512.mask.inserti32x4.512(<16 x i32> %x0, <4 x i32> %x1, i32 1, <16 x i32> %x3, i16 -1)
  %res2 = call <16 x i32> @llvm.x86.avx512.mask.inserti32x4.512(<16 x i32> %x0, <4 x i32> %x1, i32 1, <16 x i32> zeroinitializer, i16 %x4)
  %res3 = add <16 x i32> %res, %res1
  %res4 = add <16 x i32> %res2, %res3
  ret <16 x i32> %res4
}

declare <8 x double> @llvm.x86.avx512.mask.insertf64x4.512(<8 x double>, <4 x double>, i32, <8 x double>, i8)

define <8 x double>@test_int_x86_avx512_mask_insertf64x4_512(<8 x double> %x0, <4 x double> %x1, <8 x double> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_insertf64x4_512:
; CHECK: ## %bb.0:
; CHECK-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm3
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vaddpd %zmm3, %zmm2, %zmm2
; CHECK-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: vaddpd %zmm2, %zmm0, %zmm0
; CHECK-NEXT: retq
  %res = call <8 x double> @llvm.x86.avx512.mask.insertf64x4.512(<8 x double> %x0, <4 x double> %x1, i32 1, <8 x double> %x3, i8 %x4)
  %res1 = call <8 x double> @llvm.x86.avx512.mask.insertf64x4.512(<8 x double> %x0, <4 x double> %x1, i32 1, <8 x double> %x3, i8 -1)
  %res2 = call <8 x double> @llvm.x86.avx512.mask.insertf64x4.512(<8 x double> %x0, <4 x double> %x1, i32 1, <8 x double> zeroinitializer, i8 %x4)
  %res3 = fadd <8 x double> %res, %res1
  %res4 = fadd <8 x double> %res2, %res3
  ret <8 x double> %res4
}

declare <8 x i64> @llvm.x86.avx512.mask.inserti64x4.512(<8 x i64>, <4 x i64>, i32, <8 x i64>, i8)

define <8 x i64>@test_int_x86_avx512_mask_inserti64x4_512(<8 x i64> %x0, <4 x i64> %x1, <8 x i64> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_inserti64x4_512:
; CHECK: ## %bb.0:
; CHECK-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm3
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vpaddq %zmm3, %zmm2, %zmm2
; CHECK-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: vpaddq %zmm2, %zmm0, %zmm0
; CHECK-NEXT: retq
  %res = call <8 x i64> @llvm.x86.avx512.mask.inserti64x4.512(<8 x i64> %x0, <4 x i64> %x1, i32 1, <8 x i64> %x3, i8 %x4)
  %res1 = call <8 x i64> @llvm.x86.avx512.mask.inserti64x4.512(<8 x i64> %x0, <4 x i64> %x1, i32 1, <8 x i64> %x3, i8 -1)
  %res2 = call <8 x i64> @llvm.x86.avx512.mask.inserti64x4.512(<8 x i64> %x0, <4 x i64> %x1, i32 1, <8 x i64> zeroinitializer, i8 %x4)
  %res3 = add <8 x i64> %res, %res1
  %res4 = add <8 x i64> %res2, %res3
  ret <8 x i64> %res4
}

define <8 x i64> @test_x86_avx512_movntdqa(i8* %a0) {
; CHECK-LABEL: test_x86_avx512_movntdqa:
; CHECK: ## %bb.0:
; CHECK-NEXT: vmovntdqa (%rdi), %zmm0
; CHECK-NEXT: retq
  %res = call <8 x i64> @llvm.x86.avx512.movntdqa(i8* %a0)
  ret <8 x i64> %res
}

declare <8 x i64> @llvm.x86.avx512.movntdqa(i8*) nounwind readonly

define <8 x i16> @test_cmp_d_512(<16 x i32> %a0, <16 x i32> %a1) {
; CHECK-LABEL: test_cmp_d_512:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; CHECK-NEXT: vpcmpgtd %zmm0, %zmm1, %k1
; CHECK-NEXT: vpcmpled %zmm1, %zmm0, %k2
; CHECK-NEXT: vpcmpneqd %zmm1, %zmm0, %k3
; CHECK-NEXT: vpcmpled %zmm0, %zmm1, %k4
; CHECK-NEXT: vpcmpgtd %zmm1, %zmm0, %k5
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: vpxor %xmm0, %xmm0, %xmm0
; CHECK-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0
; CHECK-NEXT: kmovw %k1, %eax
; CHECK-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0
; CHECK-NEXT: kmovw %k2, %eax
; CHECK-NEXT: vpinsrw $2, %eax, %xmm0, %xmm0
; CHECK-NEXT: kmovw %k3, %eax
; CHECK-NEXT: vpinsrw $4, %eax, %xmm0, %xmm0
; CHECK-NEXT: kmovw %k4, %eax
; CHECK-NEXT: vpinsrw $5, %eax, %xmm0, %xmm0
; CHECK-NEXT: kmovw %k5, %eax
; CHECK-NEXT: vpinsrw $6, %eax, %xmm0, %xmm0
; CHECK-NEXT: kxnorw %k0, %k0, %k0
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0
; CHECK-NEXT: retq
  %res0 = call i16 @llvm.x86.avx512.mask.cmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 0, i16 -1)
  %vec0 = insertelement <8 x i16> undef, i16 %res0, i32 0
  %res1 = call i16 @llvm.x86.avx512.mask.cmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 1, i16 -1)
  %vec1 = insertelement <8 x i16> %vec0, i16 %res1, i32 1
  %res2 = call i16 @llvm.x86.avx512.mask.cmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 2, i16 -1)
  %vec2 = insertelement <8 x i16> %vec1, i16 %res2, i32 2
  %res3 = call i16 @llvm.x86.avx512.mask.cmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 3, i16 -1)
  %vec3 = insertelement <8 x i16> %vec2, i16 %res3, i32 3
  %res4 = call i16 @llvm.x86.avx512.mask.cmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 4, i16 -1)
  %vec4 = insertelement <8 x i16> %vec3, i16 %res4, i32 4
  %res5 = call i16 @llvm.x86.avx512.mask.cmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 5, i16 -1)
  %vec5 = insertelement <8 x i16> %vec4, i16 %res5, i32 5
  %res6 = call i16 @llvm.x86.avx512.mask.cmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 6, i16 -1)
  %vec6 = insertelement <8 x i16> %vec5, i16 %res6, i32 6
  %res7 = call i16 @llvm.x86.avx512.mask.cmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 7, i16 -1)
  %vec7 = insertelement <8 x i16> %vec6, i16 %res7, i32 7
  ret <8 x i16> %vec7
}

define <8 x i16> @test_mask_cmp_d_512(<16 x i32> %a0, <16 x i32> %a1, i16 %mask) {
; CHECK-LABEL: test_mask_cmp_d_512:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1}
; CHECK-NEXT: vpcmpgtd %zmm0, %zmm1, %k2 {%k1}
; CHECK-NEXT: vpcmpled %zmm1, %zmm0, %k3 {%k1}
; CHECK-NEXT: kxorw %k0, %k0, %k4
; CHECK-NEXT: vpcmpneqd %zmm1, %zmm0, %k5 {%k1}
; CHECK-NEXT: vpcmpled %zmm0, %zmm1, %k6 {%k1}
; CHECK-NEXT: vpcmpgtd %zmm1, %zmm0, %k1 {%k1}
; CHECK-NEXT: kmovw %k2, %eax
; CHECK-NEXT: kmovw %k0, %ecx
; CHECK-NEXT: vmovd %ecx, %xmm0
; CHECK-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0
; CHECK-NEXT: kmovw %k3, %eax
; CHECK-NEXT: vpinsrw $2, %eax, %xmm0, %xmm0
; CHECK-NEXT: kmovw %k4, %eax
; CHECK-NEXT: vpinsrw $3, %eax, %xmm0, %xmm0
; CHECK-NEXT: kmovw %k5, %eax
; CHECK-NEXT: vpinsrw $4, %eax, %xmm0, %xmm0
; CHECK-NEXT: kmovw %k6, %eax
; CHECK-NEXT: vpinsrw $5, %eax, %xmm0, %xmm0
; CHECK-NEXT: kmovw %k1, %eax
; CHECK-NEXT: vpinsrw $6, %eax, %xmm0, %xmm0
; CHECK-NEXT: vpinsrw $7, %edi, %xmm0, %xmm0
; CHECK-NEXT: retq
  %res0 = call i16 @llvm.x86.avx512.mask.cmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 0, i16 %mask)
  %vec0 = insertelement <8 x i16> undef, i16 %res0, i32 0
  %res1 = call i16 @llvm.x86.avx512.mask.cmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 1, i16 %mask)
  %vec1 = insertelement <8 x i16> %vec0, i16 %res1, i32 1
  %res2 = call i16 @llvm.x86.avx512.mask.cmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 2, i16 %mask)
  %vec2 = insertelement <8 x i16> %vec1, i16 %res2, i32 2
  %res3 = call i16 @llvm.x86.avx512.mask.cmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 3, i16 %mask)
  %vec3 = insertelement <8 x i16> %vec2, i16 %res3, i32 3
  %res4 = call i16 @llvm.x86.avx512.mask.cmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 4, i16 %mask)
  %vec4 = insertelement <8 x i16> %vec3, i16 %res4, i32 4
  %res5 = call i16 @llvm.x86.avx512.mask.cmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 5, i16 %mask)
  %vec5 = insertelement <8 x i16> %vec4, i16 %res5, i32 5
  %res6 = call i16 @llvm.x86.avx512.mask.cmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 6, i16 %mask)
  %vec6 = insertelement <8 x i16> %vec5, i16 %res6, i32 6
  %res7 = call i16 @llvm.x86.avx512.mask.cmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 7, i16 %mask)
  %vec7 = insertelement <8 x i16> %vec6, i16 %res7, i32 7
  ret <8 x i16> %vec7
}

declare i16 @llvm.x86.avx512.mask.cmp.d.512(<16 x i32>, <16 x i32>, i32, i16) nounwind readnone

define <8 x i16> @test_ucmp_d_512(<16 x i32> %a0, <16 x i32> %a1) {
; CHECK-LABEL: test_ucmp_d_512:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; CHECK-NEXT: vpcmpltud %zmm1, %zmm0, %k1
; CHECK-NEXT: vpcmpleud %zmm1, %zmm0, %k2
; CHECK-NEXT: vpcmpneqd %zmm1, %zmm0, %k3
; CHECK-NEXT: vpcmpnltud %zmm1, %zmm0, %k4
; CHECK-NEXT: vpcmpnleud %zmm1, %zmm0, %k5
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: vpxor %xmm0, %xmm0, %xmm0
; CHECK-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0
; CHECK-NEXT: kmovw %k1, %eax
; CHECK-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0
; CHECK-NEXT: kmovw %k2, %eax
; CHECK-NEXT: vpinsrw $2, %eax, %xmm0, %xmm0
; CHECK-NEXT: kmovw %k3, %eax
; CHECK-NEXT: vpinsrw $4, %eax, %xmm0, %xmm0
; CHECK-NEXT: kmovw %k4, %eax
; CHECK-NEXT: vpinsrw $5, %eax, %xmm0, %xmm0
; CHECK-NEXT: kmovw %k5, %eax
; CHECK-NEXT: vpinsrw $6, %eax, %xmm0, %xmm0
; CHECK-NEXT: kxnorw %k0, %k0, %k0
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0
; CHECK-NEXT: retq
  %res0 = call i16 @llvm.x86.avx512.mask.ucmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 0, i16 -1)
  %vec0 = insertelement <8 x i16> undef, i16 %res0, i32 0
  %res1 = call i16 @llvm.x86.avx512.mask.ucmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 1, i16 -1)
  %vec1 = insertelement <8 x i16> %vec0, i16 %res1, i32 1
  %res2 = call i16 @llvm.x86.avx512.mask.ucmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 2, i16 -1)
  %vec2 = insertelement <8 x i16> %vec1, i16 %res2, i32 2
  %res3 = call i16 @llvm.x86.avx512.mask.ucmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 3, i16 -1)
  %vec3 = insertelement <8 x i16> %vec2, i16 %res3, i32 3
  %res4 = call i16 @llvm.x86.avx512.mask.ucmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 4, i16 -1)
  %vec4 = insertelement <8 x i16> %vec3, i16 %res4, i32 4
  %res5 = call i16 @llvm.x86.avx512.mask.ucmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 5, i16 -1)
  %vec5 = insertelement <8 x i16> %vec4, i16 %res5, i32 5
  %res6 = call i16 @llvm.x86.avx512.mask.ucmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 6, i16 -1)
  %vec6 = insertelement <8 x i16> %vec5, i16 %res6, i32 6
  %res7 = call i16 @llvm.x86.avx512.mask.ucmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 7, i16 -1)
  %vec7 = insertelement <8 x i16> %vec6, i16 %res7, i32 7
  ret <8 x i16> %vec7
}

define <8 x i16> @test_mask_ucmp_d_512(<16 x i32> %a0, <16 x i32> %a1, i16 %mask) {
; CHECK-LABEL: test_mask_ucmp_d_512:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1}
; CHECK-NEXT: vpcmpltud %zmm1, %zmm0, %k2 {%k1}
; CHECK-NEXT: vpcmpleud %zmm1, %zmm0, %k3 {%k1}
; CHECK-NEXT: kxorw %k0, %k0, %k4
; CHECK-NEXT: vpcmpneqd %zmm1, %zmm0, %k5 {%k1}
; CHECK-NEXT: vpcmpnltud %zmm1, %zmm0, %k6 {%k1}
; CHECK-NEXT: vpcmpnleud %zmm1, %zmm0, %k1 {%k1}
; CHECK-NEXT: kmovw %k2, %eax
; CHECK-NEXT: kmovw %k0, %ecx
; CHECK-NEXT: vmovd %ecx, %xmm0
; CHECK-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0
; CHECK-NEXT: kmovw %k3, %eax
; CHECK-NEXT: vpinsrw $2, %eax, %xmm0, %xmm0
; CHECK-NEXT: kmovw %k4, %eax
; CHECK-NEXT: vpinsrw $3, %eax, %xmm0, %xmm0
; CHECK-NEXT: kmovw %k5, %eax
; CHECK-NEXT: vpinsrw $4, %eax, %xmm0, %xmm0
; CHECK-NEXT: kmovw %k6, %eax
; CHECK-NEXT: vpinsrw $5, %eax, %xmm0, %xmm0
; CHECK-NEXT: kmovw %k1, %eax
; CHECK-NEXT: vpinsrw $6, %eax, %xmm0, %xmm0
; CHECK-NEXT: vpinsrw $7, %edi, %xmm0, %xmm0
; CHECK-NEXT: retq
  %res0 = call i16 @llvm.x86.avx512.mask.ucmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 0, i16 %mask)
  %vec0 = insertelement <8 x i16> undef, i16 %res0, i32 0
  %res1 = call i16 @llvm.x86.avx512.mask.ucmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 1, i16 %mask)
  %vec1 = insertelement <8 x i16> %vec0, i16 %res1, i32 1
  %res2 = call i16 @llvm.x86.avx512.mask.ucmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 2, i16 %mask)
  %vec2 = insertelement <8 x i16> %vec1, i16 %res2, i32 2
  %res3 = call i16 @llvm.x86.avx512.mask.ucmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 3, i16 %mask)
  %vec3 = insertelement <8 x i16> %vec2, i16 %res3, i32 3
  %res4 = call i16 @llvm.x86.avx512.mask.ucmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 4, i16 %mask)
  %vec4 = insertelement <8 x i16> %vec3, i16 %res4, i32 4
  %res5 = call i16 @llvm.x86.avx512.mask.ucmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 5, i16 %mask)
  %vec5 = insertelement <8 x i16> %vec4, i16 %res5, i32 5
  %res6 = call i16 @llvm.x86.avx512.mask.ucmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 6, i16 %mask)
  %vec6 = insertelement <8 x i16> %vec5, i16 %res6, i32 6
  %res7 = call i16 @llvm.x86.avx512.mask.ucmp.d.512(<16 x i32> %a0, <16 x i32> %a1, i32 7, i16 %mask)
  %vec7 = insertelement <8 x i16> %vec6, i16 %res7, i32 7
  ret <8 x i16> %vec7
}

declare i16 @llvm.x86.avx512.mask.ucmp.d.512(<16 x i32>, <16 x i32>, i32, i16) nounwind readnone

define <8 x i8> @test_cmp_q_512(<8 x i64> %a0, <8 x i64> %a1) {
; CHECK-LABEL: test_cmp_q_512:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpeqq %zmm1, %zmm0, %k0
; CHECK-NEXT: vpcmpgtq %zmm0, %zmm1, %k1
; CHECK-NEXT: vpcmpleq %zmm1, %zmm0, %k2
; CHECK-NEXT: vpcmpneqq %zmm1, %zmm0, %k3
; CHECK-NEXT: vpcmpleq %zmm0, %zmm1, %k4
; CHECK-NEXT: vpcmpgtq %zmm1, %zmm0, %k5
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: vpxor %xmm0, %xmm0, %xmm0
; CHECK-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0
; CHECK-NEXT: kmovw %k1, %eax
; CHECK-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0
; CHECK-NEXT: kmovw %k2, %eax
; CHECK-NEXT: vpinsrw $2, %eax, %xmm0, %xmm0
; CHECK-NEXT: kmovw %k3, %eax
; CHECK-NEXT: vpinsrw $4, %eax, %xmm0, %xmm0
; CHECK-NEXT: kmovw %k4, %eax
; CHECK-NEXT: vpinsrw $5, %eax, %xmm0, %xmm0
; CHECK-NEXT: kmovw %k5, %eax
; CHECK-NEXT: vpinsrw $6, %eax, %xmm0, %xmm0
; CHECK-NEXT: kxnorw %k0, %k0, %k0
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0
; CHECK-NEXT: retq
  %res0 = call i8 @llvm.x86.avx512.mask.cmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 0, i8 -1)
  %vec0 = insertelement <8 x i8> undef, i8 %res0, i32 0
  %res1 = call i8 @llvm.x86.avx512.mask.cmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 1, i8 -1)
  %vec1 = insertelement <8 x i8> %vec0, i8 %res1, i32 1
  %res2 = call i8 @llvm.x86.avx512.mask.cmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 2, i8 -1)
  %vec2 = insertelement <8 x i8> %vec1, i8 %res2, i32 2
  %res3 = call i8 @llvm.x86.avx512.mask.cmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 3, i8 -1)
  %vec3 = insertelement <8 x i8> %vec2, i8 %res3, i32 3
  %res4 = call i8 @llvm.x86.avx512.mask.cmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 4, i8 -1)
  %vec4 = insertelement <8 x i8> %vec3, i8 %res4, i32 4
  %res5 = call i8 @llvm.x86.avx512.mask.cmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 5, i8 -1)
  %vec5 = insertelement <8 x i8> %vec4, i8 %res5, i32 5
  %res6 = call i8 @llvm.x86.avx512.mask.cmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 6, i8 -1)
  %vec6 = insertelement <8 x i8> %vec5, i8 %res6, i32 6
  %res7 = call i8 @llvm.x86.avx512.mask.cmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 7, i8 -1)
  %vec7 = insertelement <8 x i8> %vec6, i8 %res7, i32 7
  ret <8 x i8> %vec7
}

define <8 x i8> @test_mask_cmp_q_512(<8 x i64> %a0, <8 x i64> %a1, i8 %mask) {
; CHECK-LABEL: test_mask_cmp_q_512:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1}
; CHECK-NEXT: vpcmpgtq %zmm0, %zmm1, %k2 {%k1}
; CHECK-NEXT: vpcmpleq %zmm1, %zmm0, %k3 {%k1}
; CHECK-NEXT: kxorw %k0, %k0, %k4
; CHECK-NEXT: vpcmpneqq %zmm1, %zmm0, %k5 {%k1}
; CHECK-NEXT: vpcmpleq %zmm0, %zmm1, %k6 {%k1}
; CHECK-NEXT: vpcmpgtq %zmm1, %zmm0, %k1 {%k1}
; CHECK-NEXT: kmovw %k2, %eax
; CHECK-NEXT: kmovw %k0, %ecx
; CHECK-NEXT: vmovd %ecx, %xmm0
; CHECK-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0
; CHECK-NEXT: kmovw %k3, %eax
; CHECK-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0
; CHECK-NEXT: kmovw %k4, %eax
; CHECK-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0
; CHECK-NEXT: kmovw %k5, %eax
; CHECK-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0
; CHECK-NEXT: kmovw %k6, %eax
; CHECK-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0
; CHECK-NEXT: kmovw %k1, %eax
; CHECK-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0
; CHECK-NEXT: vpinsrb $14, %edi, %xmm0, %xmm0
; CHECK-NEXT: retq
  %res0 = call i8 @llvm.x86.avx512.mask.cmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 0, i8 %mask)
  %vec0 = insertelement <8 x i8> undef, i8 %res0, i32 0
  %res1 = call i8 @llvm.x86.avx512.mask.cmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 1, i8 %mask)
  %vec1 = insertelement <8 x i8> %vec0, i8 %res1, i32 1
  %res2 = call i8 @llvm.x86.avx512.mask.cmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 2, i8 %mask)
  %vec2 = insertelement <8 x i8> %vec1, i8 %res2, i32 2
  %res3 = call i8 @llvm.x86.avx512.mask.cmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 3, i8 %mask)
  %vec3 = insertelement <8 x i8> %vec2, i8 %res3, i32 3
  %res4 = call i8 @llvm.x86.avx512.mask.cmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 4, i8 %mask)
  %vec4 = insertelement <8 x i8> %vec3, i8 %res4, i32 4
  %res5 = call i8 @llvm.x86.avx512.mask.cmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 5, i8 %mask)
  %vec5 = insertelement <8 x i8> %vec4, i8 %res5, i32 5
  %res6 = call i8 @llvm.x86.avx512.mask.cmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 6, i8 %mask)
  %vec6 = insertelement <8 x i8> %vec5, i8 %res6, i32 6
  %res7 = call i8 @llvm.x86.avx512.mask.cmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 7, i8 %mask)
  %vec7 = insertelement <8 x i8> %vec6, i8 %res7, i32 7
  ret <8 x i8> %vec7
}

declare i8 @llvm.x86.avx512.mask.cmp.q.512(<8 x i64>, <8 x i64>, i32, i8) nounwind readnone

define <8 x i8> @test_ucmp_q_512(<8 x i64> %a0, <8 x i64> %a1) {
; CHECK-LABEL: test_ucmp_q_512:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpeqq %zmm1, %zmm0, %k0
; CHECK-NEXT: vpcmpltuq %zmm1, %zmm0, %k1
; CHECK-NEXT: vpcmpleuq %zmm1, %zmm0, %k2
; CHECK-NEXT: vpcmpneqq %zmm1, %zmm0, %k3
; CHECK-NEXT: vpcmpnltuq %zmm1, %zmm0, %k4
; CHECK-NEXT: vpcmpnleuq %zmm1, %zmm0, %k5
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: vpxor %xmm0, %xmm0, %xmm0
; CHECK-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0
; CHECK-NEXT: kmovw %k1, %eax
; CHECK-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0
; CHECK-NEXT: kmovw %k2, %eax
; CHECK-NEXT: vpinsrw $2, %eax, %xmm0, %xmm0
; CHECK-NEXT: kmovw %k3, %eax
; CHECK-NEXT: vpinsrw $4, %eax, %xmm0, %xmm0
; CHECK-NEXT: kmovw %k4, %eax
; CHECK-NEXT: vpinsrw $5, %eax, %xmm0, %xmm0
; CHECK-NEXT: kmovw %k5, %eax
; CHECK-NEXT: vpinsrw $6, %eax, %xmm0, %xmm0
; CHECK-NEXT: kxnorw %k0, %k0, %k0
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0
; CHECK-NEXT: retq
  %res0 = call i8 @llvm.x86.avx512.mask.ucmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 0, i8 -1)
  %vec0 = insertelement <8 x i8> undef, i8 %res0, i32 0
  %res1 = call i8 @llvm.x86.avx512.mask.ucmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 1, i8 -1)
  %vec1 = insertelement <8 x i8> %vec0, i8 %res1, i32 1
  %res2 = call i8 @llvm.x86.avx512.mask.ucmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 2, i8 -1)
  %vec2 = insertelement <8 x i8> %vec1, i8 %res2, i32 2
  %res3 = call i8 @llvm.x86.avx512.mask.ucmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 3, i8 -1)
  %vec3 = insertelement <8 x i8> %vec2, i8 %res3, i32 3
  %res4 = call i8 @llvm.x86.avx512.mask.ucmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 4, i8 -1)
  %vec4 = insertelement <8 x i8> %vec3, i8 %res4, i32 4
  %res5 = call i8 @llvm.x86.avx512.mask.ucmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 5, i8 -1)
  %vec5 = insertelement <8 x i8> %vec4, i8 %res5, i32 5
  %res6 = call i8 @llvm.x86.avx512.mask.ucmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 6, i8 -1)
  %vec6 = insertelement <8 x i8> %vec5, i8 %res6, i32 6
  %res7 = call i8 @llvm.x86.avx512.mask.ucmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 7, i8 -1)
  %vec7 = insertelement <8 x i8> %vec6, i8 %res7, i32 7
  ret <8 x i8> %vec7
}

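; Masked variant: each compare is ANDed with %mask via {%k1}. Here the
; always-false predicate folds to kxorw (all zeros) and the always-true
; predicate reduces to the mask itself, inserted straight from %edi.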
define <8 x i8> @test_mask_ucmp_q_512(<8 x i64> %a0, <8 x i64> %a1, i8 %mask) {
; CHECK-LABEL: test_mask_ucmp_q_512:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1}
; CHECK-NEXT: vpcmpltuq %zmm1, %zmm0, %k2 {%k1}
; CHECK-NEXT: vpcmpleuq %zmm1, %zmm0, %k3 {%k1}
; CHECK-NEXT: kxorw %k0, %k0, %k4
; CHECK-NEXT: vpcmpneqq %zmm1, %zmm0, %k5 {%k1}
; CHECK-NEXT: vpcmpnltuq %zmm1, %zmm0, %k6 {%k1}
; CHECK-NEXT: vpcmpnleuq %zmm1, %zmm0, %k1 {%k1}
; CHECK-NEXT: kmovw %k2, %eax
; CHECK-NEXT: kmovw %k0, %ecx
; CHECK-NEXT: vmovd %ecx, %xmm0
; CHECK-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0
; CHECK-NEXT: kmovw %k3, %eax
; CHECK-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0
; CHECK-NEXT: kmovw %k4, %eax
; CHECK-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0
; CHECK-NEXT: kmovw %k5, %eax
; CHECK-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0
; CHECK-NEXT: kmovw %k6, %eax
; CHECK-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0
; CHECK-NEXT: kmovw %k1, %eax
; CHECK-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0
; CHECK-NEXT: vpinsrb $14, %edi, %xmm0, %xmm0
; CHECK-NEXT: retq
  %res0 = call i8 @llvm.x86.avx512.mask.ucmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 0, i8 %mask)
  %vec0 = insertelement <8 x i8> undef, i8 %res0, i32 0
  %res1 = call i8 @llvm.x86.avx512.mask.ucmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 1, i8 %mask)
  %vec1 = insertelement <8 x i8> %vec0, i8 %res1, i32 1
  %res2 = call i8 @llvm.x86.avx512.mask.ucmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 2, i8 %mask)
  %vec2 = insertelement <8 x i8> %vec1, i8 %res2, i32 2
  %res3 = call i8 @llvm.x86.avx512.mask.ucmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 3, i8 %mask)
  %vec3 = insertelement <8 x i8> %vec2, i8 %res3, i32 3
  %res4 = call i8 @llvm.x86.avx512.mask.ucmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 4, i8 %mask)
  %vec4 = insertelement <8 x i8> %vec3, i8 %res4, i32 4
  %res5 = call i8 @llvm.x86.avx512.mask.ucmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 5, i8 %mask)
  %vec5 = insertelement <8 x i8> %vec4, i8 %res5, i32 5
  %res6 = call i8 @llvm.x86.avx512.mask.ucmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 6, i8 %mask)
  %vec6 = insertelement <8 x i8> %vec5, i8 %res6, i32 6
  %res7 = call i8 @llvm.x86.avx512.mask.ucmp.q.512(<8 x i64> %a0, <8 x i64> %a1, i32 7, i8 %mask)
  %vec7 = insertelement <8 x i8> %vec6, i8 %res7, i32 7
  ret <8 x i8> %vec7
}

declare i8 @llvm.x86.avx512.mask.ucmp.q.512(<8 x i64>, <8 x i64>, i32, i8) nounwind readnone

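; The mask.broadcast{f,i}{32x4,64x4} intrinsics replicate a 128-bit or 256-bit
; lane across a ZMM register. The register forms lower to vinsert* shuffles
; plus a masked move; the load forms fold into a single masked vbroadcast*
; reading directly from memory.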
declare <16 x float> @llvm.x86.avx512.mask.broadcastf32x4.512(<4 x float>, <16 x float>, i16)

define <16 x float>@test_int_x86_avx512_mask_broadcastf32x4_512(<4 x float> %x0, <16 x float> %x2, i16 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcastf32x4_512:
; CHECK: ## %bb.0:
; CHECK-NEXT: ## kill: def %xmm0 killed %xmm0 def %ymm0
; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; CHECK-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovaps %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovaps %zmm0, %zmm2 {%k1} {z}
; CHECK-NEXT: vaddps %zmm1, %zmm0, %zmm0
; CHECK-NEXT: vaddps %zmm0, %zmm2, %zmm0
; CHECK-NEXT: retq

  %res1 = call <16 x float> @llvm.x86.avx512.mask.broadcastf32x4.512(<4 x float> %x0, <16 x float> %x2, i16 -1)
  %res2 = call <16 x float> @llvm.x86.avx512.mask.broadcastf32x4.512(<4 x float> %x0, <16 x float> %x2, i16 %mask)
  %res3 = call <16 x float> @llvm.x86.avx512.mask.broadcastf32x4.512(<4 x float> %x0, <16 x float> zeroinitializer, i16 %mask)
  %res4 = fadd <16 x float> %res1, %res2
  %res5 = fadd <16 x float> %res3, %res4
  ret <16 x float> %res5
}

define <16 x float>@test_int_x86_avx512_mask_broadcastf32x4_512_load(<4 x float>* %x0ptr, <16 x float> %x2, i16 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcastf32x4_512_load:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vbroadcastf32x4 {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; CHECK-NEXT: retq
  %x0 = load <4 x float>, <4 x float>* %x0ptr
  %res = call <16 x float> @llvm.x86.avx512.mask.broadcastf32x4.512(<4 x float> %x0, <16 x float> %x2, i16 %mask)
  ret <16 x float> %res
}

declare <8 x double> @llvm.x86.avx512.mask.broadcastf64x4.512(<4 x double>, <8 x double>, i8)

define <8 x double>@test_int_x86_avx512_mask_broadcastf64x4_512(<4 x double> %x0, <8 x double> %x2, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcastf64x4_512:
; CHECK: ## %bb.0:
; CHECK-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
; CHECK-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm2
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vaddpd %zmm1, %zmm2, %zmm1
; CHECK-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: vaddpd %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq

  %res1 = call <8 x double> @llvm.x86.avx512.mask.broadcastf64x4.512(<4 x double> %x0, <8 x double> %x2, i8 -1)
  %res2 = call <8 x double> @llvm.x86.avx512.mask.broadcastf64x4.512(<4 x double> %x0, <8 x double> %x2, i8 %mask)
  %res3 = call <8 x double> @llvm.x86.avx512.mask.broadcastf64x4.512(<4 x double> %x0, <8 x double> zeroinitializer, i8 %mask)
  %res4 = fadd <8 x double> %res1, %res2
  %res5 = fadd <8 x double> %res3, %res4
  ret <8 x double> %res5
}

define <8 x double>@test_int_x86_avx512_mask_broadcastf64x4_512_load(<4 x double>* %x0ptr, <8 x double> %x2, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcastf64x4_512_load:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vbroadcastf64x4 {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,0,1,2,3]
; CHECK-NEXT: retq

  %x0 = load <4 x double>, <4 x double>* %x0ptr
  %res = call <8 x double> @llvm.x86.avx512.mask.broadcastf64x4.512(<4 x double> %x0, <8 x double> %x2, i8 %mask)
  ret <8 x double> %res
}

declare <16 x i32> @llvm.x86.avx512.mask.broadcasti32x4.512(<4 x i32>, <16 x i32>, i16)

define <16 x i32>@test_int_x86_avx512_mask_broadcasti32x4_512(<4 x i32> %x0, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcasti32x4_512:
; CHECK: ## %bb.0:
; CHECK-NEXT: ## kill: def %xmm0 killed %xmm0 def %ymm0
; CHECK-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; CHECK-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovdqa32 %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa32 %zmm0, %zmm2 {%k1} {z}
; CHECK-NEXT: vpaddd %zmm1, %zmm0, %zmm0
; CHECK-NEXT: vpaddd %zmm0, %zmm2, %zmm0
; CHECK-NEXT: retq

  %res1 = call <16 x i32> @llvm.x86.avx512.mask.broadcasti32x4.512(<4 x i32> %x0, <16 x i32> %x2, i16 -1)
  %res2 = call <16 x i32> @llvm.x86.avx512.mask.broadcasti32x4.512(<4 x i32> %x0, <16 x i32> %x2, i16 %mask)
  %res3 = call <16 x i32> @llvm.x86.avx512.mask.broadcasti32x4.512(<4 x i32> %x0, <16 x i32> zeroinitializer, i16 %mask)
  %res4 = add <16 x i32> %res1, %res2
  %res5 = add <16 x i32> %res3, %res4
  ret <16 x i32> %res5
}

define <16 x i32>@test_int_x86_avx512_mask_broadcasti32x4_512_load(<4 x i32>* %x0ptr, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcasti32x4_512_load:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; CHECK-NEXT: retq

  %x0 = load <4 x i32>, <4 x i32>* %x0ptr
  %res = call <16 x i32> @llvm.x86.avx512.mask.broadcasti32x4.512(<4 x i32> %x0, <16 x i32> %x2, i16 %mask)
  ret <16 x i32> %res
}

declare <8 x i64> @llvm.x86.avx512.mask.broadcasti64x4.512(<4 x i64>, <8 x i64>, i8)

define <8 x i64>@test_int_x86_avx512_mask_broadcasti64x4_512(<4 x i64> %x0, <8 x i64> %x2, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcasti64x4_512:
; CHECK: ## %bb.0:
; CHECK-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
; CHECK-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm2
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vpaddq %zmm1, %zmm2, %zmm1
; CHECK-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: vpaddq %zmm1, %zmm0, %zmm0
; CHECK-NEXT: retq

  %res1 = call <8 x i64> @llvm.x86.avx512.mask.broadcasti64x4.512(<4 x i64> %x0, <8 x i64> %x2, i8 -1)
  %res2 = call <8 x i64> @llvm.x86.avx512.mask.broadcasti64x4.512(<4 x i64> %x0, <8 x i64> %x2, i8 %mask)
  %res3 = call <8 x i64> @llvm.x86.avx512.mask.broadcasti64x4.512(<4 x i64> %x0, <8 x i64> zeroinitializer, i8 %mask)
  %res4 = add <8 x i64> %res1, %res2
  %res5 = add <8 x i64> %res3, %res4
  ret <8 x i64> %res5
}

define <8 x i64>@test_int_x86_avx512_mask_broadcasti64x4_512_load(<4 x i64>* %x0ptr, <8 x i64> %x2, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcasti64x4_512_load:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,0,1,2,3]
; CHECK-NEXT: retq

  %x0 = load <4 x i64>, <4 x i64>* %x0ptr
  %res = call <8 x i64> @llvm.x86.avx512.mask.broadcasti64x4.512(<4 x i64> %x0, <8 x i64> %x2, i8 %mask)
  ret <8 x i64> %res
}

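; Masked absolute value of packed 32-bit and 64-bit integers. Each test sums
; the merge-masked result with the unmasked result so both lowerings appear
; in the checks.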
declare <16 x i32> @llvm.x86.avx512.mask.pabs.d.512(<16 x i32>, <16 x i32>, i16)

define <16 x i32>@test_int_x86_avx512_mask_pabs_d_512(<16 x i32> %x0, <16 x i32> %x1, i16 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pabs_d_512:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpabsd %zmm0, %zmm2
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpabsd %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vpaddd %zmm2, %zmm1, %zmm0
; CHECK-NEXT: retq
  %res = call <16 x i32> @llvm.x86.avx512.mask.pabs.d.512(<16 x i32> %x0, <16 x i32> %x1, i16 %x2)
  %res1 = call <16 x i32> @llvm.x86.avx512.mask.pabs.d.512(<16 x i32> %x0, <16 x i32> %x1, i16 -1)
  %res2 = add <16 x i32> %res, %res1
  ret <16 x i32> %res2
}

declare <8 x i64> @llvm.x86.avx512.mask.pabs.q.512(<8 x i64>, <8 x i64>, i8)

define <8 x i64>@test_int_x86_avx512_mask_pabs_q_512(<8 x i64> %x0, <8 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_mask_pabs_q_512:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpabsq %zmm0, %zmm2
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpabsq %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vpaddq %zmm2, %zmm1, %zmm0
; CHECK-NEXT: retq
  %res = call <8 x i64> @llvm.x86.avx512.mask.pabs.q.512(<8 x i64> %x0, <8 x i64> %x1, i8 %x2)
  %res1 = call <8 x i64> @llvm.x86.avx512.mask.pabs.q.512(<8 x i64> %x0, <8 x i64> %x1, i8 -1)
  %res2 = add <8 x i64> %res, %res1
  ret <8 x i64> %res2
}

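; vptestm{q,d} set a mask bit wherever (a AND b) is nonzero; with a mask
; operand the result is further ANDed with the incoming mask ({%k1}).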
define i8 @test_vptestmq(<8 x i64> %a0, <8 x i64> %a1, i8 %m) {
; CHECK-LABEL: test_vptestmq:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestmq %zmm1, %zmm0, %k0
; CHECK-NEXT: kmovw %k0, %ecx
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vptestmq %zmm1, %zmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: addb %cl, %al
; CHECK-NEXT: ## kill: def %al killed %al killed %eax
; CHECK-NEXT: retq
  %res = call i8 @llvm.x86.avx512.ptestm.q.512(<8 x i64> %a0, <8 x i64> %a1, i8 -1)
  %res1 = call i8 @llvm.x86.avx512.ptestm.q.512(<8 x i64> %a0, <8 x i64> %a1, i8 %m)
  %res2 = add i8 %res1, %res
  ret i8 %res2
}
declare i8 @llvm.x86.avx512.ptestm.q.512(<8 x i64>, <8 x i64>, i8)

define i16 @test_vptestmd(<16 x i32> %a0, <16 x i32> %a1, i16 %m) {
; CHECK-LABEL: test_vptestmd:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestmd %zmm1, %zmm0, %k0
; CHECK-NEXT: kmovw %k0, %ecx
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vptestmd %zmm1, %zmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: addl %ecx, %eax
; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
; CHECK-NEXT: retq
  %res = call i16 @llvm.x86.avx512.ptestm.d.512(<16 x i32> %a0, <16 x i32> %a1, i16 -1)
  %res1 = call i16 @llvm.x86.avx512.ptestm.d.512(<16 x i32> %a0, <16 x i32> %a1, i16 %m)
  %res2 = add i16 %res1, %res
  ret i16 %res2
}
declare i16 @llvm.x86.avx512.ptestm.d.512(<16 x i32>, <16 x i32>, i16)

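; vptestnm{d,q} are the complements of vptestm: a mask bit is set wherever
; (a AND b) is zero.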
declare i16 @llvm.x86.avx512.ptestnm.d.512(<16 x i32>, <16 x i32>, i16 %x2)

define i16@test_int_x86_avx512_ptestnm_d_512(<16 x i32> %x0, <16 x i32> %x1, i16 %x2) {
; CHECK-LABEL: test_int_x86_avx512_ptestnm_d_512:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmd %zmm1, %zmm0, %k0
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vptestnmd %zmm1, %zmm0, %k1 {%k1}
; CHECK-NEXT: kmovw %k1, %ecx
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: addl %ecx, %eax
; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
; CHECK-NEXT: retq
  %res = call i16 @llvm.x86.avx512.ptestnm.d.512(<16 x i32> %x0, <16 x i32> %x1, i16 %x2)
  %res1 = call i16 @llvm.x86.avx512.ptestnm.d.512(<16 x i32> %x0, <16 x i32> %x1, i16 -1)
  %res2 = add i16 %res, %res1
  ret i16 %res2
}

declare i8 @llvm.x86.avx512.ptestnm.q.512(<8 x i64>, <8 x i64>, i8 %x2)

define i8@test_int_x86_avx512_ptestnm_q_512(<8 x i64> %x0, <8 x i64> %x1, i8 %x2) {
; CHECK-LABEL: test_int_x86_avx512_ptestnm_q_512:
; CHECK: ## %bb.0:
; CHECK-NEXT: vptestnmq %zmm1, %zmm0, %k0
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vptestnmq %zmm1, %zmm0, %k1 {%k1}
; CHECK-NEXT: kmovw %k1, %ecx
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: addb %cl, %al
; CHECK-NEXT: ## kill: def %al killed %al killed %eax
; CHECK-NEXT: retq
  %res = call i8 @llvm.x86.avx512.ptestnm.q.512(<8 x i64> %x0, <8 x i64> %x1, i8 %x2)
  %res1 = call i8 @llvm.x86.avx512.ptestnm.q.512(<8 x i64> %x0, <8 x i64> %x1, i8 -1)
  %res2 = add i8 %res, %res1
  ret i8 %res2
}