; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mcpu=skx | FileCheck %s

; These test cases demonstrate situations where vpternlog could benefit from being commuted.
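;
; Informal note (added here, not part of the autogenerated checks): vpternlog
; computes a bitwise function of its three sources selected by an 8-bit
; truth-table immediate, indexed as imm[src1*4 + src2*2 + src3]. Commuting the
; sources therefore only permutes the bits of that immediate. For the imm 114
; (0x72) used by most of these tests, swapping src1/src2 gives 78, swapping
; src1/src3 gives 58, swapping src2/src3 gives 116, and the two rotations give
; 46 and 92. Those are the immediates the CHECK lines expect whenever an
; operand is commuted so that a load or broadcast can be folded into src3.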

declare <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32>, <16 x i32>, <16 x i32>, i32, i16)
declare <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32>, <16 x i32>, <16 x i32>, i32, i16)

define <16 x i32> @vpternlog_v16i32_012(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) {
; CHECK-LABEL: vpternlog_v16i32_012:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpternlogd $114, %zmm2, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 114, i16 -1)
ret <16 x i32> %res
}

define <16 x i32> @vpternlog_v16i32_102(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) {
; CHECK-LABEL: vpternlog_v16i32_102:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpternlogd $78, %zmm2, %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> %x2, i32 114, i16 -1)
ret <16 x i32> %res
}

define <16 x i32> @vpternlog_v16i32_210(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) {
; CHECK-LABEL: vpternlog_v16i32_210:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpternlogd $78, %zmm0, %zmm2, %zmm1
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x2, <16 x i32> %x1, <16 x i32> %x0, i32 114, i16 -1)
ret <16 x i32> %res
}

define <16 x i32> @vpternlog_v16i32_012_load0(<16 x i32>* %x0ptr, <16 x i32> %x1, <16 x i32> %x2) {
; CHECK-LABEL: vpternlog_v16i32_012_load0:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpternlogd $46, (%rdi), %zmm1, %zmm0
; CHECK-NEXT: retq
%x0 = load <16 x i32>, <16 x i32>* %x0ptr
%res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 114, i16 -1)
ret <16 x i32> %res
}

define <16 x i32> @vpternlog_v16i32_012_load1(<16 x i32> %x0, <16 x i32>* %x1ptr, <16 x i32> %x2) {
; CHECK-LABEL: vpternlog_v16i32_012_load1:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpternlogd $116, (%rdi), %zmm1, %zmm0
; CHECK-NEXT: retq
%x1 = load <16 x i32>, <16 x i32>* %x1ptr
%res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 114, i16 -1)
ret <16 x i32> %res
}

define <16 x i32> @vpternlog_v16i32_012_load2(<16 x i32> %x0, <16 x i32> %x1, <16 x i32>* %x2ptr) {
; CHECK-LABEL: vpternlog_v16i32_012_load2:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpternlogd $114, (%rdi), %zmm1, %zmm0
; CHECK-NEXT: retq
%x2 = load <16 x i32>, <16 x i32>* %x2ptr
%res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 114, i16 -1)
ret <16 x i32> %res
}

define <16 x i32> @vpternlog_v16i32_102_load0(<16 x i32>* %x0ptr, <16 x i32> %x1, <16 x i32> %x2) {
; CHECK-LABEL: vpternlog_v16i32_102_load0:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpternlogd $116, (%rdi), %zmm1, %zmm0
; CHECK-NEXT: retq
%x0 = load <16 x i32>, <16 x i32>* %x0ptr
%res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> %x2, i32 114, i16 -1)
ret <16 x i32> %res
}

define <16 x i32> @vpternlog_v16i32_102_load1(<16 x i32> %x0, <16 x i32>* %x1ptr, <16 x i32> %x2) {
; CHECK-LABEL: vpternlog_v16i32_102_load1:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpternlogd $46, (%rdi), %zmm1, %zmm0
; CHECK-NEXT: retq
%x1 = load <16 x i32>, <16 x i32>* %x1ptr
%res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> %x2, i32 114, i16 -1)
ret <16 x i32> %res
}

define <16 x i32> @vpternlog_v16i32_102_load2(<16 x i32> %x0, <16 x i32> %x1, <16 x i32>* %x2ptr) {
; CHECK-LABEL: vpternlog_v16i32_102_load2:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpternlogd $78, (%rdi), %zmm1, %zmm0
; CHECK-NEXT: retq
%x2 = load <16 x i32>, <16 x i32>* %x2ptr
%res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> %x2, i32 114, i16 -1)
ret <16 x i32> %res
}

define <16 x i32> @vpternlog_v16i32_210_load0(<16 x i32>* %x0ptr, <16 x i32> %x1, <16 x i32> %x2) {
; CHECK-LABEL: vpternlog_v16i32_210_load0:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpternlogd $78, (%rdi), %zmm1, %zmm0
; CHECK-NEXT: retq
%x0 = load <16 x i32>, <16 x i32>* %x0ptr
%res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x2, <16 x i32> %x1, <16 x i32> %x0, i32 114, i16 -1)
ret <16 x i32> %res
}

define <16 x i32> @vpternlog_v16i32_210_load1(<16 x i32> %x0, <16 x i32>* %x1ptr, <16 x i32> %x2) {
; CHECK-LABEL: vpternlog_v16i32_210_load1:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpternlogd $92, (%rdi), %zmm1, %zmm0
; CHECK-NEXT: retq
%x1 = load <16 x i32>, <16 x i32>* %x1ptr
%res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x2, <16 x i32> %x1, <16 x i32> %x0, i32 114, i16 -1)
ret <16 x i32> %res
}

define <16 x i32> @vpternlog_v16i32_210_load2(<16 x i32> %x0, <16 x i32> %x1, <16 x i32>* %x2ptr) {
; CHECK-LABEL: vpternlog_v16i32_210_load2:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpternlogd $58, (%rdi), %zmm1, %zmm0
; CHECK-NEXT: retq
%x2 = load <16 x i32>, <16 x i32>* %x2ptr
%res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x2, <16 x i32> %x1, <16 x i32> %x0, i32 114, i16 -1)
ret <16 x i32> %res
}

define <16 x i32> @vpternlog_v16i32_021_load0(<16 x i32>* %x0ptr, <16 x i32> %x1, <16 x i32> %x2) {
; CHECK-LABEL: vpternlog_v16i32_021_load0:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpternlogd $58, (%rdi), %zmm1, %zmm0
; CHECK-NEXT: retq
%x0 = load <16 x i32>, <16 x i32>* %x0ptr
%res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x2, <16 x i32> %x1, i32 114, i16 -1)
ret <16 x i32> %res
}

define <16 x i32> @vpternlog_v16i32_021_load1(<16 x i32> %x0, <16 x i32>* %x1ptr, <16 x i32> %x2) {
; CHECK-LABEL: vpternlog_v16i32_021_load1:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpternlogd $114, (%rdi), %zmm1, %zmm0
; CHECK-NEXT: retq
%x1 = load <16 x i32>, <16 x i32>* %x1ptr
%res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x2, <16 x i32> %x1, i32 114, i16 -1)
ret <16 x i32> %res
}

define <16 x i32> @vpternlog_v16i32_021_load2(<16 x i32> %x0, <16 x i32> %x1, <16 x i32>* %x2ptr) {
; CHECK-LABEL: vpternlog_v16i32_021_load2:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpternlogd $116, (%rdi), %zmm1, %zmm0
; CHECK-NEXT: retq
%x2 = load <16 x i32>, <16 x i32>* %x2ptr
%res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x2, <16 x i32> %x1, i32 114, i16 -1)
ret <16 x i32> %res
}

define <16 x i32> @vpternlog_v16i32_012_mask(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_012_mask:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpternlogd $114, %zmm2, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 114, i16 %mask)
ret <16 x i32> %res
}

define <16 x i32> @vpternlog_v16i32_102_mask(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_102_mask:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpternlogd $114, %zmm2, %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> %x2, i32 114, i16 %mask)
ret <16 x i32> %res
}

define <16 x i32> @vpternlog_v16i32_210_mask(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_210_mask:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpternlogd $114, %zmm0, %zmm1, %zmm2 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
; CHECK-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x2, <16 x i32> %x1, <16 x i32> %x0, i32 114, i16 %mask)
ret <16 x i32> %res
}

define <16 x i32> @vpternlog_v16i32_012_mask1(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_012_mask1:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpternlogd $78, %zmm2, %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 114, i16 -1)
%mask.cast = bitcast i16 %mask to <16 x i1>
%res2 = select <16 x i1> %mask.cast, <16 x i32> %res, <16 x i32> %x1
ret <16 x i32> %res2
}

define <16 x i32> @vpternlog_v16i32_012_mask2(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_012_mask2:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpternlogd $58, %zmm0, %zmm1, %zmm2 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
; CHECK-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 114, i16 -1)
%mask.cast = bitcast i16 %mask to <16 x i1>
%res2 = select <16 x i1> %mask.cast, <16 x i32> %res, <16 x i32> %x2
ret <16 x i32> %res2
}

define <16 x i32> @vpternlog_v16i32_012_load0_mask(<16 x i32>* %x0ptr, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_012_load0_mask:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vmovdqa32 (%rdi), %zmm2
; CHECK-NEXT: vpternlogd $114, %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
; CHECK-NEXT: retq
%x0 = load <16 x i32>, <16 x i32>* %x0ptr
%res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 114, i16 %mask)
ret <16 x i32> %res
}

define <16 x i32> @vpternlog_v16i32_012_load0_mask1(<16 x i32>* %x0ptr, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_012_load0_mask1:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $65, (%rdi), %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
%x0 = load <16 x i32>, <16 x i32>* %x0ptr
%res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 33, i16 -1)
%mask.cast = bitcast i16 %mask to <16 x i1>
%res2 = select <16 x i1> %mask.cast, <16 x i32> %res, <16 x i32> %x1
ret <16 x i32> %res2
}

define <16 x i32> @vpternlog_v16i32_012_load0_mask2(<16 x i32>* %x0ptr, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_012_load0_mask2:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $33, (%rdi), %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
; CHECK-NEXT: retq
%x0 = load <16 x i32>, <16 x i32>* %x0ptr
%res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 33, i16 -1)
%mask.cast = bitcast i16 %mask to <16 x i1>
%res2 = select <16 x i1> %mask.cast, <16 x i32> %res, <16 x i32> %x2
ret <16 x i32> %res2
}

define <16 x i32> @vpternlog_v16i32_012_load1_mask(<16 x i32> %x0, <16 x i32>* %x1ptr, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_012_load1_mask:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $116, (%rdi), %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
%x1 = load <16 x i32>, <16 x i32>* %x1ptr
%res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 114, i16 %mask)
ret <16 x i32> %res
}

define <16 x i32> @vpternlog_v16i32_012_load1_mask2(<16 x i32> %x0, <16 x i32>* %x1ptr, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_012_load1_mask2:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $9, (%rdi), %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
; CHECK-NEXT: retq
%x1 = load <16 x i32>, <16 x i32>* %x1ptr
%res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 33, i16 -1)
%mask.cast = bitcast i16 %mask to <16 x i1>
%res2 = select <16 x i1> %mask.cast, <16 x i32> %res, <16 x i32> %x2
ret <16 x i32> %res2
}

define <16 x i32> @vpternlog_v16i32_012_load2_mask(<16 x i32> %x0, <16 x i32> %x1, <16 x i32>* %x2ptr, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_012_load2_mask:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $114, (%rdi), %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
%x2 = load <16 x i32>, <16 x i32>* %x2ptr
%res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 114, i16 %mask)
ret <16 x i32> %res
}

define <16 x i32> @vpternlog_v16i32_012_load2_mask1(<16 x i32> %x0, <16 x i32> %x1, <16 x i32>* %x2ptr, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_012_load2_mask1:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $9, (%rdi), %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
; CHECK-NEXT: retq
%x2 = load <16 x i32>, <16 x i32>* %x2ptr
%res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 33, i16 -1)
%mask.cast = bitcast i16 %mask to <16 x i1>
%res2 = select <16 x i1> %mask.cast, <16 x i32> %res, <16 x i32> %x1
ret <16 x i32> %res2
}

define <16 x i32> @vpternlog_v16i32_102_load0_mask(<16 x i32>* %x0ptr, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_102_load0_mask:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $116, (%rdi), %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
%x0 = load <16 x i32>, <16 x i32>* %x0ptr
%res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> %x2, i32 114, i16 %mask)
ret <16 x i32> %res
}

define <16 x i32> @vpternlog_v16i32_102_load1_mask(<16 x i32> %x0, <16 x i32>* %x1ptr, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_102_load1_mask:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vmovdqa32 (%rdi), %zmm2
; CHECK-NEXT: vpternlogd $114, %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
; CHECK-NEXT: retq
%x1 = load <16 x i32>, <16 x i32>* %x1ptr
%res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> %x2, i32 114, i16 %mask)
ret <16 x i32> %res
}

define <16 x i32> @vpternlog_v16i32_102_load2_mask(<16 x i32> %x0, <16 x i32> %x1, <16 x i32>* %x2ptr, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_102_load2_mask:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $114, (%rdi), %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
; CHECK-NEXT: retq
%x2 = load <16 x i32>, <16 x i32>* %x2ptr
%res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> %x2, i32 114, i16 %mask)
ret <16 x i32> %res
}

define <16 x i32> @vpternlog_v16i32_210_load0_mask(<16 x i32>* %x0ptr, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_210_load0_mask:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $114, (%rdi), %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
; CHECK-NEXT: retq
%x0 = load <16 x i32>, <16 x i32>* %x0ptr
%res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x2, <16 x i32> %x1, <16 x i32> %x0, i32 114, i16 %mask)
ret <16 x i32> %res
}

define <16 x i32> @vpternlog_v16i32_210_load1_mask(<16 x i32> %x0, <16 x i32>* %x1ptr, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_210_load1_mask:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $116, (%rdi), %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
; CHECK-NEXT: retq
%x1 = load <16 x i32>, <16 x i32>* %x1ptr
%res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x2, <16 x i32> %x1, <16 x i32> %x0, i32 114, i16 %mask)
ret <16 x i32> %res
}

define <16 x i32> @vpternlog_v16i32_210_load2_mask(<16 x i32> %x0, <16 x i32> %x1, <16 x i32>* %x2ptr, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_210_load2_mask:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vmovdqa32 (%rdi), %zmm2
; CHECK-NEXT: vpternlogd $114, %zmm0, %zmm1, %zmm2 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
; CHECK-NEXT: retq
%x2 = load <16 x i32>, <16 x i32>* %x2ptr
%res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x2, <16 x i32> %x1, <16 x i32> %x0, i32 114, i16 %mask)
ret <16 x i32> %res
}

define <16 x i32> @vpternlog_v16i32_021_load0_mask(<16 x i32>* %x0ptr, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_021_load0_mask:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vmovdqa32 (%rdi), %zmm2
; CHECK-NEXT: vpternlogd $114, %zmm0, %zmm1, %zmm2 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
; CHECK-NEXT: retq
%x0 = load <16 x i32>, <16 x i32>* %x0ptr
%res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x2, <16 x i32> %x1, i32 114, i16 %mask)
ret <16 x i32> %res
}

define <16 x i32> @vpternlog_v16i32_021_load1_mask(<16 x i32> %x0, <16 x i32>* %x1ptr, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_021_load1_mask:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $114, (%rdi), %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
%x1 = load <16 x i32>, <16 x i32>* %x1ptr
%res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x2, <16 x i32> %x1, i32 114, i16 %mask)
ret <16 x i32> %res
}

define <16 x i32> @vpternlog_v16i32_021_load2_mask(<16 x i32> %x0, <16 x i32> %x1, <16 x i32>* %x2ptr, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_021_load2_mask:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $116, (%rdi), %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
%x2 = load <16 x i32>, <16 x i32>* %x2ptr
%res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x2, <16 x i32> %x1, i32 114, i16 %mask)
ret <16 x i32> %res
}

define <16 x i32> @vpternlog_v16i32_012_maskz(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_012_maskz:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpternlogd $114, %zmm2, %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 114, i16 %mask)
ret <16 x i32> %res
}

define <16 x i32> @vpternlog_v16i32_102_maskz(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_102_maskz:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpternlogd $78, %zmm2, %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> %x2, i32 114, i16 %mask)
ret <16 x i32> %res
}

define <16 x i32> @vpternlog_v16i32_210_maskz(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_210_maskz:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpternlogd $78, %zmm0, %zmm2, %zmm1 {%k1} {z}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x2, <16 x i32> %x1, <16 x i32> %x0, i32 114, i16 %mask)
ret <16 x i32> %res
}

define <16 x i32> @vpternlog_v16i32_012_load0_maskz(<16 x i32>* %x0ptr, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_012_load0_maskz:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $46, (%rdi), %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
%x0 = load <16 x i32>, <16 x i32>* %x0ptr
%res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 114, i16 %mask)
ret <16 x i32> %res
}

define <16 x i32> @vpternlog_v16i32_012_load1_maskz(<16 x i32> %x0, <16 x i32>* %x1ptr, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_012_load1_maskz:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $116, (%rdi), %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
%x1 = load <16 x i32>, <16 x i32>* %x1ptr
%res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 114, i16 %mask)
ret <16 x i32> %res
}

define <16 x i32> @vpternlog_v16i32_012_load2_maskz(<16 x i32> %x0, <16 x i32> %x1, <16 x i32>* %x2ptr, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_012_load2_maskz:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $114, (%rdi), %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
%x2 = load <16 x i32>, <16 x i32>* %x2ptr
%res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 114, i16 %mask)
ret <16 x i32> %res
}

define <16 x i32> @vpternlog_v16i32_102_load0_maskz(<16 x i32>* %x0ptr, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_102_load0_maskz:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $116, (%rdi), %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
%x0 = load <16 x i32>, <16 x i32>* %x0ptr
%res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> %x2, i32 114, i16 %mask)
ret <16 x i32> %res
}

define <16 x i32> @vpternlog_v16i32_102_load1_maskz(<16 x i32> %x0, <16 x i32>* %x1ptr, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_102_load1_maskz:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $46, (%rdi), %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
%x1 = load <16 x i32>, <16 x i32>* %x1ptr
%res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> %x2, i32 114, i16 %mask)
ret <16 x i32> %res
}

define <16 x i32> @vpternlog_v16i32_102_load2_maskz(<16 x i32> %x0, <16 x i32> %x1, <16 x i32>* %x2ptr, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_102_load2_maskz:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $78, (%rdi), %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
%x2 = load <16 x i32>, <16 x i32>* %x2ptr
%res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> %x2, i32 114, i16 %mask)
ret <16 x i32> %res
}

define <16 x i32> @vpternlog_v16i32_210_load0_maskz(<16 x i32>* %x0ptr, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_210_load0_maskz:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $78, (%rdi), %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
%x0 = load <16 x i32>, <16 x i32>* %x0ptr
%res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x2, <16 x i32> %x1, <16 x i32> %x0, i32 114, i16 %mask)
ret <16 x i32> %res
}

define <16 x i32> @vpternlog_v16i32_210_load1_maskz(<16 x i32> %x0, <16 x i32>* %x1ptr, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_210_load1_maskz:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $92, (%rdi), %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
%x1 = load <16 x i32>, <16 x i32>* %x1ptr
%res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x2, <16 x i32> %x1, <16 x i32> %x0, i32 114, i16 %mask)
ret <16 x i32> %res
}

define <16 x i32> @vpternlog_v16i32_210_load2_maskz(<16 x i32> %x0, <16 x i32> %x1, <16 x i32>* %x2ptr, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_210_load2_maskz:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $58, (%rdi), %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
%x2 = load <16 x i32>, <16 x i32>* %x2ptr
%res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x2, <16 x i32> %x1, <16 x i32> %x0, i32 114, i16 %mask)
ret <16 x i32> %res
}

define <16 x i32> @vpternlog_v16i32_021_load0_maskz(<16 x i32>* %x0ptr, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_021_load0_maskz:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $58, (%rdi), %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
%x0 = load <16 x i32>, <16 x i32>* %x0ptr
%res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x2, <16 x i32> %x1, i32 114, i16 %mask)
ret <16 x i32> %res
}

define <16 x i32> @vpternlog_v16i32_021_load1_maskz(<16 x i32> %x0, <16 x i32>* %x1ptr, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_021_load1_maskz:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $114, (%rdi), %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
%x1 = load <16 x i32>, <16 x i32>* %x1ptr
%res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x2, <16 x i32> %x1, i32 114, i16 %mask)
ret <16 x i32> %res
}

define <16 x i32> @vpternlog_v16i32_021_load2_maskz(<16 x i32> %x0, <16 x i32> %x1, <16 x i32>* %x2ptr, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_021_load2_maskz:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $116, (%rdi), %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
%x2 = load <16 x i32>, <16 x i32>* %x2ptr
%res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x2, <16 x i32> %x1, i32 114, i16 %mask)
ret <16 x i32> %res
}

define <16 x i32> @vpternlog_v16i32_012_broadcast0(i32* %ptr_x0, <16 x i32> %x1, <16 x i32> %x2) {
; CHECK-LABEL: vpternlog_v16i32_012_broadcast0:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpternlogd $46, (%rdi){1to16}, %zmm1, %zmm0
; CHECK-NEXT: retq
%x0_scalar = load i32, i32* %ptr_x0
%vecinit.i = insertelement <16 x i32> undef, i32 %x0_scalar, i32 0
%x0 = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
%res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 114, i16 -1)
ret <16 x i32> %res
}

define <16 x i32> @vpternlog_v16i32_012_broadcast1(<16 x i32> %x0, i32* %ptr_x1, <16 x i32> %x2) {
; CHECK-LABEL: vpternlog_v16i32_012_broadcast1:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpternlogd $116, (%rdi){1to16}, %zmm1, %zmm0
; CHECK-NEXT: retq
%x1_scalar = load i32, i32* %ptr_x1
%vecinit.i = insertelement <16 x i32> undef, i32 %x1_scalar, i32 0
%x1 = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
%res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 114, i16 -1)
ret <16 x i32> %res
}

define <16 x i32> @vpternlog_v16i32_012_broadcast2(<16 x i32> %x0, <16 x i32> %x1, i32* %ptr_x2) {
; CHECK-LABEL: vpternlog_v16i32_012_broadcast2:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpternlogd $114, (%rdi){1to16}, %zmm1, %zmm0
; CHECK-NEXT: retq
%x2_scalar = load i32, i32* %ptr_x2
%vecinit.i = insertelement <16 x i32> undef, i32 %x2_scalar, i32 0
%x2 = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
%res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 114, i16 -1)
ret <16 x i32> %res
}

define <16 x i32> @vpternlog_v16i32_102_broadcast0(i32* %ptr_x0, <16 x i32> %x1, <16 x i32> %x2) {
; CHECK-LABEL: vpternlog_v16i32_102_broadcast0:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpternlogd $116, (%rdi){1to16}, %zmm1, %zmm0
; CHECK-NEXT: retq
%x0_scalar = load i32, i32* %ptr_x0
%vecinit.i = insertelement <16 x i32> undef, i32 %x0_scalar, i32 0
%x0 = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
%res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> %x2, i32 114, i16 -1)
ret <16 x i32> %res
}

define <16 x i32> @vpternlog_v16i32_102_broadcast1(<16 x i32> %x0, i32* %ptr_x1, <16 x i32> %x2) {
; CHECK-LABEL: vpternlog_v16i32_102_broadcast1:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpternlogd $46, (%rdi){1to16}, %zmm1, %zmm0
; CHECK-NEXT: retq
%x1_scalar = load i32, i32* %ptr_x1
%vecinit.i = insertelement <16 x i32> undef, i32 %x1_scalar, i32 0
%x1 = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
%res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> %x2, i32 114, i16 -1)
ret <16 x i32> %res
}

define <16 x i32> @vpternlog_v16i32_102_broadcast2(<16 x i32> %x0, <16 x i32> %x1, i32* %ptr_x2) {
; CHECK-LABEL: vpternlog_v16i32_102_broadcast2:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpternlogd $78, (%rdi){1to16}, %zmm1, %zmm0
; CHECK-NEXT: retq
%x2_scalar = load i32, i32* %ptr_x2
%vecinit.i = insertelement <16 x i32> undef, i32 %x2_scalar, i32 0
%x2 = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
%res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> %x2, i32 114, i16 -1)
ret <16 x i32> %res
}

define <16 x i32> @vpternlog_v16i32_210_broadcast0(i32* %ptr_x0, <16 x i32> %x1, <16 x i32> %x2) {
; CHECK-LABEL: vpternlog_v16i32_210_broadcast0:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpternlogd $78, (%rdi){1to16}, %zmm1, %zmm0
; CHECK-NEXT: retq
%x0_scalar = load i32, i32* %ptr_x0
%vecinit.i = insertelement <16 x i32> undef, i32 %x0_scalar, i32 0
%x0 = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
%res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x2, <16 x i32> %x1, <16 x i32> %x0, i32 114, i16 -1)
ret <16 x i32> %res
}

define <16 x i32> @vpternlog_v16i32_210_broadcast1(<16 x i32> %x0, i32* %ptr_x1, <16 x i32> %x2) {
; CHECK-LABEL: vpternlog_v16i32_210_broadcast1:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpternlogd $92, (%rdi){1to16}, %zmm1, %zmm0
; CHECK-NEXT: retq
%x1_scalar = load i32, i32* %ptr_x1
%vecinit.i = insertelement <16 x i32> undef, i32 %x1_scalar, i32 0
%x1 = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
%res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x2, <16 x i32> %x1, <16 x i32> %x0, i32 114, i16 -1)
ret <16 x i32> %res
}

define <16 x i32> @vpternlog_v16i32_210_broadcast2(<16 x i32> %x0, <16 x i32> %x1, i32* %ptr_x2) {
; CHECK-LABEL: vpternlog_v16i32_210_broadcast2:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpternlogd $58, (%rdi){1to16}, %zmm1, %zmm0
; CHECK-NEXT: retq
%x2_scalar = load i32, i32* %ptr_x2
%vecinit.i = insertelement <16 x i32> undef, i32 %x2_scalar, i32 0
%x2 = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
%res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x2, <16 x i32> %x1, <16 x i32> %x0, i32 114, i16 -1)
ret <16 x i32> %res
}

define <16 x i32> @vpternlog_v16i32_012_broadcast0_mask(i32* %x0ptr, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_012_broadcast0_mask:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpbroadcastd (%rdi), %zmm2
; CHECK-NEXT: vpternlogd $114, %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
; CHECK-NEXT: retq
%x0scalar = load i32, i32* %x0ptr
%vecinit.i = insertelement <16 x i32> undef, i32 %x0scalar, i32 0
%x0 = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
%res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 114, i16 %mask)
ret <16 x i32> %res
}

define <16 x i32> @vpternlog_v16i32_012_broadcast1_mask(<16 x i32> %x0, i32* %x1ptr, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_012_broadcast1_mask:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $116, (%rdi){1to16}, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
%x1scalar = load i32, i32* %x1ptr
%vecinit.i = insertelement <16 x i32> undef, i32 %x1scalar, i32 0
%x1 = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
%res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 114, i16 %mask)
ret <16 x i32> %res
}

define <16 x i32> @vpternlog_v16i32_012_broadcast2_mask(<16 x i32> %x0, <16 x i32> %x1, i32* %x2ptr, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_012_broadcast2_mask:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $114, (%rdi){1to16}, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
%x2scalar = load i32, i32* %x2ptr
%vecinit.i = insertelement <16 x i32> undef, i32 %x2scalar, i32 0
%x2 = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
%res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 114, i16 %mask)
ret <16 x i32> %res
}

define <16 x i32> @vpternlog_v16i32_102_broadcast0_mask(i32* %x0ptr, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_102_broadcast0_mask:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $116, (%rdi){1to16}, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
%x0scalar = load i32, i32* %x0ptr
%vecinit.i = insertelement <16 x i32> undef, i32 %x0scalar, i32 0
%x0 = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
%res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> %x2, i32 114, i16 %mask)
ret <16 x i32> %res
}

define <16 x i32> @vpternlog_v16i32_102_broadcast1_mask(<16 x i32> %x0, i32* %x1ptr, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_102_broadcast1_mask:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpbroadcastd (%rdi), %zmm2
; CHECK-NEXT: vpternlogd $114, %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
; CHECK-NEXT: retq
%x1scalar = load i32, i32* %x1ptr
%vecinit.i = insertelement <16 x i32> undef, i32 %x1scalar, i32 0
%x1 = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
%res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> %x2, i32 114, i16 %mask)
ret <16 x i32> %res
}

define <16 x i32> @vpternlog_v16i32_102_broadcast2_mask(<16 x i32> %x0, <16 x i32> %x1, i32* %x2ptr, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_102_broadcast2_mask:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $114, (%rdi){1to16}, %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
; CHECK-NEXT: retq
%x2scalar = load i32, i32* %x2ptr
%vecinit.i = insertelement <16 x i32> undef, i32 %x2scalar, i32 0
%x2 = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
%res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> %x2, i32 114, i16 %mask)
ret <16 x i32> %res
}

define <16 x i32> @vpternlog_v16i32_210_broadcast0_mask(i32* %x0ptr, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_210_broadcast0_mask:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $114, (%rdi){1to16}, %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
; CHECK-NEXT: retq
%x0scalar = load i32, i32* %x0ptr
%vecinit.i = insertelement <16 x i32> undef, i32 %x0scalar, i32 0
%x0 = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
%res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x2, <16 x i32> %x1, <16 x i32> %x0, i32 114, i16 %mask)
ret <16 x i32> %res
}

define <16 x i32> @vpternlog_v16i32_210_broadcast1_mask(<16 x i32> %x0, i32* %x1ptr, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_210_broadcast1_mask:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $116, (%rdi){1to16}, %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
; CHECK-NEXT: retq
%x1scalar = load i32, i32* %x1ptr
%vecinit.i = insertelement <16 x i32> undef, i32 %x1scalar, i32 0
%x1 = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
%res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x2, <16 x i32> %x1, <16 x i32> %x0, i32 114, i16 %mask)
ret <16 x i32> %res
}

define <16 x i32> @vpternlog_v16i32_210_broadcast2_mask(<16 x i32> %x0, <16 x i32> %x1, i32* %x2ptr, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_210_broadcast2_mask:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpbroadcastd (%rdi), %zmm2
; CHECK-NEXT: vpternlogd $114, %zmm0, %zmm1, %zmm2 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
; CHECK-NEXT: retq
%x2scalar = load i32, i32* %x2ptr
%vecinit.i = insertelement <16 x i32> undef, i32 %x2scalar, i32 0
%x2 = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
%res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x2, <16 x i32> %x1, <16 x i32> %x0, i32 114, i16 %mask)
ret <16 x i32> %res
}

define <16 x i32> @vpternlog_v16i32_021_broadcast0_mask(i32* %x0ptr, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_021_broadcast0_mask:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpbroadcastd (%rdi), %zmm2
; CHECK-NEXT: vpternlogd $114, %zmm0, %zmm1, %zmm2 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
; CHECK-NEXT: retq
%x0scalar = load i32, i32* %x0ptr
%vecinit.i = insertelement <16 x i32> undef, i32 %x0scalar, i32 0
%x0 = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
%res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x2, <16 x i32> %x1, i32 114, i16 %mask)
ret <16 x i32> %res
}

define <16 x i32> @vpternlog_v16i32_021_broadcast1_mask(<16 x i32> %x0, i32* %x1ptr, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_021_broadcast1_mask:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $114, (%rdi){1to16}, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
%x1scalar = load i32, i32* %x1ptr
%vecinit.i = insertelement <16 x i32> undef, i32 %x1scalar, i32 0
%x1 = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
%res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x2, <16 x i32> %x1, i32 114, i16 %mask)
ret <16 x i32> %res
}

define <16 x i32> @vpternlog_v16i32_021_broadcast2_mask(<16 x i32> %x0, <16 x i32> %x1, i32* %x2ptr, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_021_broadcast2_mask:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $116, (%rdi){1to16}, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
%x2scalar = load i32, i32* %x2ptr
%vecinit.i = insertelement <16 x i32> undef, i32 %x2scalar, i32 0
%x2 = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
%res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x2, <16 x i32> %x1, i32 114, i16 %mask)
ret <16 x i32> %res
}

define <16 x i32> @vpternlog_v16i32_012_broadcast0_maskz(i32* %x0ptr, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_012_broadcast0_maskz:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $46, (%rdi){1to16}, %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
%x0scalar = load i32, i32* %x0ptr
%vecinit.i = insertelement <16 x i32> undef, i32 %x0scalar, i32 0
%x0 = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
%res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 114, i16 %mask)
ret <16 x i32> %res
}

define <16 x i32> @vpternlog_v16i32_012_broadcast1_maskz(<16 x i32> %x0, i32* %x1ptr, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_012_broadcast1_maskz:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $116, (%rdi){1to16}, %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
%x1scalar = load i32, i32* %x1ptr
%vecinit.i = insertelement <16 x i32> undef, i32 %x1scalar, i32 0
%x1 = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
%res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 114, i16 %mask)
ret <16 x i32> %res
}

define <16 x i32> @vpternlog_v16i32_012_broadcast2_maskz(<16 x i32> %x0, <16 x i32> %x1, i32* %x2ptr, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_012_broadcast2_maskz:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $114, (%rdi){1to16}, %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
%x2scalar = load i32, i32* %x2ptr
%vecinit.i = insertelement <16 x i32> undef, i32 %x2scalar, i32 0
%x2 = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
%res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 114, i16 %mask)
ret <16 x i32> %res
}

define <16 x i32> @vpternlog_v16i32_102_broadcast0_maskz(i32* %x0ptr, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_102_broadcast0_maskz:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $116, (%rdi){1to16}, %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
%x0scalar = load i32, i32* %x0ptr
%vecinit.i = insertelement <16 x i32> undef, i32 %x0scalar, i32 0
%x0 = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
%res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> %x2, i32 114, i16 %mask)
ret <16 x i32> %res
}

define <16 x i32> @vpternlog_v16i32_102_broadcast1_maskz(<16 x i32> %x0, i32* %x1ptr, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_102_broadcast1_maskz:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $46, (%rdi){1to16}, %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
%x1scalar = load i32, i32* %x1ptr
%vecinit.i = insertelement <16 x i32> undef, i32 %x1scalar, i32 0
%x1 = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
%res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> %x2, i32 114, i16 %mask)
ret <16 x i32> %res
}

define <16 x i32> @vpternlog_v16i32_102_broadcast2_maskz(<16 x i32> %x0, <16 x i32> %x1, i32* %x2ptr, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_102_broadcast2_maskz:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $78, (%rdi){1to16}, %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
%x2scalar = load i32, i32* %x2ptr
%vecinit.i = insertelement <16 x i32> undef, i32 %x2scalar, i32 0
%x2 = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
%res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x1, <16 x i32> %x0, <16 x i32> %x2, i32 114, i16 %mask)
ret <16 x i32> %res
}

define <16 x i32> @vpternlog_v16i32_210_broadcast0_maskz(i32* %x0ptr, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_210_broadcast0_maskz:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $78, (%rdi){1to16}, %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
%x0scalar = load i32, i32* %x0ptr
%vecinit.i = insertelement <16 x i32> undef, i32 %x0scalar, i32 0
%x0 = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
%res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x2, <16 x i32> %x1, <16 x i32> %x0, i32 114, i16 %mask)
ret <16 x i32> %res
}

define <16 x i32> @vpternlog_v16i32_210_broadcast1_maskz(<16 x i32> %x0, i32* %x1ptr, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_210_broadcast1_maskz:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $92, (%rdi){1to16}, %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
%x1scalar = load i32, i32* %x1ptr
%vecinit.i = insertelement <16 x i32> undef, i32 %x1scalar, i32 0
%x1 = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
%res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x2, <16 x i32> %x1, <16 x i32> %x0, i32 114, i16 %mask)
ret <16 x i32> %res
}

define <16 x i32> @vpternlog_v16i32_210_broadcast2_maskz(<16 x i32> %x0, <16 x i32> %x1, i32* %x2ptr, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_210_broadcast2_maskz:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $58, (%rdi){1to16}, %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
%x2scalar = load i32, i32* %x2ptr
%vecinit.i = insertelement <16 x i32> undef, i32 %x2scalar, i32 0
%x2 = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
%res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x2, <16 x i32> %x1, <16 x i32> %x0, i32 114, i16 %mask)
ret <16 x i32> %res
}

define <16 x i32> @vpternlog_v16i32_021_broadcast0_maskz(i32* %x0ptr, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_021_broadcast0_maskz:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $58, (%rdi){1to16}, %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
%x0scalar = load i32, i32* %x0ptr
%vecinit.i = insertelement <16 x i32> undef, i32 %x0scalar, i32 0
%x0 = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
%res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x2, <16 x i32> %x1, i32 114, i16 %mask)
ret <16 x i32> %res
}

define <16 x i32> @vpternlog_v16i32_021_broadcast1_maskz(<16 x i32> %x0, i32* %x1ptr, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_021_broadcast1_maskz:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $114, (%rdi){1to16}, %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
%x1scalar = load i32, i32* %x1ptr
%vecinit.i = insertelement <16 x i32> undef, i32 %x1scalar, i32 0
%x1 = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
%res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x2, <16 x i32> %x1, i32 114, i16 %mask)
ret <16 x i32> %res
}

define <16 x i32> @vpternlog_v16i32_021_broadcast2_maskz(<16 x i32> %x0, <16 x i32> %x1, i32* %x2ptr, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_021_broadcast2_maskz:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $116, (%rdi){1to16}, %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
%x2scalar = load i32, i32* %x2ptr
%vecinit.i = insertelement <16 x i32> undef, i32 %x2scalar, i32 0
%x2 = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
%res = call <16 x i32> @llvm.x86.avx512.maskz.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x2, <16 x i32> %x1, i32 114, i16 %mask)
ret <16 x i32> %res
}

define <16 x i32> @vpternlog_v16i32_012_broadcast0_mask1(i32* %x0ptr, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_012_broadcast0_mask1:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $92, (%rdi){1to16}, %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
%x0scalar = load i32, i32* %x0ptr
%vecinit.i = insertelement <16 x i32> undef, i32 %x0scalar, i32 0
%x0 = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
%res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 114, i16 -1)
%mask.cast = bitcast i16 %mask to <16 x i1>
%res2 = select <16 x i1> %mask.cast, <16 x i32> %res, <16 x i32> %x1
ret <16 x i32> %res2
}

define <16 x i32> @vpternlog_v16i32_012_broadcast0_mask2(i32* %x0ptr, <16 x i32> %x1, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_012_broadcast0_mask2:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $58, (%rdi){1to16}, %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
; CHECK-NEXT: retq
%x0scalar = load i32, i32* %x0ptr
%vecinit.i = insertelement <16 x i32> undef, i32 %x0scalar, i32 0
%x0 = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
%res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 114, i16 -1)
%mask.cast = bitcast i16 %mask to <16 x i1>
%res2 = select <16 x i1> %mask.cast, <16 x i32> %res, <16 x i32> %x2
ret <16 x i32> %res2
}

define <16 x i32> @vpternlog_v16i32_012_broadcast1_mask2(<16 x i32> %x0, i32* %x1ptr, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_012_broadcast1_mask2:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $46, (%rdi){1to16}, %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
; CHECK-NEXT: retq
%x1scalar = load i32, i32* %x1ptr
%vecinit.i = insertelement <16 x i32> undef, i32 %x1scalar, i32 0
%x1 = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
%res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 114, i16 -1)
%mask.cast = bitcast i16 %mask to <16 x i1>
%res2 = select <16 x i1> %mask.cast, <16 x i32> %res, <16 x i32> %x2
ret <16 x i32> %res2
}

define <16 x i32> @vpternlog_v16i32_012_broadcast2_mask1(<16 x i32> %x0, <16 x i32> %x1, i32* %x2ptr, i16 %mask) {
; CHECK-LABEL: vpternlog_v16i32_012_broadcast2_mask1:
; CHECK: ## %bb.0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpternlogd $78, (%rdi){1to16}, %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
; CHECK-NEXT: retq
%x2scalar = load i32, i32* %x2ptr
%vecinit.i = insertelement <16 x i32> undef, i32 %x2scalar, i32 0
%x2 = shufflevector <16 x i32> %vecinit.i, <16 x i32> undef, <16 x i32> zeroinitializer
%res = call <16 x i32> @llvm.x86.avx512.mask.pternlog.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i32 114, i16 -1)
%mask.cast = bitcast i16 %mask to <16 x i1>
%res2 = select <16 x i1> %mask.cast, <16 x i32> %res, <16 x i32> %x1
ret <16 x i32> %res2
}