[X86][AVX512] Dropped avx512 VPSLLDQ/VPSRLDQ intrinsics
Auto-upgrade to generic shuffles, as the SSE/AVX2 implementations already do, now that we can lower to VPSLLDQ/VPSRLDQ.

llvm-svn: 272308
commit f718682eb9
parent 47c76e201a
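The fact that makes this upgrade legal: like its SSE/AVX2 counterparts, VPSLLDQ on a ZMM register shifts each 16-byte lane independently rather than the whole 64-byte vector, so the intrinsic can be expressed as a lane-wise byte shuffle. A minimal scalar model of the semantics (illustrative C++, not LLVM code; the function name is made up):

    #include <cstdint>

    // pslldq512: scalar model of 512-bit VPSLLDQ with a byte shift < 16.
    // Each 16-byte lane shifts independently; vacated low bytes become zero.
    void pslldq512(uint8_t Dst[64], const uint8_t Src[64], unsigned Shift) {
      for (unsigned l = 0; l != 64; l += 16)   // four independent 16-byte lanes
        for (unsigned i = 0; i != 16; ++i)
          Dst[l + i] = (i < Shift) ? 0 : Src[l + i - Shift];
    }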
diff --git a/llvm/lib/IR/AutoUpgrade.cpp b/llvm/lib/IR/AutoUpgrade.cpp
@@ -223,6 +223,8 @@ static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
       Name.startswith("x86.sse2.psrl.dq") ||
       Name.startswith("x86.avx2.psll.dq") ||
       Name.startswith("x86.avx2.psrl.dq") ||
+      Name.startswith("x86.avx512.psll.dq") ||
+      Name.startswith("x86.avx512.psrl.dq") ||
       Name == "x86.sse41.pblendw" ||
       Name.startswith("x86.sse41.blendp") ||
       Name.startswith("x86.avx.blend.p") ||
@@ -333,7 +335,7 @@ bool llvm::UpgradeGlobalVariable(GlobalVariable *GV) {
   return false;
 }
 
-// Handles upgrading SSE2 and AVX2 PSLLDQ intrinsics by converting them
+// Handles upgrading SSE2/AVX2/AVX512BW PSLLDQ intrinsics by converting them
 // to byte shuffles.
 static Value *UpgradeX86PSLLDQIntrinsics(IRBuilder<> &Builder, LLVMContext &C,
                                          Value *Op, unsigned Shift) {
@@ -350,8 +352,8 @@ static Value *UpgradeX86PSLLDQIntrinsics(IRBuilder<> &Builder, LLVMContext &C,
   // If shift is less than 16, emit a shuffle to move the bytes. Otherwise,
   // we'll just return the zero vector.
   if (Shift < 16) {
-    int Idxs[32];
-    // 256-bit version is split into two 16-byte lanes.
+    int Idxs[64];
+    // 256/512-bit version is split into 2/4 16-byte lanes.
     for (unsigned l = 0; l != NumElts; l += 16)
       for (unsigned i = 0; i != 16; ++i) {
         unsigned Idx = NumElts + i - Shift;
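For context, the loop above builds a shufflevector mask whose first operand is the zero vector, so mask values below NumElts pull in zero bytes while values in [NumElts, 2*NumElts) select source bytes. A standalone sketch of the index computation (assumed to mirror the upgrade path; constants chosen for the 512-bit case):

    #include <cstdio>

    int main() {
      const unsigned NumElts = 64; // <64 x i8>, i.e. the 512-bit case
      const unsigned Shift = 4;    // byte shift; Shift >= 16 yields the zero vector
      int Idxs[64];
      for (unsigned l = 0; l != NumElts; l += 16)
        for (unsigned i = 0; i != 16; ++i) {
          unsigned Idx = NumElts + i - Shift;
          if (Idx < NumElts)
            Idx -= NumElts - 16; // fell off the lane start: select a zero byte
          Idxs[l + i] = Idx + l;
        }
      // Lane 0 prints 12 13 14 15 64 65 ... : four zero bytes, then source bytes.
      for (unsigned i = 0; i != 16; ++i)
        printf("%d ", Idxs[i]);
      printf("\n");
    }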
@@ -414,7 +416,7 @@ static Value *UpgradeX86PALIGNRIntrinsics(IRBuilder<> &Builder, LLVMContext &C,
   return Builder.CreateSelect(Mask, Align, Passthru);
 }
 
-// Handles upgrading SSE2 and AVX2 PSRLDQ intrinsics by converting them
+// Handles upgrading SSE2/AVX2/AVX512BW PSRLDQ intrinsics by converting them
 // to byte shuffles.
 static Value *UpgradeX86PSRLDQIntrinsics(IRBuilder<> &Builder, LLVMContext &C,
                                          Value *Op,
@@ -432,8 +434,8 @@ static Value *UpgradeX86PSRLDQIntrinsics(IRBuilder<> &Builder, LLVMContext &C,
   // If shift is less than 16, emit a shuffle to move the bytes. Otherwise,
   // we'll just return the zero vector.
   if (Shift < 16) {
-    int Idxs[32];
-    // 256-bit version is split into two 16-byte lanes.
+    int Idxs[64];
+    // 256/512-bit version is split into 2/4 16-byte lanes.
     for (unsigned l = 0; l != NumElts; l += 16)
       for (unsigned i = 0; i != 16; ++i) {
         unsigned Idx = i + Shift;
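The PSRLDQ upgrade is the mirror image: the source vector is the shuffle's first operand and the zeros the second, so here indices at or above NumElts are the zero bytes shifting in from the top of each lane. A matching sketch under the same assumptions:

    #include <cassert>

    int main() {
      const unsigned NumElts = 64, Shift = 4;
      int Idxs[64];
      for (unsigned l = 0; l != NumElts; l += 16)
        for (unsigned i = 0; i != 16; ++i) {
          unsigned Idx = i + Shift;
          if (Idx >= 16)
            Idx += NumElts - 16; // ran past the lane end: select a zero byte
          Idxs[l + i] = Idx + l;
        }
      // In every lane the top Shift bytes come from the zero operand.
      for (unsigned l = 0; l != NumElts; l += 16)
        for (unsigned i = 16 - Shift; i != 16; ++i)
          assert(Idxs[l + i] >= (int)NumElts);
      return 0;
    }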
@@ -792,13 +794,15 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
     Rep = UpgradeX86PSRLDQIntrinsics(Builder, C, CI->getArgOperand(0),
                                      Shift / 8); // Shift is in bits.
   } else if (Name == "llvm.x86.sse2.psll.dq.bs" ||
-             Name == "llvm.x86.avx2.psll.dq.bs") {
-    // 128/256-bit shift left specified in bytes.
+             Name == "llvm.x86.avx2.psll.dq.bs" ||
+             Name == "llvm.x86.avx512.psll.dq.512") {
+    // 128/256/512-bit shift left specified in bytes.
     unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
     Rep = UpgradeX86PSLLDQIntrinsics(Builder, C, CI->getArgOperand(0), Shift);
   } else if (Name == "llvm.x86.sse2.psrl.dq.bs" ||
-             Name == "llvm.x86.avx2.psrl.dq.bs") {
-    // 128/256-bit shift right specified in bytes.
+             Name == "llvm.x86.avx2.psrl.dq.bs" ||
+             Name == "llvm.x86.avx512.psrl.dq.512") {
+    // 128/256/512-bit shift right specified in bytes.
     unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
     Rep = UpgradeX86PSRLDQIntrinsics(Builder, C, CI->getArgOperand(0), Shift);
   } else if (Name == "llvm.x86.sse41.pblendw" ||
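One detail worth noting in the dispatch above: the legacy `.psll.dq`/`.psrl.dq` names encode the shift in bits (hence the `Shift / 8`), while the `.dq.bs` variants and the AVX512 `.dq.512` names added here already take bytes. A hedged sketch of that normalization (illustrative helper, not part of the patch):

    #include <cassert>
    #include <string>

    // shiftInBytes: normalize the immediate the way the dispatch above does.
    // Bit-count intrinsics divide by 8; byte-count ones pass it through.
    unsigned shiftInBytes(const std::string &Name, unsigned Imm) {
      bool Bytes = Name.find(".dq.bs") != std::string::npos ||
                   Name.find(".dq.512") != std::string::npos;
      return Bytes ? Imm : Imm / 8;
    }

    int main() {
      assert(shiftInBytes("llvm.x86.sse2.psll.dq", 32) == 4);      // 32 bits
      assert(shiftInBytes("llvm.x86.avx512.psll.dq.512", 4) == 4); // 4 bytes
      return 0;
    }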
diff --git a/llvm/lib/Target/X86/X86IntrinsicsInfo.h b/llvm/lib/Target/X86/X86IntrinsicsInfo.h
@@ -2002,8 +2002,6 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
   X86_INTRINSIC_DATA(avx512_pbroadcastw_512, INTR_TYPE_1OP_MASK,
                      X86ISD::VBROADCAST, 0),
   X86_INTRINSIC_DATA(avx512_psad_bw_512, INTR_TYPE_2OP, X86ISD::PSADBW, 0),
-  X86_INTRINSIC_DATA(avx512_psll_dq_512, INTR_TYPE_2OP_IMM8, X86ISD::VSHLDQ, 0),
-  X86_INTRINSIC_DATA(avx512_psrl_dq_512, INTR_TYPE_2OP_IMM8, X86ISD::VSRLDQ, 0),
   X86_INTRINSIC_DATA(avx512_ptestm_b_128, CMP_MASK, X86ISD::TESTM, 0),
   X86_INTRINSIC_DATA(avx512_ptestm_b_256, CMP_MASK, X86ISD::TESTM, 0),
   X86_INTRINSIC_DATA(avx512_ptestm_b_512, CMP_MASK, X86ISD::TESTM, 0),
diff --git a/llvm/test/CodeGen/X86/avx512bw-intrinsics-upgrade.ll b/llvm/test/CodeGen/X86/avx512bw-intrinsics-upgrade.ll
@@ -106,6 +106,50 @@ define <64 x i8>@test_int_x86_avx512_mask_loadu_b_512(i8* %ptr, i8* %ptr2, <64 x
   ret <64 x i8> %res2
 }
 
+declare <8 x i64> @llvm.x86.avx512.psll.dq.512(<8 x i64>, i32)
+
+define <8 x i64>@test_int_x86_avx512_mask_psll_dq_512(<8 x i64> %x0) {
+; AVX512BW-LABEL: test_int_x86_avx512_mask_psll_dq_512:
+; AVX512BW:       ## BB#0:
+; AVX512BW-NEXT:    vpslldq $8, %zmm0, %zmm1
+; AVX512BW-NEXT:    vpslldq $4, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpaddq %zmm0, %zmm1, %zmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512F-32-LABEL: test_int_x86_avx512_mask_psll_dq_512:
+; AVX512F-32:       # BB#0:
+; AVX512F-32-NEXT:    vpslldq $8, %zmm0, %zmm1
+; AVX512F-32-NEXT:    vpslldq $4, %zmm0, %zmm0
+; AVX512F-32-NEXT:    vpaddq %zmm0, %zmm1, %zmm0
+; AVX512F-32-NEXT:    retl
+  %res = call <8 x i64> @llvm.x86.avx512.psll.dq.512(<8 x i64> %x0, i32 8)
+  %res1 = call <8 x i64> @llvm.x86.avx512.psll.dq.512(<8 x i64> %x0, i32 4)
+  %res2 = add <8 x i64> %res, %res1
+  ret <8 x i64> %res2
+}
+
+declare <8 x i64> @llvm.x86.avx512.psrl.dq.512(<8 x i64>, i32)
+
+define <8 x i64>@test_int_x86_avx512_mask_psrl_dq_512(<8 x i64> %x0) {
+; AVX512BW-LABEL: test_int_x86_avx512_mask_psrl_dq_512:
+; AVX512BW:       ## BB#0:
+; AVX512BW-NEXT:    vpsrldq $8, %zmm0, %zmm1
+; AVX512BW-NEXT:    vpsrldq $4, %zmm0, %zmm0
+; AVX512BW-NEXT:    vpaddq %zmm0, %zmm1, %zmm0
+; AVX512BW-NEXT:    retq
+;
+; AVX512F-32-LABEL: test_int_x86_avx512_mask_psrl_dq_512:
+; AVX512F-32:       # BB#0:
+; AVX512F-32-NEXT:    vpsrldq $8, %zmm0, %zmm1
+; AVX512F-32-NEXT:    vpsrldq $4, %zmm0, %zmm0
+; AVX512F-32-NEXT:    vpaddq %zmm0, %zmm1, %zmm0
+; AVX512F-32-NEXT:    retl
+  %res = call <8 x i64> @llvm.x86.avx512.psrl.dq.512(<8 x i64> %x0, i32 8)
+  %res1 = call <8 x i64> @llvm.x86.avx512.psrl.dq.512(<8 x i64> %x0, i32 4)
+  %res2 = add <8 x i64> %res, %res1
+  ret <8 x i64> %res2
+}
+
 declare <64 x i8> @llvm.x86.avx512.mask.palignr.512(<64 x i8>, <64 x i8>, i32, <64 x i8>, i64)
 
 define <64 x i8>@test_int_x86_avx512_mask_palignr_512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x3, i64 %x4) {
diff --git a/llvm/test/CodeGen/X86/avx512bw-intrinsics.ll b/llvm/test/CodeGen/X86/avx512bw-intrinsics.ll
@@ -1,4 +1,3 @@
-; NOTE: Assertions have been autogenerated by update_llc_test_checks.py
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512BW
 ; RUN: llc < %s -mtriple=i386-unknown-linux-gnu -mcpu=knl -mattr=+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512F-32
@@ -2604,49 +2603,6 @@ define <32 x i16>@test_int_x86_avx512_mask_dbpsadbw_512(<64 x i8> %x0, <64 x i8>
   ret <32 x i16> %res4
 }
 
-declare <8 x i64> @llvm.x86.avx512.psll.dq.512(<8 x i64>, i32)
-
-define <8 x i64>@test_int_x86_avx512_mask_psll_dq_512(<8 x i64> %x0) {
-; AVX512BW-LABEL: test_int_x86_avx512_mask_psll_dq_512:
-; AVX512BW:       ## BB#0:
-; AVX512BW-NEXT:    vpslldq $8, %zmm0, %zmm1
-; AVX512BW-NEXT:    vpslldq $4, %zmm0, %zmm0
-; AVX512BW-NEXT:    vpaddq %zmm0, %zmm1, %zmm0
-; AVX512BW-NEXT:    retq
-;
-; AVX512F-32-LABEL: test_int_x86_avx512_mask_psll_dq_512:
-; AVX512F-32:       # BB#0:
-; AVX512F-32-NEXT:    vpslldq $8, %zmm0, %zmm1
-; AVX512F-32-NEXT:    vpslldq $4, %zmm0, %zmm0
-; AVX512F-32-NEXT:    vpaddq %zmm0, %zmm1, %zmm0
-; AVX512F-32-NEXT:    retl
-  %res = call <8 x i64> @llvm.x86.avx512.psll.dq.512(<8 x i64> %x0, i32 8)
-  %res1 = call <8 x i64> @llvm.x86.avx512.psll.dq.512(<8 x i64> %x0, i32 4)
-  %res2 = add <8 x i64> %res, %res1
-  ret <8 x i64> %res2
-}
-
-declare <8 x i64> @llvm.x86.avx512.psrl.dq.512(<8 x i64>, i32)
-
-define <8 x i64>@test_int_x86_avx512_mask_psrl_dq_512(<8 x i64> %x0) {
-; AVX512BW-LABEL: test_int_x86_avx512_mask_psrl_dq_512:
-; AVX512BW:       ## BB#0:
-; AVX512BW-NEXT:    vpsrldq $8, %zmm0, %zmm1
-; AVX512BW-NEXT:    vpsrldq $4, %zmm0, %zmm0
-; AVX512BW-NEXT:    vpaddq %zmm0, %zmm1, %zmm0
-; AVX512BW-NEXT:    retq
-;
-; AVX512F-32-LABEL: test_int_x86_avx512_mask_psrl_dq_512:
-; AVX512F-32:       # BB#0:
-; AVX512F-32-NEXT:    vpsrldq $8, %zmm0, %zmm1
-; AVX512F-32-NEXT:    vpsrldq $4, %zmm0, %zmm0
-; AVX512F-32-NEXT:    vpaddq %zmm0, %zmm1, %zmm0
-; AVX512F-32-NEXT:    retl
-  %res = call <8 x i64> @llvm.x86.avx512.psrl.dq.512(<8 x i64> %x0, i32 8)
-  %res1 = call <8 x i64> @llvm.x86.avx512.psrl.dq.512(<8 x i64> %x0, i32 4)
-  %res2 = add <8 x i64> %res, %res1
-  ret <8 x i64> %res2
-}
 declare <8 x i64> @llvm.x86.avx512.psad.bw.512(<64 x i8>, <64 x i8>)
 
 define <8 x i64>@test_int_x86_avx512_mask_psadb_w_512(<64 x i8> %x0, <64 x i8> %x1, <64 x i8> %x2){