forked from OSchip/llvm-project
[X86][AVX512] Remove the llvm.x86 intrinsics backing the _mm_mask_move_{ss|sd} builtins; calls are auto-upgraded to plain IR.
Differential Revision: https://reviews.llvm.org/D26128 llvm-svn: 287087
This commit is contained in:
parent
f3c143188d
commit
4d60243bfd
|
@ -1644,20 +1644,6 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
|
|||
[IntrReadMem, IntrArgMemOnly]>;
|
||||
}
|
||||
|
||||
// Conditional move ops: masked scalar moves. The low element of the result
// is taken from the second source when bit 0 of the i8 mask is set,
// otherwise from the passthru operand; upper lanes come from the first
// source (see upgradeMaskedMove in AutoUpgrade for the equivalent IR).
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
  // llvm.x86.avx512.mask.move.ss(a, b, passthru, mask) -> <4 x float>
  def int_x86_avx512_mask_move_ss :
      GCCBuiltin<"__builtin_ia32_movss_mask">,
      Intrinsic<[llvm_v4f32_ty],
                [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty, llvm_i8_ty],
                [IntrNoMem]>;
  // llvm.x86.avx512.mask.move.sd(a, b, passthru, mask) -> <2 x double>
  def int_x86_avx512_mask_move_sd :
      GCCBuiltin<"__builtin_ia32_movsd_mask">,
      Intrinsic<[llvm_v2f64_ty],
                [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty, llvm_i8_ty],
                [IntrNoMem]>;
}
|
||||
|
||||
// Conditional store ops
|
||||
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
|
||||
def int_x86_avx_maskstore_pd : GCCBuiltin<"__builtin_ia32_maskstorepd">,
|
||||
|
|
|
@ -382,6 +382,7 @@ static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
|
|||
Name.startswith("avx.vbroadcastf128") || // Added in 4.0
|
||||
Name == "avx2.vbroadcasti128" || // Added in 3.7
|
||||
Name == "xop.vpcmov" || // Added in 3.8
|
||||
Name.startswith("avx512.mask.move.s") || // Added in 4.0
|
||||
(Name.startswith("xop.vpcom") && // Added in 3.2
|
||||
F->arg_size() == 2))) {
|
||||
NewFn = nullptr;
|
||||
|
@ -706,6 +707,19 @@ static Value *UpgradeX86MaskedShift(IRBuilder<> &Builder, CallInst &CI,
|
|||
return EmitX86Select(Builder, CI.getArgOperand(3), Rep, CI.getArgOperand(2));
|
||||
}
|
||||
|
||||
// Expand a call to the removed llvm.x86.avx512.mask.move.{ss,sd} intrinsic
// into plain IR: the low scalar of the result comes from the second operand
// when bit 0 of the i8 mask is set, otherwise from the passthru operand;
// the remaining lanes are taken from the first operand.
static Value *upgradeMaskedMove(IRBuilder<> &Builder, CallInst &CI) {
  Value *Op0 = CI.getArgOperand(0);
  Value *Op1 = CI.getArgOperand(1);
  Value *Passthru = CI.getArgOperand(2);
  Value *Mask = CI.getArgOperand(3);

  // Candidate low scalars from each source.
  Value *FromOp1 = Builder.CreateExtractElement(Op1, (uint64_t)0);
  Value *FromPassthru = Builder.CreateExtractElement(Passthru, (uint64_t)0);

  // Only bit 0 of the mask participates in a scalar move.
  Value *LowBit = Builder.CreateAnd(Mask, APInt(8, 1));
  Value *Pred = Builder.CreateIsNotNull(LowBit);

  Value *Scalar = Builder.CreateSelect(Pred, FromOp1, FromPassthru);
  return Builder.CreateInsertElement(Op0, Scalar, (uint64_t)0);
}
|
||||
|
||||
/// Upgrade a call to an old intrinsic. All argument and return casting must be
|
||||
/// provided to seamlessly integrate with existing context.
|
||||
|
@ -1589,6 +1603,8 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
|
|||
}
|
||||
|
||||
Rep = UpgradeX86MaskedShift(Builder, *CI, IID);
|
||||
} else if (IsX86 && Name.startswith("avx512.mask.move.s")) {
|
||||
Rep = upgradeMaskedMove(Builder, *CI);
|
||||
} else {
|
||||
llvm_unreachable("Unknown function for CallInst upgrade.");
|
||||
}
|
||||
|
|
|
@ -794,10 +794,6 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
|
|||
X86ISD::FMIN_RND, 0),
|
||||
X86_INTRINSIC_DATA(avx512_mask_min_ss_round, INTR_TYPE_SCALAR_MASK_RM,
|
||||
X86ISD::FMIN_RND, 0),
|
||||
X86_INTRINSIC_DATA(avx512_mask_move_sd, INTR_TYPE_SCALAR_MASK,
|
||||
X86ISD::MOVSD, 0),
|
||||
X86_INTRINSIC_DATA(avx512_mask_move_ss, INTR_TYPE_SCALAR_MASK,
|
||||
X86ISD::MOVSS, 0),
|
||||
X86_INTRINSIC_DATA(avx512_mask_mul_pd_512, INTR_TYPE_2OP_MASK, ISD::FMUL,
|
||||
X86ISD::FMUL_RND),
|
||||
X86_INTRINSIC_DATA(avx512_mask_mul_ps_512, INTR_TYPE_2OP_MASK, ISD::FMUL,
|
||||
|
|
|
@ -1765,6 +1765,54 @@ define <8 x i64>@test_int_x86_avx512_mask_pminu_q_512(<8 x i64> %x0, <8 x i64> %
|
|||
ret <8 x i64> %res2
|
||||
}
|
||||
|
||||
; Mask-merge scalar move: the low lane takes %__B's element 0 when bit 0 of
; %__U is set, otherwise %__W's; lanes 1-3 come from %__A. Exercises the
; auto-upgrade of the removed llvm.x86.avx512.mask.move.ss intrinsic.
define <4 x float> @test_mm_mask_move_ss(<4 x float> %__W, i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B) {
; CHECK-LABEL: test_mm_mask_move_ss:
; CHECK: ## BB#0: ## %entry
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovss %xmm2, %xmm1, %xmm0 {%k1}
; CHECK-NEXT: retq
entry:
  %res = call <4 x float> @llvm.x86.avx512.mask.move.ss(<4 x float> %__A, <4 x float> %__B, <4 x float> %__W, i8 %__U)
  ret <4 x float> %res
}
|
||||
|
||||
|
||||
; Zero-masked variant: passthru is zeroinitializer, so the low lane is
; %__B's element 0 when bit 0 of %__U is set and 0.0 otherwise; lanes 1-3
; come from %__A. Expect the {z} (zeroing) form of vmovss.
define <4 x float> @test_mm_maskz_move_ss(i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B) {
; CHECK-LABEL: test_mm_maskz_move_ss:
; CHECK: ## BB#0: ## %entry
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
entry:
  %res = call <4 x float> @llvm.x86.avx512.mask.move.ss(<4 x float> %__A, <4 x float> %__B, <4 x float> zeroinitializer, i8 %__U)
  ret <4 x float> %res
}
|
||||
|
||||
; Double-precision counterpart of test_mm_mask_move_ss: low lane selects
; between %__B (mask bit 0 set) and %__W; lane 1 comes from %__A. Exercises
; the auto-upgrade of the removed llvm.x86.avx512.mask.move.sd intrinsic.
define <2 x double> @test_mm_mask_move_sd(<2 x double> %__W, i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B) {
; CHECK-LABEL: test_mm_mask_move_sd:
; CHECK: ## BB#0: ## %entry
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovsd %xmm2, %xmm1, %xmm0 {%k1}
; CHECK-NEXT: retq
entry:
  %res = call <2 x double> @llvm.x86.avx512.mask.move.sd(<2 x double> %__A, <2 x double> %__B, <2 x double> %__W, i8 %__U)
  ret <2 x double> %res
}
|
||||
|
||||
; Zero-masked double-precision variant: passthru is zeroinitializer, so the
; low lane is %__B's element 0 when bit 0 of %__U is set and 0.0 otherwise;
; lane 1 comes from %__A. Expect the {z} (zeroing) form of vmovsd.
define <2 x double> @test_mm_maskz_move_sd(i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B) {
; CHECK-LABEL: test_mm_maskz_move_sd:
; CHECK: ## BB#0: ## %entry
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vmovsd %xmm1, %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
entry:
  %res = call <2 x double> @llvm.x86.avx512.mask.move.sd(<2 x double> %__A, <2 x double> %__B, <2 x double> zeroinitializer, i8 %__U)
  ret <2 x double> %res
}
|
||||
|
||||
declare <4 x float> @llvm.x86.avx512.mask.move.ss(<4 x float>, <4 x float>, <4 x float>, i8)
|
||||
declare <2 x double> @llvm.x86.avx512.mask.move.sd(<2 x double>, <2 x double>, <2 x double>, i8)
|
||||
|
||||
declare <16 x i32> @llvm.x86.avx512.mask.pmovzxb.d.512(<16 x i8>, <16 x i32>, i16)
|
||||
|
||||
define <16 x i32>@test_int_x86_avx512_mask_pmovzxb_d_512(<16 x i8> %x0, <16 x i32> %x1, i16 %x2) {
|
||||
|
|
Loading…
Reference in New Issue