[X86] Emit native IR for pmuldq/pmuludq builtins.

I believe all the pieces are now in place in the backend to make this work correctly. We can either mask the inputs to 32 bits for pmuludq or shl/ashr for pmuldq, and then use a regular mul instruction. The backend should combine this to PMULUDQ/PMULDQ, and SimplifyDemandedBits will then remove the and/shifts.

Differential Revision: https://reviews.llvm.org/D45421

llvm-svn: 329605
parent 3a0cab73eb
commit 304edc1e75
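For context (an illustration, not part of the commit): pmuludq multiplies the zero-extended low 32 bits of each 64-bit lane, and pmuldq the sign-extended low 32 bits, producing a full 64-bit product. A minimal scalar sketch of one lane, assuming two's-complement narrowing conversions:

#include <stdint.h>

/* PMULUDQ: zero-extend the low 32 bits of each lane, then multiply.
   The new IR expresses the zero extension as an `and` with 0xffffffff. */
static uint64_t pmuludq_lane(uint64_t a, uint64_t b) {
  return (a & 0xffffffffu) * (b & 0xffffffffu);
}

/* PMULDQ: sign-extend the low 32 bits of each lane, then multiply.
   The new IR expresses the sign extension as `shl 32` + `ashr 32`.
   The narrowing casts below assume two's-complement behavior, which
   holds for every target clang supports. */
static int64_t pmuldq_lane(int64_t a, int64_t b) {
  return (int64_t)(int32_t)(uint32_t)a * (int64_t)(int32_t)(uint32_t)b;
}

The vector forms apply this to lanes 0 and 2 (and 4, 6 for wider vectors) of the vXi32 inputs, which is why the CodeGen below bitcasts to vXi64 first.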
clang/lib/CodeGen/CGBuiltin.cpp

@@ -8264,6 +8264,32 @@ static Value *EmitX86MinMax(CodeGenFunction &CGF, ICmpInst::Predicate Pred,
   return EmitX86Select(CGF, Ops[3], Res, Ops[2]);
 }
 
+static Value *EmitX86Muldq(CodeGenFunction &CGF, bool IsSigned,
+                           ArrayRef<Value *> Ops) {
+  llvm::Type *Ty = Ops[0]->getType();
+  // Arguments have a vXi32 type so cast to vXi64.
+  Ty = llvm::VectorType::get(CGF.Int64Ty,
+                             Ty->getPrimitiveSizeInBits() / 64);
+  Value *LHS = CGF.Builder.CreateBitCast(Ops[0], Ty);
+  Value *RHS = CGF.Builder.CreateBitCast(Ops[1], Ty);
+
+  if (IsSigned) {
+    // Shift left then arithmetic shift right.
+    Constant *ShiftAmt = ConstantInt::get(Ty, 32);
+    LHS = CGF.Builder.CreateShl(LHS, ShiftAmt);
+    LHS = CGF.Builder.CreateAShr(LHS, ShiftAmt);
+    RHS = CGF.Builder.CreateShl(RHS, ShiftAmt);
+    RHS = CGF.Builder.CreateAShr(RHS, ShiftAmt);
+  } else {
+    // Clear the upper bits.
+    Constant *Mask = ConstantInt::get(Ty, 0xffffffff);
+    LHS = CGF.Builder.CreateAnd(LHS, Mask);
+    RHS = CGF.Builder.CreateAnd(RHS, Mask);
+  }
+
+  return CGF.Builder.CreateMul(LHS, RHS);
+}
+
 static Value *EmitX86SExtMask(CodeGenFunction &CGF, Value *Op,
                               llvm::Type *DstTy) {
   unsigned NumberOfElements = DstTy->getVectorNumElements();
@@ -8968,6 +8994,16 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
   case X86::BI__builtin_ia32_pminuq512_mask:
     return EmitX86MinMax(*this, ICmpInst::ICMP_ULT, Ops);
 
+  case X86::BI__builtin_ia32_pmuludq128:
+  case X86::BI__builtin_ia32_pmuludq256:
+  case X86::BI__builtin_ia32_pmuludq512:
+    return EmitX86Muldq(*this, /*IsSigned*/false, Ops);
+
+  case X86::BI__builtin_ia32_pmuldq128:
+  case X86::BI__builtin_ia32_pmuldq256:
+  case X86::BI__builtin_ia32_pmuldq512:
+    return EmitX86Muldq(*this, /*IsSigned*/true, Ops);
+
   // 3DNow!
   case X86::BI__builtin_ia32_pswapdsf:
   case X86::BI__builtin_ia32_pswapdsi: {
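These builtins back the usual Intel intrinsics, so the change is user-visible in the IR clang emits for them. A usage sketch (the wrapper names are illustrative; the intrinsic-to-builtin mapping is the standard one, and the signed case needs -msse4.1):

#include <immintrin.h>

/* _mm_mul_epu32 lowers through __builtin_ia32_pmuludq128 (SSE2). */
__m128i mul_even_lanes_unsigned(__m128i a, __m128i b) {
  return _mm_mul_epu32(a, b);
}

/* _mm_mul_epi32 lowers through __builtin_ia32_pmuldq128 (SSE4.1). */
__m128i mul_even_lanes_signed(__m128i a, __m128i b) {
  return _mm_mul_epi32(a, b);
}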
clang/test/CodeGen/avx2-builtins.c

@@ -835,13 +835,19 @@ __m256i test_mm256_mpsadbw_epu8(__m256i x, __m256i y) {
 
 __m256i test_mm256_mul_epi32(__m256i a, __m256i b) {
   // CHECK-LABEL: test_mm256_mul_epi32
-  // CHECK: call <4 x i64> @llvm.x86.avx2.pmul.dq(<8 x i32> %{{.*}}, <8 x i32> %{{.*}})
+  // CHECK: shl <4 x i64> %{{.*}}, <i64 32, i64 32, i64 32, i64 32>
+  // CHECK: ashr <4 x i64> %{{.*}}, <i64 32, i64 32, i64 32, i64 32>
+  // CHECK: shl <4 x i64> %{{.*}}, <i64 32, i64 32, i64 32, i64 32>
+  // CHECK: ashr <4 x i64> %{{.*}}, <i64 32, i64 32, i64 32, i64 32>
+  // CHECK: mul <4 x i64> %{{.*}}, %{{.*}}
   return _mm256_mul_epi32(a, b);
 }
 
 __m256i test_mm256_mul_epu32(__m256i a, __m256i b) {
   // CHECK-LABEL: test_mm256_mul_epu32
-  // CHECK: call <4 x i64> @llvm.x86.avx2.pmulu.dq(<8 x i32> %{{.*}}, <8 x i32> %{{.*}})
+  // CHECK: and <4 x i64> %{{.*}}, <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295>
+  // CHECK: and <4 x i64> %{{.*}}, <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295>
+  // CHECK: mul <4 x i64> %{{.*}}, %{{.*}}
   return _mm256_mul_epu32(a, b);
 }
 
clang/test/CodeGen/avx512f-builtins.c

@@ -1874,13 +1874,21 @@ __m512i test_mm512_add_epi64(__m512i __A, __m512i __B) {
 
 __m512i test_mm512_mul_epi32(__m512i __A, __m512i __B) {
   //CHECK-LABEL: @test_mm512_mul_epi32
-  //CHECK: @llvm.x86.avx512.pmul.dq.512
+  //CHECK: shl <8 x i64> %{{.*}}, <i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32>
+  //CHECK: ashr <8 x i64> %{{.*}}, <i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32>
+  //CHECK: shl <8 x i64> %{{.*}}, <i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32>
+  //CHECK: ashr <8 x i64> %{{.*}}, <i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32>
+  //CHECK: mul <8 x i64> %{{.*}}, %{{.*}}
   return _mm512_mul_epi32(__A,__B);
 }
 
 __m512i test_mm512_maskz_mul_epi32 (__mmask16 __k,__m512i __A, __m512i __B) {
   //CHECK-LABEL: @test_mm512_maskz_mul_epi32
-  //CHECK: @llvm.x86.avx512.pmul.dq.512
+  //CHECK: shl <8 x i64> %{{.*}}, <i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32>
+  //CHECK: ashr <8 x i64> %{{.*}}, <i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32>
+  //CHECK: shl <8 x i64> %{{.*}}, <i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32>
+  //CHECK: ashr <8 x i64> %{{.*}}, <i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32>
+  //CHECK: mul <8 x i64> %{{.*}}, %{{.*}}
   //CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
   return _mm512_maskz_mul_epi32(__k,__A,__B);
 }
@@ -1888,20 +1896,28 @@ __m512i test_mm512_maskz_mul_epi32 (__mmask16 __k,__m512i __A, __m512i __B) {
 __m512i test_mm512_mask_mul_epi32 (__mmask16 __k,__m512i __A, __m512i __B,
                                    __m512i __src) {
   //CHECK-LABEL: @test_mm512_mask_mul_epi32
-  //CHECK: @llvm.x86.avx512.pmul.dq.512
+  //CHECK: shl <8 x i64> %{{.*}}, <i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32>
+  //CHECK: ashr <8 x i64> %{{.*}}, <i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32>
+  //CHECK: shl <8 x i64> %{{.*}}, <i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32>
+  //CHECK: ashr <8 x i64> %{{.*}}, <i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32>
+  //CHECK: mul <8 x i64> %{{.*}}, %{{.*}}
   //CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
   return _mm512_mask_mul_epi32(__src,__k,__A,__B);
 }
 
 __m512i test_mm512_mul_epu32 (__m512i __A, __m512i __B) {
   //CHECK-LABEL: @test_mm512_mul_epu32
-  //CHECK: @llvm.x86.avx512.pmulu.dq.512
+  //CHECK: and <8 x i64> %{{.*}}, <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295>
+  //CHECK: and <8 x i64> %{{.*}}, <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295>
+  //CHECK: mul <8 x i64> %{{.*}}, %{{.*}}
   return _mm512_mul_epu32(__A,__B);
 }
 
 __m512i test_mm512_maskz_mul_epu32 (__mmask16 __k,__m512i __A, __m512i __B) {
   //CHECK-LABEL: @test_mm512_maskz_mul_epu32
-  //CHECK: @llvm.x86.avx512.pmulu.dq.512
+  //CHECK: and <8 x i64> %{{.*}}, <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295>
+  //CHECK: and <8 x i64> %{{.*}}, <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295>
+  //CHECK: mul <8 x i64> %{{.*}}, %{{.*}}
   //CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
   return _mm512_maskz_mul_epu32(__k,__A,__B);
 }
@@ -1909,7 +1925,9 @@ __m512i test_mm512_maskz_mul_epu32 (__mmask16 __k,__m512i __A, __m512i __B) {
 __m512i test_mm512_mask_mul_epu32 (__mmask16 __k,__m512i __A, __m512i __B,
                                    __m512i __src) {
   //CHECK-LABEL: @test_mm512_mask_mul_epu32
-  //CHECK: @llvm.x86.avx512.pmulu.dq.512
+  //CHECK: and <8 x i64> %{{.*}}, <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295>
+  //CHECK: and <8 x i64> %{{.*}}, <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295>
+  //CHECK: mul <8 x i64> %{{.*}}, %{{.*}}
   //CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
   return _mm512_mask_mul_epu32(__src,__k,__A,__B);
 }
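The masked tests above expect the same multiply pattern followed by a vector select, which is how the mask/maskz intrinsics merge the product with the passthrough operand. A sketch of the user-facing call, assuming the __mmask8 signature from the Intel intrinsics guide (needs -mavx512f):

#include <immintrin.h>

/* Lanes whose mask bit is 0 keep the value from src; lanes whose bit is 1
   receive the 64-bit product. In the IR above this is the final
   `select <8 x i1>` after the `mul`. */
__m512i masked_mul(__m512i src, __mmask8 k, __m512i a, __m512i b) {
  return _mm512_mask_mul_epi32(src, k, a, b);
}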
clang/test/CodeGen/avx512vl-builtins.c

@@ -727,14 +727,22 @@ __m128i test_mm_maskz_sub_epi64 (__mmask8 __U, __m128i __A, __m128i __B) {
 __m256i test_mm256_mask_mul_epi32 (__m256i __W, __mmask8 __M, __m256i __X,
                                    __m256i __Y) {
   //CHECK-LABEL: @test_mm256_mask_mul_epi32
-  //CHECK: @llvm.x86.avx2.pmul.dq
+  //CHECK: shl <4 x i64> %{{.*}}, <i64 32, i64 32, i64 32, i64 32>
+  //CHECK: ashr <4 x i64> %{{.*}}, <i64 32, i64 32, i64 32, i64 32>
+  //CHECK: shl <4 x i64> %{{.*}}, <i64 32, i64 32, i64 32, i64 32>
+  //CHECK: ashr <4 x i64> %{{.*}}, <i64 32, i64 32, i64 32, i64 32>
+  //CHECK: mul <4 x i64> %{{.*}}, %{{.*}}
   //CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
   return _mm256_mask_mul_epi32(__W, __M, __X, __Y);
 }
 
 __m256i test_mm256_maskz_mul_epi32 (__mmask8 __M, __m256i __X, __m256i __Y) {
   //CHECK-LABEL: @test_mm256_maskz_mul_epi32
-  //CHECK: @llvm.x86.avx2.pmul.dq
+  //CHECK: shl <4 x i64> %{{.*}}, <i64 32, i64 32, i64 32, i64 32>
+  //CHECK: ashr <4 x i64> %{{.*}}, <i64 32, i64 32, i64 32, i64 32>
+  //CHECK: shl <4 x i64> %{{.*}}, <i64 32, i64 32, i64 32, i64 32>
+  //CHECK: ashr <4 x i64> %{{.*}}, <i64 32, i64 32, i64 32, i64 32>
+  //CHECK: mul <4 x i64> %{{.*}}, %{{.*}}
   //CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
   return _mm256_maskz_mul_epi32(__M, __X, __Y);
 }
@@ -743,14 +751,22 @@ __m256i test_mm256_maskz_mul_epi32 (__mmask8 __M, __m256i __X, __m256i __Y) {
 __m128i test_mm_mask_mul_epi32 (__m128i __W, __mmask8 __M, __m128i __X,
                                 __m128i __Y) {
   //CHECK-LABEL: @test_mm_mask_mul_epi32
-  //CHECK: @llvm.x86.sse41.pmuldq
+  //CHECK: shl <2 x i64> %{{.*}}, <i64 32, i64 32>
+  //CHECK: ashr <2 x i64> %{{.*}}, <i64 32, i64 32>
+  //CHECK: shl <2 x i64> %{{.*}}, <i64 32, i64 32>
+  //CHECK: ashr <2 x i64> %{{.*}}, <i64 32, i64 32>
+  //CHECK: mul <2 x i64> %{{.*}}, %{{.*}}
   //CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
   return _mm_mask_mul_epi32(__W, __M, __X, __Y);
 }
 
 __m128i test_mm_maskz_mul_epi32 (__mmask8 __M, __m128i __X, __m128i __Y) {
   //CHECK-LABEL: @test_mm_maskz_mul_epi32
-  //CHECK: @llvm.x86.sse41.pmuldq
+  //CHECK: shl <2 x i64> %{{.*}}, <i64 32, i64 32>
+  //CHECK: ashr <2 x i64> %{{.*}}, <i64 32, i64 32>
+  //CHECK: shl <2 x i64> %{{.*}}, <i64 32, i64 32>
+  //CHECK: ashr <2 x i64> %{{.*}}, <i64 32, i64 32>
+  //CHECK: mul <2 x i64> %{{.*}}, %{{.*}}
   //CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
   return _mm_maskz_mul_epi32(__M, __X, __Y);
 }
@@ -758,14 +774,18 @@ __m128i test_mm_maskz_mul_epi32 (__mmask8 __M, __m128i __X, __m128i __Y) {
 __m256i test_mm256_mask_mul_epu32 (__m256i __W, __mmask8 __M, __m256i __X,
                                    __m256i __Y) {
   //CHECK-LABEL: @test_mm256_mask_mul_epu32
-  //CHECK: @llvm.x86.avx2.pmulu.dq
+  //CHECK: and <4 x i64> %{{.*}}, <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295>
+  //CHECK: and <4 x i64> %{{.*}}, <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295>
+  //CHECK: mul <4 x i64> %{{.*}}, %{{.*}}
   //CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
   return _mm256_mask_mul_epu32(__W, __M, __X, __Y);
 }
 
 __m256i test_mm256_maskz_mul_epu32 (__mmask8 __M, __m256i __X, __m256i __Y) {
   //CHECK-LABEL: @test_mm256_maskz_mul_epu32
-  //CHECK: @llvm.x86.avx2.pmulu.dq
+  //CHECK: and <4 x i64> %{{.*}}, <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295>
+  //CHECK: and <4 x i64> %{{.*}}, <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295>
+  //CHECK: mul <4 x i64> %{{.*}}, %{{.*}}
   //CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
   return _mm256_maskz_mul_epu32(__M, __X, __Y);
 }
@@ -773,14 +793,18 @@ __m256i test_mm256_maskz_mul_epu32 (__mmask8 __M, __m256i __X, __m256i __Y) {
 __m128i test_mm_mask_mul_epu32 (__m128i __W, __mmask8 __M, __m128i __X,
                                 __m128i __Y) {
   //CHECK-LABEL: @test_mm_mask_mul_epu32
-  //CHECK: @llvm.x86.sse2.pmulu.dq
+  //CHECK: and <2 x i64> %{{.*}}, <i64 4294967295, i64 4294967295>
+  //CHECK: and <2 x i64> %{{.*}}, <i64 4294967295, i64 4294967295>
+  //CHECK: mul <2 x i64> %{{.*}}, %{{.*}}
   //CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
   return _mm_mask_mul_epu32(__W, __M, __X, __Y);
 }
 
 __m128i test_mm_maskz_mul_epu32 (__mmask8 __M, __m128i __X, __m128i __Y) {
   //CHECK-LABEL: @test_mm_maskz_mul_epu32
-  //CHECK: @llvm.x86.sse2.pmulu.dq
+  //CHECK: and <2 x i64> %{{.*}}, <i64 4294967295, i64 4294967295>
+  //CHECK: and <2 x i64> %{{.*}}, <i64 4294967295, i64 4294967295>
+  //CHECK: mul <2 x i64> %{{.*}}, %{{.*}}
   //CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
   return _mm_maskz_mul_epu32(__M, __X, __Y);
 }
clang/test/CodeGen/sse2-builtins.c

@@ -816,7 +816,9 @@ int test_mm_movemask_pd(__m128d A) {
 
 __m128i test_mm_mul_epu32(__m128i A, __m128i B) {
   // CHECK-LABEL: test_mm_mul_epu32
-  // CHECK: call <2 x i64> @llvm.x86.sse2.pmulu.dq(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+  // CHECK: and <2 x i64> %{{.*}}, <i64 4294967295, i64 4294967295>
+  // CHECK: and <2 x i64> %{{.*}}, <i64 4294967295, i64 4294967295>
+  // CHECK: mul <2 x i64> %{{.*}}, %{{.*}}
   return _mm_mul_epu32(A, B);
 }
 
clang/test/CodeGen/sse41-builtins.c

@@ -312,7 +312,11 @@ __m128i test_mm_mpsadbw_epu8(__m128i x, __m128i y) {
 
 __m128i test_mm_mul_epi32(__m128i x, __m128i y) {
   // CHECK-LABEL: test_mm_mul_epi32
-  // CHECK: call <2 x i64> @llvm.x86.sse41.pmuldq(<4 x i32> %{{.*}}, <4 x i32> %{{.*}})
+  // CHECK: shl <2 x i64> %{{.*}}, <i64 32, i64 32>
+  // CHECK: ashr <2 x i64> %{{.*}}, <i64 32, i64 32>
+  // CHECK: shl <2 x i64> %{{.*}}, <i64 32, i64 32>
+  // CHECK: ashr <2 x i64> %{{.*}}, <i64 32, i64 32>
+  // CHECK: mul <2 x i64> %{{.*}}, %{{.*}}
   return _mm_mul_epi32(x, y);
 }
 
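A hypothetical standalone program (assumed file name demo.c, not part of the patch) that exercises the lane semantics the tests above encode; build with something like cc -O2 -msse4.1 demo.c:

#include <immintrin.h>
#include <inttypes.h>
#include <stdio.h>

int main(void) {
  /* _mm_set_epi32 takes lanes high-to-low, so lanes 0..3 of a are
     -2, 5, -1, 7. Both intrinsics read lanes 0 and 2 only. */
  __m128i a = _mm_set_epi32(7, -1, 5, -2);
  __m128i b = _mm_set_epi32(9,  3, 8,  4);
  uint64_t u[2];
  int64_t  s[2];

  _mm_storeu_si128((__m128i *)u, _mm_mul_epu32(a, b));
  _mm_storeu_si128((__m128i *)s, _mm_mul_epi32(a, b));

  /* Unsigned: 4294967294 * 4 and 4294967295 * 3. */
  printf("epu32: %" PRIu64 " %" PRIu64 "\n", u[0], u[1]);
  /* Signed: -2 * 4 = -8 and -1 * 3 = -3. */
  printf("epi32: %" PRId64 " %" PRId64 "\n", s[0], s[1]);
  return 0;
}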