[X86][SSE] Use raw shuffle mask decode in SimplifyDemandedVectorEltsForTargetNode (PR39549)
We were using the 'normalized' shuffle mask from resolveTargetShuffleInputs, which replaces zero/undef inputs with sentinel values. For SimplifyDemandedVectorElts we need the raw mask so that we can correctly demand those 'zero' inputs that were normalized away; this requires an extra bit of logic to locally normalize undef inputs.

llvm-svn: 347158
parent 45beaa0bb9
commit cc1f5d2407
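For context, here is a minimal standalone sketch (plain C++, deliberately not the LLVM sources; the sentinel constants and the normalizeRawMask helper are illustrative stand-ins) of the local normalization this patch performs on the raw shuffle mask: elements that are not demanded, or that read from an input known to be undef, become undef sentinels locally, while 'zero' sentinels stay visible so that SimplifyDemandedVectorElts can still demand them.

    #include <cstdio>
    #include <vector>

    // Illustrative stand-ins for the roles of SM_SentinelUndef/SM_SentinelZero.
    constexpr int kSentinelUndef = -1; // lane is "don't care"
    constexpr int kSentinelZero = -2;  // lane must be zero (kept visible)

    // Locally normalize a raw mask of a multi-input shuffle with NumElts
    // lanes per input: non-demanded lanes and lanes reading an undef input
    // become undef sentinels; zero sentinels are preserved.
    void normalizeRawMask(std::vector<int> &Mask,
                          const std::vector<bool> &DemandedElts,
                          const std::vector<bool> &InputIsUndef, int NumElts) {
      for (int i = 0; i != NumElts; ++i) {
        int &M = Mask[i];
        if (!DemandedElts[i])
          M = kSentinelUndef;
        else if (0 <= M && InputIsUndef[M / NumElts])
          M = kSentinelUndef;
      }
    }

    int main() {
      // Lane 1 reads input 1 (undef), lane 2 must be zero, lane 3 is not
      // demanded; only the zero sentinel survives normalization.
      std::vector<int> Mask = {0, 4, kSentinelZero, 3};
      normalizeRawMask(Mask, {true, true, true, false}, {false, true}, 4);
      for (int M : Mask)
        std::printf("%d ", M); // prints: 0 -1 -2 -1
      return 0;
    }

The raw decode via getTargetShuffleMask only works on simple value types, which is why the patch also adds the !VT.isSimple() early-out below.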
@@ -32085,13 +32085,15 @@ bool X86TargetLowering::SimplifyDemandedVectorEltsForTargetNode(
   }
 
   // Simplify target shuffles.
-  if (!isTargetShuffle(Opc))
+  if (!isTargetShuffle(Opc) || !VT.isSimple())
     return false;
 
   // Get target shuffle mask.
+  bool IsUnary;
   SmallVector<int, 64> OpMask;
   SmallVector<SDValue, 2> OpInputs;
-  if (!resolveTargetShuffleInputs(Op, OpInputs, OpMask, TLO.DAG))
+  if (!getTargetShuffleMask(Op.getNode(), VT.getSimpleVT(), true, OpInputs,
+                            OpMask, IsUnary))
     return false;
 
   // Shuffle inputs must be the same type as the result.
@@ -32101,9 +32103,13 @@ bool X86TargetLowering::SimplifyDemandedVectorEltsForTargetNode(
 
   // Check if shuffle mask can be simplified to undef/zero/identity.
-  for (int i = 0; i != NumElts; ++i)
+  int NumSrcs = OpInputs.size();
+  for (int i = 0; i != NumElts; ++i) {
+    int &M = OpMask[i];
     if (!DemandedElts[i])
-      OpMask[i] = SM_SentinelUndef;
+      M = SM_SentinelUndef;
+    else if (0 <= M && OpInputs[M / NumElts].isUndef())
+      M = SM_SentinelUndef;
+  }
 
   if (isUndefInRange(OpMask, 0, NumElts)) {
     KnownUndef.setAllBits();
@@ -1318,76 +1318,74 @@ entry:
define <8 x i64> @mul_v8i64_sext(<8 x i16> %val1, <8 x i32> %val2) {
; SSE2-LABEL: mul_v8i64_sext:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm1, %xmm5
; SSE2-NEXT: movdqa %xmm0, %xmm8
; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm0[4],xmm6[5],xmm0[5],xmm6[6],xmm0[6],xmm6[7],xmm0[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: psrad $31, %xmm3
; SSE2-NEXT: punpckhwd {{.*#+}} xmm12 = xmm12[4],xmm0[4],xmm12[5],xmm0[5],xmm12[6],xmm0[6],xmm12[7],xmm0[7]
; SSE2-NEXT: movdqa %xmm12, %xmm9
; SSE2-NEXT: psrad $31, %xmm9
; SSE2-NEXT: psrad $16, %xmm12
; SSE2-NEXT: punpckldq {{.*#+}} xmm12 = xmm12[0],xmm9[0],xmm12[1],xmm9[1]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE2-NEXT: movdqa %xmm0, %xmm4
; SSE2-NEXT: psrad $31, %xmm4
; SSE2-NEXT: psrad $16, %xmm0
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
; SSE2-NEXT: movdqa %xmm5, %xmm4
; SSE2-NEXT: psrad $31, %xmm4
; SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
; SSE2-NEXT: pxor %xmm7, %xmm7
; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm7[0],xmm3[1],xmm7[1]
; SSE2-NEXT: pmuludq %xmm5, %xmm3
; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm7[0],xmm4[1],xmm7[1]
; SSE2-NEXT: pmuludq %xmm0, %xmm4
; SSE2-NEXT: paddq %xmm3, %xmm4
; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm8[0,2,2,3,4,5,6,7]
; SSE2-NEXT: pmuludq %xmm5, %xmm0
; SSE2-NEXT: movdqa %xmm3, %xmm5
; SSE2-NEXT: psrad $31, %xmm5
; SSE2-NEXT: psrad $16, %xmm3
; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1]
; SSE2-NEXT: psllq $32, %xmm4
; SSE2-NEXT: paddq %xmm4, %xmm0
; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: psrad $31, %xmm4
; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
; SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm7[0],xmm5[1],xmm7[1]
; SSE2-NEXT: pmuludq %xmm1, %xmm5
; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm7[0],xmm4[1],xmm7[1]
; SSE2-NEXT: pmuludq %xmm3, %xmm4
; SSE2-NEXT: paddq %xmm5, %xmm4
; SSE2-NEXT: movdqa %xmm6, %xmm5
; SSE2-NEXT: psrad $31, %xmm5
; SSE2-NEXT: psrad $16, %xmm6
; SSE2-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
; SSE2-NEXT: pmuludq %xmm3, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[2,3,0,1]
; SSE2-NEXT: psllq $32, %xmm4
; SSE2-NEXT: paddq %xmm4, %xmm1
; SSE2-NEXT: movdqa %xmm2, %xmm4
; SSE2-NEXT: psrad $31, %xmm4
; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
; SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm7[0],xmm5[1],xmm7[1]
; SSE2-NEXT: pmuludq %xmm2, %xmm5
; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm7[0],xmm4[1],xmm7[1]
; SSE2-NEXT: pmuludq %xmm6, %xmm4
; SSE2-NEXT: paddq %xmm5, %xmm4
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm8[2,3,0,1]
; SSE2-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[0,2,2,3,4,5,6,7]
; SSE2-NEXT: pmuludq %xmm6, %xmm2
; SSE2-NEXT: movdqa %xmm5, %xmm6
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm3[2,3,0,1]
; SSE2-NEXT: pshuflw {{.*#+}} xmm15 = xmm5[0,2,2,3,4,5,6,7]
; SSE2-NEXT: movdqa %xmm15, %xmm8
; SSE2-NEXT: psrad $31, %xmm8
; SSE2-NEXT: psrad $16, %xmm15
; SSE2-NEXT: punpckldq {{.*#+}} xmm15 = xmm15[0],xmm8[0],xmm15[1],xmm8[1]
; SSE2-NEXT: pshuflw {{.*#+}} xmm7 = xmm3[0,2,2,3,4,5,6,7]
; SSE2-NEXT: movdqa %xmm7, %xmm13
; SSE2-NEXT: psrad $31, %xmm13
; SSE2-NEXT: psrad $16, %xmm7
; SSE2-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm13[0],xmm7[1],xmm13[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm11 = xmm2[2,3,0,1]
; SSE2-NEXT: movdqa %xmm11, %xmm10
; SSE2-NEXT: psrad $31, %xmm10
; SSE2-NEXT: punpckldq {{.*#+}} xmm11 = xmm11[0],xmm10[0],xmm11[1],xmm10[1]
; SSE2-NEXT: movdqa %xmm2, %xmm14
; SSE2-NEXT: psrad $31, %xmm14
; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm14[0],xmm2[1],xmm14[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,0,1]
; SSE2-NEXT: movdqa %xmm3, %xmm6
; SSE2-NEXT: psrad $31, %xmm6
; SSE2-NEXT: psrad $16, %xmm5
; SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1]
; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm6[0],xmm3[1],xmm6[1]
; SSE2-NEXT: movdqa %xmm1, %xmm5
; SSE2-NEXT: psrad $31, %xmm5
; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,1,1,3]
; SSE2-NEXT: pmuludq %xmm1, %xmm4
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,1,1,3]
; SSE2-NEXT: pmuludq %xmm0, %xmm5
; SSE2-NEXT: paddq %xmm4, %xmm5
; SSE2-NEXT: psllq $32, %xmm5
; SSE2-NEXT: pmuludq %xmm1, %xmm0
; SSE2-NEXT: paddq %xmm5, %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm6[0,1,1,3]
; SSE2-NEXT: pmuludq %xmm7, %xmm1
; SSE2-NEXT: pmuludq %xmm3, %xmm7
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm13[0,1,1,3]
; SSE2-NEXT: pmuludq %xmm3, %xmm4
; SSE2-NEXT: paddq %xmm4, %xmm1
; SSE2-NEXT: psllq $32, %xmm1
; SSE2-NEXT: paddq %xmm7, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm9[0,1,1,3]
; SSE2-NEXT: pmuludq %xmm2, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm14[0,1,1,3]
; SSE2-NEXT: pmuludq %xmm12, %xmm4
; SSE2-NEXT: paddq %xmm3, %xmm4
; SSE2-NEXT: psllq $32, %xmm4
; SSE2-NEXT: pmuludq %xmm12, %xmm2
; SSE2-NEXT: paddq %xmm4, %xmm2
; SSE2-NEXT: movdqa %xmm3, %xmm4
; SSE2-NEXT: psrad $31, %xmm4
; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
; SSE2-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1]
; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm7[0],xmm4[1],xmm7[1]
; SSE2-NEXT: pmuludq %xmm3, %xmm6
; SSE2-NEXT: pmuludq %xmm5, %xmm4
; SSE2-NEXT: paddq %xmm6, %xmm4
; SSE2-NEXT: pmuludq %xmm5, %xmm3
; SSE2-NEXT: psllq $32, %xmm4
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm10[0,1,1,3]
; SSE2-NEXT: pmuludq %xmm15, %xmm3
; SSE2-NEXT: pmuludq %xmm11, %xmm15
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm8[0,1,1,3]
; SSE2-NEXT: pmuludq %xmm11, %xmm4
; SSE2-NEXT: paddq %xmm4, %xmm3
; SSE2-NEXT: psllq $32, %xmm3
; SSE2-NEXT: paddq %xmm15, %xmm3
; SSE2-NEXT: retq
;
; SSE41-LABEL: mul_v8i64_sext:
@@ -124,21 +124,21 @@ define <8 x double> @combine_vpermt2var_8f64_identity(<8 x double> %x0, <8 x dou
 define <8 x double> @combine_vpermt2var_8f64_identity_mask(<8 x double> %x0, <8 x double> %x1, i8 %m) {
 ; X32-LABEL: combine_vpermt2var_8f64_identity_mask:
 ; X32: # %bb.0:
-; X32-NEXT: vmovapd {{.*#+}} zmm2 = [7,0,6,0,5,0,4,0,3,0,2,0,1,0,0,0]
+; X32-NEXT: vmovapd {{.*#+}} zmm1 = [7,0,6,0,5,0,4,0,3,0,2,0,1,0,0,0]
 ; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: kmovd %eax, %k1
-; X32-NEXT: vpermi2pd %zmm1, %zmm0, %zmm2 {%k1} {z}
+; X32-NEXT: vpermi2pd %zmm0, %zmm0, %zmm1 {%k1} {z}
 ; X32-NEXT: vmovapd {{.*#+}} zmm0 = [7,0,14,0,5,0,12,0,3,0,10,0,1,0,8,0]
-; X32-NEXT: vpermi2pd %zmm2, %zmm2, %zmm0 {%k1} {z}
+; X32-NEXT: vpermi2pd %zmm1, %zmm1, %zmm0 {%k1} {z}
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: combine_vpermt2var_8f64_identity_mask:
 ; X64: # %bb.0:
-; X64-NEXT: vmovapd {{.*#+}} zmm2 = [7,6,5,4,3,2,1,0]
+; X64-NEXT: vmovapd {{.*#+}} zmm1 = [7,6,5,4,3,2,1,0]
 ; X64-NEXT: kmovd %edi, %k1
-; X64-NEXT: vpermi2pd %zmm1, %zmm0, %zmm2 {%k1} {z}
+; X64-NEXT: vpermi2pd %zmm0, %zmm0, %zmm1 {%k1} {z}
 ; X64-NEXT: vmovapd {{.*#+}} zmm0 = [7,14,5,12,3,10,1,8]
-; X64-NEXT: vpermi2pd %zmm2, %zmm2, %zmm0 {%k1} {z}
+; X64-NEXT: vpermi2pd %zmm1, %zmm1, %zmm0 {%k1} {z}
 ; X64-NEXT: retq
 %res0 = call <8 x double> @llvm.x86.avx512.maskz.vpermt2var.pd.512(<8 x i64> <i64 7, i64 6, i64 5, i64 4, i64 3, i64 2, i64 1, i64 0>, <8 x double> %x0, <8 x double> %x1, i8 %m)
 %res1 = call <8 x double> @llvm.x86.avx512.maskz.vpermt2var.pd.512(<8 x i64> <i64 7, i64 14, i64 5, i64 12, i64 3, i64 10, i64 1, i64 8>, <8 x double> %res0, <8 x double> %res0, i8 %m)
@@ -205,21 +205,21 @@ define <8 x i64> @combine_vpermt2var_8i64_identity(<8 x i64> %x0, <8 x i64> %x1)
 define <8 x i64> @combine_vpermt2var_8i64_identity_mask(<8 x i64> %x0, <8 x i64> %x1, i8 %m) {
 ; X32-LABEL: combine_vpermt2var_8i64_identity_mask:
 ; X32: # %bb.0:
-; X32-NEXT: vmovdqa64 {{.*#+}} zmm2 = [7,0,6,0,5,0,4,0,3,0,2,0,1,0,0,0]
+; X32-NEXT: vmovdqa64 {{.*#+}} zmm1 = [7,0,6,0,5,0,4,0,3,0,2,0,1,0,0,0]
 ; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: kmovd %eax, %k1
-; X32-NEXT: vpermi2q %zmm1, %zmm0, %zmm2 {%k1} {z}
+; X32-NEXT: vpermi2q %zmm0, %zmm0, %zmm1 {%k1} {z}
 ; X32-NEXT: vmovdqa64 {{.*#+}} zmm0 = [7,0,14,0,5,0,12,0,3,0,10,0,1,0,8,0]
-; X32-NEXT: vpermi2q %zmm2, %zmm2, %zmm0 {%k1} {z}
+; X32-NEXT: vpermi2q %zmm1, %zmm1, %zmm0 {%k1} {z}
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: combine_vpermt2var_8i64_identity_mask:
 ; X64: # %bb.0:
-; X64-NEXT: vmovdqa64 {{.*#+}} zmm2 = [7,6,5,4,3,2,1,0]
+; X64-NEXT: vmovdqa64 {{.*#+}} zmm1 = [7,6,5,4,3,2,1,0]
 ; X64-NEXT: kmovd %edi, %k1
-; X64-NEXT: vpermi2q %zmm1, %zmm0, %zmm2 {%k1} {z}
+; X64-NEXT: vpermi2q %zmm0, %zmm0, %zmm1 {%k1} {z}
 ; X64-NEXT: vmovdqa64 {{.*#+}} zmm0 = [7,14,5,12,3,10,1,8]
-; X64-NEXT: vpermi2q %zmm2, %zmm2, %zmm0 {%k1} {z}
+; X64-NEXT: vpermi2q %zmm1, %zmm1, %zmm0 {%k1} {z}
 ; X64-NEXT: retq
 %res0 = call <8 x i64> @llvm.x86.avx512.maskz.vpermt2var.q.512(<8 x i64> <i64 7, i64 6, i64 5, i64 4, i64 3, i64 2, i64 1, i64 0>, <8 x i64> %x0, <8 x i64> %x1, i8 %m)
 %res1 = call <8 x i64> @llvm.x86.avx512.maskz.vpermt2var.q.512(<8 x i64> <i64 7, i64 14, i64 5, i64 12, i64 3, i64 10, i64 1, i64 8>, <8 x i64> %res0, <8 x i64> %res0, i8 %m)
@@ -241,20 +241,20 @@ define <16 x float> @combine_vpermt2var_16f32_identity(<16 x float> %x0, <16 x f
 define <16 x float> @combine_vpermt2var_16f32_identity_mask(<16 x float> %x0, <16 x float> %x1, i16 %m) {
 ; X32-LABEL: combine_vpermt2var_16f32_identity_mask:
 ; X32: # %bb.0:
-; X32-NEXT: vmovaps {{.*#+}} zmm2 = [15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
+; X32-NEXT: vmovaps {{.*#+}} zmm1 = [15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
 ; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
-; X32-NEXT: vpermi2ps %zmm1, %zmm0, %zmm2 {%k1} {z}
+; X32-NEXT: vpermi2ps %zmm0, %zmm0, %zmm1 {%k1} {z}
 ; X32-NEXT: vmovaps {{.*#+}} zmm0 = [15,30,13,28,11,26,9,24,7,22,5,20,3,18,1,16]
-; X32-NEXT: vpermi2ps %zmm2, %zmm2, %zmm0 {%k1} {z}
+; X32-NEXT: vpermi2ps %zmm1, %zmm1, %zmm0 {%k1} {z}
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: combine_vpermt2var_16f32_identity_mask:
 ; X64: # %bb.0:
-; X64-NEXT: vmovaps {{.*#+}} zmm2 = [15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
+; X64-NEXT: vmovaps {{.*#+}} zmm1 = [15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
 ; X64-NEXT: kmovd %edi, %k1
-; X64-NEXT: vpermi2ps %zmm1, %zmm0, %zmm2 {%k1} {z}
+; X64-NEXT: vpermi2ps %zmm0, %zmm0, %zmm1 {%k1} {z}
 ; X64-NEXT: vmovaps {{.*#+}} zmm0 = [15,30,13,28,11,26,9,24,7,22,5,20,3,18,1,16]
-; X64-NEXT: vpermi2ps %zmm2, %zmm2, %zmm0 {%k1} {z}
+; X64-NEXT: vpermi2ps %zmm1, %zmm1, %zmm0 {%k1} {z}
 ; X64-NEXT: retq
 %res0 = call <16 x float> @llvm.x86.avx512.maskz.vpermt2var.ps.512(<16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>, <16 x float> %x0, <16 x float> %x1, i16 %m)
 %res1 = call <16 x float> @llvm.x86.avx512.maskz.vpermt2var.ps.512(<16 x i32> <i32 15, i32 30, i32 13, i32 28, i32 11, i32 26, i32 9, i32 24, i32 7, i32 22, i32 5, i32 20, i32 3, i32 18, i32 1, i32 16>, <16 x float> %res0, <16 x float> %res0, i16 %m)
@@ -264,14 +264,14 @@ define <16 x float> @combine_vpermt2var_16f32_identity_mask(<16 x float> %x0, <1
 define <16 x float> @combine_vpermt2var_16f32_vmovddup(<16 x float> %x0, <16 x float> %x1) {
 ; X32-LABEL: combine_vpermt2var_16f32_vmovddup:
 ; X32: # %bb.0:
-; X32-NEXT: vmovaps {{.*#+}} zmm2 = [0,1,0,1,4,5,4,5,8,9,8,9,12,13,12,13]
-; X32-NEXT: vpermt2ps %zmm1, %zmm2, %zmm0
+; X32-NEXT: vmovaps {{.*#+}} zmm1 = [0,1,0,1,4,5,4,5,8,9,8,9,12,13,12,13]
+; X32-NEXT: vpermt2ps %zmm0, %zmm1, %zmm0
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: combine_vpermt2var_16f32_vmovddup:
 ; X64: # %bb.0:
-; X64-NEXT: vmovaps {{.*#+}} zmm2 = [0,1,0,1,4,5,4,5,8,9,8,9,12,13,12,13]
-; X64-NEXT: vpermt2ps %zmm1, %zmm2, %zmm0
+; X64-NEXT: vmovaps {{.*#+}} zmm1 = [0,1,0,1,4,5,4,5,8,9,8,9,12,13,12,13]
+; X64-NEXT: vpermt2ps %zmm0, %zmm1, %zmm0
 ; X64-NEXT: retq
 %res0 = call <16 x float> @llvm.x86.avx512.maskz.vpermt2var.ps.512(<16 x i32> <i32 0, i32 1, i32 0, i32 1, i32 4, i32 5, i32 4, i32 5, i32 8, i32 9, i32 8, i32 9, i32 12, i32 13, i32 12, i32 13>, <16 x float> %x0, <16 x float> %x1, i16 -1)
 ret <16 x float> %res0
@@ -280,18 +280,16 @@ define <16 x float> @combine_vpermt2var_16f32_vmovddup_load(<16 x float> *%p0, <
 ; X32-LABEL: combine_vpermt2var_16f32_vmovddup_load:
 ; X32: # %bb.0:
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: vmovaps (%eax), %zmm2
-; X32-NEXT: vmovaps {{.*#+}} zmm1 = [0,1,0,1,4,5,4,5,8,9,8,9,12,13,12,13]
-; X32-NEXT: vpermi2ps %zmm0, %zmm2, %zmm1
-; X32-NEXT: vmovaps %zmm1, %zmm0
+; X32-NEXT: vmovaps (%eax), %zmm1
+; X32-NEXT: vmovaps {{.*#+}} zmm0 = [0,1,0,1,4,5,4,5,8,9,8,9,12,13,12,13]
+; X32-NEXT: vpermi2ps %zmm0, %zmm1, %zmm0
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: combine_vpermt2var_16f32_vmovddup_load:
 ; X64: # %bb.0:
-; X64-NEXT: vmovaps (%rdi), %zmm2
-; X64-NEXT: vmovaps {{.*#+}} zmm1 = [0,1,0,1,4,5,4,5,8,9,8,9,12,13,12,13]
-; X64-NEXT: vpermi2ps %zmm0, %zmm2, %zmm1
-; X64-NEXT: vmovaps %zmm1, %zmm0
+; X64-NEXT: vmovaps (%rdi), %zmm1
+; X64-NEXT: vmovaps {{.*#+}} zmm0 = [0,1,0,1,4,5,4,5,8,9,8,9,12,13,12,13]
+; X64-NEXT: vpermi2ps %zmm0, %zmm1, %zmm0
 ; X64-NEXT: retq
 %x0 = load <16 x float>, <16 x float> *%p0
 %res0 = call <16 x float> @llvm.x86.avx512.maskz.vpermt2var.ps.512(<16 x i32> <i32 0, i32 1, i32 0, i32 1, i32 4, i32 5, i32 4, i32 5, i32 8, i32 9, i32 8, i32 9, i32 12, i32 13, i32 12, i32 13>, <16 x float> %x0, <16 x float> %x1, i16 -1)
@@ -300,16 +298,16 @@ define <16 x float> @combine_vpermt2var_16f32_vmovddup_load(<16 x float> *%p0, <
 define <16 x float> @combine_vpermt2var_16f32_vmovddup_mask(<16 x float> %x0, <16 x float> %x1, i16 %m) {
 ; X32-LABEL: combine_vpermt2var_16f32_vmovddup_mask:
 ; X32: # %bb.0:
-; X32-NEXT: vmovaps {{.*#+}} zmm2 = [0,1,0,1,4,5,4,5,8,9,8,9,12,13,12,13]
+; X32-NEXT: vmovaps {{.*#+}} zmm1 = [0,1,0,1,4,5,4,5,8,9,8,9,12,13,12,13]
 ; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
-; X32-NEXT: vpermt2ps %zmm1, %zmm2, %zmm0 {%k1} {z}
+; X32-NEXT: vpermt2ps %zmm0, %zmm1, %zmm0 {%k1} {z}
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: combine_vpermt2var_16f32_vmovddup_mask:
 ; X64: # %bb.0:
-; X64-NEXT: vmovaps {{.*#+}} zmm2 = [0,1,0,1,4,5,4,5,8,9,8,9,12,13,12,13]
+; X64-NEXT: vmovaps {{.*#+}} zmm1 = [0,1,0,1,4,5,4,5,8,9,8,9,12,13,12,13]
 ; X64-NEXT: kmovd %edi, %k1
-; X64-NEXT: vpermt2ps %zmm1, %zmm2, %zmm0 {%k1} {z}
+; X64-NEXT: vpermt2ps %zmm0, %zmm1, %zmm0 {%k1} {z}
 ; X64-NEXT: retq
 %res0 = call <16 x float> @llvm.x86.avx512.maskz.vpermt2var.ps.512(<16 x i32> <i32 0, i32 1, i32 0, i32 1, i32 4, i32 5, i32 4, i32 5, i32 8, i32 9, i32 8, i32 9, i32 12, i32 13, i32 12, i32 13>, <16 x float> %x0, <16 x float> %x1, i16 %m)
 ret <16 x float> %res0
@@ -318,20 +316,18 @@ define <16 x float> @combine_vpermt2var_16f32_vmovddup_mask_load(<16 x float> *%
 ; X32-LABEL: combine_vpermt2var_16f32_vmovddup_mask_load:
 ; X32: # %bb.0:
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: vmovaps (%eax), %zmm2
-; X32-NEXT: vmovaps {{.*#+}} zmm1 = [0,1,0,1,4,5,4,5,8,9,8,9,12,13,12,13]
+; X32-NEXT: vmovaps (%eax), %zmm1
+; X32-NEXT: vmovaps {{.*#+}} zmm0 = [0,1,0,1,4,5,4,5,8,9,8,9,12,13,12,13]
 ; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
-; X32-NEXT: vpermi2ps %zmm0, %zmm2, %zmm1 {%k1} {z}
-; X32-NEXT: vmovaps %zmm1, %zmm0
+; X32-NEXT: vpermi2ps %zmm0, %zmm1, %zmm0 {%k1} {z}
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: combine_vpermt2var_16f32_vmovddup_mask_load:
 ; X64: # %bb.0:
-; X64-NEXT: vmovaps (%rdi), %zmm2
-; X64-NEXT: vmovaps {{.*#+}} zmm1 = [0,1,0,1,4,5,4,5,8,9,8,9,12,13,12,13]
+; X64-NEXT: vmovaps (%rdi), %zmm1
+; X64-NEXT: vmovaps {{.*#+}} zmm0 = [0,1,0,1,4,5,4,5,8,9,8,9,12,13,12,13]
 ; X64-NEXT: kmovd %esi, %k1
-; X64-NEXT: vpermi2ps %zmm0, %zmm2, %zmm1 {%k1} {z}
-; X64-NEXT: vmovaps %zmm1, %zmm0
+; X64-NEXT: vpermi2ps %zmm0, %zmm1, %zmm0 {%k1} {z}
 ; X64-NEXT: retq
 %x0 = load <16 x float>, <16 x float> *%p0
 %res0 = call <16 x float> @llvm.x86.avx512.maskz.vpermt2var.ps.512(<16 x i32> <i32 0, i32 1, i32 0, i32 1, i32 4, i32 5, i32 4, i32 5, i32 8, i32 9, i32 8, i32 9, i32 12, i32 13, i32 12, i32 13>, <16 x float> %x0, <16 x float> %x1, i16 %m)
@@ -519,20 +515,20 @@ define <16 x i32> @combine_vpermt2var_16i32_identity(<16 x i32> %x0, <16 x i32>
 define <16 x i32> @combine_vpermt2var_16i32_identity_mask(<16 x i32> %x0, <16 x i32> %x1, i16 %m) {
 ; X32-LABEL: combine_vpermt2var_16i32_identity_mask:
 ; X32: # %bb.0:
-; X32-NEXT: vmovdqa64 {{.*#+}} zmm2 = [15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
+; X32-NEXT: vmovdqa64 {{.*#+}} zmm1 = [15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
 ; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
-; X32-NEXT: vpermi2d %zmm1, %zmm0, %zmm2 {%k1} {z}
+; X32-NEXT: vpermi2d %zmm0, %zmm0, %zmm1 {%k1} {z}
 ; X32-NEXT: vmovdqa64 {{.*#+}} zmm0 = [15,30,13,28,11,26,9,24,7,22,5,20,3,18,1,16]
-; X32-NEXT: vpermi2d %zmm2, %zmm2, %zmm0 {%k1} {z}
+; X32-NEXT: vpermi2d %zmm1, %zmm1, %zmm0 {%k1} {z}
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: combine_vpermt2var_16i32_identity_mask:
 ; X64: # %bb.0:
-; X64-NEXT: vmovdqa64 {{.*#+}} zmm2 = [15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
+; X64-NEXT: vmovdqa64 {{.*#+}} zmm1 = [15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
 ; X64-NEXT: kmovd %edi, %k1
-; X64-NEXT: vpermi2d %zmm1, %zmm0, %zmm2 {%k1} {z}
+; X64-NEXT: vpermi2d %zmm0, %zmm0, %zmm1 {%k1} {z}
 ; X64-NEXT: vmovdqa64 {{.*#+}} zmm0 = [15,30,13,28,11,26,9,24,7,22,5,20,3,18,1,16]
-; X64-NEXT: vpermi2d %zmm2, %zmm2, %zmm0 {%k1} {z}
+; X64-NEXT: vpermi2d %zmm1, %zmm1, %zmm0 {%k1} {z}
 ; X64-NEXT: retq
 %res0 = call <16 x i32> @llvm.x86.avx512.maskz.vpermt2var.d.512(<16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>, <16 x i32> %x0, <16 x i32> %x1, i16 %m)
 %res1 = call <16 x i32> @llvm.x86.avx512.maskz.vpermt2var.d.512(<16 x i32> <i32 15, i32 30, i32 13, i32 28, i32 11, i32 26, i32 9, i32 24, i32 7, i32 22, i32 5, i32 20, i32 3, i32 18, i32 1, i32 16>, <16 x i32> %res0, <16 x i32> %res0, i16 %m)
@@ -554,20 +550,20 @@ define <32 x i16> @combine_vpermt2var_32i16_identity(<32 x i16> %x0, <32 x i16>
 define <32 x i16> @combine_vpermt2var_32i16_identity_mask(<32 x i16> %x0, <32 x i16> %x1, i32 %m) {
 ; X32-LABEL: combine_vpermt2var_32i16_identity_mask:
 ; X32: # %bb.0:
-; X32-NEXT: vmovdqa64 {{.*#+}} zmm2 = [31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
+; X32-NEXT: vmovdqa64 {{.*#+}} zmm1 = [31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
 ; X32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
-; X32-NEXT: vpermi2w %zmm1, %zmm0, %zmm2 {%k1} {z}
+; X32-NEXT: vpermi2w %zmm0, %zmm0, %zmm1 {%k1} {z}
 ; X32-NEXT: vmovdqa64 {{.*#+}} zmm0 = [63,30,61,28,59,26,57,24,55,22,53,20,51,18,49,16,47,46,13,44,11,42,9,40,7,38,5,36,3,34,1,32]
-; X32-NEXT: vpermi2w %zmm2, %zmm2, %zmm0 {%k1} {z}
+; X32-NEXT: vpermi2w %zmm1, %zmm1, %zmm0 {%k1} {z}
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: combine_vpermt2var_32i16_identity_mask:
 ; X64: # %bb.0:
-; X64-NEXT: vmovdqa64 {{.*#+}} zmm2 = [31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
+; X64-NEXT: vmovdqa64 {{.*#+}} zmm1 = [31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
 ; X64-NEXT: kmovd %edi, %k1
-; X64-NEXT: vpermi2w %zmm1, %zmm0, %zmm2 {%k1} {z}
+; X64-NEXT: vpermi2w %zmm0, %zmm0, %zmm1 {%k1} {z}
 ; X64-NEXT: vmovdqa64 {{.*#+}} zmm0 = [63,30,61,28,59,26,57,24,55,22,53,20,51,18,49,16,47,46,13,44,11,42,9,40,7,38,5,36,3,34,1,32]
-; X64-NEXT: vpermi2w %zmm2, %zmm2, %zmm0 {%k1} {z}
+; X64-NEXT: vpermi2w %zmm1, %zmm1, %zmm0 {%k1} {z}
 ; X64-NEXT: retq
 %res0 = call <32 x i16> @llvm.x86.avx512.maskz.vpermt2var.hi.512(<32 x i16> <i16 31, i16 30, i16 29, i16 28, i16 27, i16 26, i16 25, i16 24, i16 23, i16 22, i16 21, i16 20, i16 19, i16 18, i16 17, i16 16, i16 15, i16 14, i16 13, i16 12, i16 11, i16 10, i16 9, i16 8, i16 7, i16 6, i16 5, i16 4, i16 3, i16 2, i16 1, i16 0>, <32 x i16> %x0, <32 x i16> %x1, i32 %m)
 %res1 = call <32 x i16> @llvm.x86.avx512.maskz.vpermt2var.hi.512(<32 x i16> <i16 63, i16 30, i16 61, i16 28, i16 59, i16 26, i16 57, i16 24, i16 55, i16 22, i16 53, i16 20, i16 51, i16 18, i16 49, i16 16, i16 47, i16 46, i16 13, i16 44, i16 11, i16 42, i16 9, i16 40, i16 7, i16 38, i16 5, i16 36, i16 3, i16 34, i16 1, i16 32>, <32 x i16> %res0, <32 x i16> %res0, i32 %m)
@@ -20,20 +20,20 @@ define <16 x i16> @combine_vpermt2var_16i16_identity(<16 x i16> %x0, <16 x i16>
 define <16 x i16> @combine_vpermt2var_16i16_identity_mask(<16 x i16> %x0, <16 x i16> %x1, i16 %m) {
 ; X32-LABEL: combine_vpermt2var_16i16_identity_mask:
 ; X32: # %bb.0:
-; X32-NEXT: vmovdqa {{.*#+}} ymm2 = [15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
+; X32-NEXT: vmovdqa {{.*#+}} ymm1 = [15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
 ; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
-; X32-NEXT: vpermi2w %ymm1, %ymm0, %ymm2 {%k1} {z}
+; X32-NEXT: vpermi2w %ymm0, %ymm0, %ymm1 {%k1} {z}
 ; X32-NEXT: vmovdqa {{.*#+}} ymm0 = [15,30,13,28,11,26,9,24,7,22,5,20,3,18,1,16]
-; X32-NEXT: vpermi2w %ymm2, %ymm2, %ymm0 {%k1} {z}
+; X32-NEXT: vpermi2w %ymm1, %ymm1, %ymm0 {%k1} {z}
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: combine_vpermt2var_16i16_identity_mask:
 ; X64: # %bb.0:
-; X64-NEXT: vmovdqa {{.*#+}} ymm2 = [15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
+; X64-NEXT: vmovdqa {{.*#+}} ymm1 = [15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
 ; X64-NEXT: kmovd %edi, %k1
-; X64-NEXT: vpermi2w %ymm1, %ymm0, %ymm2 {%k1} {z}
+; X64-NEXT: vpermi2w %ymm0, %ymm0, %ymm1 {%k1} {z}
 ; X64-NEXT: vmovdqa {{.*#+}} ymm0 = [15,30,13,28,11,26,9,24,7,22,5,20,3,18,1,16]
-; X64-NEXT: vpermi2w %ymm2, %ymm2, %ymm0 {%k1} {z}
+; X64-NEXT: vpermi2w %ymm1, %ymm1, %ymm0 {%k1} {z}
 ; X64-NEXT: retq
 %res0 = call <16 x i16> @llvm.x86.avx512.maskz.vpermt2var.hi.256(<16 x i16> <i16 15, i16 14, i16 13, i16 12, i16 11, i16 10, i16 9, i16 8, i16 7, i16 6, i16 5, i16 4, i16 3, i16 2, i16 1, i16 0>, <16 x i16> %x0, <16 x i16> %x1, i16 %m)
 %res1 = call <16 x i16> @llvm.x86.avx512.maskz.vpermt2var.hi.256(<16 x i16> <i16 15, i16 30, i16 13, i16 28, i16 11, i16 26, i16 9, i16 24, i16 7, i16 22, i16 5, i16 20, i16 3, i16 18, i16 1, i16 16>, <16 x i16> %res0, <16 x i16> %res0, i16 %m)
@@ -33,20 +33,20 @@ define <16 x i8> @combine_vpermt2var_16i8_identity(<16 x i8> %x0, <16 x i8> %x1)
 define <16 x i8> @combine_vpermt2var_16i8_identity_mask(<16 x i8> %x0, <16 x i8> %x1, i16 %m) {
 ; X32-LABEL: combine_vpermt2var_16i8_identity_mask:
 ; X32: # %bb.0:
-; X32-NEXT: vmovdqa {{.*#+}} xmm2 = [15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
+; X32-NEXT: vmovdqa {{.*#+}} xmm1 = [15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
 ; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
-; X32-NEXT: vpermi2b %xmm1, %xmm0, %xmm2 {%k1} {z}
+; X32-NEXT: vpermi2b %xmm0, %xmm0, %xmm1 {%k1} {z}
 ; X32-NEXT: vmovdqa {{.*#+}} xmm0 = [15,30,13,28,11,26,9,24,7,22,5,20,3,18,1,16]
-; X32-NEXT: vpermi2b %xmm2, %xmm2, %xmm0 {%k1} {z}
+; X32-NEXT: vpermi2b %xmm1, %xmm1, %xmm0 {%k1} {z}
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: combine_vpermt2var_16i8_identity_mask:
 ; X64: # %bb.0:
-; X64-NEXT: vmovdqa {{.*#+}} xmm2 = [15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
+; X64-NEXT: vmovdqa {{.*#+}} xmm1 = [15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
 ; X64-NEXT: kmovd %edi, %k1
-; X64-NEXT: vpermi2b %xmm1, %xmm0, %xmm2 {%k1} {z}
+; X64-NEXT: vpermi2b %xmm0, %xmm0, %xmm1 {%k1} {z}
 ; X64-NEXT: vmovdqa {{.*#+}} xmm0 = [15,30,13,28,11,26,9,24,7,22,5,20,3,18,1,16]
-; X64-NEXT: vpermi2b %xmm2, %xmm2, %xmm0 {%k1} {z}
+; X64-NEXT: vpermi2b %xmm1, %xmm1, %xmm0 {%k1} {z}
 ; X64-NEXT: retq
 %res0 = call <16 x i8> @llvm.x86.avx512.maskz.vpermt2var.qi.128(<16 x i8> <i8 15, i8 14, i8 13, i8 12, i8 11, i8 10, i8 9, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>, <16 x i8> %x0, <16 x i8> %x1, i16 %m)
 %res1 = call <16 x i8> @llvm.x86.avx512.maskz.vpermt2var.qi.128(<16 x i8> <i8 15, i8 30, i8 13, i8 28, i8 11, i8 26, i8 9, i8 24, i8 7, i8 22, i8 5, i8 20, i8 3, i8 18, i8 1, i8 16>, <16 x i8> %res0, <16 x i8> %res0, i16 %m)
@@ -2827,16 +2827,13 @@ define <4 x float> @PR30264(<4 x float> %x) {
 define <8 x i16> @PR39549(<16 x i8> %x) {
 ; SSE-LABEL: PR39549:
 ; SSE: # %bb.0:
-; SSE-NEXT: pxor %xmm1, %xmm1
-; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; SSE-NEXT: psraw $8, %xmm1
-; SSE-NEXT: movdqa %xmm1, %xmm0
+; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE-NEXT: psraw $8, %xmm0
 ; SSE-NEXT: retq
 ;
 ; AVX-LABEL: PR39549:
 ; AVX: # %bb.0:
-; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
+; AVX-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
 ; AVX-NEXT: vpsraw $8, %xmm0, %xmm0
 ; AVX-NEXT: retq
 %a = shufflevector <16 x i8> %x, <16 x i8> undef, <16 x i32> <i32 8, i32 undef, i32 9, i32 undef, i32 10, i32 undef, i32 11, i32 undef, i32 12, i32 undef, i32 13, i32 undef, i32 14, i32 undef, i32 15, i32 undef>
@@ -5607,40 +5607,39 @@ define <4 x i32> @mul_add_const_v4i64_v4i32(<4 x i32> %a0, <4 x i32> %a1) nounwi
define <4 x i32> @mul_add_self_v4i64_v4i32(<4 x i32> %a0, <4 x i32> %a1) nounwind {
; SSE-LABEL: mul_add_self_v4i64_v4i32:
; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
; SSE-NEXT: movdqa %xmm2, %xmm3
; SSE-NEXT: psrad $31, %xmm3
; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; SSE-NEXT: movdqa %xmm0, %xmm6
; SSE-NEXT: psrad $31, %xmm6
; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1]
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,0,1]
; SSE-NEXT: movdqa %xmm4, %xmm5
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1]
; SSE-NEXT: movdqa %xmm3, %xmm4
; SSE-NEXT: psrad $31, %xmm4
; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
; SSE-NEXT: movdqa %xmm0, %xmm5
; SSE-NEXT: psrad $31, %xmm5
; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
; SSE-NEXT: movdqa %xmm1, %xmm7
; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1]
; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm1[2,3,0,1]
; SSE-NEXT: movdqa %xmm6, %xmm7
; SSE-NEXT: psrad $31, %xmm7
; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm7[0],xmm1[1],xmm7[1]
; SSE-NEXT: pxor %xmm8, %xmm8
; SSE-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm8[0],xmm6[1],xmm8[1]
; SSE-NEXT: pmuludq %xmm1, %xmm6
; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1]
; SSE-NEXT: pmuludq %xmm0, %xmm7
; SSE-NEXT: paddq %xmm6, %xmm7
; SSE-NEXT: psllq $32, %xmm7
; SSE-NEXT: pmuludq %xmm0, %xmm1
; SSE-NEXT: paddq %xmm7, %xmm1
; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm8[0],xmm3[1],xmm8[1]
; SSE-NEXT: pmuludq %xmm4, %xmm3
; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm8[0],xmm5[1],xmm8[1]
; SSE-NEXT: pmuludq %xmm2, %xmm5
; SSE-NEXT: paddq %xmm3, %xmm5
; SSE-NEXT: psllq $32, %xmm5
; SSE-NEXT: pmuludq %xmm2, %xmm4
; SSE-NEXT: paddq %xmm5, %xmm4
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm4[0,2]
; SSE-NEXT: paddd %xmm1, %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1]
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: psrad $31, %xmm2
; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,1,3]
; SSE-NEXT: pmuludq %xmm0, %xmm2
; SSE-NEXT: pmuludq %xmm1, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,1,1,3]
; SSE-NEXT: pmuludq %xmm1, %xmm5
; SSE-NEXT: paddq %xmm5, %xmm2
; SSE-NEXT: psllq $32, %xmm2
; SSE-NEXT: paddq %xmm0, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm7[0,1,1,3]
; SSE-NEXT: pmuludq %xmm3, %xmm0
; SSE-NEXT: pmuludq %xmm6, %xmm3
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm4[0,1,1,3]
; SSE-NEXT: pmuludq %xmm6, %xmm1
; SSE-NEXT: paddq %xmm1, %xmm0
; SSE-NEXT: psllq $32, %xmm0
; SSE-NEXT: paddq %xmm3, %xmm0
; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm0[0,2]
; SSE-NEXT: paddd %xmm2, %xmm2
; SSE-NEXT: movdqa %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: mul_add_self_v4i64_v4i32: