diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 7eae08e54c29..d2a5f396d64e 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -32085,13 +32085,15 @@ bool X86TargetLowering::SimplifyDemandedVectorEltsForTargetNode(
   }
 
   // Simplify target shuffles.
-  if (!isTargetShuffle(Opc))
+  if (!isTargetShuffle(Opc) || !VT.isSimple())
     return false;
 
   // Get target shuffle mask.
+  bool IsUnary;
   SmallVector<int, 64> OpMask;
   SmallVector<SDValue, 2> OpInputs;
-  if (!resolveTargetShuffleInputs(Op, OpInputs, OpMask, TLO.DAG))
+  if (!getTargetShuffleMask(Op.getNode(), VT.getSimpleVT(), true, OpInputs,
+                            OpMask, IsUnary))
     return false;
 
   // Shuffle inputs must be the same type as the result.
@@ -32101,9 +32103,13 @@ bool X86TargetLowering::SimplifyDemandedVectorEltsForTargetNode(
 
   // Check if shuffle mask can be simplified to undef/zero/identity.
   int NumSrcs = OpInputs.size();
-  for (int i = 0; i != NumElts; ++i)
+  for (int i = 0; i != NumElts; ++i) {
+    int &M = OpMask[i];
     if (!DemandedElts[i])
-      OpMask[i] = SM_SentinelUndef;
+      M = SM_SentinelUndef;
+    else if (0 <= M && OpInputs[M / NumElts].isUndef())
+      M = SM_SentinelUndef;
+  }
 
   if (isUndefInRange(OpMask, 0, NumElts)) {
     KnownUndef.setAllBits();
diff --git a/llvm/test/CodeGen/X86/pmul.ll b/llvm/test/CodeGen/X86/pmul.ll
index 4874935f8a06..f5f7019043af 100644
--- a/llvm/test/CodeGen/X86/pmul.ll
+++ b/llvm/test/CodeGen/X86/pmul.ll
@@ -1318,76 +1318,74 @@ entry:
 define <8 x i64> @mul_v8i64_sext(<8 x i16> %val1, <8 x i32> %val2) {
 ; SSE2-LABEL: mul_v8i64_sext:
 ; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm1, %xmm5
-; SSE2-NEXT: movdqa %xmm0, %xmm8
-; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm0[4],xmm6[5],xmm0[5],xmm6[6],xmm0[6],xmm6[7],xmm0[7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
 ; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: psrad $31, %xmm3
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm12 = xmm12[4],xmm0[4],xmm12[5],xmm0[5],xmm12[6],xmm0[6],xmm12[7],xmm0[7]
+; SSE2-NEXT: movdqa %xmm12, %xmm9
+; SSE2-NEXT: psrad $31, %xmm9
+; SSE2-NEXT: psrad $16, %xmm12
+; SSE2-NEXT: punpckldq {{.*#+}} xmm12 = xmm12[0],xmm9[0],xmm12[1],xmm9[1]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
+; SSE2-NEXT: movdqa %xmm0, %xmm4
+; SSE2-NEXT: psrad $31, %xmm4
 ; SSE2-NEXT: psrad $16, %xmm0
-; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
-; SSE2-NEXT: movdqa %xmm5, %xmm4
-; SSE2-NEXT: psrad $31, %xmm4
-; SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
-; SSE2-NEXT: pxor %xmm7, %xmm7
-; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm7[0],xmm3[1],xmm7[1]
-; SSE2-NEXT: pmuludq %xmm5, %xmm3
-; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm7[0],xmm4[1],xmm7[1]
-; SSE2-NEXT: pmuludq %xmm0, %xmm4
-; SSE2-NEXT: paddq %xmm3, %xmm4
-; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm8[0,2,2,3,4,5,6,7]
-; SSE2-NEXT: pmuludq %xmm5, %xmm0
-; SSE2-NEXT: movdqa %xmm3, %xmm5
-; SSE2-NEXT: psrad $31, %xmm5
-; SSE2-NEXT: psrad $16, %xmm3
-; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1]
-; SSE2-NEXT: psllq $32, %xmm4
-; SSE2-NEXT: paddq %xmm4, %xmm0
-; SSE2-NEXT: movdqa %xmm1, %xmm4
-; SSE2-NEXT: psrad $31, %xmm4
-; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm7[0],xmm5[1],xmm7[1]
-; SSE2-NEXT: pmuludq %xmm1, %xmm5
-; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm7[0],xmm4[1],xmm7[1]
-; SSE2-NEXT: pmuludq %xmm3, %xmm4
-; SSE2-NEXT: paddq %xmm5, %xmm4
-; SSE2-NEXT: movdqa %xmm6, %xmm5
-; SSE2-NEXT: psrad $31, %xmm5
-; SSE2-NEXT: psrad $16, %xmm6
-; SSE2-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
-; SSE2-NEXT: pmuludq %xmm3, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[2,3,0,1]
-; SSE2-NEXT: psllq $32, %xmm4
-; SSE2-NEXT: paddq %xmm4, %xmm1
-; SSE2-NEXT: movdqa %xmm2, %xmm4
-; SSE2-NEXT: psrad $31, %xmm4
-; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm7[0],xmm5[1],xmm7[1]
-; SSE2-NEXT: pmuludq %xmm2, %xmm5
-; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm7[0],xmm4[1],xmm7[1]
-; SSE2-NEXT: pmuludq %xmm6, %xmm4
-; SSE2-NEXT: paddq %xmm5, %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm8[2,3,0,1]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[0,2,2,3,4,5,6,7]
-; SSE2-NEXT: pmuludq %xmm6, %xmm2
-; SSE2-NEXT: movdqa %xmm5, %xmm6
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm3[2,3,0,1]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm15 = xmm5[0,2,2,3,4,5,6,7]
+; SSE2-NEXT: movdqa %xmm15, %xmm8
+; SSE2-NEXT: psrad $31, %xmm8
+; SSE2-NEXT: psrad $16, %xmm15
+; SSE2-NEXT: punpckldq {{.*#+}} xmm15 = xmm15[0],xmm8[0],xmm15[1],xmm8[1]
+; SSE2-NEXT: pshuflw {{.*#+}} xmm7 = xmm3[0,2,2,3,4,5,6,7]
+; SSE2-NEXT: movdqa %xmm7, %xmm13
+; SSE2-NEXT: psrad $31, %xmm13
+; SSE2-NEXT: psrad $16, %xmm7
+; SSE2-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm13[0],xmm7[1],xmm13[1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm11 = xmm2[2,3,0,1]
+; SSE2-NEXT: movdqa %xmm11, %xmm10
+; SSE2-NEXT: psrad $31, %xmm10
+; SSE2-NEXT: punpckldq {{.*#+}} xmm11 = xmm11[0],xmm10[0],xmm11[1],xmm10[1]
+; SSE2-NEXT: movdqa %xmm2, %xmm14
+; SSE2-NEXT: psrad $31, %xmm14
+; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm14[0],xmm2[1],xmm14[1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,0,1]
+; SSE2-NEXT: movdqa %xmm3, %xmm6
 ; SSE2-NEXT: psrad $31, %xmm6
-; SSE2-NEXT: psrad $16, %xmm5
-; SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm6[0],xmm3[1],xmm6[1]
+; SSE2-NEXT: movdqa %xmm1, %xmm5
+; SSE2-NEXT: psrad $31, %xmm5
+; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,1,1,3]
+; SSE2-NEXT: pmuludq %xmm1, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,1,1,3]
+; SSE2-NEXT: pmuludq %xmm0, %xmm5
+; SSE2-NEXT: paddq %xmm4, %xmm5
+; SSE2-NEXT: psllq $32, %xmm5
+; SSE2-NEXT: pmuludq %xmm1, %xmm0
+; SSE2-NEXT: paddq %xmm5, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm6[0,1,1,3]
+; SSE2-NEXT: pmuludq %xmm7, %xmm1
+; SSE2-NEXT: pmuludq %xmm3, %xmm7
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm13[0,1,1,3]
+; SSE2-NEXT: pmuludq %xmm3, %xmm4
+; SSE2-NEXT: paddq %xmm4, %xmm1
+; SSE2-NEXT: psllq $32, %xmm1
+; SSE2-NEXT: paddq %xmm7, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm9[0,1,1,3]
+; SSE2-NEXT: pmuludq %xmm2, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm14[0,1,1,3]
+; SSE2-NEXT: pmuludq %xmm12, %xmm4
+; SSE2-NEXT: paddq %xmm3, %xmm4
 ; SSE2-NEXT: psllq $32, %xmm4
+; SSE2-NEXT: pmuludq %xmm12, %xmm2
 ; SSE2-NEXT: paddq %xmm4, %xmm2
-; SSE2-NEXT: movdqa %xmm3, %xmm4
-; SSE2-NEXT: psrad $31, %xmm4
-; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm7[0],xmm4[1],xmm7[1]
-; SSE2-NEXT: pmuludq %xmm3, %xmm6
-; SSE2-NEXT: pmuludq %xmm5, %xmm4
-; SSE2-NEXT: paddq %xmm6, %xmm4
-; SSE2-NEXT: pmuludq %xmm5, %xmm3
-; SSE2-NEXT: psllq $32, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm10[0,1,1,3]
+; SSE2-NEXT: pmuludq %xmm15, %xmm3
+; SSE2-NEXT: pmuludq %xmm11, %xmm15
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm8[0,1,1,3]
+; SSE2-NEXT: pmuludq %xmm11, %xmm4
 ; SSE2-NEXT: paddq %xmm4, %xmm3
+; SSE2-NEXT: psllq $32, %xmm3
+; SSE2-NEXT: paddq %xmm15, %xmm3
 ; SSE2-NEXT: retq
 ;
 ; SSE41-LABEL: mul_v8i64_sext:
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx512bw.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx512bw.ll
index 64dd50639d31..b8d3824f1caf 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx512bw.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx512bw.ll
@@ -124,21 +124,21 @@ define <8 x double> @combine_vpermt2var_8f64_identity(<8 x double> %x0, <8 x dou
 define <8 x double> @combine_vpermt2var_8f64_identity_mask(<8 x double> %x0, <8 x double> %x1, i8 %m) {
 ; X32-LABEL: combine_vpermt2var_8f64_identity_mask:
 ; X32: # %bb.0:
-; X32-NEXT: vmovapd {{.*#+}} zmm2 = [7,0,6,0,5,0,4,0,3,0,2,0,1,0,0,0]
+; X32-NEXT: vmovapd {{.*#+}} zmm1 = [7,0,6,0,5,0,4,0,3,0,2,0,1,0,0,0]
 ; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: kmovd %eax, %k1
-; X32-NEXT: vpermi2pd %zmm1, %zmm0, %zmm2 {%k1} {z}
+; X32-NEXT: vpermi2pd %zmm0, %zmm0, %zmm1 {%k1} {z}
 ; X32-NEXT: vmovapd {{.*#+}} zmm0 = [7,0,14,0,5,0,12,0,3,0,10,0,1,0,8,0]
-; X32-NEXT: vpermi2pd %zmm2, %zmm2, %zmm0 {%k1} {z}
+; X32-NEXT: vpermi2pd %zmm1, %zmm1, %zmm0 {%k1} {z}
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: combine_vpermt2var_8f64_identity_mask:
 ; X64: # %bb.0:
-; X64-NEXT: vmovapd {{.*#+}} zmm2 = [7,6,5,4,3,2,1,0]
+; X64-NEXT: vmovapd {{.*#+}} zmm1 = [7,6,5,4,3,2,1,0]
 ; X64-NEXT: kmovd %edi, %k1
-; X64-NEXT: vpermi2pd %zmm1, %zmm0, %zmm2 {%k1} {z}
+; X64-NEXT: vpermi2pd %zmm0, %zmm0, %zmm1 {%k1} {z}
 ; X64-NEXT: vmovapd {{.*#+}} zmm0 = [7,14,5,12,3,10,1,8]
-; X64-NEXT: vpermi2pd %zmm2, %zmm2, %zmm0 {%k1} {z}
+; X64-NEXT: vpermi2pd %zmm1, %zmm1, %zmm0 {%k1} {z}
 ; X64-NEXT: retq
 %res0 = call <8 x double> @llvm.x86.avx512.maskz.vpermt2var.pd.512(<8 x i64> , <8 x double> %x0, <8 x double> %x1, i8 %m)
 %res1 = call <8 x double> @llvm.x86.avx512.maskz.vpermt2var.pd.512(<8 x i64> , <8 x double> %res0, <8 x double> %res0, i8 %m)
@@ -205,21 +205,21 @@ define <8 x i64> @combine_vpermt2var_8i64_identity(<8 x i64> %x0, <8 x i64> %x1)
 define <8 x i64> @combine_vpermt2var_8i64_identity_mask(<8 x i64> %x0, <8 x i64> %x1, i8 %m) {
 ; X32-LABEL: combine_vpermt2var_8i64_identity_mask:
 ; X32: # %bb.0:
-; X32-NEXT: vmovdqa64 {{.*#+}} zmm2 = [7,0,6,0,5,0,4,0,3,0,2,0,1,0,0,0]
+; X32-NEXT: vmovdqa64 {{.*#+}} zmm1 = [7,0,6,0,5,0,4,0,3,0,2,0,1,0,0,0]
 ; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT: kmovd %eax, %k1
-; X32-NEXT: vpermi2q %zmm1, %zmm0, %zmm2 {%k1} {z}
+; X32-NEXT: vpermi2q %zmm0, %zmm0, %zmm1 {%k1} {z}
 ; X32-NEXT: vmovdqa64 {{.*#+}} zmm0 = [7,0,14,0,5,0,12,0,3,0,10,0,1,0,8,0]
-; X32-NEXT: vpermi2q %zmm2, %zmm2, %zmm0 {%k1} {z}
+; X32-NEXT: vpermi2q %zmm1, %zmm1, %zmm0 {%k1} {z}
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: combine_vpermt2var_8i64_identity_mask:
 ; X64: # %bb.0:
-; X64-NEXT: vmovdqa64 {{.*#+}} zmm2 = [7,6,5,4,3,2,1,0]
+; X64-NEXT: vmovdqa64 {{.*#+}} zmm1 = [7,6,5,4,3,2,1,0]
 ; X64-NEXT: kmovd %edi, %k1
-; X64-NEXT: vpermi2q %zmm1, %zmm0, %zmm2 {%k1} {z}
+; X64-NEXT: vpermi2q %zmm0, %zmm0, %zmm1 {%k1} {z}
 ; X64-NEXT: vmovdqa64 {{.*#+}} zmm0 = [7,14,5,12,3,10,1,8]
-; X64-NEXT: vpermi2q %zmm2, %zmm2, %zmm0 {%k1} {z}
+; X64-NEXT: vpermi2q %zmm1, %zmm1, %zmm0 {%k1} {z}
 ; X64-NEXT: retq
 %res0 = call <8 x i64> @llvm.x86.avx512.maskz.vpermt2var.q.512(<8 x i64> , <8 x i64> %x0, <8 x i64> %x1, i8 %m)
 %res1 = call <8 x i64> @llvm.x86.avx512.maskz.vpermt2var.q.512(<8 x i64> , <8 x i64> %res0, <8 x i64> %res0, i8 %m)
@@ -241,20 +241,20 @@ define <16 x float> @combine_vpermt2var_16f32_identity(<16 x float> %x0, <16 x f
 define <16 x float> @combine_vpermt2var_16f32_identity_mask(<16 x float> %x0, <16 x float> %x1, i16 %m) {
 ; X32-LABEL: combine_vpermt2var_16f32_identity_mask:
 ; X32: # %bb.0:
-; X32-NEXT: vmovaps {{.*#+}} zmm2 = [15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
+; X32-NEXT: vmovaps {{.*#+}} zmm1 = [15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
 ; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
-; X32-NEXT: vpermi2ps %zmm1, %zmm0, %zmm2 {%k1} {z}
+; X32-NEXT: vpermi2ps %zmm0, %zmm0, %zmm1 {%k1} {z}
 ; X32-NEXT: vmovaps {{.*#+}} zmm0 = [15,30,13,28,11,26,9,24,7,22,5,20,3,18,1,16]
-; X32-NEXT: vpermi2ps %zmm2, %zmm2, %zmm0 {%k1} {z}
+; X32-NEXT: vpermi2ps %zmm1, %zmm1, %zmm0 {%k1} {z}
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: combine_vpermt2var_16f32_identity_mask:
 ; X64: # %bb.0:
-; X64-NEXT: vmovaps {{.*#+}} zmm2 = [15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
+; X64-NEXT: vmovaps {{.*#+}} zmm1 = [15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
 ; X64-NEXT: kmovd %edi, %k1
-; X64-NEXT: vpermi2ps %zmm1, %zmm0, %zmm2 {%k1} {z}
+; X64-NEXT: vpermi2ps %zmm0, %zmm0, %zmm1 {%k1} {z}
 ; X64-NEXT: vmovaps {{.*#+}} zmm0 = [15,30,13,28,11,26,9,24,7,22,5,20,3,18,1,16]
-; X64-NEXT: vpermi2ps %zmm2, %zmm2, %zmm0 {%k1} {z}
+; X64-NEXT: vpermi2ps %zmm1, %zmm1, %zmm0 {%k1} {z}
 ; X64-NEXT: retq
 %res0 = call <16 x float> @llvm.x86.avx512.maskz.vpermt2var.ps.512(<16 x i32> , <16 x float> %x0, <16 x float> %x1, i16 %m)
 %res1 = call <16 x float> @llvm.x86.avx512.maskz.vpermt2var.ps.512(<16 x i32> , <16 x float> %res0, <16 x float> %res0, i16 %m)
@@ -264,14 +264,14 @@ define <16 x float> @combine_vpermt2var_16f32_identity_mask(<16 x float> %x0, <1
 define <16 x float> @combine_vpermt2var_16f32_vmovddup(<16 x float> %x0, <16 x float> %x1) {
 ; X32-LABEL: combine_vpermt2var_16f32_vmovddup:
 ; X32: # %bb.0:
-; X32-NEXT: vmovaps {{.*#+}} zmm2 = [0,1,0,1,4,5,4,5,8,9,8,9,12,13,12,13]
-; X32-NEXT: vpermt2ps %zmm1, %zmm2, %zmm0
+; X32-NEXT: vmovaps {{.*#+}} zmm1 = [0,1,0,1,4,5,4,5,8,9,8,9,12,13,12,13]
+; X32-NEXT: vpermt2ps %zmm0, %zmm1, %zmm0
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: combine_vpermt2var_16f32_vmovddup:
 ; X64: # %bb.0:
-; X64-NEXT: vmovaps {{.*#+}} zmm2 = [0,1,0,1,4,5,4,5,8,9,8,9,12,13,12,13]
-; X64-NEXT: vpermt2ps %zmm1, %zmm2, %zmm0
+; X64-NEXT: vmovaps {{.*#+}} zmm1 = [0,1,0,1,4,5,4,5,8,9,8,9,12,13,12,13]
+; X64-NEXT: vpermt2ps %zmm0, %zmm1, %zmm0
 ; X64-NEXT: retq
 %res0 = call <16 x float> @llvm.x86.avx512.maskz.vpermt2var.ps.512(<16 x i32> , <16 x float> %x0, <16 x float> %x1, i16 -1)
 ret <16 x float> %res0
@@ -280,18 +280,16 @@ define <16 x float> @combine_vpermt2var_16f32_vmovddup_load(<16 x float> *%p0, <
 ; X32-LABEL: combine_vpermt2var_16f32_vmovddup_load:
 ; X32: # %bb.0:
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: vmovaps (%eax), %zmm2
-; X32-NEXT: vmovaps {{.*#+}} zmm1 = [0,1,0,1,4,5,4,5,8,9,8,9,12,13,12,13]
-; X32-NEXT: vpermi2ps %zmm0, %zmm2, %zmm1
-; X32-NEXT: vmovaps %zmm1, %zmm0
+; X32-NEXT: vmovaps (%eax), %zmm1
+; X32-NEXT: vmovaps {{.*#+}} zmm0 = [0,1,0,1,4,5,4,5,8,9,8,9,12,13,12,13]
+; X32-NEXT: vpermi2ps %zmm0, %zmm1, %zmm0
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: combine_vpermt2var_16f32_vmovddup_load:
 ; X64: # %bb.0:
-; X64-NEXT: vmovaps (%rdi), %zmm2
-; X64-NEXT: vmovaps {{.*#+}} zmm1 = [0,1,0,1,4,5,4,5,8,9,8,9,12,13,12,13]
-; X64-NEXT: vpermi2ps %zmm0, %zmm2, %zmm1
-; X64-NEXT: vmovaps %zmm1, %zmm0
+; X64-NEXT: vmovaps (%rdi), %zmm1
+; X64-NEXT: vmovaps {{.*#+}} zmm0 = [0,1,0,1,4,5,4,5,8,9,8,9,12,13,12,13]
+; X64-NEXT: vpermi2ps %zmm0, %zmm1, %zmm0
 ; X64-NEXT: retq
 %x0 = load <16 x float>, <16 x float> *%p0
 %res0 = call <16 x float> @llvm.x86.avx512.maskz.vpermt2var.ps.512(<16 x i32> , <16 x float> %x0, <16 x float> %x1, i16 -1)
@@ -300,16 +298,16 @@ define <16 x float> @combine_vpermt2var_16f32_vmovddup_load(<16 x float> *%p0, <
 define <16 x float> @combine_vpermt2var_16f32_vmovddup_mask(<16 x float> %x0, <16 x float> %x1, i16 %m) {
 ; X32-LABEL: combine_vpermt2var_16f32_vmovddup_mask:
 ; X32: # %bb.0:
-; X32-NEXT: vmovaps {{.*#+}} zmm2 = [0,1,0,1,4,5,4,5,8,9,8,9,12,13,12,13]
+; X32-NEXT: vmovaps {{.*#+}} zmm1 = [0,1,0,1,4,5,4,5,8,9,8,9,12,13,12,13]
 ; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
-; X32-NEXT: vpermt2ps %zmm1, %zmm2, %zmm0 {%k1} {z}
+; X32-NEXT: vpermt2ps %zmm0, %zmm1, %zmm0 {%k1} {z}
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: combine_vpermt2var_16f32_vmovddup_mask:
 ; X64: # %bb.0:
-; X64-NEXT: vmovaps {{.*#+}} zmm2 = [0,1,0,1,4,5,4,5,8,9,8,9,12,13,12,13]
+; X64-NEXT: vmovaps {{.*#+}} zmm1 = [0,1,0,1,4,5,4,5,8,9,8,9,12,13,12,13]
 ; X64-NEXT: kmovd %edi, %k1
-; X64-NEXT: vpermt2ps %zmm1, %zmm2, %zmm0 {%k1} {z}
+; X64-NEXT: vpermt2ps %zmm0, %zmm1, %zmm0 {%k1} {z}
 ; X64-NEXT: retq
 %res0 = call <16 x float> @llvm.x86.avx512.maskz.vpermt2var.ps.512(<16 x i32> , <16 x float> %x0, <16 x float> %x1, i16 %m)
 ret <16 x float> %res0
@@ -318,20 +316,18 @@ define <16 x float> @combine_vpermt2var_16f32_vmovddup_mask_load(<16 x float> *%
 ; X32-LABEL: combine_vpermt2var_16f32_vmovddup_mask_load:
 ; X32: # %bb.0:
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: vmovaps (%eax), %zmm2
-; X32-NEXT: vmovaps {{.*#+}} zmm1 = [0,1,0,1,4,5,4,5,8,9,8,9,12,13,12,13]
+; X32-NEXT: vmovaps (%eax), %zmm1
+; X32-NEXT: vmovaps {{.*#+}} zmm0 = [0,1,0,1,4,5,4,5,8,9,8,9,12,13,12,13]
 ; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
-; X32-NEXT: vpermi2ps %zmm0, %zmm2, %zmm1 {%k1} {z}
-; X32-NEXT: vmovaps %zmm1, %zmm0
+; X32-NEXT: vpermi2ps %zmm0, %zmm1, %zmm0 {%k1} {z}
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: combine_vpermt2var_16f32_vmovddup_mask_load:
 ; X64: # %bb.0:
-; X64-NEXT: vmovaps (%rdi), %zmm2
-; X64-NEXT: vmovaps {{.*#+}} zmm1 = [0,1,0,1,4,5,4,5,8,9,8,9,12,13,12,13]
+; X64-NEXT: vmovaps (%rdi), %zmm1
+; X64-NEXT: vmovaps {{.*#+}} zmm0 = [0,1,0,1,4,5,4,5,8,9,8,9,12,13,12,13]
 ; X64-NEXT: kmovd %esi, %k1
-; X64-NEXT: vpermi2ps %zmm0, %zmm2, %zmm1 {%k1} {z}
-; X64-NEXT: vmovaps %zmm1, %zmm0
+; X64-NEXT: vpermi2ps %zmm0, %zmm1, %zmm0 {%k1} {z}
 ; X64-NEXT: retq
 %x0 = load <16 x float>, <16 x float> *%p0
 %res0 = call <16 x float> @llvm.x86.avx512.maskz.vpermt2var.ps.512(<16 x i32> , <16 x float> %x0, <16 x float> %x1, i16 %m)
@@ -519,20 +515,20 @@ define <16 x i32> @combine_vpermt2var_16i32_identity(<16 x i32> %x0, <16 x i32>
 define <16 x i32> @combine_vpermt2var_16i32_identity_mask(<16 x i32> %x0, <16 x i32> %x1, i16 %m) {
 ; X32-LABEL: combine_vpermt2var_16i32_identity_mask:
 ; X32: # %bb.0:
-; X32-NEXT: vmovdqa64 {{.*#+}} zmm2 = [15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
+; X32-NEXT: vmovdqa64 {{.*#+}} zmm1 = [15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
 ; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
-; X32-NEXT: vpermi2d %zmm1, %zmm0, %zmm2 {%k1} {z}
+; X32-NEXT: vpermi2d %zmm0, %zmm0, %zmm1 {%k1} {z}
 ; X32-NEXT: vmovdqa64 {{.*#+}} zmm0 = [15,30,13,28,11,26,9,24,7,22,5,20,3,18,1,16]
-; X32-NEXT: vpermi2d %zmm2, %zmm2, %zmm0 {%k1} {z}
+; X32-NEXT: vpermi2d %zmm1, %zmm1, %zmm0 {%k1} {z}
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: combine_vpermt2var_16i32_identity_mask:
 ; X64: # %bb.0:
-; X64-NEXT: vmovdqa64 {{.*#+}} zmm2 = [15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
+; X64-NEXT: vmovdqa64 {{.*#+}} zmm1 = [15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
 ; X64-NEXT: kmovd %edi, %k1
-; X64-NEXT: vpermi2d %zmm1, %zmm0, %zmm2 {%k1} {z}
+; X64-NEXT: vpermi2d %zmm0, %zmm0, %zmm1 {%k1} {z}
 ; X64-NEXT: vmovdqa64 {{.*#+}} zmm0 = [15,30,13,28,11,26,9,24,7,22,5,20,3,18,1,16]
-; X64-NEXT: vpermi2d %zmm2, %zmm2, %zmm0 {%k1} {z}
+; X64-NEXT: vpermi2d %zmm1, %zmm1, %zmm0 {%k1} {z}
 ; X64-NEXT: retq
 %res0 = call <16 x i32> @llvm.x86.avx512.maskz.vpermt2var.d.512(<16 x i32> , <16 x i32> %x0, <16 x i32> %x1, i16 %m)
 %res1 = call <16 x i32> @llvm.x86.avx512.maskz.vpermt2var.d.512(<16 x i32> , <16 x i32> %res0, <16 x i32> %res0, i16 %m)
@@ -554,20 +550,20 @@ define <32 x i16> @combine_vpermt2var_32i16_identity(<32 x i16> %x0, <32 x i16>
 define <32 x i16> @combine_vpermt2var_32i16_identity_mask(<32 x i16> %x0, <32 x i16> %x1, i32 %m) {
 ; X32-LABEL: combine_vpermt2var_32i16_identity_mask:
 ; X32: # %bb.0:
-; X32-NEXT: vmovdqa64 {{.*#+}} zmm2 = [31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
+; X32-NEXT: vmovdqa64 {{.*#+}} zmm1 = [31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
 ; X32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
-; X32-NEXT: vpermi2w %zmm1, %zmm0, %zmm2 {%k1} {z}
+; X32-NEXT: vpermi2w %zmm0, %zmm0, %zmm1 {%k1} {z}
 ; X32-NEXT: vmovdqa64 {{.*#+}} zmm0 = [63,30,61,28,59,26,57,24,55,22,53,20,51,18,49,16,47,46,13,44,11,42,9,40,7,38,5,36,3,34,1,32]
-; X32-NEXT: vpermi2w %zmm2, %zmm2, %zmm0 {%k1} {z}
+; X32-NEXT: vpermi2w %zmm1, %zmm1, %zmm0 {%k1} {z}
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: combine_vpermt2var_32i16_identity_mask:
 ; X64: # %bb.0:
-; X64-NEXT: vmovdqa64 {{.*#+}} zmm2 = [31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
+; X64-NEXT: vmovdqa64 {{.*#+}} zmm1 = [31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
 ; X64-NEXT: kmovd %edi, %k1
-; X64-NEXT: vpermi2w %zmm1, %zmm0, %zmm2 {%k1} {z}
+; X64-NEXT: vpermi2w %zmm0, %zmm0, %zmm1 {%k1} {z}
 ; X64-NEXT: vmovdqa64 {{.*#+}} zmm0 = [63,30,61,28,59,26,57,24,55,22,53,20,51,18,49,16,47,46,13,44,11,42,9,40,7,38,5,36,3,34,1,32]
-; X64-NEXT: vpermi2w %zmm2, %zmm2, %zmm0 {%k1} {z}
+; X64-NEXT: vpermi2w %zmm1, %zmm1, %zmm0 {%k1} {z}
 ; X64-NEXT: retq
 %res0 = call <32 x i16> @llvm.x86.avx512.maskz.vpermt2var.hi.512(<32 x i16> , <32 x i16> %x0, <32 x i16> %x1, i32 %m)
 %res1 = call <32 x i16> @llvm.x86.avx512.maskz.vpermt2var.hi.512(<32 x i16> , <32 x i16> %res0, <32 x i16> %res0, i32 %m)
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx512bwvl.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx512bwvl.ll
index 0d56a24c5ff3..c4ab922d068c 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx512bwvl.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx512bwvl.ll
@@ -20,20 +20,20 @@ define <16 x i16> @combine_vpermt2var_16i16_identity(<16 x i16> %x0, <16 x i16>
 define <16 x i16> @combine_vpermt2var_16i16_identity_mask(<16 x i16> %x0, <16 x i16> %x1, i16 %m) {
 ; X32-LABEL: combine_vpermt2var_16i16_identity_mask:
 ; X32: # %bb.0:
-; X32-NEXT: vmovdqa {{.*#+}} ymm2 = [15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
+; X32-NEXT: vmovdqa {{.*#+}} ymm1 = [15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
 ; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
-; X32-NEXT: vpermi2w %ymm1, %ymm0, %ymm2 {%k1} {z}
+; X32-NEXT: vpermi2w %ymm0, %ymm0, %ymm1 {%k1} {z}
 ; X32-NEXT: vmovdqa {{.*#+}} ymm0 = [15,30,13,28,11,26,9,24,7,22,5,20,3,18,1,16]
-; X32-NEXT: vpermi2w %ymm2, %ymm2, %ymm0 {%k1} {z}
+; X32-NEXT: vpermi2w %ymm1, %ymm1, %ymm0 {%k1} {z}
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: combine_vpermt2var_16i16_identity_mask:
 ; X64: # %bb.0:
-; X64-NEXT: vmovdqa {{.*#+}} ymm2 = [15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
+; X64-NEXT: vmovdqa {{.*#+}} ymm1 = [15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
 ; X64-NEXT: kmovd %edi, %k1
-; X64-NEXT: vpermi2w %ymm1, %ymm0, %ymm2 {%k1} {z}
+; X64-NEXT: vpermi2w %ymm0, %ymm0, %ymm1 {%k1} {z}
 ; X64-NEXT: vmovdqa {{.*#+}} ymm0 = [15,30,13,28,11,26,9,24,7,22,5,20,3,18,1,16]
-; X64-NEXT: vpermi2w %ymm2, %ymm2, %ymm0 {%k1} {z}
+; X64-NEXT: vpermi2w %ymm1, %ymm1, %ymm0 {%k1} {z}
 ; X64-NEXT: retq
 %res0 = call <16 x i16> @llvm.x86.avx512.maskz.vpermt2var.hi.256(<16 x i16> , <16 x i16> %x0, <16 x i16> %x1, i16 %m)
 %res1 = call <16 x i16> @llvm.x86.avx512.maskz.vpermt2var.hi.256(<16 x i16> , <16 x i16> %res0, <16 x i16> %res0, i16 %m)
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx512vbmi.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx512vbmi.ll
index a9054102460c..5350ddab5ef7 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx512vbmi.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx512vbmi.ll
@@ -33,20 +33,20 @@ define <16 x i8> @combine_vpermt2var_16i8_identity(<16 x i8> %x0, <16 x i8> %x1)
 define <16 x i8> @combine_vpermt2var_16i8_identity_mask(<16 x i8> %x0, <16 x i8> %x1, i16 %m) {
 ; X32-LABEL: combine_vpermt2var_16i8_identity_mask:
 ; X32: # %bb.0:
-; X32-NEXT: vmovdqa {{.*#+}} xmm2 = [15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
+; X32-NEXT: vmovdqa {{.*#+}} xmm1 = [15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
 ; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
-; X32-NEXT: vpermi2b %xmm1, %xmm0, %xmm2 {%k1} {z}
+; X32-NEXT: vpermi2b %xmm0, %xmm0, %xmm1 {%k1} {z}
 ; X32-NEXT: vmovdqa {{.*#+}} xmm0 = [15,30,13,28,11,26,9,24,7,22,5,20,3,18,1,16]
-; X32-NEXT: vpermi2b %xmm2, %xmm2, %xmm0 {%k1} {z}
+; X32-NEXT: vpermi2b %xmm1, %xmm1, %xmm0 {%k1} {z}
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: combine_vpermt2var_16i8_identity_mask:
 ; X64: # %bb.0:
-; X64-NEXT: vmovdqa {{.*#+}} xmm2 = [15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
+; X64-NEXT: vmovdqa {{.*#+}} xmm1 = [15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
 ; X64-NEXT: kmovd %edi, %k1
-; X64-NEXT: vpermi2b %xmm1, %xmm0, %xmm2 {%k1} {z}
+; X64-NEXT: vpermi2b %xmm0, %xmm0, %xmm1 {%k1} {z}
 ; X64-NEXT: vmovdqa {{.*#+}} xmm0 = [15,30,13,28,11,26,9,24,7,22,5,20,3,18,1,16]
-; X64-NEXT: vpermi2b %xmm2, %xmm2, %xmm0 {%k1} {z}
+; X64-NEXT: vpermi2b %xmm1, %xmm1, %xmm0 {%k1} {z}
 ; X64-NEXT: retq
 %res0 = call <16 x i8> @llvm.x86.avx512.maskz.vpermt2var.qi.128(<16 x i8> , <16 x i8> %x0, <16 x i8> %x1, i16 %m)
 %res1 = call <16 x i8> @llvm.x86.avx512.maskz.vpermt2var.qi.128(<16 x i8> , <16 x i8> %res0, <16 x i8> %res0, i16 %m)
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-combining.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining.ll
index b8f572f0c8d8..199a05eb2b8f 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-combining.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-combining.ll
@@ -2827,16 +2827,13 @@ define <4 x float> @PR30264(<4 x float> %x) {
 define <8 x i16> @PR39549(<16 x i8> %x) {
 ; SSE-LABEL: PR39549:
 ; SSE: # %bb.0:
-; SSE-NEXT: pxor %xmm1, %xmm1
-; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
-; SSE-NEXT: psraw $8, %xmm1
-; SSE-NEXT: movdqa %xmm1, %xmm0
+; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE-NEXT: psraw $8, %xmm0
 ; SSE-NEXT: retq
 ;
 ; AVX-LABEL: PR39549:
 ; AVX: # %bb.0:
-; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
+; AVX-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
 ; AVX-NEXT: vpsraw $8, %xmm0, %xmm0
 ; AVX-NEXT: retq
 %a = shufflevector <16 x i8> %x, <16 x i8> undef, <16 x i32> 
diff --git a/llvm/test/CodeGen/X86/vector-trunc-math-widen.ll b/llvm/test/CodeGen/X86/vector-trunc-math-widen.ll
index c5095ae918ac..8f6a1fe57260 100644
--- a/llvm/test/CodeGen/X86/vector-trunc-math-widen.ll
+++ b/llvm/test/CodeGen/X86/vector-trunc-math-widen.ll
@@ -5607,40 +5607,39 @@ define <4 x i32> @mul_add_const_v4i64_v4i32(<4 x i32> %a0, <4 x i32> %a1) nounwi
 define <4 x i32> @mul_add_self_v4i64_v4i32(<4 x i32> %a0, <4 x i32> %a1) nounwind {
 ; SSE-LABEL: mul_add_self_v4i64_v4i32:
 ; SSE: # %bb.0:
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
-; SSE-NEXT: movdqa %xmm2, %xmm3
-; SSE-NEXT: psrad $31, %xmm3
-; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
-; SSE-NEXT: movdqa %xmm0, %xmm6
-; SSE-NEXT: psrad $31, %xmm6
-; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1]
-; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,0,1]
-; SSE-NEXT: movdqa %xmm4, %xmm5
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1]
+; SSE-NEXT: movdqa %xmm3, %xmm4
+; SSE-NEXT: psrad $31, %xmm4
+; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
+; SSE-NEXT: movdqa %xmm0, %xmm5
 ; SSE-NEXT: psrad $31, %xmm5
-; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
-; SSE-NEXT: movdqa %xmm1, %xmm7
+; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1]
+; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm1[2,3,0,1]
+; SSE-NEXT: movdqa %xmm6, %xmm7
 ; SSE-NEXT: psrad $31, %xmm7
-; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm7[0],xmm1[1],xmm7[1]
-; SSE-NEXT: pxor %xmm8, %xmm8
-; SSE-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm8[0],xmm6[1],xmm8[1]
-; SSE-NEXT: pmuludq %xmm1, %xmm6
-; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1]
-; SSE-NEXT: pmuludq %xmm0, %xmm7
-; SSE-NEXT: paddq %xmm6, %xmm7
-; SSE-NEXT: psllq $32, %xmm7
-; SSE-NEXT: pmuludq %xmm0, %xmm1
-; SSE-NEXT: paddq %xmm7, %xmm1
-; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm8[0],xmm3[1],xmm8[1]
-; SSE-NEXT: pmuludq %xmm4, %xmm3
-; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm8[0],xmm5[1],xmm8[1]
-; SSE-NEXT: pmuludq %xmm2, %xmm5
-; SSE-NEXT: paddq %xmm3, %xmm5
-; SSE-NEXT: psllq $32, %xmm5
-; SSE-NEXT: pmuludq %xmm2, %xmm4
-; SSE-NEXT: paddq %xmm5, %xmm4
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm4[0,2]
-; SSE-NEXT: paddd %xmm1, %xmm1
-; SSE-NEXT: movdqa %xmm1, %xmm0
+; SSE-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1]
+; SSE-NEXT: movdqa %xmm1, %xmm2
+; SSE-NEXT: psrad $31, %xmm2
+; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,1,3]
+; SSE-NEXT: pmuludq %xmm0, %xmm2
+; SSE-NEXT: pmuludq %xmm1, %xmm0
+; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,1,1,3]
+; SSE-NEXT: pmuludq %xmm1, %xmm5
+; SSE-NEXT: paddq %xmm5, %xmm2
+; SSE-NEXT: psllq $32, %xmm2
+; SSE-NEXT: paddq %xmm0, %xmm2
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm7[0,1,1,3]
+; SSE-NEXT: pmuludq %xmm3, %xmm0
+; SSE-NEXT: pmuludq %xmm6, %xmm3
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm4[0,1,1,3]
+; SSE-NEXT: pmuludq %xmm6, %xmm1
+; SSE-NEXT: paddq %xmm1, %xmm0
+; SSE-NEXT: psllq $32, %xmm0
+; SSE-NEXT: paddq %xmm3, %xmm0
+; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm0[0,2]
+; SSE-NEXT: paddd %xmm2, %xmm2
+; SSE-NEXT: movdqa %xmm2, %xmm0
 ; SSE-NEXT: retq
 ;
 ; AVX-LABEL: mul_add_self_v4i64_v4i32:
diff --git a/llvm/test/CodeGen/X86/vector-trunc-math.ll b/llvm/test/CodeGen/X86/vector-trunc-math.ll
index b8aedf985a54..97c4f3da409f 100644
--- a/llvm/test/CodeGen/X86/vector-trunc-math.ll
+++ b/llvm/test/CodeGen/X86/vector-trunc-math.ll
@@ -5607,40 +5607,39 @@ define <4 x i32> @mul_add_const_v4i64_v4i32(<4 x i32> %a0, <4 x i32> %a1) nounwi
 define <4 x i32> @mul_add_self_v4i64_v4i32(<4 x i32> %a0, <4 x i32> %a1) nounwind {
 ; SSE-LABEL: mul_add_self_v4i64_v4i32:
 ; SSE: # %bb.0:
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
-; SSE-NEXT: movdqa %xmm2, %xmm3
-; SSE-NEXT: psrad $31, %xmm3
-; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
-; SSE-NEXT: movdqa %xmm0, %xmm6
-; SSE-NEXT: psrad $31, %xmm6
-; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1]
-; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,0,1]
-; SSE-NEXT: movdqa %xmm4, %xmm5
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1]
+; SSE-NEXT: movdqa %xmm3, %xmm4
+; SSE-NEXT: psrad $31, %xmm4
+; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
+; SSE-NEXT: movdqa %xmm0, %xmm5
 ; SSE-NEXT: psrad $31, %xmm5
-; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
-; SSE-NEXT: movdqa %xmm1, %xmm7
+; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1]
+; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm1[2,3,0,1]
+; SSE-NEXT: movdqa %xmm6, %xmm7
 ; SSE-NEXT: psrad $31, %xmm7
-; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm7[0],xmm1[1],xmm7[1]
-; SSE-NEXT: pxor %xmm8, %xmm8
-; SSE-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm8[0],xmm6[1],xmm8[1]
-; SSE-NEXT: pmuludq %xmm1, %xmm6
-; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1]
-; SSE-NEXT: pmuludq %xmm0, %xmm7
-; SSE-NEXT: paddq %xmm6, %xmm7
-; SSE-NEXT: psllq $32, %xmm7
-; SSE-NEXT: pmuludq %xmm0, %xmm1
-; SSE-NEXT: paddq %xmm7, %xmm1
-; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm8[0],xmm3[1],xmm8[1]
-; SSE-NEXT: pmuludq %xmm4, %xmm3
-; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm8[0],xmm5[1],xmm8[1]
-; SSE-NEXT: pmuludq %xmm2, %xmm5
-; SSE-NEXT: paddq %xmm3, %xmm5
-; SSE-NEXT: psllq $32, %xmm5
-; SSE-NEXT: pmuludq %xmm2, %xmm4
-; SSE-NEXT: paddq %xmm5, %xmm4
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm4[0,2]
-; SSE-NEXT: paddd %xmm1, %xmm1
-; SSE-NEXT: movdqa %xmm1, %xmm0
+; SSE-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1]
+; SSE-NEXT: movdqa %xmm1, %xmm2
+; SSE-NEXT: psrad $31, %xmm2
+; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,1,3]
+; SSE-NEXT: pmuludq %xmm0, %xmm2
+; SSE-NEXT: pmuludq %xmm1, %xmm0
+; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,1,1,3]
+; SSE-NEXT: pmuludq %xmm1, %xmm5
+; SSE-NEXT: paddq %xmm5, %xmm2
+; SSE-NEXT: psllq $32, %xmm2
+; SSE-NEXT: paddq %xmm0, %xmm2
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm7[0,1,1,3]
+; SSE-NEXT: pmuludq %xmm3, %xmm0
+; SSE-NEXT: pmuludq %xmm6, %xmm3
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm4[0,1,1,3]
+; SSE-NEXT: pmuludq %xmm6, %xmm1
+; SSE-NEXT: paddq %xmm1, %xmm0
+; SSE-NEXT: psllq $32, %xmm0
+; SSE-NEXT: paddq %xmm3, %xmm0
+; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm0[0,2]
+; SSE-NEXT: paddd %xmm2, %xmm2
+; SSE-NEXT: movdqa %xmm2, %xmm0
 ; SSE-NEXT: retq
 ;
 ; AVX-LABEL: mul_add_self_v4i64_v4i32: