diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index fb939daab577..306a7c5590f0 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -9959,17 +9959,18 @@ static SDValue lowerVectorShuffle(SDValue Op, const X86Subtarget *Subtarget,
         ++LowV2Elements;
       else if (M >= 0)
         ++LowV1Elements;
-    if (LowV2Elements > LowV1Elements)
-      return DAG.getCommutedVectorShuffle(*SVOp);
-
-    int SumV1Indices = 0, SumV2Indices = 0;
-    for (int i = 0, Size = SVOp->getMask().size(); i < Size; ++i)
-      if (SVOp->getMask()[i] >= NumElements)
-        SumV2Indices += i;
-      else if (SVOp->getMask()[i] >= 0)
-        SumV1Indices += i;
-    if (SumV2Indices < SumV1Indices)
+    if (LowV2Elements > LowV1Elements) {
       return DAG.getCommutedVectorShuffle(*SVOp);
+    } else if (LowV2Elements == LowV1Elements) {
+      int SumV1Indices = 0, SumV2Indices = 0;
+      for (int i = 0, Size = SVOp->getMask().size(); i < Size; ++i)
+        if (SVOp->getMask()[i] >= NumElements)
+          SumV2Indices += i;
+        else if (SVOp->getMask()[i] >= 0)
+          SumV1Indices += i;
+      if (SumV2Indices < SumV1Indices)
+        return DAG.getCommutedVectorShuffle(*SVOp);
+    }
   }
 
   // For each vector width, delegate to a specialized lowering routine.
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-256-v8.ll b/llvm/test/CodeGen/X86/vector-shuffle-256-v8.ll
index fb5c993250ef..eec968eec2ac 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-256-v8.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-256-v8.ll
@@ -669,6 +669,30 @@ define <8 x float> @shuffle_v8f32_c348cda0(<8 x float> %a, <8 x float> %b) {
   ret <8 x float> %shuffle
 }
 
+define <8 x float> @shuffle_v8f32_f511235a(<8 x float> %a, <8 x float> %b) {
+; AVX1-LABEL: @shuffle_v8f32_f511235a
+; AVX1:       # BB#0:
+; AVX1-NEXT:  vperm2f128 {{.*}} # ymm2 = ymm0[2,3,0,1]
+; AVX1-NEXT:  vpermilps {{.*}} # ymm2 = ymm2[u,1,u,u,6,7,u,u]
+; AVX1-NEXT:  vpermilps {{.*}} # ymm0 = ymm0[0,1,1,1,4,5,5,5]
+; AVX1-NEXT:  vblendps {{.*}} # ymm0 = ymm0[0],ymm2[1],ymm0[2,3],ymm2[4,5],ymm0[6,7]
+; AVX1-NEXT:  vperm2f128 {{.*}} # ymm1 = ymm1[2,3,0,1]
+; AVX1-NEXT:  vpermilps {{.*}} # ymm1 = ymm1[3,1,2,2,7,5,6,6]
+; AVX1-NEXT:  vblendps {{.*}} # ymm0 = ymm1[0],ymm0[1,2,3,4,5,6],ymm1[7]
+; AVX1-NEXT:  retq
+;
+; AVX2-LABEL: @shuffle_v8f32_f511235a
+; AVX2:       # BB#0:
+; AVX2-NEXT:  vmovaps {{.*}} # ymm2 = <7,u,u,u,u,u,u,2>
+; AVX2-NEXT:  vpermps %ymm1, %ymm2, %ymm1
+; AVX2-NEXT:  vmovaps {{.*}} # ymm2 =
+; AVX2-NEXT:  vpermps %ymm0, %ymm2, %ymm0
+; AVX2-NEXT:  vblendps {{.*}} # ymm0 = ymm1[0],ymm0[1,2,3,4,5,6],ymm1[7]
+; AVX2-NEXT:  retq
+  %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 15, i32 5, i32 1, i32 1, i32 2, i32 3, i32 5, i32 10>
+  ret <8 x float> %shuffle
+}
+
 define <8 x i32> @shuffle_v8i32_00000000(<8 x i32> %a, <8 x i32> %b) {
 ; AVX1-LABEL: @shuffle_v8i32_00000000
 ; AVX1:       # BB#0: