From 66e39067edbfdb1469be001ebb053530a608b532 Mon Sep 17 00:00:00 2001
From: Simon Pilgrim
Date: Sun, 12 Jan 2020 12:29:41 +0000
Subject: [PATCH] [X86][AVX] Use lowerShuffleAsLanePermuteAndSHUFP to lower
 binary v4f64 shuffles.

Only perform this if we are shuffling lower and upper lane elements
across the lanes (otherwise splitting to lower xmm shuffles would be
better).

This is a regression if we shuffle build_vectors, due to
getVectorShuffle canonicalizing 'blend of splat' build vectors; for now
I've set this not to shuffle build_vector nodes at all to avoid this.
---
 llvm/lib/Target/X86/X86ISelLowering.cpp       |  12 ++
 llvm/test/CodeGen/X86/avx-unpack.ll           |   8 +-
 .../X86/avx512-shuffles/partial_permute.ll    |  98 +++++++------
 llvm/test/CodeGen/X86/subvector-broadcast.ll  |   4 +-
 .../test/CodeGen/X86/vector-shuffle-256-v4.ll | 129 +++++-------------
 .../test/CodeGen/X86/vector-shuffle-256-v8.ll |  26 ++--
 6 files changed, 103 insertions(+), 174 deletions(-)
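Reviewer note: a minimal standalone C++ sketch of the new gating predicate in
lowerV4F64Shuffle follows. The helper names, the std::array mask
representation, and the main() driver are illustrative assumptions, not the
LLVM helpers themselves; it only demonstrates which v4f64 masks take the new
SHUFPD(VPERM2F128(V1, V2), VPERM2F128(V1, V2)) path.

#include <algorithm>
#include <array>
#include <cstdio>

using Mask4 = std::array<int, 4>; // one index per v4f64 element, -1 = undef

// A shuffle crosses 128-bit lanes if some element lands in a different lane
// of the result than the lane it occupies in its source vector.
static bool crossesLanes(const Mask4 &M) {
  for (int I = 0; I != 4; ++I)
    if (M[I] >= 0 && (M[I] % 4) / 2 != I / 2)
      return true;
  return false;
}

// Mirrors the patch: take the lane-permute+SHUFPD path only when the mask
// crosses lanes AND references at least one upper-lane element (indices 2-3
// of V1 or 6-7 of V2); otherwise splitting to xmm shuffles is better.
static bool takesLanePermuteAndSHUFPPath(const Mask4 &M) {
  bool AllLowerLaneElts = std::all_of(M.begin(), M.end(), [](int E) {
    return E < 2 || (4 <= E && E < 6); // same test as the all_of in the patch
  });
  return crossesLanes(M) && !AllLowerLaneElts;
}

int main() {
  // <2,6,3,7> (unpackhipd_not below) crosses lanes and uses upper elements.
  std::printf("%d\n", takesLanePermuteAndSHUFPPath({2, 6, 3, 7})); // 1
  // <0,4,1,5> crosses lanes but only uses lower-lane elements, so it is
  // still left to be split into xmm unpacks.
  std::printf("%d\n", takesLanePermuteAndSHUFPPath({0, 4, 1, 5})); // 0
  return 0;
}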
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index c7a42b15cb0c..2cb8fce20fe3 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -15879,6 +15879,18 @@ static SDValue lowerV4F64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                              Zeroable, Subtarget, DAG))
       return Op;
 
+  // If we have lane crossing shuffles AND they don't all come from the lower
+  // lane elements, lower to SHUFPD(VPERM2F128(V1, V2), VPERM2F128(V1, V2)).
+  // TODO: Handle BUILD_VECTOR sources which getVectorShuffle currently
+  // canonicalizes to a blend of splat which isn't necessary for this combine.
+  if (is128BitLaneCrossingShuffleMask(MVT::v4f64, Mask) &&
+      !all_of(Mask, [](int M) { return M < 2 || (4 <= M && M < 6); }) &&
+      (V1.getOpcode() != ISD::BUILD_VECTOR) &&
+      (V2.getOpcode() != ISD::BUILD_VECTOR))
+    if (SDValue Op = lowerShuffleAsLanePermuteAndSHUFP(DL, MVT::v4f64, V1, V2,
+                                                       Mask, DAG))
+      return Op;
+
   // If we have one input in place, then we can permute the other input and
   // blend the result.
   if (isShuffleMaskInputInPlace(0, Mask) || isShuffleMaskInputInPlace(1, Mask))
diff --git a/llvm/test/CodeGen/X86/avx-unpack.ll b/llvm/test/CodeGen/X86/avx-unpack.ll
index 8c7098097566..30b42a2d8beb 100644
--- a/llvm/test/CodeGen/X86/avx-unpack.ll
+++ b/llvm/test/CodeGen/X86/avx-unpack.ll
@@ -73,11 +73,9 @@ define <8 x float> @unpackhips_not(<8 x float> %src1, <8 x float> %src2) nounwin
 define <4 x double> @unpackhipd_not(<4 x double> %src1, <4 x double> %src2) nounwind uwtable readnone ssp {
 ; CHECK-LABEL: unpackhipd_not:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vextractf128 $1, %ymm1, %xmm1
-; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm0
-; CHECK-NEXT:    vunpckhpd {{.*#+}} xmm2 = xmm0[1],xmm1[1]
-; CHECK-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; CHECK-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; CHECK-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,2,3]
+; CHECK-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
+; CHECK-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[3],ymm1[3]
 ; CHECK-NEXT:    retq
   %shuffle.i = shufflevector <4 x double> %src1, <4 x double> %src2, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
   ret <4 x double> %shuffle.i
diff --git a/llvm/test/CodeGen/X86/avx512-shuffles/partial_permute.ll b/llvm/test/CodeGen/X86/avx512-shuffles/partial_permute.ll
index 887e4747e78e..ba06a7a13f4f 100644
--- a/llvm/test/CodeGen/X86/avx512-shuffles/partial_permute.ll
+++ b/llvm/test/CodeGen/X86/avx512-shuffles/partial_permute.ll
@@ -3766,10 +3766,10 @@ define <4 x double> @test_masked_z_8xdouble_to_4xdouble_perm_mask2(<8 x double>
 define <4 x double> @test_8xdouble_to_4xdouble_perm_mask3(<8 x double> %vec) {
 ; CHECK-LABEL: test_8xdouble_to_4xdouble_perm_mask3:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vextractf32x4 $2, %zmm0, %xmm2
-; CHECK-NEXT:    vmovapd {{.*#+}} ymm1 = [0,2,1,4]
-; CHECK-NEXT:    vpermi2pd %ymm2, %ymm0, %ymm1
-; CHECK-NEXT:    vmovapd %ymm1, %ymm0
+; CHECK-NEXT:    vbroadcasti64x4 {{.*#+}} zmm1 = [0,2,1,4,0,2,1,4]
+; CHECK-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3]
+; CHECK-NEXT:    vpermq %zmm0, %zmm1, %zmm0
+; CHECK-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
 ; CHECK-NEXT:    retq
   %res = shufflevector <8 x double> %vec, <8 x double> undef, <4 x i32> <i32 0, i32 2, i32 1, i32 4>
   ret <4 x double> %res
@@ -3777,12 +3777,12 @@ define <4 x double> @test_8xdouble_to_4xdouble_perm_mask3(<8 x double> %vec) {
 define <4 x double> @test_masked_8xdouble_to_4xdouble_perm_mask3(<8 x double> %vec, <4 x double> %vec2, <4 x double> %mask) {
 ; CHECK-LABEL: test_masked_8xdouble_to_4xdouble_perm_mask3:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vextractf32x4 $2, %zmm0, %xmm3
-; CHECK-NEXT:    vmovapd {{.*#+}} ymm4 = [0,2,1,4]
-; CHECK-NEXT:    vpermi2pd %ymm3, %ymm0, %ymm4
-; CHECK-NEXT:    vxorpd %xmm0, %xmm0, %xmm0
-; CHECK-NEXT:    vcmpeqpd %ymm0, %ymm2, %k1
-; CHECK-NEXT:    vblendmpd %ymm4, %ymm1, %ymm0 {%k1}
+; CHECK-NEXT:    vbroadcasti64x4 {{.*#+}} zmm3 = [0,2,1,4,0,2,1,4]
+; CHECK-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3]
+; CHECK-NEXT:    vpermq %zmm0, %zmm3, %zmm0
+; CHECK-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; CHECK-NEXT:    vcmpeqpd %ymm3, %ymm2, %k1
+; CHECK-NEXT:    vblendmpd %ymm0, %ymm1, %ymm0 {%k1}
 ; CHECK-NEXT:    retq
   %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <4 x i32> <i32 0, i32 2, i32 1, i32 4>
   %cmp = fcmp oeq <4 x double> %mask, zeroinitializer
@@ -3793,12 +3793,11 @@ define <4 x double> @test_masked_8xdouble_to_4xdouble_perm_mask3(<8 x double> %v
 define <4 x double> @test_masked_z_8xdouble_to_4xdouble_perm_mask3(<8 x double> %vec, <4 x double> %mask) {
 ; CHECK-LABEL: test_masked_z_8xdouble_to_4xdouble_perm_mask3:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vextractf32x4 $2, %zmm0, %xmm3
 ; CHECK-NEXT:    vmovapd {{.*#+}} ymm2 = [0,2,1,4]
-; CHECK-NEXT:    vxorpd %xmm4, %xmm4, %xmm4
-; CHECK-NEXT:    vcmpeqpd %ymm4, %ymm1, %k1
-; CHECK-NEXT:    vpermi2pd %ymm3, %ymm0, %ymm2 {%k1} {z}
-; CHECK-NEXT:    vmovapd %ymm2, %ymm0
+; CHECK-NEXT:    vxorpd %xmm3, %xmm3, %xmm3
+; CHECK-NEXT:    vcmpeqpd %ymm3, %ymm1, %k1
+; CHECK-NEXT:    vpermpd %zmm0, %zmm2, %zmm0 {%k1} {z}
+; CHECK-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
 ; CHECK-NEXT:    retq
   %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <4 x i32> <i32 0, i32 2, i32 1, i32 4>
   %cmp = fcmp oeq <4 x double> %mask, zeroinitializer
@@ -3869,9 +3868,9 @@ define <4 x double> @test_masked_z_8xdouble_to_4xdouble_perm_mask5(<8 x double>
 define <4 x double> @test_8xdouble_to_4xdouble_perm_mask6(<8 x double> %vec) {
 ; CHECK-LABEL: test_8xdouble_to_4xdouble_perm_mask6:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vbroadcasti64x4 {{.*#+}} zmm1 = [5,0,7,0,5,0,7,0]
+; CHECK-NEXT:    vbroadcasti64x4 {{.*#+}} zmm1 = [5,8,7,8,5,8,7,8]
 ; CHECK-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3]
-; CHECK-NEXT:    vpermq %zmm0, %zmm1, %zmm0
+; CHECK-NEXT:    vpermt2pd %zmm0, %zmm1, %zmm0
 ; CHECK-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
 ; CHECK-NEXT:    retq
   %res = shufflevector <8 x double> %vec, <8 x double> undef, <4 x i32> <i32 5, i32 0, i32 7, i32 0>
@@ -3880,12 +3879,12 @@ define <4 x double> @test_8xdouble_to_4xdouble_perm_mask6(<8 x double> %vec) {
 define <4 x double> @test_masked_8xdouble_to_4xdouble_perm_mask6(<8 x double> %vec, <4 x double> %vec2, <4 x double> %mask) {
 ; CHECK-LABEL: test_masked_8xdouble_to_4xdouble_perm_mask6:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vbroadcasti64x4 {{.*#+}} zmm3 = [5,0,7,0,5,0,7,0]
+; CHECK-NEXT:    vbroadcasti64x4 {{.*#+}} zmm3 = [5,8,7,8,5,8,7,8]
 ; CHECK-NEXT:    # zmm3 = mem[0,1,2,3,0,1,2,3]
-; CHECK-NEXT:    vpermq %zmm0, %zmm3, %zmm0
-; CHECK-NEXT:    vpxor %xmm3, %xmm3, %xmm3
-; CHECK-NEXT:    vcmpeqpd %ymm3, %ymm2, %k1
-; CHECK-NEXT:    vblendmpd %ymm0, %ymm1, %ymm0 {%k1}
+; CHECK-NEXT:    vpermi2pd %zmm0, %zmm0, %zmm3
+; CHECK-NEXT:    vxorpd %xmm0, %xmm0, %xmm0
+; CHECK-NEXT:    vcmpeqpd %ymm0, %ymm2, %k1
+; CHECK-NEXT:    vblendmpd %ymm3, %ymm1, %ymm0 {%k1}
 ; CHECK-NEXT:    retq
   %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <4 x i32> <i32 5, i32 0, i32 7, i32 0>
   %cmp = fcmp oeq <4 x double> %mask, zeroinitializer
@@ -3896,10 +3895,10 @@ define <4 x double> @test_masked_8xdouble_to_4xdouble_perm_mask6(<8 x double> %v
 define <4 x double> @test_masked_z_8xdouble_to_4xdouble_perm_mask6(<8 x double> %vec, <4 x double> %mask) {
 ; CHECK-LABEL: test_masked_z_8xdouble_to_4xdouble_perm_mask6:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vmovapd {{.*#+}} ymm2 = [5,0,7,0]
+; CHECK-NEXT:    vmovapd {{.*#+}} ymm2 = [5,8,7,8]
 ; CHECK-NEXT:    vxorpd %xmm3, %xmm3, %xmm3
 ; CHECK-NEXT:    vcmpeqpd %ymm3, %ymm1, %k1
-; CHECK-NEXT:    vpermpd %zmm0, %zmm2, %zmm0 {%k1} {z}
+; CHECK-NEXT:    vpermt2pd %zmm0, %zmm2, %zmm0 {%k1} {z}
 ; CHECK-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
 ; CHECK-NEXT:    retq
   %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <4 x i32> <i32 5, i32 0, i32 7, i32 0>
@@ -3983,9 +3982,8 @@ define <2 x double> @test_masked_z_8xdouble_to_2xdouble_perm_mask0(<8 x double>
 define <2 x double> @test_masked_8xdouble_to_2xdouble_perm_mask1(<8 x double> %vec, <2 x double> %vec2, <2 x double> %mask) {
 ; CHECK-LABEL: test_masked_8xdouble_to_2xdouble_perm_mask1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vextractf64x4 $1, %zmm0, %ymm3
-; CHECK-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm3[1],ymm0[3],ymm3[3]
-; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm0
+; CHECK-NEXT:    vmovapd {{.*#+}} xmm3 = [3,7]
+; CHECK-NEXT:    vpermpd %zmm0, %zmm3, %zmm0
 ; CHECK-NEXT:    vxorpd %xmm3, %xmm3, %xmm3
 ; CHECK-NEXT:    vcmpeqpd %xmm3, %xmm2, %k1
 ; CHECK-NEXT:    vblendmpd %xmm0, %xmm1, %xmm0 {%k1}
@@ -4000,12 +3998,11 @@ define <2 x double> @test_masked_8xdouble_to_2xdouble_perm_mask1(<8 x double> %v
 define <2 x double> @test_masked_z_8xdouble_to_2xdouble_perm_mask1(<8 x double> %vec, <2 x double> %mask) {
 ; CHECK-LABEL: test_masked_z_8xdouble_to_2xdouble_perm_mask1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vextractf64x4 $1, %zmm0, %ymm2
-; CHECK-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm2[1],ymm0[3],ymm2[3]
-; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm0
-; CHECK-NEXT:    vxorpd %xmm2, %xmm2, %xmm2
-; CHECK-NEXT:    vcmpeqpd %xmm2, %xmm1, %k1
-; CHECK-NEXT:    vmovapd %xmm0, %xmm0 {%k1} {z}
+; CHECK-NEXT:    vmovapd {{.*#+}} xmm2 = [3,7]
+; CHECK-NEXT:    vxorpd %xmm3, %xmm3, %xmm3
+; CHECK-NEXT:    vcmpeqpd %xmm3, %xmm1, %k1
+; CHECK-NEXT:    vpermpd %zmm0, %zmm2, %zmm0 {%k1} {z}
+; CHECK-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
   %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <2 x i32> <i32 3, i32 7>
@@ -4062,8 +4059,8 @@ define <4 x double> @test_masked_8xdouble_to_4xdouble_perm_mem_mask1(<8 x double
 ; CHECK-LABEL: test_masked_8xdouble_to_4xdouble_perm_mem_mask1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovapd (%rdi), %ymm2
-; CHECK-NEXT:    vmovapd {{.*#+}} ymm3 = [3,4,2,4]
-; CHECK-NEXT:    vpermi2pd 32(%rdi), %ymm2, %ymm3
+; CHECK-NEXT:    vmovapd {{.*#+}} ymm3 = [3,4,2,6]
+; CHECK-NEXT:    vpermi2pd 32(%rdi){1to4}, %ymm2, %ymm3
 ; CHECK-NEXT:    vxorpd %xmm2, %xmm2, %xmm2
 ; CHECK-NEXT:    vcmpeqpd %ymm2, %ymm1, %k1
 ; CHECK-NEXT:    vmovapd %ymm3, %ymm0 {%k1}
@@ -4079,10 +4076,10 @@ define <4 x double> @test_masked_z_8xdouble_to_4xdouble_perm_mem_mask1(<8 x doub
 ; CHECK-LABEL: test_masked_z_8xdouble_to_4xdouble_perm_mem_mask1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovapd (%rdi), %ymm2
-; CHECK-NEXT:    vmovapd {{.*#+}} ymm1 = [3,4,2,4]
+; CHECK-NEXT:    vmovapd {{.*#+}} ymm1 = [3,4,2,6]
 ; CHECK-NEXT:    vxorpd %xmm3, %xmm3, %xmm3
 ; CHECK-NEXT:    vcmpeqpd %ymm3, %ymm0, %k1
-; CHECK-NEXT:    vpermi2pd 32(%rdi), %ymm2, %ymm1 {%k1} {z}
+; CHECK-NEXT:    vpermi2pd 32(%rdi){1to4}, %ymm2, %ymm1 {%k1} {z}
 ; CHECK-NEXT:    vmovapd %ymm1, %ymm0
 ; CHECK-NEXT:    retq
   %vec = load <8 x double>, <8 x double>* %vp
@@ -4242,10 +4239,9 @@ define <4 x double> @test_masked_z_8xdouble_to_4xdouble_perm_mem_mask5(<8 x doub
 define <4 x double> @test_8xdouble_to_4xdouble_perm_mem_mask6(<8 x double>* %vp) {
 ; CHECK-LABEL: test_8xdouble_to_4xdouble_perm_mem_mask6:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vinsertf128 $1, (%rdi), %ymm0, %ymm1
-; CHECK-NEXT:    vmovapd 32(%rdi), %ymm2
-; CHECK-NEXT:    vmovapd {{.*#+}} ymm0 = [0,2,6,1]
-; CHECK-NEXT:    vpermi2pd %ymm1, %ymm2, %ymm0
+; CHECK-NEXT:    vmovapd 32(%rdi), %ymm1
+; CHECK-NEXT:    vmovapd {{.*#+}} ymm0 = [0,2,4,1]
+; CHECK-NEXT:    vpermi2pd (%rdi), %ymm1, %ymm0
 ; CHECK-NEXT:    retq
   %vec = load <8 x double>, <8 x double>* %vp
   %res = shufflevector <8 x double> %vec, <8 x double> undef, <4 x i32> <i32 4, i32 6, i32 0, i32 5>
@@ -4254,13 +4250,12 @@ define <4 x double> @test_8xdouble_to_4xdouble_perm_mem_mask6(<8 x double>* %vp)
 define <4 x double> @test_masked_8xdouble_to_4xdouble_perm_mem_mask6(<8 x double>* %vp, <4 x double> %vec2, <4 x double> %mask) {
 ; CHECK-LABEL: test_masked_8xdouble_to_4xdouble_perm_mem_mask6:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vinsertf128 $1, (%rdi), %ymm0, %ymm2
-; CHECK-NEXT:    vmovapd 32(%rdi), %ymm3
-; CHECK-NEXT:    vmovapd {{.*#+}} ymm4 = [0,2,6,1]
-; CHECK-NEXT:    vpermi2pd %ymm2, %ymm3, %ymm4
+; CHECK-NEXT:    vmovapd 32(%rdi), %ymm2
+; CHECK-NEXT:    vmovapd {{.*#+}} ymm3 = [0,2,4,1]
+; CHECK-NEXT:    vpermi2pd (%rdi), %ymm2, %ymm3
 ; CHECK-NEXT:    vxorpd %xmm2, %xmm2, %xmm2
 ; CHECK-NEXT:    vcmpeqpd %ymm2, %ymm1, %k1
-; CHECK-NEXT:    vmovapd %ymm4, %ymm0 {%k1}
+; CHECK-NEXT:    vmovapd %ymm3, %ymm0 {%k1}
 ; CHECK-NEXT:    retq
   %vec = load <8 x double>, <8 x double>* %vp
   %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <4 x i32> <i32 4, i32 6, i32 0, i32 5>
@@ -4272,12 +4267,11 @@ define <4 x double> @test_masked_8xdouble_to_4xdouble_perm_mem_mask6(<8 x double
 define <4 x double> @test_masked_z_8xdouble_to_4xdouble_perm_mem_mask6(<8 x double>* %vp, <4 x double> %mask) {
 ; CHECK-LABEL: test_masked_z_8xdouble_to_4xdouble_perm_mem_mask6:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vinsertf128 $1, (%rdi), %ymm0, %ymm2
-; CHECK-NEXT:    vmovapd 32(%rdi), %ymm3
-; CHECK-NEXT:    vmovapd {{.*#+}} ymm1 = [0,2,6,1]
-; CHECK-NEXT:    vxorpd %xmm4, %xmm4, %xmm4
-; CHECK-NEXT:    vcmpeqpd %ymm4, %ymm0, %k1
-; CHECK-NEXT:    vpermi2pd %ymm2, %ymm3, %ymm1 {%k1} {z}
+; CHECK-NEXT:    vmovapd 32(%rdi), %ymm2
+; CHECK-NEXT:    vmovapd {{.*#+}} ymm1 = [0,2,4,1]
+; CHECK-NEXT:    vxorpd %xmm3, %xmm3, %xmm3
+; CHECK-NEXT:    vcmpeqpd %ymm3, %ymm0, %k1
+; CHECK-NEXT:    vpermi2pd (%rdi), %ymm2, %ymm1 {%k1} {z}
 ; CHECK-NEXT:    vmovapd %ymm1, %ymm0
 ; CHECK-NEXT:    retq
   %vec = load <8 x double>, <8 x double>* %vp
diff --git a/llvm/test/CodeGen/X86/subvector-broadcast.ll b/llvm/test/CodeGen/X86/subvector-broadcast.ll
index 567c438bd5b0..430d152e31c5 100644
--- a/llvm/test/CodeGen/X86/subvector-broadcast.ll
+++ b/llvm/test/CodeGen/X86/subvector-broadcast.ll
@@ -1462,13 +1462,13 @@ define <8 x float> @broadcast_v8f32_v2f32_u1uu0uEu(<2 x float>* %vp, <8 x float>
 ; X32:       # %bb.0:
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    vbroadcastsd (%eax), %ymm1
-; X32-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
+; X32-NEXT:    vshufpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[3]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: broadcast_v8f32_v2f32_u1uu0uEu:
 ; X64:       # %bb.0:
 ; X64-NEXT:    vbroadcastsd (%rdi), %ymm1
-; X64-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7]
+; X64-NEXT:    vshufpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[3]
 ; X64-NEXT:    retq
   %vec = load <2 x float>, <2 x float>* %vp
   %shuf = shufflevector <2 x float> %vec, <2 x float> undef, <8 x i32> <i32 undef, i32 1, i32 undef, i32 undef, i32 0, i32 undef, i32 undef, i32 undef>
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-256-v4.ll b/llvm/test/CodeGen/X86/vector-shuffle-256-v4.ll
index cdadbaff06f6..5195f5f0e0c7 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-256-v4.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-256-v4.ll
@@ -624,23 +624,16 @@ define <4 x double> @shuffle_v4f64_3333(<4 x double> %a, <4 x double> %b) {
 }
 
 define <4 x double> @shuffle_v4f64_0456(<4 x double> %a, <4 x double> %b) {
-; AVX1-LABEL: shuffle_v4f64_0456:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm2
-; AVX1-NEXT:    vshufpd {{.*#+}} ymm1 = ymm2[0],ymm1[0],ymm2[3],ymm1[2]
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3]
-; AVX1-NEXT:    retq
-;
-; AVX2-LABEL: shuffle_v4f64_0456:
-; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,0,1,2]
-; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
-; AVX2-NEXT:    retq
+; AVX1OR2-LABEL: shuffle_v4f64_0456:
+; AVX1OR2:       # %bb.0:
+; AVX1OR2-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1OR2-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[3],ymm1[2]
+; AVX1OR2-NEXT:    retq
 ;
 ; AVX512VL-SLOW-LABEL: shuffle_v4f64_0456:
 ; AVX512VL-SLOW:       # %bb.0:
-; AVX512VL-SLOW-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,0,1,2]
-; AVX512VL-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
+; AVX512VL-SLOW-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX512VL-SLOW-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[3],ymm1[2]
 ; AVX512VL-SLOW-NEXT:    retq
 ;
 ; AVX512VL-FAST-LABEL: shuffle_v4f64_0456:
@@ -945,9 +938,8 @@ define <4 x i64> @shuffle_v4i64_0213(<4 x i64> %a, <4 x i64> %b) {
 define <4 x i64> @shuffle_v4i64_0124(<4 x i64> %a, <4 x i64> %b) {
 ; AVX1-LABEL: shuffle_v4i64_0124:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm1 = xmm1[0,1,0,1]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
+; AVX1-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[2]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: shuffle_v4i64_0124:
@@ -975,8 +967,7 @@ define <4 x i64> @shuffle_v4i64_0142(<4 x i64> %a, <4 x i64> %b) {
 ; AVX1-LABEL: shuffle_v4i64_0142:
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX1-NEXT:    vpermilpd {{.*#+}} ymm0 = ymm0[0,1,2,2]
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3]
+; AVX1-NEXT:    vshufpd {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[2]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: shuffle_v4i64_0142:
@@ -998,10 +989,9 @@ define <4 x i64> @shuffle_v4i64_0142(<4 x i64> %a, <4 x i64> %b) {
 define <4 x i64> @shuffle_v4i64_0412(<4 x i64> %a, <4 x i64> %b) {
 ; AVX1-LABEL: shuffle_v4i64_0412:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm1 = xmm1[0,1,0,1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm2
-; AVX1-NEXT:    vshufpd {{.*#+}} ymm0 = ymm2[0],ymm0[0],ymm2[3],ymm0[2]
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3]
+; AVX1-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0,1],ymm0[2],ymm1[3]
+; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX1-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[3],ymm1[2]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: shuffle_v4i64_0412:
@@ -1023,9 +1013,8 @@ define <4 x i64> @shuffle_v4i64_0412(<4 x i64> %a, <4 x i64> %b) {
 define <4 x i64> @shuffle_v4i64_4012(<4 x i64> %a, <4 x i64> %b) {
 ; AVX1-LABEL: shuffle_v4i64_4012:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm2
-; AVX1-NEXT:    vshufpd {{.*#+}} ymm0 = ymm2[0],ymm0[0],ymm2[3],ymm0[2]
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3]
+; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm1
+; AVX1-NEXT:    vshufpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[3],ymm0[2]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: shuffle_v4i64_4012:
@@ -1118,9 +1107,9 @@ define <4 x i64> @shuffle_v4i64_4015(<4 x i64> %a, <4 x i64> %b) {
 define <4 x i64> @shuffle_v4i64_2u35(<4 x i64> %a, <4 x i64> %b) {
 ; AVX1-LABEL: shuffle_v4i64_2u35:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT:    vunpckhpd {{.*#+}} xmm1 = xmm0[1],xmm1[1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX1-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[3],ymm1[3]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: shuffle_v4i64_2u35:
@@ -1148,10 +1137,8 @@ define <4 x i64> @shuffle_v4i64_1251(<4 x i64> %a, <4 x i64> %b) {
 ; AVX1-LABEL: shuffle_v4i64_1251:
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm2 = ymm0[2,3,0,1]
-; AVX1-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[1],ymm2[0],ymm0[2],ymm2[3]
-; AVX1-NEXT:    vpermilps {{.*#+}} xmm1 = xmm1[2,3,0,1]
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3]
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[1],ymm2[0],ymm0[3],ymm2[3]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: shuffle_v4i64_1251:
@@ -1779,39 +1766,12 @@ define <4 x i64> @shuffle_v4i64_1z2z(<4 x i64> %a, <4 x i64> %b) {
 }
 
 define <4 x double> @add_v4f64_0246_1357(<4 x double> %a, <4 x double> %b) {
-; AVX1-LABEL: add_v4f64_0246_1357:
-; AVX1:       # %bb.0: # %entry
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm2 = ymm0[2,3],ymm1[2,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1-NEXT:    vhaddpd %ymm2, %ymm0, %ymm0
-; AVX1-NEXT:    retq
-;
-; AVX2-LABEL: add_v4f64_0246_1357:
-; AVX2:       # %bb.0: # %entry
-; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm2 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
-; AVX2-NEXT:    vpermpd {{.*#+}} ymm2 = ymm2[0,2,1,3]
-; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
-; AVX2-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,2,1,3]
-; AVX2-NEXT:    vaddpd %ymm0, %ymm2, %ymm0
-; AVX2-NEXT:    retq
-;
-; AVX512VL-SLOW-LABEL: add_v4f64_0246_1357:
-; AVX512VL-SLOW:       # %bb.0: # %entry
-; AVX512VL-SLOW-NEXT:    vunpcklpd {{.*#+}} ymm2 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
-; AVX512VL-SLOW-NEXT:    vpermpd {{.*#+}} ymm2 = ymm2[0,2,1,3]
-; AVX512VL-SLOW-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
-; AVX512VL-SLOW-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,2,1,3]
-; AVX512VL-SLOW-NEXT:    vaddpd %ymm0, %ymm2, %ymm0
-; AVX512VL-SLOW-NEXT:    retq
-;
-; AVX512VL-FAST-LABEL: add_v4f64_0246_1357:
-; AVX512VL-FAST:       # %bb.0: # %entry
-; AVX512VL-FAST-NEXT:    vmovapd {{.*#+}} ymm2 = [0,2,4,6]
-; AVX512VL-FAST-NEXT:    vpermi2pd %ymm1, %ymm0, %ymm2
-; AVX512VL-FAST-NEXT:    vmovapd {{.*#+}} ymm3 = [1,3,5,7]
-; AVX512VL-FAST-NEXT:    vpermi2pd %ymm1, %ymm0, %ymm3
-; AVX512VL-FAST-NEXT:    vaddpd %ymm3, %ymm2, %ymm0
-; AVX512VL-FAST-NEXT:    retq
+; ALL-LABEL: add_v4f64_0246_1357:
+; ALL:       # %bb.0: # %entry
+; ALL-NEXT:    vperm2f128 {{.*#+}} ymm2 = ymm0[2,3],ymm1[2,3]
+; ALL-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; ALL-NEXT:    vhaddpd %ymm2, %ymm0, %ymm0
+; ALL-NEXT:    retq
 entry:
   %shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
   %shuffle1 = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
@@ -1820,39 +1780,12 @@ entry:
 }
 
 define <4 x double> @add_v4f64_4602_5713(<4 x double> %a, <4 x double> %b) {
-; AVX1-LABEL: add_v4f64_4602_5713:
-; AVX1:       # %bb.0: # %entry
-; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm2 = ymm1[2,3],ymm0[2,3]
-; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX1-NEXT:    vhaddpd %ymm2, %ymm0, %ymm0
-; AVX1-NEXT:    retq
-;
-; AVX2-LABEL: add_v4f64_4602_5713:
-; AVX2:       # %bb.0: # %entry
-; AVX2-NEXT:    vunpcklpd {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX2-NEXT:    vpermpd {{.*#+}} ymm2 = ymm2[0,2,1,3]
-; AVX2-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX2-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,2,1,3]
-; AVX2-NEXT:    vaddpd %ymm0, %ymm2, %ymm0
-; AVX2-NEXT:    retq
-;
-; AVX512VL-SLOW-LABEL: add_v4f64_4602_5713:
-; AVX512VL-SLOW:       # %bb.0: # %entry
-; AVX512VL-SLOW-NEXT:    vunpcklpd {{.*#+}} ymm2 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
-; AVX512VL-SLOW-NEXT:    vpermpd {{.*#+}} ymm2 = ymm2[0,2,1,3]
-; AVX512VL-SLOW-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3]
-; AVX512VL-SLOW-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,2,1,3]
-; AVX512VL-SLOW-NEXT:    vaddpd %ymm0, %ymm2, %ymm0
-; AVX512VL-SLOW-NEXT:    retq
-;
-; AVX512VL-FAST-LABEL: add_v4f64_4602_5713:
-; AVX512VL-FAST:       # %bb.0: # %entry
-; AVX512VL-FAST-NEXT:    vmovapd {{.*#+}} ymm2 = [0,2,4,6]
-; AVX512VL-FAST-NEXT:    vpermi2pd %ymm0, %ymm1, %ymm2
-; AVX512VL-FAST-NEXT:    vmovapd {{.*#+}} ymm3 = [1,3,5,7]
-; AVX512VL-FAST-NEXT:    vpermi2pd %ymm0, %ymm1, %ymm3
-; AVX512VL-FAST-NEXT:    vaddpd %ymm3, %ymm2, %ymm0
-; AVX512VL-FAST-NEXT:    retq
+; ALL-LABEL: add_v4f64_4602_5713:
+; ALL:       # %bb.0: # %entry
+; ALL-NEXT:    vperm2f128 {{.*#+}} ymm2 = ymm1[2,3],ymm0[2,3]
+; ALL-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; ALL-NEXT:    vhaddpd %ymm2, %ymm0, %ymm0
+; ALL-NEXT:    retq
 entry:
   %shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 4, i32 6, i32 0, i32 2>
   %shuffle1 = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 5, i32 7, i32 1, i32 3>
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-256-v8.ll b/llvm/test/CodeGen/X86/vector-shuffle-256-v8.ll
index b2b157fef4a2..99429560d0da 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-256-v8.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-256-v8.ll
@@ -1196,23 +1196,16 @@ define <8 x float> @shuffle_v8f32_089abcde(<8 x float> %a, <8 x float> %b) {
 }
 
 define <8 x float> @shuffle_v8f32_0189abcd(<8 x float> %a, <8 x float> %b) {
-; AVX1-LABEL: shuffle_v8f32_0189abcd:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm2
-; AVX1-NEXT:    vshufpd {{.*#+}} ymm1 = ymm2[0],ymm1[0],ymm2[3],ymm1[2]
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3]
-; AVX1-NEXT:    retq
-;
-; AVX2-LABEL: shuffle_v8f32_0189abcd:
-; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,0,1,2]
-; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
-; AVX2-NEXT:    retq
+; AVX1OR2-LABEL: shuffle_v8f32_0189abcd:
+; AVX1OR2:       # %bb.0:
+; AVX1OR2-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1OR2-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[3],ymm1[2]
+; AVX1OR2-NEXT:    retq
 ;
 ; AVX512VL-SLOW-LABEL: shuffle_v8f32_0189abcd:
 ; AVX512VL-SLOW:       # %bb.0:
-; AVX512VL-SLOW-NEXT:    vpermpd {{.*#+}} ymm1 = ymm1[0,0,1,2]
-; AVX512VL-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
+; AVX512VL-SLOW-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX512VL-SLOW-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[3],ymm1[2]
 ; AVX512VL-SLOW-NEXT:    retq
 ;
 ; AVX512VL-FAST-LABEL: shuffle_v8f32_0189abcd:
@@ -2502,9 +2495,8 @@ define <8 x i32> @shuffle_v8i32_089abcde(<8 x i32> %a, <8 x i32> %b) {
 define <8 x i32> @shuffle_v8i32_0189abcd(<8 x i32> %a, <8 x i32> %b) {
 ; AVX1-LABEL: shuffle_v8i32_0189abcd:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm2
-; AVX1-NEXT:    vshufpd {{.*#+}} ymm1 = ymm2[0],ymm1[0],ymm2[3],ymm1[2]
-; AVX1-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3]
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT:    vshufpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[3],ymm1[2]
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: shuffle_v8i32_0189abcd: