[x86] allow more shuffle splitting to avoid vpermps (PR40434)
This is tricky to make optimal: sometimes we're better off using a single
wider op, but other times it makes more sense to combine narrow ops to
achieve the same result.

This solves the case from:
https://bugs.llvm.org/show_bug.cgi?id=40434

There's potentially a similar change for vectors with 64-bit elements, but
it needs adjustments similar to rL352333 to avoid creating infinite loops.

llvm-svn: 352380
parent 7d6fd6d73d
commit 94cca60b82
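To make the tradeoff concrete, here is a scalar model of the PR40434 shuffle <2,14,3,15,u,u,u,u> that contrasts the wide lowering AVX2 used before this patch (vblendps + vpermps, plus a constant-pool load for the index vector) with the split lowering the patch enables (vextractf128 + vunpckhps). This is an illustrative sketch, not compiler code; the names widePath/narrowPath are made up for the example, and plain integers stand in for 32-bit lanes.

    // Scalar model of shufflevector <8 x i32> %x, %y, <2,14,3,15,u,u,u,u>.
    // Only the four defined result lanes are modeled.
    #include <array>
    #include <cassert>

    using V8 = std::array<int, 8>;
    using V4 = std::array<int, 4>;

    // Old AVX2 lowering: vblendps merges y[6],y[7] into x, then one
    // cross-lane vpermps with control <2,6,3,7> picks the result lanes.
    V4 widePath(const V8 &x, const V8 &y) {
      V8 b = x;
      b[6] = y[6];                         // vblendps ymm0[0..5],ymm1[6,7]
      b[7] = y[7];
      return {b[2], b[6], b[3], b[7]};     // vpermps <2,6,3,7,u,u,u,u>
    }

    // New lowering: extract the high 128 bits of y, then one in-lane
    // vunpckhps, which interleaves lanes 2,3 of its two sources.
    V4 narrowPath(const V8 &x, const V8 &y) {
      V4 hi = {y[4], y[5], y[6], y[7]};    // vextractf128 $1
      return {x[2], hi[2], x[3], hi[3]};   // vunpckhps
    }

    int main() {
      V8 x = {0, 1, 2, 3, 4, 5, 6, 7};
      V8 y = {10, 11, 12, 13, 14, 15, 16, 17};
      assert(widePath(x, y) == narrowPath(x, y)); // both give {2,16,3,17}
      return 0;
    }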
@@ -14487,8 +14487,10 @@ static SDValue lowerShuffleWithUndefHalf(const SDLoc &DL, MVT VT, SDValue V1,
     if (NumUpperHalves == 1) {
       // AVX2 has efficient 32/64-bit element cross-lane shuffles.
       if (Subtarget.hasAVX2()) {
         // extract128 + vunpckhps, is better than vblend + vpermps.
-        if (EltWidth == 32 && NumLowerHalves == 1)
+        // TODO: Refine to account for unary shuffle, splat, and other masks?
+        if (EltWidth == 32 && NumLowerHalves &&
+            HalfVT.is128BitVector() && !is128BitUnpackShuffleMask(HalfMask))
           return SDValue();
         if (EltWidth == 64)
           return SDValue();
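The key to the new guard is the unpack-mask test: splitting is rejected (return SDValue(), deferring to the wide lowering) only when the half mask is not a single 128-bit unpack, so masks like the unpckh tests below still take the cheap extract128 + vunpckhps path. Below is a minimal sketch of what an is128BitUnpackShuffleMask-style predicate checks, assuming a plain integer mask with -1 for undef; the in-tree helper works on legalized vector types and shuffle-equivalence queries rather than raw arrays.

    // Sketch of an is128BitUnpackShuffleMask-style check for a 4 x 32-bit
    // half. unpcklps produces {a[0],b[0],a[1],b[1]} and unpckhps produces
    // {a[2],b[2],a[3],b[3]}; with two concatenated 4-element sources the
    // reference masks are <0,4,1,5> and <2,6,3,7>. -1 means undef.
    #include <array>

    static bool matches(const std::array<int, 4> &m,
                        const std::array<int, 4> &ref) {
      for (int i = 0; i < 4; ++i)
        if (m[i] != -1 && m[i] != ref[i])
          return false;                    // a defined lane disagrees
      return true;
    }

    bool is128BitUnpackMask(const std::array<int, 4> &m) {
      return matches(m, {0, 4, 1, 5}) ||   // unpcklps pattern
             matches(m, {2, 6, 3, 7});     // unpckhps pattern
    }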
@@ -2878,143 +2878,76 @@ entry:
   ret <8 x float> %tmp6
 }
 
-; FIXME: AVX1 lowering is better than AVX2 (and AVX512?)
 ; PR40434: https://bugs.llvm.org/show_bug.cgi?id=40434
 
 define <8 x i32> @unpckh_v8i32(<8 x i32> %x, <8 x i32> %y) {
-; AVX1-LABEL: unpckh_v8i32:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm1
-; AVX1-NEXT:    vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; AVX1-NEXT:    retq
-;
-; AVX2-LABEL: unpckh_v8i32:
-; AVX2:       # %bb.0:
-; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
-; AVX2-NEXT:    vmovaps {{.*#+}} ymm1 = <2,6,3,7,u,u,u,u>
-; AVX2-NEXT:    vpermps %ymm0, %ymm1, %ymm0
-; AVX2-NEXT:    retq
-;
-; AVX512VL-LABEL: unpckh_v8i32:
-; AVX512VL:       # %bb.0:
-; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm2 = <2,14,3,15,u,u,u,u>
-; AVX512VL-NEXT:    vpermt2d %ymm1, %ymm2, %ymm0
-; AVX512VL-NEXT:    retq
+; ALL-LABEL: unpckh_v8i32:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vextractf128 $1, %ymm1, %xmm1
+; ALL-NEXT:    vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; ALL-NEXT:    retq
   %unpckh = shufflevector <8 x i32> %x, <8 x i32> %y, <8 x i32> <i32 2, i32 14, i32 3, i32 15, i32 undef, i32 undef, i32 undef, i32 undef>
   ret <8 x i32> %unpckh
 }
 
-; FIXME: Same as above but with floats. AVX1 lowering is better than AVX2 (and AVX512?)
+; Same as above but with floats.
 
 define <8 x float> @unpckh_v8f32(<8 x float> %x, <8 x float> %y) {
-; AVX1-LABEL: unpckh_v8f32:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm1
-; AVX1-NEXT:    vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; AVX1-NEXT:    retq
-;
-; AVX2-LABEL: unpckh_v8f32:
-; AVX2:       # %bb.0:
-; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
-; AVX2-NEXT:    vmovaps {{.*#+}} ymm1 = <2,6,3,7,u,u,u,u>
-; AVX2-NEXT:    vpermps %ymm0, %ymm1, %ymm0
-; AVX2-NEXT:    retq
-;
-; AVX512VL-LABEL: unpckh_v8f32:
-; AVX512VL:       # %bb.0:
-; AVX512VL-NEXT:    vmovaps {{.*#+}} ymm2 = <2,14,3,15,u,u,u,u>
-; AVX512VL-NEXT:    vpermt2ps %ymm1, %ymm2, %ymm0
-; AVX512VL-NEXT:    retq
+; ALL-LABEL: unpckh_v8f32:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vextractf128 $1, %ymm1, %xmm1
+; ALL-NEXT:    vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; ALL-NEXT:    retq
   %unpckh = shufflevector <8 x float> %x, <8 x float> %y, <8 x i32> <i32 2, i32 14, i32 3, i32 15, i32 undef, i32 undef, i32 undef, i32 undef>
   ret <8 x float> %unpckh
 }
 
-; FIXME: AVX1 lowering is better than AVX2 (and AVX512?)
 ; Alternate form of the above - make sure we don't have conflicting transforms.
 
 define <8 x i32> @blend_perm_v8i32(<8 x i32> %x, <8 x i32> %y) {
-; AVX1-LABEL: blend_perm_v8i32:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm1
-; AVX1-NEXT:    vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; AVX1-NEXT:    retq
-;
-; AVX2-LABEL: blend_perm_v8i32:
-; AVX2:       # %bb.0:
-; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
-; AVX2-NEXT:    vmovaps {{.*#+}} ymm1 = <2,6,3,7,u,u,u,u>
-; AVX2-NEXT:    vpermps %ymm0, %ymm1, %ymm0
-; AVX2-NEXT:    retq
-;
-; AVX512VL-LABEL: blend_perm_v8i32:
-; AVX512VL:       # %bb.0:
-; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm2 = <2,14,3,15,u,u,u,u>
-; AVX512VL-NEXT:    vpermt2d %ymm1, %ymm2, %ymm0
-; AVX512VL-NEXT:    retq
+; ALL-LABEL: blend_perm_v8i32:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vextractf128 $1, %ymm1, %xmm1
+; ALL-NEXT:    vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; ALL-NEXT:    retq
   %unpckh = shufflevector <8 x i32> %x, <8 x i32> %y, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 14, i32 15>
   %r = shufflevector <8 x i32> %unpckh, <8 x i32> undef, <8 x i32> <i32 2, i32 6, i32 3, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
   ret <8 x i32> %r
 }
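Composing the two shufflevector masks by hand shows why this "alternate form" has to agree with the transform above: the blend + permute pair collapses to exactly the unpckh_v8i32 mask <2,14,3,15>, so a conflicting transform would send the two tests to different lowerings. A quick standalone check over indices (illustrative only):

    // blend_perm_v8i32 applies mask m1 to (x,y), then mask m2 to the result.
    // Composing them index-wise must reproduce the unpckh_v8i32 mask.
    #include <array>
    #include <cassert>

    int main() {
      std::array<int, 8> m1 = {0, 1, 2, 3, 4, 5, 14, 15}; // blend of x and y
      std::array<int, 4> m2 = {2, 6, 3, 7};               // unary permute
      std::array<int, 4> composed;
      for (int i = 0; i < 4; ++i)
        composed[i] = m1[m2[i]];        // r[i] = concat(x,y)[m1[m2[i]]]
      assert((composed == std::array<int, 4>{2, 14, 3, 15}));
      return 0;
    }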
 
-; FIXME: Same as above but with floats. AVX1 lowering is better than AVX2 (and AVX512?)
+; Same as above but with floats.
 
 define <8 x float> @blend_perm_v8f32(<8 x float> %x, <8 x float> %y) {
-; AVX1-LABEL: blend_perm_v8f32:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm1
-; AVX1-NEXT:    vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; AVX1-NEXT:    retq
-;
-; AVX2-LABEL: blend_perm_v8f32:
-; AVX2:       # %bb.0:
-; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
-; AVX2-NEXT:    vmovaps {{.*#+}} ymm1 = <2,6,3,7,u,u,u,u>
-; AVX2-NEXT:    vpermps %ymm0, %ymm1, %ymm0
-; AVX2-NEXT:    retq
-;
-; AVX512VL-LABEL: blend_perm_v8f32:
-; AVX512VL:       # %bb.0:
-; AVX512VL-NEXT:    vmovaps {{.*#+}} ymm2 = <2,14,3,15,u,u,u,u>
-; AVX512VL-NEXT:    vpermt2ps %ymm1, %ymm2, %ymm0
-; AVX512VL-NEXT:    retq
+; ALL-LABEL: blend_perm_v8f32:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vextractf128 $1, %ymm1, %xmm1
+; ALL-NEXT:    vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; ALL-NEXT:    retq
   %unpckh = shufflevector <8 x float> %x, <8 x float> %y, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 14, i32 15>
   %r = shufflevector <8 x float> %unpckh, <8 x float> undef, <8 x i32> <i32 2, i32 6, i32 3, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
   ret <8 x float> %r
 }
 
-; FIXME: AVX1 lowering is better than AVX2/AVX512.
 ; Another variation of the above - make sure we don't have conflicting transforms.
 
 define <8 x i32> @unpckh_v8i32_unary(<8 x i32> %x) {
-; AVX1-LABEL: unpckh_v8i32_unary:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT:    vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; AVX1-NEXT:    retq
-;
-; AVX2OR512VL-LABEL: unpckh_v8i32_unary:
-; AVX2OR512VL:       # %bb.0:
-; AVX2OR512VL-NEXT:    vmovaps {{.*#+}} ymm1 = <2,6,3,7,u,u,u,u>
-; AVX2OR512VL-NEXT:    vpermps %ymm0, %ymm1, %ymm0
-; AVX2OR512VL-NEXT:    retq
+; ALL-LABEL: unpckh_v8i32_unary:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; ALL-NEXT:    vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; ALL-NEXT:    retq
   %r = shufflevector <8 x i32> %x, <8 x i32> undef, <8 x i32> <i32 2, i32 6, i32 3, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
   ret <8 x i32> %r
 }
 
-; FIXME: Same as above but with floats. AVX1 lowering is better than AVX2/AVX512.
+; Same as above but with floats.
 
 define <8 x float> @unpckh_v8f32_unary(<8 x float> %x) {
-; AVX1-LABEL: unpckh_v8f32_unary:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT:    vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; AVX1-NEXT:    retq
-;
-; AVX2OR512VL-LABEL: unpckh_v8f32_unary:
-; AVX2OR512VL:       # %bb.0:
-; AVX2OR512VL-NEXT:    vmovaps {{.*#+}} ymm1 = <2,6,3,7,u,u,u,u>
-; AVX2OR512VL-NEXT:    vpermps %ymm0, %ymm1, %ymm0
-; AVX2OR512VL-NEXT:    retq
+; ALL-LABEL: unpckh_v8f32_unary:
+; ALL:       # %bb.0:
+; ALL-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; ALL-NEXT:    vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; ALL-NEXT:    retq
   %r = shufflevector <8 x float> %x, <8 x float> undef, <8 x i32> <i32 2, i32 6, i32 3, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
   ret <8 x float> %r
 }
@@ -311,12 +311,12 @@ define <4 x i32> @test_v16i32_0_1_2_12 (<16 x i32> %v) {
 define <4 x i32> @test_v16i32_0_4_8_12(<16 x i32> %v) {
 ; ALL-LABEL: test_v16i32_0_4_8_12:
 ; ALL:       # %bb.0:
-; ALL-NEXT:    vextractf64x4 $1, %zmm0, %ymm1
+; ALL-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; ALL-NEXT:    vunpcklps {{.*#+}} xmm1 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; ALL-NEXT:    vextractf64x4 $1, %zmm0, %ymm0
 ; ALL-NEXT:    vmovaps {{.*#+}} ymm2 = <u,u,0,4,u,u,u,u>
-; ALL-NEXT:    vpermps %ymm1, %ymm2, %ymm1
-; ALL-NEXT:    vmovaps {{.*#+}} ymm2 = <0,4,u,u,u,u,u,u>
 ; ALL-NEXT:    vpermps %ymm0, %ymm2, %ymm0
-; ALL-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
+; ALL-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
 ; ALL-NEXT:    vzeroupper
 ; ALL-NEXT:    retq
   %res = shufflevector <16 x i32> %v, <16 x i32> undef, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
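For the 512-bit source, the new sequence builds the result in 128-bit-friendly pieces: vunpcklps supplies elements 0 and 4, a single vpermps supplies 8 and 12, and vblendps joins them, dropping one vpermps and one index-vector load from the old sequence. Below is a scalar index model of the lowered code above, offered as a sanity check rather than compiler code (the u lanes are don't-cares, modeled as 0).

    // Index-level model of the new test_v16i32_0_4_8_12 lowering.
    #include <array>
    #include <cassert>

    using V16 = std::array<int, 16>;
    using V4 = std::array<int, 4>;

    V4 lowered(const V16 &v) {
      V4 t = {v[4], v[5], v[6], v[7]};  // vextractf128 $1 of the low ymm
      V4 lo = {v[0], t[0], v[1], t[1]}; // vunpcklps -> {v0, v4, v1, v5}
      // vextractf64x4 $1 + vpermps <u,u,0,4,...>: v8 and v12 land in
      // lanes 2 and 3 of the permuted register.
      V4 hi = {0, 0, v[8], v[12]};
      return {lo[0], lo[1], hi[2], hi[3]}; // vblendps xmm1[0,1],xmm0[2,3]
    }

    int main() {
      V16 v{};
      for (int i = 0; i < 16; ++i)
        v[i] = 100 + i;
      assert((lowered(v) == V4{100, 104, 108, 112})); // <0,4,8,12>
      return 0;
    }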