[X86][SSE] lowerVECTOR_SHUFFLE - canonicalize with horizontal ops.

Before lowering shuffles, see if we can merge horizontal ops or canonicalize the shuffle mask to point to the same LHS/RHS of the HOps when an HOp's args are repeated.
Simon Pilgrim 2021-08-22 14:17:39 +01:00
parent dcf659e821
commit a1c892b439
2 changed files with 66 additions and 59 deletions
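
To illustrate the repeated-args case the message describes: if both inputs of a horizontal op are the same value, the two halves of its result are identical, so any shuffle index pointing at one half can be remapped to the other, and the shuffle often folds away entirely. Below is a minimal IR sketch of that situation (a hypothetical test function, not part of this commit; the phadd intrinsic is the real SSSE3 one):

; %h = phaddd(%x, %x) = <x0+x1, x2+x3, x0+x1, x2+x3>, so lanes 2,3 duplicate
; lanes 0,1; the mask <2,3,0,1> selects exactly %h again, and after
; canonicalizing the mask to the LHS half the shuffle disappears.
define <4 x i32> @shuffle_of_repeated_hadd(<4 x i32> %x) {
  %h = call <4 x i32> @llvm.x86.ssse3.phadd.d.128(<4 x i32> %x, <4 x i32> %x)
  %s = shufflevector <4 x i32> %h, <4 x i32> poison, <4 x i32> <i32 2, i32 3, i32 0, i32 1>
  ret <4 x i32> %s
}
declare <4 x i32> @llvm.x86.ssse3.phadd.d.128(<4 x i32>, <4 x i32>)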

llvm/lib/Target/X86/X86ISelLowering.cpp

@@ -18690,7 +18690,13 @@ static bool canonicalizeShuffleMaskWithCommute(ArrayRef<int> Mask) {
   return false;
 }
 
+// Forward declaration.
+static SDValue canonicalizeShuffleMaskWithHorizOp(
+    MutableArrayRef<SDValue> Ops, MutableArrayRef<int> Mask,
+    unsigned RootSizeInBits, const SDLoc &DL, SelectionDAG &DAG,
+    const X86Subtarget &Subtarget);
+
 /// Top-level lowering for x86 vector shuffles.
 ///
 /// This handles decomposition, canonicalization, and lowering of all x86
 /// vector shuffles. Most of the specific lowering strategies are encapsulated
@@ -18799,8 +18805,21 @@ static SDValue lowerVECTOR_SHUFFLE(SDValue Op, const X86Subtarget &Subtarget,
     }
   }
 
+  SmallVector<SDValue> Ops = {V1, V2};
+  SmallVector<int> Mask(OrigMask.begin(), OrigMask.end());
+
+  // Canonicalize the shuffle with any horizontal ops inputs.
+  // NOTE: This may update Ops and Mask.
+  if (SDValue HOp = canonicalizeShuffleMaskWithHorizOp(
+          Ops, Mask, VT.getSizeInBits(), DL, DAG, Subtarget))
+    return DAG.getBitcast(VT, HOp);
+
+  V1 = DAG.getBitcast(VT, Ops[0]);
+  V2 = DAG.getBitcast(VT, Ops[1]);
+  assert(Mask.size() == NumElements && "canonicalizeShuffleMaskWithHorizOp "
+                                       "shouldn't alter the shuffle mask size");
+
   // Commute the shuffle if it will improve canonicalization.
-  SmallVector<int, 64> Mask(OrigMask.begin(), OrigMask.end());
   if (canonicalizeShuffleMaskWithCommute(Mask)) {
     ShuffleVectorSDNode::commuteMask(Mask);
     std::swap(V1, V2);
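
The merge half of the change can be sketched the same way. A hypothetical example (not a test from this commit; only the phadd intrinsic is real): a shuffle that takes the low half of phaddd(a,a) and the low half of phaddd(b,b) computes exactly phaddd(a,b), so the new canonicalization can collapse two horizontal ops plus a shuffle into a single horizontal op. That is the kind of rewrite visible in the test updates below.

; %ha = <a0+a1, a2+a3, a0+a1, a2+a3>, %hb = <b0+b1, b2+b3, b0+b1, b2+b3>;
; mask <0,1,4,5> yields <a0+a1, a2+a3, b0+b1, b2+b3>, i.e. phaddd(%a, %b),
; so lowering can emit one phaddd instead of two phaddds plus a shuffle.
define <4 x i32> @merge_repeated_hadds(<4 x i32> %a, <4 x i32> %b) {
  %ha = call <4 x i32> @llvm.x86.ssse3.phadd.d.128(<4 x i32> %a, <4 x i32> %a)
  %hb = call <4 x i32> @llvm.x86.ssse3.phadd.d.128(<4 x i32> %b, <4 x i32> %b)
  %s = shufflevector <4 x i32> %ha, <4 x i32> %hb, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
  ret <4 x i32> %s
}
declare <4 x i32> @llvm.x86.ssse3.phadd.d.128(<4 x i32>, <4 x i32>)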

llvm/test/CodeGen/X86/horizontal-sum.ll

@@ -105,8 +105,8 @@ define <4 x i32> @pair_sum_v4i32_v4i32(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2,
 ;
 ; SSSE3-FAST-LABEL: pair_sum_v4i32_v4i32:
 ; SSSE3-FAST:       # %bb.0:
-; SSSE3-FAST-NEXT:    phaddd %xmm3, %xmm2
 ; SSSE3-FAST-NEXT:    phaddd %xmm1, %xmm0
+; SSSE3-FAST-NEXT:    phaddd %xmm3, %xmm2
 ; SSSE3-FAST-NEXT:    phaddd %xmm2, %xmm0
 ; SSSE3-FAST-NEXT:    retq
 ;
@@ -126,12 +126,12 @@ define <4 x i32> @pair_sum_v4i32_v4i32(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2,
 ; AVX1-SLOW-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],xmm1[6,7]
 ; AVX1-SLOW-NEXT:    retq
 ;
-; AVX-FAST-LABEL: pair_sum_v4i32_v4i32:
-; AVX-FAST:       # %bb.0:
-; AVX-FAST-NEXT:    vphaddd %xmm3, %xmm2, %xmm2
-; AVX-FAST-NEXT:    vphaddd %xmm1, %xmm0, %xmm0
-; AVX-FAST-NEXT:    vphaddd %xmm2, %xmm0, %xmm0
-; AVX-FAST-NEXT:    retq
+; AVX1-FAST-LABEL: pair_sum_v4i32_v4i32:
+; AVX1-FAST:       # %bb.0:
+; AVX1-FAST-NEXT:    vphaddd %xmm1, %xmm0, %xmm0
+; AVX1-FAST-NEXT:    vphaddd %xmm3, %xmm2, %xmm1
+; AVX1-FAST-NEXT:    vphaddd %xmm1, %xmm0, %xmm0
+; AVX1-FAST-NEXT:    retq
 ;
 ; AVX2-SLOW-LABEL: pair_sum_v4i32_v4i32:
 ; AVX2-SLOW:       # %bb.0:
@@ -147,6 +147,13 @@ define <4 x i32> @pair_sum_v4i32_v4i32(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2,
 ; AVX2-SLOW-NEXT:    vpaddd %xmm1, %xmm2, %xmm1
 ; AVX2-SLOW-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3]
 ; AVX2-SLOW-NEXT:    retq
+;
+; AVX2-FAST-LABEL: pair_sum_v4i32_v4i32:
+; AVX2-FAST:       # %bb.0:
+; AVX2-FAST-NEXT:    vphaddd %xmm3, %xmm2, %xmm2
+; AVX2-FAST-NEXT:    vphaddd %xmm1, %xmm0, %xmm0
+; AVX2-FAST-NEXT:    vphaddd %xmm2, %xmm0, %xmm0
+; AVX2-FAST-NEXT:    retq
 %5 = shufflevector <4 x i32> %0, <4 x i32> poison, <2 x i32> <i32 0, i32 2>
 %6 = shufflevector <4 x i32> %0, <4 x i32> poison, <2 x i32> <i32 1, i32 3>
 %7 = add <2 x i32> %5, %6
@@ -451,20 +458,20 @@ define <8 x i32> @pair_sum_v8i32_v4i32(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2,
 ; AVX2-FAST:       # %bb.0:
 ; AVX2-FAST-NEXT:    vphaddd %xmm1, %xmm0, %xmm0
-; AVX2-FAST-NEXT:    vphaddd %xmm0, %xmm0, %xmm0
-; AVX2-FAST-NEXT:    vphaddd %xmm4, %xmm4, %xmm1
-; AVX2-FAST-NEXT:    vphaddd %xmm5, %xmm5, %xmm4
+; AVX2-FAST-NEXT:    vphaddd %xmm0, %xmm0, %xmm1
+; AVX2-FAST-NEXT:    vphaddd %xmm4, %xmm4, %xmm4
+; AVX2-FAST-NEXT:    vphaddd %xmm5, %xmm5, %xmm5
 ; AVX2-FAST-NEXT:    vphaddd %xmm3, %xmm2, %xmm2
-; AVX2-FAST-NEXT:    vshufps {{.*#+}} xmm3 = xmm2[0,2],xmm1[0,3]
-; AVX2-FAST-NEXT:    vinsertps {{.*#+}} xmm3 = xmm3[0,1,2],xmm4[0]
-; AVX2-FAST-NEXT:    vshufps {{.*#+}} xmm1 = xmm2[1,3],xmm1[1,3]
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} xmm1 = xmm1[0,1,2],xmm4[3]
-; AVX2-FAST-NEXT:    vpaddd %xmm1, %xmm3, %xmm1
-; AVX2-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
-; AVX2-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
-; AVX2-FAST-NEXT:    vphaddd %xmm7, %xmm6, %xmm1
-; AVX2-FAST-NEXT:    vphaddd %xmm0, %xmm0, %xmm2
-; AVX2-FAST-NEXT:    vphaddd %xmm2, %xmm1, %xmm1
+; AVX2-FAST-NEXT:    vshufps {{.*#+}} xmm3 = xmm2[0,2],xmm4[0,3]
+; AVX2-FAST-NEXT:    vinsertps {{.*#+}} xmm3 = xmm3[0,1,2],xmm5[0]
+; AVX2-FAST-NEXT:    vshufps {{.*#+}} xmm2 = xmm2[1,3],xmm4[1,3]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} xmm2 = xmm2[0,1,2],xmm5[3]
+; AVX2-FAST-NEXT:    vpaddd %xmm2, %xmm3, %xmm2
+; AVX2-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
+; AVX2-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
+; AVX2-FAST-NEXT:    vphaddd %xmm7, %xmm6, %xmm2
+; AVX2-FAST-NEXT:    vphaddd %xmm1, %xmm2, %xmm1
 ; AVX2-FAST-NEXT:    vpbroadcastq %xmm1, %ymm1
 ; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
 ; AVX2-FAST-NEXT:    retq
@@ -993,8 +1000,8 @@ define <4 x float> @reduction_sum_v4f32_v4f32_reassoc(<4 x float> %0, <4 x float
 ; SSSE3-FAST-NEXT:    movaps %xmm3, %xmm1
 ; SSSE3-FAST-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm3[1]
 ; SSSE3-FAST-NEXT:    addps %xmm3, %xmm1
-; SSSE3-FAST-NEXT:    haddps %xmm0, %xmm1
-; SSSE3-FAST-NEXT:    shufps {{.*#+}} xmm4 = xmm4[0,2],xmm1[2,0]
+; SSSE3-FAST-NEXT:    haddps %xmm1, %xmm0
+; SSSE3-FAST-NEXT:    shufps {{.*#+}} xmm4 = xmm4[0,2],xmm0[0,2]
 ; SSSE3-FAST-NEXT:    movaps %xmm4, %xmm0
 ; SSSE3-FAST-NEXT:    retq
 ;
@@ -1028,8 +1035,8 @@ define <4 x float> @reduction_sum_v4f32_v4f32_reassoc(<4 x float> %0, <4 x float
 ; AVX-FAST-NEXT:    vaddps %xmm1, %xmm2, %xmm1
 ; AVX-FAST-NEXT:    vpermilpd {{.*#+}} xmm2 = xmm3[1,0]
 ; AVX-FAST-NEXT:    vaddps %xmm2, %xmm3, %xmm2
-; AVX-FAST-NEXT:    vhaddps %xmm1, %xmm2, %xmm1
-; AVX-FAST-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[2,0]
+; AVX-FAST-NEXT:    vhaddps %xmm2, %xmm1, %xmm1
+; AVX-FAST-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
 ; AVX-FAST-NEXT:    retq
 %5 = call reassoc float @llvm.vector.reduce.fadd.f32.v4f32(float -0.0, <4 x float> %0)
 %6 = call reassoc float @llvm.vector.reduce.fadd.f32.v4f32(float -0.0, <4 x float> %1)
@@ -1105,39 +1112,20 @@ define <4 x i32> @reduction_sum_v4i32_v4i32(<4 x i32> %0, <4 x i32> %1, <4 x i32
 ; AVX-SLOW-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
 ; AVX-SLOW-NEXT:    retq
 ;
-; AVX1-FAST-LABEL: reduction_sum_v4i32_v4i32:
-; AVX1-FAST:       # %bb.0:
-; AVX1-FAST-NEXT:    vpshufd {{.*#+}} xmm4 = xmm0[2,3,2,3]
-; AVX1-FAST-NEXT:    vpaddd %xmm4, %xmm0, %xmm0
-; AVX1-FAST-NEXT:    vpshufd {{.*#+}} xmm4 = xmm1[2,3,2,3]
-; AVX1-FAST-NEXT:    vpaddd %xmm4, %xmm1, %xmm1
-; AVX1-FAST-NEXT:    vphaddd %xmm1, %xmm0, %xmm0
-; AVX1-FAST-NEXT:    vpshufd {{.*#+}} xmm1 = xmm2[2,3,2,3]
-; AVX1-FAST-NEXT:    vpaddd %xmm1, %xmm2, %xmm1
-; AVX1-FAST-NEXT:    vpshufd {{.*#+}} xmm2 = xmm3[2,3,2,3]
-; AVX1-FAST-NEXT:    vpaddd %xmm2, %xmm3, %xmm2
-; AVX1-FAST-NEXT:    vphaddd %xmm2, %xmm1, %xmm1
-; AVX1-FAST-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,2,0,2]
-; AVX1-FAST-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX1-FAST-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
-; AVX1-FAST-NEXT:    retq
-;
-; AVX2-FAST-LABEL: reduction_sum_v4i32_v4i32:
-; AVX2-FAST:       # %bb.0:
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm4 = xmm0[2,3,2,3]
-; AVX2-FAST-NEXT:    vpaddd %xmm4, %xmm0, %xmm0
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm4 = xmm1[2,3,2,3]
-; AVX2-FAST-NEXT:    vpaddd %xmm4, %xmm1, %xmm1
-; AVX2-FAST-NEXT:    vphaddd %xmm1, %xmm0, %xmm0
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm1 = xmm2[2,3,2,3]
-; AVX2-FAST-NEXT:    vpaddd %xmm1, %xmm2, %xmm1
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm2 = xmm3[2,3,2,3]
-; AVX2-FAST-NEXT:    vpaddd %xmm2, %xmm3, %xmm2
-; AVX2-FAST-NEXT:    vphaddd %xmm2, %xmm1, %xmm1
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,2,0,2]
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX2-FAST-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
-; AVX2-FAST-NEXT:    retq
+; AVX-FAST-LABEL: reduction_sum_v4i32_v4i32:
+; AVX-FAST:       # %bb.0:
+; AVX-FAST-NEXT:    vpshufd {{.*#+}} xmm4 = xmm0[2,3,2,3]
+; AVX-FAST-NEXT:    vpaddd %xmm4, %xmm0, %xmm0
+; AVX-FAST-NEXT:    vpshufd {{.*#+}} xmm4 = xmm1[2,3,2,3]
+; AVX-FAST-NEXT:    vpaddd %xmm4, %xmm1, %xmm1
+; AVX-FAST-NEXT:    vphaddd %xmm1, %xmm0, %xmm0
+; AVX-FAST-NEXT:    vpshufd {{.*#+}} xmm1 = xmm2[2,3,2,3]
+; AVX-FAST-NEXT:    vpaddd %xmm1, %xmm2, %xmm1
+; AVX-FAST-NEXT:    vpshufd {{.*#+}} xmm2 = xmm3[2,3,2,3]
+; AVX-FAST-NEXT:    vpaddd %xmm2, %xmm3, %xmm2
+; AVX-FAST-NEXT:    vphaddd %xmm2, %xmm1, %xmm1
+; AVX-FAST-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; AVX-FAST-NEXT:    retq
 %5 = call i32 @llvm.vector.reduce.add.i32.v4i32(<4 x i32> %0)
 %6 = call i32 @llvm.vector.reduce.add.i32.v4i32(<4 x i32> %1)
 %7 = call i32 @llvm.vector.reduce.add.i32.v4i32(<4 x i32> %2)