[X86][SSE] Replace foldShuffleOfHorizOp with generalized version in canonicalizeShuffleMaskWithHorizOp

foldShuffleOfHorizOp only handled basic shufps(hop(x,y),hop(z,w)) folds. By moving this to canonicalizeShuffleMaskWithHorizOp we can work with more general/combined v4x32 shuffle masks, handle both the float and integer domains, and support shuffle-of-packs as well.

The next step will be to support 256/512-bit vector cases.
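To illustrate the kind of pattern this targets, here is a rough C intrinsics sketch (a hypothetical example, not taken from the patch or its tests): a shufps of two hadds whose mask only reads the x-half of the first hadd and the w-half of the second is equivalent to a single hadd of x and w, which the combiner can now form directly.

#include <pmmintrin.h> // SSE3: _mm_hadd_ps (also pulls in _mm_shuffle_ps)

// shufps(hadd(x,y), hadd(z,w)) selecting lanes {0,1} from the first hadd and
// lanes {2,3} from the second: both hadd results are only partially used and
// the whole expression equals a single _mm_hadd_ps(x, w).
static __m128 shuffle_of_hadds(__m128 x, __m128 y, __m128 z, __m128 w) {
  __m128 h0 = _mm_hadd_ps(x, y); // [x0+x1, x2+x3, y0+y1, y2+y3]
  __m128 h1 = _mm_hadd_ps(z, w); // [z0+z1, z2+z3, w0+w1, w2+w3]
  return _mm_shuffle_ps(h0, h1, _MM_SHUFFLE(3, 2, 1, 0)); // == hadd(x, w)
}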
Simon Pilgrim 2021-05-11 12:26:14 +01:00
parent bce3cca488
commit 9acc03ad92
3 changed files with 162 additions and 188 deletions


@@ -36224,36 +36224,75 @@ static SDValue canonicalizeShuffleMaskWithHorizOp(
int NumLanes = VT0.getSizeInBits() / 128;
int NumEltsPerLane = NumElts / NumLanes;
int NumHalfEltsPerLane = NumEltsPerLane / 2;
MVT SrcVT = BC0.getOperand(0).getSimpleValueType();
// See if we can remove the shuffle by resorting the HOP chain so that
// the HOP args are pre-shuffled.
// TODO: Generalize to any sized/depth chain.
// TODO: Add support for 256/512-bit vectors.
// TODO: Add support for PACKSS/PACKUS.
if (isHoriz && NumEltsPerLane == 4 && VT0.is128BitVector() &&
shouldUseHorizontalOp(Ops.size() == 1, DAG, Subtarget)) {
if (RootSizeInBits == 128 && NumEltsPerLane >= 4 &&
(isPack || shouldUseHorizontalOp(Ops.size() == 1, DAG, Subtarget))) {
SmallVector<int> ScaledMask;
if (scaleShuffleElements(Mask, 4, ScaledMask)) {
// Attempt to find a HOP(HOP(X,Y),HOP(Z,W)) source operand.
auto GetHOpSrc = [&](int M) {
if (M == SM_SentinelUndef)
return DAG.getUNDEF(VT0);
if (M == SM_SentinelZero)
return getZeroVector(VT0.getSimpleVT(), Subtarget, DAG, DL);
SDValue Src0 = BC[M / NumElts];
SDValue Src1 = Src0.getOperand((M % 4) >= 2);
if (Src1.getOpcode() == Opcode0 && Src0->isOnlyUserOf(Src1.getNode()))
return Src1.getOperand(M % 2);
return SDValue();
};
SDValue M0 = GetHOpSrc(ScaledMask[0]);
SDValue M1 = GetHOpSrc(ScaledMask[1]);
SDValue M2 = GetHOpSrc(ScaledMask[2]);
SDValue M3 = GetHOpSrc(ScaledMask[3]);
if (M0 && M1 && M2 && M3) {
SDValue LHS = DAG.getNode(Opcode0, DL, VT0, M0, M1);
SDValue RHS = DAG.getNode(Opcode0, DL, VT0, M2, M3);
return DAG.getNode(Opcode0, DL, VT0, LHS, RHS);
// See if we can remove the shuffle by resorting the HOP chain so that
// the HOP args are pre-shuffled.
// TODO: Generalize to any sized/depth chain.
// TODO: Add support for PACKSS/PACKUS.
if (isHoriz && NumEltsPerLane == 4) {
// Attempt to find a HOP(HOP(X,Y),HOP(Z,W)) source operand.
auto GetHOpSrc = [&](int M) {
if (M == SM_SentinelUndef)
return DAG.getUNDEF(VT0);
if (M == SM_SentinelZero)
return getZeroVector(VT0.getSimpleVT(), Subtarget, DAG, DL);
SDValue Src0 = BC[M / NumElts];
SDValue Src1 = Src0.getOperand((M % 4) >= 2);
if (Src1.getOpcode() == Opcode0 && Src0->isOnlyUserOf(Src1.getNode()))
return Src1.getOperand(M % 2);
return SDValue();
};
SDValue M0 = GetHOpSrc(ScaledMask[0]);
SDValue M1 = GetHOpSrc(ScaledMask[1]);
SDValue M2 = GetHOpSrc(ScaledMask[2]);
SDValue M3 = GetHOpSrc(ScaledMask[3]);
if (M0 && M1 && M2 && M3) {
SDValue LHS = DAG.getNode(Opcode0, DL, VT0, M0, M1);
SDValue RHS = DAG.getNode(Opcode0, DL, VT0, M2, M3);
return DAG.getNode(Opcode0, DL, VT0, LHS, RHS);
}
}
// shuffle(hop(x,y),hop(z,w)) -> permute(hop(x,z)) etc.
if (Ops.size() >= 2) {
SDValue LHS, RHS;
auto GetHOpSrc = [&](int M, int &OutM) {
// TODO: Support SM_SentinelZero
if (M < 0)
return M == SM_SentinelUndef;
SDValue Src = BC[M / 4].getOperand((M % 4) >= 2);
if (!LHS || LHS == Src) {
LHS = Src;
OutM = (M % 2);
return true;
}
if (!RHS || RHS == Src) {
RHS = Src;
OutM = (M % 2) + 2;
return true;
}
return false;
};
int PostMask[4] = {-1, -1, -1, -1};
if (GetHOpSrc(ScaledMask[0], PostMask[0]) &&
GetHOpSrc(ScaledMask[1], PostMask[1]) &&
GetHOpSrc(ScaledMask[2], PostMask[2]) &&
GetHOpSrc(ScaledMask[3], PostMask[3])) {
LHS = DAG.getBitcast(SrcVT, LHS);
RHS = DAG.getBitcast(SrcVT, RHS ? RHS : LHS);
SDValue Res = DAG.getNode(Opcode0, DL, VT0, LHS, RHS);
// Use SHUFPS for the permute so this will work on SSE3 targets,
// shuffle combining and domain handling will simplify this later on.
MVT ShuffleVT = MVT::getVectorVT(MVT::f32, RootSizeInBits / 32);
Res = DAG.getBitcast(ShuffleVT, Res);
return DAG.getNode(X86ISD::SHUFP, DL, ShuffleVT, Res, Res,
getV4X86ShuffleImm8ForMask(PostMask, DL, DAG));
}
}
}
}
@@ -36315,14 +36354,13 @@ static SDValue canonicalizeShuffleMaskWithHorizOp(
scaleShuffleElements(TargetMask128, 2, WideMask128)) {
assert(isUndefOrZeroOrInRange(WideMask128, 0, 4) && "Illegal shuffle");
bool SingleOp = (Ops.size() == 1);
if (!isHoriz || OneUseOps ||
if (isPack || OneUseOps ||
shouldUseHorizontalOp(SingleOp, DAG, Subtarget)) {
SDValue Lo = isInRange(WideMask128[0], 0, 2) ? BC0 : BC1;
SDValue Hi = isInRange(WideMask128[1], 0, 2) ? BC0 : BC1;
Lo = Lo.getOperand(WideMask128[0] & 1);
Hi = Hi.getOperand(WideMask128[1] & 1);
if (SingleOp) {
MVT SrcVT = BC0.getOperand(0).getSimpleValueType();
SDValue Undef = DAG.getUNDEF(SrcVT);
SDValue Zero = getZeroVector(SrcVT, Subtarget, DAG, DL);
Lo = (WideMask128[0] == SM_SentinelZero ? Zero : Lo);
@@ -38050,48 +38088,6 @@ static SDValue combineShuffleOfConcatUndef(SDNode *N, SelectionDAG &DAG,
return DAG.getVectorShuffle(VT, DL, Concat, DAG.getUNDEF(VT), Mask);
}
// Eliminate a redundant shuffle of a horizontal math op.
// TODO: Merge this into canonicalizeShuffleMaskWithHorizOp.
static SDValue foldShuffleOfHorizOp(SDNode *N, SelectionDAG &DAG) {
unsigned Opcode = N->getOpcode();
if (Opcode != X86ISD::SHUFP)
return SDValue();
EVT VT = N->getValueType(0);
SDValue HOp = N->getOperand(0);
if (HOp.getOpcode() != X86ISD::HADD && HOp.getOpcode() != X86ISD::FHADD &&
HOp.getOpcode() != X86ISD::HSUB && HOp.getOpcode() != X86ISD::FHSUB)
return SDValue();
// shufps(hop(x,y),hop(z,w)) -> permute(hop(x,z)) etc.
// Don't fold if hop(x,y) == hop(z,w).
if (Opcode == X86ISD::SHUFP) {
SDValue HOp2 = N->getOperand(1);
if (HOp.getOpcode() != HOp2.getOpcode() || VT != MVT::v4f32 || HOp == HOp2)
return SDValue();
SmallVector<int> RepeatedMask;
DecodeSHUFPMask(4, 32, N->getConstantOperandVal(2), RepeatedMask);
SDValue Op0 = HOp.getOperand(RepeatedMask[0] >= 2 ? 1 : 0);
SDValue Op1 = HOp.getOperand(RepeatedMask[1] >= 2 ? 1 : 0);
SDValue Op2 = HOp2.getOperand(RepeatedMask[2] >= 6 ? 1 : 0);
SDValue Op3 = HOp2.getOperand(RepeatedMask[3] >= 6 ? 1 : 0);
if ((Op0 == Op1) && (Op2 == Op3)) {
int NewMask[4] = {RepeatedMask[0] % 2, RepeatedMask[1] % 2,
((RepeatedMask[2] - 4) % 2) + 2,
((RepeatedMask[3] - 4) % 2) + 2};
SDLoc DL(HOp);
SDValue Res = DAG.getNode(HOp.getOpcode(), DL, VT, Op0, Op2);
// Use SHUFPS for the permute so this will work on SSE3 targets, shuffle
// combining and domain handling will simplify this later on.
return DAG.getNode(X86ISD::SHUFP, DL, VT, Res, Res,
getV4X86ShuffleImm8ForMask(NewMask, DL, DAG));
}
return SDValue();
}
return SDValue();
}
/// If we have a shuffle of AVX/AVX512 (256/512 bit) vectors that only uses the
/// low half of each source vector and does not set any high half elements in
/// the destination vector, narrow the shuffle to half its original size.
@@ -38137,14 +38133,10 @@ static SDValue combineShuffle(SDNode *N, SelectionDAG &DAG,
SDLoc dl(N);
EVT VT = N->getValueType(0);
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
if (TLI.isTypeLegal(VT)) {
if (TLI.isTypeLegal(VT))
if (SDValue AddSub = combineShuffleToAddSubOrFMAddSub(N, Subtarget, DAG))
return AddSub;
if (SDValue HAddSub = foldShuffleOfHorizOp(N, DAG))
return HAddSub;
}
// Attempt to combine into a vector load/broadcast.
if (SDValue LD = combineToConsecutiveLoads(VT, SDValue(N, 0), dl, DAG,
Subtarget, true))

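As a rough illustration of the shuffle(hop(x,y),hop(z,w)) -> permute(hop(x,z)) path added above (a hypothetical C intrinsics sketch, not part of the patch): when every scaled mask element stays within one half of a hop operand, the two hops collapse into one and the leftover reordering is emitted as a SHUFPS self-permute built from PostMask.

#include <pmmintrin.h> // SSE3: _mm_hadd_ps

// The shuffle reads only the y-half of hadd(x,y) and the z-half of hadd(z,w),
// so LHS/RHS resolve to y and z; the combine can emit hadd(y,z) followed by a
// SHUFPS self-permute with mask {1,0,2,3}.
static __m128 permute_of_hadd(__m128 x, __m128 y, __m128 z, __m128 w) {
  __m128 h0 = _mm_hadd_ps(x, y); // [x0+x1, x2+x3, y0+y1, y2+y3]
  __m128 h1 = _mm_hadd_ps(z, w); // [z0+z1, z2+z3, w0+w1, w2+w3]
  // Selects h0[3], h0[2], h1[0], h1[1] -> [y2+y3, y0+y1, z0+z1, z2+z3],
  // i.e. hadd(y,z) = [y0+y1, y2+y3, z0+z1, z2+z3] permuted by {1,0,2,3}.
  return _mm_shuffle_ps(h0, h1, _MM_SHUFFLE(1, 0, 2, 3));
}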

@@ -141,9 +141,8 @@ define <16 x i8> @test_unpackh_packus_128(<8 x i16> %a0, <8 x i16> %a1, <8 x i16
define <4 x float> @test_shufps_packss_128(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2, <4 x i32> %a3) {
; CHECK-LABEL: test_shufps_packss_128:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpackssdw %xmm0, %xmm0, %xmm0
; CHECK-NEXT: vpackssdw %xmm3, %xmm3, %xmm1
; CHECK-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,2]
; CHECK-NEXT: vpackssdw %xmm3, %xmm0, %xmm0
; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,2,2]
; CHECK-NEXT: ret{{[l|q]}}
%1 = call <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32> %a0, <4 x i32> %a1)
%2 = call <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32> %a2, <4 x i32> %a3)
@@ -156,9 +155,8 @@ define <4 x float> @test_shufps_packss_128(<4 x i32> %a0, <4 x i32> %a1, <4 x i3
define <4 x float> @test_shufps_packus_128(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> %a2, <8 x i16> %a3) {
; CHECK-LABEL: test_shufps_packus_128:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpackuswb %xmm0, %xmm0, %xmm0
; CHECK-NEXT: vpackuswb %xmm2, %xmm2, %xmm1
; CHECK-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,0],xmm1[0,0]
; CHECK-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,0,2,2]
; CHECK-NEXT: ret{{[l|q]}}
%1 = call <16 x i8> @llvm.x86.sse2.packuswb.128(<8 x i16> %a0, <8 x i16> %a1)
%2 = call <16 x i8> @llvm.x86.sse2.packuswb.128(<8 x i16> %a2, <8 x i16> %a3)

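The test changes above show the new shuffle-of-packs handling. Here is a hedged C sketch of the same idea (hypothetical, using PACKSSDW; not taken from the test file): a shuffle of two packs that only reads the a-half of the first pack and the d-half of the second now folds to a single pack of a and d, plus a permute when the selected lanes are reordered as in the tests.

#include <emmintrin.h> // SSE2: _mm_packs_epi32 (also pulls in _mm_shuffle_ps)

// shufps over two PACKSSDW results, viewed as 4 x i32 lanes. Lanes {0,1} of p0
// hold the packed 'a' words and lanes {2,3} of p1 hold the packed 'd' words,
// so the whole expression equals a single _mm_packs_epi32(a, d).
static __m128i shuffle_of_packs(__m128i a, __m128i b, __m128i c, __m128i d) {
  __m128 p0 = _mm_castsi128_ps(_mm_packs_epi32(a, b)); // words [a0..a3, b0..b3]
  __m128 p1 = _mm_castsi128_ps(_mm_packs_epi32(c, d)); // words [c0..c3, d0..d3]
  return _mm_castps_si128(_mm_shuffle_ps(p0, p1, _MM_SHUFFLE(3, 2, 1, 0)));
}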

@@ -98,11 +98,9 @@ define <4 x i32> @pair_sum_v4i32_v4i32(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2,
; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
; SSSE3-SLOW-NEXT: paddd %xmm1, %xmm0
; SSSE3-SLOW-NEXT: phaddd %xmm2, %xmm3
; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm1 = xmm3[2,3,2,3]
; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,1,0,1]
; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1],xmm1[1,1]
; SSSE3-SLOW-NEXT: paddd %xmm3, %xmm2
; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,0]
; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,1,3,3]
; SSSE3-SLOW-NEXT: paddd %xmm3, %xmm1
; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,0]
; SSSE3-SLOW-NEXT: retq
;
; SSSE3-FAST-LABEL: pair_sum_v4i32_v4i32:
@@ -138,9 +136,7 @@ define <4 x i32> @pair_sum_v4i32_v4i32(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2,
; AVX2-SLOW-LABEL: pair_sum_v4i32_v4i32:
; AVX2-SLOW: # %bb.0:
; AVX2-SLOW-NEXT: vphaddd %xmm1, %xmm0, %xmm0
; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[3,3,3,3]
; AVX2-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm4[0]
; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX2-SLOW-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX2-SLOW-NEXT: vphaddd %xmm2, %xmm2, %xmm1
; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,1,1]
@@ -187,42 +183,35 @@ define <8 x float> @pair_sum_v8f32_v4f32(<4 x float> %0, <4 x float> %1, <4 x fl
; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,3],xmm0[1,3]
; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
; SSSE3-SLOW-NEXT: addps %xmm1, %xmm0
; SSSE3-SLOW-NEXT: movaps %xmm3, %xmm1
; SSSE3-SLOW-NEXT: haddps %xmm2, %xmm1
; SSSE3-SLOW-NEXT: haddps %xmm4, %xmm5
; SSSE3-SLOW-NEXT: haddps %xmm3, %xmm2
; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm5[2,0]
; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1],xmm5[3,1]
; SSSE3-SLOW-NEXT: addps %xmm2, %xmm1
; SSSE3-SLOW-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSSE3-SLOW-NEXT: movaps %xmm5, %xmm1
; SSSE3-SLOW-NEXT: haddps %xmm4, %xmm1
; SSSE3-SLOW-NEXT: haddps %xmm1, %xmm2
; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1,3,2]
; SSSE3-SLOW-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSSE3-SLOW-NEXT: haddps %xmm7, %xmm6
; SSSE3-SLOW-NEXT: haddps %xmm6, %xmm6
; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,3],xmm6[0,3]
; SSSE3-SLOW-NEXT: haddps %xmm5, %xmm4
; SSSE3-SLOW-NEXT: haddps %xmm6, %xmm4
; SSSE3-SLOW-NEXT: movaps %xmm4, %xmm1
; SSSE3-SLOW-NEXT: retq
;
; SSSE3-FAST-LABEL: pair_sum_v8f32_v4f32:
; SSSE3-FAST: # %bb.0:
; SSSE3-FAST-NEXT: haddps %xmm1, %xmm0
; SSSE3-FAST-NEXT: haddps %xmm0, %xmm0
; SSSE3-FAST-NEXT: movaps %xmm3, %xmm1
; SSSE3-FAST-NEXT: haddps %xmm2, %xmm1
; SSSE3-FAST-NEXT: haddps %xmm4, %xmm5
; SSSE3-FAST-NEXT: haddps %xmm3, %xmm2
; SSSE3-FAST-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm5[2,0]
; SSSE3-FAST-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1],xmm5[3,1]
; SSSE3-FAST-NEXT: addps %xmm2, %xmm1
; SSSE3-FAST-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSSE3-FAST-NEXT: haddps %xmm6, %xmm6
; SSSE3-FAST-NEXT: haddps %xmm7, %xmm7
; SSSE3-FAST-NEXT: haddps %xmm5, %xmm4
; SSSE3-FAST-NEXT: haddps %xmm4, %xmm2
; SSSE3-FAST-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSSE3-FAST-NEXT: haddps %xmm7, %xmm6
; SSSE3-FAST-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,3],xmm6[0,2]
; SSSE3-FAST-NEXT: haddps %xmm6, %xmm4
; SSSE3-FAST-NEXT: movaps %xmm4, %xmm1
; SSSE3-FAST-NEXT: retq
;
; AVX1-SLOW-LABEL: pair_sum_v8f32_v4f32:
; AVX1-SLOW: # %bb.0:
; AVX1-SLOW-NEXT: vhaddps %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX1-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm0[1,3],xmm1[0,1]
; AVX1-SLOW-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[1,3,1,3]
; AVX1-SLOW-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,2,1,3]
; AVX1-SLOW-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vhaddps %xmm4, %xmm4, %xmm1
@@ -230,7 +219,7 @@ define <8 x float> @pair_sum_v8f32_v4f32(<4 x float> %0, <4 x float> %1, <4 x fl
; AVX1-SLOW-NEXT: vhaddps %xmm3, %xmm2, %xmm2
; AVX1-SLOW-NEXT: vshufps {{.*#+}} xmm3 = xmm2[0,2],xmm1[0,1]
; AVX1-SLOW-NEXT: vinsertps {{.*#+}} xmm3 = xmm3[0,1,2],xmm4[0]
; AVX1-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm2[1,3],xmm1[1,1]
; AVX1-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm2[1,3],xmm1[1,3]
; AVX1-SLOW-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm4[1]
; AVX1-SLOW-NEXT: vaddps %xmm1, %xmm3, %xmm1
; AVX1-SLOW-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
@@ -245,32 +234,28 @@ define <8 x float> @pair_sum_v8f32_v4f32(<4 x float> %0, <4 x float> %1, <4 x fl
; AVX1-FAST-LABEL: pair_sum_v8f32_v4f32:
; AVX1-FAST: # %bb.0:
; AVX1-FAST-NEXT: vhaddps %xmm1, %xmm0, %xmm0
; AVX1-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm8
; AVX1-FAST-NEXT: vhaddps %xmm2, %xmm2, %xmm1
; AVX1-FAST-NEXT: vhaddps %xmm3, %xmm3, %xmm0
; AVX1-FAST-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[1],xmm0[1],zero,zero
; AVX1-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm0
; AVX1-FAST-NEXT: vhaddps %xmm4, %xmm4, %xmm1
; AVX1-FAST-NEXT: vhaddps %xmm5, %xmm5, %xmm4
; AVX1-FAST-NEXT: vhaddps %xmm3, %xmm2, %xmm2
; AVX1-FAST-NEXT: vshufps {{.*#+}} xmm2 = xmm2[0,2],xmm1[0,1]
; AVX1-FAST-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm4[0]
; AVX1-FAST-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[1,3]
; AVX1-FAST-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm4[1]
; AVX1-FAST-NEXT: vaddps %xmm0, %xmm2, %xmm0
; AVX1-FAST-NEXT: vmovlhps {{.*#+}} xmm1 = xmm8[0],xmm0[0]
; AVX1-FAST-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX1-FAST-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-FAST-NEXT: vshufps {{.*#+}} xmm3 = xmm2[0,2],xmm1[0,1]
; AVX1-FAST-NEXT: vinsertps {{.*#+}} xmm3 = xmm3[0,1,2],xmm4[0]
; AVX1-FAST-NEXT: vshufps {{.*#+}} xmm1 = xmm2[1,3],xmm1[1,3]
; AVX1-FAST-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm4[1]
; AVX1-FAST-NEXT: vaddps %xmm1, %xmm3, %xmm1
; AVX1-FAST-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX1-FAST-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX1-FAST-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX1-FAST-NEXT: vhaddps %xmm7, %xmm6, %xmm2
; AVX1-FAST-NEXT: vhaddps %xmm0, %xmm2, %xmm2
; AVX1-FAST-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX1-FAST-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[2]
; AVX1-FAST-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-FAST-NEXT: vshufpd {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[2]
; AVX1-FAST-NEXT: retq
;
; AVX2-SLOW-LABEL: pair_sum_v8f32_v4f32:
; AVX2-SLOW: # %bb.0:
; AVX2-SLOW-NEXT: vhaddps %xmm1, %xmm0, %xmm0
; AVX2-SLOW-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm0[1,3],xmm1[0,1]
; AVX2-SLOW-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[1,3,1,3]
; AVX2-SLOW-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,2,1,3]
; AVX2-SLOW-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX2-SLOW-NEXT: vhaddps %xmm4, %xmm4, %xmm1
@@ -365,37 +350,24 @@ define <8 x i32> @pair_sum_v8i32_v4i32(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2,
; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,3,1,3]
; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
; SSSE3-SLOW-NEXT: paddd %xmm1, %xmm0
; SSSE3-SLOW-NEXT: phaddd %xmm4, %xmm5
; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm1 = xmm5[2,3,2,3]
; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm4 = xmm5[0,1,0,1]
; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,1],xmm1[1,1]
; SSSE3-SLOW-NEXT: phaddd %xmm3, %xmm2
; SSSE3-SLOW-NEXT: movdqa %xmm2, %xmm1
; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm5[2,0]
; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,3],xmm4[2,0]
; SSSE3-SLOW-NEXT: paddd %xmm1, %xmm2
; SSSE3-SLOW-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSSE3-SLOW-NEXT: phaddd %xmm4, %xmm5
; SSSE3-SLOW-NEXT: phaddd %xmm5, %xmm2
; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,1,3,2]
; SSSE3-SLOW-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSSE3-SLOW-NEXT: phaddd %xmm7, %xmm6
; SSSE3-SLOW-NEXT: phaddd %xmm6, %xmm6
; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm1 = xmm6[0,1,1,1]
; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,3],xmm1[0,2]
; SSSE3-SLOW-NEXT: movaps %xmm2, %xmm1
; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm2 = xmm6[0,1,1,1]
; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,3],xmm2[0,2]
; SSSE3-SLOW-NEXT: retq
;
; SSSE3-FAST-LABEL: pair_sum_v8i32_v4i32:
; SSSE3-FAST: # %bb.0:
; SSSE3-FAST-NEXT: phaddd %xmm1, %xmm0
; SSSE3-FAST-NEXT: phaddd %xmm0, %xmm0
; SSSE3-FAST-NEXT: movdqa %xmm5, %xmm1
; SSSE3-FAST-NEXT: phaddd %xmm4, %xmm5
; SSSE3-FAST-NEXT: phaddd %xmm4, %xmm4
; SSSE3-FAST-NEXT: phaddd %xmm1, %xmm1
; SSSE3-FAST-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm4[1,1]
; SSSE3-FAST-NEXT: phaddd %xmm3, %xmm2
; SSSE3-FAST-NEXT: movdqa %xmm2, %xmm3
; SSSE3-FAST-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,2],xmm5[2,0]
; SSSE3-FAST-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,3],xmm1[2,0]
; SSSE3-FAST-NEXT: paddd %xmm3, %xmm2
; SSSE3-FAST-NEXT: phaddd %xmm5, %xmm4
; SSSE3-FAST-NEXT: phaddd %xmm4, %xmm2
; SSSE3-FAST-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSSE3-FAST-NEXT: phaddd %xmm6, %xmm6
; SSSE3-FAST-NEXT: phaddd %xmm7, %xmm7
@@ -407,18 +379,17 @@ define <8 x i32> @pair_sum_v8i32_v4i32(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2,
; AVX1-SLOW-LABEL: pair_sum_v8i32_v4i32:
; AVX1-SLOW: # %bb.0:
; AVX1-SLOW-NEXT: vphaddd %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,3,1,1]
; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,3,1,3]
; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
; AVX1-SLOW-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vphaddd %xmm4, %xmm4, %xmm1
; AVX1-SLOW-NEXT: vphaddd %xmm5, %xmm5, %xmm4
; AVX1-SLOW-NEXT: vphaddd %xmm3, %xmm2, %xmm2
; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[0,2,1,3]
; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[0,2,2,3]
; AVX1-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm1[0]
; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm5 = xmm4[0,0,0,0]
; AVX1-SLOW-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5],xmm5[6,7]
; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,3,1,1]
; AVX1-SLOW-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0,1],xmm1[1],zero
; AVX1-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm2[1,3],xmm1[1,3]
; AVX1-SLOW-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1,2],xmm4[3]
; AVX1-SLOW-NEXT: vpaddd %xmm1, %xmm3, %xmm1
; AVX1-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
@@ -433,34 +404,30 @@ define <8 x i32> @pair_sum_v8i32_v4i32(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2,
; AVX1-FAST-LABEL: pair_sum_v8i32_v4i32:
; AVX1-FAST: # %bb.0:
; AVX1-FAST-NEXT: vphaddd %xmm1, %xmm0, %xmm0
; AVX1-FAST-NEXT: vphaddd %xmm0, %xmm0, %xmm8
; AVX1-FAST-NEXT: vphaddd %xmm2, %xmm2, %xmm1
; AVX1-FAST-NEXT: vphaddd %xmm3, %xmm3, %xmm0
; AVX1-FAST-NEXT: vphaddd %xmm4, %xmm4, %xmm4
; AVX1-FAST-NEXT: vphaddd %xmm5, %xmm5, %xmm5
; AVX1-FAST-NEXT: vphaddd %xmm0, %xmm0, %xmm0
; AVX1-FAST-NEXT: vphaddd %xmm4, %xmm4, %xmm1
; AVX1-FAST-NEXT: vphaddd %xmm5, %xmm5, %xmm4
; AVX1-FAST-NEXT: vphaddd %xmm3, %xmm2, %xmm2
; AVX1-FAST-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,2,1,3]
; AVX1-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm4[0]
; AVX1-FAST-NEXT: vpshufd {{.*#+}} xmm3 = xmm5[0,0,0,0]
; AVX1-FAST-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5],xmm3[6,7]
; AVX1-FAST-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,1,1]
; AVX1-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5,6,7]
; AVX1-FAST-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm4[1],zero
; AVX1-FAST-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1,2],xmm5[3]
; AVX1-FAST-NEXT: vpaddd %xmm0, %xmm2, %xmm0
; AVX1-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm8[0],xmm0[0]
; AVX1-FAST-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
; AVX1-FAST-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-FAST-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[0,2,2,3]
; AVX1-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm1[0]
; AVX1-FAST-NEXT: vpshufd {{.*#+}} xmm5 = xmm4[0,0,0,0]
; AVX1-FAST-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5],xmm5[6,7]
; AVX1-FAST-NEXT: vshufps {{.*#+}} xmm1 = xmm2[1,3],xmm1[1,3]
; AVX1-FAST-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1,2],xmm4[3]
; AVX1-FAST-NEXT: vpaddd %xmm1, %xmm3, %xmm1
; AVX1-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX1-FAST-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
; AVX1-FAST-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX1-FAST-NEXT: vphaddd %xmm7, %xmm6, %xmm2
; AVX1-FAST-NEXT: vphaddd %xmm0, %xmm2, %xmm2
; AVX1-FAST-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX1-FAST-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[2]
; AVX1-FAST-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-FAST-NEXT: vshufpd {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2],ymm0[2]
; AVX1-FAST-NEXT: retq
;
; AVX2-SLOW-LABEL: pair_sum_v8i32_v4i32:
; AVX2-SLOW: # %bb.0:
; AVX2-SLOW-NEXT: vphaddd %xmm1, %xmm0, %xmm0
; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,3,1,1]
; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,3,1,3]
; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
; AVX2-SLOW-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX2-SLOW-NEXT: vphaddd %xmm4, %xmm4, %xmm1
@@ -1138,22 +1105,39 @@ define <4 x i32> @reduction_sum_v4i32_v4i32(<4 x i32> %0, <4 x i32> %1, <4 x i32
; AVX-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; AVX-SLOW-NEXT: retq
;
; AVX-FAST-LABEL: reduction_sum_v4i32_v4i32:
; AVX-FAST: # %bb.0:
; AVX-FAST-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[2,3,2,3]
; AVX-FAST-NEXT: vpaddd %xmm4, %xmm0, %xmm0
; AVX-FAST-NEXT: vpshufd {{.*#+}} xmm4 = xmm1[2,3,2,3]
; AVX-FAST-NEXT: vpaddd %xmm4, %xmm1, %xmm1
; AVX-FAST-NEXT: vphaddd %xmm1, %xmm0, %xmm0
; AVX-FAST-NEXT: vpshufd {{.*#+}} xmm1 = xmm2[2,3,2,3]
; AVX-FAST-NEXT: vpaddd %xmm1, %xmm2, %xmm1
; AVX-FAST-NEXT: vpshufd {{.*#+}} xmm2 = xmm3[2,3,2,3]
; AVX-FAST-NEXT: vpaddd %xmm2, %xmm3, %xmm2
; AVX-FAST-NEXT: vphaddd %xmm2, %xmm1, %xmm1
; AVX-FAST-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,1,3]
; AVX-FAST-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
; AVX-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX-FAST-NEXT: retq
; AVX1-FAST-LABEL: reduction_sum_v4i32_v4i32:
; AVX1-FAST: # %bb.0:
; AVX1-FAST-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[2,3,2,3]
; AVX1-FAST-NEXT: vpaddd %xmm4, %xmm0, %xmm0
; AVX1-FAST-NEXT: vpshufd {{.*#+}} xmm4 = xmm1[2,3,2,3]
; AVX1-FAST-NEXT: vpaddd %xmm4, %xmm1, %xmm1
; AVX1-FAST-NEXT: vphaddd %xmm1, %xmm0, %xmm0
; AVX1-FAST-NEXT: vpshufd {{.*#+}} xmm1 = xmm2[2,3,2,3]
; AVX1-FAST-NEXT: vpaddd %xmm1, %xmm2, %xmm1
; AVX1-FAST-NEXT: vpshufd {{.*#+}} xmm2 = xmm3[2,3,2,3]
; AVX1-FAST-NEXT: vpaddd %xmm2, %xmm3, %xmm2
; AVX1-FAST-NEXT: vphaddd %xmm2, %xmm1, %xmm1
; AVX1-FAST-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,0,2]
; AVX1-FAST-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AVX1-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; AVX1-FAST-NEXT: retq
;
; AVX2-FAST-LABEL: reduction_sum_v4i32_v4i32:
; AVX2-FAST: # %bb.0:
; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[2,3,2,3]
; AVX2-FAST-NEXT: vpaddd %xmm4, %xmm0, %xmm0
; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm4 = xmm1[2,3,2,3]
; AVX2-FAST-NEXT: vpaddd %xmm4, %xmm1, %xmm1
; AVX2-FAST-NEXT: vphaddd %xmm1, %xmm0, %xmm0
; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm1 = xmm2[2,3,2,3]
; AVX2-FAST-NEXT: vpaddd %xmm1, %xmm2, %xmm1
; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm2 = xmm3[2,3,2,3]
; AVX2-FAST-NEXT: vpaddd %xmm2, %xmm3, %xmm2
; AVX2-FAST-NEXT: vphaddd %xmm2, %xmm1, %xmm1
; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,0,2]
; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AVX2-FAST-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
; AVX2-FAST-NEXT: retq
%5 = call i32 @llvm.vector.reduce.add.i32.v4i32(<4 x i32> %0)
%6 = call i32 @llvm.vector.reduce.add.i32.v4i32(<4 x i32> %1)
%7 = call i32 @llvm.vector.reduce.add.i32.v4i32(<4 x i32> %2)