forked from OSchip/llvm-project
[X86][SSE] Enable (F)HADD/SUB handling to SimplifyMultipleUseDemandedVectorElts
Attempt to bypass unused horiz-op operands. This is very similar to the PACKSS/PACKUS handling - we should try to merge these.
This commit is contained in:
parent f7aeaced65
commit 301319840e
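Why an unused horizontal-op operand can be bypassed, in a standalone sketch (not part of the commit): for v4i32, PHADDD computes [a0+a1, a2+a3, b0+b1, b2+b3], so result elements 0-1 depend only on the first operand and elements 2-3 only on the second. If a caller demands just one half of the result, the other operand is dead. A minimal check, assuming an SSSE3-capable host; compile with e.g. g++ -mssse3:

#include <tmmintrin.h>
#include <cstdio>

int main() {
  __m128i A = _mm_setr_epi32(1, 2, 3, 4);
  __m128i B = _mm_setr_epi32(10, 20, 30, 40);
  __m128i C = _mm_setr_epi32(-7, -9, -11, -13); // arbitrary stand-in for B

  // PHADDD: [a0+a1, a2+a3, b0+b1, b2+b3]
  alignas(16) int ab[4], ac[4];
  _mm_store_si128((__m128i *)ab, _mm_hadd_epi32(A, B));
  _mm_store_si128((__m128i *)ac, _mm_hadd_epi32(A, C));

  // Elements 0-1 never see B or C, which is what lets the combine drop
  // an operand when only that half of the result is demanded.
  std::printf("%d %d vs %d %d\n", ab[0], ab[1], ac[0], ac[1]); // 3 7 vs 3 7
  return 0;
}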
@@ -38430,14 +38430,17 @@ bool X86TargetLowering::SimplifyDemandedVectorEltsForTargetNode(
     APInt DemandedLHS, DemandedRHS;
     getPackDemandedElts(VT, DemandedElts, DemandedLHS, DemandedRHS);
 
-    APInt SrcUndef, SrcZero;
-    if (SimplifyDemandedVectorElts(N0, DemandedLHS, SrcUndef, SrcZero, TLO,
+    APInt LHSUndef, LHSZero;
+    if (SimplifyDemandedVectorElts(N0, DemandedLHS, LHSUndef, LHSZero, TLO,
                                    Depth + 1))
       return true;
-    if (SimplifyDemandedVectorElts(N1, DemandedRHS, SrcUndef, SrcZero, TLO,
+    APInt RHSUndef, RHSZero;
+    if (SimplifyDemandedVectorElts(N1, DemandedRHS, RHSUndef, RHSZero, TLO,
                                    Depth + 1))
       return true;
 
     // TODO - pass on known zero/undef.
 
     // Aggressively peek through ops to get at the demanded elts.
     // TODO - we should do this for all target/faux shuffles ops.
     if (!DemandedElts.isAllOnesValue()) {
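The PACKSS/PACKUS case above splits the result's demanded mask between the two operands with getPackDemandedElts. A simplified model of that split, reduced to one 128-bit lane of PACKSSWB (v8i16 -> v16i8); the Model function is a hypothetical stand-in, not the LLVM helper, which works on APInt masks across all lanes:

#include <cstdint>

// PACKSSWB writes the (saturated) LHS words to result bytes 0-7 and the
// RHS words to bytes 8-15, so a demanded-elts mask on the result splits
// cleanly into independent per-operand masks.
void getPackDemandedEltsModel(uint16_t DemandedElts, // 16 result bytes
                              uint8_t &DemandedLHS,  // 8 LHS words
                              uint8_t &DemandedRHS)  // 8 RHS words
{
  DemandedLHS = DemandedElts & 0xFF;        // result[0..7]  <- LHS[0..7]
  DemandedRHS = (DemandedElts >> 8) & 0xFF; // result[8..15] <- RHS[0..7]
}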
@@ -38458,17 +38461,37 @@ bool X86TargetLowering::SimplifyDemandedVectorEltsForTargetNode(
   case X86ISD::HSUB:
   case X86ISD::FHADD:
   case X86ISD::FHSUB: {
+    SDValue N0 = Op.getOperand(0);
+    SDValue N1 = Op.getOperand(1);
+
     APInt DemandedLHS, DemandedRHS;
     getHorizDemandedElts(VT, DemandedElts, DemandedLHS, DemandedRHS);
 
     APInt LHSUndef, LHSZero;
-    if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedLHS, LHSUndef,
-                                   LHSZero, TLO, Depth + 1))
+    if (SimplifyDemandedVectorElts(N0, DemandedLHS, LHSUndef, LHSZero, TLO,
+                                   Depth + 1))
       return true;
     APInt RHSUndef, RHSZero;
-    if (SimplifyDemandedVectorElts(Op.getOperand(1), DemandedRHS, RHSUndef,
-                                   RHSZero, TLO, Depth + 1))
+    if (SimplifyDemandedVectorElts(N1, DemandedRHS, RHSUndef, RHSZero, TLO,
+                                   Depth + 1))
       return true;
+
+    // TODO - pass on known zero/undef.
+
+    // Aggressively peek through ops to get at the demanded elts.
+    // TODO: Handle repeated operands.
+    if (N0 != N1 && !DemandedElts.isAllOnesValue()) {
+      SDValue NewN0 = SimplifyMultipleUseDemandedVectorElts(N0, DemandedLHS,
+                                                            TLO.DAG, Depth + 1);
+      SDValue NewN1 = SimplifyMultipleUseDemandedVectorElts(N1, DemandedRHS,
+                                                            TLO.DAG, Depth + 1);
+      if (NewN0 || NewN1) {
+        NewN0 = NewN0 ? NewN0 : N0;
+        NewN1 = NewN1 ? NewN1 : N1;
+        return TLO.CombineTo(Op,
+                             TLO.DAG.getNode(Opc, SDLoc(Op), VT, NewN0, NewN1));
+      }
+    }
     break;
   }
   case X86ISD::VTRUNC:
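getHorizDemandedElts performs the analogous split for the horizontal ops, except that each demanded result element maps to a pair of adjacent source elements. A simplified model, reduced to v4i32 HADD; again a hypothetical stand-in for the real APInt-based helper:

#include <cstdint>

// v4i32 HADD produces [l0+l1, l2+l3, r0+r1, r2+r3]: result elements 0-1
// each demand two adjacent LHS elements, 2-3 two adjacent RHS elements.
void getHorizDemandedEltsModel(uint8_t DemandedElts, // 4 result lanes
                               uint8_t &DemandedLHS,
                               uint8_t &DemandedRHS) {
  DemandedLHS = DemandedRHS = 0;
  for (unsigned i = 0; i != 2; ++i) {
    if (DemandedElts & (1u << i))       // result[i]   = LHS[2i] + LHS[2i+1]
      DemandedLHS |= 3u << (2 * i);
    if (DemandedElts & (1u << (i + 2))) // result[i+2] = RHS[2i] + RHS[2i+1]
      DemandedRHS |= 3u << (2 * i);
  }
}

With one side's mask empty, SimplifyMultipleUseDemandedVectorElts can return a cheaper node for that operand, and the horizontal op is rebuilt over the new operands.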
@@ -505,21 +505,21 @@ define <8 x i32> @pair_sum_v8i32_v4i32(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2,
 ; AVX2-FAST-LABEL: pair_sum_v8i32_v4i32:
 ; AVX2-FAST:       # %bb.0:
 ; AVX2-FAST-NEXT:    vphaddd %xmm1, %xmm0, %xmm0
-; AVX2-FAST-NEXT:    vphaddd %xmm0, %xmm0, %xmm1
-; AVX2-FAST-NEXT:    vphaddd %xmm1, %xmm0, %xmm0
-; AVX2-FAST-NEXT:    vphaddd %xmm4, %xmm4, %xmm4
-; AVX2-FAST-NEXT:    vphaddd %xmm5, %xmm5, %xmm5
+; AVX2-FAST-NEXT:    vphaddd %xmm0, %xmm0, %xmm0
+; AVX2-FAST-NEXT:    vphaddd %xmm4, %xmm4, %xmm1
+; AVX2-FAST-NEXT:    vphaddd %xmm5, %xmm5, %xmm4
 ; AVX2-FAST-NEXT:    vphaddd %xmm3, %xmm2, %xmm2
-; AVX2-FAST-NEXT:    vshufps {{.*#+}} xmm3 = xmm2[0,2],xmm4[0,3]
-; AVX2-FAST-NEXT:    vinsertps {{.*#+}} xmm3 = xmm3[0,1,2],xmm5[0]
-; AVX2-FAST-NEXT:    vshufps {{.*#+}} xmm2 = xmm2[1,3],xmm4[1,3]
-; AVX2-FAST-NEXT:    vblendps {{.*#+}} xmm2 = xmm2[0,1,2],xmm5[3]
-; AVX2-FAST-NEXT:    vpaddd %xmm2, %xmm3, %xmm2
-; AVX2-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
-; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
-; AVX2-FAST-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
-; AVX2-FAST-NEXT:    vphaddd %xmm7, %xmm6, %xmm2
-; AVX2-FAST-NEXT:    vphaddd %xmm1, %xmm2, %xmm1
+; AVX2-FAST-NEXT:    vshufps {{.*#+}} xmm3 = xmm2[0,2],xmm1[0,3]
+; AVX2-FAST-NEXT:    vinsertps {{.*#+}} xmm3 = xmm3[0,1,2],xmm4[0]
+; AVX2-FAST-NEXT:    vshufps {{.*#+}} xmm1 = xmm2[1,3],xmm1[1,3]
+; AVX2-FAST-NEXT:    vblendps {{.*#+}} xmm1 = xmm1[0,1,2],xmm4[3]
+; AVX2-FAST-NEXT:    vpaddd %xmm1, %xmm3, %xmm1
+; AVX2-FAST-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX2-FAST-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
+; AVX2-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-FAST-NEXT:    vphaddd %xmm7, %xmm6, %xmm1
+; AVX2-FAST-NEXT:    vphaddd %xmm0, %xmm0, %xmm2
+; AVX2-FAST-NEXT:    vphaddd %xmm2, %xmm1, %xmm1
 ; AVX2-FAST-NEXT:    vpbroadcastq %xmm1, %ymm1
 ; AVX2-FAST-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
 ; AVX2-FAST-NEXT:    retq
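The first change in this hunk shows the payoff: only the low two lanes of the leading vphaddd chain survive into vpunpcklqdq, and for those lanes hadd(x, hadd(x, x)) and hadd(x, x) agree, so one vphaddd disappears. A spot check of that equivalence, again assuming an SSSE3 host:

#include <tmmintrin.h>
#include <cassert>

int main() {
  __m128i X = _mm_setr_epi32(5, 6, 7, 8);
  __m128i Old = _mm_hadd_epi32(X, _mm_hadd_epi32(X, X)); // pre-commit shape
  __m128i New = _mm_hadd_epi32(X, X);                    // post-commit shape

  alignas(16) int o[4], n[4];
  _mm_store_si128((__m128i *)o, Old);
  _mm_store_si128((__m128i *)n, New);
  assert(o[0] == n[0] && o[1] == n[1]); // the demanded lanes match
  return 0;
}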