diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp index 2003e71f1655..9585c09ad928 100644 --- a/llvm/lib/Target/X86/X86ISelLowering.cpp +++ b/llvm/lib/Target/X86/X86ISelLowering.cpp @@ -14670,7 +14670,24 @@ static SDValue OptimizeConditionalInDecrement(SDNode *N, SelectionDAG &DAG) { DAG.getConstant(0, OtherVal.getValueType()), NewCmp); } -static SDValue PerformSubCombine(SDNode *N, SelectionDAG &DAG) { +/// PerformAddCombine - Do target-specific dag combines on integer adds. +static SDValue PerformAddCombine(SDNode *N, SelectionDAG &DAG, + const X86Subtarget *Subtarget) { + EVT VT = N->getValueType(0); + SDValue Op0 = N->getOperand(0); + SDValue Op1 = N->getOperand(1); + + // Try to synthesize horizontal adds from adds of shuffles. + if ((Subtarget->hasSSSE3() || Subtarget->hasAVX()) && + (VT == MVT::v8i16 || VT == MVT::v4i32) && + isHorizontalBinOp(Op0, Op1, true)) + return DAG.getNode(X86ISD::HADD, N->getDebugLoc(), VT, Op0, Op1); + + return OptimizeConditionalInDecrement(N, DAG); +} + +static SDValue PerformSubCombine(SDNode *N, SelectionDAG &DAG, + const X86Subtarget *Subtarget) { SDValue Op0 = N->getOperand(0); SDValue Op1 = N->getOperand(1); @@ -14692,6 +14709,13 @@ static SDValue PerformSubCombine(SDNode *N, SelectionDAG &DAG) { } } + // Try to synthesize horizontal subs from subs of shuffles. 
+ EVT VT = N->getValueType(0); + if ((Subtarget->hasSSSE3() || Subtarget->hasAVX()) && + (VT == MVT::v8i16 || VT == MVT::v4i32) && + isHorizontalBinOp(Op0, Op1, false)) + return DAG.getNode(X86ISD::HSUB, N->getDebugLoc(), VT, Op0, Op1); + return OptimizeConditionalInDecrement(N, DAG); } @@ -14705,8 +14729,8 @@ SDValue X86TargetLowering::PerformDAGCombine(SDNode *N, case ISD::VSELECT: case ISD::SELECT: return PerformSELECTCombine(N, DAG, Subtarget); case X86ISD::CMOV: return PerformCMOVCombine(N, DAG, DCI); - case ISD::ADD: return OptimizeConditionalInDecrement(N, DAG); - case ISD::SUB: return PerformSubCombine(N, DAG); + case ISD::ADD: return PerformAddCombine(N, DAG, Subtarget); + case ISD::SUB: return PerformSubCombine(N, DAG, Subtarget); case X86ISD::ADC: return PerformADCCombine(N, DAG, DCI); case ISD::MUL: return PerformMulCombine(N, DAG, DCI); case ISD::SHL: diff --git a/llvm/lib/Target/X86/X86ISelLowering.h b/llvm/lib/Target/X86/X86ISelLowering.h index c95c45169b1f..5a407012c09c 100644 --- a/llvm/lib/Target/X86/X86ISelLowering.h +++ b/llvm/lib/Target/X86/X86ISelLowering.h @@ -178,6 +178,12 @@ namespace llvm { /// BLEND family of opcodes BLENDV, + /// HADD - Integer horizontal add. + HADD, + + /// HSUB - Integer horizontal sub. + HSUB, + /// FHADD - Floating point horizontal add. 
FHADD, diff --git a/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td b/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td index 62ee94fd7c7a..c91e2dfefeea 100644 --- a/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td +++ b/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td @@ -41,6 +41,8 @@ def X86fsrl : SDNode<"X86ISD::FSRL", SDTX86FPShiftOp>; def X86fgetsign: SDNode<"X86ISD::FGETSIGNx86",SDTFPToIntOp>; def X86fhadd : SDNode<"X86ISD::FHADD", SDTFPBinOp>; def X86fhsub : SDNode<"X86ISD::FHSUB", SDTFPBinOp>; +def X86hadd : SDNode<"X86ISD::HADD", SDTIntBinOp>; +def X86hsub : SDNode<"X86ISD::HSUB", SDTIntBinOp>; def X86comi : SDNode<"X86ISD::COMI", SDTX86CmpTest>; def X86ucomi : SDNode<"X86ISD::UCOMI", SDTX86CmpTest>; def X86cmpss : SDNode<"X86ISD::FSETCCss", SDTX86Cmpss>; diff --git a/llvm/lib/Target/X86/X86InstrSSE.td b/llvm/lib/Target/X86/X86InstrSSE.td index 27ad550056e5..c365fad73e6a 100644 --- a/llvm/lib/Target/X86/X86InstrSSE.td +++ b/llvm/lib/Target/X86/X86InstrSSE.td @@ -5369,6 +5369,15 @@ let Predicates = [HasSSSE3] in { (PSIGNWrr128 VR128:$src1, VR128:$src2)>; def : Pat<(v4i32 (X86psign VR128:$src1, VR128:$src2)), (PSIGNDrr128 VR128:$src1, VR128:$src2)>; + + def : Pat<(v8i16 (X86hadd VR128:$src1, VR128:$src2)), + (PHADDWrr128 VR128:$src1, VR128:$src2)>; + def : Pat<(v4i32 (X86hadd VR128:$src1, VR128:$src2)), + (PHADDDrr128 VR128:$src1, VR128:$src2)>; + def : Pat<(v8i16 (X86hsub VR128:$src1, VR128:$src2)), + (PHSUBWrr128 VR128:$src1, VR128:$src2)>; + def : Pat<(v4i32 (X86hsub VR128:$src1, VR128:$src2)), + (PHSUBDrr128 VR128:$src1, VR128:$src2)>; } let Predicates = [HasAVX] in { @@ -5383,6 +5392,15 @@ let Predicates = [HasAVX] in { (VPSIGNWrr128 VR128:$src1, VR128:$src2)>; def : Pat<(v4i32 (X86psign VR128:$src1, VR128:$src2)), (VPSIGNDrr128 VR128:$src1, VR128:$src2)>; + + def : Pat<(v8i16 (X86hadd VR128:$src1, VR128:$src2)), + (VPHADDWrr128 VR128:$src1, VR128:$src2)>; + def : Pat<(v4i32 (X86hadd VR128:$src1, VR128:$src2)), + (VPHADDDrr128 VR128:$src1, VR128:$src2)>; + def : 
Pat<(v8i16 (X86hsub VR128:$src1, VR128:$src2)), + (VPHSUBWrr128 VR128:$src1, VR128:$src2)>; + def : Pat<(v4i32 (X86hsub VR128:$src1, VR128:$src2)), + (VPHSUBDrr128 VR128:$src1, VR128:$src2)>; } let Predicates = [HasAVX2] in { @@ -5392,6 +5410,15 @@ let Predicates = [HasAVX2] in { (VPSIGNWrr256 VR256:$src1, VR256:$src2)>; def : Pat<(v8i32 (X86psign VR256:$src1, VR256:$src2)), (VPSIGNDrr256 VR256:$src1, VR256:$src2)>; + + def : Pat<(v16i16 (X86hadd VR256:$src1, VR256:$src2)), + (VPHADDWrr256 VR256:$src1, VR256:$src2)>; + def : Pat<(v8i32 (X86hadd VR256:$src1, VR256:$src2)), + (VPHADDDrr256 VR256:$src1, VR256:$src2)>; + def : Pat<(v16i16 (X86hsub VR256:$src1, VR256:$src2)), + (VPHSUBWrr256 VR256:$src1, VR256:$src2)>; + def : Pat<(v8i32 (X86hsub VR256:$src1, VR256:$src2)), + (VPHSUBDrr256 VR256:$src1, VR256:$src2)>; } //===---------------------------------------------------------------------===//