From 9eed5d2f78d933e6cf73d389bdd884d72d97f11d Mon Sep 17 00:00:00 2001
From: Simon Pilgrim
Date: Wed, 19 Jun 2019 12:41:37 +0000
Subject: [PATCH] [DAGCombiner] Support (shl (ext (shl x, c1)), c2) -> (shl (ext x), (add c1, c2)) non-uniform folds.

Use ISD::matchBinaryPredicate instead of isConstOrConstSplat so the fold also
handles vector shifts with non-uniform (non-splat) constant amounts. (An
illustrative IR sketch follows the diff.)

llvm-svn: 363793
---
 llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 31 ++++++------
 llvm/test/CodeGen/X86/combine-shl.ll          | 50 +++++++++----------
 2 files changed, 39 insertions(+), 42 deletions(-)

diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index ae555ac07111..7145b7ed9c0d 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -7232,23 +7232,22 @@ SDValue DAGCombiner::visitSHL(SDNode *N) {
                                   /*AllowTypeMismatch*/ true))
       return DAG.getConstant(0, SDLoc(N), VT);
 
-    ConstantSDNode *N0Op0C1 = isConstOrConstSplat(InnerShiftAmt);
-    if (N1C && N0Op0C1) {
-      APInt c1 = N0Op0C1->getAPIntValue();
-      APInt c2 = N1C->getAPIntValue();
+    auto MatchInRange = [OpSizeInBits, InnerBitwidth](ConstantSDNode *LHS,
+                                                      ConstantSDNode *RHS) {
+      APInt c1 = LHS->getAPIntValue();
+      APInt c2 = RHS->getAPIntValue();
       zeroExtendToMatch(c1, c2, 1 /* Overflow Bit */);
-
-      if (c2.uge(OpSizeInBits - InnerBitwidth)) {
-        SDLoc DL(N0);
-        APInt Sum = c1 + c2;
-        if (Sum.uge(OpSizeInBits))
-          return DAG.getConstant(0, DL, VT);
-
-        return DAG.getNode(
-            ISD::SHL, DL, VT,
-            DAG.getNode(N0.getOpcode(), DL, VT, N0Op0->getOperand(0)),
-            DAG.getConstant(Sum.getZExtValue(), DL, ShiftVT));
-      }
+      return c2.uge(OpSizeInBits - InnerBitwidth) &&
+             (c1 + c2).ult(OpSizeInBits);
+    };
+    if (ISD::matchBinaryPredicate(InnerShiftAmt, N1, MatchInRange,
+                                  /*AllowUndefs*/ false,
+                                  /*AllowTypeMismatch*/ true)) {
+      SDLoc DL(N);
+      SDValue Ext = DAG.getNode(N0.getOpcode(), DL, VT, N0Op0.getOperand(0));
+      SDValue Sum = DAG.getZExtOrTrunc(InnerShiftAmt, DL, ShiftVT);
+      Sum = DAG.getNode(ISD::ADD, DL, ShiftVT, Sum, N1);
+      return DAG.getNode(ISD::SHL, DL, VT, Ext, Sum);
     }
   }
 
diff --git a/llvm/test/CodeGen/X86/combine-shl.ll b/llvm/test/CodeGen/X86/combine-shl.ll
index 0dd6426543ce..44eea8c9946c 100644
--- a/llvm/test/CodeGen/X86/combine-shl.ll
+++ b/llvm/test/CodeGen/X86/combine-shl.ll
@@ -281,46 +281,44 @@ define <8 x i32> @combine_vec_shl_ext_shl1(<8 x i16> %x) {
   ret <8 x i32> %3
 }
 
-; TODO - this should fold to shl(ext(%x),c).
 define <8 x i32> @combine_vec_shl_ext_shl2(<8 x i16> %x) {
 ; SSE2-LABEL: combine_vec_shl_ext_shl2:
 ; SSE2:       # %bb.0:
-; SSE2-NEXT:    pmullw {{.*}}(%rip), %xmm0
-; SSE2-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
 ; SSE2-NEXT:    psrad $16, %xmm1
-; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
-; SSE2-NEXT:    psrad $16, %xmm0
-; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [65536,131072,262144,524288]
-; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; SSE2-NEXT:    pmuludq %xmm2, %xmm0
-; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; SSE2-NEXT:    pmuludq %xmm3, %xmm2
-; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
-; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [1048576,2097152,4194304,8388608]
-; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
-; SSE2-NEXT:    pmuludq %xmm2, %xmm1
+; SSE2-NEXT:    movdqa {{.*#+}} xmm3 = [131072,524288,2097152,8388608]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm1[1,1,3,3]
+; SSE2-NEXT:    pmuludq %xmm3, %xmm1
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[0,2,2,3]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm3[1,1,3,3]
+; SSE2-NEXT:    pmuludq %xmm4, %xmm1
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; SSE2-NEXT:    pmuludq %xmm3, %xmm2
-; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
-; SSE2-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; SSE2-NEXT:    punpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
+; SSE2-NEXT:    psrad $16, %xmm0
+; SSE2-NEXT:    movdqa {{.*#+}} xmm3 = [33554432,134217728,536870912,2147483648]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
+; SSE2-NEXT:    pmuludq %xmm3, %xmm0
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[0,2,2,3]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
+; SSE2-NEXT:    pmuludq %xmm4, %xmm0
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSE2-NEXT:    movdqa %xmm2, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: combine_vec_shl_ext_shl2:
 ; SSE41:       # %bb.0:
-; SSE41-NEXT:    pmullw {{.*}}(%rip), %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE41-NEXT:    pmovsxwd %xmm1, %xmm1
-; SSE41-NEXT:    pmovsxwd %xmm0, %xmm0
-; SSE41-NEXT:    pmulld {{.*}}(%rip), %xmm0
+; SSE41-NEXT:    pmovsxwd %xmm0, %xmm2
+; SSE41-NEXT:    pmulld {{.*}}(%rip), %xmm2
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; SSE41-NEXT:    pmovsxwd %xmm0, %xmm1
 ; SSE41-NEXT:    pmulld {{.*}}(%rip), %xmm1
+; SSE41-NEXT:    movdqa %xmm2, %xmm0
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_shl_ext_shl2:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vpmullw {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    vpmovsxwd %xmm0, %ymm0
 ; AVX-NEXT:    vpsllvd {{.*}}(%rip), %ymm0, %ymm0
 ; AVX-NEXT:    retq
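
For illustration only (not part of the commit or its tests), a minimal hypothetical LLVM IR sketch of the kind of non-uniform case the new MatchInRange path accepts. The function name and shift constants are invented; each lane satisfies the predicate from the patch, c2 >= OpSizeInBits - InnerBitwidth (here 32 - 16 = 16) and c1 + c2 < OpSizeInBits (here 32):

; Hypothetical example, in the style of combine-shl.ll.
define <4 x i32> @shl_sext_shl_nonuniform(<4 x i16> %x) {
  ; Inner i16 shift with non-uniform per-lane amounts (c1).
  %1 = shl <4 x i16> %x, <i16 1, i16 2, i16 3, i16 4>
  %2 = sext <4 x i16> %1 to <4 x i32>
  ; Outer i32 shift (c2): every lane has c2 >= 16 and c1 + c2 < 32, so the
  ; combine can emit shl (sext %x), <i32 17, i32 19, i32 21, i32 23> instead.
  %3 = shl <4 x i32> %2, <i32 16, i32 17, i32 18, i32 19>
  ret <4 x i32> %3
}

The c2 >= 16 requirement is what keeps the fold sound: any bit the inner i16 shl discards would land at position 16 + c2 >= 32 after the combined shift, so it is discarded in the wide type as well. Previously isConstOrConstSplat only matched splat shift amounts, leaving cases like this to the TODO removed above; ISD::matchBinaryPredicate evaluates the (c1, c2) pair lane by lane instead.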