[DAGCombiner] Support (shl (ext (shl x, c1)), c2) -> (shl (ext x), (add c1, c2)) non-uniform folds.

Use matchBinaryPredicate instead of isConstOrConstSplat to let us handle non-uniform (per-element constant) vector shift amounts.

llvm-svn: 363793
Simon Pilgrim 2019-06-19 12:41:37 +00:00
parent 8c49366c9b
commit 9eed5d2f78
2 changed files with 39 additions and 42 deletions
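For context, the arithmetic behind the fold can be sketched on plain scalars. The sketch below is illustrative only (the widths, values, and helper name are made up and are not LLVM API): it mirrors the guard that the merged form must not preserve bits the inner shift already discarded in the narrow type, which requires the outer amount to be at least the number of bits added by the extension, and that the merged amount stay in range.

#include <cassert>
#include <cstdint>
#include <optional>

// Illustrative model of (shl (ext (shl x, c1)), c2) -> (shl (ext x), c1 + c2).
// InnerBits is the width before the extension, OuterBits the width after it.
// The merge is only safe when c2 >= OuterBits - InnerBits, i.e. the outer
// shift covers every bit added by the extension; otherwise bits that the inner
// shift pushed out of the narrow type could survive in the merged form.
std::optional<uint64_t> foldShlExtShl(uint64_t c1, uint64_t c2,
                                      unsigned InnerBits, unsigned OuterBits) {
  if (c2 < OuterBits - InnerBits)
    return std::nullopt; // not safe to merge
  if (c1 + c2 >= OuterBits)
    return std::nullopt; // everything is shifted out; handled as constant 0
  return c1 + c2;        // shift amount for the merged (shl (ext x), sum)
}

int main() {
  // i16 shifted by 4, extended to i32, shifted by 20: same as (shl (ext x), 24)
  // because 20 >= 32 - 16 and 4 + 20 < 32.
  assert(foldShlExtShl(4, 20, 16, 32).value() == 24);
  // An outer shift of only 4 keeps some of the extended bits, so no fold.
  assert(!foldShlExtShl(8, 4, 16, 32).has_value());
}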

llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp

@@ -7232,23 +7232,22 @@ SDValue DAGCombiner::visitSHL(SDNode *N) {
                                   /*AllowTypeMismatch*/ true))
       return DAG.getConstant(0, SDLoc(N), VT);
 
-    ConstantSDNode *N0Op0C1 = isConstOrConstSplat(InnerShiftAmt);
-    if (N1C && N0Op0C1) {
-      APInt c1 = N0Op0C1->getAPIntValue();
-      APInt c2 = N1C->getAPIntValue();
-      zeroExtendToMatch(c1, c2, 1 /* Overflow Bit */);
-
-      if (c2.uge(OpSizeInBits - InnerBitwidth)) {
-        SDLoc DL(N0);
-        APInt Sum = c1 + c2;
-        if (Sum.uge(OpSizeInBits))
-          return DAG.getConstant(0, DL, VT);
-
-        return DAG.getNode(
-            ISD::SHL, DL, VT,
-            DAG.getNode(N0.getOpcode(), DL, VT, N0Op0->getOperand(0)),
-            DAG.getConstant(Sum.getZExtValue(), DL, ShiftVT));
-      }
+    auto MatchInRange = [OpSizeInBits, InnerBitwidth](ConstantSDNode *LHS,
+                                                      ConstantSDNode *RHS) {
+      APInt c1 = LHS->getAPIntValue();
+      APInt c2 = RHS->getAPIntValue();
+      zeroExtendToMatch(c1, c2, 1 /* Overflow Bit */);
+      return c2.uge(OpSizeInBits - InnerBitwidth) &&
+             (c1 + c2).ult(OpSizeInBits);
+    };
+    if (ISD::matchBinaryPredicate(InnerShiftAmt, N1, MatchInRange,
+                                  /*AllowUndefs*/ false,
+                                  /*AllowTypeMismatch*/ true)) {
+      SDLoc DL(N);
+      SDValue Ext = DAG.getNode(N0.getOpcode(), DL, VT, N0Op0.getOperand(0));
+      SDValue Sum = DAG.getZExtOrTrunc(InnerShiftAmt, DL, ShiftVT);
+      Sum = DAG.getNode(ISD::ADD, DL, ShiftVT, Sum, N1);
+      return DAG.getNode(ISD::SHL, DL, VT, Ext, Sum);
     }
   }
 
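The functional change is that the legality test now runs per vector lane instead of only on splat constants. A rough standalone analogue of that element-wise matching is sketched below; plain vectors of shift amounts stand in for BUILD_VECTOR constants, and the names are illustrative rather than LLVM's actual API.

#include <cstdint>
#include <vector>

// Rough analogue of the per-element matching: the predicate must hold for
// every (inner, outer) pair of lane shift amounts before the fold fires.
template <typename Pred>
bool allLanesMatch(const std::vector<uint64_t> &InnerAmts,
                   const std::vector<uint64_t> &OuterAmts, Pred Match) {
  if (InnerAmts.size() != OuterAmts.size())
    return false;
  for (size_t I = 0; I != InnerAmts.size(); ++I)
    if (!Match(InnerAmts[I], OuterAmts[I]))
      return false;
  return true;
}

// Example: with i16 lanes extended to i32, non-uniform inner amounts {1,2,3,4}
// and outer amounts {16,17,18,19} pass the in-range test in every lane, so the
// whole vector can use the merged amounts {17,19,21,23}.
bool exampleInRange() {
  const unsigned InnerBits = 16, OuterBits = 32;
  auto InRange = [&](uint64_t C1, uint64_t C2) {
    return C2 >= OuterBits - InnerBits && C1 + C2 < OuterBits;
  };
  return allLanesMatch({1, 2, 3, 4}, {16, 17, 18, 19}, InRange);
}

The old code only fired when both shift amounts were uniform (isConstOrConstSplat plus N1C), which is why a non-uniform test such as combine_vec_shl_ext_shl2 below did not fold before this change.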

llvm/test/CodeGen/X86/combine-shl.ll

@@ -281,46 +281,44 @@ define <8 x i32> @combine_vec_shl_ext_shl1(<8 x i16> %x) {
   ret <8 x i32> %3
 }
 
-; TODO - this should fold to shl(ext(%x),c).
 define <8 x i32> @combine_vec_shl_ext_shl2(<8 x i16> %x) {
 ; SSE2-LABEL: combine_vec_shl_ext_shl2:
 ; SSE2:       # %bb.0:
-; SSE2-NEXT:    pmullw {{.*}}(%rip), %xmm0
-; SSE2-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
 ; SSE2-NEXT:    psrad $16, %xmm1
-; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
-; SSE2-NEXT:    psrad $16, %xmm0
-; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [65536,131072,262144,524288]
-; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; SSE2-NEXT:    pmuludq %xmm2, %xmm0
-; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; SSE2-NEXT:    pmuludq %xmm3, %xmm2
-; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
-; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [1048576,2097152,4194304,8388608]
-; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
-; SSE2-NEXT:    pmuludq %xmm2, %xmm1
+; SSE2-NEXT:    movdqa {{.*#+}} xmm3 = [131072,524288,2097152,8388608]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm1[1,1,3,3]
+; SSE2-NEXT:    pmuludq %xmm3, %xmm1
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[0,2,2,3]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm3[1,1,3,3]
+; SSE2-NEXT:    pmuludq %xmm4, %xmm1
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; SSE2-NEXT:    pmuludq %xmm3, %xmm2
-; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
-; SSE2-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; SSE2-NEXT:    punpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
+; SSE2-NEXT:    psrad $16, %xmm0
+; SSE2-NEXT:    movdqa {{.*#+}} xmm3 = [33554432,134217728,536870912,2147483648]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
+; SSE2-NEXT:    pmuludq %xmm3, %xmm0
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[0,2,2,3]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm3[1,1,3,3]
+; SSE2-NEXT:    pmuludq %xmm4, %xmm0
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSE2-NEXT:    movdqa %xmm2, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: combine_vec_shl_ext_shl2:
 ; SSE41:       # %bb.0:
-; SSE41-NEXT:    pmullw {{.*}}(%rip), %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE41-NEXT:    pmovsxwd %xmm1, %xmm1
-; SSE41-NEXT:    pmovsxwd %xmm0, %xmm0
-; SSE41-NEXT:    pmulld {{.*}}(%rip), %xmm0
+; SSE41-NEXT:    pmovsxwd %xmm0, %xmm2
+; SSE41-NEXT:    pmulld {{.*}}(%rip), %xmm2
+; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; SSE41-NEXT:    pmovsxwd %xmm0, %xmm1
 ; SSE41-NEXT:    pmulld {{.*}}(%rip), %xmm1
+; SSE41-NEXT:    movdqa %xmm2, %xmm0
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_shl_ext_shl2:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vpmullw {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    vpmovsxwd %xmm0, %ymm0
 ; AVX-NEXT:    vpsllvd {{.*}}(%rip), %ymm0, %ymm0
 ; AVX-NEXT:    retq
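As a rough sanity check on the updated SSE2 CHECK lines above (not part of the test itself): the multiplier vectors are all powers of two, so each half of the result is now shifted by the merged amounts in a single pmuludq-based multiply sequence, instead of the previous inner pmullw followed by a second set of 32-bit multiplies. A small hypothetical helper to decode them:

#include <cstdint>
#include <cstdio>

// Map a power-of-two multiplier back to the shift amount it implements;
// returns -1 for anything that is not a power of two.
int shiftFromMultiplier(uint64_t M) {
  if (M == 0 || (M & (M - 1)) != 0)
    return -1;
  int Shift = 0;
  while ((M >>= 1) != 0)
    ++Shift;
  return Shift;
}

int main() {
  const uint64_t Lo[] = {131072, 524288, 2097152, 8388608};
  const uint64_t Hi[] = {33554432, 134217728, 536870912, 2147483648u};
  for (uint64_t M : Lo)
    std::printf("%d ", shiftFromMultiplier(M)); // prints: 17 19 21 23
  for (uint64_t M : Hi)
    std::printf("%d ", shiftFromMultiplier(M)); // prints: 25 27 29 31
  std::printf("\n");
  return 0;
}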