[DAGCombine] Generalize distributeTruncateThroughAnd to work with any non-opaque constant or constant vector
llvm-svn: 284574
commit b2ca2505cc
parent 99eeab7ff3
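In IR terms, the combine this helper implements now also fires when the AND mask is a constant vector whose lanes differ. A minimal hand-written sketch (not taken from the commit's test files; the function name and mask values are illustrative), matching the shape of the vector shift tests updated below:

; Illustrative only: a shift whose amount is a truncated AND with a
; non-splat constant vector mask. The old isConstOrConstSplat() path
; rejected this; isConstantOrConstantVector() accepts it.
define <4 x i32> @shl_trunc_and_nonsplat(<4 x i32> %x, <4 x i64> %y) {
  %m = and <4 x i64> %y, <i64 15, i64 255, i64 4095, i64 65535>
  %a = trunc <4 x i64> %m to <4 x i32>
  %r = shl <4 x i32> %x, %a
  ret <4 x i32> %r
}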
@@ -4445,19 +4445,15 @@ SDValue DAGCombiner::distributeTruncateThroughAnd(SDNode *N) {
   // (truncate:TruncVT (and N00, N01C)) -> (and (truncate:TruncVT N00), TruncC)
   if (N->hasOneUse() && N->getOperand(0).hasOneUse()) {
     SDValue N01 = N->getOperand(0).getOperand(1);
-
-    if (ConstantSDNode *N01C = isConstOrConstSplat(N01)) {
-      if (!N01C->isOpaque()) {
-        EVT TruncVT = N->getValueType(0);
-        SDValue N00 = N->getOperand(0).getOperand(0);
-        APInt TruncC = N01C->getAPIntValue();
-        TruncC = TruncC.trunc(TruncVT.getScalarSizeInBits());
-        SDLoc DL(N);
-
-        return DAG.getNode(ISD::AND, DL, TruncVT,
-                           DAG.getNode(ISD::TRUNCATE, DL, TruncVT, N00),
-                           DAG.getConstant(TruncC, DL, TruncVT));
-      }
+    if (isConstantOrConstantVector(N01, /* NoOpaques */ true)) {
+      SDLoc DL(N);
+      EVT TruncVT = N->getValueType(0);
+      SDValue N00 = N->getOperand(0).getOperand(0);
+      SDValue Trunc00 = DAG.getNode(ISD::TRUNCATE, DL, TruncVT, N00);
+      SDValue Trunc01 = DAG.getNode(ISD::TRUNCATE, DL, TruncVT, N01);
+      AddToWorklist(Trunc00.getNode());
+      AddToWorklist(Trunc01.getNode());
+      return DAG.getNode(ISD::AND, DL, TruncVT, Trunc00, Trunc01);
     }
   }
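The new path truncates the constant operand as a value instead of funnelling it through a single APInt, which is what lets per-lane vector masks through; the freshly created truncates are added to the worklist so the combiner revisits them. The net effect on the sketch above, written out by hand (illustrative, not compiler output):

; The truncate is distributed through the AND, so the mask is applied
; once at the narrow width instead of at the wide width.
define <4 x i32> @shl_trunc_and_nonsplat_combined(<4 x i32> %x, <4 x i64> %y) {
  %t = trunc <4 x i64> %y to <4 x i32>
  %m = and <4 x i32> %t, <i32 15, i32 255, i32 4095, i32 65535>
  %r = shl <4 x i32> %x, %m
  ret <4 x i32> %r
}

In the x86 checks below this shows up as the two pand instructions on the 256-bit halves being replaced by a single pand on the truncated 128-bit vector, and as vpand on %ymm1 becoming vpand on %xmm1 in the AVX runs.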
@@ -98,11 +98,10 @@ define <4 x i32> @combine_vec_shl_known_zero1(<4 x i32> %x) {
 define <4 x i32> @combine_vec_shl_trunc_and(<4 x i32> %x, <4 x i64> %y) {
 ; SSE-LABEL: combine_vec_shl_trunc_and:
 ; SSE: # BB#0:
-; SSE-NEXT: pand {{.*}}(%rip), %xmm1
-; SSE-NEXT: pand {{.*}}(%rip), %xmm2
 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,0,2]
 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
 ; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
+; SSE-NEXT: pand {{.*}}(%rip), %xmm1
 ; SSE-NEXT: pslld $23, %xmm1
 ; SSE-NEXT: paddd {{.*}}(%rip), %xmm1
 ; SSE-NEXT: cvttps2dq %xmm1, %xmm1
@@ -111,9 +110,9 @@ define <4 x i32> @combine_vec_shl_trunc_and(<4 x i32> %x, <4 x i64> %y) {
 ;
 ; AVX-LABEL: combine_vec_shl_trunc_and:
 ; AVX: # BB#0:
-; AVX-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
 ; AVX-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
 ; AVX-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
 ; AVX-NEXT: vpsllvd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT: vzeroupper
 ; AVX-NEXT: retq
@@ -161,11 +161,10 @@ define <4 x i32> @combine_vec_ashr_ashr2(<4 x i32> %x) {
 define <4 x i32> @combine_vec_ashr_trunc_and(<4 x i32> %x, <4 x i64> %y) {
 ; SSE-LABEL: combine_vec_ashr_trunc_and:
 ; SSE: # BB#0:
-; SSE-NEXT: pand {{.*}}(%rip), %xmm1
-; SSE-NEXT: pand {{.*}}(%rip), %xmm2
 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,0,2]
 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
 ; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
+; SSE-NEXT: pand {{.*}}(%rip), %xmm1
 ; SSE-NEXT: movdqa %xmm1, %xmm2
 ; SSE-NEXT: psrldq {{.*#+}} xmm2 = xmm2[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
 ; SSE-NEXT: movdqa %xmm0, %xmm3
@@ -187,9 +186,9 @@ define <4 x i32> @combine_vec_ashr_trunc_and(<4 x i32> %x, <4 x i64> %y) {
 ;
 ; AVX-LABEL: combine_vec_ashr_trunc_and:
 ; AVX: # BB#0:
-; AVX-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
 ; AVX-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
 ; AVX-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
 ; AVX-NEXT: vpsravd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT: vzeroupper
 ; AVX-NEXT: retq
@@ -507,11 +507,10 @@ declare <4 x i32> @llvm.ctlz.v4i32(<4 x i32>, i1)
 define <4 x i32> @combine_vec_lshr_trunc_and(<4 x i32> %x, <4 x i64> %y) {
 ; SSE-LABEL: combine_vec_lshr_trunc_and:
 ; SSE: # BB#0:
-; SSE-NEXT: pand {{.*}}(%rip), %xmm1
-; SSE-NEXT: pand {{.*}}(%rip), %xmm2
 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,0,2]
 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
 ; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
+; SSE-NEXT: pand {{.*}}(%rip), %xmm1
 ; SSE-NEXT: movdqa %xmm1, %xmm2
 ; SSE-NEXT: psrldq {{.*#+}} xmm2 = xmm2[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
 ; SSE-NEXT: movdqa %xmm0, %xmm3
@@ -533,9 +532,9 @@ define <4 x i32> @combine_vec_lshr_trunc_and(<4 x i32> %x, <4 x i64> %y) {
 ;
 ; AVX-LABEL: combine_vec_lshr_trunc_and:
 ; AVX: # BB#0:
-; AVX-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
 ; AVX-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
 ; AVX-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
 ; AVX-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT: vzeroupper
 ; AVX-NEXT: retq