diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index aa63889a7d79..6a047ac09151 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -4445,19 +4445,15 @@ SDValue DAGCombiner::distributeTruncateThroughAnd(SDNode *N) {
   // (truncate:TruncVT (and N00, N01C)) -> (and (truncate:TruncVT N00), TruncC)
   if (N->hasOneUse() && N->getOperand(0).hasOneUse()) {
     SDValue N01 = N->getOperand(0).getOperand(1);
-
-    if (ConstantSDNode *N01C = isConstOrConstSplat(N01)) {
-      if (!N01C->isOpaque()) {
-        EVT TruncVT = N->getValueType(0);
-        SDValue N00 = N->getOperand(0).getOperand(0);
-        APInt TruncC = N01C->getAPIntValue();
-        TruncC = TruncC.trunc(TruncVT.getScalarSizeInBits());
-        SDLoc DL(N);
-
-        return DAG.getNode(ISD::AND, DL, TruncVT,
-                           DAG.getNode(ISD::TRUNCATE, DL, TruncVT, N00),
-                           DAG.getConstant(TruncC, DL, TruncVT));
-      }
+    if (isConstantOrConstantVector(N01, /* NoOpaques */ true)) {
+      SDLoc DL(N);
+      EVT TruncVT = N->getValueType(0);
+      SDValue N00 = N->getOperand(0).getOperand(0);
+      SDValue Trunc00 = DAG.getNode(ISD::TRUNCATE, DL, TruncVT, N00);
+      SDValue Trunc01 = DAG.getNode(ISD::TRUNCATE, DL, TruncVT, N01);
+      AddToWorklist(Trunc00.getNode());
+      AddToWorklist(Trunc01.getNode());
+      return DAG.getNode(ISD::AND, DL, TruncVT, Trunc00, Trunc01);
     }
   }
 
diff --git a/llvm/test/CodeGen/X86/combine-shl.ll b/llvm/test/CodeGen/X86/combine-shl.ll
index 7d9706075dc7..ff5d4f013a43 100644
--- a/llvm/test/CodeGen/X86/combine-shl.ll
+++ b/llvm/test/CodeGen/X86/combine-shl.ll
@@ -98,11 +98,10 @@ define <4 x i32> @combine_vec_shl_known_zero1(<4 x i32> %x) {
 define <4 x i32> @combine_vec_shl_trunc_and(<4 x i32> %x, <4 x i64> %y) {
 ; SSE-LABEL: combine_vec_shl_trunc_and:
 ; SSE:       # BB#0:
-; SSE-NEXT:    pand {{.*}}(%rip), %xmm1
-; SSE-NEXT:    pand {{.*}}(%rip), %xmm2
 ; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,1,0,2]
 ; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
 ; SSE-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
+; SSE-NEXT:    pand {{.*}}(%rip), %xmm1
 ; SSE-NEXT:    pslld $23, %xmm1
 ; SSE-NEXT:    paddd {{.*}}(%rip), %xmm1
 ; SSE-NEXT:    cvttps2dq %xmm1, %xmm1
@@ -111,9 +110,9 @@ define <4 x i32> @combine_vec_shl_trunc_and(<4 x i32> %x, <4 x i64> %y) {
 ;
 ; AVX-LABEL: combine_vec_shl_trunc_and:
 ; AVX:       # BB#0:
-; AVX-NEXT:    vpand {{.*}}(%rip), %ymm1, %ymm1
 ; AVX-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
 ; AVX-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
 ; AVX-NEXT:    vpsllvd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vzeroupper
 ; AVX-NEXT:    retq
diff --git a/llvm/test/CodeGen/X86/combine-sra.ll b/llvm/test/CodeGen/X86/combine-sra.ll
index 570eb7ef56ef..bfa5b9e98b11 100644
--- a/llvm/test/CodeGen/X86/combine-sra.ll
+++ b/llvm/test/CodeGen/X86/combine-sra.ll
@@ -161,11 +161,10 @@ define <4 x i32> @combine_vec_ashr_ashr2(<4 x i32> %x) {
 define <4 x i32> @combine_vec_ashr_trunc_and(<4 x i32> %x, <4 x i64> %y) {
 ; SSE-LABEL: combine_vec_ashr_trunc_and:
 ; SSE:       # BB#0:
-; SSE-NEXT:    pand {{.*}}(%rip), %xmm1
-; SSE-NEXT:    pand {{.*}}(%rip), %xmm2
 ; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,1,0,2]
 ; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
 ; SSE-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
+; SSE-NEXT:    pand {{.*}}(%rip), %xmm1
 ; SSE-NEXT:    movdqa %xmm1, %xmm2
 ; SSE-NEXT:    psrldq {{.*#+}} xmm2 = xmm2[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
 ; SSE-NEXT:    movdqa %xmm0, %xmm3
@@ -187,9 +186,9 @@ define <4 x i32> @combine_vec_ashr_trunc_and(<4 x i32> %x, <4 x i64> %y) {
 ;
 ; AVX-LABEL: combine_vec_ashr_trunc_and:
 ; AVX:       # BB#0:
-; AVX-NEXT:    vpand {{.*}}(%rip), %ymm1, %ymm1
 ; AVX-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
 ; AVX-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
 ; AVX-NEXT:    vpsravd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vzeroupper
 ; AVX-NEXT:    retq
diff --git a/llvm/test/CodeGen/X86/combine-srl.ll b/llvm/test/CodeGen/X86/combine-srl.ll
index 8dfdb911d76d..2bbe779e38fd 100644
--- a/llvm/test/CodeGen/X86/combine-srl.ll
+++ b/llvm/test/CodeGen/X86/combine-srl.ll
@@ -507,11 +507,10 @@ declare <4 x i32> @llvm.ctlz.v4i32(<4 x i32>, i1)
 define <4 x i32> @combine_vec_lshr_trunc_and(<4 x i32> %x, <4 x i64> %y) {
 ; SSE-LABEL: combine_vec_lshr_trunc_and:
 ; SSE:       # BB#0:
-; SSE-NEXT:    pand {{.*}}(%rip), %xmm1
-; SSE-NEXT:    pand {{.*}}(%rip), %xmm2
 ; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,1,0,2]
 ; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
 ; SSE-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
+; SSE-NEXT:    pand {{.*}}(%rip), %xmm1
 ; SSE-NEXT:    movdqa %xmm1, %xmm2
 ; SSE-NEXT:    psrldq {{.*#+}} xmm2 = xmm2[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
 ; SSE-NEXT:    movdqa %xmm0, %xmm3
@@ -533,9 +532,9 @@ define <4 x i32> @combine_vec_lshr_trunc_and(<4 x i32> %x, <4 x i64> %y) {
 ;
 ; AVX-LABEL: combine_vec_lshr_trunc_and:
 ; AVX:       # BB#0:
-; AVX-NEXT:    vpand {{.*}}(%rip), %ymm1, %ymm1
 ; AVX-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
 ; AVX-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
+; AVX-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
 ; AVX-NEXT:    vpsrlvd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vzeroupper
 ; AVX-NEXT:    retq
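
Summary of the functional change: distributeTruncateThroughAnd() previously bailed out unless the AND mask was a scalar constant or a splat (isConstOrConstSplat), so a non-uniform vector mask blocked the truncate-through-and fold. Switching to isConstantOrConstantVector() lets the TRUNCATE be pushed through any non-opaque constant (or constant vector) mask, which is why the pand/vpand in the updated tests moves after the truncating shuffle sequence and shrinks from a 256-bit %ymm op to a 128-bit %xmm op. A minimal sketch of the kind of IR these tests exercise follows; the function name and signature are taken from the diff context, but the exact mask constants are illustrative assumptions, not copied from the test files:

  define <4 x i32> @combine_vec_shl_trunc_and(<4 x i32> %x, <4 x i64> %y) {
    ; Non-splat constant mask: previously this blocked the fold.
    %mask = and <4 x i64> %y, <i64 15, i64 255, i64 4095, i64 65535>
    ; With the patch, the trunc is distributed through the and, so the
    ; mask is applied after truncation, at the narrower <4 x i32> type.
    %amt = trunc <4 x i64> %mask to <4 x i32>
    %shl = shl <4 x i32> %x, %amt
    ret <4 x i32> %shl
  }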