From 6b915d3353feff1a4004790ac4c233b88d1382e1 Mon Sep 17 00:00:00 2001
From: Simon Pilgrim
Date: Thu, 21 Dec 2017 16:34:46 +0000
Subject: [PATCH] [DAGCombiner] Generalize (or (and X, c1), c2) -> (and (or X, c2), c1|c2) combine to work on non-splat vectors

The knownbits_mask_or_shuffle_uitofp change is interesting - shuffle combines
manage to kick in, removing the AND constant mask load. For targets with
fast-variable-shuffle this should reduce further to VPOR+VPSHUFB+VCVTDQ2PS.

llvm-svn: 321279
---
 llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 20 +++++++++----------
 llvm/test/CodeGen/X86/combine-or.ll           |  4 ++--
 llvm/test/CodeGen/X86/known-bits-vector.ll    | 14 +++++++------
 3 files changed, 20 insertions(+), 18 deletions(-)

diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index e8f76a2bb70e..5a393581ee79 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -4687,16 +4687,16 @@ SDValue DAGCombiner::visitOR(SDNode *N) {
 
   // Canonicalize (or (and X, c1), c2) -> (and (or X, c2), c1|c2)
   // iff (c1 & c2) != 0.
-  if (N1C && N0.getOpcode() == ISD::AND && N0.getNode()->hasOneUse()) {
-    if (ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
-      if (C1->getAPIntValue().intersects(N1C->getAPIntValue())) {
-        if (SDValue COR =
-                DAG.FoldConstantArithmetic(ISD::OR, SDLoc(N1), VT, N1C, C1))
-          return DAG.getNode(
-              ISD::AND, SDLoc(N), VT,
-              DAG.getNode(ISD::OR, SDLoc(N0), VT, N0.getOperand(0), N1), COR);
-        return SDValue();
-      }
+  auto MatchIntersect = [](ConstantSDNode *LHS, ConstantSDNode *RHS) {
+    return LHS->getAPIntValue().intersects(RHS->getAPIntValue());
+  };
+  if (N0.getOpcode() == ISD::AND && N0.getNode()->hasOneUse() &&
+      matchBinaryPredicate(N0.getOperand(1), N1, MatchIntersect)) {
+    if (SDValue COR = DAG.FoldConstantArithmetic(
+            ISD::OR, SDLoc(N1), VT, N1.getNode(), N0.getOperand(1).getNode())) {
+      SDValue IOR = DAG.getNode(ISD::OR, SDLoc(N0), VT, N0.getOperand(0), N1);
+      AddToWorklist(IOR.getNode());
+      return DAG.getNode(ISD::AND, SDLoc(N), VT, COR, IOR);
     }
   }
 
diff --git a/llvm/test/CodeGen/X86/combine-or.ll b/llvm/test/CodeGen/X86/combine-or.ll
index 5ab2e3d25089..65184c4d278e 100644
--- a/llvm/test/CodeGen/X86/combine-or.ll
+++ b/llvm/test/CodeGen/X86/combine-or.ll
@@ -435,8 +435,8 @@ define <4 x i32> @test2f(<4 x i32> %a, <4 x i32> %b) {
 define <2 x i64> @or_and_v2i64(<2 x i64> %a0) {
 ; CHECK-LABEL: or_and_v2i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    andps {{.*}}(%rip), %xmm0
 ; CHECK-NEXT:    orps {{.*}}(%rip), %xmm0
+; CHECK-NEXT:    andps {{.*}}(%rip), %xmm0
 ; CHECK-NEXT:    retq
   %1 = and <2 x i64> %a0,
   %2 = or <2 x i64> %1,
@@ -446,8 +446,8 @@ define <2 x i64> @or_and_v2i64(<2 x i64> %a0) {
 define <4 x i32> @or_and_v4i32(<4 x i32> %a0) {
 ; CHECK-LABEL: or_and_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    andps {{.*}}(%rip), %xmm0
 ; CHECK-NEXT:    orps {{.*}}(%rip), %xmm0
+; CHECK-NEXT:    andps {{.*}}(%rip), %xmm0
 ; CHECK-NEXT:    retq
   %1 = and <4 x i32> %a0,
   %2 = or <4 x i32> %1,
diff --git a/llvm/test/CodeGen/X86/known-bits-vector.ll b/llvm/test/CodeGen/X86/known-bits-vector.ll
index 283d1f93dfb6..46a888f3b9b6 100644
--- a/llvm/test/CodeGen/X86/known-bits-vector.ll
+++ b/llvm/test/CodeGen/X86/known-bits-vector.ll
@@ -160,17 +160,19 @@ define <4 x float> @knownbits_mask_shuffle_uitofp(<4 x i32> %a0) nounwind {
 define <4 x float> @knownbits_mask_or_shuffle_uitofp(<4 x i32> %a0) nounwind {
 ; X32-LABEL: knownbits_mask_or_shuffle_uitofp:
 ; X32:       # %bb.0:
-; X32-NEXT:    vandps {{\.LCPI.*}}, %xmm0, %xmm0
-; X32-NEXT:    vorps {{\.LCPI.*}}, %xmm0, %xmm0
-; X32-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[2,2,3,3]
+; X32-NEXT:    vpor {{\.LCPI.*}}, %xmm0, %xmm0
+; X32-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; X32-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,6,6]
+; X32-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
 ; X32-NEXT:    vcvtdq2ps %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: knownbits_mask_or_shuffle_uitofp:
 ; X64:       # %bb.0:
-; X64-NEXT:    vandps {{.*}}(%rip), %xmm0, %xmm0
-; X64-NEXT:    vorps {{.*}}(%rip), %xmm0, %xmm0
-; X64-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[2,2,3,3]
+; X64-NEXT:    vpor {{.*}}(%rip), %xmm0, %xmm0
+; X64-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; X64-NEXT:    vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,6,6]
+; X64-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
 ; X64-NEXT:    vcvtdq2ps %xmm0, %xmm0
 ; X64-NEXT:    retq
   %1 = and <4 x i32> %a0,
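
As a sanity check of the canonicalization above, the following standalone C++ sketch (not part of the patch; the per-lane constants are made up purely for illustration) verifies element-wise that (x & c1) | c2 == (x | c2) & (c1 | c2), including for non-splat c1/c2, and reports whether each lane passes the c1 & c2 != 0 intersection test that the MatchIntersect predicate requires before the combine fires:

// Standalone sketch (not LLVM code): checks the bitwise identity behind the
// canonicalization, per vector lane, using made-up non-splat constants.
#include <cstdint>
#include <cstdio>

int main() {
  // Hypothetical per-lane constants; non-splat, and every lane intersects.
  const uint32_t C1[4] = {0x000000FFu, 0x0000FF00u, 0x00FF0000u, 0xFF000000u}; // AND mask c1
  const uint32_t C2[4] = {0x00000081u, 0x00008100u, 0x00810000u, 0x81000000u}; // OR constant c2

  for (int Lane = 0; Lane < 4; ++Lane) {
    // Mirrors the patch's MatchIntersect: the combine only canonicalizes when
    // the constants share at least one set bit in every lane.
    std::printf("lane %d: c1 & c2 %s 0\n", Lane,
                (C1[Lane] & C2[Lane]) != 0 ? "!=" : "==");

    // Sampled check of (x & c1) | c2 == (x | c2) & (c1 | c2).
    for (uint32_t X = 0; X < 0x10000u; ++X) {
      uint32_t Before = (X & C1[Lane]) | C2[Lane];              // (or (and X, c1), c2)
      uint32_t After  = (X | C2[Lane]) & (C1[Lane] | C2[Lane]); // (and (or X, c2), c1|c2)
      if (Before != After) {
        std::printf("mismatch: lane %d, x=%u\n", Lane, (unsigned)X);
        return 1;
      }
    }
  }
  std::printf("identity holds on all sampled inputs\n");
  return 0;
}

The rewritten form applies the OR constant before the AND mask, which is why the updated combine-or.ll checks now expect orps ahead of andps; in knownbits_mask_or_shuffle_uitofp the trailing AND mask load disappears entirely once known-bits and shuffle combines run on the canonicalized DAG.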