[InstCombine] Simplify 'add' to 'or' if no common bits are set.

Summary:
In order to get the whole fold as specified in [[ https://bugs.llvm.org/show_bug.cgi?id=6773 | PR6773 ]],
let's first handle the simple straightforward things.
Let's start with the `add` -> `or` simplification.

The one obvious thing missing here: the constant mask is not handled.
I have an idea how to handle it, but it will require some thinking,
and is not strictly required here, so i've left that for later.

https://rise4fun.com/Alive/Pkmg

Reviewers: spatel, craig.topper, eli.friedman, jingyue

Reviewed By: spatel

Subscribers: llvm-commits

Differential Revision: https://reviews.llvm.org/D45631

llvm-svn: 330101
This commit is contained in:
Roman Lebedev 2018-04-15 18:59:33 +00:00
parent 25cbb62d18
commit 620b3da38f
2 changed files with 19 additions and 11 deletions

View File

@ -190,6 +190,14 @@ bool llvm::haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
"LHS and RHS should have the same type");
assert(LHS->getType()->isIntOrIntVectorTy() &&
"LHS and RHS should be integers");
// Look for an inverted mask: (X & ~M) op (Y & M).
Value *M;
if (match(LHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
match(RHS, m_c_And(m_Specific(M), m_Value())))
return true;
if (match(RHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
match(LHS, m_c_And(m_Specific(M), m_Value())))
return true;
IntegerType *IT = cast<IntegerType>(LHS->getType()->getScalarType());
KnownBits LHSKnown(IT->getBitWidth());
KnownBits RHSKnown(IT->getBitWidth());

View File

@ -21,7 +21,7 @@ define i32 @p(i32 %x, i32 %y, i32 %m) {
; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], [[M:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1
; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NEG]], [[Y:%.*]]
; CHECK-NEXT: [[RET:%.*]] = add i32 [[AND]], [[AND1]]
; CHECK-NEXT: [[RET:%.*]] = or i32 [[AND]], [[AND1]]
; CHECK-NEXT: ret i32 [[RET]]
;
%and = and i32 %x, %m
@ -36,7 +36,7 @@ define <2 x i32> @p_splatvec(<2 x i32> %x, <2 x i32> %y, <2 x i32> %m) {
; CHECK-NEXT: [[AND:%.*]] = and <2 x i32> [[X:%.*]], [[M:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor <2 x i32> [[M]], <i32 -1, i32 -1>
; CHECK-NEXT: [[AND1:%.*]] = and <2 x i32> [[NEG]], [[Y:%.*]]
; CHECK-NEXT: [[RET:%.*]] = add <2 x i32> [[AND]], [[AND1]]
; CHECK-NEXT: [[RET:%.*]] = or <2 x i32> [[AND]], [[AND1]]
; CHECK-NEXT: ret <2 x i32> [[RET]]
;
%and = and <2 x i32> %x, %m
@ -51,7 +51,7 @@ define <3 x i32> @p_vec_undef(<3 x i32> %x, <3 x i32> %y, <3 x i32> %m) {
; CHECK-NEXT: [[AND:%.*]] = and <3 x i32> [[X:%.*]], [[M:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor <3 x i32> [[M]], <i32 -1, i32 undef, i32 -1>
; CHECK-NEXT: [[AND1:%.*]] = and <3 x i32> [[NEG]], [[Y:%.*]]
; CHECK-NEXT: [[RET:%.*]] = add <3 x i32> [[AND]], [[AND1]]
; CHECK-NEXT: [[RET:%.*]] = or <3 x i32> [[AND]], [[AND1]]
; CHECK-NEXT: ret <3 x i32> [[RET]]
;
%and = and <3 x i32> %x, %m
@ -129,7 +129,7 @@ define i32 @p_commutative0(i32 %x, i32 %y, i32 %m) {
; CHECK-NEXT: [[AND:%.*]] = and i32 [[M:%.*]], [[X:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1
; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NEG]], [[Y:%.*]]
; CHECK-NEXT: [[RET:%.*]] = add i32 [[AND]], [[AND1]]
; CHECK-NEXT: [[RET:%.*]] = or i32 [[AND]], [[AND1]]
; CHECK-NEXT: ret i32 [[RET]]
;
%and = and i32 %m, %x ; swapped order
@ -145,7 +145,7 @@ define i32 @p_commutative1(i32 %x, i32 %m) {
; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], [[M:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1
; CHECK-NEXT: [[AND1:%.*]] = and i32 [[Y]], [[NEG]]
; CHECK-NEXT: [[RET:%.*]] = add i32 [[AND]], [[AND1]]
; CHECK-NEXT: [[RET:%.*]] = or i32 [[AND]], [[AND1]]
; CHECK-NEXT: ret i32 [[RET]]
;
%y = call i32 @gen32()
@ -161,7 +161,7 @@ define i32 @p_commutative2(i32 %x, i32 %y, i32 %m) {
; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], [[M:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1
; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NEG]], [[Y:%.*]]
; CHECK-NEXT: [[RET:%.*]] = add i32 [[AND1]], [[AND]]
; CHECK-NEXT: [[RET:%.*]] = or i32 [[AND1]], [[AND]]
; CHECK-NEXT: ret i32 [[RET]]
;
%and = and i32 %x, %m
@ -177,7 +177,7 @@ define i32 @p_commutative3(i32 %x, i32 %m) {
; CHECK-NEXT: [[AND:%.*]] = and i32 [[M:%.*]], [[X:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1
; CHECK-NEXT: [[AND1:%.*]] = and i32 [[Y]], [[NEG]]
; CHECK-NEXT: [[RET:%.*]] = add i32 [[AND]], [[AND1]]
; CHECK-NEXT: [[RET:%.*]] = or i32 [[AND]], [[AND1]]
; CHECK-NEXT: ret i32 [[RET]]
;
%y = call i32 @gen32()
@ -193,7 +193,7 @@ define i32 @p_commutative4(i32 %x, i32 %y, i32 %m) {
; CHECK-NEXT: [[AND:%.*]] = and i32 [[M:%.*]], [[X:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1
; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NEG]], [[Y:%.*]]
; CHECK-NEXT: [[RET:%.*]] = add i32 [[AND1]], [[AND]]
; CHECK-NEXT: [[RET:%.*]] = or i32 [[AND1]], [[AND]]
; CHECK-NEXT: ret i32 [[RET]]
;
%and = and i32 %m, %x ; swapped order
@ -209,7 +209,7 @@ define i32 @p_commutative5(i32 %x, i32 %m) {
; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], [[M:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1
; CHECK-NEXT: [[AND1:%.*]] = and i32 [[Y]], [[NEG]]
; CHECK-NEXT: [[RET:%.*]] = add i32 [[AND1]], [[AND]]
; CHECK-NEXT: [[RET:%.*]] = or i32 [[AND1]], [[AND]]
; CHECK-NEXT: ret i32 [[RET]]
;
%y = call i32 @gen32()
@ -226,7 +226,7 @@ define i32 @p_commutative6(i32 %x, i32 %m) {
; CHECK-NEXT: [[AND:%.*]] = and i32 [[M:%.*]], [[X:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1
; CHECK-NEXT: [[AND1:%.*]] = and i32 [[Y]], [[NEG]]
; CHECK-NEXT: [[RET:%.*]] = add i32 [[AND1]], [[AND]]
; CHECK-NEXT: [[RET:%.*]] = or i32 [[AND1]], [[AND]]
; CHECK-NEXT: ret i32 [[RET]]
;
%y = call i32 @gen32()
@ -263,7 +263,7 @@ define i32 @n0_oneuse(i32 %x, i32 %y, i32 %m) {
; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], [[M:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1
; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NEG]], [[Y:%.*]]
; CHECK-NEXT: [[RET:%.*]] = add i32 [[AND]], [[AND1]]
; CHECK-NEXT: [[RET:%.*]] = or i32 [[AND]], [[AND1]]
; CHECK-NEXT: call void @use32(i32 [[AND]])
; CHECK-NEXT: call void @use32(i32 [[NEG]])
; CHECK-NEXT: call void @use32(i32 [[AND1]])