[InstCombine] revert r300977 and r301021

This can cause an inf-loop. Investigating...

llvm-svn: 301035
Sanjay Patel 2017-04-21 20:29:17 +00:00
parent 11506a9d1c
commit 8ce1d4cbe1
3 changed files with 5 additions and 32 deletions

@@ -277,20 +277,10 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
       return InsertNewInstWith(And, *I);
     }
 
-    // If the RHS is a constant, see if we can change it. Don't alter a -1
-    // constant because that's a canonical 'not' op, and that is better for
-    // combining, SCEV, and codegen.
-    const APInt *C;
-    if (match(I->getOperand(1), m_APInt(C)) && !C->isAllOnesValue()) {
-      if (DemandedMask.isSubsetOf(*C)) {
-        // Force bits to 1 to create a 'not' op.
-        I->setOperand(1, ConstantInt::getAllOnesValue(VTy));
-        return I;
-      }
-      // If we can't turn this into a 'not', try to shrink the constant.
-      if (ShrinkDemandedConstant(I, 1, DemandedMask))
-        return I;
-    }
+    // If the RHS is a constant, see if we can simplify it.
+    // FIXME: for XOR, we prefer to force bits to 1 if they will make a -1.
+    if (ShrinkDemandedConstant(I, 1, DemandedMask))
+      return I;
 
     // If our LHS is an 'and' and if it has one use, and if any of the bits we
     // are flipping are known to be set, then the xor is just resetting those
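
For context, a minimal IR sketch of the two competing rewrites touched here (the function name and constants below are hypothetical, not taken from this commit): the reverted code pinned an xor constant to -1 whenever the demanded bits were a subset of it, while the restored ShrinkDemandedConstant path instead clears the undemanded bits of the constant.

; Hypothetical example, not from this commit. Only the low 4 bits of %a are
; demanded by the 'and' below.
define i32 @demanded_xor_sketch(i32 %x) {
  %a = xor i32 %x, 255
  %b = and i32 %a, 15
  ret i32 %b
}
; Reverted canonicalization: %a would become 'xor i32 %x, -1' (a 'not'),
;                            since the demanded mask 15 is a subset of 255.
; Restored behavior:         %a becomes 'xor i32 %x, 15' (constant shrunk to
;                            the demanded bits).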

@@ -536,20 +536,3 @@ define i32 @test38(i32 %A, i32 %B) {
   %xor = xor i32 %and, %B
   ret i32 %xor
 }
-
-; PR32706 - https://bugs.llvm.org/show_bug.cgi?id=32706
-; Pin an xor constant operand to -1 if possible because 'not' is better for SCEV and codegen.
-define i32 @not_is_canonical(i32 %x, i32 %y) {
-; CHECK-LABEL: @not_is_canonical(
-; CHECK-NEXT:    [[SUB:%.*]] = xor i32 %x, -1
-; CHECK-NEXT:    [[ADD:%.*]] = add i32 [[SUB]], %y
-; CHECK-NEXT:    [[MUL:%.*]] = shl i32 [[ADD]], 2
-; CHECK-NEXT:    ret i32 [[MUL]]
-;
-  %sub = xor i32 %x, 1073741823
-  %add = add i32 %sub, %y
-  %mul = shl i32 %add, 2
-  ret i32 %mul
-}

@@ -35,7 +35,7 @@ define <2 x i64> @test3(<2 x i64> %A) {
 
 define <2 x i64> @test4(<2 x i64> %A) {
 ; CHECK-LABEL: @test4(
-; CHECK-NEXT:    [[TMP1:%.*]] = xor <2 x i64> %A, <i64 -1, i64 -1>
+; CHECK-NEXT:    [[TMP1:%.*]] = xor <2 x i64> %A, <i64 63, i64 63>
 ; CHECK-NEXT:    [[XOR:%.*]] = and <2 x i64> [[TMP1]], <i64 23, i64 42>
 ; CHECK-NEXT:    ret <2 x i64> [[XOR]]
 ;
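
The updated CHECK line above follows from the same shrink-versus-pin difference. As a rough sketch (the exact test4 body is not shown in this hunk), the bits demanded from the xor come from the 'and' mask, whose two lanes union to 63, so the restored code shrinks the xor constant rather than pinning it to -1:

;   23 = 0b010111
;   42 = 0b101010
;  any = 0b111111 = 63   ; bits demanded from [[TMP1]] across both lanes
; ShrinkDemandedConstant therefore reduces the xor constant to <i64 63, i64 63>,
; where the reverted canonicalization produced <i64 -1, i64 -1>.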