function names start with a lowercase letter; NFC
llvm-svn: 259425
commit 4b198802b3
parent 48c8426fa2
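The whole patch is one mechanical rename: the InstCombine helpers spelled ReplaceInstUsesWith and EraseInstFromFunction become replaceInstUsesWith and eraseInstFromFunction, so that function names start with a lowercase letter; behavior is unchanged (NFC). The snippet below is a minimal illustrative sketch of what a call site looks like after the rename — visitExample is a made-up function used only for illustration and is not part of the patch.

// Illustrative sketch only; visitExample is hypothetical. The rename changes
// the spelling of the helpers, not what they do: replaceInstUsesWith replaces
// all uses of an instruction with a new value, and eraseInstFromFunction
// removes a dead instruction from its function.
Instruction *InstCombiner::visitExample(BinaryOperator &I) {
  if (Value *V = SimplifyVectorOp(I))
    return replaceInstUsesWith(I, V);  // was: ReplaceInstUsesWith(I, V)

  if (I.use_empty())
    return eraseInstFromFunction(I);   // was: EraseInstFromFunction(I)

  return nullptr;
}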
@@ -1052,15 +1052,15 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
   Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
 
   if (Value *V = SimplifyVectorOp(I))
-    return ReplaceInstUsesWith(I, V);
+    return replaceInstUsesWith(I, V);
 
   if (Value *V = SimplifyAddInst(LHS, RHS, I.hasNoSignedWrap(),
                                  I.hasNoUnsignedWrap(), DL, TLI, DT, AC))
-    return ReplaceInstUsesWith(I, V);
+    return replaceInstUsesWith(I, V);
 
   // (A*B)+(A*C) -> A*(B+C) etc
   if (Value *V = SimplifyUsingDistributiveLaws(I))
-    return ReplaceInstUsesWith(I, V);
+    return replaceInstUsesWith(I, V);
 
   if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
     // X + (signbit) --> X ^ signbit
@@ -1157,7 +1157,7 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
     return BinaryOperator::CreateSub(LHS, V);
 
   if (Value *V = checkForNegativeOperand(I, Builder))
-    return ReplaceInstUsesWith(I, V);
+    return replaceInstUsesWith(I, V);
 
   // A+B --> A|B iff A and B have no bits set in common.
   if (haveNoCommonBitsSet(LHS, RHS, DL, AC, &I, DT))
@@ -1317,11 +1317,11 @@ Instruction *InstCombiner::visitFAdd(BinaryOperator &I) {
   Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
 
   if (Value *V = SimplifyVectorOp(I))
-    return ReplaceInstUsesWith(I, V);
+    return replaceInstUsesWith(I, V);
 
   if (Value *V =
           SimplifyFAddInst(LHS, RHS, I.getFastMathFlags(), DL, TLI, DT, AC))
-    return ReplaceInstUsesWith(I, V);
+    return replaceInstUsesWith(I, V);
 
   if (isa<Constant>(RHS)) {
     if (isa<PHINode>(LHS))
@@ -1415,7 +1415,7 @@ Instruction *InstCombiner::visitFAdd(BinaryOperator &I) {
 
   if (I.hasUnsafeAlgebra()) {
     if (Value *V = FAddCombine(Builder).simplify(&I))
-      return ReplaceInstUsesWith(I, V);
+      return replaceInstUsesWith(I, V);
   }
 
   return Changed ? &I : nullptr;
@@ -1493,15 +1493,15 @@ Instruction *InstCombiner::visitSub(BinaryOperator &I) {
   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
 
   if (Value *V = SimplifyVectorOp(I))
-    return ReplaceInstUsesWith(I, V);
+    return replaceInstUsesWith(I, V);
 
   if (Value *V = SimplifySubInst(Op0, Op1, I.hasNoSignedWrap(),
                                  I.hasNoUnsignedWrap(), DL, TLI, DT, AC))
-    return ReplaceInstUsesWith(I, V);
+    return replaceInstUsesWith(I, V);
 
   // (A*B)-(A*C) -> A*(B-C) etc
   if (Value *V = SimplifyUsingDistributiveLaws(I))
-    return ReplaceInstUsesWith(I, V);
+    return replaceInstUsesWith(I, V);
 
   // If this is a 'B = x-(-A)', change to B = x+A.
   if (Value *V = dyn_castNegVal(Op1)) {
@@ -1667,13 +1667,13 @@ Instruction *InstCombiner::visitSub(BinaryOperator &I) {
     if (match(Op0, m_PtrToInt(m_Value(LHSOp))) &&
         match(Op1, m_PtrToInt(m_Value(RHSOp))))
       if (Value *Res = OptimizePointerDifference(LHSOp, RHSOp, I.getType()))
-        return ReplaceInstUsesWith(I, Res);
+        return replaceInstUsesWith(I, Res);
 
     // trunc(p)-trunc(q) -> trunc(p-q)
     if (match(Op0, m_Trunc(m_PtrToInt(m_Value(LHSOp)))) &&
        match(Op1, m_Trunc(m_PtrToInt(m_Value(RHSOp)))))
      if (Value *Res = OptimizePointerDifference(LHSOp, RHSOp, I.getType()))
-        return ReplaceInstUsesWith(I, Res);
+        return replaceInstUsesWith(I, Res);
 
   bool Changed = false;
   if (!I.hasNoSignedWrap() && WillNotOverflowSignedSub(Op0, Op1, I)) {
@@ -1692,11 +1692,11 @@ Instruction *InstCombiner::visitFSub(BinaryOperator &I) {
   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
 
   if (Value *V = SimplifyVectorOp(I))
-    return ReplaceInstUsesWith(I, V);
+    return replaceInstUsesWith(I, V);
 
   if (Value *V =
           SimplifyFSubInst(Op0, Op1, I.getFastMathFlags(), DL, TLI, DT, AC))
-    return ReplaceInstUsesWith(I, V);
+    return replaceInstUsesWith(I, V);
 
   // fsub nsz 0, X ==> fsub nsz -0.0, X
   if (I.getFastMathFlags().noSignedZeros() && match(Op0, m_Zero())) {
@@ -1736,7 +1736,7 @@ Instruction *InstCombiner::visitFSub(BinaryOperator &I) {
 
   if (I.hasUnsafeAlgebra()) {
     if (Value *V = FAddCombine(Builder).simplify(&I))
-      return ReplaceInstUsesWith(I, V);
+      return replaceInstUsesWith(I, V);
   }
 
   return nullptr;
@@ -243,7 +243,7 @@ Instruction *InstCombiner::OptAndOp(Instruction *Op,
 
       if (CI->getValue() == ShlMask)
         // Masking out bits that the shift already masks.
-        return ReplaceInstUsesWith(TheAnd, Op); // No need for the and.
+        return replaceInstUsesWith(TheAnd, Op); // No need for the and.
 
       if (CI != AndRHS) { // Reducing bits set in and.
         TheAnd.setOperand(1, CI);
@@ -263,7 +263,7 @@ Instruction *InstCombiner::OptAndOp(Instruction *Op,
 
       if (CI->getValue() == ShrMask)
         // Masking out bits that the shift already masks.
-        return ReplaceInstUsesWith(TheAnd, Op);
+        return replaceInstUsesWith(TheAnd, Op);
 
       if (CI != AndRHS) {
         TheAnd.setOperand(1, CI); // Reduce bits set in and cst.
@@ -1248,14 +1248,14 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
 
   if (Value *V = SimplifyVectorOp(I))
-    return ReplaceInstUsesWith(I, V);
+    return replaceInstUsesWith(I, V);
 
   if (Value *V = SimplifyAndInst(Op0, Op1, DL, TLI, DT, AC))
-    return ReplaceInstUsesWith(I, V);
+    return replaceInstUsesWith(I, V);
 
   // (A|B)&(A|C) -> A|(B&C) etc
   if (Value *V = SimplifyUsingDistributiveLaws(I))
-    return ReplaceInstUsesWith(I, V);
+    return replaceInstUsesWith(I, V);
 
   // See if we can simplify any instructions used by the instruction whose sole
   // purpose is to compute bits we don't care about.
@@ -1263,7 +1263,7 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
     return &I;
 
   if (Value *V = SimplifyBSwap(I))
-    return ReplaceInstUsesWith(I, V);
+    return replaceInstUsesWith(I, V);
 
   if (ConstantInt *AndRHS = dyn_cast<ConstantInt>(Op1)) {
     const APInt &AndRHSMask = AndRHS->getValue();
@@ -1451,7 +1451,7 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
     ICmpInst *RHS = dyn_cast<ICmpInst>(Op1);
     if (LHS && RHS)
       if (Value *Res = FoldAndOfICmps(LHS, RHS))
-        return ReplaceInstUsesWith(I, Res);
+        return replaceInstUsesWith(I, Res);
 
     // TODO: Make this recursive; it's a little tricky because an arbitrary
     // number of 'and' instructions might have to be created.
@@ -1459,18 +1459,18 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
     if (LHS && match(Op1, m_OneUse(m_And(m_Value(X), m_Value(Y))))) {
       if (auto *Cmp = dyn_cast<ICmpInst>(X))
         if (Value *Res = FoldAndOfICmps(LHS, Cmp))
-          return ReplaceInstUsesWith(I, Builder->CreateAnd(Res, Y));
+          return replaceInstUsesWith(I, Builder->CreateAnd(Res, Y));
       if (auto *Cmp = dyn_cast<ICmpInst>(Y))
         if (Value *Res = FoldAndOfICmps(LHS, Cmp))
-          return ReplaceInstUsesWith(I, Builder->CreateAnd(Res, X));
+          return replaceInstUsesWith(I, Builder->CreateAnd(Res, X));
     }
     if (RHS && match(Op0, m_OneUse(m_And(m_Value(X), m_Value(Y))))) {
       if (auto *Cmp = dyn_cast<ICmpInst>(X))
         if (Value *Res = FoldAndOfICmps(Cmp, RHS))
-          return ReplaceInstUsesWith(I, Builder->CreateAnd(Res, Y));
+          return replaceInstUsesWith(I, Builder->CreateAnd(Res, Y));
       if (auto *Cmp = dyn_cast<ICmpInst>(Y))
        if (Value *Res = FoldAndOfICmps(Cmp, RHS))
-          return ReplaceInstUsesWith(I, Builder->CreateAnd(Res, X));
+          return replaceInstUsesWith(I, Builder->CreateAnd(Res, X));
     }
   }
 
@@ -1478,7 +1478,7 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
   if (FCmpInst *LHS = dyn_cast<FCmpInst>(I.getOperand(0)))
     if (FCmpInst *RHS = dyn_cast<FCmpInst>(I.getOperand(1)))
       if (Value *Res = FoldAndOfFCmps(LHS, RHS))
-        return ReplaceInstUsesWith(I, Res);
+        return replaceInstUsesWith(I, Res);
 
 
   if (CastInst *Op0C = dyn_cast<CastInst>(Op0)) {
@@ -2055,14 +2055,14 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
 
   if (Value *V = SimplifyVectorOp(I))
-    return ReplaceInstUsesWith(I, V);
+    return replaceInstUsesWith(I, V);
 
   if (Value *V = SimplifyOrInst(Op0, Op1, DL, TLI, DT, AC))
-    return ReplaceInstUsesWith(I, V);
+    return replaceInstUsesWith(I, V);
 
   // (A&B)|(A&C) -> A&(B|C) etc
   if (Value *V = SimplifyUsingDistributiveLaws(I))
-    return ReplaceInstUsesWith(I, V);
+    return replaceInstUsesWith(I, V);
 
   // See if we can simplify any instructions used by the instruction whose sole
   // purpose is to compute bits we don't care about.
@@ -2070,7 +2070,7 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
     return &I;
 
   if (Value *V = SimplifyBSwap(I))
-    return ReplaceInstUsesWith(I, V);
+    return replaceInstUsesWith(I, V);
 
   if (ConstantInt *RHS = dyn_cast<ConstantInt>(Op1)) {
     ConstantInt *C1 = nullptr; Value *X = nullptr;
@@ -2335,7 +2335,7 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
     ICmpInst *RHS = dyn_cast<ICmpInst>(Op1);
     if (LHS && RHS)
       if (Value *Res = FoldOrOfICmps(LHS, RHS, &I))
-        return ReplaceInstUsesWith(I, Res);
+        return replaceInstUsesWith(I, Res);
 
     // TODO: Make this recursive; it's a little tricky because an arbitrary
     // number of 'or' instructions might have to be created.
@@ -2343,18 +2343,18 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
     if (LHS && match(Op1, m_OneUse(m_Or(m_Value(X), m_Value(Y))))) {
       if (auto *Cmp = dyn_cast<ICmpInst>(X))
         if (Value *Res = FoldOrOfICmps(LHS, Cmp, &I))
-          return ReplaceInstUsesWith(I, Builder->CreateOr(Res, Y));
+          return replaceInstUsesWith(I, Builder->CreateOr(Res, Y));
       if (auto *Cmp = dyn_cast<ICmpInst>(Y))
         if (Value *Res = FoldOrOfICmps(LHS, Cmp, &I))
-          return ReplaceInstUsesWith(I, Builder->CreateOr(Res, X));
+          return replaceInstUsesWith(I, Builder->CreateOr(Res, X));
     }
     if (RHS && match(Op0, m_OneUse(m_Or(m_Value(X), m_Value(Y))))) {
       if (auto *Cmp = dyn_cast<ICmpInst>(X))
         if (Value *Res = FoldOrOfICmps(Cmp, RHS, &I))
-          return ReplaceInstUsesWith(I, Builder->CreateOr(Res, Y));
+          return replaceInstUsesWith(I, Builder->CreateOr(Res, Y));
       if (auto *Cmp = dyn_cast<ICmpInst>(Y))
         if (Value *Res = FoldOrOfICmps(Cmp, RHS, &I))
-          return ReplaceInstUsesWith(I, Builder->CreateOr(Res, X));
+          return replaceInstUsesWith(I, Builder->CreateOr(Res, X));
     }
   }
 
@@ -2362,7 +2362,7 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
   if (FCmpInst *LHS = dyn_cast<FCmpInst>(I.getOperand(0)))
     if (FCmpInst *RHS = dyn_cast<FCmpInst>(I.getOperand(1)))
       if (Value *Res = FoldOrOfFCmps(LHS, RHS))
-        return ReplaceInstUsesWith(I, Res);
+        return replaceInstUsesWith(I, Res);
 
   // fold (or (cast A), (cast B)) -> (cast (or A, B))
   if (CastInst *Op0C = dyn_cast<CastInst>(Op0)) {
@@ -2440,14 +2440,14 @@ Instruction *InstCombiner::visitXor(BinaryOperator &I) {
   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
 
   if (Value *V = SimplifyVectorOp(I))
-    return ReplaceInstUsesWith(I, V);
+    return replaceInstUsesWith(I, V);
 
   if (Value *V = SimplifyXorInst(Op0, Op1, DL, TLI, DT, AC))
-    return ReplaceInstUsesWith(I, V);
+    return replaceInstUsesWith(I, V);
 
   // (A&B)^(A&C) -> A&(B^C) etc
   if (Value *V = SimplifyUsingDistributiveLaws(I))
-    return ReplaceInstUsesWith(I, V);
+    return replaceInstUsesWith(I, V);
 
   // See if we can simplify any instructions used by the instruction whose sole
   // purpose is to compute bits we don't care about.
@@ -2455,7 +2455,7 @@ Instruction *InstCombiner::visitXor(BinaryOperator &I) {
     return &I;
 
   if (Value *V = SimplifyBSwap(I))
-    return ReplaceInstUsesWith(I, V);
+    return replaceInstUsesWith(I, V);
 
   // Is this a ~ operation?
   if (Value *NotOp = dyn_castNotVal(&I)) {
@@ -2724,7 +2724,7 @@ Instruction *InstCombiner::visitXor(BinaryOperator &I) {
           Value *Op0 = LHS->getOperand(0), *Op1 = LHS->getOperand(1);
           unsigned Code = getICmpCode(LHS) ^ getICmpCode(RHS);
           bool isSigned = LHS->isSigned() || RHS->isSigned();
-          return ReplaceInstUsesWith(I,
+          return replaceInstUsesWith(I,
                                      getNewICmpValue(isSigned, Code, Op0, Op1,
                                                      Builder));
         }
@@ -780,7 +780,7 @@ static Instruction *simplifyMaskedStore(IntrinsicInst &II, InstCombiner &IC) {
 
   // If the mask is all zeros, this instruction does nothing.
   if (ConstMask->isNullValue())
-    return IC.EraseInstFromFunction(II);
+    return IC.eraseInstFromFunction(II);
 
   // If the mask is all ones, this is a plain vector store of the 1st argument.
   if (ConstMask->isAllOnesValue()) {
@@ -796,7 +796,7 @@ static Instruction *simplifyMaskedGather(IntrinsicInst &II, InstCombiner &IC) {
   // If the mask is all zeros, return the "passthru" argument of the gather.
   auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(2));
   if (ConstMask && ConstMask->isNullValue())
-    return IC.ReplaceInstUsesWith(II, II.getArgOperand(3));
+    return IC.replaceInstUsesWith(II, II.getArgOperand(3));
 
   return nullptr;
 }
@@ -805,7 +805,7 @@ static Instruction *simplifyMaskedScatter(IntrinsicInst &II, InstCombiner &IC) {
   // If the mask is all zeros, a scatter does nothing.
   auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(3));
   if (ConstMask && ConstMask->isNullValue())
-    return IC.EraseInstFromFunction(II);
+    return IC.eraseInstFromFunction(II);
 
   return nullptr;
 }
@@ -817,7 +817,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
   auto Args = CI.arg_operands();
   if (Value *V = SimplifyCall(CI.getCalledValue(), Args.begin(), Args.end(), DL,
                               TLI, DT, AC))
-    return ReplaceInstUsesWith(CI, V);
+    return replaceInstUsesWith(CI, V);
 
   if (isFreeCall(&CI, TLI))
     return visitFree(CI);
@@ -841,7 +841,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
     // memmove/cpy/set of zero bytes is a noop.
     if (Constant *NumBytes = dyn_cast<Constant>(MI->getLength())) {
       if (NumBytes->isNullValue())
-        return EraseInstFromFunction(CI);
+        return eraseInstFromFunction(CI);
 
       if (ConstantInt *CI = dyn_cast<ConstantInt>(NumBytes))
         if (CI->getZExtValue() == 1) {
@@ -874,7 +874,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
     if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
       // memmove(x,x,size) -> noop.
       if (MTI->getSource() == MTI->getDest())
-        return EraseInstFromFunction(CI);
+        return eraseInstFromFunction(CI);
     }
 
     // If we can determine a pointer alignment that is bigger than currently
@@ -902,7 +902,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
   case Intrinsic::objectsize: {
     uint64_t Size;
     if (getObjectSize(II->getArgOperand(0), Size, DL, TLI))
-      return ReplaceInstUsesWith(CI, ConstantInt::get(CI.getType(), Size));
+      return replaceInstUsesWith(CI, ConstantInt::get(CI.getType(), Size));
     return nullptr;
   }
   case Intrinsic::bswap: {
@@ -911,7 +911,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
 
     // bswap(bswap(x)) -> x
     if (match(IIOperand, m_BSwap(m_Value(X))))
-      return ReplaceInstUsesWith(CI, X);
+      return replaceInstUsesWith(CI, X);
 
     // bswap(trunc(bswap(x))) -> trunc(lshr(x, c))
     if (match(IIOperand, m_Trunc(m_BSwap(m_Value(X))))) {
@@ -930,13 +930,13 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
 
     // bitreverse(bitreverse(x)) -> x
     if (match(IIOperand, m_Intrinsic<Intrinsic::bitreverse>(m_Value(X))))
-      return ReplaceInstUsesWith(CI, X);
+      return replaceInstUsesWith(CI, X);
     break;
   }
 
   case Intrinsic::masked_load:
     if (Value *SimplifiedMaskedOp = simplifyMaskedLoad(*II, *Builder))
-      return ReplaceInstUsesWith(CI, SimplifiedMaskedOp);
+      return replaceInstUsesWith(CI, SimplifiedMaskedOp);
     break;
   case Intrinsic::masked_store:
     return simplifyMaskedStore(*II, *this);
@@ -949,10 +949,10 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
     if (ConstantInt *Power = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
       // powi(x, 0) -> 1.0
       if (Power->isZero())
-        return ReplaceInstUsesWith(CI, ConstantFP::get(CI.getType(), 1.0));
+        return replaceInstUsesWith(CI, ConstantFP::get(CI.getType(), 1.0));
       // powi(x, 1) -> x
      if (Power->isOne())
-        return ReplaceInstUsesWith(CI, II->getArgOperand(0));
+        return replaceInstUsesWith(CI, II->getArgOperand(0));
       // powi(x, -1) -> 1/x
       if (Power->isAllOnesValue())
         return BinaryOperator::CreateFDiv(ConstantFP::get(CI.getType(), 1.0),
@@ -972,7 +972,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
       unsigned TrailingZeros = KnownOne.countTrailingZeros();
       APInt Mask(APInt::getLowBitsSet(BitWidth, TrailingZeros));
       if ((Mask & KnownZero) == Mask)
-        return ReplaceInstUsesWith(CI, ConstantInt::get(IT,
+        return replaceInstUsesWith(CI, ConstantInt::get(IT,
                                    APInt(BitWidth, TrailingZeros)));
 
     }
@@ -990,7 +990,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
       unsigned LeadingZeros = KnownOne.countLeadingZeros();
       APInt Mask(APInt::getHighBitsSet(BitWidth, LeadingZeros));
       if ((Mask & KnownZero) == Mask)
-        return ReplaceInstUsesWith(CI, ConstantInt::get(IT,
+        return replaceInstUsesWith(CI, ConstantInt::get(IT,
                                    APInt(BitWidth, LeadingZeros)));
 
     }
@@ -1036,7 +1036,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
       return II;
     }
     if (Value *V = simplifyMinnumMaxnum(*II))
-      return ReplaceInstUsesWith(*II, V);
+      return replaceInstUsesWith(*II, V);
     break;
   }
   case Intrinsic::ppc_altivec_lvx:
@@ -1147,7 +1147,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
 
     // Constant folding: Convert to generic half to single conversion.
    if (isa<ConstantAggregateZero>(Arg))
-      return ReplaceInstUsesWith(*II, ConstantAggregateZero::get(RetType));
+      return replaceInstUsesWith(*II, ConstantAggregateZero::get(RetType));
 
    if (isa<ConstantDataVector>(Arg)) {
      auto VectorHalfAsShorts = Arg;
@@ -1164,7 +1164,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
       auto VectorHalfs =
           Builder->CreateBitCast(VectorHalfAsShorts, VectorHalfType);
       auto VectorFloats = Builder->CreateFPExt(VectorHalfs, RetType);
-      return ReplaceInstUsesWith(*II, VectorFloats);
+      return replaceInstUsesWith(*II, VectorFloats);
     }
 
     // We only use the lowest lanes of the argument.
@@ -1214,7 +1214,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
   case Intrinsic::x86_avx2_pslli_q:
   case Intrinsic::x86_avx2_pslli_w:
     if (Value *V = simplifyX86immShift(*II, *Builder))
-      return ReplaceInstUsesWith(*II, V);
+      return replaceInstUsesWith(*II, V);
     break;
 
   case Intrinsic::x86_sse2_psra_d:
@@ -1234,7 +1234,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
   case Intrinsic::x86_avx2_psll_q:
   case Intrinsic::x86_avx2_psll_w: {
     if (Value *V = simplifyX86immShift(*II, *Builder))
-      return ReplaceInstUsesWith(*II, V);
+      return replaceInstUsesWith(*II, V);
 
     // SSE2/AVX2 uses only the first 64-bits of the 128-bit vector
     // operand to compute the shift amount.
@@ -1257,7 +1257,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
   case Intrinsic::x86_avx2_pmovsxwd:
   case Intrinsic::x86_avx2_pmovsxwq:
    if (Value *V = simplifyX86extend(*II, *Builder, true))
-      return ReplaceInstUsesWith(*II, V);
+      return replaceInstUsesWith(*II, V);
    break;
 
   case Intrinsic::x86_sse41_pmovzxbd:
@@ -1273,12 +1273,12 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
   case Intrinsic::x86_avx2_pmovzxwd:
   case Intrinsic::x86_avx2_pmovzxwq:
     if (Value *V = simplifyX86extend(*II, *Builder, false))
-      return ReplaceInstUsesWith(*II, V);
+      return replaceInstUsesWith(*II, V);
     break;
 
   case Intrinsic::x86_sse41_insertps:
     if (Value *V = simplifyX86insertps(*II, *Builder))
-      return ReplaceInstUsesWith(*II, V);
+      return replaceInstUsesWith(*II, V);
     break;
 
   case Intrinsic::x86_sse4a_extrq: {
@@ -1301,7 +1301,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
 
     // Attempt to simplify to a constant, shuffle vector or EXTRQI call.
    if (Value *V = simplifyX86extrq(*II, Op0, CILength, CIIndex, *Builder))
-      return ReplaceInstUsesWith(*II, V);
+      return replaceInstUsesWith(*II, V);
 
    // EXTRQ only uses the lowest 64-bits of the first 128-bit vector
    // operands and the lowest 16-bits of the second.
@@ -1330,7 +1330,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
 
     // Attempt to simplify to a constant or shuffle vector.
     if (Value *V = simplifyX86extrq(*II, Op0, CILength, CIIndex, *Builder))
-      return ReplaceInstUsesWith(*II, V);
+      return replaceInstUsesWith(*II, V);
 
     // EXTRQI only uses the lowest 64-bits of the first 128-bit vector
     // operand.
@@ -1362,7 +1362,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
       APInt Len = V11.zextOrTrunc(6);
       APInt Idx = V11.lshr(8).zextOrTrunc(6);
       if (Value *V = simplifyX86insertq(*II, Op0, Op1, Len, Idx, *Builder))
-        return ReplaceInstUsesWith(*II, V);
+        return replaceInstUsesWith(*II, V);
     }
 
     // INSERTQ only uses the lowest 64-bits of the first 128-bit vector
@@ -1395,7 +1395,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
       APInt Len = CILength->getValue().zextOrTrunc(6);
       APInt Idx = CIIndex->getValue().zextOrTrunc(6);
       if (Value *V = simplifyX86insertq(*II, Op0, Op1, Len, Idx, *Builder))
-        return ReplaceInstUsesWith(*II, V);
+        return replaceInstUsesWith(*II, V);
     }
 
     // INSERTQI only uses the lowest 64-bits of the first two 128-bit vector
@@ -1429,11 +1429,11 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
 
     // fold (blend A, A, Mask) -> A
     if (Op0 == Op1)
-      return ReplaceInstUsesWith(CI, Op0);
+      return replaceInstUsesWith(CI, Op0);
 
     // Zero Mask - select 1st argument.
     if (isa<ConstantAggregateZero>(Mask))
-      return ReplaceInstUsesWith(CI, Op0);
+      return replaceInstUsesWith(CI, Op0);
 
     // Constant Mask - select 1st/2nd argument lane based on top bit of mask.
     if (auto C = dyn_cast<ConstantDataVector>(Mask)) {
@@ -1501,7 +1501,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
       auto V1 = II->getArgOperand(0);
       auto V2 = Constant::getNullValue(II->getType());
       auto Shuffle = Builder->CreateShuffleVector(V1, V2, NewC);
-      return ReplaceInstUsesWith(CI, Shuffle);
+      return replaceInstUsesWith(CI, Shuffle);
     }
 
   case Intrinsic::x86_avx_vpermilvar_ps:
@@ -1541,7 +1541,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
      auto V1 = II->getArgOperand(0);
      auto V2 = UndefValue::get(V1->getType());
      auto Shuffle = Builder->CreateShuffleVector(V1, V2, NewC);
-      return ReplaceInstUsesWith(CI, Shuffle);
+      return replaceInstUsesWith(CI, Shuffle);
     }
 
   case Intrinsic::x86_avx_vperm2f128_pd_256:
@@ -1549,7 +1549,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
   case Intrinsic::x86_avx_vperm2f128_si_256:
   case Intrinsic::x86_avx2_vperm2i128:
     if (Value *V = simplifyX86vperm2(*II, *Builder))
-      return ReplaceInstUsesWith(*II, V);
+      return replaceInstUsesWith(*II, V);
     break;
 
   case Intrinsic::x86_xop_vpcomb:
@@ -1557,7 +1557,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
   case Intrinsic::x86_xop_vpcomq:
   case Intrinsic::x86_xop_vpcomw:
     if (Value *V = simplifyX86vpcom(*II, *Builder, true))
-      return ReplaceInstUsesWith(*II, V);
+      return replaceInstUsesWith(*II, V);
     break;
 
   case Intrinsic::x86_xop_vpcomub:
@@ -1565,7 +1565,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
   case Intrinsic::x86_xop_vpcomuq:
   case Intrinsic::x86_xop_vpcomuw:
     if (Value *V = simplifyX86vpcom(*II, *Builder, false))
-      return ReplaceInstUsesWith(*II, V);
+      return replaceInstUsesWith(*II, V);
     break;
 
   case Intrinsic::ppc_altivec_vperm:
@@ -1662,7 +1662,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
 
     // Handle mul by zero first:
     if (isa<ConstantAggregateZero>(Arg0) || isa<ConstantAggregateZero>(Arg1)) {
-      return ReplaceInstUsesWith(CI, ConstantAggregateZero::get(II->getType()));
+      return replaceInstUsesWith(CI, ConstantAggregateZero::get(II->getType()));
     }
 
     // Check for constant LHS & RHS - in this case we just simplify.
@@ -1674,7 +1674,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
       CV0 = ConstantExpr::getIntegerCast(CV0, NewVT, /*isSigned=*/!Zext);
       CV1 = ConstantExpr::getIntegerCast(CV1, NewVT, /*isSigned=*/!Zext);
 
-      return ReplaceInstUsesWith(CI, ConstantExpr::getMul(CV0, CV1));
+      return replaceInstUsesWith(CI, ConstantExpr::getMul(CV0, CV1));
     }
 
     // Couldn't simplify - canonicalize constant to the RHS.
@@ -1701,7 +1701,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
       // Only do this if it was exact and therefore not dependent on the
       // rounding mode.
       if (Status == APFloat::opOK)
-        return ReplaceInstUsesWith(CI, ConstantFP::get(II->getContext(), Val));
+        return replaceInstUsesWith(CI, ConstantFP::get(II->getContext(), Val));
     }
 
     break;
@@ -1712,7 +1712,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
     if (IntrinsicInst *SS = dyn_cast<IntrinsicInst>(II->getArgOperand(0))) {
       if (SS->getIntrinsicID() == Intrinsic::stacksave) {
         if (&*++SS->getIterator() == II)
-          return EraseInstFromFunction(CI);
+          return eraseInstFromFunction(CI);
       }
     }
 
@@ -1730,7 +1730,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
       if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(BCI)) {
         // If there is a stackrestore below this one, remove this one.
         if (II->getIntrinsicID() == Intrinsic::stackrestore)
-          return EraseInstFromFunction(CI);
+          return eraseInstFromFunction(CI);
         // Otherwise, ignore the intrinsic.
       } else {
         // If we found a non-intrinsic call, we can't remove the stack
@@ -1745,7 +1745,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
     // are no allocas or calls between the restore and the return, nuke the
     // restore.
     if (!CannotRemove && (isa<ReturnInst>(TI) || isa<ResumeInst>(TI)))
-      return EraseInstFromFunction(CI);
+      return eraseInstFromFunction(CI);
     break;
   }
   case Intrinsic::lifetime_start: {
@@ -1761,8 +1761,8 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
         if (LTE->getIntrinsicID() == Intrinsic::lifetime_end) {
           if (II->getOperand(0) == LTE->getOperand(0) &&
               II->getOperand(1) == LTE->getOperand(1)) {
-            EraseInstFromFunction(*LTE);
-            return EraseInstFromFunction(*II);
+            eraseInstFromFunction(*LTE);
+            return eraseInstFromFunction(*II);
           }
           continue;
         }
@@ -1780,7 +1780,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
     if (match(IIOperand, m_And(m_Value(A), m_Value(B)))) {
       Builder->CreateCall(AssumeIntrinsic, A, II->getName());
       Builder->CreateCall(AssumeIntrinsic, B, II->getName());
-      return EraseInstFromFunction(*II);
+      return eraseInstFromFunction(*II);
     }
     // assume(!(a || b)) -> assume(!a); assume(!b);
     if (match(IIOperand, m_Not(m_Or(m_Value(A), m_Value(B))))) {
@@ -1788,7 +1788,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
                           II->getName());
       Builder->CreateCall(AssumeIntrinsic, Builder->CreateNot(B),
                           II->getName());
-      return EraseInstFromFunction(*II);
+      return eraseInstFromFunction(*II);
     }
 
     // assume( (load addr) != null ) -> add 'nonnull' metadata to load
@@ -1805,7 +1805,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
         if (isValidAssumeForContext(II, LI, DT)) {
          MDNode *MD = MDNode::get(II->getContext(), None);
          LI->setMetadata(LLVMContext::MD_nonnull, MD);
-          return EraseInstFromFunction(*II);
+          return eraseInstFromFunction(*II);
        }
      }
      // TODO: apply nonnull return attributes to calls and invokes
@@ -1816,7 +1816,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
     APInt KnownZero(1, 0), KnownOne(1, 0);
     computeKnownBits(IIOperand, KnownZero, KnownOne, 0, II);
     if (KnownOne.isAllOnesValue())
-      return EraseInstFromFunction(*II);
+      return eraseInstFromFunction(*II);
 
     break;
   }
@@ -1830,7 +1830,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
     // Remove the relocation if unused, note that this check is required
     // to prevent the cases below from looping forever.
     if (II->use_empty())
-      return EraseInstFromFunction(*II);
+      return eraseInstFromFunction(*II);
 
     // Undef is undef, even after relocation.
     // TODO: provide a hook for this in GCStrategy. This is clearly legal for
@@ -1838,7 +1838,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
     // about whether it was legal for all possible collectors.
     if (isa<UndefValue>(DerivedPtr)) {
       // gc_relocate is uncasted. Use undef of gc_relocate's type to replace it.
-      return ReplaceInstUsesWith(*II, UndefValue::get(GCRelocateType));
+      return replaceInstUsesWith(*II, UndefValue::get(GCRelocateType));
     }
 
     // The relocation of null will be null for most any collector.
@@ -1847,7 +1847,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
     if (isa<ConstantPointerNull>(DerivedPtr)) {
       // gc_relocate is uncasted. Use null-pointer of gc_relocate's type to
       // replace it.
-      return ReplaceInstUsesWith(*II, ConstantPointerNull::get(GCRelocateType));
+      return replaceInstUsesWith(*II, ConstantPointerNull::get(GCRelocateType));
     }
 
     // isKnownNonNull -> nonnull attribute
@@ -1915,12 +1915,12 @@ Instruction *InstCombiner::tryOptimizeCall(CallInst *CI) {
   if (!CI->getCalledFunction()) return nullptr;
 
   auto InstCombineRAUW = [this](Instruction *From, Value *With) {
-    ReplaceInstUsesWith(*From, With);
+    replaceInstUsesWith(*From, With);
   };
   LibCallSimplifier Simplifier(DL, TLI, InstCombineRAUW);
   if (Value *With = Simplifier.optimizeCall(CI)) {
     ++NumSimplified;
-    return CI->use_empty() ? CI : ReplaceInstUsesWith(*CI, With);
+    return CI->use_empty() ? CI : replaceInstUsesWith(*CI, With);
   }
 
   return nullptr;
@@ -2057,9 +2057,9 @@ Instruction *InstCombiner::visitCallSite(CallSite CS) {
     // If OldCall does not return void then replaceAllUsesWith undef.
     // This allows ValueHandlers and custom metadata to adjust itself.
     if (!OldCall->getType()->isVoidTy())
-      ReplaceInstUsesWith(*OldCall, UndefValue::get(OldCall->getType()));
+      replaceInstUsesWith(*OldCall, UndefValue::get(OldCall->getType()));
     if (isa<CallInst>(OldCall))
-      return EraseInstFromFunction(*OldCall);
+      return eraseInstFromFunction(*OldCall);
 
     // We cannot remove an invoke, because it would change the CFG, just
     // change the callee to a null pointer.
@@ -2072,7 +2072,7 @@ Instruction *InstCombiner::visitCallSite(CallSite CS) {
     // If CS does not return void then replaceAllUsesWith undef.
     // This allows ValueHandlers and custom metadata to adjust itself.
     if (!CS.getInstruction()->getType()->isVoidTy())
-      ReplaceInstUsesWith(*CS.getInstruction(),
+      replaceInstUsesWith(*CS.getInstruction(),
                           UndefValue::get(CS.getInstruction()->getType()));
 
     if (isa<InvokeInst>(CS.getInstruction())) {
@@ -2087,7 +2087,7 @@ Instruction *InstCombiner::visitCallSite(CallSite CS) {
                   UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
                   CS.getInstruction());
 
-    return EraseInstFromFunction(*CS.getInstruction());
+    return eraseInstFromFunction(*CS.getInstruction());
   }
 
   if (IntrinsicInst *II = findInitTrampoline(Callee))
@@ -2122,7 +2122,7 @@ Instruction *InstCombiner::visitCallSite(CallSite CS) {
       Instruction *I = tryOptimizeCall(CI);
       // If we changed something return the result, etc. Otherwise let
      // the fallthrough check.
-      if (I) return EraseInstFromFunction(*I);
+      if (I) return eraseInstFromFunction(*I);
     }
 
   return Changed ? CS.getInstruction() : nullptr;
@@ -2389,7 +2389,7 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
   }
 
   if (!Caller->use_empty())
-    ReplaceInstUsesWith(*Caller, NV);
+    replaceInstUsesWith(*Caller, NV);
   else if (Caller->hasValueHandle()) {
     if (OldRetTy == NV->getType())
       ValueHandleBase::ValueIsRAUWd(Caller, NV);
@@ -2399,7 +2399,7 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
       ValueHandleBase::ValueIsDeleted(Caller);
   }
 
-  EraseInstFromFunction(*Caller);
+  eraseInstFromFunction(*Caller);
   return true;
 }
 
@@ -149,9 +149,9 @@ Instruction *InstCombiner::PromoteCastOfAllocation(BitCastInst &CI,
     // New is the allocation instruction, pointer typed. AI is the original
     // allocation instruction, also pointer typed. Thus, cast to use is BitCast.
     Value *NewCast = AllocaBuilder.CreateBitCast(New, AI.getType(), "tmpcast");
-    ReplaceInstUsesWith(AI, NewCast);
+    replaceInstUsesWith(AI, NewCast);
   }
-  return ReplaceInstUsesWith(CI, New);
+  return replaceInstUsesWith(CI, New);
 }
 
 /// Given an expression that CanEvaluateTruncated or CanEvaluateSExtd returns
@@ -508,7 +508,7 @@ Instruction *InstCombiner::visitTrunc(TruncInst &CI) {
           " to avoid cast: " << CI << '\n');
     Value *Res = EvaluateInDifferentType(Src, DestTy, false);
     assert(Res->getType() == DestTy);
-    return ReplaceInstUsesWith(CI, Res);
+    return replaceInstUsesWith(CI, Res);
   }
 
   // Canonicalize trunc x to i1 -> (icmp ne (and x, 1), 0), likewise for vector.
@@ -532,7 +532,7 @@ Instruction *InstCombiner::visitTrunc(TruncInst &CI) {
     // If the shift amount is larger than the size of A, then the result is
     // known to be zero because all the input bits got shifted out.
     if (Cst->getZExtValue() >= ASize)
-      return ReplaceInstUsesWith(CI, Constant::getNullValue(DestTy));
+      return replaceInstUsesWith(CI, Constant::getNullValue(DestTy));
 
     // Since we're doing an lshr and a zero extend, and know that the shift
     // amount is smaller than ASize, it is always safe to do the shift in A's
@@ -606,7 +606,7 @@ Instruction *InstCombiner::transformZExtICmp(ICmpInst *ICI, Instruction &CI,
         In = Builder->CreateXor(In, One, In->getName() + ".not");
       }
 
-      return ReplaceInstUsesWith(CI, In);
+      return replaceInstUsesWith(CI, In);
     }
 
     // zext (X == 0) to i32 --> X^1 iff X has only the low bit set.
@@ -636,7 +636,7 @@ Instruction *InstCombiner::transformZExtICmp(ICmpInst *ICI, Instruction &CI,
         Constant *Res = ConstantInt::get(Type::getInt1Ty(CI.getContext()),
                                          isNE);
         Res = ConstantExpr::getZExt(Res, CI.getType());
-        return ReplaceInstUsesWith(CI, Res);
+        return replaceInstUsesWith(CI, Res);
       }
 
       uint32_t ShAmt = KnownZeroMask.logBase2();
@@ -654,7 +654,7 @@ Instruction *InstCombiner::transformZExtICmp(ICmpInst *ICI, Instruction &CI,
       }
 
       if (CI.getType() == In->getType())
-        return ReplaceInstUsesWith(CI, In);
+        return replaceInstUsesWith(CI, In);
       return CastInst::CreateIntegerCast(In, CI.getType(), false/*ZExt*/);
     }
   }
@@ -694,7 +694,7 @@ Instruction *InstCombiner::transformZExtICmp(ICmpInst *ICI, Instruction &CI,
       if (ICI->getPredicate() == ICmpInst::ICMP_EQ)
         Result = Builder->CreateXor(Result, ConstantInt::get(ITy, 1));
       Result->takeName(ICI);
-      return ReplaceInstUsesWith(CI, Result);
+      return replaceInstUsesWith(CI, Result);
     }
   }
 }
@@ -872,7 +872,7 @@ Instruction *InstCombiner::visitZExt(ZExtInst &CI) {
                              APInt::getHighBitsSet(DestBitSize,
                                                    DestBitSize-SrcBitsKept),
                              0, &CI))
-      return ReplaceInstUsesWith(CI, Res);
+      return replaceInstUsesWith(CI, Res);
 
     // We need to emit an AND to clear the high bits.
     Constant *C = ConstantInt::get(Res->getType(),
@@ -986,7 +986,7 @@ Instruction *InstCombiner::transformSExtICmp(ICmpInst *ICI, Instruction &CI) {
 
       if (Pred == ICmpInst::ICMP_SGT)
         In = Builder->CreateNot(In, In->getName()+".not");
-      return ReplaceInstUsesWith(CI, In);
+      return replaceInstUsesWith(CI, In);
     }
   }
 
@@ -1009,7 +1009,7 @@ Instruction *InstCombiner::transformSExtICmp(ICmpInst *ICI, Instruction &CI) {
         Value *V = Pred == ICmpInst::ICMP_NE ?
                    ConstantInt::getAllOnesValue(CI.getType()) :
                    ConstantInt::getNullValue(CI.getType());
-        return ReplaceInstUsesWith(CI, V);
+        return replaceInstUsesWith(CI, V);
       }
 
       if (!Op1C->isZero() == (Pred == ICmpInst::ICMP_NE)) {
@@ -1041,7 +1041,7 @@ Instruction *InstCombiner::transformSExtICmp(ICmpInst *ICI, Instruction &CI) {
       }
 
       if (CI.getType() == In->getType())
-        return ReplaceInstUsesWith(CI, In);
+        return replaceInstUsesWith(CI, In);
       return CastInst::CreateIntegerCast(In, CI.getType(), true/*SExt*/);
     }
   }
@@ -1137,7 +1137,7 @@ Instruction *InstCombiner::visitSExt(SExtInst &CI) {
   ComputeSignBit(Src, KnownZero, KnownOne, 0, &CI);
   if (KnownZero) {
     Value *ZExt = Builder->CreateZExt(Src, DestTy);
-    return ReplaceInstUsesWith(CI, ZExt);
+    return replaceInstUsesWith(CI, ZExt);
   }
 
   // Attempt to extend the entire input expression tree to the destination
@@ -1158,7 +1158,7 @@ Instruction *InstCombiner::visitSExt(SExtInst &CI) {
     // If the high bits are already filled with sign bit, just replace this
     // cast with the result.
     if (ComputeNumSignBits(Res, 0, &CI) > DestBitSize - SrcBitSize)
-      return ReplaceInstUsesWith(CI, Res);
+      return replaceInstUsesWith(CI, Res);
 
     // We need to emit a shl + ashr to do the sign extend.
     Value *ShAmt = ConstantInt::get(DestTy, DestBitSize-SrcBitSize);
@@ -1451,7 +1451,7 @@ Instruction *InstCombiner::FoldItoFPtoI(Instruction &FI) {
     if (FITy->getScalarSizeInBits() < SrcTy->getScalarSizeInBits())
       return new TruncInst(SrcI, FITy);
     if (SrcTy == FITy)
-      return ReplaceInstUsesWith(FI, SrcI);
+      return replaceInstUsesWith(FI, SrcI);
     return new BitCastInst(SrcI, FITy);
   }
   return nullptr;
@@ -1796,7 +1796,7 @@ Instruction *InstCombiner::visitBitCast(BitCastInst &CI) {
   // Get rid of casts from one type to the same type. These are useless and can
   // be replaced by the operand.
   if (DestTy == Src->getType())
-    return ReplaceInstUsesWith(CI, Src);
+    return replaceInstUsesWith(CI, Src);
 
   if (PointerType *DstPTy = dyn_cast<PointerType>(DestTy)) {
     PointerType *SrcPTy = cast<PointerType>(SrcTy);
@@ -1854,7 +1854,7 @@ Instruction *InstCombiner::visitBitCast(BitCastInst &CI) {
       // assemble the elements of the vector manually. Try to rip the code out
      // and replace it with insertelements.
      if (Value *V = optimizeIntegerToVectorInsertions(CI, *this))
-        return ReplaceInstUsesWith(CI, V);
+        return replaceInstUsesWith(CI, V);
     }
   }
 
@@ -402,7 +402,7 @@ FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP, GlobalVariable *GV,
   if (SecondTrueElement != Overdefined) {
     // None true -> false.
     if (FirstTrueElement == Undefined)
-      return ReplaceInstUsesWith(ICI, Builder->getFalse());
+      return replaceInstUsesWith(ICI, Builder->getFalse());
 
     Value *FirstTrueIdx = ConstantInt::get(Idx->getType(), FirstTrueElement);
 
@@ -422,7 +422,7 @@ FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP, GlobalVariable *GV,
   if (SecondFalseElement != Overdefined) {
     // None false -> true.
     if (FirstFalseElement == Undefined)
-      return ReplaceInstUsesWith(ICI, Builder->getTrue());
+      return replaceInstUsesWith(ICI, Builder->getTrue());
 
     Value *FirstFalseIdx = ConstantInt::get(Idx->getType(), FirstFalseElement);
 
@@ -983,7 +983,7 @@ Instruction *InstCombiner::FoldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
 
       Value *Cmp = Builder->CreateICmp(ICmpInst::getSignedPredicate(Cond),
                                        LOffset, ROffset);
-      return ReplaceInstUsesWith(I, Cmp);
+      return replaceInstUsesWith(I, Cmp);
     }
 
     // Otherwise, the base pointers are different and the indices are
@@ -1020,7 +1020,7 @@ Instruction *InstCombiner::FoldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
     }
 
     if (NumDifferences == 0) // SAME GEP?
-      return ReplaceInstUsesWith(I, // No comparison is needed here.
+      return replaceInstUsesWith(I, // No comparison is needed here.
                              Builder->getInt1(ICmpInst::isTrueWhenEqual(Cond)));
 
     else if (NumDifferences == 1 && GEPsInBounds) {
@@ -1119,7 +1119,7 @@ Instruction *InstCombiner::FoldAllocaCmp(ICmpInst &ICI, AllocaInst *Alloca,
   }
 
   Type *CmpTy = CmpInst::makeCmpResultType(Other->getType());
-  return ReplaceInstUsesWith(
+  return replaceInstUsesWith(
       ICI,
       ConstantInt::get(CmpTy, !CmpInst::isTrueWhenEqual(ICI.getPredicate())));
 }
@@ -1290,39 +1290,39 @@ Instruction *InstCombiner::FoldICmpDivCst(ICmpInst &ICI, BinaryOperator *DivI,
   default: llvm_unreachable("Unhandled icmp opcode!");
   case ICmpInst::ICMP_EQ:
     if (LoOverflow && HiOverflow)
-      return ReplaceInstUsesWith(ICI, Builder->getFalse());
+      return replaceInstUsesWith(ICI, Builder->getFalse());
    if (HiOverflow)
      return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SGE :
                          ICmpInst::ICMP_UGE, X, LoBound);
    if (LoOverflow)
      return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SLT :
                          ICmpInst::ICMP_ULT, X, HiBound);
-    return ReplaceInstUsesWith(ICI, InsertRangeTest(X, LoBound, HiBound,
+    return replaceInstUsesWith(ICI, InsertRangeTest(X, LoBound, HiBound,
                                                     DivIsSigned, true));
   case ICmpInst::ICMP_NE:
     if (LoOverflow && HiOverflow)
-      return ReplaceInstUsesWith(ICI, Builder->getTrue());
+      return replaceInstUsesWith(ICI, Builder->getTrue());
    if (HiOverflow)
      return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SLT :
                          ICmpInst::ICMP_ULT, X, LoBound);
    if (LoOverflow)
      return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SGE :
                          ICmpInst::ICMP_UGE, X, HiBound);
-    return ReplaceInstUsesWith(ICI, InsertRangeTest(X, LoBound, HiBound,
+    return replaceInstUsesWith(ICI, InsertRangeTest(X, LoBound, HiBound,
                                                     DivIsSigned, false));
   case ICmpInst::ICMP_ULT:
   case ICmpInst::ICMP_SLT:
    if (LoOverflow == +1) // Low bound is greater than input range.
-      return ReplaceInstUsesWith(ICI, Builder->getTrue());
+      return replaceInstUsesWith(ICI, Builder->getTrue());
    if (LoOverflow == -1) // Low bound is less than input range.
-      return ReplaceInstUsesWith(ICI, Builder->getFalse());
+      return replaceInstUsesWith(ICI, Builder->getFalse());
    return new ICmpInst(Pred, X, LoBound);
  case ICmpInst::ICMP_UGT:
  case ICmpInst::ICMP_SGT:
    if (HiOverflow == +1) // High bound greater than input range.
-      return ReplaceInstUsesWith(ICI, Builder->getFalse());
+      return replaceInstUsesWith(ICI, Builder->getFalse());
    if (HiOverflow == -1) // High bound less than input range.
-      return ReplaceInstUsesWith(ICI, Builder->getTrue());
+      return replaceInstUsesWith(ICI, Builder->getTrue());
    if (Pred == ICmpInst::ICMP_UGT)
      return new ICmpInst(ICmpInst::ICMP_UGE, X, HiBound);
    return new ICmpInst(ICmpInst::ICMP_SGE, X, HiBound);
@@ -1394,7 +1394,7 @@ Instruction *InstCombiner::FoldICmpShrCst(ICmpInst &ICI, BinaryOperator *Shr,
   if (Comp != CmpRHSV) { // Comparing against a bit that we know is zero.
     bool IsICMP_NE = ICI.getPredicate() == ICmpInst::ICMP_NE;
     Constant *Cst = Builder->getInt1(IsICMP_NE);
-    return ReplaceInstUsesWith(ICI, Cst);
+    return replaceInstUsesWith(ICI, Cst);
   }
 
   // Otherwise, check to see if the bits shifted out are known to be zero.
@@ -1426,7 +1426,7 @@ Instruction *InstCombiner::FoldICmpCstShrCst(ICmpInst &I, Value *Op, Value *A,
   auto getConstant = [&I, this](bool IsTrue) {
     if (I.getPredicate() == I.ICMP_NE)
       IsTrue = !IsTrue;
-    return ReplaceInstUsesWith(I, ConstantInt::get(I.getType(), IsTrue));
+    return replaceInstUsesWith(I, ConstantInt::get(I.getType(), IsTrue));
   };
 
   auto getICmp = [&I](CmpInst::Predicate Pred, Value *LHS, Value *RHS) {
@@ -1490,7 +1490,7 @@ Instruction *InstCombiner::FoldICmpCstShlCst(ICmpInst &I, Value *Op, Value *A,
   auto getConstant = [&I, this](bool IsTrue) {
     if (I.getPredicate() == I.ICMP_NE)
       IsTrue = !IsTrue;
-    return ReplaceInstUsesWith(I, ConstantInt::get(I.getType(), IsTrue));
+    return replaceInstUsesWith(I, ConstantInt::get(I.getType(), IsTrue));
   };
 
   auto getICmp = [&I](CmpInst::Predicate Pred, Value *LHS, Value *RHS) {
@@ -1729,9 +1729,9 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
           // As a special case, check to see if this means that the
           // result is always true or false now.
           if (ICI.getPredicate() == ICmpInst::ICMP_EQ)
-            return ReplaceInstUsesWith(ICI, Builder->getFalse());
+            return replaceInstUsesWith(ICI, Builder->getFalse());
           if (ICI.getPredicate() == ICmpInst::ICMP_NE)
-            return ReplaceInstUsesWith(ICI, Builder->getTrue());
+            return replaceInstUsesWith(ICI, Builder->getTrue());
         } else {
           ICI.setOperand(1, NewCst);
           Constant *NewAndCst;
@@ -1991,7 +1991,7 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
     if (Comp != RHS) {// Comparing against a bit that we know is zero.
       bool IsICMP_NE = ICI.getPredicate() == ICmpInst::ICMP_NE;
       Constant *Cst = Builder->getInt1(IsICMP_NE);
-      return ReplaceInstUsesWith(ICI, Cst);
+      return replaceInstUsesWith(ICI, Cst);
     }
 
     // If the shift is NUW, then it is just shifting out zeros, no need for an
@@ -2241,7 +2241,7 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
       if (ConstantInt *BOC = dyn_cast<ConstantInt>(BO->getOperand(1))) {
         Constant *NotCI = ConstantExpr::getNot(RHS);
         if (!ConstantExpr::getAnd(BOC, NotCI)->isNullValue())
-          return ReplaceInstUsesWith(ICI, Builder->getInt1(isICMP_NE));
+          return replaceInstUsesWith(ICI, Builder->getInt1(isICMP_NE));
       }
       break;
 
@@ -2250,7 +2250,7 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
         // If bits are being compared against that are and'd out, then the
        // comparison can never succeed!
        if ((RHSV & ~BOC->getValue()) != 0)
-          return ReplaceInstUsesWith(ICI, Builder->getInt1(isICMP_NE));
+          return replaceInstUsesWith(ICI, Builder->getInt1(isICMP_NE));
 
        // If we have ((X & C) == C), turn it into ((X & C) != 0).
        if (RHS == BOC && RHSV.isPowerOf2())
@@ -2438,7 +2438,7 @@ Instruction *InstCombiner::visitICmpInstWithCastAndCast(ICmpInst &ICI) {
 
   // Finally, return the value computed.
   if (ICI.getPredicate() == ICmpInst::ICMP_ULT)
-    return ReplaceInstUsesWith(ICI, Result);
+    return replaceInstUsesWith(ICI, Result);
 
   assert(ICI.getPredicate() == ICmpInst::ICMP_UGT && "ICmp should be folded!");
   return BinaryOperator::CreateNot(Result);
@@ -2524,7 +2524,7 @@ static Instruction *ProcessUGT_ADDCST_ADD(ICmpInst &I, Value *A, Value *B,
 
   // The inner add was the result of the narrow add, zero extended to the
   // wider type. Replace it with the result computed by the intrinsic.
-  IC.ReplaceInstUsesWith(*OrigAdd, ZExt);
+  IC.replaceInstUsesWith(*OrigAdd, ZExt);
 
   // The original icmp gets replaced with the overflow value.
   return ExtractValueInst::Create(Call, 1, "sadd.overflow");
@@ -2808,7 +2808,7 @@ static Instruction *ProcessUMulZExtIdiom(ICmpInst &I, Value *MulVal,
       continue;
     if (TruncInst *TI = dyn_cast<TruncInst>(U)) {
       if (TI->getType()->getPrimitiveSizeInBits() == MulWidth)
-        IC.ReplaceInstUsesWith(*TI, Mul);
+        IC.replaceInstUsesWith(*TI, Mul);
       else
        TI->setOperand(0, Mul);
     } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U)) {
@@ -2820,7 +2820,7 @@ static Instruction *ProcessUMulZExtIdiom(ICmpInst &I, Value *MulVal,
       Instruction *Zext =
          cast<Instruction>(Builder->CreateZExt(ShortAnd, BO->getType()));
      IC.Worklist.Add(Zext);
-      IC.ReplaceInstUsesWith(*BO, Zext);
+      IC.replaceInstUsesWith(*BO, Zext);
     } else {
       llvm_unreachable("Unexpected Binary operation");
     }
@@ -3074,7 +3074,7 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
 
   if (Value *V =
           SimplifyICmpInst(I.getPredicate(), Op0, Op1, DL, TLI, DT, AC, &I))
-    return ReplaceInstUsesWith(I, V);
+    return replaceInstUsesWith(I, V);
 
   // comparing -val or val with non-zero is the same as just comparing val
  // ie, abs(val) != 0 -> val != 0
@@ -3292,7 +3292,7 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
     default: llvm_unreachable("Unknown icmp opcode!");
     case ICmpInst::ICMP_EQ: {
       if (Op0Max.ult(Op1Min) || Op0Min.ugt(Op1Max))
-        return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
+        return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
 
       // If all bits are known zero except for one, then we know at most one
      // bit is set. If the comparison is against zero, then this is a check
@@ -3336,7 +3336,7 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
     }
     case ICmpInst::ICMP_NE: {
       if (Op0Max.ult(Op1Min) || Op0Min.ugt(Op1Max))
-        return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
+        return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
 
       // If all bits are known zero except for one, then we know at most one
      // bit is set. If the comparison is against zero, then this is a check
@@ -3380,9 +3380,9 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
     }
     case ICmpInst::ICMP_ULT:
       if (Op0Max.ult(Op1Min)) // A <u B -> true if max(A) < min(B)
-        return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
+        return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
       if (Op0Min.uge(Op1Max)) // A <u B -> false if min(A) >= max(B)
-        return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
+        return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
       if (Op1Min == Op0Max) // A <u B -> A != B if max(A) == min(B)
        return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
      if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
@@ -3398,9 +3398,9 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
       break;
     case ICmpInst::ICMP_UGT:
       if (Op0Min.ugt(Op1Max)) // A >u B -> true if min(A) > max(B)
-        return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
+        return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
       if (Op0Max.ule(Op1Min)) // A >u B -> false if max(A) <= max(B)
-        return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
+        return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
 
       if (Op1Max == Op0Min) // A >u B -> A != B if min(A) == max(B)
        return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
@@ -3417,9 +3417,9 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
       break;
     case ICmpInst::ICMP_SLT:
       if (Op0Max.slt(Op1Min)) // A <s B -> true if max(A) < min(C)
-        return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
+        return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
       if (Op0Min.sge(Op1Max)) // A <s B -> false if min(A) >= max(C)
-        return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
+        return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
       if (Op1Min == Op0Max) // A <s B -> A != B if max(A) == min(B)
        return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
      if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
@@ -3430,9 +3430,9 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
       break;
     case ICmpInst::ICMP_SGT:
       if (Op0Min.sgt(Op1Max)) // A >s B -> true if min(A) > max(B)
-        return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
+        return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
      if (Op0Max.sle(Op1Min)) // A >s B -> false if max(A) <= min(B)
-        return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
+        return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
 
      if (Op1Max == Op0Min) // A >s B -> A != B if min(A) == max(B)
        return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
@@ -3445,30 +3445,30 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
     case ICmpInst::ICMP_SGE:
       assert(!isa<ConstantInt>(Op1) && "ICMP_SGE with ConstantInt not folded!");
       if (Op0Min.sge(Op1Max)) // A >=s B -> true if min(A) >= max(B)
-        return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
+        return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
       if (Op0Max.slt(Op1Min)) // A >=s B -> false if max(A) < min(B)
-        return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
+        return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
       break;
     case ICmpInst::ICMP_SLE:
       assert(!isa<ConstantInt>(Op1) && "ICMP_SLE with ConstantInt not folded!");
       if (Op0Max.sle(Op1Min)) // A <=s B -> true if max(A) <= min(B)
-        return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
+        return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
      if (Op0Min.sgt(Op1Max)) // A <=s B -> false if min(A) > max(B)
-        return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
+        return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
       break;
     case ICmpInst::ICMP_UGE:
       assert(!isa<ConstantInt>(Op1) && "ICMP_UGE with ConstantInt not folded!");
       if (Op0Min.uge(Op1Max)) // A >=u B -> true if min(A) >= max(B)
-        return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
+        return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
      if (Op0Max.ult(Op1Min)) // A >=u B -> false if max(A) < min(B)
-        return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
+        return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
      break;
    case ICmpInst::ICMP_ULE:
      assert(!isa<ConstantInt>(Op1) && "ICMP_ULE with ConstantInt not folded!");
      if (Op0Max.ule(Op1Min)) // A <=u B -> true if max(A) <= min(B)
-        return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
+        return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
      if (Op0Min.ugt(Op1Max)) // A <=u B -> false if min(A) > max(B)
-        return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
+        return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
      break;
    }
 
@@ -3842,9 +3842,9 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
       switch (SRem == BO0 ? ICmpInst::getSwappedPredicate(Pred) : Pred) {
       default: break;
       case ICmpInst::ICMP_EQ:
-        return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
+        return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
       case ICmpInst::ICMP_NE:
-        return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
+        return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
       case ICmpInst::ICMP_SGT:
       case ICmpInst::ICMP_SGE:
         return new ICmpInst(ICmpInst::ICMP_SGT, SRem->getOperand(1),
@@ -3971,8 +3971,8 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
       Constant *Overflow;
       if (OptimizeOverflowCheck(OCF_UNSIGNED_ADD, A, B, *AddI, Result,
                                 Overflow)) {
-        ReplaceInstUsesWith(*AddI, Result);
-        return ReplaceInstUsesWith(I, Overflow);
+        replaceInstUsesWith(*AddI, Result);
+        return replaceInstUsesWith(I, Overflow);
       }
     }
 
@@ -4181,10 +4181,10 @@ Instruction *InstCombiner::FoldFCmp_IntToFP_Cst(FCmpInst &I,
       RHSRoundInt.roundToIntegral(APFloat::rmNearestTiesToEven);
       if (RHS.compare(RHSRoundInt) != APFloat::cmpEqual) {
         if (P == FCmpInst::FCMP_OEQ || P == FCmpInst::FCMP_UEQ)
-          return ReplaceInstUsesWith(I, Builder->getFalse());
+          return replaceInstUsesWith(I, Builder->getFalse());
 
         assert(P == FCmpInst::FCMP_ONE || P == FCmpInst::FCMP_UNE);
-        return ReplaceInstUsesWith(I, Builder->getTrue());
+        return replaceInstUsesWith(I, Builder->getTrue());
       }
     }
 
@@ -4250,9 +4250,9 @@ Instruction *InstCombiner::FoldFCmp_IntToFP_Cst(FCmpInst &I,
|
|||
Pred = ICmpInst::ICMP_NE;
|
||||
break;
|
||||
case FCmpInst::FCMP_ORD:
|
||||
return ReplaceInstUsesWith(I, Builder->getTrue());
|
||||
return replaceInstUsesWith(I, Builder->getTrue());
|
||||
case FCmpInst::FCMP_UNO:
|
||||
return ReplaceInstUsesWith(I, Builder->getFalse());
|
||||
return replaceInstUsesWith(I, Builder->getFalse());
|
||||
}
|
||||
|
||||
// Now we know that the APFloat is a normal number, zero or inf.
|
||||
|
@ -4270,8 +4270,8 @@ Instruction *InstCombiner::FoldFCmp_IntToFP_Cst(FCmpInst &I,
|
|||
if (SMax.compare(RHS) == APFloat::cmpLessThan) { // smax < 13123.0
|
||||
if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_SLT ||
|
||||
Pred == ICmpInst::ICMP_SLE)
|
||||
return ReplaceInstUsesWith(I, Builder->getTrue());
|
||||
return ReplaceInstUsesWith(I, Builder->getFalse());
|
||||
return replaceInstUsesWith(I, Builder->getTrue());
|
||||
return replaceInstUsesWith(I, Builder->getFalse());
|
||||
}
|
||||
} else {
|
||||
// If the RHS value is > UnsignedMax, fold the comparison. This handles
|
||||
|
@ -4282,8 +4282,8 @@ Instruction *InstCombiner::FoldFCmp_IntToFP_Cst(FCmpInst &I,
|
|||
if (UMax.compare(RHS) == APFloat::cmpLessThan) { // umax < 13123.0
|
||||
if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_ULT ||
|
||||
Pred == ICmpInst::ICMP_ULE)
|
||||
return ReplaceInstUsesWith(I, Builder->getTrue());
|
||||
return ReplaceInstUsesWith(I, Builder->getFalse());
|
||||
return replaceInstUsesWith(I, Builder->getTrue());
|
||||
return replaceInstUsesWith(I, Builder->getFalse());
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -4295,8 +4295,8 @@ Instruction *InstCombiner::FoldFCmp_IntToFP_Cst(FCmpInst &I,
|
|||
if (SMin.compare(RHS) == APFloat::cmpGreaterThan) { // smin > 12312.0
|
||||
if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_SGT ||
|
||||
Pred == ICmpInst::ICMP_SGE)
|
||||
return ReplaceInstUsesWith(I, Builder->getTrue());
|
||||
return ReplaceInstUsesWith(I, Builder->getFalse());
|
||||
return replaceInstUsesWith(I, Builder->getTrue());
|
||||
return replaceInstUsesWith(I, Builder->getFalse());
|
||||
}
|
||||
} else {
|
||||
// See if the RHS value is < UnsignedMin.
|
||||
|
@ -4306,8 +4306,8 @@ Instruction *InstCombiner::FoldFCmp_IntToFP_Cst(FCmpInst &I,
|
|||
if (SMin.compare(RHS) == APFloat::cmpGreaterThan) { // umin > 12312.0
|
||||
if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_UGT ||
|
||||
Pred == ICmpInst::ICMP_UGE)
|
||||
return ReplaceInstUsesWith(I, Builder->getTrue());
|
||||
return ReplaceInstUsesWith(I, Builder->getFalse());
|
||||
return replaceInstUsesWith(I, Builder->getTrue());
|
||||
return replaceInstUsesWith(I, Builder->getFalse());
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -4329,14 +4329,14 @@ Instruction *InstCombiner::FoldFCmp_IntToFP_Cst(FCmpInst &I,
|
|||
switch (Pred) {
|
||||
default: llvm_unreachable("Unexpected integer comparison!");
|
||||
case ICmpInst::ICMP_NE: // (float)int != 4.4 --> true
|
||||
return ReplaceInstUsesWith(I, Builder->getTrue());
|
||||
return replaceInstUsesWith(I, Builder->getTrue());
|
||||
case ICmpInst::ICMP_EQ: // (float)int == 4.4 --> false
|
||||
return ReplaceInstUsesWith(I, Builder->getFalse());
|
||||
return replaceInstUsesWith(I, Builder->getFalse());
|
||||
case ICmpInst::ICMP_ULE:
|
||||
// (float)int <= 4.4 --> int <= 4
|
||||
// (float)int <= -4.4 --> false
|
||||
if (RHS.isNegative())
|
||||
return ReplaceInstUsesWith(I, Builder->getFalse());
|
||||
return replaceInstUsesWith(I, Builder->getFalse());
|
||||
break;
|
||||
case ICmpInst::ICMP_SLE:
|
||||
// (float)int <= 4.4 --> int <= 4
|
||||
|
@ -4348,7 +4348,7 @@ Instruction *InstCombiner::FoldFCmp_IntToFP_Cst(FCmpInst &I,
|
|||
// (float)int < -4.4 --> false
|
||||
// (float)int < 4.4 --> int <= 4
|
||||
if (RHS.isNegative())
|
||||
return ReplaceInstUsesWith(I, Builder->getFalse());
|
||||
return replaceInstUsesWith(I, Builder->getFalse());
|
||||
Pred = ICmpInst::ICMP_ULE;
|
||||
break;
|
||||
case ICmpInst::ICMP_SLT:
|
||||
|
@ -4361,7 +4361,7 @@ Instruction *InstCombiner::FoldFCmp_IntToFP_Cst(FCmpInst &I,
|
|||
// (float)int > 4.4 --> int > 4
|
||||
// (float)int > -4.4 --> true
|
||||
if (RHS.isNegative())
|
||||
return ReplaceInstUsesWith(I, Builder->getTrue());
|
||||
return replaceInstUsesWith(I, Builder->getTrue());
|
||||
break;
|
||||
case ICmpInst::ICMP_SGT:
|
||||
// (float)int > 4.4 --> int > 4
|
||||
|
@ -4373,7 +4373,7 @@ Instruction *InstCombiner::FoldFCmp_IntToFP_Cst(FCmpInst &I,
|
|||
// (float)int >= -4.4 --> true
|
||||
// (float)int >= 4.4 --> int > 4
|
||||
if (RHS.isNegative())
|
||||
return ReplaceInstUsesWith(I, Builder->getTrue());
|
||||
return replaceInstUsesWith(I, Builder->getTrue());
|
||||
Pred = ICmpInst::ICMP_UGT;
|
||||
break;
|
||||
case ICmpInst::ICMP_SGE:
|
||||
|
@ -4406,7 +4406,7 @@ Instruction *InstCombiner::visitFCmpInst(FCmpInst &I) {
|
|||
|
||||
if (Value *V = SimplifyFCmpInst(I.getPredicate(), Op0, Op1,
|
||||
I.getFastMathFlags(), DL, TLI, DT, AC, &I))
|
||||
return ReplaceInstUsesWith(I, V);
|
||||
return replaceInstUsesWith(I, V);
|
||||
|
||||
// Simplify 'fcmp pred X, X'
|
||||
if (Op0 == Op1) {
|
||||
|
@ -4540,7 +4540,7 @@ Instruction *InstCombiner::visitFCmpInst(FCmpInst &I) {
|
|||
break;
|
||||
// fabs(x) < 0 --> false
|
||||
case FCmpInst::FCMP_OLT:
|
||||
return ReplaceInstUsesWith(I, Builder->getFalse());
|
||||
return replaceInstUsesWith(I, Builder->getFalse());
|
||||
// fabs(x) > 0 --> x != 0
|
||||
case FCmpInst::FCMP_OGT:
|
||||
return new FCmpInst(FCmpInst::FCMP_ONE, CI->getArgOperand(0), RHSC);
|
||||
|
|
|
@ -417,7 +417,7 @@ public:
/// replaceable with another preexisting expression. Here we add all uses of
/// I to the worklist, replace all uses of I with the new value, then return
/// I, so that the inst combiner will know that I was modified.
Instruction *ReplaceInstUsesWith(Instruction &I, Value *V) {
Instruction *replaceInstUsesWith(Instruction &I, Value *V) {
// If there are no uses to replace, then we return nullptr to indicate that
// no changes were made to the program.
if (I.use_empty()) return nullptr;

@ -451,7 +451,7 @@ public:
/// When dealing with an instruction that has side effects or produces a void
/// value, we can't rely on DCE to delete the instruction. Instead, visit
/// methods should return the value returned by this function.
Instruction *EraseInstFromFunction(Instruction &I) {
Instruction *eraseInstFromFunction(Instruction &I) {
DEBUG(dbgs() << "IC: ERASE " << I << '\n');
assert(I.use_empty() && "Cannot erase instruction that is used!");
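For context, the two helpers above define the idiom every visit method touched by this patch follows: return the result of replaceInstUsesWith() when an instruction folds to an existing value, and return eraseInstFromFunction() when a dead side-effecting instruction should be removed. The sketch below is illustrative only; visitFoo and its fold are hypothetical and not part of this commit, and it assumes the usual InstCombine context (llvm::PatternMatch in scope).

// Hypothetical sketch, not part of the patch: returning the helper's result
// tells the combiner's driver loop that I was modified and queues its users.
Instruction *InstCombiner::visitFoo(BinaryOperator &I) {
  // foo X, 0 --> X: hand an already-existing value back through the helper.
  if (match(I.getOperand(1), m_Zero()))
    return replaceInstUsesWith(I, I.getOperand(0));
  return nullptr; // no change made
}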
@ -205,11 +205,11 @@ static Instruction *simplifyAllocaArraySize(InstCombiner &IC, AllocaInst &AI) {
// Now make everything use the getelementptr instead of the original
// allocation.
return IC.ReplaceInstUsesWith(AI, GEP);
return IC.replaceInstUsesWith(AI, GEP);
}
if (isa<UndefValue>(AI.getArraySize()))
return IC.ReplaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));
return IC.replaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));
// Ensure that the alloca array size argument has type intptr_t, so that
// any casting is exposed early.

@ -271,7 +271,7 @@ Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
EntryAI->setAlignment(MaxAlign);
if (AI.getType() != EntryAI->getType())
return new BitCastInst(EntryAI, AI.getType());
return ReplaceInstUsesWith(AI, EntryAI);
return replaceInstUsesWith(AI, EntryAI);
}
}
}

@ -291,12 +291,12 @@ Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
DEBUG(dbgs() << "Found alloca equal to global: " << AI << '\n');
DEBUG(dbgs() << " memcpy = " << *Copy << '\n');
for (unsigned i = 0, e = ToDelete.size(); i != e; ++i)
EraseInstFromFunction(*ToDelete[i]);
eraseInstFromFunction(*ToDelete[i]);
Constant *TheSrc = cast<Constant>(Copy->getSource());
Constant *Cast
= ConstantExpr::getPointerBitCastOrAddrSpaceCast(TheSrc, AI.getType());
Instruction *NewI = ReplaceInstUsesWith(AI, Cast);
EraseInstFromFunction(*Copy);
Instruction *NewI = replaceInstUsesWith(AI, Cast);
eraseInstFromFunction(*Copy);
++NumGlobalCopies;
return NewI;
}

@ -486,7 +486,7 @@ static Instruction *combineLoadToOperationType(InstCombiner &IC, LoadInst &LI) {
auto *SI = cast<StoreInst>(*UI++);
IC.Builder->SetInsertPoint(SI);
combineStoreToNewValue(IC, *SI, NewLoad);
IC.EraseInstFromFunction(*SI);
IC.eraseInstFromFunction(*SI);
}
assert(LI.use_empty() && "Failed to remove all users of the load!");
// Return the old load so the combiner can delete it safely.

@ -503,7 +503,7 @@ static Instruction *combineLoadToOperationType(InstCombiner &IC, LoadInst &LI) {
if (CI->isNoopCast(DL)) {
LoadInst *NewLoad = combineLoadToNewType(IC, LI, CI->getDestTy());
CI->replaceAllUsesWith(NewLoad);
IC.EraseInstFromFunction(*CI);
IC.eraseInstFromFunction(*CI);
return &LI;
}
}

@ -531,7 +531,7 @@ static Instruction *unpackLoadToAggregate(InstCombiner &IC, LoadInst &LI) {
if (Count == 1) {
LoadInst *NewLoad = combineLoadToNewType(IC, LI, ST->getTypeAtIndex(0U),
".unpack");
return IC.ReplaceInstUsesWith(LI, IC.Builder->CreateInsertValue(
return IC.replaceInstUsesWith(LI, IC.Builder->CreateInsertValue(
UndefValue::get(T), NewLoad, 0, LI.getName()));
}

@ -562,7 +562,7 @@ static Instruction *unpackLoadToAggregate(InstCombiner &IC, LoadInst &LI) {
}
V->setName(Name);
return IC.ReplaceInstUsesWith(LI, V);
return IC.replaceInstUsesWith(LI, V);
}
if (auto *AT = dyn_cast<ArrayType>(T)) {

@ -570,7 +570,7 @@ static Instruction *unpackLoadToAggregate(InstCombiner &IC, LoadInst &LI) {
if (AT->getNumElements() == 1) {
LoadInst *NewLoad = combineLoadToNewType(IC, LI, AT->getElementType(),
".unpack");
return IC.ReplaceInstUsesWith(LI, IC.Builder->CreateInsertValue(
return IC.replaceInstUsesWith(LI, IC.Builder->CreateInsertValue(
UndefValue::get(T), NewLoad, 0, LI.getName()));
}
}

@ -804,7 +804,7 @@ Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
combineMetadata(NLI, &LI, KnownIDs);
};
return ReplaceInstUsesWith(
return replaceInstUsesWith(
LI, Builder->CreateBitOrPointerCast(AvailableVal, LI.getType(),
LI.getName() + ".cast"));
}

@ -820,7 +820,7 @@ Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
// CFG.
new StoreInst(UndefValue::get(LI.getType()),
Constant::getNullValue(Op->getType()), &LI);
return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType()));
return replaceInstUsesWith(LI, UndefValue::get(LI.getType()));
}
}

@ -833,7 +833,7 @@ Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
// unreachable instruction directly because we cannot modify the CFG.
new StoreInst(UndefValue::get(LI.getType()),
Constant::getNullValue(Op->getType()), &LI);
return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType()));
return replaceInstUsesWith(LI, UndefValue::get(LI.getType()));
}
if (Op->hasOneUse()) {

@ -1014,7 +1014,7 @@ Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
// Try to canonicalize the stored type.
if (combineStoreToValueType(*this, SI))
return EraseInstFromFunction(SI);
return eraseInstFromFunction(SI);
// Attempt to improve the alignment.
unsigned KnownAlign = getOrEnforceKnownAlignment(

@ -1030,7 +1030,7 @@ Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
// Try to canonicalize the stored type.
if (unpackStoreToAggregate(*this, SI))
return EraseInstFromFunction(SI);
return eraseInstFromFunction(SI);
// Replace GEP indices if possible.
if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Ptr, SI)) {

@ -1046,11 +1046,11 @@ Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
// alloca dead.
if (Ptr->hasOneUse()) {
if (isa<AllocaInst>(Ptr))
return EraseInstFromFunction(SI);
return eraseInstFromFunction(SI);
if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
if (isa<AllocaInst>(GEP->getOperand(0))) {
if (GEP->getOperand(0)->hasOneUse())
return EraseInstFromFunction(SI);
return eraseInstFromFunction(SI);
}
}
}

@ -1076,7 +1076,7 @@ Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
SI.getOperand(1))) {
++NumDeadStore;
++BBI;
EraseInstFromFunction(*PrevSI);
eraseInstFromFunction(*PrevSI);
continue;
}
break;

@ -1088,7 +1088,7 @@ Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr)) {
assert(SI.isUnordered() && "can't eliminate ordering operation");
return EraseInstFromFunction(SI);
return eraseInstFromFunction(SI);
}
// Otherwise, this is a load from some other location. Stores before it

@ -1113,7 +1113,7 @@ Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
// store undef, Ptr -> noop
if (isa<UndefValue>(Val))
return EraseInstFromFunction(SI);
return eraseInstFromFunction(SI);
// The code below needs to be audited and adjusted for unordered atomics
if (!SI.isSimple())

@ -1265,7 +1265,7 @@ bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
}
// Nuke the old stores.
EraseInstFromFunction(SI);
EraseInstFromFunction(*OtherStore);
eraseInstFromFunction(SI);
eraseInstFromFunction(*OtherStore);
return true;
}
@ -177,13 +177,13 @@ Instruction *InstCombiner::visitMul(BinaryOperator &I) {
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
if (Value *V = SimplifyVectorOp(I))
return ReplaceInstUsesWith(I, V);
return replaceInstUsesWith(I, V);
if (Value *V = SimplifyMulInst(Op0, Op1, DL, TLI, DT, AC))
return ReplaceInstUsesWith(I, V);
return replaceInstUsesWith(I, V);
if (Value *V = SimplifyUsingDistributiveLaws(I))
return ReplaceInstUsesWith(I, V);
return replaceInstUsesWith(I, V);
// X * -1 == 0 - X
if (match(Op1, m_AllOnes())) {

@ -323,7 +323,7 @@ Instruction *InstCombiner::visitMul(BinaryOperator &I) {
if (PossiblyExactOperator *SDiv = dyn_cast<PossiblyExactOperator>(BO))
if (SDiv->isExact()) {
if (Op1BO == Op1C)
return ReplaceInstUsesWith(I, Op0BO);
return replaceInstUsesWith(I, Op0BO);
return BinaryOperator::CreateNeg(Op0BO);
}

@ -536,14 +536,14 @@ Instruction *InstCombiner::visitFMul(BinaryOperator &I) {
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
if (Value *V = SimplifyVectorOp(I))
return ReplaceInstUsesWith(I, V);
return replaceInstUsesWith(I, V);
if (isa<Constant>(Op0))
std::swap(Op0, Op1);
if (Value *V =
SimplifyFMulInst(Op0, Op1, I.getFastMathFlags(), DL, TLI, DT, AC))
return ReplaceInstUsesWith(I, V);
return replaceInstUsesWith(I, V);
bool AllowReassociate = I.hasUnsafeAlgebra();

@ -574,7 +574,7 @@ Instruction *InstCombiner::visitFMul(BinaryOperator &I) {
// Try to simplify "MDC * Constant"
if (isFMulOrFDivWithConstant(Op0))
if (Value *V = foldFMulConst(cast<Instruction>(Op0), C, &I))
return ReplaceInstUsesWith(I, V);
return replaceInstUsesWith(I, V);
// (MDC +/- C1) * C => (MDC * C) +/- (C1 * C)
Instruction *FAddSub = dyn_cast<Instruction>(Op0);

@ -616,7 +616,7 @@ Instruction *InstCombiner::visitFMul(BinaryOperator &I) {
if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Op0)) {
// sqrt(X) * sqrt(X) -> X
if (AllowReassociate && II->getIntrinsicID() == Intrinsic::sqrt)
return ReplaceInstUsesWith(I, II->getOperand(0));
return replaceInstUsesWith(I, II->getOperand(0));
// fabs(X) * fabs(X) -> X * X
if (II->getIntrinsicID() == Intrinsic::fabs) {

@ -652,7 +652,7 @@ Instruction *InstCombiner::visitFMul(BinaryOperator &I) {
Value *FMulVal = Builder->CreateFMul(OpX, Log2);
Value *FSub = Builder->CreateFSub(FMulVal, OpX);
FSub->takeName(&I);
return ReplaceInstUsesWith(I, FSub);
return replaceInstUsesWith(I, FSub);
}
}

@ -672,7 +672,7 @@ Instruction *InstCombiner::visitFMul(BinaryOperator &I) {
if (N1) {
Value *FMul = Builder->CreateFMul(N0, N1);
FMul->takeName(&I);
return ReplaceInstUsesWith(I, FMul);
return replaceInstUsesWith(I, FMul);
}
if (Opnd0->hasOneUse()) {

@ -680,7 +680,7 @@ Instruction *InstCombiner::visitFMul(BinaryOperator &I) {
Value *T = Builder->CreateFMul(N0, Opnd1);
Value *Neg = Builder->CreateFNeg(T);
Neg->takeName(&I);
return ReplaceInstUsesWith(I, Neg);
return replaceInstUsesWith(I, Neg);
}
}

@ -709,7 +709,7 @@ Instruction *InstCombiner::visitFMul(BinaryOperator &I) {
Value *R = Builder->CreateFMul(T, Y);
R->takeName(&I);
return ReplaceInstUsesWith(I, R);
return replaceInstUsesWith(I, R);
}
}
}

@ -1054,10 +1054,10 @@ Instruction *InstCombiner::visitUDiv(BinaryOperator &I) {
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
if (Value *V = SimplifyVectorOp(I))
return ReplaceInstUsesWith(I, V);
return replaceInstUsesWith(I, V);
if (Value *V = SimplifyUDivInst(Op0, Op1, DL, TLI, DT, AC))
return ReplaceInstUsesWith(I, V);
return replaceInstUsesWith(I, V);
// Handle the integer div common cases
if (Instruction *Common = commonIDivTransforms(I))

@ -1127,10 +1127,10 @@ Instruction *InstCombiner::visitSDiv(BinaryOperator &I) {
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
if (Value *V = SimplifyVectorOp(I))
return ReplaceInstUsesWith(I, V);
return replaceInstUsesWith(I, V);
if (Value *V = SimplifySDivInst(Op0, Op1, DL, TLI, DT, AC))
return ReplaceInstUsesWith(I, V);
return replaceInstUsesWith(I, V);
// Handle the integer div common cases
if (Instruction *Common = commonIDivTransforms(I))

@ -1225,11 +1225,11 @@ Instruction *InstCombiner::visitFDiv(BinaryOperator &I) {
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
if (Value *V = SimplifyVectorOp(I))
return ReplaceInstUsesWith(I, V);
return replaceInstUsesWith(I, V);
if (Value *V = SimplifyFDivInst(Op0, Op1, I.getFastMathFlags(),
DL, TLI, DT, AC))
return ReplaceInstUsesWith(I, V);
return replaceInstUsesWith(I, V);
if (isa<Constant>(Op0))
if (SelectInst *SI = dyn_cast<SelectInst>(Op1))

@ -1391,10 +1391,10 @@ Instruction *InstCombiner::visitURem(BinaryOperator &I) {
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
if (Value *V = SimplifyVectorOp(I))
return ReplaceInstUsesWith(I, V);
return replaceInstUsesWith(I, V);
if (Value *V = SimplifyURemInst(Op0, Op1, DL, TLI, DT, AC))
return ReplaceInstUsesWith(I, V);
return replaceInstUsesWith(I, V);
if (Instruction *common = commonIRemTransforms(I))
return common;

@ -1416,7 +1416,7 @@ Instruction *InstCombiner::visitURem(BinaryOperator &I) {
if (match(Op0, m_One())) {
Value *Cmp = Builder->CreateICmpNE(Op1, Op0);
Value *Ext = Builder->CreateZExt(Cmp, I.getType());
return ReplaceInstUsesWith(I, Ext);
return replaceInstUsesWith(I, Ext);
}
return nullptr;

@ -1426,10 +1426,10 @@ Instruction *InstCombiner::visitSRem(BinaryOperator &I) {
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
if (Value *V = SimplifyVectorOp(I))
return ReplaceInstUsesWith(I, V);
return replaceInstUsesWith(I, V);
if (Value *V = SimplifySRemInst(Op0, Op1, DL, TLI, DT, AC))
return ReplaceInstUsesWith(I, V);
return replaceInstUsesWith(I, V);
// Handle the integer rem common cases
if (Instruction *Common = commonIRemTransforms(I))

@ -1501,11 +1501,11 @@ Instruction *InstCombiner::visitFRem(BinaryOperator &I) {
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
if (Value *V = SimplifyVectorOp(I))
return ReplaceInstUsesWith(I, V);
return replaceInstUsesWith(I, V);
if (Value *V = SimplifyFRemInst(Op0, Op1, I.getFastMathFlags(),
DL, TLI, DT, AC))
return ReplaceInstUsesWith(I, V);
return replaceInstUsesWith(I, V);
// Handle cases involving: rem X, (select Cond, Y, Z)
if (isa<SelectInst>(Op1) && SimplifyDivRemOfSelect(I))
@ -768,7 +768,7 @@ Instruction *InstCombiner::SliceUpIllegalIntegerPHI(PHINode &FirstPhi) {
// If we have no users, they must be all self uses, just nuke the PHI.
if (PHIUsers.empty())
return ReplaceInstUsesWith(FirstPhi, UndefValue::get(FirstPhi.getType()));
return replaceInstUsesWith(FirstPhi, UndefValue::get(FirstPhi.getType()));
// If this phi node is transformable, create new PHIs for all the pieces
// extracted out of it. First, sort the users by their offset and size.

@ -864,22 +864,22 @@ Instruction *InstCombiner::SliceUpIllegalIntegerPHI(PHINode &FirstPhi) {
}
// Replace the use of this piece with the PHI node.
ReplaceInstUsesWith(*PHIUsers[UserI].Inst, EltPHI);
replaceInstUsesWith(*PHIUsers[UserI].Inst, EltPHI);
}
// Replace all the remaining uses of the PHI nodes (self uses and the lshrs)
// with undefs.
Value *Undef = UndefValue::get(FirstPhi.getType());
for (unsigned i = 1, e = PHIsToSlice.size(); i != e; ++i)
ReplaceInstUsesWith(*PHIsToSlice[i], Undef);
return ReplaceInstUsesWith(FirstPhi, Undef);
replaceInstUsesWith(*PHIsToSlice[i], Undef);
return replaceInstUsesWith(FirstPhi, Undef);
}
// PHINode simplification
//
Instruction *InstCombiner::visitPHINode(PHINode &PN) {
if (Value *V = SimplifyInstruction(&PN, DL, TLI, DT, AC))
return ReplaceInstUsesWith(PN, V);
return replaceInstUsesWith(PN, V);
if (Instruction *Result = FoldPHIArgZextsIntoPHI(PN))
return Result;

@ -905,7 +905,7 @@ Instruction *InstCombiner::visitPHINode(PHINode &PN) {
SmallPtrSet<PHINode*, 16> PotentiallyDeadPHIs;
PotentiallyDeadPHIs.insert(&PN);
if (DeadPHICycle(PU, PotentiallyDeadPHIs))
return ReplaceInstUsesWith(PN, UndefValue::get(PN.getType()));
return replaceInstUsesWith(PN, UndefValue::get(PN.getType()));
}
// If this phi has a single use, and if that use just computes a value for

@ -917,7 +917,7 @@ Instruction *InstCombiner::visitPHINode(PHINode &PN) {
if (PHIUser->hasOneUse() &&
(isa<BinaryOperator>(PHIUser) || isa<GetElementPtrInst>(PHIUser)) &&
PHIUser->user_back() == &PN) {
return ReplaceInstUsesWith(PN, UndefValue::get(PN.getType()));
return replaceInstUsesWith(PN, UndefValue::get(PN.getType()));
}
}

@ -951,7 +951,7 @@ Instruction *InstCombiner::visitPHINode(PHINode &PN) {
if (InValNo == NumIncomingVals) {
SmallPtrSet<PHINode*, 16> ValueEqualPHIs;
if (PHIsEqualValue(&PN, NonPhiInVal, ValueEqualPHIs))
return ReplaceInstUsesWith(PN, NonPhiInVal);
return replaceInstUsesWith(PN, NonPhiInVal);
}
}
}
@ -519,10 +519,10 @@ Instruction *InstCombiner::visitSelectInstWithICmp(SelectInst &SI,
// Check if we can express the operation with a single or.
if (C2->isAllOnesValue())
return ReplaceInstUsesWith(SI, Builder->CreateOr(AShr, C1));
return replaceInstUsesWith(SI, Builder->CreateOr(AShr, C1));
Value *And = Builder->CreateAnd(AShr, C2->getValue()-C1->getValue());
return ReplaceInstUsesWith(SI, Builder->CreateAdd(And, C1));
return replaceInstUsesWith(SI, Builder->CreateAdd(And, C1));
}
}
}

@ -585,15 +585,15 @@ Instruction *InstCombiner::visitSelectInstWithICmp(SelectInst &SI,
V = Builder->CreateOr(X, *Y);
if (V)
return ReplaceInstUsesWith(SI, V);
return replaceInstUsesWith(SI, V);
}
}
if (Value *V = foldSelectICmpAndOr(SI, TrueVal, FalseVal, Builder))
return ReplaceInstUsesWith(SI, V);
return replaceInstUsesWith(SI, V);
if (Value *V = foldSelectCttzCtlz(ICI, TrueVal, FalseVal, Builder))
return ReplaceInstUsesWith(SI, V);
return replaceInstUsesWith(SI, V);
return Changed ? &SI : nullptr;
}

@ -646,7 +646,7 @@ Instruction *InstCombiner::FoldSPFofSPF(Instruction *Inner,
// MAX(MAX(A, B), B) -> MAX(A, B)
// MIN(MIN(a, b), a) -> MIN(a, b)
if (SPF1 == SPF2)
return ReplaceInstUsesWith(Outer, Inner);
return replaceInstUsesWith(Outer, Inner);
// MAX(MIN(a, b), a) -> a
// MIN(MAX(a, b), a) -> a

@ -654,7 +654,7 @@ Instruction *InstCombiner::FoldSPFofSPF(Instruction *Inner,
(SPF1 == SPF_SMAX && SPF2 == SPF_SMIN) ||
(SPF1 == SPF_UMIN && SPF2 == SPF_UMAX) ||
(SPF1 == SPF_UMAX && SPF2 == SPF_UMIN))
return ReplaceInstUsesWith(Outer, C);
return replaceInstUsesWith(Outer, C);
}
if (SPF1 == SPF2) {

@ -669,7 +669,7 @@ Instruction *InstCombiner::FoldSPFofSPF(Instruction *Inner,
(SPF1 == SPF_SMIN && ACB.sle(ACC)) ||
(SPF1 == SPF_UMAX && ACB.uge(ACC)) ||
(SPF1 == SPF_SMAX && ACB.sge(ACC)))
return ReplaceInstUsesWith(Outer, Inner);
return replaceInstUsesWith(Outer, Inner);
// MIN(MIN(A, 97), 23) -> MIN(A, 23)
// MAX(MAX(A, 23), 97) -> MAX(A, 97)

@ -687,7 +687,7 @@ Instruction *InstCombiner::FoldSPFofSPF(Instruction *Inner,
// ABS(ABS(X)) -> ABS(X)
// NABS(NABS(X)) -> NABS(X)
if (SPF1 == SPF2 && (SPF1 == SPF_ABS || SPF1 == SPF_NABS)) {
return ReplaceInstUsesWith(Outer, Inner);
return replaceInstUsesWith(Outer, Inner);
}
// ABS(NABS(X)) -> ABS(X)

@ -697,7 +697,7 @@ Instruction *InstCombiner::FoldSPFofSPF(Instruction *Inner,
SelectInst *SI = cast<SelectInst>(Inner);
Value *NewSI = Builder->CreateSelect(
SI->getCondition(), SI->getFalseValue(), SI->getTrueValue());
return ReplaceInstUsesWith(Outer, NewSI);
return replaceInstUsesWith(Outer, NewSI);
}
auto IsFreeOrProfitableToInvert =

@ -742,7 +742,7 @@ Instruction *InstCombiner::FoldSPFofSPF(Instruction *Inner,
Builder, getInverseMinMaxSelectPattern(SPF1), NotA, NotB);
Value *NewOuter = Builder->CreateNot(generateMinMaxSelectPattern(
Builder, getInverseMinMaxSelectPattern(SPF2), NewInner, NotC));
return ReplaceInstUsesWith(Outer, NewOuter);
return replaceInstUsesWith(Outer, NewOuter);
}
return nullptr;

@ -830,7 +830,7 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
if (Value *V =
SimplifySelectInst(CondVal, TrueVal, FalseVal, DL, TLI, DT, AC))
return ReplaceInstUsesWith(SI, V);
return replaceInstUsesWith(SI, V);
if (SI.getType()->isIntegerTy(1)) {
if (ConstantInt *C = dyn_cast<ConstantInt>(TrueVal)) {

@ -891,7 +891,7 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
}
if (Value *V = foldSelectICmpAnd(SI, TrueValC, FalseValC, Builder))
return ReplaceInstUsesWith(SI, V);
return replaceInstUsesWith(SI, V);
}
// See if we are selecting two values based on a comparison of the two values.

@ -907,7 +907,7 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
!CFPt->getValueAPF().isZero()) ||
((CFPf = dyn_cast<ConstantFP>(FalseVal)) &&
!CFPf->getValueAPF().isZero()))
return ReplaceInstUsesWith(SI, FalseVal);
return replaceInstUsesWith(SI, FalseVal);
}
// Transform (X une Y) ? X : Y -> X
if (FCI->getPredicate() == FCmpInst::FCMP_UNE) {

@ -919,7 +919,7 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
!CFPt->getValueAPF().isZero()) ||
((CFPf = dyn_cast<ConstantFP>(FalseVal)) &&
!CFPf->getValueAPF().isZero()))
return ReplaceInstUsesWith(SI, TrueVal);
return replaceInstUsesWith(SI, TrueVal);
}
// Canonicalize to use ordered comparisons by swapping the select

@ -950,7 +950,7 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
!CFPt->getValueAPF().isZero()) ||
((CFPf = dyn_cast<ConstantFP>(FalseVal)) &&
!CFPf->getValueAPF().isZero()))
return ReplaceInstUsesWith(SI, FalseVal);
return replaceInstUsesWith(SI, FalseVal);
}
// Transform (X une Y) ? Y : X -> Y
if (FCI->getPredicate() == FCmpInst::FCMP_UNE) {

@ -962,7 +962,7 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
!CFPt->getValueAPF().isZero()) ||
((CFPf = dyn_cast<ConstantFP>(FalseVal)) &&
!CFPf->getValueAPF().isZero()))
return ReplaceInstUsesWith(SI, TrueVal);
return replaceInstUsesWith(SI, TrueVal);
}
// Canonicalize to use ordered comparisons by swapping the select

@ -1089,7 +1089,7 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
Value *NewSI = Builder->CreateCast(CastOp,
Builder->CreateSelect(Cmp, LHS, RHS),
SI.getType());
return ReplaceInstUsesWith(SI, NewSI);
return replaceInstUsesWith(SI, NewSI);
}
}

@ -1132,7 +1132,7 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
: Builder->CreateICmpULT(NewLHS, NewRHS);
Value *NewSI =
Builder->CreateNot(Builder->CreateSelect(NewCmp, NewLHS, NewRHS));
return ReplaceInstUsesWith(SI, NewSI);
return replaceInstUsesWith(SI, NewSI);
}
}
}

@ -1201,12 +1201,12 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
APInt AllOnesEltMask(APInt::getAllOnesValue(VWidth));
if (Value *V = SimplifyDemandedVectorElts(&SI, AllOnesEltMask, UndefElts)) {
if (V != &SI)
return ReplaceInstUsesWith(SI, V);
return replaceInstUsesWith(SI, V);
return &SI;
}
if (isa<ConstantAggregateZero>(CondVal)) {
return ReplaceInstUsesWith(SI, FalseVal);
return replaceInstUsesWith(SI, FalseVal);
}
}
@ -340,7 +340,7 @@ Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, Constant *Op1,
DEBUG(dbgs() << "ICE: GetShiftedValue propagating shift through expression"
" to eliminate shift:\n IN: " << *Op0 << "\n SH: " << I <<"\n");
return ReplaceInstUsesWith(
return replaceInstUsesWith(
I, GetShiftedValue(Op0, COp1->getZExtValue(), isLeftShift, *this, DL));
}

@ -573,7 +573,7 @@ Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, Constant *Op1,
// saturates.
if (AmtSum >= TypeBits) {
if (I.getOpcode() != Instruction::AShr)
return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
return replaceInstUsesWith(I, Constant::getNullValue(I.getType()));
AmtSum = TypeBits-1; // Saturate to 31 for i32 ashr.
}

@ -694,12 +694,12 @@ Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, Constant *Op1,
Instruction *InstCombiner::visitShl(BinaryOperator &I) {
if (Value *V = SimplifyVectorOp(I))
return ReplaceInstUsesWith(I, V);
return replaceInstUsesWith(I, V);
if (Value *V =
SimplifyShlInst(I.getOperand(0), I.getOperand(1), I.hasNoSignedWrap(),
I.hasNoUnsignedWrap(), DL, TLI, DT, AC))
return ReplaceInstUsesWith(I, V);
return replaceInstUsesWith(I, V);
if (Instruction *V = commonShiftTransforms(I))
return V;

@ -736,11 +736,11 @@ Instruction *InstCombiner::visitShl(BinaryOperator &I) {
Instruction *InstCombiner::visitLShr(BinaryOperator &I) {
if (Value *V = SimplifyVectorOp(I))
return ReplaceInstUsesWith(I, V);
return replaceInstUsesWith(I, V);
if (Value *V = SimplifyLShrInst(I.getOperand(0), I.getOperand(1), I.isExact(),
DL, TLI, DT, AC))
return ReplaceInstUsesWith(I, V);
return replaceInstUsesWith(I, V);
if (Instruction *R = commonShiftTransforms(I))
return R;

@ -780,11 +780,11 @@ Instruction *InstCombiner::visitLShr(BinaryOperator &I) {
Instruction *InstCombiner::visitAShr(BinaryOperator &I) {
if (Value *V = SimplifyVectorOp(I))
return ReplaceInstUsesWith(I, V);
return replaceInstUsesWith(I, V);
if (Value *V = SimplifyAShrInst(I.getOperand(0), I.getOperand(1), I.isExact(),
DL, TLI, DT, AC))
return ReplaceInstUsesWith(I, V);
return replaceInstUsesWith(I, V);
if (Instruction *R = commonShiftTransforms(I))
return R;
@ -61,7 +61,7 @@ bool InstCombiner::SimplifyDemandedInstructionBits(Instruction &Inst) {
0, &Inst);
if (!V) return false;
if (V == &Inst) return true;
ReplaceInstUsesWith(Inst, V);
replaceInstUsesWith(Inst, V);
return true;
}
@ -125,19 +125,19 @@ Instruction *InstCombiner::scalarizePHI(ExtractElementInst &EI, PHINode *PN) {
scalarPHI->addIncoming(newEI, inBB);
}
}
return ReplaceInstUsesWith(EI, scalarPHI);
return replaceInstUsesWith(EI, scalarPHI);
}
Instruction *InstCombiner::visitExtractElementInst(ExtractElementInst &EI) {
if (Value *V = SimplifyExtractElementInst(
EI.getVectorOperand(), EI.getIndexOperand(), DL, TLI, DT, AC))
return ReplaceInstUsesWith(EI, V);
return replaceInstUsesWith(EI, V);
// If vector val is constant with all elements the same, replace EI with
// that element. We handle a known element # below.
if (Constant *C = dyn_cast<Constant>(EI.getOperand(0)))
if (cheapToScalarize(C, false))
return ReplaceInstUsesWith(EI, C->getAggregateElement(0U));
return replaceInstUsesWith(EI, C->getAggregateElement(0U));
// If extracting a specified index from the vector, see if we can recursively
// find a previously computed scalar that was inserted into the vector.

@ -198,7 +198,7 @@ Instruction *InstCombiner::visitExtractElementInst(ExtractElementInst &EI) {
} else if (InsertElementInst *IE = dyn_cast<InsertElementInst>(I)) {
// Extracting the inserted element?
if (IE->getOperand(2) == EI.getOperand(1))
return ReplaceInstUsesWith(EI, IE->getOperand(1));
return replaceInstUsesWith(EI, IE->getOperand(1));
// If the inserted and extracted elements are constants, they must not
// be the same value, extract from the pre-inserted value instead.
if (isa<Constant>(IE->getOperand(2)) && isa<Constant>(EI.getOperand(1))) {

@ -216,7 +216,7 @@ Instruction *InstCombiner::visitExtractElementInst(ExtractElementInst &EI) {
SVI->getOperand(0)->getType()->getVectorNumElements();
if (SrcIdx < 0)
return ReplaceInstUsesWith(EI, UndefValue::get(EI.getType()));
return replaceInstUsesWith(EI, UndefValue::get(EI.getType()));
if (SrcIdx < (int)LHSWidth)
Src = SVI->getOperand(0);
else {

@ -417,7 +417,7 @@ static void replaceExtractElements(InsertElementInst *InsElt,
continue;
auto *NewExt = ExtractElementInst::Create(WideVec, OldExt->getOperand(1));
NewExt->insertAfter(WideVec);
IC.ReplaceInstUsesWith(*OldExt, NewExt);
IC.replaceInstUsesWith(*OldExt, NewExt);
}
}

@ -546,7 +546,7 @@ Instruction *InstCombiner::visitInsertValueInst(InsertValueInst &I) {
}
if (IsRedundant)
return ReplaceInstUsesWith(I, I.getOperand(0));
return replaceInstUsesWith(I, I.getOperand(0));
return nullptr;
}

@ -557,7 +557,7 @@ Instruction *InstCombiner::visitInsertElementInst(InsertElementInst &IE) {
// Inserting an undef or into an undefined place, remove this.
if (isa<UndefValue>(ScalarOp) || isa<UndefValue>(IdxOp))
ReplaceInstUsesWith(IE, VecOp);
replaceInstUsesWith(IE, VecOp);
// If the inserted element was extracted from some other vector, and if the
// indexes are constant, try to turn this into a shufflevector operation.

@ -571,15 +571,15 @@ Instruction *InstCombiner::visitInsertElementInst(InsertElementInst &IE) {
unsigned InsertedIdx = cast<ConstantInt>(IdxOp)->getZExtValue();
if (ExtractedIdx >= NumExtractVectorElts) // Out of range extract.
return ReplaceInstUsesWith(IE, VecOp);
return replaceInstUsesWith(IE, VecOp);
if (InsertedIdx >= NumInsertVectorElts) // Out of range insert.
return ReplaceInstUsesWith(IE, UndefValue::get(IE.getType()));
return replaceInstUsesWith(IE, UndefValue::get(IE.getType()));
// If we are extracting a value from a vector, then inserting it right
// back into the same place, just use the input vector.
if (EI->getOperand(0) == VecOp && ExtractedIdx == InsertedIdx)
return ReplaceInstUsesWith(IE, VecOp);
return replaceInstUsesWith(IE, VecOp);
// If this insertelement isn't used by some other insertelement, turn it
// (and any insertelements it points to), into one big shuffle.

@ -605,7 +605,7 @@ Instruction *InstCombiner::visitInsertElementInst(InsertElementInst &IE) {
APInt AllOnesEltMask(APInt::getAllOnesValue(VWidth));
if (Value *V = SimplifyDemandedVectorElts(&IE, AllOnesEltMask, UndefElts)) {
if (V != &IE)
return ReplaceInstUsesWith(IE, V);
return replaceInstUsesWith(IE, V);
return &IE;
}

@ -910,7 +910,7 @@ Instruction *InstCombiner::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
// Undefined shuffle mask -> undefined value.
if (isa<UndefValue>(SVI.getOperand(2)))
return ReplaceInstUsesWith(SVI, UndefValue::get(SVI.getType()));
return replaceInstUsesWith(SVI, UndefValue::get(SVI.getType()));
unsigned VWidth = cast<VectorType>(SVI.getType())->getNumElements();

@ -918,7 +918,7 @@ Instruction *InstCombiner::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
APInt AllOnesEltMask(APInt::getAllOnesValue(VWidth));
if (Value *V = SimplifyDemandedVectorElts(&SVI, AllOnesEltMask, UndefElts)) {
if (V != &SVI)
return ReplaceInstUsesWith(SVI, V);
return replaceInstUsesWith(SVI, V);
LHS = SVI.getOperand(0);
RHS = SVI.getOperand(1);
MadeChange = true;

@ -933,7 +933,7 @@ Instruction *InstCombiner::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
// shuffle(undef,undef,mask) -> undef.
Value *Result = (VWidth == LHSWidth)
? LHS : UndefValue::get(SVI.getType());
return ReplaceInstUsesWith(SVI, Result);
return replaceInstUsesWith(SVI, Result);
}
// Remap any references to RHS to use LHS.

@ -967,13 +967,13 @@ Instruction *InstCombiner::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
recognizeIdentityMask(Mask, isLHSID, isRHSID);
// Eliminate identity shuffles.
if (isLHSID) return ReplaceInstUsesWith(SVI, LHS);
if (isRHSID) return ReplaceInstUsesWith(SVI, RHS);
if (isLHSID) return replaceInstUsesWith(SVI, LHS);
if (isRHSID) return replaceInstUsesWith(SVI, RHS);
}
if (isa<UndefValue>(RHS) && CanEvaluateShuffled(LHS, Mask)) {
Value *V = EvaluateInDifferentElementOrder(LHS, Mask);
return ReplaceInstUsesWith(SVI, V);
return replaceInstUsesWith(SVI, V);
}
// SROA generates shuffle+bitcast when the extracted sub-vector is bitcast to

@ -1060,7 +1060,7 @@ Instruction *InstCombiner::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
NewBC, ConstantInt::get(Int32Ty, BegIdx), SVI.getName() + ".extract");
// The shufflevector isn't being replaced: the bitcast that used it
// is. InstCombine will visit the newly-created instructions.
ReplaceInstUsesWith(*BC, Ext);
replaceInstUsesWith(*BC, Ext);
MadeChange = true;
}
}

@ -1251,8 +1251,8 @@ Instruction *InstCombiner::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
// corresponding argument.
bool isLHSID, isRHSID;
recognizeIdentityMask(newMask, isLHSID, isRHSID);
if (isLHSID && VWidth == LHSOp0Width) return ReplaceInstUsesWith(SVI, newLHS);
if (isRHSID && VWidth == RHSOp0Width) return ReplaceInstUsesWith(SVI, newRHS);
if (isLHSID && VWidth == LHSOp0Width) return replaceInstUsesWith(SVI, newLHS);
if (isRHSID && VWidth == RHSOp0Width) return replaceInstUsesWith(SVI, newRHS);
return MadeChange ? &SVI : nullptr;
}
@ -919,10 +919,10 @@ Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I) {
for (auto UI = PN->user_begin(), E = PN->user_end(); UI != E;) {
Instruction *User = cast<Instruction>(*UI++);
if (User == &I) continue;
ReplaceInstUsesWith(*User, NewPN);
EraseInstFromFunction(*User);
replaceInstUsesWith(*User, NewPN);
eraseInstFromFunction(*User);
}
return ReplaceInstUsesWith(I, NewPN);
return replaceInstUsesWith(I, NewPN);
}
/// Given a pointer type and a constant offset, determine whether or not there

@ -1335,7 +1335,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
SmallVector<Value*, 8> Ops(GEP.op_begin(), GEP.op_end());
if (Value *V = SimplifyGEPInst(GEP.getSourceElementType(), Ops, DL, TLI, DT, AC))
return ReplaceInstUsesWith(GEP, V);
return replaceInstUsesWith(GEP, V);
Value *PtrOp = GEP.getOperand(0);

@ -1821,7 +1821,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
if (I != BCI) {
I->takeName(BCI);
BCI->getParent()->getInstList().insert(BCI->getIterator(), I);
ReplaceInstUsesWith(*BCI, I);
replaceInstUsesWith(*BCI, I);
}
return &GEP;
}

@ -1843,7 +1843,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
: Builder->CreateGEP(nullptr, Operand, NewIndices);
if (NGEP->getType() == GEP.getType())
return ReplaceInstUsesWith(GEP, NGEP);
return replaceInstUsesWith(GEP, NGEP);
NGEP->takeName(&GEP);
if (NGEP->getType()->getPointerAddressSpace() != GEP.getAddressSpace())

@ -1944,19 +1944,19 @@ Instruction *InstCombiner::visitAllocSite(Instruction &MI) {
if (!I) continue;
if (ICmpInst *C = dyn_cast<ICmpInst>(I)) {
ReplaceInstUsesWith(*C,
replaceInstUsesWith(*C,
ConstantInt::get(Type::getInt1Ty(C->getContext()),
C->isFalseWhenEqual()));
} else if (isa<BitCastInst>(I) || isa<GetElementPtrInst>(I)) {
ReplaceInstUsesWith(*I, UndefValue::get(I->getType()));
replaceInstUsesWith(*I, UndefValue::get(I->getType()));
} else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
if (II->getIntrinsicID() == Intrinsic::objectsize) {
ConstantInt *CI = cast<ConstantInt>(II->getArgOperand(1));
uint64_t DontKnow = CI->isZero() ? -1ULL : 0;
ReplaceInstUsesWith(*I, ConstantInt::get(I->getType(), DontKnow));
replaceInstUsesWith(*I, ConstantInt::get(I->getType(), DontKnow));
}
}
EraseInstFromFunction(*I);
eraseInstFromFunction(*I);
}
if (InvokeInst *II = dyn_cast<InvokeInst>(&MI)) {

@ -1966,7 +1966,7 @@ Instruction *InstCombiner::visitAllocSite(Instruction &MI) {
InvokeInst::Create(F, II->getNormalDest(), II->getUnwindDest(),
None, "", II->getParent());
}
return EraseInstFromFunction(MI);
return eraseInstFromFunction(MI);
}
return nullptr;
}

@ -2037,13 +2037,13 @@ Instruction *InstCombiner::visitFree(CallInst &FI) {
// Insert a new store to null because we cannot modify the CFG here.
Builder->CreateStore(ConstantInt::getTrue(FI.getContext()),
UndefValue::get(Type::getInt1PtrTy(FI.getContext())));
return EraseInstFromFunction(FI);
return eraseInstFromFunction(FI);
}
// If we have 'free null' delete the instruction. This can happen in stl code
// when lots of inlining happens.
if (isa<ConstantPointerNull>(Op))
return EraseInstFromFunction(FI);
return eraseInstFromFunction(FI);
// If we optimize for code size, try to move the call to free before the null
// test so that simplify cfg can remove the empty block and dead code

@ -2202,11 +2202,11 @@ Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) {
Value *Agg = EV.getAggregateOperand();
if (!EV.hasIndices())
return ReplaceInstUsesWith(EV, Agg);
return replaceInstUsesWith(EV, Agg);
if (Value *V =
SimplifyExtractValueInst(Agg, EV.getIndices(), DL, TLI, DT, AC))
return ReplaceInstUsesWith(EV, V);
return replaceInstUsesWith(EV, V);
if (InsertValueInst *IV = dyn_cast<InsertValueInst>(Agg)) {
// We're extracting from an insertvalue instruction, compare the indices

@ -2232,7 +2232,7 @@ Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) {
// %B = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
// %C = extractvalue { i32, { i32 } } %B, 1, 0
// with "i32 42"
return ReplaceInstUsesWith(EV, IV->getInsertedValueOperand());
return replaceInstUsesWith(EV, IV->getInsertedValueOperand());
if (exti == exte) {
// The extract list is a prefix of the insert list. i.e. replace
// %I = insertvalue { i32, { i32 } } %A, i32 42, 1, 0

@ -2272,8 +2272,8 @@ Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) {
case Intrinsic::sadd_with_overflow:
if (*EV.idx_begin() == 0) { // Normal result.
Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
ReplaceInstUsesWith(*II, UndefValue::get(II->getType()));
EraseInstFromFunction(*II);
replaceInstUsesWith(*II, UndefValue::get(II->getType()));
eraseInstFromFunction(*II);
return BinaryOperator::CreateAdd(LHS, RHS);
}

@ -2289,8 +2289,8 @@ Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) {
case Intrinsic::ssub_with_overflow:
if (*EV.idx_begin() == 0) { // Normal result.
Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
ReplaceInstUsesWith(*II, UndefValue::get(II->getType()));
EraseInstFromFunction(*II);
replaceInstUsesWith(*II, UndefValue::get(II->getType()));
eraseInstFromFunction(*II);
return BinaryOperator::CreateSub(LHS, RHS);
}
break;

@ -2298,8 +2298,8 @@ Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) {
case Intrinsic::smul_with_overflow:
if (*EV.idx_begin() == 0) { // Normal result.
Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
ReplaceInstUsesWith(*II, UndefValue::get(II->getType()));
EraseInstFromFunction(*II);
replaceInstUsesWith(*II, UndefValue::get(II->getType()));
eraseInstFromFunction(*II);
return BinaryOperator::CreateMul(LHS, RHS);
}
break;

@ -2329,8 +2329,8 @@ Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) {
Value *GEP = Builder->CreateInBoundsGEP(L->getType(),
L->getPointerOperand(), Indices);
// Returning the load directly will cause the main loop to insert it in
// the wrong spot, so use ReplaceInstUsesWith().
return ReplaceInstUsesWith(EV, Builder->CreateLoad(GEP));
// the wrong spot, so use replaceInstUsesWith().
return replaceInstUsesWith(EV, Builder->CreateLoad(GEP));
}
// We could simplify extracts from other values. Note that nested extracts may
// already be simplified implicitly by the above: extract (extract (insert) )

@ -2730,7 +2730,7 @@ bool InstCombiner::run() {
// Check to see if we can DCE the instruction.
if (isInstructionTriviallyDead(I, TLI)) {
DEBUG(dbgs() << "IC: DCE: " << *I << '\n');
EraseInstFromFunction(*I);
eraseInstFromFunction(*I);
++NumDeadInst;
MadeIRChange = true;
continue;

@ -2743,9 +2743,9 @@ bool InstCombiner::run() {
DEBUG(dbgs() << "IC: ConstFold to: " << *C << " from: " << *I << '\n');
// Add operands to the worklist.
ReplaceInstUsesWith(*I, C);
replaceInstUsesWith(*I, C);
++NumConstProp;
EraseInstFromFunction(*I);
eraseInstFromFunction(*I);
MadeIRChange = true;
continue;
}

@ -2764,9 +2764,9 @@ bool InstCombiner::run() {
" from: " << *I << '\n');
// Add operands to the worklist.
ReplaceInstUsesWith(*I, C);
replaceInstUsesWith(*I, C);
++NumConstProp;
EraseInstFromFunction(*I);
eraseInstFromFunction(*I);
MadeIRChange = true;
continue;
}

@ -2851,7 +2851,7 @@ bool InstCombiner::run() {
InstParent->getInstList().insert(InsertPos, Result);
EraseInstFromFunction(*I);
eraseInstFromFunction(*I);
} else {
#ifndef NDEBUG
DEBUG(dbgs() << "IC: Mod = " << OrigI << '\n'

@ -2861,7 +2861,7 @@ bool InstCombiner::run() {
// If the instruction was modified, it's possible that it is now dead.
// if so, remove it.
if (isInstructionTriviallyDead(I, TLI)) {
EraseInstFromFunction(*I);
eraseInstFromFunction(*I);
} else {
Worklist.Add(I);
Worklist.AddUsersToWorkList(*I);