InstCombine: Replace some never-null pointers with references. NFC
llvm-svn: 277792
This commit is contained in:
parent 808d13ea49
commit 9979840f59
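Every hunk in this diff applies the same mechanical change: the InstCombiner analysis members that can never be null (AC, TLI, DT) are stored and passed as references instead of pointers, member uses switch from -> to ., and call sites whose callees still take pointer parameters now pass &AC, &TLI, &DT (or take the address of a getter result). Because a reference cannot be null, the dead "if (!DT) return false;" guard in dominatesAllUses can simply be deleted, as one hunk below does. A minimal standalone sketch of the pattern follows; the stub names are hypothetical and are not LLVM code:

// Sketch of the pointer-to-reference refactoring applied in this commit.
#include <iostream>

struct DomTreeStub {            // stand-in for an analysis such as DominatorTree
  bool dominates() const { return true; }
};

// A helper whose pointer parameter may legitimately be null elsewhere keeps
// its pointer signature; never-null callers now pass "&DT" instead of "DT".
bool queryAnalysis(const DomTreeStub *DT) {
  return DT && DT->dominates();
}

class CombinerStub {
  DomTreeStub &DT; // was: DomTreeStub *DT; the reference encodes "never null"

public:
  explicit CombinerStub(DomTreeStub &DT) : DT(DT) {} // was: DomTreeStub *DT

  bool run() const {
    // Use the member directly (DT.dominates() rather than DT->dominates()),
    // and take its address only where a pointer API is still required.
    return DT.dominates() && queryAnalysis(&DT);
  }
};

int main() {
  DomTreeStub DT;
  CombinerStub IC(DT);          // was: CombinerStub IC(&DT);
  std::cout << std::boolalpha << IC.run() << '\n'; // prints "true"
}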
@@ -1035,7 +1035,7 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
     return replaceInstUsesWith(I, V);
 
   if (Value *V = SimplifyAddInst(LHS, RHS, I.hasNoSignedWrap(),
-                                 I.hasNoUnsignedWrap(), DL, TLI, DT, AC))
+                                 I.hasNoUnsignedWrap(), DL, &TLI, &DT, &AC))
     return replaceInstUsesWith(I, V);
 
   // (A*B)+(A*C) -> A*(B+C) etc

@@ -1154,7 +1154,7 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
     return replaceInstUsesWith(I, V);
 
   // A+B --> A|B iff A and B have no bits set in common.
-  if (haveNoCommonBitsSet(LHS, RHS, DL, AC, &I, DT))
+  if (haveNoCommonBitsSet(LHS, RHS, DL, &AC, &I, &DT))
     return BinaryOperator::CreateOr(LHS, RHS);
 
   if (Constant *CRHS = dyn_cast<Constant>(RHS)) {

@@ -1317,7 +1317,7 @@ Instruction *InstCombiner::visitFAdd(BinaryOperator &I) {
     return replaceInstUsesWith(I, V);
 
   if (Value *V =
-          SimplifyFAddInst(LHS, RHS, I.getFastMathFlags(), DL, TLI, DT, AC))
+          SimplifyFAddInst(LHS, RHS, I.getFastMathFlags(), DL, &TLI, &DT, &AC))
     return replaceInstUsesWith(I, V);
 
   if (isa<Constant>(RHS)) {

@@ -1493,7 +1493,7 @@ Instruction *InstCombiner::visitSub(BinaryOperator &I) {
     return replaceInstUsesWith(I, V);
 
   if (Value *V = SimplifySubInst(Op0, Op1, I.hasNoSignedWrap(),
-                                 I.hasNoUnsignedWrap(), DL, TLI, DT, AC))
+                                 I.hasNoUnsignedWrap(), DL, &TLI, &DT, &AC))
     return replaceInstUsesWith(I, V);
 
   // (A*B)-(A*C) -> A*(B-C) etc

@@ -1692,7 +1692,7 @@ Instruction *InstCombiner::visitFSub(BinaryOperator &I) {
     return replaceInstUsesWith(I, V);
 
   if (Value *V =
-          SimplifyFSubInst(Op0, Op1, I.getFastMathFlags(), DL, TLI, DT, AC))
+          SimplifyFSubInst(Op0, Op1, I.getFastMathFlags(), DL, &TLI, &DT, &AC))
     return replaceInstUsesWith(I, V);
 
   // fsub nsz 0, X ==> fsub nsz -0.0, X

@@ -1279,7 +1279,7 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
   if (Value *V = SimplifyVectorOp(I))
     return replaceInstUsesWith(I, V);
 
-  if (Value *V = SimplifyAndInst(Op0, Op1, DL, TLI, DT, AC))
+  if (Value *V = SimplifyAndInst(Op0, Op1, DL, &TLI, &DT, &AC))
     return replaceInstUsesWith(I, V);
 
   // (A|B)&(A|C) -> A|(B&C) etc

@@ -1664,17 +1664,17 @@ Value *InstCombiner::FoldOrOfICmps(ICmpInst *LHS, ICmpInst *RHS,
     Value *Mask = nullptr;
     Value *Masked = nullptr;
     if (LAnd->getOperand(0) == RAnd->getOperand(0) &&
-        isKnownToBeAPowerOfTwo(LAnd->getOperand(1), DL, false, 0, AC, CxtI,
-                               DT) &&
-        isKnownToBeAPowerOfTwo(RAnd->getOperand(1), DL, false, 0, AC, CxtI,
-                               DT)) {
+        isKnownToBeAPowerOfTwo(LAnd->getOperand(1), DL, false, 0, &AC, CxtI,
+                               &DT) &&
+        isKnownToBeAPowerOfTwo(RAnd->getOperand(1), DL, false, 0, &AC, CxtI,
+                               &DT)) {
       Mask = Builder->CreateOr(LAnd->getOperand(1), RAnd->getOperand(1));
       Masked = Builder->CreateAnd(LAnd->getOperand(0), Mask);
     } else if (LAnd->getOperand(1) == RAnd->getOperand(1) &&
-               isKnownToBeAPowerOfTwo(LAnd->getOperand(0), DL, false, 0, AC,
-                                      CxtI, DT) &&
-               isKnownToBeAPowerOfTwo(RAnd->getOperand(0), DL, false, 0, AC,
-                                      CxtI, DT)) {
+               isKnownToBeAPowerOfTwo(LAnd->getOperand(0), DL, false, 0, &AC,
+                                      CxtI, &DT) &&
+               isKnownToBeAPowerOfTwo(RAnd->getOperand(0), DL, false, 0, &AC,
+                                      CxtI, &DT)) {
       Mask = Builder->CreateOr(LAnd->getOperand(0), RAnd->getOperand(0));
      Masked = Builder->CreateAnd(LAnd->getOperand(1), Mask);
     }

@@ -2093,7 +2093,7 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
   if (Value *V = SimplifyVectorOp(I))
     return replaceInstUsesWith(I, V);
 
-  if (Value *V = SimplifyOrInst(Op0, Op1, DL, TLI, DT, AC))
+  if (Value *V = SimplifyOrInst(Op0, Op1, DL, &TLI, &DT, &AC))
     return replaceInstUsesWith(I, V);
 
   // (A&B)|(A&C) -> A&(B|C) etc

@@ -2446,7 +2446,7 @@ Instruction *InstCombiner::visitXor(BinaryOperator &I) {
   if (Value *V = SimplifyVectorOp(I))
     return replaceInstUsesWith(I, V);
 
-  if (Value *V = SimplifyXorInst(Op0, Op1, DL, TLI, DT, AC))
+  if (Value *V = SimplifyXorInst(Op0, Op1, DL, &TLI, &DT, &AC))
     return replaceInstUsesWith(I, V);
 
   // (A&B)^(A&C) -> A&(B^C) etc

@@ -79,8 +79,8 @@ static Constant *getNegativeIsTrueBoolVec(ConstantDataVector *V) {
 }
 
 Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
-  unsigned DstAlign = getKnownAlignment(MI->getArgOperand(0), DL, MI, AC, DT);
-  unsigned SrcAlign = getKnownAlignment(MI->getArgOperand(1), DL, MI, AC, DT);
+  unsigned DstAlign = getKnownAlignment(MI->getArgOperand(0), DL, MI, &AC, &DT);
+  unsigned SrcAlign = getKnownAlignment(MI->getArgOperand(1), DL, MI, &AC, &DT);
   unsigned MinAlign = std::min(DstAlign, SrcAlign);
   unsigned CopyAlign = MI->getAlignment();
 

@@ -173,7 +173,7 @@ Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
 }
 
 Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
-  unsigned Alignment = getKnownAlignment(MI->getDest(), DL, MI, AC, DT);
+  unsigned Alignment = getKnownAlignment(MI->getDest(), DL, MI, &AC, &DT);
   if (MI->getAlignment() < Alignment) {
     MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
                                       Alignment, false));

@@ -1243,10 +1243,10 @@ Instruction *InstCombiner::visitVACopyInst(VACopyInst &I) {
 Instruction *InstCombiner::visitCallInst(CallInst &CI) {
   auto Args = CI.arg_operands();
   if (Value *V = SimplifyCall(CI.getCalledValue(), Args.begin(), Args.end(), DL,
-                              TLI, DT, AC))
+                              &TLI, &DT, &AC))
     return replaceInstUsesWith(CI, V);
 
-  if (isFreeCall(&CI, TLI))
+  if (isFreeCall(&CI, &TLI))
     return visitFree(CI);
 
   // If the caller function is nounwind, mark the call as nounwind, even if the

@@ -1334,7 +1334,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
   default: break;
   case Intrinsic::objectsize: {
     uint64_t Size;
-    if (getObjectSize(II->getArgOperand(0), Size, DL, TLI)) {
+    if (getObjectSize(II->getArgOperand(0), Size, DL, &TLI)) {
       APInt APSize(II->getType()->getIntegerBitWidth(), Size);
       // Equality check to be sure that `Size` can fit in a value of type
       // `II->getType()`

@@ -1480,8 +1480,8 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
   case Intrinsic::ppc_altivec_lvx:
   case Intrinsic::ppc_altivec_lvxl:
     // Turn PPC lvx -> load if the pointer is known aligned.
-    if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL, II, AC, DT) >=
-        16) {
+    if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL, II, &AC,
+                                   &DT) >= 16) {
       Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0),
                                           PointerType::getUnqual(II->getType()));
       return new LoadInst(Ptr);

@@ -1497,8 +1497,8 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
   case Intrinsic::ppc_altivec_stvx:
   case Intrinsic::ppc_altivec_stvxl:
     // Turn stvx -> store if the pointer is known aligned.
-    if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, DL, II, AC, DT) >=
-        16) {
+    if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, DL, II, &AC,
+                                   &DT) >= 16) {
       Type *OpPtrTy =
         PointerType::getUnqual(II->getArgOperand(0)->getType());
       Value *Ptr = Builder->CreateBitCast(II->getArgOperand(1), OpPtrTy);

@@ -1514,8 +1514,8 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
   }
   case Intrinsic::ppc_qpx_qvlfs:
     // Turn PPC QPX qvlfs -> load if the pointer is known aligned.
-    if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL, II, AC, DT) >=
-        16) {
+    if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL, II, &AC,
+                                   &DT) >= 16) {
       Type *VTy = VectorType::get(Builder->getFloatTy(),
                                   II->getType()->getVectorNumElements());
       Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0),

@@ -1526,8 +1526,8 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
     break;
   case Intrinsic::ppc_qpx_qvlfd:
     // Turn PPC QPX qvlfd -> load if the pointer is known aligned.
-    if (getOrEnforceKnownAlignment(II->getArgOperand(0), 32, DL, II, AC, DT) >=
-        32) {
+    if (getOrEnforceKnownAlignment(II->getArgOperand(0), 32, DL, II, &AC,
+                                   &DT) >= 32) {
       Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0),
                                           PointerType::getUnqual(II->getType()));
       return new LoadInst(Ptr);

@@ -1535,8 +1535,8 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
     break;
   case Intrinsic::ppc_qpx_qvstfs:
     // Turn PPC QPX qvstfs -> store if the pointer is known aligned.
-    if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, DL, II, AC, DT) >=
-        16) {
+    if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, DL, II, &AC,
+                                   &DT) >= 16) {
       Type *VTy = VectorType::get(Builder->getFloatTy(),
           II->getArgOperand(0)->getType()->getVectorNumElements());
       Value *TOp = Builder->CreateFPTrunc(II->getArgOperand(0), VTy);

@@ -1547,8 +1547,8 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
     break;
   case Intrinsic::ppc_qpx_qvstfd:
     // Turn PPC QPX qvstfd -> store if the pointer is known aligned.
-    if (getOrEnforceKnownAlignment(II->getArgOperand(1), 32, DL, II, AC, DT) >=
-        32) {
+    if (getOrEnforceKnownAlignment(II->getArgOperand(1), 32, DL, II, &AC,
+                                   &DT) >= 32) {
       Type *OpPtrTy =
         PointerType::getUnqual(II->getArgOperand(0)->getType());
       Value *Ptr = Builder->CreateBitCast(II->getArgOperand(1), OpPtrTy);

@@ -2104,7 +2104,8 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
   case Intrinsic::arm_neon_vst2lane:
   case Intrinsic::arm_neon_vst3lane:
   case Intrinsic::arm_neon_vst4lane: {
-    unsigned MemAlign = getKnownAlignment(II->getArgOperand(0), DL, II, AC, DT);
+    unsigned MemAlign =
+        getKnownAlignment(II->getArgOperand(0), DL, II, &AC, &DT);
     unsigned AlignArg = II->getNumArgOperands() - 1;
     ConstantInt *IntrAlign = dyn_cast<ConstantInt>(II->getArgOperand(AlignArg));
     if (IntrAlign && IntrAlign->getZExtValue() < MemAlign) {

@@ -2288,7 +2289,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
         RHS->getType()->isPointerTy() &&
         cast<Constant>(RHS)->isNullValue()) {
       LoadInst* LI = cast<LoadInst>(LHS);
-      if (isValidAssumeForContext(II, LI, DT)) {
+      if (isValidAssumeForContext(II, LI, &DT)) {
         MDNode *MD = MDNode::get(II->getContext(), None);
         LI->setMetadata(LLVMContext::MD_nonnull, MD);
         return eraseInstFromFunction(*II);

@@ -2334,7 +2335,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
       return replaceInstUsesWith(*II, ConstantPointerNull::get(PT));
 
     // isKnownNonNull -> nonnull attribute
-    if (isKnownNonNullAt(DerivedPtr, II, DT))
+    if (isKnownNonNullAt(DerivedPtr, II, &DT))
       II->addAttribute(AttributeSet::ReturnIndex, Attribute::NonNull);
   }
 

@@ -2394,7 +2395,7 @@ Instruction *InstCombiner::tryOptimizeCall(CallInst *CI) {
   auto InstCombineRAUW = [this](Instruction *From, Value *With) {
     replaceInstUsesWith(*From, With);
   };
-  LibCallSimplifier Simplifier(DL, TLI, InstCombineRAUW);
+  LibCallSimplifier Simplifier(DL, &TLI, InstCombineRAUW);
   if (Value *With = Simplifier.optimizeCall(CI)) {
     ++NumSimplified;
     return CI->use_empty() ? CI : replaceInstUsesWith(*CI, With);

@@ -2483,7 +2484,7 @@ static IntrinsicInst *findInitTrampoline(Value *Callee) {
 /// Improvements for call and invoke instructions.
 Instruction *InstCombiner::visitCallSite(CallSite CS) {
 
-  if (isAllocLikeFn(CS.getInstruction(), TLI))
+  if (isAllocLikeFn(CS.getInstruction(), &TLI))
     return visitAllocSite(*CS.getInstruction());
 
   bool Changed = false;

@@ -2497,7 +2498,7 @@ Instruction *InstCombiner::visitCallSite(CallSite CS) {
   for (Value *V : CS.args()) {
     if (V->getType()->isPointerTy() &&
         !CS.paramHasAttr(ArgNo + 1, Attribute::NonNull) &&
-        isKnownNonNullAt(V, CS.getInstruction(), DT))
+        isKnownNonNullAt(V, CS.getInstruction(), &DT))
       Indices.push_back(ArgNo + 1);
     ArgNo++;
   }

@@ -161,7 +161,7 @@ Value *InstCombiner::EvaluateInDifferentType(Value *V, Type *Ty,
   if (Constant *C = dyn_cast<Constant>(V)) {
     C = ConstantExpr::getIntegerCast(C, Ty, isSigned /*Sext or ZExt*/);
     // If we got a constantexpr back, try to simplify it with DL info.
-    if (Constant *FoldedC = ConstantFoldConstant(C, DL, TLI))
+    if (Constant *FoldedC = ConstantFoldConstant(C, DL, &TLI))
       C = FoldedC;
     return C;
   }

@@ -314,7 +314,7 @@ Instruction *InstCombiner::foldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP,
 
     // Find out if the comparison would be true or false for the i'th element.
     Constant *C = ConstantFoldCompareInstOperands(ICI.getPredicate(), Elt,
-                                                  CompareRHS, DL, TLI);
+                                                  CompareRHS, DL, &TLI);
     // If the result is undef for this element, ignore it.
     if (isa<UndefValue>(C)) {
       // Extend range state machines to cover this element in case there is an

@@ -3015,12 +3015,9 @@ bool InstCombiner::dominatesAllUses(const Instruction *DI,
   // Protect from self-referencing blocks
   if (DI->getParent() == DB)
     return false;
-  // DominatorTree available?
-  if (!DT)
-    return false;
   for (const User *U : DI->users()) {
     auto *Usr = cast<Instruction>(U);
-    if (Usr != UI && !DT->dominates(DB, Usr->getParent()))
+    if (Usr != UI && !DT.dominates(DB, Usr->getParent()))
       return false;
   }
   return true;

@@ -3179,7 +3176,7 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
   }
 
   if (Value *V =
-          SimplifyICmpInst(I.getPredicate(), Op0, Op1, DL, TLI, DT, AC, &I))
+          SimplifyICmpInst(I.getPredicate(), Op0, Op1, DL, &TLI, &DT, &AC, &I))
     return replaceInstUsesWith(I, V);
 
   // comparing -val or val with non-zero is the same as just comparing val

@@ -4103,7 +4100,7 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
   // if A is a power of 2.
   if (match(Op0, m_And(m_Value(A), m_Not(m_Value(B)))) &&
       match(Op1, m_Zero()) &&
-      isKnownToBeAPowerOfTwo(A, DL, false, 0, AC, &I, DT) && I.isEquality())
+      isKnownToBeAPowerOfTwo(A, DL, false, 0, &AC, &I, &DT) && I.isEquality())
     return new ICmpInst(I.getInversePredicate(),
                         Builder->CreateAnd(A, B),
                         Op1);

@@ -4558,7 +4555,7 @@ Instruction *InstCombiner::visitFCmpInst(FCmpInst &I) {
   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
 
   if (Value *V = SimplifyFCmpInst(I.getPredicate(), Op0, Op1,
-                                  I.getFastMathFlags(), DL, TLI, DT, AC, &I))
+                                  I.getFastMathFlags(), DL, &TLI, &DT, &AC, &I))
     return replaceInstUsesWith(I, V);
 
   // Simplify 'fcmp pred X, X'

@@ -4678,7 +4675,7 @@ Instruction *InstCombiner::visitFCmpInst(FCmpInst &I) {
       break;
 
     CallInst *CI = cast<CallInst>(LHSI);
-    Intrinsic::ID IID = getIntrinsicForCallSite(CI, TLI);
+    Intrinsic::ID IID = getIntrinsicForCallSite(CI, &TLI);
     if (IID != Intrinsic::fabs)
       break;
 

@@ -161,10 +161,9 @@ private:
   AliasAnalysis *AA;
 
   // Required analyses.
-  // FIXME: These can never be null and should be references.
-  AssumptionCache *AC;
-  TargetLibraryInfo *TLI;
-  DominatorTree *DT;
+  AssumptionCache &AC;
+  TargetLibraryInfo &TLI;
+  DominatorTree &DT;
   const DataLayout &DL;
 
   // Optional analyses. When non-null, these can both be used to do better

@@ -176,8 +175,8 @@ private:
 public:
   InstCombiner(InstCombineWorklist &Worklist, BuilderTy *Builder,
                bool MinimizeSize, bool ExpensiveCombines, AliasAnalysis *AA,
-               AssumptionCache *AC, TargetLibraryInfo *TLI,
-               DominatorTree *DT, const DataLayout &DL, LoopInfo *LI)
+               AssumptionCache &AC, TargetLibraryInfo &TLI,
+               DominatorTree &DT, const DataLayout &DL, LoopInfo *LI)
       : Worklist(Worklist), Builder(Builder), MinimizeSize(MinimizeSize),
         ExpensiveCombines(ExpensiveCombines), AA(AA), AC(AC), TLI(TLI), DT(DT),
         DL(DL), LI(LI), MadeIRChange(false) {}

@@ -187,15 +186,15 @@ public:
   /// \returns true if the IR is changed.
   bool run();
 
-  AssumptionCache *getAssumptionCache() const { return AC; }
+  AssumptionCache &getAssumptionCache() const { return AC; }
 
   const DataLayout &getDataLayout() const { return DL; }
 
-  DominatorTree *getDominatorTree() const { return DT; }
+  DominatorTree &getDominatorTree() const { return DT; }
 
   LoopInfo *getLoopInfo() const { return LI; }
 
-  TargetLibraryInfo *getTargetLibraryInfo() const { return TLI; }
+  TargetLibraryInfo &getTargetLibraryInfo() const { return TLI; }
 
   // Visitation implementation - Implement instruction combining for different
   // instruction types. The semantics are as follows:

@@ -462,30 +461,30 @@ public:
 
   void computeKnownBits(Value *V, APInt &KnownZero, APInt &KnownOne,
                         unsigned Depth, Instruction *CxtI) const {
-    return llvm::computeKnownBits(V, KnownZero, KnownOne, DL, Depth, AC, CxtI,
-                                  DT);
+    return llvm::computeKnownBits(V, KnownZero, KnownOne, DL, Depth, &AC, CxtI,
+                                  &DT);
   }
 
   bool MaskedValueIsZero(Value *V, const APInt &Mask, unsigned Depth = 0,
                          Instruction *CxtI = nullptr) const {
-    return llvm::MaskedValueIsZero(V, Mask, DL, Depth, AC, CxtI, DT);
+    return llvm::MaskedValueIsZero(V, Mask, DL, Depth, &AC, CxtI, &DT);
   }
   unsigned ComputeNumSignBits(Value *Op, unsigned Depth = 0,
                               Instruction *CxtI = nullptr) const {
-    return llvm::ComputeNumSignBits(Op, DL, Depth, AC, CxtI, DT);
+    return llvm::ComputeNumSignBits(Op, DL, Depth, &AC, CxtI, &DT);
   }
   void ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne,
                       unsigned Depth = 0, Instruction *CxtI = nullptr) const {
-    return llvm::ComputeSignBit(V, KnownZero, KnownOne, DL, Depth, AC, CxtI,
-                                DT);
+    return llvm::ComputeSignBit(V, KnownZero, KnownOne, DL, Depth, &AC, CxtI,
+                                &DT);
   }
   OverflowResult computeOverflowForUnsignedMul(Value *LHS, Value *RHS,
                                                const Instruction *CxtI) {
-    return llvm::computeOverflowForUnsignedMul(LHS, RHS, DL, AC, CxtI, DT);
+    return llvm::computeOverflowForUnsignedMul(LHS, RHS, DL, &AC, CxtI, &DT);
   }
   OverflowResult computeOverflowForUnsignedAdd(Value *LHS, Value *RHS,
                                                const Instruction *CxtI) {
-    return llvm::computeOverflowForUnsignedAdd(LHS, RHS, DL, AC, CxtI, DT);
+    return llvm::computeOverflowForUnsignedAdd(LHS, RHS, DL, &AC, CxtI, &DT);
   }
 
 private:

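Note how the header change above ripples outward: getAssumptionCache(), getDominatorTree(), and getTargetLibraryInfo() now return references, so callers that feed a pointer parameter take an address first, e.g. &IC.getAssumptionCache() and &IC.getDominatorTree() in the simplifyValueKnownNonZero hunk further below.
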
@@ -286,7 +286,7 @@ Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
   SmallVector<Instruction *, 4> ToDelete;
   if (MemTransferInst *Copy = isOnlyCopiedFromConstantGlobal(&AI, ToDelete)) {
     unsigned SourceAlign = getOrEnforceKnownAlignment(
-        Copy->getSource(), AI.getAlignment(), DL, &AI, AC, DT);
+        Copy->getSource(), AI.getAlignment(), DL, &AI, &AC, &DT);
     if (AI.getAlignment() <= SourceAlign) {
       DEBUG(dbgs() << "Found alloca equal to global: " << AI << '\n');
       DEBUG(dbgs() << "  memcpy = " << *Copy << '\n');

@@ -795,7 +795,7 @@ Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
 
   // Attempt to improve the alignment.
   unsigned KnownAlign = getOrEnforceKnownAlignment(
-      Op, DL.getPrefTypeAlignment(LI.getType()), DL, &LI, AC, DT);
+      Op, DL.getPrefTypeAlignment(LI.getType()), DL, &LI, &AC, &DT);
   unsigned LoadAlign = LI.getAlignment();
   unsigned EffectiveLoadAlign =
       LoadAlign != 0 ? LoadAlign : DL.getABITypeAlignment(LI.getType());

@@ -1155,7 +1155,7 @@ Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
 
   // Attempt to improve the alignment.
   unsigned KnownAlign = getOrEnforceKnownAlignment(
-      Ptr, DL.getPrefTypeAlignment(Val->getType()), DL, &SI, AC, DT);
+      Ptr, DL.getPrefTypeAlignment(Val->getType()), DL, &SI, &AC, &DT);
   unsigned StoreAlign = SI.getAlignment();
   unsigned EffectiveStoreAlign =
       StoreAlign != 0 ? StoreAlign : DL.getABITypeAlignment(Val->getType());

@@ -48,8 +48,8 @@ static Value *simplifyValueKnownNonZero(Value *V, InstCombiner &IC,
   BinaryOperator *I = dyn_cast<BinaryOperator>(V);
   if (I && I->isLogicalShift() &&
       isKnownToBeAPowerOfTwo(I->getOperand(0), IC.getDataLayout(), false, 0,
-                             IC.getAssumptionCache(), &CxtI,
-                             IC.getDominatorTree())) {
+                             &IC.getAssumptionCache(), &CxtI,
+                             &IC.getDominatorTree())) {
     // We know that this is an exact/nuw shift and that the input is a
     // non-zero context as well.
     if (Value *V2 = simplifyValueKnownNonZero(I->getOperand(0), IC, CxtI)) {

@@ -179,7 +179,7 @@ Instruction *InstCombiner::visitMul(BinaryOperator &I) {
   if (Value *V = SimplifyVectorOp(I))
     return replaceInstUsesWith(I, V);
 
-  if (Value *V = SimplifyMulInst(Op0, Op1, DL, TLI, DT, AC))
+  if (Value *V = SimplifyMulInst(Op0, Op1, DL, &TLI, &DT, &AC))
     return replaceInstUsesWith(I, V);
 
   if (Value *V = SimplifyUsingDistributiveLaws(I))

@@ -545,7 +545,7 @@ Instruction *InstCombiner::visitFMul(BinaryOperator &I) {
     std::swap(Op0, Op1);
 
   if (Value *V =
-          SimplifyFMulInst(Op0, Op1, I.getFastMathFlags(), DL, TLI, DT, AC))
+          SimplifyFMulInst(Op0, Op1, I.getFastMathFlags(), DL, &TLI, &DT, &AC))
     return replaceInstUsesWith(I, V);
 
   bool AllowReassociate = I.hasUnsafeAlgebra();

@@ -1059,7 +1059,7 @@ Instruction *InstCombiner::visitUDiv(BinaryOperator &I) {
   if (Value *V = SimplifyVectorOp(I))
     return replaceInstUsesWith(I, V);
 
-  if (Value *V = SimplifyUDivInst(Op0, Op1, DL, TLI, DT, AC))
+  if (Value *V = SimplifyUDivInst(Op0, Op1, DL, &TLI, &DT, &AC))
     return replaceInstUsesWith(I, V);
 
   // Handle the integer div common cases

@@ -1132,7 +1132,7 @@ Instruction *InstCombiner::visitSDiv(BinaryOperator &I) {
   if (Value *V = SimplifyVectorOp(I))
     return replaceInstUsesWith(I, V);
 
-  if (Value *V = SimplifySDivInst(Op0, Op1, DL, TLI, DT, AC))
+  if (Value *V = SimplifySDivInst(Op0, Op1, DL, &TLI, &DT, &AC))
     return replaceInstUsesWith(I, V);
 
   // Handle the integer div common cases

@@ -1195,7 +1195,7 @@ Instruction *InstCombiner::visitSDiv(BinaryOperator &I) {
       return BO;
     }
 
-    if (isKnownToBeAPowerOfTwo(Op1, DL, /*OrZero*/ true, 0, AC, &I, DT)) {
+    if (isKnownToBeAPowerOfTwo(Op1, DL, /*OrZero*/ true, 0, &AC, &I, &DT)) {
       // X sdiv (1 << Y) -> X udiv (1 << Y) ( -> X u>> Y)
       // Safe because the only negative value (1 << Y) can take on is
      // INT_MIN, and X sdiv INT_MIN == X udiv INT_MIN == 0 if X doesn't have

@@ -1247,7 +1247,7 @@ Instruction *InstCombiner::visitFDiv(BinaryOperator &I) {
     return replaceInstUsesWith(I, V);
 
   if (Value *V = SimplifyFDivInst(Op0, Op1, I.getFastMathFlags(),
-                                  DL, TLI, DT, AC))
+                                  DL, &TLI, &DT, &AC))
     return replaceInstUsesWith(I, V);
 
   if (isa<Constant>(Op0))

@@ -1421,7 +1421,7 @@ Instruction *InstCombiner::visitURem(BinaryOperator &I) {
   if (Value *V = SimplifyVectorOp(I))
     return replaceInstUsesWith(I, V);
 
-  if (Value *V = SimplifyURemInst(Op0, Op1, DL, TLI, DT, AC))
+  if (Value *V = SimplifyURemInst(Op0, Op1, DL, &TLI, &DT, &AC))
     return replaceInstUsesWith(I, V);
 
   if (Instruction *common = commonIRemTransforms(I))

@@ -1434,7 +1434,7 @@ Instruction *InstCombiner::visitURem(BinaryOperator &I) {
                                   I.getType());
 
   // X urem Y -> X and Y-1, where Y is a power of 2,
-  if (isKnownToBeAPowerOfTwo(Op1, DL, /*OrZero*/ true, 0, AC, &I, DT)) {
+  if (isKnownToBeAPowerOfTwo(Op1, DL, /*OrZero*/ true, 0, &AC, &I, &DT)) {
     Constant *N1 = Constant::getAllOnesValue(I.getType());
     Value *Add = Builder->CreateAdd(Op1, N1);
     return BinaryOperator::CreateAnd(Op0, Add);

@@ -1456,7 +1456,7 @@ Instruction *InstCombiner::visitSRem(BinaryOperator &I) {
   if (Value *V = SimplifyVectorOp(I))
     return replaceInstUsesWith(I, V);
 
-  if (Value *V = SimplifySRemInst(Op0, Op1, DL, TLI, DT, AC))
+  if (Value *V = SimplifySRemInst(Op0, Op1, DL, &TLI, &DT, &AC))
     return replaceInstUsesWith(I, V);
 
   // Handle the integer rem common cases

@@ -1532,7 +1532,7 @@ Instruction *InstCombiner::visitFRem(BinaryOperator &I) {
     return replaceInstUsesWith(I, V);
 
   if (Value *V = SimplifyFRemInst(Op0, Op1, I.getFastMathFlags(),
-                                  DL, TLI, DT, AC))
+                                  DL, &TLI, &DT, &AC))
     return replaceInstUsesWith(I, V);
 
   // Handle cases involving: rem X, (select Cond, Y, Z)

@@ -864,7 +864,7 @@ Instruction *InstCombiner::SliceUpIllegalIntegerPHI(PHINode &FirstPhi) {
 // PHINode simplification
 //
 Instruction *InstCombiner::visitPHINode(PHINode &PN) {
-  if (Value *V = SimplifyInstruction(&PN, DL, TLI, DT, AC))
+  if (Value *V = SimplifyInstruction(&PN, DL, &TLI, &DT, &AC))
     return replaceInstUsesWith(PN, V);
 
   if (Instruction *Result = FoldPHIArgZextsIntoPHI(PN))

@@ -921,7 +921,7 @@ Instruction *InstCombiner::visitPHINode(PHINode &PN) {
   for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i) {
     Instruction *CtxI = PN.getIncomingBlock(i)->getTerminator();
     Value *VA = PN.getIncomingValue(i);
-    if (isKnownNonZero(VA, DL, 0, AC, CtxI, DT)) {
+    if (isKnownNonZero(VA, DL, 0, &AC, CtxI, &DT)) {
      if (!NonZeroConst)
        NonZeroConst = GetAnyNonZeroConstInt(PN);
      PN.setIncomingValue(i, NonZeroConst);

@@ -919,7 +919,7 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
   Type *SelType = SI.getType();
 
   if (Value *V =
-          SimplifySelectInst(CondVal, TrueVal, FalseVal, DL, TLI, DT, AC))
+          SimplifySelectInst(CondVal, TrueVal, FalseVal, DL, &TLI, &DT, &AC))
     return replaceInstUsesWith(SI, V);
 
   if (SelType->getScalarType()->isIntegerTy(1) &&

@@ -196,7 +196,7 @@ static Value *GetShiftedValue(Value *V, unsigned NumBits, bool isLeftShift,
     // If we got a constantexpr back, try to simplify it with TD info.
     if (auto *C = dyn_cast<Constant>(V))
       if (auto *FoldedC =
-              ConstantFoldConstant(C, DL, IC.getTargetLibraryInfo()))
+              ConstantFoldConstant(C, DL, &IC.getTargetLibraryInfo()))
        V = FoldedC;
     return V;
   }

@@ -701,7 +701,7 @@ Instruction *InstCombiner::visitShl(BinaryOperator &I) {
 
   if (Value *V =
           SimplifyShlInst(I.getOperand(0), I.getOperand(1), I.hasNoSignedWrap(),
-                          I.hasNoUnsignedWrap(), DL, TLI, DT, AC))
+                          I.hasNoUnsignedWrap(), DL, &TLI, &DT, &AC))
     return replaceInstUsesWith(I, V);
 
   if (Instruction *V = commonShiftTransforms(I))

@@ -742,7 +742,7 @@ Instruction *InstCombiner::visitLShr(BinaryOperator &I) {
     return replaceInstUsesWith(I, V);
 
   if (Value *V = SimplifyLShrInst(I.getOperand(0), I.getOperand(1), I.isExact(),
-                                  DL, TLI, DT, AC))
+                                  DL, &TLI, &DT, &AC))
     return replaceInstUsesWith(I, V);
 
   if (Instruction *R = commonShiftTransforms(I))

@@ -786,7 +786,7 @@ Instruction *InstCombiner::visitAShr(BinaryOperator &I) {
     return replaceInstUsesWith(I, V);
 
   if (Value *V = SimplifyAShrInst(I.getOperand(0), I.getOperand(1), I.isExact(),
-                                  DL, TLI, DT, AC))
+                                  DL, &TLI, &DT, &AC))
     return replaceInstUsesWith(I, V);
 
   if (Instruction *R = commonShiftTransforms(I))

@@ -145,7 +145,7 @@ Instruction *InstCombiner::scalarizePHI(ExtractElementInst &EI, PHINode *PN) {
 
 Instruction *InstCombiner::visitExtractElementInst(ExtractElementInst &EI) {
   if (Value *V = SimplifyExtractElementInst(
-          EI.getVectorOperand(), EI.getIndexOperand(), DL, TLI, DT, AC))
+          EI.getVectorOperand(), EI.getIndexOperand(), DL, &TLI, &DT, &AC))
     return replaceInstUsesWith(EI, V);
 
   // If vector val is constant with all elements the same, replace EI with

@@ -684,14 +684,14 @@ Value *InstCombiner::SimplifyUsingDistributiveLaws(BinaryOperator &I) {
       if (SI0->getCondition() == SI1->getCondition()) {
         Value *SI = nullptr;
         if (Value *V = SimplifyBinOp(TopLevelOpcode, SI0->getFalseValue(),
-                                     SI1->getFalseValue(), DL, TLI, DT, AC))
+                                     SI1->getFalseValue(), DL, &TLI, &DT, &AC))
           SI = Builder->CreateSelect(SI0->getCondition(),
                                      Builder->CreateBinOp(TopLevelOpcode,
                                                           SI0->getTrueValue(),
                                                           SI1->getTrueValue()),
                                      V);
         if (Value *V = SimplifyBinOp(TopLevelOpcode, SI0->getTrueValue(),
-                                     SI1->getTrueValue(), DL, TLI, DT, AC))
+                                     SI1->getTrueValue(), DL, &TLI, &DT, &AC))
           SI = Builder->CreateSelect(
               SI0->getCondition(), V,
               Builder->CreateBinOp(TopLevelOpcode, SI0->getFalseValue(),

@@ -877,7 +877,7 @@ Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I) {
       // If the incoming non-constant value is in I's block, we will remove one
       // instruction, but insert another equivalent one, leading to infinite
       // instcombine.
-      if (isPotentiallyReachable(I.getParent(), NonConstBB, DT, LI))
+      if (isPotentiallyReachable(I.getParent(), NonConstBB, &DT, LI))
         return nullptr;
     }
 

@@ -1379,7 +1379,8 @@ Value *InstCombiner::SimplifyVectorOp(BinaryOperator &Inst) {
 Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
   SmallVector<Value*, 8> Ops(GEP.op_begin(), GEP.op_end());
 
-  if (Value *V = SimplifyGEPInst(GEP.getSourceElementType(), Ops, DL, TLI, DT, AC))
+  if (Value *V =
+          SimplifyGEPInst(GEP.getSourceElementType(), Ops, DL, &TLI, &DT, &AC))
     return replaceInstUsesWith(GEP, V);
 
   Value *PtrOp = GEP.getOperand(0);

@@ -1860,7 +1861,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
       if (!Offset) {
         // If the bitcast is of an allocation, and the allocation will be
         // converted to match the type of the cast, don't touch this.
-        if (isa<AllocaInst>(Operand) || isAllocationFn(Operand, TLI)) {
+        if (isa<AllocaInst>(Operand) || isAllocationFn(Operand, &TLI)) {
          // See if the bitcast simplifies, if so, don't nuke this GEP yet.
          if (Instruction *I = visitBitCast(*BCI)) {
            if (I != BCI) {

@@ -2002,7 +2003,7 @@ Instruction *InstCombiner::visitAllocSite(Instruction &MI) {
   // to null and free calls, delete the calls and replace the comparisons with
   // true or false as appropriate.
   SmallVector<WeakVH, 64> Users;
-  if (isAllocSiteRemovable(&MI, Users, TLI)) {
+  if (isAllocSiteRemovable(&MI, Users, &TLI)) {
     for (unsigned i = 0, e = Users.size(); i != e; ++i) {
       // Lowering all @llvm.objectsize calls first because they may
       // use a bitcast/GEP of the alloca we are removing.

@@ -2014,7 +2015,7 @@ Instruction *InstCombiner::visitAllocSite(Instruction &MI) {
       if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
         if (II->getIntrinsicID() == Intrinsic::objectsize) {
           uint64_t Size;
-          if (!getObjectSize(II->getArgOperand(0), Size, DL, TLI)) {
+          if (!getObjectSize(II->getArgOperand(0), Size, DL, &TLI)) {
             ConstantInt *CI = cast<ConstantInt>(II->getArgOperand(1));
             Size = CI->isZero() ? -1ULL : 0;
           }

@@ -2284,7 +2285,7 @@ Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) {
     return replaceInstUsesWith(EV, Agg);
 
   if (Value *V =
-          SimplifyExtractValueInst(Agg, EV.getIndices(), DL, TLI, DT, AC))
+          SimplifyExtractValueInst(Agg, EV.getIndices(), DL, &TLI, &DT, &AC))
     return replaceInstUsesWith(EV, V);
 
   if (InsertValueInst *IV = dyn_cast<InsertValueInst>(Agg)) {

@@ -2813,7 +2814,7 @@ bool InstCombiner::run() {
     if (I == nullptr) continue; // skip null values.
 
     // Check to see if we can DCE the instruction.
-    if (isInstructionTriviallyDead(I, TLI)) {
+    if (isInstructionTriviallyDead(I, &TLI)) {
       DEBUG(dbgs() << "IC: DCE: " << *I << '\n');
       eraseInstFromFunction(*I);
       ++NumDeadInst;

@@ -2824,13 +2825,13 @@ bool InstCombiner::run() {
     // Instruction isn't dead, see if we can constant propagate it.
     if (!I->use_empty() &&
         (I->getNumOperands() == 0 || isa<Constant>(I->getOperand(0)))) {
-      if (Constant *C = ConstantFoldInstruction(I, DL, TLI)) {
+      if (Constant *C = ConstantFoldInstruction(I, DL, &TLI)) {
        DEBUG(dbgs() << "IC: ConstFold to: " << *C << " from: " << *I << '\n');
 
        // Add operands to the worklist.
        replaceInstUsesWith(*I, C);
        ++NumConstProp;
-        if (isInstructionTriviallyDead(I, TLI))
+        if (isInstructionTriviallyDead(I, &TLI))
          eraseInstFromFunction(*I);
        MadeIRChange = true;
        continue;

@@ -2852,7 +2853,7 @@ bool InstCombiner::run() {
         // Add operands to the worklist.
         replaceInstUsesWith(*I, C);
         ++NumConstProp;
-        if (isInstructionTriviallyDead(I, TLI))
+        if (isInstructionTriviallyDead(I, &TLI))
           eraseInstFromFunction(*I);
         MadeIRChange = true;
         continue;

@@ -2948,7 +2949,7 @@ bool InstCombiner::run() {
 
       // If the instruction was modified, it's possible that it is now dead.
       // if so, remove it.
-      if (isInstructionTriviallyDead(I, TLI)) {
+      if (isInstructionTriviallyDead(I, &TLI)) {
        eraseInstFromFunction(*I);
      } else {
        Worklist.Add(I);

@@ -3144,7 +3145,7 @@ combineInstructionsOverFunction(Function &F, InstCombineWorklist &Worklist,
   bool Changed = prepareICWorklistFromFunction(F, DL, &TLI, Worklist);
 
   InstCombiner IC(Worklist, &Builder, F.optForMinSize(), ExpensiveCombines,
-                  AA, &AC, &TLI, &DT, DL, LI);
+                  AA, AC, TLI, DT, DL, LI);
   Changed |= IC.run();
 
   if (!Changed)