[InstCombine] Add replaceOperand() helper
Adds a replaceOperand() helper, which is like Instruction.setOperand() but adds the old operand to the worklist. This reduces the amount of missing or incorrect worklist management.

This only applies the helper to a relatively small subset of setOperand() calls in InstCombine, namely those of the pattern `I.setOperand(); return &I;`, where it is most obviously applicable.

Differential Revision: https://reviews.llvm.org/D73803
parent e6c9ab4fb7
commit 878cb38a5c
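For orientation, the rewrite is mechanical: each touched call site goes from a two-statement `setOperand(); return &I;` sequence to a single `replaceOperand()` call. A minimal sketch of the helper and the before/after shape follows (the helper body is the one added to InstCombineInternal.h in this diff; the operand index 0 and the value X are placeholders for whatever each call site uses):

    // Helper added by this commit (InstCombineInternal.h):
    Instruction *replaceOperand(Instruction &I, unsigned OpNum, Value *V) {
      Worklist.addValue(I.getOperand(OpNum)); // queue the operand being dropped
      I.setOperand(OpNum, V);                 // install the replacement
      return &I;                              // caller returns &I so I itself is revisited
    }

    // Before: the old operand loses a use but is never re-added to the worklist.
    I.setOperand(0, X);
    return &I;

    // After: same effect, plus correct worklist management.
    return replaceOperand(I, 0, X);     // inside InstCombiner member functions
    return IC.replaceOperand(I, 0, X);  // from static helpers that take an InstCombiner &IC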
@@ -143,8 +143,7 @@ Instruction *InstCombiner::OptAndOp(BinaryOperator *Op,
       // the XOR is to toggle the bit. If it is clear, then the ADD has
       // no effect.
       if ((AddRHS & AndRHSV).isNullValue()) { // Bit is not set, noop
-        TheAnd.setOperand(0, X);
-        return &TheAnd;
+        return replaceOperand(TheAnd, 0, X);
       } else {
         // Pull the XOR out of the AND.
         Value *NewAnd = Builder.CreateAnd(X, AndRHS);
@@ -1204,19 +1204,15 @@ static Instruction *foldCttzCtlz(IntrinsicInst &II, InstCombiner &IC) {

   if (IsTZ) {
     // cttz(-x) -> cttz(x)
-    if (match(Op0, m_Neg(m_Value(X)))) {
-      II.setOperand(0, X);
-      return &II;
-    }
+    if (match(Op0, m_Neg(m_Value(X))))
+      return IC.replaceOperand(II, 0, X);

     // cttz(abs(x)) -> cttz(x)
     // cttz(nabs(x)) -> cttz(x)
     Value *Y;
     SelectPatternFlavor SPF = matchSelectPattern(Op0, X, Y).Flavor;
-    if (SPF == SPF_ABS || SPF == SPF_NABS) {
-      II.setOperand(0, X);
-      return &II;
-    }
+    if (SPF == SPF_ABS || SPF == SPF_NABS)
+      return IC.replaceOperand(II, 0, X);
   }

   KnownBits Known = IC.computeKnownBits(Op0, 0, &II);
@@ -1242,10 +1238,8 @@ static Instruction *foldCttzCtlz(IntrinsicInst &II, InstCombiner &IC) {
   if (!Known.One.isNullValue() ||
       isKnownNonZero(Op0, IC.getDataLayout(), 0, &IC.getAssumptionCache(), &II,
                      &IC.getDominatorTree())) {
-    if (!match(II.getArgOperand(1), m_One())) {
-      II.setOperand(1, IC.Builder.getTrue());
-      return &II;
-    }
+    if (!match(II.getArgOperand(1), m_One()))
+      return IC.replaceOperand(II, 1, IC.Builder.getTrue());
   }

   // Add range metadata since known bits can't completely reflect what we know.
@@ -1270,10 +1264,8 @@ static Instruction *foldCtpop(IntrinsicInst &II, InstCombiner &IC) {
   Value *X;
   // ctpop(bitreverse(x)) -> ctpop(x)
   // ctpop(bswap(x)) -> ctpop(x)
-  if (match(Op0, m_BitReverse(m_Value(X))) || match(Op0, m_BSwap(m_Value(X)))) {
-    II.setOperand(0, X);
-    return &II;
-  }
+  if (match(Op0, m_BitReverse(m_Value(X))) || match(Op0, m_BSwap(m_Value(X))))
+    return IC.replaceOperand(II, 0, X);

   // FIXME: Try to simplify vectors of integers.
   auto *IT = dyn_cast<IntegerType>(Op0->getType());
@@ -3959,8 +3951,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
       break;

     // If bound_ctrl = 1, row mask = bank mask = 0xf we can omit old value.
-    II->setOperand(0, UndefValue::get(Old->getType()));
-    return II;
+    return replaceOperand(*II, 0, UndefValue::get(Old->getType()));
   }
   case Intrinsic::amdgcn_permlane16:
   case Intrinsic::amdgcn_permlanex16: {
@@ -3974,8 +3965,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
     if (!FetchInvalid->getZExtValue() && !BoundCtrl->getZExtValue())
       break;

-    II->setArgOperand(0, UndefValue::get(VDstIn->getType()));
-    return II;
+    return replaceOperand(*II, 0, UndefValue::get(VDstIn->getType()));
   }
   case Intrinsic::amdgcn_readfirstlane:
   case Intrinsic::amdgcn_readlane: {
@@ -4135,8 +4125,8 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
     if (GCR.getBasePtr() == GCR.getDerivedPtr() &&
         GCR.getBasePtrIndex() != GCR.getDerivedPtrIndex()) {
       auto *OpIntTy = GCR.getOperand(2)->getType();
-      II->setOperand(2, ConstantInt::get(OpIntTy, GCR.getBasePtrIndex()));
-      return II;
+      return replaceOperand(*II, 2,
+                            ConstantInt::get(OpIntTy, GCR.getBasePtrIndex()));
     }

     // Translate facts known about a pointer before relocating into
@@ -1821,9 +1821,7 @@ Instruction *InstCombiner::commonPointerCastTransforms(CastInst &CI) {
       // Changing the cast operand is usually not a good idea but it is safe
       // here because the pointer operand is being replaced with another
       // pointer operand so the opcode doesn't need to change.
-      Worklist.push(GEP);
-      CI.setOperand(0, GEP->getOperand(0));
-      return &CI;
+      return replaceOperand(CI, 0, GEP->getOperand(0));
     }
   }

@@ -1575,11 +1575,8 @@ Instruction *InstCombiner::foldICmpXorConstant(ICmpInst &Cmp,

     // If the sign bit of the XorCst is not set, there is no change to
     // the operation, just stop using the Xor.
-    if (!XorC->isNegative()) {
-      Cmp.setOperand(0, X);
-      Worklist.push(Xor);
-      return &Cmp;
-    }
+    if (!XorC->isNegative())
+      return replaceOperand(Cmp, 0, X);

     // Emit the opposite comparison.
     if (TrueIfSigned)
@@ -1705,8 +1702,7 @@ Instruction *InstCombiner::foldICmpAndShift(ICmpInst &Cmp, BinaryOperator *And,

     // Compute X & (C2 << Y).
     Value *NewAnd = Builder.CreateAnd(Shift->getOperand(0), NewShift);
-    Cmp.setOperand(0, NewAnd);
-    return &Cmp;
+    return replaceOperand(Cmp, 0, NewAnd);
   }

   return nullptr;
@@ -1812,8 +1808,7 @@ Instruction *InstCombiner::foldICmpAndConstConst(ICmpInst &Cmp,
       }
       if (NewOr) {
         Value *NewAnd = Builder.CreateAnd(A, NewOr, And->getName());
-        Cmp.setOperand(0, NewAnd);
-        return &Cmp;
+        return replaceOperand(Cmp, 0, NewAnd);
       }
     }
   }
@@ -683,6 +683,13 @@ public:
     return &I;
   }

+  /// Replace operand of instruction and add old operand to the worklist.
+  Instruction *replaceOperand(Instruction &I, unsigned OpNum, Value *V) {
+    Worklist.addValue(I.getOperand(OpNum));
+    I.setOperand(OpNum, V);
+    return &I;
+  }
+
   /// Creates a result tuple for an overflow intrinsic \p II with a given
   /// \p Result and a constant \p Overflow value.
   Instruction *CreateOverflowTuple(IntrinsicInst *II, Value *Result,
@@ -187,9 +187,7 @@ static Instruction *simplifyAllocaArraySize(InstCombiner &IC, AllocaInst &AI) {
       return nullptr;

     // Canonicalize it.
-    Value *V = IC.Builder.getInt32(1);
-    AI.setOperand(0, V);
-    return &AI;
+    return IC.replaceOperand(AI, 0, IC.Builder.getInt32(1));
   }

   // Convert: alloca Ty, C - where C is a constant != 1 into: alloca [C x Ty], 1
@@ -230,8 +228,7 @@ static Instruction *simplifyAllocaArraySize(InstCombiner &IC, AllocaInst &AI) {
   Type *IntPtrTy = IC.getDataLayout().getIntPtrType(AI.getType());
   if (AI.getArraySize()->getType() != IntPtrTy) {
     Value *V = IC.Builder.CreateIntCast(AI.getArraySize(), IntPtrTy, false);
-    AI.setOperand(0, V);
-    return &AI;
+    return IC.replaceOperand(AI, 0, V);
   }

   return nullptr;
@@ -355,10 +352,9 @@ Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
   // For a zero sized alloca there is no point in doing an array allocation.
   // This is helpful if the array size is a complicated expression not used
   // elsewhere.
-  if (AI.isArrayAllocation()) {
-    AI.setOperand(0, ConstantInt::get(AI.getArraySize()->getType(), 1));
-    return &AI;
-  }
+  if (AI.isArrayAllocation())
+    return replaceOperand(AI, 0,
+                          ConstantInt::get(AI.getArraySize()->getType(), 1));

   // Get the first instruction in the entry block.
   BasicBlock &EntryBlock = AI.getParent()->getParent()->getEntryBlock();
@@ -1048,18 +1044,14 @@ Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
       // load (select (cond, null, P)) -> load P
       if (isa<ConstantPointerNull>(SI->getOperand(1)) &&
           !NullPointerIsDefined(SI->getFunction(),
-                                LI.getPointerAddressSpace())) {
-        LI.setOperand(0, SI->getOperand(2));
-        return &LI;
-      }
+                                LI.getPointerAddressSpace()))
+        return replaceOperand(LI, 0, SI->getOperand(2));

       // load (select (cond, P, null)) -> load P
       if (isa<ConstantPointerNull>(SI->getOperand(2)) &&
           !NullPointerIsDefined(SI->getFunction(),
-                                LI.getPointerAddressSpace())) {
-        LI.setOperand(0, SI->getOperand(1));
-        return &LI;
-      }
+                                LI.getPointerAddressSpace()))
+        return replaceOperand(LI, 0, SI->getOperand(1));
     }
   }
   return nullptr;
@@ -1463,11 +1455,8 @@ Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
   // store X, null -> turns into 'unreachable' in SimplifyCFG
   // store X, GEP(null, Y) -> turns into 'unreachable' in SimplifyCFG
   if (canSimplifyNullStoreOrGEP(SI)) {
-    if (!isa<UndefValue>(Val)) {
-      SI.setOperand(0, UndefValue::get(Val->getType()));
-      if (Instruction *U = dyn_cast<Instruction>(Val))
-        Worklist.push(U); // Dropped a use.
-    }
+    if (!isa<UndefValue>(Val))
+      return replaceOperand(SI, 0, UndefValue::get(Val->getType()));
     return nullptr; // Do not modify these!
   }

@@ -1417,11 +1417,8 @@ Instruction *InstCombiner::visitSRem(BinaryOperator &I) {
   {
     const APInt *Y;
     // X % -Y -> X % Y
-    if (match(Op1, m_Negative(Y)) && !Y->isMinSignedValue()) {
-      Worklist.pushValue(I.getOperand(1));
-      I.setOperand(1, ConstantInt::get(I.getType(), -*Y));
-      return &I;
-    }
+    if (match(Op1, m_Negative(Y)) && !Y->isMinSignedValue())
+      return replaceOperand(I, 1, ConstantInt::get(I.getType(), -*Y));
   }

   // -X srem Y --> -(X srem Y)
@@ -1468,11 +1465,8 @@ Instruction *InstCombiner::visitSRem(BinaryOperator &I) {
       }

       Constant *NewRHSV = ConstantVector::get(Elts);
-      if (NewRHSV != C) { // Don't loop on -MININT
-        Worklist.pushValue(I.getOperand(1));
-        I.setOperand(1, NewRHSV);
-        return &I;
-      }
+      if (NewRHSV != C) // Don't loop on -MININT
+        return replaceOperand(I, 1, NewRHSV);
     }
   }

@@ -399,11 +399,8 @@ Instruction *InstCombiner::visitExtractElementInst(ExtractElementInst &EI) {
         return replaceInstUsesWith(EI, IE->getOperand(1));
       // If the inserted and extracted elements are constants, they must not
       // be the same value, extract from the pre-inserted value instead.
-      if (isa<Constant>(IE->getOperand(2)) && IndexC) {
-        Worklist.pushValue(SrcVec);
-        EI.setOperand(0, IE->getOperand(0));
-        return &EI;
-      }
+      if (isa<Constant>(IE->getOperand(2)) && IndexC)
+        return replaceOperand(EI, 0, IE->getOperand(0));
     } else if (auto *SVI = dyn_cast<ShuffleVectorInst>(I)) {
       // If this is extracting an element from a shufflevector, figure out where
       // it came from and extract from the appropriate input element instead.
@@ -2740,11 +2740,9 @@ Instruction *InstCombiner::visitReturnInst(ReturnInst &RI) {
   // There might be assume intrinsics dominating this return that completely
   // determine the value. If so, constant fold it.
   KnownBits Known = computeKnownBits(ResultOp, 0, &RI);
-  if (Known.isConstant()) {
-    Worklist.pushValue(ResultOp);
-    RI.setOperand(0, Constant::getIntegerValue(VTy, Known.getConstant()));
-    return &RI;
-  }
+  if (Known.isConstant())
+    return replaceOperand(RI, 0,
+                          Constant::getIntegerValue(VTy, Known.getConstant()));

   return nullptr;
 }