forked from OSchip/llvm-project
Added SCEV::NoWrapFlags to manage unsigned, signed, and self wrap
properties. Added the self-wrap flag for SCEV::AddRecExpr. A slew of temporary FIXMEs indicate the intention of the no-self-wrap flag without changing behavior in this revision. llvm-svn: 127590
This commit is contained in:
parent
328b223bb1
commit
8b55b736b1
|
@ -72,6 +72,29 @@ namespace llvm {
|
||||||
void operator=(const SCEV &); // DO NOT IMPLEMENT
|
void operator=(const SCEV &); // DO NOT IMPLEMENT
|
||||||
|
|
||||||
public:
|
public:
|
||||||
|
/// NoWrapFlags are bitfield indices into SubclassData.
|
||||||
|
///
|
||||||
|
/// Add and Mul expressions may have no-unsigned-wrap <NUW> or
|
||||||
|
/// no-signed-wrap <NSW> properties, which are derived from the IR
|
||||||
|
/// operator. NSW is a misnomer that we use to mean no signed overflow or
|
||||||
|
/// underflow.
|
||||||
|
///
|
||||||
|
/// AddRec expression may have a no-self-wraparound <NW> property if the
|
||||||
|
/// result can never reach the start value. This property is independent of
|
||||||
|
/// the actual start value and step direction. Self-wraparound is defined
|
||||||
|
/// purely in terms of the recurrence's loop, step size, and
|
||||||
|
/// bitwidth. Formally, a recurrence with no self-wraparound satisfies:
|
||||||
|
/// abs(step) * max-iteration(loop) <= unsigned-max(bitwidth).
|
||||||
|
///
|
||||||
|
/// Note that NUW and NSW are also valid properties of a recurrence, and
|
||||||
|
/// either implies NW. For convenience, NW will be set for a recurrence
|
||||||
|
/// whenever either NUW or NSW are set.
|
||||||
|
enum NoWrapFlags { FlagAnyWrap = 0, // No guarantee.
|
||||||
|
FlagNW = (1 << 0), // No self-wrap.
|
||||||
|
FlagNUW = (1 << 1), // No unsigned wrap.
|
||||||
|
FlagNSW = (1 << 2), // No signed wrap.
|
||||||
|
NoWrapMask = (1 << 3) -1 };
|
||||||
|
|
||||||
explicit SCEV(const FoldingSetNodeIDRef ID, unsigned SCEVTy) :
|
explicit SCEV(const FoldingSetNodeIDRef ID, unsigned SCEVTy) :
|
||||||
FastID(ID), SCEVType(SCEVTy), SubclassData(0) {}
|
FastID(ID), SCEVType(SCEVTy), SubclassData(0) {}
|
||||||
|
|
||||||
|
@ -159,6 +182,20 @@ namespace llvm {
|
||||||
ProperlyDominatesBlock ///< The SCEV properly dominates the block.
|
ProperlyDominatesBlock ///< The SCEV properly dominates the block.
|
||||||
};
|
};
|
||||||
|
|
||||||
|
/// Convenient NoWrapFlags manipulation that hides enum casts and is
|
||||||
|
/// visible in the ScalarEvolution name space.
|
||||||
|
static SCEV::NoWrapFlags maskFlags(SCEV::NoWrapFlags Flags, int Mask) {
|
||||||
|
return (SCEV::NoWrapFlags)(Flags & Mask);
|
||||||
|
}
|
||||||
|
static SCEV::NoWrapFlags setFlags(SCEV::NoWrapFlags Flags,
|
||||||
|
SCEV::NoWrapFlags OnFlags) {
|
||||||
|
return (SCEV::NoWrapFlags)(Flags | OnFlags);
|
||||||
|
}
|
||||||
|
static SCEV::NoWrapFlags clearFlags(SCEV::NoWrapFlags Flags,
|
||||||
|
SCEV::NoWrapFlags OffFlags) {
|
||||||
|
return (SCEV::NoWrapFlags)(Flags & ~OffFlags);
|
||||||
|
}
|
||||||
|
|
||||||
private:
|
private:
|
||||||
/// SCEVCallbackVH - A CallbackVH to arrange for ScalarEvolution to be
|
/// SCEVCallbackVH - A CallbackVH to arrange for ScalarEvolution to be
|
||||||
/// notified whenever a Value is deleted.
|
/// notified whenever a Value is deleted.
|
||||||
|
@ -465,44 +502,41 @@ namespace llvm {
|
||||||
const SCEV *getSignExtendExpr(const SCEV *Op, const Type *Ty);
|
const SCEV *getSignExtendExpr(const SCEV *Op, const Type *Ty);
|
||||||
const SCEV *getAnyExtendExpr(const SCEV *Op, const Type *Ty);
|
const SCEV *getAnyExtendExpr(const SCEV *Op, const Type *Ty);
|
||||||
const SCEV *getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
|
const SCEV *getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
|
||||||
bool HasNUW = false, bool HasNSW = false);
|
SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap);
|
||||||
const SCEV *getAddExpr(const SCEV *LHS, const SCEV *RHS,
|
const SCEV *getAddExpr(const SCEV *LHS, const SCEV *RHS,
|
||||||
bool HasNUW = false, bool HasNSW = false) {
|
SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap) {
|
||||||
SmallVector<const SCEV *, 2> Ops;
|
SmallVector<const SCEV *, 2> Ops;
|
||||||
Ops.push_back(LHS);
|
Ops.push_back(LHS);
|
||||||
Ops.push_back(RHS);
|
Ops.push_back(RHS);
|
||||||
return getAddExpr(Ops, HasNUW, HasNSW);
|
return getAddExpr(Ops, Flags);
|
||||||
}
|
}
|
||||||
const SCEV *getAddExpr(const SCEV *Op0, const SCEV *Op1,
|
const SCEV *getAddExpr(const SCEV *Op0, const SCEV *Op1, const SCEV *Op2,
|
||||||
const SCEV *Op2,
|
SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap) {
|
||||||
bool HasNUW = false, bool HasNSW = false) {
|
|
||||||
SmallVector<const SCEV *, 3> Ops;
|
SmallVector<const SCEV *, 3> Ops;
|
||||||
Ops.push_back(Op0);
|
Ops.push_back(Op0);
|
||||||
Ops.push_back(Op1);
|
Ops.push_back(Op1);
|
||||||
Ops.push_back(Op2);
|
Ops.push_back(Op2);
|
||||||
return getAddExpr(Ops, HasNUW, HasNSW);
|
return getAddExpr(Ops, Flags);
|
||||||
}
|
}
|
||||||
const SCEV *getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
|
const SCEV *getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
|
||||||
bool HasNUW = false, bool HasNSW = false);
|
SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap);
|
||||||
const SCEV *getMulExpr(const SCEV *LHS, const SCEV *RHS,
|
const SCEV *getMulExpr(const SCEV *LHS, const SCEV *RHS,
|
||||||
bool HasNUW = false, bool HasNSW = false) {
|
SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap)
|
||||||
|
{
|
||||||
SmallVector<const SCEV *, 2> Ops;
|
SmallVector<const SCEV *, 2> Ops;
|
||||||
Ops.push_back(LHS);
|
Ops.push_back(LHS);
|
||||||
Ops.push_back(RHS);
|
Ops.push_back(RHS);
|
||||||
return getMulExpr(Ops, HasNUW, HasNSW);
|
return getMulExpr(Ops, Flags);
|
||||||
}
|
}
|
||||||
const SCEV *getUDivExpr(const SCEV *LHS, const SCEV *RHS);
|
const SCEV *getUDivExpr(const SCEV *LHS, const SCEV *RHS);
|
||||||
const SCEV *getAddRecExpr(const SCEV *Start, const SCEV *Step,
|
const SCEV *getAddRecExpr(const SCEV *Start, const SCEV *Step,
|
||||||
const Loop *L,
|
const Loop *L, SCEV::NoWrapFlags Flags);
|
||||||
bool HasNUW = false, bool HasNSW = false);
|
|
||||||
const SCEV *getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
|
const SCEV *getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
|
||||||
const Loop *L,
|
const Loop *L, SCEV::NoWrapFlags Flags);
|
||||||
bool HasNUW = false, bool HasNSW = false);
|
|
||||||
const SCEV *getAddRecExpr(const SmallVectorImpl<const SCEV *> &Operands,
|
const SCEV *getAddRecExpr(const SmallVectorImpl<const SCEV *> &Operands,
|
||||||
const Loop *L,
|
const Loop *L, SCEV::NoWrapFlags Flags) {
|
||||||
bool HasNUW = false, bool HasNSW = false) {
|
|
||||||
SmallVector<const SCEV *, 4> NewOp(Operands.begin(), Operands.end());
|
SmallVector<const SCEV *, 4> NewOp(Operands.begin(), Operands.end());
|
||||||
return getAddRecExpr(NewOp, L, HasNUW, HasNSW);
|
return getAddRecExpr(NewOp, L, Flags);
|
||||||
}
|
}
|
||||||
const SCEV *getSMaxExpr(const SCEV *LHS, const SCEV *RHS);
|
const SCEV *getSMaxExpr(const SCEV *LHS, const SCEV *RHS);
|
||||||
const SCEV *getSMaxExpr(SmallVectorImpl<const SCEV *> &Operands);
|
const SCEV *getSMaxExpr(SmallVectorImpl<const SCEV *> &Operands);
|
||||||
|
@ -537,11 +571,9 @@ namespace llvm {
|
||||||
///
|
///
|
||||||
const SCEV *getNotSCEV(const SCEV *V);
|
const SCEV *getNotSCEV(const SCEV *V);
|
||||||
|
|
||||||
/// getMinusSCEV - Return LHS-RHS. Minus is represented in SCEV as A+B*-1,
|
/// getMinusSCEV - Return LHS-RHS. Minus is represented in SCEV as A+B*-1.
|
||||||
/// and thus the HasNUW and HasNSW bits apply to the resultant add, not
|
|
||||||
/// whether the sub would have overflowed.
|
|
||||||
const SCEV *getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
|
const SCEV *getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
|
||||||
bool HasNUW = false, bool HasNSW = false);
|
SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap);
|
||||||
|
|
||||||
/// getTruncateOrZeroExtend - Return a SCEV corresponding to a conversion
|
/// getTruncateOrZeroExtend - Return a SCEV corresponding to a conversion
|
||||||
/// of the input value to the specified type. If the type must be
|
/// of the input value to the specified type. If the type must be
|
||||||
|
|
|
@ -160,13 +160,8 @@ namespace llvm {
|
||||||
|
|
||||||
const Type *getType() const { return getOperand(0)->getType(); }
|
const Type *getType() const { return getOperand(0)->getType(); }
|
||||||
|
|
||||||
bool hasNoUnsignedWrap() const { return SubclassData & (1 << 0); }
|
NoWrapFlags getNoWrapFlags(NoWrapFlags Mask = NoWrapMask) const {
|
||||||
void setHasNoUnsignedWrap(bool B) {
|
return (NoWrapFlags)(SubclassData & Mask);
|
||||||
SubclassData = (SubclassData & ~(1 << 0)) | (B << 0);
|
|
||||||
}
|
|
||||||
bool hasNoSignedWrap() const { return SubclassData & (1 << 1); }
|
|
||||||
void setHasNoSignedWrap(bool B) {
|
|
||||||
SubclassData = (SubclassData & ~(1 << 1)) | (B << 1);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Methods for support type inquiry through isa, cast, and dyn_cast:
|
/// Methods for support type inquiry through isa, cast, and dyn_cast:
|
||||||
|
@ -199,6 +194,11 @@ namespace llvm {
|
||||||
S->getSCEVType() == scSMaxExpr ||
|
S->getSCEVType() == scSMaxExpr ||
|
||||||
S->getSCEVType() == scUMaxExpr;
|
S->getSCEVType() == scUMaxExpr;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Set flags for a non-recurrence without clearing previously set flags.
|
||||||
|
void setNoWrapFlags(NoWrapFlags Flags) {
|
||||||
|
SubclassData |= Flags;
|
||||||
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
||||||
|
@ -305,11 +305,12 @@ namespace llvm {
|
||||||
/// getStepRecurrence - This method constructs and returns the recurrence
|
/// getStepRecurrence - This method constructs and returns the recurrence
|
||||||
/// indicating how much this expression steps by. If this is a polynomial
|
/// indicating how much this expression steps by. If this is a polynomial
|
||||||
/// of degree N, it returns a chrec of degree N-1.
|
/// of degree N, it returns a chrec of degree N-1.
|
||||||
|
/// We cannot determine whether the step recurrence has self-wraparound.
|
||||||
const SCEV *getStepRecurrence(ScalarEvolution &SE) const {
|
const SCEV *getStepRecurrence(ScalarEvolution &SE) const {
|
||||||
if (isAffine()) return getOperand(1);
|
if (isAffine()) return getOperand(1);
|
||||||
return SE.getAddRecExpr(SmallVector<const SCEV *, 3>(op_begin()+1,
|
return SE.getAddRecExpr(SmallVector<const SCEV *, 3>(op_begin()+1,
|
||||||
op_end()),
|
op_end()),
|
||||||
getLoop());
|
getLoop(), FlagAnyWrap);
|
||||||
}
|
}
|
||||||
|
|
||||||
/// isAffine - Return true if this is an affine AddRec (i.e., it represents
|
/// isAffine - Return true if this is an affine AddRec (i.e., it represents
|
||||||
|
@ -327,6 +328,15 @@ namespace llvm {
|
||||||
return getNumOperands() == 3;
|
return getNumOperands() == 3;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Set flags for a recurrence without clearing any previously set flags.
|
||||||
|
/// For AddRec, either NUW or NSW implies NW. Keep track of this fact here
|
||||||
|
/// to make it easier to propagate flags.
|
||||||
|
void setNoWrapFlags(NoWrapFlags Flags) {
|
||||||
|
if (Flags & (FlagNUW | FlagNSW))
|
||||||
|
Flags = ScalarEvolution::setFlags(Flags, FlagNW);
|
||||||
|
SubclassData |= Flags;
|
||||||
|
}
|
||||||
|
|
||||||
/// evaluateAtIteration - Return the value of this chain of recurrences at
|
/// evaluateAtIteration - Return the value of this chain of recurrences at
|
||||||
/// the specified iteration number.
|
/// the specified iteration number.
|
||||||
const SCEV *evaluateAtIteration(const SCEV *It, ScalarEvolution &SE) const;
|
const SCEV *evaluateAtIteration(const SCEV *It, ScalarEvolution &SE) const;
|
||||||
|
@ -364,8 +374,7 @@ namespace llvm {
|
||||||
const SCEV *const *O, size_t N)
|
const SCEV *const *O, size_t N)
|
||||||
: SCEVCommutativeExpr(ID, scSMaxExpr, O, N) {
|
: SCEVCommutativeExpr(ID, scSMaxExpr, O, N) {
|
||||||
// Max never overflows.
|
// Max never overflows.
|
||||||
setHasNoUnsignedWrap(true);
|
setNoWrapFlags((NoWrapFlags)(FlagNUW | FlagNSW));
|
||||||
setHasNoSignedWrap(true);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
public:
|
public:
|
||||||
|
@ -387,8 +396,7 @@ namespace llvm {
|
||||||
const SCEV *const *O, size_t N)
|
const SCEV *const *O, size_t N)
|
||||||
: SCEVCommutativeExpr(ID, scUMaxExpr, O, N) {
|
: SCEVCommutativeExpr(ID, scUMaxExpr, O, N) {
|
||||||
// Max never overflows.
|
// Max never overflows.
|
||||||
setHasNoUnsignedWrap(true);
|
setNoWrapFlags((NoWrapFlags)(FlagNUW | FlagNSW));
|
||||||
setHasNoSignedWrap(true);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
public:
|
public:
|
||||||
|
|
|
@ -157,10 +157,13 @@ void SCEV::print(raw_ostream &OS) const {
|
||||||
for (unsigned i = 1, e = AR->getNumOperands(); i != e; ++i)
|
for (unsigned i = 1, e = AR->getNumOperands(); i != e; ++i)
|
||||||
OS << ",+," << *AR->getOperand(i);
|
OS << ",+," << *AR->getOperand(i);
|
||||||
OS << "}<";
|
OS << "}<";
|
||||||
if (AR->hasNoUnsignedWrap())
|
if (AR->getNoWrapFlags(FlagNUW))
|
||||||
OS << "nuw><";
|
OS << "nuw><";
|
||||||
if (AR->hasNoSignedWrap())
|
if (AR->getNoWrapFlags(FlagNSW))
|
||||||
OS << "nsw><";
|
OS << "nsw><";
|
||||||
|
if (AR->getNoWrapFlags(FlagNW) &&
|
||||||
|
!AR->getNoWrapFlags((NoWrapFlags)(FlagNUW | FlagNSW)))
|
||||||
|
OS << "nw><";
|
||||||
WriteAsOperand(OS, AR->getLoop()->getHeader(), /*PrintType=*/false);
|
WriteAsOperand(OS, AR->getLoop()->getHeader(), /*PrintType=*/false);
|
||||||
OS << ">";
|
OS << ">";
|
||||||
return;
|
return;
|
||||||
|
@ -830,7 +833,7 @@ const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op,
|
||||||
Operands.push_back(S);
|
Operands.push_back(S);
|
||||||
}
|
}
|
||||||
if (!hasTrunc)
|
if (!hasTrunc)
|
||||||
return getAddExpr(Operands, false, false);
|
return getAddExpr(Operands);
|
||||||
UniqueSCEVs.FindNodeOrInsertPos(ID, IP); // Mutates IP, returns NULL.
|
UniqueSCEVs.FindNodeOrInsertPos(ID, IP); // Mutates IP, returns NULL.
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -845,7 +848,7 @@ const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op,
|
||||||
Operands.push_back(S);
|
Operands.push_back(S);
|
||||||
}
|
}
|
||||||
if (!hasTrunc)
|
if (!hasTrunc)
|
||||||
return getMulExpr(Operands, false, false);
|
return getMulExpr(Operands);
|
||||||
UniqueSCEVs.FindNodeOrInsertPos(ID, IP); // Mutates IP, returns NULL.
|
UniqueSCEVs.FindNodeOrInsertPos(ID, IP); // Mutates IP, returns NULL.
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -854,7 +857,7 @@ const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op,
|
||||||
SmallVector<const SCEV *, 4> Operands;
|
SmallVector<const SCEV *, 4> Operands;
|
||||||
for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
|
for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
|
||||||
Operands.push_back(getTruncateExpr(AddRec->getOperand(i), Ty));
|
Operands.push_back(getTruncateExpr(AddRec->getOperand(i), Ty));
|
||||||
return getAddRecExpr(Operands, AddRec->getLoop());
|
return getAddRecExpr(Operands, AddRec->getLoop(), SCEV::FlagAnyWrap);
|
||||||
}
|
}
|
||||||
|
|
||||||
// As a special case, fold trunc(undef) to undef. We don't want to
|
// As a special case, fold trunc(undef) to undef. We don't want to
|
||||||
|
@ -926,10 +929,11 @@ const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
|
||||||
|
|
||||||
// If we have special knowledge that this addrec won't overflow,
|
// If we have special knowledge that this addrec won't overflow,
|
||||||
// we don't need to do any further analysis.
|
// we don't need to do any further analysis.
|
||||||
if (AR->hasNoUnsignedWrap())
|
if (AR->getNoWrapFlags(SCEV::FlagNUW))
|
||||||
return getAddRecExpr(getZeroExtendExpr(Start, Ty),
|
return getAddRecExpr(getZeroExtendExpr(Start, Ty),
|
||||||
getZeroExtendExpr(Step, Ty),
|
getZeroExtendExpr(Step, Ty),
|
||||||
L);
|
// FIXME: Can use SCEV::FlagNUW
|
||||||
|
L, SCEV::FlagAnyWrap);
|
||||||
|
|
||||||
// Check whether the backedge-taken count is SCEVCouldNotCompute.
|
// Check whether the backedge-taken count is SCEVCouldNotCompute.
|
||||||
// Note that this serves two purposes: It filters out loops that are
|
// Note that this serves two purposes: It filters out loops that are
|
||||||
|
@ -963,7 +967,8 @@ const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
|
||||||
// Return the expression with the addrec on the outside.
|
// Return the expression with the addrec on the outside.
|
||||||
return getAddRecExpr(getZeroExtendExpr(Start, Ty),
|
return getAddRecExpr(getZeroExtendExpr(Start, Ty),
|
||||||
getZeroExtendExpr(Step, Ty),
|
getZeroExtendExpr(Step, Ty),
|
||||||
L);
|
// FIXME: can use FlagNUW
|
||||||
|
L, SCEV::FlagAnyWrap);
|
||||||
|
|
||||||
// Similar to above, only this time treat the step value as signed.
|
// Similar to above, only this time treat the step value as signed.
|
||||||
// This covers loops that count down.
|
// This covers loops that count down.
|
||||||
|
@ -977,7 +982,8 @@ const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
|
||||||
// Return the expression with the addrec on the outside.
|
// Return the expression with the addrec on the outside.
|
||||||
return getAddRecExpr(getZeroExtendExpr(Start, Ty),
|
return getAddRecExpr(getZeroExtendExpr(Start, Ty),
|
||||||
getSignExtendExpr(Step, Ty),
|
getSignExtendExpr(Step, Ty),
|
||||||
L);
|
// FIXME: can use FlagNW
|
||||||
|
L, SCEV::FlagAnyWrap);
|
||||||
}
|
}
|
||||||
|
|
||||||
// If the backedge is guarded by a comparison with the pre-inc value
|
// If the backedge is guarded by a comparison with the pre-inc value
|
||||||
|
@ -994,7 +1000,8 @@ const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
|
||||||
// Return the expression with the addrec on the outside.
|
// Return the expression with the addrec on the outside.
|
||||||
return getAddRecExpr(getZeroExtendExpr(Start, Ty),
|
return getAddRecExpr(getZeroExtendExpr(Start, Ty),
|
||||||
getZeroExtendExpr(Step, Ty),
|
getZeroExtendExpr(Step, Ty),
|
||||||
L);
|
// FIXME: can use FlagNUW
|
||||||
|
L, SCEV::FlagAnyWrap);
|
||||||
} else if (isKnownNegative(Step)) {
|
} else if (isKnownNegative(Step)) {
|
||||||
const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) -
|
const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) -
|
||||||
getSignedRange(Step).getSignedMin());
|
getSignedRange(Step).getSignedMin());
|
||||||
|
@ -1002,10 +1009,12 @@ const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
|
||||||
(isLoopEntryGuardedByCond(L, ICmpInst::ICMP_UGT, Start, N) &&
|
(isLoopEntryGuardedByCond(L, ICmpInst::ICMP_UGT, Start, N) &&
|
||||||
isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT,
|
isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT,
|
||||||
AR->getPostIncExpr(*this), N)))
|
AR->getPostIncExpr(*this), N)))
|
||||||
// Return the expression with the addrec on the outside.
|
// Return the expression with the addrec on the outside. The
|
||||||
|
// negative step causes unsigned wrap, but it still can't self-wrap.
|
||||||
return getAddRecExpr(getZeroExtendExpr(Start, Ty),
|
return getAddRecExpr(getZeroExtendExpr(Start, Ty),
|
||||||
getSignExtendExpr(Step, Ty),
|
getSignExtendExpr(Step, Ty),
|
||||||
L);
|
// FIXME: can use FlagNW
|
||||||
|
L, SCEV::FlagAnyWrap);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -1080,10 +1089,11 @@ const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
|
||||||
|
|
||||||
// If we have special knowledge that this addrec won't overflow,
|
// If we have special knowledge that this addrec won't overflow,
|
||||||
// we don't need to do any further analysis.
|
// we don't need to do any further analysis.
|
||||||
if (AR->hasNoSignedWrap())
|
if (AR->getNoWrapFlags(SCEV::FlagNSW))
|
||||||
return getAddRecExpr(getSignExtendExpr(Start, Ty),
|
return getAddRecExpr(getSignExtendExpr(Start, Ty),
|
||||||
getSignExtendExpr(Step, Ty),
|
getSignExtendExpr(Step, Ty),
|
||||||
L);
|
// FIXME: can use SCEV::FlagNSW
|
||||||
|
L, SCEV::FlagAnyWrap);
|
||||||
|
|
||||||
// Check whether the backedge-taken count is SCEVCouldNotCompute.
|
// Check whether the backedge-taken count is SCEVCouldNotCompute.
|
||||||
// Note that this serves two purposes: It filters out loops that are
|
// Note that this serves two purposes: It filters out loops that are
|
||||||
|
@ -1117,7 +1127,8 @@ const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
|
||||||
// Return the expression with the addrec on the outside.
|
// Return the expression with the addrec on the outside.
|
||||||
return getAddRecExpr(getSignExtendExpr(Start, Ty),
|
return getAddRecExpr(getSignExtendExpr(Start, Ty),
|
||||||
getSignExtendExpr(Step, Ty),
|
getSignExtendExpr(Step, Ty),
|
||||||
L);
|
// FIXME: can use SCEV::FlagNSW
|
||||||
|
L, SCEV::FlagAnyWrap);
|
||||||
|
|
||||||
// Similar to above, only this time treat the step value as unsigned.
|
// Similar to above, only this time treat the step value as unsigned.
|
||||||
// This covers loops that count up with an unsigned step.
|
// This covers loops that count up with an unsigned step.
|
||||||
|
@ -1131,7 +1142,8 @@ const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
|
||||||
// Return the expression with the addrec on the outside.
|
// Return the expression with the addrec on the outside.
|
||||||
return getAddRecExpr(getSignExtendExpr(Start, Ty),
|
return getAddRecExpr(getSignExtendExpr(Start, Ty),
|
||||||
getZeroExtendExpr(Step, Ty),
|
getZeroExtendExpr(Step, Ty),
|
||||||
L);
|
// FIXME: can use SCEV::FlagNSW
|
||||||
|
L, SCEV::FlagAnyWrap);
|
||||||
}
|
}
|
||||||
|
|
||||||
// If the backedge is guarded by a comparison with the pre-inc value
|
// If the backedge is guarded by a comparison with the pre-inc value
|
||||||
|
@ -1148,7 +1160,8 @@ const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
|
||||||
// Return the expression with the addrec on the outside.
|
// Return the expression with the addrec on the outside.
|
||||||
return getAddRecExpr(getSignExtendExpr(Start, Ty),
|
return getAddRecExpr(getSignExtendExpr(Start, Ty),
|
||||||
getSignExtendExpr(Step, Ty),
|
getSignExtendExpr(Step, Ty),
|
||||||
L);
|
// FIXME: can use SCEV::FlagNSW
|
||||||
|
L, SCEV::FlagAnyWrap);
|
||||||
} else if (isKnownNegative(Step)) {
|
} else if (isKnownNegative(Step)) {
|
||||||
const SCEV *N = getConstant(APInt::getSignedMaxValue(BitWidth) -
|
const SCEV *N = getConstant(APInt::getSignedMaxValue(BitWidth) -
|
||||||
getSignedRange(Step).getSignedMin());
|
getSignedRange(Step).getSignedMin());
|
||||||
|
@ -1159,7 +1172,8 @@ const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
|
||||||
// Return the expression with the addrec on the outside.
|
// Return the expression with the addrec on the outside.
|
||||||
return getAddRecExpr(getSignExtendExpr(Start, Ty),
|
return getAddRecExpr(getSignExtendExpr(Start, Ty),
|
||||||
getSignExtendExpr(Step, Ty),
|
getSignExtendExpr(Step, Ty),
|
||||||
L);
|
// FIXME: can use SCEV::FlagNSW
|
||||||
|
L, SCEV::FlagAnyWrap);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -1213,7 +1227,8 @@ const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
|
||||||
for (SCEVAddRecExpr::op_iterator I = AR->op_begin(), E = AR->op_end();
|
for (SCEVAddRecExpr::op_iterator I = AR->op_begin(), E = AR->op_end();
|
||||||
I != E; ++I)
|
I != E; ++I)
|
||||||
Ops.push_back(getAnyExtendExpr(*I, Ty));
|
Ops.push_back(getAnyExtendExpr(*I, Ty));
|
||||||
return getAddRecExpr(Ops, AR->getLoop());
|
// FIXME: can use AR->getNoWrapFlags(SCEV::FlagNW)
|
||||||
|
return getAddRecExpr(Ops, AR->getLoop(), SCEV::FlagAnyWrap);
|
||||||
}
|
}
|
||||||
|
|
||||||
// As a special case, fold anyext(undef) to undef. We don't want to
|
// As a special case, fold anyext(undef) to undef. We don't want to
|
||||||
|
@ -1334,7 +1349,9 @@ namespace {
|
||||||
/// getAddExpr - Get a canonical add expression, or something simpler if
|
/// getAddExpr - Get a canonical add expression, or something simpler if
|
||||||
/// possible.
|
/// possible.
|
||||||
const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
|
const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
|
||||||
bool HasNUW, bool HasNSW) {
|
SCEV::NoWrapFlags Flags) {
|
||||||
|
assert(!(Flags & ~(SCEV::FlagNUW | SCEV::FlagNSW)) &&
|
||||||
|
"only nuw or nsw allowed");
|
||||||
assert(!Ops.empty() && "Cannot get empty add!");
|
assert(!Ops.empty() && "Cannot get empty add!");
|
||||||
if (Ops.size() == 1) return Ops[0];
|
if (Ops.size() == 1) return Ops[0];
|
||||||
#ifndef NDEBUG
|
#ifndef NDEBUG
|
||||||
|
@ -1344,8 +1361,8 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
|
||||||
"SCEVAddExpr operand types don't match!");
|
"SCEVAddExpr operand types don't match!");
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
// If HasNSW is true and all the operands are non-negative, infer HasNUW.
|
// If FlagNSW is true and all the operands are non-negative, infer FlagNUW.
|
||||||
if (!HasNUW && HasNSW) {
|
if (!(Flags & SCEV::FlagNUW) && (Flags & SCEV::FlagNSW)) {
|
||||||
bool All = true;
|
bool All = true;
|
||||||
for (SmallVectorImpl<const SCEV *>::const_iterator I = Ops.begin(),
|
for (SmallVectorImpl<const SCEV *>::const_iterator I = Ops.begin(),
|
||||||
E = Ops.end(); I != E; ++I)
|
E = Ops.end(); I != E; ++I)
|
||||||
|
@ -1353,7 +1370,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
|
||||||
All = false;
|
All = false;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
if (All) HasNUW = true;
|
if (All) Flags = setFlags(Flags, SCEV::FlagNUW);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Sort by complexity, this groups all similar expression types together.
|
// Sort by complexity, this groups all similar expression types together.
|
||||||
|
@ -1404,7 +1421,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
|
||||||
FoundMatch = true;
|
FoundMatch = true;
|
||||||
}
|
}
|
||||||
if (FoundMatch)
|
if (FoundMatch)
|
||||||
return getAddExpr(Ops, HasNUW, HasNSW);
|
return getAddExpr(Ops, Flags);
|
||||||
|
|
||||||
// Check for truncates. If all the operands are truncated from the same
|
// Check for truncates. If all the operands are truncated from the same
|
||||||
// type, see if factoring out the truncate would permit the result to be
|
// type, see if factoring out the truncate would permit the result to be
|
||||||
|
@ -1454,7 +1471,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
|
||||||
}
|
}
|
||||||
if (Ok) {
|
if (Ok) {
|
||||||
// Evaluate the expression in the larger type.
|
// Evaluate the expression in the larger type.
|
||||||
const SCEV *Fold = getAddExpr(LargeOps, HasNUW, HasNSW);
|
const SCEV *Fold = getAddExpr(LargeOps, Flags);
|
||||||
// If it folds to something simple, use it. Otherwise, don't.
|
// If it folds to something simple, use it. Otherwise, don't.
|
||||||
if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold))
|
if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold))
|
||||||
return getTruncateExpr(Fold, DstType);
|
return getTruncateExpr(Fold, DstType);
|
||||||
|
@ -1625,9 +1642,10 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
|
||||||
|
|
||||||
// Build the new addrec. Propagate the NUW and NSW flags if both the
|
// Build the new addrec. Propagate the NUW and NSW flags if both the
|
||||||
// outer add and the inner addrec are guaranteed to have no overflow.
|
// outer add and the inner addrec are guaranteed to have no overflow.
|
||||||
const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop,
|
// FIXME: Always propagate NW
|
||||||
HasNUW && AddRec->hasNoUnsignedWrap(),
|
// AddRec->getNoWrapFlags(setFlags(Flags, SCEV::FlagNW))
|
||||||
HasNSW && AddRec->hasNoSignedWrap());
|
Flags = AddRec->getNoWrapFlags(Flags);
|
||||||
|
const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop, Flags);
|
||||||
|
|
||||||
// If all of the other operands were loop invariant, we are done.
|
// If all of the other operands were loop invariant, we are done.
|
||||||
if (Ops.size() == 1) return NewRec;
|
if (Ops.size() == 1) return NewRec;
|
||||||
|
@ -1668,7 +1686,8 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
|
||||||
}
|
}
|
||||||
Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
|
Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
|
||||||
}
|
}
|
||||||
Ops[Idx] = getAddRecExpr(AddRecOps, AddRecLoop);
|
// Step size has changed, so we cannot guarantee no self-wraparound.
|
||||||
|
Ops[Idx] = getAddRecExpr(AddRecOps, AddRecLoop, SCEV::FlagAnyWrap);
|
||||||
return getAddExpr(Ops);
|
return getAddExpr(Ops);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1692,15 +1711,16 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
|
||||||
O, Ops.size());
|
O, Ops.size());
|
||||||
UniqueSCEVs.InsertNode(S, IP);
|
UniqueSCEVs.InsertNode(S, IP);
|
||||||
}
|
}
|
||||||
if (HasNUW) S->setHasNoUnsignedWrap(true);
|
S->setNoWrapFlags(Flags);
|
||||||
if (HasNSW) S->setHasNoSignedWrap(true);
|
|
||||||
return S;
|
return S;
|
||||||
}
|
}
|
||||||
|
|
||||||
/// getMulExpr - Get a canonical multiply expression, or something simpler if
|
/// getMulExpr - Get a canonical multiply expression, or something simpler if
|
||||||
/// possible.
|
/// possible.
|
||||||
const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
|
const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
|
||||||
bool HasNUW, bool HasNSW) {
|
SCEV::NoWrapFlags Flags) {
|
||||||
|
assert(Flags == maskFlags(Flags, SCEV::FlagNUW | SCEV::FlagNSW) &&
|
||||||
|
"only nuw or nsw allowed");
|
||||||
assert(!Ops.empty() && "Cannot get empty mul!");
|
assert(!Ops.empty() && "Cannot get empty mul!");
|
||||||
if (Ops.size() == 1) return Ops[0];
|
if (Ops.size() == 1) return Ops[0];
|
||||||
#ifndef NDEBUG
|
#ifndef NDEBUG
|
||||||
|
@ -1710,8 +1730,8 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
|
||||||
"SCEVMulExpr operand types don't match!");
|
"SCEVMulExpr operand types don't match!");
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
// If HasNSW is true and all the operands are non-negative, infer HasNUW.
|
// If FlagNSW is true and all the operands are non-negative, infer FlagNUW.
|
||||||
if (!HasNUW && HasNSW) {
|
if (!(Flags & SCEV::FlagNUW) && (Flags & SCEV::FlagNSW)) {
|
||||||
bool All = true;
|
bool All = true;
|
||||||
for (SmallVectorImpl<const SCEV *>::const_iterator I = Ops.begin(),
|
for (SmallVectorImpl<const SCEV *>::const_iterator I = Ops.begin(),
|
||||||
E = Ops.end(); I != E; ++I)
|
E = Ops.end(); I != E; ++I)
|
||||||
|
@ -1719,7 +1739,7 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
|
||||||
All = false;
|
All = false;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
if (All) HasNUW = true;
|
if (All) Flags = setFlags(Flags, SCEV::FlagNUW);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Sort by complexity, this groups all similar expression types together.
|
// Sort by complexity, this groups all similar expression types together.
|
||||||
|
@ -1759,12 +1779,12 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
|
||||||
} else if (Ops[0]->isAllOnesValue()) {
|
} else if (Ops[0]->isAllOnesValue()) {
|
||||||
// If we have a mul by -1 of an add, try distributing the -1 among the
|
// If we have a mul by -1 of an add, try distributing the -1 among the
|
||||||
// add operands.
|
// add operands.
|
||||||
if (Ops.size() == 2)
|
if (Ops.size() == 2) {
|
||||||
if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) {
|
if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) {
|
||||||
SmallVector<const SCEV *, 4> NewOps;
|
SmallVector<const SCEV *, 4> NewOps;
|
||||||
bool AnyFolded = false;
|
bool AnyFolded = false;
|
||||||
for (SCEVAddRecExpr::op_iterator I = Add->op_begin(), E = Add->op_end();
|
for (SCEVAddRecExpr::op_iterator I = Add->op_begin(),
|
||||||
I != E; ++I) {
|
E = Add->op_end(); I != E; ++I) {
|
||||||
const SCEV *Mul = getMulExpr(Ops[0], *I);
|
const SCEV *Mul = getMulExpr(Ops[0], *I);
|
||||||
if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true;
|
if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true;
|
||||||
NewOps.push_back(Mul);
|
NewOps.push_back(Mul);
|
||||||
|
@ -1772,6 +1792,7 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
|
||||||
if (AnyFolded)
|
if (AnyFolded)
|
||||||
return getAddExpr(NewOps);
|
return getAddExpr(NewOps);
|
||||||
}
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (Ops.size() == 1)
|
if (Ops.size() == 1)
|
||||||
|
@ -1831,9 +1852,11 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
|
||||||
|
|
||||||
// Build the new addrec. Propagate the NUW and NSW flags if both the
|
// Build the new addrec. Propagate the NUW and NSW flags if both the
|
||||||
// outer mul and the inner addrec are guaranteed to have no overflow.
|
// outer mul and the inner addrec are guaranteed to have no overflow.
|
||||||
const SCEV *NewRec = getAddRecExpr(NewOps, AddRecLoop,
|
//
|
||||||
HasNUW && AddRec->hasNoUnsignedWrap(),
|
// No self-wrap cannot be guaranteed after changing the step size, but
|
||||||
HasNSW && AddRec->hasNoSignedWrap());
|
// will be infered if either NUW or NSW is true.
|
||||||
|
Flags = AddRec->getNoWrapFlags(clearFlags(Flags, SCEV::FlagNW));
|
||||||
|
const SCEV *NewRec = getAddRecExpr(NewOps, AddRecLoop, Flags);
|
||||||
|
|
||||||
// If all of the other operands were loop invariant, we are done.
|
// If all of the other operands were loop invariant, we are done.
|
||||||
if (Ops.size() == 1) return NewRec;
|
if (Ops.size() == 1) return NewRec;
|
||||||
|
@ -1869,7 +1892,8 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
|
||||||
getMulExpr(G, B),
|
getMulExpr(G, B),
|
||||||
getMulExpr(B, D));
|
getMulExpr(B, D));
|
||||||
const SCEV *NewAddRec = getAddRecExpr(NewStart, NewStep,
|
const SCEV *NewAddRec = getAddRecExpr(NewStart, NewStep,
|
||||||
F->getLoop());
|
F->getLoop(),
|
||||||
|
SCEV::FlagAnyWrap);
|
||||||
if (Ops.size() == 2) return NewAddRec;
|
if (Ops.size() == 2) return NewAddRec;
|
||||||
Ops[Idx] = AddRec = cast<SCEVAddRecExpr>(NewAddRec);
|
Ops[Idx] = AddRec = cast<SCEVAddRecExpr>(NewAddRec);
|
||||||
Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
|
Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
|
||||||
|
@ -1897,8 +1921,7 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
|
||||||
O, Ops.size());
|
O, Ops.size());
|
||||||
UniqueSCEVs.InsertNode(S, IP);
|
UniqueSCEVs.InsertNode(S, IP);
|
||||||
}
|
}
|
||||||
if (HasNUW) S->setHasNoUnsignedWrap(true);
|
S->setNoWrapFlags(Flags);
|
||||||
if (HasNSW) S->setHasNoSignedWrap(true);
|
|
||||||
return S;
|
return S;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1938,11 +1961,13 @@ const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
|
||||||
getZeroExtendExpr(AR, ExtTy) ==
|
getZeroExtendExpr(AR, ExtTy) ==
|
||||||
getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
|
getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
|
||||||
getZeroExtendExpr(Step, ExtTy),
|
getZeroExtendExpr(Step, ExtTy),
|
||||||
AR->getLoop())) {
|
AR->getLoop(), SCEV::FlagAnyWrap)) {
|
||||||
SmallVector<const SCEV *, 4> Operands;
|
SmallVector<const SCEV *, 4> Operands;
|
||||||
for (unsigned i = 0, e = AR->getNumOperands(); i != e; ++i)
|
for (unsigned i = 0, e = AR->getNumOperands(); i != e; ++i)
|
||||||
Operands.push_back(getUDivExpr(AR->getOperand(i), RHS));
|
Operands.push_back(getUDivExpr(AR->getOperand(i), RHS));
|
||||||
return getAddRecExpr(Operands, AR->getLoop());
|
return getAddRecExpr(Operands, AR->getLoop(),
|
||||||
|
// FIXME: AR->getNoWrapFlags(SCEV::FlagNW)
|
||||||
|
SCEV::FlagAnyWrap);
|
||||||
}
|
}
|
||||||
// (A*B)/C --> A*(B/C) if safe and B/C can be folded.
|
// (A*B)/C --> A*(B/C) if safe and B/C can be folded.
|
||||||
if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) {
|
if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) {
|
||||||
|
@ -2006,27 +2031,27 @@ const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
|
||||||
|
|
||||||
/// getAddRecExpr - Get an add recurrence expression for the specified loop.
|
/// getAddRecExpr - Get an add recurrence expression for the specified loop.
|
||||||
/// Simplify the expression as much as possible.
|
/// Simplify the expression as much as possible.
|
||||||
const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start,
|
const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start, const SCEV *Step,
|
||||||
const SCEV *Step, const Loop *L,
|
const Loop *L,
|
||||||
bool HasNUW, bool HasNSW) {
|
SCEV::NoWrapFlags Flags) {
|
||||||
SmallVector<const SCEV *, 4> Operands;
|
SmallVector<const SCEV *, 4> Operands;
|
||||||
Operands.push_back(Start);
|
Operands.push_back(Start);
|
||||||
if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
|
if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
|
||||||
if (StepChrec->getLoop() == L) {
|
if (StepChrec->getLoop() == L) {
|
||||||
Operands.append(StepChrec->op_begin(), StepChrec->op_end());
|
Operands.append(StepChrec->op_begin(), StepChrec->op_end());
|
||||||
return getAddRecExpr(Operands, L);
|
// FIXME: can use maskFlags(Flags, SCEV::FlagNW)
|
||||||
|
return getAddRecExpr(Operands, L, SCEV::FlagAnyWrap);
|
||||||
}
|
}
|
||||||
|
|
||||||
Operands.push_back(Step);
|
Operands.push_back(Step);
|
||||||
return getAddRecExpr(Operands, L, HasNUW, HasNSW);
|
return getAddRecExpr(Operands, L, Flags);
|
||||||
}
|
}
|
||||||
|
|
||||||
/// getAddRecExpr - Get an add recurrence expression for the specified loop.
|
/// getAddRecExpr - Get an add recurrence expression for the specified loop.
|
||||||
/// Simplify the expression as much as possible.
|
/// Simplify the expression as much as possible.
|
||||||
const SCEV *
|
const SCEV *
|
||||||
ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
|
ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
|
||||||
const Loop *L,
|
const Loop *L, SCEV::NoWrapFlags Flags) {
|
||||||
bool HasNUW, bool HasNSW) {
|
|
||||||
if (Operands.size() == 1) return Operands[0];
|
if (Operands.size() == 1) return Operands[0];
|
||||||
#ifndef NDEBUG
|
#ifndef NDEBUG
|
||||||
const Type *ETy = getEffectiveSCEVType(Operands[0]->getType());
|
const Type *ETy = getEffectiveSCEVType(Operands[0]->getType());
|
||||||
|
@ -2040,7 +2065,7 @@ ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
|
||||||
|
|
||||||
if (Operands.back()->isZero()) {
|
if (Operands.back()->isZero()) {
|
||||||
Operands.pop_back();
|
Operands.pop_back();
|
||||||
return getAddRecExpr(Operands, L, HasNUW, HasNSW); // {X,+,0} --> X
|
return getAddRecExpr(Operands, L, SCEV::FlagAnyWrap); // {X,+,0} --> X
|
||||||
}
|
}
|
||||||
|
|
||||||
// It's tempting to want to call getMaxBackedgeTakenCount count here and
|
// It's tempting to want to call getMaxBackedgeTakenCount count here and
|
||||||
|
@ -2049,8 +2074,8 @@ ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
|
||||||
// meaningful BE count at this point (and if we don't, we'd be stuck
|
// meaningful BE count at this point (and if we don't, we'd be stuck
|
||||||
// with a SCEVCouldNotCompute as the cached BE count).
|
// with a SCEVCouldNotCompute as the cached BE count).
|
||||||
|
|
||||||
// If HasNSW is true and all the operands are non-negative, infer HasNUW.
|
// If FlagNSW is true and all the operands are non-negative, infer FlagNUW.
|
||||||
if (!HasNUW && HasNSW) {
|
if (!(Flags & SCEV::FlagNUW) && (Flags & SCEV::FlagNSW)) {
|
||||||
bool All = true;
|
bool All = true;
|
||||||
for (SmallVectorImpl<const SCEV *>::const_iterator I = Operands.begin(),
|
for (SmallVectorImpl<const SCEV *>::const_iterator I = Operands.begin(),
|
||||||
E = Operands.end(); I != E; ++I)
|
E = Operands.end(); I != E; ++I)
|
||||||
|
@ -2058,7 +2083,7 @@ ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
|
||||||
All = false;
|
All = false;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
if (All) HasNUW = true;
|
if (All) Flags = setFlags(Flags, SCEV::FlagNUW);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Canonicalize nested AddRecs in by nesting them in order of loop depth.
|
// Canonicalize nested AddRecs in by nesting them in order of loop depth.
|
||||||
|
@ -2081,16 +2106,31 @@ ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
if (AllInvariant) {
|
if (AllInvariant) {
|
||||||
NestedOperands[0] = getAddRecExpr(Operands, L);
|
// Create a recurrence for the outer loop with the same step size.
|
||||||
|
//
|
||||||
|
// FIXME:
|
||||||
|
// The outer recurrence keeps its NW flag but only keeps NUW/NSW if the
|
||||||
|
// inner recurrence has the same property.
|
||||||
|
// maskFlags(Flags, SCEV::FlagNW | NestedAR->getNoWrapFlags());
|
||||||
|
SCEV::NoWrapFlags OuterFlags = SCEV::FlagAnyWrap;
|
||||||
|
|
||||||
|
NestedOperands[0] = getAddRecExpr(Operands, L, OuterFlags);
|
||||||
AllInvariant = true;
|
AllInvariant = true;
|
||||||
for (unsigned i = 0, e = NestedOperands.size(); i != e; ++i)
|
for (unsigned i = 0, e = NestedOperands.size(); i != e; ++i)
|
||||||
if (!isLoopInvariant(NestedOperands[i], NestedLoop)) {
|
if (!isLoopInvariant(NestedOperands[i], NestedLoop)) {
|
||||||
AllInvariant = false;
|
AllInvariant = false;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
if (AllInvariant)
|
if (AllInvariant) {
|
||||||
// Ok, both add recurrences are valid after the transformation.
|
// Ok, both add recurrences are valid after the transformation.
|
||||||
return getAddRecExpr(NestedOperands, NestedLoop, HasNUW, HasNSW);
|
//
|
||||||
|
// FIXME:
|
||||||
|
// The inner recurrence keeps its NW flag but only keeps NUW/NSW if
|
||||||
|
// the outer recurrence has the same property.
|
||||||
|
// maskFlags(NestedAR->getNoWrapFlags(), SCEV::FlagNW | Flags);
|
||||||
|
SCEV::NoWrapFlags InnerFlags = SCEV::FlagAnyWrap;
|
||||||
|
return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
// Reset Operands to its original state.
|
// Reset Operands to its original state.
|
||||||
Operands[0] = NestedAR;
|
Operands[0] = NestedAR;
|
||||||
|
@ -2114,8 +2154,7 @@ ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
|
||||||
O, Operands.size(), L);
|
O, Operands.size(), L);
|
||||||
UniqueSCEVs.InsertNode(S, IP);
|
UniqueSCEVs.InsertNode(S, IP);
|
||||||
}
|
}
|
||||||
if (HasNUW) S->setHasNoUnsignedWrap(true);
|
S->setNoWrapFlags(Flags);
|
||||||
if (HasNSW) S->setHasNoSignedWrap(true);
|
|
||||||
return S;
|
return S;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -2510,17 +2549,17 @@ const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) {
|
||||||
return getMinusSCEV(AllOnes, V);
|
return getMinusSCEV(AllOnes, V);
|
||||||
}
|
}
|
||||||
|
|
||||||
/// getMinusSCEV - Return LHS-RHS. Minus is represented in SCEV as A+B*-1,
|
/// getMinusSCEV - Return LHS-RHS. Minus is represented in SCEV as A+B*-1.
|
||||||
/// and thus the HasNUW and HasNSW bits apply to the resultant add, not
|
///
|
||||||
/// whether the sub would have overflowed.
|
/// FIXME: prohibit FlagNUW here, as soon as getMinusSCEVForExitTest goes.
|
||||||
const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
|
const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
|
||||||
bool HasNUW, bool HasNSW) {
|
SCEV::NoWrapFlags Flags) {
|
||||||
// Fast path: X - X --> 0.
|
// Fast path: X - X --> 0.
|
||||||
if (LHS == RHS)
|
if (LHS == RHS)
|
||||||
return getConstant(LHS->getType(), 0);
|
return getConstant(LHS->getType(), 0);
|
||||||
|
|
||||||
// X - Y --> X + -Y
|
// X - Y --> X + -Y
|
||||||
return getAddExpr(LHS, getNegativeSCEV(RHS), HasNUW, HasNSW);
|
return getAddExpr(LHS, getNegativeSCEV(RHS), Flags);
|
||||||
}
|
}
|
||||||
|
|
||||||
/// getTruncateOrZeroExtend - Return a SCEV corresponding to a conversion of the
|
/// getTruncateOrZeroExtend - Return a SCEV corresponding to a conversion of the
|
||||||
|
@ -2773,44 +2812,35 @@ const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
|
||||||
if (isLoopInvariant(Accum, L) ||
|
if (isLoopInvariant(Accum, L) ||
|
||||||
(isa<SCEVAddRecExpr>(Accum) &&
|
(isa<SCEVAddRecExpr>(Accum) &&
|
||||||
cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) {
|
cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) {
|
||||||
bool HasNUW = false;
|
SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
|
||||||
bool HasNSW = false;
|
|
||||||
|
|
||||||
// If the increment doesn't overflow, then neither the addrec nor
|
// If the increment doesn't overflow, then neither the addrec nor
|
||||||
// the post-increment will overflow.
|
// the post-increment will overflow.
|
||||||
if (const AddOperator *OBO = dyn_cast<AddOperator>(BEValueV)) {
|
if (const AddOperator *OBO = dyn_cast<AddOperator>(BEValueV)) {
|
||||||
if (OBO->hasNoUnsignedWrap())
|
if (OBO->hasNoUnsignedWrap())
|
||||||
HasNUW = true;
|
Flags = setFlags(Flags, SCEV::FlagNUW);
|
||||||
if (OBO->hasNoSignedWrap())
|
if (OBO->hasNoSignedWrap())
|
||||||
HasNSW = true;
|
Flags = setFlags(Flags, SCEV::FlagNSW);
|
||||||
} else if (const GEPOperator *GEP =
|
} else if (const GEPOperator *GEP =
|
||||||
dyn_cast<GEPOperator>(BEValueV)) {
|
dyn_cast<GEPOperator>(BEValueV)) {
|
||||||
// If the increment is a GEP, then we know it won't perform a
|
// If the increment is an inbounds GEP, then we know the address
|
||||||
// signed overflow, because the address space cannot be
|
// space cannot be wrapped around. We cannot make any guarantee
|
||||||
// wrapped around.
|
// about signed or unsigned overflow because pointers are
|
||||||
//
|
// unsigned but we may have a negative index from the base
|
||||||
// NOTE: This isn't strictly true, because you could have an
|
// pointer.
|
||||||
// object straddling the 2G address boundary in a 32-bit address
|
if (GEP->isInBounds())
|
||||||
// space (for example). We really want to model this as a "has
|
// FIXME: should be SCEV::FlagNW
|
||||||
// no signed/unsigned wrap" where the base pointer is treated as
|
Flags = setFlags(Flags, SCEV::FlagNSW);
|
||||||
// unsigned and the increment is known to not have signed
|
|
||||||
// wrapping.
|
|
||||||
//
|
|
||||||
// This is a highly theoretical concern though, and this is good
|
|
||||||
// enough for all cases we know of at this point. :)
|
|
||||||
//
|
|
||||||
HasNSW |= GEP->isInBounds();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
const SCEV *StartVal = getSCEV(StartValueV);
|
const SCEV *StartVal = getSCEV(StartValueV);
|
||||||
const SCEV *PHISCEV =
|
const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);
|
||||||
getAddRecExpr(StartVal, Accum, L, HasNUW, HasNSW);
|
|
||||||
|
|
||||||
// Since the no-wrap flags are on the increment, they apply to the
|
// Since the no-wrap flags are on the increment, they apply to the
|
||||||
// post-incremented value as well.
|
// post-incremented value as well.
|
||||||
if (isLoopInvariant(Accum, L))
|
if (isLoopInvariant(Accum, L))
|
||||||
(void)getAddRecExpr(getAddExpr(StartVal, Accum),
|
(void)getAddRecExpr(getAddExpr(StartVal, Accum),
|
||||||
Accum, L, HasNUW, HasNSW);
|
Accum, L, Flags);
|
||||||
|
|
||||||
// Okay, for the entire analysis of this edge we assumed the PHI
|
// Okay, for the entire analysis of this edge we assumed the PHI
|
||||||
// to be symbolic. We now need to go back and purge all of the
|
// to be symbolic. We now need to go back and purge all of the
|
||||||
|
@ -2834,8 +2864,11 @@ const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
|
||||||
// initial step of the addrec evolution.
|
// initial step of the addrec evolution.
|
||||||
if (StartVal == getMinusSCEV(AddRec->getOperand(0),
|
if (StartVal == getMinusSCEV(AddRec->getOperand(0),
|
||||||
AddRec->getOperand(1))) {
|
AddRec->getOperand(1))) {
|
||||||
|
// FIXME: For constant StartVal, we should be able to infer
|
||||||
|
// no-wrap flags.
|
||||||
const SCEV *PHISCEV =
|
const SCEV *PHISCEV =
|
||||||
getAddRecExpr(StartVal, AddRec->getOperand(1), L);
|
getAddRecExpr(StartVal, AddRec->getOperand(1), L,
|
||||||
|
SCEV::FlagAnyWrap);
|
||||||
|
|
||||||
// Okay, for the entire analysis of this edge we assumed the PHI
|
// Okay, for the entire analysis of this edge we assumed the PHI
|
||||||
// to be symbolic. We now need to go back and purge all of the
|
// to be symbolic. We now need to go back and purge all of the
|
||||||
|
@ -2899,8 +2932,9 @@ const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) {
|
||||||
IndexS = getTruncateOrSignExtend(IndexS, IntPtrTy);
|
IndexS = getTruncateOrSignExtend(IndexS, IntPtrTy);
|
||||||
|
|
||||||
// Multiply the index by the element size to compute the element offset.
|
// Multiply the index by the element size to compute the element offset.
|
||||||
const SCEV *LocalOffset = getMulExpr(IndexS, ElementSize, /*NUW*/ false,
|
const SCEV *LocalOffset = getMulExpr(IndexS, ElementSize,
|
||||||
/*NSW*/ isInBounds);
|
isInBounds ? SCEV::FlagNSW :
|
||||||
|
SCEV::FlagAnyWrap);
|
||||||
|
|
||||||
// Add the element offset to the running total offset.
|
// Add the element offset to the running total offset.
|
||||||
TotalOffset = getAddExpr(TotalOffset, LocalOffset);
|
TotalOffset = getAddExpr(TotalOffset, LocalOffset);
|
||||||
|
@ -2911,8 +2945,8 @@ const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) {
|
||||||
const SCEV *BaseS = getSCEV(Base);
|
const SCEV *BaseS = getSCEV(Base);
|
||||||
|
|
||||||
// Add the total offset from all the GEP indices to the base.
|
// Add the total offset from all the GEP indices to the base.
|
||||||
return getAddExpr(BaseS, TotalOffset, /*NUW*/ false,
|
return getAddExpr(BaseS, TotalOffset,
|
||||||
/*NSW*/ isInBounds);
|
isInBounds ? SCEV::FlagNSW : SCEV::FlagAnyWrap);
|
||||||
}
|
}
|
||||||
|
|
||||||
/// GetMinTrailingZeros - Determine the minimum number of zero bits that S is
|
/// GetMinTrailingZeros - Determine the minimum number of zero bits that S is
|
||||||
|
@ -3074,7 +3108,8 @@ ScalarEvolution::getUnsignedRange(const SCEV *S) {
|
||||||
if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
|
if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
|
||||||
// If there's no unsigned wrap, the value will never be less than its
|
// If there's no unsigned wrap, the value will never be less than its
|
||||||
// initial value.
|
// initial value.
|
||||||
if (AddRec->hasNoUnsignedWrap())
|
// FIXME: can broaden to FlagNW?
|
||||||
|
if (AddRec->getNoWrapFlags(SCEV::FlagNUW))
|
||||||
if (const SCEVConstant *C = dyn_cast<SCEVConstant>(AddRec->getStart()))
|
if (const SCEVConstant *C = dyn_cast<SCEVConstant>(AddRec->getStart()))
|
||||||
if (!C->getValue()->isZero())
|
if (!C->getValue()->isZero())
|
||||||
ConservativeResult =
|
ConservativeResult =
|
||||||
|
@ -3216,7 +3251,7 @@ ScalarEvolution::getSignedRange(const SCEV *S) {
|
||||||
if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
|
if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
|
||||||
// If there's no signed wrap, and all the operands have the same sign or
|
// If there's no signed wrap, and all the operands have the same sign or
|
||||||
// zero, the value won't ever change sign.
|
// zero, the value won't ever change sign.
|
||||||
if (AddRec->hasNoSignedWrap()) {
|
if (AddRec->getNoWrapFlags(SCEV::FlagNSW)) {
|
||||||
bool AllNonNeg = true;
|
bool AllNonNeg = true;
|
||||||
bool AllNonPos = true;
|
bool AllNonPos = true;
|
||||||
for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
|
for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
|
||||||
|
@ -3411,10 +3446,8 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
|
||||||
// transfer the no-wrap flags, since an or won't introduce a wrap.
|
// transfer the no-wrap flags, since an or won't introduce a wrap.
|
||||||
if (const SCEVAddRecExpr *NewAR = dyn_cast<SCEVAddRecExpr>(S)) {
|
if (const SCEVAddRecExpr *NewAR = dyn_cast<SCEVAddRecExpr>(S)) {
|
||||||
const SCEVAddRecExpr *OldAR = cast<SCEVAddRecExpr>(LHS);
|
const SCEVAddRecExpr *OldAR = cast<SCEVAddRecExpr>(LHS);
|
||||||
if (OldAR->hasNoUnsignedWrap())
|
const_cast<SCEVAddRecExpr *>(NewAR)->setNoWrapFlags(
|
||||||
const_cast<SCEVAddRecExpr *>(NewAR)->setHasNoUnsignedWrap(true);
|
OldAR->getNoWrapFlags());
|
||||||
if (OldAR->hasNoSignedWrap())
|
|
||||||
const_cast<SCEVAddRecExpr *>(NewAR)->setHasNoSignedWrap(true);
|
|
||||||
}
|
}
|
||||||
return S;
|
return S;
|
||||||
}
|
}
|
||||||
|
@ -4031,7 +4064,7 @@ isSimpleUnwrappingAddRec(const SCEV *S, const Loop *L) {
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
// The SCEV must be known to not wrap in some way to be interesting.
|
// The SCEV must be known to not wrap in some way to be interesting.
|
||||||
if (!SA->hasNoUnsignedWrap() && !SA->hasNoSignedWrap())
|
if (!SA->getNoWrapFlags(SCEV::FlagNW))
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
// The stride must be a constant so that we know if it is striding up or down.
|
// The stride must be a constant so that we know if it is striding up or down.
|
||||||
|
@ -4046,14 +4079,15 @@ isSimpleUnwrappingAddRec(const SCEV *S, const Loop *L) {
|
||||||
/// advantage of the fact that this subtraction is only being used in a
|
/// advantage of the fact that this subtraction is only being used in a
|
||||||
/// comparison by zero context.
|
/// comparison by zero context.
|
||||||
///
|
///
|
||||||
|
/// FIXME: this can be completely removed once AddRec FlagNWs are propagated.
|
||||||
static const SCEV *getMinusSCEVForExitTest(const SCEV *LHS, const SCEV *RHS,
|
static const SCEV *getMinusSCEVForExitTest(const SCEV *LHS, const SCEV *RHS,
|
||||||
const Loop *L, ScalarEvolution &SE) {
|
const Loop *L, ScalarEvolution &SE) {
|
||||||
// If either LHS or RHS is an AddRec SCEV (of this loop) that is known to not
|
// If either LHS or RHS is an AddRec SCEV (of this loop) that is known to not
|
||||||
// wrap (either NSW or NUW), then we know that the value will either become
|
// self-wrap, then we know that the value will either become the other one
|
||||||
// the other one (and thus the loop terminates), that the loop will terminate
|
// (and thus the loop terminates), that the loop will terminate through some
|
||||||
// through some other exit condition first, or that the loop has undefined
|
// other exit condition first, or that the loop has undefined behavior. This
|
||||||
// behavior. This information is useful when the addrec has a stride that is
|
// information is useful when the addrec has a stride that is != 1 or -1,
|
||||||
// != 1 or -1, because it means we can't "miss" the exit value.
|
// because it means we can't "miss" the exit value.
|
||||||
//
|
//
|
||||||
// In any of these three cases, it is safe to turn the exit condition into a
|
// In any of these three cases, it is safe to turn the exit condition into a
|
||||||
// "counting down" AddRec (to zero) by subtracting the two inputs as normal,
|
// "counting down" AddRec (to zero) by subtracting the two inputs as normal,
|
||||||
|
@ -4090,7 +4124,7 @@ static const SCEV *getMinusSCEVForExitTest(const SCEV *LHS, const SCEV *RHS,
|
||||||
if (Stride->getValue().isNegative())
|
if (Stride->getValue().isNegative())
|
||||||
std::swap(LHS, RHS);
|
std::swap(LHS, RHS);
|
||||||
|
|
||||||
return SE.getMinusSCEV(RHS, LHS, true /*HasNUW*/);
|
return SE.getMinusSCEV(RHS, LHS, SCEV::FlagNUW);
|
||||||
}
|
}
|
||||||
|
|
||||||
// If both LHS and RHS are interesting, we have something like:
|
// If both LHS and RHS are interesting, we have something like:
|
||||||
|
@ -4118,7 +4152,7 @@ static const SCEV *getMinusSCEVForExitTest(const SCEV *LHS, const SCEV *RHS,
|
||||||
std::swap(LHS, RHS);
|
std::swap(LHS, RHS);
|
||||||
}
|
}
|
||||||
|
|
||||||
return SE.getMinusSCEV(LHS, RHS, true /*HasNUW*/);
|
return SE.getMinusSCEV(LHS, RHS, SCEV::FlagNUW);
|
||||||
}
|
}
|
||||||
|
|
||||||
/// ComputeBackedgeTakenCountFromExitCondICmp - Compute the number of times the
|
/// ComputeBackedgeTakenCountFromExitCondICmp - Compute the number of times the
|
||||||
@@ -4180,6 +4214,8 @@ ScalarEvolution::ComputeBackedgeTakenCountFromExitCondICmp(const Loop *L,
   switch (Cond) {
   case ICmpInst::ICMP_NE: {                     // while (X != Y)
     // Convert to: while (X-Y != 0)
+    // FIXME: Once AddRec FlagNW are propagated, should be:
+    //   BackedgeTakenInfo BTI = HowFarToZero(getMinusSCEV(LHS, RHS), L);
     BackedgeTakenInfo BTI = HowFarToZero(getMinusSCEVForExitTest(LHS, RHS, L,
                                                                  *this), L);
     if (BTI.hasAnyInfo()) return BTI;

@@ -4706,7 +4742,10 @@ const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
           for (++i; i != e; ++i)
             NewOps.push_back(getSCEVAtScope(AddRec->getOperand(i), L));

-          AddRec = cast<SCEVAddRecExpr>(getAddRecExpr(NewOps, AddRec->getLoop()));
+          AddRec = cast<SCEVAddRecExpr>(
+            getAddRecExpr(NewOps, AddRec->getLoop(),
+                          // FIXME: AddRec->getNoWrapFlags(SCEV::FlagNW)
+                          SCEV::FlagAnyWrap));
           break;
         }

@@ -4871,6 +4910,11 @@ SolveQuadraticEquation(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) {

 /// HowFarToZero - Return the number of times a backedge comparing the specified
 /// value to zero will execute.  If not computable, return CouldNotCompute.
+///
+/// This is only used for loops with a "x != y" exit test. The exit condition is
+/// now expressed as a single expression, V = x-y. So the exit test is
+/// effectively V != 0.  We know and take advantage of the fact that this
+/// expression only being used in a comparison by zero context.
 ScalarEvolution::BackedgeTakenInfo
 ScalarEvolution::HowFarToZero(const SCEV *V, const Loop *L) {
   // If the value is a constant
@@ -4939,21 +4983,34 @@ ScalarEvolution::HowFarToZero(const SCEV *V, const Loop *L) {
   // down, it cannot "miss" 0 (which would cause it to wrap), regardless of what
   // the stride is.  As such, NUW addrec's will always become zero in
   // "start / -stride" steps, and we know that the division is exact.
-  if (AddRec->hasNoUnsignedWrap())
+  if (AddRec->getNoWrapFlags(SCEV::FlagNUW))
     // FIXME: We really want an "isexact" bit for udiv.
     return getUDivExpr(Start, getNegativeSCEV(Step));

   // For now we handle only constant steps.
+  //
+  // TODO: Handle a nonconstant Step given AddRec<NUW>. If the
+  // AddRec is NUW, then (in an unsigned sense) it cannot be counting up to wrap
+  // to 0, it must be counting down to equal 0. Consequently, N = Start / -Step.
+  // We have not yet seen any such cases.
   const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step);
   if (StepC == 0)
     return getCouldNotCompute();

-  // First, handle unitary steps.
+  // For positive steps (counting up until unsigned overflow):
+  //   N = -Start/Step (as unsigned)
+  // For negative steps (counting down to zero):
+  //   N = Start/-Step
+  // First compute the unsigned distance from zero in the direction of Step.
+  const SCEV *Distance = StepC->getValue()->getValue().isNonNegative() ?
+                         getNegativeSCEV(Start) : Start;
+
+  // Handle unitary steps, which cannot wraparound.
   if (StepC->getValue()->equalsInt(1))      // 1*N = -Start (mod 2^BW), so:
-    return getNegativeSCEV(Start);          //   N = -Start (as unsigned)
+    return Distance;                        //   N = -Start (as unsigned)

   if (StepC->getValue()->isAllOnesValue())  // -1*N = -Start (mod 2^BW), so:
-    return Start;                           //   N = Start (as unsigned)
+    return Distance;                        //   N = Start (as unsigned)

   // Then, try to solve the above equation provided that Start is constant.
   if (const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start))
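As a standalone sanity check of the trip-count identities in the new comments (not part of the patch): with a unit-magnitude step, the unsigned distance computed above is exactly the number of backedges taken. The sketch uses 8-bit arithmetic to model the mod-2^BW behavior.

#include <cassert>
#include <cstdint>

int main() {
  // Counting down: {10,+,-1} in 8 bits. Step is negative, so Distance = Start,
  // and the loop takes Start backedges before the value reaches zero.
  uint8_t Start = 10, Step = static_cast<uint8_t>(-1);
  uint8_t IV = Start;
  unsigned N = 0;
  while (IV != 0) { IV = static_cast<uint8_t>(IV + Step); ++N; }
  assert(N == Start);                        // N = Start / -Step = 10

  // Counting up: {-5,+,1} in 8 bits, i.e. an unsigned start of 251.
  // Step is non-negative, so Distance = -Start (as unsigned) = 5.
  Start = static_cast<uint8_t>(-5); Step = 1;
  IV = Start;
  N = 0;
  while (IV != 0) { IV = static_cast<uint8_t>(IV + Step); ++N; }
  assert(N == static_cast<uint8_t>(-Start)); // N = -Start / Step = 5
  return 0;
}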
@@ -5220,12 +5277,12 @@ bool ScalarEvolution::SimplifyICmpOperands(ICmpInst::Predicate &Pred,
   case ICmpInst::ICMP_SLE:
     if (!getSignedRange(RHS).getSignedMax().isMaxSignedValue()) {
       RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS,
-                       /*HasNUW=*/false, /*HasNSW=*/true);
+                       SCEV::FlagNSW);
       Pred = ICmpInst::ICMP_SLT;
       Changed = true;
     } else if (!getSignedRange(LHS).getSignedMin().isMinSignedValue()) {
       LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS,
-                       /*HasNUW=*/false, /*HasNSW=*/true);
+                       SCEV::FlagNSW);
       Pred = ICmpInst::ICMP_SLT;
       Changed = true;
     }

@@ -5233,12 +5290,12 @@ bool ScalarEvolution::SimplifyICmpOperands(ICmpInst::Predicate &Pred,
   case ICmpInst::ICMP_SGE:
     if (!getSignedRange(RHS).getSignedMin().isMinSignedValue()) {
       RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS,
-                       /*HasNUW=*/false, /*HasNSW=*/true);
+                       SCEV::FlagNSW);
       Pred = ICmpInst::ICMP_SGT;
       Changed = true;
     } else if (!getSignedRange(LHS).getSignedMax().isMaxSignedValue()) {
       LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS,
-                       /*HasNUW=*/false, /*HasNSW=*/true);
+                       SCEV::FlagNSW);
       Pred = ICmpInst::ICMP_SGT;
       Changed = true;
     }

@@ -5246,12 +5303,12 @@ bool ScalarEvolution::SimplifyICmpOperands(ICmpInst::Predicate &Pred,
   case ICmpInst::ICMP_ULE:
     if (!getUnsignedRange(RHS).getUnsignedMax().isMaxValue()) {
       RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS,
-                       /*HasNUW=*/true, /*HasNSW=*/false);
+                       SCEV::FlagNUW);
       Pred = ICmpInst::ICMP_ULT;
       Changed = true;
     } else if (!getUnsignedRange(LHS).getUnsignedMin().isMinValue()) {
       LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS,
-                       /*HasNUW=*/true, /*HasNSW=*/false);
+                       SCEV::FlagNUW);
       Pred = ICmpInst::ICMP_ULT;
       Changed = true;
     }

@@ -5259,12 +5316,12 @@ bool ScalarEvolution::SimplifyICmpOperands(ICmpInst::Predicate &Pred,
   case ICmpInst::ICMP_UGE:
     if (!getUnsignedRange(RHS).getUnsignedMin().isMinValue()) {
       RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS,
-                       /*HasNUW=*/true, /*HasNSW=*/false);
+                       SCEV::FlagNUW);
       Pred = ICmpInst::ICMP_UGT;
       Changed = true;
     } else if (!getUnsignedRange(LHS).getUnsignedMax().isMaxValue()) {
       LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS,
-                       /*HasNUW=*/true, /*HasNSW=*/false);
+                       SCEV::FlagNUW);
       Pred = ICmpInst::ICMP_UGT;
       Changed = true;
     }
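The rewrites above all use the same identity: a non-strict comparison becomes a strict one against a bound moved by one, and the range guard ensures the moved bound cannot overflow, which is what the FlagNSW/FlagNUW on the new add records. A standalone exhaustive check over 8-bit values (not part of the patch) of both the signed and unsigned forms:

#include <cassert>
#include <cstdint>

int main() {
  // Signed case: if y != INT8_MAX, then (x <= y) == (x < y + 1),
  // and y + 1 cannot overflow, which is what FlagNSW asserts.
  for (int x = INT8_MIN; x <= INT8_MAX; ++x)
    for (int y = INT8_MIN; y < INT8_MAX; ++y)   // y != INT8_MAX, as guarded
      assert((x <= y) == (x < y + 1));

  // Unsigned case: if y != UINT8_MAX, then (x <= y) == (x < y + 1),
  // and the add carries FlagNUW instead.
  for (unsigned x = 0; x <= UINT8_MAX; ++x)
    for (unsigned y = 0; y < UINT8_MAX; ++y)    // y != UINT8_MAX, as guarded
      assert((x <= y) == (x < y + 1));
  return 0;
}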
@@ -5690,8 +5747,8 @@ ScalarEvolution::HowManyLessThans(const SCEV *LHS, const SCEV *RHS,
     return getCouldNotCompute();

   // Check to see if we have a flag which makes analysis easy.
-  bool NoWrap = isSigned ? AddRec->hasNoSignedWrap() :
-                           AddRec->hasNoUnsignedWrap();
+  bool NoWrap = isSigned ? AddRec->getNoWrapFlags(SCEV::FlagNSW) :
+                           AddRec->getNoWrapFlags(SCEV::FlagNUW);

   if (AddRec->isAffine()) {
     unsigned BitWidth = getTypeSizeInBits(AddRec->getType());
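This hunk assumes getNoWrapFlags behaves as a mask-and-test: it returns only the requested bits of the recurrence's stored flags, so in a boolean context the result is true exactly when the queried flag is present, matching the hasNoSignedWrap()/hasNoUnsignedWrap() predicates it replaces. A minimal standalone model of that idiom (the enum and bit values here are illustrative stand-ins, not the real SCEV definitions):

#include <cassert>

// Illustrative stand-in for a no-wrap bitmask; the values are hypothetical.
enum Flags { None = 0, NUW = 1 << 4, NSW = 1 << 5 };

// Mask-and-test: return only the requested bits of the stored flags.
static Flags getFlags(unsigned Stored, Flags Mask) {
  return static_cast<Flags>(Stored & Mask);
}

int main() {
  unsigned Stored = NUW;                        // known not to wrap unsigned
  bool NoWrapUnsigned = getFlags(Stored, NUW);  // nonzero bit -> true
  bool NoWrapSigned   = getFlags(Stored, NSW);  // zero        -> false
  assert(NoWrapUnsigned && !NoWrapSigned);
  return 0;
}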
@@ -5807,7 +5864,9 @@ const SCEV *SCEVAddRecExpr::getNumIterationsInRange(ConstantRange Range,
     if (!SC->getValue()->isZero()) {
       SmallVector<const SCEV *, 4> Operands(op_begin(), op_end());
       Operands[0] = SE.getConstant(SC->getType(), 0);
-      const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop());
+      const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop(),
+                                             // FIXME: getNoWrapFlags(FlagNW)
+                                             FlagAnyWrap);
       if (const SCEVAddRecExpr *ShiftedAddRec =
             dyn_cast<SCEVAddRecExpr>(Shifted))
         return ShiftedAddRec->getNumIterationsInRange(

@@ -5868,7 +5927,9 @@ const SCEV *SCEVAddRecExpr::getNumIterationsInRange(ConstantRange Range,
     // Range.getUpper() is crossed.
     SmallVector<const SCEV *, 4> NewOps(op_begin(), op_end());
     NewOps[0] = SE.getNegativeSCEV(SE.getConstant(Range.getUpper()));
-    const SCEV *NewAddRec = SE.getAddRecExpr(NewOps, getLoop());
+    const SCEV *NewAddRec = SE.getAddRecExpr(NewOps, getLoop(),
+                                             // getNoWrapFlags(FlagNW)
+                                             FlagAnyWrap);

     // Next, solve the constructed addrec
     std::pair<const SCEV *,const SCEV *> Roots =
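Both hunks rely on the same shift trick: replacing the start of {Start,+,Step} by zero (or by -Range.getUpper()) does not change how many leading iterations satisfy a range test, provided the range is shifted by the same amount. A standalone numeric check of that invariance (not part of the patch; it ignores wraparound, which is why the real code still wants FlagNW):

#include <cassert>

// Count leading iterations of {Start,+,Step} whose value stays inside
// [Lo, Hi), stopping at a fixed iteration budget.
static int iterationsInRange(long Start, long Step, long Lo, long Hi,
                             int Budget) {
  long V = Start;
  int N = 0;
  while (N < Budget && Lo <= V && V < Hi) {
    V += Step;
    ++N;
  }
  return N;
}

int main() {
  const long Start = 2, Step = 3, Lo = 0, Hi = 10;
  // {2,+,3} stays in [0,10) for the values 2, 5, 8 -> three iterations.
  int Direct  = iterationsInRange(Start, Step, Lo, Hi, 100);
  // Shifting the start to zero and the range by -Start gives the same count.
  int Shifted = iterationsInRange(0, Step, Lo - Start, Hi - Start, 100);
  assert(Direct == 3 && Direct == Shifted);
  return 0;
}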
@@ -262,7 +262,8 @@ static bool FactorOutConstant(const SCEV *&S,
       const SCEV *Start = A->getStart();
       if (!FactorOutConstant(Start, Remainder, Factor, SE, TD))
         return false;
-      S = SE.getAddRecExpr(Start, Step, A->getLoop());
+      // FIXME: can use A->getNoWrapFlags(FlagNW)
+      S = SE.getAddRecExpr(Start, Step, A->getLoop(), SCEV::FlagAnyWrap);
       return true;
     }

@@ -314,7 +315,9 @@ static void SplitAddRecs(SmallVectorImpl<const SCEV *> &Ops,
       const SCEV *Zero = SE.getConstant(Ty, 0);
       AddRecs.push_back(SE.getAddRecExpr(Zero,
                                          A->getStepRecurrence(SE),
-                                         A->getLoop()));
+                                         A->getLoop(),
+                                         // FIXME: A->getNoWrapFlags(FlagNW)
+                                         SCEV::FlagAnyWrap));
       if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Start)) {
         Ops[i] = Zero;
         Ops.append(Add->op_begin(), Add->op_end());
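SplitAddRecs uses the identity {Start,+,Step} = Start + {0,+,Step}: the start is peeled off as a loop-invariant addend while a zero-based recurrence keeps the step. A standalone check of that identity on concrete values (not part of the patch):

#include <cassert>

int main() {
  const long Start = 7, Step = 3;
  long IV = Start;    // models {Start,+,Step} directly
  long Zeroed = 0;    // models {0,+,Step}
  for (int i = 0; i < 50; ++i) {
    assert(IV == Start + Zeroed);   // {Start,+,Step} == Start + {0,+,Step}
    IV += Step;
    Zeroed += Step;
  }
  return 0;
}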
@@ -823,7 +826,9 @@ static void ExposePointerBase(const SCEV *&Base, const SCEV *&Rest,
     Rest = SE.getAddExpr(Rest,
                          SE.getAddRecExpr(SE.getConstant(A->getType(), 0),
                                           A->getStepRecurrence(SE),
-                                          A->getLoop()));
+                                          A->getLoop(),
+                                          // FIXME: A->getNoWrapFlags(FlagNW)
+                                          SCEV::FlagAnyWrap));
   }
   if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(Base)) {
     Base = A->getOperand(A->getNumOperands()-1);

@@ -1005,10 +1010,11 @@ Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
   if (!SE.properlyDominates(Start, L->getHeader())) {
     PostLoopOffset = Start;
     Start = SE.getConstant(Normalized->getType(), 0);
-    Normalized =
-      cast<SCEVAddRecExpr>(SE.getAddRecExpr(Start,
-                                            Normalized->getStepRecurrence(SE),
-                                            Normalized->getLoop()));
+    Normalized = cast<SCEVAddRecExpr>(
+      SE.getAddRecExpr(Start, Normalized->getStepRecurrence(SE),
+                       Normalized->getLoop(),
+                       // FIXME: Normalized->getNoWrapFlags(FlagNW)
+                       SCEV::FlagAnyWrap));
   }

   // Strip off any non-loop-dominating component from the addrec step.

@@ -1019,7 +1025,10 @@ Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
     Step = SE.getConstant(Normalized->getType(), 1);
     Normalized =
       cast<SCEVAddRecExpr>(SE.getAddRecExpr(Start, Step,
-                                            Normalized->getLoop()));
+                                            Normalized->getLoop(),
+                                            // FIXME: Normalized
+                                            // ->getNoWrapFlags(FlagNW)
+                                            SCEV::FlagAnyWrap));
   }

   // Expand the core addrec. If we need post-loop scaling, force it to

@@ -1082,7 +1091,9 @@ Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
     SmallVector<const SCEV *, 4> NewOps(S->getNumOperands());
     for (unsigned i = 0, e = S->getNumOperands(); i != e; ++i)
       NewOps[i] = SE.getAnyExtendExpr(S->op_begin()[i], CanonicalIV->getType());
-    Value *V = expand(SE.getAddRecExpr(NewOps, S->getLoop()));
+    Value *V = expand(SE.getAddRecExpr(NewOps, S->getLoop(),
+                                       // FIXME: S->getNoWrapFlags(FlagNW)
+                                       SCEV::FlagAnyWrap));
     BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
     BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
     BasicBlock::iterator NewInsertPt =

@@ -1099,7 +1110,8 @@ Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
   if (!S->getStart()->isZero()) {
     SmallVector<const SCEV *, 4> NewOps(S->op_begin(), S->op_end());
     NewOps[0] = SE.getConstant(Ty, 0);
-    const SCEV *Rest = SE.getAddRecExpr(NewOps, L);
+    // FIXME: can use S->getNoWrapFlags()
+    const SCEV *Rest = SE.getAddRecExpr(NewOps, L, SCEV::FlagAnyWrap);

     // Turn things like ptrtoint+arithmetic+inttoptr into GEP. See the
     // comments on expandAddToGEP for details.

@@ -1334,7 +1346,7 @@ void SCEVExpander::rememberInstruction(Value *I) {
   InsertedValues.insert(I);

   // If we just claimed an existing instruction and that instruction had
   // been the insert point, adjust the insert point forward so that
   // subsequently inserted code will be dominated.
   if (Builder.GetInsertPoint() == I) {
     BasicBlock::iterator It = cast<Instruction>(I);

@@ -1362,8 +1374,9 @@ SCEVExpander::getOrInsertCanonicalInductionVariable(const Loop *L,
   assert(Ty->isIntegerTy() && "Can only insert integer induction variables!");

   // Build a SCEV for {0,+,1}<L>.
+  // Conservatively use FlagAnyWrap for now.
   const SCEV *H = SE.getAddRecExpr(SE.getConstant(Ty, 0),
-                                   SE.getConstant(Ty, 1), L);
+                                   SE.getConstant(Ty, 1), L, SCEV::FlagAnyWrap);

   // Emit code for it.
   BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
@@ -97,7 +97,8 @@ const SCEV *llvm::TransformForPostIncUse(TransformKind Kind,
       const SCEV *N = TransformForPostIncUse(Kind, O, LUser, 0, Loops, SE, DT);
       Operands.push_back(N);
     }
-    const SCEV *Result = SE.getAddRecExpr(Operands, L);
+    // Conservatively use AnyWrap until/unless we need FlagNW.
+    const SCEV *Result = SE.getAddRecExpr(Operands, L, SCEV::FlagAnyWrap);
     switch (Kind) {
     default: llvm_unreachable("Unexpected transform name!");
     case NormalizeAutodetect:
@@ -485,10 +485,10 @@ processLoopStridedStore(Value *DestPtr, unsigned StoreSize,
   BECount = SE->getTruncateOrZeroExtend(BECount, IntPtr);

   const SCEV *NumBytesS = SE->getAddExpr(BECount, SE->getConstant(IntPtr, 1),
-                                         true /*no unsigned overflow*/);
+                                         SCEV::FlagNUW);
   if (StoreSize != 1)
     NumBytesS = SE->getMulExpr(NumBytesS, SE->getConstant(IntPtr, StoreSize),
-                               true /*no unsigned overflow*/);
+                               SCEV::FlagNUW);

   Value *NumBytes =
     Expander.expandCodeFor(NumBytesS, IntPtr, Preheader->getTerminator());

@@ -581,10 +581,10 @@ processLoopStoreOfLoopLoad(StoreInst *SI, unsigned StoreSize,
   BECount = SE->getTruncateOrZeroExtend(BECount, IntPtr);

   const SCEV *NumBytesS = SE->getAddExpr(BECount, SE->getConstant(IntPtr, 1),
-                                         true /*no unsigned overflow*/);
+                                         SCEV::FlagNUW);
   if (StoreSize != 1)
     NumBytesS = SE->getMulExpr(NumBytesS, SE->getConstant(IntPtr, StoreSize),
-                               true /*no unsigned overflow*/);
+                               SCEV::FlagNUW);

   Value *NumBytes =
     Expander.expandCodeFor(NumBytesS, IntPtr, Preheader->getTerminator());
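Both loop-idiom hunks compute the same quantity: the memset/memcpy length is NumBytes = (BECount + 1) * StoreSize, where BECount is the backedge-taken count, and both the add and the mul carry FlagNUW on the assumption that the byte count fits the pointer-width type. A standalone arithmetic check of the formula (not part of the patch):

#include <cassert>
#include <cstdint>

int main() {
  // A loop that stores 4 bytes per iteration and takes the backedge 7 times
  // executes 8 iterations, so it touches 8 * 4 = 32 bytes.
  const uint64_t BECount = 7, StoreSize = 4;
  const uint64_t TripCount = BECount + 1;          // getAddExpr(BECount, 1, NUW)
  const uint64_t NumBytes = TripCount * StoreSize; // getMulExpr(..., NUW)
  assert(NumBytes == 32);
  return 0;
}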
@@ -253,7 +253,8 @@ static void DoInitialMatch(const SCEV *S, Loop *L,
     DoInitialMatch(AR->getStart(), L, Good, Bad, SE);
     DoInitialMatch(SE.getAddRecExpr(SE.getConstant(AR->getType(), 0),
                                     AR->getStepRecurrence(SE),
-                                    AR->getLoop()),
+                                    // FIXME: AR->getNoWrapFlags()
+                                    AR->getLoop(), SCEV::FlagAnyWrap),
                    L, Good, Bad, SE);
     return;
   }

@@ -455,7 +456,10 @@ static const SCEV *getExactSDiv(const SCEV *LHS, const SCEV *RHS,
       const SCEV *Start = getExactSDiv(AR->getStart(), RHS, SE,
                                        IgnoreSignificantBits);
       if (!Start) return 0;
-      return SE.getAddRecExpr(Start, Step, AR->getLoop());
+      // FlagNW is independent of the start value, step direction, and is
+      // preserved with smaller magnitude steps.
+      // FIXME: AR->getNoWrapFlags(SCEV::FlagNW)
+      return SE.getAddRecExpr(Start, Step, AR->getLoop(), SCEV::FlagAnyWrap);
     }
     return 0;
   }

@@ -520,7 +524,9 @@ static int64_t ExtractImmediate(const SCEV *&S, ScalarEvolution &SE) {
     SmallVector<const SCEV *, 8> NewOps(AR->op_begin(), AR->op_end());
     int64_t Result = ExtractImmediate(NewOps.front(), SE);
     if (Result != 0)
-      S = SE.getAddRecExpr(NewOps, AR->getLoop());
+      S = SE.getAddRecExpr(NewOps, AR->getLoop(),
+                           // FIXME: AR->getNoWrapFlags(SCEV::FlagNW)
+                           SCEV::FlagAnyWrap);
     return Result;
   }
   return 0;

@@ -545,7 +551,9 @@ static GlobalValue *ExtractSymbol(const SCEV *&S, ScalarEvolution &SE) {
     SmallVector<const SCEV *, 8> NewOps(AR->op_begin(), AR->op_end());
     GlobalValue *Result = ExtractSymbol(NewOps.front(), SE);
     if (Result)
-      S = SE.getAddRecExpr(NewOps, AR->getLoop());
+      S = SE.getAddRecExpr(NewOps, AR->getLoop(),
+                           // FIXME: AR->getNoWrapFlags(SCEV::FlagNW)
+                           SCEV::FlagAnyWrap);
     return Result;
   }
   return 0;

@@ -2236,7 +2244,9 @@ static void CollectSubexprs(const SCEV *S, const SCEVConstant *C,
     if (!AR->getStart()->isZero()) {
       CollectSubexprs(SE.getAddRecExpr(SE.getConstant(AR->getType(), 0),
                                        AR->getStepRecurrence(SE),
-                                       AR->getLoop()),
+                                       AR->getLoop(),
+                                       //FIXME: AR->getNoWrapFlags(SCEV::FlagNW)
+                                       SCEV::FlagAnyWrap),
                       C, Ops, L, SE);
       CollectSubexprs(AR->getStart(), C, Ops, L, SE);
       return;

@@ -3047,7 +3057,7 @@ void LSRInstance::NarrowSearchSpaceByCollapsingUnrolledCode() {
     }
   }

 /// NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters - Call
 /// FilterOutUndesirableDedicatedRegisters again, if necessary, now that
 /// we've done more filtering, as it may be able to find more formulae to
 /// eliminate.