Change SCEVNAryExpr's operand array from a SmallVector to a plain
pointer and length, and allocate the arrays in ScalarEvolution's
BumpPtrAllocator, so that they get released when their owning
SCEV gets released. SCEVs are immutable, so they don't need to worry
about operand array resizing. This fixes a memory leak reported
in PR6637.

llvm-svn: 98755
Dan Gohman 2010-03-17 18:51:01 +00:00
parent ee8fed0146
commit 5c9b0e1a6e
3 changed files with 61 additions and 58 deletions
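Note for readers unfamiliar with the pattern: the following is a minimal standalone sketch, not part of the commit, of the allocation scheme the change adopts. `Node` and `makeNode` are hypothetical stand-ins for SCEVNAryExpr and the get*Expr() factories; llvm::BumpPtrAllocator and std::uninitialized_copy are real APIs used the same way the diffs below use them.

#include "llvm/Support/Allocator.h"
#include <cstddef>
#include <memory>
#include <new>
#include <vector>

namespace {

// An immutable node: it holds only a pointer into allocator-owned storage
// plus a count, mirroring SCEVNAryExpr after this change. It is trivially
// destructible, so it is safe never to run a destructor for it.
struct Node {
  const int *const *Operands; // not owned; lives in the BumpPtrAllocator
  size_t NumOperands;
  Node(const int *const *O, size_t N) : Operands(O), NumOperands(N) {}
};

// Carve both the node and its operand array out of the same allocator and
// copy the operands in, the way getAddExpr() and friends do after this commit.
Node *makeNode(llvm::BumpPtrAllocator &Alloc,
               const std::vector<const int *> &Ops) {
  Node *N = Alloc.Allocate<Node>();                        // uninitialized node memory
  const int **O = Alloc.Allocate<const int *>(Ops.size()); // uninitialized operand array
  std::uninitialized_copy(Ops.begin(), Ops.end(), O);      // fill the array
  return new (N) Node(O, Ops.size());                      // placement-new the node
}

} // end anonymous namespace

Because the node and its operand array are trivially destructible, freeing the allocator releases everything at once, which is how the leak reported in PR6637 goes away.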

--- a/include/llvm/Analysis/ScalarEvolutionExpressions.h
+++ b/include/llvm/Analysis/ScalarEvolutionExpressions.h

@@ -180,25 +180,27 @@ namespace llvm {
   ///
   class SCEVNAryExpr : public SCEV {
   protected:
-    SmallVector<const SCEV *, 8> Operands;
+    // Since SCEVs are immutable, ScalarEvolution allocates operand
+    // arrays with its SCEVAllocator, so this class just needs a simple
+    // pointer rather than a more elaborate vector-like data structure.
+    // This also avoids the need for a non-trivial destructor.
+    const SCEV *const *Operands;
+    size_t NumOperands;
     SCEVNAryExpr(const FoldingSetNodeID &ID,
-                 enum SCEVTypes T, const SmallVectorImpl<const SCEV *> &ops)
-      : SCEV(ID, T), Operands(ops.begin(), ops.end()) {}
+                 enum SCEVTypes T, const SCEV *const *O, size_t N)
+      : SCEV(ID, T), Operands(O), NumOperands(N) {}
   public:
-    unsigned getNumOperands() const { return (unsigned)Operands.size(); }
+    size_t getNumOperands() const { return NumOperands; }
     const SCEV *getOperand(unsigned i) const {
-      assert(i < Operands.size() && "Operand index out of range!");
+      assert(i < NumOperands && "Operand index out of range!");
       return Operands[i];
     }
-    const SmallVectorImpl<const SCEV *> &getOperands() const {
-      return Operands;
-    }
-    typedef SmallVectorImpl<const SCEV *>::const_iterator op_iterator;
-    op_iterator op_begin() const { return Operands.begin(); }
-    op_iterator op_end() const { return Operands.end(); }
+    typedef const SCEV *const *op_iterator;
+    op_iterator op_begin() const { return Operands; }
+    op_iterator op_end() const { return Operands + NumOperands; }
     virtual bool isLoopInvariant(const Loop *L) const {
       for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
@@ -262,8 +264,8 @@ namespace llvm {
   protected:
     SCEVCommutativeExpr(const FoldingSetNodeID &ID,
                         enum SCEVTypes T,
-                        const SmallVectorImpl<const SCEV *> &ops)
-      : SCEVNAryExpr(ID, T, ops) {}
+                        const SCEV *const *O, size_t N)
+      : SCEVNAryExpr(ID, T, O, N) {}
   public:
     virtual const char *getOperationStr() const = 0;
@@ -288,8 +290,8 @@ namespace llvm {
     friend class ScalarEvolution;
     SCEVAddExpr(const FoldingSetNodeID &ID,
-                const SmallVectorImpl<const SCEV *> &ops)
-      : SCEVCommutativeExpr(ID, scAddExpr, ops) {
+                const SCEV *const *O, size_t N)
+      : SCEVCommutativeExpr(ID, scAddExpr, O, N) {
     }
   public:
@@ -316,8 +318,8 @@ namespace llvm {
     friend class ScalarEvolution;
     SCEVMulExpr(const FoldingSetNodeID &ID,
-                const SmallVectorImpl<const SCEV *> &ops)
-      : SCEVCommutativeExpr(ID, scMulExpr, ops) {
+                const SCEV *const *O, size_t N)
+      : SCEVCommutativeExpr(ID, scMulExpr, O, N) {
     }
   public:
@@ -390,9 +392,9 @@ namespace llvm {
     const Loop *L;
     SCEVAddRecExpr(const FoldingSetNodeID &ID,
-                   const SmallVectorImpl<const SCEV *> &ops, const Loop *l)
-      : SCEVNAryExpr(ID, scAddRecExpr, ops), L(l) {
-      for (size_t i = 0, e = Operands.size(); i != e; ++i)
+                   const SCEV *const *O, size_t N, const Loop *l)
+      : SCEVNAryExpr(ID, scAddRecExpr, O, N), L(l) {
+      for (size_t i = 0, e = NumOperands; i != e; ++i)
         assert(Operands[i]->isLoopInvariant(l) &&
                "Operands of AddRec must be loop-invariant!");
     }
@@ -472,8 +474,8 @@ namespace llvm {
     friend class ScalarEvolution;
     SCEVSMaxExpr(const FoldingSetNodeID &ID,
-                 const SmallVectorImpl<const SCEV *> &ops)
-      : SCEVCommutativeExpr(ID, scSMaxExpr, ops) {
+                 const SCEV *const *O, size_t N)
+      : SCEVCommutativeExpr(ID, scSMaxExpr, O, N) {
       // Max never overflows.
       setHasNoUnsignedWrap(true);
       setHasNoSignedWrap(true);
@@ -497,8 +499,8 @@ namespace llvm {
     friend class ScalarEvolution;
     SCEVUMaxExpr(const FoldingSetNodeID &ID,
-                 const SmallVectorImpl<const SCEV *> &ops)
-      : SCEVCommutativeExpr(ID, scUMaxExpr, ops) {
+                 const SCEV *const *O, size_t N)
+      : SCEVCommutativeExpr(ID, scUMaxExpr, O, N) {
       // Max never overflows.
      setHasNoUnsignedWrap(true);
      setHasNoSignedWrap(true);
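One practical consequence of the header change above, shown as a small standalone sketch that is not part of the commit: because op_iterator is now a plain pointer, operand ranges can still be copied directly into a SmallVector, which is the idiom the .cpp hunks below switch to (for example SmallVector<const SCEV *, 4> NewMulOps(M->op_begin(), M->op_end())). ExprLike is a hypothetical stand-in for an n-ary SCEV node; llvm::SmallVector is the real class.

#include "llvm/ADT/SmallVector.h"
#include <cstddef>

// Hypothetical node type exposing raw-pointer operand iterators, the same
// shape as SCEVNAryExpr's op_iterator after this change.
struct ExprLike {
  typedef const int *const *op_iterator;
  const int *const *Operands;
  size_t NumOperands;
  op_iterator op_begin() const { return Operands; }
  op_iterator op_end() const { return Operands + NumOperands; }
};

// Copy the immutable operand range into a mutable vector before editing an
// entry, the same pattern the NewMulOps / NewOps code below follows.
llvm::SmallVector<const int *, 4> editableOperands(const ExprLike &E) {
  return llvm::SmallVector<const int *, 4>(E.op_begin(), E.op_end());
}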

--- a/lib/Analysis/ScalarEvolution.cpp
+++ b/lib/Analysis/ScalarEvolution.cpp

@@ -248,10 +248,10 @@ void SCEVSignExtendExpr::print(raw_ostream &OS) const {
 }
 void SCEVCommutativeExpr::print(raw_ostream &OS) const {
-  assert(Operands.size() > 1 && "This plus expr shouldn't exist!");
+  assert(NumOperands > 1 && "This plus expr shouldn't exist!");
   const char *OpStr = getOperationStr();
   OS << "(" << *Operands[0];
-  for (unsigned i = 1, e = Operands.size(); i != e; ++i)
+  for (unsigned i = 1, e = NumOperands; i != e; ++i)
     OS << OpStr << *Operands[i];
   OS << ")";
 }
@@ -329,7 +329,7 @@ SCEVAddRecExpr::properlyDominates(BasicBlock *BB, DominatorTree *DT) const {
 void SCEVAddRecExpr::print(raw_ostream &OS) const {
   OS << "{" << *Operands[0];
-  for (unsigned i = 1, e = Operands.size(); i != e; ++i)
+  for (unsigned i = 1, e = NumOperands; i != e; ++i)
     OS << ",+," << *Operands[i];
   OS << "}<";
   WriteAsOperand(OS, L->getHeader(), /*PrintType=*/false);
@@ -1202,23 +1202,23 @@ static bool
 CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M,
                              SmallVector<const SCEV *, 8> &NewOps,
                              APInt &AccumulatedConstant,
-                             const SmallVectorImpl<const SCEV *> &Ops,
+                             const SCEV *const *Ops, size_t NumOperands,
                              const APInt &Scale,
                              ScalarEvolution &SE) {
   bool Interesting = false;
   // Iterate over the add operands.
-  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
+  for (unsigned i = 0, e = NumOperands; i != e; ++i) {
     const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]);
     if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) {
       APInt NewScale =
         Scale * cast<SCEVConstant>(Mul->getOperand(0))->getValue()->getValue();
       if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) {
         // A multiplication of a constant with another add; recurse.
+        const SCEVAddExpr *Add = cast<SCEVAddExpr>(Mul->getOperand(1));
         Interesting |=
           CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
-                                       cast<SCEVAddExpr>(Mul->getOperand(1))
-                                         ->getOperands(),
+                                       Add->op_begin(), Add->getNumOperands(),
                                        NewScale, SE);
       } else {
         // A multiplication of a constant with some other value. Update
@@ -1427,7 +1427,8 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
     SmallVector<const SCEV *, 8> NewOps;
     APInt AccumulatedConstant(BitWidth, 0);
     if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
-                                     Ops, APInt(BitWidth, 1), *this)) {
+                                     Ops.data(), Ops.size(),
+                                     APInt(BitWidth, 1), *this)) {
       // Some interesting folding opportunity is present, so its worthwhile to
       // re-generate the operands list. Group the operands by constant scale,
       // to avoid multiplying by the same constant scale multiple times.
@@ -1612,7 +1613,9 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
     static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
   if (!S) {
     S = SCEVAllocator.Allocate<SCEVAddExpr>();
-    new (S) SCEVAddExpr(ID, Ops);
+    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
+    std::uninitialized_copy(Ops.begin(), Ops.end(), O);
+    new (S) SCEVAddExpr(ID, O, Ops.size());
     UniqueSCEVs.InsertNode(S, IP);
   }
   if (HasNUW) S->setHasNoUnsignedWrap(true);
@@ -1820,7 +1823,9 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
     static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
   if (!S) {
     S = SCEVAllocator.Allocate<SCEVMulExpr>();
-    new (S) SCEVMulExpr(ID, Ops);
+    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
+    std::uninitialized_copy(Ops.begin(), Ops.end(), O);
+    new (S) SCEVMulExpr(ID, O, Ops.size());
     UniqueSCEVs.InsertNode(S, IP);
   }
   if (HasNUW) S->setHasNoUnsignedWrap(true);
@@ -1880,9 +1885,7 @@ const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
           const SCEV *Op = M->getOperand(i);
           const SCEV *Div = getUDivExpr(Op, RHSC);
           if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) {
-            const SmallVectorImpl<const SCEV *> &MOperands = M->getOperands();
-            Operands = SmallVector<const SCEV *, 4>(MOperands.begin(),
-                                                    MOperands.end());
+            Operands = SmallVector<const SCEV *, 4>(M->op_begin(), M->op_end());
             Operands[i] = Div;
             return getMulExpr(Operands);
           }
@@ -2031,7 +2034,9 @@ ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
     static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
   if (!S) {
     S = SCEVAllocator.Allocate<SCEVAddRecExpr>();
-    new (S) SCEVAddRecExpr(ID, Operands, L);
+    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Operands.size());
+    std::uninitialized_copy(Operands.begin(), Operands.end(), O);
+    new (S) SCEVAddRecExpr(ID, O, Operands.size(), L);
     UniqueSCEVs.InsertNode(S, IP);
   }
   if (HasNUW) S->setHasNoUnsignedWrap(true);
@@ -2131,7 +2136,9 @@ ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
   void *IP = 0;
   if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
   SCEV *S = SCEVAllocator.Allocate<SCEVSMaxExpr>();
-  new (S) SCEVSMaxExpr(ID, Ops);
+  const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
+  std::uninitialized_copy(Ops.begin(), Ops.end(), O);
+  new (S) SCEVSMaxExpr(ID, O, Ops.size());
   UniqueSCEVs.InsertNode(S, IP);
   return S;
 }
@@ -2228,7 +2235,9 @@ ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
   void *IP = 0;
   if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
   SCEV *S = SCEVAllocator.Allocate<SCEVUMaxExpr>();
-  new (S) SCEVUMaxExpr(ID, Ops);
+  const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
+  std::uninitialized_copy(Ops.begin(), Ops.end(), O);
+  new (S) SCEVUMaxExpr(ID, O, Ops.size());
   UniqueSCEVs.InsertNode(S, IP);
   return S;
 }

--- a/lib/Analysis/ScalarEvolutionExpander.cpp
+++ b/lib/Analysis/ScalarEvolutionExpander.cpp

@@ -232,9 +232,7 @@ static bool FactorOutConstant(const SCEV *&S,
       const SCEVConstant *FC = cast<SCEVConstant>(Factor);
       if (const SCEVConstant *C = dyn_cast<SCEVConstant>(M->getOperand(0)))
         if (!C->getValue()->getValue().srem(FC->getValue()->getValue())) {
-          const SmallVectorImpl<const SCEV *> &MOperands = M->getOperands();
-          SmallVector<const SCEV *, 4> NewMulOps(MOperands.begin(),
-                                                 MOperands.end());
+          SmallVector<const SCEV *, 4> NewMulOps(M->op_begin(), M->op_end());
           NewMulOps[0] =
             SE.getConstant(C->getValue()->getValue().sdiv(
                              FC->getValue()->getValue()));
@@ -249,9 +247,7 @@ static bool FactorOutConstant(const SCEV *&S,
         const SCEV *Remainder = SE.getIntegerSCEV(0, SOp->getType());
         if (FactorOutConstant(SOp, Remainder, Factor, SE, TD) &&
             Remainder->isZero()) {
-          const SmallVectorImpl<const SCEV *> &MOperands = M->getOperands();
-          SmallVector<const SCEV *, 4> NewMulOps(MOperands.begin(),
-                                                 MOperands.end());
+          SmallVector<const SCEV *, 4> NewMulOps(M->op_begin(), M->op_end());
           NewMulOps[i] = SOp;
           S = SE.getMulExpr(NewMulOps);
           return true;
@@ -297,13 +293,11 @@ static void SimplifyAddOperands(SmallVectorImpl<const SCEV *> &Ops,
                     SE.getAddExpr(NoAddRecs);
   // If it returned an add, use the operands. Otherwise it simplified
   // the sum into a single value, so just use that.
+  Ops.clear();
   if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Sum))
-    Ops = Add->getOperands();
-  else {
-    Ops.clear();
-    if (!Sum->isZero())
-      Ops.push_back(Sum);
-  }
+    Ops.insert(Ops.end(), Add->op_begin(), Add->op_end());
+  else if (!Sum->isZero())
+    Ops.push_back(Sum);
   // Then append the addrecs.
   Ops.insert(Ops.end(), AddRecs.begin(), AddRecs.end());
 }
@@ -1060,10 +1054,9 @@ Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
   if (CanonicalIV &&
       SE.getTypeSizeInBits(CanonicalIV->getType()) >
       SE.getTypeSizeInBits(Ty)) {
-    const SmallVectorImpl<const SCEV *> &Ops = S->getOperands();
-    SmallVector<const SCEV *, 4> NewOps(Ops.size());
-    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
-      NewOps[i] = SE.getAnyExtendExpr(Ops[i], CanonicalIV->getType());
+    SmallVector<const SCEV *, 4> NewOps(S->getNumOperands());
+    for (unsigned i = 0, e = S->getNumOperands(); i != e; ++i)
+      NewOps[i] = SE.getAnyExtendExpr(S->op_begin()[i], CanonicalIV->getType());
     Value *V = expand(SE.getAddRecExpr(NewOps, S->getLoop()));
     BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
     BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
@@ -1078,8 +1071,7 @@ Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
   // {X,+,F} --> X + {0,+,F}
   if (!S->getStart()->isZero()) {
-    const SmallVectorImpl<const SCEV *> &SOperands = S->getOperands();
-    SmallVector<const SCEV *, 4> NewOps(SOperands.begin(), SOperands.end());
+    SmallVector<const SCEV *, 4> NewOps(S->op_begin(), S->op_end());
     NewOps[0] = SE.getIntegerSCEV(0, Ty);
     const SCEV *Rest = SE.getAddRecExpr(NewOps, L);