at the end of instcombine, explicitly clear WorklistMap.

This shrinks it down to something small.  On the testcase
from PR1432, this speeds up instcombine from 0.7959s to 0.5000s
(a 1.59x speedup, i.e. 59% faster).

llvm-svn: 40840
This commit is contained in:
Chris Lattner 2007-08-05 08:47:58 +00:00
parent 4515601f1b
commit f0da7975ea
1 changed file with 9 additions and 7 deletions

View File

@@ -870,11 +870,10 @@ static void ComputeSignedMinMaxValuesFromKnownBits(const Type *Ty,
 // could have the specified known zero and known one bits, returning them in
 // min/max.
 static void ComputeUnsignedMinMaxValuesFromKnownBits(const Type *Ty,
-                                                     const APInt& KnownZero,
-                                                     const APInt& KnownOne,
-                                                     APInt& Min,
-                                                     APInt& Max) {
-  uint32_t BitWidth = cast<IntegerType>(Ty)->getBitWidth();
+                                                     const APInt &KnownZero,
+                                                     const APInt &KnownOne,
+                                                     APInt &Min, APInt &Max) {
+  uint32_t BitWidth = cast<IntegerType>(Ty)->getBitWidth(); BitWidth = BitWidth;
   assert(KnownZero.getBitWidth() == BitWidth &&
          KnownOne.getBitWidth() == BitWidth &&
          Min.getBitWidth() == BitWidth && Max.getBitWidth() &&
@@ -1885,7 +1884,7 @@ Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I) {
   if (I.getNumOperands() == 2) {
     Constant *C = cast<Constant>(I.getOperand(1));
     for (unsigned i = 0; i != NumPHIValues; ++i) {
-      Value *InV;
+      Value *InV = 0;
       if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i))) {
         if (CmpInst *CI = dyn_cast<CmpInst>(&I))
           InV = ConstantExpr::getCompare(CI->getPredicate(), InC, C);
@@ -4095,7 +4094,7 @@ Instruction *InstCombiner::visitXor(BinaryOperator &I) {
   // xor X, X = 0, even if X is nested in a sequence of Xor's.
   if (Instruction *Result = AssociativeOpt(I, XorSelf(Op1))) {
-    assert(Result == &I && "AssociativeOpt didn't work?");
+    assert(Result == &I && "AssociativeOpt didn't work?"); Result=Result;
     return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
   }
@@ -10051,6 +10050,9 @@ bool InstCombiner::DoOneIteration(Function &F, unsigned Iteration) {
   }
   assert(WorklistMap.empty() && "Worklist empty, but map not?");
+  // Do an explicit clear, this shrinks the map if needed.
+  WorklistMap.clear();
   return Changed;
 }