Try to keep the cached inliner costs around for a bit longer for big functions.
The Caller cost info would be reset every time a callee was inlined. If the caller has many calls and there is some mutual recursion going on, the caller cost info could be recalculated many times. This patch reduces inliner runtime from 240s to 0.5s for a function with 20000 small function calls. This is a more conservative version of r98089 that doesn't break the clang test CodeGenCXX/temp-order.cpp; that test relies on rather extreme inlining for constant folding.

llvm-svn: 98099
This commit: b495cad7ca (parent: b70c2f795e)
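To illustrate the caching strategy the patch adopts, here is a minimal standalone sketch. All names below (Metrics, Cache, rescan, the free growCachedCostInfo function) are hypothetical stand-ins rather than the real InlineCostAnalyzer API, and the 10-block / 1000-instruction thresholds are simply borrowed from the diff that follows: small callers are still invalidated and rescanned, while large callers have the callee's cached metrics folded into their existing entry so the expensive rescan is skipped.

// Minimal standalone sketch (hypothetical types, not the LLVM API) of the
// caching strategy: instead of invalidating the caller's cached metrics after
// every inline, fold the callee's cached metrics into the caller's entry once
// the caller is already large.
#include <cstdio>
#include <map>
#include <string>

struct Metrics {
  unsigned NumInsts = 0, NumBlocks = 0, NumCalls = 0;
  bool Valid = false;          // false means "recompute from scratch"
};

static std::map<std::string, Metrics> Cache;   // keyed by function name

// Pretend this is the expensive full scan of a function body.
static Metrics rescan(unsigned Insts, unsigned Blocks, unsigned Calls) {
  Metrics M;
  M.NumInsts = Insts; M.NumBlocks = Blocks; M.NumCalls = Calls;
  M.Valid = true;
  return M;
}

// After inlining Callee into Caller: small callers are simply invalidated
// (cheap to rescan later); large callers get an incremental update.
static void growCachedCostInfo(const std::string &Caller,
                               const std::string &Callee) {
  Metrics &CallerM = Cache[Caller];
  if (CallerM.NumBlocks < 10 || CallerM.NumInsts < 1000) {
    CallerM = Metrics();       // reset: recompute lazily on the next query
    return;
  }
  const Metrics &CalleeM = Cache[Callee];
  if (!CalleeM.Valid) {        // no trustworthy callee info: fall back to reset
    CallerM = Metrics();
    return;
  }
  if (CallerM.NumCalls > 0) --CallerM.NumCalls;   // the inlined call is gone
  CallerM.NumInsts  += CalleeM.NumInsts;
  CallerM.NumBlocks += CalleeM.NumBlocks;
  CallerM.NumCalls  += CalleeM.NumCalls;
}

int main() {
  Cache["big"]   = rescan(20000, 500, 20000);  // a huge caller with 20000 calls
  Cache["small"] = rescan(8, 1, 0);            // a tiny leaf callee
  growCachedCostInfo("big", "small");          // updated in place, no rescan
  std::printf("big: %u insts, %u calls, valid=%d\n",
              Cache["big"].NumInsts, Cache["big"].NumCalls,
              Cache["big"].Valid);
  return 0;
}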
@@ -179,6 +179,11 @@ namespace llvm {
     void resetCachedCostInfo(Function* Caller) {
       CachedFunctionInfo[Caller] = FunctionInfo();
     }
+
+    /// growCachedCostInfo - update the cached cost info for Caller after Callee
+    /// has been inlined. If Callee is NULL it means a dead call has been
+    /// eliminated.
+    void growCachedCostInfo(Function* Caller, Function* Callee);
   };
 }
 
@@ -75,6 +75,10 @@ struct Inliner : public CallGraphSCCPass {
   ///
   virtual void resetCachedCostInfo(Function* Caller) = 0;
 
+  /// growCachedCostInfo - update the cached cost info for Caller after Callee
+  /// has been inlined.
+  virtual void growCachedCostInfo(Function* Caller, Function* Callee) = 0;
+
   /// removeDeadFunctions - Remove dead functions that are not included in
   /// DNR (Do Not Remove) list.
   bool removeDeadFunctions(CallGraph &CG,
@@ -383,3 +383,45 @@ float InlineCostAnalyzer::getInlineFudgeFactor(CallSite CS) {
     Factor += 1.5f;
   return Factor;
 }
+
+/// growCachedCostInfo - update the cached cost info for Caller after Callee has
+/// been inlined.
+void
+InlineCostAnalyzer::growCachedCostInfo(Function* Caller, Function* Callee) {
+  FunctionInfo &CallerFI = CachedFunctionInfo[Caller];
+
+  // For small functions we prefer to recalculate the cost for better accuracy.
+  if (CallerFI.Metrics.NumBlocks < 10 || CallerFI.Metrics.NumInsts < 1000) {
+    resetCachedCostInfo(Caller);
+    return;
+  }
+
+  // For large functions, we can save a lot of computation time by skipping
+  // recalculations.
+  if (CallerFI.Metrics.NumCalls > 0)
+    --CallerFI.Metrics.NumCalls;
+
+  if (Callee) {
+    FunctionInfo &CalleeFI = CachedFunctionInfo[Callee];
+    if (!CalleeFI.Metrics.NumBlocks) {
+      resetCachedCostInfo(Caller);
+      return;
+    }
+    CallerFI.Metrics.NeverInline |= CalleeFI.Metrics.NeverInline;
+    CallerFI.Metrics.usesDynamicAlloca |= CalleeFI.Metrics.usesDynamicAlloca;
+
+    CallerFI.Metrics.NumInsts += CalleeFI.Metrics.NumInsts;
+    CallerFI.Metrics.NumBlocks += CalleeFI.Metrics.NumBlocks;
+    CallerFI.Metrics.NumCalls += CalleeFI.Metrics.NumCalls;
+    CallerFI.Metrics.NumVectorInsts += CalleeFI.Metrics.NumVectorInsts;
+    CallerFI.Metrics.NumRets += CalleeFI.Metrics.NumRets;
+
+    // analyzeBasicBlock counts each function argument as an inst.
+    if (CallerFI.Metrics.NumInsts >= Callee->arg_size())
+      CallerFI.Metrics.NumInsts -= Callee->arg_size();
+    else
+      CallerFI.Metrics.NumInsts = 0;
+  }
+  // We are not updating the argumentweights. We have already determined that
+  // Caller is a fairly large function, so we accept the loss of precision.
+}
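To make the arg_size() correction at the end of growCachedCostInfo concrete, here is a tiny worked example with made-up numbers. As the comment in the hunk notes, analyzeBasicBlock counts each formal argument of the callee as an instruction, so those counts are subtracted again after the merge (clamping at zero), since they do not correspond to real instructions once the call is inlined.

// Worked example with hypothetical numbers for the arg_size() correction.
#include <cstdio>

int main() {
  unsigned CallerNumInsts = 4000; // cached caller size before the merge
  unsigned CalleeNumInsts = 53;   // 50 real instructions + 3 argument counts
  unsigned CalleeArgSize  = 3;    // what Callee->arg_size() would return

  CallerNumInsts += CalleeNumInsts;                        // 4053
  CallerNumInsts = (CallerNumInsts >= CalleeArgSize)
                       ? CallerNumInsts - CalleeArgSize    // 4050
                       : 0;                                // clamp, as in the patch
  std::printf("merged NumInsts = %u\n", CallerNumInsts);   // prints 4050
  return 0;
}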
@@ -45,7 +45,10 @@ namespace {
       return CA.getInlineFudgeFactor(CS);
     }
     void resetCachedCostInfo(Function *Caller) {
-      return CA.resetCachedCostInfo(Caller);
+      CA.resetCachedCostInfo(Caller);
     }
+    void growCachedCostInfo(Function* Caller, Function* Callee) {
+      CA.growCachedCostInfo(Caller, Callee);
+    }
     virtual bool doFinalization(CallGraph &CG) {
       return removeDeadFunctions(CG, &NeverInline);
@@ -45,6 +45,9 @@ namespace {
     void resetCachedCostInfo(Function *Caller) {
       CA.resetCachedCostInfo(Caller);
     }
+    void growCachedCostInfo(Function* Caller, Function* Callee) {
+      CA.growCachedCostInfo(Caller, Callee);
+    }
     virtual bool doInitialization(CallGraph &CG);
   };
 }
@@ -369,6 +369,8 @@ bool Inliner::runOnSCC(std::vector<CallGraphNode*> &SCC) {
         CG[Caller]->removeCallEdgeFor(CS);
         CS.getInstruction()->eraseFromParent();
         ++NumCallsDeleted;
+        // Update the cached cost info with the missing call
+        growCachedCostInfo(Caller, NULL);
       } else {
         // We can only inline direct calls to non-declarations.
         if (Callee == 0 || Callee->isDeclaration()) continue;
@@ -382,6 +384,9 @@ bool Inliner::runOnSCC(std::vector<CallGraphNode*> &SCC) {
         if (!InlineCallIfPossible(CS, CG, TD, InlinedArrayAllocas))
           continue;
         ++NumInlined;
+
+        // Update the cached cost info with the inlined call.
+        growCachedCostInfo(Caller, Callee);
       }
 
       // If we inlined or deleted the last possible call site to the function,
@@ -407,11 +412,6 @@ bool Inliner::runOnSCC(std::vector<CallGraphNode*> &SCC) {
           delete CG.removeFunctionFromModule(CalleeNode);
           ++NumDeleted;
         }
-
-        // Remove any cached cost info for this caller, as inlining the
-        // callee has increased the size of the caller (which may be the
-        // same as the callee).
-        resetCachedCostInfo(Caller);
 
         // Remove this call site from the list. If possible, use
         // swap/pop_back for efficiency, but do not use it if doing so would
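For orientation, here is a compressed, hypothetical sketch of how the Inliner.cpp hunks change the driver loop: the cache is now grown after each successful inline and after each dead-call deletion, and the unconditional resetCachedCostInfo(Caller) that the last hunk removes no longer runs. The types and loop below are simplified stand-ins, not the real Inliner::runOnSCC.

// Simplified, hypothetical model of the call-site processing loop.
#include <cstddef>
#include <cstdio>
#include <vector>

struct Function { const char *Name; bool IsDeclaration; };
struct CallSite { Function *Caller; Function *Callee; bool IsDead; };

// Stand-in for the virtual hook the patch adds to the Inliner interface:
// a null Callee means "a dead call was deleted".
static void growCachedCostInfo(Function *Caller, Function *Callee) {
  std::printf("grow cached cost info for %s (callee: %s)\n",
              Caller->Name, Callee ? Callee->Name : "none, dead call removed");
}

static bool inlineCallIfPossible(const CallSite &CS) {
  return CS.Callee && !CS.Callee->IsDeclaration;  // only direct, defined callees
}

static void processCallSites(std::vector<CallSite> &Sites) {
  for (std::size_t i = 0; i != Sites.size(); ++i) {
    CallSite &CS = Sites[i];
    if (CS.IsDead) {
      // The call instruction was erased: account for the missing call.
      growCachedCostInfo(CS.Caller, nullptr);
      continue;
    }
    if (!inlineCallIfPossible(CS))
      continue;
    // A callee was inlined: fold its cached metrics into the caller's entry.
    // The unconditional reset that used to follow here is what the final
    // hunk above removes.
    growCachedCostInfo(CS.Caller, CS.Callee);
  }
}

int main() {
  Function Big = {"big_caller", false}, Leaf = {"leaf", false};
  CallSite A = {&Big, &Leaf, false};   // a call that gets inlined
  CallSite B = {&Big, nullptr, true};  // a call proven dead and deleted
  std::vector<CallSite> Sites;
  Sites.push_back(A);
  Sites.push_back(B);
  processCallSites(Sites);
  return 0;
}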