//===- PassManagerBuilder.cpp - Build Standard Pass -----------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the PassManagerBuilder class, which is used to set up a
// "standard" optimization sequence suitable for languages like C and C++.
//
//===----------------------------------------------------------------------===//
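
// A minimal usage sketch (illustrative only; which fields a frontend sets and
// the inliner threshold below are assumptions, not requirements):
//
//   PassManagerBuilder Builder;
//   Builder.OptLevel = 2;
//   Builder.Inliner = createFunctionInliningPass(/*Threshold=*/225);
//   legacy::FunctionPassManager FPM(&M);
//   legacy::PassManager MPM;
//   Builder.populateFunctionPassManager(FPM);
//   Builder.populateModulePassManager(MPM);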

#include "llvm/Transforms/IPO/PassManagerBuilder.h"
#include "llvm-c/Transforms/PassManagerBuilder.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/CFLAndersAliasAnalysis.h"
#include "llvm/Analysis/CFLSteensAliasAnalysis.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/InlineCost.h"
#include "llvm/Analysis/Passes.h"
#include "llvm/Analysis/ScopedNoAliasAA.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TypeBasedAliasAnalysis.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/ModuleSummaryIndex.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/IPO/ForceFunctionAttrs.h"
#include "llvm/Transforms/IPO/FunctionAttrs.h"
#include "llvm/Transforms/IPO/InferFunctionAttrs.h"
#include "llvm/Transforms/Instrumentation.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Scalar/GVN.h"
#include "llvm/Transforms/Vectorize.h"

using namespace llvm;

static cl::opt<bool>
    RunLoopVectorization("vectorize-loops", cl::Hidden,
                         cl::desc("Run the Loop vectorization passes"));

static cl::opt<bool>
    RunSLPVectorization("vectorize-slp", cl::Hidden,
                        cl::desc("Run the SLP vectorization passes"));

static cl::opt<bool>
    RunBBVectorization("vectorize-slp-aggressive", cl::Hidden,
                       cl::desc("Run the BB vectorization passes"));

static cl::opt<bool> UseGVNAfterVectorization(
    "use-gvn-after-vectorization", cl::init(false), cl::Hidden,
    cl::desc("Run GVN instead of Early CSE after vectorization passes"));

static cl::opt<bool> ExtraVectorizerPasses(
    "extra-vectorizer-passes", cl::init(false), cl::Hidden,
    cl::desc("Run cleanup optimization passes after vectorization."));

static cl::opt<bool>
    RunLoopRerolling("reroll-loops", cl::Hidden,
                     cl::desc("Run the loop rerolling pass"));

static cl::opt<bool> RunLoadCombine("combine-loads", cl::init(false),
                                    cl::Hidden,
                                    cl::desc("Run the load combining pass"));

static cl::opt<bool> RunNewGVN("enable-newgvn", cl::init(false), cl::Hidden,
                               cl::desc("Run the NewGVN pass"));

static cl::opt<bool> RunSLPAfterLoopVectorization(
    "run-slp-after-loop-vectorization", cl::init(true), cl::Hidden,
    cl::desc("Run the SLP vectorizer (and BB vectorizer) after the Loop "
             "vectorizer instead of before"));

// Experimental option to use CFL-AA
enum class CFLAAType { None, Steensgaard, Andersen, Both };
static cl::opt<CFLAAType>
    UseCFLAA("use-cfl-aa", cl::init(CFLAAType::None), cl::Hidden,
             cl::desc("Enable the new, experimental CFL alias analysis"),
             cl::values(clEnumValN(CFLAAType::None, "none", "Disable CFL-AA"),
                        clEnumValN(CFLAAType::Steensgaard, "steens",
                                   "Enable unification-based CFL-AA"),
                        clEnumValN(CFLAAType::Andersen, "anders",
                                   "Enable inclusion-based CFL-AA"),
                        clEnumValN(CFLAAType::Both, "both",
                                   "Enable both variants of CFL-AA")));
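
// Illustrative only (assumed 'opt' invocation): because UseCFLAA is a cl::opt,
// the experimental CFL alias analyses can be enabled from the command line,
// e.g.
//   opt -O2 -use-cfl-aa=steens input.ll -S -o output.ll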

static cl::opt<bool> EnableLoopInterchange(
    "enable-loopinterchange", cl::init(false), cl::Hidden,
    cl::desc("Enable the new, experimental LoopInterchange Pass"));

static cl::opt<bool> EnableNonLTOGlobalsModRef(
    "enable-non-lto-gmr", cl::init(true), cl::Hidden,
    cl::desc(
        "Enable the GlobalsModRef AliasAnalysis outside of the LTO pipeline."));

static cl::opt<bool> EnableLoopLoadElim(
    "enable-loop-load-elim", cl::init(true), cl::Hidden,
    cl::desc("Enable the LoopLoadElimination Pass"));
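
// From the pass description: LoopLoadElimination forwards a store to a load
// across the loop backedge, e.g.
//   for (i)                          T = A[0];
//     A[i + 1] = A[i] + B[i];   =>   for (i) { T = T + B[i]; A[i + 1] = T; }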

static cl::opt<bool>
    EnablePrepareForThinLTO("prepare-for-thinlto", cl::init(false), cl::Hidden,
                            cl::desc("Enable preparation for ThinLTO."));

static cl::opt<bool> RunPGOInstrGen(
    "profile-generate", cl::init(false), cl::Hidden,
    cl::desc("Enable PGO instrumentation."));

static cl::opt<std::string>
    PGOOutputFile("profile-generate-file", cl::init(""), cl::Hidden,
                  cl::desc("Specify the path of profile data file."));

static cl::opt<std::string> RunPGOInstrUse(
    "profile-use", cl::init(""), cl::Hidden, cl::value_desc("filename"),
    cl::desc("Enable use phase of PGO instrumentation and specify the path "
             "of profile data file"));
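
// Illustrative only (assumed 'opt' invocations exercising the PGO options
// above; the file names are placeholders):
//   opt -O2 -profile-generate -profile-generate-file=code.profraw in.bc
//   opt -O2 -profile-use=code.profdata in.bc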

static cl::opt<bool> UseLoopVersioningLICM(
    "enable-loop-versioning-licm", cl::init(false), cl::Hidden,
    cl::desc("Enable the experimental Loop Versioning LICM pass"));
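
// Per the pass description: LoopVersioningLICM emits a runtime memory check
// plus a clone of the loop whose accesses are marked no-alias; when the check
// proves there is no aliasing at run time, LICM can hoist freely in the clone.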

static cl::opt<bool>
    DisablePreInliner("disable-preinline", cl::init(false), cl::Hidden,
                      cl::desc("Disable pre-instrumentation inliner"));

static cl::opt<int> PreInlineThreshold(
    "preinline-threshold", cl::Hidden, cl::init(75), cl::ZeroOrMore,
    cl::desc("Control the amount of inlining in pre-instrumentation inliner "
             "(default = 75)"));

static cl::opt<bool> EnableGVNHoist(
    "enable-gvn-hoist", cl::init(false), cl::Hidden,
    cl::desc("Enable the GVN hoisting pass"));

static cl::opt<bool>
    DisableLibCallsShrinkWrap("disable-libcalls-shrinkwrap", cl::init(false),
                              cl::Hidden,
                              cl::desc("Disable shrink-wrap library calls"));
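
// Per the pass description: LibCallsShrinkWrap guards otherwise-dead library
// calls that are only kept alive because they may set errno, e.g. a lone
// "sqrt(val);" becomes "if (val < 0) sqrt(val);" so the call is skipped on the
// common path.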

PassManagerBuilder::PassManagerBuilder() {
  OptLevel = 2;
  SizeLevel = 0;
  LibraryInfo = nullptr;
  Inliner = nullptr;
  DisableUnitAtATime = false;
  DisableUnrollLoops = false;
  BBVectorize = RunBBVectorization;
  SLPVectorize = RunSLPVectorization;
  LoopVectorize = RunLoopVectorization;
  RerollLoops = RunLoopRerolling;
  LoadCombine = RunLoadCombine;
  NewGVN = RunNewGVN;
  DisableGVNLoadPRE = false;
  VerifyInput = false;
  VerifyOutput = false;
  MergeFunctions = false;
  PrepareForLTO = false;
  EnablePGOInstrGen = RunPGOInstrGen;
  PGOInstrGen = PGOOutputFile;
  PGOInstrUse = RunPGOInstrUse;
  PrepareForThinLTO = EnablePrepareForThinLTO;
  PerformThinLTO = false;
  DivergentTarget = false;
}
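
// Illustrative only (client code, not executed here): the defaults above can
// be overridden field by field before calling the populate*PassManager
// methods, e.g.
//   PassManagerBuilder Builder;   // OptLevel = 2, SizeLevel = 0, ...
//   Builder.SizeLevel = 2;        // roughly an -Oz-style pipeline
//   Builder.LoopVectorize = false;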

PassManagerBuilder::~PassManagerBuilder() {
  delete LibraryInfo;
  delete Inliner;
}

/// Set of global extensions, automatically added as part of the standard set.
static ManagedStatic<SmallVector<std::pair<PassManagerBuilder::ExtensionPointTy,
    PassManagerBuilder::ExtensionFn>, 8> > GlobalExtensions;

void PassManagerBuilder::addGlobalExtension(
    PassManagerBuilder::ExtensionPointTy Ty,
    PassManagerBuilder::ExtensionFn Fn) {
  GlobalExtensions->push_back(std::make_pair(Ty, std::move(Fn)));
}

void PassManagerBuilder::addExtension(ExtensionPointTy Ty, ExtensionFn Fn) {
  Extensions.push_back(std::make_pair(Ty, std::move(Fn)));
}
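
// Illustrative only (client code; createMyCustomPass is a hypothetical pass
// factory): a frontend can hook an extension point with a lambda, e.g.
//   Builder.addExtension(PassManagerBuilder::EP_EarlyAsPossible,
//                        [](const PassManagerBuilder &Builder,
//                           legacy::PassManagerBase &PM) {
//                          PM.add(createMyCustomPass());
//                        });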

void PassManagerBuilder::addExtensionsToPM(ExtensionPointTy ETy,
                                           legacy::PassManagerBase &PM) const {
  for (unsigned i = 0, e = GlobalExtensions->size(); i != e; ++i)
    if ((*GlobalExtensions)[i].first == ETy)
      (*GlobalExtensions)[i].second(*this, PM);
  for (unsigned i = 0, e = Extensions.size(); i != e; ++i)
    if (Extensions[i].first == ETy)
      Extensions[i].second(*this, PM);
}

void PassManagerBuilder::addInitialAliasAnalysisPasses(
    legacy::PassManagerBase &PM) const {
  switch (UseCFLAA) {
  case CFLAAType::Steensgaard:
    PM.add(createCFLSteensAAWrapperPass());
    break;
  case CFLAAType::Andersen:
    PM.add(createCFLAndersAAWrapperPass());
    break;
  case CFLAAType::Both:
    PM.add(createCFLSteensAAWrapperPass());
    PM.add(createCFLAndersAAWrapperPass());
    break;
  default:
    break;
  }

  // Add TypeBasedAliasAnalysis before BasicAliasAnalysis so that
  // BasicAliasAnalysis wins if they disagree. This is intended to help
  // support "obvious" type-punning idioms.
  PM.add(createTypeBasedAAWrapperPass());
  PM.add(createScopedNoAliasAAWrapperPass());
}

void PassManagerBuilder::addInstructionCombiningPass(
    legacy::PassManagerBase &PM) const {
  bool ExpensiveCombines = OptLevel > 2;
  PM.add(createInstructionCombiningPass(ExpensiveCombines));
}

void PassManagerBuilder::populateFunctionPassManager(
    legacy::FunctionPassManager &FPM) {
  addExtensionsToPM(EP_EarlyAsPossible, FPM);

  // Add LibraryInfo if we have some.
  if (LibraryInfo)
    FPM.add(new TargetLibraryInfoWrapperPass(*LibraryInfo));

  if (OptLevel == 0) return;

  addInitialAliasAnalysisPasses(FPM);

  FPM.add(createCFGSimplificationPass());
  FPM.add(createSROAPass());
  FPM.add(createEarlyCSEPass());
  if (EnableGVNHoist)
    FPM.add(createGVNHoistPass());
  FPM.add(createLowerExpectIntrinsicPass());
}
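
// Illustrative only (client code driving the populated FunctionPassManager
// over a module M):
//   FPM.doInitialization();
//   for (Function &F : M)
//     FPM.run(F);
//   FPM.doFinalization();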

// Add the PGO instrumentation-generation or -use pass, as the options specify.
void PassManagerBuilder::addPGOInstrPasses(legacy::PassManagerBase &MPM) {
  if (!EnablePGOInstrGen && PGOInstrUse.empty())
    return;
  // Perform the preinline and cleanup passes for O1 and above,
  // and avoid doing them if optimizing for size.
  if (OptLevel > 0 && SizeLevel == 0 && !DisablePreInliner) {
    // Create the preinline pass. We construct an InlineParams object and
    // specify the threshold here to keep the regular inliner's command line
    // options from influencing pre-inlining. The only fields of InlineParams
    // we care about are DefaultThreshold and HintThreshold.
    InlineParams IP;
    IP.DefaultThreshold = PreInlineThreshold;
    // FIXME: The hint threshold has the same value used by the regular inliner.
    // This should probably be lowered after performance testing.
    IP.HintThreshold = 325;

    MPM.add(createFunctionInliningPass(IP));
    MPM.add(createSROAPass());
    MPM.add(createEarlyCSEPass());             // Catch trivial redundancies
    MPM.add(createCFGSimplificationPass());    // Merge & remove BBs
    MPM.add(createInstructionCombiningPass()); // Combine silly seq's
    addExtensionsToPM(EP_Peephole, MPM);
  }
  if (EnablePGOInstrGen) {
    MPM.add(createPGOInstrumentationGenLegacyPass());
    // Add the profile lowering pass.
    InstrProfOptions Options;
    if (!PGOInstrGen.empty())
      Options.InstrProfileOutput = PGOInstrGen;
    MPM.add(createInstrProfilingLegacyPass(Options));
  }
  if (!PGOInstrUse.empty())
    MPM.add(createPGOInstrumentationUseLegacyPass(PGOInstrUse));
}

void PassManagerBuilder::addFunctionSimplificationPasses(
    legacy::PassManagerBase &MPM) {
  // Start of function pass.
  // Break up aggregate allocas, using SSAUpdater.
  MPM.add(createSROAPass());
  MPM.add(createEarlyCSEPass()); // Catch trivial redundancies
  // Speculative execution if the target has divergent branches; otherwise nop.
  MPM.add(createSpeculativeExecutionIfHasBranchDivergencePass());
  MPM.add(createJumpThreadingPass());              // Thread jumps.
  MPM.add(createCorrelatedValuePropagationPass()); // Propagate conditionals
  MPM.add(createCFGSimplificationPass());          // Merge & remove BBs
  // Combine silly seq's
  addInstructionCombiningPass(MPM);
  if (SizeLevel == 0 && !DisableLibCallsShrinkWrap)
    MPM.add(createLibCallsShrinkWrapPass());
  addExtensionsToPM(EP_Peephole, MPM);

  MPM.add(createTailCallEliminationPass()); // Eliminate tail calls
  MPM.add(createCFGSimplificationPass());   // Merge & remove BBs
  MPM.add(createReassociatePass());         // Reassociate expressions
  // Rotate Loop - disable header duplication at -Oz
  MPM.add(createLoopRotatePass(SizeLevel == 2 ? 0 : -1));
  MPM.add(createLICMPass()); // Hoist loop invariants
  MPM.add(createLoopUnswitchPass(SizeLevel || OptLevel < 3, DivergentTarget));
  MPM.add(createCFGSimplificationPass());
  addInstructionCombiningPass(MPM);
  MPM.add(createIndVarSimplifyPass()); // Canonicalize indvars
  MPM.add(createLoopIdiomPass());      // Recognize idioms like memset.
  addExtensionsToPM(EP_LateLoopOptimizations, MPM);
  MPM.add(createLoopDeletionPass()); // Delete dead loops

  if (EnableLoopInterchange) {
    MPM.add(createLoopInterchangePass()); // Interchange loops
    MPM.add(createCFGSimplificationPass());
  }
  if (!DisableUnrollLoops)
    MPM.add(createSimpleLoopUnrollPass(OptLevel)); // Unroll small loops
  addExtensionsToPM(EP_LoopOptimizerEnd, MPM);

  if (OptLevel > 1) {
    MPM.add(createMergedLoadStoreMotionPass()); // Merge ld/st in diamonds
    MPM.add(NewGVN ? createNewGVNPass()
                   : createGVNPass(DisableGVNLoadPRE)); // Remove redundancies
  }
  MPM.add(createMemCpyOptPass()); // Remove memcpy / form memset
  MPM.add(createSCCPPass());      // Constant prop with SCCP

  // Delete dead bit computations (instcombine runs after to fold away the dead
  // computations, and then ADCE will run later to exploit any new DCE
  // opportunities that creates).
  MPM.add(createBitTrackingDCEPass()); // Delete dead bit computations

  // Run instcombine after redundancy elimination to exploit opportunities
  // opened up by them.
  addInstructionCombiningPass(MPM);
  addExtensionsToPM(EP_Peephole, MPM);
  MPM.add(createJumpThreadingPass()); // Thread jumps
  MPM.add(createCorrelatedValuePropagationPass());
  MPM.add(createDeadStoreEliminationPass()); // Delete dead stores
  MPM.add(createLICMPass());

  addExtensionsToPM(EP_ScalarOptimizerLate, MPM);

  if (RerollLoops)
    MPM.add(createLoopRerollPass());
  if (!RunSLPAfterLoopVectorization) {
    if (SLPVectorize)
      MPM.add(createSLPVectorizerPass()); // Vectorize parallel scalar chains.

    if (BBVectorize) {
      MPM.add(createBBVectorizePass());
      addInstructionCombiningPass(MPM);
      addExtensionsToPM(EP_Peephole, MPM);
      if (OptLevel > 1 && UseGVNAfterVectorization)
        MPM.add(NewGVN
                    ? createNewGVNPass()
                    : createGVNPass(DisableGVNLoadPRE)); // Remove redundancies
      else
        MPM.add(createEarlyCSEPass()); // Catch trivial redundancies

      // BBVectorize may have significantly shortened a loop body; unroll again.
      if (!DisableUnrollLoops)
        MPM.add(createLoopUnrollPass(OptLevel));
    }
  }

  if (LoadCombine)
    MPM.add(createLoadCombinePass());

  MPM.add(createAggressiveDCEPass());     // Delete dead instructions
  MPM.add(createCFGSimplificationPass()); // Merge & remove BBs
  // Clean up after everything.
  addInstructionCombiningPass(MPM);
  addExtensionsToPM(EP_Peephole, MPM);
}

void PassManagerBuilder::populateModulePassManager(
    legacy::PassManagerBase &MPM) {
  if (!PGOSampleUse.empty()) {
    MPM.add(createPruneEHPass());
    MPM.add(createSampleProfileLoaderPass(PGOSampleUse));
  }

  // Allow forcing function attributes as a debugging and tuning aid.
  MPM.add(createForceFunctionAttrsLegacyPass());

  // If all optimizations are disabled, just run the always-inline pass and,
  // if enabled, the function merging pass.
  if (OptLevel == 0) {
    addPGOInstrPasses(MPM);
    if (Inliner) {
      MPM.add(Inliner);
      Inliner = nullptr;
    }

    // FIXME: The BarrierNoopPass is a HACK! The inliner pass above implicitly
    // creates a CGSCC pass manager, but we don't want to add extensions into
    // that pass manager. To prevent this we insert a no-op module pass to reset
    // the pass manager to get the same behavior as EP_OptimizerLast in non-O0
    // builds. The function merging pass is
    if (MergeFunctions)
      MPM.add(createMergeFunctionsPass());
    else if (!GlobalExtensions->empty() || !Extensions.empty())
      MPM.add(createBarrierNoopPass());

    if (PrepareForThinLTO)
      // Rename anon globals to be able to export them in the summary.
      MPM.add(createNameAnonGlobalPass());

    addExtensionsToPM(EP_EnabledOnOptLevel0, MPM);
    return;
  }

  // Add LibraryInfo if we have some.
  if (LibraryInfo)
    MPM.add(new TargetLibraryInfoWrapperPass(*LibraryInfo));

  addInitialAliasAnalysisPasses(MPM);

  // For ThinLTO there are two passes of indirect call promotion. The
  // first is during the compile phase when PerformThinLTO=false and
  // intra-module indirect call targets are promoted. The second is during
  // the ThinLTO backend when PerformThinLTO=true, when we promote imported
  // inter-module indirect calls. For that we perform indirect call promotion
  // earlier in the pass pipeline, here before globalopt. Otherwise imported
  // available_externally functions look unreferenced and are removed.
  if (PerformThinLTO)
    MPM.add(createPGOIndirectCallPromotionLegacyPass(/*InLTO = */ true,
                                                     !PGOSampleUse.empty()));

  // For SamplePGO in ThinLTO compile phase, we do not want to unroll loops
  // as it will change the CFG too much to make the 2nd profile annotation
  // in backend more difficult.
  bool PrepareForThinLTOUsingPGOSampleProfile =
      PrepareForThinLTO && !PGOSampleUse.empty();
  if (PrepareForThinLTOUsingPGOSampleProfile)
    DisableUnrollLoops = true;

  if (!DisableUnitAtATime) {
    // Infer attributes about declarations if possible.
    MPM.add(createInferFunctionAttrsLegacyPass());

    addExtensionsToPM(EP_ModuleOptimizerEarly, MPM);

    MPM.add(createIPSCCPPass());          // IP SCCP
    MPM.add(createGlobalOptimizerPass()); // Optimize out global vars
    // Promote any localized global vars.
    MPM.add(createPromoteMemoryToRegisterPass());

    MPM.add(createDeadArgEliminationPass()); // Dead argument elimination

    addInstructionCombiningPass(MPM); // Clean up after IPCP & DAE
    addExtensionsToPM(EP_Peephole, MPM);
    MPM.add(createCFGSimplificationPass()); // Clean up after IPCP & DAE
  }

  // For SamplePGO in ThinLTO compile phase, we do not want to do indirect
  // call promotion as it will change the CFG too much to make the 2nd
  // profile annotation in backend more difficult.
  if (!PerformThinLTO && !PrepareForThinLTOUsingPGOSampleProfile) {
    /// PGO instrumentation is added during the compile phase for ThinLTO, do
    /// not run it a second time
    addPGOInstrPasses(MPM);
    // Indirect call promotion that promotes intra-module targets only.
    // For ThinLTO this is done earlier due to interactions with globalopt
    // for imported functions.
    MPM.add(
        createPGOIndirectCallPromotionLegacyPass(false, !PGOSampleUse.empty()));
  }

  if (EnableNonLTOGlobalsModRef)
    // We add a module alias analysis pass here. In part due to bugs in the
    // analysis infrastructure this "works" in that the analysis stays alive
    // for the entire SCC pass run below.
    MPM.add(createGlobalsAAWrapperPass());

  // Start of CallGraph SCC passes.
  if (!DisableUnitAtATime)
    MPM.add(createPruneEHPass()); // Remove dead EH info
  if (Inliner) {
    MPM.add(Inliner);
    Inliner = nullptr;
  }
  if (!DisableUnitAtATime)
    MPM.add(createPostOrderFunctionAttrsLegacyPass());
  if (OptLevel > 2)
    MPM.add(createArgumentPromotionPass()); // Scalarize uninlined fn args

  addExtensionsToPM(EP_CGSCCOptimizerLate, MPM);
  addFunctionSimplificationPasses(MPM);

  // FIXME: This is a HACK! The inliner pass above implicitly creates a CGSCC
  // pass manager that we are specifically trying to avoid. To prevent this
  // we must insert a no-op module pass to reset the pass manager.
  MPM.add(createBarrierNoopPass());

  if (!DisableUnitAtATime && OptLevel > 1 && !PrepareForLTO &&
      !PrepareForThinLTO)
    // Remove avail extern fns and globals definitions if we aren't
    // compiling an object file for later LTO. For LTO we want to preserve
    // these so they are eligible for inlining at link-time. Note if they
    // are unreferenced they will be removed by GlobalDCE later, so
    // this only impacts referenced available externally globals.
    // Eventually they will be suppressed during codegen, but eliminating
    // here enables more opportunity for GlobalDCE as it may make
    // globals referenced by available external functions dead
    // and saves running remaining passes on the eliminated functions.
    MPM.add(createEliminateAvailableExternallyPass());

  if (!DisableUnitAtATime)
    MPM.add(createReversePostOrderFunctionAttrsPass());

  // If we are planning to perform ThinLTO later, let's not bloat the code with
  // unrolling/vectorization/... now. We'll first run the inliner + CGSCC passes
  // during ThinLTO and perform the rest of the optimizations afterward.
  if (PrepareForThinLTO) {
    // Reduce the size of the IR as much as possible.
    MPM.add(createGlobalOptimizerPass());
    // Rename anon globals to be able to export them in the summary.
    MPM.add(createNameAnonGlobalPass());
    return;
  }

  if (PerformThinLTO)
    // Optimize globals now when performing ThinLTO, this enables more
    // optimizations later.
    MPM.add(createGlobalOptimizerPass());

  // Schedule LoopVersioningLICM after inlining is over, because after that we
  // may see more accurate aliasing. We run it late because versioning the loop
  // too early may prevent further inlining due to the increase in code size.
  // By placing it just after inlining, the optimizations that run later can
  // benefit from the no-alias assumption in the cloned loop.
  if (UseLoopVersioningLICM) {
    MPM.add(createLoopVersioningLICMPass()); // Do LoopVersioningLICM
    MPM.add(createLICMPass());               // Hoist loop invariants
  }

  if (EnableNonLTOGlobalsModRef)
    // We add a fresh GlobalsModRef run at this point. This is particularly
    // useful as the above will have inlined, DCE'ed, and function-attr
    // propagated everything. We should at this point have a reasonably minimal
    // and richly annotated call graph. By computing aliasing and mod/ref
    // information for all local globals here, the late loop passes and notably
    // the vectorizer will be able to use them to help recognize vectorizable
    // memory operations.
    //
    // Note that this relies on a bug in the pass manager which preserves
    // a module analysis into a function pass pipeline (and throughout it) so
    // long as the first function pass doesn't invalidate the module analysis.
    // Thus both Float2Int and LoopRotate have to preserve AliasAnalysis for
    // this to work. Fortunately, it is trivial to preserve AliasAnalysis
    // (doing nothing preserves it as it is required to be conservatively
    // correct in the face of IR changes).
MPM.add(createGlobalsAAWrapperPass());
|
2015-07-23 17:34:01 +08:00
|
|
|
|
2017-01-03 01:49:18 +08:00
|
|
|
MPM.add(createFloat2IntPass());
|
2015-03-27 18:36:57 +08:00
|
|
|
|
2015-07-16 16:20:37 +08:00
|
|
|
addExtensionsToPM(EP_VectorizerStart, MPM);
|
|
|
|
|
2014-10-14 08:31:29 +08:00
|
|
|
// Re-rotate loops in all our loop nests. These may have fallout out of
|
|
|
|
// rotated form due to GVN or other transformations, and the vectorizer relies
|
2015-07-10 18:37:09 +08:00
|
|
|
// on the rotated form. Disable header duplication at -Oz.
|
|
|
|
MPM.add(createLoopRotatePass(SizeLevel == 2 ? 0 : -1));
|
2014-10-14 08:31:29 +08:00
|
|
|
|
2015-05-14 20:05:18 +08:00
|
|
|
// Distribute loops to allow partial vectorization. I.e. isolate dependences
|
2016-04-27 13:28:18 +08:00
|
|
|
// into separate loop that would otherwise inhibit vectorization. This is
|
|
|
|
// currently only performed for loops marked with the metadata
|
|
|
|
// llvm.loop.distribute=true or when -enable-loop-distribute is specified.
|
2016-12-21 12:07:40 +08:00
|
|
|
MPM.add(createLoopDistributePass());
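As a rough source-level sketch of what LoopDistribute does (illustrative only; the arrays and names are made up, and in practice the pass also emits runtime no-alias checks): the loop-carried recurrence is split into its own loop so the independent work can be vectorized.

  void distribute_sketch(int *a, const int *b, int *c, const int *d, int n) {
    for (int i = 1; i < n; ++i) {
      a[i] = a[i - 1] + b[i]; // loop-carried dependence; stays scalar
      c[i] = d[i] * 2;        // independent; vectorizable once isolated
    }
    // After distribution (conceptually):
    //   for (int i = 1; i < n; ++i) a[i] = a[i - 1] + b[i];
    //   for (int i = 1; i < n; ++i) c[i] = d[i] * 2;
  }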
|
2015-05-14 20:05:18 +08:00
|
|
|
|
2013-12-06 05:20:02 +08:00
|
|
|
MPM.add(createLoopVectorizePass(DisableUnrollLoops, LoopVectorize));
|
LLE 6/6: Add LoopLoadElimination pass
Summary:
The goal of this pass is to perform store-to-load forwarding across the
backedge of a loop. E.g.:
for (i)
A[i + 1] = A[i] + B[i]
=>
T = A[0]
for (i)
T = T + B[i]
A[i + 1] = T
The pass relies on loop dependence analysis via LoopAccessAnalysis to
find opportunities of loop-carried dependences with a distance of one
between a store and a load. Since it's using LoopAccessAnalysis, it was
easy to also add support for versioning away may-aliasing intervening
stores that would otherwise prevent this transformation.
This optimization is also performed by Load-PRE in GVN without the
option of multi-versioning. As was discussed with Daniel Berlin in
http://reviews.llvm.org/D9548, this is inferior to a more loop-aware
solution applied here. Hopefully, we will be able to remove some
complexity from GVN/MemorySSA as a consequence.
In the long run, we may want to extend this pass (or create a new one if
there is little overlap) to also eliminate loop-independent redundant
loads and stores that *require* versioning due to may-aliasing
intervening stores/loads. I have some motivating cases for store
elimination. My plan right now is to wait for MemorySSA to come online
first rather than using memdep for this.
The main motivation for this pass is the 456.hmmer loop in SPECint2006
where after distributing the original loop and vectorizing the top part,
we are left with the critical path exposed in the bottom loop. Being
able to promote the memory dependence into a register dependence (even
though the HW does perform store-to-load forwarding as well) results in a
major gain (~20%). This gain also transfers over to x86: it's
around 8-10%.
Right now the pass is off by default and can be enabled
with -enable-loop-load-elim. On the LNT testsuite, there are two
performance changes (negative number -> improvement):
1. -28% in Polybench/linear-algebra/solvers/dynprog: the length of the
critical paths is reduced
2. +2% in Polybench/stencils/adi: Unfortunately, I couldn't reproduce this
outside of LNT
The pass is scheduled after the loop vectorizer (which is after loop
distribution). The rationale is to try to reuse LAA state rather than
recomputing it. The order between LV and LLE is not critical because
normally LV does not touch the scalar st->ld forwarding cases where
vectorizing would prevent the CPU's own st->ld forwarding from kicking in.
LoopLoadElimination requires LAA to provide the full set of dependences
(including forward dependences). LAA is known to omit loop-independent
dependences in certain situations. The big comment before
removeDependencesFromMultipleStores explains why this should not occur
for the cases that we're interested in.
Reviewers: dberlin, hfinkel
Subscribers: junbuml, dberlin, mssimpso, rengolin, sanjoy, llvm-commits
Differential Revision: http://reviews.llvm.org/D13259
llvm-svn: 252017
2015-11-04 07:50:08 +08:00
|
|
|
|
|
|
|
// Eliminate loads by forwarding stores from the previous iteration to loads
|
|
|
|
// of the current iteration.
|
|
|
|
if (EnableLoopLoadElim)
|
|
|
|
MPM.add(createLoopLoadEliminationPass());
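A compilable restatement of the pseudocode from the commit message above (illustrative only; the names are made up): the load of A[i] in one iteration reads the value stored in the previous iteration, so the pass forwards it through a scalar temporary.

  void lle_sketch(int *A, const int *B, int n) {
    for (int i = 0; i < n; ++i)
      A[i + 1] = A[i] + B[i];   // loop-carried store-to-load dependence
    // After LoopLoadElimination (conceptually):
    //   int T = A[0];
    //   for (int i = 0; i < n; ++i) {
    //     T = T + B[i];
    //     A[i + 1] = T;
    //   }
  }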
|
|
|
|
|
2013-12-06 05:20:02 +08:00
|
|
|
// FIXME: Because of #pragma vectorize enable, the passes below are always
|
|
|
|
// inserted in the pipeline, even when the vectorizer doesn't run (ex. when
|
|
|
|
// on -O1 and no #pragma is found). Would be good to have these two passes
|
|
|
|
// as function calls, so that we can only pass them when the vectorizer
|
|
|
|
// changed the code.
|
2016-03-10 02:47:11 +08:00
|
|
|
addInstructionCombiningPass(MPM);
|
2014-10-14 08:31:29 +08:00
|
|
|
if (OptLevel > 1 && ExtraVectorizerPasses) {
|
|
|
|
// At higher optimization levels, try to clean up any runtime overlap and
|
|
|
|
// alignment checks inserted by the vectorizer. We want to track correlated
|
|
|
|
// runtime checks for two inner loops in the same outer loop, fold any
|
|
|
|
// common computations, hoist loop-invariant aspects out of any outer loop,
|
|
|
|
// and unswitch the runtime checks if possible. Once hoisted, we may have
|
|
|
|
// dead (or speculatable) control flows or more combining opportunities.
|
|
|
|
MPM.add(createEarlyCSEPass());
|
|
|
|
MPM.add(createCorrelatedValuePropagationPass());
|
2016-03-10 02:47:11 +08:00
|
|
|
addInstructionCombiningPass(MPM);
|
2014-10-14 08:31:29 +08:00
|
|
|
MPM.add(createLICMPass());
|
2017-03-18 01:13:41 +08:00
|
|
|
MPM.add(createLoopUnswitchPass(SizeLevel || OptLevel < 3, DivergentTarget));
|
2014-10-14 08:31:29 +08:00
|
|
|
MPM.add(createCFGSimplificationPass());
|
2016-03-10 02:47:11 +08:00
|
|
|
addInstructionCombiningPass(MPM);
|
2014-10-14 08:31:29 +08:00
|
|
|
}
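For the unswitching step in particular, a generic source-level sketch (illustrative only; the names and the shape of the check are made up): once the check is loop-invariant, unswitching clones the loop and moves the branch outside it.

  void unswitch_sketch(int *a, int n, bool checked) {
    for (int i = 0; i < n; ++i) {
      if (checked)              // loop-invariant condition
        a[i] += 1;
      else
        a[i] *= 2;
    }
    // After unswitching (conceptually):
    //   if (checked) { for (int i = 0; i < n; ++i) a[i] += 1; }
    //   else         { for (int i = 0; i < n; ++i) a[i] *= 2; }
  }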
|
2014-08-06 20:56:19 +08:00
|
|
|
|
|
|
|
if (RunSLPAfterLoopVectorization) {
|
2014-10-14 08:31:29 +08:00
|
|
|
if (SLPVectorize) {
|
2014-08-06 20:56:19 +08:00
|
|
|
MPM.add(createSLPVectorizerPass()); // Vectorize parallel scalar chains.
|
2014-10-14 08:31:29 +08:00
|
|
|
if (OptLevel > 1 && ExtraVectorizerPasses) {
|
|
|
|
MPM.add(createEarlyCSEPass());
|
|
|
|
}
|
|
|
|
}
|
2014-08-06 20:56:19 +08:00
|
|
|
|
|
|
|
if (BBVectorize) {
|
|
|
|
MPM.add(createBBVectorizePass());
|
2016-03-10 02:47:11 +08:00
|
|
|
addInstructionCombiningPass(MPM);
|
2014-08-06 20:56:19 +08:00
|
|
|
addExtensionsToPM(EP_Peephole, MPM);
|
|
|
|
if (OptLevel > 1 && UseGVNAfterVectorization)
|
2016-12-27 02:26:19 +08:00
|
|
|
MPM.add(NewGVN
|
|
|
|
? createNewGVNPass()
|
|
|
|
: createGVNPass(DisableGVNLoadPRE)); // Remove redundancies
|
2014-08-06 20:56:19 +08:00
|
|
|
else
|
|
|
|
MPM.add(createEarlyCSEPass()); // Catch trivial redundancies
|
|
|
|
|
|
|
|
// BBVectorize may have significantly shortened a loop body; unroll again.
|
|
|
|
if (!DisableUnrollLoops)
|
Increases full-unroll threshold.
Summary:
The default threshold for full unrolling is too conservative. This patch doubles the full-unroll threshold.
This change will affect the following SPEC CPU2006 benchmarks (performance numbers were collected on Intel Sandy Bridge):
Performance:
403 0.11%
433 0.51%
445 0.48%
447 3.50%
453 1.49%
464 0.75%
Code size:
403 0.56%
433 0.96%
445 2.16%
447 2.96%
453 0.94%
464 8.02%
The compile-time overhead is similar to the code-size increase.
Reviewers: davidxl, mkuper, mzolotukhin, hfinkel, chandlerc
Reviewed By: hfinkel, chandlerc
Subscribers: mehdi_amini, zzheng, efriedma, haicheng, hfinkel, llvm-commits
Differential Revision: https://reviews.llvm.org/D28368
llvm-svn: 295538
2017-02-18 11:46:51 +08:00
|
|
|
MPM.add(createLoopUnrollPass(OptLevel));
|
2014-08-06 20:56:19 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-05-25 18:27:02 +08:00
|
|
|
addExtensionsToPM(EP_Peephole, MPM);
|
2017-03-26 14:44:08 +08:00
|
|
|
MPM.add(createLateCFGSimplificationPass()); // Switches to lookup tables
|
2016-03-10 02:47:11 +08:00
|
|
|
addInstructionCombiningPass(MPM);
|
2013-06-24 15:21:47 +08:00
|
|
|
|
2015-03-12 13:36:01 +08:00
|
|
|
if (!DisableUnrollLoops) {
|
2017-02-18 11:46:51 +08:00
|
|
|
MPM.add(createLoopUnrollPass(OptLevel)); // Unroll small loops
|
2014-04-01 07:23:51 +08:00
|
|
|
|
2015-05-15 06:02:54 +08:00
|
|
|
// LoopUnroll may generate some redundancy to clean up.
|
2016-03-10 02:47:11 +08:00
|
|
|
addInstructionCombiningPass(MPM);
|
2015-05-15 06:02:54 +08:00
|
|
|
|
2015-03-12 13:36:01 +08:00
|
|
|
// Runtime unrolling will introduce a runtime check in the loop prologue. If the
|
|
|
|
// unrolled loop is an inner loop, then the prologue will be inside the
|
|
|
|
// outer loop. The LICM pass can hoist the runtime check out if the
|
|
|
|
// checked value is loop invariant.
|
|
|
|
MPM.add(createLICMPass());
|
2016-11-09 08:58:19 +08:00
|
|
|
}
|
2015-03-12 13:36:01 +08:00
|
|
|
|
2014-09-08 04:05:11 +08:00
|
|
|
// After vectorization and unrolling, assume intrinsics may tell us more
|
|
|
|
// about pointer alignments.
|
|
|
|
MPM.add(createAlignmentFromAssumptionsPass());
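At the source level, such an assumption typically comes from __builtin_assume_aligned, which Clang lowers to an llvm.assume that this pass can consume. A minimal sketch under that assumption (the function is illustrative, not part of this file):

  void align_sketch(float *p, int n) {
    float *q = static_cast<float *>(__builtin_assume_aligned(p, 32));
    for (int i = 0; i < n; ++i)
      q[i] *= 2.0f;   // loads/stores of q can now be treated as 32-byte aligned
  }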
|
|
|
|
|
2011-08-03 05:50:27 +08:00
|
|
|
if (!DisableUnitAtATime) {
|
|
|
|
// FIXME: We shouldn't bother with this anymore.
|
|
|
|
MPM.add(createStripDeadPrototypesPass()); // Get rid of dead prototypes
|
|
|
|
|
2012-09-29 05:23:26 +08:00
|
|
|
// GlobalOpt already deletes dead functions and globals; at -O2, try a
|
2011-08-03 05:50:27 +08:00
|
|
|
// late pass of GlobalDCE. It is capable of deleting dead cycles.
|
2012-09-29 05:23:26 +08:00
|
|
|
if (OptLevel > 1) {
|
2011-08-03 05:50:27 +08:00
|
|
|
MPM.add(createGlobalDCEPass()); // Remove dead fns and globals.
|
|
|
|
MPM.add(createConstantMergePass()); // Merge dup global constants
|
2012-09-29 05:23:26 +08:00
|
|
|
}
|
2011-08-03 05:50:27 +08:00
|
|
|
}
|
2014-09-14 05:46:00 +08:00
|
|
|
|
|
|
|
if (MergeFunctions)
|
|
|
|
MPM.add(createMergeFunctionsPass());
|
|
|
|
|
2016-11-11 01:42:18 +08:00
|
|
|
// LoopSink pass sinks instructions hoisted by LICM, which serves as a
|
|
|
|
// canonicalization pass that enables other optimizations. As a result,
|
|
|
|
// the LoopSink pass needs to be a very late IR pass to avoid undoing LICM's
|
|
|
|
// results too early.
|
2016-11-09 08:58:19 +08:00
|
|
|
MPM.add(createLoopSinkPass());
|
|
|
|
// Get rid of LCSSA nodes.
|
|
|
|
MPM.add(createInstructionSimplifierPass());
|
2012-03-24 07:22:59 +08:00
|
|
|
addExtensionsToPM(EP_OptimizerLast, MPM);
|
2011-08-03 05:50:27 +08:00
|
|
|
}
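For reference, a minimal sketch of how a frontend typically drives populateFunctionPassManager and populateModulePassManager (assumptions: the headers shown are sufficient, M is a valid llvm::Module, and the inliner threshold 225 is just an arbitrary example value):

  #include "llvm/IR/LegacyPassManager.h"
  #include "llvm/IR/Module.h"
  #include "llvm/Transforms/IPO.h"
  #include "llvm/Transforms/IPO/PassManagerBuilder.h"

  void optimizeModuleSketch(llvm::Module &M) {
    llvm::PassManagerBuilder PMB;
    PMB.OptLevel = 2;
    PMB.SizeLevel = 0;
    PMB.Inliner = llvm::createFunctionInliningPass(225); // ownership passes to the builder
    PMB.LoopVectorize = true;
    PMB.SLPVectorize = true;

    llvm::legacy::FunctionPassManager FPM(&M);
    llvm::legacy::PassManager MPM;
    PMB.populateFunctionPassManager(FPM);
    PMB.populateModulePassManager(MPM);

    FPM.doInitialization();
    for (llvm::Function &F : M)
      FPM.run(F);
    FPM.doFinalization();
    MPM.run(M);
  }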
|
|
|
|
|
2015-02-13 18:01:29 +08:00
|
|
|
void PassManagerBuilder::addLTOOptimizationPasses(legacy::PassManagerBase &PM) {
|
2016-05-26 05:26:14 +08:00
|
|
|
// Remove unused virtual tables to improve the quality of code generated by
|
|
|
|
// whole-program devirtualization and bitset lowering.
|
|
|
|
PM.add(createGlobalDCEPass());
|
|
|
|
|
2011-08-03 05:50:27 +08:00
|
|
|
// Provide AliasAnalysis services for optimizations.
|
|
|
|
addInitialAliasAnalysisPasses(PM);
|
|
|
|
|
2015-12-27 16:13:45 +08:00
|
|
|
// Allow forcing function attributes as a debugging and tuning aid.
|
|
|
|
PM.add(createForceFunctionAttrsLegacyPass());
|
|
|
|
|
2015-12-27 16:41:34 +08:00
|
|
|
// Infer attributes about declarations if possible.
|
|
|
|
PM.add(createInferFunctionAttrsLegacyPass());
|
|
|
|
|
2016-05-26 05:26:14 +08:00
|
|
|
if (OptLevel > 1) {
|
|
|
|
// Indirect call promotion. This should promote all the targets that are
|
|
|
|
// left by the earlier promotion pass that promotes intra-module targets.
|
|
|
|
// This two-step promotion is to save compile time. For LTO, it should
|
|
|
|
// produce the same result as if we only do promotion here.
|
2017-02-24 06:15:18 +08:00
|
|
|
PM.add(
|
|
|
|
createPGOIndirectCallPromotionLegacyPass(true, !PGOSampleUse.empty()));
|
2016-05-26 05:26:14 +08:00
|
|
|
|
|
|
|
// Propagate constants at call sites into the functions they call. This
|
|
|
|
// opens opportunities for globalopt (and inlining) by substituting function
|
|
|
|
// pointers passed as arguments to direct uses of functions.
|
|
|
|
PM.add(createIPSCCPPass());
|
|
|
|
}
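A tiny sketch of the function-pointer case mentioned in the comment above (illustrative only; the names are made up): IPSCCP can prove that fn is always add1 and rewrite the indirect call into a direct one, which in turn unblocks inlining and globalopt.

  static int add1(int x) { return x + 1; }

  static int apply(int (*fn)(int), int v) {
    return fn(v);               // after IPSCCP: return add1(v);
  }

  int ipsccp_sketch(int v) { return apply(add1, v); }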
|
2011-08-03 05:50:27 +08:00
|
|
|
|
2016-05-26 05:26:14 +08:00
|
|
|
// Infer attributes about definitions. The readnone attribute in particular is
|
|
|
|
// required for virtual constant propagation.
|
2016-02-18 19:03:11 +08:00
|
|
|
PM.add(createPostOrderFunctionAttrsLegacyPass());
|
2016-01-08 18:55:52 +08:00
|
|
|
PM.add(createReversePostOrderFunctionAttrsPass());
|
2016-05-26 05:26:14 +08:00
|
|
|
|
2016-11-17 07:40:26 +08:00
|
|
|
// Split globals using inrange annotations on GEP indices. This can help
|
|
|
|
// improve the quality of generated code when virtual constant propagation or
|
|
|
|
// control flow integrity are enabled.
|
|
|
|
PM.add(createGlobalSplitPass());
|
|
|
|
|
2016-05-26 05:26:14 +08:00
|
|
|
// Apply whole-program devirtualization and virtual constant propagation.
|
2017-03-23 02:22:59 +08:00
|
|
|
PM.add(createWholeProgramDevirtPass(ExportSummary, nullptr));
|
2016-05-26 05:26:14 +08:00
|
|
|
|
|
|
|
// That's all we need at opt level 1.
|
|
|
|
if (OptLevel == 1)
|
|
|
|
return;
|
|
|
|
|
|
|
|
// Now that we internalized some globals, see if we can hack on them!
|
2011-08-03 05:50:27 +08:00
|
|
|
PM.add(createGlobalOptimizerPass());
|
2015-12-15 17:24:01 +08:00
|
|
|
// Promote any localized global vars.
|
|
|
|
PM.add(createPromoteMemoryToRegisterPass());
|
2011-08-03 05:50:27 +08:00
|
|
|
|
|
|
|
// Linking modules together can lead to duplicated global constants, only
|
|
|
|
// keep one copy of each constant.
|
|
|
|
PM.add(createConstantMergePass());
|
|
|
|
|
|
|
|
// Remove unused arguments from functions.
|
|
|
|
PM.add(createDeadArgEliminationPass());
|
|
|
|
|
|
|
|
// Reduce the code after globalopt and ipsccp. Both can open up significant
|
|
|
|
// simplification opportunities, and both can propagate functions through
|
|
|
|
// function pointers. When this happens, we often have to resolve varargs
|
|
|
|
// calls, etc, so let instcombine do this.
|
2016-03-10 02:47:11 +08:00
|
|
|
addInstructionCombiningPass(PM);
|
2014-05-25 18:27:02 +08:00
|
|
|
addExtensionsToPM(EP_Peephole, PM);
|
2011-08-03 05:50:27 +08:00
|
|
|
|
|
|
|
// Inline small functions
|
2014-08-21 21:35:30 +08:00
|
|
|
bool RunInliner = Inliner;
|
|
|
|
if (RunInliner) {
|
|
|
|
PM.add(Inliner);
|
|
|
|
Inliner = nullptr;
|
|
|
|
}
|
2011-08-03 05:50:27 +08:00
|
|
|
|
|
|
|
PM.add(createPruneEHPass()); // Remove dead EH info.
|
|
|
|
|
|
|
|
// Optimize globals again if we ran the inliner.
|
|
|
|
if (RunInliner)
|
|
|
|
PM.add(createGlobalOptimizerPass());
|
|
|
|
PM.add(createGlobalDCEPass()); // Remove dead functions.
|
|
|
|
|
|
|
|
// If we didn't decide to inline a function, check to see if we can
|
|
|
|
// transform it to pass arguments by value instead of by reference.
|
|
|
|
PM.add(createArgumentPromotionPass());
|
|
|
|
|
|
|
|
// The IPO passes may leave cruft around. Clean up after them.
|
2016-03-10 02:47:11 +08:00
|
|
|
addInstructionCombiningPass(PM);
|
2014-05-25 18:27:02 +08:00
|
|
|
addExtensionsToPM(EP_Peephole, PM);
|
2011-08-03 05:50:27 +08:00
|
|
|
PM.add(createJumpThreadingPass());
|
2013-08-30 08:48:37 +08:00
|
|
|
|
2011-08-03 05:50:27 +08:00
|
|
|
// Break up allocas
|
2016-06-15 08:19:09 +08:00
|
|
|
PM.add(createSROAPass());
|
2011-08-03 05:50:27 +08:00
|
|
|
|
|
|
|
// Run a few AA driven optimizations here and now, to cleanup the code.
|
2016-02-18 19:03:11 +08:00
|
|
|
PM.add(createPostOrderFunctionAttrsLegacyPass()); // Add nocapture.
|
2015-09-10 01:55:00 +08:00
|
|
|
PM.add(createGlobalsAAWrapperPass()); // IP alias analysis.
|
2011-08-03 05:50:27 +08:00
|
|
|
|
2012-04-03 06:16:50 +08:00
|
|
|
PM.add(createLICMPass()); // Hoist loop invariants.
|
2017-01-29 07:45:37 +08:00
|
|
|
PM.add(createMergedLoadStoreMotionPass()); // Merge ld/st in diamonds.
|
2016-12-27 02:26:19 +08:00
|
|
|
PM.add(NewGVN ? createNewGVNPass()
|
|
|
|
: createGVNPass(DisableGVNLoadPRE)); // Remove redundancies.
|
2012-04-03 06:16:50 +08:00
|
|
|
PM.add(createMemCpyOptPass()); // Remove dead memcpys.
|
2013-08-30 08:48:37 +08:00
|
|
|
|
2011-08-03 05:50:27 +08:00
|
|
|
// Nuke dead stores.
|
|
|
|
PM.add(createDeadStoreEliminationPass());
|
|
|
|
|
2014-04-16 01:48:15 +08:00
|
|
|
// More loops are countable; try to optimize them.
|
|
|
|
PM.add(createIndVarSimplifyPass());
|
|
|
|
PM.add(createLoopDeletionPass());
|
2015-03-06 18:11:25 +08:00
|
|
|
if (EnableLoopInterchange)
|
|
|
|
PM.add(createLoopInterchangePass());
|
|
|
|
|
2016-01-14 23:00:09 +08:00
|
|
|
if (!DisableUnrollLoops)
|
2017-02-18 11:46:51 +08:00
|
|
|
PM.add(createSimpleLoopUnrollPass(OptLevel)); // Unroll small loops
|
2014-10-27 05:50:58 +08:00
|
|
|
PM.add(createLoopVectorizePass(true, LoopVectorize));
|
2016-01-14 23:00:09 +08:00
|
|
|
// The vectorizer may have significantly shortened a loop body; unroll again.
|
|
|
|
if (!DisableUnrollLoops)
|
2017-02-18 11:46:51 +08:00
|
|
|
PM.add(createLoopUnrollPass(OptLevel));
|
2014-02-25 02:19:31 +08:00
|
|
|
|
2015-12-15 17:24:01 +08:00
|
|
|
// Now that we've optimized loops (in particular loop induction variables),
|
|
|
|
// we may have exposed more scalar opportunities. Run parts of the scalar
|
|
|
|
// optimizer again at this point.
|
2016-03-10 02:47:11 +08:00
|
|
|
addInstructionCombiningPass(PM); // Initial cleanup
|
2015-12-15 17:24:01 +08:00
|
|
|
PM.add(createCFGSimplificationPass()); // if-convert
|
|
|
|
PM.add(createSCCPPass()); // Propagate exposed constants
|
2016-03-10 02:47:11 +08:00
|
|
|
addInstructionCombiningPass(PM); // Clean up again
|
2015-12-15 17:24:01 +08:00
|
|
|
PM.add(createBitTrackingDCEPass());
|
|
|
|
|
2014-05-06 07:14:46 +08:00
|
|
|
// More scalar chains could be vectorized due to more alias information
|
2014-10-22 07:18:21 +08:00
|
|
|
if (RunSLPAfterLoopVectorization)
|
|
|
|
if (SLPVectorize)
|
|
|
|
PM.add(createSLPVectorizerPass()); // Vectorize parallel scalar chains.
|
2014-05-06 07:14:46 +08:00
|
|
|
|
2014-09-08 04:05:11 +08:00
|
|
|
// After vectorization, assume intrinsics may tell us more about pointer
|
|
|
|
// alignments.
|
|
|
|
PM.add(createAlignmentFromAssumptionsPass());
|
|
|
|
|
2014-05-29 09:55:07 +08:00
|
|
|
if (LoadCombine)
|
|
|
|
PM.add(createLoadCombinePass());
|
|
|
|
|
2011-08-03 05:50:27 +08:00
|
|
|
// Cleanup and simplify the code after the scalar optimizations.
|
2016-03-10 02:47:11 +08:00
|
|
|
addInstructionCombiningPass(PM);
|
2014-05-25 18:27:02 +08:00
|
|
|
addExtensionsToPM(EP_Peephole, PM);
|
2011-08-03 05:50:27 +08:00
|
|
|
|
|
|
|
PM.add(createJumpThreadingPass());
|
2015-03-20 06:01:00 +08:00
|
|
|
}
|
2011-08-03 05:50:27 +08:00
|
|
|
|
2015-03-20 06:01:00 +08:00
|
|
|
void PassManagerBuilder::addLateLTOOptimizationPasses(
|
|
|
|
legacy::PassManagerBase &PM) {
|
2011-08-03 05:50:27 +08:00
|
|
|
// Delete basic blocks, which optimization passes may have killed.
|
2013-08-06 10:43:45 +08:00
|
|
|
PM.add(createCFGSimplificationPass());
|
2011-08-03 05:50:27 +08:00
|
|
|
|
2015-08-12 00:26:41 +08:00
|
|
|
// Drop bodies of available externally objects to improve GlobalDCE.
|
|
|
|
PM.add(createEliminateAvailableExternallyPass());
|
|
|
|
|
2011-08-03 05:50:27 +08:00
|
|
|
// Now that we have optimized the program, discard unreachable functions.
|
|
|
|
PM.add(createGlobalDCEPass());
|
2014-09-14 05:46:00 +08:00
|
|
|
|
|
|
|
// FIXME: this is profitable (for compiler time) to do at -O0 too, but
|
|
|
|
// currently it damages debug info.
|
|
|
|
if (MergeFunctions)
|
|
|
|
PM.add(createMergeFunctionsPass());
|
2011-08-03 05:50:27 +08:00
|
|
|
}
|
2011-08-10 06:17:34 +08:00
|
|
|
|
2016-02-17 07:02:29 +08:00
|
|
|
void PassManagerBuilder::populateThinLTOPassManager(
|
|
|
|
legacy::PassManagerBase &PM) {
|
|
|
|
PerformThinLTO = true;
|
|
|
|
|
|
|
|
if (VerifyInput)
|
|
|
|
PM.add(createVerifierPass());
|
|
|
|
|
2017-03-23 02:22:59 +08:00
|
|
|
if (ImportSummary) {
|
2017-02-16 07:48:38 +08:00
|
|
|
// These passes import type identifier resolutions for whole-program
|
|
|
|
// devirtualization and CFI. They must run early because other passes may
|
|
|
|
// disturb the specific instruction patterns that these passes look for,
|
|
|
|
// creating dependencies on resolutions that may not appear in the summary.
|
|
|
|
//
|
|
|
|
// For example, GVN may transform the pattern assume(type.test) appearing in
|
|
|
|
// two basic blocks into assume(phi(type.test, type.test)), which would
|
|
|
|
// transform a dependency on a WPD resolution into a dependency on a type
|
|
|
|
// identifier resolution for CFI.
|
|
|
|
//
|
|
|
|
// Also, WPD has access to more precise information than ICP and can
|
|
|
|
// devirtualize more effectively, so it should operate on the IR first.
|
2017-03-23 02:22:59 +08:00
|
|
|
PM.add(createWholeProgramDevirtPass(nullptr, ImportSummary));
|
|
|
|
PM.add(createLowerTypeTestsPass(nullptr, ImportSummary));
|
2017-02-16 07:48:38 +08:00
|
|
|
}
|
2017-01-21 06:18:52 +08:00
|
|
|
|
2016-02-17 07:02:29 +08:00
|
|
|
populateModulePassManager(PM);
|
|
|
|
|
|
|
|
if (VerifyOutput)
|
|
|
|
PM.add(createVerifierPass());
|
|
|
|
PerformThinLTO = false;
|
|
|
|
}
|
|
|
|
|
2015-02-13 18:01:29 +08:00
|
|
|
void PassManagerBuilder::populateLTOPassManager(legacy::PassManagerBase &PM) {
|
2014-08-22 04:03:44 +08:00
|
|
|
if (LibraryInfo)
|
2015-01-15 18:41:28 +08:00
|
|
|
PM.add(new TargetLibraryInfoWrapperPass(*LibraryInfo));
|
2014-08-22 04:03:44 +08:00
|
|
|
|
2015-03-20 06:24:17 +08:00
|
|
|
if (VerifyInput)
|
2014-08-22 04:03:44 +08:00
|
|
|
PM.add(createVerifierPass());
|
|
|
|
|
2016-02-10 06:50:34 +08:00
|
|
|
if (OptLevel != 0)
|
2014-08-22 04:03:44 +08:00
|
|
|
addLTOOptimizationPasses(PM);
|
|
|
|
|
2015-12-16 07:00:08 +08:00
|
|
|
// Create a function that performs CFI checks for cross-DSO calls with targets
|
|
|
|
// in the current module.
|
|
|
|
PM.add(createCrossDSOCFIPass());
|
|
|
|
|
IR: New representation for CFI and virtual call optimization pass metadata.
The bitset metadata currently used in LLVM has a few problems:
1. It has the wrong name. The name "bitset" refers to an implementation
detail of one use of the metadata (i.e. its original use case, CFI).
This makes it harder to understand, as the name makes no sense in the
context of virtual call optimization.
2. It is represented using a global named metadata node, rather than
being directly associated with a global. This makes it harder to
manipulate the metadata when rebuilding global variables, summarise it
as part of ThinLTO and drop unused metadata when associated globals are
dropped. For this reason, CFI does not currently work correctly when
both CFI and vcall opt are enabled, as vcall opt needs to rebuild vtable
globals, and fails to associate metadata with the rebuilt globals. As I
understand it, the same problem could also affect ASan, which rebuilds
globals with a red zone.
This patch solves both of those problems in the following way:
1. Rename the metadata to "type metadata". This new name reflects how
the metadata is currently being used (i.e. to represent type information
for CFI and vtable opt). The new name is reflected in the name for the
associated intrinsic (llvm.type.test) and pass (LowerTypeTests).
2. Attach metadata directly to the globals that it pertains to, rather
than using the "llvm.bitsets" global metadata node as we are doing now.
This is done using the newly introduced capability to attach
metadata to global variables (r271348 and r271358).
See also: http://lists.llvm.org/pipermail/llvm-dev/2016-June/100462.html
Differential Revision: http://reviews.llvm.org/D21053
llvm-svn: 273729
2016-06-25 05:21:32 +08:00
|
|
|
// Lower type metadata and the type.test intrinsic. This pass supports Clang's
|
|
|
|
// control flow integrity mechanisms (-fsanitize=cfi*) and needs to run at
|
|
|
|
// link time if CFI is enabled. The pass does nothing if CFI is disabled.
|
2017-03-23 02:22:59 +08:00
|
|
|
PM.add(createLowerTypeTestsPass(ExportSummary, nullptr));
|
2015-03-20 06:01:00 +08:00
|
|
|
|
|
|
|
if (OptLevel != 0)
|
|
|
|
addLateLTOOptimizationPasses(PM);
|
|
|
|
|
2015-03-20 06:24:17 +08:00
|
|
|
if (VerifyOutput)
|
2014-08-22 04:03:44 +08:00
|
|
|
PM.add(createVerifierPass());
|
|
|
|
}
|
|
|
|
|
2013-04-23 06:47:22 +08:00
|
|
|
inline PassManagerBuilder *unwrap(LLVMPassManagerBuilderRef P) {
|
|
|
|
return reinterpret_cast<PassManagerBuilder*>(P);
|
|
|
|
}
|
|
|
|
|
|
|
|
inline LLVMPassManagerBuilderRef wrap(PassManagerBuilder *P) {
|
|
|
|
return reinterpret_cast<LLVMPassManagerBuilderRef>(P);
|
|
|
|
}
|
|
|
|
|
2012-11-16 00:51:49 +08:00
|
|
|
LLVMPassManagerBuilderRef LLVMPassManagerBuilderCreate() {
|
2011-08-10 06:17:34 +08:00
|
|
|
PassManagerBuilder *PMB = new PassManagerBuilder();
|
|
|
|
return wrap(PMB);
|
|
|
|
}
|
|
|
|
|
|
|
|
void LLVMPassManagerBuilderDispose(LLVMPassManagerBuilderRef PMB) {
|
|
|
|
PassManagerBuilder *Builder = unwrap(PMB);
|
|
|
|
delete Builder;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
LLVMPassManagerBuilderSetOptLevel(LLVMPassManagerBuilderRef PMB,
|
|
|
|
unsigned OptLevel) {
|
|
|
|
PassManagerBuilder *Builder = unwrap(PMB);
|
|
|
|
Builder->OptLevel = OptLevel;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
LLVMPassManagerBuilderSetSizeLevel(LLVMPassManagerBuilderRef PMB,
|
|
|
|
unsigned SizeLevel) {
|
|
|
|
PassManagerBuilder *Builder = unwrap(PMB);
|
|
|
|
Builder->SizeLevel = SizeLevel;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
LLVMPassManagerBuilderSetDisableUnitAtATime(LLVMPassManagerBuilderRef PMB,
|
|
|
|
LLVMBool Value) {
|
|
|
|
PassManagerBuilder *Builder = unwrap(PMB);
|
|
|
|
Builder->DisableUnitAtATime = Value;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
LLVMPassManagerBuilderSetDisableUnrollLoops(LLVMPassManagerBuilderRef PMB,
|
|
|
|
LLVMBool Value) {
|
|
|
|
PassManagerBuilder *Builder = unwrap(PMB);
|
|
|
|
Builder->DisableUnrollLoops = Value;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
LLVMPassManagerBuilderSetDisableSimplifyLibCalls(LLVMPassManagerBuilderRef PMB,
|
|
|
|
LLVMBool Value) {
|
2013-06-21 03:48:07 +08:00
|
|
|
// NOTE: The simplify-libcalls pass has been removed.
|
2011-08-10 06:17:34 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
LLVMPassManagerBuilderUseInlinerWithThreshold(LLVMPassManagerBuilderRef PMB,
|
|
|
|
unsigned Threshold) {
|
|
|
|
PassManagerBuilder *Builder = unwrap(PMB);
|
|
|
|
Builder->Inliner = createFunctionInliningPass(Threshold);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
LLVMPassManagerBuilderPopulateFunctionPassManager(LLVMPassManagerBuilderRef PMB,
|
|
|
|
LLVMPassManagerRef PM) {
|
|
|
|
PassManagerBuilder *Builder = unwrap(PMB);
|
2015-02-13 18:01:29 +08:00
|
|
|
legacy::FunctionPassManager *FPM = unwrap<legacy::FunctionPassManager>(PM);
|
2011-08-10 06:17:34 +08:00
|
|
|
Builder->populateFunctionPassManager(*FPM);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
LLVMPassManagerBuilderPopulateModulePassManager(LLVMPassManagerBuilderRef PMB,
|
|
|
|
LLVMPassManagerRef PM) {
|
|
|
|
PassManagerBuilder *Builder = unwrap(PMB);
|
2015-02-13 18:01:29 +08:00
|
|
|
legacy::PassManagerBase *MPM = unwrap(PM);
|
2011-08-10 06:17:34 +08:00
|
|
|
Builder->populateModulePassManager(*MPM);
|
|
|
|
}
|
|
|
|
|
|
|
|
void LLVMPassManagerBuilderPopulateLTOPassManager(LLVMPassManagerBuilderRef PMB,
|
|
|
|
LLVMPassManagerRef PM,
|
2013-03-11 05:58:22 +08:00
|
|
|
LLVMBool Internalize,
|
|
|
|
LLVMBool RunInliner) {
|
2011-08-10 06:17:34 +08:00
|
|
|
PassManagerBuilder *Builder = unwrap(PMB);
|
2015-02-13 18:01:29 +08:00
|
|
|
legacy::PassManagerBase *LPM = unwrap(PM);
|
2014-08-21 21:35:30 +08:00
|
|
|
|
|
|
|
// A small backwards compatibility hack. populateLTOPassManager used to take
|
|
|
|
// a RunInliner option.
|
|
|
|
if (RunInliner && !Builder->Inliner)
|
|
|
|
Builder->Inliner = createFunctionInliningPass();
|
|
|
|
|
|
|
|
Builder->populateLTOPassManager(*LPM);
|
2011-08-10 06:17:34 +08:00
|
|
|
}
|
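Finally, a minimal sketch of driving this C API from client code (assumptions: M is a valid LLVMModuleRef created elsewhere, the threshold 225 is an arbitrary example value, and the pass-manager creation and run calls come from llvm-c/Core.h):

  #include "llvm-c/Core.h"
  #include "llvm-c/Transforms/PassManagerBuilder.h"

  static void LLVMOptimizeModuleSketch(LLVMModuleRef M) {
    LLVMPassManagerBuilderRef PMB = LLVMPassManagerBuilderCreate();
    LLVMPassManagerBuilderSetOptLevel(PMB, 2);
    LLVMPassManagerBuilderSetSizeLevel(PMB, 0);
    LLVMPassManagerBuilderUseInlinerWithThreshold(PMB, 225);

    LLVMPassManagerRef MPM = LLVMCreatePassManager();
    LLVMPassManagerBuilderPopulateModulePassManager(PMB, MPM);
    LLVMRunPassManager(MPM, M);

    LLVMDisposePassManager(MPM);
    LLVMPassManagerBuilderDispose(PMB);
  }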