//===- PassManagerBuilder.cpp - Build Standard Pass -----------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the PassManagerBuilder class, which is used to set up a
// "standard" optimization sequence suitable for languages like C and C++.
//
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
#include "llvm/Transforms/IPO/PassManagerBuilder.h"
|
2011-08-10 06:17:34 +08:00
|
|
|
#include "llvm-c/Transforms/PassManagerBuilder.h"
|
2012-12-04 00:50:05 +08:00
|
|
|
#include "llvm/ADT/SmallVector.h"
|
2015-08-06 15:33:15 +08:00
|
|
|
#include "llvm/Analysis/BasicAliasAnalysis.h"
|
2015-08-14 10:42:20 +08:00
|
|
|
#include "llvm/Analysis/CFLAliasAnalysis.h"
|
2015-08-14 11:48:20 +08:00
|
|
|
#include "llvm/Analysis/GlobalsModRef.h"
|
2015-12-27 16:41:34 +08:00
|
|
|
#include "llvm/Analysis/Passes.h"
|
2015-08-14 10:55:50 +08:00
|
|
|
#include "llvm/Analysis/ScopedNoAliasAA.h"
|
2015-01-15 10:16:27 +08:00
|
|
|
#include "llvm/Analysis/TargetLibraryInfo.h"
|
2015-08-14 11:33:48 +08:00
|
|
|
#include "llvm/Analysis/TypeBasedAliasAnalysis.h"
|
2015-12-27 16:41:34 +08:00
|
|
|
#include "llvm/IR/DataLayout.h"
|
|
|
|
#include "llvm/IR/LegacyPassManager.h"
|
2016-03-15 08:04:37 +08:00
|
|
|
#include "llvm/IR/ModuleSummaryIndex.h"
|
2015-12-27 16:41:34 +08:00
|
|
|
#include "llvm/IR/Verifier.h"
|
|
|
|
#include "llvm/Support/CommandLine.h"
|
|
|
|
#include "llvm/Support/ManagedStatic.h"
|
2014-08-22 04:03:44 +08:00
|
|
|
#include "llvm/Target/TargetMachine.h"
|
2012-12-04 00:50:05 +08:00
|
|
|
#include "llvm/Transforms/IPO.h"
|
2015-12-27 16:13:45 +08:00
|
|
|
#include "llvm/Transforms/IPO/ForceFunctionAttrs.h"
|
2016-02-18 19:03:11 +08:00
|
|
|
#include "llvm/Transforms/IPO/FunctionAttrs.h"
|
2015-12-27 16:41:34 +08:00
|
|
|
#include "llvm/Transforms/IPO/InferFunctionAttrs.h"
|
2016-03-15 08:04:37 +08:00
|
|
|
#include "llvm/Transforms/Instrumentation.h"
|
2011-08-03 05:50:27 +08:00
|
|
|
#include "llvm/Transforms/Scalar.h"
|
2016-03-11 16:50:55 +08:00
|
|
|
#include "llvm/Transforms/Scalar/GVN.h"
|
2012-02-01 11:51:43 +08:00
|
|
|
#include "llvm/Transforms/Vectorize.h"
|
2011-08-03 05:50:27 +08:00
|
|
|
|
|
|
|
using namespace llvm;
|
|
|
|
|
2012-02-01 11:51:43 +08:00
|
|
|
// -vectorize-loops: gate for the loop vectorizer in the standard pipeline.
// Copied into PassManagerBuilder::LoopVectorize by the constructor.
static cl::opt<bool>
RunLoopVectorization("vectorize-loops", cl::Hidden,
                     cl::desc("Run the Loop vectorization passes"));
|
2012-10-30 00:36:25 +08:00
|
|
|
|
|
|
|
// -vectorize-slp: gate for the SLP (superword-level parallelism) vectorizer.
// Copied into PassManagerBuilder::SLPVectorize by the constructor.
static cl::opt<bool>
RunSLPVectorization("vectorize-slp", cl::Hidden,
                    cl::desc("Run the SLP vectorization passes"));
|
|
|
|
|
|
|
|
// -vectorize-slp-aggressive: gate for the basic-block vectorizer.
// Copied into PassManagerBuilder::BBVectorize by the constructor.
static cl::opt<bool>
RunBBVectorization("vectorize-slp-aggressive", cl::Hidden,
                   cl::desc("Run the BB vectorization passes"));
|
2012-02-01 11:51:43 +08:00
|
|
|
|
2012-04-14 01:15:33 +08:00
|
|
|
// -use-gvn-after-vectorization: when set, the post-vectorization cleanup
// uses full GVN instead of the cheaper EarlyCSE. Off by default.
static cl::opt<bool>
UseGVNAfterVectorization("use-gvn-after-vectorization",
                         cl::init(false), cl::Hidden,
                         cl::desc("Run GVN instead of Early CSE after vectorization passes"));
|
|
|
|
|
2014-10-14 08:31:29 +08:00
|
|
|
// -extra-vectorizer-passes: adds additional cleanup passes after the
// vectorizers run. Off by default.
static cl::opt<bool> ExtraVectorizerPasses(
    "extra-vectorizer-passes", cl::init(false), cl::Hidden,
    cl::desc("Run cleanup optimization passes after vectorization."));
|
|
|
|
|
// -use-new-sroa: selects the newer SROA implementation (default on).
static cl::opt<bool> UseNewSROA("use-new-sroa",
                                cl::init(true), cl::Hidden,
                                cl::desc("Enable the new, experimental SROA pass"));
|
|
|
|
|
2013-11-17 07:59:05 +08:00
|
|
|
// -reroll-loops: gate for the loop rerolling pass.
// Copied into PassManagerBuilder::RerollLoops by the constructor.
static cl::opt<bool>
RunLoopRerolling("reroll-loops", cl::Hidden,
                 cl::desc("Run the loop rerolling pass"));
|
|
|
|
|
2015-03-27 18:36:57 +08:00
|
|
|
// -float-to-int: gate for the float demotion (Float2Int) pass. On by default.
static cl::opt<bool>
RunFloat2Int("float-to-int", cl::Hidden, cl::init(true),
             cl::desc("Run the float2int (float demotion) pass"));
|
|
|
|
|
2014-05-29 09:55:07 +08:00
|
|
|
// -combine-loads: gate for the load combining pass. Off by default;
// copied into PassManagerBuilder::LoadCombine by the constructor.
static cl::opt<bool> RunLoadCombine("combine-loads", cl::init(false),
                                    cl::Hidden,
                                    cl::desc("Run the load combining pass"));
|
|
|
|
|
2014-08-06 20:56:19 +08:00
|
|
|
// -run-slp-after-loop-vectorization: controls whether the SLP/BB vectorizers
// run after the loop vectorizer (default) or before it.
static cl::opt<bool>
RunSLPAfterLoopVectorization("run-slp-after-loop-vectorization",
                             cl::init(true), cl::Hidden,
                             cl::desc("Run the SLP vectorizer (and BB vectorizer) after the Loop "
                                      "vectorizer instead of before"));
|
|
|
|
|
2014-09-03 06:12:54 +08:00
|
|
|
// -use-cfl-aa: enables the CFL alias analysis in the initial AA stack
// (see addInitialAliasAnalysisPasses). Off by default.
static cl::opt<bool> UseCFLAA("use-cfl-aa",
                              cl::init(false), cl::Hidden,
                              cl::desc("Enable the new, experimental CFL alias analysis"));
|
2014-08-06 20:56:19 +08:00
|
|
|
|
2014-09-11 03:55:29 +08:00
|
|
|
// -mlsm: enables merged load/store motion. On by default.
static cl::opt<bool>
EnableMLSM("mlsm", cl::init(true), cl::Hidden,
           cl::desc("Enable motion of merged load and store"));
|
2014-09-11 03:55:29 +08:00
|
|
|
|
2015-03-06 18:11:25 +08:00
|
|
|
// -enable-loopinterchange: gate for the LoopInterchange pass. Off by default.
static cl::opt<bool> EnableLoopInterchange(
    "enable-loopinterchange", cl::init(false), cl::Hidden,
    cl::desc("Enable the new, experimental LoopInterchange Pass"));
|
|
|
|
|
2015-07-22 19:57:28 +08:00
|
|
|
// -enable-non-lto-gmr: runs the GlobalsModRef (GlobalsAA) alias analysis in
// the regular (non-LTO) pipeline as well. On by default.
static cl::opt<bool> EnableNonLTOGlobalsModRef(
    "enable-non-lto-gmr", cl::init(true), cl::Hidden,
    cl::desc(
        "Enable the GlobalsModRef AliasAnalysis outside of the LTO pipeline."));
|
|
|
|
|
// -enable-loop-load-elim: gate for the LoopLoadElimination pass
// (store-to-load forwarding across a loop backedge). On by default.
static cl::opt<bool> EnableLoopLoadElim(
    "enable-loop-load-elim", cl::init(true), cl::Hidden,
    cl::desc("Enable the LoopLoadElimination Pass"));
|
|
|
|
2016-01-22 02:28:59 +08:00
|
|
|
// -profile-generate=<path>: enables the PGO instrumentation-generation phase
// and names the output profile file. Empty (the default) means disabled.
static cl::opt<std::string> RunPGOInstrGen(
    "profile-generate", cl::init(""), cl::Hidden,
    cl::desc("Enable generation phase of PGO instrumentation and specify the "
             "path of profile data file"));
|
|
|
|
|
|
|
|
// -profile-use=<filename>: enables the PGO profile-use phase and names the
// input profile file. Empty (the default) means disabled.
static cl::opt<std::string> RunPGOInstrUse(
    "profile-use", cl::init(""), cl::Hidden, cl::value_desc("filename"),
    cl::desc("Enable use phase of PGO instrumentation and specify the path "
             "of profile data file"));
|
|
|
|
|
// -enable-loop-versioning-licm: gate for the Loop Versioning LICM pass
// (versions a loop under a runtime aliasing check to unblock LICM).
// Off by default.
static cl::opt<bool> UseLoopVersioningLICM(
    "enable-loop-versioning-licm", cl::init(false), cl::Hidden,
    cl::desc("Enable the experimental Loop Versioning LICM pass"));
|
|
|
|
|
2011-08-03 05:50:27 +08:00
|
|
|
PassManagerBuilder::PassManagerBuilder() {
    // Default to a -O2-style pipeline with no size optimization.
    OptLevel = 2;
    SizeLevel = 0;
    // LibraryInfo and Inliner are owned by the builder: the destructor
    // deletes them. They stay null until a client installs them.
    LibraryInfo = nullptr;
    Inliner = nullptr;
    // Optional module summary for (Thin)LTO pipelines; not deleted by the
    // destructor, so presumably owned by the caller — see ~PassManagerBuilder.
    ModuleSummary = nullptr;
    DisableUnitAtATime = false;
    DisableUnrollLoops = false;
    // Seed the per-builder transform toggles from their command-line flags
    // so cl::opt overrides take effect unless a client changes them.
    BBVectorize = RunBBVectorization;
    SLPVectorize = RunSLPVectorization;
    LoopVectorize = RunLoopVectorization;
    RerollLoops = RunLoopRerolling;
    LoadCombine = RunLoadCombine;
    DisableGVNLoadPRE = false;
    VerifyInput = false;
    VerifyOutput = false;
    MergeFunctions = false;
    PrepareForLTO = false;
    // PGO generation/use profile paths; empty string means disabled.
    PGOInstrGen = RunPGOInstrGen;
    PGOInstrUse = RunPGOInstrUse;
    PrepareForThinLTO = false;
    PerformThinLTO = false;
}
|
|
|
|
|
|
|
|
PassManagerBuilder::~PassManagerBuilder() {
    // The builder owns the library-info object and the inliner pass handed
    // to it; release both. ModuleSummary is deliberately not deleted here.
    delete LibraryInfo;
    delete Inliner;
}
|
|
|
|
|
2011-08-16 21:58:41 +08:00
|
|
|
/// Set of global extensions, automatically added as part of the standard set.
/// Wrapped in a ManagedStatic so the list is created lazily and torn down
/// with the rest of LLVM's managed statics.
static ManagedStatic<SmallVector<std::pair<PassManagerBuilder::ExtensionPointTy,
   PassManagerBuilder::ExtensionFn>, 8> > GlobalExtensions;
|
|
|
|
|
|
|
|
/// Register \p Fn to run at extension point \p Ty in every pipeline built
/// by any PassManagerBuilder for the rest of the process.
void PassManagerBuilder::addGlobalExtension(
    PassManagerBuilder::ExtensionPointTy Ty,
    PassManagerBuilder::ExtensionFn Fn) {
  // Construct the (point, callback) pair directly in the global list; the
  // callback is moved rather than copied.
  GlobalExtensions->emplace_back(Ty, std::move(Fn));
}
|
|
|
|
|
2011-08-03 05:50:27 +08:00
|
|
|
/// Register \p Fn to run at extension point \p Ty, for pipelines built by
/// this builder only.
void PassManagerBuilder::addExtension(ExtensionPointTy Ty, ExtensionFn Fn) {
  // Build the pair in place, moving the callback instead of copying it.
  Extensions.emplace_back(Ty, std::move(Fn));
}
|
|
|
|
|
|
|
|
void PassManagerBuilder::addExtensionsToPM(ExtensionPointTy ETy,
|
2015-02-13 18:01:29 +08:00
|
|
|
legacy::PassManagerBase &PM) const {
|
2011-08-16 21:58:41 +08:00
|
|
|
for (unsigned i = 0, e = GlobalExtensions->size(); i != e; ++i)
|
|
|
|
if ((*GlobalExtensions)[i].first == ETy)
|
|
|
|
(*GlobalExtensions)[i].second(*this, PM);
|
2011-08-03 05:50:27 +08:00
|
|
|
for (unsigned i = 0, e = Extensions.size(); i != e; ++i)
|
|
|
|
if (Extensions[i].first == ETy)
|
|
|
|
Extensions[i].second(*this, PM);
|
|
|
|
}
|
|
|
|
|
2015-02-13 18:01:29 +08:00
|
|
|
// Add the alias-analysis passes that should precede the rest of the
// pipeline to \p PM. Order matters here: later-added AA passes are
// consulted first by the legacy pass manager.
void PassManagerBuilder::addInitialAliasAnalysisPasses(
    legacy::PassManagerBase &PM) const {
    // Add TypeBasedAliasAnalysis before BasicAliasAnalysis so that
    // BasicAliasAnalysis wins if they disagree. This is intended to help
    // support "obvious" type-punning idioms.
    // CFL AA is opt-in via -use-cfl-aa (off by default).
    if (UseCFLAA)
      PM.add(createCFLAAWrapperPass());
    PM.add(createTypeBasedAAWrapperPass());
    PM.add(createScopedNoAliasAAWrapperPass());
}
|
|
|
|
|
2016-03-10 02:47:11 +08:00
|
|
|
/// Append an InstCombine pass to \p PM, enabling the expensive pattern set
/// only at -O3 and above.
void PassManagerBuilder::addInstructionCombiningPass(
    legacy::PassManagerBase &PM) const {
  PM.add(createInstructionCombiningPass(/*ExpensiveCombines=*/OptLevel > 2));
}
|
|
|
|
|
2015-02-13 18:01:29 +08:00
|
|
|
/// Populate the per-function (early) pass pipeline. At -O0 only the
/// EP_EarlyAsPossible extensions and TargetLibraryInfo are added; otherwise a
/// small canonicalization sequence (CFG simplify, SROA, EarlyCSE,
/// lower-expect) is run before the module pipeline.
void PassManagerBuilder::populateFunctionPassManager(
    legacy::FunctionPassManager &FPM) {
  // Let clients insert passes at the very start of the pipeline.
  addExtensionsToPM(EP_EarlyAsPossible, FPM);

  // Add LibraryInfo if we have some.
  if (LibraryInfo)
    FPM.add(new TargetLibraryInfoWrapperPass(*LibraryInfo));

  // Nothing beyond the extension points runs at -O0.
  if (OptLevel == 0) return;

  addInitialAliasAnalysisPasses(FPM);

  FPM.add(createCFGSimplificationPass());
  // Break up aggregate allocas; which implementation runs is controlled by
  // the UseNewSROA flag.
  if (UseNewSROA)
    FPM.add(createSROAPass());
  else
    FPM.add(createScalarReplAggregatesPass());
  FPM.add(createEarlyCSEPass());
  FPM.add(createLowerExpectIntrinsicPass());
}
|
|
|
|
|
2016-01-22 02:28:59 +08:00
|
|
|
// Do PGO instrumentation generation or use pass as the option specified.
// PGOInstrGen (non-empty) requests instrumentation and names the output
// profile file; PGOInstrUse (non-empty) names a profile to consume.
void PassManagerBuilder::addPGOInstrPasses(legacy::PassManagerBase &MPM) {
  if (!PGOInstrGen.empty()) {
    MPM.add(createPGOInstrumentationGenPass());
    // Add the profile lowering pass.
    InstrProfOptions Options;
    Options.InstrProfileOutput = PGOInstrGen;
    MPM.add(createInstrProfilingLegacyPass(Options));
  }
  if (!PGOInstrUse.empty())
    MPM.add(createPGOInstrumentationUsePass(PGOInstrUse));
}
|
2016-02-17 06:54:27 +08:00
|
|
|
/// Add the standard per-function simplification pipeline (SROA, CSE, jump
/// threading, the loop-optimization sequence, redundancy elimination, and
/// optional vectorization). The ordering of passes here is load-bearing:
/// later passes rely on the canonical forms produced by earlier ones.
/// When PrepareForThinLTO is set, the pipeline stops early after a light
/// cleanup so the heavier transforms run in the ThinLTO backend instead.
void PassManagerBuilder::addFunctionSimplificationPasses(
    legacy::PassManagerBase &MPM) {
  // Start of function pass.
  // Break up aggregate allocas, using SSAUpdater.
  if (UseNewSROA)
    MPM.add(createSROAPass());
  else
    MPM.add(createScalarReplAggregatesPass(-1, false));
  MPM.add(createEarlyCSEPass()); // Catch trivial redundancies
  // Speculative execution if the target has divergent branches; otherwise nop.
  MPM.add(createSpeculativeExecutionIfHasBranchDivergencePass());
  MPM.add(createJumpThreadingPass());         // Thread jumps.
  MPM.add(createCorrelatedValuePropagationPass()); // Propagate conditionals
  MPM.add(createCFGSimplificationPass());     // Merge & remove BBs
  // Combine silly seq's
  addInstructionCombiningPass(MPM);
  addExtensionsToPM(EP_Peephole, MPM);

  MPM.add(createTailCallEliminationPass()); // Eliminate tail calls
  MPM.add(createCFGSimplificationPass());   // Merge & remove BBs
  MPM.add(createReassociatePass());         // Reassociate expressions
  if (PrepareForThinLTO) {
    // For ThinLTO compiles, only do a cheap DCE + instcombine cleanup here;
    // the rest of the pipeline runs during the ThinLTO link step.
    MPM.add(createAggressiveDCEPass());     // Delete dead instructions
    addInstructionCombiningPass(MPM);       // Combine silly seq's
    return;
  }
  // Rotate Loop - disable header duplication at -Oz
  MPM.add(createLoopRotatePass(SizeLevel == 2 ? 0 : -1));
  MPM.add(createLICMPass());                // Hoist loop invariants
  // Unswitching is throttled (trivial-only) when optimizing for size or
  // below -O3.
  MPM.add(createLoopUnswitchPass(SizeLevel || OptLevel < 3));
  MPM.add(createCFGSimplificationPass());
  addInstructionCombiningPass(MPM);
  MPM.add(createIndVarSimplifyPass());      // Canonicalize indvars
  MPM.add(createLoopIdiomPass());           // Recognize idioms like memset.
  MPM.add(createLoopDeletionPass());        // Delete dead loops
  if (EnableLoopInterchange) {
    MPM.add(createLoopInterchangePass());   // Interchange loops
    MPM.add(createCFGSimplificationPass());
  }
  if (!DisableUnrollLoops)
    MPM.add(createSimpleLoopUnrollPass());  // Unroll small loops
  addExtensionsToPM(EP_LoopOptimizerEnd, MPM);

  if (OptLevel > 1) {
    if (EnableMLSM)
      MPM.add(createMergedLoadStoreMotionPass()); // Merge ld/st in diamonds
    MPM.add(createGVNPass(DisableGVNLoadPRE));    // Remove redundancies
  }
  MPM.add(createMemCpyOptPass());           // Remove memcpy / form memset
  MPM.add(createSCCPPass());                // Constant prop with SCCP

  // Delete dead bit computations (instcombine runs after to fold away the dead
  // computations, and then ADCE will run later to exploit any new DCE
  // opportunities that creates).
  MPM.add(createBitTrackingDCEPass());      // Delete dead bit computations

  // Run instcombine after redundancy elimination to exploit opportunities
  // opened up by them.
  addInstructionCombiningPass(MPM);
  addExtensionsToPM(EP_Peephole, MPM);
  MPM.add(createJumpThreadingPass());       // Thread jumps
  MPM.add(createCorrelatedValuePropagationPass());
  MPM.add(createDeadStoreEliminationPass()); // Delete dead stores
  MPM.add(createLICMPass());

  addExtensionsToPM(EP_ScalarOptimizerLate, MPM);

  if (RerollLoops)
    MPM.add(createLoopRerollPass());
  // Legacy position for SLP/BB vectorization (before the late loop
  // vectorizer); only used when RunSLPAfterLoopVectorization is off.
  if (!RunSLPAfterLoopVectorization) {
    if (SLPVectorize)
      MPM.add(createSLPVectorizerPass());   // Vectorize parallel scalar chains.

    if (BBVectorize) {
      MPM.add(createBBVectorizePass());
      addInstructionCombiningPass(MPM);
      addExtensionsToPM(EP_Peephole, MPM);
      if (OptLevel > 1 && UseGVNAfterVectorization)
        MPM.add(createGVNPass(DisableGVNLoadPRE)); // Remove redundancies
      else
        MPM.add(createEarlyCSEPass());      // Catch trivial redundancies

      // BBVectorize may have significantly shortened a loop body; unroll again.
      if (!DisableUnrollLoops)
        MPM.add(createLoopUnrollPass());
    }
  }

  if (LoadCombine)
    MPM.add(createLoadCombinePass());

  MPM.add(createAggressiveDCEPass());       // Delete dead instructions
  MPM.add(createCFGSimplificationPass());   // Merge & remove BBs
  // Clean up after everything.
  addInstructionCombiningPass(MPM);
  addExtensionsToPM(EP_Peephole, MPM);
}
|
|
|
|
|
|
|
|
void PassManagerBuilder::populateModulePassManager(
|
|
|
|
legacy::PassManagerBase &MPM) {
|
|
|
|
// Allow forcing function attributes as a debugging and tuning aid.
|
|
|
|
MPM.add(createForceFunctionAttrsLegacyPass());
|
|
|
|
|
|
|
|
// If all optimizations are disabled, just run the always-inline pass and,
|
|
|
|
// if enabled, the function merging pass.
|
|
|
|
if (OptLevel == 0) {
|
|
|
|
addPGOInstrPasses(MPM);
|
|
|
|
if (Inliner) {
|
|
|
|
MPM.add(Inliner);
|
|
|
|
Inliner = nullptr;
|
|
|
|
}
|
|
|
|
|
|
|
|
// FIXME: The BarrierNoopPass is a HACK! The inliner pass above implicitly
|
|
|
|
// creates a CGSCC pass manager, but we don't want to add extensions into
|
|
|
|
// that pass manager. To prevent this we insert a no-op module pass to reset
|
|
|
|
// the pass manager to get the same behavior as EP_OptimizerLast in non-O0
|
|
|
|
// builds. The function merging pass is
|
|
|
|
if (MergeFunctions)
|
|
|
|
MPM.add(createMergeFunctionsPass());
|
|
|
|
else if (!GlobalExtensions->empty() || !Extensions.empty())
|
|
|
|
MPM.add(createBarrierNoopPass());
|
|
|
|
|
|
|
|
addExtensionsToPM(EP_EnabledOnOptLevel0, MPM);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Add LibraryInfo if we have some.
|
|
|
|
if (LibraryInfo)
|
|
|
|
MPM.add(new TargetLibraryInfoWrapperPass(*LibraryInfo));
|
|
|
|
|
|
|
|
addInitialAliasAnalysisPasses(MPM);
|
|
|
|
|
|
|
|
if (!DisableUnitAtATime) {
|
|
|
|
// Infer attributes about declarations if possible.
|
|
|
|
MPM.add(createInferFunctionAttrsLegacyPass());
|
|
|
|
|
|
|
|
addExtensionsToPM(EP_ModuleOptimizerEarly, MPM);
|
|
|
|
|
|
|
|
MPM.add(createIPSCCPPass()); // IP SCCP
|
|
|
|
MPM.add(createGlobalOptimizerPass()); // Optimize out global vars
|
|
|
|
// Promote any localized global vars.
|
|
|
|
MPM.add(createPromoteMemoryToRegisterPass());
|
|
|
|
|
|
|
|
MPM.add(createDeadArgEliminationPass()); // Dead argument elimination
|
|
|
|
|
2016-03-10 02:47:11 +08:00
|
|
|
addInstructionCombiningPass(MPM); // Clean up after IPCP & DAE
|
2016-02-17 06:54:27 +08:00
|
|
|
addExtensionsToPM(EP_Peephole, MPM);
|
|
|
|
MPM.add(createCFGSimplificationPass()); // Clean up after IPCP & DAE
|
|
|
|
}
|
|
|
|
|
2016-02-17 07:02:29 +08:00
|
|
|
if (!PerformThinLTO)
|
|
|
|
/// PGO instrumentation is added during the compile phase for ThinLTO, do
|
|
|
|
/// not run it a second time
|
|
|
|
addPGOInstrPasses(MPM);
|
2016-02-17 06:54:27 +08:00
|
|
|
|
|
|
|
if (EnableNonLTOGlobalsModRef)
|
|
|
|
// We add a module alias analysis pass here. In part due to bugs in the
|
|
|
|
// analysis infrastructure this "works" in that the analysis stays alive
|
|
|
|
// for the entire SCC pass run below.
|
|
|
|
MPM.add(createGlobalsAAWrapperPass());
|
|
|
|
|
|
|
|
// Start of CallGraph SCC passes.
|
|
|
|
if (!DisableUnitAtATime)
|
|
|
|
MPM.add(createPruneEHPass()); // Remove dead EH info
|
|
|
|
if (Inliner) {
|
|
|
|
MPM.add(Inliner);
|
|
|
|
Inliner = nullptr;
|
|
|
|
}
|
|
|
|
if (!DisableUnitAtATime)
|
2016-02-18 19:03:11 +08:00
|
|
|
MPM.add(createPostOrderFunctionAttrsLegacyPass());
|
2016-02-17 06:54:27 +08:00
|
|
|
if (OptLevel > 2)
|
|
|
|
MPM.add(createArgumentPromotionPass()); // Scalarize uninlined fn args
|
|
|
|
|
|
|
|
addFunctionSimplificationPasses(MPM);
|
2011-08-03 05:50:27 +08:00
|
|
|
|
2016-02-17 07:02:29 +08:00
|
|
|
// If we are planning to perform ThinLTO later, let's not bloat the code with
|
|
|
|
// unrolling/vectorization/... now. We'll first run the inliner + CGSCC passes
|
|
|
|
// during ThinLTO and perform the rest of the optimizations afterward.
|
2016-04-25 16:47:37 +08:00
|
|
|
if (PrepareForThinLTO) {
|
2016-04-25 16:47:49 +08:00
|
|
|
// Reduce the size of the IR as much as possible.
|
|
|
|
MPM.add(createGlobalOptimizerPass());
|
2016-04-25 16:47:37 +08:00
|
|
|
// Rename anon function to be able to export them in the summary.
|
|
|
|
MPM.add(createNameAnonFunctionPass());
|
2016-02-17 07:02:29 +08:00
|
|
|
return;
|
2016-04-25 16:47:37 +08:00
|
|
|
}
|
2016-02-17 07:02:29 +08:00
|
|
|
|
2013-12-06 05:20:02 +08:00
|
|
|
// FIXME: This is a HACK! The inliner pass above implicitly creates a CGSCC
|
|
|
|
// pass manager that we are specifically trying to avoid. To prevent this
|
|
|
|
// we must insert a no-op module pass to reset the pass manager.
|
|
|
|
MPM.add(createBarrierNoopPass());
|
2014-10-14 08:31:29 +08:00
|
|
|
|
2016-02-11 17:23:53 +08:00
|
|
|
// Scheduling LoopVersioningLICM when inlining is over, because after that
|
New Loop Versioning LICM Pass
Summary:
When alias analysis is uncertain about the aliasing between any two accesses,
it will return MayAlias. This uncertainty from alias analysis restricts LICM
from proceeding further. In cases where alias analysis is uncertain we might
use loop versioning as an alternative.
Loop Versioning will create a version of the loop with aggressive aliasing
assumptions in addition to the original with conservative (default) aliasing
assumptions. The version of the loop making aggressive aliasing assumptions
will have all the memory accesses marked as no-alias. These two versions of
loop will be preceded by a memory runtime check. This runtime check consists
of bound checks for all unique memory accessed in loop, and it ensures the
lack of memory aliasing. The result of the runtime check determines which of
the loop versions is executed: If the runtime check detects any memory
aliasing, then the original loop is executed. Otherwise, the version with
aggressive aliasing assumptions is used.
The pass is off by default and can be enabled with command line option
-enable-loop-versioning-licm.
Reviewers: hfinkel, anemet, chatur01, reames
Subscribers: MatzeB, grosser, joker.eph, sanjoy, javed.absar, sbaranga,
llvm-commits
Differential Revision: http://reviews.llvm.org/D9151
llvm-svn: 259986
2016-02-06 15:47:48 +08:00
|
|
|
// we may see more accurate aliasing. Reason to run this late is that too
|
|
|
|
// early versioning may prevent further inlining due to increase of code
|
|
|
|
// size. By placing it just after inlining other optimizations which runs
|
|
|
|
// later might get benefit of no-alias assumption in clone loop.
|
|
|
|
if (UseLoopVersioningLICM) {
|
|
|
|
MPM.add(createLoopVersioningLICMPass()); // Do LoopVersioningLICM
|
|
|
|
MPM.add(createLICMPass()); // Hoist loop invariants
|
|
|
|
}
|
|
|
|
|
2016-01-08 18:55:52 +08:00
|
|
|
if (!DisableUnitAtATime)
|
|
|
|
MPM.add(createReversePostOrderFunctionAttrsPass());
|
|
|
|
|
2016-02-17 07:02:29 +08:00
|
|
|
if (!DisableUnitAtATime && OptLevel > 1 && !PrepareForLTO)
|
2015-09-02 14:34:11 +08:00
|
|
|
// Remove avail extern fns and globals definitions if we aren't
|
|
|
|
// compiling an object file for later LTO. For LTO we want to preserve
|
|
|
|
// these so they are eligible for inlining at link-time. Note if they
|
|
|
|
// are unreferenced they will be removed by GlobalDCE later, so
|
|
|
|
// this only impacts referenced available externally globals.
|
|
|
|
// Eventually they will be suppressed during codegen, but eliminating
|
|
|
|
// here enables more opportunity for GlobalDCE as it may make
|
|
|
|
// globals referenced by available external functions dead
|
|
|
|
// and saves running remaining passes on the eliminated functions.
|
|
|
|
MPM.add(createEliminateAvailableExternallyPass());
|
2016-02-17 07:02:29 +08:00
|
|
|
|
|
|
|
if (PerformThinLTO) {
|
|
|
|
// Remove dead fns and globals. Removing unreferenced functions could lead
|
|
|
|
// to more opportunities for globalopt.
|
|
|
|
MPM.add(createGlobalDCEPass());
|
|
|
|
MPM.add(createGlobalOptimizerPass());
|
|
|
|
// Remove dead fns and globals after globalopt.
|
|
|
|
MPM.add(createGlobalDCEPass());
|
|
|
|
addFunctionSimplificationPasses(MPM);
|
2015-09-02 14:34:11 +08:00
|
|
|
}
|
|
|
|
|
2015-07-23 17:34:01 +08:00
|
|
|
if (EnableNonLTOGlobalsModRef)
|
|
|
|
// We add a fresh GlobalsModRef run at this point. This is particularly
|
|
|
|
// useful as the above will have inlined, DCE'ed, and function-attr
|
|
|
|
// propagated everything. We should at this point have a reasonably minimal
|
|
|
|
// and richly annotated call graph. By computing aliasing and mod/ref
|
|
|
|
// information for all local globals here, the late loop passes and notably
|
|
|
|
// the vectorizer will be able to use them to help recognize vectorizable
|
|
|
|
// memory operations.
|
|
|
|
//
|
|
|
|
// Note that this relies on a bug in the pass manager which preserves
|
|
|
|
// a module analysis into a function pass pipeline (and throughout it) so
|
|
|
|
// long as the first function pass doesn't invalidate the module analysis.
|
|
|
|
// Thus both Float2Int and LoopRotate have to preserve AliasAnalysis for
|
|
|
|
// this to work. Fortunately, it is trivial to preserve AliasAnalysis
|
|
|
|
// (doing nothing preserves it as it is required to be conservatively
|
|
|
|
// correct in the face of IR changes).
|
[PM/AA] Rebuild LLVM's alias analysis infrastructure in a way compatible
with the new pass manager, and no longer relying on analysis groups.
This builds essentially a ground-up new AA infrastructure stack for
LLVM. The core ideas are the same that are used throughout the new pass
manager: type erased polymorphism and direct composition. The design is
as follows:
- FunctionAAResults is a type-erasing alias analysis results aggregation
interface to walk a single query across a range of results from
different alias analyses. Currently this is function-specific as we
always assume that aliasing queries are *within* a function.
- AAResultBase is a CRTP utility providing stub implementations of
various parts of the alias analysis result concept, notably in several
cases in terms of other more general parts of the interface. This can
be used to implement only a narrow part of the interface rather than
the entire interface. This isn't really ideal, this logic should be
hoisted into FunctionAAResults as currently it will cause
a significant amount of redundant work, but it faithfully models the
behavior of the prior infrastructure.
- All the alias analysis passes are ported to be wrapper passes for the
legacy PM and new-style analysis passes for the new PM with a shared
result object. In some cases (most notably CFL), this is an extremely
naive approach that we should revisit when we can specialize for the
new pass manager.
- BasicAA has been restructured to reflect that it is much more
fundamentally a function analysis because it uses dominator trees and
loop info that need to be constructed for each function.
All of the references to getting alias analysis results have been
updated to use the new aggregation interface. All the preservation and
other pass management code has been updated accordingly.
The way the FunctionAAResultsWrapperPass works is to detect the
available alias analyses when run, and add them to the results object.
This means that we should be able to continue to respect when various
passes are added to the pipeline, for example adding CFL or adding TBAA
passes should just cause their results to be available and to get folded
into this. The exception to this rule is BasicAA which really needs to
be a function pass due to using dominator trees and loop info. As
a consequence, the FunctionAAResultsWrapperPass directly depends on
BasicAA and always includes it in the aggregation.
This has significant implications for preserving analyses. Generally,
most passes shouldn't bother preserving FunctionAAResultsWrapperPass
because rebuilding the results just updates the set of known AA passes.
The exception to this rule are LoopPass instances which need to preserve
all the function analyses that the loop pass manager will end up
needing. This means preserving both BasicAAWrapperPass and the
aggregating FunctionAAResultsWrapperPass.
Now, when preserving an alias analysis, you do so by directly preserving
that analysis. This is only necessary for non-immutable-pass-provided
alias analyses though, and there are only three of interest: BasicAA,
GlobalsAA (formerly GlobalsModRef), and SCEVAA. Usually BasicAA is
preserved when needed because it (like DominatorTree and LoopInfo) is
marked as a CFG-only pass. I've expanded GlobalsAA into the preserved
set everywhere we previously were preserving all of AliasAnalysis, and
I've added SCEVAA in the intersection of that with where we preserve
SCEV itself.
One significant challenge to all of this is that the CGSCC passes were
actually using the alias analysis implementations by taking advantage of
a pretty amazing set of loopholes in the old pass manager's analysis
management code which allowed analysis groups to slide through in many
cases. Moving away from analysis groups makes this problem much more
obvious. To fix it, I've leveraged the flexibility the design of the new
PM components provides to just directly construct the relevant alias
analyses for the relevant functions in the IPO passes that need them.
This is a bit hacky, but should go away with the new pass manager, and
is already in many ways cleaner than the prior state.
Another significant challenge is that various facilities of the old
alias analysis infrastructure just don't fit any more. The most
significant of these is the alias analysis 'counter' pass. That pass
relied on the ability to snoop on AA queries at different points in the
analysis group chain. Instead, I'm planning to build printing
functionality directly into the aggregation layer. I've not included
that in this patch merely to keep it smaller.
Note that all of this needs a nearly complete rewrite of the AA
documentation. I'm planning to do that, but I'd like to make sure the
new design settles, and to flesh out a bit more of what it looks like in
the new pass manager first.
Differential Revision: http://reviews.llvm.org/D12080
llvm-svn: 247167
2015-09-10 01:55:00 +08:00
|
|
|
MPM.add(createGlobalsAAWrapperPass());
|
2015-07-23 17:34:01 +08:00
|
|
|
|
2015-03-27 18:36:57 +08:00
|
|
|
if (RunFloat2Int)
|
|
|
|
MPM.add(createFloat2IntPass());
|
|
|
|
|
2015-07-16 16:20:37 +08:00
|
|
|
addExtensionsToPM(EP_VectorizerStart, MPM);
|
|
|
|
|
2014-10-14 08:31:29 +08:00
|
|
|
// Re-rotate loops in all our loop nests. These may have fallout out of
|
|
|
|
// rotated form due to GVN or other transformations, and the vectorizer relies
|
2015-07-10 18:37:09 +08:00
|
|
|
// on the rotated form. Disable header duplication at -Oz.
|
|
|
|
MPM.add(createLoopRotatePass(SizeLevel == 2 ? 0 : -1));
|
2014-10-14 08:31:29 +08:00
|
|
|
|
2015-05-14 20:05:18 +08:00
|
|
|
// Distribute loops to allow partial vectorization. I.e. isolate dependences
|
2016-04-27 13:28:18 +08:00
|
|
|
// into separate loop that would otherwise inhibit vectorization. This is
|
|
|
|
// currently only performed for loops marked with the metadata
|
|
|
|
// llvm.loop.distribute=true or when -enable-loop-distribute is specified.
|
|
|
|
MPM.add(createLoopDistributePass(/*ProcessAllLoopsByDefault=*/false));
|
2015-05-14 20:05:18 +08:00
|
|
|
|
2013-12-06 05:20:02 +08:00
|
|
|
MPM.add(createLoopVectorizePass(DisableUnrollLoops, LoopVectorize));
|
LLE 6/6: Add LoopLoadElimination pass
Summary:
The goal of this pass is to perform store-to-load forwarding across the
backedge of a loop. E.g.:
for (i)
A[i + 1] = A[i] + B[i]
=>
T = A[0]
for (i)
T = T + B[i]
A[i + 1] = T
The pass relies on loop dependence analysis via LoopAccessAnalisys to
find opportunities of loop-carried dependences with a distance of one
between a store and a load. Since it's using LoopAccessAnalysis, it was
easy to also add support for versioning away may-aliasing intervening
stores that would otherwise prevent this transformation.
This optimization is also performed by Load-PRE in GVN without the
option of multi-versioning. As was discussed with Daniel Berlin in
http://reviews.llvm.org/D9548, this is inferior to a more loop-aware
solution applied here. Hopefully, we will be able to remove some
complexity from GVN/MemorySSA as a consequence.
In the long run, we may want to extend this pass (or create a new one if
there is little overlap) to also eliminate loop-independent redundant
loads and store that *require* versioning due to may-aliasing
intervening stores/loads. I have some motivating cases for store
elimination. My plan right now is to wait for MemorySSA to come online
first rather than using memdep for this.
The main motivation for this pass is the 456.hmmer loop in SPECint2006
where after distributing the original loop and vectorizing the top part,
we are left with the critical path exposed in the bottom loop. Being
able to promote the memory dependence into a register depedence (even
though the HW does perform store-to-load fowarding as well) results in a
major gain (~20%). This gain also transfers over to x86: it's
around 8-10%.
Right now the pass is off by default and can be enabled
with -enable-loop-load-elim. On the LNT testsuite, there are two
performance changes (negative number -> improvement):
1. -28% in Polybench/linear-algebra/solvers/dynprog: the length of the
critical paths is reduced
2. +2% in Polybench/stencils/adi: Unfortunately, I couldn't reproduce this
outside of LNT
The pass is scheduled after the loop vectorizer (which is after loop
distribution). The rational is to try to reuse LAA state, rather than
recomputing it. The order between LV and LLE is not critical because
normally LV does not touch scalar st->ld forwarding cases where
vectorizing would inhibit the CPU's st->ld forwarding to kick in.
LoopLoadElimination requires LAA to provide the full set of dependences
(including forward dependences). LAA is known to omit loop-independent
dependences in certain situations. The big comment before
removeDependencesFromMultipleStores explains why this should not occur
for the cases that we're interested in.
Reviewers: dberlin, hfinkel
Subscribers: junbuml, dberlin, mssimpso, rengolin, sanjoy, llvm-commits
Differential Revision: http://reviews.llvm.org/D13259
llvm-svn: 252017
2015-11-04 07:50:08 +08:00
|
|
|
|
|
|
|
// Eliminate loads by forwarding stores from the previous iteration to loads
|
|
|
|
// of the current iteration.
|
|
|
|
if (EnableLoopLoadElim)
|
|
|
|
MPM.add(createLoopLoadEliminationPass());
|
|
|
|
|
2013-12-06 05:20:02 +08:00
|
|
|
// FIXME: Because of #pragma vectorize enable, the passes below are always
|
|
|
|
// inserted in the pipeline, even when the vectorizer doesn't run (ex. when
|
|
|
|
// on -O1 and no #pragma is found). Would be good to have these two passes
|
|
|
|
// as function calls, so that we can only pass them when the vectorizer
|
|
|
|
// changed the code.
|
2016-03-10 02:47:11 +08:00
|
|
|
addInstructionCombiningPass(MPM);
|
2014-10-14 08:31:29 +08:00
|
|
|
if (OptLevel > 1 && ExtraVectorizerPasses) {
|
|
|
|
// At higher optimization levels, try to clean up any runtime overlap and
|
|
|
|
// alignment checks inserted by the vectorizer. We want to track correlated
|
|
|
|
// runtime checks for two inner loops in the same outer loop, fold any
|
|
|
|
// common computations, hoist loop-invariant aspects out of any outer loop,
|
|
|
|
// and unswitch the runtime checks if possible. Once hoisted, we may have
|
|
|
|
// dead (or speculatable) control flows or more combining opportunities.
|
|
|
|
MPM.add(createEarlyCSEPass());
|
|
|
|
MPM.add(createCorrelatedValuePropagationPass());
|
2016-03-10 02:47:11 +08:00
|
|
|
addInstructionCombiningPass(MPM);
|
2014-10-14 08:31:29 +08:00
|
|
|
MPM.add(createLICMPass());
|
|
|
|
MPM.add(createLoopUnswitchPass(SizeLevel || OptLevel < 3));
|
|
|
|
MPM.add(createCFGSimplificationPass());
|
2016-03-10 02:47:11 +08:00
|
|
|
addInstructionCombiningPass(MPM);
|
2014-10-14 08:31:29 +08:00
|
|
|
}
|
2014-08-06 20:56:19 +08:00
|
|
|
|
|
|
|
if (RunSLPAfterLoopVectorization) {
|
2014-10-14 08:31:29 +08:00
|
|
|
if (SLPVectorize) {
|
2014-08-06 20:56:19 +08:00
|
|
|
MPM.add(createSLPVectorizerPass()); // Vectorize parallel scalar chains.
|
2014-10-14 08:31:29 +08:00
|
|
|
if (OptLevel > 1 && ExtraVectorizerPasses) {
|
|
|
|
MPM.add(createEarlyCSEPass());
|
|
|
|
}
|
|
|
|
}
|
2014-08-06 20:56:19 +08:00
|
|
|
|
|
|
|
if (BBVectorize) {
|
|
|
|
MPM.add(createBBVectorizePass());
|
2016-03-10 02:47:11 +08:00
|
|
|
addInstructionCombiningPass(MPM);
|
2014-08-06 20:56:19 +08:00
|
|
|
addExtensionsToPM(EP_Peephole, MPM);
|
|
|
|
if (OptLevel > 1 && UseGVNAfterVectorization)
|
2014-08-21 21:13:17 +08:00
|
|
|
MPM.add(createGVNPass(DisableGVNLoadPRE)); // Remove redundancies
|
2014-08-06 20:56:19 +08:00
|
|
|
else
|
|
|
|
MPM.add(createEarlyCSEPass()); // Catch trivial redundancies
|
|
|
|
|
|
|
|
// BBVectorize may have significantly shortened a loop body; unroll again.
|
|
|
|
if (!DisableUnrollLoops)
|
|
|
|
MPM.add(createLoopUnrollPass());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-05-25 18:27:02 +08:00
|
|
|
addExtensionsToPM(EP_Peephole, MPM);
|
2013-12-06 05:20:02 +08:00
|
|
|
MPM.add(createCFGSimplificationPass());
|
2016-03-10 02:47:11 +08:00
|
|
|
addInstructionCombiningPass(MPM);
|
2013-06-24 15:21:47 +08:00
|
|
|
|
2015-03-12 13:36:01 +08:00
|
|
|
if (!DisableUnrollLoops) {
|
2014-04-01 07:23:51 +08:00
|
|
|
MPM.add(createLoopUnrollPass()); // Unroll small loops
|
|
|
|
|
2015-05-15 06:02:54 +08:00
|
|
|
// LoopUnroll may generate some redundancy to clean up.
|
2016-03-10 02:47:11 +08:00
|
|
|
addInstructionCombiningPass(MPM);
|
2015-05-15 06:02:54 +08:00
|
|
|
|
2015-03-12 13:36:01 +08:00
|
|
|
// Runtime unrolling will introduce runtime check in loop prologue. If the
|
|
|
|
// unrolled loop is an inner loop, then the prologue will be inside the
|
|
|
|
// outer loop. LICM pass can help to promote the runtime check out if the
|
|
|
|
// checked value is loop invariant.
|
|
|
|
MPM.add(createLICMPass());
|
|
|
|
}
|
|
|
|
|
2014-09-08 04:05:11 +08:00
|
|
|
// After vectorization and unrolling, assume intrinsics may tell us more
|
|
|
|
// about pointer alignments.
|
|
|
|
MPM.add(createAlignmentFromAssumptionsPass());
|
|
|
|
|
2011-08-03 05:50:27 +08:00
|
|
|
if (!DisableUnitAtATime) {
|
|
|
|
// FIXME: We shouldn't bother with this anymore.
|
|
|
|
MPM.add(createStripDeadPrototypesPass()); // Get rid of dead prototypes
|
|
|
|
|
2012-09-29 05:23:26 +08:00
|
|
|
// GlobalOpt already deletes dead functions and globals, at -O2 try a
|
2011-08-03 05:50:27 +08:00
|
|
|
// late pass of GlobalDCE. It is capable of deleting dead cycles.
|
2012-09-29 05:23:26 +08:00
|
|
|
if (OptLevel > 1) {
|
2011-08-03 05:50:27 +08:00
|
|
|
MPM.add(createGlobalDCEPass()); // Remove dead fns and globals.
|
|
|
|
MPM.add(createConstantMergePass()); // Merge dup global constants
|
2012-09-29 05:23:26 +08:00
|
|
|
}
|
2011-08-03 05:50:27 +08:00
|
|
|
}
|
2014-09-14 05:46:00 +08:00
|
|
|
|
|
|
|
if (MergeFunctions)
|
|
|
|
MPM.add(createMergeFunctionsPass());
|
|
|
|
|
2012-03-24 07:22:59 +08:00
|
|
|
addExtensionsToPM(EP_OptimizerLast, MPM);
|
2011-08-03 05:50:27 +08:00
|
|
|
}
|
|
|
|
|
2015-02-13 18:01:29 +08:00
|
|
|
/// Add the interprocedural and follow-up scalar optimization passes used for
/// full (monolithic) link-time optimization. Only invoked by
/// populateLTOPassManager when OptLevel > 1. Pass ORDER here is load-bearing:
/// each pass is scheduled to clean up or exploit what the previous one exposed.
void PassManagerBuilder::addLTOOptimizationPasses(legacy::PassManagerBase &PM) {
  // Provide AliasAnalysis services for optimizations.
  addInitialAliasAnalysisPasses(PM);

  // If a combined module summary index is available, run the function
  // importer before any IPO so imported bodies are optimized with the rest.
  if (ModuleSummary)
    PM.add(createFunctionImportPass(ModuleSummary));

  // Allow forcing function attributes as a debugging and tuning aid.
  PM.add(createForceFunctionAttrsLegacyPass());

  // Infer attributes about declarations if possible.
  PM.add(createInferFunctionAttrsLegacyPass());

  // Propagate constants at call sites into the functions they call.  This
  // opens opportunities for globalopt (and inlining) by substituting function
  // pointers passed as arguments to direct uses of functions.
  PM.add(createIPSCCPPass());

  // Now that we internalized some globals, see if we can hack on them!
  PM.add(createPostOrderFunctionAttrsLegacyPass());
  PM.add(createReversePostOrderFunctionAttrsPass());
  PM.add(createGlobalOptimizerPass());
  // Promote any localized global vars.
  PM.add(createPromoteMemoryToRegisterPass());

  // Linking modules together can lead to duplicated global constants, only
  // keep one copy of each constant.
  PM.add(createConstantMergePass());

  // Remove unused arguments from functions.
  PM.add(createDeadArgEliminationPass());

  // Reduce the code after globalopt and ipsccp.  Both can open up significant
  // simplification opportunities, and both can propagate functions through
  // function pointers.  When this happens, we often have to resolve varargs
  // calls, etc, so let instcombine do this.
  addInstructionCombiningPass(PM);
  addExtensionsToPM(EP_Peephole, PM);

  // Inline small functions.  Remember whether we had an inliner before handing
  // it to PM: the pass manager takes ownership of added passes, so clear our
  // pointer to avoid the pass being added (and deleted) twice.
  bool RunInliner = Inliner;
  if (RunInliner) {
    PM.add(Inliner);
    Inliner = nullptr;
  }

  PM.add(createPruneEHPass());   // Remove dead EH info.

  // Optimize globals again if we ran the inliner.
  if (RunInliner)
    PM.add(createGlobalOptimizerPass());
  PM.add(createGlobalDCEPass()); // Remove dead functions.

  // If we didn't decide to inline a function, check to see if we can
  // transform it to pass arguments by value instead of by reference.
  PM.add(createArgumentPromotionPass());

  // The IPO passes may leave cruft around.  Clean up after them.
  addInstructionCombiningPass(PM);
  addExtensionsToPM(EP_Peephole, PM);
  PM.add(createJumpThreadingPass());

  // Break up allocas
  if (UseNewSROA)
    PM.add(createSROAPass());
  else
    PM.add(createScalarReplAggregatesPass());

  // Run a few AA driven optimizations here and now, to cleanup the code.
  PM.add(createPostOrderFunctionAttrsLegacyPass()); // Add nocapture.
  PM.add(createGlobalsAAWrapperPass()); // IP alias analysis.

  PM.add(createLICMPass());                 // Hoist loop invariants.
  if (EnableMLSM)
    PM.add(createMergedLoadStoreMotionPass()); // Merge ld/st in diamonds.
  PM.add(createGVNPass(DisableGVNLoadPRE)); // Remove redundancies.
  PM.add(createMemCpyOptPass());            // Remove dead memcpys.

  // Nuke dead stores.
  PM.add(createDeadStoreEliminationPass());

  // More loops are countable; try to optimize them.
  PM.add(createIndVarSimplifyPass());
  PM.add(createLoopDeletionPass());
  if (EnableLoopInterchange)
    PM.add(createLoopInterchangePass());

  if (!DisableUnrollLoops)
    PM.add(createSimpleLoopUnrollPass());   // Unroll small loops
  PM.add(createLoopVectorizePass(true, LoopVectorize));
  // The vectorizer may have significantly shortened a loop body; unroll again.
  if (!DisableUnrollLoops)
    PM.add(createLoopUnrollPass());

  // Now that we've optimized loops (in particular loop induction variables),
  // we may have exposed more scalar opportunities. Run parts of the scalar
  // optimizer again at this point.
  addInstructionCombiningPass(PM); // Initial cleanup
  PM.add(createCFGSimplificationPass()); // if-convert
  PM.add(createSCCPPass()); // Propagate exposed constants
  addInstructionCombiningPass(PM); // Clean up again
  PM.add(createBitTrackingDCEPass());

  // More scalar chains could be vectorized due to more alias information
  if (RunSLPAfterLoopVectorization)
    if (SLPVectorize)
      PM.add(createSLPVectorizerPass()); // Vectorize parallel scalar chains.

  // After vectorization, assume intrinsics may tell us more about pointer
  // alignments.
  PM.add(createAlignmentFromAssumptionsPass());

  if (LoadCombine)
    PM.add(createLoadCombinePass());

  // Cleanup and simplify the code after the scalar optimizations.
  addInstructionCombiningPass(PM);
  addExtensionsToPM(EP_Peephole, PM);

  PM.add(createJumpThreadingPass());
}
|
2011-08-03 05:50:27 +08:00
|
|
|
|
2016-02-10 06:50:34 +08:00
|
|
|
/// Add the LTO passes that must run before the main LTO optimization
/// pipeline (run whenever OptLevel != 0 — see populateLTOPassManager).
void PassManagerBuilder::addEarlyLTOOptimizationPasses(
    legacy::PassManagerBase &PM) {
  // Remove unused virtual tables to improve the quality of code generated by
  // whole-program devirtualization and bitset lowering.
  PM.add(createGlobalDCEPass());

  // Apply whole-program devirtualization and virtual constant propagation.
  PM.add(createWholeProgramDevirtPass());
}
|
|
|
|
|
2015-03-20 06:01:00 +08:00
|
|
|
/// Add the cleanup passes that run at the very end of the LTO pipeline
/// (run whenever OptLevel != 0 — see populateLTOPassManager).
void PassManagerBuilder::addLateLTOOptimizationPasses(
    legacy::PassManagerBase &PM) {
  // Delete basic blocks, which optimization passes may have killed.
  PM.add(createCFGSimplificationPass());

  // Drop bodies of available externally objects to improve GlobalDCE.
  PM.add(createEliminateAvailableExternallyPass());

  // Now that we have optimized the program, discard unreachable functions.
  PM.add(createGlobalDCEPass());

  // FIXME: this is profitable (for compiler time) to do at -O0 too, but
  // currently it damages debug info.
  if (MergeFunctions)
    PM.add(createMergeFunctionsPass());
}
|
2011-08-10 06:17:34 +08:00
|
|
|
|
2016-02-17 07:02:29 +08:00
|
|
|
/// Build the ThinLTO backend pipeline. Brackets the regular module pipeline
/// with optional verification and, when a summary index is present, function
/// importing.
void PassManagerBuilder::populateThinLTOPassManager(
    legacy::PassManagerBase &PM) {
  // Flag consumed by populateModulePassManager to tailor the pipeline for a
  // ThinLTO backend run; restored to false before returning.
  PerformThinLTO = true;

  if (VerifyInput)
    PM.add(createVerifierPass());

  // Import functions identified by the combined module summary index, if any.
  if (ModuleSummary)
    PM.add(createFunctionImportPass(ModuleSummary));

  populateModulePassManager(PM);

  if (VerifyOutput)
    PM.add(createVerifierPass());
  PerformThinLTO = false;
}
|
|
|
|
|
2015-02-13 18:01:29 +08:00
|
|
|
/// Build the full (monolithic) LTO pipeline: early IPO passes, the main LTO
/// optimization pipeline (only above -O1), CFI-related lowering, and late
/// cleanup, optionally bracketed by IR verification.
void PassManagerBuilder::populateLTOPassManager(legacy::PassManagerBase &PM) {
  // Make target library information available to the passes below.
  if (LibraryInfo)
    PM.add(new TargetLibraryInfoWrapperPass(*LibraryInfo));

  if (VerifyInput)
    PM.add(createVerifierPass());

  if (OptLevel != 0)
    addEarlyLTOOptimizationPasses(PM);

  if (OptLevel > 1)
    addLTOOptimizationPasses(PM);

  // Create a function that performs CFI checks for cross-DSO calls with targets
  // in the current module.
  PM.add(createCrossDSOCFIPass());

  // Lower bit sets to globals. This pass supports Clang's control flow
  // integrity mechanisms (-fsanitize=cfi*) and needs to run at link time if CFI
  // is enabled. The pass does nothing if CFI is disabled.
  PM.add(createLowerBitSetsPass());

  if (OptLevel != 0)
    addLateLTOOptimizationPasses(PM);

  if (VerifyOutput)
    PM.add(createVerifierPass());
}
|
|
|
|
|
2013-04-23 06:47:22 +08:00
|
|
|
/// Convert an opaque C-API handle back into the C++ builder it wraps.
inline PassManagerBuilder *unwrap(LLVMPassManagerBuilderRef P) {
  auto *Builder = reinterpret_cast<PassManagerBuilder *>(P);
  return Builder;
}
|
|
|
|
|
|
|
|
/// Wrap a C++ builder pointer in the opaque handle exposed by the C API.
inline LLVMPassManagerBuilderRef wrap(PassManagerBuilder *P) {
  auto Ref = reinterpret_cast<LLVMPassManagerBuilderRef>(P);
  return Ref;
}
|
|
|
|
|
2012-11-16 00:51:49 +08:00
|
|
|
/// C API: allocate a default-configured builder; the caller releases it with
/// LLVMPassManagerBuilderDispose().
LLVMPassManagerBuilderRef LLVMPassManagerBuilderCreate() {
  return wrap(new PassManagerBuilder());
}
|
|
|
|
|
|
|
|
/// C API: destroy a builder created by LLVMPassManagerBuilderCreate().
void LLVMPassManagerBuilderDispose(LLVMPassManagerBuilderRef PMB) {
  delete unwrap(PMB);
}
|
|
|
|
|
|
|
|
void
|
|
|
|
LLVMPassManagerBuilderSetOptLevel(LLVMPassManagerBuilderRef PMB,
|
|
|
|
unsigned OptLevel) {
|
|
|
|
PassManagerBuilder *Builder = unwrap(PMB);
|
|
|
|
Builder->OptLevel = OptLevel;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
LLVMPassManagerBuilderSetSizeLevel(LLVMPassManagerBuilderRef PMB,
|
|
|
|
unsigned SizeLevel) {
|
|
|
|
PassManagerBuilder *Builder = unwrap(PMB);
|
|
|
|
Builder->SizeLevel = SizeLevel;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
LLVMPassManagerBuilderSetDisableUnitAtATime(LLVMPassManagerBuilderRef PMB,
|
|
|
|
LLVMBool Value) {
|
|
|
|
PassManagerBuilder *Builder = unwrap(PMB);
|
|
|
|
Builder->DisableUnitAtATime = Value;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
LLVMPassManagerBuilderSetDisableUnrollLoops(LLVMPassManagerBuilderRef PMB,
|
|
|
|
LLVMBool Value) {
|
|
|
|
PassManagerBuilder *Builder = unwrap(PMB);
|
|
|
|
Builder->DisableUnrollLoops = Value;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// C API: retained for ABI compatibility only — intentionally a no-op.
void
LLVMPassManagerBuilderSetDisableSimplifyLibCalls(LLVMPassManagerBuilderRef PMB,
                                                 LLVMBool Value) {
  // NOTE: The simplify-libcalls pass has been removed.
}
|
|
|
|
|
|
|
|
void
|
|
|
|
LLVMPassManagerBuilderUseInlinerWithThreshold(LLVMPassManagerBuilderRef PMB,
|
|
|
|
unsigned Threshold) {
|
|
|
|
PassManagerBuilder *Builder = unwrap(PMB);
|
|
|
|
Builder->Inliner = createFunctionInliningPass(Threshold);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
LLVMPassManagerBuilderPopulateFunctionPassManager(LLVMPassManagerBuilderRef PMB,
|
|
|
|
LLVMPassManagerRef PM) {
|
|
|
|
PassManagerBuilder *Builder = unwrap(PMB);
|
2015-02-13 18:01:29 +08:00
|
|
|
legacy::FunctionPassManager *FPM = unwrap<legacy::FunctionPassManager>(PM);
|
2011-08-10 06:17:34 +08:00
|
|
|
Builder->populateFunctionPassManager(*FPM);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
LLVMPassManagerBuilderPopulateModulePassManager(LLVMPassManagerBuilderRef PMB,
|
|
|
|
LLVMPassManagerRef PM) {
|
|
|
|
PassManagerBuilder *Builder = unwrap(PMB);
|
2015-02-13 18:01:29 +08:00
|
|
|
legacy::PassManagerBase *MPM = unwrap(PM);
|
2011-08-10 06:17:34 +08:00
|
|
|
Builder->populateModulePassManager(*MPM);
|
|
|
|
}
|
|
|
|
|
|
|
|
/// C API: fill an LTO pass manager using the wrapped builder's settings.
/// \p Internalize is accepted for source compatibility but ignored.
void LLVMPassManagerBuilderPopulateLTOPassManager(LLVMPassManagerBuilderRef PMB,
                                                  LLVMPassManagerRef PM,
                                                  LLVMBool Internalize,
                                                  LLVMBool RunInliner) {
  PassManagerBuilder *Builder = unwrap(PMB);

  // A small backwards compatibility hack: populateLTOPassManager used to take
  // a RunInliner option; honor it by installing the default inliner when the
  // caller requested one and none has been configured yet.
  if (RunInliner && !Builder->Inliner)
    Builder->Inliner = createFunctionInliningPass();

  Builder->populateLTOPassManager(*unwrap(PM));
}
|