//===- PassManagerBuilder.cpp - Build Standard Pass -----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the PassManagerBuilder class, which is used to set up a
// "standard" optimization sequence suitable for languages like C and C++.
//
//===----------------------------------------------------------------------===//
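
// Typical usage (sketch): clients configure a builder, then populate legacy
// pass managers with it. 'M' is assumed to be an existing llvm::Module; the
// inliner is optional and is owned by the builder once assigned.
//
//   PassManagerBuilder Builder;
//   Builder.OptLevel = 2;                            // -O2-style pipeline.
//   Builder.Inliner = createFunctionInliningPass();  // Optional inliner.
//   legacy::FunctionPassManager FPM(&M);
//   legacy::PassManager MPM;
//   Builder.populateFunctionPassManager(FPM);
//   Builder.populateModulePassManager(MPM);
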
#include "llvm/Transforms/IPO/PassManagerBuilder.h"
|
2011-08-10 06:17:34 +08:00
|
|
|
#include "llvm-c/Transforms/PassManagerBuilder.h"
|
2020-01-29 13:45:27 +08:00
|
|
|
#include "llvm/ADT/STLExtras.h"
|
2012-12-04 00:50:05 +08:00
|
|
|
#include "llvm/ADT/SmallVector.h"
|
2015-08-06 15:33:15 +08:00
|
|
|
#include "llvm/Analysis/BasicAliasAnalysis.h"
|
2016-07-06 08:26:41 +08:00
|
|
|
#include "llvm/Analysis/CFLAndersAliasAnalysis.h"
|
|
|
|
#include "llvm/Analysis/CFLSteensAliasAnalysis.h"
|
2015-08-14 11:48:20 +08:00
|
|
|
#include "llvm/Analysis/GlobalsModRef.h"
|
2016-08-12 02:24:08 +08:00
|
|
|
#include "llvm/Analysis/InlineCost.h"
|
2015-12-27 16:41:34 +08:00
|
|
|
#include "llvm/Analysis/Passes.h"
|
2015-08-14 10:55:50 +08:00
|
|
|
#include "llvm/Analysis/ScopedNoAliasAA.h"
|
2015-01-15 10:16:27 +08:00
|
|
|
#include "llvm/Analysis/TargetLibraryInfo.h"
|
2015-08-14 11:33:48 +08:00
|
|
|
#include "llvm/Analysis/TypeBasedAliasAnalysis.h"
|
2015-12-27 16:41:34 +08:00
|
|
|
#include "llvm/IR/DataLayout.h"
|
|
|
|
#include "llvm/IR/LegacyPassManager.h"
|
|
|
|
#include "llvm/IR/Verifier.h"
|
|
|
|
#include "llvm/Support/CommandLine.h"
|
|
|
|
#include "llvm/Support/ManagedStatic.h"
|
2018-04-24 23:40:07 +08:00
|
|
|
#include "llvm/Transforms/AggressiveInstCombine/AggressiveInstCombine.h"
|
2012-12-04 00:50:05 +08:00
|
|
|
#include "llvm/Transforms/IPO.h"
|
#include "llvm/Transforms/IPO/Attributor.h"
#include "llvm/Transforms/IPO/ForceFunctionAttrs.h"
#include "llvm/Transforms/IPO/FunctionAttrs.h"
#include "llvm/Transforms/IPO/InferFunctionAttrs.h"
#include "llvm/Transforms/InstCombine/InstCombine.h"
#include "llvm/Transforms/Instrumentation.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Scalar/GVN.h"
#include "llvm/Transforms/Scalar/InstSimplifyPass.h"
#include "llvm/Transforms/Scalar/LICM.h"
#include "llvm/Transforms/Scalar/LoopUnrollPass.h"
#include "llvm/Transforms/Scalar/SimpleLoopUnswitch.h"
#include "llvm/Transforms/Utils.h"
#include "llvm/Transforms/Vectorize.h"
#include "llvm/Transforms/Vectorize/LoopVectorize.h"
#include "llvm/Transforms/Vectorize/SLPVectorizer.h"
#include "llvm/Transforms/Vectorize/VectorCombine.h"
using namespace llvm;

cl::opt<bool> RunPartialInlining("enable-partial-inlining", cl::init(false),
                                 cl::Hidden, cl::ZeroOrMore,
                                 cl::desc("Run Partial inlining pass"));

static cl::opt<bool>
    UseGVNAfterVectorization("use-gvn-after-vectorization",
                             cl::init(false), cl::Hidden,
                             cl::desc("Run GVN instead of Early CSE after "
                                      "vectorization passes"));

cl::opt<bool> ExtraVectorizerPasses(
    "extra-vectorizer-passes", cl::init(false), cl::Hidden,
    cl::desc("Run cleanup optimization passes after vectorization."));

static cl::opt<bool>
    RunLoopRerolling("reroll-loops", cl::Hidden,
                     cl::desc("Run the loop rerolling pass"));

cl::opt<bool> RunNewGVN("enable-newgvn", cl::init(false), cl::Hidden,
                        cl::desc("Run the NewGVN pass"));

// Experimental option to use CFL-AA
enum class CFLAAType { None, Steensgaard, Andersen, Both };
static cl::opt<::CFLAAType>
    UseCFLAA("use-cfl-aa", cl::init(::CFLAAType::None), cl::Hidden,
             cl::desc("Enable the new, experimental CFL alias analysis"),
             cl::values(clEnumValN(::CFLAAType::None, "none", "Disable CFL-AA"),
                        clEnumValN(::CFLAAType::Steensgaard, "steens",
                                   "Enable unification-based CFL-AA"),
                        clEnumValN(::CFLAAType::Andersen, "anders",
                                   "Enable inclusion-based CFL-AA"),
                        clEnumValN(::CFLAAType::Both, "both",
                                   "Enable both variants of CFL-AA")));

cl::opt<bool> EnableLoopInterchange(
    "enable-loopinterchange", cl::init(false), cl::Hidden,
    cl::desc("Enable the experimental LoopInterchange Pass"));

cl::opt<bool> EnableUnrollAndJam("enable-unroll-and-jam", cl::init(false),
                                 cl::Hidden,
                                 cl::desc("Enable Unroll And Jam Pass"));

cl::opt<bool> EnableLoopFlatten("enable-loop-flatten", cl::init(false),
                                cl::Hidden,
                                cl::desc("Enable the LoopFlatten Pass"));

static cl::opt<bool>
    EnablePrepareForThinLTO("prepare-for-thinlto", cl::init(false), cl::Hidden,
                            cl::desc("Enable preparation for ThinLTO."));

static cl::opt<bool>
    EnablePerformThinLTO("perform-thinlto", cl::init(false), cl::Hidden,
                         cl::desc("Enable performing ThinLTO."));

cl::opt<bool> EnableHotColdSplit("hot-cold-split", cl::init(false),
                                 cl::ZeroOrMore,
                                 cl::desc("Enable hot-cold splitting pass"));

cl::opt<bool> EnableIROutliner("ir-outliner", cl::init(false), cl::Hidden,
                               cl::desc("Enable IR outliner pass"));

static cl::opt<bool> UseLoopVersioningLICM(
    "enable-loop-versioning-licm", cl::init(false), cl::Hidden,
    cl::desc("Enable the experimental Loop Versioning LICM pass"));

cl::opt<bool>
    DisablePreInliner("disable-preinline", cl::init(false), cl::Hidden,
                      cl::desc("Disable pre-instrumentation inliner"));

cl::opt<int> PreInlineThreshold(
    "preinline-threshold", cl::Hidden, cl::init(75), cl::ZeroOrMore,
    cl::desc("Control the amount of inlining in pre-instrumentation inliner "
             "(default = 75)"));

cl::opt<bool>
    EnableGVNHoist("enable-gvn-hoist", cl::init(false), cl::ZeroOrMore,
                   cl::desc("Enable the GVN hoisting pass (default = off)"));

static cl::opt<bool>
    DisableLibCallsShrinkWrap("disable-libcalls-shrinkwrap", cl::init(false),
                              cl::Hidden,
                              cl::desc("Disable shrink-wrap library calls"));

static cl::opt<bool> EnableSimpleLoopUnswitch(
    "enable-simple-loop-unswitch", cl::init(false), cl::Hidden,
    cl::desc("Enable the simple loop unswitch pass. Also enables independent "
             "cleanup passes integrated into the loop pass manager pipeline."));

cl::opt<bool>
    EnableGVNSink("enable-gvn-sink", cl::init(false), cl::ZeroOrMore,
                  cl::desc("Enable the GVN sinking pass (default = off)"));

// This option is used in simplifying testing SampleFDO optimizations for
// profile loading.
cl::opt<bool>
    EnableCHR("enable-chr", cl::init(true), cl::Hidden,
              cl::desc("Enable control height reduction optimization (CHR)"));

cl::opt<bool> FlattenedProfileUsed(
    "flattened-profile-used", cl::init(false), cl::Hidden,
    cl::desc("Indicate the sample profile being used is flattened, i.e., "
             "no inline hierarchy exists in the profile. "));

cl::opt<bool> EnableOrderFileInstrumentation(
    "enable-order-file-instrumentation", cl::init(false), cl::Hidden,
    cl::desc("Enable order file instrumentation (default = off)"));

cl::opt<bool> EnableMatrix(
    "enable-matrix", cl::init(false), cl::Hidden,
    cl::desc("Enable lowering of the matrix intrinsics"));
cl::opt<bool> EnableConstraintElimination(
    "enable-constraint-elimination", cl::init(false), cl::Hidden,
    cl::desc(
        "Enable pass to eliminate conditions based on linear constraints."));

cl::opt<AttributorRunOption> AttributorRun(
    "attributor-enable", cl::Hidden, cl::init(AttributorRunOption::NONE),
    cl::desc("Enable the attributor inter-procedural deduction pass."),
    cl::values(clEnumValN(AttributorRunOption::ALL, "all",
                          "enable all attributor runs"),
               clEnumValN(AttributorRunOption::MODULE, "module",
                          "enable module-wide attributor runs"),
               clEnumValN(AttributorRunOption::CGSCC, "cgscc",
                          "enable call graph SCC attributor runs"),
               clEnumValN(AttributorRunOption::NONE, "none",
                          "disable attributor runs")));

extern cl::opt<bool> EnableKnowledgeRetention;

PassManagerBuilder::PassManagerBuilder() {
  OptLevel = 2;
  SizeLevel = 0;
  LibraryInfo = nullptr;
  Inliner = nullptr;
  DisableUnrollLoops = false;
  SLPVectorize = false;
  LoopVectorize = true;
  LoopsInterleaved = true;
  RerollLoops = RunLoopRerolling;
  NewGVN = RunNewGVN;
  LicmMssaOptCap = SetLicmMssaOptCap;
  LicmMssaNoAccForPromotionCap = SetLicmMssaNoAccForPromotionCap;
  DisableGVNLoadPRE = false;
  ForgetAllSCEVInLoopUnroll = ForgetSCEVInLoopUnroll;
  VerifyInput = false;
  VerifyOutput = false;
  MergeFunctions = false;
  PrepareForLTO = false;
  EnablePGOInstrGen = false;
  EnablePGOCSInstrGen = false;
  EnablePGOCSInstrUse = false;
  PGOInstrGen = "";
  PGOInstrUse = "";
  PGOSampleUse = "";
  PrepareForThinLTO = EnablePrepareForThinLTO;
  PerformThinLTO = EnablePerformThinLTO;
  DivergentTarget = false;
  CallGraphProfile = true;
}

PassManagerBuilder::~PassManagerBuilder() {
  delete LibraryInfo;
  delete Inliner;
}

/// Set of global extensions, automatically added as part of the standard set.
static ManagedStatic<
    SmallVector<std::tuple<PassManagerBuilder::ExtensionPointTy,
                           PassManagerBuilder::ExtensionFn,
                           PassManagerBuilder::GlobalExtensionID>,
                8>>
    GlobalExtensions;
static PassManagerBuilder::GlobalExtensionID GlobalExtensionsCounter;

/// Check if GlobalExtensions is constructed and not empty.
/// Since GlobalExtensions is a managed static, calling 'empty()' will trigger
/// the construction of the object.
static bool GlobalExtensionsNotEmpty() {
  return GlobalExtensions.isConstructed() && !GlobalExtensions->empty();
}

PassManagerBuilder::GlobalExtensionID
PassManagerBuilder::addGlobalExtension(PassManagerBuilder::ExtensionPointTy Ty,
                                       PassManagerBuilder::ExtensionFn Fn) {
  auto ExtensionID = GlobalExtensionsCounter++;
  GlobalExtensions->push_back(std::make_tuple(Ty, std::move(Fn), ExtensionID));
  return ExtensionID;
}
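
// Registration sketch: out-of-tree plugins normally reach addGlobalExtension
// through the RegisterStandardPasses helper declared in PassManagerBuilder.h;
// 'MyPass' is a hypothetical pass used only for illustration.
//
//   static RegisterStandardPasses
//       RegisterMyPass(PassManagerBuilder::EP_EarlyAsPossible,
//                      [](const PassManagerBuilder &,
//                         legacy::PassManagerBase &PM) {
//                        PM.add(new MyPass());
//                      });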

void PassManagerBuilder::removeGlobalExtension(
    PassManagerBuilder::GlobalExtensionID ExtensionID) {
  // RegisterStandardPasses may try to call this function after GlobalExtensions
  // has already been destroyed; doing so should not generate an error.
  if (!GlobalExtensions.isConstructed())
    return;

  auto GlobalExtension =
      llvm::find_if(*GlobalExtensions, [ExtensionID](const auto &elem) {
        return std::get<2>(elem) == ExtensionID;
      });
  assert(GlobalExtension != GlobalExtensions->end() &&
         "The extension ID to be removed should always be valid.");

  GlobalExtensions->erase(GlobalExtension);
}

void PassManagerBuilder::addExtension(ExtensionPointTy Ty, ExtensionFn Fn) {
  Extensions.push_back(std::make_pair(Ty, std::move(Fn)));
}
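
// Per-builder sketch: unlike the global extensions above, addExtension only
// affects this builder instance. The pass chosen here is illustrative.
//
//   PassManagerBuilder Builder;
//   Builder.addExtension(PassManagerBuilder::EP_Peephole,
//                        [](const PassManagerBuilder &,
//                           legacy::PassManagerBase &PM) {
//                          PM.add(createInstructionCombiningPass());
//                        });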

void PassManagerBuilder::addExtensionsToPM(ExtensionPointTy ETy,
                                           legacy::PassManagerBase &PM) const {
  if (GlobalExtensionsNotEmpty()) {
    for (auto &Ext : *GlobalExtensions) {
      if (std::get<0>(Ext) == ETy)
        std::get<1>(Ext)(*this, PM);
    }
  }
  for (unsigned i = 0, e = Extensions.size(); i != e; ++i)
    if (Extensions[i].first == ETy)
      Extensions[i].second(*this, PM);
}

void PassManagerBuilder::addInitialAliasAnalysisPasses(
    legacy::PassManagerBase &PM) const {
  switch (UseCFLAA) {
  case ::CFLAAType::Steensgaard:
    PM.add(createCFLSteensAAWrapperPass());
    break;
  case ::CFLAAType::Andersen:
    PM.add(createCFLAndersAAWrapperPass());
    break;
  case ::CFLAAType::Both:
    PM.add(createCFLSteensAAWrapperPass());
    PM.add(createCFLAndersAAWrapperPass());
    break;
  default:
    break;
  }

  // Add TypeBasedAliasAnalysis before BasicAliasAnalysis so that
  // BasicAliasAnalysis wins if they disagree. This is intended to help
  // support "obvious" type-punning idioms.
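  // For instance (sketch), given the type-punned pair of accesses
  //   int X; *(float *)&X = 0.0f; int Y = X;
  // TBAA alone would report the two accesses as NoAlias based on their types;
  // BasicAA sees the same underlying location, and its answer must prevail
  // for such code to behave as the programmer intended.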
  PM.add(createTypeBasedAAWrapperPass());
  PM.add(createScopedNoAliasAAWrapperPass());
}

void PassManagerBuilder::populateFunctionPassManager(
    legacy::FunctionPassManager &FPM) {
  addExtensionsToPM(EP_EarlyAsPossible, FPM);

  // Add LibraryInfo if we have some.
  if (LibraryInfo)
    FPM.add(new TargetLibraryInfoWrapperPass(*LibraryInfo));

  // The backends do not handle matrix intrinsics currently.
  // Make sure they are also lowered in O0.
  // FIXME: A lightweight version of the pass should run in the backend
  // pipeline on demand.
  if (EnableMatrix && OptLevel == 0)
    FPM.add(createLowerMatrixIntrinsicsMinimalPass());

  if (OptLevel == 0) return;

  addInitialAliasAnalysisPasses(FPM);

  // Lower llvm.expect to metadata before attempting transforms.
  // Compare/branch metadata may alter the behavior of passes like SimplifyCFG.
  FPM.add(createLowerExpectIntrinsicPass());
  FPM.add(createCFGSimplificationPass());
  FPM.add(createSROAPass());
  FPM.add(createEarlyCSEPass());
}
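
// Driver sketch (assumes an existing llvm::Module M; error handling omitted):
//
//   legacy::FunctionPassManager FPM(&M);
//   PassManagerBuilder Builder;
//   Builder.OptLevel = 2;
//   Builder.populateFunctionPassManager(FPM);
//   FPM.doInitialization();
//   for (Function &F : M)
//     FPM.run(F);
//   FPM.doFinalization();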

// Do PGO instrumentation generation or use pass as the option specifies.
void PassManagerBuilder::addPGOInstrPasses(legacy::PassManagerBase &MPM,
                                           bool IsCS = false) {
  if (IsCS) {
    if (!EnablePGOCSInstrGen && !EnablePGOCSInstrUse)
      return;
  } else if (!EnablePGOInstrGen && PGOInstrUse.empty() && PGOSampleUse.empty())
    return;

  // Perform the preinline and cleanup passes for O1 and above.
  // We will not do this inline for context sensitive PGO (when IsCS is true).
  if (OptLevel > 0 && !DisablePreInliner && PGOSampleUse.empty() && !IsCS) {
    // Create preinline pass. We construct an InlineParams object and specify
    // the threshold here to avoid the command line options of the regular
    // inliner to influence pre-inlining. The only fields of InlineParams we
    // care about are DefaultThreshold and HintThreshold.
    InlineParams IP;
    IP.DefaultThreshold = PreInlineThreshold;
    // FIXME: The hint threshold has the same value used by the regular inliner
    // when not optimizing for size. This should probably be lowered after
    // performance testing.
    // Use PreInlineThreshold for both -Os and -Oz. Not running preinliner makes
    // the instrumented binary unusably large. Even if PreInlineThreshold is not
    // the correct threshold for -Oz, it is better than not running preinliner.
    IP.HintThreshold = SizeLevel > 0 ? PreInlineThreshold : 325;

    MPM.add(createFunctionInliningPass(IP));
    MPM.add(createSROAPass());
    MPM.add(createEarlyCSEPass());             // Catch trivial redundancies
    MPM.add(createCFGSimplificationPass());    // Merge & remove BBs
    MPM.add(createInstructionCombiningPass()); // Combine silly seq's
    addExtensionsToPM(EP_Peephole, MPM);
  }
  if ((EnablePGOInstrGen && !IsCS) || (EnablePGOCSInstrGen && IsCS)) {
    MPM.add(createPGOInstrumentationGenLegacyPass(IsCS));
    // Add the profile lowering pass.
    InstrProfOptions Options;
    if (!PGOInstrGen.empty())
      Options.InstrProfileOutput = PGOInstrGen;
    Options.DoCounterPromotion = true;
    Options.UseBFIInPromotion = IsCS;
    MPM.add(createLoopRotatePass());
    MPM.add(createInstrProfilingLegacyPass(Options, IsCS));
  }
  if (!PGOInstrUse.empty())
    MPM.add(createPGOInstrumentationUseLegacyPass(PGOInstrUse, IsCS));
  // Indirect call promotion that promotes intra-module targets only.
  // For ThinLTO this is done earlier due to interactions with globalopt
  // for imported functions. We don't run this at -O0.
  if (OptLevel > 0 && !IsCS)
    MPM.add(
        createPGOIndirectCallPromotionLegacyPass(false, !PGOSampleUse.empty()));
}
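
// Configuration sketch: instrumentation PGO through this builder is driven by
// the fields initialized in the constructor; the profile file names below are
// arbitrary examples.
//
//   PassManagerBuilder Builder;
//   Builder.EnablePGOInstrGen = true;            // Instrumentation build...
//   Builder.PGOInstrGen = "default_%m.profraw";  // ...writing raw profiles.
//   // Later, optimized build with the merged profile:
//   //   Builder.PGOInstrUse = "code.profdata";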

void PassManagerBuilder::addFunctionSimplificationPasses(
    legacy::PassManagerBase &MPM) {
  // Start of function pass.
  // Break up aggregate allocas, using SSAUpdater.
  assert(OptLevel >= 1 && "Calling function optimizer with no optimization "
                          "level!");
  MPM.add(createSROAPass());
  MPM.add(createEarlyCSEPass(true /* Enable mem-ssa. */)); // Catch trivial redundancies
  if (EnableKnowledgeRetention)
    MPM.add(createAssumeSimplifyPass());

  if (OptLevel > 1) {
    if (EnableGVNHoist)
      MPM.add(createGVNHoistPass());
    if (EnableGVNSink) {
      MPM.add(createGVNSinkPass());
      MPM.add(createCFGSimplificationPass());
    }
  }

  if (EnableConstraintElimination)
    MPM.add(createConstraintEliminationPass());

  if (OptLevel > 1) {
    // Speculative execution if the target has divergent branches; otherwise nop.
    MPM.add(createSpeculativeExecutionIfHasBranchDivergencePass());

    MPM.add(createJumpThreadingPass());              // Thread jumps.
    MPM.add(createCorrelatedValuePropagationPass()); // Propagate conditionals
  }
  MPM.add(createCFGSimplificationPass()); // Merge & remove BBs
  // Combine silly seq's
  if (OptLevel > 2)
    MPM.add(createAggressiveInstCombinerPass());
  MPM.add(createInstructionCombiningPass());
  if (SizeLevel == 0 && !DisableLibCallsShrinkWrap)
    MPM.add(createLibCallsShrinkWrapPass());
  addExtensionsToPM(EP_Peephole, MPM);

  // Optimize memory intrinsic calls based on the profiled size information.
  if (SizeLevel == 0)
    MPM.add(createPGOMemOPSizeOptLegacyPass());

  // TODO: Investigate the cost/benefit of tail call elimination on debugging.
  if (OptLevel > 1)
    MPM.add(createTailCallEliminationPass()); // Eliminate tail calls
  MPM.add(createCFGSimplificationPass());     // Merge & remove BBs
  MPM.add(createReassociatePass());           // Reassociate expressions

  // Begin the loop pass pipeline.
  if (EnableSimpleLoopUnswitch) {
    // The simple loop unswitch pass relies on separate cleanup passes. Schedule
    // them first so when we re-process a loop they run before other loop
    // passes.
    MPM.add(createLoopInstSimplifyPass());
    MPM.add(createLoopSimplifyCFGPass());
  }
| loop-rotate.NumNotRotatedDueToHeaderSize | 368 | 365 | -3 | -0.82% | 0.82% |
| loop-rotate.NumRotated | 42015 | 42005 | -10 | -0.02% | 0.02% |
| loop-simplifycfg.NumLoopBlocksDeleted | 240 | 241 | 1 | 0.42% | 0.42% |
| loop-simplifycfg.NumTerminatorsFolded | 618 | 619 | 1 | 0.16% | 0.16% |
| loop-unroll.NumCompletelyUnrolled | 11028 | 11029 | 1 | 0.01% | 0.01% |
| loop-unroll.NumUnrolled | 12608 | 12525 | -83 | -0.66% | 0.66% |
| mem2reg.NumPHIInsert | 192110 | 192073 | -37 | -0.02% | 0.02% |
| mem2reg.NumSingleStore | 637650 | 637652 | 2 | 0.00% | 0.00% |
| scalar-evolution.NumTripCountsComputed | 283108 | 282998 | -110 | -0.04% | 0.04% |
| scalar-evolution.NumTripCountsNotComputed | 106712 | 106691 | -21 | -0.02% | 0.02% |
| simple-loop-unswitch.NumBranches | 5178 | 5185 | 7 | 0.14% | 0.14% |
| simple-loop-unswitch.NumCostMultiplierSkipped | 914 | 925 | 11 | 1.20% | 1.20% |
| simple-loop-unswitch.NumTrivial | 183 | 179 | -4 | -2.19% | 2.19% |
I.e. we end up with fewer instructions, less peeling, and more LICM activity;
also note that none of those four regressions appear here. Namely:
| statistic name | LICM-LoopRotate | LICM-LoopRotate-LICM | Δ | % | abs(%) |
| asm-printer.EmittedInsts | 9015799 | 9014474 | -1325 | -0.01% | 0.01% |
| indvars.NumElimCmp | 3544 | 3546 | 2 | 0.06% | 0.06% |
| indvars.NumElimExt | 36580 | 36681 | 101 | 0.28% | 0.28% |
| indvars.NumElimIV | 1187 | 1185 | -2 | -0.17% | 0.17% |
| indvars.NumElimIdentity | 136 | 146 | 10 | 7.35% | 7.35% |
| indvars.NumLFTR | 29890 | 29899 | 9 | 0.03% | 0.03% |
| indvars.NumReplaced | 2227 | 2299 | 72 | 3.23% | 3.23% |
| indvars.NumWidened | 26329 | 26404 | 75 | 0.28% | 0.28% |
| instcount.TotalBlocks | 1173840 | 1173652 | -188 | -0.02% | 0.02% |
| instcount.TotalInsts | 9896139 | 9895452 | -687 | -0.01% | 0.01% |
| lcssa.NumLCSSA | 423961 | 425373 | 1412 | 0.33% | 0.33% |
| licm.NumHoisted | 378753 | 383352 | 4599 | 1.21% | 1.21% |
| licm.NumMovedCalls | 2208 | 2204 | -4 | -0.18% | 0.18% |
| licm.NumMovedLoads | 31821 | 35755 | 3934 | 12.36% | 12.36% |
| licm.NumPromoted | 11154 | 11163 | 9 | 0.08% | 0.08% |
| licm.NumSunk | 13587 | 14321 | 734 | 5.40% | 5.40% |
| loop-delete.NumDeleted | 8402 | 8538 | 136 | 1.62% | 1.62% |
| loop-instsimplify.NumSimplified | 11890 | 12041 | 151 | 1.27% | 1.27% |
| loop-peel.NumPeeled | 925 | 924 | -1 | -0.11% | 0.11% |
| loop-rotate.NumRotated | 42003 | 42005 | 2 | 0.00% | 0.00% |
| loop-simplifycfg.NumLoopBlocksDeleted | 242 | 241 | -1 | -0.41% | 0.41% |
| loop-simplifycfg.NumLoopExitsDeleted | 20 | 497 | 477 | 2385.00% | 2385.00% |
| loop-simplifycfg.NumTerminatorsFolded | 336 | 619 | 283 | 84.23% | 84.23% |
| loop-unroll.NumCompletelyUnrolled | 11032 | 11029 | -3 | -0.03% | 0.03% |
| loop-unroll.NumUnrolled | 12529 | 12525 | -4 | -0.03% | 0.03% |
| mem2reg.NumDeadAlloca | 10221 | 10222 | 1 | 0.01% | 0.01% |
| mem2reg.NumPHIInsert | 192106 | 192073 | -33 | -0.02% | 0.02% |
| mem2reg.NumSingleStore | 637643 | 637652 | 9 | 0.00% | 0.00% |
| scalar-evolution.NumBruteForceTripCountsComputed | 812 | 814 | 2 | 0.25% | 0.25% |
| scalar-evolution.NumTripCountsComputed | 282934 | 282998 | 64 | 0.02% | 0.02% |
| scalar-evolution.NumTripCountsNotComputed | 106718 | 106691 | -27 | -0.03% | 0.03% |
| simple-loop-unswitch.NumBranches | 4752 | 5185 | 433 | 9.11% | 9.11% |
| simple-loop-unswitch.NumCostMultiplierSkipped | 503 | 925 | 422 | 83.90% | 83.90% |
| simple-loop-unswitch.NumSwitches | 18 | 20 | 2 | 11.11% | 11.11% |
| simple-loop-unswitch.NumTrivial | 95 | 179 | 84 | 88.42% | 88.42% |
{F15983613} {F15983615} {F15983616}
(this is vanilla llvm testsuite + rawspeed + darktable)
As an example of the code where early LICM only is bad, see:
https://godbolt.org/z/GzEbacs4K
This does have an observable compile-time regression of +~0.5% geomean
https://llvm-compile-time-tracker.com/compare.php?from=7c5222e4d1a3a14f029e5f614c9aefd0fa505f1e&to=5d81826c3411982ca26e46b9d0aff34c80577664&stat=instructions
but I think that's basically nothing, and there's potential that it might
be avoidable in the future by fixing clang to produce alignment information
on function arguments, thus making the second run unneeded.
Differential Revision: https://reviews.llvm.org/D99249
2021-04-02 15:40:12 +08:00
  // Try to remove as much code from the loop header as possible,
  // to reduce amount of IR that will have to be duplicated.
  // TODO: Investigate promotion cap for O1.
  MPM.add(createLICMPass(LicmMssaOptCap, LicmMssaNoAccForPromotionCap));
2014-11-22 03:53:24 +08:00
  // Rotate Loop - disable header duplication at -Oz
[LoopRotate] Add PrepareForLTO stage, avoid rotating with inline cands.
D84108 exposed a bad interaction between inlining and loop-rotation
during regular LTO, which is causing notable regressions in at least
CINT2006/473.astar.
The problem boils down to: we now rotate a loop just before the vectorizer
which requires duplicating a function call in the preheader when compiling
the individual files ('prepare for LTO'). But this then prevents further
inlining of the function during LTO.
This patch tries to resolve this issue by making LoopRotate more
conservative with respect to rotating loops that have inline-able calls
during the 'prepare for LTO' stage.
I think this change intuitively improves the current situation in
general. Loop-rotate tries hard to avoid creating headers that are 'too
big'. At the moment, it assumes all inlining already happened and the
cost of duplicating a call is equal to just doing the call. But with LTO,
inlining also happens during full LTO and it is possible that a previously
duplicated call is actually a huge function which gets inlined
during LTO.
From the perspective of LV, not much should change overall. Most loops
calling user-provided functions won't get vectorized to start with
(unless we can infer that the function does not touch memory, has no
other side effects). If we do not inline the 'inline-able' call during
the LTO stage, we merely delayed loop-rotation & vectorization. If we
inline during LTO, chances should be very high that the inlined code is
itself vectorizable or the user call was not vectorizable to start with.
There could of course be scenarios where we inline a sufficiently large
function with code not profitable to vectorize, which would have been
vectorized earlier (by scalarizing the call). But even in that case,
there probably is no big performance impact, because it should be mostly
down to the cost-model to reject vectorization in that case. And then
the version with scalarized calls should also not be beneficial. In a way,
LV should have strictly more information after inlining and make more
accurate decisions (barring cost-model issues).
There is of course plenty of room for things to go wrong unexpectedly,
so we need to keep a close look at actual performance and address any
follow-up issues.
I took a look at the impact on statistics for
MultiSource/SPEC2000/SPEC2006. There are a few benchmarks with fewer
loops rotated, but no change to the number of loops vectorized.
Reviewed By: sanwou01
Differential Revision: https://reviews.llvm.org/D94232
2021-01-19 17:22:40 +08:00
  MPM.add(createLoopRotatePass(SizeLevel == 2 ? 0 : -1, PrepareForLTO));
2019-11-27 12:28:52 +08:00
  // TODO: Investigate promotion cap for O1.
2019-04-20 01:46:50 +08:00
  MPM.add(createLICMPass(LicmMssaOptCap, LicmMssaNoAccForPromotionCap));
[PM/LoopUnswitch] Introduce a new, simpler loop unswitch pass.
Currently, this pass only focuses on *trivial* loop unswitching. At that
reduced problem it remains significantly better than the current loop
unswitch:
- Old pass is worse than cubic complexity. New pass is (I think) linear.
- New pass is much simpler in its design by focusing on full unswitching. (See
below for details on this).
- New pass doesn't carry state for thresholds between pass iterations.
- New pass doesn't carry state for correctness (both miscompile and
infloop) between pass iterations.
- New pass produces substantially better code after unswitching.
- New pass can handle more trivial unswitch cases.
- New pass doesn't recompute the dominator tree for the entire function
and instead incrementally updates it.
I've ported all of the trivial unswitching test cases from the old pass
to the new one to make sure that major functionality isn't lost in the
process. For several of the test cases I've worked to improve the
precision and rigor of the CHECKs, but for many I've just updated them
to handle the new IR produced.
My initial motivation was the fact that the old pass carried state in
very unreliable ways between pass iterations, and these mechanisms were
incompatible with the new pass manager. However, I discovered many more
improvements to make along the way.
This pass makes two very significant assumptions that enable most of these
improvements:
1) Focus on *full* unswitching -- that is, completely removing whatever
control flow construct is being unswitched from the loop. In the case
of trivial unswitching, this means removing the trivial (exiting)
edge. In non-trivial unswitching, this means removing the branch or
switch itself. This is in opposition to *partial* unswitching where
some part of the unswitched control flow remains in the loop. Partial
unswitching only really applies to switches and to folded branches.
These are very similar to full unrolling and partial unrolling. The
full form is an effective canonicalization, the partial form needs
a complex cost model, cannot be iterated, isn't canonicalizing, and
should be a separate pass that runs very late (much like unrolling).
2) Leverage LLVM's Loop machinery to the fullest. The original unswitch
dates from a time when a great deal of LLVM's loop infrastructure was
missing, ineffective, and/or unreliable. As a consequence, a lot of
complexity was added which we no longer need.
With these two overarching principles, I think we can build a fast and
effective unswitcher that fits in well in the new PM and in the
canonicalization pipeline. Some of the remaining functionality around
partial unswitching may not be relevant today (not many test cases or
benchmarks I can find) but if they are I'd like to add support for them
as a separate layer that runs very late in the pipeline.
Purely to make reviewing and introducing this code more manageable, I've
split this into first a trivial-unswitch-only pass and in the next patch
I'll add support for full non-trivial unswitching against a *fixed*
threshold, exactly like full unrolling. I even plan to re-use the
unrolling thresholds, as these are incredibly similar cost tradeoffs:
we're cloning a loop body in order to end up with simplified control
flow. We should only do that when the total growth is reasonably small.
One of the biggest changes with this pass compared to the previous one
is that previously, each individual trivial exiting edge from a switch
was unswitched separately as a branch. Now, we unswitch the entire
switch at once, with cases going to the various destinations. This lets
us unswitch multiple exiting edges in a single operation and also avoids
numerous extremely bad behaviors, where we would introduce 1000s of
branches to test for thousands of possible values, all of which would
take the exact same exit path bypassing the loop. Now we will use
a switch with 1000s of cases that can be efficiently lowered into
a jumptable. This avoids relying on somehow forming a switch out of the
branches or getting horrible code if that fails for any reason.
Another significant change is that this pass actively updates the CFG
based on unswitching. For trivial unswitching, this is actually very
easy because of the definition of loop simplified form. Doing this makes
the code coming out of loop unswitch dramatically more friendly. We
still should run loop-simplifycfg (at the least) after this to clean up,
but it will have to do a lot less work.
Finally, this pass makes far fewer attempts to simplify instructions
based on the unswitch. Something like loop-instsimplify, instcombine, or
GVN can be used to do increasingly powerful simplifications based on the
now dominating predicate. The old simplifications are things that
something like loop-instsimplify should get today or a very, very basic
loop-instcombine could get. Keeping that logic separate is a big
simplifying technique.
Most of the code in this pass that isn't in the old one has to do with
achieving specific goals:
- Updating the dominator tree as we go
- Unswitching all cases in a switch in a single step.
I think it is still shorter than just the trivial unswitching code in
the old pass despite having this functionality.
Differential Revision: https://reviews.llvm.org/D32409
llvm-svn: 301576
2017-04-28 02:45:20 +08:00
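  // Illustrative sketch (not from the commit): a *trivially* unswitchable
  // loop. The branch condition is loop-invariant and its taken successor
  // leaves the loop, so the test can be hoisted without cloning the body:
  //   int sum_or_zero(const int *a, int n, bool skip) {
  //     int s = 0;
  //     for (int i = 0; i < n; ++i) {
  //       if (skip)       // invariant condition on an exiting edge
  //         return 0;
  //       s += a[i];
  //     }
  //     return s;
  //   }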
  if (EnableSimpleLoopUnswitch)
    MPM.add(createSimpleLoopUnswitchLegacyPass());
  else
    MPM.add(createLoopUnswitchPass(SizeLevel || OptLevel < 3, DivergentTarget));
2018-05-30 10:46:45 +08:00
  // FIXME: We break the loop pass pipeline here in order to do full
  // simplify-cfg. Eventually loop-simplifycfg should be enhanced to replace the
  // need for this.
2015-09-24 11:50:17 +08:00
  MPM.add(createCFGSimplificationPass());
2020-03-21 17:14:17 +08:00
  MPM.add(createInstructionCombiningPass());
2018-05-30 10:46:45 +08:00
  // We resume loop passes creating a second loop pipeline here.
2020-11-10 00:16:54 +08:00
  if (EnableLoopFlatten) {
    MPM.add(createLoopFlattenPass()); // Flatten loops
    MPM.add(createLoopSimplifyCFGPass());
  }
2011-08-03 05:50:27 +08:00
  MPM.add(createLoopIdiomPass()); // Recognize idioms like memset.
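  // Illustration (a sketch) of the idiom being recognized:
  //   for (int i = 0; i < n; ++i)
  //     p[i] = 0;
  // becomes, conceptually:
  //   memset(p, 0, n * sizeof(*p));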
[PassManager] Run Induction Variable Simplification pass *after* Recognize loop idioms pass, not before
Currently, `-indvars` runs first, and then immediately after `-loop-idiom` does.
I'm not really sure if `-loop-idiom` requires `-indvars` to run beforehand,
but i'm *very* sure that `-indvars` requires `-loop-idiom` to run afterwards,
as it can be seen in the phase-ordering test.
LoopIdiom runs on two types of loops: countable ones, and uncountable ones.
For uncountable ones, IndVars obviously didn't make any change to them,
since they are uncountable, so for them the order should be irrelevant.
For countable ones, well, they should have been countable before IndVars
for IndVars to make any change to them, and since SCEV is used on them,
it shouldn't matter if IndVars have already canonicalized them.
So I don't really see why we'd want the current ordering.
Should this cause issues, it will give us a reproducer test case
that shows flaws in this logic, and we could then adjust accordingly.
While this is quite likely beneficial in-the-wild already,
it's a required part for the full motivational pattern
behind `left-shift-until-bittest` loop idiom (D91038).
Reviewed By: dmgreen
Differential Revision: https://reviews.llvm.org/D91800
2020-11-26 00:17:25 +08:00
  MPM.add(createIndVarSimplifyPass()); // Canonicalize indvars
2017-01-26 00:12:25 +08:00
  addExtensionsToPM(EP_LateLoopOptimizations, MPM);
2011-08-03 05:50:27 +08:00
  MPM.add(createLoopDeletionPass()); // Delete dead loops
2017-01-26 00:12:25 +08:00
2018-10-01 17:59:48 +08:00
  if (EnableLoopInterchange)
2015-03-06 18:11:25 +08:00
    MPM.add(createLoopInterchangePass()); // Interchange loops
2018-10-01 17:59:48 +08:00
2021-01-26 21:43:39 +08:00
  // Unroll small loops and perform peeling.
2019-04-13 03:16:07 +08:00
  MPM.add(createSimpleLoopUnrollPass(OptLevel, DisableUnrollLoops,
                                     ForgetAllSCEVInLoopUnroll));
2011-08-03 05:50:27 +08:00
  addExtensionsToPM(EP_LoopOptimizerEnd, MPM);
2018-05-30 10:46:45 +08:00
  // This ends the loop pass pipelines.
2011-08-03 05:50:27 +08:00
2020-10-04 16:27:07 +08:00
  // Break up allocas that may now be splittable after loop unrolling.
  MPM.add(createSROAPass());
2014-07-19 03:13:09 +08:00
  if (OptLevel > 1) {
2017-01-29 07:45:37 +08:00
    MPM.add(createMergedLoadStoreMotionPass()); // Merge ld/st in diamonds
2016-12-27 02:26:19 +08:00
    MPM.add(NewGVN ? createNewGVNPass()
                   : createGVNPass(DisableGVNLoadPRE)); // Remove redundancies
2014-07-19 03:13:09 +08:00
  }
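  // Illustration (a sketch) of the redundancy GVN removes: with no store in
  // between, the second load of *p is fully redundant, e.g.
  //   int f(int *p) { int a = *p; int b = *p; return a + b; } // one load kept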
2011-08-03 05:50:27 +08:00
|
|
|
MPM.add(createSCCPPass()); // Constant prop with SCCP
|
|
|
|
|
2020-12-08 02:14:57 +08:00
|
|
|
if (EnableConstraintElimination)
|
|
|
|
MPM.add(createConstraintEliminationPass());
|
|
|
|
|
[BDCE] Add a bit-tracking DCE pass
BDCE is a bit-tracking dead code elimination pass. It is based on ADCE (the
"aggressive DCE" pass), with the added capability to track dead bits of integer
valued instructions and remove those instructions when all of the bits are
dead.
Currently, it does not actually do this all-bits-dead removal, but rather
replaces the instruction's uses with a constant zero, and lets instcombine (and
the later run of ADCE) do the rest. Because we essentially get a run of ADCE
"for free" while tracking the dead bits, we also do what ADCE does and removes
actually-dead instructions as well (this includes instructions newly trivially
dead because all bits were dead, but not all such instructions can be removed).
The motivation for this is a case like:
  int __attribute__((const)) foo(int i);
  int bar(int x) {
    x |= (4 & foo(5));
    x |= (8 & foo(3));
    x |= (16 & foo(2));
    x |= (32 & foo(1));
    x |= (64 & foo(0));
    x |= (128 & foo(4));
    return x >> 4;
  }
As it turns out, if you order the bit-field insertions so that all of the dead
ones come last, then instcombine will remove them. However, if you pick some
other order (such as the one above), the fact that some of the calls to foo()
are useless is not locally obvious, and we don't remove them (without this
pass).
I did a quick compile-time overhead check using sqlite from the test suite
(Release+Asserts). BDCE took ~0.4% of the compilation time (making it about
twice as expensive as ADCE).
I've not looked at why yet, but we eliminate instructions due to having
all-dead bits in:
External/SPEC/CFP2006/447.dealII/447.dealII
External/SPEC/CINT2006/400.perlbench/400.perlbench
External/SPEC/CINT2006/403.gcc/403.gcc
MultiSource/Applications/ClamAV/clamscan
MultiSource/Benchmarks/7zip/7zip-benchmark
llvm-svn: 229462
2015-02-17 09:36:59 +08:00
  // Delete dead bit computations (instcombine runs after to fold away the dead
  // computations, and then ADCE will run later to exploit any new DCE
  // opportunities that creates).
  MPM.add(createBitTrackingDCEPass()); // Delete dead bit computations
2011-08-03 05:50:27 +08:00
  // Run instcombine after redundancy elimination to exploit opportunities
  // opened up by them.
2020-03-21 17:14:17 +08:00
  MPM.add(createInstructionCombiningPass());
2014-05-25 18:27:02 +08:00
  addExtensionsToPM(EP_Peephole, MPM);
2019-11-27 12:28:52 +08:00
  if (OptLevel > 1) {
    MPM.add(createJumpThreadingPass()); // Thread jumps
    MPM.add(createCorrelatedValuePropagationPass());
2020-10-21 17:21:50 +08:00
  }
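  // Illustration (a sketch): on the path where c is true, x is known to be 0,
  // so the second test is redundant and that path can be threaded past it:
  //   int g(bool c, int y) {
  //     int x = c ? 0 : y;
  //     if (x == 0) return 1; // the c==true path threads straight here
  //     return 2;
  //   }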
  MPM.add(createAggressiveDCEPass()); // Delete dead instructions
2021-01-10 17:52:01 +08:00
  MPM.add(createMemCpyOptPass()); // Remove memcpy / form memset
2020-10-21 17:21:50 +08:00
  // TODO: Investigate if this is too expensive at O1.
  if (OptLevel > 1) {
2019-11-27 12:28:52 +08:00
    MPM.add(createDeadStoreEliminationPass()); // Delete dead stores
    MPM.add(createLICMPass(LicmMssaOptCap, LicmMssaNoAccForPromotionCap));
  }
2011-08-03 05:50:27 +08:00
  addExtensionsToPM(EP_ScalarOptimizerLate, MPM);
2013-11-18 00:02:50 +08:00
  if (RerollLoops)
2013-11-17 07:59:05 +08:00
    MPM.add(createLoopRerollPass());
2012-02-01 11:51:43 +08:00
2013-08-06 10:43:45 +08:00
  MPM.add(createCFGSimplificationPass()); // Merge & remove BBs
2016-03-10 02:47:11 +08:00
  // Clean up after everything.
2020-03-21 17:14:17 +08:00
  MPM.add(createInstructionCombiningPass());
2014-05-25 18:27:02 +08:00
  addExtensionsToPM(EP_Peephole, MPM);
2018-09-05 01:19:13 +08:00
  if (EnableCHR && OptLevel >= 3 &&
2019-03-05 04:21:27 +08:00
      (!PGOInstrUse.empty() || !PGOSampleUse.empty() || EnablePGOCSInstrGen))
2018-09-05 01:19:13 +08:00
    MPM.add(createControlHeightReductionLegacyPass());
2016-02-17 06:54:27 +08:00
}
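// A minimal usage sketch (illustrative, not part of this file): how a client
// might drive the populate* methods with the legacy pass managers. The helper
// name 'optimizeModule' is hypothetical.
//
//   #include "llvm/IR/LegacyPassManager.h"
//   #include "llvm/Transforms/IPO.h"
//   #include "llvm/Transforms/IPO/PassManagerBuilder.h"
//
//   static void optimizeModule(llvm::Module &M, unsigned OptLevel,
//                              unsigned SizeLevel) {
//     llvm::PassManagerBuilder PMB;
//     PMB.OptLevel = OptLevel;
//     PMB.SizeLevel = SizeLevel;
//     PMB.Inliner = llvm::createFunctionInliningPass(
//         OptLevel, SizeLevel, /*DisableInlineHotCallSite=*/false);
//     llvm::legacy::FunctionPassManager FPM(&M);
//     llvm::legacy::PassManager MPM;
//     PMB.populateFunctionPassManager(FPM);
//     PMB.populateModulePassManager(MPM);
//     FPM.doInitialization();
//     for (llvm::Function &F : M)
//       FPM.run(F);
//     FPM.doFinalization();
//     MPM.run(M);
//   }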
void PassManagerBuilder::populateModulePassManager(
    legacy::PassManagerBase &MPM) {
2019-01-25 02:55:49 +08:00
  // Whether this is a default or *LTO pre-link pipeline. The FullLTO post-link
  // is handled separately, so just check this is not the ThinLTO post-link.
  bool DefaultOrPreLinkPipeline = !PerformThinLTO;
2020-11-16 17:49:04 +08:00
  MPM.add(createAnnotation2MetadataLegacyPass());
2016-12-15 05:40:47 +08:00
  if (!PGOSampleUse.empty()) {
    MPM.add(createPruneEHPass());
2019-01-18 04:48:34 +08:00
    // In ThinLTO mode, when flattened profile is used, all the available
    // profile information will be annotated in PreLink phase so there is
    // no need to load the profile again in PostLink.
    if (!(FlattenedProfileUsed && PerformThinLTO))
      MPM.add(createSampleProfileLoaderPass(PGOSampleUse));
2016-12-15 05:40:47 +08:00
  }
2016-02-17 06:54:27 +08:00
  // Allow forcing function attributes as a debugging and tuning aid.
  MPM.add(createForceFunctionAttrsLegacyPass());
  // If all optimizations are disabled, just run the always-inline pass and,
  // if enabled, the function merging pass.
  if (OptLevel == 0) {
    addPGOInstrPasses(MPM);
    if (Inliner) {
      MPM.add(Inliner);
      Inliner = nullptr;
    }
    // FIXME: The BarrierNoopPass is a HACK! The inliner pass above implicitly
    // creates a CGSCC pass manager, but we don't want to add extensions into
    // that pass manager. To prevent this we insert a no-op module pass to reset
    // the pass manager to get the same behavior as EP_OptimizerLast in non-O0
    // builds. The function merging pass is added here when it is enabled.
    if (MergeFunctions)
      MPM.add(createMergeFunctionsPass());
2017-07-06 08:09:09 +08:00
    else if (GlobalExtensionsNotEmpty() || !Extensions.empty())
2016-02-17 06:54:27 +08:00
      MPM.add(createBarrierNoopPass());
2017-11-02 01:58:39 +08:00
    if (PerformThinLTO) {
Restore "[WPD/LowerTypeTests] Delay lowering/removal of type tests until after ICP"
This restores commit 80d0a137a5aba6998fadb764f1e11cb901aae233, and the
follow on fix in 873c0d0786dcf22f4af39f65df824917f70f2170, with a new
fix for test failures after a 2-stage clang bootstrap, and a more robust
fix for the Chromium build failure that an earlier version partially
fixed. See also discussion on D75201.
Reviewers: evgeny777
Subscribers: mehdi_amini, Prazek, hiraditya, steven_wu, dexonsmith, arphaman, davidxl, cfe-commits, llvm-commits
Tags: #clang, #llvm
Differential Revision: https://reviews.llvm.org/D73242
2020-03-18 02:08:35 +08:00
      MPM.add(createLowerTypeTestsPass(nullptr, nullptr, true));
2017-11-02 01:58:39 +08:00
      // Drop available_externally and unreferenced globals. This is necessary
      // with ThinLTO in order to avoid leaving undefined references to dead
      // globals in the object file.
      MPM.add(createEliminateAvailableExternallyPass());
      MPM.add(createGlobalDCEPass());
    }
2017-04-23 12:49:34 +08:00
    addExtensionsToPM(EP_EnabledOnOptLevel0, MPM);
[ThinLTO] Handle chains of aliases
At -O0, globalopt is not run during the compile step, and we can have a
chain of an alias having an immediate aliasee of another alias. The
summaries are constructed assuming aliases in a canonical form
(flattened chains), and as a result only the base object but no
intermediate aliases were preserved.
Fix by adding a pass that canonicalizes aliases, which ensures each
alias is a direct alias of the base object.
Reviewers: pcc, davidxl
Subscribers: mehdi_amini, inglorion, eraman, steven_wu, dexonsmith, arphaman, llvm-commits
Differential Revision: https://reviews.llvm.org/D54507
llvm-svn: 350423
2019-01-05 03:04:54 +08:00
    if (PrepareForLTO || PrepareForThinLTO) {
      MPM.add(createCanonicalizeAliasesPass());
      // Rename anon globals to be able to export them in the summary.
      // This has to be done after we add the extensions to the pass manager
      // as there could be passes (e.g. Address sanitizer) which introduce
      // new unnamed globals.
2016-09-18 04:40:16 +08:00
      MPM.add(createNameAnonGlobalPass());
[ThinLTO] Handle chains of aliases
2019-01-05 03:04:54 +08:00
    }
2020-12-15 22:44:38 +08:00
    MPM.add(createAnnotationRemarksLegacyPass());
2016-02-17 06:54:27 +08:00
    return;
  }
  // Add LibraryInfo if we have some.
  if (LibraryInfo)
    MPM.add(new TargetLibraryInfoWrapperPass(*LibraryInfo));
  addInitialAliasAnalysisPasses(MPM);
2016-08-30 06:46:56 +08:00
  // For ThinLTO there are two passes of indirect call promotion. The
  // first is during the compile phase when PerformThinLTO=false and
  // intra-module indirect call targets are promoted. The second is during
  // the ThinLTO backend when PerformThinLTO=true, when we promote imported
  // inter-module indirect calls. For that we perform indirect call promotion
  // earlier in the pass pipeline, here before globalopt. Otherwise imported
  // available_externally functions look unreferenced and are removed.
Restore "[WPD/LowerTypeTests] Delay lowering/removal of type tests until after ICP"
This restores commit 80d0a137a5aba6998fadb764f1e11cb901aae233, and the
follow on fix in 873c0d0786dcf22f4af39f65df824917f70f2170, with a new
fix for test failures after a 2-stage clang bootstrap, and a more robust
fix for the Chromium build failure that an earlier version partially
fixed. See also discussion on D75201.
Reviewers: evgeny777
Subscribers: mehdi_amini, Prazek, hiraditya, steven_wu, dexonsmith, arphaman, davidxl, cfe-commits, llvm-commits
Tags: #clang, #llvm
Differential Revision: https://reviews.llvm.org/D73242
2020-03-18 02:08:35 +08:00
  if (PerformThinLTO) {
2017-02-24 06:15:18 +08:00
    MPM.add(createPGOIndirectCallPromotionLegacyPass(/*InLTO = */ true,
                                                     !PGOSampleUse.empty()));
Restore "[WPD/LowerTypeTests] Delay lowering/removal of type tests until after ICP"
This restores commit 80d0a137a5aba6998fadb764f1e11cb901aae233, and the
follow on fix in 873c0d0786dcf22f4af39f65df824917f70f2170, with a new
fix for test failures after a 2-stage clang bootstrap, and a more robust
fix for the Chromium build failure that an earlier version partially
fixed. See also discussion on D75201.
Reviewers: evgeny777
Subscribers: mehdi_amini, Prazek, hiraditya, steven_wu, dexonsmith, arphaman, davidxl, cfe-commits, llvm-commits
Tags: #clang, #llvm
Differential Revision: https://reviews.llvm.org/D73242
2020-03-18 02:08:35 +08:00
    MPM.add(createLowerTypeTestsPass(nullptr, nullptr, true));
  }
2016-08-30 06:46:56 +08:00
2017-03-24 05:20:05 +08:00
  // For SamplePGO in ThinLTO compile phase, we do not want to unroll loops
  // as it will change the CFG too much to make the 2nd profile annotation
  // in backend more difficult.
  bool PrepareForThinLTOUsingPGOSampleProfile =
      PrepareForThinLTO && !PGOSampleUse.empty();
  if (PrepareForThinLTOUsingPGOSampleProfile)
    DisableUnrollLoops = true;
2017-10-06 12:39:40 +08:00
  // Infer attributes about declarations if possible.
  MPM.add(createInferFunctionAttrsLegacyPass());
2016-02-17 06:54:27 +08:00
2019-11-27 14:30:12 +08:00
  // Infer attributes on declarations, call sites, arguments, etc.
2020-04-18 10:43:54 +08:00
  if (AttributorRun & AttributorRunOption::MODULE)
2020-04-06 00:45:19 +08:00
    MPM.add(createAttributorLegacyPass());
2019-11-27 14:30:12 +08:00
2017-10-06 12:39:40 +08:00
  addExtensionsToPM(EP_ModuleOptimizerEarly, MPM);
2016-02-17 06:54:27 +08:00
Recommit r317351 : Add CallSiteSplitting pass
This recommits r317351 after fixing a buildbot failure.
Original commit message:
Summary:
This change adds a pass which tries to split a call-site to pass
more constrained arguments if its argument is predicated in the control flow
so that we can expose better context to the later passes (e.g., inliner, jump
threading, or IPA-CP based function cloning, etc.).
As of now we support two cases:
1) If a call site is dominated by an OR condition and if any of its arguments
are predicated on this OR condition, try to split the condition with more
constrained arguments. For example, in the code below, we try to split the
call site since we can predicate the argument (ptr) based on the OR condition.
Split from:
    if (!ptr || c)
      callee(ptr);
to:
    if (!ptr)
      callee(null ptr)    // set the known constant value
    else if (c)
      callee(nonnull ptr) // set non-null attribute in the argument
2) We can also split a call-site based on constant incoming values of a PHI.
For example, from:
    BB0:
      %c = icmp eq i32 %i1, %i2
      br i1 %c, label %BB2, label %BB1
    BB1:
      br label %BB2
    BB2:
      %p = phi i32 [ 0, %BB0 ], [ 1, %BB1 ]
      call void @bar(i32 %p)
to:
    BB0:
      %c = icmp eq i32 %i1, %i2
      br i1 %c, label %BB2-split0, label %BB1
    BB1:
      br label %BB2-split1
    BB2-split0:
      call void @bar(i32 0)
      br label %BB2
    BB2-split1:
      call void @bar(i32 1)
      br label %BB2
    BB2:
      %p = phi i32 [ 0, %BB2-split0 ], [ 1, %BB2-split1 ]
llvm-svn: 317362
2017-11-04 04:41:16 +08:00
  if (OptLevel > 2)
    MPM.add(createCallSiteSplittingPass());
2017-10-06 12:39:40 +08:00
  MPM.add(createIPSCCPPass()); // IP SCCP
2017-10-25 21:40:08 +08:00
  MPM.add(createCalledValuePropagationPass());
[Attributor] Pass infrastructure and fixpoint framework
2019-06-05 11:02:24 +08:00
2017-10-06 12:39:40 +08:00
  MPM.add(createGlobalOptimizerPass()); // Optimize out global vars
  // Promote any localized global vars.
  MPM.add(createPromoteMemoryToRegisterPass());
2016-02-17 06:54:27 +08:00
2017-10-06 12:39:40 +08:00
  MPM.add(createDeadArgEliminationPass()); // Dead argument elimination
2016-02-17 06:54:27 +08:00
2020-03-21 17:14:17 +08:00
  MPM.add(createInstructionCombiningPass()); // Clean up after IPCP & DAE
2017-10-06 12:39:40 +08:00
  addExtensionsToPM(EP_Peephole, MPM);
  MPM.add(createCFGSimplificationPass()); // Clean up after IPCP & DAE
2016-02-17 06:54:27 +08:00
2017-03-24 05:20:05 +08:00
  // For SamplePGO in ThinLTO compile phase, we do not want to do indirect
  // call promotion as it will change the CFG too much to make the 2nd
  // profile annotation in backend more difficult.
2017-04-26 00:54:45 +08:00
  // PGO instrumentation is added during the compile phase for ThinLTO, do
  // not run it a second time
2019-01-25 02:55:49 +08:00
  if (DefaultOrPreLinkPipeline && !PrepareForThinLTOUsingPGOSampleProfile)
2016-02-17 07:02:29 +08:00
    addPGOInstrPasses(MPM);
2016-02-17 06:54:27 +08:00
2019-03-05 04:21:27 +08:00
  // Create profile COMDAT variables. Lld linker wants to see all variables
  // before the LTO/ThinLTO link since it needs to resolve symbols/comdats.
  if (!PerformThinLTO && EnablePGOCSInstrGen)
    MPM.add(createPGOInstrumentationGenCreateVarLegacyPass(PGOInstrGen));
2017-10-03 07:39:20 +08:00
  // We add a module alias analysis pass here. In part due to bugs in the
  // analysis infrastructure this "works" in that the analysis stays alive
  // for the entire SCC pass run below.
  MPM.add(createGlobalsAAWrapperPass());
2016-02-17 06:54:27 +08:00
  // Start of CallGraph SCC passes.
2017-10-06 12:39:40 +08:00
  MPM.add(createPruneEHPass()); // Remove dead EH info
2017-10-06 02:06:37 +08:00
  bool RunInliner = false;
2016-02-17 06:54:27 +08:00
  if (Inliner) {
    MPM.add(Inliner);
    Inliner = nullptr;
2017-10-06 02:06:37 +08:00
    RunInliner = true;
2016-02-17 06:54:27 +08:00
  }
2017-10-06 12:39:40 +08:00
2019-11-27 14:30:12 +08:00
  // Infer attributes on declarations, call sites, arguments, etc. for an SCC.
2020-04-18 10:43:54 +08:00
  if (AttributorRun & AttributorRunOption::CGSCC)
2020-04-06 00:45:19 +08:00
    MPM.add(createAttributorCGSCCLegacyPass());
2019-11-27 14:30:12 +08:00
2019-11-07 13:20:06 +08:00
  // Try to perform OpenMP specific optimizations. This is a (quick!) no-op if
  // there are no OpenMP runtime calls present in the module.
  if (OptLevel > 1)
    MPM.add(createOpenMPOptLegacyPass());
2017-10-06 12:39:40 +08:00
  MPM.add(createPostOrderFunctionAttrsLegacyPass());
2016-02-17 06:54:27 +08:00
  if (OptLevel > 2)
    MPM.add(createArgumentPromotionPass()); // Scalarize uninlined fn args
2016-07-28 11:28:43 +08:00
  addExtensionsToPM(EP_CGSCCOptimizerLate, MPM);
2016-02-17 06:54:27 +08:00
  addFunctionSimplificationPasses(MPM);
2011-08-03 05:50:27 +08:00
2016-05-03 00:53:16 +08:00
  // FIXME: This is a HACK! The inliner pass above implicitly creates a CGSCC
  // pass manager that we are specifically trying to avoid. To prevent this
  // we must insert a no-op module pass to reset the pass manager.
  MPM.add(createBarrierNoopPass());
2017-10-06 02:06:37 +08:00
2017-05-23 00:41:57 +08:00
  if (RunPartialInlining)
    MPM.add(createPartialInliningPass());
2016-05-03 00:53:16 +08:00
2018-04-27 01:54:53 +08:00
  if (OptLevel > 1 && !PrepareForLTO && !PrepareForThinLTO)
2016-05-03 23:46:00 +08:00
    // Remove avail extern fns and globals definitions if we aren't
    // compiling an object file for later LTO. For LTO we want to preserve
    // these so they are eligible for inlining at link-time. Note if they
    // are unreferenced they will be removed by GlobalDCE later, so
    // this only impacts referenced available externally globals.
    // Eventually they will be suppressed during codegen, but eliminating
    // here enables more opportunity for GlobalDCE as it may make
    // globals referenced by available external functions dead
    // and saves running remaining passes on the eliminated functions.
    MPM.add(createEliminateAvailableExternallyPass());
2019-03-05 04:21:27 +08:00
  // CSFDO instrumentation and use pass. Don't invoke this for Prepare pass
  // for LTO and ThinLTO -- The actual pass will be called after all inlines
  // are performed.
  // Need to do this after COMDAT variables have been eliminated,
  // (i.e. after EliminateAvailableExternallyPass).
  if (!(PrepareForLTO || PrepareForThinLTO))
    addPGOInstrPasses(MPM, /* IsCS */ true);
2019-03-01 04:13:38 +08:00
  if (EnableOrderFileInstrumentation)
    MPM.add(createInstrOrderFilePass());
2017-10-06 12:39:40 +08:00
  MPM.add(createReversePostOrderFunctionAttrsPass());
2016-05-03 00:53:16 +08:00
2017-10-06 02:23:25 +08:00
  // The inliner performs some kind of dead code elimination as it goes,
  // but there are cases that are not really caught by it. We might
  // at some point consider teaching the inliner about them, but it
  // is OK for now to run GlobalOpt + GlobalDCE in tandem as their
  // benefits generally outweigh the cost, making the whole pipeline
  // faster.
  if (RunInliner) {
    MPM.add(createGlobalOptimizerPass());
    MPM.add(createGlobalDCEPass());
  }
2016-02-17 07:02:29 +08:00
  // If we are planning to perform ThinLTO later, let's not bloat the code with
  // unrolling/vectorization/... now. We'll first run the inliner + CGSCC passes
  // during ThinLTO and perform the rest of the optimizations afterward.
2016-04-25 16:47:37 +08:00
  if (PrepareForThinLTO) {
2017-11-09 03:45:52 +08:00
    // Ensure we perform any last passes, but do so before renaming anonymous
    // globals in case the passes add any.
    addExtensionsToPM(EP_OptimizerLast, MPM);
[ThinLTO] Handle chains of aliases
2019-01-05 03:04:54 +08:00
    MPM.add(createCanonicalizeAliasesPass());
2016-09-17 00:56:30 +08:00
    // Rename anon globals to be able to export them in the summary.
    MPM.add(createNameAnonGlobalPass());
2016-02-17 07:02:29 +08:00
    return;
2016-04-25 16:47:37 +08:00
  }
2016-02-17 07:02:29 +08:00
2016-05-07 02:17:03 +08:00
  if (PerformThinLTO)
    // Optimize globals now when performing ThinLTO, this enables more
    // optimizations later.
    MPM.add(createGlobalOptimizerPass());
2016-02-11 17:23:53 +08:00
  // Scheduling LoopVersioningLICM when inlining is over, because after that
New Loop Versioning LICM Pass
Summary:
When alias analysis is uncertain about the aliasing between any two accesses,
it will return MayAlias. This uncertainty from alias analysis restricts LICM
from proceeding further. In cases where alias analysis is uncertain we might
use loop versioning as an alternative.
Loop Versioning will create a version of the loop with aggressive aliasing
assumptions in addition to the original with conservative (default) aliasing
assumptions. The version of the loop making aggressive aliasing assumptions
will have all the memory accesses marked as no-alias. These two versions of
the loop will be preceded by a memory runtime check. This runtime check consists
of bounds checks for all unique memory accessed in the loop, and it ensures the
lack of memory aliasing. The result of the runtime check determines which of
the loop versions is executed: If the runtime check detects any memory
aliasing, then the original loop is executed. Otherwise, the version with
aggressive aliasing assumptions is used.
The pass is off by default and can be enabled with command line option
-enable-loop-versioning-licm.
Reviewers: hfinkel, anemet, chatur01, reames
Subscribers: MatzeB, grosser, joker.eph, sanjoy, javed.absar, sbaranga,
llvm-commits
Differential Revision: http://reviews.llvm.org/D9151
llvm-svn: 259986
2016-02-06 15:47:48 +08:00
  // we may see more accurate aliasing. The reason to run this late is that
  // versioning too early may prevent further inlining due to the increase in
2016-05-07 02:17:03 +08:00
  // code size. By placing it just after inlining, other optimizations that run
  // later might benefit from the no-alias assumption in the cloned loop.
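  // A minimal sketch of the motivating pattern (illustrative, not from the
  // commit): LICM cannot hoist the load of b[0] because a and b may alias,
  // but a versioned loop guarded by a runtime overlap check can:
  //   void f(float *a, float *b, int n) {
  //     for (int i = 0; i < n; ++i)
  //       a[i] += b[0]; // b[0] may alias a[i]; not hoistable as-is
  //   }
  // Conceptually this becomes:
  //   if (a and b do not overlap)  run a clone with b[0] hoisted (no-alias)
  //   else                         run the original loop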
New Loop Versioning LICM Pass
2016-02-06 15:47:48 +08:00
  if (UseLoopVersioningLICM) {
    MPM.add(createLoopVersioningLICMPass()); // Do LoopVersioningLICM
2019-04-20 01:46:50 +08:00
    MPM.add(createLICMPass(LicmMssaOptCap, LicmMssaNoAccForPromotionCap));
New Loop Versioning LICM Pass
2016-02-06 15:47:48 +08:00
  }
2017-10-03 07:39:20 +08:00
  // We add a fresh GlobalsModRef run at this point. This is particularly
  // useful as the above will have inlined, DCE'ed, and function-attr
  // propagated everything. We should at this point have a reasonably minimal
  // and richly annotated call graph. By computing aliasing and mod/ref
  // information for all local globals here, the late loop passes and notably
  // the vectorizer will be able to use them to help recognize vectorizable
  // memory operations.
  //
  // Note that this relies on a bug in the pass manager which preserves
  // a module analysis into a function pass pipeline (and throughout it) so
  // long as the first function pass doesn't invalidate the module analysis.
  // Thus both Float2Int and LoopRotate have to preserve AliasAnalysis for
  // this to work. Fortunately, it is trivial to preserve AliasAnalysis
  // (doing nothing preserves it as it is required to be conservatively
  // correct in the face of IR changes).
  MPM.add(createGlobalsAAWrapperPass());
2015-07-23 17:34:01 +08:00
2017-01-03 01:49:18 +08:00
  MPM.add(createFloat2IntPass());
2019-10-15 00:15:14 +08:00
  MPM.add(createLowerConstantIntrinsicsPass());
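  // Illustration (a sketch) of what Float2Int targets: float arithmetic whose
  // operands come from integers and whose result feeds back into an integer,
  //   int scale(unsigned char c) { return (int)((float)c * 3.0f); }
  // which can be evaluated in integer arithmetic when the value ranges fit.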
2015-03-27 18:36:57 +08:00
[Matrix] Add first set of matrix intrinsics and initial lowering pass.
This is the first patch adding an initial set of matrix intrinsics and a
corresponding lowering pass. This has been discussed on llvm-dev:
http://lists.llvm.org/pipermail/llvm-dev/2019-October/136240.html
The first patch introduces four new intrinsics (transpose, multiply,
columnwise load and store) and a LowerMatrixIntrinsics pass, that
lowers those intrinsics to vector operations.
Matrices are embedded in a 'flat' vector (e.g. a 4 x 4 float matrix
embedded in a <16 x float> vector) and the intrinsics take the dimension
information as parameters. Those parameters need to be ConstantInt.
For the memory layout, we initially assume column-major, but in the RFC
we also described how to extend the intrinsics to support row-major as
well.
For the initial lowering, we split the input of the intrinsics into a
set of column vectors, transform those column vectors and concatenate
the result columns to a flat result vector.
This allows us to lower the intrinsics without any shape propagation, as
mentioned in the RFC. In follow-up patches, we plan to submit the
following improvements:
* Shape propagation to eliminate the embedding/splitting for each
intrinsic.
* Fused & tiled lowering of multiply and other operations.
* Optimization remarks highlighting matrix expressions and costs.
* Generate loops for operations on large matrices.
* More general block processing for operation on large vectors,
exploiting shape information.
We would like to add dedicated transpose, columnwise load and store
intrinsics, even though they are not strictly necessary. For example, we
could instead emit a large shufflevector instruction instead of the
transpose. But we expect that to
(1) become unwieldy for larger matrices (even for 16x16 matrices,
the resulting shufflevector masks would be huge),
(2) risk instcombine making small changes, causing us to fail to
detect the transpose, preventing better lowerings
For the load/store, we are additionally planning on exploiting the
intrinsics for better alias analysis.
Reviewers: anemet, Gerolf, reames, hfinkel, andrew.w.kaylor, efriedma, rengolin
Reviewed By: anemet
Differential Revision: https://reviews.llvm.org/D70456
2019-12-12 23:27:28 +08:00
  if (EnableMatrix) {
    MPM.add(createLowerMatrixIntrinsicsPass());
    // CSE the pointer arithmetic of the column vectors. This allows alias
    // analysis to establish no-aliasing between loads and stores of different
    // columns of the same matrix.
    MPM.add(createEarlyCSEPass(false));
  }
2015-07-16 16:20:37 +08:00
  addExtensionsToPM(EP_VectorizerStart, MPM);
2014-10-14 08:31:29 +08:00
  // Re-rotate loops in all our loop nests. These may have fallen out of
  // rotated form due to GVN or other transformations, and the vectorizer relies
2015-07-10 18:37:09 +08:00
  // on the rotated form. Disable header duplication at -Oz.
[LoopRotate] Add PrepareForLTO stage, avoid rotating with inline cands.
2021-01-19 17:22:40 +08:00
  MPM.add(createLoopRotatePass(SizeLevel == 2 ? 0 : -1, PrepareForLTO));
2014-10-14 08:31:29 +08:00
2015-05-14 20:05:18 +08:00
  // Distribute loops to allow partial vectorization. I.e. isolate dependences
2016-04-27 13:28:18 +08:00
  // into a separate loop that would otherwise inhibit vectorization. This is
  // currently only performed for loops marked with the metadata
  // llvm.loop.distribute=true or when -enable-loop-distribute is specified.
2016-12-21 12:07:40 +08:00
  MPM.add(createLoopDistributePass());
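  // Illustration (a sketch): distribution isolates the dependence-carrying
  // statement so the remainder can vectorize, e.g.
  //   for (i) { a[i + 1] = a[i] + c[i]; d[i] = 2 * d[i]; }
  // becomes, conceptually:
  //   for (i) a[i + 1] = a[i] + c[i]; // loop-carried dependence stays here
  //   for (i) d[i] = 2 * d[i];        // now vectorizable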
2015-05-14 20:05:18 +08:00
2019-05-01 05:29:20 +08:00
  MPM.add(createLoopVectorizePass(!LoopsInterleaved, !LoopVectorize));
LLE 6/6: Add LoopLoadElimination pass
Summary:
The goal of this pass is to perform store-to-load forwarding across the
backedge of a loop. E.g.:
    for (i)
      A[i + 1] = A[i] + B[i]
  =>
    T = A[0]
    for (i)
      T = T + B[i]
      A[i + 1] = T
The pass relies on loop dependence analysis via LoopAccessAnalysis to
find opportunities of loop-carried dependences with a distance of one
between a store and a load. Since it's using LoopAccessAnalysis, it was
easy to also add support for versioning away may-aliasing intervening
stores that would otherwise prevent this transformation.
This optimization is also performed by Load-PRE in GVN without the
option of multi-versioning. As was discussed with Daniel Berlin in
http://reviews.llvm.org/D9548, this is inferior to a more loop-aware
solution applied here. Hopefully, we will be able to remove some
complexity from GVN/MemorySSA as a consequence.
In the long run, we may want to extend this pass (or create a new one if
there is little overlap) to also eliminate loop-independent redundant
loads and stores that *require* versioning due to may-aliasing
intervening stores/loads. I have some motivating cases for store
elimination. My plan right now is to wait for MemorySSA to come online
first rather than using memdep for this.
The main motiviation for this pass is the 456.hmmer loop in SPECint2006
where after distributing the original loop and vectorizing the top part,
we are left with the critical path exposed in the bottom loop. Being
able to promote the memory dependence into a register depedence (even
though the HW does perform store-to-load fowarding as well) results in a
major gain (~20%). This gain also transfers over to x86: it's
around 8-10%.
Right now the pass is off by default and can be enabled
with -enable-loop-load-elim. On the LNT testsuite, there are two
performance changes (negative number -> improvement):
1. -28% in Polybench/linear-algebra/solvers/dynprog: the length of the
critical paths is reduced
2. +2% in Polybench/stencils/adi: Unfortunately, I couldn't reproduce this
outside of LNT
The pass is scheduled after the loop vectorizer (which is after loop
distribution). The rationale is to try to reuse LAA state, rather than
recomputing it. The order between LV and LLE is not critical, because
normally LV does not touch the scalar st->ld forwarding cases where
vectorizing would inhibit the CPU's st->ld forwarding from kicking in.
LoopLoadElimination requires LAA to provide the full set of dependences
(including forward dependences). LAA is known to omit loop-independent
dependences in certain situations. The big comment before
removeDependencesFromMultipleStores explains why this should not occur
for the cases that we're interested in.
Reviewers: dberlin, hfinkel
Subscribers: junbuml, dberlin, mssimpso, rengolin, sanjoy, llvm-commits
Differential Revision: http://reviews.llvm.org/D13259
llvm-svn: 252017
2015-11-04 07:50:08 +08:00
|
|
|
|
|
|
|
// Eliminate loads by forwarding stores from the previous iteration to loads
|
|
|
|
// of the current iteration.
|
2017-10-03 07:39:20 +08:00
|
|
|
MPM.add(createLoopLoadEliminationPass());
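To make the transformation described in the commit message concrete, here is a hand-written C++ sketch (function names invented) of the rewrite LoopLoadElimination performs when it proves the store of iteration i feeds the load of iteration i+1:

// Before: every iteration re-loads the value the previous iteration stored.
void beforeLLE(int *A, const int *B, int n) {
  for (int i = 0; i < n; ++i)
    A[i + 1] = A[i] + B[i];
}

// After: the loop-carried value is kept in a scalar, eliminating the load.
void afterLLE(int *A, const int *B, int n) {
  int T = A[0];
  for (int i = 0; i < n; ++i) {
    T = T + B[i];
    A[i + 1] = T;
  }
}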
|
2015-11-04 07:50:08 +08:00
|
|
|
|
2013-12-06 05:20:02 +08:00
|
|
|
// FIXME: Because of #pragma vectorize enable, the passes below are always
|
|
|
|
// inserted in the pipeline, even when the vectorizer doesn't run (ex. when
|
|
|
|
// on -O1 and no #pragma is found). Would be good to have these two passes
|
|
|
|
// as function calls, so that we can only pass them when the vectorizer
|
|
|
|
// changed the code.
|
2020-03-21 17:14:17 +08:00
|
|
|
MPM.add(createInstructionCombiningPass());
|
2014-10-14 08:31:29 +08:00
|
|
|
if (OptLevel > 1 && ExtraVectorizerPasses) {
|
|
|
|
// At higher optimization levels, try to clean up any runtime overlap and
|
|
|
|
// alignment checks inserted by the vectorizer. We want to track correlated
|
|
|
|
// runtime checks for two inner loops in the same outer loop, fold any
|
|
|
|
// common computations, hoist loop-invariant aspects out of any outer loop,
|
|
|
|
// and unswitch the runtime checks if possible. Once hoisted, we may have
|
|
|
|
// dead (or speculatable) control flows or more combining opportunities.
|
2020-06-14 21:50:10 +08:00
|
|
|
MPM.add(createEarlyCSEPass());
|
2014-10-14 08:31:29 +08:00
|
|
|
MPM.add(createCorrelatedValuePropagationPass());
|
2020-03-21 17:14:17 +08:00
|
|
|
MPM.add(createInstructionCombiningPass());
|
2019-04-20 01:46:50 +08:00
|
|
|
MPM.add(createLICMPass(LicmMssaOptCap, LicmMssaNoAccForPromotionCap));
|
2017-03-18 01:13:41 +08:00
|
|
|
MPM.add(createLoopUnswitchPass(SizeLevel || OptLevel < 3, DivergentTarget));
|
2014-10-14 08:31:29 +08:00
|
|
|
MPM.add(createCFGSimplificationPass());
|
2020-03-21 17:14:17 +08:00
|
|
|
MPM.add(createInstructionCombiningPass());
|
2014-10-14 08:31:29 +08:00
|
|
|
}
|
2014-08-06 20:56:19 +08:00
|
|
|
|
2017-12-15 06:05:20 +08:00
|
|
|
// Cleanup after loop vectorization, etc. Simplification passes like CVP and
|
|
|
|
// GVN, loop transforms, and others have already run, so it's now better to
|
|
|
|
// convert to more optimized IR using more aggressive simplify CFG options.
|
|
|
|
// The extra sinking transform can create larger basic blocks, so do this
|
|
|
|
// before SLP vectorization.
|
Reland [SimplifyCFG][LoopRotate] SimplifyCFG: disable common instruction hoisting by default, enable late in pipeline
This was reverted in 503deec2183d466dad64b763bab4e15fd8804239
because it caused gigantic increase (3x) in branch mispredictions
in certain benchmarks on certain CPU's,
see https://reviews.llvm.org/D84108#2227365.
It has since been investigated and here are the results:
https://lists.llvm.org/pipermail/llvm-commits/Week-of-Mon-20200907/827578.html
> It's an amazingly severe regression, but it's also all due to branch
> mispredicts (about 3x without this). The code layout looks ok so there's
> probably something else to deal with. I'm not sure there's anything we can
> reasonably do so we'll just have to take the hit for now and wait for
> another code reorganization to make the branch predictor a bit more happy :)
>
> Thanks for giving us some time to investigate and feel free to recommit
> whenever you'd like.
>
> -eric
So let's just reland this.
Original commit message:
I've been looking at missed vectorizations in one codebase.
One particular thing that stands out is that some of the loops
reach the vectorizer in a rather mangled form, with weird PHIs,
and some of the loops aren't even in rotated form.
After taking a more detailed look, that happened because
the loops' headers were too big by then. It is evident that
SimplifyCFG's common code hoisting transform is at fault there,
because the pattern it handles is precisely the unrotated
loop basic block structure.
Surprisingly, `SimplifyCFGOpt::HoistThenElseCodeToIf()` is enabled
by default, and is always run, unlike its friend, the common code sinking
transform, `SinkCommonCodeFromPredecessors()`, which is not enabled
by default and is only run once, very late in the pipeline.
I'm proposing to harmonize this, and disable common code hoisting
until //late// in the pipeline. The definition of //late// may vary;
here I've currently picked the same point as for code sinking,
but I suppose we could enable it as soon as right after
loop rotation happens.
Experimentation shows that this does indeed, unsurprisingly, help:
more loops got rotated, although other issues remain elsewhere.
Now, this undoubtedly seriously shakes up phase ordering.
This will undoubtedly be a mixed bag in terms of both compile- and
run-time performance, as well as code size. Since we no longer aggressively
hoist+deduplicate common code, we don't pay the price of said hoisting
(which wasn't big). That may allow more loops to be rotated,
so we pay that price. That, in turn, may enable all the transforms
that require canonical (rotated) loop form, including but not limited to
vectorization, so we pay that too. And in general, no deduplication means
more [duplicate] instructions going through the optimizations. But there is
still late hoisting, so some of them will be caught then.
As per the benchmarks I've run {F12360204}, this is mostly within the noise;
there are some small improvements and some small regressions.
One big regression I saw I fixed in rG8d487668d09fb0e4e54f36207f07c1480ffabbfd, but I'm sure
this will expose many more pre-existing missed optimizations, as usual :S
llvm-compile-time-tracker.com thoughts on this:
http://llvm-compile-time-tracker.com/compare.php?from=e40315d2b4ed1e38962a8f33ff151693ed4ada63&to=c8289c0ecbf235da9fb0e3bc052e3c0d6bff5cf9&stat=instructions
* this does regress compile-time by +0.5% geomean (unsurprisingly)
* size impact varies; for ThinLTO it's actually an improvement
The largest fallout appears to be in GVN's load partial redundancy
elimination, it spends *much* more time in
`MemoryDependenceResults::getNonLocalPointerDependency()`.
Non-local `MemoryDependenceResults` is widely-known to be, uh, costly.
There does not appear to be a proper solution to this issue,
other than silencing the compile-time performance regression
by tuning cut-off thresholds in `MemoryDependenceResults`,
at the cost of potentially regressing run-time performance.
D84609 attempts to move in that direction, but the path is unclear
and is going to take some time.
If we look at stats before/after diffs, some excerpts:
* RawSpeed (the target) {F12360200}
* -14 (-73.68%) loops not rotated due to the header size (yay)
* -272 (-0.67%) `"Number of live out of a loop variables"` - good for vectorizer
* -3937 (-64.19%) common instructions hoisted
* +561 (+0.06%) x86 asm instructions
* -2 basic blocks
* +2418 (+0.11%) IR instructions
* vanilla test-suite + RawSpeed + darktable {F12360201}
* -36396 (-65.29%) common instructions hoisted
* +1676 (+0.02%) x86 asm instructions
* +662 (+0.06%) basic blocks
* +4395 (+0.04%) IR instructions
It is likely to be sub-optimal when optimizing for code size,
so one might want to tune the pipeline by enabling sinking/hoisting
when optimizing for size.
Reviewed By: mkazantsev
Differential Revision: https://reviews.llvm.org/D84108
This reverts commit 503deec2183d466dad64b763bab4e15fd8804239.
2020-09-08 04:54:06 +08:00
|
|
|
// FIXME: study whether hoisting and/or sinking of common instructions should
|
|
|
|
// be delayed until after SLP vectorizer.
|
2020-07-16 17:52:55 +08:00
|
|
|
MPM.add(createCFGSimplificationPass(SimplifyCFGOptions()
|
|
|
|
.forwardSwitchCondToPhi(true)
|
|
|
|
.convertSwitchToLookupTable(true)
|
|
|
|
.needCanonicalLoops(false)
|
2020-09-08 04:54:06 +08:00
|
|
|
.hoistCommonInsts(true)
|
2020-07-16 17:52:55 +08:00
|
|
|
.sinkCommonInsts(true)));
|
2017-12-15 06:05:20 +08:00
|
|
|
|
2019-04-17 10:26:27 +08:00
|
|
|
if (SLPVectorize) {
|
2017-06-30 15:09:08 +08:00
|
|
|
MPM.add(createSLPVectorizerPass()); // Vectorize parallel scalar chains.
|
|
|
|
if (OptLevel > 1 && ExtraVectorizerPasses) {
|
|
|
|
MPM.add(createEarlyCSEPass());
|
2014-08-06 20:56:19 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-05-23 00:13:18 +08:00
|
|
|
// Enhance/cleanup vector code.
|
|
|
|
MPM.add(createVectorCombinePass());
|
|
|
|
|
2014-05-25 18:27:02 +08:00
|
|
|
addExtensionsToPM(EP_Peephole, MPM);
|
2020-03-21 17:14:17 +08:00
|
|
|
MPM.add(createInstructionCombiningPass());
|
2013-06-24 15:21:47 +08:00
|
|
|
|
2018-12-19 01:16:05 +08:00
|
|
|
if (EnableUnrollAndJam && !DisableUnrollLoops) {
|
|
|
|
// Unroll and Jam. We do this before unroll but need to be in a separate
|
|
|
|
// loop pass manager in order for the outer loop to be processed by
|
|
|
|
// unroll and jam before the inner loop is unrolled.
|
|
|
|
MPM.add(createLoopUnrollAndJamPass(OptLevel));
|
|
|
|
}
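For readers unfamiliar with the transform, a hand-written sketch of unroll-and-jam (illustrative only; function names invented and the remainder loop for odd trip counts omitted): the outer loop is unrolled and the resulting inner loop bodies are fused, which is why this pass must see the loop nest before the inner loop gets unrolled away.

// Before: a plain loop nest.
void beforeUnJ(int C[], int A[][64], int n, int m) {
  for (int i = 0; i < n; ++i)
    for (int j = 0; j < m; ++j)
      C[i] += A[i][j];
}

// After unroll-and-jam by 2 (assuming n is even for brevity).
void afterUnJ(int C[], int A[][64], int n, int m) {
  for (int i = 0; i < n; i += 2)
    for (int j = 0; j < m; ++j) {
      C[i] += A[i][j];         // body for outer iteration i
      C[i + 1] += A[i + 1][j]; // jammed body for outer iteration i+1
    }
}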
|
2018-07-01 20:47:30 +08:00
|
|
|
|
2019-04-13 03:16:07 +08:00
|
|
|
// Unroll small loops
|
|
|
|
MPM.add(createLoopUnrollPass(OptLevel, DisableUnrollLoops,
|
|
|
|
ForgetAllSCEVInLoopUnroll));
|
2014-04-01 07:23:51 +08:00
|
|
|
|
2018-12-19 01:16:05 +08:00
|
|
|
if (!DisableUnrollLoops) {
|
2015-05-15 06:02:54 +08:00
|
|
|
// LoopUnroll may generate some redundancy to clean up.
|
2020-03-21 17:14:17 +08:00
|
|
|
MPM.add(createInstructionCombiningPass());
|
2015-05-15 06:02:54 +08:00
|
|
|
|
2015-03-12 13:36:01 +08:00
|
|
|
// Runtime unrolling will introduce a runtime check in the loop prologue. If the
|
|
|
|
// unrolled loop is an inner loop, then the prologue will be inside the
|
|
|
|
// outer loop. The LICM pass can help promote the runtime check out if the
|
|
|
|
// checked value is loop invariant.
|
2019-04-20 01:46:50 +08:00
|
|
|
MPM.add(createLICMPass(LicmMssaOptCap, LicmMssaNoAccForPromotionCap));
|
2018-12-19 01:16:05 +08:00
|
|
|
}
|
2015-03-12 13:36:01 +08:00
|
|
|
|
[Unroll/UnrollAndJam/Vectorizer/Distribute] Add followup loop attributes.
When multiple loop transformations are defined in a loop's metadata, their order of execution is defined by the order of their respective passes in the pass pipeline. For instance,
#pragma clang loop unroll_and_jam(enable)
#pragma clang loop distribute(enable)
is the same as
#pragma clang loop distribute(enable)
#pragma clang loop unroll_and_jam(enable)
and will try to loop-distribute before Unroll-And-Jam, because the LoopDistribute pass is scheduled after the UnrollAndJam pass. UnrollAndJamPass only supports one inner loop, i.e. it will necessarily fail after loop distribution. It is not possible to specify another execution order. Also, the order of passes in the pipeline is subject to change between versions of LLVM, optimization options, and which pass manager is used.
This patch adds 'followup' attributes to various loop transformation passes. These attributes define which attributes the resulting loop of a transformation should have. For instance,
!0 = !{!0, !1, !2}
!1 = !{!"llvm.loop.unroll_and_jam.enable"}
!2 = !{!"llvm.loop.unroll_and_jam.followup_inner", !3}
!3 = !{!"llvm.loop.distribute.enable"}
defines a loop ID (!0) to be unrolled-and-jammed (!1) and then the attribute !3 to be added to the jammed inner loop, which contains the instruction to distribute the inner loop.
Currently, in both pass managers, pass execution is in a fixed order and UnrollAndJamPass will not execute again after LoopDistribute. We hope to fix this in the future by allowing pass managers to run passes until a fixpoint is reached, by using Polly to perform these transformations, or by adding a loop transformation pass which takes the ordering issue into account.
For mandatory/forced transformations (e.g. those declared by #pragma omp simd), the user must be notified when a transformation could not be performed. The responsible pass cannot emit such a warning itself, because the transformation might be 'hidden' in a followup attribute when it is executed, or it might not be present in the pipeline at all. For this reason, this patch introduces a WarnMissedTransformations pass to warn about orphaned transformations.
Since this changes the user-visible diagnostic message when a transformation is applied, two test cases in the clang repository need to be updated.
To ensure that no other transformation is executed before the intended one, the attribute `llvm.loop.disable_nonforced` can be added which should disable transformation heuristics before the intended transformation is applied. E.g. it would be surprising if a loop is distributed before a #pragma unroll_and_jam is applied.
With more supported code transformations (loop fusion, interchange, stripmining, offloading, etc.), transformations can be used as building blocks for more complex transformations (e.g. stripmining+stripmining+interchange -> tiling).
Reviewed By: hfinkel, dmgreen
Differential Revision: https://reviews.llvm.org/D49281
Differential Revision: https://reviews.llvm.org/D55288
llvm-svn: 348944
2018-12-13 01:32:52 +08:00
|
|
|
MPM.add(createWarnMissedTransformationsPass());
|
|
|
|
|
2014-09-08 04:05:11 +08:00
|
|
|
// After vectorization and unrolling, assume intrinsics may tell us more
|
|
|
|
// about pointer alignments.
|
|
|
|
MPM.add(createAlignmentFromAssumptionsPass());
|
|
|
|
|
2017-10-06 12:39:40 +08:00
|
|
|
// FIXME: We shouldn't bother with this anymore.
|
|
|
|
MPM.add(createStripDeadPrototypesPass()); // Get rid of dead prototypes
|
2011-08-03 05:50:27 +08:00
|
|
|
|
2017-10-06 12:39:40 +08:00
|
|
|
// GlobalOpt already deletes dead functions and globals, at -O2 try a
|
|
|
|
// late pass of GlobalDCE. It is capable of deleting dead cycles.
|
|
|
|
if (OptLevel > 1) {
|
|
|
|
MPM.add(createGlobalDCEPass()); // Remove dead fns and globals.
|
|
|
|
MPM.add(createConstantMergePass()); // Merge dup global constants
|
2011-08-03 05:50:27 +08:00
|
|
|
}
|
2014-09-14 05:46:00 +08:00
|
|
|
|
2019-02-16 02:46:44 +08:00
|
|
|
// See comment in the new PM for justification of scheduling splitting at
|
|
|
|
// this stage (\ref buildModuleSimplificationPipeline).
|
2020-10-19 18:23:22 +08:00
|
|
|
if (EnableHotColdSplit && !(PrepareForLTO || PrepareForThinLTO))
|
2019-02-16 02:46:44 +08:00
|
|
|
MPM.add(createHotColdSplittingPass());
|
|
|
|
|
2020-09-16 07:05:38 +08:00
|
|
|
if (EnableIROutliner)
|
|
|
|
MPM.add(createIROutlinerPass());
|
|
|
|
|
2014-09-14 05:46:00 +08:00
|
|
|
if (MergeFunctions)
|
|
|
|
MPM.add(createMergeFunctionsPass());
|
|
|
|
|
[LPM] Port CGProfilePass from NPM to LPM
Reviewers: hans, chandlerc!, asbirlea, nikic
Reviewed By: hans, nikic
Subscribers: steven_wu, dexonsmith, nikic, echristo, void, zhizhouy, cfe-commits, aeubanks, MaskRay, jvesely, nhaehnle, hiraditya, kerbowa, llvm-commits
Tags: #llvm, #clang
Differential Revision: https://reviews.llvm.org/D83013
2020-07-09 03:30:28 +08:00
|
|
|
// Add Module flag "CG Profile" based on Branch Frequency Information.
|
|
|
|
if (CallGraphProfile)
|
|
|
|
MPM.add(createCGProfileLegacyPass());
|
|
|
|
|
2016-11-11 01:42:18 +08:00
|
|
|
// LoopSink pass sinks instructions hoisted by LICM, which serves as a
|
|
|
|
// canonicalization pass that enables other optimizations. As a result,
|
|
|
|
// LoopSink pass needs to be a very late IR pass to avoid undoing LICM
|
|
|
|
// result too early.
|
2016-11-09 08:58:19 +08:00
|
|
|
MPM.add(createLoopSinkPass());
|
|
|
|
// Get rid of LCSSA nodes.
|
2018-06-30 07:36:03 +08:00
|
|
|
MPM.add(createInstSimplifyLegacyPass());
|
2017-04-26 20:02:41 +08:00
|
|
|
|
2017-09-09 21:38:18 +08:00
|
|
|
// This hoists/decomposes div/rem ops. It should run after other sink/hoist
|
|
|
|
// passes to avoid re-sinking, but before SimplifyCFG because it can allow
|
|
|
|
// flattening of blocks.
|
|
|
|
MPM.add(createDivRemPairsPass());
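A hand-written sketch of the decomposition half of this pass (illustrative only; the function name is invented): on a target with no combined divrem instruction, pairing the two operations lets the remainder be recomputed from the quotient instead of issuing a second divide.

int divRemPair(int a, int b, int &r) {
  int q = a / b;  // the single hardware divide
  r = a - q * b;  // remainder derived from the quotient; no second divide
  return q;
}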
|
|
|
|
|
2017-04-26 20:02:41 +08:00
|
|
|
// LoopSink (and other loop passes since the last simplifyCFG) might have
|
|
|
|
// resulted in single-entry-single-exit or empty blocks. Clean up the CFG.
|
|
|
|
MPM.add(createCFGSimplificationPass());
|
|
|
|
|
2012-03-24 07:22:59 +08:00
|
|
|
addExtensionsToPM(EP_OptimizerLast, MPM);
|
2018-06-23 04:23:21 +08:00
|
|
|
|
[ThinLTO] Handle chains of aliases
At -O0, globalopt is not run during the compile step, and we can have a
chain where an alias's immediate aliasee is another alias. The
summaries are constructed assuming aliases are in canonical form
(flattened chains), and as a result only the base object, but no
intermediate aliases, was preserved.
Fix this by adding a pass that canonicalizes aliases, ensuring that each
alias is a direct alias of the base object.
Reviewers: pcc, davidxl
Subscribers: mehdi_amini, inglorion, eraman, steven_wu, dexonsmith, arphaman, llvm-commits
Differential Revision: https://reviews.llvm.org/D54507
llvm-svn: 350423
2019-01-05 03:04:54 +08:00
|
|
|
if (PrepareForLTO) {
|
|
|
|
MPM.add(createCanonicalizeAliasesPass());
|
|
|
|
// Rename anon globals to be able to handle them in the summary
|
2018-06-23 04:23:21 +08:00
|
|
|
MPM.add(createNameAnonGlobalPass());
|
2019-01-05 03:04:54 +08:00
|
|
|
}
|
2020-11-13 17:46:55 +08:00
|
|
|
|
|
|
|
MPM.add(createAnnotationRemarksLegacyPass());
|
2011-08-03 05:50:27 +08:00
|
|
|
}
|
|
|
|
|
2015-02-13 18:01:29 +08:00
|
|
|
void PassManagerBuilder::addLTOOptimizationPasses(legacy::PassManagerBase &PM) {
|
2018-11-16 02:06:42 +08:00
|
|
|
// Load sample profile before running the LTO optimization pipeline.
|
|
|
|
if (!PGOSampleUse.empty()) {
|
|
|
|
PM.add(createPruneEHPass());
|
|
|
|
PM.add(createSampleProfileLoaderPass(PGOSampleUse));
|
|
|
|
}
|
|
|
|
|
2016-05-26 05:26:14 +08:00
|
|
|
// Remove unused virtual tables to improve the quality of code generated by
|
|
|
|
// whole-program devirtualization and bitset lowering.
|
|
|
|
PM.add(createGlobalDCEPass());
|
|
|
|
|
2011-08-03 05:50:27 +08:00
|
|
|
// Provide AliasAnalysis services for optimizations.
|
|
|
|
addInitialAliasAnalysisPasses(PM);
|
|
|
|
|
2015-12-27 16:13:45 +08:00
|
|
|
// Allow forcing function attributes as a debugging and tuning aid.
|
|
|
|
PM.add(createForceFunctionAttrsLegacyPass());
|
|
|
|
|
2015-12-27 16:41:34 +08:00
|
|
|
// Infer attributes about declarations if possible.
|
|
|
|
PM.add(createInferFunctionAttrsLegacyPass());
|
|
|
|
|
2016-05-26 05:26:14 +08:00
|
|
|
if (OptLevel > 1) {
|
Recommit r317351 : Add CallSiteSplitting pass
This recommit r317351 after fixing a buildbot failure.
Original commit message:
Summary:
This change adds a pass which tries to split a call-site to pass
more constrained arguments if its argument is predicated in the control flow,
so that we can expose better context to the later passes (e.g., inliner, jump
threading, or IPA-CP based function cloning, etc.).
As of now we support two cases:
1) If a call site is dominated by an OR condition and if any of its arguments
are predicated on this OR condition, try to split the condition with more
constrained arguments. For example, in the code below, we try to split the
call site since we can predicate the argument (ptr) based on the OR condition.
Split from :
if (!ptr || c)
callee(ptr);
to :
if (!ptr)
callee(null ptr) // set the known constant value
else if (c)
callee(nonnull ptr) // set non-null attribute in the argument
2) We can also split a call-site based on constant incoming values of a PHI.
For example,
from :
BB0:
%c = icmp eq i32 %i1, %i2
br i1 %c, label %BB2, label %BB1
BB1:
br label %BB2
BB2:
%p = phi i32 [ 0, %BB0 ], [ 1, %BB1 ]
call void @bar(i32 %p)
to
BB0:
%c = icmp eq i32 %i1, %i2
br i1 %c, label %BB2-split0, label %BB1
BB1:
br label %BB2-split1
BB2-split0:
call void @bar(i32 0)
br label %BB2
BB2-split1:
call void @bar(i32 1)
br label %BB2
BB2:
%p = phi i32 [ 0, %BB2-split0 ], [ 1, %BB2-split1 ]
llvm-svn: 317362
2017-11-04 04:41:16 +08:00
|
|
|
// Split call-site with more constrained arguments.
|
|
|
|
PM.add(createCallSiteSplittingPass());
|
|
|
|
|
2016-05-26 05:26:14 +08:00
|
|
|
// Indirect call promotion. This should promote all the targets that are
|
|
|
|
// left by the earlier promotion pass that promotes intra-module targets.
|
|
|
|
// This two-step promotion is to save compile time. For LTO, it should
|
|
|
|
// produce the same result as if we only do promotion here.
|
2017-02-24 06:15:18 +08:00
|
|
|
PM.add(
|
|
|
|
createPGOIndirectCallPromotionLegacyPass(true, !PGOSampleUse.empty()));
|
2016-05-26 05:26:14 +08:00
|
|
|
|
|
|
|
// Propagate constants at call sites into the functions they call. This
|
|
|
|
// opens opportunities for globalopt (and inlining) by substituting function
|
|
|
|
// pointers passed as arguments to direct uses of functions.
|
|
|
|
PM.add(createIPSCCPPass());
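A minimal source-level illustration of what interprocedural SCCP can achieve here (a hand-written example, not from the pass; names invented): when every call site passes the same constant, the parameter becomes a known constant inside the callee and the calls fold away.

static int scale(int x) { return x * 10; }     // IPSCCP can prove x == 4 here
int useScale() { return scale(4) + scale(4); } // foldable to the constant 80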
|
2017-10-25 21:40:08 +08:00
|
|
|
|
|
|
|
// Attach metadata to indirect call sites indicating the set of functions
|
|
|
|
// they may target at run-time. This should follow IPSCCP.
|
|
|
|
PM.add(createCalledValuePropagationPass());
|
2019-06-05 11:02:24 +08:00
|
|
|
|
|
|
|
// Infer attributes on declarations, call sites, arguments, etc.
|
2020-04-18 10:43:54 +08:00
|
|
|
if (AttributorRun & AttributorRunOption::MODULE)
|
2020-04-06 00:45:19 +08:00
|
|
|
PM.add(createAttributorLegacyPass());
|
2016-05-26 05:26:14 +08:00
|
|
|
}
|
2011-08-03 05:50:27 +08:00
|
|
|
|
2016-05-26 05:26:14 +08:00
|
|
|
// Infer attributes about definitions. The readnone attribute in particular is
|
|
|
|
// required for virtual constant propagation.
|
2016-02-18 19:03:11 +08:00
|
|
|
PM.add(createPostOrderFunctionAttrsLegacyPass());
|
2016-01-08 18:55:52 +08:00
|
|
|
PM.add(createReversePostOrderFunctionAttrsPass());
|
2016-05-26 05:26:14 +08:00
|
|
|
|
2016-11-17 07:40:26 +08:00
|
|
|
// Split globals using inrange annotations on GEP indices. This can help
|
|
|
|
// improve the quality of generated code when virtual constant propagation or
|
|
|
|
// control flow integrity are enabled.
|
|
|
|
PM.add(createGlobalSplitPass());
|
|
|
|
|
2016-05-26 05:26:14 +08:00
|
|
|
// Apply whole-program devirtualization and virtual constant propagation.
|
2017-03-23 02:22:59 +08:00
|
|
|
PM.add(createWholeProgramDevirtPass(ExportSummary, nullptr));
|
2016-05-26 05:26:14 +08:00
|
|
|
|
|
|
|
// That's all we need at opt level 1.
|
|
|
|
if (OptLevel == 1)
|
|
|
|
return;
|
|
|
|
|
|
|
|
// Now that we internalized some globals, see if we can hack on them!
|
2011-08-03 05:50:27 +08:00
|
|
|
PM.add(createGlobalOptimizerPass());
|
2015-12-15 17:24:01 +08:00
|
|
|
// Promote any localized global vars.
|
|
|
|
PM.add(createPromoteMemoryToRegisterPass());
|
2011-08-03 05:50:27 +08:00
|
|
|
|
|
|
|
// Linking modules together can lead to duplicated global constants, only
|
|
|
|
// keep one copy of each constant.
|
|
|
|
PM.add(createConstantMergePass());
|
|
|
|
|
|
|
|
// Remove unused arguments from functions.
|
|
|
|
PM.add(createDeadArgEliminationPass());
|
|
|
|
|
|
|
|
// Reduce the code after globalopt and ipsccp. Both can open up significant
|
|
|
|
// simplification opportunities, and both can propagate functions through
|
|
|
|
// function pointers. When this happens, we often have to resolve varargs
|
|
|
|
// calls, etc, so let instcombine do this.
|
2018-01-25 20:06:32 +08:00
|
|
|
if (OptLevel > 2)
|
|
|
|
PM.add(createAggressiveInstCombinerPass());
|
2020-03-21 17:14:17 +08:00
|
|
|
PM.add(createInstructionCombiningPass());
|
2014-05-25 18:27:02 +08:00
|
|
|
addExtensionsToPM(EP_Peephole, PM);
|
2011-08-03 05:50:27 +08:00
|
|
|
|
|
|
|
// Inline small functions
|
2014-08-21 21:35:30 +08:00
|
|
|
bool RunInliner = Inliner;
|
|
|
|
if (RunInliner) {
|
|
|
|
PM.add(Inliner);
|
|
|
|
Inliner = nullptr;
|
|
|
|
}
|
2011-08-03 05:50:27 +08:00
|
|
|
|
|
|
|
PM.add(createPruneEHPass()); // Remove dead EH info.
|
|
|
|
|
2019-03-05 04:21:27 +08:00
|
|
|
// CSFDO instrumentation and use pass.
|
|
|
|
addPGOInstrPasses(PM, /* IsCS */ true);
|
|
|
|
|
2019-11-27 14:30:12 +08:00
|
|
|
// Infer attributes on declarations, call sites, arguments, etc. for an SCC.
|
2020-04-18 10:43:54 +08:00
|
|
|
if (AttributorRun & AttributorRunOption::CGSCC)
|
2020-04-06 00:45:19 +08:00
|
|
|
PM.add(createAttributorCGSCCLegacyPass());
|
2019-11-27 14:30:12 +08:00
|
|
|
|
2019-11-07 13:20:06 +08:00
|
|
|
// Try to perform OpenMP specific optimizations. This is a (quick!) no-op if
|
|
|
|
// there are no OpenMP runtime calls present in the module.
|
|
|
|
if (OptLevel > 1)
|
|
|
|
PM.add(createOpenMPOptLegacyPass());
|
|
|
|
|
2011-08-03 05:50:27 +08:00
|
|
|
// Optimize globals again if we ran the inliner.
|
|
|
|
if (RunInliner)
|
|
|
|
PM.add(createGlobalOptimizerPass());
|
|
|
|
PM.add(createGlobalDCEPass()); // Remove dead functions.
|
|
|
|
|
|
|
|
// If we didn't decide to inline a function, check to see if we can
|
|
|
|
// transform it to pass arguments by value instead of by reference.
|
|
|
|
PM.add(createArgumentPromotionPass());
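A hand-written sketch of the by-reference-to-by-value rewrite this pass performs (illustrative only; names invented, and only legal for internal functions whose pointer argument is merely read):

static int readThrough(const int *p) { return *p + 1; } // before promotion
static int readDirect(int v) { return v + 1; }          // conceptual result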
|
|
|
|
|
|
|
|
// The IPO passes may leave cruft around. Clean up after them.
|
2020-03-21 17:14:17 +08:00
|
|
|
PM.add(createInstructionCombiningPass());
|
2014-05-25 18:27:02 +08:00
|
|
|
addExtensionsToPM(EP_Peephole, PM);
|
2020-09-10 18:05:24 +08:00
|
|
|
PM.add(createJumpThreadingPass(/*FreezeSelectCond*/ true));
|
2013-08-30 08:48:37 +08:00
|
|
|
|
2011-08-03 05:50:27 +08:00
|
|
|
// Break up allocas
|
2016-06-15 08:19:09 +08:00
|
|
|
PM.add(createSROAPass());
|
2011-08-03 05:50:27 +08:00
|
|
|
|
2019-03-21 03:08:18 +08:00
|
|
|
// LTO provides additional opportunities for tailcall elimination due to
|
|
|
|
// link-time inlining and visibility of the nocapture attribute.
|
2019-11-27 12:28:52 +08:00
|
|
|
if (OptLevel > 1)
|
|
|
|
PM.add(createTailCallEliminationPass());
|
2019-03-21 03:08:18 +08:00
|
|
|
|
2019-06-05 11:02:24 +08:00
|
|
|
// Infer attributes on declarations, call sites, arguments, etc.
|
2016-02-18 19:03:11 +08:00
|
|
|
PM.add(createPostOrderFunctionAttrsLegacyPass()); // Add nocapture.
|
2019-06-05 11:02:24 +08:00
|
|
|
// Run a few AA-driven optimizations here and now, to clean up the code.
|
[PM/AA] Rebuild LLVM's alias analysis infrastructure in a way compatible
with the new pass manager, and no longer relying on analysis groups.
This builds essentially a ground-up new AA infrastructure stack for
LLVM. The core ideas are the same that are used throughout the new pass
manager: type erased polymorphism and direct composition. The design is
as follows:
- FunctionAAResults is a type-erasing alias analysis results aggregation
interface to walk a single query across a range of results from
different alias analyses. Currently this is function-specific as we
always assume that aliasing queries are *within* a function.
- AAResultBase is a CRTP utility providing stub implementations of
various parts of the alias analysis result concept, notably in several
cases in terms of other more general parts of the interface. This can
be used to implement only a narrow part of the interface rather than
the entire interface. This isn't really ideal, this logic should be
hoisted into FunctionAAResults as currently it will cause
a significant amount of redundant work, but it faithfully models the
behavior of the prior infrastructure.
- All the alias analysis passes are ported to be wrapper passes for the
legacy PM and new-style analysis passes for the new PM with a shared
result object. In some cases (most notably CFL), this is an extremely
naive approach that we should revisit when we can specialize for the
new pass manager.
- BasicAA has been restructured to reflect that it is much more
fundamentally a function analysis because it uses dominator trees and
loop info that need to be constructed for each function.
All of the references to getting alias analysis results have been
updated to use the new aggregation interface. All the preservation and
other pass management code has been updated accordingly.
The way the FunctionAAResultsWrapperPass works is to detect the
available alias analyses when run, and add them to the results object.
This means that we should be able to continue to respect when various
passes are added to the pipeline, for example adding CFL or adding TBAA
passes should just cause their results to be available and to get folded
into this. The exception to this rule is BasicAA which really needs to
be a function pass due to using dominator trees and loop info. As
a consequence, the FunctionAAResultsWrapperPass directly depends on
BasicAA and always includes it in the aggregation.
This has significant implications for preserving analyses. Generally,
most passes shouldn't bother preserving FunctionAAResultsWrapperPass
because rebuilding the results just updates the set of known AA passes.
The exception to this rule are LoopPass instances which need to preserve
all the function analyses that the loop pass manager will end up
needing. This means preserving both BasicAAWrapperPass and the
aggregating FunctionAAResultsWrapperPass.
Now, when preserving an alias analysis, you do so by directly preserving
that analysis. This is only necessary for non-immutable-pass-provided
alias analyses though, and there are only three of interest: BasicAA,
GlobalsAA (formerly GlobalsModRef), and SCEVAA. Usually BasicAA is
preserved when needed because it (like DominatorTree and LoopInfo) is
marked as a CFG-only pass. I've expanded GlobalsAA into the preserved
set everywhere we previously were preserving all of AliasAnalysis, and
I've added SCEVAA in the intersection of that with where we preserve
SCEV itself.
One significant challenge to all of this is that the CGSCC passes were
actually using the alias analysis implementations by taking advantage of
a pretty amazing set of loop holes in the old pass manager's analysis
management code which allowed analysis groups to slide through in many
cases. Moving away from analysis groups makes this problem much more
obvious. To fix it, I've leveraged the flexibility the design of the new
PM components provides to just directly construct the relevant alias
analyses for the relevant functions in the IPO passes that need them.
This is a bit hacky, but should go away with the new pass manager, and
is already in many ways cleaner than the prior state.
Another significant challenge is that various facilities of the old
alias analysis infrastructure just don't fit any more. The most
significant of these is the alias analysis 'counter' pass. That pass
relied on the ability to snoop on AA queries at different points in the
analysis group chain. Instead, I'm planning to build printing
functionality directly into the aggregation layer. I've not included
that in this patch merely to keep it smaller.
Note that all of this needs a nearly complete rewrite of the AA
documentation. I'm planning to do that, but I'd like to make sure the
new design settles, and to flesh out a bit more of what it looks like in
the new pass manager first.
Differential Revision: http://reviews.llvm.org/D12080
llvm-svn: 247167
2015-09-10 01:55:00 +08:00
|
|
|
PM.add(createGlobalsAAWrapperPass()); // IP alias analysis.
|
2011-08-03 05:50:27 +08:00
|
|
|
|
2019-04-20 01:46:50 +08:00
|
|
|
PM.add(createLICMPass(LicmMssaOptCap, LicmMssaNoAccForPromotionCap));
|
2016-12-27 02:26:19 +08:00
|
|
|
PM.add(NewGVN ? createNewGVNPass()
|
|
|
|
: createGVNPass(DisableGVNLoadPRE)); // Remove redundancies.
|
2012-04-03 06:16:50 +08:00
|
|
|
PM.add(createMemCpyOptPass()); // Remove dead memcpys.
|
2013-08-30 08:48:37 +08:00
|
|
|
|
2011-08-03 05:50:27 +08:00
|
|
|
// Nuke dead stores.
|
|
|
|
PM.add(createDeadStoreEliminationPass());
|
2020-09-03 19:30:36 +08:00
|
|
|
PM.add(createMergedLoadStoreMotionPass()); // Merge ld/st in diamonds.
|
2011-08-03 05:50:27 +08:00
|
|
|
|
2014-04-16 01:48:15 +08:00
|
|
|
// More loops are countable; try to optimize them.
|
2020-11-10 00:16:54 +08:00
|
|
|
if (EnableLoopFlatten)
|
|
|
|
PM.add(createLoopFlattenPass());
|
2014-04-16 01:48:15 +08:00
|
|
|
PM.add(createIndVarSimplifyPass());
|
|
|
|
PM.add(createLoopDeletionPass());
|
2015-03-06 18:11:25 +08:00
|
|
|
if (EnableLoopInterchange)
|
|
|
|
PM.add(createLoopInterchangePass());
|
|
|
|
|
2020-12-08 02:14:57 +08:00
|
|
|
if (EnableConstraintElimination)
|
|
|
|
PM.add(createConstraintEliminationPass());
|
|
|
|
|
2021-01-26 21:43:39 +08:00
|
|
|
// Unroll small loops and perform peeling.
|
2019-04-13 03:16:07 +08:00
|
|
|
PM.add(createSimpleLoopUnrollPass(OptLevel, DisableUnrollLoops,
|
|
|
|
ForgetAllSCEVInLoopUnroll));
|
2020-10-21 19:01:59 +08:00
|
|
|
PM.add(createLoopDistributePass());
|
2018-12-19 01:46:09 +08:00
|
|
|
PM.add(createLoopVectorizePass(true, !LoopVectorize));
|
2016-01-14 23:00:09 +08:00
|
|
|
// The vectorizer may have significantly shortened a loop body; unroll again.
|
2019-04-13 03:16:07 +08:00
|
|
|
PM.add(createLoopUnrollPass(OptLevel, DisableUnrollLoops,
|
|
|
|
ForgetAllSCEVInLoopUnroll));
|
2014-02-25 02:19:31 +08:00
|
|
|
|
2018-12-13 01:32:52 +08:00
|
|
|
PM.add(createWarnMissedTransformationsPass());
|
|
|
|
|
2015-12-15 17:24:01 +08:00
|
|
|
// Now that we've optimized loops (in particular loop induction variables),
|
|
|
|
// we may have exposed more scalar opportunities. Run parts of the scalar
|
|
|
|
// optimizer again at this point.
|
2020-03-21 17:14:17 +08:00
|
|
|
PM.add(createInstructionCombiningPass()); // Initial cleanup
|
2021-01-05 00:26:58 +08:00
|
|
|
PM.add(createCFGSimplificationPass(SimplifyCFGOptions() // if-convert
|
|
|
|
.hoistCommonInsts(true)));
|
2015-12-15 17:24:01 +08:00
|
|
|
PM.add(createSCCPPass()); // Propagate exposed constants
|
2020-03-21 17:14:17 +08:00
|
|
|
PM.add(createInstructionCombiningPass()); // Clean up again
|
2015-12-15 17:24:01 +08:00
|
|
|
PM.add(createBitTrackingDCEPass());
|
|
|
|
|
2014-05-06 07:14:46 +08:00
|
|
|
// More scalar chains could be vectorized due to more alias information
|
2020-05-25 00:20:22 +08:00
|
|
|
if (SLPVectorize)
|
2019-04-17 10:26:27 +08:00
|
|
|
PM.add(createSLPVectorizerPass()); // Vectorize parallel scalar chains.
|
2020-05-25 00:20:22 +08:00
|
|
|
|
|
|
|
PM.add(createVectorCombinePass()); // Clean up partial vectorization.
|
2014-05-06 07:14:46 +08:00
|
|
|
|
2014-09-08 04:05:11 +08:00
|
|
|
// After vectorization, assume intrinsics may tell us more about pointer
|
|
|
|
// alignments.
|
|
|
|
PM.add(createAlignmentFromAssumptionsPass());
|
|
|
|
|
2011-08-03 05:50:27 +08:00
|
|
|
// Cleanup and simplify the code after the scalar optimizations.
|
2020-03-21 17:14:17 +08:00
|
|
|
PM.add(createInstructionCombiningPass());
|
2014-05-25 18:27:02 +08:00
|
|
|
addExtensionsToPM(EP_Peephole, PM);
|
2011-08-03 05:50:27 +08:00
|
|
|
|
2020-09-10 18:05:24 +08:00
|
|
|
PM.add(createJumpThreadingPass(/*FreezeSelectCond*/ true));
|
2015-03-20 06:01:00 +08:00
|
|
|
}
|
2011-08-03 05:50:27 +08:00
|
|
|
|
2015-03-20 06:01:00 +08:00
|
|
|
void PassManagerBuilder::addLateLTOOptimizationPasses(
|
|
|
|
legacy::PassManagerBase &PM) {
|
2019-02-16 02:46:44 +08:00
|
|
|
// See comment in the new PM for justification of scheduling splitting at
|
|
|
|
// this stage (\ref buildLTODefaultPipeline).
|
2020-10-19 18:23:22 +08:00
|
|
|
if (EnableHotColdSplit)
|
|
|
|
PM.add(createHotColdSplittingPass());
|
2019-02-16 02:46:44 +08:00
|
|
|
|
2011-08-03 05:50:27 +08:00
|
|
|
// Delete basic blocks, which optimization passes may have killed.
|
2021-01-05 00:26:58 +08:00
|
|
|
PM.add(
|
|
|
|
createCFGSimplificationPass(SimplifyCFGOptions().hoistCommonInsts(true)));
|
2011-08-03 05:50:27 +08:00
|
|
|
|
2015-08-12 00:26:41 +08:00
|
|
|
// Drop bodies of available externally objects to improve GlobalDCE.
|
|
|
|
PM.add(createEliminateAvailableExternallyPass());
|
|
|
|
|
2011-08-03 05:50:27 +08:00
|
|
|
// Now that we have optimized the program, discard unreachable functions.
|
|
|
|
PM.add(createGlobalDCEPass());
|
2014-09-14 05:46:00 +08:00
|
|
|
|
|
|
|
// FIXME: this is profitable (for compiler time) to do at -O0 too, but
|
|
|
|
// currently it damages debug info.
|
|
|
|
if (MergeFunctions)
|
|
|
|
PM.add(createMergeFunctionsPass());
|
2011-08-03 05:50:27 +08:00
|
|
|
}
|
2011-08-10 06:17:34 +08:00
|
|
|
|
2016-02-17 07:02:29 +08:00
|
|
|
void PassManagerBuilder::populateThinLTOPassManager(
|
|
|
|
legacy::PassManagerBase &PM) {
|
|
|
|
PerformThinLTO = true;
|
2018-07-24 05:58:19 +08:00
|
|
|
if (LibraryInfo)
|
|
|
|
PM.add(new TargetLibraryInfoWrapperPass(*LibraryInfo));
|
2016-02-17 07:02:29 +08:00
|
|
|
|
|
|
|
if (VerifyInput)
|
|
|
|
PM.add(createVerifierPass());
|
|
|
|
|
2017-03-23 02:22:59 +08:00
|
|
|
if (ImportSummary) {
|
Restore "[WPD/LowerTypeTests] Delay lowering/removal of type tests until after ICP"
This restores commit 80d0a137a5aba6998fadb764f1e11cb901aae233, and the
follow on fix in 873c0d0786dcf22f4af39f65df824917f70f2170, with a new
fix for test failures after a 2-stage clang bootstrap, and a more robust
fix for the Chromium build failure that an earlier version partially
fixed. See also discussion on D75201.
Reviewers: evgeny777
Subscribers: mehdi_amini, Prazek, hiraditya, steven_wu, dexonsmith, arphaman, davidxl, cfe-commits, llvm-commits
Tags: #clang, #llvm
Differential Revision: https://reviews.llvm.org/D73242
2020-03-18 02:08:35 +08:00
|
|
|
// This pass imports type identifier resolutions for whole-program
|
|
|
|
// devirtualization and CFI. It must run early because other passes may
|
2017-02-16 07:48:38 +08:00
|
|
|
// disturb the specific instruction patterns that these passes look for,
|
|
|
|
// creating dependencies on resolutions that may not appear in the summary.
|
|
|
|
//
|
|
|
|
// For example, GVN may transform the pattern assume(type.test) appearing in
|
|
|
|
// two basic blocks into assume(phi(type.test, type.test)), which would
|
|
|
|
// transform a dependency on a WPD resolution into a dependency on a type
|
|
|
|
// identifier resolution for CFI.
|
|
|
|
//
|
|
|
|
// Also, WPD has access to more precise information than ICP and can
|
|
|
|
// devirtualize more effectively, so it should operate on the IR first.
|
2017-03-23 02:22:59 +08:00
|
|
|
PM.add(createWholeProgramDevirtPass(nullptr, ImportSummary));
|
|
|
|
PM.add(createLowerTypeTestsPass(nullptr, ImportSummary));
|
2017-02-16 07:48:38 +08:00
|
|
|
}
|
2017-01-21 06:18:52 +08:00
|
|
|
|
2016-02-17 07:02:29 +08:00
|
|
|
populateModulePassManager(PM);
|
|
|
|
|
|
|
|
if (VerifyOutput)
|
|
|
|
PM.add(createVerifierPass());
|
|
|
|
PerformThinLTO = false;
|
|
|
|
}
|
|
|
|
|
2015-02-13 18:01:29 +08:00
|
|
|
void PassManagerBuilder::populateLTOPassManager(legacy::PassManagerBase &PM) {
|
2014-08-22 04:03:44 +08:00
|
|
|
if (LibraryInfo)
|
2015-01-15 18:41:28 +08:00
|
|
|
PM.add(new TargetLibraryInfoWrapperPass(*LibraryInfo));
|
2014-08-22 04:03:44 +08:00
|
|
|
|
2015-03-20 06:24:17 +08:00
|
|
|
if (VerifyInput)
|
2014-08-22 04:03:44 +08:00
|
|
|
PM.add(createVerifierPass());
|
|
|
|
|
2019-07-02 23:52:39 +08:00
|
|
|
addExtensionsToPM(EP_FullLinkTimeOptimizationEarly, PM);
|
|
|
|
|
2016-02-10 06:50:34 +08:00
|
|
|
if (OptLevel != 0)
|
2014-08-22 04:03:44 +08:00
|
|
|
addLTOOptimizationPasses(PM);
|
2017-05-27 02:27:13 +08:00
|
|
|
else {
|
|
|
|
// The whole-program-devirt pass needs to run at -O0 because only it knows
|
|
|
|
// about the llvm.type.checked.load intrinsic: it needs to both lower the
|
|
|
|
// intrinsic itself and handle it in the summary.
|
|
|
|
PM.add(createWholeProgramDevirtPass(ExportSummary, nullptr));
|
|
|
|
}
|
2014-08-22 04:03:44 +08:00
|
|
|
|
2015-12-16 07:00:08 +08:00
|
|
|
// Create a function that performs CFI checks for cross-DSO calls with targets
|
|
|
|
// in the current module.
|
|
|
|
PM.add(createCrossDSOCFIPass());

  // Lower type metadata and the type.test intrinsic. This pass supports Clang's
  // control flow integrity mechanisms (-fsanitize=cfi*) and needs to run at
  // link time if CFI is enabled. The pass does nothing if CFI is disabled.
  PM.add(createLowerTypeTestsPass(ExportSummary, nullptr));
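  // (Illustrative only: a type test reaching this pass looks roughly like
  //   %ok = call i1 @llvm.type.test(i8* %vt, metadata !"_ZTS1A")
  //   br i1 %ok, label %cont, label %trap
  // and is rewritten into concrete range/bit-vector checks against the
  // combined vtable layout the pass builds here.)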
  // Run a second time to clean up any type tests left behind by WPD for use
  // in ICP (which is performed earlier than this in the regular LTO pipeline).
  PM.add(createLowerTypeTestsPass(nullptr, nullptr, true));
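  // (With both summaries null, the trailing true asks the pass only to strip
  // the remaining llvm.type.test calls rather than lower them; the flag name,
  // DropTypeTests in the D73242 change, is assumed rather than visible at
  // this call site.)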

  if (OptLevel != 0)
    addLateLTOOptimizationPasses(PM);

  addExtensionsToPM(EP_FullLinkTimeOptimizationLast, PM);

  PM.add(createAnnotationRemarksLegacyPass());

  if (VerifyOutput)
    PM.add(createVerifierPass());
}
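
// Minimal driver sketch (assumed usage, not defined in this file): a linker
// plugin performing full LTO with this builder might do:
//
//   PassManagerBuilder PMB;
//   PMB.OptLevel = 2;
//   PMB.Inliner = createFunctionInliningPass();
//   legacy::PassManager PM;
//   PMB.populateLTOPassManager(PM);
//   PM.run(TheModule); // TheModule is a placeholder llvm::Module.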

LLVMPassManagerBuilderRef LLVMPassManagerBuilderCreate() {
  PassManagerBuilder *PMB = new PassManagerBuilder();
  return wrap(PMB);
}

void LLVMPassManagerBuilderDispose(LLVMPassManagerBuilderRef PMB) {
  PassManagerBuilder *Builder = unwrap(PMB);
  delete Builder;
}

void
LLVMPassManagerBuilderSetOptLevel(LLVMPassManagerBuilderRef PMB,
                                  unsigned OptLevel) {
  PassManagerBuilder *Builder = unwrap(PMB);
  Builder->OptLevel = OptLevel;
}

void
LLVMPassManagerBuilderSetSizeLevel(LLVMPassManagerBuilderRef PMB,
                                   unsigned SizeLevel) {
  PassManagerBuilder *Builder = unwrap(PMB);
  Builder->SizeLevel = SizeLevel;
}
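
// (Convention note for the two setters above, mirroring the values clang
// hands down: OptLevel runs 0-3 like -O0..-O3, and SizeLevel is 0 for none,
// 1 for -Os, 2 for -Oz.)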

void
LLVMPassManagerBuilderSetDisableUnitAtATime(LLVMPassManagerBuilderRef PMB,
                                            LLVMBool Value) {
  // NOTE: The DisableUnitAtATime switch has been removed.
}

void
LLVMPassManagerBuilderSetDisableUnrollLoops(LLVMPassManagerBuilderRef PMB,
                                            LLVMBool Value) {
  PassManagerBuilder *Builder = unwrap(PMB);
  Builder->DisableUnrollLoops = Value;
}

void
LLVMPassManagerBuilderSetDisableSimplifyLibCalls(LLVMPassManagerBuilderRef PMB,
                                                 LLVMBool Value) {
  // NOTE: The simplify-libcalls pass has been removed.
}

void
LLVMPassManagerBuilderUseInlinerWithThreshold(LLVMPassManagerBuilderRef PMB,
                                              unsigned Threshold) {
  PassManagerBuilder *Builder = unwrap(PMB);
  Builder->Inliner = createFunctionInliningPass(Threshold);
}

void
LLVMPassManagerBuilderPopulateFunctionPassManager(LLVMPassManagerBuilderRef PMB,
                                                  LLVMPassManagerRef PM) {
  PassManagerBuilder *Builder = unwrap(PMB);
  legacy::FunctionPassManager *FPM = unwrap<legacy::FunctionPassManager>(PM);
  Builder->populateFunctionPassManager(*FPM);
}

void
LLVMPassManagerBuilderPopulateModulePassManager(LLVMPassManagerBuilderRef PMB,
                                                LLVMPassManagerRef PM) {
  PassManagerBuilder *Builder = unwrap(PMB);
  legacy::PassManagerBase *MPM = unwrap(PM);
  Builder->populateModulePassManager(*MPM);
}

void LLVMPassManagerBuilderPopulateLTOPassManager(LLVMPassManagerBuilderRef PMB,
                                                  LLVMPassManagerRef PM,
                                                  LLVMBool Internalize,
                                                  LLVMBool RunInliner) {
  PassManagerBuilder *Builder = unwrap(PMB);
  legacy::PassManagerBase *LPM = unwrap(PM);

  // A small backwards-compatibility hack: populateLTOPassManager used to take
  // a RunInliner option, so honor it here by installing the default inliner.
  if (RunInliner && !Builder->Inliner)
    Builder->Inliner = createFunctionInliningPass();

  Builder->populateLTOPassManager(*LPM);
}
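
// (End-to-end C API sketch; assumed usage, with TheModule a placeholder
// LLVMModuleRef and error handling omitted.)
//
//   LLVMPassManagerBuilderRef PMB = LLVMPassManagerBuilderCreate();
//   LLVMPassManagerBuilderSetOptLevel(PMB, 2);
//   LLVMPassManagerBuilderUseInlinerWithThreshold(PMB, 225);
//   LLVMPassManagerRef PM = LLVMCreatePassManager();
//   LLVMPassManagerBuilderPopulateModulePassManager(PMB, PM);
//   LLVMRunPassManager(PM, TheModule);
//   LLVMDisposePassManager(PM);
//   LLVMPassManagerBuilderDispose(PMB);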