//===-- Scalar.cpp --------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements common infrastructure for libLLVMScalarOpts.a, which
// implements several scalar transformations over the LLVM intermediate
// representation, including the C bindings for that library.
//
//===----------------------------------------------------------------------===//
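
// A minimal usage sketch of the C bindings defined below, assuming the
// generic pass-manager helpers from llvm-c/Core.h and an existing
// LLVMModuleRef Mod:
//
//   LLVMInitializeScalarOpts(LLVMGetGlobalPassRegistry());
//   LLVMPassManagerRef PM = LLVMCreatePassManager();
//   LLVMAddGVNPass(PM);
//   LLVMAddCFGSimplificationPass(PM);
//   LLVMRunPassManager(PM, Mod);
//   LLVMDisposePassManager(PM);
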
#include "llvm/Transforms/Scalar.h"
#include "llvm-c/Initialization.h"
#include "llvm-c/Transforms/Scalar.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/Passes.h"
#include "llvm/Analysis/ScopedNoAliasAA.h"
#include "llvm/Analysis/TypeBasedAliasAnalysis.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/Transforms/Scalar/GVN.h"
#include "llvm/Transforms/Scalar/Scalarizer.h"
#include "llvm/Transforms/Scalar/SimpleLoopUnswitch.h"
#include "llvm/Transforms/Utils/UnifyFunctionExitNodes.h"

using namespace llvm;

/// initializeScalarOpts - Initialize all passes linked into the
/// ScalarOpts library.
void llvm::initializeScalarOpts(PassRegistry &Registry) {
  initializeADCELegacyPassPass(Registry);
  initializeBDCELegacyPassPass(Registry);
  initializeAlignmentFromAssumptionsPass(Registry);
  initializeCallSiteSplittingLegacyPassPass(Registry);
  initializeConstantHoistingLegacyPassPass(Registry);
  initializeConstantPropagationPass(Registry);
  initializeCorrelatedValuePropagationPass(Registry);
  initializeDCELegacyPassPass(Registry);
  initializeDeadInstEliminationPass(Registry);
  initializeDivRemPairsLegacyPassPass(Registry);
  initializeScalarizerLegacyPassPass(Registry);
  initializeDSELegacyPassPass(Registry);
  initializeGuardWideningLegacyPassPass(Registry);
  initializeLoopGuardWideningLegacyPassPass(Registry);
  initializeGVNLegacyPassPass(Registry);
  initializeNewGVNLegacyPassPass(Registry);
  initializeEarlyCSELegacyPassPass(Registry);
  initializeEarlyCSEMemSSALegacyPassPass(Registry);
  initializeMakeGuardsExplicitLegacyPassPass(Registry);
  initializeGVNHoistLegacyPassPass(Registry);
  initializeGVNSinkLegacyPassPass(Registry);
  initializeFlattenCFGPassPass(Registry);
  initializeIRCELegacyPassPass(Registry);
  initializeIndVarSimplifyLegacyPassPass(Registry);
  initializeInferAddressSpacesPass(Registry);
  initializeInstSimplifyLegacyPassPass(Registry);
  initializeJumpThreadingPass(Registry);
  initializeLegacyLICMPassPass(Registry);
  initializeLegacyLoopSinkPassPass(Registry);
  initializeLoopFuseLegacyPass(Registry);
  initializeLoopDataPrefetchLegacyPassPass(Registry);
  initializeLoopDeletionLegacyPassPass(Registry);
  initializeLoopAccessLegacyAnalysisPass(Registry);
  initializeLoopInstSimplifyLegacyPassPass(Registry);
  initializeLoopInterchangePass(Registry);
  initializeLoopPredicationLegacyPassPass(Registry);
  initializeLoopRotateLegacyPassPass(Registry);
  initializeLoopStrengthReducePass(Registry);
  initializeLoopRerollPass(Registry);
  initializeLoopUnrollPass(Registry);
  initializeLoopUnrollAndJamPass(Registry);
  initializeLoopUnswitchPass(Registry);
  initializeWarnMissedTransformationsLegacyPass(Registry);
  initializeLoopVersioningLICMPass(Registry);
  initializeLoopIdiomRecognizeLegacyPassPass(Registry);
  initializeLowerAtomicLegacyPassPass(Registry);
  initializeLowerConstantIntrinsicsPass(Registry);
  initializeLowerExpectIntrinsicPass(Registry);
  initializeLowerGuardIntrinsicLegacyPassPass(Registry);
  initializeLowerWidenableConditionLegacyPassPass(Registry);
  initializeMemCpyOptLegacyPassPass(Registry);
  initializeMergeICmpsLegacyPassPass(Registry);
  initializeMergedLoadStoreMotionLegacyPassPass(Registry);
  initializeNaryReassociateLegacyPassPass(Registry);
  initializePartiallyInlineLibCallsLegacyPassPass(Registry);
  initializeReassociateLegacyPassPass(Registry);
  initializeRegToMemPass(Registry);
  initializeRewriteStatepointsForGCLegacyPassPass(Registry);
  initializeSCCPLegacyPassPass(Registry);
  initializeSROALegacyPassPass(Registry);
  initializeCFGSimplifyPassPass(Registry);
  initializeStructurizeCFGPass(Registry);
  initializeSimpleLoopUnswitchLegacyPassPass(Registry);
  initializeSinkingLegacyPassPass(Registry);
  initializeTailCallElimPass(Registry);
  initializeSeparateConstOffsetFromGEPPass(Registry);
  initializeSpeculativeExecutionLegacyPassPass(Registry);
  initializeStraightLineStrengthReducePass(Registry);
  initializePlaceBackedgeSafepointsImplPass(Registry);
  initializePlaceSafepointsPass(Registry);
  initializeFloat2IntLegacyPassPass(Registry);
  initializeLoopDistributeLegacyPass(Registry);
  initializeLoopLoadEliminationPass(Registry);
  initializeLoopSimplifyCFGLegacyPassPass(Registry);
  initializeLoopVersioningPassPass(Registry);
  initializeEntryExitInstrumenterPass(Registry);
  initializePostInlineEntryExitInstrumenterPass(Registry);
}
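
// C++ clients (opt, for instance) call this directly during startup, roughly:
//
//   llvm::initializeScalarOpts(*llvm::PassRegistry::getPassRegistry());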

void LLVMAddLoopSimplifyCFGPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createLoopSimplifyCFGPass());
}

void LLVMInitializeScalarOpts(LLVMPassRegistryRef R) {
  initializeScalarOpts(*unwrap(R));
}

void LLVMAddAggressiveDCEPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createAggressiveDCEPass());
}

void LLVMAddDCEPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createDeadCodeEliminationPass());
}

void LLVMAddBitTrackingDCEPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createBitTrackingDCEPass());
}

void LLVMAddAlignmentFromAssumptionsPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createAlignmentFromAssumptionsPass());
}

void LLVMAddCFGSimplificationPass(LLVMPassManagerRef PM) {
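  // The arguments follow this era's createCFGSimplificationPass signature
  // (our reading: bonus-instruction threshold 1, no switch-condition
  // forwarding, no switch-to-lookup-table conversion, keep canonical loop
  // form).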
  unwrap(PM)->add(createCFGSimplificationPass(1, false, false, true));
}

void LLVMAddDeadStoreEliminationPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createDeadStoreEliminationPass());
}

void LLVMAddScalarizerPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createScalarizerPass());
}

void LLVMAddGVNPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createGVNPass());
}

void LLVMAddNewGVNPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createNewGVNPass());
}

void LLVMAddMergedLoadStoreMotionPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createMergedLoadStoreMotionPass());
}

void LLVMAddIndVarSimplifyPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createIndVarSimplifyPass());
}

void LLVMAddJumpThreadingPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createJumpThreadingPass());
}

void LLVMAddLoopSinkPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createLoopSinkPass());
}

void LLVMAddLICMPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createLICMPass());
}

void LLVMAddLoopDeletionPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createLoopDeletionPass());
}

void LLVMAddLoopIdiomPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createLoopIdiomPass());
}

void LLVMAddLoopRotatePass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createLoopRotatePass());
}

void LLVMAddLoopRerollPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createLoopRerollPass());
}

void LLVMAddLoopUnrollPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createLoopUnrollPass());
}

void LLVMAddLoopUnrollAndJamPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createLoopUnrollAndJamPass());
}

void LLVMAddLoopUnswitchPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createLoopUnswitchPass());
}

void LLVMAddLowerAtomicPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createLowerAtomicPass());
}

void LLVMAddMemCpyOptPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createMemCpyOptPass());
}

void LLVMAddPartiallyInlineLibCallsPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createPartiallyInlineLibCallsPass());
}

void LLVMAddReassociatePass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createReassociatePass());
}

void LLVMAddSCCPPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createSCCPPass());
}
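
// The old scalarrepl passes are long gone; the three bindings below are kept
// for C API compatibility and all simply schedule SROA (the Threshold
// argument is ignored).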
void LLVMAddScalarReplAggregatesPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createSROAPass());
}

void LLVMAddScalarReplAggregatesPassSSA(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createSROAPass());
}

void LLVMAddScalarReplAggregatesPassWithThreshold(LLVMPassManagerRef PM,
                                                  int Threshold) {
  unwrap(PM)->add(createSROAPass());
}

void LLVMAddSimplifyLibCallsPass(LLVMPassManagerRef PM) {
  // NOTE: The simplify-libcalls pass has been removed.
}

void LLVMAddTailCallEliminationPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createTailCallEliminationPass());
}

void LLVMAddConstantPropagationPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createConstantPropagationPass());
}

void LLVMAddDemoteMemoryToRegisterPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createDemoteRegisterToMemoryPass());
}

void LLVMAddVerifierPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createVerifierPass());
}

void LLVMAddCorrelatedValuePropagationPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createCorrelatedValuePropagationPass());
}
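
// Two flavors of EarlyCSE: the plain pass, and a variant that builds
// MemorySSA for stronger load/store elimination.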
void LLVMAddEarlyCSEPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createEarlyCSEPass(false/*=UseMemorySSA*/));
}

void LLVMAddEarlyCSEMemSSAPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createEarlyCSEPass(true/*=UseMemorySSA*/));
}

void LLVMAddGVNHoistLegacyPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createGVNHoistPass());
}
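
// The next three bindings install legacy-PM alias-analysis wrapper passes;
// the aggregating AA wrapper pass detects whichever of these are in the
// pipeline and folds their results into its queries.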
void LLVMAddTypeBasedAliasAnalysisPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createTypeBasedAAWrapperPass());
}

void LLVMAddScopedNoAliasAAPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createScopedNoAliasAAWrapperPass());
}

void LLVMAddBasicAliasAnalysisPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createBasicAAWrapperPass());
}

void LLVMAddLowerConstantIntrinsicsPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createLowerConstantIntrinsicsPass());
}

void LLVMAddLowerExpectIntrinsicPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createLowerExpectIntrinsicPass());
}

void LLVMAddUnifyFunctionExitNodesPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createUnifyFunctionExitNodesPass());
}