//===-- Scalar.cpp --------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements common infrastructure for libLLVMScalarOpts.a, which
// implements several scalar transformations over the LLVM intermediate
// representation, including the C bindings for that library.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar.h"
#include "llvm-c/Initialization.h"
#include "llvm-c/Transforms/Scalar.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/Passes.h"
#include "llvm/Analysis/ScopedNoAliasAA.h"
#include "llvm/Analysis/TypeBasedAliasAnalysis.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"

using namespace llvm;

/// initializeScalarOpts - Initialize all passes linked into the
/// ScalarOpts library.
void llvm::initializeScalarOpts(PassRegistry &Registry) {
  initializeADCEPass(Registry);
  initializeBDCEPass(Registry);
  initializeAlignmentFromAssumptionsPass(Registry);
  initializeConstantHoistingPass(Registry);
  initializeConstantPropagationPass(Registry);
  initializeCorrelatedValuePropagationPass(Registry);
  initializeDCEPass(Registry);
  initializeDeadInstEliminationPass(Registry);
  initializeScalarizerPass(Registry);
  initializeDSEPass(Registry);
  initializeGVNPass(Registry);
  initializeEarlyCSELegacyPassPass(Registry);
  initializeFlattenCFGPassPass(Registry);
  initializeInductiveRangeCheckEliminationPass(Registry);
  initializeIndVarSimplifyPass(Registry);
  initializeJumpThreadingPass(Registry);
  initializeLICMPass(Registry);
  initializeLoopDeletionPass(Registry);
  initializeLoopAccessAnalysisPass(Registry);
  initializeLoopInstSimplifyPass(Registry);
  initializeLoopInterchangePass(Registry);
  initializeLoopRotatePass(Registry);
  initializeLoopStrengthReducePass(Registry);
  initializeLoopRerollPass(Registry);
  initializeLoopUnrollPass(Registry);
  initializeLoopUnswitchPass(Registry);
  initializeLoopIdiomRecognizePass(Registry);
  initializeLowerAtomicPass(Registry);
  initializeLowerExpectIntrinsicPass(Registry);
  initializeMemCpyOptPass(Registry);
  initializeMergedLoadStoreMotionPass(Registry);
  initializeNaryReassociatePass(Registry);
  initializePartiallyInlineLibCallsPass(Registry);
  initializeReassociatePass(Registry);
  initializeRegToMemPass(Registry);
  initializeRewriteStatepointsForGCPass(Registry);
  initializeSCCPPass(Registry);
  initializeIPSCCPPass(Registry);
  initializeSROAPass(Registry);
  initializeSROA_DTPass(Registry);
  initializeSROA_SSAUpPass(Registry);
  initializeCFGSimplifyPassPass(Registry);
  initializeStructurizeCFGPass(Registry);
  initializeSinkingPass(Registry);
  initializeTailCallElimPass(Registry);
  initializeSeparateConstOffsetFromGEPPass(Registry);
  initializeSpeculativeExecutionPass(Registry);
  initializeStraightLineStrengthReducePass(Registry);
  initializeLoadCombinePass(Registry);
  initializePlaceBackedgeSafepointsImplPass(Registry);
  initializePlaceSafepointsPass(Registry);
  initializeFloat2IntPass(Registry);
  initializeLoopDistributePass(Registry);
}
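
// Illustrative usage only (not part of this file): a tool that links
// libLLVMScalarOpts typically registers these passes once at startup, e.g.
//
//   PassRegistry &Registry = *PassRegistry::getPassRegistry();
//   initializeCore(Registry);
//   initializeScalarOpts(Registry);
//
// after which the passes can be looked up by name or constructed directly.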

void LLVMInitializeScalarOpts(LLVMPassRegistryRef R) {
  initializeScalarOpts(*unwrap(R));
}
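
// From the C API, the same registration is reached through the global pass
// registry; a minimal, illustrative call sequence is:
//
//   LLVMInitializeScalarOpts(LLVMGetGlobalPassRegistry());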

void LLVMAddAggressiveDCEPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createAggressiveDCEPass());
}
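
// The C bindings below all follow the pattern above: each LLVMAdd*Pass call
// appends the corresponding pass to the given pass manager. A typical client
// sequence (illustrative sketch; M and F are an existing LLVMModuleRef and
// LLVMValueRef) is:
//
//   LLVMPassManagerRef FPM = LLVMCreateFunctionPassManagerForModule(M);
//   LLVMAddEarlyCSEPass(FPM);
//   LLVMAddGVNPass(FPM);
//   LLVMAddCFGSimplificationPass(FPM);
//   LLVMInitializeFunctionPassManager(FPM);
//   LLVMRunFunctionPassManager(FPM, F);
//   LLVMFinalizeFunctionPassManager(FPM);
//   LLVMDisposePassManager(FPM);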

void LLVMAddBitTrackingDCEPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createBitTrackingDCEPass());
}

void LLVMAddAlignmentFromAssumptionsPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createAlignmentFromAssumptionsPass());
}

void LLVMAddCFGSimplificationPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createCFGSimplificationPass());
}

void LLVMAddDeadStoreEliminationPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createDeadStoreEliminationPass());
}

void LLVMAddScalarizerPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createScalarizerPass());
}

void LLVMAddGVNPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createGVNPass());
}

void LLVMAddMergedLoadStoreMotionPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createMergedLoadStoreMotionPass());
}

void LLVMAddIndVarSimplifyPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createIndVarSimplifyPass());
}

void LLVMAddInstructionCombiningPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createInstructionCombiningPass());
}

void LLVMAddJumpThreadingPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createJumpThreadingPass());
}

void LLVMAddLICMPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createLICMPass());
}

void LLVMAddLoopDeletionPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createLoopDeletionPass());
}

void LLVMAddLoopIdiomPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createLoopIdiomPass());
}

void LLVMAddLoopRotatePass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createLoopRotatePass());
}

void LLVMAddLoopRerollPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createLoopRerollPass());
}

void LLVMAddLoopUnrollPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createLoopUnrollPass());
}

void LLVMAddLoopUnswitchPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createLoopUnswitchPass());
}

void LLVMAddMemCpyOptPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createMemCpyOptPass());
}

void LLVMAddPartiallyInlineLibCallsPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createPartiallyInlineLibCallsPass());
}

void LLVMAddLowerSwitchPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createLowerSwitchPass());
}

void LLVMAddPromoteMemoryToRegisterPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createPromoteMemoryToRegisterPass());
}

void LLVMAddReassociatePass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createReassociatePass());
}

void LLVMAddSCCPPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createSCCPPass());
}
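
// The three ScalarReplAggregates bindings below wrap the older scalar
// replacement of aggregates implementation (the SROA_DT / SROA_SSAUp passes
// registered above), not the newer SROA pass; presumably the behavior is kept
// this way for C API compatibility.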

void LLVMAddScalarReplAggregatesPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createScalarReplAggregatesPass());
}

void LLVMAddScalarReplAggregatesPassSSA(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createScalarReplAggregatesPass(-1, false));
}
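
// A negative Threshold in the binding below is expected to select the pass's
// built-in default threshold (see createScalarReplAggregatesPass).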

void LLVMAddScalarReplAggregatesPassWithThreshold(LLVMPassManagerRef PM,
                                                  int Threshold) {
  unwrap(PM)->add(createScalarReplAggregatesPass(Threshold));
}

void LLVMAddSimplifyLibCallsPass(LLVMPassManagerRef PM) {
// NOTE: The simplify-libcalls pass has been removed.
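  // The binding is kept as a no-op, presumably so existing C API clients
  // continue to link and run unchanged.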
}

void LLVMAddTailCallEliminationPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createTailCallEliminationPass());
}

void LLVMAddConstantPropagationPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createConstantPropagationPass());
}
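
// Despite its name, the binding below adds reg2mem
// (createDemoteRegisterToMemoryPass), the inverse of mem2reg.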

void LLVMAddDemoteMemoryToRegisterPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createDemoteRegisterToMemoryPass());
}

void LLVMAddVerifierPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createVerifierPass());
}

void LLVMAddCorrelatedValuePropagationPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createCorrelatedValuePropagationPass());
}

void LLVMAddEarlyCSEPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createEarlyCSEPass());
}
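
// The next three bindings add alias-analysis wrapper passes (TBAA,
// scoped-noalias, and BasicAA) rather than scalar transforms; they live in
// this file alongside the scalar-pass bindings.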

void LLVMAddTypeBasedAliasAnalysisPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createTypeBasedAAWrapperPass());
}

void LLVMAddScopedNoAliasAAPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createScopedNoAliasAAWrapperPass());
}

void LLVMAddBasicAliasAnalysisPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createBasicAAWrapperPass());
}

void LLVMAddLowerExpectIntrinsicPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createLowerExpectIntrinsicPass());
}