//===----------------------- AlignmentFromAssumptions.cpp -----------------===//
//                  Set Load/Store Alignments From Assumptions
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a ScalarEvolution-based transformation to set
// the alignments of loads, stores, and memory intrinsics based on the truth
// expressions of assume intrinsics. The primary motivation is to handle
// complex alignment assumptions that apply to vector loads and stores that
// appear after vectorization and unrolling.
//
//===----------------------------------------------------------------------===//

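// As a rough illustration (the IR below is schematic, not taken from a
// specific test case): a source-level hint such as
//
//   __builtin_assume_aligned(a, 32);
//
// typically reaches this pass as an llvm.assume call carrying an "align"
// operand bundle, e.g.
//
//   call void @llvm.assume(i1 true) [ "align"(float* %a, i64 32) ]
//
// and the code below walks such bundles to strengthen the alignment of the
// loads, stores, and memory intrinsics that use the assumed pointer.
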
#include "llvm/IR/Instructions.h"
#include "llvm/InitializePasses.h"
#define AA_NAME "alignment-from-assumptions"
#define DEBUG_TYPE AA_NAME
#include "llvm/Transforms/Scalar/AlignmentFromAssumptions.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"

using namespace llvm;

STATISTIC(NumLoadAlignChanged,
          "Number of loads changed by alignment assumptions");
STATISTIC(NumStoreAlignChanged,
          "Number of stores changed by alignment assumptions");
STATISTIC(NumMemIntAlignChanged,
          "Number of memory intrinsics changed by alignment assumptions");

namespace {
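// Legacy pass manager wrapper; the transformation itself lives in the
// AlignmentFromAssumptionsPass instance held in the Impl member below.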
struct AlignmentFromAssumptions : public FunctionPass {
  static char ID; // Pass identification, replacement for typeid
  AlignmentFromAssumptions() : FunctionPass(ID) {
    initializeAlignmentFromAssumptionsPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<DominatorTreeWrapperPass>();

    AU.setPreservesCFG();
    AU.addPreserved<AAResultsWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addPreserved<LoopInfoWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<ScalarEvolutionWrapperPass>();
  }

  AlignmentFromAssumptionsPass Impl;
};
}

char AlignmentFromAssumptions::ID = 0;
static const char aip_name[] = "Alignment from assumptions";
INITIALIZE_PASS_BEGIN(AlignmentFromAssumptions, AA_NAME,
                      aip_name, false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_END(AlignmentFromAssumptions, AA_NAME,
                    aip_name, false, false)

FunctionPass *llvm::createAlignmentFromAssumptionsPass() {
  return new AlignmentFromAssumptions();
}

// Given an expression for the (constant) alignment, AlignSCEV, and an
// expression for the displacement between a pointer and the aligned address,
// DiffSCEV, compute the alignment of the displaced pointer if it can be
// reduced to a constant. Using SCEV to compute alignment handles the case
// where DiffSCEV is a recurrence with constant start such that the aligned
// offset is constant. e.g. {16,+,32} % 32 -> 16.
static MaybeAlign getNewAlignmentDiff(const SCEV *DiffSCEV,
                                      const SCEV *AlignSCEV,
                                      ScalarEvolution *SE) {
  // DiffUnits = Diff % int64_t(Alignment)
  const SCEV *DiffUnitsSCEV = SE->getURemExpr(DiffSCEV, AlignSCEV);

  LLVM_DEBUG(dbgs() << "\talignment relative to " << *AlignSCEV << " is "
                    << *DiffUnitsSCEV << " (diff: " << *DiffSCEV << ")\n");

  if (const SCEVConstant *ConstDUSCEV =
          dyn_cast<SCEVConstant>(DiffUnitsSCEV)) {
    int64_t DiffUnits = ConstDUSCEV->getValue()->getSExtValue();

    // If the displacement is an exact multiple of the alignment, then the
    // displaced pointer has the same alignment as the aligned pointer, so
    // return the alignment value.
    if (!DiffUnits)
      return cast<SCEVConstant>(AlignSCEV)->getValue()->getAlignValue();

    // If the displacement is not an exact multiple, but the remainder is a
    // constant, then return this remainder (but only if it is a power of 2).
    uint64_t DiffUnitsAbs = std::abs(DiffUnits);
    if (isPowerOf2_64(DiffUnitsAbs))
      return Align(DiffUnitsAbs);
  }

  return None;
}

// There is an address given by an offset OffSCEV from AASCEV which has an
// alignment AlignSCEV. Use that information, if possible, to compute a new
// alignment for Ptr.
static Align getNewAlignment(const SCEV *AASCEV, const SCEV *AlignSCEV,
                             const SCEV *OffSCEV, Value *Ptr,
                             ScalarEvolution *SE) {
  const SCEV *PtrSCEV = SE->getSCEV(Ptr);
  // On a platform with 32-bit allocas, but 64-bit flat/global pointer sizes
  // (*cough* AMDGPU), the effective SCEV type of AASCEV and PtrSCEV
  // may disagree. Trunc/extend so they agree.
  PtrSCEV = SE->getTruncateOrZeroExtend(
      PtrSCEV, SE->getEffectiveSCEVType(AASCEV->getType()));
  const SCEV *DiffSCEV = SE->getMinusSCEV(PtrSCEV, AASCEV);

  // On 32-bit platforms, DiffSCEV might now have type i32 -- we've always
  // sign-extended OffSCEV to i64, so make sure they agree again.
  DiffSCEV = SE->getNoopOrSignExtend(DiffSCEV, OffSCEV->getType());

  // What we really want to know is the overall offset to the aligned
  // address. This address is displaced by the provided offset.
  DiffSCEV = SE->getAddExpr(DiffSCEV, OffSCEV);

  LLVM_DEBUG(dbgs() << "AFI: alignment of " << *Ptr << " relative to "
                    << *AlignSCEV << " and offset " << *OffSCEV
                    << " using diff " << *DiffSCEV << "\n");

  if (MaybeAlign NewAlignment = getNewAlignmentDiff(DiffSCEV, AlignSCEV, SE)) {
    LLVM_DEBUG(dbgs() << "\tnew alignment: " << DebugStr(NewAlignment) << "\n");
    return *NewAlignment;
  }

  if (const SCEVAddRecExpr *DiffARSCEV = dyn_cast<SCEVAddRecExpr>(DiffSCEV)) {
    // The relative offset to the alignment assumption did not yield a
    // constant, but we should try harder: if we assume that a is 32-byte
    // aligned, then in
    //   for (i = 0; i < 1024; i += 4) r += a[i];
    // not all of the loads from a are 32-byte aligned, but instead alternate
    // between 32 and 16-byte alignment. As a result, the new alignment will
    // not be a constant, but can still be improved over the default (of 4)
    // to 16.
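    // Concretely (schematic values mirroring the example above): with a
    // 32-byte alignment assumption and DiffSCEV = {0,+,16}, the start of the
    // recurrence yields an alignment of 32 and the step yields 16, and the
    // code below settles on the smaller of the two, 16 (both are powers of
    // two, so the smaller always divides the larger).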
    const SCEV *DiffStartSCEV = DiffARSCEV->getStart();
    const SCEV *DiffIncSCEV = DiffARSCEV->getStepRecurrence(*SE);

    LLVM_DEBUG(dbgs() << "\ttrying start/inc alignment using start "
                      << *DiffStartSCEV << " and inc " << *DiffIncSCEV << "\n");

    // Now compute the new alignment using the displacement to the value in
    // the first iteration, and also the alignment using the per-iteration
    // delta. If these are the same, then use that answer. Otherwise, use the
    // smaller one, but only if it divides the larger one.
    MaybeAlign NewAlignment = getNewAlignmentDiff(DiffStartSCEV, AlignSCEV, SE);
    MaybeAlign NewIncAlignment =
        getNewAlignmentDiff(DiffIncSCEV, AlignSCEV, SE);

    LLVM_DEBUG(dbgs() << "\tnew start alignment: " << DebugStr(NewAlignment)
                      << "\n");
    LLVM_DEBUG(dbgs() << "\tnew inc alignment: " << DebugStr(NewIncAlignment)
                      << "\n");

    if (!NewAlignment || !NewIncAlignment)
      return Align(1);

    const Align NewAlign = *NewAlignment;
    const Align NewIncAlign = *NewIncAlignment;
    if (NewAlign > NewIncAlign) {
      LLVM_DEBUG(dbgs() << "\tnew start/inc alignment: "
                        << DebugStr(NewIncAlign) << "\n");
      return NewIncAlign;
    }
    if (NewIncAlign > NewAlign) {
      LLVM_DEBUG(dbgs() << "\tnew start/inc alignment: " << DebugStr(NewAlign)
                        << "\n");
      return NewAlign;
    }
    assert(NewIncAlign == NewAlign);
    LLVM_DEBUG(dbgs() << "\tnew start/inc alignment: " << DebugStr(NewAlign)
                      << "\n");
    return NewAlign;
  }

  return Align(1);
}

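// Each "align" assume operand bundle is expected to have the form
//   "align"(<pointer>, <alignment> [, <offset>])
// e.g. (illustrative only):
//   call void @llvm.assume(i1 true) [ "align"(i8* %p, i64 32, i64 8) ]
// extractAlignmentInfo() pulls these pieces out as SCEVs, canonicalizing the
// alignment and offset to i64 and defaulting the offset to zero.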
bool AlignmentFromAssumptionsPass::extractAlignmentInfo(CallInst *I,
                                                        unsigned Idx,
                                                        Value *&AAPtr,
                                                        const SCEV *&AlignSCEV,
                                                        const SCEV *&OffSCEV) {
  Type *Int64Ty = Type::getInt64Ty(I->getContext());
  OperandBundleUse AlignOB = I->getOperandBundleAt(Idx);
  if (AlignOB.getTagName() != "align")
Revert "[AssumeBundles] Use operand bundles to encode alignment assumptions"
Assume bundle can have more than one entry with the same name,
but at least AlignmentFromAssumptionsPass::extractAlignmentInfo() uses
getOperandBundle("align"), which internally assumes that it isn't the
case, and happily crashes otherwise.
Minimal reduced reproducer: run `opt -alignment-from-assumptions` on
target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
%0 = type { i64, %1*, i8*, i64, %2, i32, %3*, i8* }
%1 = type opaque
%2 = type { i8, i8, i16 }
%3 = type { i32, i32, i32, i32 }
; Function Attrs: nounwind
define i32 @f(%0* noalias nocapture readonly %arg, %0* noalias %arg1) local_unnamed_addr #0 {
bb:
call void @llvm.assume(i1 true) [ "align"(%0* %arg, i64 8), "align"(%0* %arg1, i64 8) ]
ret i32 0
}
; Function Attrs: nounwind willreturn
declare void @llvm.assume(i1) #1
attributes #0 = { nounwind "reciprocal-estimates"="none" }
attributes #1 = { nounwind willreturn }
This is what we'd have with -mllvm -enable-knowledge-retention
This reverts commit c95ffadb2474a4d8c4f598d94d35a9f31d9606cb.
2020-07-05 03:45:41 +08:00
    return false;
  assert(AlignOB.Inputs.size() >= 2);
  AAPtr = AlignOB.Inputs[0].get();
  // TODO: Consider accumulating the offset to the base.
  AAPtr = AAPtr->stripPointerCastsSameRepresentation();
  AlignSCEV = SE->getSCEV(AlignOB.Inputs[1].get());
  AlignSCEV = SE->getTruncateOrZeroExtend(AlignSCEV, Int64Ty);
  if (AlignOB.Inputs.size() == 3)
    OffSCEV = SE->getSCEV(AlignOB.Inputs[2].get());
  else
Revert "[AssumeBundles] Use operand bundles to encode alignment assumptions"
Assume bundle can have more than one entry with the same name,
but at least AlignmentFromAssumptionsPass::extractAlignmentInfo() uses
getOperandBundle("align"), which internally assumes that it isn't the
case, and happily crashes otherwise.
Minimal reduced reproducer: run `opt -alignment-from-assumptions` on
target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
%0 = type { i64, %1*, i8*, i64, %2, i32, %3*, i8* }
%1 = type opaque
%2 = type { i8, i8, i16 }
%3 = type { i32, i32, i32, i32 }
; Function Attrs: nounwind
define i32 @f(%0* noalias nocapture readonly %arg, %0* noalias %arg1) local_unnamed_addr #0 {
bb:
call void @llvm.assume(i1 true) [ "align"(%0* %arg, i64 8), "align"(%0* %arg1, i64 8) ]
ret i32 0
}
; Function Attrs: nounwind willreturn
declare void @llvm.assume(i1) #1
attributes #0 = { nounwind "reciprocal-estimates"="none" }
attributes #1 = { nounwind willreturn }
This is what we'd have with -mllvm -enable-knowledge-retention
This reverts commit c95ffadb2474a4d8c4f598d94d35a9f31d9606cb.
2020-07-05 03:45:41 +08:00
    OffSCEV = SE->getZero(Int64Ty);
  OffSCEV = SE->getTruncateOrZeroExtend(OffSCEV, Int64Ty);
Revert "[AssumeBundles] Use operand bundles to encode alignment assumptions"
Assume bundle can have more than one entry with the same name,
but at least AlignmentFromAssumptionsPass::extractAlignmentInfo() uses
getOperandBundle("align"), which internally assumes that it isn't the
case, and happily crashes otherwise.
Minimal reduced reproducer: run `opt -alignment-from-assumptions` on
target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
%0 = type { i64, %1*, i8*, i64, %2, i32, %3*, i8* }
%1 = type opaque
%2 = type { i8, i8, i16 }
%3 = type { i32, i32, i32, i32 }
; Function Attrs: nounwind
define i32 @f(%0* noalias nocapture readonly %arg, %0* noalias %arg1) local_unnamed_addr #0 {
bb:
call void @llvm.assume(i1 true) [ "align"(%0* %arg, i64 8), "align"(%0* %arg1, i64 8) ]
ret i32 0
}
; Function Attrs: nounwind willreturn
declare void @llvm.assume(i1) #1
attributes #0 = { nounwind "reciprocal-estimates"="none" }
attributes #1 = { nounwind willreturn }
This is what we'd have with -mllvm -enable-knowledge-retention
This reverts commit c95ffadb2474a4d8c4f598d94d35a9f31d9606cb.
2020-07-05 03:45:41 +08:00
  return true;
}

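// Process the Idx-th operand bundle of the assume call ACall: extract the
// alignment assumption it encodes and propagate the resulting alignment to
// every load, store, and memory intrinsic that (transitively) uses the
// assumed pointer and for which the assumption provably holds at that point
// (per isValidAssumeForContext). Returns false if the bundle does not encode
// a usable assumption.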
bool AlignmentFromAssumptionsPass::processAssumption(CallInst *ACall,
                                                     unsigned Idx) {
  Value *AAPtr;
  const SCEV *AlignSCEV, *OffSCEV;
  if (!extractAlignmentInfo(ACall, Idx, AAPtr, AlignSCEV, OffSCEV))
    return false;

  // Skip ConstantPointerNull and UndefValue. Assumptions on these shouldn't
  // affect other users.
  if (isa<ConstantData>(AAPtr))
    return false;

  const SCEV *AASCEV = SE->getSCEV(AAPtr);

  // Apply the assumption to all other users of the specified pointer.
  SmallPtrSet<Instruction *, 32> Visited;
  SmallVector<Instruction*, 16> WorkList;
  for (User *J : AAPtr->users()) {
    if (J == ACall)
      continue;

    if (Instruction *K = dyn_cast<Instruction>(J))
      WorkList.push_back(K);
  }

  while (!WorkList.empty()) {
    Instruction *J = WorkList.pop_back_val();
    if (LoadInst *LI = dyn_cast<LoadInst>(J)) {
      if (!isValidAssumeForContext(ACall, J, DT))
        continue;
      Align NewAlignment = getNewAlignment(AASCEV, AlignSCEV, OffSCEV,
                                           LI->getPointerOperand(), SE);
      if (NewAlignment > LI->getAlign()) {
        LI->setAlignment(NewAlignment);
        ++NumLoadAlignChanged;
      }
    } else if (StoreInst *SI = dyn_cast<StoreInst>(J)) {
      if (!isValidAssumeForContext(ACall, J, DT))
        continue;
      Align NewAlignment = getNewAlignment(AASCEV, AlignSCEV, OffSCEV,
                                           SI->getPointerOperand(), SE);
      if (NewAlignment > SI->getAlign()) {
        SI->setAlignment(NewAlignment);
        ++NumStoreAlignChanged;
      }
    } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(J)) {
      if (!isValidAssumeForContext(ACall, J, DT))
        continue;
      Align NewDestAlignment =
          getNewAlignment(AASCEV, AlignSCEV, OffSCEV, MI->getDest(), SE);

      LLVM_DEBUG(dbgs() << "\tmem inst: " << DebugStr(NewDestAlignment)
                        << "\n";);
      if (NewDestAlignment > *MI->getDestAlign()) {
        MI->setDestAlignment(NewDestAlignment);
        ++NumMemIntAlignChanged;
      }

      // For memory transfers, there is also a source alignment that
      // can be set.
      if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
        Align NewSrcAlignment =
            getNewAlignment(AASCEV, AlignSCEV, OffSCEV, MTI->getSource(), SE);

        LLVM_DEBUG(dbgs() << "\tmem trans: " << DebugStr(NewSrcAlignment)
                          << "\n";);

        if (NewSrcAlignment > *MTI->getSourceAlign()) {
          MTI->setSourceAlignment(NewSrcAlignment);
          ++NumMemIntAlignChanged;
        }
      }
    }

    // Now that we've updated that use of the pointer, look for other uses of
    // the pointer to update.
    Visited.insert(J);
    for (User *UJ : J->users()) {
      Instruction *K = cast<Instruction>(UJ);
      if (!Visited.count(K))
        WorkList.push_back(K);
    }
  }

  return true;
}

bool AlignmentFromAssumptions::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
  ScalarEvolution *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
  DominatorTree *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();

  return Impl.runImpl(F, AC, SE, DT);
}

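// Shared implementation: both the legacy runOnFunction() above and the new
// pass manager run() below funnel into this routine.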
bool AlignmentFromAssumptionsPass::runImpl(Function &F, AssumptionCache &AC,
                                           ScalarEvolution *SE_,
                                           DominatorTree *DT_) {
  SE = SE_;
  DT = DT_;

  bool Changed = false;
  for (auto &AssumeVH : AC.assumptions())
    if (AssumeVH) {
      CallInst *Call = cast<CallInst>(AssumeVH);
      for (unsigned Idx = 0; Idx < Call->getNumOperandBundles(); Idx++)
        Changed |= processAssumption(Call, Idx);
    }

  return Changed;
}

PreservedAnalyses
AlignmentFromAssumptionsPass::run(Function &F, FunctionAnalysisManager &AM) {

  AssumptionCache &AC = AM.getResult<AssumptionAnalysis>(F);
  ScalarEvolution &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  DominatorTree &DT = AM.getResult<DominatorTreeAnalysis>(F);
  if (!runImpl(F, AC, &SE, &DT))
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  PA.preserve<AAManager>();
  PA.preserve<ScalarEvolutionAnalysis>();
  PA.preserve<GlobalsAA>();
  return PA;
}