//===- ScalarEvolutionAliasAnalysis.cpp - SCEV-based Alias Analysis ------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the ScalarEvolutionAliasAnalysis pass, which implements
// a simple alias analysis in terms of ScalarEvolution queries.
//
// This differs from traditional loop dependence analysis in that it tests
// for dependencies within a single iteration of a loop, rather than
// dependencies between different iterations.
//
// ScalarEvolution has a more complete understanding of pointer arithmetic
// than BasicAliasAnalysis' collection of ad-hoc analyses.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"
using namespace llvm;

// Register this pass...
char ScalarEvolutionAliasAnalysis::ID = 0;
INITIALIZE_AG_PASS_BEGIN(ScalarEvolutionAliasAnalysis, AliasAnalysis, "scev-aa",
                         "ScalarEvolution-based Alias Analysis", false, true,
                         false)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_AG_PASS_END(ScalarEvolutionAliasAnalysis, AliasAnalysis, "scev-aa",
                       "ScalarEvolution-based Alias Analysis", false, true,
                       false)

FunctionPass *llvm::createScalarEvolutionAliasAnalysisPass() {
  return new ScalarEvolutionAliasAnalysis();
}

void ScalarEvolutionAliasAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
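  // ScalarEvolution must remain alive for as long as this pass's results may
  // be queried, hence the transitive requirement.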
  AU.addRequiredTransitive<ScalarEvolutionWrapperPass>();
  AU.setPreservesAll();
  AliasAnalysis::getAnalysisUsage(AU);
}

bool ScalarEvolutionAliasAnalysis::runOnFunction(Function &F) {
  InitializeAliasAnalysis(this, &F.getParent()->getDataLayout());
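  // Cache the ScalarEvolution analysis for this function. This pass only
  // answers queries; it does not modify the IR, so report no change.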
  SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
  return false;
}

/// Given an expression, try to find a base value.
///
/// Returns null if none was found.
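///
/// For example, for an add recurrence such as {%base,+,4}<%loop>, the base
/// is sought in the start operand (%base and %loop are illustrative names).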
Value *ScalarEvolutionAliasAnalysis::GetBaseValue(const SCEV *S) {
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
    // In an addrec, assume that the base will be in the start, rather
    // than the step.
    return GetBaseValue(AR->getStart());
  } else if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) {
    // If there's a pointer operand, it'll be sorted at the end of the list.
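    // For example, in an expression like (4 + %p), constant operands sort
    // first and the pointer %p comes last (%p is an illustrative name).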
    const SCEV *Last = A->getOperand(A->getNumOperands() - 1);
    if (Last->getType()->isPointerTy())
      return GetBaseValue(Last);
  } else if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    // This is a leaf node.
    return U->getValue();
  }
  // No identified object found.
  return nullptr;
}

AliasResult ScalarEvolutionAliasAnalysis::alias(const MemoryLocation &LocA,
                                                const MemoryLocation &LocB) {
  // If either of the memory references is empty, it doesn't matter what the
  // pointer values are. This allows the code below to ignore this special
  // case.
  if (LocA.Size == 0 || LocB.Size == 0)
    return NoAlias;

  // This is ScalarEvolutionAliasAnalysis. Get the SCEVs!
  const SCEV *AS = SE->getSCEV(const_cast<Value *>(LocA.Ptr));
  const SCEV *BS = SE->getSCEV(const_cast<Value *>(LocB.Ptr));

  // If they evaluate to the same expression, it's a MustAlias.
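  // For example, two distinct GEP instructions that ScalarEvolution folds to
  // the same expression are reported as MustAlias.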
  if (AS == BS)
    return MustAlias;

  // If something is known about the difference between the two addresses,
  // see if it's enough to prove a NoAlias.
  if (SE->getEffectiveSCEVType(AS->getType()) ==
      SE->getEffectiveSCEVType(BS->getType())) {
    unsigned BitWidth = SE->getTypeSizeInBits(AS->getType());
    APInt ASizeInt(BitWidth, LocA.Size);
    APInt BSizeInt(BitWidth, LocB.Size);

    // Compute the difference between the two pointers.
    const SCEV *BA = SE->getMinusSCEV(BS, AS);

    // Test whether the difference is known to be great enough that memory of
    // the given sizes doesn't overlap. This assumes that ASizeInt and
    // BSizeInt are non-zero, which is special-cased above.
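    // For example, with ASizeInt = BSizeInt = 4 and a difference BA whose
    // unsigned range is exactly [16, 16], we have 4 <= 16 and
    // -4 (i.e. 2^BitWidth - 4) >= 16, so the two accesses cannot overlap.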
    if (ASizeInt.ule(SE->getUnsignedRange(BA).getUnsignedMin()) &&
        (-BSizeInt).uge(SE->getUnsignedRange(BA).getUnsignedMax()))
      return NoAlias;

    // Folding the subtraction while preserving range information can be tricky
    // (because of INT_MIN, etc.); if the prior test failed, swap AS and BS
    // and try again to see if things fold better that way.

    // Compute the difference between the two pointers.
    const SCEV *AB = SE->getMinusSCEV(AS, BS);

    // Test whether the difference is known to be great enough that memory of
    // the given sizes doesn't overlap. This assumes that ASizeInt and
    // BSizeInt are non-zero, which is special-cased above.
    if (BSizeInt.ule(SE->getUnsignedRange(AB).getUnsignedMin()) &&
        (-ASizeInt).uge(SE->getUnsignedRange(AB).getUnsignedMax()))
      return NoAlias;
  }

  // If ScalarEvolution can find an underlying object, form a new query.
  // The correctness of this depends on ScalarEvolution not recognizing
  // inttoptr and ptrtoint operators.
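  // For instance, if AS and BS are add recurrences over distinct base
  // pointers %a and %b (illustrative names), the query is retried on the
  // bases themselves, conservatively using an unknown size and no metadata.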
  Value *AO = GetBaseValue(AS);
  Value *BO = GetBaseValue(BS);
  if ((AO && AO != LocA.Ptr) || (BO && BO != LocB.Ptr))
    if (alias(MemoryLocation(AO ? AO : LocA.Ptr,
                             AO ? +MemoryLocation::UnknownSize : LocA.Size,
                             AO ? AAMDNodes() : LocA.AATags),
              MemoryLocation(BO ? BO : LocB.Ptr,
                             BO ? +MemoryLocation::UnknownSize : LocB.Size,
                             BO ? AAMDNodes() : LocB.AATags)) == NoAlias)
      return NoAlias;

  // Forward the query to the next analysis.
  return AliasAnalysis::alias(LocA, LocB);
}