2004-05-24 05:21:17 +08:00
|
|
|
//===-- ArgumentPromotion.cpp - Promote by-reference arguments ------------===//
|
2005-04-22 07:48:37 +08:00
|
|
|
//
|
2004-03-08 05:29:54 +08:00
|
|
|
// The LLVM Compiler Infrastructure
|
|
|
|
//
|
2007-12-30 04:36:04 +08:00
|
|
|
// This file is distributed under the University of Illinois Open Source
|
|
|
|
// License. See LICENSE.TXT for details.
|
2005-04-22 07:48:37 +08:00
|
|
|
//
|
2004-03-08 05:29:54 +08:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//
|
|
|
|
// This pass promotes "by reference" arguments to be "by value" arguments. In
|
|
|
|
// practice, this means looking for internal functions that have pointer
|
2007-10-26 11:03:51 +08:00
|
|
|
// arguments. If it can prove, through the use of alias analysis, that an
|
|
|
|
// argument is *only* loaded, then it can pass the value into the function
|
2004-03-08 05:29:54 +08:00
|
|
|
// instead of the address of the value. This can cause recursive simplification
|
2004-05-24 05:21:17 +08:00
|
|
|
// of code and lead to the elimination of allocas (especially in C++ template
|
|
|
|
// code like the STL).
|
2004-03-08 05:29:54 +08:00
|
|
|
//
|
2004-03-08 09:04:36 +08:00
|
|
|
// This pass also handles aggregate arguments that are passed into a function,
|
|
|
|
// scalarizing them if the elements of the aggregate are only loaded. Note that
|
2008-07-29 18:00:13 +08:00
|
|
|
// by default it refuses to scalarize aggregates which would require passing in
|
|
|
|
// more than three operands to the function, because passing thousands of
|
2008-09-07 17:54:09 +08:00
|
|
|
// operands for a large array or structure is unprofitable! This limit can be
|
2008-07-29 18:00:13 +08:00
|
|
|
// configured or disabled, however.
|
2004-03-08 09:04:36 +08:00
|
|
|
//
|
2004-03-08 05:29:54 +08:00
|
|
|
// Note that this transformation could also be done for arguments that are only
|
2007-10-26 11:03:51 +08:00
|
|
|
// stored to (returning the value instead), but does not currently. This case
|
|
|
|
// would be best handled when and if LLVM begins supporting multiple return
|
|
|
|
// values from functions.
|
2004-03-08 05:29:54 +08:00
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2012-12-04 00:50:05 +08:00
|
|
|
#include "llvm/ADT/DepthFirstIterator.h"
|
|
|
|
#include "llvm/ADT/Statistic.h"
|
|
|
|
#include "llvm/ADT/StringExtras.h"
|
|
|
|
#include "llvm/Analysis/AliasAnalysis.h"
|
2016-12-19 16:22:17 +08:00
|
|
|
#include "llvm/Analysis/AssumptionCache.h"
|
[PM/AA] Rebuild LLVM's alias analysis infrastructure in a way compatible
with the new pass manager, and no longer relying on analysis groups.
This builds essentially a ground-up new AA infrastructure stack for
LLVM. The core ideas are the same that are used throughout the new pass
manager: type erased polymorphism and direct composition. The design is
as follows:
- FunctionAAResults is a type-erasing alias analysis results aggregation
interface to walk a single query across a range of results from
different alias analyses. Currently this is function-specific as we
always assume that aliasing queries are *within* a function.
- AAResultBase is a CRTP utility providing stub implementations of
various parts of the alias analysis result concept, notably in several
cases in terms of other more general parts of the interface. This can
be used to implement only a narrow part of the interface rather than
the entire interface. This isn't really ideal, this logic should be
hoisted into FunctionAAResults as currently it will cause
a significant amount of redundant work, but it faithfully models the
behavior of the prior infrastructure.
- All the alias analysis passes are ported to be wrapper passes for the
legacy PM and new-style analysis passes for the new PM with a shared
result object. In some cases (most notably CFL), this is an extremely
naive approach that we should revisit when we can specialize for the
new pass manager.
- BasicAA has been restructured to reflect that it is much more
fundamentally a function analysis because it uses dominator trees and
loop info that need to be constructed for each function.
All of the references to getting alias analysis results have been
updated to use the new aggregation interface. All the preservation and
other pass management code has been updated accordingly.
The way the FunctionAAResultsWrapperPass works is to detect the
available alias analyses when run, and add them to the results object.
This means that we should be able to continue to respect when various
passes are added to the pipeline, for example adding CFL or adding TBAA
passes should just cause their results to be available and to get folded
into this. The exception to this rule is BasicAA which really needs to
be a function pass due to using dominator trees and loop info. As
a consequence, the FunctionAAResultsWrapperPass directly depends on
BasicAA and always includes it in the aggregation.
This has significant implications for preserving analyses. Generally,
most passes shouldn't bother preserving FunctionAAResultsWrapperPass
because rebuilding the results just updates the set of known AA passes.
The exception to this rule are LoopPass instances which need to preserve
all the function analyses that the loop pass manager will end up
needing. This means preserving both BasicAAWrapperPass and the
aggregating FunctionAAResultsWrapperPass.
Now, when preserving an alias analysis, you do so by directly preserving
that analysis. This is only necessary for non-immutable-pass-provided
alias analyses though, and there are only three of interest: BasicAA,
GlobalsAA (formerly GlobalsModRef), and SCEVAA. Usually BasicAA is
preserved when needed because it (like DominatorTree and LoopInfo) is
marked as a CFG-only pass. I've expanded GlobalsAA into the preserved
set everywhere we previously were preserving all of AliasAnalysis, and
I've added SCEVAA in the intersection of that with where we preserve
SCEV itself.
One significant challenge to all of this is that the CGSCC passes were
actually using the alias analysis implementations by taking advantage of
a pretty amazing set of loop holes in the old pass manager's analysis
management code which allowed analysis groups to slide through in many
cases. Moving away from analysis groups makes this problem much more
obvious. To fix it, I've leveraged the flexibility the design of the new
PM components provides to just directly construct the relevant alias
analyses for the relevant functions in the IPO passes that need them.
This is a bit hacky, but should go away with the new pass manager, and
is already in many ways cleaner than the prior state.
Another significant challenge is that various facilities of the old
alias analysis infrastructure just don't fit any more. The most
significant of these is the alias analysis 'counter' pass. That pass
relied on the ability to snoop on AA queries at different points in the
analysis group chain. Instead, I'm planning to build printing
functionality directly into the aggregation layer. I've not included
that in this patch merely to keep it smaller.
Note that all of this needs a nearly complete rewrite of the AA
documentation. I'm planning to do that, but I'd like to make sure the
new design settles, and to flesh out a bit more of what it looks like in
the new pass manager first.
Differential Revision: http://reviews.llvm.org/D12080
llvm-svn: 247167
2015-09-10 01:55:00 +08:00
|
|
|
#include "llvm/Analysis/BasicAliasAnalysis.h"
|
2012-12-04 00:50:05 +08:00
|
|
|
#include "llvm/Analysis/CallGraph.h"
|
2013-01-07 23:26:48 +08:00
|
|
|
#include "llvm/Analysis/CallGraphSCCPass.h"
|
2016-02-24 20:49:04 +08:00
|
|
|
#include "llvm/Analysis/Loads.h"
|
[PM/AA] Rebuild LLVM's alias analysis infrastructure in a way compatible
with the new pass manager, and no longer relying on analysis groups.
This builds essentially a ground-up new AA infrastructure stack for
LLVM. The core ideas are the same that are used throughout the new pass
manager: type erased polymorphism and direct composition. The design is
as follows:
- FunctionAAResults is a type-erasing alias analysis results aggregation
interface to walk a single query across a range of results from
different alias analyses. Currently this is function-specific as we
always assume that aliasing queries are *within* a function.
- AAResultBase is a CRTP utility providing stub implementations of
various parts of the alias analysis result concept, notably in several
cases in terms of other more general parts of the interface. This can
be used to implement only a narrow part of the interface rather than
the entire interface. This isn't really ideal, this logic should be
hoisted into FunctionAAResults as currently it will cause
a significant amount of redundant work, but it faithfully models the
behavior of the prior infrastructure.
- All the alias analysis passes are ported to be wrapper passes for the
legacy PM and new-style analysis passes for the new PM with a shared
result object. In some cases (most notably CFL), this is an extremely
naive approach that we should revisit when we can specialize for the
new pass manager.
- BasicAA has been restructured to reflect that it is much more
fundamentally a function analysis because it uses dominator trees and
loop info that need to be constructed for each function.
All of the references to getting alias analysis results have been
updated to use the new aggregation interface. All the preservation and
other pass management code has been updated accordingly.
The way the FunctionAAResultsWrapperPass works is to detect the
available alias analyses when run, and add them to the results object.
This means that we should be able to continue to respect when various
passes are added to the pipeline, for example adding CFL or adding TBAA
passes should just cause their results to be available and to get folded
into this. The exception to this rule is BasicAA which really needs to
be a function pass due to using dominator trees and loop info. As
a consequence, the FunctionAAResultsWrapperPass directly depends on
BasicAA and always includes it in the aggregation.
This has significant implications for preserving analyses. Generally,
most passes shouldn't bother preserving FunctionAAResultsWrapperPass
because rebuilding the results just updates the set of known AA passes.
The exception to this rule are LoopPass instances which need to preserve
all the function analyses that the loop pass manager will end up
needing. This means preserving both BasicAAWrapperPass and the
aggregating FunctionAAResultsWrapperPass.
Now, when preserving an alias analysis, you do so by directly preserving
that analysis. This is only necessary for non-immutable-pass-provided
alias analyses though, and there are only three of interest: BasicAA,
GlobalsAA (formerly GlobalsModRef), and SCEVAA. Usually BasicAA is
preserved when needed because it (like DominatorTree and LoopInfo) is
marked as a CFG-only pass. I've expanded GlobalsAA into the preserved
set everywhere we previously were preserving all of AliasAnalysis, and
I've added SCEVAA in the intersection of that with where we preserve
SCEV itself.
One significant challenge to all of this is that the CGSCC passes were
actually using the alias analysis implementations by taking advantage of
a pretty amazing set of loop holes in the old pass manager's analysis
management code which allowed analysis groups to slide through in many
cases. Moving away from analysis groups makes this problem much more
obvious. To fix it, I've leveraged the flexibility the design of the new
PM components provides to just directly construct the relevant alias
analyses for the relevant functions in the IPO passes that need them.
This is a bit hacky, but should go away with the new pass manager, and
is already in many ways cleaner than the prior state.
Another significant challenge is that various facilities of the old
alias analysis infrastructure just don't fit any more. The most
significant of these is the alias analysis 'counter' pass. That pass
relied on the ability to snoop on AA queries at different points in the
analysis group chain. Instead, I'm planning to build printing
functionality directly into the aggregation layer. I've not included
that in this patch merely to keep it smaller.
Note that all of this needs a nearly complete rewrite of the AA
documentation. I'm planning to do that, but I'd like to make sure the
new design settles, and to flesh out a bit more of what it looks like in
the new pass manager first.
Differential Revision: http://reviews.llvm.org/D12080
llvm-svn: 247167
2015-09-10 01:55:00 +08:00
|
|
|
#include "llvm/Analysis/TargetLibraryInfo.h"
|
2014-03-04 19:45:46 +08:00
|
|
|
#include "llvm/IR/CFG.h"
|
2014-03-04 19:01:28 +08:00
|
|
|
#include "llvm/IR/CallSite.h"
|
2013-01-02 19:36:10 +08:00
|
|
|
#include "llvm/IR/Constants.h"
|
2014-07-10 13:27:53 +08:00
|
|
|
#include "llvm/IR/DataLayout.h"
|
2014-07-02 05:13:37 +08:00
|
|
|
#include "llvm/IR/DebugInfo.h"
|
2013-01-02 19:36:10 +08:00
|
|
|
#include "llvm/IR/DerivedTypes.h"
|
|
|
|
#include "llvm/IR/Instructions.h"
|
|
|
|
#include "llvm/IR/LLVMContext.h"
|
|
|
|
#include "llvm/IR/Module.h"
|
2004-09-02 06:55:40 +08:00
|
|
|
#include "llvm/Support/Debug.h"
|
2009-07-25 08:23:56 +08:00
|
|
|
#include "llvm/Support/raw_ostream.h"
|
2017-01-29 16:03:19 +08:00
|
|
|
#include "llvm/Transforms/IPO.h"
|
2004-03-08 05:29:54 +08:00
|
|
|
#include <set>
|
|
|
|
using namespace llvm;
|
|
|
|
|
2014-04-22 10:55:47 +08:00
|
|
|
#define DEBUG_TYPE "argpromotion"
|
|
|
|
|
2017-01-29 16:03:19 +08:00
|
|
|
STATISTIC(NumArgumentsPromoted, "Number of pointer arguments promoted");
|
2006-12-20 06:09:18 +08:00
|
|
|
STATISTIC(NumAggregatesPromoted, "Number of aggregate arguments promoted");
|
2017-01-29 16:03:19 +08:00
|
|
|
STATISTIC(NumByValArgsPromoted, "Number of byval arguments promoted");
|
|
|
|
STATISTIC(NumArgumentsDead, "Number of dead pointer args eliminated");
|
2004-03-08 05:29:54 +08:00
|
|
|
|
2016-07-03 02:59:51 +08:00
|
|
|
/// A vector used to hold the indices of a single GEP instruction
|
|
|
|
using IndicesVector = std::vector<uint64_t>; // indices of a single GEP; empty = direct load
|
|
|
|
|
2017-01-29 16:03:16 +08:00
|
|
|
/// DoPromotion - This method actually performs the promotion of the specified
|
|
|
|
/// arguments, and returns the new function. At this point, we know that it's
|
|
|
|
/// safe to do so.
|
2016-07-03 02:59:51 +08:00
|
|
|
static CallGraphNode *
|
2017-01-29 16:03:21 +08:00
|
|
|
doPromotion(Function *F, SmallPtrSetImpl<Argument *> &ArgsToPromote,
|
2017-01-29 16:03:16 +08:00
|
|
|
SmallPtrSetImpl<Argument *> &ByValArgsToTransform, CallGraph &CG) {
|
2004-03-08 05:29:54 +08:00
|
|
|
|
2017-01-29 16:03:16 +08:00
|
|
|
// Start by computing a new prototype for the function, which is the same as
|
|
|
|
// the old function, but has modified arguments.
|
|
|
|
FunctionType *FTy = F->getFunctionType();
|
2017-01-29 16:03:19 +08:00
|
|
|
std::vector<Type *> Params;
|
2016-07-03 02:59:51 +08:00
|
|
|
|
2017-01-29 16:03:16 +08:00
|
|
|
typedef std::set<std::pair<Type *, IndicesVector>> ScalarizeTable;
|
2016-07-03 02:59:51 +08:00
|
|
|
|
2017-01-29 16:03:16 +08:00
|
|
|
// ScalarizedElements - If we are promoting a pointer that has elements
|
|
|
|
// accessed out of it, keep track of which elements are accessed so that we
|
|
|
|
// can add one argument for each.
|
|
|
|
//
|
|
|
|
// Arguments that are directly loaded will have a zero element value here, to
|
|
|
|
// handle cases where there are both a direct load and GEP accesses.
|
|
|
|
//
|
2017-01-29 16:03:19 +08:00
|
|
|
std::map<Argument *, ScalarizeTable> ScalarizedElements;
|
2016-07-03 02:59:51 +08:00
|
|
|
|
2017-01-29 16:03:16 +08:00
|
|
|
// OriginalLoads - Keep track of a representative load instruction from the
|
|
|
|
// original function so that we can tell the alias analysis implementation
|
|
|
|
// what the new GEP/Load instructions we are inserting look like.
|
|
|
|
// We need to keep the original loads for each argument and the elements
|
|
|
|
// of the argument that are accessed.
|
2017-01-29 16:03:19 +08:00
|
|
|
std::map<std::pair<Argument *, IndicesVector>, LoadInst *> OriginalLoads;
|
2016-07-03 02:59:51 +08:00
|
|
|
|
2017-01-29 16:03:16 +08:00
|
|
|
// Attribute - Keep track of the parameter attributes for the arguments
|
|
|
|
// that we are *not* promoting. For the ones that we do promote, the parameter
|
|
|
|
// attributes are lost
|
|
|
|
SmallVector<AttributeSet, 8> AttributesVec;
|
|
|
|
const AttributeSet &PAL = F->getAttributes();
|
2014-08-29 06:42:00 +08:00
|
|
|
|
2017-01-29 16:03:16 +08:00
|
|
|
// Add any return attributes.
|
|
|
|
if (PAL.hasAttributes(AttributeSet::ReturnIndex))
|
2017-01-29 16:03:19 +08:00
|
|
|
AttributesVec.push_back(
|
|
|
|
AttributeSet::get(F->getContext(), PAL.getRetAttributes()));
|
2014-08-29 06:42:00 +08:00
|
|
|
|
2017-01-29 16:03:16 +08:00
|
|
|
// First, determine the new argument list
|
|
|
|
unsigned ArgIndex = 1;
|
|
|
|
for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end(); I != E;
|
|
|
|
++I, ++ArgIndex) {
|
|
|
|
if (ByValArgsToTransform.count(&*I)) {
|
|
|
|
// Simple byval argument? Just add all the struct element types.
|
|
|
|
Type *AgTy = cast<PointerType>(I->getType())->getElementType();
|
|
|
|
StructType *STy = cast<StructType>(AgTy);
|
|
|
|
Params.insert(Params.end(), STy->element_begin(), STy->element_end());
|
|
|
|
++NumByValArgsPromoted;
|
|
|
|
} else if (!ArgsToPromote.count(&*I)) {
|
|
|
|
// Unchanged argument
|
|
|
|
Params.push_back(I->getType());
|
|
|
|
AttributeSet attrs = PAL.getParamAttributes(ArgIndex);
|
|
|
|
if (attrs.hasAttributes(ArgIndex)) {
|
|
|
|
AttrBuilder B(attrs, ArgIndex);
|
2017-01-29 16:03:19 +08:00
|
|
|
AttributesVec.push_back(
|
|
|
|
AttributeSet::get(F->getContext(), Params.size(), B));
|
2017-01-29 16:03:16 +08:00
|
|
|
}
|
|
|
|
} else if (I->use_empty()) {
|
|
|
|
// Dead argument (which are always marked as promotable)
|
|
|
|
++NumArgumentsDead;
|
|
|
|
} else {
|
|
|
|
// Okay, this is being promoted. This means that the only uses are loads
|
|
|
|
// or GEPs which are only used by loads
|
2014-08-29 06:42:00 +08:00
|
|
|
|
2017-01-29 16:03:16 +08:00
|
|
|
// In this table, we will track which indices are loaded from the argument
|
|
|
|
// (where direct loads are tracked as no indices).
|
|
|
|
ScalarizeTable &ArgIndices = ScalarizedElements[&*I];
|
|
|
|
for (User *U : I->users()) {
|
|
|
|
Instruction *UI = cast<Instruction>(U);
|
|
|
|
Type *SrcTy;
|
|
|
|
if (LoadInst *L = dyn_cast<LoadInst>(UI))
|
|
|
|
SrcTy = L->getType();
|
|
|
|
else
|
|
|
|
SrcTy = cast<GetElementPtrInst>(UI)->getSourceElementType();
|
|
|
|
IndicesVector Indices;
|
|
|
|
Indices.reserve(UI->getNumOperands() - 1);
|
|
|
|
// Since loads will only have a single operand, and GEPs only a single
|
|
|
|
// non-index operand, this will record direct loads without any indices,
|
|
|
|
// and gep+loads with the GEP indices.
|
|
|
|
for (User::op_iterator II = UI->op_begin() + 1, IE = UI->op_end();
|
|
|
|
II != IE; ++II)
|
|
|
|
Indices.push_back(cast<ConstantInt>(*II)->getSExtValue());
|
|
|
|
// GEPs with a single 0 index can be merged with direct loads
|
|
|
|
if (Indices.size() == 1 && Indices.front() == 0)
|
|
|
|
Indices.clear();
|
|
|
|
ArgIndices.insert(std::make_pair(SrcTy, Indices));
|
|
|
|
LoadInst *OrigLoad;
|
|
|
|
if (LoadInst *L = dyn_cast<LoadInst>(UI))
|
|
|
|
OrigLoad = L;
|
|
|
|
else
|
|
|
|
// Take any load, we will use it only to update Alias Analysis
|
|
|
|
OrigLoad = cast<LoadInst>(UI->user_back());
|
|
|
|
OriginalLoads[std::make_pair(&*I, Indices)] = OrigLoad;
|
|
|
|
}
|
2014-08-29 06:42:00 +08:00
|
|
|
|
2017-01-29 16:03:16 +08:00
|
|
|
// Add a parameter to the function for each element passed in.
|
|
|
|
for (const auto &ArgIndex : ArgIndices) {
|
|
|
|
// not allowed to dereference ->begin() if size() is 0
|
|
|
|
Params.push_back(GetElementPtrInst::getIndexedType(
|
|
|
|
cast<PointerType>(I->getType()->getScalarType())->getElementType(),
|
|
|
|
ArgIndex.second));
|
|
|
|
assert(Params.back());
|
|
|
|
}
|
2014-08-29 06:42:00 +08:00
|
|
|
|
2017-01-29 16:03:16 +08:00
|
|
|
if (ArgIndices.size() == 1 && ArgIndices.begin()->second.empty())
|
|
|
|
++NumArgumentsPromoted;
|
|
|
|
else
|
|
|
|
++NumAggregatesPromoted;
|
|
|
|
}
|
2014-08-29 06:42:00 +08:00
|
|
|
}
|
|
|
|
|
2017-01-29 16:03:16 +08:00
|
|
|
// Add any function attributes.
|
|
|
|
if (PAL.hasAttributes(AttributeSet::FunctionIndex))
|
2017-01-29 16:03:19 +08:00
|
|
|
AttributesVec.push_back(
|
|
|
|
AttributeSet::get(FTy->getContext(), PAL.getFnAttributes()));
|
2014-08-29 06:42:00 +08:00
|
|
|
|
2017-01-29 16:03:16 +08:00
|
|
|
Type *RetTy = FTy->getReturnType();
|
2014-08-29 06:42:00 +08:00
|
|
|
|
2017-01-29 16:03:16 +08:00
|
|
|
// Construct the new function type using the new arguments.
|
|
|
|
FunctionType *NFTy = FunctionType::get(RetTy, Params, FTy->isVarArg());
|
2014-08-29 06:42:00 +08:00
|
|
|
|
2017-01-29 16:03:16 +08:00
|
|
|
// Create the new function body and insert it into the module.
|
|
|
|
Function *NF = Function::Create(NFTy, F->getLinkage(), F->getName());
|
|
|
|
NF->copyAttributesFrom(F);
|
2014-08-29 06:42:00 +08:00
|
|
|
|
2017-01-29 16:03:16 +08:00
|
|
|
// Patch the pointer to LLVM function in debug info descriptor.
|
|
|
|
NF->setSubprogram(F->getSubprogram());
|
|
|
|
F->setSubprogram(nullptr);
|
2014-08-29 06:42:00 +08:00
|
|
|
|
2017-01-29 16:03:16 +08:00
|
|
|
DEBUG(dbgs() << "ARG PROMOTION: Promoting to:" << *NF << "\n"
|
2017-01-29 16:03:19 +08:00
|
|
|
<< "From: " << *F);
|
|
|
|
|
2017-01-29 16:03:16 +08:00
|
|
|
// Recompute the parameter attributes list based on the new arguments for
|
|
|
|
// the function.
|
|
|
|
NF->setAttributes(AttributeSet::get(F->getContext(), AttributesVec));
|
|
|
|
AttributesVec.clear();
|
2014-08-29 06:42:00 +08:00
|
|
|
|
2017-01-29 16:03:16 +08:00
|
|
|
F->getParent()->getFunctionList().insert(F->getIterator(), NF);
|
|
|
|
NF->takeName(F);
|
2014-08-29 06:42:00 +08:00
|
|
|
|
2017-01-29 16:03:16 +08:00
|
|
|
// Get a new callgraph node for NF.
|
|
|
|
CallGraphNode *NF_CGN = CG.getOrInsertFunction(NF);
|
2014-08-29 06:42:00 +08:00
|
|
|
|
2017-01-29 16:03:16 +08:00
|
|
|
// Loop over all of the callers of the function, transforming the call sites
|
|
|
|
// to pass in the loaded pointers.
|
|
|
|
//
|
2017-01-29 16:03:19 +08:00
|
|
|
SmallVector<Value *, 16> Args;
|
2017-01-29 16:03:16 +08:00
|
|
|
while (!F->use_empty()) {
|
|
|
|
CallSite CS(F->user_back());
|
|
|
|
assert(CS.getCalledFunction() == F);
|
|
|
|
Instruction *Call = CS.getInstruction();
|
|
|
|
const AttributeSet &CallPAL = CS.getAttributes();
|
[PM/AA] Rebuild LLVM's alias analysis infrastructure in a way compatible
with the new pass manager, and no longer relying on analysis groups.
This builds essentially a ground-up new AA infrastructure stack for
LLVM. The core ideas are the same that are used throughout the new pass
manager: type erased polymorphism and direct composition. The design is
as follows:
- FunctionAAResults is a type-erasing alias analysis results aggregation
interface to walk a single query across a range of results from
different alias analyses. Currently this is function-specific as we
always assume that aliasing queries are *within* a function.
- AAResultBase is a CRTP utility providing stub implementations of
various parts of the alias analysis result concept, notably in several
cases in terms of other more general parts of the interface. This can
be used to implement only a narrow part of the interface rather than
the entire interface. This isn't really ideal, this logic should be
hoisted into FunctionAAResults as currently it will cause
a significant amount of redundant work, but it faithfully models the
behavior of the prior infrastructure.
- All the alias analysis passes are ported to be wrapper passes for the
legacy PM and new-style analysis passes for the new PM with a shared
result object. In some cases (most notably CFL), this is an extremely
naive approach that we should revisit when we can specialize for the
new pass manager.
- BasicAA has been restructured to reflect that it is much more
fundamentally a function analysis because it uses dominator trees and
loop info that need to be constructed for each function.
All of the references to getting alias analysis results have been
updated to use the new aggregation interface. All the preservation and
other pass management code has been updated accordingly.
The way the FunctionAAResultsWrapperPass works is to detect the
available alias analyses when run, and add them to the results object.
This means that we should be able to continue to respect when various
passes are added to the pipeline, for example adding CFL or adding TBAA
passes should just cause their results to be available and to get folded
into this. The exception to this rule is BasicAA which really needs to
be a function pass due to using dominator trees and loop info. As
a consequence, the FunctionAAResultsWrapperPass directly depends on
BasicAA and always includes it in the aggregation.
This has significant implications for preserving analyses. Generally,
most passes shouldn't bother preserving FunctionAAResultsWrapperPass
because rebuilding the results just updates the set of known AA passes.
The exception to this rule are LoopPass instances which need to preserve
all the function analyses that the loop pass manager will end up
needing. This means preserving both BasicAAWrapperPass and the
aggregating FunctionAAResultsWrapperPass.
Now, when preserving an alias analysis, you do so by directly preserving
that analysis. This is only necessary for non-immutable-pass-provided
alias analyses though, and there are only three of interest: BasicAA,
GlobalsAA (formerly GlobalsModRef), and SCEVAA. Usually BasicAA is
preserved when needed because it (like DominatorTree and LoopInfo) is
marked as a CFG-only pass. I've expanded GlobalsAA into the preserved
set everywhere we previously were preserving all of AliasAnalysis, and
I've added SCEVAA in the intersection of that with where we preserve
SCEV itself.
One significant challenge to all of this is that the CGSCC passes were
actually using the alias analysis implementations by taking advantage of
a pretty amazing set of loop holes in the old pass manager's analysis
management code which allowed analysis groups to slide through in many
cases. Moving away from analysis groups makes this problem much more
obvious. To fix it, I've leveraged the flexibility the design of the new
PM components provides to just directly construct the relevant alias
analyses for the relevant functions in the IPO passes that need them.
This is a bit hacky, but should go away with the new pass manager, and
is already in many ways cleaner than the prior state.
Another significant challenge is that various facilities of the old
alias analysis infrastructure just don't fit any more. The most
significant of these is the alias analysis 'counter' pass. That pass
relied on the ability to snoop on AA queries at different points in the
analysis group chain. Instead, I'm planning to build printing
functionality directly into the aggregation layer. I've not included
that in this patch merely to keep it smaller.
Note that all of this needs a nearly complete rewrite of the AA
documentation. I'm planning to do that, but I'd like to make sure the
new design settles, and to flesh out a bit more of what it looks like in
the new pass manager first.
Differential Revision: http://reviews.llvm.org/D12080
llvm-svn: 247167
2015-09-10 01:55:00 +08:00
|
|
|
|
2017-01-29 16:03:16 +08:00
|
|
|
// Add any return attributes.
|
|
|
|
if (CallPAL.hasAttributes(AttributeSet::ReturnIndex))
|
2017-01-29 16:03:19 +08:00
|
|
|
AttributesVec.push_back(
|
|
|
|
AttributeSet::get(F->getContext(), CallPAL.getRetAttributes()));
|
2008-09-07 17:54:09 +08:00
|
|
|
|
2017-01-29 16:03:16 +08:00
|
|
|
// Loop over the operands, inserting GEP and loads in the caller as
|
|
|
|
// appropriate.
|
|
|
|
CallSite::arg_iterator AI = CS.arg_begin();
|
|
|
|
ArgIndex = 1;
|
2017-01-29 16:03:19 +08:00
|
|
|
for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end(); I != E;
|
|
|
|
++I, ++AI, ++ArgIndex)
|
2017-01-29 16:03:16 +08:00
|
|
|
if (!ArgsToPromote.count(&*I) && !ByValArgsToTransform.count(&*I)) {
|
2017-01-29 16:03:19 +08:00
|
|
|
Args.push_back(*AI); // Unmodified argument
|
2015-06-11 05:14:34 +08:00
|
|
|
|
2017-01-29 16:03:16 +08:00
|
|
|
if (CallPAL.hasAttributes(ArgIndex)) {
|
|
|
|
AttrBuilder B(CallPAL, ArgIndex);
|
2017-01-29 16:03:19 +08:00
|
|
|
AttributesVec.push_back(
|
|
|
|
AttributeSet::get(F->getContext(), Args.size(), B));
|
2011-01-16 16:09:24 +08:00
|
|
|
}
|
2017-01-29 16:03:16 +08:00
|
|
|
} else if (ByValArgsToTransform.count(&*I)) {
|
|
|
|
// Emit a GEP and load for each element of the struct.
|
|
|
|
Type *AgTy = cast<PointerType>(I->getType())->getElementType();
|
|
|
|
StructType *STy = cast<StructType>(AgTy);
|
|
|
|
Value *Idxs[2] = {
|
2017-01-29 16:03:19 +08:00
|
|
|
ConstantInt::get(Type::getInt32Ty(F->getContext()), 0), nullptr};
|
2017-01-29 16:03:16 +08:00
|
|
|
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
|
|
|
|
Idxs[1] = ConstantInt::get(Type::getInt32Ty(F->getContext()), i);
|
|
|
|
Value *Idx = GetElementPtrInst::Create(
|
|
|
|
STy, *AI, Idxs, (*AI)->getName() + "." + Twine(i), Call);
|
|
|
|
// TODO: Tell AA about the new values?
|
2017-01-29 16:03:19 +08:00
|
|
|
Args.push_back(new LoadInst(Idx, Idx->getName() + ".val", Call));
|
2008-01-12 06:31:41 +08:00
|
|
|
}
|
2017-01-29 16:03:16 +08:00
|
|
|
} else if (!I->use_empty()) {
|
|
|
|
// Non-dead argument: insert GEPs and loads as appropriate.
|
|
|
|
ScalarizeTable &ArgIndices = ScalarizedElements[&*I];
|
|
|
|
// Store the Value* version of the indices in here, but declare it now
|
|
|
|
// for reuse.
|
2017-01-29 16:03:19 +08:00
|
|
|
std::vector<Value *> Ops;
|
2017-01-29 16:03:16 +08:00
|
|
|
for (const auto &ArgIndex : ArgIndices) {
|
|
|
|
Value *V = *AI;
|
|
|
|
LoadInst *OrigLoad =
|
|
|
|
OriginalLoads[std::make_pair(&*I, ArgIndex.second)];
|
|
|
|
if (!ArgIndex.second.empty()) {
|
|
|
|
Ops.reserve(ArgIndex.second.size());
|
|
|
|
Type *ElTy = V->getType();
|
|
|
|
for (unsigned long II : ArgIndex.second) {
|
|
|
|
// Use i32 to index structs, and i64 for others (pointers/arrays).
|
|
|
|
// This satisfies GEP constraints.
|
2017-01-29 16:03:19 +08:00
|
|
|
Type *IdxTy =
|
|
|
|
(ElTy->isStructTy() ? Type::getInt32Ty(F->getContext())
|
|
|
|
: Type::getInt64Ty(F->getContext()));
|
2017-01-29 16:03:16 +08:00
|
|
|
Ops.push_back(ConstantInt::get(IdxTy, II));
|
|
|
|
// Keep track of the type we're currently indexing.
|
|
|
|
if (auto *ElPTy = dyn_cast<PointerType>(ElTy))
|
|
|
|
ElTy = ElPTy->getElementType();
|
|
|
|
else
|
|
|
|
ElTy = cast<CompositeType>(ElTy)->getTypeAtIndex(II);
|
|
|
|
}
|
|
|
|
// And create a GEP to extract those indices.
|
|
|
|
V = GetElementPtrInst::Create(ArgIndex.first, V, Ops,
|
|
|
|
V->getName() + ".idx", Call);
|
|
|
|
Ops.clear();
|
|
|
|
}
|
|
|
|
// Since we're replacing a load make sure we take the alignment
|
|
|
|
// of the previous load.
|
2017-01-29 16:03:19 +08:00
|
|
|
LoadInst *newLoad = new LoadInst(V, V->getName() + ".val", Call);
|
2017-01-29 16:03:16 +08:00
|
|
|
newLoad->setAlignment(OrigLoad->getAlignment());
|
|
|
|
// Transfer the AA info too.
|
|
|
|
AAMDNodes AAInfo;
|
|
|
|
OrigLoad->getAAMetadata(AAInfo);
|
|
|
|
newLoad->setAAMetadata(AAInfo);
|
2011-01-16 16:09:24 +08:00
|
|
|
|
2017-01-29 16:03:16 +08:00
|
|
|
Args.push_back(newLoad);
|
2011-01-16 16:09:24 +08:00
|
|
|
}
|
2008-05-27 19:50:51 +08:00
|
|
|
}
|
2008-09-07 17:54:09 +08:00
|
|
|
|
2017-01-29 16:03:16 +08:00
|
|
|
// Push any varargs arguments on the list.
|
|
|
|
for (; AI != CS.arg_end(); ++AI, ++ArgIndex) {
|
|
|
|
Args.push_back(*AI);
|
|
|
|
if (CallPAL.hasAttributes(ArgIndex)) {
|
|
|
|
AttrBuilder B(CallPAL, ArgIndex);
|
2017-01-29 16:03:19 +08:00
|
|
|
AttributesVec.push_back(
|
|
|
|
AttributeSet::get(F->getContext(), Args.size(), B));
|
2011-01-16 16:09:24 +08:00
|
|
|
}
|
|
|
|
}
|
2008-09-07 17:54:09 +08:00
|
|
|
|
2017-01-29 16:03:16 +08:00
|
|
|
// Add any function attributes.
|
|
|
|
if (CallPAL.hasAttributes(AttributeSet::FunctionIndex))
|
2017-01-29 16:03:19 +08:00
|
|
|
AttributesVec.push_back(
|
|
|
|
AttributeSet::get(Call->getContext(), CallPAL.getFnAttributes()));
|
2004-03-08 05:29:54 +08:00
|
|
|
|
2017-01-29 16:03:16 +08:00
|
|
|
SmallVector<OperandBundleDef, 1> OpBundles;
|
|
|
|
CS.getOperandBundlesAsDefs(OpBundles);
|
2004-03-08 05:29:54 +08:00
|
|
|
|
2017-01-29 16:03:16 +08:00
|
|
|
Instruction *New;
|
|
|
|
if (InvokeInst *II = dyn_cast<InvokeInst>(Call)) {
|
|
|
|
New = InvokeInst::Create(NF, II->getNormalDest(), II->getUnwindDest(),
|
|
|
|
Args, OpBundles, "", Call);
|
|
|
|
cast<InvokeInst>(New)->setCallingConv(CS.getCallingConv());
|
2017-01-29 16:03:19 +08:00
|
|
|
cast<InvokeInst>(New)->setAttributes(
|
|
|
|
AttributeSet::get(II->getContext(), AttributesVec));
|
2017-01-29 16:03:16 +08:00
|
|
|
} else {
|
|
|
|
New = CallInst::Create(NF, Args, OpBundles, "", Call);
|
|
|
|
cast<CallInst>(New)->setCallingConv(CS.getCallingConv());
|
2017-01-29 16:03:19 +08:00
|
|
|
cast<CallInst>(New)->setAttributes(
|
|
|
|
AttributeSet::get(New->getContext(), AttributesVec));
|
2017-01-29 16:03:16 +08:00
|
|
|
cast<CallInst>(New)->setTailCallKind(
|
|
|
|
cast<CallInst>(Call)->getTailCallKind());
|
|
|
|
}
|
|
|
|
New->setDebugLoc(Call->getDebugLoc());
|
|
|
|
Args.clear();
|
|
|
|
AttributesVec.clear();
|
2004-11-14 07:31:34 +08:00
|
|
|
|
2017-01-29 16:03:16 +08:00
|
|
|
// Update the callgraph to know that the callsite has been transformed.
|
|
|
|
CallGraphNode *CalleeNode = CG[Call->getParent()->getParent()];
|
|
|
|
CalleeNode->replaceCallEdge(CS, CallSite(New), NF_CGN);
|
2004-11-14 07:31:34 +08:00
|
|
|
|
2017-01-29 16:03:16 +08:00
|
|
|
if (!Call->use_empty()) {
|
|
|
|
Call->replaceAllUsesWith(New);
|
|
|
|
New->takeName(Call);
|
|
|
|
}
|
2004-11-14 07:31:34 +08:00
|
|
|
|
2017-01-29 16:03:16 +08:00
|
|
|
// Finally, remove the old call from the program, reducing the use-count of
|
|
|
|
// F.
|
|
|
|
Call->eraseFromParent();
|
2004-11-14 07:31:34 +08:00
|
|
|
}
|
|
|
|
|
2017-01-29 16:03:16 +08:00
|
|
|
// Since we have now created the new function, splice the body of the old
|
|
|
|
// function right into the new function, leaving the old rotting hulk of the
|
|
|
|
// function empty.
|
|
|
|
NF->getBasicBlockList().splice(NF->begin(), F->getBasicBlockList());
|
|
|
|
|
|
|
|
// Loop over the argument list, transferring uses of the old arguments over to
|
|
|
|
// the new arguments, also transferring over the names as well.
|
|
|
|
//
|
|
|
|
for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end(),
|
2017-01-29 16:03:19 +08:00
|
|
|
I2 = NF->arg_begin();
|
|
|
|
I != E; ++I) {
|
2017-01-29 16:03:16 +08:00
|
|
|
if (!ArgsToPromote.count(&*I) && !ByValArgsToTransform.count(&*I)) {
|
|
|
|
// If this is an unmodified argument, move the name and users over to the
|
|
|
|
// new version.
|
|
|
|
I->replaceAllUsesWith(&*I2);
|
|
|
|
I2->takeName(&*I);
|
|
|
|
++I2;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (ByValArgsToTransform.count(&*I)) {
|
|
|
|
// In the callee, we create an alloca, and store each of the new incoming
|
|
|
|
// arguments into the alloca.
|
|
|
|
Instruction *InsertPt = &NF->begin()->front();
|
|
|
|
|
|
|
|
// Just add all the struct element types.
|
|
|
|
Type *AgTy = cast<PointerType>(I->getType())->getElementType();
|
|
|
|
Value *TheAlloca = new AllocaInst(AgTy, nullptr, "", InsertPt);
|
|
|
|
StructType *STy = cast<StructType>(AgTy);
|
2017-01-29 16:03:19 +08:00
|
|
|
Value *Idxs[2] = {ConstantInt::get(Type::getInt32Ty(F->getContext()), 0),
|
|
|
|
nullptr};
|
2017-01-29 16:03:16 +08:00
|
|
|
|
|
|
|
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
|
|
|
|
Idxs[1] = ConstantInt::get(Type::getInt32Ty(F->getContext()), i);
|
|
|
|
Value *Idx = GetElementPtrInst::Create(
|
|
|
|
AgTy, TheAlloca, Idxs, TheAlloca->getName() + "." + Twine(i),
|
|
|
|
InsertPt);
|
2017-01-29 16:03:19 +08:00
|
|
|
I2->setName(I->getName() + "." + Twine(i));
|
2017-01-29 16:03:16 +08:00
|
|
|
new StoreInst(&*I2++, Idx, InsertPt);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Anything that used the arg should now use the alloca.
|
|
|
|
I->replaceAllUsesWith(TheAlloca);
|
|
|
|
TheAlloca->takeName(&*I);
|
|
|
|
|
|
|
|
// If the alloca is used in a call, we must clear the tail flag since
|
|
|
|
// the callee now uses an alloca from the caller.
|
|
|
|
for (User *U : TheAlloca->users()) {
|
|
|
|
CallInst *Call = dyn_cast<CallInst>(U);
|
|
|
|
if (!Call)
|
|
|
|
continue;
|
|
|
|
Call->setTailCall(false);
|
|
|
|
}
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (I->use_empty())
|
|
|
|
continue;
|
|
|
|
|
|
|
|
// Otherwise, if we promoted this argument, then all users are load
|
|
|
|
// instructions (or GEPs with only load users), and all loads should be
|
|
|
|
// using the new argument that we added.
|
|
|
|
ScalarizeTable &ArgIndices = ScalarizedElements[&*I];
|
|
|
|
|
|
|
|
while (!I->use_empty()) {
|
|
|
|
if (LoadInst *LI = dyn_cast<LoadInst>(I->user_back())) {
|
|
|
|
assert(ArgIndices.begin()->second.empty() &&
|
|
|
|
"Load element should sort to front!");
|
2017-01-29 16:03:19 +08:00
|
|
|
I2->setName(I->getName() + ".val");
|
2017-01-29 16:03:16 +08:00
|
|
|
LI->replaceAllUsesWith(&*I2);
|
|
|
|
LI->eraseFromParent();
|
|
|
|
DEBUG(dbgs() << "*** Promoted load of argument '" << I->getName()
|
2017-01-29 16:03:19 +08:00
|
|
|
<< "' in function '" << F->getName() << "'\n");
|
2017-01-29 16:03:16 +08:00
|
|
|
} else {
|
|
|
|
GetElementPtrInst *GEP = cast<GetElementPtrInst>(I->user_back());
|
|
|
|
IndicesVector Operands;
|
|
|
|
Operands.reserve(GEP->getNumIndices());
|
|
|
|
for (User::op_iterator II = GEP->idx_begin(), IE = GEP->idx_end();
|
|
|
|
II != IE; ++II)
|
|
|
|
Operands.push_back(cast<ConstantInt>(*II)->getSExtValue());
|
|
|
|
|
|
|
|
// GEPs with a single 0 index can be merged with direct loads
|
|
|
|
if (Operands.size() == 1 && Operands.front() == 0)
|
|
|
|
Operands.clear();
|
|
|
|
|
|
|
|
Function::arg_iterator TheArg = I2;
|
|
|
|
for (ScalarizeTable::iterator It = ArgIndices.begin();
|
|
|
|
It->second != Operands; ++It, ++TheArg) {
|
|
|
|
assert(It != ArgIndices.end() && "GEP not handled??");
|
|
|
|
}
|
|
|
|
|
|
|
|
std::string NewName = I->getName();
|
|
|
|
for (unsigned i = 0, e = Operands.size(); i != e; ++i) {
|
2017-01-29 16:03:19 +08:00
|
|
|
NewName += "." + utostr(Operands[i]);
|
2017-01-29 16:03:16 +08:00
|
|
|
}
|
|
|
|
NewName += ".val";
|
|
|
|
TheArg->setName(NewName);
|
|
|
|
|
|
|
|
DEBUG(dbgs() << "*** Promoted agg argument '" << TheArg->getName()
|
2017-01-29 16:03:19 +08:00
|
|
|
<< "' of function '" << NF->getName() << "'\n");
|
2017-01-29 16:03:16 +08:00
|
|
|
|
|
|
|
// All of the uses must be load instructions. Replace them all with
|
|
|
|
// the argument specified by ArgNo.
|
|
|
|
while (!GEP->use_empty()) {
|
|
|
|
LoadInst *L = cast<LoadInst>(GEP->user_back());
|
|
|
|
L->replaceAllUsesWith(&*TheArg);
|
|
|
|
L->eraseFromParent();
|
|
|
|
}
|
|
|
|
GEP->eraseFromParent();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Increment I2 past all of the arguments added for this promoted pointer.
|
|
|
|
std::advance(I2, ArgIndices.size());
|
|
|
|
}
|
|
|
|
|
|
|
|
NF_CGN->stealCalledFunctionsFrom(CG[F]);
|
2017-01-29 16:03:19 +08:00
|
|
|
|
2017-01-29 16:03:16 +08:00
|
|
|
// Now that the old function is dead, delete it. If there is a dangling
|
|
|
|
// reference to the CallgraphNode, just leave the dead function around for
|
|
|
|
// someone else to nuke.
|
|
|
|
CallGraphNode *CGN = CG[F];
|
|
|
|
if (CGN->getNumReferences() == 0)
|
|
|
|
delete CG.removeFunctionFromModule(CGN);
|
|
|
|
else
|
|
|
|
F->setLinkage(Function::ExternalLinkage);
|
2017-01-29 16:03:19 +08:00
|
|
|
|
2017-01-29 16:03:16 +08:00
|
|
|
return NF_CGN;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// AllCallersPassInValidPointerForArgument - Return true if we can prove that
|
|
|
|
/// all callees pass in a valid pointer for the specified function argument.
|
2017-01-29 16:03:21 +08:00
|
|
|
static bool allCallersPassInValidPointerForArgument(Argument *Arg) {
  Function *F = Arg->getParent();
  const DataLayout &DL = F->getParent()->getDataLayout();

  // Position of this formal argument in the callee's argument list.
  unsigned Idx = Arg->getArgNo();

  // Walk every user of the function. By the time this runs, the pass has
  // already established that all users are direct call sites.
  for (User *U : F->users()) {
    CallSite CS(U);
    assert(CS && "Should only have direct calls!");

    // The corresponding actual argument must be provably dereferenceable at
    // this call site; otherwise hoisting the load into the caller could
    // introduce a trap that the original program never executed.
    Value *ActualArg = CS.getArgument(Idx);
    if (!isDereferenceablePointer(ActualArg, DL))
      return false;
  }

  return true;
}
|
|
|
|
|
|
|
|
/// Returns true if Prefix is a prefix of longer. That means, Longer has a size
|
|
|
|
/// that is greater than or equal to the size of prefix, and each of the
|
|
|
|
/// elements in Prefix is the same as the corresponding elements in Longer.
|
|
|
|
///
|
|
|
|
/// This means it also returns true when Prefix and Longer are equal!
|
2017-01-29 16:03:21 +08:00
|
|
|
static bool isPrefix(const IndicesVector &Prefix, const IndicesVector &Longer) {
|
2017-01-29 16:03:16 +08:00
|
|
|
if (Prefix.size() > Longer.size())
|
2008-07-29 18:00:13 +08:00
|
|
|
return false;
|
2012-07-19 18:46:05 +08:00
|
|
|
return std::equal(Prefix.begin(), Prefix.end(), Longer.begin());
|
2008-07-29 18:00:13 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/// Checks if Indices, or a prefix of Indices, is in Set.
|
2017-01-29 16:03:21 +08:00
|
|
|
static bool prefixIn(const IndicesVector &Indices,
|
2016-07-03 02:59:51 +08:00
|
|
|
std::set<IndicesVector> &Set) {
|
2017-01-29 16:03:19 +08:00
|
|
|
std::set<IndicesVector>::iterator Low;
|
|
|
|
Low = Set.upper_bound(Indices);
|
|
|
|
if (Low != Set.begin())
|
|
|
|
Low--;
|
|
|
|
// Low is now the last element smaller than or equal to Indices. This means
|
|
|
|
// it points to a prefix of Indices (possibly Indices itself), if such
|
|
|
|
// prefix exists.
|
|
|
|
//
|
|
|
|
// This load is safe if any prefix of its operands is safe to load.
|
2017-01-29 16:03:21 +08:00
|
|
|
return Low != Set.end() && isPrefix(*Low, Indices);
|
2008-07-29 18:00:13 +08:00
|
|
|
}
|
|
|
|
|
2010-02-11 00:03:48 +08:00
|
|
|
/// Mark the given indices (ToMark) as safe in the given set of indices
|
2008-07-29 18:00:13 +08:00
|
|
|
/// (Safe). Marking safe usually means adding ToMark to Safe. However, if there
|
|
|
|
/// is already a prefix of Indices in Safe, Indices are implicitely marked safe
|
|
|
|
/// already. Furthermore, any indices that Indices is itself a prefix of, are
|
|
|
|
/// removed from Safe (since they are implicitely safe because of Indices now).
|
2017-01-29 16:03:21 +08:00
|
|
|
static void markIndicesSafe(const IndicesVector &ToMark,
                            std::set<IndicesVector> &Safe) {
  std::set<IndicesVector>::iterator Low;
  // upper_bound finds the first entry strictly greater than ToMark; the entry
  // just before it (if any) is then the largest entry <= ToMark.
  Low = Safe.upper_bound(ToMark);
  // Guard against the case where Safe is empty
  if (Low != Safe.begin())
    Low--;
  // Low is now the last element smaller than or equal to ToMark. This
  // means it points to a prefix of ToMark (possibly ToMark itself), if
  // such prefix exists.
  if (Low != Safe.end()) {
    if (isPrefix(*Low, ToMark))
      // If there is already a prefix of these indices (or exactly these
      // indices) marked as safe, don't bother adding these indices
      return;

    // Increment Low, so we can use it as an "insert before" hint
    ++Low;
  }
  // Insert ToMark, passing Low as a position hint so the insertion is
  // amortized constant time.
  Low = Safe.insert(Low, ToMark);
  ++Low;
  // If ToMark is a prefix of longer index list(s) already in Safe, those
  // entries are now implied by ToMark; remove them. They sort immediately
  // after ToMark, so scan forward from the insertion point.
  std::set<IndicesVector>::iterator End = Safe.end();
  while (Low != End && isPrefix(ToMark, *Low)) {
    std::set<IndicesVector>::iterator Remove = Low;
    ++Low;
    // std::set::erase only invalidates the erased iterator, so advancing Low
    // first keeps the loop iterator valid.
    Safe.erase(Remove);
  }
}
|
2004-05-24 05:21:17 +08:00
|
|
|
|
|
|
|
/// isSafeToPromoteArgument - As you might guess from the name of this method,
|
|
|
|
/// it checks to see if it is both safe and useful to promote the argument.
|
|
|
|
/// This method limits promotion of aggregates to only promote up to three
|
|
|
|
/// elements of the aggregate in order to avoid exploding the number of
|
|
|
|
/// arguments passed in.
|
2016-07-03 02:59:51 +08:00
|
|
|
static bool isSafeToPromoteArgument(Argument *Arg, bool isByValOrInAlloca,
                                    AAResults &AAR, unsigned MaxElements) {
  typedef std::set<IndicesVector> GEPIndicesSet;

  // Quick exit for unused arguments
  if (Arg->use_empty())
    return true;

  // We can only promote this argument if all of the uses are loads, or are GEP
  // instructions (with constant indices) that are subsequently loaded.
  //
  // Promoting the argument causes it to be loaded in the caller
  // unconditionally. This is only safe if we can prove that either the load
  // would have happened in the callee anyway (ie, there is a load in the entry
  // block) or the pointer passed in at every call site is guaranteed to be
  // valid.
  // In the former case, invalid loads can happen, but would have happened
  // anyway, in the latter case, invalid loads won't happen. This prevents us
  // from introducing an invalid load that wouldn't have happened in the
  // original code.
  //
  // This set will contain all sets of indices that are loaded in the entry
  // block, and thus are safe to unconditionally load in the caller.
  //
  // This optimization is also safe for InAlloca parameters, because it verifies
  // that the address isn't captured.
  GEPIndicesSet SafeToUnconditionallyLoad;

  // This set contains all the sets of indices that we are planning to promote.
  // This makes it possible to limit the number of arguments added.
  GEPIndicesSet ToPromote;

  // If the pointer is always valid, any load with first index 0 is valid.
  if (isByValOrInAlloca || allCallersPassInValidPointerForArgument(Arg))
    SafeToUnconditionallyLoad.insert(IndicesVector(1, 0));

  // First, iterate the entry block and mark loads of (geps of) arguments as
  // safe. The entry block always executes, so any load found here would have
  // happened in the callee regardless of promotion.
  BasicBlock &EntryBlock = Arg->getParent()->front();
  // Declare this here so we can reuse it
  IndicesVector Indices;
  for (Instruction &I : EntryBlock)
    if (LoadInst *LI = dyn_cast<LoadInst>(&I)) {
      Value *V = LI->getPointerOperand();
      if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(V)) {
        V = GEP->getPointerOperand();
        if (V == Arg) {
          // This load actually loads (part of) Arg? Check the indices then.
          Indices.reserve(GEP->getNumIndices());
          for (User::op_iterator II = GEP->idx_begin(), IE = GEP->idx_end();
               II != IE; ++II)
            if (ConstantInt *CI = dyn_cast<ConstantInt>(*II))
              Indices.push_back(CI->getSExtValue());
            else
              // We found a non-constant GEP index for this argument? Bail out
              // right away, can't promote this argument at all.
              return false;

          // Indices checked out, mark them as safe
          markIndicesSafe(Indices, SafeToUnconditionallyLoad);
          Indices.clear();
        }
      } else if (V == Arg) {
        // Direct loads are equivalent to a GEP with a single 0 index.
        markIndicesSafe(IndicesVector(1, 0), SafeToUnconditionallyLoad);
      }
    }

  // Now, iterate all uses of the argument to see if there are any uses that are
  // not (GEP+)loads, or any (GEP+)loads that are not safe to promote.
  SmallVector<LoadInst *, 16> Loads;
  IndicesVector Operands;
  for (Use &U : Arg->uses()) {
    User *UR = U.getUser();
    Operands.clear();
    if (LoadInst *LI = dyn_cast<LoadInst>(UR)) {
      // Don't hack volatile/atomic loads
      if (!LI->isSimple())
        return false;
      Loads.push_back(LI);
      // Direct loads are equivalent to a GEP with a zero index and then a load.
      Operands.push_back(0);
    } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(UR)) {
      if (GEP->use_empty()) {
        // Dead GEP's cause trouble later. Just remove them if we run into
        // them. Note that erasing invalidates the use-list iteration, so we
        // restart the whole analysis via recursion.
        GEP->eraseFromParent();
        // TODO: This runs the above loop over and over again for dead GEPs
        // Couldn't we just do increment the UI iterator earlier and erase the
        // use?
        return isSafeToPromoteArgument(Arg, isByValOrInAlloca, AAR,
                                       MaxElements);
      }

      // Ensure that all of the indices are constants.
      for (User::op_iterator i = GEP->idx_begin(), e = GEP->idx_end(); i != e;
           ++i)
        if (ConstantInt *C = dyn_cast<ConstantInt>(*i))
          Operands.push_back(C->getSExtValue());
        else
          return false; // Not a constant operand GEP!

      // Ensure that the only users of the GEP are load instructions.
      for (User *GEPU : GEP->users())
        if (LoadInst *LI = dyn_cast<LoadInst>(GEPU)) {
          // Don't hack volatile/atomic loads
          if (!LI->isSimple())
            return false;
          Loads.push_back(LI);
        } else {
          // Other uses than load?
          return false;
        }
    } else {
      return false; // Not a load or a GEP.
    }

    // Now, see if it is safe to promote this load / loads of this GEP. Loading
    // is safe if Operands, or a prefix of Operands, is marked as safe.
    if (!prefixIn(Operands, SafeToUnconditionallyLoad))
      return false;

    // See if we are already promoting a load with these indices. If not, check
    // to make sure that we aren't promoting too many elements. If so, nothing
    // to do.
    if (ToPromote.find(Operands) == ToPromote.end()) {
      if (MaxElements > 0 && ToPromote.size() == MaxElements) {
        DEBUG(dbgs() << "argpromotion not promoting argument '"
                     << Arg->getName()
                     << "' because it would require adding more "
                     << "than " << MaxElements
                     << " arguments to the function.\n");
        // We limit aggregate promotion to only promoting up to a fixed number
        // of elements of the aggregate.
        return false;
      }
      ToPromote.insert(std::move(Operands));
    }
  }

  if (Loads.empty())
    return true; // No users, this is a dead argument.

  // Okay, now we know that the argument is only used by load instructions and
  // it is safe to unconditionally perform all of them. Use alias analysis to
  // check to see if the pointer is guaranteed to not be modified from entry of
  // the function to each of the load instructions.

  // Because there could be several/many load instructions, remember which
  // blocks we know to be transparent to the load.
  df_iterator_default_set<BasicBlock *, 16> TranspBlocks;

  for (LoadInst *Load : Loads) {
    // Check to see if the load is invalidated from the start of the block to
    // the load itself.
    BasicBlock *BB = Load->getParent();

    MemoryLocation Loc = MemoryLocation::get(Load);
    if (AAR.canInstructionRangeModRef(BB->front(), *Load, Loc, MRI_Mod))
      return false; // Pointer is invalidated!

    // Now check every path from the entry block to the load for transparency.
    // To do this, we perform a depth first search on the inverse CFG from the
    // loading block. TranspBlocks is shared across loads, so blocks already
    // proven transparent are not re-scanned for later loads.
    for (BasicBlock *P : predecessors(BB)) {
      for (BasicBlock *TranspBB : inverse_depth_first_ext(P, TranspBlocks))
        if (AAR.canBasicBlockModify(*TranspBB, Loc))
          return false;
    }
  }

  // If the path from the entry of the function to each load is free of
  // instructions that potentially invalidate the load, we can make the
  // transformation!
  return true;
}
|
|
|
|
|
2017-01-29 16:03:16 +08:00
|
|
|
/// \brief Checks if a type could have padding bytes.
|
|
|
|
static bool isDenselyPacked(Type *type, const DataLayout &DL) {
|
2004-03-08 05:29:54 +08:00
|
|
|
|
2017-01-29 16:03:16 +08:00
|
|
|
// There is no size information, so be conservative.
|
|
|
|
if (!type->isSized())
|
|
|
|
return false;
|
2004-06-21 08:07:58 +08:00
|
|
|
|
2017-01-29 16:03:16 +08:00
|
|
|
// If the alloc size is not equal to the storage size, then there are padding
|
|
|
|
// bytes. For x86_fp80 on x86-64, size: 80 alloc size: 128.
|
|
|
|
if (DL.getTypeSizeInBits(type) != DL.getTypeAllocSizeInBits(type))
|
|
|
|
return false;
|
2004-03-08 09:04:36 +08:00
|
|
|
|
2017-01-29 16:03:16 +08:00
|
|
|
if (!isa<CompositeType>(type))
|
|
|
|
return true;
|
2004-05-24 05:21:17 +08:00
|
|
|
|
2017-01-29 16:03:16 +08:00
|
|
|
// For homogenous sequential types, check for padding within members.
|
|
|
|
if (SequentialType *seqTy = dyn_cast<SequentialType>(type))
|
|
|
|
return isDenselyPacked(seqTy->getElementType(), DL);
|
2007-11-27 21:23:08 +08:00
|
|
|
|
2017-01-29 16:03:16 +08:00
|
|
|
// Check for padding within and between elements of a struct.
|
|
|
|
StructType *StructTy = cast<StructType>(type);
|
|
|
|
const StructLayout *Layout = DL.getStructLayout(StructTy);
|
|
|
|
uint64_t StartPos = 0;
|
|
|
|
for (unsigned i = 0, E = StructTy->getNumElements(); i < E; ++i) {
|
|
|
|
Type *ElTy = StructTy->getElementType(i);
|
|
|
|
if (!isDenselyPacked(ElTy, DL))
|
|
|
|
return false;
|
|
|
|
if (StartPos != Layout->getElementOffsetInBits(i))
|
|
|
|
return false;
|
|
|
|
StartPos += DL.getTypeAllocSizeInBits(ElTy);
|
|
|
|
}
|
2008-09-07 17:54:09 +08:00
|
|
|
|
2017-01-29 16:03:16 +08:00
|
|
|
return true;
|
|
|
|
}
|
2008-07-29 18:00:13 +08:00
|
|
|
|
2017-01-29 16:03:16 +08:00
|
|
|
/// \brief Checks if the padding bytes of an argument could be accessed.
|
|
|
|
static bool canPaddingBeAccessed(Argument *arg) {
|
2004-03-08 09:04:36 +08:00
|
|
|
|
2017-01-29 16:03:16 +08:00
|
|
|
assert(arg->hasByValAttr());
|
2004-03-08 09:04:36 +08:00
|
|
|
|
2017-01-29 16:03:16 +08:00
|
|
|
// Track all the pointers to the argument to make sure they are not captured.
|
|
|
|
SmallPtrSet<Value *, 16> PtrValues;
|
|
|
|
PtrValues.insert(arg);
|
|
|
|
|
|
|
|
// Track all of the stores.
|
|
|
|
SmallVector<StoreInst *, 16> Stores;
|
|
|
|
|
|
|
|
// Scan through the uses recursively to make sure the pointer is always used
|
|
|
|
// sanely.
|
|
|
|
SmallVector<Value *, 16> WorkList;
|
|
|
|
WorkList.insert(WorkList.end(), arg->user_begin(), arg->user_end());
|
|
|
|
while (!WorkList.empty()) {
|
|
|
|
Value *V = WorkList.back();
|
|
|
|
WorkList.pop_back();
|
|
|
|
if (isa<GetElementPtrInst>(V) || isa<PHINode>(V)) {
|
|
|
|
if (PtrValues.insert(V).second)
|
|
|
|
WorkList.insert(WorkList.end(), V->user_begin(), V->user_end());
|
|
|
|
} else if (StoreInst *Store = dyn_cast<StoreInst>(V)) {
|
|
|
|
Stores.push_back(Store);
|
|
|
|
} else if (!isa<LoadInst>(V)) {
|
|
|
|
return true;
|
2004-03-08 05:29:54 +08:00
|
|
|
}
|
2008-01-12 06:31:41 +08:00
|
|
|
}
|
2004-03-08 05:29:54 +08:00
|
|
|
|
2017-01-29 16:03:19 +08:00
|
|
|
// Check to make sure the pointers aren't captured
|
2017-01-29 16:03:16 +08:00
|
|
|
for (StoreInst *Store : Stores)
|
|
|
|
if (PtrValues.count(Store->getValueOperand()))
|
|
|
|
return true;
|
2004-03-08 05:29:54 +08:00
|
|
|
|
2017-01-29 16:03:16 +08:00
|
|
|
return false;
|
|
|
|
}
|
2005-04-22 07:48:37 +08:00
|
|
|
|
2017-01-29 16:03:16 +08:00
|
|
|
/// PromoteArguments - This method checks the specified function to see if there
/// are any promotable arguments and if it is safe to promote the function (for
/// example, all callers are direct).  If safe to promote some arguments, it
/// calls the DoPromotion method.
///
/// Returns the updated call graph node for the rewritten function, or nullptr
/// if nothing was changed.
static CallGraphNode *
promoteArguments(CallGraphNode *CGN, CallGraph &CG,
                 function_ref<AAResults &(Function &F)> AARGetter,
                 unsigned MaxElements) {
  Function *F = CGN->getFunction();

  // Make sure that it is local to this module: changing the signature of an
  // externally visible function would break callers we can't see.
  if (!F || !F->hasLocalLinkage())
    return nullptr;

  // Don't promote arguments for variadic functions. Adding, removing, or
  // changing non-pack parameters can change the classification of pack
  // parameters. Frontends encode that classification at the call site in the
  // IR, while in the callee the classification is determined dynamically based
  // on the number of registers consumed so far.
  if (F->isVarArg())
    return nullptr;

  // First check: see if there are any pointer arguments!  If not, quick exit.
  SmallVector<Argument *, 16> PointerArgs;
  for (Argument &I : F->args())
    if (I.getType()->isPointerTy())
      PointerArgs.push_back(&I);
  if (PointerArgs.empty())
    return nullptr;

  // Second check: make sure that all callers are direct callers.  We can't
  // transform functions that have indirect callers.  Also see if the function
  // is self-recursive (used below to avoid infinitely peeling recursive
  // argument types).
  bool isSelfRecursive = false;
  for (Use &U : F->uses()) {
    CallSite CS(U.getUser());
    // Must be a direct call.
    if (CS.getInstruction() == nullptr || !CS.isCallee(&U))
      return nullptr;

    if (CS.getInstruction()->getParent()->getParent() == F)
      isSelfRecursive = true;
  }

  const DataLayout &DL = F->getParent()->getDataLayout();

  // Per-function alias analysis results, built on demand by the caller.
  AAResults &AAR = AARGetter(*F);

  // Check to see which arguments are promotable.  If an argument is promotable,
  // add it to ArgsToPromote.
  SmallPtrSet<Argument *, 8> ArgsToPromote;
  SmallPtrSet<Argument *, 8> ByValArgsToTransform;
  for (Argument *PtrArg : PointerArgs) {
    Type *AgTy = cast<PointerType>(PtrArg->getType())->getElementType();

    // Replace sret attribute with noalias. This reduces register pressure by
    // avoiding a register copy.
    // NOTE: attribute indices here are 1-based (ArgNo + 1) because index 0 is
    // the return value in this attribute-list API.
    if (PtrArg->hasStructRetAttr()) {
      unsigned ArgNo = PtrArg->getArgNo();
      F->setAttributes(
          F->getAttributes()
              .removeAttribute(F->getContext(), ArgNo + 1, Attribute::StructRet)
              .addAttribute(F->getContext(), ArgNo + 1, Attribute::NoAlias));
      // Mirror the attribute change at every (direct) call site.
      for (Use &U : F->uses()) {
        CallSite CS(U.getUser());
        CS.setAttributes(
            CS.getAttributes()
                .removeAttribute(F->getContext(), ArgNo + 1,
                                 Attribute::StructRet)
                .addAttribute(F->getContext(), ArgNo + 1, Attribute::NoAlias));
      }
    }

    // If this is a byval argument, and if the aggregate type is small, just
    // pass the elements, which is always safe, if the passed value is densely
    // packed or if we can prove the padding bytes are never accessed. This does
    // not apply to inalloca.
    bool isSafeToPromote =
        PtrArg->hasByValAttr() &&
        (isDenselyPacked(AgTy, DL) || !canPaddingBeAccessed(PtrArg));
    if (isSafeToPromote) {
      if (StructType *STy = dyn_cast<StructType>(AgTy)) {
        // Refuse to expand structs with too many elements; passing thousands
        // of operands is unprofitable (MaxElements == 0 means unlimited).
        if (MaxElements > 0 && STy->getNumElements() > MaxElements) {
          DEBUG(dbgs() << "argpromotion disable promoting argument '"
                       << PtrArg->getName()
                       << "' because it would require adding more"
                       << " than " << MaxElements
                       << " arguments to the function.\n");
          continue;
        }

        // If all the elements are single-value types, we can promote it.
        bool AllSimple = true;
        for (const auto *EltTy : STy->elements()) {
          if (!EltTy->isSingleValueType()) {
            AllSimple = false;
            break;
          }
        }

        // Safe to transform, don't even bother trying to "promote" it.
        // Passing the elements as a scalar will allow sroa to hack on
        // the new alloca we introduce.
        if (AllSimple) {
          ByValArgsToTransform.insert(PtrArg);
          continue;
        }
      }
    }

    // If the argument is a recursive type and we're in a recursive
    // function, we could end up infinitely peeling the function argument.
    if (isSelfRecursive) {
      if (StructType *STy = dyn_cast<StructType>(AgTy)) {
        bool RecursiveType = false;
        for (const auto *EltTy : STy->elements()) {
          if (EltTy == PtrArg->getType()) {
            RecursiveType = true;
            break;
          }
        }
        if (RecursiveType)
          continue;
      }
    }

    // Otherwise, see if we can promote the pointer to its value (requires an
    // alias-analysis proof that the argument is only loaded).
    if (isSafeToPromoteArgument(PtrArg, PtrArg->hasByValOrInAllocaAttr(), AAR,
                                MaxElements))
      ArgsToPromote.insert(PtrArg);
  }

  // No promotable pointer arguments.
  if (ArgsToPromote.empty() && ByValArgsToTransform.empty())
    return nullptr;

  return doPromotion(F, ArgsToPromote, ByValArgsToTransform, CG);
}
|
2008-09-08 19:07:35 +08:00
|
|
|
|
2017-01-29 16:03:16 +08:00
|
|
|
namespace {
/// ArgPromotion - The 'by reference' to 'by value' argument promotion pass.
///
struct ArgPromotion : public CallGraphSCCPass {
  // Declare required analyses (assumption cache, TLI, and the AA result
  // machinery) and defer the rest to the CGSCC base class.
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    getAAResultsAnalysisUsage(AU);
    CallGraphSCCPass::getAnalysisUsage(AU);
  }

  bool runOnSCC(CallGraphSCC &SCC) override;
  static char ID; // Pass identification, replacement for typeid
  explicit ArgPromotion(unsigned MaxElements = 3)
      : CallGraphSCCPass(ID), MaxElements(MaxElements) {
    initializeArgPromotionPass(*PassRegistry::getPassRegistry());
  }

private:
  // Un-shadow the base-class overloads hidden by the override below.
  using llvm::Pass::doInitialization;
  bool doInitialization(CallGraph &CG) override;
  /// The maximum number of elements to expand, or 0 for unlimited.
  unsigned MaxElements;
};
}
|
2008-09-07 17:54:09 +08:00
|
|
|
|
2017-01-29 16:03:16 +08:00
|
|
|
// Legacy pass-manager registration for -argpromotion and its analysis
// dependencies.
char ArgPromotion::ID = 0;
INITIALIZE_PASS_BEGIN(ArgPromotion, "argpromotion",
                      "Promote 'by reference' arguments to scalars", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(ArgPromotion, "argpromotion",
                    "Promote 'by reference' arguments to scalars", false, false)
|
2008-01-12 06:31:41 +08:00
|
|
|
|
2017-01-29 16:03:16 +08:00
|
|
|
// Factory entry point: build an ArgPromotion pass with the given expansion
// limit (0 means unlimited).
Pass *llvm::createArgumentPromotionPass(unsigned MaxElements) {
  Pass *P = new ArgPromotion(MaxElements);
  return P;
}
|
2008-07-29 18:00:13 +08:00
|
|
|
|
2017-01-29 16:03:16 +08:00
|
|
|
bool ArgPromotion::runOnSCC(CallGraphSCC &SCC) {
  if (skipSCC(SCC))
    return false;

  // Get the callgraph information that we need to update to reflect our
  // changes.
  CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();

  // We compute dedicated AA results for each function in the SCC as needed.
  // The Optionals live outside the lambda so the results stay alive long
  // enough to be queried; each call rebuilds them for the requested function.
  Optional<BasicAAResult> BAR;
  Optional<AAResults> AAR;
  auto AARGetter = [&](Function &F) -> AAResults & {
    BAR.emplace(createLegacyPMBasicAAResult(*this, F));
    AAR.emplace(createLegacyPMAAResults(*this, F, *BAR));
    return *AAR;
  };

  bool AnyChange = false;
  bool IterationChange;

  // Promote to a fixed point: each successful promotion may expose more.
  do {
    IterationChange = false;
    // Attempt to promote arguments from all functions in this SCC.
    for (CallGraphNode *Node : SCC) {
      CallGraphNode *Replacement =
          promoteArguments(Node, CG, AARGetter, MaxElements);
      if (!Replacement)
        continue;
      IterationChange = true;
      SCC.ReplaceNode(Node, Replacement);
    }
    // Remember that we changed something.
    AnyChange |= IterationChange;
  } while (IterationChange);

  return AnyChange;
}
|
2014-07-02 05:13:37 +08:00
|
|
|
|
|
|
|
// Nothing pass-specific to set up; defer entirely to the base class.
bool ArgPromotion::doInitialization(CallGraph &CG) {
  return CallGraphSCCPass::doInitialization(CG);
}
|