//===- LoopSimplify.cpp - Loop Canonicalization Pass ----------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs several transformations to transform natural loops into a
// simpler form, which makes subsequent analyses and transformations simpler and
// more effective.
//
// Loop pre-header insertion guarantees that there is a single, non-critical
// entry edge from outside of the loop to the loop header. This simplifies a
// number of analyses and transformations, such as LICM.
//
// Loop exit-block insertion guarantees that all exit blocks from the loop
// (blocks which are outside of the loop that have predecessors inside of the
// loop) only have predecessors from inside of the loop (and are thus dominated
// by the loop header). This simplifies transformations such as store-sinking
// that are built into LICM.
//
// This pass also guarantees that loops will have exactly one backedge.
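//
// Schematically, a loop in the simplified form produced by this pass looks
// like this (an illustrative sketch, not taken from any particular input):
//
//     preheader:
//       br label %header            ; the single entry edge into the loop
//     header:                       ; predecessors: %preheader and %latch only
//       ...
//       br i1 %cond, label %latch, label %exit
//     latch:
//       br label %header            ; the single backedge
//     exit:                         ; dedicated exit: all preds are in the loop
//       ...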
//
// Indirectbr instructions introduce several complications. If the loop
// contains or is entered by an indirectbr instruction, it may not be possible
// to transform the loop and make these guarantees. Client code should check
// that these conditions are true before relying on them.
//
// Note that the simplifycfg pass will clean up blocks which are split out but
// end up being unnecessary, so usage of this pass should not pessimize
// generated code.
//
// This pass obviously modifies the CFG, but updates loop information and
// dominator information.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/LoopSimplify.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/DependenceAnalysis.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
using namespace llvm;

#define DEBUG_TYPE "loop-simplify"

STATISTIC(NumInserted, "Number of pre-header or exit blocks inserted");
STATISTIC(NumNested  , "Number of nested loops split out");

// If NewBB isn't already placed right after one of the 'outside blocks'
// (SplitPreds), move it there. This prevents the preheader from being placed
// inside the loop body, e.g. when the loop hasn't been rotated.
static void placeSplitBlockCarefully(BasicBlock *NewBB,
                                     SmallVectorImpl<BasicBlock *> &SplitPreds,
                                     Loop *L) {
  // Check to see if NewBB is already well placed.
  Function::iterator BBI = --NewBB->getIterator();
  for (unsigned i = 0, e = SplitPreds.size(); i != e; ++i) {
    if (&*BBI == SplitPreds[i])
      return;
  }

  // If it isn't already after an outside block, move it after one. This is
  // always good as it makes the uncond branch from the outside block into a
  // fall-through.

  // Figure out *which* outside block to put this after. Prefer an outside
  // block that neighbors a BB actually in the loop.
  BasicBlock *FoundBB = nullptr;
  for (unsigned i = 0, e = SplitPreds.size(); i != e; ++i) {
    Function::iterator BBI = SplitPreds[i]->getIterator();
    if (++BBI != NewBB->getParent()->end() && L->contains(&*BBI)) {
      FoundBB = SplitPreds[i];
      break;
    }
  }

  // If our heuristic for a *good* bb to place this after doesn't find
  // anything, just pick something. It's likely better than leaving it within
  // the loop.
  if (!FoundBB)
    FoundBB = SplitPreds[0];
  NewBB->moveAfter(FoundBB);
}

/// InsertPreheaderForLoop - Once we discover that a loop doesn't have a
/// preheader, this method is called to insert one. This method has two phases:
/// preheader insertion and analysis updating.
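///
/// A typical use, mirroring simplifyOneLoop() below (sketch):
///   if (!L->getLoopPreheader())
///     Preheader = InsertPreheaderForLoop(L, DT, LI, PreserveLCSSA);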
///
BasicBlock *llvm::InsertPreheaderForLoop(Loop *L, DominatorTree *DT,
                                         LoopInfo *LI, bool PreserveLCSSA) {
  BasicBlock *Header = L->getHeader();

  // Compute the set of predecessors of the loop that are not in the loop.
  SmallVector<BasicBlock*, 8> OutsideBlocks;
  for (pred_iterator PI = pred_begin(Header), PE = pred_end(Header);
       PI != PE; ++PI) {
    BasicBlock *P = *PI;
    if (!L->contains(P)) { // Coming in from outside the loop?
      // If the loop is branched to from an indirect branch, we won't
      // be able to fully transform the loop, because it prohibits
      // edge splitting.
      if (isa<IndirectBrInst>(P->getTerminator())) return nullptr;

      // Keep track of it.
      OutsideBlocks.push_back(P);
    }
  }

  // Split out the loop pre-header.
  BasicBlock *PreheaderBB;
  PreheaderBB = SplitBlockPredecessors(Header, OutsideBlocks, ".preheader", DT,
                                       LI, PreserveLCSSA);
  if (!PreheaderBB)
    return nullptr;

  DEBUG(dbgs() << "LoopSimplify: Creating pre-header "
               << PreheaderBB->getName() << "\n");

  // Make sure that NewBB is put someplace intelligent, which doesn't mess up
  // code layout too horribly.
  placeSplitBlockCarefully(PreheaderBB, OutsideBlocks, L);

  return PreheaderBB;
}

/// \brief Ensure that the loop preheader dominates all exit blocks.
///
/// This method is used to split exit blocks that have predecessors outside of
/// the loop.
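///
/// For example (sketch): if %exit has a predecessor %latch inside the loop and
/// another predecessor %other outside of it, the in-loop edges are redirected
/// to a new block %exit.loopexit that branches to %exit, so the new block has
/// only in-loop predecessors.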
static BasicBlock *rewriteLoopExitBlock(Loop *L, BasicBlock *Exit,
                                        DominatorTree *DT, LoopInfo *LI,
                                        bool PreserveLCSSA) {
  SmallVector<BasicBlock*, 8> LoopBlocks;
  for (pred_iterator I = pred_begin(Exit), E = pred_end(Exit); I != E; ++I) {
    BasicBlock *P = *I;
    if (L->contains(P)) {
      // Don't do this if the loop is exited via an indirect branch.
      if (isa<IndirectBrInst>(P->getTerminator())) return nullptr;

      LoopBlocks.push_back(P);
    }
  }

  assert(!LoopBlocks.empty() && "No edges coming in from outside the loop?");
  BasicBlock *NewExitBB = nullptr;

  NewExitBB = SplitBlockPredecessors(Exit, LoopBlocks, ".loopexit", DT, LI,
                                     PreserveLCSSA);
  if (!NewExitBB)
    return nullptr;

  DEBUG(dbgs() << "LoopSimplify: Creating dedicated exit block "
               << NewExitBB->getName() << "\n");
  return NewExitBB;
}

/// Add the specified block, and all of its predecessors, to the specified set,
/// if it's not already in there. Stop predecessor traversal when we reach
/// StopBlock.
static void addBlockAndPredsToSet(BasicBlock *InputBB, BasicBlock *StopBlock,
                                  std::set<BasicBlock*> &Blocks) {
  SmallVector<BasicBlock *, 8> Worklist;
  Worklist.push_back(InputBB);
  do {
    BasicBlock *BB = Worklist.pop_back_val();
    if (Blocks.insert(BB).second && BB != StopBlock)
      // If BB is not already processed and it is not a stop block, then
      // insert its predecessors into the worklist.
      for (pred_iterator I = pred_begin(BB), E = pred_end(BB); I != E; ++I) {
        BasicBlock *WBB = *I;
        Worklist.push_back(WBB);
      }
  } while (!Worklist.empty());
}

/// \brief The first part of loop-nestification is to find a PHI node that tells
/// us how to partition the loops.
static PHINode *findPHIToPartitionLoops(Loop *L, DominatorTree *DT,
                                        AssumptionCache *AC) {
  const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
  for (BasicBlock::iterator I = L->getHeader()->begin(); isa<PHINode>(I); ) {
    PHINode *PN = cast<PHINode>(I);
    ++I;
    if (Value *V = SimplifyInstruction(PN, DL, nullptr, DT, AC)) {
      // This is a degenerate PHI already, don't modify it!
      PN->replaceAllUsesWith(V);
      PN->eraseFromParent();
      continue;
    }

    // Scan this PHI node looking for a use of the PHI node by itself.
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
      if (PN->getIncomingValue(i) == PN &&
          L->contains(PN->getIncomingBlock(i)))
        // We found something tasty to remove.
        return PN;
  }
  return nullptr;
}

/// \brief If this loop has multiple backedges, try to pull one of them out into
/// a nested loop.
///
/// This is important for code that looks like
/// this:
///
///  Loop:
///    ...
///    br cond, Loop, Next
///    ...
///    br cond2, Loop, Out
///
/// To identify this common case, we look at the PHI nodes in the header of the
/// loop. PHI nodes with unchanging values on one backedge correspond to values
/// that change in the "outer" loop, but not in the "inner" loop.
///
/// If we are able to separate out a loop, return the new outer loop that was
/// created.
///
static Loop *separateNestedLoop(Loop *L, BasicBlock *Preheader,
                                DominatorTree *DT, LoopInfo *LI,
                                ScalarEvolution *SE, bool PreserveLCSSA,
                                AssumptionCache *AC) {
  // Don't try to separate loops without a preheader.
  if (!Preheader)
    return nullptr;

  // The header is not a landing pad; preheader insertion should ensure this.
  BasicBlock *Header = L->getHeader();
  assert(!Header->isEHPad() && "Can't insert backedge to EH pad");

  PHINode *PN = findPHIToPartitionLoops(L, DT, AC);
  if (!PN) return nullptr; // No known way to partition.

  // Pull out all predecessors that have varying values in the loop. This
  // handles the case when a PHI node has multiple instances of itself as
  // arguments.
  SmallVector<BasicBlock*, 8> OuterLoopPreds;
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    if (PN->getIncomingValue(i) != PN ||
        !L->contains(PN->getIncomingBlock(i))) {
      // We can't split indirectbr edges.
      if (isa<IndirectBrInst>(PN->getIncomingBlock(i)->getTerminator()))
        return nullptr;
      OuterLoopPreds.push_back(PN->getIncomingBlock(i));
    }
  }
  DEBUG(dbgs() << "LoopSimplify: Splitting out a new outer loop\n");

  // If ScalarEvolution is around and knows anything about values in
  // this loop, tell it to forget them, because we're about to
  // substantially change it.
  if (SE)
    SE->forgetLoop(L);

  BasicBlock *NewBB = SplitBlockPredecessors(Header, OuterLoopPreds, ".outer",
                                             DT, LI, PreserveLCSSA);

  // Make sure that NewBB is put someplace intelligent, which doesn't mess up
  // code layout too horribly.
  placeSplitBlockCarefully(NewBB, OuterLoopPreds, L);

  // Create the new outer loop.
  Loop *NewOuter = new Loop();

  // Change the parent loop to use the outer loop as its child now.
  if (Loop *Parent = L->getParentLoop())
    Parent->replaceChildLoopWith(L, NewOuter);
  else
    LI->changeTopLevelLoop(L, NewOuter);

  // L is now a subloop of our outer loop.
  NewOuter->addChildLoop(L);

  for (Loop::block_iterator I = L->block_begin(), E = L->block_end();
       I != E; ++I)
    NewOuter->addBlockEntry(*I);

  // Now reset the header in L, which had been moved by
  // SplitBlockPredecessors for the outer loop.
  L->moveToHeader(Header);

  // Determine which blocks should stay in L and which should be moved out to
  // the Outer loop now.
  std::set<BasicBlock*> BlocksInL;
  for (pred_iterator PI=pred_begin(Header), E = pred_end(Header); PI!=E; ++PI) {
    BasicBlock *P = *PI;
    if (DT->dominates(Header, P))
      addBlockAndPredsToSet(P, Header, BlocksInL);
  }

  // Scan all of the loop children of L, moving them to OuterLoop if they are
  // not part of the inner loop.
  const std::vector<Loop*> &SubLoops = L->getSubLoops();
  for (size_t I = 0; I != SubLoops.size(); )
    if (BlocksInL.count(SubLoops[I]->getHeader()))
      ++I; // Loop remains in L
    else
      NewOuter->addChildLoop(L->removeChildLoop(SubLoops.begin() + I));

  SmallVector<BasicBlock *, 8> OuterLoopBlocks;
  OuterLoopBlocks.push_back(NewBB);
  // Now that we know which blocks are in L and which need to be moved to
  // OuterLoop, move any blocks that need it.
  for (unsigned i = 0; i != L->getBlocks().size(); ++i) {
    BasicBlock *BB = L->getBlocks()[i];
    if (!BlocksInL.count(BB)) {
      // Move this block to the parent, updating the exit block sets.
      L->removeBlockFromLoop(BB);
      if ((*LI)[BB] == L) {
        LI->changeLoopFor(BB, NewOuter);
        OuterLoopBlocks.push_back(BB);
      }
      --i;
    }
  }

  // Split edges to exit blocks from the inner loop, if they emerged in the
  // process of separating the outer one.
  SmallVector<BasicBlock *, 8> ExitBlocks;
  L->getExitBlocks(ExitBlocks);
  SmallSetVector<BasicBlock *, 8> ExitBlockSet(ExitBlocks.begin(),
                                               ExitBlocks.end());
  for (BasicBlock *ExitBlock : ExitBlockSet) {
    if (any_of(predecessors(ExitBlock),
               [L](BasicBlock *BB) { return !L->contains(BB); })) {
      rewriteLoopExitBlock(L, ExitBlock, DT, LI, PreserveLCSSA);
    }
  }

  if (PreserveLCSSA) {
    // Fix LCSSA form for L. Some values, which previously were only used inside
    // L, can now be used in NewOuter loop. We need to insert phi-nodes for them
    // in corresponding exit blocks.

    // Go through all instructions in OuterLoopBlocks and check if they are
    // using operands from the inner loop. In this case we'll need to fix LCSSA
    // for these instructions.
    SmallSetVector<Instruction *, 8> WorklistSet;
    for (BasicBlock *OuterBB: OuterLoopBlocks) {
      for (Instruction &I : *OuterBB) {
        for (Value *Op : I.operands()) {
          Instruction *OpI = dyn_cast<Instruction>(Op);
          if (!OpI || !L->contains(OpI))
            continue;
          WorklistSet.insert(OpI);
        }
      }
    }
    SmallVector<Instruction *, 8> Worklist(WorklistSet.begin(),
                                           WorklistSet.end());
    formLCSSAForInstructions(Worklist, *DT, *LI);
    assert(NewOuter->isRecursivelyLCSSAForm(*DT) &&
           "LCSSA is broken after separating nested loops!");
  }

  return NewOuter;
}

/// \brief This method is called when the specified loop has more than one
/// backedge in it.
///
/// If this occurs, revector all of these backedges to target a new basic block
/// and have that block branch to the loop header. This ensures that loops
/// have exactly one backedge.
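///
/// For example (sketch): backedges from %latch1 and %latch2 to %header are
/// redirected to a new block %header.backedge, which receives merged PHI
/// entries and ends with a single unconditional branch back to %header.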
static BasicBlock *insertUniqueBackedgeBlock(Loop *L, BasicBlock *Preheader,
                                             DominatorTree *DT, LoopInfo *LI) {
  assert(L->getNumBackEdges() > 1 && "Must have > 1 backedge!");

  // Get information about the loop
  BasicBlock *Header = L->getHeader();
  Function *F = Header->getParent();

  // Unique backedge insertion currently depends on having a preheader.
  if (!Preheader)
    return nullptr;

  // The header is not an EH pad; preheader insertion should ensure this.
  assert(!Header->isEHPad() && "Can't insert backedge to EH pad");

  // Figure out which basic blocks contain back-edges to the loop header.
  std::vector<BasicBlock*> BackedgeBlocks;
  for (pred_iterator I = pred_begin(Header), E = pred_end(Header); I != E; ++I){
    BasicBlock *P = *I;

    // Indirectbr edges cannot be split, so we must fail if we find one.
    if (isa<IndirectBrInst>(P->getTerminator()))
      return nullptr;

    if (P != Preheader) BackedgeBlocks.push_back(P);
  }

  // Create and insert the new backedge block...
  BasicBlock *BEBlock = BasicBlock::Create(Header->getContext(),
                                           Header->getName() + ".backedge", F);
  BranchInst *BETerminator = BranchInst::Create(Header, BEBlock);
  BETerminator->setDebugLoc(Header->getFirstNonPHI()->getDebugLoc());

  DEBUG(dbgs() << "LoopSimplify: Inserting unique backedge block "
               << BEBlock->getName() << "\n");

  // Move the new backedge block to right after the last backedge block.
  Function::iterator InsertPos = ++BackedgeBlocks.back()->getIterator();
  F->getBasicBlockList().splice(InsertPos, F->getBasicBlockList(), BEBlock);

  // Now that the block has been inserted into the function, create PHI nodes in
  // the backedge block which correspond to any PHI nodes in the header block.
  for (BasicBlock::iterator I = Header->begin(); isa<PHINode>(I); ++I) {
    PHINode *PN = cast<PHINode>(I);
    PHINode *NewPN = PHINode::Create(PN->getType(), BackedgeBlocks.size(),
                                     PN->getName()+".be", BETerminator);

    // Loop over the PHI node, moving all entries except the one for the
    // preheader over to the new PHI node.
    unsigned PreheaderIdx = ~0U;
    bool HasUniqueIncomingValue = true;
    Value *UniqueValue = nullptr;
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
      BasicBlock *IBB = PN->getIncomingBlock(i);
      Value *IV = PN->getIncomingValue(i);
      if (IBB == Preheader) {
        PreheaderIdx = i;
      } else {
        NewPN->addIncoming(IV, IBB);
        if (HasUniqueIncomingValue) {
          if (!UniqueValue)
            UniqueValue = IV;
          else if (UniqueValue != IV)
            HasUniqueIncomingValue = false;
        }
      }
    }

    // Delete all of the incoming values from the old PN except the preheader's
    assert(PreheaderIdx != ~0U && "PHI has no preheader entry??");
    if (PreheaderIdx != 0) {
      PN->setIncomingValue(0, PN->getIncomingValue(PreheaderIdx));
      PN->setIncomingBlock(0, PN->getIncomingBlock(PreheaderIdx));
    }
    // Nuke all entries except the zero'th.
    for (unsigned i = 0, e = PN->getNumIncomingValues()-1; i != e; ++i)
      PN->removeIncomingValue(e-i, false);

    // Finally, add the newly constructed PHI node as the entry for the BEBlock.
    PN->addIncoming(NewPN, BEBlock);

    // As an optimization, if all incoming values in the new PhiNode (which is a
    // subset of the incoming values of the old PHI node) have the same value,
    // eliminate the PHI Node.
    if (HasUniqueIncomingValue) {
      NewPN->replaceAllUsesWith(UniqueValue);
      BEBlock->getInstList().erase(NewPN);
    }
  }

  // Now that all of the PHI nodes have been inserted and adjusted, modify the
  // backedge blocks to jump to the BEBlock instead of the header.
  for (unsigned i = 0, e = BackedgeBlocks.size(); i != e; ++i) {
    TerminatorInst *TI = BackedgeBlocks[i]->getTerminator();
    for (unsigned Op = 0, e = TI->getNumSuccessors(); Op != e; ++Op)
      if (TI->getSuccessor(Op) == Header)
        TI->setSuccessor(Op, BEBlock);
  }

  //===--- Update all analyses which we must preserve now -----------------===//

  // Update Loop Information - we know that this block is now in the current
  // loop and all parent loops.
  L->addBasicBlockToLoop(BEBlock, *LI);

  // Update dominator information
  DT->splitBlock(BEBlock);

  return BEBlock;
}

/// \brief Simplify one loop and queue further loops for simplification.
static bool simplifyOneLoop(Loop *L, SmallVectorImpl<Loop *> &Worklist,
                            DominatorTree *DT, LoopInfo *LI,
                            ScalarEvolution *SE, AssumptionCache *AC,
                            bool PreserveLCSSA) {
  bool Changed = false;
ReprocessLoop:

  // Check to see that no blocks (other than the header) in this loop have
  // predecessors that are not in the loop. This is not valid for natural
  // loops, but can occur if the blocks are unreachable. Since they are
  // unreachable we can just shamelessly delete those CFG edges!
  for (Loop::block_iterator BB = L->block_begin(), E = L->block_end();
       BB != E; ++BB) {
    if (*BB == L->getHeader()) continue;

    SmallPtrSet<BasicBlock*, 4> BadPreds;
    for (pred_iterator PI = pred_begin(*BB),
         PE = pred_end(*BB); PI != PE; ++PI) {
      BasicBlock *P = *PI;
      if (!L->contains(P))
        BadPreds.insert(P);
    }

    // Delete each unique out-of-loop (and thus dead) predecessor.
    for (BasicBlock *P : BadPreds) {

      DEBUG(dbgs() << "LoopSimplify: Deleting edge from dead predecessor "
                   << P->getName() << "\n");

      // Zap the dead pred's terminator and replace it with unreachable.
      TerminatorInst *TI = P->getTerminator();
      changeToUnreachable(TI, /*UseLLVMTrap=*/false);
      Changed = true;
    }
  }

  // If there are exiting blocks with branches on undef, resolve the undef in
  // the direction which will exit the loop. This will help simplify loop
  // trip count computations.
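  // For example (sketch), "br i1 undef, label %in_loop, label %out" in an
  // exiting block is rewritten so that its condition becomes the constant
  // that selects the successor outside of the loop.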
  SmallVector<BasicBlock*, 8> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);
  for (BasicBlock *ExitingBlock : ExitingBlocks)
    if (BranchInst *BI = dyn_cast<BranchInst>(ExitingBlock->getTerminator()))
      if (BI->isConditional()) {
        if (UndefValue *Cond = dyn_cast<UndefValue>(BI->getCondition())) {

          DEBUG(dbgs() << "LoopSimplify: Resolving \"br i1 undef\" to exit in "
                       << ExitingBlock->getName() << "\n");

          BI->setCondition(ConstantInt::get(Cond->getType(),
                                            !L->contains(BI->getSuccessor(0))));

          // This may make the loop analyzable, force SCEV recomputation.
          if (SE)
            SE->forgetLoop(L);

          Changed = true;
        }
      }

  // Does the loop already have a preheader? If so, don't insert one.
  BasicBlock *Preheader = L->getLoopPreheader();
  if (!Preheader) {
    Preheader = InsertPreheaderForLoop(L, DT, LI, PreserveLCSSA);
    if (Preheader) {
      ++NumInserted;
      Changed = true;
    }
  }

  // Next, check to make sure that all exit nodes of the loop only have
  // predecessors that are inside of the loop. This check guarantees that the
  // loop preheader/header will dominate the exit blocks. If the exit block has
  // predecessors from outside of the loop, split the edge now.
  SmallVector<BasicBlock*, 8> ExitBlocks;
  L->getExitBlocks(ExitBlocks);

  SmallSetVector<BasicBlock *, 8> ExitBlockSet(ExitBlocks.begin(),
                                               ExitBlocks.end());
  for (BasicBlock *ExitBlock : ExitBlockSet) {
    if (any_of(predecessors(ExitBlock),
               [L](BasicBlock *BB) { return !L->contains(BB); })) {
      rewriteLoopExitBlock(L, ExitBlock, DT, LI, PreserveLCSSA);
      ++NumInserted;
      Changed = true;
    }
  }

  // If the header has more than two predecessors at this point (from the
  // preheader and from multiple backedges), we must adjust the loop.
  BasicBlock *LoopLatch = L->getLoopLatch();
  if (!LoopLatch) {
    // If this is really a nested loop, rip it out into a child loop. Don't do
    // this for loops with a giant number of backedges, just factor them into a
    // common backedge instead.
    if (L->getNumBackEdges() < 8) {
      if (Loop *OuterL =
              separateNestedLoop(L, Preheader, DT, LI, SE, PreserveLCSSA, AC)) {
        ++NumNested;
        // Enqueue the outer loop as it should be processed next in our
        // depth-first nest walk.
        Worklist.push_back(OuterL);

        // This is a big restructuring change, reprocess the whole loop.
        Changed = true;
        // GCC doesn't eliminate this tail recursion.
        // FIXME: It isn't clear we can't rely on LLVM to TRE this.
        goto ReprocessLoop;
      }
    }

    // If we either couldn't, or didn't want to, identify nesting of the loops,
    // insert a new block that all backedges target, then make it jump to the
    // loop header.
    LoopLatch = insertUniqueBackedgeBlock(L, Preheader, DT, LI);
    if (LoopLatch) {
      ++NumInserted;
      Changed = true;
    }
  }

  const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();

  // Scan over the PHI nodes in the loop header. Since they now have only two
  // incoming values (the loop is canonicalized), we may have simplified the PHI
  // down to 'X = phi [X, Y]', which should be replaced with 'Y'.
  PHINode *PN;
  for (BasicBlock::iterator I = L->getHeader()->begin();
       (PN = dyn_cast<PHINode>(I++)); )
    if (Value *V = SimplifyInstruction(PN, DL, nullptr, DT, AC)) {
      if (SE) SE->forgetValue(PN);
      PN->replaceAllUsesWith(V);
      PN->eraseFromParent();
    }

  // If this loop has multiple exits and the exits all go to the same
  // block, attempt to merge the exits. This helps several passes, such
  // as LoopRotation, which do not support loops with multiple exits.
  // SimplifyCFG also does this (and this code uses the same utility
  // function), however this code is loop-aware, where SimplifyCFG is
  // not. That gives it the advantage of being able to hoist
  // loop-invariant instructions out of the way to open up more
  // opportunities, and the disadvantage of having the responsibility
  // to preserve dominator information.
  bool UniqueExit = true;
  if (!ExitBlocks.empty())
    for (unsigned i = 1, e = ExitBlocks.size(); i != e; ++i)
      if (ExitBlocks[i] != ExitBlocks[0]) {
        UniqueExit = false;
        break;
      }
  if (UniqueExit) {
    for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) {
      BasicBlock *ExitingBlock = ExitingBlocks[i];
      if (!ExitingBlock->getSinglePredecessor()) continue;
      BranchInst *BI = dyn_cast<BranchInst>(ExitingBlock->getTerminator());
      if (!BI || !BI->isConditional()) continue;
      CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition());
      if (!CI || CI->getParent() != ExitingBlock) continue;

      // Attempt to hoist out all instructions except for the
      // comparison and the branch.
      bool AllInvariant = true;
      bool AnyInvariant = false;
      for (BasicBlock::iterator I = ExitingBlock->begin(); &*I != BI; ) {
        Instruction *Inst = &*I++;
        // Skip debug info intrinsics.
        if (isa<DbgInfoIntrinsic>(Inst))
          continue;
        if (Inst == CI)
          continue;
        if (!L->makeLoopInvariant(Inst, AnyInvariant,
                                  Preheader ? Preheader->getTerminator()
                                            : nullptr)) {
          AllInvariant = false;
          break;
        }
      }
      if (AnyInvariant) {
        Changed = true;
        // The loop disposition of all SCEV expressions that depend on any
        // hoisted values has also changed.
        if (SE)
          SE->forgetLoopDispositions(L);
      }
      if (!AllInvariant) continue;

      // The block has now been cleared of all instructions except for
      // a comparison and a conditional branch. SimplifyCFG may be able
      // to fold it now.
      if (!FoldBranchToCommonDest(BI))
        continue;

      // Success. The block is now dead, so remove it from the loop,
      // update the dominator tree and delete it.
      DEBUG(dbgs() << "LoopSimplify: Eliminating exiting block "
                   << ExitingBlock->getName() << "\n");

      // Notify ScalarEvolution before deleting this block. Currently we assume
      // the parent loop doesn't change (splitting edges doesn't count). If
      // blocks, CFG edges, or other values in the parent loop change, then we
      // need to call forgetLoop() for the parent instead.
      if (SE)
        SE->forgetLoop(L);

      assert(pred_begin(ExitingBlock) == pred_end(ExitingBlock));
      Changed = true;
      LI->removeBlock(ExitingBlock);

      DomTreeNode *Node = DT->getNode(ExitingBlock);
      const std::vector<DomTreeNodeBase<BasicBlock> *> &Children =
        Node->getChildren();
      while (!Children.empty()) {
        DomTreeNode *Child = Children.front();
        DT->changeImmediateDominator(Child, Node->getIDom());
      }
      DT->eraseNode(ExitingBlock);

      BI->getSuccessor(0)->removePredecessor(
          ExitingBlock, /* DontDeleteUselessPHIs */ PreserveLCSSA);
      BI->getSuccessor(1)->removePredecessor(
          ExitingBlock, /* DontDeleteUselessPHIs */ PreserveLCSSA);
      ExitingBlock->eraseFromParent();
    }
  }

  return Changed;
}

bool llvm::simplifyLoop(Loop *L, DominatorTree *DT, LoopInfo *LI,
                        ScalarEvolution *SE, AssumptionCache *AC,
                        bool PreserveLCSSA) {
  bool Changed = false;

  // Worklist maintains our depth-first queue of loops in this nest to process.
  SmallVector<Loop *, 4> Worklist;
  Worklist.push_back(L);

  // Walk the worklist from front to back, pushing newly found sub loops onto
  // the back. This will let us process loops from back to front in depth-first
  // order. We can use this simple process because loops form a tree.
  for (unsigned Idx = 0; Idx != Worklist.size(); ++Idx) {
    Loop *L2 = Worklist[Idx];
    Worklist.append(L2->begin(), L2->end());
  }

  while (!Worklist.empty())
    Changed |= simplifyOneLoop(Worklist.pop_back_val(), Worklist, DT, LI, SE,
                               AC, PreserveLCSSA);

  return Changed;
}

namespace {
|
|
|
|
struct LoopSimplify : public FunctionPass {
|
|
|
|
static char ID; // Pass identification, replacement for typeid
|
|
|
|
LoopSimplify() : FunctionPass(ID) {
|
|
|
|
initializeLoopSimplifyPass(*PassRegistry::getPassRegistry());
|
|
|
|
}
|
2003-10-13 08:37:13 +08:00
|
|
|
|
2014-03-05 17:10:37 +08:00
|
|
|
bool runOnFunction(Function &F) override;
|
2005-04-22 07:48:37 +08:00
|
|
|
|
2014-03-05 17:10:37 +08:00
|
|
|
void getAnalysisUsage(AnalysisUsage &AU) const override {
|
2015-01-04 20:03:27 +08:00
|
|
|
AU.addRequired<AssumptionCacheTracker>();

      // We need loop information to identify the loops...
      AU.addRequired<DominatorTreeWrapperPass>();
      AU.addPreserved<DominatorTreeWrapperPass>();

      AU.addRequired<LoopInfoWrapperPass>();
      AU.addPreserved<LoopInfoWrapperPass>();
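
      // Declare the analyses this pass keeps up to date so the pass manager
      // does not discard and recompute them.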
      AU.addPreserved<BasicAAWrapperPass>();
      AU.addPreserved<AAResultsWrapperPass>();
      AU.addPreserved<GlobalsAAWrapperPass>();
      AU.addPreserved<ScalarEvolutionWrapperPass>();
      AU.addPreserved<SCEVAAWrapperPass>();
      AU.addPreservedID(LCSSAID);
      AU.addPreserved<DependenceAnalysisWrapperPass>();
      AU.addPreservedID(BreakCriticalEdgesID); // No critical edges added.
    }

    /// verifyAnalysis() - Verify LoopSimplifyForm's guarantees.
    void verifyAnalysis() const override;
  };
}

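// Register the pass and its required analyses with the legacy PassRegistry so
// it can be constructed by name (e.g. -loop-simplify from 'opt') and scheduled
// together with the analyses it declares above.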
char LoopSimplify::ID = 0;
INITIALIZE_PASS_BEGIN(LoopSimplify, "loop-simplify",
                "Canonicalize natural loops", false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_END(LoopSimplify, "loop-simplify",
                "Canonicalize natural loops", false, false)

// Publicly exposed interface to pass...
char &llvm::LoopSimplifyID = LoopSimplify::ID;
Pass *llvm::createLoopSimplifyPass() { return new LoopSimplify(); }
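
// A minimal usage sketch (not part of this file): clients building a legacy
// pipeline can schedule the pass through the factory above, e.g.
//
//   legacy::PassManager PM;                  // assumed client-side manager
//   PM.add(llvm::createLoopSimplifyPass());
//
// Passes that merely require loop-simplify form instead declare
// AU.addRequiredID(LoopSimplifyID) in their getAnalysisUsage().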

/// runOnFunction - Run down all loops in the CFG (recursively, but we could do
/// it in any convenient order) inserting preheaders...
///
bool LoopSimplify::runOnFunction(Function &F) {
  bool Changed = false;
  LoopInfo *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  DominatorTree *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
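  // ScalarEvolution is optional: only pick it up if some earlier pass already
  // computed it, so the loop changes below can keep it consistent.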
  auto *SEWP = getAnalysisIfAvailable<ScalarEvolutionWrapperPass>();
  ScalarEvolution *SE = SEWP ? &SEWP->getSE() : nullptr;
  AssumptionCache *AC =
      &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);

  bool PreserveLCSSA = mustPreserveAnalysisID(LCSSAID);
#ifndef NDEBUG
  if (PreserveLCSSA) {
    assert(DT && "DT not available.");
    assert(LI && "LI not available.");
    bool InLCSSA =
        all_of(*LI, [&](Loop *L) { return L->isRecursivelyLCSSAForm(*DT); });
    assert(InLCSSA && "Requested to preserve LCSSA, but it's already broken.");
  }
#endif

  // Simplify each loop nest in the function.
  for (LoopInfo::iterator I = LI->begin(), E = LI->end(); I != E; ++I)
    Changed |= simplifyLoop(*I, DT, LI, SE, AC, PreserveLCSSA);

#ifndef NDEBUG
  if (PreserveLCSSA) {
    bool InLCSSA =
        all_of(*LI, [&](Loop *L) { return L->isRecursivelyLCSSAForm(*DT); });
    assert(InLCSSA && "LCSSA is broken after loop-simplify.");
  }
#endif
  return Changed;
}

PreservedAnalyses LoopSimplifyPass::run(Function &F,
                                        AnalysisManager<Function> &AM) {
  bool Changed = false;
  LoopInfo *LI = &AM.getResult<LoopAnalysis>(F);
  DominatorTree *DT = &AM.getResult<DominatorTreeAnalysis>(F);
  ScalarEvolution *SE = AM.getCachedResult<ScalarEvolutionAnalysis>(F);
  AssumptionCache *AC = &AM.getResult<AssumptionAnalysis>(F);

  // FIXME: This pass should verify that the loops on which it's operating
  // are in canonical SSA form, and that the pass itself preserves this form.
  for (LoopInfo::iterator I = LI->begin(), E = LI->end(); I != E; ++I)
    Changed |= simplifyLoop(*I, DT, LI, SE, AC, true /* PreserveLCSSA */);

  if (!Changed)
    return PreservedAnalyses::all();
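
  // Otherwise report exactly which analyses stay valid; the analysis manager
  // invalidates anything not named in the preserved set.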
  PreservedAnalyses PA;
  PA.preserve<DominatorTreeAnalysis>();
  PA.preserve<LoopAnalysis>();
  PA.preserve<BasicAA>();
  PA.preserve<GlobalsAA>();
  PA.preserve<SCEVAA>();
  PA.preserve<ScalarEvolutionAnalysis>();
  PA.preserve<DependenceAnalysis>();
  return PA;
}
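
// A minimal sketch of running the new-pass-manager flavor (the manager and
// analysis registration are assumed to exist on the client side):
//
//   FunctionPassManager FPM;
//   FPM.addPass(LoopSimplifyPass());
//   FPM.run(F, FAM);  // FAM: a FunctionAnalysisManager with analyses registered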

// FIXME: Restore this code when we re-enable verification in verifyAnalysis
// below.
#if 0
static void verifyLoop(Loop *L) {
  // Verify subloops.
  for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I)
    verifyLoop(*I);

  // It used to be possible to just assert L->isLoopSimplifyForm(), however
  // with the introduction of indirectbr, there are now cases where it's
  // not possible to transform a loop as necessary. We can at least check
  // that there is an indirectbr near any time there's trouble.

  // Indirectbr can interfere with preheader and unique backedge insertion.
  if (!L->getLoopPreheader() || !L->getLoopLatch()) {
    bool HasIndBrPred = false;
    for (pred_iterator PI = pred_begin(L->getHeader()),
         PE = pred_end(L->getHeader()); PI != PE; ++PI)
      if (isa<IndirectBrInst>((*PI)->getTerminator())) {
        HasIndBrPred = true;
        break;
      }
    assert(HasIndBrPred &&
           "LoopSimplify has no excuse for missing loop header info!");
    (void)HasIndBrPred;
  }

  // Indirectbr can interfere with exit block canonicalization.
  if (!L->hasDedicatedExits()) {
    bool HasIndBrExiting = false;
    SmallVector<BasicBlock*, 8> ExitingBlocks;
    L->getExitingBlocks(ExitingBlocks);
    for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) {
      if (isa<IndirectBrInst>((ExitingBlocks[i])->getTerminator())) {
        HasIndBrExiting = true;
        break;
      }
    }

    assert(HasIndBrExiting &&
           "LoopSimplify has no excuse for missing exit block info!");
    (void)HasIndBrExiting;
  }
}
#endif

void LoopSimplify::verifyAnalysis() const {
  // FIXME: This routine is being called mid-way through the loop pass manager
  // as loop passes destroy this analysis. That's actually fine, but we have no
  // way of expressing that here. Once all of the passes that destroy this are
  // hoisted out of the loop pass manager we can add back verification here.
#if 0
  for (LoopInfo::iterator I = LI->begin(), E = LI->end(); I != E; ++I)
    verifyLoop(*I);
#endif
}