//===-- Sink.cpp - Code Sinking -------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass moves instructions into successor blocks, when possible, so that
// they aren't executed on paths where their results aren't needed.
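//
// For illustration only (an example written for this comment, not taken from
// the LLVM test suite): given
//
//   define i32 @f(i32 %a, i32 %b, i1 %c) {
//   entry:
//     %x = add i32 %a, %b
//     br i1 %c, label %then, label %else
//   then:
//     ret i32 %x
//   else:
//     ret i32 0
//   }
//
// %x is used only in %then, so the pass sinks the add out of %entry into
// %then, and the add is no longer executed on the path through %else.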
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/Sink.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
using namespace llvm;

#define DEBUG_TYPE "sink"

STATISTIC(NumSunk, "Number of instructions sunk");
STATISTIC(NumSinkIter, "Number of sinking iterations");

/// AllUsesDominatedByBlock - Return true if all uses of the specified value
/// occur in blocks dominated by the specified block.
static bool AllUsesDominatedByBlock(Instruction *Inst, BasicBlock *BB,
                                    DominatorTree &DT) {
  // Ignoring debug uses is necessary so debug info doesn't affect the code.
  // This may leave a referencing dbg_value in the original block, before
  // the definition of the vreg. Dwarf generator handles this although the
  // user might not get the right info at runtime.
  for (Use &U : Inst->uses()) {
    // Determine the block of the use.
    Instruction *UseInst = cast<Instruction>(U.getUser());
    BasicBlock *UseBlock = UseInst->getParent();
    if (PHINode *PN = dyn_cast<PHINode>(UseInst)) {
      // PHI nodes use the operand in the predecessor block, not the block with
      // the PHI.
      unsigned Num = PHINode::getIncomingValueNumForOperand(U.getOperandNo());
      UseBlock = PN->getIncomingBlock(Num);
    }
    // Check that it dominates.
    if (!DT.dominates(BB, UseBlock))
      return false;
  }
  return true;
}
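
/// isSafeToMove - Return true if Inst may be sunk past the memory-writing
/// instructions recorded so far in Stores. Instructions that write memory are
/// added to Stores and are never sunk themselves; neither are terminators,
/// PHIs, EH pads, instructions that may throw, or convergent calls. Loads and
/// calls are only considered safe if no recorded store may modify a location
/// they access.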
static bool isSafeToMove(Instruction *Inst, AliasAnalysis &AA,
                         SmallPtrSetImpl<Instruction *> &Stores) {

  if (Inst->mayWriteToMemory()) {
    Stores.insert(Inst);
    return false;
  }

  if (LoadInst *L = dyn_cast<LoadInst>(Inst)) {
    MemoryLocation Loc = MemoryLocation::get(L);
    for (Instruction *S : Stores)
      if (isModSet(AA.getModRefInfo(S, Loc)))
        return false;
  }

  if (isa<TerminatorInst>(Inst) || isa<PHINode>(Inst) || Inst->isEHPad() ||
      Inst->mayThrow())
    return false;

  if (auto CS = CallSite(Inst)) {
    // Convergent operations cannot be made control-dependent on additional
    // values.
    if (CS.hasFnAttr(Attribute::Convergent))
      return false;

    for (Instruction *S : Stores)
      if (isModSet(AA.getModRefInfo(S, CS)))
        return false;
  }

  return true;
}

/// IsAcceptableTarget - Return true if it is possible to sink the instruction
/// into the specified basic block.
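/// The candidate block must not be the instruction's own block and must not
/// end in an exceptional terminator. Unless the candidate's only predecessor
/// is the instruction's current block, the instruction must not read memory,
/// the current block must dominate the candidate, and the candidate must not
/// lie in a loop different from the current block's loop. In all cases every
/// use of the instruction must be dominated by the candidate block.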
static bool IsAcceptableTarget(Instruction *Inst, BasicBlock *SuccToSinkTo,
                               DominatorTree &DT, LoopInfo &LI) {
  assert(Inst && "Instruction to be sunk is null");
  assert(SuccToSinkTo && "Candidate sink target is null");

  // It is not possible to sink an instruction into its own block. This can
  // happen with loops.
  if (Inst->getParent() == SuccToSinkTo)
    return false;

  // It's never legal to sink an instruction into a block which terminates in an
  // EH-pad.
  if (SuccToSinkTo->getTerminator()->isExceptionalTerminator())
    return false;

  // If the block has multiple predecessors, this would introduce computation
  // on different code paths. We could split the critical edge, but for now we
  // just punt.
  // FIXME: Split critical edges if not backedges.
  if (SuccToSinkTo->getUniquePredecessor() != Inst->getParent()) {
    // We cannot sink a load across a critical edge - there may be stores in
    // other code paths.
    if (Inst->mayReadFromMemory())
      return false;

    // We don't want to sink across a critical edge if we don't dominate the
    // successor. We could be introducing calculations to new code paths.
    if (!DT.dominates(Inst->getParent(), SuccToSinkTo))
      return false;

    // Don't sink instructions into a loop.
    Loop *succ = LI.getLoopFor(SuccToSinkTo);
    Loop *cur = LI.getLoopFor(Inst->getParent());
    if (succ != nullptr && succ != cur)
      return false;
  }

  // Finally, check that all the uses of the instruction are actually
  // dominated by the candidate
  return AllUsesDominatedByBlock(Inst, SuccToSinkTo, DT);
}

/// SinkInstruction - Determine whether it is safe to sink the specified
/// instruction out of its current block into a successor, and move it if so.
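/// Candidate targets are the current block's children in the dominator tree,
/// followed by its successors; the first candidate accepted by
/// IsAcceptableTarget, if any, receives the instruction.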
static bool SinkInstruction(Instruction *Inst,
                            SmallPtrSetImpl<Instruction *> &Stores,
                            DominatorTree &DT, LoopInfo &LI, AAResults &AA) {

  // Don't sink static alloca instructions. CodeGen assumes allocas outside the
  // entry block are dynamically sized stack objects.
  if (AllocaInst *AI = dyn_cast<AllocaInst>(Inst))
    if (AI->isStaticAlloca())
      return false;

  // Check if it's safe to move the instruction.
  if (!isSafeToMove(Inst, AA, Stores))
    return false;

  // FIXME: This should include support for sinking instructions within the
  // block they are currently in to shorten the live ranges. We often get
  // instructions sunk into the top of a large block, but it would be better to
  // also sink them down before their first use in the block. This xform has to
  // be careful not to *increase* register pressure though, e.g. sinking
  // "x = y + z" down if it kills y and z would increase the live ranges of y
  // and z and only shrink the live range of x.

  // SuccToSinkTo - This is the successor to sink this instruction to, once we
  // decide.
  BasicBlock *SuccToSinkTo = nullptr;

  // Instructions can only be sunk if all their uses are in blocks
  // dominated by one of the successors.
  // Look at all the dominated blocks and see if we can sink it in one.
  DomTreeNode *DTN = DT.getNode(Inst->getParent());
  for (DomTreeNode::iterator I = DTN->begin(), E = DTN->end();
       I != E && SuccToSinkTo == nullptr; ++I) {
    BasicBlock *Candidate = (*I)->getBlock();
    // A node always immediate-dominates its children on the dominator
    // tree.
    if (IsAcceptableTarget(Inst, Candidate, DT, LI))
      SuccToSinkTo = Candidate;
  }

  // If no suitable dominated block was found, look at all the successors and
  // decide which one we should sink to, if any.
  for (succ_iterator I = succ_begin(Inst->getParent()),
       E = succ_end(Inst->getParent()); I != E && !SuccToSinkTo; ++I) {
    if (IsAcceptableTarget(Inst, *I, DT, LI))
      SuccToSinkTo = *I;
  }

  // If we couldn't find a block to sink to, ignore this instruction.
  if (!SuccToSinkTo)
    return false;

  LLVM_DEBUG(dbgs() << "Sink" << *Inst << " (";
             Inst->getParent()->printAsOperand(dbgs(), false); dbgs() << " -> ";
             SuccToSinkTo->printAsOperand(dbgs(), false); dbgs() << ")\n");

  // Move the instruction.
  Inst->moveBefore(&*SuccToSinkTo->getFirstInsertionPt());
  return true;
}
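
/// ProcessBlock - Try to sink each instruction in BB into one of BB's
/// successors. The block is walked bottom-up, recording the memory-writing
/// instructions already visited so that a load is never sunk past a store
/// that may clobber it. Returns true if any instruction was moved.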
static bool ProcessBlock(BasicBlock &BB, DominatorTree &DT, LoopInfo &LI,
                         AAResults &AA) {
  // Can't sink anything out of a block that has fewer than two successors.
  if (BB.getTerminator()->getNumSuccessors() <= 1) return false;

  // Don't bother sinking code out of unreachable blocks. In addition to being
  // unprofitable, it can also lead to infinite looping, because in an
  // unreachable loop there may be nowhere to stop.
  if (!DT.isReachableFromEntry(&BB)) return false;

  bool MadeChange = false;

  // Walk the basic block bottom-up. Remember if we saw a store.
  BasicBlock::iterator I = BB.end();
  --I;
  bool ProcessedBegin = false;
  SmallPtrSet<Instruction *, 8> Stores;
  do {
    Instruction *Inst = &*I; // The instruction to sink.

    // Predecrement I (if it's not begin) so that it isn't invalidated by
    // sinking.
    ProcessedBegin = I == BB.begin();
    if (!ProcessedBegin)
      --I;

    if (isa<DbgInfoIntrinsic>(Inst))
      continue;

    if (SinkInstruction(Inst, Stores, DT, LI, AA)) {
      ++NumSunk;
      MadeChange = true;
    }

    // If we just processed the first instruction in the block, we're done.
  } while (!ProcessedBegin);

  return MadeChange;
}
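
/// iterativelySinkInstructions - Run ProcessBlock over every block of F until
/// no more changes are made; sinking one instruction can expose further
/// sinking opportunities, so a single pass is not always enough.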
static bool iterativelySinkInstructions(Function &F, DominatorTree &DT,
                                        LoopInfo &LI, AAResults &AA) {
  bool MadeChange, EverMadeChange = false;

  do {
    MadeChange = false;
    LLVM_DEBUG(dbgs() << "Sinking iteration " << NumSinkIter << "\n");
    // Process all basic blocks.
    for (BasicBlock &I : F)
      MadeChange |= ProcessBlock(I, DT, LI, AA);
    EverMadeChange |= MadeChange;
    NumSinkIter++;
  } while (MadeChange);

  return EverMadeChange;
}
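
// New pass manager entry point. The pass only moves instructions between
// existing blocks, so when it changes the function it can still report that
// all CFG-only analyses are preserved.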
PreservedAnalyses SinkingPass::run(Function &F, FunctionAnalysisManager &AM) {
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &LI = AM.getResult<LoopAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);

  if (!iterativelySinkInstructions(F, DT, LI, AA))
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  return PA;
}

namespace {
  class SinkingLegacyPass : public FunctionPass {
  public:
    static char ID; // Pass identification
    SinkingLegacyPass() : FunctionPass(ID) {
      initializeSinkingLegacyPassPass(*PassRegistry::getPassRegistry());
    }

    bool runOnFunction(Function &F) override {
      auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
      auto &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
      auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();

      return iterativelySinkInstructions(F, DT, LI, AA);
    }

    void getAnalysisUsage(AnalysisUsage &AU) const override {
      AU.setPreservesCFG();
      FunctionPass::getAnalysisUsage(AU);
      AU.addRequired<AAResultsWrapperPass>();
      AU.addRequired<DominatorTreeWrapperPass>();
      AU.addRequired<LoopInfoWrapperPass>();
      AU.addPreserved<DominatorTreeWrapperPass>();
      AU.addPreserved<LoopInfoWrapperPass>();
    }
  };
} // end anonymous namespace

char SinkingLegacyPass::ID = 0;
INITIALIZE_PASS_BEGIN(SinkingLegacyPass, "sink", "Code sinking", false, false)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(SinkingLegacyPass, "sink", "Code sinking", false, false)

FunctionPass *llvm::createSinkingPass() { return new SinkingLegacyPass(); }
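
// Typical ways to exercise this pass (a brief usage sketch, not part of the
// pass itself): with the new pass manager it can be added to a function
// pipeline, e.g.
//
//   FunctionPassManager FPM;
//   FPM.addPass(SinkingPass());
//
// and on the command line it is registered as "sink", e.g.
//
//   opt -passes=sink input.ll -S
//
// (or "opt -sink" with the legacy pass manager).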