//===- MergedLoadStoreMotion.cpp - merge and hoist/sink load/stores -------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
//! \file
//! \brief This pass performs merges of loads and stores on both sides of a
//  diamond (hammock). It hoists the loads and sinks the stores.
//
// The algorithm iteratively hoists two loads to the same address out of a
// diamond (hammock) and merges them into a single load in the header.
// Similarly, it sinks and merges two stores to the tail block (footer). The
// algorithm iterates over the instructions of one side of the diamond and
// attempts to find a matching load/store on the other side. It hoists / sinks
// when it thinks it is safe to do so. This optimization helps with, e.g.,
// hiding load latencies, triggering if-conversion, and reducing static code
// size.
//
// NOTE: This code no longer performs load hoisting; it is subsumed by GVNHoist.
//
//===----------------------------------------------------------------------===//
//
//
// Example:
// Diamond shaped code before merge:
//
//            header:
//                     br %cond, label %if.then, label %if.else
//                             +                    +
//                            +                      +
//                           +                        +
//            if.then:                            if.else:
//               %lt = load %addr_l                  %le = load %addr_l
//               <use %lt>                           <use %le>
//               <...>                               <...>
//               store %st, %addr_s                  store %se, %addr_s
//               br label %if.end                    br label %if.end
//                           +                        +
//                            +                      +
//                             +                    +
//            if.end ("footer"):
//               <...>
//
// Diamond shaped code after merge:
//
//            header:
//                %l = load %addr_l
//                br %cond, label %if.then, label %if.else
//                             +                    +
//                            +                      +
//                           +                        +
//            if.then:                            if.else:
//               <use %l>                            <use %l>
//               <...>                               <...>
//               br label %if.end                    br label %if.end
//                           +                        +
//                            +                      +
//                             +                    +
//            if.end ("footer"):
//               %s.sink = phi [%st, if.then], [%se, if.else]
//               <...>
//               store %s.sink, %addr_s
//               <...>
//
//
//===----------------------- TODO -----------------------------------------===//
//
// 1) Generalize to regions other than diamonds
// 2) Be more aggressive merging memory operations
// Note that both changes require register pressure control
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/MergedLoadStoreMotion.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/GlobalsModRef.h"
|
2014-07-19 03:13:09 +08:00
|
|
|
#include "llvm/Analysis/Loads.h"
|
|
|
|
#include "llvm/Analysis/MemoryBuiltins.h"
|
|
|
|
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
|
2016-06-12 10:11:20 +08:00
|
|
|
#include "llvm/Analysis/ValueTracking.h"
|
2014-07-19 03:13:09 +08:00
|
|
|
#include "llvm/IR/Metadata.h"
|
|
|
|
#include "llvm/IR/PatternMatch.h"
|
|
|
|
#include "llvm/Support/Debug.h"
|
2015-03-24 02:45:56 +08:00
|
|
|
#include "llvm/Support/raw_ostream.h"
|
2016-04-18 17:17:29 +08:00
|
|
|
#include "llvm/Transforms/Scalar.h"
|
2014-07-19 03:13:09 +08:00
|
|
|
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
|
2015-10-07 07:24:35 +08:00
|
|
|
|
2014-07-19 03:13:09 +08:00
|
|
|
using namespace llvm;
|
|
|
|
|
|
|
|
#define DEBUG_TYPE "mldst-motion"
|
|
|
|
|
2016-07-10 19:28:51 +08:00
|
|
|
namespace {
//===----------------------------------------------------------------------===//
//                     MergedLoadStoreMotion Pass
//===----------------------------------------------------------------------===//
class MergedLoadStoreMotion {
  MemoryDependenceResults *MD = nullptr;
  AliasAnalysis *AA = nullptr;

  // The mergeStores algorithm could have Size0 * Size1 complexity, where
  // Size0 and Size1 are the #instructions on the two sides of the diamond.
  // The constant chosen here is arbitrary. Compile-time control is enforced
  // by the check Size0 * Size1 < MagicCompileTimeControl.
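  // Illustrative arithmetic (hypothetical sizes): two sides of 25 instructions
  // each give 25 * 25 = 625 > 250, so scanning is cut short, while a small
  // diamond of 10 * 10 = 100 is examined in full.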
  const int MagicCompileTimeControl = 250;

public:
  bool run(Function &F, MemoryDependenceResults *MD, AliasAnalysis &AA);

private:
  ///
  /// \brief Remove instruction from parent and update memory dependence
  /// analysis.
  ///
  void removeInstruction(Instruction *Inst);
  BasicBlock *getDiamondTail(BasicBlock *BB);
  bool isDiamondHead(BasicBlock *BB);
  // Routines for sinking stores
  StoreInst *canSinkFromBlock(BasicBlock *BB, StoreInst *SI);
  PHINode *getPHIOperand(BasicBlock *BB, StoreInst *S0, StoreInst *S1);
  bool isStoreSinkBarrierInRange(const Instruction &Start,
                                 const Instruction &End, MemoryLocation Loc);
  bool sinkStore(BasicBlock *BB, StoreInst *SinkCand, StoreInst *ElseInst);
  bool mergeStores(BasicBlock *BB);
};
} // end anonymous namespace

///
/// \brief Remove instruction from parent and update memory dependence analysis.
///
void MergedLoadStoreMotion::removeInstruction(Instruction *Inst) {
  // Notify the memory dependence analysis.
  if (MD) {
    MD->removeInstruction(Inst);
    if (auto *LI = dyn_cast<LoadInst>(Inst))
      MD->invalidateCachedPointerInfo(LI->getPointerOperand());
    if (Inst->getType()->isPtrOrPtrVectorTy()) {
      MD->invalidateCachedPointerInfo(Inst);
    }
  }
  Inst->eraseFromParent();
}

///
/// \brief Return tail block of a diamond.
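/// Because isDiamondHead guarantees that both successors of \p BB share a
/// single common successor, following successor 0 (as done below) reaches
/// the tail.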
///
BasicBlock *MergedLoadStoreMotion::getDiamondTail(BasicBlock *BB) {
  assert(isDiamondHead(BB) && "Basic block is not head of a diamond");
  return BB->getTerminator()->getSuccessor(0)->getSingleSuccessor();
}

///
/// \brief True when BB is the head of a diamond (hammock)
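/// That is, BB ends in a conditional branch whose two successors each have
/// BB as their single predecessor and share a single common successor (the
/// tail); triangles, where one successor branches directly to the tail, are
/// rejected.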
///
bool MergedLoadStoreMotion::isDiamondHead(BasicBlock *BB) {
  if (!BB)
    return false;
  auto *BI = dyn_cast<BranchInst>(BB->getTerminator());
  if (!BI || !BI->isConditional())
    return false;

  BasicBlock *Succ0 = BI->getSuccessor(0);
  BasicBlock *Succ1 = BI->getSuccessor(1);

  if (!Succ0->getSinglePredecessor())
    return false;
  if (!Succ1->getSinglePredecessor())
    return false;

  BasicBlock *Succ0Succ = Succ0->getSingleSuccessor();
  BasicBlock *Succ1Succ = Succ1->getSingleSuccessor();
  // Ignore triangles.
  if (!Succ0Succ || !Succ1Succ || Succ0Succ != Succ1Succ)
    return false;
  return true;
}

///
/// \brief True when an instruction is a sink barrier for a store
/// to the location \p Loc
///
/// Whenever an instruction could possibly read or modify the value being
/// stored, or could otherwise prevent the store from happening, it is
/// considered a sink barrier.
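///
/// For example, any instruction in the range that may throw, or that may
/// read or modify memory at \p Loc, blocks sinking the store past it.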
///
bool MergedLoadStoreMotion::isStoreSinkBarrierInRange(const Instruction &Start,
                                                      const Instruction &End,
                                                      MemoryLocation Loc) {
  for (const Instruction &Inst :
       make_range(Start.getIterator(), End.getIterator()))
    if (Inst.mayThrow())
      return true;
  return AA->canInstructionRangeModRef(Start, End, Loc, MRI_ModRef);
}

///
/// \brief Check if \p BB1 contains a store to the same address as \p Store0
///
/// \return The store in \p BB1 when it is safe to sink. Otherwise return
/// null.
///
StoreInst *MergedLoadStoreMotion::canSinkFromBlock(BasicBlock *BB1,
                                                   StoreInst *Store0) {
  DEBUG(dbgs() << "can Sink? : "; Store0->dump(); dbgs() << "\n");
  BasicBlock *BB0 = Store0->getParent();
  for (Instruction &Inst : reverse(*BB1)) {
    auto *Store1 = dyn_cast<StoreInst>(&Inst);
    if (!Store1)
      continue;

    MemoryLocation Loc0 = MemoryLocation::get(Store0);
    MemoryLocation Loc1 = MemoryLocation::get(Store1);
    if (AA->isMustAlias(Loc0, Loc1) && Store0->isSameOperationAs(Store1) &&
        !isStoreSinkBarrierInRange(*Store1->getNextNode(), BB1->back(), Loc1) &&
        !isStoreSinkBarrierInRange(*Store0->getNextNode(), BB0->back(), Loc0)) {
      return Store1;
    }
  }
  return nullptr;
}

///
/// \brief Create a PHI node in BB for the operands of S0 and S1
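///
/// For the example in the file header this yields something like
///   %s.sink = phi [%st, %if.then], [%se, %if.else]
/// in the footer (names are illustrative).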
///
PHINode *MergedLoadStoreMotion::getPHIOperand(BasicBlock *BB, StoreInst *S0,
                                              StoreInst *S1) {
  // Create a phi if the values mismatch.
  Value *Opd1 = S0->getValueOperand();
  Value *Opd2 = S1->getValueOperand();
  if (Opd1 == Opd2)
    return nullptr;

  auto *NewPN = PHINode::Create(Opd1->getType(), 2, Opd2->getName() + ".sink",
                                &BB->front());
  NewPN->addIncoming(Opd1, S0->getParent());
  NewPN->addIncoming(Opd2, S1->getParent());
  if (MD && NewPN->getType()->getScalarType()->isPointerTy())
    MD->invalidateCachedPointerInfo(NewPN);
  return NewPN;
}

///
/// \brief Merge two stores to the same address and sink them into \p BB
///
/// Also sinks the GEP instruction computing the store address
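///
/// Sketch (schematic IR, names illustrative): when both sides compute the
/// address with an identical, single-use GEP,
///   if.then:  %a0 = getelementptr %base, 4    store %st, %a0
///   if.else:  %a1 = getelementptr %base, 4    store %se, %a1
/// a single clone of the GEP and one store of the merged (possibly PHI'd)
/// value are placed in the footer.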
///
bool MergedLoadStoreMotion::sinkStore(BasicBlock *BB, StoreInst *S0,
                                      StoreInst *S1) {
  // Only one definition?
  auto *A0 = dyn_cast<Instruction>(S0->getPointerOperand());
  auto *A1 = dyn_cast<Instruction>(S1->getPointerOperand());
  if (A0 && A1 && A0->isIdenticalTo(A1) && A0->hasOneUse() &&
      (A0->getParent() == S0->getParent()) && A1->hasOneUse() &&
      (A1->getParent() == S1->getParent()) && isa<GetElementPtrInst>(A0)) {
    DEBUG(dbgs() << "Sink Instruction into BB \n"; BB->dump();
          dbgs() << "Instruction Left\n"; S0->dump(); dbgs() << "\n";
          dbgs() << "Instruction Right\n"; S1->dump(); dbgs() << "\n");
    // Find the insertion point in the footer.
    BasicBlock::iterator InsertPt = BB->getFirstInsertionPt();
    // Intersect optional metadata.
    S0->andIRFlags(S1);
    S0->dropUnknownNonDebugMetadata();

    // Create the new store to be inserted at the join point.
    StoreInst *SNew = cast<StoreInst>(S0->clone());
    Instruction *ANew = A0->clone();
    SNew->insertBefore(&*InsertPt);
    ANew->insertBefore(SNew);

    assert(S0->getParent() == A0->getParent());
    assert(S1->getParent() == A1->getParent());

    // New PHI operand? Use it.
    if (PHINode *NewPN = getPHIOperand(BB, S0, S1))
      SNew->setOperand(0, NewPN);
    removeInstruction(S0);
    removeInstruction(S1);
    A0->replaceAllUsesWith(ANew);
    removeInstruction(A0);
    A1->replaceAllUsesWith(ANew);
    removeInstruction(A1);
    return true;
  }
  return false;
}

///
/// \brief True when two stores are equivalent and can sink into the footer
///
/// Starting from a diamond tail block, iterate over the instructions in one
/// predecessor block and try to match a store in the second predecessor.
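///
/// The first predecessor is scanned bottom-up, so the stores closest to the
/// tail are tried first; after a successful sink the scan restarts from the
/// bottom of the block because the iterators are likely stale.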
///
bool MergedLoadStoreMotion::mergeStores(BasicBlock *T) {

  bool MergedStores = false;
  assert(T && "Footer of a diamond cannot be empty");

  pred_iterator PI = pred_begin(T), E = pred_end(T);
  assert(PI != E);
  BasicBlock *Pred0 = *PI;
  ++PI;
  BasicBlock *Pred1 = *PI;
  ++PI;
  // Is T really the tail block of a diamond/hammock?
  if (Pred0 == Pred1)
    return false; // No.
  if (PI != E)
    return false; // No. More than 2 predecessors.

  // #Instructions in Pred1 for compile-time control.
  int Size1 = Pred1->size();
  int NStores = 0;

  for (BasicBlock::reverse_iterator RBI = Pred0->rbegin(), RBE = Pred0->rend();
       RBI != RBE;) {

    Instruction *I = &*RBI;
    ++RBI;

    // Don't sink non-simple (atomic, volatile) stores.
    auto *S0 = dyn_cast<StoreInst>(I);
    if (!S0 || !S0->isSimple())
      continue;

    ++NStores;
    if (NStores * Size1 >= MagicCompileTimeControl)
      break;
    if (StoreInst *S1 = canSinkFromBlock(Pred1, S0)) {
      bool Res = sinkStore(T, S0, S1);
      MergedStores |= Res;
      // Don't attempt to sink below stores that had to stick around.
      // But after removal of a store and some of its feeding
      // instructions, search again from the beginning since the iterator
      // is likely stale at this point.
      if (!Res)
        break;
      RBI = Pred0->rbegin();
      RBE = Pred0->rend();
      DEBUG(dbgs() << "Search again\n"; Instruction *I = &*RBI; I->dump());
    }
  }
  return MergedStores;
}

bool MergedLoadStoreMotion::run(Function &F, MemoryDependenceResults *MD,
                                AliasAnalysis &AA) {
  this->MD = MD;
  this->AA = &AA;

  bool Changed = false;
  DEBUG(dbgs() << "Instruction Merger\n");

  // Look for diamond heads and merge the stores of their two sides into the
  // footer.
  for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE;) {
    BasicBlock *BB = &*FI++;

    // Sink stores out of diamonds when possible.
    if (isDiamondHead(BB)) {
      Changed |= mergeStores(getDiamondTail(BB));
    }
  }
  return Changed;
}

namespace {
class MergedLoadStoreMotionLegacyPass : public FunctionPass {
public:
  static char ID; // Pass identification, replacement for typeid
  MergedLoadStoreMotionLegacyPass() : FunctionPass(ID) {
    initializeMergedLoadStoreMotionLegacyPassPass(
        *PassRegistry::getPassRegistry());
  }

  ///
  /// \brief Run the transformation for each function
  ///
  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;
    MergedLoadStoreMotion Impl;
    auto *MDWP = getAnalysisIfAvailable<MemoryDependenceWrapperPass>();
    return Impl.run(F, MDWP ? &MDWP->getMemDep() : nullptr,
                    getAnalysis<AAResultsWrapperPass>().getAAResults());
  }

private:
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<AAResultsWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addPreserved<MemoryDependenceWrapperPass>();
  }
};

char MergedLoadStoreMotionLegacyPass::ID = 0;
} // anonymous namespace

///
/// \brief createMergedLoadStoreMotionPass - The public interface to this file.
///
FunctionPass *llvm::createMergedLoadStoreMotionPass() {
  return new MergedLoadStoreMotionLegacyPass();
}

INITIALIZE_PASS_BEGIN(MergedLoadStoreMotionLegacyPass, "mldst-motion",
                      "MergedLoadStoreMotion", false, false)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(MergedLoadStoreMotionLegacyPass, "mldst-motion",
                    "MergedLoadStoreMotion", false, false)

PreservedAnalyses
MergedLoadStoreMotionPass::run(Function &F, FunctionAnalysisManager &AM) {
  MergedLoadStoreMotion Impl;
  auto *MD = AM.getCachedResult<MemoryDependenceAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  if (!Impl.run(F, MD, AA))
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  PA.preserve<GlobalsAA>();
  PA.preserve<MemoryDependenceAnalysis>();
  return PA;
}