2017-08-17 06:07:40 +08:00
|
|
|
//===- MemorySSA.cpp - Memory SSA Builder ---------------------------------===//
|
2016-02-03 06:46:49 +08:00
|
|
|
//
|
|
|
|
// The LLVM Compiler Infrastructure
|
|
|
|
//
|
|
|
|
// This file is distributed under the University of Illinois Open Source
|
|
|
|
// License. See LICENSE.TXT for details.
|
|
|
|
//
|
2017-08-17 06:07:40 +08:00
|
|
|
//===----------------------------------------------------------------------===//
|
2016-02-03 06:46:49 +08:00
|
|
|
//
|
|
|
|
// This file implements the MemorySSA class.
|
|
|
|
//
|
2017-08-17 06:07:40 +08:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2017-04-12 04:06:36 +08:00
|
|
|
#include "llvm/Analysis/MemorySSA.h"
|
2016-02-03 06:46:49 +08:00
|
|
|
#include "llvm/ADT/DenseMap.h"
|
2017-08-17 06:07:40 +08:00
|
|
|
#include "llvm/ADT/DenseMapInfo.h"
|
2016-02-03 06:46:49 +08:00
|
|
|
#include "llvm/ADT/DenseSet.h"
|
|
|
|
#include "llvm/ADT/DepthFirstIterator.h"
|
2017-08-17 06:07:40 +08:00
|
|
|
#include "llvm/ADT/Hashing.h"
|
|
|
|
#include "llvm/ADT/None.h"
|
|
|
|
#include "llvm/ADT/Optional.h"
|
2016-02-03 06:46:49 +08:00
|
|
|
#include "llvm/ADT/STLExtras.h"
|
|
|
|
#include "llvm/ADT/SmallPtrSet.h"
|
2017-08-17 06:07:40 +08:00
|
|
|
#include "llvm/ADT/SmallVector.h"
|
|
|
|
#include "llvm/ADT/iterator.h"
|
|
|
|
#include "llvm/ADT/iterator_range.h"
|
2016-02-03 06:46:49 +08:00
|
|
|
#include "llvm/Analysis/AliasAnalysis.h"
|
|
|
|
#include "llvm/Analysis/IteratedDominanceFrontier.h"
|
|
|
|
#include "llvm/Analysis/MemoryLocation.h"
|
2018-04-30 22:59:11 +08:00
|
|
|
#include "llvm/Config/llvm-config.h"
|
2016-02-03 06:46:49 +08:00
|
|
|
#include "llvm/IR/AssemblyAnnotationWriter.h"
|
2017-08-17 06:07:40 +08:00
|
|
|
#include "llvm/IR/BasicBlock.h"
|
|
|
|
#include "llvm/IR/CallSite.h"
|
2016-02-03 06:46:49 +08:00
|
|
|
#include "llvm/IR/Dominators.h"
|
2017-08-17 06:07:40 +08:00
|
|
|
#include "llvm/IR/Function.h"
|
|
|
|
#include "llvm/IR/Instruction.h"
|
|
|
|
#include "llvm/IR/Instructions.h"
|
2016-02-03 06:46:49 +08:00
|
|
|
#include "llvm/IR/IntrinsicInst.h"
|
2017-08-17 06:07:40 +08:00
|
|
|
#include "llvm/IR/Intrinsics.h"
|
2016-02-03 06:46:49 +08:00
|
|
|
#include "llvm/IR/LLVMContext.h"
|
2017-08-17 06:07:40 +08:00
|
|
|
#include "llvm/IR/PassManager.h"
|
|
|
|
#include "llvm/IR/Use.h"
|
|
|
|
#include "llvm/Pass.h"
|
|
|
|
#include "llvm/Support/AtomicOrdering.h"
|
|
|
|
#include "llvm/Support/Casting.h"
|
|
|
|
#include "llvm/Support/CommandLine.h"
|
|
|
|
#include "llvm/Support/Compiler.h"
|
2016-02-03 06:46:49 +08:00
|
|
|
#include "llvm/Support/Debug.h"
|
2017-08-17 06:07:40 +08:00
|
|
|
#include "llvm/Support/ErrorHandling.h"
|
2016-02-03 06:46:49 +08:00
|
|
|
#include "llvm/Support/FormattedStream.h"
|
2017-08-17 06:07:40 +08:00
|
|
|
#include "llvm/Support/raw_ostream.h"
|
2016-02-03 06:46:49 +08:00
|
|
|
#include <algorithm>
|
2017-08-17 06:07:40 +08:00
|
|
|
#include <cassert>
|
|
|
|
#include <iterator>
|
|
|
|
#include <memory>
|
|
|
|
#include <utility>
|
2016-02-03 06:46:49 +08:00
|
|
|
|
|
|
|
using namespace llvm;
|
2017-08-17 06:07:40 +08:00
|
|
|
|
|
|
|
#define DEBUG_TYPE "memoryssa"
|
|
|
|
|
2016-06-15 05:19:40 +08:00
|
|
|
// Legacy-PM registration for the MemorySSA analysis wrapper. The trailing
// arguments mark it as not-CFG-only (false) and as an analysis pass (true).
// It requires dominators (to place/walk defs) and alias analysis (to decide
// clobbering), both declared as initialization dependencies below.
INITIALIZE_PASS_BEGIN(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
                      true)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
                    true)
|
2016-02-03 06:46:49 +08:00
|
|
|
|
2016-07-07 05:20:47 +08:00
|
|
|
// Legacy-PM registration for the printer pass (-print-memoryssa). It only
// consumes the MemorySSA analysis, so that is its sole dependency.
INITIALIZE_PASS_BEGIN(MemorySSAPrinterLegacyPass, "print-memoryssa",
                      "Memory SSA Printer", false, false)
INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
INITIALIZE_PASS_END(MemorySSAPrinterLegacyPass, "print-memoryssa",
                    "Memory SSA Printer", false, false)
|
|
|
|
|
2016-08-03 00:24:03 +08:00
|
|
|
// Cap on how many stores/phis the use-optimizing walker will examine before
// giving up on a query; tunable via -memssa-check-limit.
//
// NOTE: the two adjacent string literals below previously concatenated to
// "MemorySSAwill consider" — a trailing space has been added so the -help
// text reads correctly.
static cl::opt<unsigned> MaxCheckLimit(
    "memssa-check-limit", cl::Hidden, cl::init(100),
    cl::desc("The maximum number of stores/phis MemorySSA "
             "will consider trying to walk past (default = 100)"));
|
|
|
|
|
2016-07-07 05:20:47 +08:00
|
|
|
// When set (-verify-memoryssa), the legacy printer pass additionally runs
// MemorySSA's self-verification before printing.
static cl::opt<bool>
    VerifyMemorySSA("verify-memoryssa", cl::init(false), cl::Hidden,
                    cl::desc("Verify MemorySSA in legacy printer pass."));
|
|
|
|
|
2016-02-03 06:46:49 +08:00
|
|
|
namespace llvm {
|
2017-08-17 06:07:40 +08:00
|
|
|
|
2018-05-01 23:54:18 +08:00
|
|
|
/// An assembly annotator class to print Memory SSA information in
|
2016-02-03 06:46:49 +08:00
|
|
|
/// comments.
|
|
|
|
class MemorySSAAnnotatedWriter : public AssemblyAnnotationWriter {
|
|
|
|
friend class MemorySSA;
|
2017-08-17 06:07:40 +08:00
|
|
|
|
2016-02-03 06:46:49 +08:00
|
|
|
const MemorySSA *MSSA;
|
|
|
|
|
|
|
|
public:
|
|
|
|
MemorySSAAnnotatedWriter(const MemorySSA *M) : MSSA(M) {}
|
|
|
|
|
2017-08-17 06:07:40 +08:00
|
|
|
void emitBasicBlockStartAnnot(const BasicBlock *BB,
|
|
|
|
formatted_raw_ostream &OS) override {
|
2016-02-03 06:46:49 +08:00
|
|
|
if (MemoryAccess *MA = MSSA->getMemoryAccess(BB))
|
|
|
|
OS << "; " << *MA << "\n";
|
|
|
|
}
|
|
|
|
|
2017-08-17 06:07:40 +08:00
|
|
|
void emitInstructionAnnot(const Instruction *I,
|
|
|
|
formatted_raw_ostream &OS) override {
|
2016-02-03 06:46:49 +08:00
|
|
|
if (MemoryAccess *MA = MSSA->getMemoryAccess(I))
|
|
|
|
OS << "; " << *MA << "\n";
|
|
|
|
}
|
|
|
|
};
|
2017-08-17 06:07:40 +08:00
|
|
|
|
|
|
|
} // end namespace llvm
|
[MemorySSA] Update to the new shiny walker.
This patch updates MemorySSA's use-optimizing walker to be more
accurate and, in some cases, faster.
Essentially, this changed our core walking algorithm from a
cache-as-you-go DFS to an iteratively expanded DFS, with all of the
caching happening at the end. Said expansion happens when we hit a Phi,
P; we'll try to do the smallest amount of work possible to see if
optimizing above that Phi is legal in the first place. If so, we'll
expand the search to see if we can optimize to the next phi, etc.
An iteratively expanded DFS lets us potentially quit earlier (because we
don't assume that we can optimize above all phis) than our old walker.
Additionally, because we don't cache as we go, we can now optimize above
loops.
As an added bonus, this patch adds a ton of verification (if
EXPENSIVE_CHECKS are enabled), so finding bugs is easier.
Differential Revision: https://reviews.llvm.org/D21777
llvm-svn: 275940
2016-07-19 09:29:15 +08:00
|
|
|
|
2016-08-03 05:57:52 +08:00
|
|
|
namespace {
|
2017-08-17 06:07:40 +08:00
|
|
|
|
2016-08-03 05:57:52 +08:00
|
|
|
/// Our current alias analysis API differentiates heavily between calls and
|
|
|
|
/// non-calls, and functions called on one usually assert on the other.
|
|
|
|
/// This class encapsulates the distinction to simplify other code that wants
|
|
|
|
/// "Memory affecting instructions and related data" to use as a key.
|
|
|
|
/// For example, this class is used as a densemap key in the use optimizer.
|
|
|
|
class MemoryLocOrCall {
|
|
|
|
public:
|
2017-08-17 06:07:40 +08:00
|
|
|
bool IsCall = false;
|
|
|
|
|
|
|
|
MemoryLocOrCall() = default;
|
2016-08-03 05:57:52 +08:00
|
|
|
MemoryLocOrCall(MemoryUseOrDef *MUD)
|
|
|
|
: MemoryLocOrCall(MUD->getMemoryInst()) {}
|
2016-10-13 11:23:33 +08:00
|
|
|
MemoryLocOrCall(const MemoryUseOrDef *MUD)
|
|
|
|
: MemoryLocOrCall(MUD->getMemoryInst()) {}
|
2016-08-03 05:57:52 +08:00
|
|
|
|
|
|
|
MemoryLocOrCall(Instruction *Inst) {
|
|
|
|
if (ImmutableCallSite(Inst)) {
|
|
|
|
IsCall = true;
|
|
|
|
CS = ImmutableCallSite(Inst);
|
|
|
|
} else {
|
|
|
|
IsCall = false;
|
|
|
|
// There is no such thing as a memorylocation for a fence inst, and it is
|
|
|
|
// unique in that regard.
|
|
|
|
if (!isa<FenceInst>(Inst))
|
|
|
|
Loc = MemoryLocation::get(Inst);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-08-17 06:07:40 +08:00
|
|
|
explicit MemoryLocOrCall(const MemoryLocation &Loc) : Loc(Loc) {}
|
2016-08-03 05:57:52 +08:00
|
|
|
|
|
|
|
ImmutableCallSite getCS() const {
|
|
|
|
assert(IsCall);
|
|
|
|
return CS;
|
|
|
|
}
|
2017-08-17 06:07:40 +08:00
|
|
|
|
2016-08-03 05:57:52 +08:00
|
|
|
MemoryLocation getLoc() const {
|
|
|
|
assert(!IsCall);
|
|
|
|
return Loc;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool operator==(const MemoryLocOrCall &Other) const {
|
|
|
|
if (IsCall != Other.IsCall)
|
|
|
|
return false;
|
|
|
|
|
2018-03-29 08:54:39 +08:00
|
|
|
if (!IsCall)
|
|
|
|
return Loc == Other.Loc;
|
|
|
|
|
|
|
|
if (CS.getCalledValue() != Other.CS.getCalledValue())
|
|
|
|
return false;
|
|
|
|
|
2018-03-29 11:12:03 +08:00
|
|
|
return CS.arg_size() == Other.CS.arg_size() &&
|
|
|
|
std::equal(CS.arg_begin(), CS.arg_end(), Other.CS.arg_begin());
|
2016-08-03 05:57:52 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
private:
|
2016-10-22 12:15:41 +08:00
|
|
|
union {
|
2017-01-26 04:56:19 +08:00
|
|
|
ImmutableCallSite CS;
|
|
|
|
MemoryLocation Loc;
|
2016-10-22 12:15:41 +08:00
|
|
|
};
|
2016-08-03 05:57:52 +08:00
|
|
|
};
|
2017-08-17 06:07:40 +08:00
|
|
|
|
|
|
|
} // end anonymous namespace
|
2016-08-03 05:57:52 +08:00
|
|
|
|
|
|
|
namespace llvm {
|
2017-08-17 06:07:40 +08:00
|
|
|
|
2016-08-03 05:57:52 +08:00
|
|
|
template <> struct DenseMapInfo<MemoryLocOrCall> {
|
|
|
|
static inline MemoryLocOrCall getEmptyKey() {
|
|
|
|
return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getEmptyKey());
|
|
|
|
}
|
2017-08-17 06:07:40 +08:00
|
|
|
|
2016-08-03 05:57:52 +08:00
|
|
|
static inline MemoryLocOrCall getTombstoneKey() {
|
|
|
|
return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getTombstoneKey());
|
|
|
|
}
|
2017-08-17 06:07:40 +08:00
|
|
|
|
2016-08-03 05:57:52 +08:00
|
|
|
static unsigned getHashValue(const MemoryLocOrCall &MLOC) {
|
2018-03-29 08:54:39 +08:00
|
|
|
if (!MLOC.IsCall)
|
|
|
|
return hash_combine(
|
|
|
|
MLOC.IsCall,
|
|
|
|
DenseMapInfo<MemoryLocation>::getHashValue(MLOC.getLoc()));
|
|
|
|
|
|
|
|
hash_code hash =
|
|
|
|
hash_combine(MLOC.IsCall, DenseMapInfo<const Value *>::getHashValue(
|
|
|
|
MLOC.getCS().getCalledValue()));
|
|
|
|
|
|
|
|
for (const Value *Arg : MLOC.getCS().args())
|
|
|
|
hash = hash_combine(hash, DenseMapInfo<const Value *>::getHashValue(Arg));
|
|
|
|
return hash;
|
2016-08-03 05:57:52 +08:00
|
|
|
}
|
2017-08-17 06:07:40 +08:00
|
|
|
|
2016-08-03 05:57:52 +08:00
|
|
|
static bool isEqual(const MemoryLocOrCall &LHS, const MemoryLocOrCall &RHS) {
|
|
|
|
return LHS == RHS;
|
|
|
|
}
|
|
|
|
};
|
2016-08-03 08:01:46 +08:00
|
|
|
|
2017-08-17 06:07:40 +08:00
|
|
|
} // end namespace llvm
|
|
|
|
|
2016-08-04 03:39:54 +08:00
|
|
|
/// This does one-way checks to see if Use could theoretically be hoisted above
|
|
|
|
/// MayClobber. This will not check the other way around.
|
|
|
|
///
|
|
|
|
/// This assumes that, for the purposes of MemorySSA, Use comes directly after
|
|
|
|
/// MayClobber, with no potentially clobbering operations in between them.
|
|
|
|
/// (Where potentially clobbering ops are memory barriers, aliased stores, etc.)
|
2017-12-23 03:54:03 +08:00
|
|
|
static bool areLoadsReorderable(const LoadInst *Use,
|
|
|
|
const LoadInst *MayClobber) {
|
2016-08-04 03:39:54 +08:00
|
|
|
bool VolatileUse = Use->isVolatile();
|
|
|
|
bool VolatileClobber = MayClobber->isVolatile();
|
|
|
|
// Volatile operations may never be reordered with other volatile operations.
|
|
|
|
if (VolatileUse && VolatileClobber)
|
2017-12-23 03:54:03 +08:00
|
|
|
return false;
|
|
|
|
// Otherwise, volatile doesn't matter here. From the language reference:
|
|
|
|
// 'optimizers may change the order of volatile operations relative to
|
|
|
|
// non-volatile operations.'"
|
2016-08-04 03:39:54 +08:00
|
|
|
|
|
|
|
// If a load is seq_cst, it cannot be moved above other loads. If its ordering
|
|
|
|
// is weaker, it can be moved above other loads. We just need to be sure that
|
|
|
|
// MayClobber isn't an acquire load, because loads can't be moved above
|
|
|
|
// acquire loads.
|
|
|
|
//
|
|
|
|
// Note that this explicitly *does* allow the free reordering of monotonic (or
|
|
|
|
// weaker) loads of the same address.
|
|
|
|
bool SeqCstUse = Use->getOrdering() == AtomicOrdering::SequentiallyConsistent;
|
|
|
|
bool MayClobberIsAcquire = isAtLeastOrStrongerThan(MayClobber->getOrdering(),
|
|
|
|
AtomicOrdering::Acquire);
|
2017-12-23 03:54:03 +08:00
|
|
|
return !(SeqCstUse || MayClobberIsAcquire);
|
2016-08-04 03:39:54 +08:00
|
|
|
}
|
|
|
|
|
2018-03-09 02:03:14 +08:00
|
|
|
namespace {
|
|
|
|
|
|
|
|
// Result bundle for instructionClobbersQuery: whether the def clobbers the
// queried access, plus the alias relation that decision was based on. AR is
// only meaningful when IsClobber is true (callers ignore it otherwise).
struct ClobberAlias {
  bool IsClobber;
  Optional<AliasResult> AR;
};
|
|
|
|
|
|
|
|
} // end anonymous namespace
|
|
|
|
|
|
|
|
// Return a pair of {IsClobber (bool), AR (AliasResult)}. It relies on AR being
// ignored if IsClobber = false.
//
// Decides whether the instruction defining MD clobbers the memory described by
// UseLoc (or the call UseInst, when the use is a call). Special-cases marker
// intrinsics, call/call interactions, and load/load reordering before falling
// back to a plain mod/ref query.
static ClobberAlias instructionClobbersQuery(MemoryDef *MD,
                                             const MemoryLocation &UseLoc,
                                             const Instruction *UseInst,
                                             AliasAnalysis &AA) {
  Instruction *DefInst = MD->getMemoryInst();
  assert(DefInst && "Defining instruction not actually an instruction");
  // Non-null exactly when the use is a call.
  ImmutableCallSite UseCS(UseInst);
  Optional<AliasResult> AR;

  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(DefInst)) {
    // These intrinsics will show up as affecting memory, but they are just
    // markers.
    switch (II->getIntrinsicID()) {
    case Intrinsic::lifetime_start:
      // A lifetime.start never clobbers a call use; for a location use it
      // "clobbers" only when it must-aliases the location (i.e. it starts the
      // lifetime of exactly that memory).
      if (UseCS)
        return {false, NoAlias};
      AR = AA.alias(MemoryLocation(II->getArgOperand(1)), UseLoc);
      return {AR == MustAlias, AR};
    case Intrinsic::lifetime_end:
    case Intrinsic::invariant_start:
    case Intrinsic::invariant_end:
    case Intrinsic::assume:
      // Pure markers: never clobbers.
      return {false, NoAlias};
    default:
      break;
    }
  }

  if (UseCS) {
    // Call use: the def clobbers it if it mods or refs the call's memory.
    ModRefInfo I = AA.getModRefInfo(DefInst, UseCS);
    AR = isMustSet(I) ? MustAlias : MayAlias;
    return {isModOrRefSet(I), AR};
  }

  // Load/load pair: clobbering is purely an ordering/volatility question.
  if (auto *DefLoad = dyn_cast<LoadInst>(DefInst))
    if (auto *UseLoad = dyn_cast<LoadInst>(UseInst))
      return {!areLoadsReorderable(UseLoad, DefLoad), MayAlias};

  // General case: the def clobbers the location only if it may modify it.
  ModRefInfo I = AA.getModRefInfo(DefInst, UseLoc);
  AR = isMustSet(I) ? MustAlias : MayAlias;
  return {isModSet(I), AR};
}
|
[MemorySSA] Update to the new shiny walker.
This patch updates MemorySSA's use-optimizing walker to be more
accurate and, in some cases, faster.
Essentially, this changed our core walking algorithm from a
cache-as-you-go DFS to an iteratively expanded DFS, with all of the
caching happening at the end. Said expansion happens when we hit a Phi,
P; we'll try to do the smallest amount of work possible to see if
optimizing above that Phi is legal in the first place. If so, we'll
expand the search to see if we can optimize to the next phi, etc.
An iteratively expanded DFS lets us potentially quit earlier (because we
don't assume that we can optimize above all phis) than our old walker.
Additionally, because we don't cache as we go, we can now optimize above
loops.
As an added bonus, this patch adds a ton of verification (if
EXPENSIVE_CHECKS are enabled), so finding bugs is easier.
Differential Revision: https://reviews.llvm.org/D21777
llvm-svn: 275940
2016-07-19 09:29:15 +08:00
|
|
|
|
2018-03-09 02:03:14 +08:00
|
|
|
/// MemoryLocOrCall-keyed convenience overload; unwraps the key and forwards
/// to the location-based query above.
static ClobberAlias instructionClobbersQuery(MemoryDef *MD,
                                             const MemoryUseOrDef *MU,
                                             const MemoryLocOrCall &UseMLOC,
                                             AliasAnalysis &AA) {
  // FIXME: This is a temporary hack to allow a single instructionClobbersQuery
  // to exist while MemoryLocOrCall is pushed through places. Calls carry no
  // MemoryLocation, so they get an empty one.
  const MemoryLocation UseLoc =
      UseMLOC.IsCall ? MemoryLocation() : UseMLOC.getLoc();
  return instructionClobbersQuery(MD, UseLoc, MU->getMemoryInst(), AA);
}
|
|
|
|
|
2016-10-13 09:39:10 +08:00
|
|
|
// Return true when MD may alias MU, return false otherwise.
//
// Thin public wrapper over instructionClobbersQuery, discarding the alias
// result and keeping only the clobber bit.
bool MemorySSAUtil::defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU,
                                        AliasAnalysis &AA) {
  return instructionClobbersQuery(MD, MU, MemoryLocOrCall(MU), AA).IsClobber;
}
|
|
|
|
|
|
|
|
namespace {
|
2017-08-17 06:07:40 +08:00
|
|
|
|
2016-10-13 09:39:10 +08:00
|
|
|
/// Bookkeeping for a single upward clobber-walk query: what we started from,
/// and what we are asking about.
struct UpwardsMemoryQuery {
  // True if our original query started off as a call
  bool IsCall = false;
  // The pointer location we started the query with. This will be empty if
  // IsCall is true.
  MemoryLocation StartingLoc;
  // This is the instruction we were querying about.
  const Instruction *Inst = nullptr;
  // The MemoryAccess we actually got called with, used to test local domination
  const MemoryAccess *OriginalAccess = nullptr;
  // Alias relation for the answer; starts at the conservative MayAlias.
  // NOTE(review): presumably refined by the walker when it finds a clobber —
  // the code that updates it is outside this chunk; confirm against the walker.
  Optional<AliasResult> AR = MayAlias;

  UpwardsMemoryQuery() = default;

  UpwardsMemoryQuery(const Instruction *Inst, const MemoryAccess *Access)
      : IsCall(ImmutableCallSite(Inst)), Inst(Inst), OriginalAccess(Access) {
    // Only non-calls have a starting MemoryLocation.
    if (!IsCall)
      StartingLoc = MemoryLocation::get(Inst);
  }
};
|
|
|
|
|
2017-08-17 06:07:40 +08:00
|
|
|
} // end anonymous namespace
|
|
|
|
|
2016-10-13 09:39:10 +08:00
|
|
|
/// Returns true when MD is a lifetime.end intrinsic whose operand must-aliases
/// Loc, i.e. the lifetime of exactly that memory ends at MD.
static bool lifetimeEndsAt(MemoryDef *MD, const MemoryLocation &Loc,
                           AliasAnalysis &AA) {
  Instruction *Inst = MD->getMemoryInst();
  // Anything other than a lifetime.end intrinsic can't end a lifetime here.
  auto *II = dyn_cast<IntrinsicInst>(Inst);
  if (!II || II->getIntrinsicID() != Intrinsic::lifetime_end)
    return false;
  return AA.isMustAlias(MemoryLocation(II->getArgOperand(1)), Loc);
}
|
|
|
|
|
|
|
|
/// Returns true when the use can be optimized to liveOnEntry without walking:
/// a load of memory that provably never changes.
static bool isUseTriviallyOptimizableToLiveOnEntry(AliasAnalysis &AA,
                                                   const Instruction *I) {
  // Only loads qualify; if the memory can't be changed, loads of it can't be
  // clobbered.
  const auto *LI = dyn_cast<LoadInst>(I);
  if (!LI)
    return false;
  if (LI->getMetadata(LLVMContext::MD_invariant_load))
    return true;
  return AA.pointsToConstantMemory(LI->getPointerOperand());
}
|
|
|
|
|
[MemorySSA] Update to the new shiny walker.
This patch updates MemorySSA's use-optimizing walker to be more
accurate and, in some cases, faster.
Essentially, this changed our core walking algorithm from a
cache-as-you-go DFS to an iteratively expanded DFS, with all of the
caching happening at the end. Said expansion happens when we hit a Phi,
P; we'll try to do the smallest amount of work possible to see if
optimizing above that Phi is legal in the first place. If so, we'll
expand the search to see if we can optimize to the next phi, etc.
An iteratively expanded DFS lets us potentially quit earlier (because we
don't assume that we can optimize above all phis) than our old walker.
Additionally, because we don't cache as we go, we can now optimize above
loops.
As an added bonus, this patch adds a ton of verification (if
EXPENSIVE_CHECKS are enabled), so finding bugs is easier.
Differential Revision: https://reviews.llvm.org/D21777
llvm-svn: 275940
2016-07-19 09:29:15 +08:00
|
|
|
/// Verifies that `Start` is clobbered by `ClobberAt`, and that nothing
/// in between `Start` and `ClobberAt` can clobber `Start`.
///
/// This is meant to be as simple and self-contained as possible. Because it
/// uses no cache, etc., it can be relatively expensive.
///
/// \param Start     The MemoryAccess that we want to walk from.
/// \param ClobberAt A clobber for Start.
/// \param StartLoc  The MemoryLocation for Start.
/// \param MSSA      The MemorySSA instance that Start and ClobberAt belong to.
/// \param Query     The UpwardsMemoryQuery we used for our search.
/// \param AA        The AliasAnalysis we used for our search.
static void LLVM_ATTRIBUTE_UNUSED
checkClobberSanity(MemoryAccess *Start, MemoryAccess *ClobberAt,
                   const MemoryLocation &StartLoc, const MemorySSA &MSSA,
                   const UpwardsMemoryQuery &Query, AliasAnalysis &AA) {
  assert(MSSA.dominates(ClobberAt, Start) && "Clobber doesn't dominate start?");

  // liveOnEntry's only legal clobber is itself.
  if (MSSA.isLiveOnEntryDef(Start)) {
    assert(MSSA.isLiveOnEntryDef(ClobberAt) &&
           "liveOnEntry must clobber itself");
    return;
  }

  bool FoundClobber = false;
  DenseSet<MemoryAccessPair> VisitedPhis;
  SmallVector<MemoryAccessPair, 8> Worklist;
  Worklist.emplace_back(Start, StartLoc);
  // Walk all paths from Start to ClobberAt, while looking for clobbers. If one
  // is found, complain.
  while (!Worklist.empty()) {
    MemoryAccessPair MAP = Worklist.pop_back_val();
    // All we care about is that nothing from Start to ClobberAt clobbers Start.
    // We learn nothing from revisiting nodes.
    if (!VisitedPhis.insert(MAP).second)
      continue;

    for (MemoryAccess *MA : def_chain(MAP.first)) {
      if (MA == ClobberAt) {
        if (auto *MD = dyn_cast<MemoryDef>(MA)) {
          // instructionClobbersQuery isn't essentially free, so don't use `|=`,
          // since it won't let us short-circuit.
          //
          // Also, note that this can't be hoisted out of the `Worklist` loop,
          // since MD may only act as a clobber for 1 of N MemoryLocations.
          FoundClobber = FoundClobber || MSSA.isLiveOnEntryDef(MD);
          if (!FoundClobber) {
            ClobberAlias CA =
                instructionClobbersQuery(MD, MAP.second, Query.Inst, AA);
            if (CA.IsClobber) {
              FoundClobber = true;
              // Not used: CA.AR;
            }
          }
        }
        // Reached the claimed clobber on this path; stop extending it.
        break;
      }

      // We should never hit liveOnEntry, unless it's the clobber.
      assert(!MSSA.isLiveOnEntryDef(MA) && "Hit liveOnEntry before clobber?");

      // Any def encountered strictly before ClobberAt must NOT clobber Start.
      if (auto *MD = dyn_cast<MemoryDef>(MA)) {
        (void)MD;
        assert(!instructionClobbersQuery(MD, MAP.second, Query.Inst, AA)
                    .IsClobber &&
               "Found clobber before reaching ClobberAt!");
        continue;
      }

      // Phis fan the search out along every incoming definition.
      assert(isa<MemoryPhi>(MA));
      Worklist.append(upward_defs_begin({MA, MAP.second}), upward_defs_end());
    }
  }

  // If ClobberAt is a MemoryPhi, we can assume something above it acted as a
  // clobber. Otherwise, `ClobberAt` should've acted as a clobber at some point.
  assert((isa<MemoryPhi>(ClobberAt) || FoundClobber) &&
         "ClobberAt never acted as a clobber");
}
|
|
|
|
|
2017-08-17 06:07:40 +08:00
|
|
|
namespace {
|
|
|
|
|
[MemorySSA] Update to the new shiny walker.
This patch updates MemorySSA's use-optimizing walker to be more
accurate and, in some cases, faster.
Essentially, this changed our core walking algorithm from a
cache-as-you-go DFS to an iteratively expanded DFS, with all of the
caching happening at the end. Said expansion happens when we hit a Phi,
P; we'll try to do the smallest amount of work possible to see if
optimizing above that Phi is legal in the first place. If so, we'll
expand the search to see if we can optimize to the next phi, etc.
An iteratively expanded DFS lets us potentially quit earlier (because we
don't assume that we can optimize above all phis) than our old walker.
Additionally, because we don't cache as we go, we can now optimize above
loops.
As an added bonus, this patch adds a ton of verification (if
EXPENSIVE_CHECKS are enabled), so finding bugs is easier.
Differential Revision: https://reviews.llvm.org/D21777
llvm-svn: 275940
2016-07-19 09:29:15 +08:00
|
|
|
/// Our algorithm for walking (and trying to optimize) clobbers, all wrapped up
|
|
|
|
/// in one class.
|
|
|
|
class ClobberWalker {
|
|
|
|
/// Save a few bytes by using unsigned instead of size_t.
|
|
|
|
using ListIndex = unsigned;
|
|
|
|
|
|
|
|
/// Represents a span of contiguous MemoryDefs, potentially ending in a
|
|
|
|
/// MemoryPhi.
|
|
|
|
struct DefPath {
|
|
|
|
MemoryLocation Loc;
|
|
|
|
// Note that, because we always walk in reverse, Last will always dominate
|
|
|
|
// First. Also note that First and Last are inclusive.
|
|
|
|
MemoryAccess *First;
|
|
|
|
MemoryAccess *Last;
|
|
|
|
Optional<ListIndex> Previous;
|
|
|
|
|
|
|
|
DefPath(const MemoryLocation &Loc, MemoryAccess *First, MemoryAccess *Last,
|
|
|
|
Optional<ListIndex> Previous)
|
|
|
|
: Loc(Loc), First(First), Last(Last), Previous(Previous) {}
|
|
|
|
|
|
|
|
DefPath(const MemoryLocation &Loc, MemoryAccess *Init,
|
|
|
|
Optional<ListIndex> Previous)
|
|
|
|
: DefPath(Loc, Init, Init, Previous) {}
|
|
|
|
};
|
|
|
|
|
|
|
|
const MemorySSA &MSSA;
|
|
|
|
AliasAnalysis &AA;
|
|
|
|
DominatorTree &DT;
|
|
|
|
UpwardsMemoryQuery *Query;
|
|
|
|
|
|
|
|
// Phi optimization bookkeeping
|
|
|
|
SmallVector<DefPath, 32> Paths;
|
|
|
|
DenseSet<ConstMemoryAccessPair> VisitedPhis;
|
|
|
|
|
|
|
|
/// Find the nearest def or phi that `From` can legally be optimized to.
|
2017-04-01 17:01:12 +08:00
|
|
|
const MemoryAccess *getWalkTarget(const MemoryPhi *From) const {
|
[MemorySSA] Update to the new shiny walker.
This patch updates MemorySSA's use-optimizing walker to be more
accurate and, in some cases, faster.
Essentially, this changed our core walking algorithm from a
cache-as-you-go DFS to an iteratively expanded DFS, with all of the
caching happening at the end. Said expansion happens when we hit a Phi,
P; we'll try to do the smallest amount of work possible to see if
optimizing above that Phi is legal in the first place. If so, we'll
expand the search to see if we can optimize to the next phi, etc.
An iteratively expanded DFS lets us potentially quit earlier (because we
don't assume that we can optimize above all phis) than our old walker.
Additionally, because we don't cache as we go, we can now optimize above
loops.
As an added bonus, this patch adds a ton of verification (if
EXPENSIVE_CHECKS are enabled), so finding bugs is easier.
Differential Revision: https://reviews.llvm.org/D21777
llvm-svn: 275940
2016-07-19 09:29:15 +08:00
|
|
|
assert(From->getNumOperands() && "Phi with no operands?");
|
|
|
|
|
|
|
|
BasicBlock *BB = From->getBlock();
|
|
|
|
MemoryAccess *Result = MSSA.getLiveOnEntryDef();
|
|
|
|
DomTreeNode *Node = DT.getNode(BB);
|
|
|
|
while ((Node = Node->getIDom())) {
|
2017-04-01 16:59:45 +08:00
|
|
|
auto *Defs = MSSA.getBlockDefs(Node->getBlock());
|
|
|
|
if (Defs)
|
2017-04-01 17:01:12 +08:00
|
|
|
return &*Defs->rbegin();
|
[MemorySSA] Update to the new shiny walker.
This patch updates MemorySSA's use-optimizing walker to be more
accurate and, in some cases, faster.
Essentially, this changed our core walking algorithm from a
cache-as-you-go DFS to an iteratively expanded DFS, with all of the
caching happening at the end. Said expansion happens when we hit a Phi,
P; we'll try to do the smallest amount of work possible to see if
optimizing above that Phi is legal in the first place. If so, we'll
expand the search to see if we can optimize to the next phi, etc.
An iteratively expanded DFS lets us potentially quit earlier (because we
don't assume that we can optimize above all phis) than our old walker.
Additionally, because we don't cache as we go, we can now optimize above
loops.
As an added bonus, this patch adds a ton of verification (if
EXPENSIVE_CHECKS are enabled), so finding bugs is easier.
Differential Revision: https://reviews.llvm.org/D21777
llvm-svn: 275940
2016-07-19 09:29:15 +08:00
|
|
|
}
|
|
|
|
return Result;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Result of calling walkToPhiOrClobber.
struct UpwardsWalkResult {
  /// The "Result" of the walk. Either a clobber, the last thing we walked, or
  /// both. Include alias info when clobber found.
  MemoryAccess *Result;
  // True when Result is a known clobber of the queried location (including
  // liveOnEntry); false when the walk stopped at a MemoryPhi or at the
  // caller-provided StopAt access without proving a clobber.
  bool IsKnownClobber;
  // Alias relationship for the clobber (e.g. MustAlias for liveOnEntry);
  // MayAlias whenever no clobber was established.
  Optional<AliasResult> AR;
};
|
|
|
|
|
|
|
|
/// Walk to the next Phi or Clobber in the def chain starting at Desc.Last.
|
|
|
|
/// This will update Desc.Last as it walks. It will (optionally) also stop at
|
|
|
|
/// StopAt.
|
|
|
|
///
|
|
|
|
/// This does not test for whether StopAt is a clobber
|
2017-04-01 17:01:12 +08:00
|
|
|
UpwardsWalkResult
|
|
|
|
walkToPhiOrClobber(DefPath &Desc,
|
|
|
|
const MemoryAccess *StopAt = nullptr) const {
|
[MemorySSA] Update to the new shiny walker.
This patch updates MemorySSA's use-optimizing walker to be more
accurate and, in some cases, faster.
Essentially, this changed our core walking algorithm from a
cache-as-you-go DFS to an iteratively expanded DFS, with all of the
caching happening at the end. Said expansion happens when we hit a Phi,
P; we'll try to do the smallest amount of work possible to see if
optimizing above that Phi is legal in the first place. If so, we'll
expand the search to see if we can optimize to the next phi, etc.
An iteratively expanded DFS lets us potentially quit earlier (because we
don't assume that we can optimize above all phis) than our old walker.
Additionally, because we don't cache as we go, we can now optimize above
loops.
As an added bonus, this patch adds a ton of verification (if
EXPENSIVE_CHECKS are enabled), so finding bugs is easier.
Differential Revision: https://reviews.llvm.org/D21777
llvm-svn: 275940
2016-07-19 09:29:15 +08:00
|
|
|
assert(!isa<MemoryUse>(Desc.Last) && "Uses don't exist in my world");
|
|
|
|
|
|
|
|
for (MemoryAccess *Current : def_chain(Desc.Last)) {
|
|
|
|
Desc.Last = Current;
|
|
|
|
if (Current == StopAt)
|
2018-03-09 02:03:14 +08:00
|
|
|
return {Current, false, MayAlias};
|
|
|
|
|
|
|
|
if (auto *MD = dyn_cast<MemoryDef>(Current)) {
|
|
|
|
if (MSSA.isLiveOnEntryDef(MD))
|
|
|
|
return {MD, true, MustAlias};
|
|
|
|
ClobberAlias CA =
|
|
|
|
instructionClobbersQuery(MD, Desc.Loc, Query->Inst, AA);
|
|
|
|
if (CA.IsClobber)
|
|
|
|
return {MD, true, CA.AR};
|
|
|
|
}
|
[MemorySSA] Update to the new shiny walker.
This patch updates MemorySSA's use-optimizing walker to be more
accurate and, in some cases, faster.
Essentially, this changed our core walking algorithm from a
cache-as-you-go DFS to an iteratively expanded DFS, with all of the
caching happening at the end. Said expansion happens when we hit a Phi,
P; we'll try to do the smallest amount of work possible to see if
optimizing above that Phi is legal in the first place. If so, we'll
expand the search to see if we can optimize to the next phi, etc.
An iteratively expanded DFS lets us potentially quit earlier (because we
don't assume that we can optimize above all phis) than our old walker.
Additionally, because we don't cache as we go, we can now optimize above
loops.
As an added bonus, this patch adds a ton of verification (if
EXPENSIVE_CHECKS are enabled), so finding bugs is easier.
Differential Revision: https://reviews.llvm.org/D21777
llvm-svn: 275940
2016-07-19 09:29:15 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
assert(isa<MemoryPhi>(Desc.Last) &&
|
|
|
|
"Ended at a non-clobber that's not a phi?");
|
2018-03-09 02:03:14 +08:00
|
|
|
return {Desc.Last, false, MayAlias};
|
[MemorySSA] Update to the new shiny walker.
This patch updates MemorySSA's use-optimizing walker to be more
accurate and, in some cases, faster.
Essentially, this changed our core walking algorithm from a
cache-as-you-go DFS to an iteratively expanded DFS, with all of the
caching happening at the end. Said expansion happens when we hit a Phi,
P; we'll try to do the smallest amount of work possible to see if
optimizing above that Phi is legal in the first place. If so, we'll
expand the search to see if we can optimize to the next phi, etc.
An iteratively expanded DFS lets us potentially quit earlier (because we
don't assume that we can optimize above all phis) than our old walker.
Additionally, because we don't cache as we go, we can now optimize above
loops.
As an added bonus, this patch adds a ton of verification (if
EXPENSIVE_CHECKS are enabled), so finding bugs is easier.
Differential Revision: https://reviews.llvm.org/D21777
llvm-svn: 275940
2016-07-19 09:29:15 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/// Seed one paused search per incoming (access, location) pair of Phi,
/// recording PriorNode as the predecessor of every new DefPath.
void addSearches(MemoryPhi *Phi, SmallVectorImpl<ListIndex> &PausedSearches,
                 ListIndex PriorNode) {
  // Copy the location out first: the emplace_back below may grow Paths and
  // invalidate references into it.
  MemoryLocation Loc = Paths[PriorNode].Loc;
  for (auto I = upward_defs_begin({Phi, Loc}), E = upward_defs_end(); I != E;
       ++I) {
    const MemoryAccessPair &Incoming = *I;
    // The new path's index is Paths.size() before we append it.
    PausedSearches.push_back(Paths.size());
    Paths.emplace_back(Incoming.second, Incoming.first, PriorNode);
  }
}
|
|
|
|
|
|
|
|
/// Represents a search that terminated after finding a clobber. This clobber
/// may or may not be present in the path of defs from LastNode..SearchStart,
/// since it may have been retrieved from cache.
struct TerminatedPath {
  // The clobbering access this search ended on.
  MemoryAccess *Clobber;
  // Index (into Paths) of the final DefPath node of the search.
  ListIndex LastNode;
};
|
|
|
|
|
|
|
|
/// Get an access that keeps us from optimizing to the given phi.
///
/// PausedSearches is an array of indices into the Paths array. Its incoming
/// value is the indices of searches that stopped at the last phi optimization
/// target. It's left in an unspecified state.
///
/// Terminated accumulates the paths that ended at clobbers which dominate
/// StopWhere (i.e. clobbers that are still legal optimization targets).
///
/// If this returns None, NewPaused is a vector of searches that terminated
/// at StopWhere. Otherwise, NewPaused is left in an unspecified state.
Optional<TerminatedPath>
getBlockingAccess(const MemoryAccess *StopWhere,
                  SmallVectorImpl<ListIndex> &PausedSearches,
                  SmallVectorImpl<ListIndex> &NewPaused,
                  SmallVectorImpl<TerminatedPath> &Terminated) {
  assert(!PausedSearches.empty() && "No searches to continue?");

  // BFS vs DFS really doesn't make a difference here, so just do a DFS with
  // PausedSearches as our stack.
  while (!PausedSearches.empty()) {
    ListIndex PathIndex = PausedSearches.pop_back_val();
    DefPath &Node = Paths[PathIndex];

    // If we've already visited this path with this MemoryLocation, we don't
    // need to do so again.
    //
    // NOTE: That we just drop these paths on the ground makes caching
    // behavior sporadic. e.g. given a diamond:
    //  A
    // B C
    //  D
    //
    // ...If we walk D, B, A, C, we'll only cache the result of phi
    // optimization for A, B, and D; C will be skipped because it dies here.
    // This arguably isn't the worst thing ever, since:
    //   - We generally query things in a top-down order, so if we got below D
    //     without needing cache entries for {C, MemLoc}, then chances are
    //     that those cache entries would end up ultimately unused.
    //   - We still cache things for A, so C only needs to walk up a bit.
    // If this behavior becomes problematic, we can fix without a ton of extra
    // work.
    if (!VisitedPhis.insert({Node.Last, Node.Loc}).second)
      continue;

    UpwardsWalkResult Res = walkToPhiOrClobber(Node, /*StopAt=*/StopWhere);
    if (Res.IsKnownClobber) {
      assert(Res.Result != StopWhere);
      // If this wasn't a cache hit, we hit a clobber when walking. That's a
      // failure.
      TerminatedPath Term{Res.Result, PathIndex};
      if (!MSSA.dominates(Res.Result, StopWhere))
        return Term;

      // Otherwise, it's a valid thing to potentially optimize to.
      Terminated.push_back(Term);
      continue;
    }

    if (Res.Result == StopWhere) {
      // We've hit our target. Save this path off for if we want to continue
      // walking.
      NewPaused.push_back(PathIndex);
      continue;
    }

    // Not a clobber, not the target: must be a phi. Expand the search
    // through each of its incoming values.
    assert(!MSSA.isLiveOnEntryDef(Res.Result) && "liveOnEntry is a clobber");
    addSearches(cast<MemoryPhi>(Res.Result), PausedSearches, PathIndex);
  }

  return None;
}
|
|
|
|
|
|
|
|
template <typename T, typename Walker>
|
|
|
|
struct generic_def_path_iterator
|
|
|
|
: public iterator_facade_base<generic_def_path_iterator<T, Walker>,
|
|
|
|
std::forward_iterator_tag, T *> {
|
2017-08-17 06:07:40 +08:00
|
|
|
generic_def_path_iterator() = default;
|
[MemorySSA] Update to the new shiny walker.
This patch updates MemorySSA's use-optimizing walker to be more
accurate and, in some cases, faster.
Essentially, this changed our core walking algorithm from a
cache-as-you-go DFS to an iteratively expanded DFS, with all of the
caching happening at the end. Said expansion happens when we hit a Phi,
P; we'll try to do the smallest amount of work possible to see if
optimizing above that Phi is legal in the first place. If so, we'll
expand the search to see if we can optimize to the next phi, etc.
An iteratively expanded DFS lets us potentially quit earlier (because we
don't assume that we can optimize above all phis) than our old walker.
Additionally, because we don't cache as we go, we can now optimize above
loops.
As an added bonus, this patch adds a ton of verification (if
EXPENSIVE_CHECKS are enabled), so finding bugs is easier.
Differential Revision: https://reviews.llvm.org/D21777
llvm-svn: 275940
2016-07-19 09:29:15 +08:00
|
|
|
generic_def_path_iterator(Walker *W, ListIndex N) : W(W), N(N) {}
|
|
|
|
|
|
|
|
T &operator*() const { return curNode(); }
|
|
|
|
|
|
|
|
generic_def_path_iterator &operator++() {
|
|
|
|
N = curNode().Previous;
|
|
|
|
return *this;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool operator==(const generic_def_path_iterator &O) const {
|
|
|
|
if (N.hasValue() != O.N.hasValue())
|
|
|
|
return false;
|
|
|
|
return !N.hasValue() || *N == *O.N;
|
|
|
|
}
|
|
|
|
|
|
|
|
private:
|
|
|
|
T &curNode() const { return W->Paths[*N]; }
|
|
|
|
|
2017-08-17 06:07:40 +08:00
|
|
|
Walker *W = nullptr;
|
|
|
|
Optional<ListIndex> N = None;
|
[MemorySSA] Update to the new shiny walker.
This patch updates MemorySSA's use-optimizing walker to be more
accurate and, in some cases, faster.
Essentially, this changed our core walking algorithm from a
cache-as-you-go DFS to an iteratively expanded DFS, with all of the
caching happening at the end. Said expansion happens when we hit a Phi,
P; we'll try to do the smallest amount of work possible to see if
optimizing above that Phi is legal in the first place. If so, we'll
expand the search to see if we can optimize to the next phi, etc.
An iteratively expanded DFS lets us potentially quit earlier (because we
don't assume that we can optimize above all phis) than our old walker.
Additionally, because we don't cache as we go, we can now optimize above
loops.
As an added bonus, this patch adds a ton of verification (if
EXPENSIVE_CHECKS are enabled), so finding bugs is easier.
Differential Revision: https://reviews.llvm.org/D21777
llvm-svn: 275940
2016-07-19 09:29:15 +08:00
|
|
|
};
|
|
|
|
|
|
|
|
using def_path_iterator = generic_def_path_iterator<DefPath, ClobberWalker>;
|
|
|
|
using const_def_path_iterator =
|
|
|
|
generic_def_path_iterator<const DefPath, const ClobberWalker>;
|
|
|
|
|
|
|
|
iterator_range<def_path_iterator> def_path(ListIndex From) {
|
|
|
|
return make_range(def_path_iterator(this, From), def_path_iterator());
|
|
|
|
}
|
|
|
|
|
|
|
|
iterator_range<const_def_path_iterator> const_def_path(ListIndex From) const {
|
|
|
|
return make_range(const_def_path_iterator(this, From),
|
|
|
|
const_def_path_iterator());
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Result of a phi optimization attempt (returned by tryOptimizePhi).
struct OptznResult {
  /// The path that contains our result.
  TerminatedPath PrimaryClobber;
  /// The paths that we can legally cache back from, but that aren't
  /// necessarily the result of the Phi optimization.
  SmallVector<TerminatedPath, 4> OtherClobbers;
};
|
|
|
|
|
|
|
|
/// Map a DefPath reference back to its index in the Paths list.
ListIndex defPathIndex(const DefPath &N) const {
  // Take the address once so the assert reads cleanly. Keep Paths.front()
  // behind the emptiness check's short-circuit.
  const DefPath *Node = &N;
  assert(!Paths.empty() && Node >= &Paths.front() && Node <= &Paths.back() &&
         "Out of bounds DefPath!");
  return Node - &Paths.front();
}
|
|
|
|
|
|
|
|
/// Try to optimize a phi as best as we can. Returns a SmallVector of Paths
/// that act as legal clobbers. Note that this won't return *all* clobbers.
///
/// Phi optimization algorithm tl;dr:
///   - Find the earliest def/phi, A, we can optimize to
///   - Find if all paths from the starting memory access ultimately reach A
///     - If not, optimization isn't possible.
///     - Otherwise, walk from A to another clobber or phi, A'.
///       - If A' is a def, we're done.
///       - If A' is a phi, try to optimize it.
///
/// A path is a series of {MemoryAccess, MemoryLocation} pairs. A path
/// terminates when a MemoryAccess that clobbers said MemoryLocation is found.
OptznResult tryOptimizePhi(MemoryPhi *Phi, MemoryAccess *Start,
                           const MemoryLocation &Loc) {
  // Callers must run resetPhiOptznState between attempts.
  assert(Paths.empty() && VisitedPhis.empty() &&
         "Reset the optimization state.");

  Paths.emplace_back(Loc, Start, Phi, None);
  // Stores how many "valid" optimization nodes we had prior to calling
  // addSearches/getBlockingAccess. Necessary for caching if we had a blocker.
  auto PriorPathsSize = Paths.size();

  SmallVector<ListIndex, 16> PausedSearches;
  SmallVector<ListIndex, 8> NewPaused;
  SmallVector<TerminatedPath, 4> TerminatedPaths;

  addSearches(Phi, PausedSearches, 0);

  // Moves the TerminatedPath with the "most dominated" Clobber to the end of
  // Paths. NOTE: the lambda's parameter deliberately shadows the member
  // `Paths`; it operates on whichever TerminatedPath vector is passed in.
  auto MoveDominatedPathToEnd = [&](SmallVectorImpl<TerminatedPath> &Paths) {
    assert(!Paths.empty() && "Need a path to move");
    auto Dom = Paths.begin();
    for (auto I = std::next(Dom), E = Paths.end(); I != E; ++I)
      if (!MSSA.dominates(I->Clobber, Dom->Clobber))
        Dom = I;
    auto Last = Paths.end() - 1;
    if (Last != Dom)
      std::iter_swap(Last, Dom);
  };

  MemoryPhi *Current = Phi;
  while (true) {
    assert(!MSSA.isLiveOnEntryDef(Current) &&
           "liveOnEntry wasn't treated as a clobber?");

    const auto *Target = getWalkTarget(Current);
    // If a TerminatedPath doesn't dominate Target, then it wasn't a legal
    // optimization for the prior phi.
    assert(all_of(TerminatedPaths, [&](const TerminatedPath &P) {
      return MSSA.dominates(P.Clobber, Target);
    }));

    // FIXME: This is broken, because the Blocker may be reported to be
    // liveOnEntry, and we'll happily wait for that to disappear (read: never)
    // For the moment, this is fine, since we do nothing with blocker info.
    if (Optional<TerminatedPath> Blocker = getBlockingAccess(
            Target, PausedSearches, NewPaused, TerminatedPaths)) {

      // Find the node we started at. We can't search based on N->Last, since
      // we may have gone around a loop with a different MemoryLocation.
      auto Iter = find_if(def_path(Blocker->LastNode), [&](const DefPath &N) {
        return defPathIndex(N) < PriorPathsSize;
      });
      assert(Iter != def_path_iterator());

      DefPath &CurNode = *Iter;
      assert(CurNode.Last == Current);

      // Two things:
      // A. We can't reliably cache all of NewPaused back. Consider a case
      //    where we have two paths in NewPaused; one of which can't optimize
      //    above this phi, whereas the other can. If we cache the second path
      //    back, we'll end up with suboptimal cache entries. We can handle
      //    cases like this a bit better when we either try to find all
      //    clobbers that block phi optimization, or when our cache starts
      //    supporting unfinished searches.
      // B. We can't reliably cache TerminatedPaths back here without doing
      //    extra checks; consider a case like:
      //       T
      //      / \
      //     D   C
      //      \ /
      //       S
      //    Where T is our target, C is a node with a clobber on it, D is a
      //    diamond (with a clobber *only* on the left or right node, N), and
      //    S is our start. Say we walk to D, through the node opposite N
      //    (read: ignoring the clobber), and see a cache entry in the top
      //    node of D. That cache entry gets put into TerminatedPaths. We then
      //    walk up to C (N is later in our worklist), find the clobber, and
      //    quit. If we append TerminatedPaths to OtherClobbers, we'll cache
      //    the bottom part of D to the cached clobber, ignoring the clobber
      //    in N. Again, this problem goes away if we start tracking all
      //    blockers for a given phi optimization.
      TerminatedPath Result{CurNode.Last, defPathIndex(CurNode)};
      return {Result, {}};
    }

    // If there's nothing left to search, then all paths led to valid clobbers
    // that we got from our cache; pick the nearest to the start, and allow
    // the rest to be cached back.
    if (NewPaused.empty()) {
      MoveDominatedPathToEnd(TerminatedPaths);
      TerminatedPath Result = TerminatedPaths.pop_back_val();
      return {Result, std::move(TerminatedPaths)};
    }

    MemoryAccess *DefChainEnd = nullptr;
    SmallVector<TerminatedPath, 4> Clobbers;
    for (ListIndex Paused : NewPaused) {
      UpwardsWalkResult WR = walkToPhiOrClobber(Paths[Paused]);
      if (WR.IsKnownClobber)
        Clobbers.push_back({WR.Result, Paused});
      else
        // Micro-opt: If we hit the end of the chain, save it.
        DefChainEnd = WR.Result;
    }

    if (!TerminatedPaths.empty()) {
      // If we couldn't find the dominating phi/liveOnEntry in the above loop,
      // do it now.
      if (!DefChainEnd)
        for (auto *MA : def_chain(const_cast<MemoryAccess *>(Target)))
          DefChainEnd = MA;

      // If any of the terminated paths don't dominate the phi we'll try to
      // optimize, we need to figure out what they are and quit.
      const BasicBlock *ChainBB = DefChainEnd->getBlock();
      for (const TerminatedPath &TP : TerminatedPaths) {
        // Because we know that DefChainEnd is as "high" as we can go, we
        // don't need local dominance checks; BB dominance is sufficient.
        if (DT.dominates(ChainBB, TP.Clobber->getBlock()))
          Clobbers.push_back(TP);
      }
    }

    // If we have clobbers in the def chain, find the one closest to Current
    // and quit.
    if (!Clobbers.empty()) {
      MoveDominatedPathToEnd(Clobbers);
      TerminatedPath Result = Clobbers.pop_back_val();
      return {Result, std::move(Clobbers)};
    }

    assert(all_of(NewPaused,
                  [&](ListIndex I) { return Paths[I].Last == DefChainEnd; }));

    // Because liveOnEntry is a clobber, this must be a phi.
    auto *DefChainPhi = cast<MemoryPhi>(DefChainEnd);

    // No clobbers found: the optimization to DefChainPhi is legal. Expand
    // the search above it and loop.
    PriorPathsSize = Paths.size();
    PausedSearches.clear();
    for (ListIndex I : NewPaused)
      addSearches(DefChainPhi, PausedSearches, I);
    NewPaused.clear();

    Current = DefChainPhi;
  }
}
|
2016-06-25 05:02:12 +08:00
|
|
|
|
[MemorySSA] Update to the new shiny walker.
This patch updates MemorySSA's use-optimizing walker to be more
accurate and, in some cases, faster.
Essentially, this changed our core walking algorithm from a
cache-as-you-go DFS to an iteratively expanded DFS, with all of the
caching happening at the end. Said expansion happens when we hit a Phi,
P; we'll try to do the smallest amount of work possible to see if
optimizing above that Phi is legal in the first place. If so, we'll
expand the search to see if we can optimize to the next phi, etc.
An iteratively expanded DFS lets us potentially quit earlier (because we
don't assume that we can optimize above all phis) than our old walker.
Additionally, because we don't cache as we go, we can now optimize above
loops.
As an added bonus, this patch adds a ton of verification (if
EXPENSIVE_CHECKS are enabled), so finding bugs is easier.
Differential Revision: https://reviews.llvm.org/D21777
llvm-svn: 275940
2016-07-19 09:29:15 +08:00
|
|
|
void verifyOptResult(const OptznResult &R) const {
|
|
|
|
assert(all_of(R.OtherClobbers, [&](const TerminatedPath &P) {
|
|
|
|
return MSSA.dominates(P.Clobber, R.PrimaryClobber.Clobber);
|
|
|
|
}));
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Drop the per-query phi-optimization scratch state (VisitedPhis/Paths)
/// so the next tryOptimizePhi call starts from a clean slate.
void resetPhiOptznState() {
  VisitedPhis.clear();
  Paths.clear();
}
|
|
|
|
|
|
|
|
public:
  // Captures MSSA, AA and DT for the walker's lifetime (presumably as
  // reference members -- declarations are outside this view; confirm).
  // All three must outlive this object.
  ClobberWalker(const MemorySSA &MSSA, AliasAnalysis &AA, DominatorTree &DT)
      : MSSA(MSSA), AA(AA), DT(DT) {}
[MemorySSA] Update to the new shiny walker.
This patch updates MemorySSA's use-optimizing walker to be more
accurate and, in some cases, faster.
Essentially, this changed our core walking algorithm from a
cache-as-you-go DFS to an iteratively expanded DFS, with all of the
caching happening at the end. Said expansion happens when we hit a Phi,
P; we'll try to do the smallest amount of work possible to see if
optimizing above that Phi is legal in the first place. If so, we'll
expand the search to see if we can optimize to the next phi, etc.
An iteratively expanded DFS lets us potentially quit earlier (because we
don't assume that we can optimize above all phis) than our old walker.
Additionally, because we don't cache as we go, we can now optimize above
loops.
As an added bonus, this patch adds a ton of verification (if
EXPENSIVE_CHECKS are enabled), so finding bugs is easier.
Differential Revision: https://reviews.llvm.org/D21777
llvm-svn: 275940
2016-07-19 09:29:15 +08:00
|
|
|
|
|
|
|
/// Finds the nearest clobber for the given query, optimizing phis if
/// possible.
MemoryAccess *findClobber(MemoryAccess *Start, UpwardsMemoryQuery &Q) {
  Query = &Q;

  MemoryAccess *Current = Start;
  // This walker pretends uses don't exist. If we're handed one, silently grab
  // its def. (This has the nice side-effect of ensuring we never cache uses)
  if (auto *MU = dyn_cast<MemoryUse>(Start))
    Current = MU->getDefiningAccess();

  DefPath FirstDesc(Q.StartingLoc, Current, Current, None);
  // Fast path for the overly-common case (no crazy phi optimization
  // necessary)
  UpwardsWalkResult WalkResult = walkToPhiOrClobber(FirstDesc);
  MemoryAccess *Result;
  if (WalkResult.IsKnownClobber) {
    // The straight-line walk already hit a clobber; no phi handling needed.
    Result = WalkResult.Result;
    Q.AR = WalkResult.AR;
  } else {
    // The walk stopped at a MemoryPhi: try to optimize above it. The phi
    // optimization leaves scratch state (Paths/VisitedPhis) behind, so clear
    // it once we've extracted and verified the result.
    OptznResult OptRes = tryOptimizePhi(cast<MemoryPhi>(FirstDesc.Last),
                                        Current, Q.StartingLoc);
    verifyOptResult(OptRes);
    resetPhiOptznState();
    Result = OptRes.PrimaryClobber.Clobber;
  }

#ifdef EXPENSIVE_CHECKS
  // Expensive re-verification that Result really clobbers the query.
  checkClobberSanity(Current, Result, Q.StartingLoc, MSSA, Q, AA);
#endif
  return Result;
}
|
2016-08-09 01:52:01 +08:00
|
|
|
|
|
|
|
/// Sanity check that this walker is still associated with the MemorySSA
/// instance it was constructed for.
void verify(const MemorySSA *MSSA) { assert(MSSA == &this->MSSA); }
|
[MemorySSA] Update to the new shiny walker.
This patch updates MemorySSA's use-optimizing walker to be more
accurate and, in some cases, faster.
Essentially, this changed our core walking algorithm from a
cache-as-you-go DFS to an iteratively expanded DFS, with all of the
caching happening at the end. Said expansion happens when we hit a Phi,
P; we'll try to do the smallest amount of work possible to see if
optimizing above that Phi is legal in the first place. If so, we'll
expand the search to see if we can optimize to the next phi, etc.
An iteratively expanded DFS lets us potentially quit earlier (because we
don't assume that we can optimize above all phis) than our old walker.
Additionally, because we don't cache as we go, we can now optimize above
loops.
As an added bonus, this patch adds a ton of verification (if
EXPENSIVE_CHECKS are enabled), so finding bugs is easier.
Differential Revision: https://reviews.llvm.org/D21777
llvm-svn: 275940
2016-07-19 09:29:15 +08:00
|
|
|
};
|
|
|
|
|
|
|
|
struct RenamePassData {
|
|
|
|
DomTreeNode *DTN;
|
|
|
|
DomTreeNode::const_iterator ChildIt;
|
|
|
|
MemoryAccess *IncomingVal;
|
|
|
|
|
|
|
|
RenamePassData(DomTreeNode *D, DomTreeNode::const_iterator It,
|
|
|
|
MemoryAccess *M)
|
|
|
|
: DTN(D), ChildIt(It), IncomingVal(M) {}
|
2017-08-17 06:07:40 +08:00
|
|
|
|
[MemorySSA] Update to the new shiny walker.
This patch updates MemorySSA's use-optimizing walker to be more
accurate and, in some cases, faster.
Essentially, this changed our core walking algorithm from a
cache-as-you-go DFS to an iteratively expanded DFS, with all of the
caching happening at the end. Said expansion happens when we hit a Phi,
P; we'll try to do the smallest amount of work possible to see if
optimizing above that Phi is legal in the first place. If so, we'll
expand the search to see if we can optimize to the next phi, etc.
An iteratively expanded DFS lets us potentially quit earlier (because we
don't assume that we can optimize above all phis) than our old walker.
Additionally, because we don't cache as we go, we can now optimize above
loops.
As an added bonus, this patch adds a ton of verification (if
EXPENSIVE_CHECKS are enabled), so finding bugs is easier.
Differential Revision: https://reviews.llvm.org/D21777
llvm-svn: 275940
2016-07-19 09:29:15 +08:00
|
|
|
void swap(RenamePassData &RHS) {
|
|
|
|
std::swap(DTN, RHS.DTN);
|
|
|
|
std::swap(ChildIt, RHS.ChildIt);
|
|
|
|
std::swap(IncomingVal, RHS.IncomingVal);
|
|
|
|
}
|
|
|
|
};
|
2017-08-17 06:07:40 +08:00
|
|
|
|
|
|
|
} // end anonymous namespace
|
[MemorySSA] Update to the new shiny walker.
This patch updates MemorySSA's use-optimizing walker to be more
accurate and, in some cases, faster.
Essentially, this changed our core walking algorithm from a
cache-as-you-go DFS to an iteratively expanded DFS, with all of the
caching happening at the end. Said expansion happens when we hit a Phi,
P; we'll try to do the smallest amount of work possible to see if
optimizing above that Phi is legal in the first place. If so, we'll
expand the search to see if we can optimize to the next phi, etc.
An iteratively expanded DFS lets us potentially quit earlier (because we
don't assume that we can optimize above all phis) than our old walker.
Additionally, because we don't cache as we go, we can now optimize above
loops.
As an added bonus, this patch adds a ton of verification (if
EXPENSIVE_CHECKS are enabled), so finding bugs is easier.
Differential Revision: https://reviews.llvm.org/D21777
llvm-svn: 275940
2016-07-19 09:29:15 +08:00
|
|
|
|
|
|
|
namespace llvm {

/// A MemorySSAWalker that does AA walks to disambiguate accesses. It no
/// longer does caching on its own, but the name has been retained for the
/// moment.
class MemorySSA::CachingWalker final : public MemorySSAWalker {
  // The underlying walker that performs the actual upward clobber search.
  ClobberWalker Walker;

  // Internal entry point that threads an explicit UpwardsMemoryQuery.
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *, UpwardsMemoryQuery &);

public:
  CachingWalker(MemorySSA *, AliasAnalysis *, DominatorTree *);
  ~CachingWalker() override = default;

  // Bring in the convenience overloads from the base class.
  using MemorySSAWalker::getClobberingMemoryAccess;

  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) override;
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
                                          const MemoryLocation &) override;
  void invalidateInfo(MemoryAccess *) override;

  /// Verify both this walker and its inner ClobberWalker against \p MSSA.
  void verify(const MemorySSA *MSSA) override {
    MemorySSAWalker::verify(MSSA);
    Walker.verify(MSSA);
  }
};

} // end namespace llvm
|
|
|
|
|
2017-02-21 06:26:03 +08:00
|
|
|
/// Propagate the memory state leaving \p BB into the MemoryPhis of its
/// successor blocks. When \p RenameAllUses is set, the existing incoming
/// value for BB's edge is overwritten; otherwise a new incoming edge is
/// appended.
void MemorySSA::renameSuccessorPhis(BasicBlock *BB, MemoryAccess *IncomingVal,
                                    bool RenameAllUses) {
  // Pass through values to our successors
  for (const BasicBlock *Succ : successors(BB)) {
    auto AccIt = PerBlockAccesses.find(Succ);
    if (AccIt == PerBlockAccesses.end())
      continue;
    // The phi, when present, is the first access in the successor's list.
    auto *Phi = dyn_cast<MemoryPhi>(&AccIt->second->front());
    if (!Phi)
      continue;
    if (!RenameAllUses) {
      Phi->addIncoming(IncomingVal, BB);
      continue;
    }
    int PhiIndex = Phi->getBasicBlockIndex(BB);
    assert(PhiIndex != -1 && "Incomplete phi during partial rename");
    Phi->setIncomingValue(PhiIndex, IncomingVal);
  }
}
|
|
|
|
|
2018-05-01 23:54:18 +08:00
|
|
|
/// Rename a single basic block into MemorySSA form.
|
2016-02-03 06:46:49 +08:00
|
|
|
/// Uses the standard SSA renaming algorithm.
|
|
|
|
/// \returns The new incoming value.
|
2017-02-21 06:26:03 +08:00
|
|
|
MemoryAccess *MemorySSA::renameBlock(BasicBlock *BB, MemoryAccess *IncomingVal,
|
|
|
|
bool RenameAllUses) {
|
2016-02-03 06:46:49 +08:00
|
|
|
auto It = PerBlockAccesses.find(BB);
|
|
|
|
// Skip most processing if the list is empty.
|
|
|
|
if (It != PerBlockAccesses.end()) {
|
2016-06-21 04:21:33 +08:00
|
|
|
AccessList *Accesses = It->second.get();
|
2016-02-03 06:46:49 +08:00
|
|
|
for (MemoryAccess &L : *Accesses) {
|
2016-08-23 03:14:16 +08:00
|
|
|
if (MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(&L)) {
|
2017-02-21 06:26:03 +08:00
|
|
|
if (MUD->getDefiningAccess() == nullptr || RenameAllUses)
|
2016-08-23 03:14:16 +08:00
|
|
|
MUD->setDefiningAccess(IncomingVal);
|
|
|
|
if (isa<MemoryDef>(&L))
|
|
|
|
IncomingVal = &L;
|
|
|
|
} else {
|
2016-02-03 06:46:49 +08:00
|
|
|
IncomingVal = &L;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return IncomingVal;
|
|
|
|
}
|
|
|
|
|
2018-05-01 23:54:18 +08:00
|
|
|
/// This is the standard SSA renaming algorithm.
///
/// We walk the dominator tree in preorder, renaming accesses, and then filling
/// in phi nodes in our successors.
void MemorySSA::renamePass(DomTreeNode *Root, MemoryAccess *IncomingVal,
                           SmallPtrSetImpl<BasicBlock *> &Visited,
                           bool SkipVisited, bool RenameAllUses) {
  SmallVector<RenamePassData, 32> WorkStack;
  // Skip everything if we already renamed this block and we are skipping.
  // Note: You can't sink this into the if, because we need it to occur
  // regardless of whether we skip blocks or not.
  bool AlreadyVisited = !Visited.insert(Root->getBlock()).second;
  if (SkipVisited && AlreadyVisited)
    return;

  // Rename the root block itself, then seed the explicit DFS work stack.
  IncomingVal = renameBlock(Root->getBlock(), IncomingVal, RenameAllUses);
  renameSuccessorPhis(Root->getBlock(), IncomingVal, RenameAllUses);
  WorkStack.push_back({Root, Root->begin(), IncomingVal});

  // Iterative (non-recursive) preorder walk of the dominator tree.
  while (!WorkStack.empty()) {
    DomTreeNode *Node = WorkStack.back().DTN;
    DomTreeNode::const_iterator ChildIt = WorkStack.back().ChildIt;
    IncomingVal = WorkStack.back().IncomingVal;

    if (ChildIt == Node->end()) {
      // All children of this node processed; pop back to the parent.
      WorkStack.pop_back();
    } else {
      DomTreeNode *Child = *ChildIt;
      ++WorkStack.back().ChildIt;
      BasicBlock *BB = Child->getBlock();
      // Note: You can't sink this into the if, because we need it to occur
      // regardless of whether we skip blocks or not.
      AlreadyVisited = !Visited.insert(BB).second;
      if (SkipVisited && AlreadyVisited) {
        // We already visited this during our renaming, which can happen when
        // being asked to rename multiple blocks. Figure out the incoming val,
        // which is the last def.
        // Incoming value can only change if there is a block def, and in that
        // case, it's the last block def in the list.
        if (auto *BlockDefs = getWritableBlockDefs(BB))
          IncomingVal = &*BlockDefs->rbegin();
      } else
        IncomingVal = renameBlock(BB, IncomingVal, RenameAllUses);
      renameSuccessorPhis(BB, IncomingVal, RenameAllUses);
      WorkStack.push_back({Child, Child->begin(), IncomingVal});
    }
  }
}
|
|
|
|
|
2018-05-01 23:54:18 +08:00
|
|
|
/// This handles unreachable block accesses by deleting phi nodes in
/// unreachable blocks, and marking all other unreachable MemoryAccess's as
/// being uses of the live on entry definition.
void MemorySSA::markUnreachableAsLiveOnEntry(BasicBlock *BB) {
  assert(!DT->isReachableFromEntry(BB) &&
         "Reachable block found while handling unreachable blocks");

  // Make sure phi nodes in our reachable successors end up with a
  // LiveOnEntryDef for our incoming edge, even though our block is forward
  // unreachable. We could just disconnect these blocks from the CFG fully,
  // but we do not right now.
  for (const BasicBlock *S : successors(BB)) {
    if (!DT->isReachableFromEntry(S))
      continue;
    auto It = PerBlockAccesses.find(S);
    // Rename the phi nodes in our successor block
    if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
      continue;
    AccessList *Accesses = It->second.get();
    auto *Phi = cast<MemoryPhi>(&Accesses->front());
    Phi->addIncoming(LiveOnEntryDef.get(), BB);
  }

  // Now rewrite the accesses of BB itself.
  auto It = PerBlockAccesses.find(BB);
  if (It == PerBlockAccesses.end())
    return;

  auto &Accesses = It->second;
  // Save the next iterator before each step, since erase() invalidates AI.
  for (auto AI = Accesses->begin(), AE = Accesses->end(); AI != AE;) {
    auto Next = std::next(AI);
    // If we have a phi, just remove it. We are going to replace all
    // users with live on entry.
    if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(AI))
      UseOrDef->setDefiningAccess(LiveOnEntryDef.get());
    else
      Accesses->erase(AI);
    AI = Next;
  }
}
|
|
|
|
|
2016-06-02 05:30:40 +08:00
|
|
|
/// Construct MemorySSA for \p Func, eagerly building the full form.
MemorySSA::MemorySSA(Function &Func, AliasAnalysis *AA, DominatorTree *DT)
    : AA(AA), DT(DT), F(Func), LiveOnEntryDef(nullptr), Walker(nullptr),
      NextID(0) {
  buildMemorySSA();
}
|
|
|
|
|
2016-02-03 06:46:49 +08:00
|
|
|
MemorySSA::~MemorySSA() {
  // Drop all our references so the use-lists are empty before destruction.
  for (const auto &BlockAndList : PerBlockAccesses)
    for (MemoryAccess &Access : *BlockAndList.second)
      Access.dropAllReferences();
}
|
|
|
|
|
2016-06-22 02:39:20 +08:00
|
|
|
/// Return the access list for \p BB, creating an empty one on first request.
MemorySSA::AccessList *MemorySSA::getOrCreateAccessList(const BasicBlock *BB) {
  auto InsertRes = PerBlockAccesses.insert(std::make_pair(BB, nullptr));
  auto &ListPtr = InsertRes.first->second;
  // Only allocate when the key was newly inserted.
  if (InsertRes.second)
    ListPtr = llvm::make_unique<AccessList>();
  return ListPtr.get();
}
|
2017-08-17 06:07:40 +08:00
|
|
|
|
2017-01-26 04:56:19 +08:00
|
|
|
/// Return the defs list for \p BB, creating an empty one on first request.
MemorySSA::DefsList *MemorySSA::getOrCreateDefsList(const BasicBlock *BB) {
  auto InsertRes = PerBlockDefs.insert(std::make_pair(BB, nullptr));
  auto &ListPtr = InsertRes.first->second;
  // Only allocate when the key was newly inserted.
  if (InsertRes.second)
    ListPtr = llvm::make_unique<DefsList>();
  return ListPtr.get();
}
|
2016-02-03 06:46:49 +08:00
|
|
|
|
2017-08-17 06:07:40 +08:00
|
|
|
namespace llvm {

/// This class is a batch walker of all MemoryUse's in the program, and points
/// their defining access at the thing that actually clobbers them. Because it
/// is a batch walker that touches everything, it does not operate like the
/// other walkers. This walker is basically performing a top-down SSA renaming
/// pass, where the version stack is used as the cache. This enables it to be
/// significantly more time and memory efficient than using the regular walker,
/// which is walking bottom-up.
class MemorySSA::OptimizeUses {
public:
  OptimizeUses(MemorySSA *MSSA, MemorySSAWalker *Walker, AliasAnalysis *AA,
               DominatorTree *DT)
      : MSSA(MSSA), Walker(Walker), AA(AA), DT(DT) {
    // NOTE(review): this assigns the *parameter* Walker (which shadows the
    // member), so the member keeps the value passed by the caller. The only
    // observable effect is whatever side effects MSSA->getWalker() has (e.g.
    // lazy walker creation) — confirm whether `this->Walker` was intended.
    Walker = MSSA->getWalker();
  }

  // Optimize every MemoryUse reachable from the dominator-tree root.
  void optimizeUses();

private:
  /// This represents where a given memorylocation is in the stack.
  struct MemlocStackInfo {
    // This essentially is keeping track of versions of the stack. Whenever
    // the stack changes due to pushes or pops, these versions increase.
    unsigned long StackEpoch;
    unsigned long PopEpoch;
    // This is the lower bound of places on the stack to check. It is equal to
    // the place the last stack walk ended.
    // Note: Correctness depends on this being initialized to 0, which densemap
    // does
    unsigned long LowerBound;
    // Block in which the current lower bound was established.
    const BasicBlock *LowerBoundBlock;
    // This is where the last walk for this memory location ended.
    unsigned long LastKill;
    // Whether LastKill (and AR) currently hold meaningful values.
    bool LastKillValid;
    // Alias result associated with the last kill, if known.
    Optional<AliasResult> AR;
  };

  // Per-block worker for optimizeUses(); see its definition for details.
  void optimizeUsesInBlock(const BasicBlock *, unsigned long &, unsigned long &,
                           SmallVectorImpl<MemoryAccess *> &,
                           DenseMap<MemoryLocOrCall, MemlocStackInfo> &);

  MemorySSA *MSSA;
  MemorySSAWalker *Walker;
  AliasAnalysis *AA;
  DominatorTree *DT;
};

} // end namespace llvm
|
|
|
|
|
2016-08-03 00:24:03 +08:00
|
|
|
/// Optimize the uses in a given block This is basically the SSA renaming
/// algorithm, with one caveat: We are able to use a single stack for all
/// MemoryUses. This is because the set of *possible* reaching MemoryDefs is
/// the same for every MemoryUse. The *actual* clobbering MemoryDef is just
/// going to be some position in that stack of possible ones.
///
/// We track the stack positions that each MemoryLocation needs
/// to check, and last ended at. This is because we only want to check the
/// things that changed since last time. The same MemoryLocation should
/// get clobbered by the same store (getModRefInfo does not use invariantness or
/// things like this, and if they start, we can modify MemoryLocOrCall to
/// include relevant data)
void MemorySSA::OptimizeUses::optimizeUsesInBlock(
    const BasicBlock *BB, unsigned long &StackEpoch, unsigned long &PopEpoch,
    SmallVectorImpl<MemoryAccess *> &VersionStack,
    DenseMap<MemoryLocOrCall, MemlocStackInfo> &LocStackInfo) {

  /// If no accesses, nothing to do.
  MemorySSA::AccessList *Accesses = MSSA->getWritableBlockAccesses(BB);
  if (Accesses == nullptr)
    return;

  // Pop everything that doesn't dominate the current block off the stack,
  // increment the PopEpoch to account for this.
  while (true) {
    assert(
        !VersionStack.empty() &&
        "Version stack should have liveOnEntry sentinel dominating everything");
    BasicBlock *BackBlock = VersionStack.back()->getBlock();
    if (DT->dominates(BackBlock, BB))
      break;
    // Pop every access from the non-dominating block in one go.
    while (VersionStack.back()->getBlock() == BackBlock)
      VersionStack.pop_back();
    ++PopEpoch;
  }

  for (MemoryAccess &MA : *Accesses) {
    auto *MU = dyn_cast<MemoryUse>(&MA);
    if (!MU) {
      // Defs and phis are pushed onto the version stack; only uses are
      // optimized.
      VersionStack.push_back(&MA);
      ++StackEpoch;
      continue;
    }

    if (isUseTriviallyOptimizableToLiveOnEntry(*AA, MU->getMemoryInst())) {
      MU->setDefiningAccess(MSSA->getLiveOnEntryDef(), true, None);
      continue;
    }

    MemoryLocOrCall UseMLOC(MU);
    auto &LocInfo = LocStackInfo[UseMLOC];
    // If the pop epoch changed, it means we've removed stuff from top of
    // stack due to changing blocks. We may have to reset the lower bound or
    // last kill info.
    if (LocInfo.PopEpoch != PopEpoch) {
      LocInfo.PopEpoch = PopEpoch;
      LocInfo.StackEpoch = StackEpoch;
      // If the lower bound was in something that no longer dominates us, we
      // have to reset it.
      // We can't simply track stack size, because the stack may have had
      // pushes/pops in the meantime.
      // XXX: This is non-optimal, but only is slower cases with heavily
      // branching dominator trees. To get the optimal number of queries would
      // be to make lowerbound and lastkill a per-loc stack, and pop it until
      // the top of that stack dominates us. This does not seem worth it ATM.
      // A much cheaper optimization would be to always explore the deepest
      // branch of the dominator tree first. This will guarantee this resets on
      // the smallest set of blocks.
      if (LocInfo.LowerBoundBlock && LocInfo.LowerBoundBlock != BB &&
          !DT->dominates(LocInfo.LowerBoundBlock, BB)) {
        // Reset the lower bound of things to check.
        // TODO: Some day we should be able to reset to last kill, rather than
        // 0.
        LocInfo.LowerBound = 0;
        LocInfo.LowerBoundBlock = VersionStack[0]->getBlock();
        LocInfo.LastKillValid = false;
      }
    } else if (LocInfo.StackEpoch != StackEpoch) {
      // If all that has changed is the StackEpoch, we only have to check the
      // new things on the stack, because we've checked everything before. In
      // this case, the lower bound of things to check remains the same.
      LocInfo.PopEpoch = PopEpoch;
      LocInfo.StackEpoch = StackEpoch;
    }
    if (!LocInfo.LastKillValid) {
      LocInfo.LastKill = VersionStack.size() - 1;
      LocInfo.LastKillValid = true;
      LocInfo.AR = MayAlias;
    }

    // At this point, we should have corrected last kill and LowerBound to be
    // in bounds.
    assert(LocInfo.LowerBound < VersionStack.size() &&
           "Lower bound out of range");
    assert(LocInfo.LastKill < VersionStack.size() &&
           "Last kill info out of range");
    // In any case, the new upper bound is the top of the stack.
    unsigned long UpperBound = VersionStack.size() - 1;

    if (UpperBound - LocInfo.LowerBound > MaxCheckLimit) {
      LLVM_DEBUG(dbgs() << "MemorySSA skipping optimization of " << *MU << " ("
                        << *(MU->getMemoryInst()) << ")"
                        << " because there are "
                        << UpperBound - LocInfo.LowerBound
                        << " stores to disambiguate\n");
      // Because we did not walk, LastKill is no longer valid, as this may
      // have been a kill.
      LocInfo.LastKillValid = false;
      continue;
    }
    bool FoundClobberResult = false;
    // Walk downward from the top of the stack toward the lower bound,
    // looking for the first access that clobbers this use.
    while (UpperBound > LocInfo.LowerBound) {
      if (isa<MemoryPhi>(VersionStack[UpperBound])) {
        // For phis, use the walker, see where we ended up, go there
        Instruction *UseInst = MU->getMemoryInst();
        MemoryAccess *Result = Walker->getClobberingMemoryAccess(UseInst);
        // We are guaranteed to find it or something is wrong
        while (VersionStack[UpperBound] != Result) {
          assert(UpperBound != 0);
          --UpperBound;
        }
        FoundClobberResult = true;
        break;
      }

      MemoryDef *MD = cast<MemoryDef>(VersionStack[UpperBound]);
      // If the lifetime of the pointer ends at this instruction, it's live on
      // entry.
      if (!UseMLOC.IsCall && lifetimeEndsAt(MD, UseMLOC.getLoc(), *AA)) {
        // Reset UpperBound to liveOnEntryDef's place in the stack
        UpperBound = 0;
        FoundClobberResult = true;
        LocInfo.AR = MustAlias;
        break;
      }
      ClobberAlias CA = instructionClobbersQuery(MD, MU, UseMLOC, *AA);
      if (CA.IsClobber) {
        FoundClobberResult = true;
        LocInfo.AR = CA.AR;
        break;
      }
      --UpperBound;
    }

    // Note: Phis always have AliasResult AR set to MayAlias ATM.

    // At the end of this loop, UpperBound is either a clobber, or lower bound
    // PHI walking may cause it to be < LowerBound, and in fact, < LastKill.
    if (FoundClobberResult || UpperBound < LocInfo.LastKill) {
      // We were last killed now by where we got to
      if (MSSA->isLiveOnEntryDef(VersionStack[UpperBound]))
        LocInfo.AR = None;
      MU->setDefiningAccess(VersionStack[UpperBound], true, LocInfo.AR);
      LocInfo.LastKill = UpperBound;
    } else {
      // Otherwise, we checked all the new ones, and now we know we can get to
      // LastKill.
      MU->setDefiningAccess(VersionStack[LocInfo.LastKill], true, LocInfo.AR);
    }
    LocInfo.LowerBound = VersionStack.size() - 1;
    LocInfo.LowerBoundBlock = BB;
  }
}
|
|
|
|
|
|
|
|
/// Optimize uses to point to their actual clobbering definitions.
void MemorySSA::OptimizeUses::optimizeUses() {
  SmallVector<MemoryAccess *, 16> VersionStack;
  DenseMap<MemoryLocOrCall, MemlocStackInfo> LocStackInfo;
  // Seed the version stack with the live-on-entry sentinel, which dominates
  // everything (the per-block worker asserts it is never popped).
  VersionStack.push_back(MSSA->getLiveOnEntryDef());

  // Epochs start at 1 so a default-initialized (zeroed) MemlocStackInfo is
  // always considered stale on first use.
  unsigned long StackEpoch = 1;
  unsigned long PopEpoch = 1;
  // We perform a non-recursive top-down dominator tree walk.
  for (const auto *DomNode : depth_first(DT->getRootNode()))
    optimizeUsesInBlock(DomNode->getBlock(), StackEpoch, PopEpoch, VersionStack,
                        LocStackInfo);
}
|
|
|
|
|
2016-08-23 03:14:30 +08:00
|
|
|
void MemorySSA::placePHINodes(
|
2018-05-16 02:40:29 +08:00
|
|
|
const SmallPtrSetImpl<BasicBlock *> &DefiningBlocks) {
|
2016-08-23 03:14:30 +08:00
|
|
|
// Determine where our MemoryPhi's should go
|
|
|
|
ForwardIDFCalculator IDFs(*DT);
|
|
|
|
IDFs.setDefiningBlocks(DefiningBlocks);
|
|
|
|
SmallVector<BasicBlock *, 32> IDFBlocks;
|
|
|
|
IDFs.calculate(IDFBlocks);
|
|
|
|
|
|
|
|
// Now place MemoryPhi nodes.
|
2017-01-26 04:56:19 +08:00
|
|
|
for (auto &BB : IDFBlocks)
|
|
|
|
createMemoryPhi(BB);
|
2016-08-23 03:14:30 +08:00
|
|
|
}
|
|
|
|
|
2016-06-28 02:22:27 +08:00
|
|
|
void MemorySSA::buildMemorySSA() {
  // We create an access to represent "live on entry", for things like
  // arguments or users of globals, where the memory they use is defined before
  // the beginning of the function. We do not actually insert it into the IR.
  // We do not define a live on exit for the immediate uses, and thus our
  // semantics do *not* imply that something with no immediate uses can simply
  // be removed.
  BasicBlock &StartingPoint = F.getEntryBlock();
  LiveOnEntryDef.reset(new MemoryDef(F.getContext(), nullptr, nullptr,
                                     &StartingPoint, NextID++));

  // We maintain lists of memory accesses per-block, trading memory for time. We
  // could just look up the memory access for every possible instruction in the
  // stream.
  SmallPtrSet<BasicBlock *, 32> DefiningBlocks;
  // Go through each block, figure out where defs occur, and chain together all
  // the accesses.
  for (BasicBlock &B : F) {
    bool InsertIntoDef = false;
    AccessList *Accesses = nullptr;
    DefsList *Defs = nullptr;
    for (Instruction &I : B) {
      // createNewAccess returns null for instructions that don't touch memory.
      MemoryUseOrDef *MUD = createNewAccess(&I);
      if (!MUD)
        continue;

      // Lists are created lazily so blocks with no memory accesses never get
      // entries in the per-block maps.
      if (!Accesses)
        Accesses = getOrCreateAccessList(&B);
      Accesses->push_back(MUD);
      if (isa<MemoryDef>(MUD)) {
        InsertIntoDef = true;
        if (!Defs)
          Defs = getOrCreateDefsList(&B);
        Defs->push_back(*MUD);
      }
    }
    if (InsertIntoDef)
      DefiningBlocks.insert(&B);
  }
  // Blocks containing defs determine where MemoryPhis are needed (via IDF).
  placePHINodes(DefiningBlocks);

  // Now do regular SSA renaming on the MemoryDef/MemoryUse. Visited will get
  // filled in with all blocks.
  SmallPtrSet<BasicBlock *, 16> Visited;
  renamePass(DT->getRootNode(), LiveOnEntryDef.get(), Visited);

  CachingWalker *Walker = getWalkerImpl();

  // Point uses at their actual clobbering definitions rather than just the
  // nearest dominating def.
  OptimizeUses(this, Walker, AA, DT).optimizeUses();

  // Mark the uses in unreachable blocks as live on entry, so that they go
  // somewhere.
  for (auto &BB : F)
    if (!Visited.count(&BB))
      markUnreachableAsLiveOnEntry(&BB);
}
|
2016-02-03 06:46:49 +08:00
|
|
|
|
[MemorySSA] Update to the new shiny walker.
This patch updates MemorySSA's use-optimizing walker to be more
accurate and, in some cases, faster.
Essentially, this changed our core walking algorithm from a
cache-as-you-go DFS to an iteratively expanded DFS, with all of the
caching happening at the end. Said expansion happens when we hit a Phi,
P; we'll try to do the smallest amount of work possible to see if
optimizing above that Phi is legal in the first place. If so, we'll
expand the search to see if we can optimize to the next phi, etc.
An iteratively expanded DFS lets us potentially quit earlier (because we
don't assume that we can optimize above all phis) than our old walker.
Additionally, because we don't cache as we go, we can now optimize above
loops.
As an added bonus, this patch adds a ton of verification (if
EXPENSIVE_CHECKS are enabled), so finding bugs is easier.
Differential Revision: https://reviews.llvm.org/D21777
llvm-svn: 275940
2016-07-19 09:29:15 +08:00
|
|
|
/// Public accessor for the clobber walker; defers to the lazily-built impl.
MemorySSAWalker *MemorySSA::getWalker() { return getWalkerImpl(); }
|
|
|
|
|
|
|
|
/// Return the caching walker, constructing it on first request.
MemorySSA::CachingWalker *MemorySSA::getWalkerImpl() {
  if (!Walker)
    Walker = llvm::make_unique<CachingWalker>(this, AA, DT);
  return Walker.get();
}
|
|
|
|
|
2017-01-26 04:56:19 +08:00
|
|
|
// This is a helper function used by the creation routines. It places NewAccess
// into the access and defs lists for a given basic block, at the given
// insertion point.
void MemorySSA::insertIntoListsForBlock(MemoryAccess *NewAccess,
                                        const BasicBlock *BB,
                                        InsertionPlace Point) {
  auto *Accesses = getOrCreateAccessList(BB);
  if (Point == Beginning) {
    // If it's a phi node, it goes first, otherwise, it goes after any phi
    // nodes.
    if (isa<MemoryPhi>(NewAccess)) {
      Accesses->push_front(NewAccess);
      // Phis are defs, so they also go on the (non-owning) defs list.
      auto *Defs = getOrCreateDefsList(BB);
      Defs->push_front(*NewAccess);
    } else {
      // Skip past the leading run of phis to find the first non-phi slot.
      auto AI = find_if_not(
          *Accesses, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); });
      Accesses->insert(AI, NewAccess);
      if (!isa<MemoryUse>(NewAccess)) {
        // Defs get mirrored into the defs list, again after any phis.
        auto *Defs = getOrCreateDefsList(BB);
        auto DI = find_if_not(
            *Defs, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); });
        Defs->insert(DI, *NewAccess);
      }
    }
  } else {
    Accesses->push_back(NewAccess);
    if (!isa<MemoryUse>(NewAccess)) {
      auto *Defs = getOrCreateDefsList(BB);
      Defs->push_back(*NewAccess);
    }
  }
  // Any insertion invalidates the cached local numbering for this block.
  BlockNumberingValid.erase(BB);
}
|
|
|
|
|
|
|
|
/// Insert \p What into BB's access list immediately before \p InsertPt, and
/// into the defs list at the corresponding position if it is a def or phi.
void MemorySSA::insertIntoListsBefore(MemoryAccess *What, const BasicBlock *BB,
                                      AccessList::iterator InsertPt) {
  auto *Accesses = getWritableBlockAccesses(BB);
  // Capture end-ness before the insert below shifts iterators around.
  bool WasEnd = InsertPt == Accesses->end();
  Accesses->insert(AccessList::iterator(InsertPt), What);
  if (!isa<MemoryUse>(What)) {
    auto *Defs = getOrCreateDefsList(BB);
    // If we got asked to insert at the end, we have an easy job, just shove it
    // at the end. If we got asked to insert before an existing def, we also get
    // an iterator. If we got asked to insert before a use, we have to hunt for
    // the next def.
    if (WasEnd) {
      Defs->push_back(*What);
    } else if (isa<MemoryDef>(InsertPt)) {
      Defs->insert(InsertPt->getDefsIterator(), *What);
    } else {
      // Scan forward from the insertion point for the next def.
      while (InsertPt != Accesses->end() && !isa<MemoryDef>(InsertPt))
        ++InsertPt;
      // Either we found a def, or we are inserting at the end
      if (InsertPt == Accesses->end())
        Defs->push_back(*What);
      else
        Defs->insert(InsertPt->getDefsIterator(), *What);
    }
  }
  // Insertion invalidates the cached local numbering for this block.
  BlockNumberingValid.erase(BB);
}
|
|
|
|
|
2018-04-05 05:08:11 +08:00
|
|
|
// Move What before Where in the IR. The end result is that What will belong to
// the right lists and have the right Block set, but will not otherwise be
// correct. It will not have the right defining access, and if it is a def,
// things below it will not properly be updated.
void MemorySSA::moveTo(MemoryUseOrDef *What, BasicBlock *BB,
                       AccessList::iterator Where) {
  // Keep it in the lookup tables, remove from the lists
  removeFromLists(What, false);
  What->setBlock(BB);
  insertIntoListsBefore(What, BB, Where);
}
|
|
|
|
|
2017-01-30 19:35:39 +08:00
|
|
|
void MemorySSA::moveTo(MemoryUseOrDef *What, BasicBlock *BB,
|
|
|
|
InsertionPlace Point) {
|
|
|
|
removeFromLists(What, false);
|
|
|
|
What->setBlock(BB);
|
|
|
|
insertIntoListsForBlock(What, BB, Point);
|
|
|
|
}
|
|
|
|
|
2016-06-22 02:39:20 +08:00
|
|
|
/// Create a fresh MemoryPhi for \p BB, insert it at the front of the block's
/// access/defs lists, and register it in the lookup table.
MemoryPhi *MemorySSA::createMemoryPhi(BasicBlock *BB) {
  assert(!getMemoryAccess(BB) && "MemoryPhi already exists for this BB");
  auto *NewPhi = new MemoryPhi(BB->getContext(), BB, NextID++);
  // Phis always live at the front of their block.
  insertIntoListsForBlock(NewPhi, BB, Beginning);
  ValueToMemoryAccess[BB] = NewPhi;
  return NewPhi;
}
|
|
|
|
|
|
|
|
/// Create a MemoryUse or MemoryDef for \p I whose defining access is already
/// known to be \p Definition.
MemoryUseOrDef *MemorySSA::createDefinedAccess(Instruction *I,
                                               MemoryAccess *Definition) {
  assert(!isa<PHINode>(I) && "Cannot create a defined access for a PHI");
  auto *MUD = createNewAccess(I);
  assert(
      MUD != nullptr &&
      "Tried to create a memory access for a non-memory touching instruction");
  MUD->setDefiningAccess(Definition);
  return MUD;
}
|
|
|
|
|
2017-04-07 09:28:36 +08:00
|
|
|
// Return true if the instruction has ordering constraints.
|
|
|
|
// Note specifically that this only considers stores and loads
|
|
|
|
// because others are still considered ModRef by getModRefInfo.
|
|
|
|
static inline bool isOrdered(const Instruction *I) {
|
|
|
|
if (auto *SI = dyn_cast<StoreInst>(I)) {
|
|
|
|
if (!SI->isUnordered())
|
|
|
|
return true;
|
|
|
|
} else if (auto *LI = dyn_cast<LoadInst>(I)) {
|
|
|
|
if (!LI->isUnordered())
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
2017-08-17 06:07:40 +08:00
|
|
|
|
2018-05-01 23:54:18 +08:00
|
|
|
/// Helper function to create new memory accesses
MemoryUseOrDef *MemorySSA::createNewAccess(Instruction *I) {
  // The assume intrinsic has a control dependency which we model by claiming
  // that it writes arbitrarily. Ignore that fake memory dependency here.
  // FIXME: Replace this special casing with a more accurate modelling of
  // assume's control dependency.
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
    if (II->getIntrinsicID() == Intrinsic::assume)
      return nullptr;

  // Find out what effect this instruction has on memory.
  ModRefInfo ModRef = AA->getModRefInfo(I, None);
  // The isOrdered check is used to ensure that volatiles end up as defs
  // (atomics end up as ModRef right now anyway). Until we separate the
  // ordering chain from the memory chain, this enables people to see at least
  // some relative ordering to volatiles. Note that getClobberingMemoryAccess
  // will still give an answer that bypasses other volatile loads. TODO:
  // Separate memory aliasing and ordering into two different chains so that we
  // can precisely represent both "what memory will this read/write/is clobbered
  // by" and "what instructions can I move this past".
  bool Def = isModSet(ModRef) || isOrdered(I);
  bool Use = isRefSet(ModRef);

  // It's possible for an instruction to not modify memory at all. During
  // construction, we ignore them.
  if (!Def && !Use)
    return nullptr;

  // Anything that writes (or is ordered) becomes a def; pure readers become
  // uses. The defining access is filled in later by renaming/optimization.
  MemoryUseOrDef *MUD;
  if (Def)
    MUD = new MemoryDef(I->getContext(), nullptr, I, I->getParent(), NextID++);
  else
    MUD = new MemoryUse(I->getContext(), nullptr, I, I->getParent());
  ValueToMemoryAccess[I] = MUD;
  return MUD;
}
|
|
|
|
|
2018-05-01 23:54:18 +08:00
|
|
|
/// Returns true if \p Replacer dominates \p Replacee .
|
2016-02-03 06:46:49 +08:00
|
|
|
bool MemorySSA::dominatesUse(const MemoryAccess *Replacer,
|
|
|
|
const MemoryAccess *Replacee) const {
|
|
|
|
if (isa<MemoryUseOrDef>(Replacee))
|
|
|
|
return DT->dominates(Replacer->getBlock(), Replacee->getBlock());
|
|
|
|
const auto *MP = cast<MemoryPhi>(Replacee);
|
|
|
|
// For a phi node, the use occurs in the predecessor block of the phi node.
|
|
|
|
// Since we may occur multiple times in the phi node, we have to check each
|
|
|
|
// operand to ensure Replacer dominates each operand where Replacee occurs.
|
|
|
|
for (const Use &Arg : MP->operands()) {
|
2016-02-03 07:15:26 +08:00
|
|
|
if (Arg.get() != Replacee &&
|
2016-02-03 06:46:49 +08:00
|
|
|
!DT->dominates(Replacer->getBlock(), MP->getIncomingBlock(Arg)))
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2018-05-01 23:54:18 +08:00
|
|
|
/// Properly remove \p MA from all of MemorySSA's lookup tables.
void MemorySSA::removeFromLookups(MemoryAccess *MA) {
  assert(MA->use_empty() &&
         "Trying to remove memory access that still has uses");
  BlockNumbering.erase(MA);
  // Sever the edge to MA's defining access before MA disappears.
  if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
    MUD->setDefiningAccess(nullptr);
  // Invalidate our walker's cache if necessary
  if (!isa<MemoryUse>(MA))
    Walker->invalidateInfo(MA);

  // MemoryUseOrDefs are keyed by their instruction in ValueToMemoryAccess;
  // MemoryPhis are keyed by their block.
  Value *MemoryInst;
  if (const auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
    MemoryInst = MUD->getMemoryInst();
  else
    MemoryInst = MA->getBlock();

  // Only erase the mapping if it still points at MA; it may have been
  // replaced by a newer access for the same key.
  auto VMA = ValueToMemoryAccess.find(MemoryInst);
  if (VMA->second == MA)
    ValueToMemoryAccess.erase(VMA);
}
|
2016-03-02 02:46:54 +08:00
|
|
|
|
2018-05-01 23:54:18 +08:00
|
|
|
/// Properly remove \p MA from all of MemorySSA's lists.
///
/// Because of the way the intrusive list and use lists work, it is important to
/// do removal in the right order.
/// ShouldDelete defaults to true, and will cause the memory access to also be
/// deleted, not just removed.
void MemorySSA::removeFromLists(MemoryAccess *MA, bool ShouldDelete) {
  // The access list owns the reference, so we erase it from the non-owning list
  // first.
  if (!isa<MemoryUse>(MA)) {
    auto DefsIt = PerBlockDefs.find(MA->getBlock());
    std::unique_ptr<DefsList> &Defs = DefsIt->second;
    Defs->remove(*MA);
    // Drop the per-block entry entirely once its list is empty.
    if (Defs->empty())
      PerBlockDefs.erase(DefsIt);
  }

  // The erase call here will delete it. If we don't want it deleted, we call
  // remove instead.
  auto AccessIt = PerBlockAccesses.find(MA->getBlock());
  std::unique_ptr<AccessList> &Accesses = AccessIt->second;
  if (ShouldDelete)
    Accesses->erase(MA);
  else
    Accesses->remove(MA);

  if (Accesses->empty())
    PerBlockAccesses.erase(AccessIt);
}
|
|
|
|
|
2016-02-03 06:46:49 +08:00
|
|
|
/// Print the function's IR to \p OS with MemorySSA annotations interleaved.
void MemorySSA::print(raw_ostream &OS) const {
  MemorySSAAnnotatedWriter AnnotatedWriter(this);
  F.print(OS, &AnnotatedWriter);
}
|
|
|
|
|
2017-10-15 22:32:27 +08:00
|
|
|
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
// Debugger convenience: print the annotated function to the debug stream.
LLVM_DUMP_METHOD void MemorySSA::dump() const { print(dbgs()); }
#endif
|
2016-02-03 06:46:49 +08:00
|
|
|
|
2016-02-11 01:39:43 +08:00
|
|
|
/// Run all of MemorySSA's internal consistency checks: def-use list
/// membership, domination of uses by defs, access-list ordering, and any
/// state cached by the walker.
void MemorySSA::verifyMemorySSA() const {
  verifyDefUses(F);
  verifyDomination(F);
  verifyOrdering(F);
  // Let the walker validate whatever it has cached against the current SSA.
  Walker->verify(this);
}
|
|
|
|
|
2018-05-01 23:54:18 +08:00
|
|
|
/// Verify that the order and existence of MemoryAccesses matches the
/// order and existence of memory affecting instructions.
void MemorySSA::verifyOrdering(Function &F) const {
  // Walk all the blocks, comparing what the lookups think and what the access
  // lists think, as well as the order in the blocks vs the order in the access
  // lists.
  SmallVector<MemoryAccess *, 32> ActualAccesses;
  SmallVector<MemoryAccess *, 32> ActualDefs;
  for (BasicBlock &B : F) {
    const AccessList *AL = getBlockAccesses(&B);
    const auto *DL = getBlockDefs(&B);
    // A block's phi (if any) precedes all instruction accesses in both lists.
    MemoryAccess *Phi = getMemoryAccess(&B);
    if (Phi) {
      ActualAccesses.push_back(Phi);
      ActualDefs.push_back(Phi);
    }

    // Rebuild the expected access/defs sequences from instruction order.
    for (Instruction &I : B) {
      MemoryAccess *MA = getMemoryAccess(&I);
      assert((!MA || (AL && (isa<MemoryUse>(MA) || DL))) &&
             "We have memory affecting instructions "
             "in this block but they are not in the "
             "access list or defs list");
      if (MA) {
        ActualAccesses.push_back(MA);
        if (isa<MemoryDef>(MA))
          ActualDefs.push_back(MA);
      }
    }
    // Either we hit the assert, really have no accesses, or we have both
    // accesses and an access list.
    // Same with defs.
    if (!AL && !DL)
      continue;
    assert(AL->size() == ActualAccesses.size() &&
           "We don't have the same number of accesses in the block as on the "
           "access list");
    assert((DL || ActualDefs.size() == 0) &&
           "Either we should have a defs list, or we should have no defs");
    assert((!DL || DL->size() == ActualDefs.size()) &&
           "We don't have the same number of defs in the block as on the "
           "def list");
    // Element-wise comparison: the lists must match the instruction order.
    auto ALI = AL->begin();
    auto AAI = ActualAccesses.begin();
    while (ALI != AL->end() && AAI != ActualAccesses.end()) {
      assert(&*ALI == *AAI && "Not the same accesses in the same order");
      ++ALI;
      ++AAI;
    }
    ActualAccesses.clear();
    if (DL) {
      auto DLI = DL->begin();
      auto ADI = ActualDefs.begin();
      while (DLI != DL->end() && ADI != ActualDefs.end()) {
        assert(&*DLI == *ADI && "Not the same defs in the same order");
        ++DLI;
        ++ADI;
      }
    }
    ActualDefs.clear();
  }
}
|
|
|
|
|
2018-05-01 23:54:18 +08:00
|
|
|
/// Verify the domination properties of MemorySSA by checking that each
/// definition dominates all of its uses.
void MemorySSA::verifyDomination(Function &F) const {
#ifndef NDEBUG
  for (BasicBlock &B : F) {
    // Phi nodes are attached to basic blocks
    if (MemoryPhi *MP = getMemoryAccess(&B))
      for (const Use &U : MP->uses())
        assert(dominates(MP, U) && "Memory PHI does not dominate it's uses");

    for (Instruction &I : B) {
      // Only defs can have uses; skip instructions with no access or with a
      // MemoryUse.
      MemoryAccess *MD = dyn_cast_or_null<MemoryDef>(getMemoryAccess(&I));
      if (!MD)
        continue;

      for (const Use &U : MD->uses())
        assert(dominates(MD, U) && "Memory Def does not dominate it's uses");
    }
  }
#endif
}
|
|
|
|
|
2018-05-01 23:54:18 +08:00
|
|
|
/// Verify the def-use lists in MemorySSA, by verifying that \p Use
/// appears in the use list of \p Def.
void MemorySSA::verifyUseInDefs(MemoryAccess *Def, MemoryAccess *Use) const {
#ifndef NDEBUG
  // The live on entry use may cause us to get a NULL def here
  if (!Def)
    assert(isLiveOnEntryDef(Use) &&
           "Null def but use not point to live on entry def");
  else
    assert(is_contained(Def->users(), Use) &&
           "Did not find use in def's use list");
#endif
}
|
|
|
|
|
2018-05-01 23:54:18 +08:00
|
|
|
/// Verify the immediate use information, by walking all the memory
/// accesses and verifying that, for each use, it appears in the
/// appropriate def's use list
void MemorySSA::verifyDefUses(Function &F) const {
  for (BasicBlock &B : F) {
    // Phi nodes are attached to basic blocks
    if (MemoryPhi *Phi = getMemoryAccess(&B)) {
      // A phi must have exactly one operand per CFG predecessor.
      assert(Phi->getNumOperands() == static_cast<unsigned>(std::distance(
                                          pred_begin(&B), pred_end(&B))) &&
             "Incomplete MemoryPhi Node");
      for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
        verifyUseInDefs(Phi->getIncomingValue(I), Phi);
        // Each incoming block must actually be a predecessor of B.
        assert(find(predecessors(&B), Phi->getIncomingBlock(I)) !=
                   pred_end(&B) &&
               "Incoming phi block not a block predecessor");
      }
    }

    for (Instruction &I : B) {
      if (MemoryUseOrDef *MA = getMemoryAccess(&I)) {
        verifyUseInDefs(MA->getDefiningAccess(), MA);
      }
    }
  }
}
|
|
|
|
|
2016-11-02 05:17:46 +08:00
|
|
|
/// Look up the MemoryUseOrDef attached to instruction \p I, or null if the
/// instruction has no associated memory access.
MemoryUseOrDef *MemorySSA::getMemoryAccess(const Instruction *I) const {
  MemoryAccess *MA = ValueToMemoryAccess.lookup(I);
  return cast_or_null<MemoryUseOrDef>(MA);
}
|
|
|
|
|
|
|
|
/// Look up the MemoryPhi attached to basic block \p BB, or null if the block
/// has none.
MemoryPhi *MemorySSA::getMemoryAccess(const BasicBlock *BB) const {
  MemoryAccess *MA = ValueToMemoryAccess.lookup(cast<Value>(BB));
  return cast_or_null<MemoryPhi>(MA);
}
|
|
|
|
|
2016-07-20 06:49:43 +08:00
|
|
|
/// Perform a local numbering on blocks so that instruction ordering can be
|
|
|
|
/// determined in constant time.
|
|
|
|
/// TODO: We currently just number in order. If we numbered by N, we could
|
|
|
|
/// allow at least N-1 sequences of insertBefore or insertAfter (and at least
|
|
|
|
/// log2(N) sequences of mixed before and after) without needing to invalidate
|
|
|
|
/// the numbering.
|
|
|
|
void MemorySSA::renumberBlock(const BasicBlock *B) const {
|
|
|
|
// The pre-increment ensures the numbers really start at 1.
|
|
|
|
unsigned long CurrentNumber = 0;
|
|
|
|
const AccessList *AL = getBlockAccesses(B);
|
|
|
|
assert(AL != nullptr && "Asking to renumber an empty block");
|
|
|
|
for (const auto &I : *AL)
|
|
|
|
BlockNumbering[&I] = ++CurrentNumber;
|
|
|
|
BlockNumberingValid.insert(B);
|
|
|
|
}
|
|
|
|
|
2018-05-01 23:54:18 +08:00
|
|
|
/// Determine, for two memory accesses in the same block,
|
2016-02-03 06:46:49 +08:00
|
|
|
/// whether \p Dominator dominates \p Dominatee.
|
|
|
|
/// \returns True if \p Dominator dominates \p Dominatee.
|
|
|
|
bool MemorySSA::locallyDominates(const MemoryAccess *Dominator,
|
|
|
|
const MemoryAccess *Dominatee) const {
|
2016-07-20 06:49:43 +08:00
|
|
|
const BasicBlock *DominatorBlock = Dominator->getBlock();
|
|
|
|
|
2016-07-20 07:08:08 +08:00
|
|
|
assert((DominatorBlock == Dominatee->getBlock()) &&
|
2016-07-20 06:49:43 +08:00
|
|
|
"Asking for local domination when accesses are in different blocks!");
|
2016-06-11 05:36:41 +08:00
|
|
|
// A node dominates itself.
|
|
|
|
if (Dominatee == Dominator)
|
|
|
|
return true;
|
|
|
|
|
|
|
|
// When Dominatee is defined on function entry, it is not dominated by another
|
|
|
|
// memory access.
|
|
|
|
if (isLiveOnEntryDef(Dominatee))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// When Dominator is defined on function entry, it dominates the other memory
|
|
|
|
// access.
|
|
|
|
if (isLiveOnEntryDef(Dominator))
|
|
|
|
return true;
|
|
|
|
|
2016-07-20 06:49:43 +08:00
|
|
|
if (!BlockNumberingValid.count(DominatorBlock))
|
|
|
|
renumberBlock(DominatorBlock);
|
2016-02-03 06:46:49 +08:00
|
|
|
|
2016-07-20 06:49:43 +08:00
|
|
|
unsigned long DominatorNum = BlockNumbering.lookup(Dominator);
|
|
|
|
// All numbers start with 1
|
|
|
|
assert(DominatorNum != 0 && "Block was not numbered properly");
|
|
|
|
unsigned long DominateeNum = BlockNumbering.lookup(Dominatee);
|
|
|
|
assert(DominateeNum != 0 && "Block was not numbered properly");
|
|
|
|
return DominatorNum < DominateeNum;
|
2016-02-03 06:46:49 +08:00
|
|
|
}
|
|
|
|
|
[MemorySSA] Update to the new shiny walker.
This patch updates MemorySSA's use-optimizing walker to be more
accurate and, in some cases, faster.
Essentially, this changed our core walking algorithm from a
cache-as-you-go DFS to an iteratively expanded DFS, with all of the
caching happening at the end. Said expansion happens when we hit a Phi,
P; we'll try to do the smallest amount of work possible to see if
optimizing above that Phi is legal in the first place. If so, we'll
expand the search to see if we can optimize to the next phi, etc.
An iteratively expanded DFS lets us potentially quit earlier (because we
don't assume that we can optimize above all phis) than our old walker.
Additionally, because we don't cache as we go, we can now optimize above
loops.
As an added bonus, this patch adds a ton of verification (if
EXPENSIVE_CHECKS are enabled), so finding bugs is easier.
Differential Revision: https://reviews.llvm.org/D21777
llvm-svn: 275940
2016-07-19 09:29:15 +08:00
|
|
|
bool MemorySSA::dominates(const MemoryAccess *Dominator,
|
|
|
|
const MemoryAccess *Dominatee) const {
|
|
|
|
if (Dominator == Dominatee)
|
|
|
|
return true;
|
|
|
|
|
|
|
|
if (isLiveOnEntryDef(Dominatee))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
if (Dominator->getBlock() != Dominatee->getBlock())
|
|
|
|
return DT->dominates(Dominator->getBlock(), Dominatee->getBlock());
|
|
|
|
return locallyDominates(Dominator, Dominatee);
|
|
|
|
}
|
|
|
|
|
2016-08-06 05:46:52 +08:00
|
|
|
bool MemorySSA::dominates(const MemoryAccess *Dominator,
|
|
|
|
const Use &Dominatee) const {
|
|
|
|
if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Dominatee.getUser())) {
|
|
|
|
BasicBlock *UseBB = MP->getIncomingBlock(Dominatee);
|
|
|
|
// The def must dominate the incoming block of the phi.
|
|
|
|
if (UseBB != Dominator->getBlock())
|
|
|
|
return DT->dominates(Dominator->getBlock(), UseBB);
|
|
|
|
// If the UseBB and the DefBB are the same, compare locally.
|
|
|
|
return locallyDominates(Dominator, cast<MemoryAccess>(Dominatee));
|
|
|
|
}
|
|
|
|
// If it's not a PHI node use, the normal dominates can already handle it.
|
|
|
|
return dominates(Dominator, cast<MemoryAccess>(Dominatee.getUser()));
|
|
|
|
}
|
|
|
|
|
2016-02-03 06:46:49 +08:00
|
|
|
// Marker printed in place of an access ID for accesses without one (the
// live-on-entry def) — see the print() methods below.
const static char LiveOnEntryStr[] = "liveOnEntry";
|
|
|
|
|
[IR] De-virtualize ~Value to save a vptr
Summary:
Implements PR889
Removing the virtual table pointer from Value saves 1% of RSS when doing
LTO of llc on Linux. The impact on time was positive, but too noisy to
conclusively say that performance improved. Here is a link to the
spreadsheet with the original data:
https://docs.google.com/spreadsheets/d/1F4FHir0qYnV0MEp2sYYp_BuvnJgWlWPhWOwZ6LbW7W4/edit?usp=sharing
This change makes it invalid to directly delete a Value, User, or
Instruction pointer. Instead, such code can be rewritten to a null check
and a call Value::deleteValue(). Value objects tend to have their
lifetimes managed through iplist, so for the most part, this isn't a big
deal. However, there are some places where LLVM deletes values, and
those places had to be migrated to deleteValue. I have also created
llvm::unique_value, which has a custom deleter, so it can be used in
place of std::unique_ptr<Value>.
I had to add the "DerivedUser" Deleter escape hatch for MemorySSA, which
derives from User outside of lib/IR. Code in IR cannot include MemorySSA
headers or call the MemoryAccess object destructors without introducing
a circular dependency, so we need some level of indirection.
Unfortunately, no class derived from User may have any virtual methods,
because adding a virtual method would break User::getHungOffOperands(),
which assumes that it can find the use list immediately prior to the
User object. I've added a static_assert to the appropriate OperandTraits
templates to help people avoid this trap.
Reviewers: chandlerc, mehdi_amini, pete, dberlin, george.burgess.iv
Reviewed By: chandlerc
Subscribers: krytarowski, eraman, george.burgess.iv, mzolotukhin, Prazek, nlewycky, hans, inglorion, pcc, tejohnson, dberlin, llvm-commits
Differential Revision: https://reviews.llvm.org/D31261
llvm-svn: 303362
2017-05-19 01:24:10 +08:00
|
|
|
/// Dispatch printing to the concrete access kind, keyed on the value ID
/// (MemoryAccess itself has no virtual methods — see DerivedUser).
void MemoryAccess::print(raw_ostream &OS) const {
  switch (getValueID()) {
  case MemoryPhiVal:
    return static_cast<const MemoryPhi *>(this)->print(OS);
  case MemoryDefVal:
    return static_cast<const MemoryDef *>(this)->print(OS);
  case MemoryUseVal:
    return static_cast<const MemoryUse *>(this)->print(OS);
  }
  llvm_unreachable("invalid value id");
}
|
|
|
|
|
2016-02-03 06:46:49 +08:00
|
|
|
/// Print this def as "N = MemoryDef(defID)[->optimizedID [aliasresult]]".
void MemoryDef::print(raw_ostream &OS) const {
  // Print an access's ID, or the live-on-entry marker when it has none.
  auto PrintID = [&OS](MemoryAccess *A) {
    if (A && A->getID())
      OS << A->getID();
    else
      OS << LiveOnEntryStr;
  };

  OS << getID() << " = MemoryDef(";
  PrintID(getDefiningAccess());
  OS << ")";

  if (isOptimized()) {
    OS << "->";
    PrintID(getOptimized());

    if (Optional<AliasResult> AR = getOptimizedAccessType())
      OS << " " << *AR;
  }
}
|
|
|
|
|
|
|
|
/// Print this phi as "N = MemoryPhi({block,id},...)", one entry per incoming
/// value.
void MemoryPhi::print(raw_ostream &OS) const {
  OS << getID() << " = MemoryPhi(";
  for (unsigned I = 0, E = getNumOperands(); I != E; ++I) {
    BasicBlock *BB = getIncomingBlock(I);
    MemoryAccess *MA = getIncomingValue(I);
    if (I != 0)
      OS << ',';

    OS << '{';
    if (BB->hasName())
      OS << BB->getName();
    else
      BB->printAsOperand(OS, false);
    OS << ',';
    if (unsigned ID = MA->getID())
      OS << ID;
    else
      OS << LiveOnEntryStr;
    OS << '}';
  }
  OS << ')';
}
|
|
|
|
|
|
|
|
/// Print this use as "MemoryUse(defID)[ aliasresult]".
void MemoryUse::print(raw_ostream &OS) const {
  OS << "MemoryUse(";
  MemoryAccess *Def = getDefiningAccess();
  // A missing or ID-less defining access prints the live-on-entry marker.
  if (Def && Def->getID())
    OS << Def->getID();
  else
    OS << LiveOnEntryStr;
  OS << ')';

  if (Optional<AliasResult> AR = getOptimizedAccessType())
    OS << " " << *AR;
}
|
|
|
|
|
|
|
|
void MemoryAccess::dump() const {
|
2017-02-21 06:26:03 +08:00
|
|
|
// Cannot completely remove virtual function even in release mode.
|
2017-10-15 22:32:27 +08:00
|
|
|
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
|
2016-02-03 06:46:49 +08:00
|
|
|
print(dbgs());
|
|
|
|
dbgs() << "\n";
|
2017-01-28 10:02:38 +08:00
|
|
|
#endif
|
2016-02-03 06:46:49 +08:00
|
|
|
}
|
|
|
|
|
2016-07-07 05:20:47 +08:00
|
|
|
// Pass identification token; its address uniquely identifies the pass.
char MemorySSAPrinterLegacyPass::ID = 0;
|
|
|
|
|
|
|
|
// Register the printer pass with the legacy pass registry on construction.
MemorySSAPrinterLegacyPass::MemorySSAPrinterLegacyPass() : FunctionPass(ID) {
  initializeMemorySSAPrinterLegacyPassPass(*PassRegistry::getPassRegistry());
}
|
|
|
|
|
|
|
|
// The printer only reads MemorySSA; it invalidates no analyses.
void MemorySSAPrinterLegacyPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequired<MemorySSAWrapperPass>();
}
|
|
|
|
|
|
|
|
// Dump the function's MemorySSA to the debug stream, optionally verifying it
// first; never mutates the IR.
bool MemorySSAPrinterLegacyPass::runOnFunction(Function &F) {
  auto &SSA = getAnalysis<MemorySSAWrapperPass>().getMSSA();
  SSA.print(dbgs());
  if (VerifyMemorySSA)
    SSA.verifyMemorySSA();
  return false;
}
|
|
|
|
|
2016-11-24 01:53:26 +08:00
|
|
|
// Unique key identifying MemorySSAAnalysis to the new pass manager.
AnalysisKey MemorySSAAnalysis::Key;
|
2016-02-03 06:46:49 +08:00
|
|
|
|
2016-09-27 01:22:54 +08:00
|
|
|
// Build MemorySSA for \p F on top of alias analysis and the dominator tree.
MemorySSAAnalysis::Result MemorySSAAnalysis::run(Function &F,
                                                 FunctionAnalysisManager &AM) {
  auto &AA = AM.getResult<AAManager>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  return MemorySSAAnalysis::Result(llvm::make_unique<MemorySSA>(F, &AA, &DT));
}
|
|
|
|
|
2016-06-02 05:30:40 +08:00
|
|
|
// New-PM printer: emit the function's MemorySSA to the configured stream.
PreservedAnalyses MemorySSAPrinterPass::run(Function &F,
                                            FunctionAnalysisManager &AM) {
  OS << "MemorySSA for function: " << F.getName() << "\n";
  MemorySSA &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
  MSSA.print(OS);

  return PreservedAnalyses::all();
}
|
|
|
|
|
2016-06-02 05:30:40 +08:00
|
|
|
// New-PM verifier: run MemorySSA's internal consistency checks.
PreservedAnalyses MemorySSAVerifierPass::run(Function &F,
                                             FunctionAnalysisManager &AM) {
  MemorySSA &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
  MSSA.verifyMemorySSA();

  return PreservedAnalyses::all();
}
|
|
|
|
|
2016-06-02 05:30:40 +08:00
|
|
|
// Pass identification token; its address uniquely identifies the pass.
char MemorySSAWrapperPass::ID = 0;
|
2016-02-03 06:46:49 +08:00
|
|
|
|
2016-06-02 05:30:40 +08:00
|
|
|
// Register the wrapper pass with the legacy pass registry on construction.
MemorySSAWrapperPass::MemorySSAWrapperPass() : FunctionPass(ID) {
  initializeMemorySSAWrapperPassPass(*PassRegistry::getPassRegistry());
}
|
2016-02-03 06:46:49 +08:00
|
|
|
|
2016-06-02 05:30:40 +08:00
|
|
|
// Drop the owned MemorySSA when the pass manager frees this pass's state.
void MemorySSAWrapperPass::releaseMemory() { MSSA.reset(); }
|
2016-02-03 06:46:49 +08:00
|
|
|
|
2016-06-02 05:30:40 +08:00
|
|
|
// MemorySSA preserves everything; it transitively requires the dominator
// tree and alias analysis, which must outlive it for later queries.
void MemorySSAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequiredTransitive<DominatorTreeWrapperPass>();
  AU.addRequiredTransitive<AAResultsWrapperPass>();
}
|
|
|
|
|
2016-06-02 05:30:40 +08:00
|
|
|
// Construct MemorySSA for \p F; never mutates the IR.
bool MemorySSAWrapperPass::runOnFunction(Function &F) {
  auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
  MSSA.reset(new MemorySSA(F, &AA, &DT));
  return false;
}
|
|
|
|
|
2016-06-02 05:30:40 +08:00
|
|
|
// Delegate pass-manager verification to MemorySSA's own checker.
void MemorySSAWrapperPass::verifyAnalysis() const { MSSA->verifyMemorySSA(); }
|
2016-02-03 06:46:49 +08:00
|
|
|
|
2016-06-02 05:30:40 +08:00
|
|
|
// Print the computed MemorySSA; the Module argument is unused.
void MemorySSAWrapperPass::print(raw_ostream &OS, const Module *M) const {
  MSSA->print(OS);
}
|
|
|
|
|
|
|
|
// A walker is permanently bound to the MemorySSA instance it queries.
MemorySSAWalker::MemorySSAWalker(MemorySSA *M) : MSSA(M) {}
|
|
|
|
|
2016-06-25 05:02:12 +08:00
|
|
|
// The caching walker wraps a ClobberWalker over the same MSSA/AA/DT triple.
MemorySSA::CachingWalker::CachingWalker(MemorySSA *M, AliasAnalysis *A,
                                        DominatorTree *D)
    : MemorySSAWalker(M), Walker(*M, *A, *D) {}
|
2016-02-03 06:46:49 +08:00
|
|
|
|
2016-06-25 05:02:12 +08:00
|
|
|
// Drop any cached clobber result for \p MA; phis carry no such state, so
// only uses/defs need resetting.
void MemorySSA::CachingWalker::invalidateInfo(MemoryAccess *MA) {
  if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
    MUD->resetOptimized();
}
|
|
|
|
|
2018-05-01 23:54:18 +08:00
|
|
|
/// Walk the use-def chains starting at \p MA and find
|
2016-02-03 06:46:49 +08:00
|
|
|
/// the MemoryAccess that actually clobbers Loc.
|
|
|
|
///
|
|
|
|
/// \returns our clobbering memory access
|
2016-06-25 05:02:12 +08:00
|
|
|
MemoryAccess *MemorySSA::CachingWalker::getClobberingMemoryAccess(
|
|
|
|
MemoryAccess *StartingAccess, UpwardsMemoryQuery &Q) {
|
2018-04-10 07:09:27 +08:00
|
|
|
return Walker.findClobber(StartingAccess, Q);
|
2016-02-03 06:46:49 +08:00
|
|
|
}
|
|
|
|
|
2016-06-25 05:02:12 +08:00
|
|
|
/// Find the access clobbering the explicit location \p Loc, starting the
/// upward walk at \p StartingAccess.
MemoryAccess *MemorySSA::CachingWalker::getClobberingMemoryAccess(
    MemoryAccess *StartingAccess, const MemoryLocation &Loc) {
  // There is nothing to walk up from for a phi.
  if (isa<MemoryPhi>(StartingAccess))
    return StartingAccess;

  auto *StartingUseOrDef = cast<MemoryUseOrDef>(StartingAccess);
  if (MSSA->isLiveOnEntryDef(StartingUseOrDef))
    return StartingUseOrDef;

  Instruction *I = StartingUseOrDef->getMemoryInst();

  // Conservatively, fences are always clobbers, so don't perform the walk if we
  // hit a fence.
  if (!ImmutableCallSite(I) && I->isFenceLike())
    return StartingUseOrDef;

  UpwardsMemoryQuery Q;
  Q.OriginalAccess = StartingUseOrDef;
  Q.StartingLoc = Loc;
  Q.Inst = I;
  Q.IsCall = false;

  // Unlike the other function, do not walk to the def of a def, because we are
  // handed something we already believe is the clobbering access.
  MemoryAccess *DefiningAccess = StartingUseOrDef;
  if (isa<MemoryUse>(StartingUseOrDef))
    DefiningAccess = StartingUseOrDef->getDefiningAccess();

  MemoryAccess *Clobber = getClobberingMemoryAccess(DefiningAccess, Q);
  LLVM_DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
  LLVM_DEBUG(dbgs() << *StartingUseOrDef << "\n");
  LLVM_DEBUG(dbgs() << "Final Memory SSA clobber for " << *I << " is ");
  LLVM_DEBUG(dbgs() << *Clobber << "\n");
  return Clobber;
}
|
|
|
|
|
|
|
|
/// Find (and cache) the clobbering access for \p MA's own location, walking
/// upward from its defining access.
MemoryAccess *
MemorySSA::CachingWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
  auto *StartingAccess = dyn_cast<MemoryUseOrDef>(MA);
  // If this is a MemoryPhi, we can't do anything.
  if (!StartingAccess)
    return MA;

  // If this is an already optimized use or def, return the optimized result.
  // Note: Currently, we store the optimized def result in a separate field,
  // since we can't use the defining access.
  if (StartingAccess->isOptimized())
    return StartingAccess->getOptimized();

  const Instruction *I = StartingAccess->getMemoryInst();
  UpwardsMemoryQuery Q(I, StartingAccess);
  // We can't sanely do anything with a fence, since they conservatively clobber
  // all memory, and have no locations to get pointers from to try to
  // disambiguate.
  if (!Q.IsCall && I->isFenceLike())
    return StartingAccess;

  // Some uses never need a walk: cache live-on-entry immediately.
  if (isUseTriviallyOptimizableToLiveOnEntry(*MSSA->AA, I)) {
    MemoryAccess *LiveOnEntry = MSSA->getLiveOnEntryDef();
    StartingAccess->setOptimized(LiveOnEntry);
    StartingAccess->setOptimizedAccessType(None);
    return LiveOnEntry;
  }

  // Start with the thing we already think clobbers this location
  MemoryAccess *DefiningAccess = StartingAccess->getDefiningAccess();

  // At this point, DefiningAccess may be the live on entry def.
  // If it is, we will not get a better result.
  if (MSSA->isLiveOnEntryDef(DefiningAccess)) {
    StartingAccess->setOptimized(DefiningAccess);
    StartingAccess->setOptimizedAccessType(None);
    return DefiningAccess;
  }

  MemoryAccess *Result = getClobberingMemoryAccess(DefiningAccess, Q);
  LLVM_DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
  LLVM_DEBUG(dbgs() << *DefiningAccess << "\n");
  LLVM_DEBUG(dbgs() << "Final Memory SSA clobber for " << *I << " is ");
  LLVM_DEBUG(dbgs() << *Result << "\n");

  // Cache the result and, when the walk proved a must-alias, the access type.
  StartingAccess->setOptimized(Result);
  if (MSSA->isLiveOnEntryDef(Result))
    StartingAccess->setOptimizedAccessType(None);
  else if (Q.AR == MustAlias)
    StartingAccess->setOptimizedAccessType(MustAlias);

  return Result;
}
|
|
|
|
|
|
|
|
// The do-nothing walker performs no search: the best known clobber of a
// use/def is simply its defining access; phis are returned unchanged.
MemoryAccess *
DoNothingMemorySSAWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
  if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(MA))
    return UseOrDef->getDefiningAccess();
  return MA;
}
|
|
|
|
|
|
|
|
// Location-based variant of the above; the location is ignored entirely.
MemoryAccess *DoNothingMemorySSAWalker::getClobberingMemoryAccess(
    MemoryAccess *StartingAccess, const MemoryLocation &) {
  if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(StartingAccess))
    return UseOrDef->getDefiningAccess();
  return StartingAccess;
}
|
[IR] De-virtualize ~Value to save a vptr
Summary:
Implements PR889
Removing the virtual table pointer from Value saves 1% of RSS when doing
LTO of llc on Linux. The impact on time was positive, but too noisy to
conclusively say that performance improved. Here is a link to the
spreadsheet with the original data:
https://docs.google.com/spreadsheets/d/1F4FHir0qYnV0MEp2sYYp_BuvnJgWlWPhWOwZ6LbW7W4/edit?usp=sharing
This change makes it invalid to directly delete a Value, User, or
Instruction pointer. Instead, such code can be rewritten to a null check
and a call Value::deleteValue(). Value objects tend to have their
lifetimes managed through iplist, so for the most part, this isn't a big
deal. However, there are some places where LLVM deletes values, and
those places had to be migrated to deleteValue. I have also created
llvm::unique_value, which has a custom deleter, so it can be used in
place of std::unique_ptr<Value>.
I had to add the "DerivedUser" Deleter escape hatch for MemorySSA, which
derives from User outside of lib/IR. Code in IR cannot include MemorySSA
headers or call the MemoryAccess object destructors without introducing
a circular dependency, so we need some level of indirection.
Unfortunately, no class derived from User may have any virtual methods,
because adding a virtual method would break User::getHungOffOperands(),
which assumes that it can find the use list immediately prior to the
User object. I've added a static_assert to the appropriate OperandTraits
templates to help people avoid this trap.
Reviewers: chandlerc, mehdi_amini, pete, dberlin, george.burgess.iv
Reviewed By: chandlerc
Subscribers: krytarowski, eraman, george.burgess.iv, mzolotukhin, Prazek, nlewycky, hans, inglorion, pcc, tejohnson, dberlin, llvm-commits
Differential Revision: https://reviews.llvm.org/D31261
llvm-svn: 303362
2017-05-19 01:24:10 +08:00
|
|
|
|
|
|
|
// DerivedUser deleter hook: cast back to the concrete type so the correct
// destructor and storage size are used.
void MemoryPhi::deleteMe(DerivedUser *Self) {
  delete static_cast<MemoryPhi *>(Self);
}
|
|
|
|
|
|
|
|
// DerivedUser deleter hook for MemoryDef.
void MemoryDef::deleteMe(DerivedUser *Self) {
  delete static_cast<MemoryDef *>(Self);
}
|
|
|
|
|
|
|
|
// DerivedUser deleter hook for MemoryUse.
void MemoryUse::deleteMe(DerivedUser *Self) {
  delete static_cast<MemoryUse *>(Self);
}
|