2017-08-17 06:07:40 +08:00
|
|
|
//===- MemorySSA.cpp - Memory SSA Builder ---------------------------------===//
|
2016-02-03 06:46:49 +08:00
|
|
|
//
|
2019-01-19 16:50:56 +08:00
|
|
|
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
|
|
|
// See https://llvm.org/LICENSE.txt for license information.
|
|
|
|
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
2016-02-03 06:46:49 +08:00
|
|
|
//
|
2017-08-17 06:07:40 +08:00
|
|
|
//===----------------------------------------------------------------------===//
|
2016-02-03 06:46:49 +08:00
|
|
|
//
|
|
|
|
// This file implements the MemorySSA class.
|
|
|
|
//
|
2017-08-17 06:07:40 +08:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2017-04-12 04:06:36 +08:00
|
|
|
#include "llvm/Analysis/MemorySSA.h"
|
2016-02-03 06:46:49 +08:00
|
|
|
#include "llvm/ADT/DenseMap.h"
|
2017-08-17 06:07:40 +08:00
|
|
|
#include "llvm/ADT/DenseMapInfo.h"
|
2016-02-03 06:46:49 +08:00
|
|
|
#include "llvm/ADT/DenseSet.h"
|
|
|
|
#include "llvm/ADT/DepthFirstIterator.h"
|
2017-08-17 06:07:40 +08:00
|
|
|
#include "llvm/ADT/Hashing.h"
|
|
|
|
#include "llvm/ADT/None.h"
|
|
|
|
#include "llvm/ADT/Optional.h"
|
2016-02-03 06:46:49 +08:00
|
|
|
#include "llvm/ADT/STLExtras.h"
|
|
|
|
#include "llvm/ADT/SmallPtrSet.h"
|
2017-08-17 06:07:40 +08:00
|
|
|
#include "llvm/ADT/SmallVector.h"
|
2021-01-27 12:00:17 +08:00
|
|
|
#include "llvm/ADT/StringExtras.h"
|
2017-08-17 06:07:40 +08:00
|
|
|
#include "llvm/ADT/iterator.h"
|
|
|
|
#include "llvm/ADT/iterator_range.h"
|
2016-02-03 06:46:49 +08:00
|
|
|
#include "llvm/Analysis/AliasAnalysis.h"
|
2020-11-13 01:39:10 +08:00
|
|
|
#include "llvm/Analysis/CFGPrinter.h"
|
2016-02-03 06:46:49 +08:00
|
|
|
#include "llvm/Analysis/IteratedDominanceFrontier.h"
|
|
|
|
#include "llvm/Analysis/MemoryLocation.h"
|
2018-04-30 22:59:11 +08:00
|
|
|
#include "llvm/Config/llvm-config.h"
|
2016-02-03 06:46:49 +08:00
|
|
|
#include "llvm/IR/AssemblyAnnotationWriter.h"
|
2017-08-17 06:07:40 +08:00
|
|
|
#include "llvm/IR/BasicBlock.h"
|
2016-02-03 06:46:49 +08:00
|
|
|
#include "llvm/IR/Dominators.h"
|
2017-08-17 06:07:40 +08:00
|
|
|
#include "llvm/IR/Function.h"
|
|
|
|
#include "llvm/IR/Instruction.h"
|
|
|
|
#include "llvm/IR/Instructions.h"
|
2016-02-03 06:46:49 +08:00
|
|
|
#include "llvm/IR/IntrinsicInst.h"
|
2017-08-17 06:07:40 +08:00
|
|
|
#include "llvm/IR/Intrinsics.h"
|
2016-02-03 06:46:49 +08:00
|
|
|
#include "llvm/IR/LLVMContext.h"
|
2017-08-17 06:07:40 +08:00
|
|
|
#include "llvm/IR/PassManager.h"
|
|
|
|
#include "llvm/IR/Use.h"
|
Sink all InitializePasses.h includes
This file lists every pass in LLVM, and is included by Pass.h, which is
very popular. Every time we add, remove, or rename a pass in LLVM, it
caused lots of recompilation.
I found this fact by looking at this table, which is sorted by the
number of times a file was changed over the last 100,000 git commits
multiplied by the number of object files that depend on it in the
current checkout:
recompiles touches affected_files header
342380 95 3604 llvm/include/llvm/ADT/STLExtras.h
314730 234 1345 llvm/include/llvm/InitializePasses.h
307036 118 2602 llvm/include/llvm/ADT/APInt.h
213049 59 3611 llvm/include/llvm/Support/MathExtras.h
170422 47 3626 llvm/include/llvm/Support/Compiler.h
162225 45 3605 llvm/include/llvm/ADT/Optional.h
158319 63 2513 llvm/include/llvm/ADT/Triple.h
140322 39 3598 llvm/include/llvm/ADT/StringRef.h
137647 59 2333 llvm/include/llvm/Support/Error.h
131619 73 1803 llvm/include/llvm/Support/FileSystem.h
Before this change, touching InitializePasses.h would cause 1345 files
to recompile. After this change, touching it only causes 550 compiles in
an incremental rebuild.
Reviewers: bkramer, asbirlea, bollu, jdoerfert
Differential Revision: https://reviews.llvm.org/D70211
2019-11-14 05:15:01 +08:00
|
|
|
#include "llvm/InitializePasses.h"
|
2017-08-17 06:07:40 +08:00
|
|
|
#include "llvm/Pass.h"
|
|
|
|
#include "llvm/Support/AtomicOrdering.h"
|
|
|
|
#include "llvm/Support/Casting.h"
|
|
|
|
#include "llvm/Support/CommandLine.h"
|
|
|
|
#include "llvm/Support/Compiler.h"
|
2016-02-03 06:46:49 +08:00
|
|
|
#include "llvm/Support/Debug.h"
|
2017-08-17 06:07:40 +08:00
|
|
|
#include "llvm/Support/ErrorHandling.h"
|
2016-02-03 06:46:49 +08:00
|
|
|
#include "llvm/Support/FormattedStream.h"
|
2017-08-17 06:07:40 +08:00
|
|
|
#include "llvm/Support/raw_ostream.h"
|
2016-02-03 06:46:49 +08:00
|
|
|
#include <algorithm>
|
2017-08-17 06:07:40 +08:00
|
|
|
#include <cassert>
|
2019-08-01 01:41:04 +08:00
|
|
|
#include <cstdlib>
|
2017-08-17 06:07:40 +08:00
|
|
|
#include <iterator>
|
|
|
|
#include <memory>
|
|
|
|
#include <utility>
|
2016-02-03 06:46:49 +08:00
|
|
|
|
|
|
|
using namespace llvm;
|
2017-08-17 06:07:40 +08:00
|
|
|
|
|
|
|
#define DEBUG_TYPE "memoryssa"
|
|
|
|
|
2020-11-13 01:39:10 +08:00
|
|
|
// -dot-cfg-mssa=<file>: file name for a generated dot file; defaults to the
// empty string (i.e. no dot output requested).
// NOTE(review): presumably consumed by the MemorySSA printer passes to emit a
// MemorySSA-annotated CFG — confirm against the printer implementation.
static cl::opt<std::string>
    DotCFGMSSA("dot-cfg-mssa",
               cl::value_desc("file name for generated dot file"),
               cl::desc("file name for generated dot file"), cl::init(""));
|
|
|
|
|
2016-06-15 05:19:40 +08:00
|
|
|
// Legacy pass manager registration for the MemorySSA analysis wrapper.
// MemorySSA requires dominators and alias analysis to be computed first.
INITIALIZE_PASS_BEGIN(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
                      true)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
                    true)
|
2016-02-03 06:46:49 +08:00
|
|
|
|
2016-07-07 05:20:47 +08:00
|
|
|
// Legacy pass manager registration for the MemorySSA printer pass; it depends
// on the MemorySSAWrapperPass analysis whose results it prints.
INITIALIZE_PASS_BEGIN(MemorySSAPrinterLegacyPass, "print-memoryssa",
                      "Memory SSA Printer", false, false)
INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
INITIALIZE_PASS_END(MemorySSAPrinterLegacyPass, "print-memoryssa",
                    "Memory SSA Printer", false, false)
|
|
|
|
|
2016-08-03 00:24:03 +08:00
|
|
|
// -memssa-check-limit=<n>: cap on how many stores/phis MemorySSA will consider
// walking past before giving up (default 100).
//
// Fix: the two adjacent string literals in cl::desc previously concatenated to
// "...MemorySSAwill consider..." — a space was missing between them.
static cl::opt<unsigned> MaxCheckLimit(
    "memssa-check-limit", cl::Hidden, cl::init(100),
    cl::desc("The maximum number of stores/phis MemorySSA "
             "will consider trying to walk past (default = 100)"));
|
|
|
|
|
2018-08-16 01:34:55 +08:00
|
|
|
// Always verify MemorySSA if expensive checking is enabled.
#ifdef EXPENSIVE_CHECKS
bool llvm::VerifyMemorySSA = true;
#else
bool llvm::VerifyMemorySSA = false;
#endif

// -verify-memoryssa: command-line switch writing directly through to the
// global VerifyMemorySSA flag above via cl::location, so the option and the
// EXPENSIVE_CHECKS default share a single storage location.
static cl::opt<bool, true>
    VerifyMemorySSAX("verify-memoryssa", cl::location(VerifyMemorySSA),
                     cl::Hidden, cl::desc("Enable verification of MemorySSA."));
|
2016-07-07 05:20:47 +08:00
|
|
|
|
2021-09-03 02:21:05 +08:00
|
|
|
// Printable name used for MemorySSA's distinguished live-on-entry definition.
// Modernized: `static constexpr` instead of the unconventional `const static`
// ordering; the value is a compile-time constant.
static constexpr char LiveOnEntryStr[] = "liveOnEntry";
|
|
|
|
|
2021-09-01 05:43:21 +08:00
|
|
|
namespace {
|
2017-08-17 06:07:40 +08:00
|
|
|
|
2018-05-01 23:54:18 +08:00
|
|
|
/// An assembly annotator class to print Memory SSA information in
|
2016-02-03 06:46:49 +08:00
|
|
|
/// comments.
|
|
|
|
class MemorySSAAnnotatedWriter : public AssemblyAnnotationWriter {
|
|
|
|
const MemorySSA *MSSA;
|
|
|
|
|
|
|
|
public:
|
|
|
|
MemorySSAAnnotatedWriter(const MemorySSA *M) : MSSA(M) {}
|
|
|
|
|
2017-08-17 06:07:40 +08:00
|
|
|
void emitBasicBlockStartAnnot(const BasicBlock *BB,
|
|
|
|
formatted_raw_ostream &OS) override {
|
2016-02-03 06:46:49 +08:00
|
|
|
if (MemoryAccess *MA = MSSA->getMemoryAccess(BB))
|
|
|
|
OS << "; " << *MA << "\n";
|
|
|
|
}
|
|
|
|
|
2017-08-17 06:07:40 +08:00
|
|
|
void emitInstructionAnnot(const Instruction *I,
|
|
|
|
formatted_raw_ostream &OS) override {
|
2016-02-03 06:46:49 +08:00
|
|
|
if (MemoryAccess *MA = MSSA->getMemoryAccess(I))
|
|
|
|
OS << "; " << *MA << "\n";
|
|
|
|
}
|
|
|
|
};
|
2017-08-17 06:07:40 +08:00
|
|
|
|
2021-09-01 05:43:21 +08:00
|
|
|
/// An assembly annotator class to print Memory SSA information in
|
|
|
|
/// comments.
|
|
|
|
class MemorySSAWalkerAnnotatedWriter : public AssemblyAnnotationWriter {
|
|
|
|
MemorySSA *MSSA;
|
|
|
|
MemorySSAWalker *Walker;
|
|
|
|
|
|
|
|
public:
|
|
|
|
MemorySSAWalkerAnnotatedWriter(MemorySSA *M)
|
|
|
|
: MSSA(M), Walker(M->getWalker()) {}
|
|
|
|
|
|
|
|
void emitInstructionAnnot(const Instruction *I,
|
|
|
|
formatted_raw_ostream &OS) override {
|
|
|
|
if (MemoryAccess *MA = MSSA->getMemoryAccess(I)) {
|
|
|
|
MemoryAccess *Clobber = Walker->getClobberingMemoryAccess(MA);
|
|
|
|
OS << "; " << *MA;
|
2021-09-03 02:21:05 +08:00
|
|
|
if (Clobber) {
|
|
|
|
OS << " - clobbered by ";
|
|
|
|
if (MSSA->isLiveOnEntryDef(Clobber))
|
|
|
|
OS << LiveOnEntryStr;
|
|
|
|
else
|
|
|
|
OS << *Clobber;
|
|
|
|
}
|
2021-09-01 05:43:21 +08:00
|
|
|
OS << "\n";
|
|
|
|
}
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
} // namespace
|
[MemorySSA] Update to the new shiny walker.
This patch updates MemorySSA's use-optimizing walker to be more
accurate and, in some cases, faster.
Essentially, this changed our core walking algorithm from a
cache-as-you-go DFS to an iteratively expanded DFS, with all of the
caching happening at the end. Said expansion happens when we hit a Phi,
P; we'll try to do the smallest amount of work possible to see if
optimizing above that Phi is legal in the first place. If so, we'll
expand the search to see if we can optimize to the next phi, etc.
An iteratively expanded DFS lets us potentially quit earlier (because we
don't assume that we can optimize above all phis) than our old walker.
Additionally, because we don't cache as we go, we can now optimize above
loops.
As an added bonus, this patch adds a ton of verification (if
EXPENSIVE_CHECKS are enabled), so finding bugs is easier.
Differential Revision: https://reviews.llvm.org/D21777
llvm-svn: 275940
2016-07-19 09:29:15 +08:00
|
|
|
|
2016-08-03 05:57:52 +08:00
|
|
|
namespace {
|
2017-08-17 06:07:40 +08:00
|
|
|
|
2016-08-03 05:57:52 +08:00
|
|
|
/// Our current alias analysis API differentiates heavily between calls and
|
|
|
|
/// non-calls, and functions called on one usually assert on the other.
|
|
|
|
/// This class encapsulates the distinction to simplify other code that wants
|
|
|
|
/// "Memory affecting instructions and related data" to use as a key.
|
|
|
|
/// For example, this class is used as a densemap key in the use optimizer.
|
|
|
|
class MemoryLocOrCall {
|
|
|
|
public:
|
2017-08-17 06:07:40 +08:00
|
|
|
bool IsCall = false;
|
|
|
|
|
2016-08-03 05:57:52 +08:00
|
|
|
MemoryLocOrCall(MemoryUseOrDef *MUD)
|
|
|
|
: MemoryLocOrCall(MUD->getMemoryInst()) {}
|
2016-10-13 11:23:33 +08:00
|
|
|
MemoryLocOrCall(const MemoryUseOrDef *MUD)
|
|
|
|
: MemoryLocOrCall(MUD->getMemoryInst()) {}
|
2016-08-03 05:57:52 +08:00
|
|
|
|
|
|
|
MemoryLocOrCall(Instruction *Inst) {
|
2019-01-07 13:42:51 +08:00
|
|
|
if (auto *C = dyn_cast<CallBase>(Inst)) {
|
2016-08-03 05:57:52 +08:00
|
|
|
IsCall = true;
|
2019-01-07 13:42:51 +08:00
|
|
|
Call = C;
|
2016-08-03 05:57:52 +08:00
|
|
|
} else {
|
|
|
|
IsCall = false;
|
|
|
|
// There is no such thing as a memorylocation for a fence inst, and it is
|
|
|
|
// unique in that regard.
|
|
|
|
if (!isa<FenceInst>(Inst))
|
|
|
|
Loc = MemoryLocation::get(Inst);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-08-17 06:07:40 +08:00
|
|
|
explicit MemoryLocOrCall(const MemoryLocation &Loc) : Loc(Loc) {}
|
2016-08-03 05:57:52 +08:00
|
|
|
|
2019-01-07 13:42:51 +08:00
|
|
|
const CallBase *getCall() const {
|
2016-08-03 05:57:52 +08:00
|
|
|
assert(IsCall);
|
2019-01-07 13:42:51 +08:00
|
|
|
return Call;
|
2016-08-03 05:57:52 +08:00
|
|
|
}
|
2017-08-17 06:07:40 +08:00
|
|
|
|
2016-08-03 05:57:52 +08:00
|
|
|
MemoryLocation getLoc() const {
|
|
|
|
assert(!IsCall);
|
|
|
|
return Loc;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool operator==(const MemoryLocOrCall &Other) const {
|
|
|
|
if (IsCall != Other.IsCall)
|
|
|
|
return false;
|
|
|
|
|
2018-03-29 08:54:39 +08:00
|
|
|
if (!IsCall)
|
|
|
|
return Loc == Other.Loc;
|
|
|
|
|
2020-04-28 11:15:59 +08:00
|
|
|
if (Call->getCalledOperand() != Other.Call->getCalledOperand())
|
2018-03-29 08:54:39 +08:00
|
|
|
return false;
|
|
|
|
|
2019-01-07 13:42:51 +08:00
|
|
|
return Call->arg_size() == Other.Call->arg_size() &&
|
|
|
|
std::equal(Call->arg_begin(), Call->arg_end(),
|
|
|
|
Other.Call->arg_begin());
|
2016-08-03 05:57:52 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
private:
|
2016-10-22 12:15:41 +08:00
|
|
|
union {
|
2019-01-07 13:42:51 +08:00
|
|
|
const CallBase *Call;
|
2017-01-26 04:56:19 +08:00
|
|
|
MemoryLocation Loc;
|
2016-10-22 12:15:41 +08:00
|
|
|
};
|
2016-08-03 05:57:52 +08:00
|
|
|
};
|
2017-08-17 06:07:40 +08:00
|
|
|
|
|
|
|
} // end anonymous namespace
|
2016-08-03 05:57:52 +08:00
|
|
|
|
|
|
|
namespace llvm {
|
2017-08-17 06:07:40 +08:00
|
|
|
|
2016-08-03 05:57:52 +08:00
|
|
|
template <> struct DenseMapInfo<MemoryLocOrCall> {
|
|
|
|
static inline MemoryLocOrCall getEmptyKey() {
|
|
|
|
return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getEmptyKey());
|
|
|
|
}
|
2017-08-17 06:07:40 +08:00
|
|
|
|
2016-08-03 05:57:52 +08:00
|
|
|
static inline MemoryLocOrCall getTombstoneKey() {
|
|
|
|
return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getTombstoneKey());
|
|
|
|
}
|
2017-08-17 06:07:40 +08:00
|
|
|
|
2016-08-03 05:57:52 +08:00
|
|
|
static unsigned getHashValue(const MemoryLocOrCall &MLOC) {
|
2018-03-29 08:54:39 +08:00
|
|
|
if (!MLOC.IsCall)
|
|
|
|
return hash_combine(
|
|
|
|
MLOC.IsCall,
|
|
|
|
DenseMapInfo<MemoryLocation>::getHashValue(MLOC.getLoc()));
|
|
|
|
|
|
|
|
hash_code hash =
|
|
|
|
hash_combine(MLOC.IsCall, DenseMapInfo<const Value *>::getHashValue(
|
2020-04-28 11:15:59 +08:00
|
|
|
MLOC.getCall()->getCalledOperand()));
|
2018-03-29 08:54:39 +08:00
|
|
|
|
2019-01-07 13:42:51 +08:00
|
|
|
for (const Value *Arg : MLOC.getCall()->args())
|
2018-03-29 08:54:39 +08:00
|
|
|
hash = hash_combine(hash, DenseMapInfo<const Value *>::getHashValue(Arg));
|
|
|
|
return hash;
|
2016-08-03 05:57:52 +08:00
|
|
|
}
|
2017-08-17 06:07:40 +08:00
|
|
|
|
2016-08-03 05:57:52 +08:00
|
|
|
static bool isEqual(const MemoryLocOrCall &LHS, const MemoryLocOrCall &RHS) {
|
|
|
|
return LHS == RHS;
|
|
|
|
}
|
|
|
|
};
|
2016-08-03 08:01:46 +08:00
|
|
|
|
2017-08-17 06:07:40 +08:00
|
|
|
} // end namespace llvm
|
|
|
|
|
2016-08-04 03:39:54 +08:00
|
|
|
/// This does one-way checks to see if Use could theoretically be hoisted above
|
|
|
|
/// MayClobber. This will not check the other way around.
|
|
|
|
///
|
|
|
|
/// This assumes that, for the purposes of MemorySSA, Use comes directly after
|
|
|
|
/// MayClobber, with no potentially clobbering operations in between them.
|
|
|
|
/// (Where potentially clobbering ops are memory barriers, aliased stores, etc.)
|
2017-12-23 03:54:03 +08:00
|
|
|
static bool areLoadsReorderable(const LoadInst *Use,
|
|
|
|
const LoadInst *MayClobber) {
|
2016-08-04 03:39:54 +08:00
|
|
|
bool VolatileUse = Use->isVolatile();
|
|
|
|
bool VolatileClobber = MayClobber->isVolatile();
|
|
|
|
// Volatile operations may never be reordered with other volatile operations.
|
|
|
|
if (VolatileUse && VolatileClobber)
|
2017-12-23 03:54:03 +08:00
|
|
|
return false;
|
|
|
|
// Otherwise, volatile doesn't matter here. From the language reference:
|
|
|
|
// 'optimizers may change the order of volatile operations relative to
|
|
|
|
// non-volatile operations.'"
|
2016-08-04 03:39:54 +08:00
|
|
|
|
|
|
|
// If a load is seq_cst, it cannot be moved above other loads. If its ordering
|
|
|
|
// is weaker, it can be moved above other loads. We just need to be sure that
|
|
|
|
// MayClobber isn't an acquire load, because loads can't be moved above
|
|
|
|
// acquire loads.
|
|
|
|
//
|
|
|
|
// Note that this explicitly *does* allow the free reordering of monotonic (or
|
|
|
|
// weaker) loads of the same address.
|
|
|
|
bool SeqCstUse = Use->getOrdering() == AtomicOrdering::SequentiallyConsistent;
|
|
|
|
bool MayClobberIsAcquire = isAtLeastOrStrongerThan(MayClobber->getOrdering(),
|
|
|
|
AtomicOrdering::Acquire);
|
2017-12-23 03:54:03 +08:00
|
|
|
return !(SeqCstUse || MayClobberIsAcquire);
|
2016-08-04 03:39:54 +08:00
|
|
|
}
|
|
|
|
|
2018-03-09 02:03:14 +08:00
|
|
|
namespace {
|
|
|
|
|
|
|
|
struct ClobberAlias {
|
|
|
|
bool IsClobber;
|
|
|
|
Optional<AliasResult> AR;
|
|
|
|
};
|
|
|
|
|
|
|
|
} // end anonymous namespace
|
|
|
|
|
|
|
|
// Return a pair of {IsClobber (bool), AR (AliasResult)}. It relies on AR being
// ignored if IsClobber = false.
//
// MD is the candidate clobbering def; UseLoc/UseInst describe the access being
// walked for. UseInst may be null (location-only queries), hence the
// dyn_cast_or_null uses below. AA is either AAResults or BatchAAResults.
template <typename AliasAnalysisType>
static ClobberAlias
instructionClobbersQuery(const MemoryDef *MD, const MemoryLocation &UseLoc,
                         const Instruction *UseInst, AliasAnalysisType &AA) {
  Instruction *DefInst = MD->getMemoryInst();
  assert(DefInst && "Defining instruction not actually an instruction");
  Optional<AliasResult> AR;

  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(DefInst)) {
    // These intrinsics will show up as affecting memory, but they are just
    // markers, mostly.
    //
    // FIXME: We probably don't actually want MemorySSA to model these at all
    // (including creating MemoryAccesses for them): we just end up inventing
    // clobbers where they don't really exist at all. Please see D43269 for
    // context.
    switch (II->getIntrinsicID()) {
    case Intrinsic::invariant_start:
    case Intrinsic::invariant_end:
    case Intrinsic::assume:
    case Intrinsic::experimental_noalias_scope_decl:
      // Marker intrinsics never clobber.
      return {false, AliasResult(AliasResult::NoAlias)};
    case Intrinsic::dbg_addr:
    case Intrinsic::dbg_declare:
    case Intrinsic::dbg_label:
    case Intrinsic::dbg_value:
      llvm_unreachable("debuginfo shouldn't have associated defs!");
    default:
      break;
    }
  }

  // When the use is a call, defer entirely to AA's mod/ref reasoning between
  // the two instructions; any mod OR ref makes the def a clobber.
  if (auto *CB = dyn_cast_or_null<CallBase>(UseInst)) {
    ModRefInfo I = AA.getModRefInfo(DefInst, CB);
    AR = isMustSet(I) ? AliasResult::MustAlias : AliasResult::MayAlias;
    return {isModOrRefSet(I), AR};
  }

  // Load-vs-load has a bespoke rule based on volatility/atomic ordering.
  if (auto *DefLoad = dyn_cast<LoadInst>(DefInst))
    if (auto *UseLoad = dyn_cast_or_null<LoadInst>(UseInst))
      return {!areLoadsReorderable(UseLoad, DefLoad),
              AliasResult(AliasResult::MayAlias)};

  // General case: the def clobbers iff it may modify the use's location.
  ModRefInfo I = AA.getModRefInfo(DefInst, UseLoc);
  AR = isMustSet(I) ? AliasResult::MustAlias : AliasResult::MayAlias;
  return {isModSet(I), AR};
}
|
[MemorySSA] Update to the new shiny walker.
This patch updates MemorySSA's use-optimizing walker to be more
accurate and, in some cases, faster.
Essentially, this changed our core walking algorithm from a
cache-as-you-go DFS to an iteratively expanded DFS, with all of the
caching happening at the end. Said expansion happens when we hit a Phi,
P; we'll try to do the smallest amount of work possible to see if
optimizing above that Phi is legal in the first place. If so, we'll
expand the search to see if we can optimize to the next phi, etc.
An iteratively expanded DFS lets us potentially quit earlier (because we
don't assume that we can optimize above all phis) than our old walker.
Additionally, because we don't cache as we go, we can now optimize above
loops.
As an added bonus, this patch adds a ton of verification (if
EXPENSIVE_CHECKS are enabled), so finding bugs is easier.
Differential Revision: https://reviews.llvm.org/D21777
llvm-svn: 275940
2016-07-19 09:29:15 +08:00
|
|
|
|
[AliasAnalysis] Second prototype to cache BasicAA / anyAA state.
Summary:
Adding contained caching to AliasAnalysis. BasicAA is currently the only one using it.
AA changes:
- This patch is pulling the caches from BasicAAResults to AAResults, meaning the getModRefInfo call benefits from the IsCapturedCache as well when in "batch mode".
- All AAResultBase implementations add the QueryInfo member to all APIs. AAResults APIs maintain wrapper APIs such that all alias()/getModRefInfo call sites are unchanged.
- AA now provides a BatchAAResults type as a wrapper to AAResults. It keeps the AAResults instance and a QueryInfo instantiated to batch mode. It delegates all work to the AAResults instance with the batched QueryInfo. More API wrappers may be needed in BatchAAResults; only the minimum needed is currently added.
MemorySSA changes:
- All walkers are now templated on the AA used (AliasAnalysis=AAResults or BatchAAResults).
- At build time, we optimize uses; now we create a local walker (lives only as long as OptimizeUses does) using BatchAAResults.
- All Walkers have an internal AA and only use that now, never the AA in MemorySSA. The Walkers receive the AA they will use when built.
- The walker we use for queries after the build is instantiated on AliasAnalysis and is built after building MemorySSA and setting AA.
- All static methods doing walking are now templated on AliasAnalysisType if they are used both during build and after. If used only during build, the method now only takes a BatchAAResults. If used only after build, the method now takes an AliasAnalysis.
Subscribers: sanjoy, arsenm, jvesely, nhaehnle, jlebar, george.burgess.iv, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D59315
llvm-svn: 356783
2019-03-23 01:22:19 +08:00
|
|
|
template <typename AliasAnalysisType>
|
2018-03-09 02:03:14 +08:00
|
|
|
static ClobberAlias instructionClobbersQuery(MemoryDef *MD,
|
|
|
|
const MemoryUseOrDef *MU,
|
|
|
|
const MemoryLocOrCall &UseMLOC,
|
[AliasAnalysis] Second prototype to cache BasicAA / anyAA state.
Summary:
Adding contained caching to AliasAnalysis. BasicAA is currently the only one using it.
AA changes:
- This patch is pulling the caches from BasicAAResults to AAResults, meaning the getModRefInfo call benefits from the IsCapturedCache as well when in "batch mode".
- All AAResultBase implementations add the QueryInfo member to all APIs. AAResults APIs maintain wrapper APIs such that all alias()/getModRefInfo call sites are unchanged.
- AA now provides a BatchAAResults type as a wrapper to AAResults. It keeps the AAResults instance and a QueryInfo instantiated to batch mode. It delegates all work to the AAResults instance with the batched QueryInfo. More API wrappers may be needed in BatchAAResults; only the minimum needed is currently added.
MemorySSA changes:
- All walkers are now templated on the AA used (AliasAnalysis=AAResults or BatchAAResults).
- At build time, we optimize uses; now we create a local walker (lives only as long as OptimizeUses does) using BatchAAResults.
- All Walkers have an internal AA and only use that now, never the AA in MemorySSA. The Walkers receive the AA they will use when built.
- The walker we use for queries after the build is instantiated on AliasAnalysis and is built after building MemorySSA and setting AA.
- All static methods doing walking are now templated on AliasAnalysisType if they are used both during build and after. If used only during build, the method now only takes a BatchAAResults. If used only after build, the method now takes an AliasAnalysis.
Subscribers: sanjoy, arsenm, jvesely, nhaehnle, jlebar, george.burgess.iv, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D59315
llvm-svn: 356783
2019-03-23 01:22:19 +08:00
|
|
|
AliasAnalysisType &AA) {
|
2016-10-13 11:23:33 +08:00
|
|
|
// FIXME: This is a temporary hack to allow a single instructionClobbersQuery
|
|
|
|
// to exist while MemoryLocOrCall is pushed through places.
|
|
|
|
if (UseMLOC.IsCall)
|
|
|
|
return instructionClobbersQuery(MD, MemoryLocation(), MU->getMemoryInst(),
|
|
|
|
AA);
|
|
|
|
return instructionClobbersQuery(MD, UseMLOC.getLoc(), MU->getMemoryInst(),
|
|
|
|
AA);
|
|
|
|
}
|
|
|
|
|
2016-10-13 09:39:10 +08:00
|
|
|
// Return true when MD may alias MU, return false otherwise.
|
2017-03-03 07:06:46 +08:00
|
|
|
bool MemorySSAUtil::defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU,
|
|
|
|
AliasAnalysis &AA) {
|
2018-03-09 02:03:14 +08:00
|
|
|
return instructionClobbersQuery(MD, MU, MemoryLocOrCall(MU), AA).IsClobber;
|
2016-10-13 09:39:10 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
namespace {
|
2017-08-17 06:07:40 +08:00
|
|
|
|
2016-10-13 09:39:10 +08:00
|
|
|
/// State for one upwards clobber-walk query over MemorySSA.
struct UpwardsMemoryQuery {
  // True if our original query started off as a call
  bool IsCall = false;
  // The pointer location we started the query with. This will be empty if
  // IsCall is true.
  MemoryLocation StartingLoc;
  // This is the instruction we were querying about.
  const Instruction *Inst = nullptr;
  // The MemoryAccess we actually got called with, used to test local domination
  const MemoryAccess *OriginalAccess = nullptr;
  // Starts as MayAlias; NOTE(review): appears to record the alias result
  // refined during the walk — confirm against the walker that fills it in.
  Optional<AliasResult> AR = AliasResult(AliasResult::MayAlias);
  // NOTE(review): presumably tells the walker to step past the query's own
  // access (used for skip-self queries) — confirm at the use sites.
  bool SkipSelfAccess = false;

  UpwardsMemoryQuery() = default;

  // Build a query for Inst/Access; non-calls also capture their location.
  UpwardsMemoryQuery(const Instruction *Inst, const MemoryAccess *Access)
      : IsCall(isa<CallBase>(Inst)), Inst(Inst), OriginalAccess(Access) {
    if (!IsCall)
      StartingLoc = MemoryLocation::get(Inst);
  }
};
|
|
|
|
|
2017-08-17 06:07:40 +08:00
|
|
|
} // end anonymous namespace
|
|
|
|
|
[AliasAnalysis] Second prototype to cache BasicAA / anyAA state.
Summary:
Adding contained caching to AliasAnalysis. BasicAA is currently the only one using it.
AA changes:
- This patch is pulling the caches from BasicAAResults to AAResults, meaning the getModRefInfo call benefits from the IsCapturedCache as well when in "batch mode".
- All AAResultBase implementations add the QueryInfo member to all APIs. AAResults APIs maintain wrapper APIs such that all alias()/getModRefInfo call sites are unchanged.
- AA now provides a BatchAAResults type as a wrapper to AAResults. It keeps the AAResults instance and a QueryInfo instantiated to batch mode. It delegates all work to the AAResults instance with the batched QueryInfo. More API wrappers may be needed in BatchAAResults; only the minimum needed is currently added.
MemorySSA changes:
- All walkers are now templated on the AA used (AliasAnalysis=AAResults or BatchAAResults).
- At build time, we optimize uses; now we create a local walker (lives only as long as OptimizeUses does) using BatchAAResults.
- All Walkers have an internal AA and only use that now, never the AA in MemorySSA. The Walkers receive the AA they will use when built.
- The walker we use for queries after the build is instantiated on AliasAnalysis and is built after building MemorySSA and setting AA.
- All static methods doing walking are now templated on AliasAnalysisType if they are used both during build and after. If used only during build, the method now only takes a BatchAAResults. If used only after build, the method now takes an AliasAnalysis.
Subscribers: sanjoy, arsenm, jvesely, nhaehnle, jlebar, george.burgess.iv, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D59315
llvm-svn: 356783
2019-03-23 01:22:19 +08:00
|
|
|
template <typename AliasAnalysisType>
|
|
|
|
static bool isUseTriviallyOptimizableToLiveOnEntry(AliasAnalysisType &AA,
|
2016-10-13 09:39:10 +08:00
|
|
|
const Instruction *I) {
|
|
|
|
// If the memory can't be changed, then loads of the memory can't be
|
|
|
|
// clobbered.
|
2020-11-20 04:41:51 +08:00
|
|
|
if (auto *LI = dyn_cast<LoadInst>(I))
|
|
|
|
return I->hasMetadata(LLVMContext::MD_invariant_load) ||
|
|
|
|
AA.pointsToConstantMemory(MemoryLocation::get(LI));
|
|
|
|
return false;
|
2016-10-13 09:39:10 +08:00
|
|
|
}
|
|
|
|
|
[MemorySSA] Update to the new shiny walker.
This patch updates MemorySSA's use-optimizing walker to be more
accurate and, in some cases, faster.
Essentially, this changed our core walking algorithm from a
cache-as-you-go DFS to an iteratively expanded DFS, with all of the
caching happening at the end. Said expansion happens when we hit a Phi,
P; we'll try to do the smallest amount of work possible to see if
optimizing above that Phi is legal in the first place. If so, we'll
expand the search to see if we can optimize to the next phi, etc.
An iteratively expanded DFS lets us potentially quit earlier (because we
don't assume that we can optimize above all phis) than our old walker.
Additionally, because we don't cache as we go, we can now optimize above
loops.
As an added bonus, this patch adds a ton of verification (if
EXPENSIVE_CHECKS are enabled), so finding bugs is easier.
Differential Revision: https://reviews.llvm.org/D21777
llvm-svn: 275940
2016-07-19 09:29:15 +08:00
|
|
|
/// Verifies that `Start` is clobbered by `ClobberAt`, and that nothing
|
|
|
|
/// inbetween `Start` and `ClobberAt` can clobbers `Start`.
|
|
|
|
///
|
|
|
|
/// This is meant to be as simple and self-contained as possible. Because it
|
|
|
|
/// uses no cache, etc., it can be relatively expensive.
|
|
|
|
///
|
|
|
|
/// \param Start The MemoryAccess that we want to walk from.
|
|
|
|
/// \param ClobberAt A clobber for Start.
|
|
|
|
/// \param StartLoc The MemoryLocation for Start.
|
2018-08-30 02:26:04 +08:00
|
|
|
/// \param MSSA The MemorySSA instance that Start and ClobberAt belong to.
|
[MemorySSA] Update to the new shiny walker.
This patch updates MemorySSA's use-optimizing walker to be more
accurate and, in some cases, faster.
Essentially, this changed our core walking algorithm from a
cache-as-you-go DFS to an iteratively expanded DFS, with all of the
caching happening at the end. Said expansion happens when we hit a Phi,
P; we'll try to do the smallest amount of work possible to see if
optimizing above that Phi is legal in the first place. If so, we'll
expand the search to see if we can optimize to the next phi, etc.
An iteratively expanded DFS lets us potentially quit earlier (because we
don't assume that we can optimize above all phis) than our old walker.
Additionally, because we don't cache as we go, we can now optimize above
loops.
As an added bonus, this patch adds a ton of verification (if
EXPENSIVE_CHECKS are enabled), so finding bugs is easier.
Differential Revision: https://reviews.llvm.org/D21777
llvm-svn: 275940
2016-07-19 09:29:15 +08:00
|
|
|
/// \param Query The UpwardsMemoryQuery we used for our search.
|
|
|
|
/// \param AA The AliasAnalysis we used for our search.
|
2018-09-08 07:51:41 +08:00
|
|
|
/// \param AllowImpreciseClobber Always false, unless we do relaxed verify.
|
[AliasAnalysis] Second prototype to cache BasicAA / anyAA state.
Summary:
Adding contained caching to AliasAnalysis. BasicAA is currently the only one using it.
AA changes:
- This patch is pulling the caches from BasicAAResults to AAResults, meaning the getModRefInfo call benefits from the IsCapturedCache as well when in "batch mode".
- All AAResultBase implementations add the QueryInfo member to all APIs. AAResults APIs maintain wrapper APIs such that all alias()/getModRefInfo call sites are unchanged.
- AA now provides a BatchAAResults type as a wrapper to AAResults. It keeps the AAResults instance and a QueryInfo instantiated to batch mode. It delegates all work to the AAResults instance with the batched QueryInfo. More API wrappers may be needed in BatchAAResults; only the minimum needed is currently added.
MemorySSA changes:
- All walkers are now templated on the AA used (AliasAnalysis=AAResults or BatchAAResults).
- At build time, we optimize uses; now we create a local walker (lives only as long as OptimizeUses does) using BatchAAResults.
- All Walkers have an internal AA and only use that now, never the AA in MemorySSA. The Walkers receive the AA they will use when built.
- The walker we use for queries after the build is instantiated on AliasAnalysis and is built after building MemorySSA and setting AA.
- All static methods doing walking are now templated on AliasAnalysisType if they are used both during build and after. If used only during build, the method now only takes a BatchAAResults. If used only after build, the method now takes an AliasAnalysis.
Subscribers: sanjoy, arsenm, jvesely, nhaehnle, jlebar, george.burgess.iv, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D59315
llvm-svn: 356783
2019-03-23 01:22:19 +08:00
|
|
|
|
|
|
|
template <typename AliasAnalysisType>
LLVM_ATTRIBUTE_UNUSED static void
checkClobberSanity(const MemoryAccess *Start, MemoryAccess *ClobberAt,
                   const MemoryLocation &StartLoc, const MemorySSA &MSSA,
                   const UpwardsMemoryQuery &Query, AliasAnalysisType &AA,
                   bool AllowImpreciseClobber = false) {
  assert(MSSA.dominates(ClobberAt, Start) && "Clobber doesn't dominate start?");

  // liveOnEntry has no defining access above it; the only legal clobber for it
  // is liveOnEntry itself.
  if (MSSA.isLiveOnEntryDef(Start)) {
    assert(MSSA.isLiveOnEntryDef(ClobberAt) &&
           "liveOnEntry must clobber itself");
    return;
  }

  bool FoundClobber = false;
  // Worklist of <MemoryAccess, MemoryLocation> pairs still to explore; pairs
  // already seen are recorded in VisitedPhis so each (access, location) pair
  // is processed at most once.
  DenseSet<ConstMemoryAccessPair> VisitedPhis;
  SmallVector<ConstMemoryAccessPair, 8> Worklist;
  Worklist.emplace_back(Start, StartLoc);
  // Walk all paths from Start to ClobberAt, while looking for clobbers. If one
  // is found, complain.
  while (!Worklist.empty()) {
    auto MAP = Worklist.pop_back_val();
    // All we care about is that nothing from Start to ClobberAt clobbers Start.
    // We learn nothing from revisiting nodes.
    if (!VisitedPhis.insert(MAP).second)
      continue;

    for (const auto *MA : def_chain(MAP.first)) {
      if (MA == ClobberAt) {
        if (const auto *MD = dyn_cast<MemoryDef>(MA)) {
          // instructionClobbersQuery isn't essentially free, so don't use `|=`,
          // since it won't let us short-circuit.
          //
          // Also, note that this can't be hoisted out of the `Worklist` loop,
          // since MD may only act as a clobber for 1 of N MemoryLocations.
          FoundClobber = FoundClobber || MSSA.isLiveOnEntryDef(MD);
          if (!FoundClobber) {
            ClobberAlias CA =
                instructionClobbersQuery(MD, MAP.second, Query.Inst, AA);
            if (CA.IsClobber) {
              FoundClobber = true;
              // Not used: CA.AR;
            }
          }
        }
        // Reached the claimed clobber on this path; stop walking this chain.
        break;
      }

      // We should never hit liveOnEntry, unless it's the clobber.
      assert(!MSSA.isLiveOnEntryDef(MA) && "Hit liveOnEntry before clobber?");

      if (const auto *MD = dyn_cast<MemoryDef>(MA)) {
        // If Start is a Def, skip self.
        if (MD == Start)
          continue;

        // Any other Def on the path must NOT clobber the query; otherwise
        // ClobberAt was not the nearest clobber.
        assert(!instructionClobbersQuery(MD, MAP.second, Query.Inst, AA)
                    .IsClobber &&
               "Found clobber before reaching ClobberAt!");
        continue;
      }

      if (const auto *MU = dyn_cast<MemoryUse>(MA)) {
        (void)MU;
        assert (MU == Start &&
                "Can only find use in def chain if Start is a use");
        continue;
      }

      assert(isa<MemoryPhi>(MA));

      // Add reachable phi predecessors
      for (auto ItB = upward_defs_begin(
                    {const_cast<MemoryAccess *>(MA), MAP.second},
                    MSSA.getDomTree()),
                ItE = upward_defs_end();
           ItB != ItE; ++ItB)
        if (MSSA.getDomTree().isReachableFromEntry(ItB.getPhiArgBlock()))
          Worklist.emplace_back(*ItB);
    }
  }

  // If the verify is done following an optimization, it's possible that
  // ClobberAt was a conservative clobbering, that we can now infer is not a
  // true clobbering access. Don't fail the verify if that's the case.
  // We do have accesses that claim they're optimized, but could be optimized
  // further. Updating all these can be expensive, so allow it for now (FIXME).
  if (AllowImpreciseClobber)
    return;

  // If ClobberAt is a MemoryPhi, we can assume something above it acted as a
  // clobber. Otherwise, `ClobberAt` should've acted as a clobber at some point.
  assert((isa<MemoryPhi>(ClobberAt) || FoundClobber) &&
         "ClobberAt never acted as a clobber");
}
|
|
|
|
|
2017-08-17 06:07:40 +08:00
|
|
|
namespace {
|
|
|
|
|
[MemorySSA] Update to the new shiny walker.
This patch updates MemorySSA's use-optimizing walker to be more
accurate and, in some cases, faster.
Essentially, this changed our core walking algorithm from a
cache-as-you-go DFS to an iteratively expanded DFS, with all of the
caching happening at the end. Said expansion happens when we hit a Phi,
P; we'll try to do the smallest amount of work possible to see if
optimizing above that Phi is legal in the first place. If so, we'll
expand the search to see if we can optimize to the next phi, etc.
An iteratively expanded DFS lets us potentially quit earlier (because we
don't assume that we can optimize above all phis) than our old walker.
Additionally, because we don't cache as we go, we can now optimize above
loops.
As an added bonus, this patch adds a ton of verification (if
EXPENSIVE_CHECKS are enabled), so finding bugs is easier.
Differential Revision: https://reviews.llvm.org/D21777
llvm-svn: 275940
2016-07-19 09:29:15 +08:00
|
|
|
/// Our algorithm for walking (and trying to optimize) clobbers, all wrapped up
|
|
|
|
/// in one class.
|
[AliasAnalysis] Second prototype to cache BasicAA / anyAA state.
Summary:
Adding contained caching to AliasAnalysis. BasicAA is currently the only one using it.
AA changes:
- This patch is pulling the caches from BasicAAResults to AAResults, meaning the getModRefInfo call benefits from the IsCapturedCache as well when in "batch mode".
- All AAResultBase implementations add the QueryInfo member to all APIs. AAResults APIs maintain wrapper APIs such that all alias()/getModRefInfo call sites are unchanged.
- AA now provides a BatchAAResults type as a wrapper to AAResults. It keeps the AAResults instance and a QueryInfo instantiated to batch mode. It delegates all work to the AAResults instance with the batched QueryInfo. More API wrappers may be needed in BatchAAResults; only the minimum needed is currently added.
MemorySSA changes:
- All walkers are now templated on the AA used (AliasAnalysis=AAResults or BatchAAResults).
- At build time, we optimize uses; now we create a local walker (lives only as long as OptimizeUses does) using BatchAAResults.
- All Walkers have an internal AA and only use that now, never the AA in MemorySSA. The Walkers receive the AA they will use when built.
- The walker we use for queries after the build is instantiated on AliasAnalysis and is built after building MemorySSA and setting AA.
- All static methods doing walking are now templated on AliasAnalysisType if they are used both during build and after. If used only during build, the method now only takes a BatchAAResults. If used only after build, the method now takes an AliasAnalysis.
Subscribers: sanjoy, arsenm, jvesely, nhaehnle, jlebar, george.burgess.iv, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D59315
llvm-svn: 356783
2019-03-23 01:22:19 +08:00
|
|
|
template <class AliasAnalysisType> class ClobberWalker {
|
[MemorySSA] Update to the new shiny walker.
This patch updates MemorySSA's use-optimizing walker to be more
accurate and, in some cases, faster.
Essentially, this changed our core walking algorithm from a
cache-as-you-go DFS to an iteratively expanded DFS, with all of the
caching happening at the end. Said expansion happens when we hit a Phi,
P; we'll try to do the smallest amount of work possible to see if
optimizing above that Phi is legal in the first place. If so, we'll
expand the search to see if we can optimize to the next phi, etc.
An iteratively expanded DFS lets us potentially quit earlier (because we
don't assume that we can optimize above all phis) than our old walker.
Additionally, because we don't cache as we go, we can now optimize above
loops.
As an added bonus, this patch adds a ton of verification (if
EXPENSIVE_CHECKS are enabled), so finding bugs is easier.
Differential Revision: https://reviews.llvm.org/D21777
llvm-svn: 275940
2016-07-19 09:29:15 +08:00
|
|
|
/// Save a few bytes by using unsigned instead of size_t.
|
|
|
|
using ListIndex = unsigned;
|
|
|
|
|
|
|
|
/// Represents a span of contiguous MemoryDefs, potentially ending in a
|
|
|
|
/// MemoryPhi.
|
|
|
|
struct DefPath {
|
|
|
|
MemoryLocation Loc;
|
|
|
|
// Note that, because we always walk in reverse, Last will always dominate
|
|
|
|
// First. Also note that First and Last are inclusive.
|
|
|
|
MemoryAccess *First;
|
|
|
|
MemoryAccess *Last;
|
|
|
|
Optional<ListIndex> Previous;
|
|
|
|
|
|
|
|
DefPath(const MemoryLocation &Loc, MemoryAccess *First, MemoryAccess *Last,
|
|
|
|
Optional<ListIndex> Previous)
|
|
|
|
: Loc(Loc), First(First), Last(Last), Previous(Previous) {}
|
|
|
|
|
|
|
|
DefPath(const MemoryLocation &Loc, MemoryAccess *Init,
|
|
|
|
Optional<ListIndex> Previous)
|
|
|
|
: DefPath(Loc, Init, Init, Previous) {}
|
|
|
|
};
|
|
|
|
|
|
|
|
const MemorySSA &MSSA;
|
[AliasAnalysis] Second prototype to cache BasicAA / anyAA state.
Summary:
Adding contained caching to AliasAnalysis. BasicAA is currently the only one using it.
AA changes:
- This patch is pulling the caches from BasicAAResults to AAResults, meaning the getModRefInfo call benefits from the IsCapturedCache as well when in "batch mode".
- All AAResultBase implementations add the QueryInfo member to all APIs. AAResults APIs maintain wrapper APIs such that all alias()/getModRefInfo call sites are unchanged.
- AA now provides a BatchAAResults type as a wrapper to AAResults. It keeps the AAResults instance and a QueryInfo instantiated to batch mode. It delegates all work to the AAResults instance with the batched QueryInfo. More API wrappers may be needed in BatchAAResults; only the minimum needed is currently added.
MemorySSA changes:
- All walkers are now templated on the AA used (AliasAnalysis=AAResults or BatchAAResults).
- At build time, we optimize uses; now we create a local walker (lives only as long as OptimizeUses does) using BatchAAResults.
- All Walkers have an internal AA and only use that now, never the AA in MemorySSA. The Walkers receive the AA they will use when built.
- The walker we use for queries after the build is instantiated on AliasAnalysis and is built after building MemorySSA and setting AA.
- All static methods doing walking are now templated on AliasAnalysisType if they are used both during build and after. If used only during build, the method now only takes a BatchAAResults. If used only after build, the method now takes an AliasAnalysis.
Subscribers: sanjoy, arsenm, jvesely, nhaehnle, jlebar, george.burgess.iv, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D59315
llvm-svn: 356783
2019-03-23 01:22:19 +08:00
|
|
|
AliasAnalysisType &AA;
|
[MemorySSA] Update to the new shiny walker.
This patch updates MemorySSA's use-optimizing walker to be more
accurate and, in some cases, faster.
Essentially, this changed our core walking algorithm from a
cache-as-you-go DFS to an iteratively expanded DFS, with all of the
caching happening at the end. Said expansion happens when we hit a Phi,
P; we'll try to do the smallest amount of work possible to see if
optimizing above that Phi is legal in the first place. If so, we'll
expand the search to see if we can optimize to the next phi, etc.
An iteratively expanded DFS lets us potentially quit earlier (because we
don't assume that we can optimize above all phis) than our old walker.
Additionally, because we don't cache as we go, we can now optimize above
loops.
As an added bonus, this patch adds a ton of verification (if
EXPENSIVE_CHECKS are enabled), so finding bugs is easier.
Differential Revision: https://reviews.llvm.org/D21777
llvm-svn: 275940
2016-07-19 09:29:15 +08:00
|
|
|
DominatorTree &DT;
|
|
|
|
UpwardsMemoryQuery *Query;
|
2019-03-30 05:56:09 +08:00
|
|
|
unsigned *UpwardWalkLimit;
|
[MemorySSA] Update to the new shiny walker.
This patch updates MemorySSA's use-optimizing walker to be more
accurate and, in some cases, faster.
Essentially, this changed our core walking algorithm from a
cache-as-you-go DFS to an iteratively expanded DFS, with all of the
caching happening at the end. Said expansion happens when we hit a Phi,
P; we'll try to do the smallest amount of work possible to see if
optimizing above that Phi is legal in the first place. If so, we'll
expand the search to see if we can optimize to the next phi, etc.
An iteratively expanded DFS lets us potentially quit earlier (because we
don't assume that we can optimize above all phis) than our old walker.
Additionally, because we don't cache as we go, we can now optimize above
loops.
As an added bonus, this patch adds a ton of verification (if
EXPENSIVE_CHECKS are enabled), so finding bugs is easier.
Differential Revision: https://reviews.llvm.org/D21777
llvm-svn: 275940
2016-07-19 09:29:15 +08:00
|
|
|
|
2020-06-26 07:50:15 +08:00
|
|
|
// Phi optimization bookkeeping:
|
|
|
|
// List of DefPath to process during the current phi optimization walk.
|
[MemorySSA] Update to the new shiny walker.
This patch updates MemorySSA's use-optimizing walker to be more
accurate and, in some cases, faster.
Essentially, this changed our core walking algorithm from a
cache-as-you-go DFS to an iteratively expanded DFS, with all of the
caching happening at the end. Said expansion happens when we hit a Phi,
P; we'll try to do the smallest amount of work possible to see if
optimizing above that Phi is legal in the first place. If so, we'll
expand the search to see if we can optimize to the next phi, etc.
An iteratively expanded DFS lets us potentially quit earlier (because we
don't assume that we can optimize above all phis) than our old walker.
Additionally, because we don't cache as we go, we can now optimize above
loops.
As an added bonus, this patch adds a ton of verification (if
EXPENSIVE_CHECKS are enabled), so finding bugs is easier.
Differential Revision: https://reviews.llvm.org/D21777
llvm-svn: 275940
2016-07-19 09:29:15 +08:00
|
|
|
SmallVector<DefPath, 32> Paths;
|
2020-06-26 07:50:15 +08:00
|
|
|
// List of visited <Access, Location> pairs; we can skip paths already
|
|
|
|
// visited with the same memory location.
|
[MemorySSA] Update to the new shiny walker.
This patch updates MemorySSA's use-optimizing walker to be more
accurate and, in some cases, faster.
Essentially, this changed our core walking algorithm from a
cache-as-you-go DFS to an iteratively expanded DFS, with all of the
caching happening at the end. Said expansion happens when we hit a Phi,
P; we'll try to do the smallest amount of work possible to see if
optimizing above that Phi is legal in the first place. If so, we'll
expand the search to see if we can optimize to the next phi, etc.
An iteratively expanded DFS lets us potentially quit earlier (because we
don't assume that we can optimize above all phis) than our old walker.
Additionally, because we don't cache as we go, we can now optimize above
loops.
As an added bonus, this patch adds a ton of verification (if
EXPENSIVE_CHECKS are enabled), so finding bugs is easier.
Differential Revision: https://reviews.llvm.org/D21777
llvm-svn: 275940
2016-07-19 09:29:15 +08:00
|
|
|
DenseSet<ConstMemoryAccessPair> VisitedPhis;
|
2020-06-26 07:50:15 +08:00
|
|
|
// Record if phi translation has been performed during the current phi
|
|
|
|
// optimization walk, as merging alias results after phi translation can
|
|
|
|
// yield incorrect results. Context in PR46156.
|
|
|
|
bool PerformedPhiTranslation = false;
|
[MemorySSA] Update to the new shiny walker.
This patch updates MemorySSA's use-optimizing walker to be more
accurate and, in some cases, faster.
Essentially, this changed our core walking algorithm from a
cache-as-you-go DFS to an iteratively expanded DFS, with all of the
caching happening at the end. Said expansion happens when we hit a Phi,
P; we'll try to do the smallest amount of work possible to see if
optimizing above that Phi is legal in the first place. If so, we'll
expand the search to see if we can optimize to the next phi, etc.
An iteratively expanded DFS lets us potentially quit earlier (because we
don't assume that we can optimize above all phis) than our old walker.
Additionally, because we don't cache as we go, we can now optimize above
loops.
As an added bonus, this patch adds a ton of verification (if
EXPENSIVE_CHECKS are enabled), so finding bugs is easier.
Differential Revision: https://reviews.llvm.org/D21777
llvm-svn: 275940
2016-07-19 09:29:15 +08:00
|
|
|
|
|
|
|
/// Find the nearest def or phi that `From` can legally be optimized to.
|
2017-04-01 17:01:12 +08:00
|
|
|
const MemoryAccess *getWalkTarget(const MemoryPhi *From) const {
|
[MemorySSA] Update to the new shiny walker.
This patch updates MemorySSA's use-optimizing walker to be more
accurate and, in some cases, faster.
Essentially, this changed our core walking algorithm from a
cache-as-you-go DFS to an iteratively expanded DFS, with all of the
caching happening at the end. Said expansion happens when we hit a Phi,
P; we'll try to do the smallest amount of work possible to see if
optimizing above that Phi is legal in the first place. If so, we'll
expand the search to see if we can optimize to the next phi, etc.
An iteratively expanded DFS lets us potentially quit earlier (because we
don't assume that we can optimize above all phis) than our old walker.
Additionally, because we don't cache as we go, we can now optimize above
loops.
As an added bonus, this patch adds a ton of verification (if
EXPENSIVE_CHECKS are enabled), so finding bugs is easier.
Differential Revision: https://reviews.llvm.org/D21777
llvm-svn: 275940
2016-07-19 09:29:15 +08:00
|
|
|
assert(From->getNumOperands() && "Phi with no operands?");
|
|
|
|
|
|
|
|
BasicBlock *BB = From->getBlock();
|
|
|
|
MemoryAccess *Result = MSSA.getLiveOnEntryDef();
|
|
|
|
DomTreeNode *Node = DT.getNode(BB);
|
|
|
|
while ((Node = Node->getIDom())) {
|
2017-04-01 16:59:45 +08:00
|
|
|
auto *Defs = MSSA.getBlockDefs(Node->getBlock());
|
|
|
|
if (Defs)
|
2017-04-01 17:01:12 +08:00
|
|
|
return &*Defs->rbegin();
|
[MemorySSA] Update to the new shiny walker.
This patch updates MemorySSA's use-optimizing walker to be more
accurate and, in some cases, faster.
Essentially, this changed our core walking algorithm from a
cache-as-you-go DFS to an iteratively expanded DFS, with all of the
caching happening at the end. Said expansion happens when we hit a Phi,
P; we'll try to do the smallest amount of work possible to see if
optimizing above that Phi is legal in the first place. If so, we'll
expand the search to see if we can optimize to the next phi, etc.
An iteratively expanded DFS lets us potentially quit earlier (because we
don't assume that we can optimize above all phis) than our old walker.
Additionally, because we don't cache as we go, we can now optimize above
loops.
As an added bonus, this patch adds a ton of verification (if
EXPENSIVE_CHECKS are enabled), so finding bugs is easier.
Differential Revision: https://reviews.llvm.org/D21777
llvm-svn: 275940
2016-07-19 09:29:15 +08:00
|
|
|
}
|
|
|
|
return Result;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Result of calling walkToPhiOrClobber.
|
|
|
|
struct UpwardsWalkResult {
|
|
|
|
/// The "Result" of the walk. Either a clobber, the last thing we walked, or
|
2018-03-09 02:03:14 +08:00
|
|
|
/// both. Include alias info when clobber found.
|
[MemorySSA] Update to the new shiny walker.
This patch updates MemorySSA's use-optimizing walker to be more
accurate and, in some cases, faster.
Essentially, this changed our core walking algorithm from a
cache-as-you-go DFS to an iteratively expanded DFS, with all of the
caching happening at the end. Said expansion happens when we hit a Phi,
P; we'll try to do the smallest amount of work possible to see if
optimizing above that Phi is legal in the first place. If so, we'll
expand the search to see if we can optimize to the next phi, etc.
An iteratively expanded DFS lets us potentially quit earlier (because we
don't assume that we can optimize above all phis) than our old walker.
Additionally, because we don't cache as we go, we can now optimize above
loops.
As an added bonus, this patch adds a ton of verification (if
EXPENSIVE_CHECKS are enabled), so finding bugs is easier.
Differential Revision: https://reviews.llvm.org/D21777
llvm-svn: 275940
2016-07-19 09:29:15 +08:00
|
|
|
MemoryAccess *Result;
|
|
|
|
bool IsKnownClobber;
|
2018-03-09 02:03:14 +08:00
|
|
|
Optional<AliasResult> AR;
|
[MemorySSA] Update to the new shiny walker.
This patch updates MemorySSA's use-optimizing walker to be more
accurate and, in some cases, faster.
Essentially, this changed our core walking algorithm from a
cache-as-you-go DFS to an iteratively expanded DFS, with all of the
caching happening at the end. Said expansion happens when we hit a Phi,
P; we'll try to do the smallest amount of work possible to see if
optimizing above that Phi is legal in the first place. If so, we'll
expand the search to see if we can optimize to the next phi, etc.
An iteratively expanded DFS lets us potentially quit earlier (because we
don't assume that we can optimize above all phis) than our old walker.
Additionally, because we don't cache as we go, we can now optimize above
loops.
As an added bonus, this patch adds a ton of verification (if
EXPENSIVE_CHECKS are enabled), so finding bugs is easier.
Differential Revision: https://reviews.llvm.org/D21777
llvm-svn: 275940
2016-07-19 09:29:15 +08:00
|
|
|
};
|
|
|
|
|
|
|
|
/// Walk to the next Phi or Clobber in the def chain starting at Desc.Last.
|
|
|
|
/// This will update Desc.Last as it walks. It will (optionally) also stop at
|
|
|
|
/// StopAt.
|
|
|
|
///
|
|
|
|
/// This does not test for whether StopAt is a clobber
|
2017-04-01 17:01:12 +08:00
|
|
|
UpwardsWalkResult
|
2019-01-08 02:40:27 +08:00
|
|
|
walkToPhiOrClobber(DefPath &Desc, const MemoryAccess *StopAt = nullptr,
|
|
|
|
const MemoryAccess *SkipStopAt = nullptr) const {
|
[MemorySSA] Update to the new shiny walker.
This patch updates MemorySSA's use-optimizing walker to be more
accurate and, in some cases, faster.
Essentially, this changed our core walking algorithm from a
cache-as-you-go DFS to an iteratively expanded DFS, with all of the
caching happening at the end. Said expansion happens when we hit a Phi,
P; we'll try to do the smallest amount of work possible to see if
optimizing above that Phi is legal in the first place. If so, we'll
expand the search to see if we can optimize to the next phi, etc.
An iteratively expanded DFS lets us potentially quit earlier (because we
don't assume that we can optimize above all phis) than our old walker.
Additionally, because we don't cache as we go, we can now optimize above
loops.
As an added bonus, this patch adds a ton of verification (if
EXPENSIVE_CHECKS are enabled), so finding bugs is easier.
Differential Revision: https://reviews.llvm.org/D21777
llvm-svn: 275940
2016-07-19 09:29:15 +08:00
|
|
|
assert(!isa<MemoryUse>(Desc.Last) && "Uses don't exist in my world");
|
2019-03-30 06:55:59 +08:00
|
|
|
assert(UpwardWalkLimit && "Need a valid walk limit");
|
2019-04-13 02:48:46 +08:00
|
|
|
bool LimitAlreadyReached = false;
|
|
|
|
// (*UpwardWalkLimit) may be 0 here, due to the loop in tryOptimizePhi. Set
|
|
|
|
// it to 1. This will not do any alias() calls. It either returns in the
|
|
|
|
// first iteration in the loop below, or is set back to 0 if all def chains
|
|
|
|
// are free of MemoryDefs.
|
|
|
|
if (!*UpwardWalkLimit) {
|
|
|
|
*UpwardWalkLimit = 1;
|
|
|
|
LimitAlreadyReached = true;
|
|
|
|
}
|
[MemorySSA] Update to the new shiny walker.
This patch updates MemorySSA's use-optimizing walker to be more
accurate and, in some cases, faster.
Essentially, this changed our core walking algorithm from a
cache-as-you-go DFS to an iteratively expanded DFS, with all of the
caching happening at the end. Said expansion happens when we hit a Phi,
P; we'll try to do the smallest amount of work possible to see if
optimizing above that Phi is legal in the first place. If so, we'll
expand the search to see if we can optimize to the next phi, etc.
An iteratively expanded DFS lets us potentially quit earlier (because we
don't assume that we can optimize above all phis) than our old walker.
Additionally, because we don't cache as we go, we can now optimize above
loops.
As an added bonus, this patch adds a ton of verification (if
EXPENSIVE_CHECKS are enabled), so finding bugs is easier.
Differential Revision: https://reviews.llvm.org/D21777
llvm-svn: 275940
2016-07-19 09:29:15 +08:00
|
|
|
|
|
|
|
for (MemoryAccess *Current : def_chain(Desc.Last)) {
|
|
|
|
Desc.Last = Current;
|
2019-01-08 02:40:27 +08:00
|
|
|
if (Current == StopAt || Current == SkipStopAt)
|
2021-03-16 21:36:17 +08:00
|
|
|
return {Current, false, AliasResult(AliasResult::MayAlias)};
|
2018-03-09 02:03:14 +08:00
|
|
|
|
|
|
|
if (auto *MD = dyn_cast<MemoryDef>(Current)) {
|
|
|
|
if (MSSA.isLiveOnEntryDef(MD))
|
2021-03-16 21:36:17 +08:00
|
|
|
return {MD, true, AliasResult(AliasResult::MustAlias)};
|
2019-03-30 05:56:09 +08:00
|
|
|
|
|
|
|
if (!--*UpwardWalkLimit)
|
2021-03-16 21:36:17 +08:00
|
|
|
return {Current, true, AliasResult(AliasResult::MayAlias)};
|
2019-03-30 05:56:09 +08:00
|
|
|
|
2018-03-09 02:03:14 +08:00
|
|
|
ClobberAlias CA =
|
|
|
|
instructionClobbersQuery(MD, Desc.Loc, Query->Inst, AA);
|
|
|
|
if (CA.IsClobber)
|
|
|
|
return {MD, true, CA.AR};
|
|
|
|
}
|
[MemorySSA] Update to the new shiny walker.
This patch updates MemorySSA's use-optimizing walker to be more
accurate and, in some cases, faster.
Essentially, this changed our core walking algorithm from a
cache-as-you-go DFS to an iteratively expanded DFS, with all of the
caching happening at the end. Said expansion happens when we hit a Phi,
P; we'll try to do the smallest amount of work possible to see if
optimizing above that Phi is legal in the first place. If so, we'll
expand the search to see if we can optimize to the next phi, etc.
An iteratively expanded DFS lets us potentially quit earlier (because we
don't assume that we can optimize above all phis) than our old walker.
Additionally, because we don't cache as we go, we can now optimize above
loops.
As an added bonus, this patch adds a ton of verification (if
EXPENSIVE_CHECKS are enabled), so finding bugs is easier.
Differential Revision: https://reviews.llvm.org/D21777
llvm-svn: 275940
2016-07-19 09:29:15 +08:00
|
|
|
}
|
|
|
|
|
2019-04-13 02:48:46 +08:00
|
|
|
if (LimitAlreadyReached)
|
|
|
|
*UpwardWalkLimit = 0;
|
|
|
|
|
[MemorySSA] Update to the new shiny walker.
This patch updates MemorySSA's use-optimizing walker to be more
accurate and, in some cases, faster.
Essentially, this changed our core walking algorithm from a
cache-as-you-go DFS to an iteratively expanded DFS, with all of the
caching happening at the end. Said expansion happens when we hit a Phi,
P; we'll try to do the smallest amount of work possible to see if
optimizing above that Phi is legal in the first place. If so, we'll
expand the search to see if we can optimize to the next phi, etc.
An iteratively expanded DFS lets us potentially quit earlier (because we
don't assume that we can optimize above all phis) than our old walker.
Additionally, because we don't cache as we go, we can now optimize above
loops.
As an added bonus, this patch adds a ton of verification (if
EXPENSIVE_CHECKS are enabled), so finding bugs is easier.
Differential Revision: https://reviews.llvm.org/D21777
llvm-svn: 275940
2016-07-19 09:29:15 +08:00
|
|
|
assert(isa<MemoryPhi>(Desc.Last) &&
|
|
|
|
"Ended at a non-clobber that's not a phi?");
|
2021-03-16 21:36:17 +08:00
|
|
|
return {Desc.Last, false, AliasResult(AliasResult::MayAlias)};
|
[MemorySSA] Update to the new shiny walker.
This patch updates MemorySSA's use-optimizing walker to be more
accurate and, in some cases, faster.
Essentially, this changed our core walking algorithm from a
cache-as-you-go DFS to an iteratively expanded DFS, with all of the
caching happening at the end. Said expansion happens when we hit a Phi,
P; we'll try to do the smallest amount of work possible to see if
optimizing above that Phi is legal in the first place. If so, we'll
expand the search to see if we can optimize to the next phi, etc.
An iteratively expanded DFS lets us potentially quit earlier (because we
don't assume that we can optimize above all phis) than our old walker.
Additionally, because we don't cache as we go, we can now optimize above
loops.
As an added bonus, this patch adds a ton of verification (if
EXPENSIVE_CHECKS are enabled), so finding bugs is easier.
Differential Revision: https://reviews.llvm.org/D21777
llvm-svn: 275940
2016-07-19 09:29:15 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/// Fan a search out across all incoming values of \p Phi: for each
/// (access, translated location) pair reaching the phi, append a new DefPath
/// chained to \p PriorNode and mark it as paused for later continuation.
void addSearches(MemoryPhi *Phi, SmallVectorImpl<ListIndex> &PausedSearches,
                 ListIndex PriorNode) {
  auto UpwardDefsBegin = upward_defs_begin({Phi, Paths[PriorNode].Loc}, DT,
                                           &PerformedPhiTranslation);
  auto UpwardDefs = make_range(UpwardDefsBegin, upward_defs_end());
  for (const MemoryAccessPair &P : UpwardDefs) {
    PausedSearches.push_back(Paths.size());
    Paths.emplace_back(P.second, P.first, PriorNode);
  }
}
|
|
|
|
|
|
|
|
/// Represents a search that terminated after finding a clobber. This clobber
|
|
|
|
/// may or may not be present in the path of defs from LastNode..SearchStart,
|
|
|
|
/// since it may have been retrieved from cache.
|
|
|
|
struct TerminatedPath {
|
|
|
|
MemoryAccess *Clobber;
|
|
|
|
ListIndex LastNode;
|
|
|
|
};
|
|
|
|
|
|
|
|
/// Get an access that keeps us from optimizing to the given phi.
|
|
|
|
///
|
|
|
|
/// PausedSearches is an array of indices into the Paths array. Its incoming
|
|
|
|
/// value is the indices of searches that stopped at the last phi optimization
|
|
|
|
/// target. It's left in an unspecified state.
|
|
|
|
///
|
|
|
|
/// If this returns None, NewPaused is a vector of searches that terminated
|
|
|
|
/// at StopWhere. Otherwise, NewPaused is left in an unspecified state.
|
2016-08-03 09:22:19 +08:00
|
|
|
Optional<TerminatedPath>
|
2017-04-01 17:01:12 +08:00
|
|
|
getBlockingAccess(const MemoryAccess *StopWhere,
|
[MemorySSA] Update to the new shiny walker.
This patch updates MemorySSA's use-optimizing walker to be more
accurate and, in some cases, faster.
Essentially, this changed our core walking algorithm from a
cache-as-you-go DFS to an iteratively expanded DFS, with all of the
caching happening at the end. Said expansion happens when we hit a Phi,
P; we'll try to do the smallest amount of work possible to see if
optimizing above that Phi is legal in the first place. If so, we'll
expand the search to see if we can optimize to the next phi, etc.
An iteratively expanded DFS lets us potentially quit earlier (because we
don't assume that we can optimize above all phis) than our old walker.
Additionally, because we don't cache as we go, we can now optimize above
loops.
As an added bonus, this patch adds a ton of verification (if
EXPENSIVE_CHECKS are enabled), so finding bugs is easier.
Differential Revision: https://reviews.llvm.org/D21777
llvm-svn: 275940
2016-07-19 09:29:15 +08:00
|
|
|
SmallVectorImpl<ListIndex> &PausedSearches,
|
|
|
|
SmallVectorImpl<ListIndex> &NewPaused,
|
|
|
|
SmallVectorImpl<TerminatedPath> &Terminated) {
|
|
|
|
assert(!PausedSearches.empty() && "No searches to continue?");
|
|
|
|
|
|
|
|
// BFS vs DFS really doesn't make a difference here, so just do a DFS with
|
|
|
|
// PausedSearches as our stack.
|
|
|
|
while (!PausedSearches.empty()) {
|
|
|
|
ListIndex PathIndex = PausedSearches.pop_back_val();
|
|
|
|
DefPath &Node = Paths[PathIndex];
|
|
|
|
|
|
|
|
// If we've already visited this path with this MemoryLocation, we don't
|
|
|
|
// need to do so again.
|
|
|
|
//
|
|
|
|
// NOTE: That we just drop these paths on the ground makes caching
|
|
|
|
// behavior sporadic. e.g. given a diamond:
|
|
|
|
// A
|
|
|
|
// B C
|
|
|
|
// D
|
|
|
|
//
|
|
|
|
// ...If we walk D, B, A, C, we'll only cache the result of phi
|
|
|
|
// optimization for A, B, and D; C will be skipped because it dies here.
|
|
|
|
// This arguably isn't the worst thing ever, since:
|
|
|
|
// - We generally query things in a top-down order, so if we got below D
|
|
|
|
// without needing cache entries for {C, MemLoc}, then chances are
|
|
|
|
// that those cache entries would end up ultimately unused.
|
|
|
|
// - We still cache things for A, so C only needs to walk up a bit.
|
|
|
|
// If this behavior becomes problematic, we can fix without a ton of extra
|
|
|
|
// work.
|
2020-06-26 07:50:15 +08:00
|
|
|
if (!VisitedPhis.insert({Node.Last, Node.Loc}).second) {
|
|
|
|
if (PerformedPhiTranslation) {
|
|
|
|
// If visiting this path performed Phi translation, don't continue,
|
|
|
|
// since it may not be correct to merge results from two paths if one
|
|
|
|
// relies on the phi translation.
|
|
|
|
TerminatedPath Term{Node.Last, PathIndex};
|
|
|
|
return Term;
|
|
|
|
}
|
[MemorySSA] Update to the new shiny walker.
This patch updates MemorySSA's use-optimizing walker to be more
accurate and, in some cases, faster.
Essentially, this changed our core walking algorithm from a
cache-as-you-go DFS to an iteratively expanded DFS, with all of the
caching happening at the end. Said expansion happens when we hit a Phi,
P; we'll try to do the smallest amount of work possible to see if
optimizing above that Phi is legal in the first place. If so, we'll
expand the search to see if we can optimize to the next phi, etc.
An iteratively expanded DFS lets us potentially quit earlier (because we
don't assume that we can optimize above all phis) than our old walker.
Additionally, because we don't cache as we go, we can now optimize above
loops.
As an added bonus, this patch adds a ton of verification (if
EXPENSIVE_CHECKS are enabled), so finding bugs is easier.
Differential Revision: https://reviews.llvm.org/D21777
llvm-svn: 275940
2016-07-19 09:29:15 +08:00
|
|
|
continue;
|
2020-06-26 07:50:15 +08:00
|
|
|
}
|
[MemorySSA] Update to the new shiny walker.
This patch updates MemorySSA's use-optimizing walker to be more
accurate and, in some cases, faster.
Essentially, this changed our core walking algorithm from a
cache-as-you-go DFS to an iteratively expanded DFS, with all of the
caching happening at the end. Said expansion happens when we hit a Phi,
P; we'll try to do the smallest amount of work possible to see if
optimizing above that Phi is legal in the first place. If so, we'll
expand the search to see if we can optimize to the next phi, etc.
An iteratively expanded DFS lets us potentially quit earlier (because we
don't assume that we can optimize above all phis) than our old walker.
Additionally, because we don't cache as we go, we can now optimize above
loops.
As an added bonus, this patch adds a ton of verification (if
EXPENSIVE_CHECKS are enabled), so finding bugs is easier.
Differential Revision: https://reviews.llvm.org/D21777
llvm-svn: 275940
2016-07-19 09:29:15 +08:00
|
|
|
|
2019-01-08 02:40:27 +08:00
|
|
|
const MemoryAccess *SkipStopWhere = nullptr;
|
|
|
|
if (Query->SkipSelfAccess && Node.Loc == Query->StartingLoc) {
|
|
|
|
assert(isa<MemoryDef>(Query->OriginalAccess));
|
|
|
|
SkipStopWhere = Query->OriginalAccess;
|
|
|
|
}
|
|
|
|
|
2019-03-30 05:56:09 +08:00
|
|
|
UpwardsWalkResult Res = walkToPhiOrClobber(Node,
|
|
|
|
/*StopAt=*/StopWhere,
|
2019-01-08 02:40:27 +08:00
|
|
|
/*SkipStopAt=*/SkipStopWhere);
|
[MemorySSA] Update to the new shiny walker.
This patch updates MemorySSA's use-optimizing walker to be more
accurate and, in some cases, faster.
Essentially, this changed our core walking algorithm from a
cache-as-you-go DFS to an iteratively expanded DFS, with all of the
caching happening at the end. Said expansion happens when we hit a Phi,
P; we'll try to do the smallest amount of work possible to see if
optimizing above that Phi is legal in the first place. If so, we'll
expand the search to see if we can optimize to the next phi, etc.
An iteratively expanded DFS lets us potentially quit earlier (because we
don't assume that we can optimize above all phis) than our old walker.
Additionally, because we don't cache as we go, we can now optimize above
loops.
As an added bonus, this patch adds a ton of verification (if
EXPENSIVE_CHECKS are enabled), so finding bugs is easier.
Differential Revision: https://reviews.llvm.org/D21777
llvm-svn: 275940
2016-07-19 09:29:15 +08:00
|
|
|
if (Res.IsKnownClobber) {
|
2019-01-08 02:40:27 +08:00
|
|
|
assert(Res.Result != StopWhere && Res.Result != SkipStopWhere);
|
2019-03-30 05:56:09 +08:00
|
|
|
|
[MemorySSA] Update to the new shiny walker.
This patch updates MemorySSA's use-optimizing walker to be more
accurate and, in some cases, faster.
Essentially, this changed our core walking algorithm from a
cache-as-you-go DFS to an iteratively expanded DFS, with all of the
caching happening at the end. Said expansion happens when we hit a Phi,
P; we'll try to do the smallest amount of work possible to see if
optimizing above that Phi is legal in the first place. If so, we'll
expand the search to see if we can optimize to the next phi, etc.
An iteratively expanded DFS lets us potentially quit earlier (because we
don't assume that we can optimize above all phis) than our old walker.
Additionally, because we don't cache as we go, we can now optimize above
loops.
As an added bonus, this patch adds a ton of verification (if
EXPENSIVE_CHECKS are enabled), so finding bugs is easier.
Differential Revision: https://reviews.llvm.org/D21777
llvm-svn: 275940
2016-07-19 09:29:15 +08:00
|
|
|
// If this wasn't a cache hit, we hit a clobber when walking. That's a
|
|
|
|
// failure.
|
2016-08-03 09:22:19 +08:00
|
|
|
TerminatedPath Term{Res.Result, PathIndex};
|
2017-04-06 03:01:58 +08:00
|
|
|
if (!MSSA.dominates(Res.Result, StopWhere))
|
2016-08-03 09:22:19 +08:00
|
|
|
return Term;
|
[MemorySSA] Update to the new shiny walker.
This patch updates MemorySSA's use-optimizing walker to be more
accurate and, in some cases, faster.
Essentially, this changed our core walking algorithm from a
cache-as-you-go DFS to an iteratively expanded DFS, with all of the
caching happening at the end. Said expansion happens when we hit a Phi,
P; we'll try to do the smallest amount of work possible to see if
optimizing above that Phi is legal in the first place. If so, we'll
expand the search to see if we can optimize to the next phi, etc.
An iteratively expanded DFS lets us potentially quit earlier (because we
don't assume that we can optimize above all phis) than our old walker.
Additionally, because we don't cache as we go, we can now optimize above
loops.
As an added bonus, this patch adds a ton of verification (if
EXPENSIVE_CHECKS are enabled), so finding bugs is easier.
Differential Revision: https://reviews.llvm.org/D21777
llvm-svn: 275940
2016-07-19 09:29:15 +08:00
|
|
|
|
|
|
|
// Otherwise, it's a valid thing to potentially optimize to.
|
2016-08-03 09:22:19 +08:00
|
|
|
Terminated.push_back(Term);
|
[MemorySSA] Update to the new shiny walker.
This patch updates MemorySSA's use-optimizing walker to be more
accurate and, in some cases, faster.
Essentially, this changed our core walking algorithm from a
cache-as-you-go DFS to an iteratively expanded DFS, with all of the
caching happening at the end. Said expansion happens when we hit a Phi,
P; we'll try to do the smallest amount of work possible to see if
optimizing above that Phi is legal in the first place. If so, we'll
expand the search to see if we can optimize to the next phi, etc.
An iteratively expanded DFS lets us potentially quit earlier (because we
don't assume that we can optimize above all phis) than our old walker.
Additionally, because we don't cache as we go, we can now optimize above
loops.
As an added bonus, this patch adds a ton of verification (if
EXPENSIVE_CHECKS are enabled), so finding bugs is easier.
Differential Revision: https://reviews.llvm.org/D21777
llvm-svn: 275940
2016-07-19 09:29:15 +08:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2019-01-08 02:40:27 +08:00
|
|
|
if (Res.Result == StopWhere || Res.Result == SkipStopWhere) {
|
[MemorySSA] Update to the new shiny walker.
This patch updates MemorySSA's use-optimizing walker to be more
accurate and, in some cases, faster.
Essentially, this changed our core walking algorithm from a
cache-as-you-go DFS to an iteratively expanded DFS, with all of the
caching happening at the end. Said expansion happens when we hit a Phi,
P; we'll try to do the smallest amount of work possible to see if
optimizing above that Phi is legal in the first place. If so, we'll
expand the search to see if we can optimize to the next phi, etc.
An iteratively expanded DFS lets us potentially quit earlier (because we
don't assume that we can optimize above all phis) than our old walker.
Additionally, because we don't cache as we go, we can now optimize above
loops.
As an added bonus, this patch adds a ton of verification (if
EXPENSIVE_CHECKS are enabled), so finding bugs is easier.
Differential Revision: https://reviews.llvm.org/D21777
llvm-svn: 275940
2016-07-19 09:29:15 +08:00
|
|
|
// We've hit our target. Save this path off for if we want to continue
|
2019-01-08 02:40:27 +08:00
|
|
|
// walking. If we are in the mode of skipping the OriginalAccess, and
|
|
|
|
// we've reached back to the OriginalAccess, do not save path, we've
|
|
|
|
// just looped back to self.
|
|
|
|
if (Res.Result != SkipStopWhere)
|
|
|
|
NewPaused.push_back(PathIndex);
|
[MemorySSA] Update to the new shiny walker.
This patch updates MemorySSA's use-optimizing walker to be more
accurate and, in some cases, faster.
Essentially, this changed our core walking algorithm from a
cache-as-you-go DFS to an iteratively expanded DFS, with all of the
caching happening at the end. Said expansion happens when we hit a Phi,
P; we'll try to do the smallest amount of work possible to see if
optimizing above that Phi is legal in the first place. If so, we'll
expand the search to see if we can optimize to the next phi, etc.
An iteratively expanded DFS lets us potentially quit earlier (because we
don't assume that we can optimize above all phis) than our old walker.
Additionally, because we don't cache as we go, we can now optimize above
loops.
As an added bonus, this patch adds a ton of verification (if
EXPENSIVE_CHECKS are enabled), so finding bugs is easier.
Differential Revision: https://reviews.llvm.org/D21777
llvm-svn: 275940
2016-07-19 09:29:15 +08:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
assert(!MSSA.isLiveOnEntryDef(Res.Result) && "liveOnEntry is a clobber");
|
|
|
|
addSearches(cast<MemoryPhi>(Res.Result), PausedSearches, PathIndex);
|
|
|
|
}
|
|
|
|
|
|
|
|
return None;
|
|
|
|
}
|
|
|
|
|
|
|
|
template <typename T, typename Walker>
|
|
|
|
struct generic_def_path_iterator
|
|
|
|
: public iterator_facade_base<generic_def_path_iterator<T, Walker>,
|
|
|
|
std::forward_iterator_tag, T *> {
|
2019-03-25 17:27:42 +08:00
|
|
|
generic_def_path_iterator() {}
|
[MemorySSA] Update to the new shiny walker.
This patch updates MemorySSA's use-optimizing walker to be more
accurate and, in some cases, faster.
Essentially, this changed our core walking algorithm from a
cache-as-you-go DFS to an iteratively expanded DFS, with all of the
caching happening at the end. Said expansion happens when we hit a Phi,
P; we'll try to do the smallest amount of work possible to see if
optimizing above that Phi is legal in the first place. If so, we'll
expand the search to see if we can optimize to the next phi, etc.
An iteratively expanded DFS lets us potentially quit earlier (because we
don't assume that we can optimize above all phis) than our old walker.
Additionally, because we don't cache as we go, we can now optimize above
loops.
As an added bonus, this patch adds a ton of verification (if
EXPENSIVE_CHECKS are enabled), so finding bugs is easier.
Differential Revision: https://reviews.llvm.org/D21777
llvm-svn: 275940
2016-07-19 09:29:15 +08:00
|
|
|
generic_def_path_iterator(Walker *W, ListIndex N) : W(W), N(N) {}
|
|
|
|
|
|
|
|
T &operator*() const { return curNode(); }
|
|
|
|
|
|
|
|
generic_def_path_iterator &operator++() {
|
|
|
|
N = curNode().Previous;
|
|
|
|
return *this;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool operator==(const generic_def_path_iterator &O) const {
|
|
|
|
if (N.hasValue() != O.N.hasValue())
|
|
|
|
return false;
|
|
|
|
return !N.hasValue() || *N == *O.N;
|
|
|
|
}
|
|
|
|
|
|
|
|
private:
|
|
|
|
T &curNode() const { return W->Paths[*N]; }
|
|
|
|
|
2017-08-17 06:07:40 +08:00
|
|
|
Walker *W = nullptr;
|
|
|
|
Optional<ListIndex> N = None;
|
[MemorySSA] Update to the new shiny walker.
This patch updates MemorySSA's use-optimizing walker to be more
accurate and, in some cases, faster.
Essentially, this changed our core walking algorithm from a
cache-as-you-go DFS to an iteratively expanded DFS, with all of the
caching happening at the end. Said expansion happens when we hit a Phi,
P; we'll try to do the smallest amount of work possible to see if
optimizing above that Phi is legal in the first place. If so, we'll
expand the search to see if we can optimize to the next phi, etc.
An iteratively expanded DFS lets us potentially quit earlier (because we
don't assume that we can optimize above all phis) than our old walker.
Additionally, because we don't cache as we go, we can now optimize above
loops.
As an added bonus, this patch adds a ton of verification (if
EXPENSIVE_CHECKS are enabled), so finding bugs is easier.
Differential Revision: https://reviews.llvm.org/D21777
llvm-svn: 275940
2016-07-19 09:29:15 +08:00
|
|
|
};
|
|
|
|
|
|
|
|
using def_path_iterator = generic_def_path_iterator<DefPath, ClobberWalker>;
|
|
|
|
using const_def_path_iterator =
|
|
|
|
generic_def_path_iterator<const DefPath, const ClobberWalker>;
|
|
|
|
|
|
|
|
iterator_range<def_path_iterator> def_path(ListIndex From) {
|
|
|
|
return make_range(def_path_iterator(this, From), def_path_iterator());
|
|
|
|
}
|
|
|
|
|
|
|
|
iterator_range<const_def_path_iterator> const_def_path(ListIndex From) const {
|
|
|
|
return make_range(const_def_path_iterator(this, From),
|
|
|
|
const_def_path_iterator());
|
|
|
|
}
|
|
|
|
|
|
|
|
  /// Result of a phi-optimization attempt: the clobber the query resolves to,
  /// plus any additional paths whose results may legally be cached back.
  struct OptznResult {
    /// The path that contains our result.
    TerminatedPath PrimaryClobber;
    /// The paths that we can legally cache back from, but that aren't
    /// necessarily the result of the Phi optimization.
    SmallVector<TerminatedPath, 4> OtherClobbers;
  };
|
|
|
|
|
|
|
|
ListIndex defPathIndex(const DefPath &N) const {
|
|
|
|
// The assert looks nicer if we don't need to do &N
|
|
|
|
const DefPath *NP = &N;
|
|
|
|
assert(!Paths.empty() && NP >= &Paths.front() && NP <= &Paths.back() &&
|
|
|
|
"Out of bounds DefPath!");
|
|
|
|
return NP - &Paths.front();
|
|
|
|
}
|
|
|
|
|
|
|
|
  /// Try to optimize a phi as best as we can. Returns a SmallVector of Paths
  /// that act as legal clobbers. Note that this won't return *all* clobbers.
  ///
  /// Phi optimization algorithm tl;dr:
  ///   - Find the earliest def/phi, A, we can optimize to
  ///   - Find if all paths from the starting memory access ultimately reach A
  ///     - If not, optimization isn't possible.
  ///     - Otherwise, walk from A to another clobber or phi, A'.
  ///       - If A' is a def, we're done.
  ///       - If A' is a phi, try to optimize it.
  ///
  /// A path is a series of {MemoryAccess, MemoryLocation} pairs. A path
  /// terminates when a MemoryAccess that clobbers said MemoryLocation is found.
  ///
  /// \param Phi   the phi that blocked the fast-path walk.
  /// \param Start the access the overall query started from.
  /// \param Loc   the location being queried.
  /// \pre Paths/VisitedPhis are empty and PerformedPhiTranslation is false
  ///      (callers run resetPhiOptznState() after each query).
  OptznResult tryOptimizePhi(MemoryPhi *Phi, MemoryAccess *Start,
                             const MemoryLocation &Loc) {
    assert(Paths.empty() && VisitedPhis.empty() && !PerformedPhiTranslation &&
           "Reset the optimization state.");

    // Seed the path list; index 0 is the root node for the whole search.
    Paths.emplace_back(Loc, Start, Phi, None);
    // Stores how many "valid" optimization nodes we had prior to calling
    // addSearches/getBlockingAccess. Necessary for caching if we had a blocker.
    auto PriorPathsSize = Paths.size();

    SmallVector<ListIndex, 16> PausedSearches;
    SmallVector<ListIndex, 8> NewPaused;
    SmallVector<TerminatedPath, 4> TerminatedPaths;

    addSearches(Phi, PausedSearches, 0);

    // Moves the TerminatedPath with the "most dominated" Clobber to the end of
    // Paths.
    auto MoveDominatedPathToEnd = [&](SmallVectorImpl<TerminatedPath> &Paths) {
      assert(!Paths.empty() && "Need a path to move");
      auto Dom = Paths.begin();
      for (auto I = std::next(Dom), E = Paths.end(); I != E; ++I)
        if (!MSSA.dominates(I->Clobber, Dom->Clobber))
          Dom = I;
      auto Last = Paths.end() - 1;
      if (Last != Dom)
        std::iter_swap(Last, Dom);
    };

    // Fixpoint loop: each iteration either returns with a clobber, or hoists
    // the whole search up to the next dominating phi (DefChainPhi below).
    MemoryPhi *Current = Phi;
    while (true) {
      assert(!MSSA.isLiveOnEntryDef(Current) &&
             "liveOnEntry wasn't treated as a clobber?");

      const auto *Target = getWalkTarget(Current);
      // If a TerminatedPath doesn't dominate Target, then it wasn't a legal
      // optimization for the prior phi.
      assert(all_of(TerminatedPaths, [&](const TerminatedPath &P) {
        return MSSA.dominates(P.Clobber, Target);
      }));

      // FIXME: This is broken, because the Blocker may be reported to be
      // liveOnEntry, and we'll happily wait for that to disappear (read: never)
      // For the moment, this is fine, since we do nothing with blocker info.
      if (Optional<TerminatedPath> Blocker = getBlockingAccess(
              Target, PausedSearches, NewPaused, TerminatedPaths)) {

        // Find the node we started at. We can't search based on N->Last, since
        // we may have gone around a loop with a different MemoryLocation.
        auto Iter = find_if(def_path(Blocker->LastNode), [&](const DefPath &N) {
          return defPathIndex(N) < PriorPathsSize;
        });
        assert(Iter != def_path_iterator());

        DefPath &CurNode = *Iter;
        assert(CurNode.Last == Current);

        // Two things:
        // A. We can't reliably cache all of NewPaused back. Consider a case
        //    where we have two paths in NewPaused; one of which can't optimize
        //    above this phi, whereas the other can. If we cache the second path
        //    back, we'll end up with suboptimal cache entries. We can handle
        //    cases like this a bit better when we either try to find all
        //    clobbers that block phi optimization, or when our cache starts
        //    supporting unfinished searches.
        // B. We can't reliably cache TerminatedPaths back here without doing
        //    extra checks; consider a case like:
        //       T
        //      / \
        //     D   C
        //      \ /
        //       S
        //    Where T is our target, C is a node with a clobber on it, D is a
        //    diamond (with a clobber *only* on the left or right node, N), and
        //    S is our start. Say we walk to D, through the node opposite N
        //    (read: ignoring the clobber), and see a cache entry in the top
        //    node of D. That cache entry gets put into TerminatedPaths. We then
        //    walk up to C (N is later in our worklist), find the clobber, and
        //    quit. If we append TerminatedPaths to OtherClobbers, we'll cache
        //    the bottom part of D to the cached clobber, ignoring the clobber
        //    in N. Again, this problem goes away if we start tracking all
        //    blockers for a given phi optimization.
        TerminatedPath Result{CurNode.Last, defPathIndex(CurNode)};
        return {Result, {}};
      }

      // If there's nothing left to search, then all paths led to valid clobbers
      // that we got from our cache; pick the nearest to the start, and allow
      // the rest to be cached back.
      if (NewPaused.empty()) {
        MoveDominatedPathToEnd(TerminatedPaths);
        TerminatedPath Result = TerminatedPaths.pop_back_val();
        return {Result, std::move(TerminatedPaths)};
      }

      MemoryAccess *DefChainEnd = nullptr;
      SmallVector<TerminatedPath, 4> Clobbers;
      for (ListIndex Paused : NewPaused) {
        UpwardsWalkResult WR = walkToPhiOrClobber(Paths[Paused]);
        if (WR.IsKnownClobber)
          Clobbers.push_back({WR.Result, Paused});
        else
          // Micro-opt: If we hit the end of the chain, save it.
          DefChainEnd = WR.Result;
      }

      if (!TerminatedPaths.empty()) {
        // If we couldn't find the dominating phi/liveOnEntry in the above loop,
        // do it now.
        if (!DefChainEnd)
          for (auto *MA : def_chain(const_cast<MemoryAccess *>(Target)))
            DefChainEnd = MA;
        assert(DefChainEnd && "Failed to find dominating phi/liveOnEntry");

        // If any of the terminated paths don't dominate the phi we'll try to
        // optimize, we need to figure out what they are and quit.
        const BasicBlock *ChainBB = DefChainEnd->getBlock();
        for (const TerminatedPath &TP : TerminatedPaths) {
          // Because we know that DefChainEnd is as "high" as we can go, we
          // don't need local dominance checks; BB dominance is sufficient.
          if (DT.dominates(ChainBB, TP.Clobber->getBlock()))
            Clobbers.push_back(TP);
        }
      }

      // If we have clobbers in the def chain, find the one closest to Current
      // and quit.
      if (!Clobbers.empty()) {
        MoveDominatedPathToEnd(Clobbers);
        TerminatedPath Result = Clobbers.pop_back_val();
        return {Result, std::move(Clobbers)};
      }

      assert(all_of(NewPaused,
                    [&](ListIndex I) { return Paths[I].Last == DefChainEnd; }));

      // Because liveOnEntry is a clobber, this must be a phi.
      auto *DefChainPhi = cast<MemoryPhi>(DefChainEnd);

      // Restart the paused searches one level up, above DefChainPhi.
      PriorPathsSize = Paths.size();
      PausedSearches.clear();
      for (ListIndex I : NewPaused)
        addSearches(DefChainPhi, PausedSearches, I);
      NewPaused.clear();

      Current = DefChainPhi;
    }
  }
|
2016-06-25 05:02:12 +08:00
|
|
|
|
[MemorySSA] Update to the new shiny walker.
This patch updates MemorySSA's use-optimizing walker to be more
accurate and, in some cases, faster.
Essentially, this changed our core walking algorithm from a
cache-as-you-go DFS to an iteratively expanded DFS, with all of the
caching happening at the end. Said expansion happens when we hit a Phi,
P; we'll try to do the smallest amount of work possible to see if
optimizing above that Phi is legal in the first place. If so, we'll
expand the search to see if we can optimize to the next phi, etc.
An iteratively expanded DFS lets us potentially quit earlier (because we
don't assume that we can optimize above all phis) than our old walker.
Additionally, because we don't cache as we go, we can now optimize above
loops.
As an added bonus, this patch adds a ton of verification (if
EXPENSIVE_CHECKS are enabled), so finding bugs is easier.
Differential Revision: https://reviews.llvm.org/D21777
llvm-svn: 275940
2016-07-19 09:29:15 +08:00
|
|
|
void verifyOptResult(const OptznResult &R) const {
|
|
|
|
assert(all_of(R.OtherClobbers, [&](const TerminatedPath &P) {
|
|
|
|
return MSSA.dominates(P.Clobber, R.PrimaryClobber.Clobber);
|
|
|
|
}));
|
|
|
|
}
|
|
|
|
|
|
|
|
  /// Clear all per-query phi-optimization state so the next call to
  /// tryOptimizePhi() starts from a clean slate (it asserts on exactly this
  /// condition on entry).
  void resetPhiOptznState() {
    Paths.clear();
    VisitedPhis.clear();
    PerformedPhiTranslation = false;
  }
|
|
|
|
|
|
|
|
public:
|
[AliasAnalysis] Second prototype to cache BasicAA / anyAA state.
Summary:
Adding contained caching to AliasAnalysis. BasicAA is currently the only one using it.
AA changes:
- This patch is pulling the caches from BasicAAResults to AAResults, meaning the getModRefInfo call benefits from the IsCapturedCache as well when in "batch mode".
- All AAResultBase implementations add the QueryInfo member to all APIs. AAResults APIs maintain wrapper APIs such that all alias()/getModRefInfo call sites are unchanged.
- AA now provides a BatchAAResults type as a wrapper to AAResults. It keeps the AAResults instance and a QueryInfo instantiated to batch mode. It delegates all work to the AAResults instance with the batched QueryInfo. More API wrappers may be needed in BatchAAResults; only the minimum needed is currently added.
MemorySSA changes:
- All walkers are now templated on the AA used (AliasAnalysis=AAResults or BatchAAResults).
- At build time, we optimize uses; now we create a local walker (lives only as long as OptimizeUses does) using BatchAAResults.
- All Walkers have an internal AA and only use that now, never the AA in MemorySSA. The Walkers receive the AA they will use when built.
- The walker we use for queries after the build is instantiated on AliasAnalysis and is built after building MemorySSA and setting AA.
- All static methods doing walking are now templated on AliasAnalysisType if they are used both during build and after. If used only during build, the method now only takes a BatchAAResults. If used only after build, the method now takes an AliasAnalysis.
Subscribers: sanjoy, arsenm, jvesely, nhaehnle, jlebar, george.burgess.iv, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D59315
llvm-svn: 356783
2019-03-23 01:22:19 +08:00
|
|
|
  /// Build a walker over \p MSSA, answering alias queries through \p AA and
  /// dominance queries through \p DT. Only references are stored, so all
  /// three must outlive this walker.
  ClobberWalker(const MemorySSA &MSSA, AliasAnalysisType &AA, DominatorTree &DT)
      : MSSA(MSSA), AA(AA), DT(DT) {}

  AliasAnalysisType *getAA() { return &AA; }
|
[MemorySSA] Update to the new shiny walker.
This patch updates MemorySSA's use-optimizing walker to be more
accurate and, in some cases, faster.
Essentially, this changed our core walking algorithm from a
cache-as-you-go DFS to an iteratively expanded DFS, with all of the
caching happening at the end. Said expansion happens when we hit a Phi,
P; we'll try to do the smallest amount of work possible to see if
optimizing above that Phi is legal in the first place. If so, we'll
expand the search to see if we can optimize to the next phi, etc.
An iteratively expanded DFS lets us potentially quit earlier (because we
don't assume that we can optimize above all phis) than our old walker.
Additionally, because we don't cache as we go, we can now optimize above
loops.
As an added bonus, this patch adds a ton of verification (if
EXPENSIVE_CHECKS are enabled), so finding bugs is easier.
Differential Revision: https://reviews.llvm.org/D21777
llvm-svn: 275940
2016-07-19 09:29:15 +08:00
|
|
|
  /// Finds the nearest clobber for the given query, optimizing phis if
  /// possible.
  ///
  /// \param Start       the access to start walking upward from. If it is a
  ///                    MemoryUse, the walk starts at its defining access.
  /// \param Q           the query; Q.AR is updated on the fast path.
  /// \param UpWalkLimit caps the number of upward steps; clamped to at least 1
  ///                    here, and a pointer to it is stashed in
  ///                    UpwardWalkLimit (presumably decremented by
  ///                    walkToPhiOrClobber — confirm).
  MemoryAccess *findClobber(MemoryAccess *Start, UpwardsMemoryQuery &Q,
                            unsigned &UpWalkLimit) {
    Query = &Q;
    UpwardWalkLimit = &UpWalkLimit;
    // Starting limit must be > 0.
    if (!UpWalkLimit)
      UpWalkLimit++;

    MemoryAccess *Current = Start;
    // This walker pretends uses don't exist. If we're handed one, silently grab
    // its def. (This has the nice side-effect of ensuring we never cache uses)
    if (auto *MU = dyn_cast<MemoryUse>(Start))
      Current = MU->getDefiningAccess();

    DefPath FirstDesc(Q.StartingLoc, Current, Current, None);
    // Fast path for the overly-common case (no crazy phi optimization
    // necessary)
    UpwardsWalkResult WalkResult = walkToPhiOrClobber(FirstDesc);
    MemoryAccess *Result;
    if (WalkResult.IsKnownClobber) {
      Result = WalkResult.Result;
      Q.AR = WalkResult.AR;
    } else {
      // The walk stopped at a phi; run the full phi-optimization machinery,
      // then drop its scratch state before returning.
      OptznResult OptRes = tryOptimizePhi(cast<MemoryPhi>(FirstDesc.Last),
                                          Current, Q.StartingLoc);
      verifyOptResult(OptRes);
      resetPhiOptznState();
      Result = OptRes.PrimaryClobber.Clobber;
    }

#ifdef EXPENSIVE_CHECKS
    // Only sane to re-verify when the walk wasn't cut short by the limit.
    if (!Q.SkipSelfAccess && *UpwardWalkLimit > 0)
      checkClobberSanity(Current, Result, Q.StartingLoc, MSSA, Q, AA);
#endif
    return Result;
  }
|
|
|
|
};
|
|
|
|
|
|
|
|
struct RenamePassData {
|
|
|
|
DomTreeNode *DTN;
|
|
|
|
DomTreeNode::const_iterator ChildIt;
|
|
|
|
MemoryAccess *IncomingVal;
|
|
|
|
|
|
|
|
RenamePassData(DomTreeNode *D, DomTreeNode::const_iterator It,
|
|
|
|
MemoryAccess *M)
|
|
|
|
: DTN(D), ChildIt(It), IncomingVal(M) {}
|
2017-08-17 06:07:40 +08:00
|
|
|
|
[MemorySSA] Update to the new shiny walker.
This patch updates MemorySSA's use-optimizing walker to be more
accurate and, in some cases, faster.
Essentially, this changed our core walking algorithm from a
cache-as-you-go DFS to an iteratively expanded DFS, with all of the
caching happening at the end. Said expansion happens when we hit a Phi,
P; we'll try to do the smallest amount of work possible to see if
optimizing above that Phi is legal in the first place. If so, we'll
expand the search to see if we can optimize to the next phi, etc.
An iteratively expanded DFS lets us potentially quit earlier (because we
don't assume that we can optimize above all phis) than our old walker.
Additionally, because we don't cache as we go, we can now optimize above
loops.
As an added bonus, this patch adds a ton of verification (if
EXPENSIVE_CHECKS are enabled), so finding bugs is easier.
Differential Revision: https://reviews.llvm.org/D21777
llvm-svn: 275940
2016-07-19 09:29:15 +08:00
|
|
|
void swap(RenamePassData &RHS) {
|
|
|
|
std::swap(DTN, RHS.DTN);
|
|
|
|
std::swap(ChildIt, RHS.ChildIt);
|
|
|
|
std::swap(IncomingVal, RHS.IncomingVal);
|
|
|
|
}
|
|
|
|
};
|
2017-08-17 06:07:40 +08:00
|
|
|
|
|
|
|
} // end anonymous namespace
|
[MemorySSA] Update to the new shiny walker.
This patch updates MemorySSA's use-optimizing walker to be more
accurate and, in some cases, faster.
Essentially, this changed our core walking algorithm from a
cache-as-you-go DFS to an iteratively expanded DFS, with all of the
caching happening at the end. Said expansion happens when we hit a Phi,
P; we'll try to do the smallest amount of work possible to see if
optimizing above that Phi is legal in the first place. If so, we'll
expand the search to see if we can optimize to the next phi, etc.
An iteratively expanded DFS lets us potentially quit earlier (because we
don't assume that we can optimize above all phis) than our old walker.
Additionally, because we don't cache as we go, we can now optimize above
loops.
As an added bonus, this patch adds a ton of verification (if
EXPENSIVE_CHECKS are enabled), so finding bugs is easier.
Differential Revision: https://reviews.llvm.org/D21777
llvm-svn: 275940
2016-07-19 09:29:15 +08:00
|
|
|
|
|
|
|
namespace llvm {
|
2017-08-17 06:07:40 +08:00
|
|
|
|
[AliasAnalysis] Second prototype to cache BasicAA / anyAA state.
Summary:
Adding contained caching to AliasAnalysis. BasicAA is currently the only one using it.
AA changes:
- This patch is pulling the caches from BasicAAResults to AAResults, meaning the getModRefInfo call benefits from the IsCapturedCache as well when in "batch mode".
- All AAResultBase implementations add the QueryInfo member to all APIs. AAResults APIs maintain wrapper APIs such that all alias()/getModRefInfo call sites are unchanged.
- AA now provides a BatchAAResults type as a wrapper to AAResults. It keeps the AAResults instance and a QueryInfo instantiated to batch mode. It delegates all work to the AAResults instance with the batched QueryInfo. More API wrappers may be needed in BatchAAResults; only the minimum needed is currently added.
MemorySSA changes:
- All walkers are now templated on the AA used (AliasAnalysis=AAResults or BatchAAResults).
- At build time, we optimize uses; now we create a local walker (lives only as long as OptimizeUses does) using BatchAAResults.
- All Walkers have an internal AA and only use that now, never the AA in MemorySSA. The Walkers receive the AA they will use when built.
- The walker we use for queries after the build is instantiated on AliasAnalysis and is built after building MemorySSA and setting AA.
- All static methods doing walking are now templated on AliasAnalysisType if they are used both during build and after. If used only during build, the method now only takes a BatchAAResults. If used only after build, the method now takes an AliasAnalysis.
Subscribers: sanjoy, arsenm, jvesely, nhaehnle, jlebar, george.burgess.iv, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D59315
llvm-svn: 356783
2019-03-23 01:22:19 +08:00
|
|
|
/// Shared implementation behind MemorySSA's public walkers. It owns the
/// actual ClobberWalker and exposes the two clobber queries that
/// CachingWalker and SkipSelfWalker thinly wrap. Templated on the alias
/// analysis used (AAResults during queries, BatchAAResults during build).
template <class AliasAnalysisType> class MemorySSA::ClobberWalkerBase {
  ClobberWalker<AliasAnalysisType> Walker;
  MemorySSA *MSSA;

public:
  ClobberWalkerBase(MemorySSA *M, AliasAnalysisType *A, DominatorTree *D)
      : Walker(*M, *A, *D), MSSA(M) {}

  // Location-aware query: find the clobber of the given location starting
  // at (and including) the given access. The unsigned& is the remaining
  // upward-walk budget, decremented as the walk proceeds.
  MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *,
                                              const MemoryLocation &,
                                              unsigned &);

  // Third argument (bool), defines whether the clobber search should skip the
  // original queried access. If true, there will be a follow-up query searching
  // for a clobber access past "self". Note that the Optimized access is not
  // updated if a new clobber is found by this SkipSelf search. If this
  // additional query becomes heavily used we may decide to cache the result.
  // Walker instantiations will decide how to set the SkipSelf bool.
  MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *, unsigned &, bool);
};
|
|
|
|
|
2018-05-01 23:54:18 +08:00
|
|
|
/// A MemorySSAWalker that does AA walks to disambiguate accesses. It no
|
2018-05-26 10:28:55 +08:00
|
|
|
/// longer does caching on its own, but the name has been retained for the
|
|
|
|
/// moment.
|
[AliasAnalysis] Second prototype to cache BasicAA / anyAA state.
Summary:
Adding contained caching to AliasAnalysis. BasicAA is currently the only one using it.
AA changes:
- This patch is pulling the caches from BasicAAResults to AAResults, meaning the getModRefInfo call benefits from the IsCapturedCache as well when in "batch mode".
- All AAResultBase implementations add the QueryInfo member to all APIs. AAResults APIs maintain wrapper APIs such that all alias()/getModRefInfo call sites are unchanged.
- AA now provides a BatchAAResults type as a wrapper to AAResults. It keeps the AAResults instance and a QueryInfo instantiated to batch mode. It delegates all work to the AAResults instance with the batched QueryInfo. More API wrappers may be needed in BatchAAResults; only the minimum needed is currently added.
MemorySSA changes:
- All walkers are now templated on the AA used (AliasAnalysis=AAResults or BatchAAResults).
- At build time, we optimize uses; now we create a local walker (lives only as long as OptimizeUses does) using BatchAAResults.
- All Walkers have an internal AA and only use that now, never the AA in MemorySSA. The Walkers receive the AA they will use when built.
- The walker we use for queries after the build is instantiated on AliasAnalysis and is built after building MemorySSA and setting AA.
- All static methods doing walking are now templated on AliasAnalysisType if they are used both during build and after. If used only during build, the method now only takes a BatchAAResults. If used only after build, the method now takes an AliasAnalysis.
Subscribers: sanjoy, arsenm, jvesely, nhaehnle, jlebar, george.burgess.iv, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D59315
llvm-svn: 356783
2019-03-23 01:22:19 +08:00
|
|
|
template <class AliasAnalysisType>
|
2016-06-25 05:02:12 +08:00
|
|
|
class MemorySSA::CachingWalker final : public MemorySSAWalker {
|
[AliasAnalysis] Second prototype to cache BasicAA / anyAA state.
Summary:
Adding contained caching to AliasAnalysis. BasicAA is currently the only one using it.
AA changes:
- This patch is pulling the caches from BasicAAResults to AAResults, meaning the getModRefInfo call benefits from the IsCapturedCache as well when in "batch mode".
- All AAResultBase implementations add the QueryInfo member to all APIs. AAResults APIs maintain wrapper APIs such that all alias()/getModRefInfo call sites are unchanged.
- AA now provides a BatchAAResults type as a wrapper to AAResults. It keeps the AAResults instance and a QueryInfo instantiated to batch mode. It delegates all work to the AAResults instance with the batched QueryInfo. More API wrappers may be needed in BatchAAResults; only the minimum needed is currently added.
MemorySSA changes:
- All walkers are now templated on the AA used (AliasAnalysis=AAResults or BatchAAResults).
- At build time, we optimize uses; now we create a local walker (lives only as long as OptimizeUses does) using BatchAAResults.
- All Walkers have an internal AA and only use that now, never the AA in MemorySSA. The Walkers receive the AA they will use when built.
- The walker we use for queries after the build is instantiated on AliasAnalysis and is built after building MemorySSA and setting AA.
- All static methods doing walking are now templated on AliasAnalysisType if they are used both during build and after. If used only during build, the method now only takes a BatchAAResults. If used only after build, the method now takes an AliasAnalysis.
Subscribers: sanjoy, arsenm, jvesely, nhaehnle, jlebar, george.burgess.iv, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D59315
llvm-svn: 356783
2019-03-23 01:22:19 +08:00
|
|
|
ClobberWalkerBase<AliasAnalysisType> *Walker;
|
[MemorySSA] Update to the new shiny walker.
This patch updates MemorySSA's use-optimizing walker to be more
accurate and, in some cases, faster.
Essentially, this changed our core walking algorithm from a
cache-as-you-go DFS to an iteratively expanded DFS, with all of the
caching happening at the end. Said expansion happens when we hit a Phi,
P; we'll try to do the smallest amount of work possible to see if
optimizing above that Phi is legal in the first place. If so, we'll
expand the search to see if we can optimize to the next phi, etc.
An iteratively expanded DFS lets us potentially quit earlier (because we
don't assume that we can optimize above all phis) than our old walker.
Additionally, because we don't cache as we go, we can now optimize above
loops.
As an added bonus, this patch adds a ton of verification (if
EXPENSIVE_CHECKS are enabled), so finding bugs is easier.
Differential Revision: https://reviews.llvm.org/D21777
llvm-svn: 275940
2016-07-19 09:29:15 +08:00
|
|
|
|
2016-06-25 05:02:12 +08:00
|
|
|
public:
|
[AliasAnalysis] Second prototype to cache BasicAA / anyAA state.
Summary:
Adding contained caching to AliasAnalysis. BasicAA is currently the only one using it.
AA changes:
- This patch is pulling the caches from BasicAAResults to AAResults, meaning the getModRefInfo call benefits from the IsCapturedCache as well when in "batch mode".
- All AAResultBase implementations add the QueryInfo member to all APIs. AAResults APIs maintain wrapper APIs such that all alias()/getModRefInfo call sites are unchanged.
- AA now provides a BatchAAResults type as a wrapper to AAResults. It keeps the AAResults instance and a QueryInfo instantiated to batch mode. It delegates all work to the AAResults instance with the batched QueryInfo. More API wrappers may be needed in BatchAAResults; only the minimum needed is currently added.
MemorySSA changes:
- All walkers are now templated on the AA used (AliasAnalysis=AAResults or BatchAAResults).
- At build time, we optimize uses; now we create a local walker (lives only as long as OptimizeUses does) using BatchAAResults.
- All Walkers have an internal AA and only use that now, never the AA in MemorySSA. The Walkers receive the AA they will use when built.
- The walker we use for queries after the build is instantiated on AliasAnalysis and is built after building MemorySSA and setting AA.
- All static methods doing walking are now templated on AliasAnalysisType if they are used both during build and after. If used only during build, the method now only takes a BatchAAResults. If used only after build, the method now takes an AliasAnalysis.
Subscribers: sanjoy, arsenm, jvesely, nhaehnle, jlebar, george.burgess.iv, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D59315
llvm-svn: 356783
2019-03-23 01:22:19 +08:00
|
|
|
CachingWalker(MemorySSA *M, ClobberWalkerBase<AliasAnalysisType> *W)
|
2019-01-08 03:22:37 +08:00
|
|
|
: MemorySSAWalker(M), Walker(W) {}
|
2017-08-17 06:07:40 +08:00
|
|
|
~CachingWalker() override = default;
|
2016-06-25 05:02:12 +08:00
|
|
|
|
2016-07-21 03:51:34 +08:00
|
|
|
using MemorySSAWalker::getClobberingMemoryAccess;
|
2017-08-17 06:07:40 +08:00
|
|
|
|
2019-03-30 05:56:09 +08:00
|
|
|
MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, unsigned &UWL) {
|
|
|
|
return Walker->getClobberingMemoryAccessBase(MA, UWL, false);
|
|
|
|
}
|
|
|
|
MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
|
|
|
|
const MemoryLocation &Loc,
|
|
|
|
unsigned &UWL) {
|
|
|
|
return Walker->getClobberingMemoryAccessBase(MA, Loc, UWL);
|
|
|
|
}
|
|
|
|
|
[AliasAnalysis] Second prototype to cache BasicAA / anyAA state.
Summary:
Adding contained caching to AliasAnalysis. BasicAA is currently the only one using it.
AA changes:
- This patch is pulling the caches from BasicAAResults to AAResults, meaning the getModRefInfo call benefits from the IsCapturedCache as well when in "batch mode".
- All AAResultBase implementations add the QueryInfo member to all APIs. AAResults APIs maintain wrapper APIs such that all alias()/getModRefInfo call sites are unchanged.
- AA now provides a BatchAAResults type as a wrapper to AAResults. It keeps the AAResults instance and a QueryInfo instantiated to batch mode. It delegates all work to the AAResults instance with the batched QueryInfo. More API wrappers may be needed in BatchAAResults; only the minimum needed is currently added.
MemorySSA changes:
- All walkers are now templated on the AA used (AliasAnalysis=AAResults or BatchAAResults).
- At build time, we optimize uses; now we create a local walker (lives only as long as OptimizeUses does) using BatchAAResults.
- All Walkers have an internal AA and only use that now, never the AA in MemorySSA. The Walkers receive the AA they will use when built.
- The walker we use for queries after the build is instantiated on AliasAnalysis and is built after building MemorySSA and setting AA.
- All static methods doing walking are now templated on AliasAnalysisType if they are used both during build and after. If used only during build, the method now only takes a BatchAAResults. If used only after build, the method now takes an AliasAnalysis.
Subscribers: sanjoy, arsenm, jvesely, nhaehnle, jlebar, george.burgess.iv, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D59315
llvm-svn: 356783
2019-03-23 01:22:19 +08:00
|
|
|
MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA) override {
|
2019-03-30 05:56:09 +08:00
|
|
|
unsigned UpwardWalkLimit = MaxCheckLimit;
|
|
|
|
return getClobberingMemoryAccess(MA, UpwardWalkLimit);
|
[AliasAnalysis] Second prototype to cache BasicAA / anyAA state.
Summary:
Adding contained caching to AliasAnalysis. BasicAA is currently the only one using it.
AA changes:
- This patch is pulling the caches from BasicAAResults to AAResults, meaning the getModRefInfo call benefits from the IsCapturedCache as well when in "batch mode".
- All AAResultBase implementations add the QueryInfo member to all APIs. AAResults APIs maintain wrapper APIs such that all alias()/getModRefInfo call sites are unchanged.
- AA now provides a BatchAAResults type as a wrapper to AAResults. It keeps the AAResults instance and a QueryInfo instantiated to batch mode. It delegates all work to the AAResults instance with the batched QueryInfo. More API wrappers may be needed in BatchAAResults; only the minimum needed is currently added.
MemorySSA changes:
- All walkers are now templated on the AA used (AliasAnalysis=AAResults or BatchAAResults).
- At build time, we optimize uses; now we create a local walker (lives only as long as OptimizeUses does) using BatchAAResults.
- All Walkers have an internal AA and only use that now, never the AA in MemorySSA. The Walkers receive the AA they will use when built.
- The walker we use for queries after the build is instantiated on AliasAnalysis and is built after building MemorySSA and setting AA.
- All static methods doing walking are now templated on AliasAnalysisType if they are used both during build and after. If used only during build, the method now only takes a BatchAAResults. If used only after build, the method now takes an AliasAnalysis.
Subscribers: sanjoy, arsenm, jvesely, nhaehnle, jlebar, george.burgess.iv, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D59315
llvm-svn: 356783
2019-03-23 01:22:19 +08:00
|
|
|
}
|
2019-01-08 03:22:37 +08:00
|
|
|
MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
|
[AliasAnalysis] Second prototype to cache BasicAA / anyAA state.
Summary:
Adding contained caching to AliasAnalysis. BasicAA is currently the only one using it.
AA changes:
- This patch is pulling the caches from BasicAAResults to AAResults, meaning the getModRefInfo call benefits from the IsCapturedCache as well when in "batch mode".
- All AAResultBase implementations add the QueryInfo member to all APIs. AAResults APIs maintain wrapper APIs such that all alias()/getModRefInfo call sites are unchanged.
- AA now provides a BatchAAResults type as a wrapper to AAResults. It keeps the AAResults instance and a QueryInfo instantiated to batch mode. It delegates all work to the AAResults instance with the batched QueryInfo. More API wrappers may be needed in BatchAAResults; only the minimum needed is currently added.
MemorySSA changes:
- All walkers are now templated on the AA used (AliasAnalysis=AAResults or BatchAAResults).
- At build time, we optimize uses; now we create a local walker (lives only as long as OptimizeUses does) using BatchAAResults.
- All Walkers have an internal AA and only use that now, never the AA in MemorySSA. The Walkers receive the AA they will use when built.
- The walker we use for queries after the build is instantiated on AliasAnalysis and is built after building MemorySSA and setting AA.
- All static methods doing walking are now templated on AliasAnalysisType if they are used both during build and after. If used only during build, the method now only takes a BatchAAResults. If used only after build, the method now takes an AliasAnalysis.
Subscribers: sanjoy, arsenm, jvesely, nhaehnle, jlebar, george.burgess.iv, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D59315
llvm-svn: 356783
2019-03-23 01:22:19 +08:00
|
|
|
const MemoryLocation &Loc) override {
|
2019-03-30 05:56:09 +08:00
|
|
|
unsigned UpwardWalkLimit = MaxCheckLimit;
|
|
|
|
return getClobberingMemoryAccess(MA, Loc, UpwardWalkLimit);
|
[AliasAnalysis] Second prototype to cache BasicAA / anyAA state.
Summary:
Adding contained caching to AliasAnalysis. BasicAA is currently the only one using it.
AA changes:
- This patch is pulling the caches from BasicAAResults to AAResults, meaning the getModRefInfo call benefits from the IsCapturedCache as well when in "batch mode".
- All AAResultBase implementations add the QueryInfo member to all APIs. AAResults APIs maintain wrapper APIs such that all alias()/getModRefInfo call sites are unchanged.
- AA now provides a BatchAAResults type as a wrapper to AAResults. It keeps the AAResults instance and a QueryInfo instantiated to batch mode. It delegates all work to the AAResults instance with the batched QueryInfo. More API wrappers may be needed in BatchAAResults; only the minimum needed is currently added.
MemorySSA changes:
- All walkers are now templated on the AA used (AliasAnalysis=AAResults or BatchAAResults).
- At build time, we optimize uses; now we create a local walker (lives only as long as OptimizeUses does) using BatchAAResults.
- All Walkers have an internal AA and only use that now, never the AA in MemorySSA. The Walkers receive the AA they will use when built.
- The walker we use for queries after the build is instantiated on AliasAnalysis and is built after building MemorySSA and setting AA.
- All static methods doing walking are now templated on AliasAnalysisType if they are used both during build and after. If used only during build, the method now only takes a BatchAAResults. If used only after build, the method now takes an AliasAnalysis.
Subscribers: sanjoy, arsenm, jvesely, nhaehnle, jlebar, george.burgess.iv, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D59315
llvm-svn: 356783
2019-03-23 01:22:19 +08:00
|
|
|
}
|
2019-01-08 03:22:37 +08:00
|
|
|
|
|
|
|
void invalidateInfo(MemoryAccess *MA) override {
|
|
|
|
if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
|
|
|
|
MUD->resetOptimized();
|
|
|
|
}
|
2016-02-03 06:46:49 +08:00
|
|
|
};
|
|
|
|
|
[AliasAnalysis] Second prototype to cache BasicAA / anyAA state.
Summary:
Adding contained caching to AliasAnalysis. BasicAA is currently the only one using it.
AA changes:
- This patch is pulling the caches from BasicAAResults to AAResults, meaning the getModRefInfo call benefits from the IsCapturedCache as well when in "batch mode".
- All AAResultBase implementations add the QueryInfo member to all APIs. AAResults APIs maintain wrapper APIs such that all alias()/getModRefInfo call sites are unchanged.
- AA now provides a BatchAAResults type as a wrapper to AAResults. It keeps the AAResults instance and a QueryInfo instantiated to batch mode. It delegates all work to the AAResults instance with the batched QueryInfo. More API wrappers may be needed in BatchAAResults; only the minimum needed is currently added.
MemorySSA changes:
- All walkers are now templated on the AA used (AliasAnalysis=AAResults or BatchAAResults).
- At build time, we optimize uses; now we create a local walker (lives only as long as OptimizeUses does) using BatchAAResults.
- All Walkers have an internal AA and only use that now, never the AA in MemorySSA. The Walkers receive the AA they will use when built.
- The walker we use for queries after the build is instantiated on AliasAnalysis and is built after building MemorySSA and setting AA.
- All static methods doing walking are now templated on AliasAnalysisType if they are used both during build and after. If used only during build, the method now only takes a BatchAAResults. If used only after build, the method now takes an AliasAnalysis.
Subscribers: sanjoy, arsenm, jvesely, nhaehnle, jlebar, george.burgess.iv, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D59315
llvm-svn: 356783
2019-03-23 01:22:19 +08:00
|
|
|
template <class AliasAnalysisType>
|
2019-01-08 03:38:47 +08:00
|
|
|
class MemorySSA::SkipSelfWalker final : public MemorySSAWalker {
|
[AliasAnalysis] Second prototype to cache BasicAA / anyAA state.
Summary:
Adding contained caching to AliasAnalysis. BasicAA is currently the only one using it.
AA changes:
- This patch is pulling the caches from BasicAAResults to AAResults, meaning the getModRefInfo call benefits from the IsCapturedCache as well when in "batch mode".
- All AAResultBase implementations add the QueryInfo member to all APIs. AAResults APIs maintain wrapper APIs such that all alias()/getModRefInfo call sites are unchanged.
- AA now provides a BatchAAResults type as a wrapper to AAResults. It keeps the AAResults instance and a QueryInfo instantiated to batch mode. It delegates all work to the AAResults instance with the batched QueryInfo. More API wrappers may be needed in BatchAAResults; only the minimum needed is currently added.
MemorySSA changes:
- All walkers are now templated on the AA used (AliasAnalysis=AAResults or BatchAAResults).
- At build time, we optimize uses; now we create a local walker (lives only as long as OptimizeUses does) using BatchAAResults.
- All Walkers have an internal AA and only use that now, never the AA in MemorySSA. The Walkers receive the AA they will use when built.
- The walker we use for queries after the build is instantiated on AliasAnalysis and is built after building MemorySSA and setting AA.
- All static methods doing walking are now templated on AliasAnalysisType if they are used both during build and after. If used only during build, the method now only takes a BatchAAResults. If used only after build, the method now takes an AliasAnalysis.
Subscribers: sanjoy, arsenm, jvesely, nhaehnle, jlebar, george.burgess.iv, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D59315
llvm-svn: 356783
2019-03-23 01:22:19 +08:00
|
|
|
ClobberWalkerBase<AliasAnalysisType> *Walker;
|
2019-01-08 03:38:47 +08:00
|
|
|
|
|
|
|
public:
|
[AliasAnalysis] Second prototype to cache BasicAA / anyAA state.
Summary:
Adding contained caching to AliasAnalysis. BasicAA is currently the only one using it.
AA changes:
- This patch is pulling the caches from BasicAAResults to AAResults, meaning the getModRefInfo call benefits from the IsCapturedCache as well when in "batch mode".
- All AAResultBase implementations add the QueryInfo member to all APIs. AAResults APIs maintain wrapper APIs such that all alias()/getModRefInfo call sites are unchanged.
- AA now provides a BatchAAResults type as a wrapper to AAResults. It keeps the AAResults instance and a QueryInfo instantiated to batch mode. It delegates all work to the AAResults instance with the batched QueryInfo. More API wrappers may be needed in BatchAAResults; only the minimum needed is currently added.
MemorySSA changes:
- All walkers are now templated on the AA used (AliasAnalysis=AAResults or BatchAAResults).
- At build time, we optimize uses; now we create a local walker (lives only as long as OptimizeUses does) using BatchAAResults.
- All Walkers have an internal AA and only use that now, never the AA in MemorySSA. The Walkers receive the AA they will use when built.
- The walker we use for queries after the build is instantiated on AliasAnalysis and is built after building MemorySSA and setting AA.
- All static methods doing walking are now templated on AliasAnalysisType if they are used both during build and after. If used only during build, the method now only takes a BatchAAResults. If used only after build, the method now takes an AliasAnalysis.
Subscribers: sanjoy, arsenm, jvesely, nhaehnle, jlebar, george.burgess.iv, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D59315
llvm-svn: 356783
2019-03-23 01:22:19 +08:00
|
|
|
SkipSelfWalker(MemorySSA *M, ClobberWalkerBase<AliasAnalysisType> *W)
|
2019-01-08 03:38:47 +08:00
|
|
|
: MemorySSAWalker(M), Walker(W) {}
|
|
|
|
~SkipSelfWalker() override = default;
|
|
|
|
|
|
|
|
using MemorySSAWalker::getClobberingMemoryAccess;
|
|
|
|
|
2019-03-30 05:56:09 +08:00
|
|
|
MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, unsigned &UWL) {
|
|
|
|
return Walker->getClobberingMemoryAccessBase(MA, UWL, true);
|
|
|
|
}
|
|
|
|
MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
|
|
|
|
const MemoryLocation &Loc,
|
|
|
|
unsigned &UWL) {
|
|
|
|
return Walker->getClobberingMemoryAccessBase(MA, Loc, UWL);
|
|
|
|
}
|
|
|
|
|
[AliasAnalysis] Second prototype to cache BasicAA / anyAA state.
Summary:
Adding contained caching to AliasAnalysis. BasicAA is currently the only one using it.
AA changes:
- This patch is pulling the caches from BasicAAResults to AAResults, meaning the getModRefInfo call benefits from the IsCapturedCache as well when in "batch mode".
- All AAResultBase implementations add the QueryInfo member to all APIs. AAResults APIs maintain wrapper APIs such that all alias()/getModRefInfo call sites are unchanged.
- AA now provides a BatchAAResults type as a wrapper to AAResults. It keeps the AAResults instance and a QueryInfo instantiated to batch mode. It delegates all work to the AAResults instance with the batched QueryInfo. More API wrappers may be needed in BatchAAResults; only the minimum needed is currently added.
MemorySSA changes:
- All walkers are now templated on the AA used (AliasAnalysis=AAResults or BatchAAResults).
- At build time, we optimize uses; now we create a local walker (lives only as long as OptimizeUses does) using BatchAAResults.
- All Walkers have an internal AA and only use that now, never the AA in MemorySSA. The Walkers receive the AA they will use when built.
- The walker we use for queries after the build is instantiated on AliasAnalysis and is built after building MemorySSA and setting AA.
- All static methods doing walking are now templated on AliasAnalysisType if they are used both during build and after. If used only during build, the method now only takes a BatchAAResults. If used only after build, the method now takes an AliasAnalysis.
Subscribers: sanjoy, arsenm, jvesely, nhaehnle, jlebar, george.burgess.iv, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D59315
llvm-svn: 356783
2019-03-23 01:22:19 +08:00
|
|
|
MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA) override {
|
2019-03-30 05:56:09 +08:00
|
|
|
unsigned UpwardWalkLimit = MaxCheckLimit;
|
|
|
|
return getClobberingMemoryAccess(MA, UpwardWalkLimit);
|
[AliasAnalysis] Second prototype to cache BasicAA / anyAA state.
Summary:
Adding contained caching to AliasAnalysis. BasicAA is currently the only one using it.
AA changes:
- This patch is pulling the caches from BasicAAResults to AAResults, meaning the getModRefInfo call benefits from the IsCapturedCache as well when in "batch mode".
- All AAResultBase implementations add the QueryInfo member to all APIs. AAResults APIs maintain wrapper APIs such that all alias()/getModRefInfo call sites are unchanged.
- AA now provides a BatchAAResults type as a wrapper to AAResults. It keeps the AAResults instance and a QueryInfo instantiated to batch mode. It delegates all work to the AAResults instance with the batched QueryInfo. More API wrappers may be needed in BatchAAResults; only the minimum needed is currently added.
MemorySSA changes:
- All walkers are now templated on the AA used (AliasAnalysis=AAResults or BatchAAResults).
- At build time, we optimize uses; now we create a local walker (lives only as long as OptimizeUses does) using BatchAAResults.
- All Walkers have an internal AA and only use that now, never the AA in MemorySSA. The Walkers receive the AA they will use when built.
- The walker we use for queries after the build is instantiated on AliasAnalysis and is built after building MemorySSA and setting AA.
- All static methods doing walking are now templated on AliasAnalysisType if they are used both during build and after. If used only during build, the method now only takes a BatchAAResults. If used only after build, the method now takes an AliasAnalysis.
Subscribers: sanjoy, arsenm, jvesely, nhaehnle, jlebar, george.burgess.iv, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D59315
llvm-svn: 356783
2019-03-23 01:22:19 +08:00
|
|
|
}
|
2019-01-08 03:38:47 +08:00
|
|
|
  /// Location-specific clobber query: forwards to the limit-taking overload
  /// with a fresh upward-walk budget of MaxCheckLimit.
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
                                          const MemoryLocation &Loc) override {
    // Fresh budget per top-level query; the callee decrements it as it walks.
    unsigned UpwardWalkLimit = MaxCheckLimit;
    return getClobberingMemoryAccess(MA, Loc, UpwardWalkLimit);
  }
|
2019-01-08 03:38:47 +08:00
|
|
|
|
|
|
|
  /// Invalidate any cached walk result for \p MA. Only uses/defs carry a
  /// cached "optimized" clobber, so phis need no action here.
  void invalidateInfo(MemoryAccess *MA) override {
    if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
      MUD->resetOptimized();
  }
|
|
|
|
};
|
|
|
|
|
2017-08-17 06:07:40 +08:00
|
|
|
} // end namespace llvm
|
|
|
|
|
2017-02-21 06:26:03 +08:00
|
|
|
/// Propagate \p IncomingVal into the MemoryPhis of all successors of \p BB.
/// When \p RenameAllUses is set (a partial re-rename), the existing incoming
/// value for the BB edge is overwritten in place; otherwise a new incoming
/// edge is appended.
void MemorySSA::renameSuccessorPhis(BasicBlock *BB, MemoryAccess *IncomingVal,
                                    bool RenameAllUses) {
  // Pass through values to our successors
  for (const BasicBlock *S : successors(BB)) {
    auto It = PerBlockAccesses.find(S);
    // Rename the phi nodes in our successor block
    // (a MemoryPhi, if present, is always the first access in the list).
    if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
      continue;
    AccessList *Accesses = It->second.get();
    auto *Phi = cast<MemoryPhi>(&Accesses->front());
    if (RenameAllUses) {
      // Partial rename: the edge from BB must already exist on the phi, so
      // replace its value rather than adding a duplicate operand.
      bool ReplacementDone = false;
      for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I)
        if (Phi->getIncomingBlock(I) == BB) {
          Phi->setIncomingValue(I, IncomingVal);
          ReplacementDone = true;
        }
      // Silence the unused-variable warning in no-assert builds.
      (void) ReplacementDone;
      assert(ReplacementDone && "Incomplete phi during partial rename");
    } else
      Phi->addIncoming(IncomingVal, BB);
  }
}
|
|
|
|
|
2018-05-01 23:54:18 +08:00
|
|
|
/// Rename a single basic block into MemorySSA form.
/// Uses the standard SSA renaming algorithm.
/// \p IncomingVal is the reaching memory state on entry to \p BB; each
/// MemoryDef (and the block's MemoryPhi, if any) becomes the new reaching
/// state for everything after it.
/// \returns The new incoming value.
MemoryAccess *MemorySSA::renameBlock(BasicBlock *BB, MemoryAccess *IncomingVal,
                                     bool RenameAllUses) {
  auto It = PerBlockAccesses.find(BB);
  // Skip most processing if the list is empty.
  if (It != PerBlockAccesses.end()) {
    AccessList *Accesses = It->second.get();
    for (MemoryAccess &L : *Accesses) {
      if (MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(&L)) {
        // Only overwrite an already-set defining access when doing a full
        // re-rename; during the initial rename it is still null.
        if (MUD->getDefiningAccess() == nullptr || RenameAllUses)
          MUD->setDefiningAccess(IncomingVal);
        if (isa<MemoryDef>(&L))
          IncomingVal = &L;
      } else {
        // MemoryPhi: it defines the memory state for the whole block.
        IncomingVal = &L;
      }
    }
  }
  return IncomingVal;
}
|
|
|
|
|
2018-05-01 23:54:18 +08:00
|
|
|
/// This is the standard SSA renaming algorithm.
///
/// We walk the dominator tree in preorder, renaming accesses, and then filling
/// in phi nodes in our successors. The recursion is made explicit with a
/// worklist of (dom-tree node, next-child iterator, incoming value) entries.
void MemorySSA::renamePass(DomTreeNode *Root, MemoryAccess *IncomingVal,
                           SmallPtrSetImpl<BasicBlock *> &Visited,
                           bool SkipVisited, bool RenameAllUses) {
  assert(Root && "Trying to rename accesses in an unreachable block");

  SmallVector<RenamePassData, 32> WorkStack;
  // Skip everything if we already renamed this block and we are skipping.
  // Note: You can't sink this into the if, because we need it to occur
  // regardless of whether we skip blocks or not.
  bool AlreadyVisited = !Visited.insert(Root->getBlock()).second;
  if (SkipVisited && AlreadyVisited)
    return;

  IncomingVal = renameBlock(Root->getBlock(), IncomingVal, RenameAllUses);
  renameSuccessorPhis(Root->getBlock(), IncomingVal, RenameAllUses);
  WorkStack.push_back({Root, Root->begin(), IncomingVal});

  while (!WorkStack.empty()) {
    DomTreeNode *Node = WorkStack.back().DTN;
    DomTreeNode::const_iterator ChildIt = WorkStack.back().ChildIt;
    IncomingVal = WorkStack.back().IncomingVal;

    if (ChildIt == Node->end()) {
      // All children of this dom-tree node have been processed.
      WorkStack.pop_back();
    } else {
      DomTreeNode *Child = *ChildIt;
      ++WorkStack.back().ChildIt;
      BasicBlock *BB = Child->getBlock();
      // Note: You can't sink this into the if, because we need it to occur
      // regardless of whether we skip blocks or not.
      AlreadyVisited = !Visited.insert(BB).second;
      if (SkipVisited && AlreadyVisited) {
        // We already visited this during our renaming, which can happen when
        // being asked to rename multiple blocks. Figure out the incoming val,
        // which is the last def.
        // Incoming value can only change if there is a block def, and in that
        // case, it's the last block def in the list.
        if (auto *BlockDefs = getWritableBlockDefs(BB))
          IncomingVal = &*BlockDefs->rbegin();
      } else
        IncomingVal = renameBlock(BB, IncomingVal, RenameAllUses);
      renameSuccessorPhis(BB, IncomingVal, RenameAllUses);
      WorkStack.push_back({Child, Child->begin(), IncomingVal});
    }
  }
}
|
|
|
|
|
2018-05-01 23:54:18 +08:00
|
|
|
/// This handles unreachable block accesses by deleting phi nodes in
/// unreachable blocks, and marking all other unreachable MemoryAccess's as
/// being uses of the live on entry definition.
void MemorySSA::markUnreachableAsLiveOnEntry(BasicBlock *BB) {
  assert(!DT->isReachableFromEntry(BB) &&
         "Reachable block found while handling unreachable blocks");

  // Make sure phi nodes in our reachable successors end up with a
  // LiveOnEntryDef for our incoming edge, even though our block is forward
  // unreachable. We could just disconnect these blocks from the CFG fully,
  // but we do not right now.
  for (const BasicBlock *S : successors(BB)) {
    if (!DT->isReachableFromEntry(S))
      continue;
    auto It = PerBlockAccesses.find(S);
    // Rename the phi nodes in our successor block
    if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
      continue;
    AccessList *Accesses = It->second.get();
    auto *Phi = cast<MemoryPhi>(&Accesses->front());
    Phi->addIncoming(LiveOnEntryDef.get(), BB);
  }

  auto It = PerBlockAccesses.find(BB);
  if (It == PerBlockAccesses.end())
    return;

  auto &Accesses = It->second;
  // Grab the successor before possibly erasing, so iteration stays valid.
  for (auto AI = Accesses->begin(), AE = Accesses->end(); AI != AE;) {
    auto Next = std::next(AI);
    // If we have a phi, just remove it. We are going to replace all
    // users with live on entry.
    if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(AI))
      UseOrDef->setDefiningAccess(LiveOnEntryDef.get());
    else
      Accesses->erase(AI);
    AI = Next;
  }
}
|
|
|
|
|
2016-06-02 05:30:40 +08:00
|
|
|
/// Construct MemorySSA for \p Func, building it immediately with a batched
/// alias analysis wrapper around \p AA.
MemorySSA::MemorySSA(Function &Func, AliasAnalysis *AA, DominatorTree *DT)
    : AA(nullptr), DT(DT), F(Func), LiveOnEntryDef(nullptr), Walker(nullptr),
      SkipWalker(nullptr), NextID(0) {
  // Build MemorySSA using a batch alias analysis. This reuses the internal
  // state that AA collects during an alias()/getModRefInfo() call. This is
  // safe because there are no CFG changes while building MemorySSA and can
  // significantly reduce the time spent by the compiler in AA, because we will
  // make queries about all the instructions in the Function.
  assert(AA && "No alias analysis?");
  BatchAAResults BatchAA(*AA);
  buildMemorySSA(BatchAA);
  // Intentionally leave AA as nullptr while building so we don't accidentally
  // use non-batch AliasAnalysis.
  this->AA = AA;
  // Also create the walker here.
  getWalker();
}
|
|
|
|
|
2016-02-03 06:46:49 +08:00
|
|
|
MemorySSA::~MemorySSA() {
  // Drop the def-use references held by every access first, so the intrusive
  // per-block lists can be torn down without any dangling uses remaining.
  for (const auto &BlockAndList : PerBlockAccesses)
    for (MemoryAccess &Access : *BlockAndList.second)
      Access.dropAllReferences();
}
|
|
|
|
|
2016-06-22 02:39:20 +08:00
|
|
|
MemorySSA::AccessList *MemorySSA::getOrCreateAccessList(const BasicBlock *BB) {
  // Insert a placeholder entry for BB; allocate the list only when the
  // insertion actually created a new entry.
  auto InsertRes = PerBlockAccesses.insert(std::make_pair(BB, nullptr));
  auto &ListPtr = InsertRes.first->second;
  if (InsertRes.second)
    ListPtr = std::make_unique<AccessList>();
  return ListPtr.get();
}
|
2017-08-17 06:07:40 +08:00
|
|
|
|
2017-01-26 04:56:19 +08:00
|
|
|
MemorySSA::DefsList *MemorySSA::getOrCreateDefsList(const BasicBlock *BB) {
  // Mirror of getOrCreateAccessList, but for the per-block defs list: a new
  // DefsList is allocated lazily the first time BB is seen.
  auto InsertRes = PerBlockDefs.insert(std::make_pair(BB, nullptr));
  auto &DefsPtr = InsertRes.first->second;
  if (InsertRes.second)
    DefsPtr = std::make_unique<DefsList>();
  return DefsPtr.get();
}
|
2016-02-03 06:46:49 +08:00
|
|
|
|
2017-08-17 06:07:40 +08:00
|
|
|
namespace llvm {
|
|
|
|
|
2016-08-03 00:24:03 +08:00
|
|
|
/// This class is a batch walker of all MemoryUse's in the program, and points
/// their defining access at the thing that actually clobbers them. Because it
/// is a batch walker that touches everything, it does not operate like the
/// other walkers. This walker is basically performing a top-down SSA renaming
/// pass, where the version stack is used as the cache. This enables it to be
/// significantly more time and memory efficient than using the regular walker,
/// which is walking bottom-up.
class MemorySSA::OptimizeUses {
public:
  OptimizeUses(MemorySSA *MSSA, CachingWalker<BatchAAResults> *Walker,
               BatchAAResults *BAA, DominatorTree *DT)
      : MSSA(MSSA), Walker(Walker), AA(BAA), DT(DT) {}

  /// Walk the dominator tree and optimize every MemoryUse in the function.
  void optimizeUses();

private:
  /// This represents where a given memorylocation is in the stack.
  struct MemlocStackInfo {
    // This essentially is keeping track of versions of the stack. Whenever
    // the stack changes due to pushes or pops, these versions increase.
    unsigned long StackEpoch;
    unsigned long PopEpoch;
    // This is the lower bound of places on the stack to check. It is equal to
    // the place the last stack walk ended.
    // Note: Correctness depends on this being initialized to 0, which densemap
    // does
    unsigned long LowerBound;
    // Block the current LowerBound was computed in; used to detect when the
    // bound no longer dominates the block being processed.
    const BasicBlock *LowerBoundBlock;
    // This is where the last walk for this memory location ended.
    unsigned long LastKill;
    // Whether LastKill (and AR) still refer to a valid stack position.
    bool LastKillValid;
    // Alias result recorded for the last kill, if any.
    Optional<AliasResult> AR;
  };

  /// Process one basic block's accesses against the shared version stack.
  void optimizeUsesInBlock(const BasicBlock *, unsigned long &, unsigned long &,
                           SmallVectorImpl<MemoryAccess *> &,
                           DenseMap<MemoryLocOrCall, MemlocStackInfo> &);

  MemorySSA *MSSA;
  CachingWalker<BatchAAResults> *Walker;
  BatchAAResults *AA;
  DominatorTree *DT;
};
|
|
|
|
|
2017-08-17 06:07:40 +08:00
|
|
|
} // end namespace llvm
|
|
|
|
|
2016-08-03 00:24:03 +08:00
|
|
|
/// Optimize the uses in a given block This is basically the SSA renaming
/// algorithm, with one caveat: We are able to use a single stack for all
/// MemoryUses. This is because the set of *possible* reaching MemoryDefs is
/// the same for every MemoryUse. The *actual* clobbering MemoryDef is just
/// going to be some position in that stack of possible ones.
///
/// We track the stack positions that each MemoryLocation needs
/// to check, and last ended at. This is because we only want to check the
/// things that changed since last time. The same MemoryLocation should
/// get clobbered by the same store (getModRefInfo does not use invariantness or
/// things like this, and if they start, we can modify MemoryLocOrCall to
/// include relevant data)
void MemorySSA::OptimizeUses::optimizeUsesInBlock(
    const BasicBlock *BB, unsigned long &StackEpoch, unsigned long &PopEpoch,
    SmallVectorImpl<MemoryAccess *> &VersionStack,
    DenseMap<MemoryLocOrCall, MemlocStackInfo> &LocStackInfo) {

  /// If no accesses, nothing to do.
  MemorySSA::AccessList *Accesses = MSSA->getWritableBlockAccesses(BB);
  if (Accesses == nullptr)
    return;

  // Pop everything that doesn't dominate the current block off the stack,
  // increment the PopEpoch to account for this.
  while (true) {
    assert(
        !VersionStack.empty() &&
        "Version stack should have liveOnEntry sentinel dominating everything");
    BasicBlock *BackBlock = VersionStack.back()->getBlock();
    if (DT->dominates(BackBlock, BB))
      break;
    // Pop all accesses belonging to the non-dominating block at once.
    while (VersionStack.back()->getBlock() == BackBlock)
      VersionStack.pop_back();
    ++PopEpoch;
  }

  for (MemoryAccess &MA : *Accesses) {
    auto *MU = dyn_cast<MemoryUse>(&MA);
    if (!MU) {
      // Defs and phis get pushed; they are the candidates uses walk through.
      VersionStack.push_back(&MA);
      ++StackEpoch;
      continue;
    }

    if (isUseTriviallyOptimizableToLiveOnEntry(*AA, MU->getMemoryInst())) {
      MU->setDefiningAccess(MSSA->getLiveOnEntryDef(), true, None);
      continue;
    }

    MemoryLocOrCall UseMLOC(MU);
    auto &LocInfo = LocStackInfo[UseMLOC];
    // If the pop epoch changed, it means we've removed stuff from top of
    // stack due to changing blocks. We may have to reset the lower bound or
    // last kill info.
    if (LocInfo.PopEpoch != PopEpoch) {
      LocInfo.PopEpoch = PopEpoch;
      LocInfo.StackEpoch = StackEpoch;
      // If the lower bound was in something that no longer dominates us, we
      // have to reset it.
      // We can't simply track stack size, because the stack may have had
      // pushes/pops in the meantime.
      // XXX: This is non-optimal, but only is slower cases with heavily
      // branching dominator trees. To get the optimal number of queries would
      // be to make lowerbound and lastkill a per-loc stack, and pop it until
      // the top of that stack dominates us. This does not seem worth it ATM.
      // A much cheaper optimization would be to always explore the deepest
      // branch of the dominator tree first. This will guarantee this resets on
      // the smallest set of blocks.
      if (LocInfo.LowerBoundBlock && LocInfo.LowerBoundBlock != BB &&
          !DT->dominates(LocInfo.LowerBoundBlock, BB)) {
        // Reset the lower bound of things to check.
        // TODO: Some day we should be able to reset to last kill, rather than
        // 0.
        LocInfo.LowerBound = 0;
        LocInfo.LowerBoundBlock = VersionStack[0]->getBlock();
        LocInfo.LastKillValid = false;
      }
    } else if (LocInfo.StackEpoch != StackEpoch) {
      // If all that has changed is the StackEpoch, we only have to check the
      // new things on the stack, because we've checked everything before. In
      // this case, the lower bound of things to check remains the same.
      LocInfo.PopEpoch = PopEpoch;
      LocInfo.StackEpoch = StackEpoch;
    }
    if (!LocInfo.LastKillValid) {
      LocInfo.LastKill = VersionStack.size() - 1;
      LocInfo.LastKillValid = true;
      LocInfo.AR = AliasResult::MayAlias;
    }

    // At this point, we should have corrected last kill and LowerBound to be
    // in bounds.
    assert(LocInfo.LowerBound < VersionStack.size() &&
           "Lower bound out of range");
    assert(LocInfo.LastKill < VersionStack.size() &&
           "Last kill info out of range");
    // In any case, the new upper bound is the top of the stack.
    unsigned long UpperBound = VersionStack.size() - 1;

    if (UpperBound - LocInfo.LowerBound > MaxCheckLimit) {
      LLVM_DEBUG(dbgs() << "MemorySSA skipping optimization of " << *MU << " ("
                        << *(MU->getMemoryInst()) << ")"
                        << " because there are "
                        << UpperBound - LocInfo.LowerBound
                        << " stores to disambiguate\n");
      // Because we did not walk, LastKill is no longer valid, as this may
      // have been a kill.
      LocInfo.LastKillValid = false;
      continue;
    }
    bool FoundClobberResult = false;
    unsigned UpwardWalkLimit = MaxCheckLimit;
    while (UpperBound > LocInfo.LowerBound) {
      if (isa<MemoryPhi>(VersionStack[UpperBound])) {
        // For phis, use the walker, see where we ended up, go there
        MemoryAccess *Result =
            Walker->getClobberingMemoryAccess(MU, UpwardWalkLimit);
        // We are guaranteed to find it or something is wrong
        while (VersionStack[UpperBound] != Result) {
          assert(UpperBound != 0);
          --UpperBound;
        }
        FoundClobberResult = true;
        break;
      }

      MemoryDef *MD = cast<MemoryDef>(VersionStack[UpperBound]);
      ClobberAlias CA = instructionClobbersQuery(MD, MU, UseMLOC, *AA);
      if (CA.IsClobber) {
        FoundClobberResult = true;
        LocInfo.AR = CA.AR;
        break;
      }
      --UpperBound;
    }

    // Note: Phis always have AliasResult AR set to MayAlias ATM.

    // At the end of this loop, UpperBound is either a clobber, or lower bound
    // PHI walking may cause it to be < LowerBound, and in fact, < LastKill.
    if (FoundClobberResult || UpperBound < LocInfo.LastKill) {
      // We were last killed now by where we got to
      if (MSSA->isLiveOnEntryDef(VersionStack[UpperBound]))
        LocInfo.AR = None;
      MU->setDefiningAccess(VersionStack[UpperBound], true, LocInfo.AR);
      LocInfo.LastKill = UpperBound;
    } else {
      // Otherwise, we checked all the new ones, and now we know we can get to
      // LastKill.
      MU->setDefiningAccess(VersionStack[LocInfo.LastKill], true, LocInfo.AR);
    }
    LocInfo.LowerBound = VersionStack.size() - 1;
    LocInfo.LowerBoundBlock = BB;
  }
}
|
|
|
|
|
|
|
|
/// Optimize uses to point to their actual clobbering definitions.
void MemorySSA::OptimizeUses::optimizeUses() {
  SmallVector<MemoryAccess *, 16> VersionStack;
  DenseMap<MemoryLocOrCall, MemlocStackInfo> LocStackInfo;
  // The liveOnEntry def acts as a sentinel dominating every block.
  VersionStack.push_back(MSSA->getLiveOnEntryDef());

  // Epochs start at 1 so a zero-initialized MemlocStackInfo is always stale.
  unsigned long StackEpoch = 1;
  unsigned long PopEpoch = 1;
  // We perform a non-recursive top-down dominator tree walk.
  for (const auto *DomNode : depth_first(DT->getRootNode()))
    optimizeUsesInBlock(DomNode->getBlock(), StackEpoch, PopEpoch, VersionStack,
                        LocStackInfo);
}
|
|
|
|
|
2016-08-23 03:14:30 +08:00
|
|
|
void MemorySSA::placePHINodes(
|
2018-05-16 02:40:29 +08:00
|
|
|
const SmallPtrSetImpl<BasicBlock *> &DefiningBlocks) {
|
2016-08-23 03:14:30 +08:00
|
|
|
// Determine where our MemoryPhi's should go
|
|
|
|
ForwardIDFCalculator IDFs(*DT);
|
|
|
|
IDFs.setDefiningBlocks(DefiningBlocks);
|
|
|
|
SmallVector<BasicBlock *, 32> IDFBlocks;
|
|
|
|
IDFs.calculate(IDFBlocks);
|
|
|
|
|
|
|
|
// Now place MemoryPhi nodes.
|
2017-01-26 04:56:19 +08:00
|
|
|
for (auto &BB : IDFBlocks)
|
|
|
|
createMemoryPhi(BB);
|
2016-08-23 03:14:30 +08:00
|
|
|
}
|
|
|
|
|
[AliasAnalysis] Second prototype to cache BasicAA / anyAA state.
Summary:
Adding contained caching to AliasAnalysis. BasicAA is currently the only one using it.
AA changes:
- This patch is pulling the caches from BasicAAResults to AAResults, meaning the getModRefInfo call benefits from the IsCapturedCache as well when in "batch mode".
- All AAResultBase implementations add the QueryInfo member to all APIs. AAResults APIs maintain wrapper APIs such that all alias()/getModRefInfo call sites are unchanged.
- AA now provides a BatchAAResults type as a wrapper to AAResults. It keeps the AAResults instance and a QueryInfo instantiated to batch mode. It delegates all work to the AAResults instance with the batched QueryInfo. More API wrappers may be needed in BatchAAResults; only the minimum needed is currently added.
MemorySSA changes:
- All walkers are now templated on the AA used (AliasAnalysis=AAResults or BatchAAResults).
- At build time, we optimize uses; now we create a local walker (lives only as long as OptimizeUses does) using BatchAAResults.
- All Walkers have an internal AA and only use that now, never the AA in MemorySSA. The Walkers receive the AA they will use when built.
- The walker we use for queries after the build is instantiated on AliasAnalysis and is built after building MemorySSA and setting AA.
- All static methods doing walking are now templated on AliasAnalysisType if they are used both during build and after. If used only during build, the method now only takes a BatchAAResults. If used only after build, the method now takes an AliasAnalysis.
Subscribers: sanjoy, arsenm, jvesely, nhaehnle, jlebar, george.burgess.iv, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D59315
llvm-svn: 356783
2019-03-23 01:22:19 +08:00
|
|
|
/// Build MemorySSA for the function: create the live-on-entry def, create a
/// MemoryUse/MemoryDef per memory-touching instruction, place MemoryPhis,
/// perform SSA renaming, and optimize uses with a build-time batch-AA walker.
void MemorySSA::buildMemorySSA(BatchAAResults &BAA) {
  // We create an access to represent "live on entry", for things like
  // arguments or users of globals, where the memory they use is defined before
  // the beginning of the function. We do not actually insert it into the IR.
  // We do not define a live on exit for the immediate uses, and thus our
  // semantics do *not* imply that something with no immediate uses can simply
  // be removed.
  BasicBlock &StartingPoint = F.getEntryBlock();
  // The live-on-entry def is owned by MemorySSA (unique_ptr), has no memory
  // instruction and no defining access, and consumes an ID like any access.
  LiveOnEntryDef.reset(new MemoryDef(F.getContext(), nullptr, nullptr,
                                     &StartingPoint, NextID++));

  // We maintain lists of memory accesses per-block, trading memory for time. We
  // could just look up the memory access for every possible instruction in the
  // stream.
  SmallPtrSet<BasicBlock *, 32> DefiningBlocks;
  // Go through each block, figure out where defs occur, and chain together all
  // the accesses.
  for (BasicBlock &B : F) {
    bool InsertIntoDef = false;
    // Both lists are created lazily, only for blocks that actually contain
    // memory accesses (resp. defs).
    AccessList *Accesses = nullptr;
    DefsList *Defs = nullptr;
    for (Instruction &I : B) {
      // Returns null for instructions that neither read nor write memory.
      MemoryUseOrDef *MUD = createNewAccess(&I, &BAA);
      if (!MUD)
        continue;

      if (!Accesses)
        Accesses = getOrCreateAccessList(&B);
      Accesses->push_back(MUD);
      if (isa<MemoryDef>(MUD)) {
        // Blocks containing a def need a MemoryPhi at their IDF; record them.
        InsertIntoDef = true;
        if (!Defs)
          Defs = getOrCreateDefsList(&B);
        Defs->push_back(*MUD);
      }
    }
    if (InsertIntoDef)
      DefiningBlocks.insert(&B);
  }
  // Insert MemoryPhis at the iterated dominance frontier of the def blocks.
  placePHINodes(DefiningBlocks);

  // Now do regular SSA renaming on the MemoryDef/MemoryUse. Visited will get
  // filled in with all blocks.
  SmallPtrSet<BasicBlock *, 16> Visited;
  renamePass(DT->getRootNode(), LiveOnEntryDef.get(), Visited);

  // Optimize uses with a short-lived walker built on the batched AA; the
  // query-time walker (built on plain AliasAnalysis) is created later, on
  // demand, by getWalkerImpl().
  ClobberWalkerBase<BatchAAResults> WalkerBase(this, &BAA, DT);
  CachingWalker<BatchAAResults> WalkerLocal(this, &WalkerBase);
  OptimizeUses(this, &WalkerLocal, &BAA, DT).optimizeUses();

  // Mark the uses in unreachable blocks as live on entry, so that they go
  // somewhere.
  for (auto &BB : F)
    if (!Visited.count(&BB))
      markUnreachableAsLiveOnEntry(&BB);
}
|
2016-02-03 06:46:49 +08:00
|
|
|
|
[MemorySSA] Update to the new shiny walker.
This patch updates MemorySSA's use-optimizing walker to be more
accurate and, in some cases, faster.
Essentially, this changed our core walking algorithm from a
cache-as-you-go DFS to an iteratively expanded DFS, with all of the
caching happening at the end. Said expansion happens when we hit a Phi,
P; we'll try to do the smallest amount of work possible to see if
optimizing above that Phi is legal in the first place. If so, we'll
expand the search to see if we can optimize to the next phi, etc.
An iteratively expanded DFS lets us potentially quit earlier (because we
don't assume that we can optimize above all phis) than our old walker.
Additionally, because we don't cache as we go, we can now optimize above
loops.
As an added bonus, this patch adds a ton of verification (if
EXPENSIVE_CHECKS are enabled), so finding bugs is easier.
Differential Revision: https://reviews.llvm.org/D21777
llvm-svn: 275940
2016-07-19 09:29:15 +08:00
|
|
|
/// Public accessor for the query-time walker; construction is deferred to
/// getWalkerImpl(), which builds it lazily on first use.
MemorySSAWalker *MemorySSA::getWalker() {
  return getWalkerImpl();
}
|
|
|
|
|
[AliasAnalysis] Second prototype to cache BasicAA / anyAA state.
Summary:
Adding contained caching to AliasAnalysis. BasicAA is currently the only one using it.
AA changes:
- This patch is pulling the caches from BasicAAResults to AAResults, meaning the getModRefInfo call benefits from the IsCapturedCache as well when in "batch mode".
- All AAResultBase implementations add the QueryInfo member to all APIs. AAResults APIs maintain wrapper APIs such that all alias()/getModRefInfo call sites are unchanged.
- AA now provides a BatchAAResults type as a wrapper to AAResults. It keeps the AAResults instance and a QueryInfo instantiated to batch mode. It delegates all work to the AAResults instance with the batched QueryInfo. More API wrappers may be needed in BatchAAResults; only the minimum needed is currently added.
MemorySSA changes:
- All walkers are now templated on the AA used (AliasAnalysis=AAResults or BatchAAResults).
- At build time, we optimize uses; now we create a local walker (lives only as long as OptimizeUses does) using BatchAAResults.
- All Walkers have an internal AA and only use that now, never the AA in MemorySSA. The Walkers receive the AA they will use when built.
- The walker we use for queries after the build is instantiated on AliasAnalysis and is built after building MemorySSA and setting AA.
- All static methods doing walking are now templated on AliasAnalysisType if they are used both during build and after. If used only during build, the method now only takes a BatchAAResults. If used only after build, the method now takes an AliasAnalysis.
Subscribers: sanjoy, arsenm, jvesely, nhaehnle, jlebar, george.burgess.iv, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D59315
llvm-svn: 356783
2019-03-23 01:22:19 +08:00
|
|
|
/// Lazily construct (once) and return the caching walker used for queries
/// after MemorySSA has been built. The underlying ClobberWalkerBase is shared
/// with the skip-self walker, so it too is created only if not already present.
MemorySSA::CachingWalker<AliasAnalysis> *MemorySSA::getWalkerImpl() {
  if (!Walker) {
    // The clobber-walking machinery may already exist if getSkipSelfWalker()
    // ran first; reuse it in that case.
    if (!WalkerBase)
      WalkerBase =
          std::make_unique<ClobberWalkerBase<AliasAnalysis>>(this, AA, DT);

    Walker =
        std::make_unique<CachingWalker<AliasAnalysis>>(this, WalkerBase.get());
  }
  return Walker.get();
}
|
|
|
|
|
2019-01-08 03:38:47 +08:00
|
|
|
/// Lazily construct (once) and return the walker variant that skips the
/// starting access's own clobber. Shares the ClobberWalkerBase with the
/// regular caching walker, creating it only if neither walker has yet.
MemorySSAWalker *MemorySSA::getSkipSelfWalker() {
  if (!SkipWalker) {
    // Reuse the clobber-walking machinery if getWalkerImpl() built it already.
    if (!WalkerBase)
      WalkerBase =
          std::make_unique<ClobberWalkerBase<AliasAnalysis>>(this, AA, DT);

    SkipWalker =
        std::make_unique<SkipSelfWalker<AliasAnalysis>>(this, WalkerBase.get());
  }
  return SkipWalker.get();
}
|
|
|
|
|
|
|
|
|
2017-01-26 04:56:19 +08:00
|
|
|
// This is a helper function used by the creation routines. It places NewAccess
// into the access and defs lists for a given basic block, at the given
// insertion point.
void MemorySSA::insertIntoListsForBlock(MemoryAccess *NewAccess,
                                        const BasicBlock *BB,
                                        InsertionPlace Point) {
  auto *Accesses = getOrCreateAccessList(BB);
  if (Point == Beginning) {
    // If it's a phi node, it goes first, otherwise, it goes after any phi
    // nodes.
    if (isa<MemoryPhi>(NewAccess)) {
      Accesses->push_front(NewAccess);
      // Phis are defs, so they must also head the defs list.
      auto *Defs = getOrCreateDefsList(BB);
      Defs->push_front(*NewAccess);
    } else {
      // Find the first non-phi access; the new access is inserted just before
      // it so that all phis stay grouped at the front.
      auto AI = find_if_not(
          *Accesses, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); });
      Accesses->insert(AI, NewAccess);
      if (!isa<MemoryUse>(NewAccess)) {
        // Defs (and phis) also live in the defs list; mirror the same
        // after-the-phis position there.
        auto *Defs = getOrCreateDefsList(BB);
        auto DI = find_if_not(
            *Defs, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); });
        Defs->insert(DI, *NewAccess);
      }
    }
  } else {
    // Not inserting at the beginning: append to the access list, and to the
    // defs list too unless this is a plain use.
    Accesses->push_back(NewAccess);
    if (!isa<MemoryUse>(NewAccess)) {
      auto *Defs = getOrCreateDefsList(BB);
      Defs->push_back(*NewAccess);
    }
  }
  // The per-block local numbering used for intra-block dominance queries is
  // now stale for BB; drop it so it gets recomputed on demand.
  BlockNumberingValid.erase(BB);
}
|
|
|
|
|
|
|
|
/// Insert What into BB's access list immediately before InsertPt, and splice
/// it into the defs list at the corresponding position when it is a def/phi.
void MemorySSA::insertIntoListsBefore(MemoryAccess *What, const BasicBlock *BB,
                                      AccessList::iterator InsertPt) {
  auto *Accesses = getWritableBlockAccesses(BB);
  // Remember this before inserting: the insertion below would otherwise make
  // the end() comparison meaningless.
  bool WasEnd = InsertPt == Accesses->end();
  Accesses->insert(AccessList::iterator(InsertPt), What);
  if (!isa<MemoryUse>(What)) {
    auto *Defs = getOrCreateDefsList(BB);
    // If we got asked to insert at the end, we have an easy job, just shove it
    // at the end. If we got asked to insert before an existing def, we also get
    // an iterator. If we got asked to insert before a use, we have to hunt for
    // the next def.
    if (WasEnd) {
      Defs->push_back(*What);
    } else if (isa<MemoryDef>(InsertPt)) {
      Defs->insert(InsertPt->getDefsIterator(), *What);
    } else {
      // InsertPt is a use: scan forward in the access list for the next def,
      // which anchors the position in the defs list.
      while (InsertPt != Accesses->end() && !isa<MemoryDef>(InsertPt))
        ++InsertPt;
      // Either we found a def, or we are inserting at the end
      if (InsertPt == Accesses->end())
        Defs->push_back(*What);
      else
        Defs->insert(InsertPt->getDefsIterator(), *What);
    }
  }
  // Invalidate BB's cached local numbering; it no longer reflects the list.
  BlockNumberingValid.erase(BB);
}
|
|
|
|
|
2018-08-23 06:34:38 +08:00
|
|
|
/// Common preparation for both moveTo overloads: detach What from its current
/// block's lists (but keep it in the lookup tables), clear any stale optimized
/// state, and point it at its new block. The caller re-inserts it afterwards.
void MemorySSA::prepareForMoveTo(MemoryAccess *What, BasicBlock *BB) {
  // Keep it in the lookup tables, remove from the lists
  removeFromLists(What, false);

  // Note that moving should implicitly invalidate the optimized state of a
  // MemoryUse (and Phis can't be optimized). However, it doesn't do so for a
  // MemoryDef.
  if (auto *MD = dyn_cast<MemoryDef>(What))
    MD->resetOptimized();
  What->setBlock(BB);
}
|
|
|
|
|
2018-04-05 05:08:11 +08:00
|
|
|
// Move What before Where in the IR. The end result is that What will belong to
// the right lists and have the right Block set, but will not otherwise be
// correct. It will not have the right defining access, and if it is a def,
// things below it will not properly be updated.
void MemorySSA::moveTo(MemoryUseOrDef *What, BasicBlock *BB,
                       AccessList::iterator Where) {
  // Detach from the old block, then splice in before Where in the new one.
  prepareForMoveTo(What, BB);
  insertIntoListsBefore(What, BB, Where);
}
|
|
|
|
|
2018-07-12 06:11:46 +08:00
|
|
|
/// Move What to the given insertion place of BB. Like the iterator overload,
/// this only fixes list membership and the access's block, not its defining
/// access or downstream users. Phis may only be moved to Beginning, and their
/// block-keyed lookup-table entry is migrated here.
void MemorySSA::moveTo(MemoryAccess *What, BasicBlock *BB,
                       InsertionPlace Point) {
  if (isa<MemoryPhi>(What)) {
    assert(Point == Beginning &&
           "Can only move a Phi at the beginning of the block");
    // Update lookup table entry
    ValueToMemoryAccess.erase(What->getBlock());
    bool Inserted = ValueToMemoryAccess.insert({BB, What}).second;
    // (void) cast silences the unused-variable warning in NDEBUG builds,
    // where the assert below compiles away.
    (void)Inserted;
    assert(Inserted && "Cannot move a Phi to a block that already has one");
  }

  prepareForMoveTo(What, BB);
  insertIntoListsForBlock(What, BB, Point);
}
|
|
|
|
|
2016-06-22 02:39:20 +08:00
|
|
|
/// Create a new MemoryPhi for BB, register it in the block's lists and in the
/// value-to-access lookup table, and return it. Exactly one phi per block is
/// allowed; ownership follows MemorySSA's usual access bookkeeping.
MemoryPhi *MemorySSA::createMemoryPhi(BasicBlock *BB) {
  assert(!getMemoryAccess(BB) && "MemoryPhi already exists for this BB");
  MemoryPhi *Phi = new MemoryPhi(BB->getContext(), BB, NextID++);
  // Phi's always are placed at the front of the block.
  insertIntoListsForBlock(Phi, BB, Beginning);
  // Phis are keyed by their block (not an instruction) in the lookup table.
  ValueToMemoryAccess[BB] = Phi;
  return Phi;
}
|
|
|
|
|
|
|
|
/// Create a MemoryUse/MemoryDef for instruction I with Definition as its
/// defining access. Returns null when I touches no memory (only permitted if
/// CreationMustSucceed is false). Template, when given, is forwarded to
/// createNewAccess to seed the new access's properties.
MemoryUseOrDef *MemorySSA::createDefinedAccess(Instruction *I,
                                               MemoryAccess *Definition,
                                               const MemoryUseOrDef *Template,
                                               bool CreationMustSucceed) {
  assert(!isa<PHINode>(I) && "Cannot create a defined access for a PHI");
  // Post-build creation uses the plain (non-batched) AA stored on MemorySSA.
  MemoryUseOrDef *NewAccess = createNewAccess(I, AA, Template);
  if (CreationMustSucceed)
    assert(NewAccess != nullptr && "Tried to create a memory access for a "
                                   "non-memory touching instruction");
  if (NewAccess) {
    assert((!Definition || !isa<MemoryUse>(Definition)) &&
           "A use cannot be a defining access");
    NewAccess->setDefiningAccess(Definition);
  }
  return NewAccess;
}
|
|
|
|
|
2017-04-07 09:28:36 +08:00
|
|
|
// Return true if the instruction has ordering constraints.
|
|
|
|
// Note specifically that this only considers stores and loads
|
|
|
|
// because others are still considered ModRef by getModRefInfo.
|
|
|
|
static inline bool isOrdered(const Instruction *I) {
|
|
|
|
if (auto *SI = dyn_cast<StoreInst>(I)) {
|
|
|
|
if (!SI->isUnordered())
|
|
|
|
return true;
|
|
|
|
} else if (auto *LI = dyn_cast<LoadInst>(I)) {
|
|
|
|
if (!LI->isUnordered())
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
2017-08-17 06:07:40 +08:00
|
|
|
|
2018-05-01 23:54:18 +08:00
|
|
|
/// Helper function to create new memory accesses
|
[AliasAnalysis] Second prototype to cache BasicAA / anyAA state.
Summary:
Adding contained caching to AliasAnalysis. BasicAA is currently the only one using it.
AA changes:
- This patch is pulling the caches from BasicAAResults to AAResults, meaning the getModRefInfo call benefits from the IsCapturedCache as well when in "batch mode".
- All AAResultBase implementations add the QueryInfo member to all APIs. AAResults APIs maintain wrapper APIs such that all alias()/getModRefInfo call sites are unchanged.
- AA now provides a BatchAAResults type as a wrapper to AAResults. It keeps the AAResults instance and a QueryInfo instantiated to batch mode. It delegates all work to the AAResults instance with the batched QueryInfo. More API wrappers may be needed in BatchAAResults; only the minimum needed is currently added.
MemorySSA changes:
- All walkers are now templated on the AA used (AliasAnalysis=AAResults or BatchAAResults).
- At build time, we optimize uses; now we create a local walker (lives only as long as OptimizeUses does) using BatchAAResults.
- All Walkers have an internal AA and only use that now, never the AA in MemorySSA. The Walkers receive the AA they will use when built.
- The walker we use for queries after the build is instantiated on AliasAnalysis and is built after building MemorySSA and setting AA.
- All static methods doing walking are now templated on AliasAnalysisType if they are used both during build and after. If used only during build, the method now only takes a BatchAAResults. If used only after build, the method now takes an AliasAnalysis.
Subscribers: sanjoy, arsenm, jvesely, nhaehnle, jlebar, george.burgess.iv, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D59315
llvm-svn: 356783
2019-03-23 01:22:19 +08:00
|
|
|
template <typename AliasAnalysisType>
|
2018-09-11 04:13:01 +08:00
|
|
|
MemoryUseOrDef *MemorySSA::createNewAccess(Instruction *I,
|
[AliasAnalysis] Second prototype to cache BasicAA / anyAA state.
Summary:
Adding contained caching to AliasAnalysis. BasicAA is currently the only one using it.
AA changes:
- This patch is pulling the caches from BasicAAResults to AAResults, meaning the getModRefInfo call benefits from the IsCapturedCache as well when in "batch mode".
- All AAResultBase implementations add the QueryInfo member to all APIs. AAResults APIs maintain wrapper APIs such that all alias()/getModRefInfo call sites are unchanged.
- AA now provides a BatchAAResults type as a wrapper to AAResults. It keeps the AAResults instance and a QueryInfo instantiated to batch mode. It delegates all work to the AAResults instance with the batched QueryInfo. More API wrappers may be needed in BatchAAResults; only the minimum needed is currently added.
MemorySSA changes:
- All walkers are now templated on the AA used (AliasAnalysis=AAResults or BatchAAResults).
- At build time, we optimize uses; now we create a local walker (lives only as long as OptimizeUses does) using BatchAAResults.
- All Walkers have an internal AA and only use that now, never the AA in MemorySSA. The Walkers receive the AA they will use when built.
- The walker we use for queries after the build is instantiated on AliasAnalysis and is built after building MemorySSA and setting AA.
- All static methods doing walking are now templated on AliasAnalysisType if they are used both during build and after. If used only during build, the method now only takes a BatchAAResults. If used only after build, the method now takes an AliasAnalysis.
Subscribers: sanjoy, arsenm, jvesely, nhaehnle, jlebar, george.burgess.iv, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D59315
llvm-svn: 356783
2019-03-23 01:22:19 +08:00
|
|
|
AliasAnalysisType *AAP,
|
2018-09-11 04:13:01 +08:00
|
|
|
const MemoryUseOrDef *Template) {
|
2016-05-26 12:58:46 +08:00
|
|
|
// The assume intrinsic has a control dependency which we model by claiming
|
2019-09-11 06:35:27 +08:00
|
|
|
// that it writes arbitrarily. Debuginfo intrinsics may be considered
|
|
|
|
// clobbers when we have a nonstandard AA pipeline. Ignore these fake memory
|
|
|
|
// dependencies here.
|
2016-05-26 12:58:46 +08:00
|
|
|
// FIXME: Replace this special casing with a more accurate modelling of
|
|
|
|
// assume's control dependency.
|
2021-01-20 03:04:52 +08:00
|
|
|
if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
|
|
|
|
switch (II->getIntrinsicID()) {
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
case Intrinsic::assume:
|
|
|
|
case Intrinsic::experimental_noalias_scope_decl:
|
2016-05-26 12:58:46 +08:00
|
|
|
return nullptr;
|
2021-01-20 03:04:52 +08:00
|
|
|
}
|
|
|
|
}
|
2016-05-26 12:58:46 +08:00
|
|
|
|
2019-09-18 00:31:37 +08:00
|
|
|
// Using a nonstandard AA pipelines might leave us with unexpected modref
|
|
|
|
// results for I, so add a check to not model instructions that may not read
|
|
|
|
// from or write to memory. This is necessary for correctness.
|
|
|
|
if (!I->mayReadFromMemory() && !I->mayWriteToMemory())
|
|
|
|
return nullptr;
|
|
|
|
|
2018-09-11 04:13:01 +08:00
|
|
|
bool Def, Use;
|
|
|
|
if (Template) {
|
2021-01-01 01:39:12 +08:00
|
|
|
Def = isa<MemoryDef>(Template);
|
|
|
|
Use = isa<MemoryUse>(Template);
|
2018-09-11 04:13:01 +08:00
|
|
|
#if !defined(NDEBUG)
|
[AliasAnalysis] Second prototype to cache BasicAA / anyAA state.
Summary:
Adding contained caching to AliasAnalysis. BasicAA is currently the only one using it.
AA changes:
- This patch is pulling the caches from BasicAAResults to AAResults, meaning the getModRefInfo call benefits from the IsCapturedCache as well when in "batch mode".
- All AAResultBase implementations add the QueryInfo member to all APIs. AAResults APIs maintain wrapper APIs such that all alias()/getModRefInfo call sites are unchanged.
- AA now provides a BatchAAResults type as a wrapper to AAResults. It keeps the AAResults instance and a QueryInfo instantiated to batch mode. It delegates all work to the AAResults instance with the batched QueryInfo. More API wrappers may be needed in BatchAAResults; only the minimum needed is currently added.
MemorySSA changes:
- All walkers are now templated on the AA used (AliasAnalysis=AAResults or BatchAAResults).
- At build time, we optimize uses; now we create a local walker (lives only as long as OptimizeUses does) using BatchAAResults.
- All Walkers have an internal AA and only use that now, never the AA in MemorySSA. The Walkers receive the AA they will use when built.
- The walker we use for queries after the build is instantiated on AliasAnalysis and is built after building MemorySSA and setting AA.
- All static methods doing walking are now templated on AliasAnalysisType if they are used both during build and after. If used only during build, the method now only takes a BatchAAResults. If used only after build, the method now takes an AliasAnalysis.
Subscribers: sanjoy, arsenm, jvesely, nhaehnle, jlebar, george.burgess.iv, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D59315
llvm-svn: 356783
2019-03-23 01:22:19 +08:00
|
|
|
ModRefInfo ModRef = AAP->getModRefInfo(I, None);
|
2018-09-11 04:13:01 +08:00
|
|
|
bool DefCheck, UseCheck;
|
|
|
|
DefCheck = isModSet(ModRef) || isOrdered(I);
|
|
|
|
UseCheck = isRefSet(ModRef);
|
|
|
|
assert(Def == DefCheck && (Def || Use == UseCheck) && "Invalid template");
|
|
|
|
#endif
|
|
|
|
} else {
|
|
|
|
// Find out what affect this instruction has on memory.
|
[AliasAnalysis] Second prototype to cache BasicAA / anyAA state.
Summary:
Adding contained caching to AliasAnalysis. BasicAA is currently the only one using it.
AA changes:
- This patch is pulling the caches from BasicAAResults to AAResults, meaning the getModRefInfo call benefits from the IsCapturedCache as well when in "batch mode".
- All AAResultBase implementations add the QueryInfo member to all APIs. AAResults APIs maintain wrapper APIs such that all alias()/getModRefInfo call sites are unchanged.
- AA now provides a BatchAAResults type as a wrapper to AAResults. It keeps the AAResults instance and a QueryInfo instantiated to batch mode. It delegates all work to the AAResults instance with the batched QueryInfo. More API wrappers may be needed in BatchAAResults; only the minimum needed is currently added.
MemorySSA changes:
- All walkers are now templated on the AA used (AliasAnalysis=AAResults or BatchAAResults).
- At build time, we optimize uses; now we create a local walker (lives only as long as OptimizeUses does) using BatchAAResults.
- All Walkers have an internal AA and only use that now, never the AA in MemorySSA. The Walkers receive the AA they will use when built.
- The walker we use for queries after the build is instantiated on AliasAnalysis and is built after building MemorySSA and setting AA.
- All static methods doing walking are now templated on AliasAnalysisType if they are used both during build and after. If used only during build, the method now only takes a BatchAAResults. If used only after build, the method now takes an AliasAnalysis.
Subscribers: sanjoy, arsenm, jvesely, nhaehnle, jlebar, george.burgess.iv, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D59315
llvm-svn: 356783
2019-03-23 01:22:19 +08:00
|
|
|
ModRefInfo ModRef = AAP->getModRefInfo(I, None);
|
2018-09-11 04:13:01 +08:00
|
|
|
// The isOrdered check is used to ensure that volatiles end up as defs
|
|
|
|
// (atomics end up as ModRef right now anyway). Until we separate the
|
|
|
|
// ordering chain from the memory chain, this enables people to see at least
|
|
|
|
// some relative ordering to volatiles. Note that getClobberingMemoryAccess
|
|
|
|
// will still give an answer that bypasses other volatile loads. TODO:
|
|
|
|
// Separate memory aliasing and ordering into two different chains so that
|
|
|
|
// we can precisely represent both "what memory will this read/write/is
|
|
|
|
// clobbered by" and "what instructions can I move this past".
|
|
|
|
Def = isModSet(ModRef) || isOrdered(I);
|
|
|
|
Use = isRefSet(ModRef);
|
|
|
|
}
|
2016-02-03 06:46:49 +08:00
|
|
|
|
|
|
|
// It's possible for an instruction to not modify memory at all. During
|
|
|
|
// construction, we ignore them.
|
2016-05-26 09:19:17 +08:00
|
|
|
if (!Def && !Use)
|
2016-02-03 06:46:49 +08:00
|
|
|
return nullptr;
|
|
|
|
|
2016-03-12 03:34:03 +08:00
|
|
|
MemoryUseOrDef *MUD;
|
2016-02-03 06:46:49 +08:00
|
|
|
if (Def)
|
2016-03-12 03:34:03 +08:00
|
|
|
MUD = new MemoryDef(I->getContext(), nullptr, I, I->getParent(), NextID++);
|
2016-02-03 06:46:49 +08:00
|
|
|
else
|
2016-03-12 03:34:03 +08:00
|
|
|
MUD = new MemoryUse(I->getContext(), nullptr, I, I->getParent());
|
2016-08-01 05:08:20 +08:00
|
|
|
ValueToMemoryAccess[I] = MUD;
|
2016-03-12 03:34:03 +08:00
|
|
|
return MUD;
|
2016-02-03 06:46:49 +08:00
|
|
|
}
|
|
|
|
|
2018-05-01 23:54:18 +08:00
|
|
|
/// Properly remove \p MA from all of MemorySSA's lookup tables.
|
2016-03-02 02:46:54 +08:00
|
|
|
void MemorySSA::removeFromLookups(MemoryAccess *MA) {
|
|
|
|
assert(MA->use_empty() &&
|
|
|
|
"Trying to remove memory access that still has uses");
|
2016-07-20 06:49:43 +08:00
|
|
|
BlockNumbering.erase(MA);
|
2018-06-23 06:34:07 +08:00
|
|
|
if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
|
2016-03-02 02:46:54 +08:00
|
|
|
MUD->setDefiningAccess(nullptr);
|
|
|
|
// Invalidate our walker's cache if necessary
|
|
|
|
if (!isa<MemoryUse>(MA))
|
[AliasAnalysis] Second prototype to cache BasicAA / anyAA state.
Summary:
Adding contained caching to AliasAnalysis. BasicAA is currently the only one using it.
AA changes:
- This patch is pulling the caches from BasicAAResults to AAResults, meaning the getModRefInfo call benefits from the IsCapturedCache as well when in "batch mode".
- All AAResultBase implementations add the QueryInfo member to all APIs. AAResults APIs maintain wrapper APIs such that all alias()/getModRefInfo call sites are unchanged.
- AA now provides a BatchAAResults type as a wrapper to AAResults. It keeps the AAResults instance and a QueryInfo instantiated to batch mode. It delegates all work to the AAResults instance with the batched QueryInfo. More API wrappers may be needed in BatchAAResults; only the minimum needed is currently added.
MemorySSA changes:
- All walkers are now templated on the AA used (AliasAnalysis=AAResults or BatchAAResults).
- At build time, we optimize uses; now we create a local walker (lives only as long as OptimizeUses does) using BatchAAResults.
- All Walkers have an internal AA and only use that now, never the AA in MemorySSA. The Walkers receive the AA they will use when built.
- The walker we use for queries after the build is instantiated on AliasAnalysis and is built after building MemorySSA and setting AA.
- All static methods doing walking are now templated on AliasAnalysisType if they are used both during build and after. If used only during build, the method now only takes a BatchAAResults. If used only after build, the method now takes an AliasAnalysis.
Subscribers: sanjoy, arsenm, jvesely, nhaehnle, jlebar, george.burgess.iv, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D59315
llvm-svn: 356783
2019-03-23 01:22:19 +08:00
|
|
|
getWalker()->invalidateInfo(MA);
|
2018-06-23 06:34:07 +08:00
|
|
|
|
2016-03-02 02:46:54 +08:00
|
|
|
Value *MemoryInst;
|
2018-06-23 06:34:07 +08:00
|
|
|
if (const auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
|
2016-03-02 02:46:54 +08:00
|
|
|
MemoryInst = MUD->getMemoryInst();
|
2018-06-23 06:34:07 +08:00
|
|
|
else
|
2016-03-02 02:46:54 +08:00
|
|
|
MemoryInst = MA->getBlock();
|
2018-06-23 06:34:07 +08:00
|
|
|
|
2016-08-01 05:08:20 +08:00
|
|
|
auto VMA = ValueToMemoryAccess.find(MemoryInst);
|
|
|
|
if (VMA->second == MA)
|
|
|
|
ValueToMemoryAccess.erase(VMA);
|
Introduce a basic MemorySSA updater, that supports insertDef,
insertUse, moveBefore and moveAfter operations.
Summary:
This creates a basic MemorySSA updater that handles arbitrary
insertion of uses and defs into MemorySSA, as well as arbitrary
movement around the CFG. It replaces the current splice API.
It can be made to handle arbitrary control flow changes.
Currently, it uses the same updater algorithm from D28934.
The main difference is because MemorySSA is single variable, we have
the complete def and use list, and don't need anyone to give it to us
as part of the API. We also have to rename stores below us in some
cases.
If we go that direction in that patch, i will merge all the updater
implementations (using an updater_traits or something to provide the
get* functions we use, called read*/write* in that patch).
Sadly, the current SSAUpdater algorithm is way too slow to use for
what we are doing here.
I have updated the tests we have to basically build memoryssa
incrementally using the updater api, and make sure it still comes out
the same.
Reviewers: george.burgess.iv
Subscribers: llvm-commits
Differential Revision: https://reviews.llvm.org/D29047
llvm-svn: 293356
2017-01-28 09:23:13 +08:00
|
|
|
}
|
2016-03-02 02:46:54 +08:00
|
|
|
|
2018-05-01 23:54:18 +08:00
|
|
|
/// Properly remove \p MA from all of MemorySSA's lists.
|
Introduce a basic MemorySSA updater, that supports insertDef,
insertUse, moveBefore and moveAfter operations.
Summary:
This creates a basic MemorySSA updater that handles arbitrary
insertion of uses and defs into MemorySSA, as well as arbitrary
movement around the CFG. It replaces the current splice API.
It can be made to handle arbitrary control flow changes.
Currently, it uses the same updater algorithm from D28934.
The main difference is because MemorySSA is single variable, we have
the complete def and use list, and don't need anyone to give it to us
as part of the API. We also have to rename stores below us in some
cases.
If we go that direction in that patch, i will merge all the updater
implementations (using an updater_traits or something to provide the
get* functions we use, called read*/write* in that patch).
Sadly, the current SSAUpdater algorithm is way too slow to use for
what we are doing here.
I have updated the tests we have to basically build memoryssa
incrementally using the updater api, and make sure it still comes out
the same.
Reviewers: george.burgess.iv
Subscribers: llvm-commits
Differential Revision: https://reviews.llvm.org/D29047
llvm-svn: 293356
2017-01-28 09:23:13 +08:00
|
|
|
///
|
|
|
|
/// Because of the way the intrusive list and use lists work, it is important to
|
|
|
|
/// do removal in the right order.
|
|
|
|
/// ShouldDelete defaults to true, and will cause the memory access to also be
|
|
|
|
/// deleted, not just removed.
|
|
|
|
void MemorySSA::removeFromLists(MemoryAccess *MA, bool ShouldDelete) {
|
2018-06-30 04:46:16 +08:00
|
|
|
BasicBlock *BB = MA->getBlock();
|
2017-01-26 04:56:19 +08:00
|
|
|
// The access list owns the reference, so we erase it from the non-owning list
|
|
|
|
// first.
|
|
|
|
if (!isa<MemoryUse>(MA)) {
|
2018-06-30 04:46:16 +08:00
|
|
|
auto DefsIt = PerBlockDefs.find(BB);
|
2017-01-26 04:56:19 +08:00
|
|
|
std::unique_ptr<DefsList> &Defs = DefsIt->second;
|
|
|
|
Defs->remove(*MA);
|
|
|
|
if (Defs->empty())
|
|
|
|
PerBlockDefs.erase(DefsIt);
|
|
|
|
}
|
|
|
|
|
Introduce a basic MemorySSA updater, that supports insertDef,
insertUse, moveBefore and moveAfter operations.
Summary:
This creates a basic MemorySSA updater that handles arbitrary
insertion of uses and defs into MemorySSA, as well as arbitrary
movement around the CFG. It replaces the current splice API.
It can be made to handle arbitrary control flow changes.
Currently, it uses the same updater algorithm from D28934.
The main difference is because MemorySSA is single variable, we have
the complete def and use list, and don't need anyone to give it to us
as part of the API. We also have to rename stores below us in some
cases.
If we go that direction in that patch, i will merge all the updater
implementations (using an updater_traits or something to provide the
get* functions we use, called read*/write* in that patch).
Sadly, the current SSAUpdater algorithm is way too slow to use for
what we are doing here.
I have updated the tests we have to basically build memoryssa
incrementally using the updater api, and make sure it still comes out
the same.
Reviewers: george.burgess.iv
Subscribers: llvm-commits
Differential Revision: https://reviews.llvm.org/D29047
llvm-svn: 293356
2017-01-28 09:23:13 +08:00
|
|
|
// The erase call here will delete it. If we don't want it deleted, we call
|
|
|
|
// remove instead.
|
2018-06-30 04:46:16 +08:00
|
|
|
auto AccessIt = PerBlockAccesses.find(BB);
|
2016-06-21 04:21:33 +08:00
|
|
|
std::unique_ptr<AccessList> &Accesses = AccessIt->second;
|
Introduce a basic MemorySSA updater, that supports insertDef,
insertUse, moveBefore and moveAfter operations.
Summary:
This creates a basic MemorySSA updater that handles arbitrary
insertion of uses and defs into MemorySSA, as well as arbitrary
movement around the CFG. It replaces the current splice API.
It can be made to handle arbitrary control flow changes.
Currently, it uses the same updater algorithm from D28934.
The main difference is because MemorySSA is single variable, we have
the complete def and use list, and don't need anyone to give it to us
as part of the API. We also have to rename stores below us in some
cases.
If we go that direction in that patch, i will merge all the updater
implementations (using an updater_traits or something to provide the
get* functions we use, called read*/write* in that patch).
Sadly, the current SSAUpdater algorithm is way too slow to use for
what we are doing here.
I have updated the tests we have to basically build memoryssa
incrementally using the updater api, and make sure it still comes out
the same.
Reviewers: george.burgess.iv
Subscribers: llvm-commits
Differential Revision: https://reviews.llvm.org/D29047
llvm-svn: 293356
2017-01-28 09:23:13 +08:00
|
|
|
if (ShouldDelete)
|
|
|
|
Accesses->erase(MA);
|
|
|
|
else
|
|
|
|
Accesses->remove(MA);
|
|
|
|
|
2018-06-30 04:46:16 +08:00
|
|
|
if (Accesses->empty()) {
|
2016-03-02 10:35:04 +08:00
|
|
|
PerBlockAccesses.erase(AccessIt);
|
2018-06-30 04:46:16 +08:00
|
|
|
BlockNumberingValid.erase(BB);
|
|
|
|
}
|
2016-03-02 02:46:54 +08:00
|
|
|
}
|
|
|
|
|
2016-02-03 06:46:49 +08:00
|
|
|
/// Print the function with MemorySSA annotations interleaved into the IR.
void MemorySSA::print(raw_ostream &OS) const {
  MemorySSAAnnotatedWriter Annotator(this);
  F.print(OS, &Annotator);
}
|
|
|
|
|
2017-10-15 22:32:27 +08:00
|
|
|
// Debugger-callable dump of the MemorySSA-annotated function to dbgs();
// only compiled into builds with assertions or LLVM_ENABLE_DUMP.
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void MemorySSA::dump() const { print(dbgs()); }
#endif
|
2016-02-03 06:46:49 +08:00
|
|
|
|
2016-02-11 01:39:43 +08:00
|
|
|
void MemorySSA::verifyMemorySSA() const {
|
2019-11-23 06:12:28 +08:00
|
|
|
verifyOrderingDominationAndDefUses(F);
|
2018-06-25 13:30:36 +08:00
|
|
|
verifyDominationNumbers(F);
|
2019-08-01 01:41:04 +08:00
|
|
|
verifyPrevDefInPhis(F);
|
2019-02-12 03:51:21 +08:00
|
|
|
// Previously, the verification used to also verify that the clobberingAccess
|
|
|
|
// cached by MemorySSA is the same as the clobberingAccess found at a later
|
|
|
|
// query to AA. This does not hold true in general due to the current fragility
|
|
|
|
// of BasicAA which has arbitrary caps on the things it analyzes before giving
|
|
|
|
// up. As a result, transformations that are correct, will lead to BasicAA
|
|
|
|
// returning different Alias answers before and after that transformation.
|
|
|
|
// Invalidating MemorySSA is not an option, as the results in BasicAA can be so
|
|
|
|
// random, in the worst case we'd need to rebuild MemorySSA from scratch after
|
|
|
|
// every transformation, which defeats the purpose of using it. For such an
|
|
|
|
// example, see test4 added in D51960.
|
2016-06-22 02:39:20 +08:00
|
|
|
}
|
|
|
|
|
2019-08-01 01:41:04 +08:00
|
|
|
/// Check that every MemoryPhi incoming value is the nearest dominating def of
/// its incoming block. Only active in expensive-checks debug builds.
void MemorySSA::verifyPrevDefInPhis(Function &F) const {
#if !defined(NDEBUG) && defined(EXPENSIVE_CHECKS)
  for (const BasicBlock &BB : F) {
    MemoryPhi *Phi = getMemoryAccess(&BB);
    if (!Phi)
      continue;
    for (unsigned Idx = 0, End = Phi->getNumIncomingValues(); Idx != End;
         ++Idx) {
      auto *Pred = Phi->getIncomingBlock(Idx);
      auto *IncAcc = Phi->getIncomingValue(Idx);
      // If Pred has no unreachable predecessors, find the last def by walking
      // up the IDoms. If, while walking IDoms, any of these has an
      // unreachable predecessor, then the incoming def can be any access.
      if (auto *Node = DT->getNode(Pred)) {
        // Walk the immediate-dominator chain until a block with defs shows
        // up; its last def must be what the phi receives.
        for (; Node; Node = Node->getIDom()) {
          if (auto *DefList = getBlockDefs(Node->getBlock())) {
            auto *LastAcc = &*(--DefList->end());
            assert(LastAcc == IncAcc &&
                   "Incorrect incoming access into phi.");
            break;
          }
        }
      } else {
        // If Pred has unreachable predecessors but at least one Def, the
        // incoming access can be Pred's last Def, or it could have been
        // optimized to LoE; after an update the LoE may have been replaced by
        // another access, so IncAcc may be any access. If Pred has
        // unreachable predecessors and no Defs, the incoming access should be
        // LoE — but again, after an update it may be any access. Nothing to
        // check in either case.
      }
    }
  }
#endif
}
|
|
|
|
|
2018-06-25 13:30:36 +08:00
|
|
|
/// Verify that all of the blocks we believe to have valid domination numbers
|
|
|
|
/// actually have valid domination numbers.
|
|
|
|
void MemorySSA::verifyDominationNumbers(const Function &F) const {
|
|
|
|
#ifndef NDEBUG
|
|
|
|
if (BlockNumberingValid.empty())
|
|
|
|
return;
|
|
|
|
|
|
|
|
SmallPtrSet<const BasicBlock *, 16> ValidBlocks = BlockNumberingValid;
|
|
|
|
for (const BasicBlock &BB : F) {
|
|
|
|
if (!ValidBlocks.count(&BB))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
ValidBlocks.erase(&BB);
|
|
|
|
|
|
|
|
const AccessList *Accesses = getBlockAccesses(&BB);
|
|
|
|
// It's correct to say an empty block has valid numbering.
|
|
|
|
if (!Accesses)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
// Block numbering starts at 1.
|
|
|
|
unsigned long LastNumber = 0;
|
|
|
|
for (const MemoryAccess &MA : *Accesses) {
|
|
|
|
auto ThisNumberIter = BlockNumbering.find(&MA);
|
|
|
|
assert(ThisNumberIter != BlockNumbering.end() &&
|
|
|
|
"MemoryAccess has no domination number in a valid block!");
|
|
|
|
|
|
|
|
unsigned long ThisNumber = ThisNumberIter->second;
|
|
|
|
assert(ThisNumber > LastNumber &&
|
|
|
|
"Domination numbers should be strictly increasing!");
|
|
|
|
LastNumber = ThisNumber;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
assert(ValidBlocks.empty() &&
|
|
|
|
"All valid BasicBlocks should exist in F -- dangling pointers?");
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
2019-11-23 06:12:28 +08:00
|
|
|
/// Verify ordering: the order and existence of MemoryAccesses matches the
/// order and existence of memory affecting instructions.
/// Verify domination: each definition dominates all of its uses.
/// Verify def-uses: the immediate use information - walk all the memory
/// accesses and verifying that, for each use, it appears in the appropriate
/// def's use list
void MemorySSA::verifyOrderingDominationAndDefUses(Function &F) const {
#if !defined(NDEBUG)
  // Walk all the blocks, comparing what the lookups think and what the access
  // lists think, as well as the order in the blocks vs the order in the access
  // lists.
  SmallVector<MemoryAccess *, 32> ActualAccesses;
  SmallVector<MemoryAccess *, 32> ActualDefs;
  for (BasicBlock &B : F) {
    const AccessList *AL = getBlockAccesses(&B);
    const auto *DL = getBlockDefs(&B);
    MemoryPhi *Phi = getMemoryAccess(&B);
    if (Phi) {
      // Verify ordering. A phi, if present, is always the first access of its
      // block, so record it before any instruction accesses.
      ActualAccesses.push_back(Phi);
      ActualDefs.push_back(Phi);
      // Verify domination
      for (const Use &U : Phi->uses())
        assert(dominates(Phi, U) && "Memory PHI does not dominate it's uses");
#if defined(EXPENSIVE_CHECKS)
      // Verify def-uses: the phi must have exactly one operand per CFG
      // predecessor, each incoming value must list the phi as a user, and each
      // incoming block must really be a predecessor.
      assert(Phi->getNumOperands() == static_cast<unsigned>(std::distance(
                                          pred_begin(&B), pred_end(&B))) &&
             "Incomplete MemoryPhi Node");
      for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
        verifyUseInDefs(Phi->getIncomingValue(I), Phi);
        assert(is_contained(predecessors(&B), Phi->getIncomingBlock(I)) &&
               "Incoming phi block not a block predecessor");
      }
#endif
    }

    for (Instruction &I : B) {
      MemoryUseOrDef *MA = getMemoryAccess(&I);
      // Any instruction with an access must appear in the access list, and a
      // def additionally in the defs list.
      assert((!MA || (AL && (isa<MemoryUse>(MA) || DL))) &&
             "We have memory affecting instructions "
             "in this block but they are not in the "
             "access list or defs list");
      if (MA) {
        // Verify ordering.
        ActualAccesses.push_back(MA);
        if (MemoryAccess *MD = dyn_cast<MemoryDef>(MA)) {
          // Verify ordering.
          ActualDefs.push_back(MA);
          // Verify domination.
          for (const Use &U : MD->uses())
            assert(dominates(MD, U) &&
                   "Memory Def does not dominate it's uses");
        }
#if defined(EXPENSIVE_CHECKS)
        // Verify def-uses.
        verifyUseInDefs(MA->getDefiningAccess(), MA);
#endif
      }
    }
    // Either we hit the assert, really have no accesses, or we have both
    // accesses and an access list. Same with defs.
    if (!AL && !DL)
      continue;
    // Verify ordering: the recorded accesses/defs must match the stored lists
    // element-for-element, in the same order.
    assert(AL->size() == ActualAccesses.size() &&
           "We don't have the same number of accesses in the block as on the "
           "access list");
    assert((DL || ActualDefs.size() == 0) &&
           "Either we should have a defs list, or we should have no defs");
    assert((!DL || DL->size() == ActualDefs.size()) &&
           "We don't have the same number of defs in the block as on the "
           "def list");
    auto ALI = AL->begin();
    auto AAI = ActualAccesses.begin();
    while (ALI != AL->end() && AAI != ActualAccesses.end()) {
      assert(&*ALI == *AAI && "Not the same accesses in the same order");
      ++ALI;
      ++AAI;
    }
    // Reuse the scratch vectors for the next block.
    ActualAccesses.clear();
    if (DL) {
      auto DLI = DL->begin();
      auto ADI = ActualDefs.begin();
      while (DLI != DL->end() && ADI != ActualDefs.end()) {
        assert(&*DLI == *ADI && "Not the same defs in the same order");
        ++DLI;
        ++ADI;
      }
    }
    ActualDefs.clear();
  }
#endif
}
|
|
|
|
|
2018-05-01 23:54:18 +08:00
|
|
|
/// Verify the def-use lists in MemorySSA, by verifying that \p Use
|
2016-02-03 06:46:49 +08:00
|
|
|
/// appears in the use list of \p Def.
|
2016-02-11 01:39:43 +08:00
|
|
|
void MemorySSA::verifyUseInDefs(MemoryAccess *Def, MemoryAccess *Use) const {
|
2016-08-06 05:47:20 +08:00
|
|
|
#ifndef NDEBUG
|
2016-02-03 06:46:49 +08:00
|
|
|
// The live on entry use may cause us to get a NULL def here
|
2016-08-06 05:47:20 +08:00
|
|
|
if (!Def)
|
|
|
|
assert(isLiveOnEntryDef(Use) &&
|
|
|
|
"Null def but use not point to live on entry def");
|
|
|
|
else
|
2016-08-12 05:26:50 +08:00
|
|
|
assert(is_contained(Def->users(), Use) &&
|
2016-08-06 05:47:20 +08:00
|
|
|
"Did not find use in def's use list");
|
|
|
|
#endif
|
2016-02-03 06:46:49 +08:00
|
|
|
}
|
|
|
|
|
2016-07-20 06:49:43 +08:00
|
|
|
/// Perform a local numbering on blocks so that instruction ordering can be
|
|
|
|
/// determined in constant time.
|
|
|
|
/// TODO: We currently just number in order. If we numbered by N, we could
|
|
|
|
/// allow at least N-1 sequences of insertBefore or insertAfter (and at least
|
|
|
|
/// log2(N) sequences of mixed before and after) without needing to invalidate
|
|
|
|
/// the numbering.
|
|
|
|
void MemorySSA::renumberBlock(const BasicBlock *B) const {
|
|
|
|
// The pre-increment ensures the numbers really start at 1.
|
|
|
|
unsigned long CurrentNumber = 0;
|
|
|
|
const AccessList *AL = getBlockAccesses(B);
|
|
|
|
assert(AL != nullptr && "Asking to renumber an empty block");
|
|
|
|
for (const auto &I : *AL)
|
|
|
|
BlockNumbering[&I] = ++CurrentNumber;
|
|
|
|
BlockNumberingValid.insert(B);
|
|
|
|
}
|
|
|
|
|
2018-05-01 23:54:18 +08:00
|
|
|
/// Determine, for two memory accesses in the same block,
|
2016-02-03 06:46:49 +08:00
|
|
|
/// whether \p Dominator dominates \p Dominatee.
|
|
|
|
/// \returns True if \p Dominator dominates \p Dominatee.
|
|
|
|
bool MemorySSA::locallyDominates(const MemoryAccess *Dominator,
|
|
|
|
const MemoryAccess *Dominatee) const {
|
2016-07-20 06:49:43 +08:00
|
|
|
const BasicBlock *DominatorBlock = Dominator->getBlock();
|
|
|
|
|
2016-07-20 07:08:08 +08:00
|
|
|
assert((DominatorBlock == Dominatee->getBlock()) &&
|
2016-07-20 06:49:43 +08:00
|
|
|
"Asking for local domination when accesses are in different blocks!");
|
2016-06-11 05:36:41 +08:00
|
|
|
// A node dominates itself.
|
|
|
|
if (Dominatee == Dominator)
|
|
|
|
return true;
|
|
|
|
|
|
|
|
// When Dominatee is defined on function entry, it is not dominated by another
|
|
|
|
// memory access.
|
|
|
|
if (isLiveOnEntryDef(Dominatee))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// When Dominator is defined on function entry, it dominates the other memory
|
|
|
|
// access.
|
|
|
|
if (isLiveOnEntryDef(Dominator))
|
|
|
|
return true;
|
|
|
|
|
2016-07-20 06:49:43 +08:00
|
|
|
if (!BlockNumberingValid.count(DominatorBlock))
|
|
|
|
renumberBlock(DominatorBlock);
|
2016-02-03 06:46:49 +08:00
|
|
|
|
2016-07-20 06:49:43 +08:00
|
|
|
unsigned long DominatorNum = BlockNumbering.lookup(Dominator);
|
|
|
|
// All numbers start with 1
|
|
|
|
assert(DominatorNum != 0 && "Block was not numbered properly");
|
|
|
|
unsigned long DominateeNum = BlockNumbering.lookup(Dominatee);
|
|
|
|
assert(DominateeNum != 0 && "Block was not numbered properly");
|
|
|
|
return DominatorNum < DominateeNum;
|
2016-02-03 06:46:49 +08:00
|
|
|
}
|
|
|
|
|
[MemorySSA] Update to the new shiny walker.
This patch updates MemorySSA's use-optimizing walker to be more
accurate and, in some cases, faster.
Essentially, this changed our core walking algorithm from a
cache-as-you-go DFS to an iteratively expanded DFS, with all of the
caching happening at the end. Said expansion happens when we hit a Phi,
P; we'll try to do the smallest amount of work possible to see if
optimizing above that Phi is legal in the first place. If so, we'll
expand the search to see if we can optimize to the next phi, etc.
An iteratively expanded DFS lets us potentially quit earlier (because we
don't assume that we can optimize above all phis) than our old walker.
Additionally, because we don't cache as we go, we can now optimize above
loops.
As an added bonus, this patch adds a ton of verification (if
EXPENSIVE_CHECKS are enabled), so finding bugs is easier.
Differential Revision: https://reviews.llvm.org/D21777
llvm-svn: 275940
2016-07-19 09:29:15 +08:00
|
|
|
bool MemorySSA::dominates(const MemoryAccess *Dominator,
|
|
|
|
const MemoryAccess *Dominatee) const {
|
|
|
|
if (Dominator == Dominatee)
|
|
|
|
return true;
|
|
|
|
|
|
|
|
if (isLiveOnEntryDef(Dominatee))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
if (Dominator->getBlock() != Dominatee->getBlock())
|
|
|
|
return DT->dominates(Dominator->getBlock(), Dominatee->getBlock());
|
|
|
|
return locallyDominates(Dominator, Dominatee);
|
|
|
|
}
|
|
|
|
|
2016-08-06 05:46:52 +08:00
|
|
|
bool MemorySSA::dominates(const MemoryAccess *Dominator,
|
|
|
|
const Use &Dominatee) const {
|
|
|
|
if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Dominatee.getUser())) {
|
|
|
|
BasicBlock *UseBB = MP->getIncomingBlock(Dominatee);
|
|
|
|
// The def must dominate the incoming block of the phi.
|
|
|
|
if (UseBB != Dominator->getBlock())
|
|
|
|
return DT->dominates(Dominator->getBlock(), UseBB);
|
|
|
|
// If the UseBB and the DefBB are the same, compare locally.
|
|
|
|
return locallyDominates(Dominator, cast<MemoryAccess>(Dominatee));
|
|
|
|
}
|
|
|
|
// If it's not a PHI node use, the normal dominates can already handle it.
|
|
|
|
return dominates(Dominator, cast<MemoryAccess>(Dominatee.getUser()));
|
|
|
|
}
|
|
|
|
|
[IR] De-virtualize ~Value to save a vptr
Summary:
Implements PR889
Removing the virtual table pointer from Value saves 1% of RSS when doing
LTO of llc on Linux. The impact on time was positive, but too noisy to
conclusively say that performance improved. Here is a link to the
spreadsheet with the original data:
https://docs.google.com/spreadsheets/d/1F4FHir0qYnV0MEp2sYYp_BuvnJgWlWPhWOwZ6LbW7W4/edit?usp=sharing
This change makes it invalid to directly delete a Value, User, or
Instruction pointer. Instead, such code can be rewritten to a null check
and a call Value::deleteValue(). Value objects tend to have their
lifetimes managed through iplist, so for the most part, this isn't a big
deal. However, there are some places where LLVM deletes values, and
those places had to be migrated to deleteValue. I have also created
llvm::unique_value, which has a custom deleter, so it can be used in
place of std::unique_ptr<Value>.
I had to add the "DerivedUser" Deleter escape hatch for MemorySSA, which
derives from User outside of lib/IR. Code in IR cannot include MemorySSA
headers or call the MemoryAccess object destructors without introducing
a circular dependency, so we need some level of indirection.
Unfortunately, no class derived from User may have any virtual methods,
because adding a virtual method would break User::getHungOffOperands(),
which assumes that it can find the use list immediately prior to the
User object. I've added a static_assert to the appropriate OperandTraits
templates to help people avoid this trap.
Reviewers: chandlerc, mehdi_amini, pete, dberlin, george.burgess.iv
Reviewed By: chandlerc
Subscribers: krytarowski, eraman, george.burgess.iv, mzolotukhin, Prazek, nlewycky, hans, inglorion, pcc, tejohnson, dberlin, llvm-commits
Differential Revision: https://reviews.llvm.org/D31261
llvm-svn: 303362
2017-05-19 01:24:10 +08:00
|
|
|
/// Print this access by dispatching to the concrete subclass printer.
/// MemoryAccess has no virtual methods (it derives from DerivedUser, see the
/// deleteMe hooks below), so the dispatch on the value ID is done by hand.
void MemoryAccess::print(raw_ostream &OS) const {
  switch (getValueID()) {
  case MemoryUseVal:
    return static_cast<const MemoryUse *>(this)->print(OS);
  case MemoryDefVal:
    return static_cast<const MemoryDef *>(this)->print(OS);
  case MemoryPhiVal:
    return static_cast<const MemoryPhi *>(this)->print(OS);
  }
  llvm_unreachable("invalid value id");
}
|
2016-02-03 06:46:49 +08:00
|
|
|
/// Print as "N = MemoryDef(M)" with an optional "->K <alias-result>" suffix
/// when this def has been optimized.
void MemoryDef::print(raw_ostream &OS) const {
  // Emit an access's ID, or the live-on-entry marker for the (ID-less)
  // entry def.
  auto EmitID = [&OS](MemoryAccess *A) {
    if (A && A->getID())
      OS << A->getID();
    else
      OS << LiveOnEntryStr;
  };

  OS << getID() << " = MemoryDef(";
  EmitID(getDefiningAccess());
  OS << ")";

  if (!isOptimized())
    return;

  // Show the access this def was optimized to, plus the cached alias-result
  // tag when one is known.
  OS << "->";
  EmitID(getOptimized());
  if (Optional<AliasResult> AR = getOptimizedAccessType())
    OS << " " << *AR;
}
|
|
|
|
/// Print as "N = MemoryPhi({pred1,id1},{pred2,id2},...)" where each pair is
/// an incoming block and the ID of the access flowing in from it.
void MemoryPhi::print(raw_ostream &OS) const {
  ListSeparator LS(",");
  OS << getID() << " = MemoryPhi(";
  for (const auto &Op : operands()) {
    BasicBlock *Pred = getIncomingBlock(Op);
    auto *Incoming = cast<MemoryAccess>(Op);

    OS << LS << '{';
    // Prefer the block's name; fall back to its operand form for unnamed
    // blocks.
    if (Pred->hasName())
      OS << Pred->getName();
    else
      Pred->printAsOperand(OS, false);
    OS << ',';
    // A zero ID means the incoming value is the live-on-entry def.
    if (unsigned ID = Incoming->getID())
      OS << ID;
    else
      OS << LiveOnEntryStr;
    OS << '}';
  }
  OS << ')';
}
|
|
|
|
/// Print as "MemoryUse(M)" with an optional trailing alias-result tag when
/// the use has been optimized.
void MemoryUse::print(raw_ostream &OS) const {
  OS << "MemoryUse(";
  // Show the defining access's ID, or the live-on-entry marker when the use
  // is defined by the (ID-less) entry def.
  MemoryAccess *Def = getDefiningAccess();
  if (Def && Def->getID())
    OS << Def->getID();
  else
    OS << LiveOnEntryStr;
  OS << ')';

  if (Optional<AliasResult> AR = getOptimizedAccessType())
    OS << " " << *AR;
}
|
|
|
|
/// Debug helper: print this access followed by a newline to the debug
/// stream. The body is compiled out in release builds without
/// LLVM_ENABLE_DUMP, but the symbol itself must remain.
void MemoryAccess::dump() const {
// Cannot completely remove virtual function even in release mode.
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  print(dbgs());
  dbgs() << "\n";
#endif
}
|
2016-07-07 05:20:47 +08:00
|
|
|
// Pass identification: the address of ID is the pass's unique identifier.
char MemorySSAPrinterLegacyPass::ID = 0;
|
|
|
// Register the pass with the legacy pass registry on construction.
MemorySSAPrinterLegacyPass::MemorySSAPrinterLegacyPass() : FunctionPass(ID) {
  initializeMemorySSAPrinterLegacyPassPass(*PassRegistry::getPassRegistry());
}
|
|
|
|
/// The printer only reads MemorySSA; it requires the analysis and preserves
/// everything.
void MemorySSAPrinterLegacyPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<MemorySSAWrapperPass>();
  AU.setPreservesAll();
}
|
2020-11-13 01:39:10 +08:00
|
|
|
class DOTFuncMSSAInfo {
|
|
|
|
private:
|
|
|
|
const Function &F;
|
|
|
|
MemorySSAAnnotatedWriter MSSAWriter;
|
|
|
|
|
|
|
|
public:
|
|
|
|
DOTFuncMSSAInfo(const Function &F, MemorySSA &MSSA)
|
|
|
|
: F(F), MSSAWriter(&MSSA) {}
|
|
|
|
|
|
|
|
const Function *getFunction() { return &F; }
|
|
|
|
MemorySSAAnnotatedWriter &getWriter() { return MSSAWriter; }
|
|
|
|
};
|
|
|
|
|
|
|
|
namespace llvm {
|
|
|
|
|
|
|
|
/// CFG GraphTraits adapter for DOTFuncMSSAInfo: edges come from the inherited
/// BasicBlock traits; node enumeration walks the function's block list.
template <>
struct GraphTraits<DOTFuncMSSAInfo *> : public GraphTraits<const BasicBlock *> {
  static NodeRef getEntryNode(DOTFuncMSSAInfo *CFGInfo) {
    return &(CFGInfo->getFunction()->getEntryBlock());
  }

  // nodes_iterator/begin/end - Allow iteration over all nodes in the graph
  using nodes_iterator = pointer_iterator<Function::const_iterator>;

  static nodes_iterator nodes_begin(DOTFuncMSSAInfo *CFGInfo) {
    return nodes_iterator(CFGInfo->getFunction()->begin());
  }

  static nodes_iterator nodes_end(DOTFuncMSSAInfo *CFGInfo) {
    return nodes_iterator(CFGInfo->getFunction()->end());
  }

  // Number of nodes == number of basic blocks in the function.
  static size_t size(DOTFuncMSSAInfo *CFGInfo) {
    return CFGInfo->getFunction()->size();
  }
};
|
|
|
|
/// DOT rendering traits: reuses the plain-CFG (DOTFuncInfo) label machinery
/// but prints blocks through the MemorySSA annotated writer so MemoryDef /
/// MemoryPhi / MemoryUse lines appear in the node labels.
template <>
struct DOTGraphTraits<DOTFuncMSSAInfo *> : public DefaultDOTGraphTraits {

  DOTGraphTraits(bool IsSimple = false) : DefaultDOTGraphTraits(IsSimple) {}

  static std::string getGraphName(DOTFuncMSSAInfo *CFGInfo) {
    return "MSSA CFG for '" + CFGInfo->getFunction()->getName().str() +
           "' function";
  }

  std::string getNodeLabel(const BasicBlock *Node, DOTFuncMSSAInfo *CFGInfo) {
    return DOTGraphTraits<DOTFuncInfo *>::getCompleteNodeLabel(
        Node, nullptr,
        // Print each block through the MemorySSA writer so access
        // annotations are included in the label.
        [CFGInfo](raw_string_ostream &OS, const BasicBlock &BB) -> void {
          BB.print(OS, &CFGInfo->getWriter(), true, true);
        },
        // Strip ordinary IR comments from the label, but keep any comment
        // that carries a MemorySSA annotation.
        [](std::string &S, unsigned &I, unsigned Idx) -> void {
          std::string Str = S.substr(I, Idx - I);
          StringRef SR = Str;
          if (SR.count(" = MemoryDef(") || SR.count(" = MemoryPhi(") ||
              SR.count("MemoryUse("))
            return;
          DOTGraphTraits<DOTFuncInfo *>::eraseComment(S, I, Idx);
        });
  }

  static std::string getEdgeSourceLabel(const BasicBlock *Node,
                                        const_succ_iterator I) {
    return DOTGraphTraits<DOTFuncInfo *>::getEdgeSourceLabel(Node, I);
  }

  /// No extra edge attributes are emitted for the MSSA CFG (unlike the plain
  /// CFG printer, branch weights are not displayed here).
  std::string getEdgeAttributes(const BasicBlock *Node, const_succ_iterator I,
                                DOTFuncMSSAInfo *CFGInfo) {
    return "";
  }

  // Highlight nodes whose label contains a ';' (i.e. blocks that carry
  // MemorySSA annotation comments).
  std::string getNodeAttributes(const BasicBlock *Node,
                                DOTFuncMSSAInfo *CFGInfo) {
    return getNodeLabel(Node, CFGInfo).find(';') != std::string::npos
               ? "style=filled, fillcolor=lightpink"
               : "";
  }
};
|
|
|
|
} // namespace llvm
|
|
|
|
|
2016-07-07 05:20:47 +08:00
|
|
|
/// Print (or dot-dump) the function's MemorySSA. Never modifies the IR.
bool MemorySSAPrinterLegacyPass::runOnFunction(Function &F) {
  auto &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA();
  // Use .empty() rather than comparing against "" — same result without
  // materializing a temporary string for the comparison.
  if (!DotCFGMSSA.empty()) {
    // -dot-cfg-mssa was given a filename: emit the annotated CFG as a
    // DOT graph instead of printing to the debug stream.
    DOTFuncMSSAInfo CFGInfo(F, MSSA);
    WriteGraph(&CFGInfo, "", false, "MSSA", DotCFGMSSA);
  } else
    MSSA.print(dbgs());

  if (VerifyMemorySSA)
    MSSA.verifyMemorySSA();
  return false;
}
|
2016-11-24 01:53:26 +08:00
|
|
|
// Unique key identifying MemorySSAAnalysis to the new pass manager.
AnalysisKey MemorySSAAnalysis::Key;
|
|
2016-09-27 01:22:54 +08:00
|
|
|
/// Build MemorySSA for \p F. MemorySSA is constructed on top of the
/// dominator tree and alias analysis results.
MemorySSAAnalysis::Result MemorySSAAnalysis::run(Function &F,
                                                 FunctionAnalysisManager &AM) {
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  auto MSSA = std::make_unique<MemorySSA>(F, &AA, &DT);
  return Result(std::move(MSSA));
}
|
2019-05-01 06:43:55 +08:00
|
|
|
/// The result is invalid if MemorySSA itself was not preserved, or if either
/// of the analyses it was built from (AA, dominator tree) is invalidated.
bool MemorySSAAnalysis::Result::invalidate(
    Function &F, const PreservedAnalyses &PA,
    FunctionAnalysisManager::Invalidator &Inv) {
  auto PAC = PA.getChecker<MemorySSAAnalysis>();
  if (!PAC.preserved() && !PAC.preservedSet<AllAnalysesOn<Function>>())
    return true;
  // Note: short-circuit is deliberate — only consult the dominator tree if
  // AA did not already force invalidation.
  if (Inv.invalidate<AAManager>(F, PA))
    return true;
  return Inv.invalidate<DominatorTreeAnalysis>(F, PA);
}
|
2016-06-02 05:30:40 +08:00
|
|
|
/// New-PM printer: print MemorySSA for \p F to OS, or emit a DOT CFG when
/// -dot-cfg-mssa supplied a filename. Preserves all analyses.
PreservedAnalyses MemorySSAPrinterPass::run(Function &F,
                                            FunctionAnalysisManager &AM) {
  auto &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
  // Use .empty() rather than comparing against "" — same result without
  // materializing a temporary string for the comparison.
  if (!DotCFGMSSA.empty()) {
    DOTFuncMSSAInfo CFGInfo(F, MSSA);
    WriteGraph(&CFGInfo, "", false, "MSSA", DotCFGMSSA);
  } else {
    OS << "MemorySSA for function: " << F.getName() << "\n";
    MSSA.print(OS);
  }

  return PreservedAnalyses::all();
}
|
2021-09-01 05:43:21 +08:00
|
|
|
/// Print the IR for \p F with each memory access annotated by the clobber
/// its walker computes. Preserves all analyses.
PreservedAnalyses MemorySSAWalkerPrinterPass::run(Function &F,
                                                  FunctionAnalysisManager &AM) {
  auto &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
  OS << "MemorySSA (walker) for function: " << F.getName() << "\n";
  MemorySSAWalkerAnnotatedWriter AnnotWriter(&MSSA);
  F.print(OS, &AnnotWriter);
  return PreservedAnalyses::all();
}
|
2016-06-02 05:30:40 +08:00
|
|
|
/// Build (or fetch) MemorySSA for \p F and run its internal consistency
/// checks. Preserves all analyses.
PreservedAnalyses MemorySSAVerifierPass::run(Function &F,
                                             FunctionAnalysisManager &AM) {
  MemorySSA &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
  MSSA.verifyMemorySSA();
  return PreservedAnalyses::all();
}
|
2016-06-02 05:30:40 +08:00
|
|
|
// Pass identification: the address of ID is the pass's unique identifier.
char MemorySSAWrapperPass::ID = 0;
|
|
2016-06-02 05:30:40 +08:00
|
|
|
// Register the wrapper pass with the legacy pass registry on construction.
MemorySSAWrapperPass::MemorySSAWrapperPass() : FunctionPass(ID) {
  initializeMemorySSAWrapperPassPass(*PassRegistry::getPassRegistry());
}
|
|
2016-06-02 05:30:40 +08:00
|
|
|
// Drop the computed MemorySSA so its memory is reclaimed between functions.
void MemorySSAWrapperPass::releaseMemory() { MSSA.reset(); }
|
2016-06-02 05:30:40 +08:00
|
|
|
/// MemorySSA is built from alias analysis and the dominator tree; both are
/// required transitively (users of MemorySSA may query them through it).
/// Nothing is modified.
void MemorySSAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequiredTransitive<AAResultsWrapperPass>();
  AU.addRequiredTransitive<DominatorTreeWrapperPass>();
  AU.setPreservesAll();
}
|
2016-06-02 05:30:40 +08:00
|
|
|
/// Rebuild MemorySSA for \p F from the current dominator tree and alias
/// analysis results. Never modifies the IR.
bool MemorySSAWrapperPass::runOnFunction(Function &F) {
  auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
  // Prefer make_unique over reset(new ...): no raw new, and the file already
  // uses std::make_unique elsewhere (see MemorySSAAnalysis::run).
  MSSA = std::make_unique<MemorySSA>(F, &AA, &DT);
  return false;
}
|
2020-02-13 05:25:27 +08:00
|
|
|
void MemorySSAWrapperPass::verifyAnalysis() const {
|
|
|
|
if (VerifyMemorySSA)
|
|
|
|
MSSA->verifyMemorySSA();
|
|
|
|
}
|
2016-02-03 06:46:49 +08:00
|
|
|
|
2016-06-02 05:30:40 +08:00
|
|
|
// Print the held MemorySSA. The Module argument is part of the FunctionPass
// print interface and is intentionally unused here.
void MemorySSAWrapperPass::print(raw_ostream &OS, const Module *M) const {
  MSSA->print(OS);
}
|
|
|
|
// A walker is permanently tied to the MemorySSA it answers queries for.
MemorySSAWalker::MemorySSAWalker(MemorySSA *M) : MSSA(M) {}
|
2019-01-08 03:22:37 +08:00
|
|
|
/// Walk the use-def chains starting at \p StartingAccess and find
/// the MemoryAccess that actually clobbers Loc.
///
/// Unlike the instruction-based overload below, the caller supplies the
/// memory location explicitly and is assumed to already believe
/// \p StartingAccess clobbers it, so no result is cached on the access.
///
/// \returns our clobbering memory access
template <typename AliasAnalysisType>
MemoryAccess *
MemorySSA::ClobberWalkerBase<AliasAnalysisType>::getClobberingMemoryAccessBase(
    MemoryAccess *StartingAccess, const MemoryLocation &Loc,
    unsigned &UpwardWalkLimit) {
  assert(!isa<MemoryUse>(StartingAccess) && "Use cannot be defining access");

  Instruction *I = nullptr;
  if (auto *StartingUseOrDef = dyn_cast<MemoryUseOrDef>(StartingAccess)) {
    // Nothing is above the live-on-entry def; it is its own clobber.
    if (MSSA->isLiveOnEntryDef(StartingUseOrDef))
      return StartingUseOrDef;

    I = StartingUseOrDef->getMemoryInst();

    // Conservatively, fences are always clobbers, so don't perform the walk if
    // we hit a fence.
    if (!isa<CallBase>(I) && I->isFenceLike())
      return StartingUseOrDef;
  }

  // Build the query around the explicit location; Inst/IsCall are cleared
  // because the query is location-based, not instruction-based.
  UpwardsMemoryQuery Q;
  Q.OriginalAccess = StartingAccess;
  Q.StartingLoc = Loc;
  Q.Inst = nullptr;
  Q.IsCall = false;

  // Unlike the other function, do not walk to the def of a def, because we are
  // handed something we already believe is the clobbering access.
  // We never set SkipSelf to true in Q in this method.
  MemoryAccess *Clobber =
      Walker.findClobber(StartingAccess, Q, UpwardWalkLimit);
  LLVM_DEBUG({
    dbgs() << "Clobber starting at access " << *StartingAccess << "\n";
    if (I)
      dbgs() << " for instruction " << *I << "\n";
    dbgs() << " is " << *Clobber << "\n";
  });
  return Clobber;
}
|
[AliasAnalysis] Second prototype to cache BasicAA / anyAA state.
Summary:
Adding contained caching to AliasAnalysis. BasicAA is currently the only one using it.
AA changes:
- This patch is pulling the caches from BasicAAResults to AAResults, meaning the getModRefInfo call benefits from the IsCapturedCache as well when in "batch mode".
- All AAResultBase implementations add the QueryInfo member to all APIs. AAResults APIs maintain wrapper APIs such that all alias()/getModRefInfo call sites are unchanged.
- AA now provides a BatchAAResults type as a wrapper to AAResults. It keeps the AAResults instance and a QueryInfo instantiated to batch mode. It delegates all work to the AAResults instance with the batched QueryInfo. More API wrappers may be needed in BatchAAResults; only the minimum needed is currently added.
MemorySSA changes:
- All walkers are now templated on the AA used (AliasAnalysis=AAResults or BatchAAResults).
- At build time, we optimize uses; now we create a local walker (lives only as long as OptimizeUses does) using BatchAAResults.
- All Walkers have an internal AA and only use that now, never the AA in MemorySSA. The Walkers receive the AA they will use when built.
- The walker we use for queries after the build is instantiated on AliasAnalysis and is built after building MemorySSA and setting AA.
- All static methods doing walking are now templated on AliasAnalysisType if they are used both during build and after. If used only during build, the method now only takes a BatchAAResults. If used only after build, the method now takes an AliasAnalysis.
Subscribers: sanjoy, arsenm, jvesely, nhaehnle, jlebar, george.burgess.iv, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D59315
llvm-svn: 356783
2019-03-23 01:22:19 +08:00
|
|
|
/// Find (and cache) the clobbering access for \p MA's own location.
///
/// When \p SkipSelf is true and \p MA is a MemoryDef, a second walk may be
/// performed past MA itself so the result is never the def's own phi.
/// Results of the first walk are cached on the access via setOptimized().
template <typename AliasAnalysisType>
MemoryAccess *
MemorySSA::ClobberWalkerBase<AliasAnalysisType>::getClobberingMemoryAccessBase(
    MemoryAccess *MA, unsigned &UpwardWalkLimit, bool SkipSelf) {
  auto *StartingAccess = dyn_cast<MemoryUseOrDef>(MA);
  // If this is a MemoryPhi, we can't do anything.
  if (!StartingAccess)
    return MA;

  bool IsOptimized = false;

  // If this is an already optimized use or def, return the optimized result.
  // Note: Currently, we store the optimized def result in a separate field,
  // since we can't use the defining access.
  if (StartingAccess->isOptimized()) {
    if (!SkipSelf || !isa<MemoryDef>(StartingAccess))
      return StartingAccess->getOptimized();
    // A SkipSelf query on a def still needs the second walk below, but the
    // first (cached) walk can be reused.
    IsOptimized = true;
  }

  const Instruction *I = StartingAccess->getMemoryInst();
  // We can't sanely do anything with a fence, since they conservatively clobber
  // all memory, and have no locations to get pointers from to try to
  // disambiguate.
  if (!isa<CallBase>(I) && I->isFenceLike())
    return StartingAccess;

  UpwardsMemoryQuery Q(I, StartingAccess);

  // Cheap check: accesses that provably can't be clobbered (per AA) optimize
  // straight to the live-on-entry def without walking.
  if (isUseTriviallyOptimizableToLiveOnEntry(*Walker.getAA(), I)) {
    MemoryAccess *LiveOnEntry = MSSA->getLiveOnEntryDef();
    StartingAccess->setOptimized(LiveOnEntry);
    StartingAccess->setOptimizedAccessType(None);
    return LiveOnEntry;
  }

  MemoryAccess *OptimizedAccess;
  if (!IsOptimized) {
    // Start with the thing we already think clobbers this location
    MemoryAccess *DefiningAccess = StartingAccess->getDefiningAccess();

    // At this point, DefiningAccess may be the live on entry def.
    // If it is, we will not get a better result.
    if (MSSA->isLiveOnEntryDef(DefiningAccess)) {
      StartingAccess->setOptimized(DefiningAccess);
      StartingAccess->setOptimizedAccessType(None);
      return DefiningAccess;
    }

    OptimizedAccess = Walker.findClobber(DefiningAccess, Q, UpwardWalkLimit);
    // Cache the walk result, along with the alias-result tag when the walk
    // established a must-alias relationship.
    StartingAccess->setOptimized(OptimizedAccess);
    if (MSSA->isLiveOnEntryDef(OptimizedAccess))
      StartingAccess->setOptimizedAccessType(None);
    else if (Q.AR && *Q.AR == AliasResult::MustAlias)
      StartingAccess->setOptimizedAccessType(
          AliasResult(AliasResult::MustAlias));
  } else
    OptimizedAccess = StartingAccess->getOptimized();

  LLVM_DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
  LLVM_DEBUG(dbgs() << *StartingAccess << "\n");
  LLVM_DEBUG(dbgs() << "Optimized Memory SSA clobber for " << *I << " is ");
  LLVM_DEBUG(dbgs() << *OptimizedAccess << "\n");

  MemoryAccess *Result;
  // Second walk for SkipSelf defs that optimized to a phi: continue past the
  // def itself, provided the walk budget is not already exhausted.
  if (SkipSelf && isa<MemoryPhi>(OptimizedAccess) &&
      isa<MemoryDef>(StartingAccess) && UpwardWalkLimit) {
    assert(isa<MemoryDef>(Q.OriginalAccess));
    Q.SkipSelfAccess = true;
    Result = Walker.findClobber(OptimizedAccess, Q, UpwardWalkLimit);
  } else
    Result = OptimizedAccess;

  LLVM_DEBUG(dbgs() << "Result Memory SSA clobber [SkipSelf = " << SkipSelf);
  LLVM_DEBUG(dbgs() << "] for " << *I << " is " << *Result << "\n");

  return Result;
}
|
|
|
|
/// Trivial walker: without doing any walking, the best answer for a use or
/// def is simply its defining access; a phi is returned unchanged.
MemoryAccess *
DoNothingMemorySSAWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
  auto *UseOrDef = dyn_cast<MemoryUseOrDef>(MA);
  return UseOrDef ? UseOrDef->getDefiningAccess() : MA;
}
|
|
|
|
/// Location-qualified overload: the location is ignored; behaves exactly
/// like the unqualified overload above.
MemoryAccess *DoNothingMemorySSAWalker::getClobberingMemoryAccess(
    MemoryAccess *StartingAccess, const MemoryLocation &) {
  auto *UseOrDef = dyn_cast<MemoryUseOrDef>(StartingAccess);
  return UseOrDef ? UseOrDef->getDefiningAccess() : StartingAccess;
}
|
[IR] De-virtualize ~Value to save a vptr
Summary:
Implements PR889
Removing the virtual table pointer from Value saves 1% of RSS when doing
LTO of llc on Linux. The impact on time was positive, but too noisy to
conclusively say that performance improved. Here is a link to the
spreadsheet with the original data:
https://docs.google.com/spreadsheets/d/1F4FHir0qYnV0MEp2sYYp_BuvnJgWlWPhWOwZ6LbW7W4/edit?usp=sharing
This change makes it invalid to directly delete a Value, User, or
Instruction pointer. Instead, such code can be rewritten to a null check
and a call Value::deleteValue(). Value objects tend to have their
lifetimes managed through iplist, so for the most part, this isn't a big
deal. However, there are some places where LLVM deletes values, and
those places had to be migrated to deleteValue. I have also created
llvm::unique_value, which has a custom deleter, so it can be used in
place of std::unique_ptr<Value>.
I had to add the "DerivedUser" Deleter escape hatch for MemorySSA, which
derives from User outside of lib/IR. Code in IR cannot include MemorySSA
headers or call the MemoryAccess object destructors without introducing
a circular dependency, so we need some level of indirection.
Unfortunately, no class derived from User may have any virtual methods,
because adding a virtual method would break User::getHungOffOperands(),
which assumes that it can find the use list immediately prior to the
User object. I've added a static_assert to the appropriate OperandTraits
templates to help people avoid this trap.
Reviewers: chandlerc, mehdi_amini, pete, dberlin, george.burgess.iv
Reviewed By: chandlerc
Subscribers: krytarowski, eraman, george.burgess.iv, mzolotukhin, Prazek, nlewycky, hans, inglorion, pcc, tejohnson, dberlin, llvm-commits
Differential Revision: https://reviews.llvm.org/D31261
llvm-svn: 303362
2017-05-19 01:24:10 +08:00
|
|
|
|
|
|
|
// DerivedUser deletion hook: Self is known to be a MemoryPhi, so cast and
// delete with the correct static type.
void MemoryPhi::deleteMe(DerivedUser *Self) {
  delete static_cast<MemoryPhi *>(Self);
}
|
|
|
|
// DerivedUser deletion hook: Self is known to be a MemoryDef, so cast and
// delete with the correct static type.
void MemoryDef::deleteMe(DerivedUser *Self) {
  delete static_cast<MemoryDef *>(Self);
}
|
|
|
|
// DerivedUser deletion hook: Self is known to be a MemoryUse, so cast and
// delete with the correct static type.
void MemoryUse::deleteMe(DerivedUser *Self) {
  delete static_cast<MemoryUse *>(Self);
}
|
2020-12-09 20:06:50 +08:00
|
|
|
|
|
|
|
/// Conservatively decide whether \p Ptr is guaranteed loop invariant.
bool upward_defs_iterator::IsGuaranteedLoopInvariant(Value *Ptr) const {
  // A base pointer is invariant if, after stripping casts, it is not an
  // instruction at all (argument, global, constant) or it is an alloca.
  auto IsInvariantBase = [](Value *Base) {
    Base = Base->stripPointerCasts();
    return !isa<Instruction>(Base) || isa<AllocaInst>(Base);
  };

  Ptr = Ptr->stripPointerCasts();

  // Anything defined in the entry block cannot be inside a loop.
  if (auto *I = dyn_cast<Instruction>(Ptr))
    if (I->getParent()->isEntryBlock())
      return true;

  // A GEP is invariant when its base pointer is and all indices are
  // constants.
  if (auto *GEP = dyn_cast<GEPOperator>(Ptr))
    return IsInvariantBase(GEP->getPointerOperand()) &&
           GEP->hasAllConstantIndices();

  return IsInvariantBase(Ptr);
}
|