2011-01-03 05:47:05 +08:00
|
|
|
//===- EarlyCSE.cpp - Simple and fast CSE pass ----------------------------===//
|
|
|
|
//
|
2019-01-19 16:50:56 +08:00
|
|
|
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
|
|
|
// See https://llvm.org/LICENSE.txt for license information.
|
|
|
|
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
2011-01-03 05:47:05 +08:00
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//
|
|
|
|
// This pass performs a simple dominator tree walk that eliminates trivially
|
|
|
|
// redundant instructions.
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2015-02-01 18:51:23 +08:00
|
|
|
#include "llvm/Transforms/Scalar/EarlyCSE.h"
|
2017-10-14 05:17:07 +08:00
|
|
|
#include "llvm/ADT/DenseMapInfo.h"
|
2012-12-04 00:50:05 +08:00
|
|
|
#include "llvm/ADT/Hashing.h"
|
2017-10-14 05:17:07 +08:00
|
|
|
#include "llvm/ADT/STLExtras.h"
|
2012-12-04 00:50:05 +08:00
|
|
|
#include "llvm/ADT/ScopedHashTable.h"
|
2017-06-15 03:29:53 +08:00
|
|
|
#include "llvm/ADT/SetVector.h"
|
2017-10-14 05:17:07 +08:00
|
|
|
#include "llvm/ADT/SmallVector.h"
|
2012-12-04 00:50:05 +08:00
|
|
|
#include "llvm/ADT/Statistic.h"
|
2016-12-19 16:22:17 +08:00
|
|
|
#include "llvm/Analysis/AssumptionCache.h"
|
2016-04-28 22:59:27 +08:00
|
|
|
#include "llvm/Analysis/GlobalsModRef.h"
|
2018-08-30 11:39:16 +08:00
|
|
|
#include "llvm/Analysis/GuardUtils.h"
|
2011-01-03 07:04:14 +08:00
|
|
|
#include "llvm/Analysis/InstructionSimplify.h"
|
2017-04-12 04:06:36 +08:00
|
|
|
#include "llvm/Analysis/MemorySSA.h"
|
|
|
|
#include "llvm/Analysis/MemorySSAUpdater.h"
|
2015-03-24 03:32:43 +08:00
|
|
|
#include "llvm/Analysis/TargetLibraryInfo.h"
|
2015-01-27 06:51:15 +08:00
|
|
|
#include "llvm/Analysis/TargetTransformInfo.h"
|
2017-12-14 05:58:15 +08:00
|
|
|
#include "llvm/Analysis/ValueTracking.h"
|
2017-10-14 05:17:07 +08:00
|
|
|
#include "llvm/IR/BasicBlock.h"
|
|
|
|
#include "llvm/IR/Constants.h"
|
2013-01-02 19:36:10 +08:00
|
|
|
#include "llvm/IR/DataLayout.h"
|
2014-01-13 17:26:24 +08:00
|
|
|
#include "llvm/IR/Dominators.h"
|
2017-10-14 05:17:07 +08:00
|
|
|
#include "llvm/IR/Function.h"
|
|
|
|
#include "llvm/IR/InstrTypes.h"
|
|
|
|
#include "llvm/IR/Instruction.h"
|
2013-01-02 19:36:10 +08:00
|
|
|
#include "llvm/IR/Instructions.h"
|
2014-11-04 04:21:32 +08:00
|
|
|
#include "llvm/IR/IntrinsicInst.h"
|
2017-10-14 05:17:07 +08:00
|
|
|
#include "llvm/IR/Intrinsics.h"
|
|
|
|
#include "llvm/IR/LLVMContext.h"
|
|
|
|
#include "llvm/IR/PassManager.h"
|
2014-11-04 04:21:32 +08:00
|
|
|
#include "llvm/IR/PatternMatch.h"
|
2017-10-14 05:17:07 +08:00
|
|
|
#include "llvm/IR/Type.h"
|
|
|
|
#include "llvm/IR/Use.h"
|
|
|
|
#include "llvm/IR/Value.h"
|
Sink all InitializePasses.h includes
This file lists every pass in LLVM, and is included by Pass.h, which is
very popular. Every time we add, remove, or rename a pass in LLVM, it
caused lots of recompilation.
I found this fact by looking at this table, which is sorted by the
number of times a file was changed over the last 100,000 git commits
multiplied by the number of object files that depend on it in the
current checkout:
recompiles touches affected_files header
342380 95 3604 llvm/include/llvm/ADT/STLExtras.h
314730 234 1345 llvm/include/llvm/InitializePasses.h
307036 118 2602 llvm/include/llvm/ADT/APInt.h
213049 59 3611 llvm/include/llvm/Support/MathExtras.h
170422 47 3626 llvm/include/llvm/Support/Compiler.h
162225 45 3605 llvm/include/llvm/ADT/Optional.h
158319 63 2513 llvm/include/llvm/ADT/Triple.h
140322 39 3598 llvm/include/llvm/ADT/StringRef.h
137647 59 2333 llvm/include/llvm/Support/Error.h
131619 73 1803 llvm/include/llvm/Support/FileSystem.h
Before this change, touching InitializePasses.h would cause 1345 files
to recompile. After this change, touching it only causes 550 compiles in
an incremental rebuild.
Reviewers: bkramer, asbirlea, bollu, jdoerfert
Differential Revision: https://reviews.llvm.org/D70211
2019-11-14 05:15:01 +08:00
|
|
|
#include "llvm/InitializePasses.h"
|
2012-12-04 00:50:05 +08:00
|
|
|
#include "llvm/Pass.h"
|
2017-10-14 05:17:07 +08:00
|
|
|
#include "llvm/Support/Allocator.h"
|
|
|
|
#include "llvm/Support/AtomicOrdering.h"
|
|
|
|
#include "llvm/Support/Casting.h"
|
2011-01-03 07:19:45 +08:00
|
|
|
#include "llvm/Support/Debug.h"
|
2018-04-07 02:47:33 +08:00
|
|
|
#include "llvm/Support/DebugCounter.h"
|
2011-01-03 09:42:46 +08:00
|
|
|
#include "llvm/Support/RecyclingAllocator.h"
|
2015-03-24 03:32:43 +08:00
|
|
|
#include "llvm/Support/raw_ostream.h"
|
2015-02-01 18:51:23 +08:00
|
|
|
#include "llvm/Transforms/Scalar.h"
|
2018-08-30 11:39:16 +08:00
|
|
|
#include "llvm/Transforms/Utils/GuardUtils.h"
|
Sink all InitializePasses.h includes
This file lists every pass in LLVM, and is included by Pass.h, which is
very popular. Every time we add, remove, or rename a pass in LLVM, it
caused lots of recompilation.
I found this fact by looking at this table, which is sorted by the
number of times a file was changed over the last 100,000 git commits
multiplied by the number of object files that depend on it in the
current checkout:
recompiles touches affected_files header
342380 95 3604 llvm/include/llvm/ADT/STLExtras.h
314730 234 1345 llvm/include/llvm/InitializePasses.h
307036 118 2602 llvm/include/llvm/ADT/APInt.h
213049 59 3611 llvm/include/llvm/Support/MathExtras.h
170422 47 3626 llvm/include/llvm/Support/Compiler.h
162225 45 3605 llvm/include/llvm/ADT/Optional.h
158319 63 2513 llvm/include/llvm/ADT/Triple.h
140322 39 3598 llvm/include/llvm/ADT/StringRef.h
137647 59 2333 llvm/include/llvm/Support/Error.h
131619 73 1803 llvm/include/llvm/Support/FileSystem.h
Before this change, touching InitializePasses.h would cause 1345 files
to recompile. After this change, touching it only causes 550 compiles in
an incremental rebuild.
Reviewers: bkramer, asbirlea, bollu, jdoerfert
Differential Revision: https://reviews.llvm.org/D70211
2019-11-14 05:15:01 +08:00
|
|
|
#include "llvm/Transforms/Utils/Local.h"
|
2017-10-14 05:17:07 +08:00
|
|
|
#include <cassert>
|
2014-09-20 21:29:20 +08:00
|
|
|
#include <deque>
|
2017-10-14 05:17:07 +08:00
|
|
|
#include <memory>
|
|
|
|
#include <utility>
|
|
|
|
|
2011-01-03 05:47:05 +08:00
|
|
|
using namespace llvm;
|
2014-11-04 04:21:32 +08:00
|
|
|
using namespace llvm::PatternMatch;
|
2011-01-03 05:47:05 +08:00
|
|
|
|
2014-04-22 10:55:47 +08:00
|
|
|
#define DEBUG_TYPE "early-cse"
|
|
|
|
|
2011-01-03 11:28:23 +08:00
|
|
|
STATISTIC(NumSimplify, "Number of instructions simplified or DCE'd");
|
|
|
|
STATISTIC(NumCSE, "Number of instructions CSE'd");
|
2016-04-23 02:47:21 +08:00
|
|
|
STATISTIC(NumCSECVP, "Number of compare instructions CVP'd");
|
2011-01-03 11:41:27 +08:00
|
|
|
STATISTIC(NumCSELoad, "Number of load instructions CSE'd");
|
|
|
|
STATISTIC(NumCSECall, "Number of call instructions CSE'd");
|
2011-01-03 12:17:24 +08:00
|
|
|
STATISTIC(NumDSE, "Number of trivial dead stores removed");
|
2011-01-03 11:18:43 +08:00
|
|
|
|
2018-04-07 02:47:33 +08:00
|
|
|
DEBUG_COUNTER(CSECounter, "early-cse",
|
|
|
|
"Controls which instructions are removed");
|
|
|
|
|
2019-02-16 06:47:54 +08:00
|
|
|
static cl::opt<unsigned> EarlyCSEMssaOptCap(
|
|
|
|
"earlycse-mssa-optimization-cap", cl::init(500), cl::Hidden,
|
|
|
|
cl::desc("Enable imprecision in EarlyCSE in pathological cases, in exchange "
|
|
|
|
"for faster compile. Caps the MemorySSA clobbering calls."));
|
|
|
|
|
[EarlyCSE] Ensure equal keys have the same hash value
Summary:
The logic in EarlyCSE that looks through 'not' operations in the
predicate recognizes e.g. that `select (not (cmp sgt X, Y)), X, Y` is
equivalent to `select (cmp sgt X, Y), Y, X`. Without this change,
however, only the latter is recognized as a form of `smin X, Y`, so the
two expressions receive different hash codes. This leads to missed
optimization opportunities when the quadratic probing for the two hashes
doesn't happen to collide, and assertion failures when probing doesn't
collide on insertion but does collide on a subsequent table grow
operation.
This change inverts the order of some of the pattern matching, checking
first for the optional `not` and then for the min/max/abs patterns, so
that e.g. both expressions above are recognized as a form of `smin X, Y`.
It also adds an assertion to isEqual verifying that it implies equal
hash codes; this fires when there's a collision during insertion, not
just grow, and so will make it easier to notice if these functions fall
out of sync again. A new flag --earlycse-debug-hash is added which can
be used when changing the hash function; it forces hash collisions so
that any pair of values inserted which compare as equal but hash
differently will be caught by the isEqual assertion.
Reviewers: spatel, nikic
Reviewed By: spatel, nikic
Subscribers: lebedev.ri, arsenm, craig.topper, efriedma, hiraditya, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D62644
llvm-svn: 363274
2019-06-13 23:24:11 +08:00
|
|
|
static cl::opt<bool> EarlyCSEDebugHash(
|
|
|
|
"earlycse-debug-hash", cl::init(false), cl::Hidden,
|
|
|
|
cl::desc("Perform extra assertion checking to verify that SimpleValue's hash "
|
|
|
|
"function is well-behaved w.r.t. its isEqual predicate"));
|
|
|
|
|
2011-01-03 10:20:48 +08:00
|
|
|
//===----------------------------------------------------------------------===//
|
2012-07-24 18:51:42 +08:00
|
|
|
// SimpleValue
|
2011-01-03 10:20:48 +08:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2011-01-03 05:47:05 +08:00
|
|
|
namespace {
|
2017-10-14 05:17:07 +08:00
|
|
|
|
2018-05-01 23:54:18 +08:00
|
|
|
/// Struct representing the available values in the scoped hash table.
|
2015-01-24 19:33:55 +08:00
|
|
|
struct SimpleValue {
|
|
|
|
Instruction *Inst;
|
2012-07-24 18:51:42 +08:00
|
|
|
|
2015-01-24 19:33:55 +08:00
|
|
|
SimpleValue(Instruction *I) : Inst(I) {
|
|
|
|
assert((isSentinel() || canHandle(I)) && "Inst can't be handled!");
|
|
|
|
}
|
2012-07-24 18:51:42 +08:00
|
|
|
|
2015-01-24 19:33:55 +08:00
|
|
|
bool isSentinel() const {
|
|
|
|
return Inst == DenseMapInfo<Instruction *>::getEmptyKey() ||
|
|
|
|
Inst == DenseMapInfo<Instruction *>::getTombstoneKey();
|
|
|
|
}
|
2012-07-24 18:51:42 +08:00
|
|
|
|
2015-01-24 19:33:55 +08:00
|
|
|
static bool canHandle(Instruction *Inst) {
|
|
|
|
// This can only handle non-void readnone functions.
|
|
|
|
if (CallInst *CI = dyn_cast<CallInst>(Inst))
|
|
|
|
return CI->doesNotAccessMemory() && !CI->getType()->isVoidTy();
|
2019-08-07 22:34:41 +08:00
|
|
|
return isa<CastInst>(Inst) || isa<UnaryOperator>(Inst) ||
|
|
|
|
isa<BinaryOperator>(Inst) || isa<GetElementPtrInst>(Inst) ||
|
|
|
|
isa<CmpInst>(Inst) || isa<SelectInst>(Inst) ||
|
|
|
|
isa<ExtractElementInst>(Inst) || isa<InsertElementInst>(Inst) ||
|
|
|
|
isa<ShuffleVectorInst>(Inst) || isa<ExtractValueInst>(Inst) ||
|
|
|
|
isa<InsertValueInst>(Inst);
|
2015-01-24 19:33:55 +08:00
|
|
|
}
|
|
|
|
};
|
2017-10-14 05:17:07 +08:00
|
|
|
|
|
|
|
} // end anonymous namespace
|
2011-01-03 07:04:14 +08:00
|
|
|
|
|
|
|
namespace llvm {

/// DenseMap traits for SimpleValue. The empty/tombstone sentinels are
/// borrowed from the Instruction* traits (SimpleValue is implicitly
/// constructible from Instruction*); hashing and equality are defined
/// out-of-line below so they can share the select/min-max matching helpers.
template <> struct DenseMapInfo<SimpleValue> {
  static inline SimpleValue getEmptyKey() {
    return DenseMapInfo<Instruction *>::getEmptyKey();
  }

  static inline SimpleValue getTombstoneKey() {
    return DenseMapInfo<Instruction *>::getTombstoneKey();
  }

  // Defined below; must agree with each other (equal keys hash equally).
  static unsigned getHashValue(SimpleValue Val);
  static bool isEqual(SimpleValue LHS, SimpleValue RHS);
};

} // end namespace llvm
|
2011-01-03 07:04:14 +08:00
|
|
|
|
[EarlyCSE] Ensure equal keys have the same hash value
Summary:
The logic in EarlyCSE that looks through 'not' operations in the
predicate recognizes e.g. that `select (not (cmp sgt X, Y)), X, Y` is
equivalent to `select (cmp sgt X, Y), Y, X`. Without this change,
however, only the latter is recognized as a form of `smin X, Y`, so the
two expressions receive different hash codes. This leads to missed
optimization opportunities when the quadratic probing for the two hashes
doesn't happen to collide, and assertion failures when probing doesn't
collide on insertion but does collide on a subsequent table grow
operation.
This change inverts the order of some of the pattern matching, checking
first for the optional `not` and then for the min/max/abs patterns, so
that e.g. both expressions above are recognized as a form of `smin X, Y`.
It also adds an assertion to isEqual verifying that it implies equal
hash codes; this fires when there's a collision during insertion, not
just grow, and so will make it easier to notice if these functions fall
out of sync again. A new flag --earlycse-debug-hash is added which can
be used when changing the hash function; it forces hash collisions so
that any pair of values inserted which compare as equal but hash
differently will be caught by the isEqual assertion.
Reviewers: spatel, nikic
Reviewed By: spatel, nikic
Subscribers: lebedev.ri, arsenm, craig.topper, efriedma, hiraditya, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D62644
llvm-svn: 363274
2019-06-13 23:24:11 +08:00
|
|
|
/// Match a 'select' including an optional 'not's of the condition.
|
|
|
|
static bool matchSelectWithOptionalNotCond(Value *V, Value *&Cond, Value *&A,
|
|
|
|
Value *&B,
|
|
|
|
SelectPatternFlavor &Flavor) {
|
|
|
|
// Return false if V is not even a select.
|
|
|
|
if (!match(V, m_Select(m_Value(Cond), m_Value(A), m_Value(B))))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// Look through a 'not' of the condition operand by swapping A/B.
|
|
|
|
Value *CondNot;
|
|
|
|
if (match(Cond, m_Not(m_Value(CondNot)))) {
|
|
|
|
Cond = CondNot;
|
|
|
|
std::swap(A, B);
|
2019-04-17 04:41:20 +08:00
|
|
|
}
|
[EarlyCSE] Ensure equal keys have the same hash value
Summary:
The logic in EarlyCSE that looks through 'not' operations in the
predicate recognizes e.g. that `select (not (cmp sgt X, Y)), X, Y` is
equivalent to `select (cmp sgt X, Y), Y, X`. Without this change,
however, only the latter is recognized as a form of `smin X, Y`, so the
two expressions receive different hash codes. This leads to missed
optimization opportunities when the quadratic probing for the two hashes
doesn't happen to collide, and assertion failures when probing doesn't
collide on insertion but does collide on a subsequent table grow
operation.
This change inverts the order of some of the pattern matching, checking
first for the optional `not` and then for the min/max/abs patterns, so
that e.g. both expressions above are recognized as a form of `smin X, Y`.
It also adds an assertion to isEqual verifying that it implies equal
hash codes; this fires when there's a collision during insertion, not
just grow, and so will make it easier to notice if these functions fall
out of sync again. A new flag --earlycse-debug-hash is added which can
be used when changing the hash function; it forces hash collisions so
that any pair of values inserted which compare as equal but hash
differently will be caught by the isEqual assertion.
Reviewers: spatel, nikic
Reviewed By: spatel, nikic
Subscribers: lebedev.ri, arsenm, craig.topper, efriedma, hiraditya, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D62644
llvm-svn: 363274
2019-06-13 23:24:11 +08:00
|
|
|
|
|
|
|
// Set flavor if we find a match, or set it to unknown otherwise; in
|
|
|
|
// either case, return true to indicate that this is a select we can
|
|
|
|
// process.
|
|
|
|
if (auto *CmpI = dyn_cast<ICmpInst>(Cond))
|
|
|
|
Flavor = matchDecomposedSelectPattern(CmpI, A, B, A, B).Flavor;
|
|
|
|
else
|
|
|
|
Flavor = SPF_UNKNOWN;
|
|
|
|
|
|
|
|
return true;
|
2019-04-17 04:41:20 +08:00
|
|
|
}
|
|
|
|
|
[EarlyCSE] Ensure equal keys have the same hash value
Summary:
The logic in EarlyCSE that looks through 'not' operations in the
predicate recognizes e.g. that `select (not (cmp sgt X, Y)), X, Y` is
equivalent to `select (cmp sgt X, Y), Y, X`. Without this change,
however, only the latter is recognized as a form of `smin X, Y`, so the
two expressions receive different hash codes. This leads to missed
optimization opportunities when the quadratic probing for the two hashes
doesn't happen to collide, and assertion failures when probing doesn't
collide on insertion but does collide on a subsequent table grow
operation.
This change inverts the order of some of the pattern matching, checking
first for the optional `not` and then for the min/max/abs patterns, so
that e.g. both expressions above are recognized as a form of `smin X, Y`.
It also adds an assertion to isEqual verifying that it implies equal
hash codes; this fires when there's a collision during insertion, not
just grow, and so will make it easier to notice if these functions fall
out of sync again. A new flag --earlycse-debug-hash is added which can
be used when changing the hash function; it forces hash collisions so
that any pair of values inserted which compare as equal but hash
differently will be caught by the isEqual assertion.
Reviewers: spatel, nikic
Reviewed By: spatel, nikic
Subscribers: lebedev.ri, arsenm, craig.topper, efriedma, hiraditya, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D62644
llvm-svn: 363274
2019-06-13 23:24:11 +08:00
|
|
|
/// Compute the hash for a SimpleValue. This must be kept in sync with
/// isEqualImpl: any two values that compare equal there must hash to the
/// same code here, so each commutable form is canonicalized before mixing.
static unsigned getHashValueImpl(SimpleValue Val) {
  Instruction *Inst = Val.Inst;
  // Hash in all of the operands as pointers.
  if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Inst)) {
    Value *LHS = BinOp->getOperand(0);
    Value *RHS = BinOp->getOperand(1);
    // Canonicalize commutative operand order by pointer value so that
    // `a op b` and `b op a` hash identically.
    if (BinOp->isCommutative() && BinOp->getOperand(0) > BinOp->getOperand(1))
      std::swap(LHS, RHS);

    return hash_combine(BinOp->getOpcode(), LHS, RHS);
  }

  if (CmpInst *CI = dyn_cast<CmpInst>(Inst)) {
    // Compares can be commuted by swapping the comparands and
    // updating the predicate. Choose the form that has the
    // comparands in sorted order, or in the case of a tie, the
    // one with the lower predicate.
    Value *LHS = CI->getOperand(0);
    Value *RHS = CI->getOperand(1);
    CmpInst::Predicate Pred = CI->getPredicate();
    CmpInst::Predicate SwappedPred = CI->getSwappedPredicate();
    if (std::tie(LHS, Pred) > std::tie(RHS, SwappedPred)) {
      std::swap(LHS, RHS);
      Pred = SwappedPred;
    }
    return hash_combine(Inst->getOpcode(), Pred, LHS, RHS);
  }

  // Hash general selects to allow matching commuted true/false operands.
  SelectPatternFlavor SPF;
  Value *Cond, *A, *B;
  if (matchSelectWithOptionalNotCond(Inst, Cond, A, B, SPF)) {
    // Hash min/max/abs (cmp + select) to allow for commuted operands.
    // Min/max may also have non-canonical compare predicate (eg, the compare for
    // smin may use 'sgt' rather than 'slt'), and non-canonical operands in the
    // compare.
    // TODO: We should also detect FP min/max.
    if (SPF == SPF_SMIN || SPF == SPF_SMAX ||
        SPF == SPF_UMIN || SPF == SPF_UMAX) {
      // Min/max are fully commutative once the flavor is known, so hash
      // the operands in pointer-sorted order.
      if (A > B)
        std::swap(A, B);
      return hash_combine(Inst->getOpcode(), SPF, A, B);
    }
    if (SPF == SPF_ABS || SPF == SPF_NABS) {
      // ABS/NABS always puts the input in A and its negation in B.
      return hash_combine(Inst->getOpcode(), SPF, A, B);
    }

    // Hash general selects to allow matching commuted true/false operands.

    // If we do not have a compare as the condition, just hash in the condition.
    CmpInst::Predicate Pred;
    Value *X, *Y;
    if (!match(Cond, m_Cmp(Pred, m_Value(X), m_Value(Y))))
      return hash_combine(Inst->getOpcode(), Cond, A, B);

    // Similar to cmp normalization (above) - canonicalize the predicate value:
    // select (icmp Pred, X, Y), A, B --> select (icmp InvPred, X, Y), B, A
    if (CmpInst::getInversePredicate(Pred) < Pred) {
      Pred = CmpInst::getInversePredicate(Pred);
      std::swap(A, B);
    }
    return hash_combine(Inst->getOpcode(), Pred, X, Y, A, B);
  }

  // Casts also hash the destination type, since it is part of the value.
  if (CastInst *CI = dyn_cast<CastInst>(Inst))
    return hash_combine(CI->getOpcode(), CI->getType(), CI->getOperand(0));

  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(Inst))
    return hash_combine(EVI->getOpcode(), EVI->getOperand(0),
                        hash_combine_range(EVI->idx_begin(), EVI->idx_end()));

  if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(Inst))
    return hash_combine(IVI->getOpcode(), IVI->getOperand(0),
                        IVI->getOperand(1),
                        hash_combine_range(IVI->idx_begin(), IVI->idx_end()));

  // Everything else must be one of the kinds SimpleValue::canHandle accepts;
  // these are hashed generically over opcode + all operands below.
  assert((isa<CallInst>(Inst) || isa<GetElementPtrInst>(Inst) ||
          isa<ExtractElementInst>(Inst) || isa<InsertElementInst>(Inst) ||
          isa<ShuffleVectorInst>(Inst) || isa<UnaryOperator>(Inst)) &&
         "Invalid/unknown instruction");

  // Mix in the opcode.
  return hash_combine(
      Inst->getOpcode(),
      hash_combine_range(Inst->value_op_begin(), Inst->value_op_end()));
}
|
|
|
|
|
[EarlyCSE] Ensure equal keys have the same hash value
Summary:
The logic in EarlyCSE that looks through 'not' operations in the
predicate recognizes e.g. that `select (not (cmp sgt X, Y)), X, Y` is
equivalent to `select (cmp sgt X, Y), Y, X`. Without this change,
however, only the latter is recognized as a form of `smin X, Y`, so the
two expressions receive different hash codes. This leads to missed
optimization opportunities when the quadratic probing for the two hashes
doesn't happen to collide, and assertion failures when probing doesn't
collide on insertion but does collide on a subsequent table grow
operation.
This change inverts the order of some of the pattern matching, checking
first for the optional `not` and then for the min/max/abs patterns, so
that e.g. both expressions above are recognized as a form of `smin X, Y`.
It also adds an assertion to isEqual verifying that it implies equal
hash codes; this fires when there's a collision during insertion, not
just grow, and so will make it easier to notice if these functions fall
out of sync again. A new flag --earlycse-debug-hash is added which can
be used when changing the hash function; it forces hash collisions so
that any pair of values inserted which compare as equal but hash
differently will be caught by the isEqual assertion.
Reviewers: spatel, nikic
Reviewed By: spatel, nikic
Subscribers: lebedev.ri, arsenm, craig.topper, efriedma, hiraditya, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D62644
llvm-svn: 363274
2019-06-13 23:24:11 +08:00
|
|
|
/// DenseMap hash hook: defers to getHashValueImpl, except in asserts builds
/// where -earlycse-debug-hash collapses every hash so the isEqual
/// consistency assertion is exercised on all key pairs.
unsigned DenseMapInfo<SimpleValue>::getHashValue(SimpleValue Val) {
#ifndef NDEBUG
  // If -earlycse-debug-hash was specified, return a constant -- this
  // will force all hashing to collide, so we'll exhaustively search
  // the table for a match, and the assertion in isEqual will fire if
  // there's a bug causing equal keys to hash differently.
  if (EarlyCSEDebugHash)
    return 0;
#endif
  return getHashValueImpl(Val);
}
|
|
|
|
|
|
|
|
// Return true if the two SimpleValue keys denote equivalent instructions.
// NOTE: this must be kept in sync with getHashValueImpl above -- any two
// keys reported equal here are required to produce identical hash values
// (DenseMapInfo<SimpleValue>::isEqual asserts exactly that invariant).
static bool isEqualImpl(SimpleValue LHS, SimpleValue RHS) {
  Instruction *LHSI = LHS.Inst, *RHSI = RHS.Inst;

  // DenseMap's reserved empty/tombstone sentinels only match themselves.
  if (LHS.isSentinel() || RHS.isSentinel())
    return LHSI == RHSI;

  if (LHSI->getOpcode() != RHSI->getOpcode())
    return false;
  if (LHSI->isIdenticalToWhenDefined(RHSI))
    return true;

  // If we're not strictly identical, we still might be a commutable instruction
  if (BinaryOperator *LHSBinOp = dyn_cast<BinaryOperator>(LHSI)) {
    if (!LHSBinOp->isCommutative())
      return false;

    assert(isa<BinaryOperator>(RHSI) &&
           "same opcode, but different instruction type?");
    BinaryOperator *RHSBinOp = cast<BinaryOperator>(RHSI);

    // Commuted equality: same operands, swapped order.
    return LHSBinOp->getOperand(0) == RHSBinOp->getOperand(1) &&
           LHSBinOp->getOperand(1) == RHSBinOp->getOperand(0);
  }
  if (CmpInst *LHSCmp = dyn_cast<CmpInst>(LHSI)) {
    assert(isa<CmpInst>(RHSI) &&
           "same opcode, but different instruction type?");
    CmpInst *RHSCmp = cast<CmpInst>(RHSI);
    // Commuted equality: swapped operands require the swapped predicate.
    return LHSCmp->getOperand(0) == RHSCmp->getOperand(1) &&
           LHSCmp->getOperand(1) == RHSCmp->getOperand(0) &&
           LHSCmp->getSwappedPredicate() == RHSCmp->getPredicate();
  }

  // Min/max/abs can occur with commuted operands, non-canonical predicates,
  // and/or non-canonical operands.
  // Selects can be non-trivially equivalent via inverted conditions and swaps.
  SelectPatternFlavor LSPF, RSPF;
  Value *CondL, *CondR, *LHSA, *RHSA, *LHSB, *RHSB;
  if (matchSelectWithOptionalNotCond(LHSI, CondL, LHSA, LHSB, LSPF) &&
      matchSelectWithOptionalNotCond(RHSI, CondR, RHSA, RHSB, RSPF)) {
    if (LSPF == RSPF) {
      // TODO: We should also detect FP min/max.
      if (LSPF == SPF_SMIN || LSPF == SPF_SMAX ||
          LSPF == SPF_UMIN || LSPF == SPF_UMAX)
        // Integer min/max are fully commutative in their A/B operands.
        return ((LHSA == RHSA && LHSB == RHSB) ||
                (LHSA == RHSB && LHSB == RHSA));

      if (LSPF == SPF_ABS || LSPF == SPF_NABS) {
        // Abs results are placed in a defined order by matchSelectPattern.
        return LHSA == RHSA && LHSB == RHSB;
      }

      // select Cond, A, B <--> select not(Cond), B, A
      // (the matcher already canonicalized away an optional 'not', so a
      // direct operand comparison suffices here).
      if (CondL == CondR && LHSA == RHSA && LHSB == RHSB)
        return true;
    }

    // If the true/false operands are swapped and the conditions are compares
    // with inverted predicates, the selects are equal:
    // select (icmp Pred, X, Y), A, B <--> select (icmp InvPred, X, Y), B, A
    //
    // This also handles patterns with a double-negation in the sense of not +
    // inverse, because we looked through a 'not' in the matching function and
    // swapped A/B:
    // select (cmp Pred, X, Y), A, B <--> select (not (cmp InvPred, X, Y)), B, A
    //
    // This intentionally does NOT handle patterns with a double-negation in
    // the sense of not + not, because doing so could result in values that
    // compare as equal yet hash differently in the min/max/abs cases like:
    // select (cmp slt, X, Y), X, Y <--> select (not (not (cmp slt, X, Y))), X, Y
    //   ^ hashes as min                  ^ would not hash as min
    // In the context of the EarlyCSE pass, however, such cases never reach
    // this code, as we simplify the double-negation before hashing the second
    // select (and so still succeed at CSEing them).
    if (LHSA == RHSB && LHSB == RHSA) {
      CmpInst::Predicate PredL, PredR;
      Value *X, *Y;
      if (match(CondL, m_Cmp(PredL, m_Value(X), m_Value(Y))) &&
          match(CondR, m_Cmp(PredR, m_Specific(X), m_Specific(Y))) &&
          CmpInst::getInversePredicate(PredL) == PredR)
        return true;
    }
  }

  return false;
}
|
|
|
|
|
[EarlyCSE] Ensure equal keys have the same hash value
Summary:
The logic in EarlyCSE that looks through 'not' operations in the
predicate recognizes e.g. that `select (not (cmp sgt X, Y)), X, Y` is
equivalent to `select (cmp sgt X, Y), Y, X`. Without this change,
however, only the latter is recognized as a form of `smin X, Y`, so the
two expressions receive different hash codes. This leads to missed
optimization opportunities when the quadratic probing for the two hashes
doesn't happen to collide, and assertion failures when probing doesn't
collide on insertion but does collide on a subsequent table grow
operation.
This change inverts the order of some of the pattern matching, checking
first for the optional `not` and then for the min/max/abs patterns, so
that e.g. both expressions above are recognized as a form of `smin X, Y`.
It also adds an assertion to isEqual verifying that it implies equal
hash codes; this fires when there's a collision during insertion, not
just grow, and so will make it easier to notice if these functions fall
out of sync again. A new flag --earlycse-debug-hash is added which can
be used when changing the hash function; it forces hash collisions so
that any pair of values inserted which compare as equal but hash
differently will be caught by the isEqual assertion.
Reviewers: spatel, nikic
Reviewed By: spatel, nikic
Subscribers: lebedev.ri, arsenm, craig.topper, efriedma, hiraditya, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D62644
llvm-svn: 363274
2019-06-13 23:24:11 +08:00
|
|
|
bool DenseMapInfo<SimpleValue>::isEqual(SimpleValue LHS, SimpleValue RHS) {
  const bool Equal = isEqualImpl(LHS, RHS);
  // DenseMap requires that equal keys hash identically. The comparison above
  // is nontrivial (commutativity, inverted predicates, ...), so check the
  // invariant here; sentinel keys are exempt because they are never hashed.
  assert(!Equal || (LHS.isSentinel() && LHS.Inst == RHS.Inst) ||
         getHashValueImpl(LHS) == getHashValueImpl(RHS));
  return Equal;
}
|
|
|
|
|
2011-01-03 11:18:43 +08:00
|
|
|
//===----------------------------------------------------------------------===//
|
2012-07-24 18:51:42 +08:00
|
|
|
// CallValue
|
2011-01-03 11:18:43 +08:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
namespace {
|
2017-10-14 05:17:07 +08:00
|
|
|
|
2018-05-01 23:54:18 +08:00
|
|
|
/// Struct representing the available call values in the scoped hash
|
2015-01-24 19:44:32 +08:00
|
|
|
/// table.
|
2015-01-24 19:33:55 +08:00
|
|
|
struct CallValue {
|
|
|
|
Instruction *Inst;
|
2012-07-24 18:51:42 +08:00
|
|
|
|
2015-01-24 19:33:55 +08:00
|
|
|
CallValue(Instruction *I) : Inst(I) {
|
|
|
|
assert((isSentinel() || canHandle(I)) && "Inst can't be handled!");
|
|
|
|
}
|
2012-07-24 18:51:42 +08:00
|
|
|
|
2015-01-24 19:33:55 +08:00
|
|
|
bool isSentinel() const {
|
|
|
|
return Inst == DenseMapInfo<Instruction *>::getEmptyKey() ||
|
|
|
|
Inst == DenseMapInfo<Instruction *>::getTombstoneKey();
|
|
|
|
}
|
2012-07-24 18:51:42 +08:00
|
|
|
|
2015-01-24 19:33:55 +08:00
|
|
|
static bool canHandle(Instruction *Inst) {
|
|
|
|
// Don't value number anything that returns void.
|
|
|
|
if (Inst->getType()->isVoidTy())
|
|
|
|
return false;
|
2012-07-24 18:51:42 +08:00
|
|
|
|
2015-01-24 19:33:55 +08:00
|
|
|
CallInst *CI = dyn_cast<CallInst>(Inst);
|
|
|
|
if (!CI || !CI->onlyReadsMemory())
|
|
|
|
return false;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
};
|
2017-10-14 05:17:07 +08:00
|
|
|
|
|
|
|
} // end anonymous namespace
|
2011-01-03 11:18:43 +08:00
|
|
|
|
|
|
|
namespace llvm {

/// DenseMap traits for CallValue keys. The reserved empty and tombstone
/// keys are delegated to the underlying Instruction* traits; hashing and
/// equality are defined out-of-line below.
template <> struct DenseMapInfo<CallValue> {
  static inline CallValue getEmptyKey() {
    return DenseMapInfo<Instruction *>::getEmptyKey();
  }

  static inline CallValue getTombstoneKey() {
    return DenseMapInfo<Instruction *>::getTombstoneKey();
  }

  static unsigned getHashValue(CallValue Val);
  static bool isEqual(CallValue LHS, CallValue RHS);
};

} // end namespace llvm
|
2015-01-24 19:33:55 +08:00
|
|
|
|
2011-01-03 11:41:27 +08:00
|
|
|
unsigned DenseMapInfo<CallValue>::getHashValue(CallValue Val) {
  // Combine the opcode with a pointer-identity hash of every operand.
  Instruction *I = Val.Inst;
  return hash_combine(I->getOpcode(),
                      hash_combine_range(I->value_op_begin(),
                                         I->value_op_end()));
}
|
|
|
|
|
2011-01-03 11:41:27 +08:00
|
|
|
bool DenseMapInfo<CallValue>::isEqual(CallValue LHS, CallValue RHS) {
  Instruction *A = LHS.Inst, *B = RHS.Inst;
  // The reserved sentinel keys only ever compare equal to themselves.
  if (LHS.isSentinel() || RHS.isSentinel())
    return A == B;
  return A->isIdenticalTo(B);
}
|
|
|
|
|
2011-01-03 10:20:48 +08:00
|
|
|
//===----------------------------------------------------------------------===//
|
2015-01-27 09:34:14 +08:00
|
|
|
// EarlyCSE implementation
|
2011-01-03 10:20:48 +08:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2011-01-03 07:04:14 +08:00
|
|
|
namespace {
|
2017-10-14 05:17:07 +08:00
|
|
|
|
2018-05-01 23:54:18 +08:00
|
|
|
/// A simple and fast domtree-based CSE pass.
|
2015-01-24 19:44:32 +08:00
|
|
|
///
|
|
|
|
/// This pass does a simple depth-first walk over the dominator tree,
|
|
|
|
/// eliminating trivially redundant instructions and using instsimplify to
|
|
|
|
/// canonicalize things as it goes. It is intended to be fast and catch obvious
|
|
|
|
/// cases so that instcombine and other passes are more effective. It is
|
|
|
|
/// expected that a later pass of GVN will catch the interesting/hard cases.
|
2015-01-27 09:34:14 +08:00
|
|
|
class EarlyCSE {
|
2011-01-03 05:47:05 +08:00
|
|
|
public:
|
2015-01-27 09:34:14 +08:00
|
|
|
const TargetLibraryInfo &TLI;
|
|
|
|
const TargetTransformInfo &TTI;
|
|
|
|
DominatorTree &DT;
|
2016-12-19 16:22:17 +08:00
|
|
|
AssumptionCache &AC;
|
2017-04-29 03:55:38 +08:00
|
|
|
const SimplifyQuery SQ;
|
2016-09-01 03:24:10 +08:00
|
|
|
MemorySSA *MSSA;
|
2017-02-23 06:19:55 +08:00
|
|
|
std::unique_ptr<MemorySSAUpdater> MSSAUpdater;
|
2017-10-14 05:17:07 +08:00
|
|
|
|
|
|
|
using AllocatorTy =
|
|
|
|
RecyclingAllocator<BumpPtrAllocator,
|
|
|
|
ScopedHashTableVal<SimpleValue, Value *>>;
|
|
|
|
using ScopedHTType =
|
|
|
|
ScopedHashTable<SimpleValue, Value *, DenseMapInfo<SimpleValue>,
|
|
|
|
AllocatorTy>;
|
2012-07-24 18:51:42 +08:00
|
|
|
|
2018-05-01 23:54:18 +08:00
|
|
|
/// A scoped hash table of the current values of all of our simple
|
2015-01-24 19:44:32 +08:00
|
|
|
/// scalar expressions.
|
|
|
|
///
|
|
|
|
/// As we walk down the domtree, we look to see if instructions are in this:
|
|
|
|
/// if so, we replace them with what we find, otherwise we insert them so
|
|
|
|
/// that dominated values can succeed in their lookup.
|
2015-01-27 09:34:14 +08:00
|
|
|
ScopedHTType AvailableValues;
|
2012-07-24 18:51:42 +08:00
|
|
|
|
2018-06-14 13:41:49 +08:00
|
|
|
/// A scoped hash table of the current values of previously encountered
|
|
|
|
/// memory locations.
|
2015-01-24 19:44:32 +08:00
|
|
|
///
|
2015-12-09 05:45:41 +08:00
|
|
|
/// This allows us to get efficient access to dominating loads or stores when
|
|
|
|
/// we have a fully redundant load. In addition to the most recent load, we
|
|
|
|
/// keep track of a generation count of the read, which is compared against
|
|
|
|
/// the current generation count. The current generation count is incremented
|
2015-01-24 19:44:32 +08:00
|
|
|
/// after every possibly writing memory operation, which ensures that we only
|
2015-12-09 05:45:41 +08:00
|
|
|
/// CSE loads with other loads that have no intervening store. Ordering
|
|
|
|
/// events (such as fences or atomic instructions) increment the generation
|
|
|
|
/// count as well; essentially, we model these as writes to all possible
|
|
|
|
/// locations. Note that atomic and/or volatile loads and stores can be
|
|
|
|
/// present the table; it is the responsibility of the consumer to inspect
|
|
|
|
/// the atomicity/volatility if needed.
|
2015-10-07 15:41:29 +08:00
|
|
|
  struct LoadValue {
    // Defining instruction that made this memory value available, or nullptr
    // for a default-constructed (empty) entry.
    Instruction *DefInst = nullptr;
    // Generation count at which DefInst was recorded; compared against the
    // current generation to decide whether the value is still valid (see the
    // table comment above).
    unsigned Generation = 0;
    // Target-specific matching id; presumably set from TTI's memory-intrinsic
    // info, with -1 meaning "plain load/store" -- TODO confirm against users.
    int MatchingId = -1;
    // Whether the defining access was atomic.
    bool IsAtomic = false;

    LoadValue() = default;
    LoadValue(Instruction *Inst, unsigned Generation, unsigned MatchingId,
              bool IsAtomic)
        : DefInst(Inst), Generation(Generation), MatchingId(MatchingId),
          IsAtomic(IsAtomic) {}
  };
|
2017-10-14 05:17:07 +08:00
|
|
|
|
|
|
|
using LoadMapAllocator =
|
|
|
|
RecyclingAllocator<BumpPtrAllocator,
|
|
|
|
ScopedHashTableVal<Value *, LoadValue>>;
|
|
|
|
using LoadHTType =
|
|
|
|
ScopedHashTable<Value *, LoadValue, DenseMapInfo<Value *>,
|
|
|
|
LoadMapAllocator>;
|
|
|
|
|
2015-01-27 09:34:14 +08:00
|
|
|
LoadHTType AvailableLoads;
|
2018-07-31 03:41:25 +08:00
|
|
|
|
2018-03-15 05:35:06 +08:00
|
|
|
// A scoped hash table mapping memory locations (represented as typed
|
|
|
|
// addresses) to generation numbers at which that memory location became
|
|
|
|
// (henceforth indefinitely) invariant.
|
|
|
|
using InvariantMapAllocator =
|
|
|
|
RecyclingAllocator<BumpPtrAllocator,
|
|
|
|
ScopedHashTableVal<MemoryLocation, unsigned>>;
|
|
|
|
using InvariantHTType =
|
|
|
|
ScopedHashTable<MemoryLocation, unsigned, DenseMapInfo<MemoryLocation>,
|
|
|
|
InvariantMapAllocator>;
|
|
|
|
InvariantHTType AvailableInvariants;
|
2012-07-24 18:51:42 +08:00
|
|
|
|
2018-05-01 23:54:18 +08:00
|
|
|
/// A scoped hash table of the current values of read-only call
|
2015-01-24 19:44:32 +08:00
|
|
|
/// values.
|
|
|
|
///
|
|
|
|
/// It uses the same generation count as loads.
|
2017-10-14 05:17:07 +08:00
|
|
|
using CallHTType =
|
|
|
|
ScopedHashTable<CallValue, std::pair<Instruction *, unsigned>>;
|
2015-01-27 09:34:14 +08:00
|
|
|
CallHTType AvailableCalls;
|
2012-07-24 18:51:42 +08:00
|
|
|
|
2018-05-01 23:54:18 +08:00
|
|
|
/// This is the current generation of the memory value.
|
2017-10-14 05:17:07 +08:00
|
|
|
unsigned CurrentGeneration = 0;
|
2012-07-24 18:51:42 +08:00
|
|
|
|
2018-05-01 23:54:18 +08:00
|
|
|
/// Set up the EarlyCSE runner for a particular function.
|
2017-04-29 03:55:38 +08:00
|
|
|
  EarlyCSE(const DataLayout &DL, const TargetLibraryInfo &TLI,
           const TargetTransformInfo &TTI, DominatorTree &DT,
           AssumptionCache &AC, MemorySSA *MSSA)
      // MSSA is taken by pointer (unlike the other analyses), so it is
      // presumably optional; NOTE(review): assumes MemorySSAUpdater tolerates
      // a null MemorySSA* -- confirm.
      : TLI(TLI), TTI(TTI), DT(DT), AC(AC), SQ(DL, &TLI, &DT, &AC), MSSA(MSSA),
        MSSAUpdater(std::make_unique<MemorySSAUpdater>(MSSA)) {}
|
2011-01-03 05:47:05 +08:00
|
|
|
|
2015-01-27 09:34:14 +08:00
|
|
|
bool run();
|
2011-01-03 05:47:05 +08:00
|
|
|
|
|
|
|
private:
|
2019-02-16 06:47:54 +08:00
|
|
|
unsigned ClobberCounter = 0;
|
2015-01-24 19:44:32 +08:00
|
|
|
// Almost a POD, but needs to call the constructors for the scoped hash
|
|
|
|
// tables so that a new scope gets pushed on. These are RAII so that the
|
|
|
|
// scope gets popped when the NodeScope is destroyed.
|
2012-02-01 07:14:41 +08:00
|
|
|
  class NodeScope {
  public:
    // Constructing a NodeScope pushes a fresh scope onto each of the four
    // scoped hash tables; destroying it pops those scopes again, discarding
    // every entry inserted while the scope was live (RAII).
    NodeScope(ScopedHTType &AvailableValues, LoadHTType &AvailableLoads,
              InvariantHTType &AvailableInvariants, CallHTType &AvailableCalls)
        : Scope(AvailableValues), LoadScope(AvailableLoads),
          InvariantScope(AvailableInvariants), CallScope(AvailableCalls) {}
    // Non-copyable: a copy would pop the underlying table scopes twice.
    NodeScope(const NodeScope &) = delete;
    NodeScope &operator=(const NodeScope &) = delete;

  private:
    // One RAII scope handle per table; declaration order fixes the
    // push order (and the reverse destruction/pop order).
    ScopedHTType::ScopeTy Scope;
    LoadHTType::ScopeTy LoadScope;
    InvariantHTType::ScopeTy InvariantScope;
    CallHTType::ScopeTy CallScope;
  };
|
|
|
|
|
2015-01-24 19:44:32 +08:00
|
|
|
// Contains all the needed information to create a stack for doing a depth
|
2016-09-07 09:49:41 +08:00
|
|
|
// first traversal of the tree. This includes scopes for values, loads, and
|
2015-01-24 19:44:32 +08:00
|
|
|
// calls as well as the generation. There is a child iterator so that the
|
2016-04-27 09:44:31 +08:00
|
|
|
// children do not need to be store separately.
|
2012-02-01 07:14:41 +08:00
|
|
|
class StackNode {
|
2015-01-24 19:33:55 +08:00
|
|
|
public:
|
2015-01-27 09:34:14 +08:00
|
|
|
StackNode(ScopedHTType &AvailableValues, LoadHTType &AvailableLoads,
|
2018-03-15 05:35:06 +08:00
|
|
|
InvariantHTType &AvailableInvariants, CallHTType &AvailableCalls,
|
|
|
|
unsigned cg, DomTreeNode *n, DomTreeNode::iterator child,
|
|
|
|
DomTreeNode::iterator end)
|
2015-01-24 19:33:55 +08:00
|
|
|
: CurrentGeneration(cg), ChildGeneration(cg), Node(n), ChildIter(child),
|
2018-03-15 05:35:06 +08:00
|
|
|
EndIter(end),
|
|
|
|
Scopes(AvailableValues, AvailableLoads, AvailableInvariants,
|
|
|
|
AvailableCalls)
|
2017-10-14 05:17:07 +08:00
|
|
|
{}
|
|
|
|
StackNode(const StackNode &) = delete;
|
|
|
|
StackNode &operator=(const StackNode &) = delete;
|
2012-02-01 07:14:41 +08:00
|
|
|
|
|
|
|
// Accessors.
|
|
|
|
unsigned currentGeneration() { return CurrentGeneration; }
|
|
|
|
unsigned childGeneration() { return ChildGeneration; }
|
|
|
|
void childGeneration(unsigned generation) { ChildGeneration = generation; }
|
|
|
|
DomTreeNode *node() { return Node; }
|
|
|
|
DomTreeNode::iterator childIter() { return ChildIter; }
|
2017-10-14 05:17:07 +08:00
|
|
|
|
2012-02-01 07:14:41 +08:00
|
|
|
DomTreeNode *nextChild() {
|
|
|
|
DomTreeNode *child = *ChildIter;
|
|
|
|
++ChildIter;
|
|
|
|
return child;
|
|
|
|
}
|
2017-10-14 05:17:07 +08:00
|
|
|
|
2012-02-01 07:14:41 +08:00
|
|
|
DomTreeNode::iterator end() { return EndIter; }
|
|
|
|
bool isProcessed() { return Processed; }
|
|
|
|
void process() { Processed = true; }
|
|
|
|
|
2015-01-24 19:33:55 +08:00
|
|
|
private:
|
2012-02-01 07:14:41 +08:00
|
|
|
unsigned CurrentGeneration;
|
|
|
|
unsigned ChildGeneration;
|
|
|
|
DomTreeNode *Node;
|
|
|
|
DomTreeNode::iterator ChildIter;
|
|
|
|
DomTreeNode::iterator EndIter;
|
|
|
|
NodeScope Scopes;
|
2017-10-14 05:17:07 +08:00
|
|
|
bool Processed = false;
|
2012-02-01 07:14:41 +08:00
|
|
|
};
|
|
|
|
|
2018-05-01 23:54:18 +08:00
|
|
|
/// Wrapper class to handle memory instructions, including loads,
|
2015-01-27 06:51:15 +08:00
|
|
|
/// stores and intrinsic loads and stores defined by the target.
|
|
|
|
class ParseMemoryInst {
|
|
|
|
public:
|
2015-01-27 09:34:14 +08:00
|
|
|
ParseMemoryInst(Instruction *Inst, const TargetTransformInfo &TTI)
|
2017-10-14 05:17:07 +08:00
|
|
|
: Inst(Inst) {
|
Reapply 254950 w/fix
254950 ended up being not NFC. The previous code was overriding the flags for whether an instruction read or wrote memory using the target specific flags returned via TTI. I'd missed this in my refactoring. Since I mistakenly built only x86 and didn't notice the number of unsupported tests, I didn't catch that before the original checkin.
This raises an interesting issue though. Given we have function attributes (i.e. readonly, readnone, argmemonly) which describe the aliasing of intrinsics, why does TTI have this information overriding the instruction definition at all? I see no reason for this, but decided to preserve existing behavior for the moment. The root issue might be that we don't have a "writeonly" attribute.
Original commit message:
[EarlyCSE] Simplify and invert ParseMemoryInst [NFCI]
Restructure ParseMemoryInst - which was introduced to abstract over target specific load and stores instructions - to just query the underlying instructions. In theory, this could be slightly slower than caching the results, but in practice, it's very unlikely to be measurable.
The simple query scheme makes it far easier to understand, and much easier to extend with new queries. Given I'm about to need to add new query types, doing the cleanup first seemed worthwhile.
Do we still believe the target specific intrinsic handling is worthwhile in EarlyCSE? It adds quite a bit of complexity and makes the code harder to read. Being able to delete the abstraction entirely would be wonderful.
llvm-svn: 254957
2015-12-08 06:41:23 +08:00
|
|
|
if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
|
2017-03-25 02:56:43 +08:00
|
|
|
if (TTI.getTgtMemIntrinsic(II, Info))
|
Reapply 254950 w/fix
254950 ended up being not NFC. The previous code was overriding the flags for whether an instruction read or wrote memory using the target specific flags returned via TTI. I'd missed this in my refactoring. Since I mistakenly built only x86 and didn't notice the number of unsupported tests, I didn't catch that before the original checkin.
This raises an interesting issue though. Given we have function attributes (i.e. readonly, readnone, argmemonly) which describe the aliasing of intrinsics, why does TTI have this information overriding the instruction definition at all? I see no reason for this, but decided to preserve existing behavior for the moment. The root issue might be that we don't have a "writeonly" attribute.
Original commit message:
[EarlyCSE] Simplify and invert ParseMemoryInst [NFCI]
Restructure ParseMemoryInst - which was introduced to abstract over target specific load and stores instructions - to just query the underlying instructions. In theory, this could be slightly slower than caching the results, but in practice, it's very unlikely to be measurable.
The simple query scheme makes it far easier to understand, and much easier to extend with new queries. Given I'm about to need to add new query types, doing the cleanup first seemed worthwhile.
Do we still believe the target specific intrinsic handling is worthwhile in EarlyCSE? It adds quite a bit of complexity and makes the code harder to read. Being able to delete the abstraction entirely would be wonderful.
llvm-svn: 254957
2015-12-08 06:41:23 +08:00
|
|
|
IsTargetMemInst = true;
|
|
|
|
}
|
2017-10-14 05:17:07 +08:00
|
|
|
|
Reapply 254950 w/fix
254950 ended up being not NFC. The previous code was overriding the flags for whether an instruction read or wrote memory using the target specific flags returned via TTI. I'd missed this in my refactoring. Since I mistakenly built only x86 and didn't notice the number of unsupported tests, I didn't catch that before the original checkin.
This raises an interesting issue though. Given we have function attributes (i.e. readonly, readnone, argmemonly) which describe the aliasing of intrinsics, why does TTI have this information overriding the instruction definition at all? I see no reason for this, but decided to preserve existing behavior for the moment. The root issue might be that we don't have a "writeonly" attribute.
Original commit message:
[EarlyCSE] Simplify and invert ParseMemoryInst [NFCI]
Restructure ParseMemoryInst - which was introduced to abstract over target specific load and stores instructions - to just query the underlying instructions. In theory, this could be slightly slower than caching the results, but in practice, it's very unlikely to be measurable.
The simple query scheme makes it far easier to understand, and much easier to extend with new queries. Given I'm about to need to add new query types, doing the cleanup first seemed worthwhile.
Do we still believe the target specific intrinsic handling is worthwhile in EarlyCSE? It adds quite a bit of complexity and makes the code harder to read. Being able to delete the abstraction entirely would be wonderful.
llvm-svn: 254957
2015-12-08 06:41:23 +08:00
|
|
|
bool isLoad() const {
|
|
|
|
if (IsTargetMemInst) return Info.ReadMem;
|
|
|
|
return isa<LoadInst>(Inst);
|
|
|
|
}
|
2017-10-14 05:17:07 +08:00
|
|
|
|
Reapply 254950 w/fix
254950 ended up being not NFC. The previous code was overriding the flags for whether an instruction read or wrote memory using the target specific flags returned via TTI. I'd missed this in my refactoring. Since I mistakenly built only x86 and didn't notice the number of unsupported tests, I didn't catch that before the original checkin.
This raises an interesting issue though. Given we have function attributes (i.e. readonly, readnone, argmemonly) which describe the aliasing of intrinsics, why does TTI have this information overriding the instruction definition at all? I see no reason for this, but decided to preserve existing behavior for the moment. The root issue might be that we don't have a "writeonly" attribute.
Original commit message:
[EarlyCSE] Simplify and invert ParseMemoryInst [NFCI]
Restructure ParseMemoryInst - which was introduced to abstract over target specific load and stores instructions - to just query the underlying instructions. In theory, this could be slightly slower than caching the results, but in practice, it's very unlikely to be measurable.
The simple query scheme makes it far easier to understand, and much easier to extend with new queries. Given I'm about to need to add new query types, doing the cleanup first seemed worthwhile.
Do we still believe the target specific intrinsic handling is worthwhile in EarlyCSE? It adds quite a bit of complexity and makes the code harder to read. Being able to delete the abstraction entirely would be wonderful.
llvm-svn: 254957
2015-12-08 06:41:23 +08:00
|
|
|
bool isStore() const {
|
|
|
|
if (IsTargetMemInst) return Info.WriteMem;
|
|
|
|
return isa<StoreInst>(Inst);
|
|
|
|
}
|
2017-10-14 05:17:07 +08:00
|
|
|
|
2015-12-09 05:45:41 +08:00
|
|
|
bool isAtomic() const {
|
2017-03-25 02:56:43 +08:00
|
|
|
if (IsTargetMemInst)
|
|
|
|
return Info.Ordering != AtomicOrdering::NotAtomic;
|
2015-12-09 05:45:41 +08:00
|
|
|
return Inst->isAtomic();
|
|
|
|
}
|
2017-10-14 05:17:07 +08:00
|
|
|
|
2015-12-09 05:45:41 +08:00
|
|
|
bool isUnordered() const {
|
2017-03-25 02:56:43 +08:00
|
|
|
if (IsTargetMemInst)
|
|
|
|
return Info.isUnordered();
|
|
|
|
|
2015-12-09 05:45:41 +08:00
|
|
|
if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
|
|
|
|
return LI->isUnordered();
|
|
|
|
} else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
|
|
|
|
return SI->isUnordered();
|
|
|
|
}
|
|
|
|
// Conservative answer
|
|
|
|
return !Inst->isAtomic();
|
|
|
|
}
|
|
|
|
|
|
|
|
bool isVolatile() const {
|
2017-03-25 02:56:43 +08:00
|
|
|
if (IsTargetMemInst)
|
|
|
|
return Info.IsVolatile;
|
|
|
|
|
2015-12-09 05:45:41 +08:00
|
|
|
if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
|
|
|
|
return LI->isVolatile();
|
|
|
|
} else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
|
|
|
|
return SI->isVolatile();
|
|
|
|
}
|
|
|
|
// Conservative answer
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2016-06-17 04:47:57 +08:00
|
|
|
bool isInvariantLoad() const {
|
|
|
|
if (auto *LI = dyn_cast<LoadInst>(Inst))
|
2019-09-05 01:28:48 +08:00
|
|
|
return LI->hasMetadata(LLVMContext::MD_invariant_load);
|
2016-06-17 04:47:57 +08:00
|
|
|
return false;
|
|
|
|
}
|
2016-02-18 18:09:20 +08:00
|
|
|
|
2015-10-06 21:35:30 +08:00
|
|
|
bool isMatchingMemLoc(const ParseMemoryInst &Inst) const {
|
Reapply 254950 w/fix
254950 ended up being not NFC. The previous code was overriding the flags for whether an instruction read or wrote memory using the target specific flags returned via TTI. I'd missed this in my refactoring. Since I mistakenly built only x86 and didn't notice the number of unsupported tests, I didn't catch that before the original checkin.
This raises an interesting issue though. Given we have function attributes (i.e. readonly, readnone, argmemonly) which describe the aliasing of intrinsics, why does TTI have this information overriding the instruction definition at all? I see no reason for this, but decided to preserve existing behavior for the moment. The root issue might be that we don't have a "writeonly" attribute.
Original commit message:
[EarlyCSE] Simplify and invert ParseMemoryInst [NFCI]
Restructure ParseMemoryInst - which was introduced to abstract over target specific load and stores instructions - to just query the underlying instructions. In theory, this could be slightly slower than caching the results, but in practice, it's very unlikely to be measurable.
The simple query scheme makes it far easier to understand, and much easier to extend with new queries. Given I'm about to need to add new query types, doing the cleanup first seemed worthwhile.
Do we still believe the target specific intrinsic handling is worthwhile in EarlyCSE? It adds quite a bit of complexity and makes the code harder to read. Being able to delete the abstraction entirely would be wonderful.
llvm-svn: 254957
2015-12-08 06:41:23 +08:00
|
|
|
return (getPointerOperand() == Inst.getPointerOperand() &&
|
|
|
|
getMatchingId() == Inst.getMatchingId());
|
2015-01-27 06:51:15 +08:00
|
|
|
}
|
2017-10-14 05:17:07 +08:00
|
|
|
|
Reapply 254950 w/fix
254950 ended up being not NFC. The previous code was overriding the flags for whether an instruction read or wrote memory using the target specific flags returned via TTI. I'd missed this in my refactoring. Since I mistakenly built only x86 and didn't notice the number of unsupported tests, I didn't catch that before the original checkin.
This raises an interesting issue though. Given we have function attributes (i.e. readonly, readnone, argmemonly) which describe the aliasing of intrinsics, why does TTI have this information overriding the instruction definition at all? I see no reason for this, but decided to preserve existing behavior for the moment. The root issue might be that we don't have a "writeonly" attribute.
Original commit message:
[EarlyCSE] Simplify and invert ParseMemoryInst [NFCI]
Restructure ParseMemoryInst - which was introduced to abstract over target specific load and stores instructions - to just query the underlying instructions. In theory, this could be slightly slower than caching the results, but in practice, it's very unlikely to be measurable.
The simple query scheme makes it far easier to understand, and much easier to extend with new queries. Given I'm about to need to add new query types, doing the cleanup first seemed worthwhile.
Do we still believe the target specific intrinsic handling is worthwhile in EarlyCSE? It adds quite a bit of complexity and makes the code harder to read. Being able to delete the abstraction entirely would be wonderful.
llvm-svn: 254957
2015-12-08 06:41:23 +08:00
|
|
|
// An instruction is only usable for memory CSE if we could extract a
// pointer operand to key the table on.
bool isValid() const { return getPointerOperand() != nullptr; }
|
2015-01-27 06:51:15 +08:00
|
|
|
|
|
|
|
// For regular (non-intrinsic) loads/stores, this is set to -1. For
|
|
|
|
// intrinsic loads/stores, the id is retrieved from the corresponding
|
|
|
|
// field in the MemIntrinsicInfo structure. That field contains
|
|
|
|
// non-negative values only.
|
Reapply 254950 w/fix
254950 ended up being not NFC. The previous code was overriding the flags for whether an instruction read or wrote memory using the target specific flags returned via TTI. I'd missed this in my refactoring. Since I mistakenly built only x86 and didn't notice the number of unsupported tests, I didn't catch that before the original checkin.
This raises an interesting issue though. Given we have function attributes (i.e. readonly, readnone, argmemonly) which describe the aliasing of intrinsics, why does TTI have this information overriding the instruction definition at all? I see no reason for this, but decided to preserve existing behavior for the moment. The root issue might be that we don't have a "writeonly" attribute.
Original commit message:
[EarlyCSE] Simplify and invert ParseMemoryInst [NFCI]
Restructure ParseMemoryInst - which was introduced to abstract over target specific load and stores instructions - to just query the underlying instructions. In theory, this could be slightly slower than caching the results, but in practice, it's very unlikely to be measurable.
The simple query scheme makes it far easier to understand, and much easier to extend with new queries. Given I'm about to need to add new query types, doing the cleanup first seemed worthwhile.
Do we still believe the target specific intrinsic handling is worthwhile in EarlyCSE? It adds quite a bit of complexity and makes the code harder to read. Being able to delete the abstraction entirely would be wonderful.
llvm-svn: 254957
2015-12-08 06:41:23 +08:00
|
|
|
int getMatchingId() const {
|
|
|
|
if (IsTargetMemInst) return Info.MatchingId;
|
|
|
|
return -1;
|
|
|
|
}
|
2017-10-14 05:17:07 +08:00
|
|
|
|
Reapply 254950 w/fix
254950 ended up being not NFC. The previous code was overriding the flags for whether an instruction read or wrote memory using the target specific flags returned via TTI. I'd missed this in my refactoring. Since I mistakenly built only x86 and didn't notice the number of unsupported tests, I didn't catch that before the original checkin.
This raises an interesting issue though. Given we have function attributes (i.e. readonly, readnone, argmemonly) which describe the aliasing of intrinsics, why does TTI have this information overriding the instruction definition at all? I see no reason for this, but decided to preserve existing behavior for the moment. The root issue might be that we don't have a "writeonly" attribute.
Original commit message:
[EarlyCSE] Simplify and invert ParseMemoryInst [NFCI]
Restructure ParseMemoryInst - which was introduced to abstract over target specific load and stores instructions - to just query the underlying instructions. In theory, this could be slightly slower than caching the results, but in practice, it's very unlikely to be measurable.
The simple query scheme makes it far easier to understand, and much easier to extend with new queries. Given I'm about to need to add new query types, doing the cleanup first seemed worthwhile.
Do we still believe the target specific intrinsic handling is worthwhile in EarlyCSE? It adds quite a bit of complexity and makes the code harder to read. Being able to delete the abstraction entirely would be wonderful.
llvm-svn: 254957
2015-12-08 06:41:23 +08:00
|
|
|
/// Return the address this instruction accesses, or null when no pointer
/// operand can be extracted.
Value *getPointerOperand() const {
  return IsTargetMemInst ? Info.PtrVal : getLoadStorePointerOperand(Inst);
}
|
2017-10-14 05:17:07 +08:00
|
|
|
|
Reapply 254950 w/fix
254950 ended up being not NFC. The previous code was overriding the flags for whether an instruction read or wrote memory using the target specific flags returned via TTI. I'd missed this in my refactoring. Since I mistakenly built only x86 and didn't notice the number of unsupported tests, I didn't catch that before the original checkin.
This raises an interesting issue though. Given we have function attributes (i.e. readonly, readnone, argmemonly) which describe the aliasing of intrinsics, why does TTI have this information overriding the instruction definition at all? I see no reason for this, but decided to preserve existing behavior for the moment. The root issue might be that we don't have a "writeonly" attribute.
Original commit message:
[EarlyCSE] Simplify and invert ParseMemoryInst [NFCI]
Restructure ParseMemoryInst - which was introduced to abstract over target specific load and stores instructions - to just query the underlying instructions. In theory, this could be slightly slower than caching the results, but in practice, it's very unlikely to be measurable.
The simple query scheme makes it far easier to understand, and much easier to extend with new queries. Given I'm about to need to add new query types, doing the cleanup first seemed worthwhile.
Do we still believe the target specific intrinsic handling is worthwhile in EarlyCSE? It adds quite a bit of complexity and makes the code harder to read. Being able to delete the abstraction entirely would be wonderful.
llvm-svn: 254957
2015-12-08 06:41:23 +08:00
|
|
|
bool mayReadFromMemory() const {
|
|
|
|
if (IsTargetMemInst) return Info.ReadMem;
|
|
|
|
return Inst->mayReadFromMemory();
|
|
|
|
}
|
2017-10-14 05:17:07 +08:00
|
|
|
|
Reapply 254950 w/fix
254950 ended up being not NFC. The previous code was overriding the flags for whether an instruction read or wrote memory using the target specific flags returned via TTI. I'd missed this in my refactoring. Since I mistakenly built only x86 and didn't notice the number of unsupported tests, I didn't catch that before the original checkin.
This raises an interesting issue though. Given we have function attributes (i.e. readonly, readnone, argmemonly) which describe the aliasing of intrinsics, why does TTI have this information overriding the instruction definition at all? I see no reason for this, but decided to preserve existing behavior for the moment. The root issue might be that we don't have a "writeonly" attribute.
Original commit message:
[EarlyCSE] Simplify and invert ParseMemoryInst [NFCI]
Restructure ParseMemoryInst - which was introduced to abstract over target specific load and stores instructions - to just query the underlying instructions. In theory, this could be slightly slower than caching the results, but in practice, it's very unlikely to be measurable.
The simple query scheme makes it far easier to understand, and much easier to extend with new queries. Given I'm about to need to add new query types, doing the cleanup first seemed worthwhile.
Do we still believe the target specific intrinsic handling is worthwhile in EarlyCSE? It adds quite a bit of complexity and makes the code harder to read. Being able to delete the abstraction entirely would be wonderful.
llvm-svn: 254957
2015-12-08 06:41:23 +08:00
|
|
|
bool mayWriteToMemory() const {
|
|
|
|
if (IsTargetMemInst) return Info.WriteMem;
|
|
|
|
return Inst->mayWriteToMemory();
|
|
|
|
}
|
|
|
|
|
|
|
|
private:
  // True when Inst is a target-specific memory intrinsic; in that case the
  // queries above consult Info rather than the instruction itself.
  bool IsTargetMemInst = false;
  // TTI-provided description of the intrinsic's memory behavior; only
  // meaningful when IsTargetMemInst is set.
  MemIntrinsicInfo Info;
  // The instruction being abstracted over.
  Instruction *Inst;
};
|
|
|
|
|
2011-01-03 07:04:14 +08:00
|
|
|
// Process a single dominator-tree node (one basic block), CSE'ing its
// instructions; returns true if any change was made.
bool processNode(DomTreeNode *Node);

// Propagate the known truth value of a conditional branch's condition into
// the dominated successor BB reached from Pred; returns true on change.
bool handleBranchCondition(Instruction *CondInst, const BranchInst *BI,
                           const BasicBlock *BB, const BasicBlock *Pred);
|
|
|
|
|
2015-01-27 06:51:15 +08:00
|
|
|
/// Return the value \p Inst makes available: the load itself, a store's
/// stored value, or — for target memory intrinsics — a result materialized
/// through TTI, coerced to \p ExpectedType.
Value *getOrCreateResult(Value *Inst, Type *ExpectedType) const {
  if (auto *Load = dyn_cast<LoadInst>(Inst))
    return Load;
  if (auto *Store = dyn_cast<StoreInst>(Inst))
    return Store->getValueOperand();

  assert(isa<IntrinsicInst>(Inst) && "Instruction not supported");
  return TTI.getOrCreateResultFromMemIntrinsic(cast<IntrinsicInst>(Inst),
                                               ExpectedType);
}
|
2016-09-01 03:24:10 +08:00
|
|
|
|
2018-03-15 05:35:06 +08:00
|
|
|
/// Return true if the instruction is known to only operate on memory
/// provably invariant in the given "generation".
bool isOperatingOnInvariantMemAt(Instruction *I, unsigned GenAt);

// Determine whether the memory referenced by LaterInst is from the same
// heap version as EarlierInst (see the out-of-line definition for the
// full contract).
bool isSameMemGeneration(unsigned EarlierGeneration, unsigned LaterGeneration,
                         Instruction *EarlierInst, Instruction *LaterInst);
|
|
|
|
|
|
|
|
// Remove Inst's memory access from MemorySSA (when MemorySSA is in use),
// keeping the analysis consistent as the pass erases instructions.
void removeMSSA(Instruction *Inst) {
  if (!MSSA)
    return;
  // Optionally self-check the analysis before mutating it.
  if (VerifyMemorySSA)
    MSSA->verifyMemorySSA();
  // Removing a store here can leave MemorySSA in an unoptimized state by
  // creating MemoryPhis that have identical arguments and by creating
  // MemoryUses whose defining access is not an actual clobber. The phi case
  // is handled by MemorySSA when passing OptimizePhis = true to
  // removeMemoryAccess. The non-optimized MemoryUse case is lazily updated
  // by MemorySSA's getClobberingMemoryAccess.
  MSSAUpdater->removeMemoryAccess(Inst, true);
}
|
2011-01-03 05:47:05 +08:00
|
|
|
};
|
2017-10-14 05:17:07 +08:00
|
|
|
|
|
|
|
} // end anonymous namespace
|
2011-01-03 05:47:05 +08:00
|
|
|
|
2016-10-24 23:54:00 +08:00
|
|
|
/// Determine if the memory referenced by LaterInst is from the same heap
/// version as EarlierInst.
/// This is currently called in two scenarios:
///
///   load p
///   ...
///   load p
///
/// and
///
///   x = load p
///   ...
///   store x, p
///
/// in both cases we want to verify that there are no possible writes to the
/// memory referenced by p between the earlier and later instruction.
bool EarlyCSE::isSameMemGeneration(unsigned EarlierGeneration,
                                   unsigned LaterGeneration,
                                   Instruction *EarlierInst,
                                   Instruction *LaterInst) {
  // Check the simple memory generation tracking first.
  if (EarlierGeneration == LaterGeneration)
    return true;

  // Without MemorySSA there is no more precise information to consult.
  if (!MSSA)
    return false;

  // If MemorySSA has determined that one of EarlierInst or LaterInst does not
  // read/write memory, then we can safely return true here.
  // FIXME: We could be more aggressive when checking doesNotAccessMemory(),
  // onlyReadsMemory(), mayReadFromMemory(), and mayWriteToMemory() in this pass
  // by also checking the MemorySSA MemoryAccess on the instruction. Initial
  // experiments suggest this isn't worthwhile, at least for C/C++ code compiled
  // with the default optimization pipeline.
  auto *EarlierMA = MSSA->getMemoryAccess(EarlierInst);
  if (!EarlierMA)
    return true;
  auto *LaterMA = MSSA->getMemoryAccess(LaterInst);
  if (!LaterMA)
    return true;

  // Since we know LaterDef dominates LaterInst and EarlierInst dominates
  // LaterInst, if LaterDef dominates EarlierInst then it can't occur between
  // EarlierInst and LaterInst and neither can any other write that potentially
  // clobbers LaterInst.
  //
  // Walker queries can be expensive, so they are capped at
  // EarlyCSEMssaOptCap per pass run; past the cap, fall back to the cached
  // (possibly less precise) defining access.
  MemoryAccess *LaterDef;
  if (ClobberCounter < EarlyCSEMssaOptCap) {
    LaterDef = MSSA->getWalker()->getClobberingMemoryAccess(LaterInst);
    ClobberCounter++;
  } else
    LaterDef = LaterMA->getDefiningAccess();

  return MSSA->dominates(LaterDef, EarlierMA);
}
|
|
|
|
|
2018-03-15 05:35:06 +08:00
|
|
|
/// Return true if instruction \p I is known to only touch memory that is
/// provably invariant at generation \p GenAt.
bool EarlyCSE::isOperatingOnInvariantMemAt(Instruction *I, unsigned GenAt) {
  // A location loaded from with an invariant_load is assumed to *never* change
  // within the visible scope of the compilation.
  if (auto *Load = dyn_cast<LoadInst>(I))
    if (Load->hasMetadata(LLVMContext::MD_invariant_load))
      return true;

  // "target" intrinsic forms of loads aren't currently known to
  // MemoryLocation::get. TODO
  auto OptLoc = MemoryLocation::getOrNone(I);
  if (!OptLoc)
    return false;

  // The location is invariant for this query only if an invariant scope
  // covering it began at (or before) the generation we are asking about.
  const MemoryLocation &Loc = *OptLoc;
  return AvailableInvariants.count(Loc) &&
         AvailableInvariants.lookup(Loc) <= GenAt;
}
|
|
|
|
|
2018-05-31 16:08:34 +08:00
|
|
|
/// Record that \p CondInst evaluates to a known constant along the edge
/// Pred -> BB (true if BB is the taken successor, false otherwise), insert
/// that fact into the value table, and replace dominated uses with the
/// constant. For and/or conditions the operands are propagated
/// transitively. Returns true if any replacement was made.
bool EarlyCSE::handleBranchCondition(Instruction *CondInst,
                                     const BranchInst *BI, const BasicBlock *BB,
                                     const BasicBlock *Pred) {
  assert(BI->isConditional() && "Should be a conditional branch!");
  assert(BI->getCondition() == CondInst && "Wrong condition?");
  assert(BI->getSuccessor(0) == BB || BI->getSuccessor(1) == BB);
  // The condition's value on this edge: true when BB is the "then"
  // successor, false when it is the "else" successor.
  auto *TorF = (BI->getSuccessor(0) == BB)
                   ? ConstantInt::getTrue(BB->getContext())
                   : ConstantInt::getFalse(BB->getContext());
  auto MatchBinOp = [](Instruction *I, unsigned Opcode) {
    if (BinaryOperator *BOp = dyn_cast<BinaryOperator>(I))
      return BOp->getOpcode() == Opcode;
    return false;
  };
  // If the condition is AND operation, we can propagate its operands into the
  // true branch. If it is OR operation, we can propagate them into the false
  // branch.
  unsigned PropagateOpcode =
      (BI->getSuccessor(0) == BB) ? Instruction::And : Instruction::Or;

  bool MadeChanges = false;
  // Worklist of conditions known to hold the value TorF on this edge;
  // Visited guards against pushing the same operand twice.
  SmallVector<Instruction *, 4> WorkList;
  SmallPtrSet<Instruction *, 4> Visited;
  WorkList.push_back(CondInst);
  while (!WorkList.empty()) {
    Instruction *Curr = WorkList.pop_back_val();

    AvailableValues.insert(Curr, TorF);
    LLVM_DEBUG(dbgs() << "EarlyCSE CVP: Add conditional value for '"
                      << Curr->getName() << "' as " << *TorF << " in "
                      << BB->getName() << "\n");
    if (!DebugCounter::shouldExecute(CSECounter)) {
      LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
    } else {
      // Replace all dominated uses with the known value.
      if (unsigned Count = replaceDominatedUsesWith(Curr, TorF, DT,
                                                    BasicBlockEdge(Pred, BB))) {
        NumCSECVP += Count;
        MadeChanges = true;
      }
    }

    // Recurse into the operands of a matching and/or condition: each of
    // them must hold the same truth value on this edge.
    if (MatchBinOp(Curr, PropagateOpcode))
      for (auto &Op : cast<BinaryOperator>(Curr)->operands())
        if (Instruction *OPI = dyn_cast<Instruction>(Op))
          if (SimpleValue::canHandle(OPI) && Visited.insert(OPI).second)
            WorkList.push_back(OPI);
  }

  return MadeChanges;
}
|
|
|
|
|
2011-01-03 07:04:14 +08:00
|
|
|
bool EarlyCSE::processNode(DomTreeNode *Node) {
|
2016-04-23 02:47:21 +08:00
|
|
|
bool Changed = false;
|
2011-01-03 07:04:14 +08:00
|
|
|
BasicBlock *BB = Node->getBlock();
|
2012-07-24 18:51:42 +08:00
|
|
|
|
2011-01-03 11:18:43 +08:00
|
|
|
// If this block has a single predecessor, then the predecessor is the parent
|
|
|
|
// of the domtree node and all of the live out memory values are still current
|
|
|
|
// in this block. If this block has multiple predecessors, then they could
|
|
|
|
// have invalidated the live-out memory values of our parent value. For now,
|
|
|
|
// just be conservative and invalidate memory if this block has multiple
|
|
|
|
// predecessors.
|
2014-04-25 13:29:35 +08:00
|
|
|
if (!BB->getSinglePredecessor())
|
2011-01-03 11:18:43 +08:00
|
|
|
++CurrentGeneration;
|
2012-07-24 18:51:42 +08:00
|
|
|
|
2015-05-23 07:53:24 +08:00
|
|
|
// If this node has a single predecessor which ends in a conditional branch,
|
|
|
|
// we can infer the value of the branch condition given that we took this
|
2016-04-21 03:16:23 +08:00
|
|
|
// path. We need the single predecessor to ensure there's not another path
|
2015-05-23 07:53:24 +08:00
|
|
|
// which reaches this block where the condition might hold a different
|
|
|
|
// value. Since we're adding this to the scoped hash table (like any other
|
|
|
|
// def), it will have been popped if we encounter a future merge block.
|
2017-03-16 04:25:05 +08:00
|
|
|
if (BasicBlock *Pred = BB->getSinglePredecessor()) {
|
|
|
|
auto *BI = dyn_cast<BranchInst>(Pred->getTerminator());
|
|
|
|
if (BI && BI->isConditional()) {
|
|
|
|
auto *CondInst = dyn_cast<Instruction>(BI->getCondition());
|
2018-05-31 16:08:34 +08:00
|
|
|
if (CondInst && SimpleValue::canHandle(CondInst))
|
|
|
|
Changed |= handleBranchCondition(CondInst, BI, BB, Pred);
|
2017-03-16 04:25:05 +08:00
|
|
|
}
|
|
|
|
}
|
2015-05-23 07:53:24 +08:00
|
|
|
|
2011-01-03 12:17:24 +08:00
|
|
|
/// LastStore - Keep track of the last non-volatile store that we saw... for
|
|
|
|
/// as long as there in no instruction that reads memory. If we see a store
|
|
|
|
/// to the same location, we delete the dead store. This zaps trivial dead
|
|
|
|
/// stores which can occur in bitfield code among other things.
|
2015-01-27 06:51:15 +08:00
|
|
|
Instruction *LastStore = nullptr;
|
2012-07-24 18:51:42 +08:00
|
|
|
|
2011-01-03 07:04:14 +08:00
|
|
|
// See if any instructions in the block can be eliminated. If so, do it. If
|
|
|
|
// not, add them to AvailableValues.
|
2015-01-24 19:33:55 +08:00
|
|
|
for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
|
2015-10-14 02:26:00 +08:00
|
|
|
Instruction *Inst = &*I++;
|
2012-07-24 18:51:42 +08:00
|
|
|
|
2011-01-03 07:04:14 +08:00
|
|
|
// Dead instructions should just be removed.
|
2015-01-27 09:34:14 +08:00
|
|
|
if (isInstructionTriviallyDead(Inst, &TLI)) {
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << "EarlyCSE DCE: " << *Inst << '\n');
|
2018-04-07 02:47:33 +08:00
|
|
|
if (!DebugCounter::shouldExecute(CSECounter)) {
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
|
2018-04-07 02:47:33 +08:00
|
|
|
continue;
|
|
|
|
}
|
2019-12-05 21:28:55 +08:00
|
|
|
|
|
|
|
salvageDebugInfoOrMarkUndef(*Inst);
|
2016-09-01 03:24:10 +08:00
|
|
|
removeMSSA(Inst);
|
2011-01-03 07:04:14 +08:00
|
|
|
Inst->eraseFromParent();
|
|
|
|
Changed = true;
|
2011-01-03 07:19:45 +08:00
|
|
|
++NumSimplify;
|
2011-01-03 07:04:14 +08:00
|
|
|
continue;
|
|
|
|
}
|
2012-07-24 18:51:42 +08:00
|
|
|
|
2014-11-04 04:21:32 +08:00
|
|
|
// Skip assume intrinsics, they don't really have side effects (although
|
|
|
|
// they're marked as such to ensure preservation of control dependencies),
|
2017-04-28 14:25:39 +08:00
|
|
|
// and this pass will not bother with its removal. However, we should mark
|
|
|
|
// its condition as true for all dominated blocks.
|
2014-11-04 04:21:32 +08:00
|
|
|
if (match(Inst, m_Intrinsic<Intrinsic::assume>())) {
|
2017-04-28 14:25:39 +08:00
|
|
|
auto *CondI =
|
|
|
|
dyn_cast<Instruction>(cast<CallInst>(Inst)->getArgOperand(0));
|
|
|
|
if (CondI && SimpleValue::canHandle(CondI)) {
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << "EarlyCSE considering assumption: " << *Inst
|
|
|
|
<< '\n');
|
2017-04-28 14:25:39 +08:00
|
|
|
AvailableValues.insert(CondI, ConstantInt::getTrue(BB->getContext()));
|
|
|
|
} else
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << "EarlyCSE skipping assumption: " << *Inst << '\n');
|
2014-11-04 04:21:32 +08:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
Add an @llvm.sideeffect intrinsic
This patch implements Chandler's idea [0] for supporting languages that
require support for infinite loops with side effects, such as Rust, providing
part of a solution to bug 965 [1].
Specifically, it adds an `llvm.sideeffect()` intrinsic, which has no actual
effect, but which appears to optimization passes to have obscure side effects,
such that they don't optimize away loops containing it. It also teaches
several optimization passes to ignore this intrinsic, so that it doesn't
significantly impact optimization in most cases.
As discussed on llvm-dev [2], this patch is the first of two major parts.
The second part, to change LLVM's semantics to have defined behavior
on infinite loops by default, with a function attribute for opting into
potential-undefined-behavior, will be implemented and posted for review in
a separate patch.
[0] http://lists.llvm.org/pipermail/llvm-dev/2015-July/088103.html
[1] https://bugs.llvm.org/show_bug.cgi?id=965
[2] http://lists.llvm.org/pipermail/llvm-dev/2017-October/118632.html
Differential Revision: https://reviews.llvm.org/D38336
llvm-svn: 317729
2017-11-09 05:59:51 +08:00
|
|
|
// Skip sideeffect intrinsics, for the same reason as assume intrinsics.
|
|
|
|
if (match(Inst, m_Intrinsic<Intrinsic::sideeffect>())) {
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << "EarlyCSE skipping sideeffect: " << *Inst << '\n');
|
Add an @llvm.sideeffect intrinsic
This patch implements Chandler's idea [0] for supporting languages that
require support for infinite loops with side effects, such as Rust, providing
part of a solution to bug 965 [1].
Specifically, it adds an `llvm.sideeffect()` intrinsic, which has no actual
effect, but which appears to optimization passes to have obscure side effects,
such that they don't optimize away loops containing it. It also teaches
several optimization passes to ignore this intrinsic, so that it doesn't
significantly impact optimization in most cases.
As discussed on llvm-dev [2], this patch is the first of two major parts.
The second part, to change LLVM's semantics to have defined behavior
on infinite loops by default, with a function attribute for opting into
potential-undefined-behavior, will be implemented and posted for review in
a separate patch.
[0] http://lists.llvm.org/pipermail/llvm-dev/2015-July/088103.html
[1] https://bugs.llvm.org/show_bug.cgi?id=965
[2] http://lists.llvm.org/pipermail/llvm-dev/2017-October/118632.html
Differential Revision: https://reviews.llvm.org/D38336
llvm-svn: 317729
2017-11-09 05:59:51 +08:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2018-03-15 05:35:06 +08:00
|
|
|
// We can skip all invariant.start intrinsics since they only read memory,
|
|
|
|
// and we can forward values across it. For invariant starts without
|
|
|
|
// invariant ends, we can use the fact that the invariantness never ends to
|
|
|
|
// start a scope in the current generation which is true for all future
|
|
|
|
// generations. Also, we don't need to consume the last store since the
|
|
|
|
// semantics of invariant.start allow us to perform DSE of the last
|
2018-07-31 03:41:25 +08:00
|
|
|
// store, if there was a store following invariant.start. Consider:
|
2016-08-10 04:00:47 +08:00
|
|
|
//
|
|
|
|
// store 30, i8* p
|
|
|
|
// invariant.start(p)
|
|
|
|
// store 40, i8* p
|
|
|
|
// We can DSE the store to 30, since the store 40 to invariant location p
|
|
|
|
// causes undefined behaviour.
|
2018-03-15 05:35:06 +08:00
|
|
|
if (match(Inst, m_Intrinsic<Intrinsic::invariant_start>())) {
|
2018-07-31 03:41:25 +08:00
|
|
|
// If there are any uses, the scope might end.
|
2018-03-15 05:35:06 +08:00
|
|
|
if (!Inst->use_empty())
|
|
|
|
continue;
|
|
|
|
auto *CI = cast<CallInst>(Inst);
|
|
|
|
MemoryLocation MemLoc = MemoryLocation::getForArgument(CI, 1, TLI);
|
2018-03-16 02:12:27 +08:00
|
|
|
// Don't start a scope if we already have a better one pushed
|
|
|
|
if (!AvailableInvariants.count(MemLoc))
|
|
|
|
AvailableInvariants.insert(MemLoc, CurrentGeneration);
|
2016-08-10 04:00:47 +08:00
|
|
|
continue;
|
2018-03-15 05:35:06 +08:00
|
|
|
}
|
2016-08-10 04:00:47 +08:00
|
|
|
|
2018-08-30 11:39:16 +08:00
|
|
|
if (isGuard(Inst)) {
|
2016-04-30 06:23:16 +08:00
|
|
|
if (auto *CondI =
|
|
|
|
dyn_cast<Instruction>(cast<CallInst>(Inst)->getArgOperand(0))) {
|
[EarlyCSE] Remove guards with conditions known to be true
If a condition is calculated only once, and there are multiple guards on this condition, we should be able
to remove all guards dominated by the first of them. This patch allows EarlyCSE to try to find the condition
of a guard among the known values, and if it is true, remove the guard. Otherwise we keep the guard and
mark its condition as 'true' for future consideration.
Reviewers: sanjoy, reames, apilipenko, skatkov, anna, dberlin
Reviewed By: reames, sanjoy
Subscribers: llvm-commits
Differential Revision: https://reviews.llvm.org/D32476
llvm-svn: 301623
2017-04-28 14:05:48 +08:00
|
|
|
if (SimpleValue::canHandle(CondI)) {
|
|
|
|
// Do we already know the actual value of this condition?
|
|
|
|
if (auto *KnownCond = AvailableValues.lookup(CondI)) {
|
|
|
|
// Is the condition known to be true?
|
|
|
|
if (isa<ConstantInt>(KnownCond) &&
|
2017-07-07 02:39:47 +08:00
|
|
|
cast<ConstantInt>(KnownCond)->isOne()) {
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs()
|
|
|
|
<< "EarlyCSE removing guard: " << *Inst << '\n');
|
[EarlyCSE] Remove guards with conditions known to be true
If a condition is calculated only once, and there are multiple guards on this condition, we should be able
to remove all guards dominated by the first of them. This patch allows EarlyCSE to try to find the condition
of a guard among the known values, and if it is true, remove the guard. Otherwise we keep the guard and
mark its condition as 'true' for future consideration.
Reviewers: sanjoy, reames, apilipenko, skatkov, anna, dberlin
Reviewed By: reames, sanjoy
Subscribers: llvm-commits
Differential Revision: https://reviews.llvm.org/D32476
llvm-svn: 301623
2017-04-28 14:05:48 +08:00
|
|
|
removeMSSA(Inst);
|
|
|
|
Inst->eraseFromParent();
|
|
|
|
Changed = true;
|
|
|
|
continue;
|
|
|
|
} else
|
|
|
|
// Use the known value if it wasn't true.
|
|
|
|
cast<CallInst>(Inst)->setArgOperand(0, KnownCond);
|
|
|
|
}
|
|
|
|
// The condition we're guarding on here is true for all dominated
|
|
|
|
// locations.
|
2016-04-30 05:52:58 +08:00
|
|
|
AvailableValues.insert(CondI, ConstantInt::getTrue(BB->getContext()));
|
[EarlyCSE] Remove guards with conditions known to be true
If a condition is calculated only once, and there are multiple guards on this condition, we should be able
to remove all guards dominated by the first of them. This patch allows EarlyCSE to try to find the condition
of a guard among the known values, and if it is true, remove the guard. Otherwise we keep the guard and
mark its condition as 'true' for future consideration.
Reviewers: sanjoy, reames, apilipenko, skatkov, anna, dberlin
Reviewed By: reames, sanjoy
Subscribers: llvm-commits
Differential Revision: https://reviews.llvm.org/D32476
llvm-svn: 301623
2017-04-28 14:05:48 +08:00
|
|
|
}
|
2016-04-30 05:52:58 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// Guard intrinsics read all memory, but don't write any memory.
|
|
|
|
// Accordingly, don't update the generation but consume the last store (to
|
|
|
|
// avoid an incorrect DSE).
|
|
|
|
LastStore = nullptr;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2011-01-03 07:04:14 +08:00
|
|
|
// If the instruction can be simplified (e.g. X+0 = X) then replace it with
|
|
|
|
// its simpler value.
|
2017-04-29 03:55:38 +08:00
|
|
|
if (Value *V = SimplifyInstruction(Inst, SQ)) {
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << "EarlyCSE Simplify: " << *Inst << " to: " << *V
|
|
|
|
<< '\n');
|
2018-04-07 02:47:33 +08:00
|
|
|
if (!DebugCounter::shouldExecute(CSECounter)) {
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
|
2018-04-07 02:47:33 +08:00
|
|
|
} else {
|
|
|
|
bool Killed = false;
|
|
|
|
if (!Inst->use_empty()) {
|
|
|
|
Inst->replaceAllUsesWith(V);
|
|
|
|
Changed = true;
|
|
|
|
}
|
|
|
|
if (isInstructionTriviallyDead(Inst, &TLI)) {
|
|
|
|
removeMSSA(Inst);
|
|
|
|
Inst->eraseFromParent();
|
|
|
|
Changed = true;
|
|
|
|
Killed = true;
|
|
|
|
}
|
|
|
|
if (Changed)
|
|
|
|
++NumSimplify;
|
|
|
|
if (Killed)
|
|
|
|
continue;
|
2016-06-25 08:04:10 +08:00
|
|
|
}
|
2011-01-03 07:04:14 +08:00
|
|
|
}
|
2012-07-24 18:51:42 +08:00
|
|
|
|
2011-01-03 11:18:43 +08:00
|
|
|
// If this is a simple instruction that we can value number, process it.
|
|
|
|
if (SimpleValue::canHandle(Inst)) {
|
|
|
|
// See if the instruction has an available value. If so, use it.
|
2015-01-27 09:34:14 +08:00
|
|
|
if (Value *V = AvailableValues.lookup(Inst)) {
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << "EarlyCSE CSE: " << *Inst << " to: " << *V
|
|
|
|
<< '\n');
|
2018-04-07 02:47:33 +08:00
|
|
|
if (!DebugCounter::shouldExecute(CSECounter)) {
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
|
2018-04-07 02:47:33 +08:00
|
|
|
continue;
|
|
|
|
}
|
2016-04-22 14:37:45 +08:00
|
|
|
if (auto *I = dyn_cast<Instruction>(V))
|
|
|
|
I->andIRFlags(Inst);
|
2011-01-03 11:18:43 +08:00
|
|
|
Inst->replaceAllUsesWith(V);
|
2016-09-01 03:24:10 +08:00
|
|
|
removeMSSA(Inst);
|
2011-01-03 11:18:43 +08:00
|
|
|
Inst->eraseFromParent();
|
|
|
|
Changed = true;
|
|
|
|
++NumCSE;
|
|
|
|
continue;
|
|
|
|
}
|
2012-07-24 18:51:42 +08:00
|
|
|
|
2011-01-03 11:18:43 +08:00
|
|
|
// Otherwise, just remember that this value is available.
|
2015-01-27 09:34:14 +08:00
|
|
|
AvailableValues.insert(Inst, Inst);
|
2011-01-03 07:04:14 +08:00
|
|
|
continue;
|
2011-01-03 11:18:43 +08:00
|
|
|
}
|
2012-07-24 18:51:42 +08:00
|
|
|
|
2015-01-27 06:51:15 +08:00
|
|
|
ParseMemoryInst MemInst(Inst, TTI);
|
2011-01-03 11:41:27 +08:00
|
|
|
// If this is a non-volatile load, process it.
|
2015-01-27 06:51:15 +08:00
|
|
|
if (MemInst.isValid() && MemInst.isLoad()) {
|
2015-12-09 05:45:41 +08:00
|
|
|
// (conservatively) we can't peek past the ordering implied by this
|
|
|
|
// operation, but we can add this load to our set of available values
|
|
|
|
if (MemInst.isVolatile() || !MemInst.isUnordered()) {
|
2014-04-25 13:29:35 +08:00
|
|
|
LastStore = nullptr;
|
2015-12-09 05:45:41 +08:00
|
|
|
++CurrentGeneration;
|
2011-01-03 12:17:24 +08:00
|
|
|
}
|
2012-07-24 18:51:42 +08:00
|
|
|
|
2018-03-16 01:29:32 +08:00
|
|
|
if (MemInst.isInvariantLoad()) {
|
|
|
|
// If we pass an invariant load, we know that memory location is
|
|
|
|
// indefinitely constant from the moment of first dereferenceability.
|
2018-03-16 02:12:27 +08:00
|
|
|
// We conservatively treat the invariant_load as that moment. If we
|
|
|
|
// pass an invariant load after already establishing a scope, don't
|
|
|
|
// restart it since we want to preserve the earliest point seen.
|
2018-03-16 01:29:32 +08:00
|
|
|
auto MemLoc = MemoryLocation::get(Inst);
|
2018-03-16 02:12:27 +08:00
|
|
|
if (!AvailableInvariants.count(MemLoc))
|
|
|
|
AvailableInvariants.insert(MemLoc, CurrentGeneration);
|
2018-03-16 01:29:32 +08:00
|
|
|
}
|
|
|
|
|
2011-01-03 11:41:27 +08:00
|
|
|
// If we have an available version of this load, and if it is the right
|
2016-06-17 04:47:57 +08:00
|
|
|
// generation or the load is known to be from an invariant location,
|
|
|
|
// replace this instruction.
|
|
|
|
//
|
2016-09-01 01:45:31 +08:00
|
|
|
// If either the dominating load or the current load are invariant, then
|
|
|
|
// we can assume the current load loads the same value as the dominating
|
|
|
|
// load.
|
Reapply 254950 w/fix
254950 ended up being not NFC. The previous code was overriding the flags for whether an instruction read or wrote memory using the target specific flags returned via TTI. I'd missed this in my refactoring. Since I mistakenly built only x86 and didn't notice the number of unsupported tests, I didn't catch that before the original checkin.
This raises an interesting issue though. Given we have function attributes (i.e. readonly, readnone, argmemonly) which describe the aliasing of intrinsics, why does TTI have this information overriding the instruction definition at all? I see no reason for this, but decided to preserve existing behavior for the moment. The root issue might be that we don't have a "writeonly" attribute.
Original commit message:
[EarlyCSE] Simplify and invert ParseMemoryInst [NFCI]
Restructure ParseMemoryInst - which was introduced to abstract over target specific load and stores instructions - to just query the underlying instructions. In theory, this could be slightly slower than caching the results, but in practice, it's very unlikely to be measurable.
The simple query scheme makes it far easier to understand, and much easier to extend with new queries. Given I'm about to need to add new query types, doing the cleanup first seemed worthwhile.
Do we still believe the target specific intrinsic handling is worthwhile in EarlyCSE? It adds quite a bit of complexity and makes the code harder to read. Being able to delete the abstraction entirely would be wonderful.
llvm-svn: 254957
2015-12-08 06:41:23 +08:00
|
|
|
LoadValue InVal = AvailableLoads.lookup(MemInst.getPointerOperand());
|
2016-06-17 04:47:57 +08:00
|
|
|
if (InVal.DefInst != nullptr &&
|
2015-12-09 05:45:41 +08:00
|
|
|
InVal.MatchingId == MemInst.getMatchingId() &&
|
|
|
|
// We don't yet handle removing loads with ordering of any kind.
|
|
|
|
!MemInst.isVolatile() && MemInst.isUnordered() &&
|
|
|
|
// We can't replace an atomic load with one which isn't also atomic.
|
2016-09-01 03:24:10 +08:00
|
|
|
InVal.IsAtomic >= MemInst.isAtomic() &&
|
2018-03-16 01:29:32 +08:00
|
|
|
(isOperatingOnInvariantMemAt(Inst, InVal.Generation) ||
|
2016-09-01 03:24:10 +08:00
|
|
|
isSameMemGeneration(InVal.Generation, CurrentGeneration,
|
|
|
|
InVal.DefInst, Inst))) {
|
2016-05-06 09:13:58 +08:00
|
|
|
Value *Op = getOrCreateResult(InVal.DefInst, Inst->getType());
|
2015-01-27 06:51:15 +08:00
|
|
|
if (Op != nullptr) {
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << "EarlyCSE CSE LOAD: " << *Inst
|
|
|
|
<< " to: " << *InVal.DefInst << '\n');
|
2018-04-07 02:47:33 +08:00
|
|
|
if (!DebugCounter::shouldExecute(CSECounter)) {
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
|
2018-04-07 02:47:33 +08:00
|
|
|
continue;
|
|
|
|
}
|
2015-01-27 06:51:15 +08:00
|
|
|
if (!Inst->use_empty())
|
|
|
|
Inst->replaceAllUsesWith(Op);
|
2016-09-01 03:24:10 +08:00
|
|
|
removeMSSA(Inst);
|
2015-01-27 06:51:15 +08:00
|
|
|
Inst->eraseFromParent();
|
|
|
|
Changed = true;
|
|
|
|
++NumCSELoad;
|
|
|
|
continue;
|
|
|
|
}
|
2011-01-03 11:41:27 +08:00
|
|
|
}
|
2012-07-24 18:51:42 +08:00
|
|
|
|
2011-01-03 11:41:27 +08:00
|
|
|
// Otherwise, remember that we have this instruction.
|
2015-10-07 15:41:29 +08:00
|
|
|
AvailableLoads.insert(
|
Reapply 254950 w/fix
254950 ended up being not NFC. The previous code was overriding the flags for whether an instruction read or wrote memory using the target specific flags returned via TTI. I'd missed this in my refactoring. Since I mistakenly built only x86 and didn't notice the number of unsupported tests, I didn't catch that before the original checkin.
This raises an interesting issue though. Given we have function attributes (i.e. readonly, readnone, argmemonly) which describe the aliasing of intrinsics, why does TTI have this information overriding the instruction definition at all? I see no reason for this, but decided to preserve existing behavior for the moment. The root issue might be that we don't have a "writeonly" attribute.
Original commit message:
[EarlyCSE] Simplify and invert ParseMemoryInst [NFCI]
Restructure ParseMemoryInst - which was introduced to abstract over target specific load and stores instructions - to just query the underlying instructions. In theory, this could be slightly slower than caching the results, but in practice, it's very unlikely to be measurable.
The simple query scheme makes it far easier to understand, and much easier to extend with new queries. Given I'm about to need to add new query types, doing the cleanup first seemed worthwhile.
Do we still believe the target specific intrinsic handling is worthwhile in EarlyCSE? It adds quite a bit of complexity and makes the code harder to read. Being able to delete the abstraction entirely would be wonderful.
llvm-svn: 254957
2015-12-08 06:41:23 +08:00
|
|
|
MemInst.getPointerOperand(),
|
2015-12-09 05:45:41 +08:00
|
|
|
LoadValue(Inst, CurrentGeneration, MemInst.getMatchingId(),
|
2018-03-16 01:29:32 +08:00
|
|
|
MemInst.isAtomic()));
|
2014-04-25 13:29:35 +08:00
|
|
|
LastStore = nullptr;
|
2011-01-03 11:41:27 +08:00
|
|
|
continue;
|
|
|
|
}
|
2012-07-24 18:51:42 +08:00
|
|
|
|
2017-01-18 04:15:47 +08:00
|
|
|
// If this instruction may read from memory or throw (and potentially read
|
|
|
|
// from memory in the exception handler), forget LastStore. Load/store
|
|
|
|
// intrinsics will indicate both a read and a write to memory. The target
|
|
|
|
// may override this (e.g. so that a store intrinsic does not read from
|
|
|
|
// memory, and thus will be treated the same as a regular store for
|
|
|
|
// commoning purposes).
|
|
|
|
if ((Inst->mayReadFromMemory() || Inst->mayThrow()) &&
|
2015-01-27 06:51:15 +08:00
|
|
|
!(MemInst.isValid() && !MemInst.mayReadFromMemory()))
|
2014-04-25 13:29:35 +08:00
|
|
|
LastStore = nullptr;
|
2012-07-24 18:51:42 +08:00
|
|
|
|
2011-01-03 11:41:27 +08:00
|
|
|
// If this is a read-only call, process it.
|
|
|
|
if (CallValue::canHandle(Inst)) {
|
|
|
|
// If we have an available version of this call, and if it is the right
|
|
|
|
// generation, replace this instruction.
|
2016-05-14 01:54:58 +08:00
|
|
|
std::pair<Instruction *, unsigned> InVal = AvailableCalls.lookup(Inst);
|
2016-09-01 03:24:10 +08:00
|
|
|
if (InVal.first != nullptr &&
|
|
|
|
isSameMemGeneration(InVal.second, CurrentGeneration, InVal.first,
|
|
|
|
Inst)) {
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << "EarlyCSE CSE CALL: " << *Inst
|
|
|
|
<< " to: " << *InVal.first << '\n');
|
2018-04-07 02:47:33 +08:00
|
|
|
if (!DebugCounter::shouldExecute(CSECounter)) {
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
|
2018-04-07 02:47:33 +08:00
|
|
|
continue;
|
|
|
|
}
|
2015-01-24 19:33:55 +08:00
|
|
|
if (!Inst->use_empty())
|
|
|
|
Inst->replaceAllUsesWith(InVal.first);
|
2016-09-01 03:24:10 +08:00
|
|
|
removeMSSA(Inst);
|
2011-01-03 11:18:43 +08:00
|
|
|
Inst->eraseFromParent();
|
|
|
|
Changed = true;
|
2011-01-03 11:41:27 +08:00
|
|
|
++NumCSECall;
|
2011-01-03 11:18:43 +08:00
|
|
|
continue;
|
|
|
|
}
|
2012-07-24 18:51:42 +08:00
|
|
|
|
2011-01-03 11:18:43 +08:00
|
|
|
// Otherwise, remember that we have this instruction.
|
2015-01-27 09:34:14 +08:00
|
|
|
AvailableCalls.insert(
|
2016-05-14 01:54:58 +08:00
|
|
|
Inst, std::pair<Instruction *, unsigned>(Inst, CurrentGeneration));
|
2011-01-03 07:04:14 +08:00
|
|
|
continue;
|
|
|
|
}
|
2012-07-24 18:51:42 +08:00
|
|
|
|
2015-08-27 09:32:33 +08:00
|
|
|
// A release fence requires that all stores complete before it, but does
|
|
|
|
// not prevent the reordering of following loads 'before' the fence. As a
|
|
|
|
// result, we don't need to consider it as writing to memory and don't need
|
|
|
|
// to advance the generation. We do need to prevent DSE across the fence,
|
|
|
|
// but that's handled above.
|
|
|
|
if (FenceInst *FI = dyn_cast<FenceInst>(Inst))
|
2016-04-07 05:19:33 +08:00
|
|
|
if (FI->getOrdering() == AtomicOrdering::Release) {
|
2015-08-27 09:32:33 +08:00
|
|
|
assert(Inst->mayReadFromMemory() && "relied on to prevent DSE above");
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2015-12-16 09:01:30 +08:00
|
|
|
// write back DSE - If we write back the same value we just loaded from
|
|
|
|
// the same location and haven't passed any intervening writes or ordering
|
|
|
|
// operations, we can remove the write. The primary benefit is in allowing
|
|
|
|
// the available load table to remain valid and value forward past where
|
|
|
|
// the store originally was.
|
|
|
|
if (MemInst.isValid() && MemInst.isStore()) {
|
|
|
|
LoadValue InVal = AvailableLoads.lookup(MemInst.getPointerOperand());
|
2016-05-06 09:13:58 +08:00
|
|
|
if (InVal.DefInst &&
|
|
|
|
InVal.DefInst == getOrCreateResult(Inst, InVal.DefInst->getType()) &&
|
2015-12-16 09:01:30 +08:00
|
|
|
InVal.MatchingId == MemInst.getMatchingId() &&
|
|
|
|
// We don't yet handle removing stores with ordering of any kind.
|
2016-09-01 03:24:10 +08:00
|
|
|
!MemInst.isVolatile() && MemInst.isUnordered() &&
|
2018-03-15 05:35:06 +08:00
|
|
|
(isOperatingOnInvariantMemAt(Inst, InVal.Generation) ||
|
|
|
|
isSameMemGeneration(InVal.Generation, CurrentGeneration,
|
|
|
|
InVal.DefInst, Inst))) {
|
2016-09-01 03:24:10 +08:00
|
|
|
// It is okay to have a LastStore to a different pointer here if MemorySSA
|
|
|
|
// tells us that the load and store are from the same memory generation.
|
|
|
|
// In that case, LastStore should keep its present value since we're
|
|
|
|
// removing the current store.
|
2015-12-16 09:01:30 +08:00
|
|
|
assert((!LastStore ||
|
|
|
|
ParseMemoryInst(LastStore, TTI).getPointerOperand() ==
|
2016-09-01 03:24:10 +08:00
|
|
|
MemInst.getPointerOperand() ||
|
|
|
|
MSSA) &&
|
|
|
|
"can't have an intervening store if not using MemorySSA!");
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << "EarlyCSE DSE (writeback): " << *Inst << '\n');
|
2018-04-07 02:47:33 +08:00
|
|
|
if (!DebugCounter::shouldExecute(CSECounter)) {
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
|
2018-04-07 02:47:33 +08:00
|
|
|
continue;
|
|
|
|
}
|
2016-09-01 03:24:10 +08:00
|
|
|
removeMSSA(Inst);
|
2015-12-16 09:01:30 +08:00
|
|
|
Inst->eraseFromParent();
|
|
|
|
Changed = true;
|
|
|
|
++NumDSE;
|
|
|
|
// We can avoid incrementing the generation count since we were able
|
|
|
|
// to eliminate this store.
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-01-03 11:18:43 +08:00
|
|
|
// Okay, this isn't something we can CSE at all. Check to see if it is
|
|
|
|
// something that could modify memory. If so, our available memory values
|
|
|
|
// cannot be used so bump the generation count.
|
2011-01-03 11:46:34 +08:00
|
|
|
if (Inst->mayWriteToMemory()) {
|
2011-01-03 11:18:43 +08:00
|
|
|
++CurrentGeneration;
|
2012-07-24 18:51:42 +08:00
|
|
|
|
2015-01-27 06:51:15 +08:00
|
|
|
if (MemInst.isValid() && MemInst.isStore()) {
|
2011-01-03 12:17:24 +08:00
|
|
|
// We do a trivial form of DSE if there are two stores to the same
|
2015-12-18 02:50:50 +08:00
|
|
|
// location with no intervening loads. Delete the earlier store.
|
|
|
|
// At the moment, we don't remove ordered stores, but do remove
|
|
|
|
// unordered atomic stores. There's no special requirement (for
|
|
|
|
// unordered atomics) about removing atomic stores only in favor of
|
2019-03-12 15:08:19 +08:00
|
|
|
// other atomic stores since we were going to execute the non-atomic
|
2015-12-18 02:50:50 +08:00
|
|
|
// one anyway and the atomic one might never have become visible.
|
2015-01-27 06:51:15 +08:00
|
|
|
if (LastStore) {
|
|
|
|
ParseMemoryInst LastStoreMemInst(LastStore, TTI);
|
2015-12-18 02:50:50 +08:00
|
|
|
assert(LastStoreMemInst.isUnordered() &&
|
|
|
|
!LastStoreMemInst.isVolatile() &&
|
|
|
|
"Violated invariant");
|
2015-01-27 06:51:15 +08:00
|
|
|
if (LastStoreMemInst.isMatchingMemLoc(MemInst)) {
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << "EarlyCSE DEAD STORE: " << *LastStore
|
|
|
|
<< " due to: " << *Inst << '\n');
|
2018-04-07 02:47:33 +08:00
|
|
|
if (!DebugCounter::shouldExecute(CSECounter)) {
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
|
2018-04-07 02:47:33 +08:00
|
|
|
} else {
|
|
|
|
removeMSSA(LastStore);
|
|
|
|
LastStore->eraseFromParent();
|
|
|
|
Changed = true;
|
|
|
|
++NumDSE;
|
|
|
|
LastStore = nullptr;
|
|
|
|
}
|
2015-01-27 06:51:15 +08:00
|
|
|
}
|
2014-11-19 01:46:32 +08:00
|
|
|
// fallthrough - we can exploit information about this store
|
2011-01-03 12:17:24 +08:00
|
|
|
}
|
2012-07-24 18:51:42 +08:00
|
|
|
|
2011-01-03 12:17:24 +08:00
|
|
|
// Okay, we just invalidated anything we knew about loaded values. Try
|
|
|
|
// to salvage *something* by remembering that the stored value is a live
|
|
|
|
// version of the pointer. It is safe to forward from volatile stores
|
|
|
|
// to non-volatile loads, so we don't have to check for volatility of
|
|
|
|
// the store.
|
2015-10-07 15:41:29 +08:00
|
|
|
AvailableLoads.insert(
|
Reapply 254950 w/fix
254950 ended up being not NFC. The previous code was overriding the flags for whether an instruction read or wrote memory using the target specific flags returned via TTI. I'd missed this in my refactoring. Since I mistakenly built only x86 and didn't notice the number of unsupported tests, I didn't catch that before the original checkin.
This raises an interesting issue though. Given we have function attributes (i.e. readonly, readnone, argmemonly) which describe the aliasing of intrinsics, why does TTI have this information overriding the instruction definition at all? I see no reason for this, but decided to preserve existing behavior for the moment. The root issue might be that we don't have a "writeonly" attribute.
Original commit message:
[EarlyCSE] Simplify and invert ParseMemoryInst [NFCI]
Restructure ParseMemoryInst - which was introduced to abstract over target specific load and stores instructions - to just query the underlying instructions. In theory, this could be slightly slower than caching the results, but in practice, it's very unlikely to be measurable.
The simple query scheme makes it far easier to understand, and much easier to extend with new queries. Given I'm about to need to add new query types, doing the cleanup first seemed worthwhile.
Do we still believe the target specific intrinsic handling is worthwhile in EarlyCSE? It adds quite a bit of complexity and makes the code harder to read. Being able to delete the abstraction entirely would be wonderful.
llvm-svn: 254957
2015-12-08 06:41:23 +08:00
|
|
|
MemInst.getPointerOperand(),
|
2015-12-09 05:45:41 +08:00
|
|
|
LoadValue(Inst, CurrentGeneration, MemInst.getMatchingId(),
|
2018-03-16 01:29:32 +08:00
|
|
|
MemInst.isAtomic()));
|
2012-07-24 18:51:42 +08:00
|
|
|
|
2015-12-18 02:50:50 +08:00
|
|
|
// Remember that this was the last unordered store we saw for DSE. We
|
|
|
|
// don't yet handle DSE on ordered or volatile stores since we don't
|
|
|
|
// have a good way to model the ordering requirement for following
|
|
|
|
// passes once the store is removed. We could insert a fence, but
|
|
|
|
// since fences are slightly stronger than stores in their ordering,
|
|
|
|
// it's not clear this is a profitable transform. Another option would
|
|
|
|
// be to merge the ordering with that of the post dominating store.
|
|
|
|
if (MemInst.isUnordered() && !MemInst.isVolatile())
|
2015-01-27 06:51:15 +08:00
|
|
|
LastStore = Inst;
|
2015-12-09 05:45:41 +08:00
|
|
|
else
|
|
|
|
LastStore = nullptr;
|
2011-01-03 11:46:34 +08:00
|
|
|
}
|
|
|
|
}
|
2011-01-03 07:04:14 +08:00
|
|
|
}
|
2012-02-01 07:14:41 +08:00
|
|
|
|
2011-01-03 07:04:14 +08:00
|
|
|
return Changed;
|
|
|
|
}
|
|
|
|
|
2015-01-27 09:34:14 +08:00
|
|
|
bool EarlyCSE::run() {
|
2015-01-24 19:33:55 +08:00
|
|
|
// Note, deque is being used here because there is significant performance
|
|
|
|
// gains over vector when the container becomes very large due to the
|
|
|
|
// specific access patterns. For more information see the mailing list
|
|
|
|
// discussion on this:
|
2015-08-05 11:51:17 +08:00
|
|
|
// http://lists.llvm.org/pipermail/llvm-commits/Week-of-Mon-20120116/135228.html
|
2014-09-20 21:29:20 +08:00
|
|
|
std::deque<StackNode *> nodesToProcess;
|
2012-02-01 07:14:41 +08:00
|
|
|
|
|
|
|
bool Changed = false;
|
|
|
|
|
|
|
|
// Process the root node.
|
2015-01-24 19:33:55 +08:00
|
|
|
nodesToProcess.push_back(new StackNode(
|
2018-03-15 05:35:06 +08:00
|
|
|
AvailableValues, AvailableLoads, AvailableInvariants, AvailableCalls,
|
|
|
|
CurrentGeneration, DT.getRootNode(),
|
|
|
|
DT.getRootNode()->begin(), DT.getRootNode()->end()));
|
2012-02-01 07:14:41 +08:00
|
|
|
|
2019-02-22 03:49:57 +08:00
|
|
|
assert(!CurrentGeneration && "Create a new EarlyCSE instance to rerun it.");
|
2012-02-01 07:14:41 +08:00
|
|
|
|
|
|
|
// Process the stack.
|
|
|
|
while (!nodesToProcess.empty()) {
|
|
|
|
// Grab the first item off the stack. Set the current generation, remove
|
|
|
|
// the node from the stack, and process it.
|
2013-12-06 02:42:12 +08:00
|
|
|
StackNode *NodeToProcess = nodesToProcess.back();
|
2012-02-01 07:14:41 +08:00
|
|
|
|
|
|
|
// Initialize class members.
|
|
|
|
CurrentGeneration = NodeToProcess->currentGeneration();
|
|
|
|
|
|
|
|
// Check if the node needs to be processed.
|
|
|
|
if (!NodeToProcess->isProcessed()) {
|
|
|
|
// Process the node.
|
|
|
|
Changed |= processNode(NodeToProcess->node());
|
|
|
|
NodeToProcess->childGeneration(CurrentGeneration);
|
|
|
|
NodeToProcess->process();
|
|
|
|
} else if (NodeToProcess->childIter() != NodeToProcess->end()) {
|
|
|
|
// Push the next child onto the stack.
|
|
|
|
DomTreeNode *child = NodeToProcess->nextChild();
|
2013-12-06 02:42:12 +08:00
|
|
|
nodesToProcess.push_back(
|
2018-03-15 05:35:06 +08:00
|
|
|
new StackNode(AvailableValues, AvailableLoads, AvailableInvariants,
|
|
|
|
AvailableCalls, NodeToProcess->childGeneration(),
|
|
|
|
child, child->begin(), child->end()));
|
2012-02-01 07:14:41 +08:00
|
|
|
} else {
|
|
|
|
// It has been processed, and there are no more children to process,
|
|
|
|
// so delete it and pop it off the stack.
|
|
|
|
delete NodeToProcess;
|
2013-12-06 02:42:12 +08:00
|
|
|
nodesToProcess.pop_back();
|
2012-02-01 07:14:41 +08:00
|
|
|
}
|
|
|
|
} // while (!nodes...)
|
|
|
|
|
|
|
|
return Changed;
|
2011-01-03 05:47:05 +08:00
|
|
|
}
|
2015-01-27 09:34:14 +08:00
|
|
|
|
2015-02-01 18:51:23 +08:00
|
|
|
PreservedAnalyses EarlyCSEPass::run(Function &F,
                                    FunctionAnalysisManager &AM) {
  // Gather the analyses the transform depends on. MemorySSA is only
  // requested when this pass instance was configured to use it.
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto *MSSA =
      UseMemorySSA ? &AM.getResult<MemorySSAAnalysis>(F).getMSSA() : nullptr;

  EarlyCSE CSE(F.getParent()->getDataLayout(), TLI, TTI, DT, AC, MSSA);

  // If nothing was simplified or removed, every analysis is still valid.
  bool Changed = CSE.run();
  if (!Changed)
    return PreservedAnalyses::all();

  // The transform never touches the CFG, so control-flow analyses survive;
  // likewise GlobalsAA, and MemorySSA when it was kept up to date.
  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  PA.preserve<GlobalsAA>();
  if (UseMemorySSA)
    PA.preserve<MemorySSAAnalysis>();
  return PA;
}
|
|
|
|
|
2015-01-27 09:34:14 +08:00
|
|
|
namespace {
|
2017-10-14 05:17:07 +08:00
|
|
|
|
2018-05-01 23:54:18 +08:00
|
|
|
/// A simple and fast domtree-based CSE pass.
|
2015-01-27 09:34:14 +08:00
|
|
|
///
|
|
|
|
/// This pass does a simple depth-first walk over the dominator tree,
|
|
|
|
/// eliminating trivially redundant instructions and using instsimplify to
|
|
|
|
/// canonicalize things as it goes. It is intended to be fast and catch obvious
|
|
|
|
/// cases so that instcombine and other passes are more effective. It is
|
|
|
|
/// expected that a later pass of GVN will catch the interesting/hard cases.
|
2016-09-01 03:24:10 +08:00
|
|
|
template<bool UseMemorySSA>
|
|
|
|
class EarlyCSELegacyCommonPass : public FunctionPass {
|
2015-01-27 09:34:14 +08:00
|
|
|
public:
|
|
|
|
static char ID;
|
|
|
|
|
2016-09-01 03:24:10 +08:00
|
|
|
EarlyCSELegacyCommonPass() : FunctionPass(ID) {
|
|
|
|
if (UseMemorySSA)
|
|
|
|
initializeEarlyCSEMemSSALegacyPassPass(*PassRegistry::getPassRegistry());
|
|
|
|
else
|
|
|
|
initializeEarlyCSELegacyPassPass(*PassRegistry::getPassRegistry());
|
2015-01-27 09:34:14 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
bool runOnFunction(Function &F) override {
|
2016-04-23 06:06:11 +08:00
|
|
|
if (skipFunction(F))
|
2015-01-27 09:34:14 +08:00
|
|
|
return false;
|
|
|
|
|
Change TargetLibraryInfo analysis passes to always require Function
Summary:
This is the first change to enable the TLI to be built per-function so
that -fno-builtin* handling can be migrated to use function attributes.
See discussion on D61634 for background. This is an enabler for fixing
handling of these options for LTO, for example.
This change should not affect behavior, as the provided function is not
yet used to build a specifically per-function TLI, but rather enables
that migration.
Most of the changes were very mechanical, e.g. passing a Function to the
legacy analysis pass's getTLI interface, or in Module level cases,
adding a callback. This is similar to the way the per-function TTI
analysis works.
There was one place where we were looking for builtins but not in the
context of a specific function. See FindCXAAtExit in
lib/Transforms/IPO/GlobalOpt.cpp. I'm somewhat concerned my workaround
could provide the wrong behavior in some corner cases. Suggestions
welcome.
Reviewers: chandlerc, hfinkel
Subscribers: arsenm, dschuff, jvesely, nhaehnle, mehdi_amini, javed.absar, sbc100, jgravelle-google, eraman, aheejin, steven_wu, george.burgess.iv, dexonsmith, jfb, asbirlea, gchatelet, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D66428
llvm-svn: 371284
2019-09-07 11:09:36 +08:00
|
|
|
auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
|
2015-02-01 20:01:35 +08:00
|
|
|
auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
|
2015-01-27 09:34:14 +08:00
|
|
|
auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
|
2016-12-19 16:22:17 +08:00
|
|
|
auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
|
2016-09-01 03:24:10 +08:00
|
|
|
auto *MSSA =
|
|
|
|
UseMemorySSA ? &getAnalysis<MemorySSAWrapperPass>().getMSSA() : nullptr;
|
2015-01-27 09:34:14 +08:00
|
|
|
|
2017-04-29 03:55:38 +08:00
|
|
|
EarlyCSE CSE(F.getParent()->getDataLayout(), TLI, TTI, DT, AC, MSSA);
|
2015-01-27 09:34:14 +08:00
|
|
|
|
|
|
|
return CSE.run();
|
|
|
|
}
|
|
|
|
|
|
|
|
void getAnalysisUsage(AnalysisUsage &AU) const override {
|
2016-12-19 16:22:17 +08:00
|
|
|
AU.addRequired<AssumptionCacheTracker>();
|
2015-01-27 09:34:14 +08:00
|
|
|
AU.addRequired<DominatorTreeWrapperPass>();
|
|
|
|
AU.addRequired<TargetLibraryInfoWrapperPass>();
|
[PM] Change the core design of the TTI analysis to use a polymorphic
type erased interface and a single analysis pass rather than an
extremely complex analysis group.
The end result is that the TTI analysis can contain a type erased
implementation that supports the polymorphic TTI interface. We can build
one from a target-specific implementation or from a dummy one in the IR.
I've also factored all of the code into "mix-in"-able base classes,
including CRTP base classes to facilitate calling back up to the most
specialized form when delegating horizontally across the surface. These
aren't as clean as I would like and I'm planning to work on cleaning
some of this up, but I wanted to start by putting into the right form.
There are a number of reasons for this change, and this particular
design. The first and foremost reason is that an analysis group is
complete overkill, and the chaining delegation strategy was so opaque,
confusing, and high overhead that TTI was suffering greatly for it.
Several of the TTI functions had failed to be implemented in all places
because of the chaining-based delegation making there be no checking of
this. A few other functions were implemented with incorrect delegation.
The message to me was very clear working on this -- the delegation and
analysis group structure was too confusing to be useful here.
The other reason of course is that this is *much* more natural fit for
the new pass manager. This will lay the ground work for a type-erased
per-function info object that can look up the correct subtarget and even
cache it.
Yet another benefit is that this will significantly simplify the
interaction of the pass managers and the TargetMachine. See the future
work below.
The downside of this change is that it is very, very verbose. I'm going
to work to improve that, but it is somewhat an implementation necessity
in C++ to do type erasure. =/ I discussed this design really extensively
with Eric and Hal prior to going down this path, and afterward showed
them the result. No one was really thrilled with it, but there doesn't
seem to be a substantially better alternative. Using a base class and
virtual method dispatch would make the code much shorter, but as
discussed in the update to the programmer's manual and elsewhere,
a polymorphic interface feels like the more principled approach even if
this is perhaps the least compelling example of it. ;]
Ultimately, there is still a lot more to be done here, but this was the
huge chunk that I couldn't really split things out of because this was
the interface change to TTI. I've tried to minimize all the other parts
of this. The follow up work should include at least:
1) Improving the TargetMachine interface by having it directly return
a TTI object. Because we have a non-pass object with value semantics
and an internal type erasure mechanism, we can narrow the interface
of the TargetMachine to *just* do what we need: build and return
a TTI object that we can then insert into the pass pipeline.
2) Make the TTI object be fully specialized for a particular function.
This will include splitting off a minimal form of it which is
sufficient for the inliner and the old pass manager.
3) Add a new pass manager analysis which produces TTI objects from the
target machine for each function. This may actually be done as part
of #2 in order to use the new analysis to implement #2.
4) Work on narrowing the API between TTI and the targets so that it is
easier to understand and less verbose to type erase.
5) Work on narrowing the API between TTI and its clients so that it is
easier to understand and less verbose to forward.
6) Try to improve the CRTP-based delegation. I feel like this code is
just a bit messy and exacerbating the complexity of implementing
the TTI in each target.
Many thanks to Eric and Hal for their help here. I ended up blocked on
this somewhat more abruptly than I expected, and so I appreciate getting
it sorted out very quickly.
Differential Revision: http://reviews.llvm.org/D7293
llvm-svn: 227669
2015-01-31 11:43:40 +08:00
|
|
|
AU.addRequired<TargetTransformInfoWrapperPass>();
|
2016-09-01 03:24:10 +08:00
|
|
|
if (UseMemorySSA) {
|
|
|
|
AU.addRequired<MemorySSAWrapperPass>();
|
|
|
|
AU.addPreserved<MemorySSAWrapperPass>();
|
|
|
|
}
|
2015-09-10 18:22:12 +08:00
|
|
|
AU.addPreserved<GlobalsAAWrapperPass>();
|
2019-10-01 01:08:40 +08:00
|
|
|
AU.addPreserved<AAResultsWrapperPass>();
|
2015-01-27 09:34:14 +08:00
|
|
|
AU.setPreservesCFG();
|
|
|
|
}
|
|
|
|
};
|
2017-10-14 05:17:07 +08:00
|
|
|
|
|
|
|
} // end anonymous namespace
|
2015-01-27 09:34:14 +08:00
|
|
|
|
2016-09-01 03:24:10 +08:00
|
|
|
// Instantiation of the common wrapper that runs EarlyCSE without MemorySSA.
using EarlyCSELegacyPass = EarlyCSELegacyCommonPass</*UseMemorySSA=*/false>;

// Explicit specialization of the pass-ID member for this instantiation; the
// legacy pass manager identifies passes by the address of this char.
template<>
char EarlyCSELegacyPass::ID = 0;

// Register the non-MemorySSA flavor under the "early-cse" command-line name,
// along with the analyses it requires (see getAnalysisUsage above).
INITIALIZE_PASS_BEGIN(EarlyCSELegacyPass, "early-cse", "Early CSE", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(EarlyCSELegacyPass, "early-cse", "Early CSE", false, false)
|
2016-09-01 03:24:10 +08:00
|
|
|
|
|
|
|
// Instantiation of the common wrapper that runs EarlyCSE on top of MemorySSA.
using EarlyCSEMemSSALegacyPass =
    EarlyCSELegacyCommonPass</*UseMemorySSA=*/true>;

// Pass-ID member for the MemorySSA instantiation; its address identifies the
// pass to the legacy pass manager.
template<>
char EarlyCSEMemSSALegacyPass::ID = 0;
|
|
|
|
|
|
|
|
/// Factory for the legacy-PM EarlyCSE pass.
///
/// \param UseMemorySSA when true, build the MemorySSA-backed
///        "early-cse-memssa" flavor; otherwise the plain "early-cse" pass.
/// \returns a newly allocated pass; the caller (the pass manager) owns it.
FunctionPass *llvm::createEarlyCSEPass(bool UseMemorySSA) {
  FunctionPass *Result = nullptr;
  if (UseMemorySSA)
    Result = new EarlyCSEMemSSALegacyPass();
  else
    Result = new EarlyCSELegacyPass();
  return Result;
}
|
|
|
|
|
|
|
|
// Register the MemorySSA flavor under the "early-cse-memssa" command-line
// name; unlike plain "early-cse" it also depends on MemorySSAWrapperPass.
INITIALIZE_PASS_BEGIN(EarlyCSEMemSSALegacyPass, "early-cse-memssa",
                      "Early CSE w/ MemorySSA", false, false)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
INITIALIZE_PASS_END(EarlyCSEMemSSALegacyPass, "early-cse-memssa",
                    "Early CSE w/ MemorySSA", false, false)
|