2015-01-16 09:03:22 +08:00
|
|
|
//===-- InductiveRangeCheckElimination.cpp - ------------------------------===//
|
|
|
|
//
|
|
|
|
// The LLVM Compiler Infrastructure
|
|
|
|
//
|
|
|
|
// This file is distributed under the University of Illinois Open Source
|
|
|
|
// License. See LICENSE.TXT for details.
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// The InductiveRangeCheckElimination pass splits a loop's iteration space into
|
|
|
|
// three disjoint ranges. It does that in a way such that the loop running in
|
|
|
|
// the middle loop provably does not need range checks. As an example, it will
|
|
|
|
// convert
|
|
|
|
//
|
|
|
|
// len = < known positive >
|
|
|
|
// for (i = 0; i < n; i++) {
|
|
|
|
// if (0 <= i && i < len) {
|
|
|
|
// do_something();
|
|
|
|
// } else {
|
|
|
|
// throw_out_of_bounds();
|
|
|
|
// }
|
|
|
|
// }
|
|
|
|
//
|
|
|
|
// to
|
|
|
|
//
|
|
|
|
// len = < known positive >
|
|
|
|
// limit = smin(n, len)
|
|
|
|
// // no first segment
|
|
|
|
// for (i = 0; i < limit; i++) {
|
|
|
|
// if (0 <= i && i < len) { // this check is fully redundant
|
|
|
|
// do_something();
|
|
|
|
// } else {
|
|
|
|
// throw_out_of_bounds();
|
|
|
|
// }
|
|
|
|
// }
|
|
|
|
// for (i = limit; i < n; i++) {
|
|
|
|
// if (0 <= i && i < len) {
|
|
|
|
// do_something();
|
|
|
|
// } else {
|
|
|
|
// throw_out_of_bounds();
|
|
|
|
// }
|
|
|
|
// }
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
#include "llvm/ADT/Optional.h"
|
2015-01-28 05:38:12 +08:00
|
|
|
#include "llvm/Analysis/BranchProbabilityInfo.h"
|
2015-01-16 09:03:22 +08:00
|
|
|
#include "llvm/Analysis/LoopInfo.h"
|
|
|
|
#include "llvm/Analysis/LoopPass.h"
|
|
|
|
#include "llvm/Analysis/ScalarEvolution.h"
|
|
|
|
#include "llvm/Analysis/ScalarEvolutionExpander.h"
|
|
|
|
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
|
|
|
|
#include "llvm/IR/Dominators.h"
|
|
|
|
#include "llvm/IR/Function.h"
|
|
|
|
#include "llvm/IR/IRBuilder.h"
|
2015-03-24 03:32:43 +08:00
|
|
|
#include "llvm/IR/Instructions.h"
|
2015-01-16 09:03:22 +08:00
|
|
|
#include "llvm/IR/PatternMatch.h"
|
2015-03-24 03:32:43 +08:00
|
|
|
#include "llvm/Pass.h"
|
2015-01-16 09:03:22 +08:00
|
|
|
#include "llvm/Support/Debug.h"
|
2015-03-24 03:32:43 +08:00
|
|
|
#include "llvm/Support/raw_ostream.h"
|
2015-01-16 09:03:22 +08:00
|
|
|
#include "llvm/Transforms/Scalar.h"
|
|
|
|
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
|
|
|
|
#include "llvm/Transforms/Utils/Cloning.h"
|
|
|
|
#include "llvm/Transforms/Utils/LoopUtils.h"
|
2016-08-06 08:01:56 +08:00
|
|
|
#include "llvm/Transforms/Utils/LoopSimplify.h"
|
2015-01-16 09:03:22 +08:00
|
|
|
|
|
|
|
using namespace llvm;
|
|
|
|
|
2015-02-07 01:51:54 +08:00
|
|
|
static cl::opt<unsigned> LoopSizeCutoff("irce-loop-size-cutoff", cl::Hidden,
|
|
|
|
cl::init(64));
|
2015-01-16 09:03:22 +08:00
|
|
|
|
2015-02-07 01:51:54 +08:00
|
|
|
static cl::opt<bool> PrintChangedLoops("irce-print-changed-loops", cl::Hidden,
|
|
|
|
cl::init(false));
|
2015-01-16 09:03:22 +08:00
|
|
|
|
2015-03-17 09:40:22 +08:00
|
|
|
static cl::opt<bool> PrintRangeChecks("irce-print-range-checks", cl::Hidden,
|
|
|
|
cl::init(false));
|
|
|
|
|
2015-02-26 16:56:04 +08:00
|
|
|
static cl::opt<int> MaxExitProbReciprocal("irce-max-exit-prob-reciprocal",
|
|
|
|
cl::Hidden, cl::init(10));
|
|
|
|
|
2016-07-22 08:40:56 +08:00
|
|
|
static cl::opt<bool> SkipProfitabilityChecks("irce-skip-profitability-checks",
|
|
|
|
cl::Hidden, cl::init(false));
|
|
|
|
|
2016-08-14 09:04:36 +08:00
|
|
|
static const char *ClonedLoopTag = "irce.loop.clone";
|
|
|
|
|
2015-01-16 09:03:22 +08:00
|
|
|
#define DEBUG_TYPE "irce"
|
|
|
|
|
|
|
|
namespace {
|
|
|
|
|
|
|
|
/// An inductive range check is conditional branch in a loop with
|
|
|
|
///
|
|
|
|
/// 1. a very cold successor (i.e. the branch jumps to that successor very
|
|
|
|
/// rarely)
|
|
|
|
///
|
|
|
|
/// and
|
|
|
|
///
|
2015-03-17 08:42:13 +08:00
|
|
|
/// 2. a condition that is provably true for some contiguous range of values
|
|
|
|
/// taken by the containing loop's induction variable.
|
2015-01-16 09:03:22 +08:00
|
|
|
///
|
|
|
|
class InductiveRangeCheck {
|
2015-03-17 08:42:13 +08:00
|
|
|
// Classifies a range check
|
2015-03-18 00:50:20 +08:00
|
|
|
enum RangeCheckKind : unsigned {
|
2015-03-17 08:42:13 +08:00
|
|
|
// Range check of the form "0 <= I".
|
|
|
|
RANGE_CHECK_LOWER = 1,
|
|
|
|
|
|
|
|
// Range check of the form "I < L" where L is known positive.
|
|
|
|
RANGE_CHECK_UPPER = 2,
|
|
|
|
|
|
|
|
// The logical and of the RANGE_CHECK_LOWER and RANGE_CHECK_UPPER
|
|
|
|
// conditions.
|
|
|
|
RANGE_CHECK_BOTH = RANGE_CHECK_LOWER | RANGE_CHECK_UPPER,
|
|
|
|
|
|
|
|
// Unrecognized range check condition.
|
|
|
|
RANGE_CHECK_UNKNOWN = (unsigned)-1
|
|
|
|
};
|
|
|
|
|
2016-03-09 10:34:19 +08:00
|
|
|
static StringRef rangeCheckKindToStr(RangeCheckKind);
|
2015-03-17 08:42:13 +08:00
|
|
|
|
2016-05-26 09:50:18 +08:00
|
|
|
const SCEV *Offset = nullptr;
|
|
|
|
const SCEV *Scale = nullptr;
|
|
|
|
Value *Length = nullptr;
|
|
|
|
Use *CheckUse = nullptr;
|
|
|
|
RangeCheckKind Kind = RANGE_CHECK_UNKNOWN;
|
2015-03-17 08:42:13 +08:00
|
|
|
|
2015-03-25 03:29:18 +08:00
|
|
|
static RangeCheckKind parseRangeCheckICmp(Loop *L, ICmpInst *ICI,
|
|
|
|
ScalarEvolution &SE, Value *&Index,
|
|
|
|
Value *&Length);
|
2015-03-17 08:42:13 +08:00
|
|
|
|
2016-05-26 08:09:02 +08:00
|
|
|
static void
|
|
|
|
extractRangeChecksFromCond(Loop *L, ScalarEvolution &SE, Use &ConditionUse,
|
|
|
|
SmallVectorImpl<InductiveRangeCheck> &Checks,
|
|
|
|
SmallPtrSetImpl<Value *> &Visited);
|
2015-01-16 09:03:22 +08:00
|
|
|
|
|
|
|
public:
|
|
|
|
const SCEV *getOffset() const { return Offset; }
|
|
|
|
const SCEV *getScale() const { return Scale; }
|
|
|
|
Value *getLength() const { return Length; }
|
|
|
|
|
|
|
|
void print(raw_ostream &OS) const {
|
|
|
|
OS << "InductiveRangeCheck:\n";
|
2015-03-17 08:42:13 +08:00
|
|
|
OS << " Kind: " << rangeCheckKindToStr(Kind) << "\n";
|
2015-01-16 09:03:22 +08:00
|
|
|
OS << " Offset: ";
|
|
|
|
Offset->print(OS);
|
|
|
|
OS << " Scale: ";
|
|
|
|
Scale->print(OS);
|
|
|
|
OS << " Length: ";
|
2015-03-17 08:42:13 +08:00
|
|
|
if (Length)
|
|
|
|
Length->print(OS);
|
|
|
|
else
|
|
|
|
OS << "(null)";
|
2016-05-24 06:16:45 +08:00
|
|
|
OS << "\n CheckUse: ";
|
|
|
|
getCheckUse()->getUser()->print(OS);
|
|
|
|
OS << " Operand: " << getCheckUse()->getOperandNo() << "\n";
|
2015-01-16 09:03:22 +08:00
|
|
|
}
|
|
|
|
|
2016-08-18 23:55:49 +08:00
|
|
|
LLVM_DUMP_METHOD
|
2015-01-16 09:03:22 +08:00
|
|
|
void dump() {
|
|
|
|
print(dbgs());
|
|
|
|
}
|
|
|
|
|
2016-05-24 06:16:45 +08:00
|
|
|
Use *getCheckUse() const { return CheckUse; }
|
2015-01-16 09:03:22 +08:00
|
|
|
|
2015-01-22 17:32:02 +08:00
|
|
|
/// Represents an signed integer range [Range.getBegin(), Range.getEnd()). If
|
|
|
|
/// R.getEnd() sle R.getBegin(), then R denotes the empty range.
|
|
|
|
|
|
|
|
class Range {
|
2015-02-22 06:07:32 +08:00
|
|
|
const SCEV *Begin;
|
|
|
|
const SCEV *End;
|
2015-01-22 17:32:02 +08:00
|
|
|
|
|
|
|
public:
|
2015-02-22 06:07:32 +08:00
|
|
|
Range(const SCEV *Begin, const SCEV *End) : Begin(Begin), End(End) {
|
2015-01-22 17:32:02 +08:00
|
|
|
assert(Begin->getType() == End->getType() && "ill-typed range!");
|
|
|
|
}
|
|
|
|
|
|
|
|
Type *getType() const { return Begin->getType(); }
|
2015-02-22 06:07:32 +08:00
|
|
|
const SCEV *getBegin() const { return Begin; }
|
|
|
|
const SCEV *getEnd() const { return End; }
|
2015-01-22 17:32:02 +08:00
|
|
|
};
|
|
|
|
|
2015-01-16 09:03:22 +08:00
|
|
|
/// This is the value the condition of the branch needs to evaluate to for the
|
|
|
|
/// branch to take the hot successor (see (1) above).
|
|
|
|
bool getPassingDirection() { return true; }
|
|
|
|
|
2015-02-22 06:20:22 +08:00
|
|
|
/// Computes a range for the induction variable (IndVar) in which the range
|
|
|
|
/// check is redundant and can be constant-folded away. The induction
|
|
|
|
/// variable is not required to be the canonical {0,+,1} induction variable.
|
2015-01-16 09:03:22 +08:00
|
|
|
Optional<Range> computeSafeIterationSpace(ScalarEvolution &SE,
|
2016-05-21 10:31:51 +08:00
|
|
|
const SCEVAddRecExpr *IndVar) const;
|
2015-01-16 09:03:22 +08:00
|
|
|
|
2016-05-26 08:09:02 +08:00
|
|
|
/// Parse out a set of inductive range checks from \p BI and append them to \p
|
|
|
|
/// Checks.
|
|
|
|
///
|
|
|
|
/// NB! There may be conditions feeding into \p BI that aren't inductive range
|
|
|
|
/// checks, and hence don't end up in \p Checks.
|
|
|
|
static void
|
|
|
|
extractRangeChecksFromBranch(BranchInst *BI, Loop *L, ScalarEvolution &SE,
|
|
|
|
BranchProbabilityInfo &BPI,
|
|
|
|
SmallVectorImpl<InductiveRangeCheck> &Checks);
|
2015-01-16 09:03:22 +08:00
|
|
|
};
|
|
|
|
|
|
|
|
class InductiveRangeCheckElimination : public LoopPass {
|
|
|
|
public:
|
|
|
|
static char ID;
|
|
|
|
InductiveRangeCheckElimination() : LoopPass(ID) {
|
|
|
|
initializeInductiveRangeCheckEliminationPass(
|
|
|
|
*PassRegistry::getPassRegistry());
|
|
|
|
}
|
|
|
|
|
|
|
|
void getAnalysisUsage(AnalysisUsage &AU) const override {
|
2015-07-16 06:48:29 +08:00
|
|
|
AU.addRequired<BranchProbabilityInfoWrapperPass>();
|
[LPM] Factor all of the loop analysis usage updates into a common helper
routine.
We were getting this wrong in small ways and generally being very
inconsistent about it across loop passes. Instead, let's have a common
place where we do this. One minor downside is that this will require
some analyses like SCEV in more places than they are strictly needed.
However, this seems benign as these analyses are complete no-ops, and
without this consistency we can in many cases end up with the legacy
pass manager scheduling deciding to split up a loop pass pipeline in
order to run the function analysis half-way through. It is very, very
annoying to fix these without just being very pedantic across the board.
The only loop passes I've not updated here are ones that use
AU.setPreservesAll() such as IVUsers (an analysis) and the pass printer.
They seemed less relevant.
With this patch, almost all of the problems in PR24804 around loop pass
pipelines are fixed. The one remaining issue is that we run simplify-cfg
and instcombine in the middle of the loop pass pipeline. We've recently
added some loop variants of these passes that would seem substantially
cleaner to use, but this at least gets us much closer to the previous
state. Notably, the seven loop pass managers is down to three.
I've not updated the loop passes using LoopAccessAnalysis because that
analysis hasn't been fully wired into LoopSimplify/LCSSA, and it isn't
clear that those transforms want to support those forms anyways. They
all run late anyways, so this is harmless. Similarly, LSR is left alone
because it already carefully manages its forms and doesn't need to get
fused into a single loop pass manager with a bunch of other loop passes.
LoopReroll didn't use loop simplified form previously, and I've updated
the test case to match the trivially different output.
Finally, I've also factored all the pass initialization for the passes
that use this technique as well, so that should be done regularly and
reliably.
Thanks to James for the help reviewing and thinking about this stuff,
and Ben for help thinking about it as well!
Differential Revision: http://reviews.llvm.org/D17435
llvm-svn: 261316
2016-02-19 18:45:18 +08:00
|
|
|
getLoopAnalysisUsage(AU);
|
2015-01-16 09:03:22 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
bool runOnLoop(Loop *L, LPPassManager &LPM) override;
|
|
|
|
};
|
|
|
|
|
|
|
|
char InductiveRangeCheckElimination::ID = 0;
|
2015-06-23 17:49:53 +08:00
|
|
|
}
|
2015-01-16 09:03:22 +08:00
|
|
|
|
2015-09-09 11:47:18 +08:00
|
|
|
INITIALIZE_PASS_BEGIN(InductiveRangeCheckElimination, "irce",
|
|
|
|
"Inductive range check elimination", false, false)
|
|
|
|
INITIALIZE_PASS_DEPENDENCY(BranchProbabilityInfoWrapperPass)
|
[LPM] Factor all of the loop analysis usage updates into a common helper
routine.
We were getting this wrong in small ways and generally being very
inconsistent about it across loop passes. Instead, let's have a common
place where we do this. One minor downside is that this will require
some analyses like SCEV in more places than they are strictly needed.
However, this seems benign as these analyses are complete no-ops, and
without this consistency we can in many cases end up with the legacy
pass manager scheduling deciding to split up a loop pass pipeline in
order to run the function analysis half-way through. It is very, very
annoying to fix these without just being very pedantic across the board.
The only loop passes I've not updated here are ones that use
AU.setPreservesAll() such as IVUsers (an analysis) and the pass printer.
They seemed less relevant.
With this patch, almost all of the problems in PR24804 around loop pass
pipelines are fixed. The one remaining issue is that we run simplify-cfg
and instcombine in the middle of the loop pass pipeline. We've recently
added some loop variants of these passes that would seem substantially
cleaner to use, but this at least gets us much closer to the previous
state. Notably, the seven loop pass managers is down to three.
I've not updated the loop passes using LoopAccessAnalysis because that
analysis hasn't been fully wired into LoopSimplify/LCSSA, and it isn't
clear that those transforms want to support those forms anyways. They
all run late anyways, so this is harmless. Similarly, LSR is left alone
because it already carefully manages its forms and doesn't need to get
fused into a single loop pass manager with a bunch of other loop passes.
LoopReroll didn't use loop simplified form previously, and I've updated
the test case to match the trivially different output.
Finally, I've also factored all the pass initialization for the passes
that use this technique as well, so that should be done regularly and
reliably.
Thanks to James for the help reviewing and thinking about this stuff,
and Ben for help thinking about it as well!
Differential Revision: http://reviews.llvm.org/D17435
llvm-svn: 261316
2016-02-19 18:45:18 +08:00
|
|
|
INITIALIZE_PASS_DEPENDENCY(LoopPass)
|
2015-09-09 11:47:18 +08:00
|
|
|
INITIALIZE_PASS_END(InductiveRangeCheckElimination, "irce",
|
|
|
|
"Inductive range check elimination", false, false)
|
2015-01-16 09:03:22 +08:00
|
|
|
|
2016-03-09 10:34:19 +08:00
|
|
|
StringRef InductiveRangeCheck::rangeCheckKindToStr(
|
2015-03-17 08:42:13 +08:00
|
|
|
InductiveRangeCheck::RangeCheckKind RCK) {
|
|
|
|
switch (RCK) {
|
|
|
|
case InductiveRangeCheck::RANGE_CHECK_UNKNOWN:
|
|
|
|
return "RANGE_CHECK_UNKNOWN";
|
2015-01-16 09:03:22 +08:00
|
|
|
|
2015-03-17 08:42:13 +08:00
|
|
|
case InductiveRangeCheck::RANGE_CHECK_UPPER:
|
|
|
|
return "RANGE_CHECK_UPPER";
|
2015-01-16 09:03:22 +08:00
|
|
|
|
2015-03-17 08:42:13 +08:00
|
|
|
case InductiveRangeCheck::RANGE_CHECK_LOWER:
|
|
|
|
return "RANGE_CHECK_LOWER";
|
|
|
|
|
|
|
|
case InductiveRangeCheck::RANGE_CHECK_BOTH:
|
|
|
|
return "RANGE_CHECK_BOTH";
|
|
|
|
}
|
|
|
|
|
|
|
|
llvm_unreachable("unknown range check type!");
|
|
|
|
}
|
|
|
|
|
2016-03-09 10:34:15 +08:00
|
|
|
/// Parse a single ICmp instruction, `ICI`, into a range check. If `ICI` cannot
|
2015-03-17 08:42:13 +08:00
|
|
|
/// be interpreted as a range check, return `RANGE_CHECK_UNKNOWN` and set
|
2016-03-09 10:34:15 +08:00
|
|
|
/// `Index` and `Length` to `nullptr`. Otherwise set `Index` to the value being
|
2015-03-17 08:42:13 +08:00
|
|
|
/// range checked, and set `Length` to the upper limit `Index` is being range
|
|
|
|
/// checked with if (and only if) the range check type is stronger or equal to
|
|
|
|
/// RANGE_CHECK_UPPER.
|
|
|
|
///
|
|
|
|
InductiveRangeCheck::RangeCheckKind
|
2015-03-25 03:29:18 +08:00
|
|
|
InductiveRangeCheck::parseRangeCheckICmp(Loop *L, ICmpInst *ICI,
|
|
|
|
ScalarEvolution &SE, Value *&Index,
|
|
|
|
Value *&Length) {
|
|
|
|
|
|
|
|
auto IsNonNegativeAndNotLoopVarying = [&SE, L](Value *V) {
|
|
|
|
const SCEV *S = SE.getSCEV(V);
|
|
|
|
if (isa<SCEVCouldNotCompute>(S))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
return SE.getLoopDisposition(S, L) == ScalarEvolution::LoopInvariant &&
|
|
|
|
SE.isKnownNonNegative(S);
|
|
|
|
};
|
2015-03-17 08:42:13 +08:00
|
|
|
|
|
|
|
using namespace llvm::PatternMatch;
|
|
|
|
|
|
|
|
ICmpInst::Predicate Pred = ICI->getPredicate();
|
|
|
|
Value *LHS = ICI->getOperand(0);
|
|
|
|
Value *RHS = ICI->getOperand(1);
|
2015-01-16 09:03:22 +08:00
|
|
|
|
|
|
|
switch (Pred) {
|
|
|
|
default:
|
2015-03-17 08:42:13 +08:00
|
|
|
return RANGE_CHECK_UNKNOWN;
|
2015-01-16 09:03:22 +08:00
|
|
|
|
|
|
|
case ICmpInst::ICMP_SLE:
|
|
|
|
std::swap(LHS, RHS);
|
2016-08-17 13:10:15 +08:00
|
|
|
LLVM_FALLTHROUGH;
|
2015-01-16 09:03:22 +08:00
|
|
|
case ICmpInst::ICMP_SGE:
|
2015-03-17 08:42:13 +08:00
|
|
|
if (match(RHS, m_ConstantInt<0>())) {
|
|
|
|
Index = LHS;
|
|
|
|
return RANGE_CHECK_LOWER;
|
|
|
|
}
|
|
|
|
return RANGE_CHECK_UNKNOWN;
|
2015-01-16 09:03:22 +08:00
|
|
|
|
|
|
|
case ICmpInst::ICMP_SLT:
|
|
|
|
std::swap(LHS, RHS);
|
2016-08-17 13:10:15 +08:00
|
|
|
LLVM_FALLTHROUGH;
|
2015-01-16 09:03:22 +08:00
|
|
|
case ICmpInst::ICMP_SGT:
|
2015-03-17 08:42:13 +08:00
|
|
|
if (match(RHS, m_ConstantInt<-1>())) {
|
|
|
|
Index = LHS;
|
|
|
|
return RANGE_CHECK_LOWER;
|
|
|
|
}
|
2015-01-16 09:03:22 +08:00
|
|
|
|
2015-03-25 03:29:18 +08:00
|
|
|
if (IsNonNegativeAndNotLoopVarying(LHS)) {
|
2015-03-17 08:42:13 +08:00
|
|
|
Index = RHS;
|
|
|
|
Length = LHS;
|
|
|
|
return RANGE_CHECK_UPPER;
|
|
|
|
}
|
|
|
|
return RANGE_CHECK_UNKNOWN;
|
2015-01-16 09:03:22 +08:00
|
|
|
|
2015-03-17 08:42:13 +08:00
|
|
|
case ICmpInst::ICMP_ULT:
|
2015-01-16 09:03:22 +08:00
|
|
|
std::swap(LHS, RHS);
|
2016-08-17 13:10:15 +08:00
|
|
|
LLVM_FALLTHROUGH;
|
2015-01-16 09:03:22 +08:00
|
|
|
case ICmpInst::ICMP_UGT:
|
2015-03-25 03:29:18 +08:00
|
|
|
if (IsNonNegativeAndNotLoopVarying(LHS)) {
|
2015-03-17 08:42:13 +08:00
|
|
|
Index = RHS;
|
|
|
|
Length = LHS;
|
|
|
|
return RANGE_CHECK_BOTH;
|
|
|
|
}
|
|
|
|
return RANGE_CHECK_UNKNOWN;
|
2015-01-16 09:03:22 +08:00
|
|
|
}
|
2015-03-17 08:42:13 +08:00
|
|
|
|
|
|
|
llvm_unreachable("default clause returns!");
|
2015-01-16 09:03:22 +08:00
|
|
|
}
|
|
|
|
|
2016-05-26 08:09:02 +08:00
|
|
|
void InductiveRangeCheck::extractRangeChecksFromCond(
|
|
|
|
Loop *L, ScalarEvolution &SE, Use &ConditionUse,
|
|
|
|
SmallVectorImpl<InductiveRangeCheck> &Checks,
|
|
|
|
SmallPtrSetImpl<Value *> &Visited) {
|
2015-01-16 09:03:22 +08:00
|
|
|
using namespace llvm::PatternMatch;
|
|
|
|
|
2016-05-26 08:08:24 +08:00
|
|
|
Value *Condition = ConditionUse.get();
|
2016-05-26 08:09:02 +08:00
|
|
|
if (!Visited.insert(Condition).second)
|
|
|
|
return;
|
2016-05-26 08:08:24 +08:00
|
|
|
|
2016-05-26 08:09:02 +08:00
|
|
|
if (match(Condition, m_And(m_Value(), m_Value()))) {
|
|
|
|
SmallVector<InductiveRangeCheck, 8> SubChecks;
|
|
|
|
extractRangeChecksFromCond(L, SE, cast<User>(Condition)->getOperandUse(0),
|
|
|
|
SubChecks, Visited);
|
|
|
|
extractRangeChecksFromCond(L, SE, cast<User>(Condition)->getOperandUse(1),
|
|
|
|
SubChecks, Visited);
|
|
|
|
|
|
|
|
if (SubChecks.size() == 2) {
|
|
|
|
// Handle a special case where we know how to merge two checks separately
|
|
|
|
// checking the upper and lower bounds into a full range check.
|
|
|
|
const auto &RChkA = SubChecks[0];
|
|
|
|
const auto &RChkB = SubChecks[1];
|
|
|
|
if ((RChkA.Length == RChkB.Length || !RChkA.Length || !RChkB.Length) &&
|
|
|
|
RChkA.Offset == RChkB.Offset && RChkA.Scale == RChkB.Scale) {
|
|
|
|
|
|
|
|
// If RChkA.Kind == RChkB.Kind then we just found two identical checks.
|
|
|
|
// But if one of them is a RANGE_CHECK_LOWER and the other is a
|
|
|
|
// RANGE_CHECK_UPPER (only possibility if they're different) then
|
|
|
|
// together they form a RANGE_CHECK_BOTH.
|
|
|
|
SubChecks[0].Kind =
|
|
|
|
(InductiveRangeCheck::RangeCheckKind)(RChkA.Kind | RChkB.Kind);
|
|
|
|
SubChecks[0].Length = RChkA.Length ? RChkA.Length : RChkB.Length;
|
|
|
|
SubChecks[0].CheckUse = &ConditionUse;
|
|
|
|
|
|
|
|
// We updated one of the checks in place, now erase the other.
|
|
|
|
SubChecks.pop_back();
|
|
|
|
}
|
|
|
|
}
|
2015-01-16 09:03:22 +08:00
|
|
|
|
2016-05-26 08:09:02 +08:00
|
|
|
Checks.insert(Checks.end(), SubChecks.begin(), SubChecks.end());
|
|
|
|
return;
|
|
|
|
}
|
2015-01-16 09:03:22 +08:00
|
|
|
|
2016-05-26 08:09:02 +08:00
|
|
|
ICmpInst *ICI = dyn_cast<ICmpInst>(Condition);
|
|
|
|
if (!ICI)
|
|
|
|
return;
|
2015-01-16 09:03:22 +08:00
|
|
|
|
2016-05-26 08:09:02 +08:00
|
|
|
Value *Length = nullptr, *Index;
|
|
|
|
auto RCKind = parseRangeCheckICmp(L, ICI, SE, Index, Length);
|
|
|
|
if (RCKind == InductiveRangeCheck::RANGE_CHECK_UNKNOWN)
|
|
|
|
return;
|
2015-01-16 09:03:22 +08:00
|
|
|
|
2016-05-26 08:08:24 +08:00
|
|
|
const auto *IndexAddRec = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(Index));
|
|
|
|
bool IsAffineIndex =
|
|
|
|
IndexAddRec && (IndexAddRec->getLoop() == L) && IndexAddRec->isAffine();
|
2015-01-16 09:03:22 +08:00
|
|
|
|
2016-05-26 08:08:24 +08:00
|
|
|
if (!IsAffineIndex)
|
2016-05-26 08:09:02 +08:00
|
|
|
return;
|
2016-05-26 08:08:24 +08:00
|
|
|
|
|
|
|
InductiveRangeCheck IRC;
|
|
|
|
IRC.Length = Length;
|
|
|
|
IRC.Offset = IndexAddRec->getStart();
|
|
|
|
IRC.Scale = IndexAddRec->getStepRecurrence(SE);
|
|
|
|
IRC.CheckUse = &ConditionUse;
|
|
|
|
IRC.Kind = RCKind;
|
2016-05-26 08:09:02 +08:00
|
|
|
Checks.push_back(IRC);
|
2015-01-16 09:03:22 +08:00
|
|
|
}
|
|
|
|
|
2016-05-26 08:09:02 +08:00
|
|
|
void InductiveRangeCheck::extractRangeChecksFromBranch(
|
|
|
|
BranchInst *BI, Loop *L, ScalarEvolution &SE, BranchProbabilityInfo &BPI,
|
|
|
|
SmallVectorImpl<InductiveRangeCheck> &Checks) {
|
2015-01-16 09:03:22 +08:00
|
|
|
|
|
|
|
if (BI->isUnconditional() || BI->getParent() == L->getLoopLatch())
|
2016-05-26 08:09:02 +08:00
|
|
|
return;
|
2015-01-16 09:03:22 +08:00
|
|
|
|
2015-01-28 05:38:12 +08:00
|
|
|
BranchProbability LikelyTaken(15, 16);
|
|
|
|
|
2016-07-22 08:40:56 +08:00
|
|
|
if (!SkipProfitabilityChecks &&
|
|
|
|
BPI.getEdgeProbability(BI->getParent(), (unsigned)0) < LikelyTaken)
|
2016-05-26 08:09:02 +08:00
|
|
|
return;
|
2015-01-16 09:03:22 +08:00
|
|
|
|
2016-05-26 08:09:02 +08:00
|
|
|
SmallPtrSet<Value *, 8> Visited;
|
|
|
|
InductiveRangeCheck::extractRangeChecksFromCond(L, SE, BI->getOperandUse(0),
|
|
|
|
Checks, Visited);
|
2015-01-16 09:03:22 +08:00
|
|
|
}
|
|
|
|
|
2016-12-14 05:05:21 +08:00
|
|
|
// Add metadata to the loop L to disable loop optimizations. Callers need to
|
|
|
|
// confirm that optimizing loop L is not beneficial.
|
|
|
|
static void DisableAllLoopOptsOnLoop(Loop &L) {
|
|
|
|
// We do not care about any existing loopID related metadata for L, since we
|
|
|
|
// are setting all loop metadata to false.
|
|
|
|
LLVMContext &Context = L.getHeader()->getContext();
|
|
|
|
// Reserve first location for self reference to the LoopID metadata node.
|
|
|
|
MDNode *Dummy = MDNode::get(Context, {});
|
|
|
|
MDNode *DisableUnroll = MDNode::get(
|
|
|
|
Context, {MDString::get(Context, "llvm.loop.unroll.disable")});
|
|
|
|
Metadata *FalseVal =
|
|
|
|
ConstantAsMetadata::get(ConstantInt::get(Type::getInt1Ty(Context), 0));
|
|
|
|
MDNode *DisableVectorize = MDNode::get(
|
|
|
|
Context,
|
|
|
|
{MDString::get(Context, "llvm.loop.vectorize.enable"), FalseVal});
|
|
|
|
MDNode *DisableLICMVersioning = MDNode::get(
|
|
|
|
Context, {MDString::get(Context, "llvm.loop.licm_versioning.disable")});
|
|
|
|
MDNode *DisableDistribution= MDNode::get(
|
|
|
|
Context,
|
|
|
|
{MDString::get(Context, "llvm.loop.distribute.enable"), FalseVal});
|
|
|
|
MDNode *NewLoopID =
|
|
|
|
MDNode::get(Context, {Dummy, DisableUnroll, DisableVectorize,
|
|
|
|
DisableLICMVersioning, DisableDistribution});
|
|
|
|
// Set operand 0 to refer to the loop id itself.
|
|
|
|
NewLoopID->replaceOperandWith(0, NewLoopID);
|
|
|
|
L.setLoopID(NewLoopID);
|
|
|
|
}
|
|
|
|
|
2015-01-16 09:03:22 +08:00
|
|
|
namespace {
|
|
|
|
|
2015-02-26 16:19:31 +08:00
|
|
|
// Keeps track of the structure of a loop. This is similar to llvm::Loop,
|
|
|
|
// except that it is more lightweight and can track the state of a loop through
|
|
|
|
// changing and potentially invalid IR. This structure also formalizes the
|
|
|
|
// kinds of loops we can deal with -- ones that have a single latch that is also
|
|
|
|
// an exiting block *and* have a canonical induction variable.
|
|
|
|
struct LoopStructure {
|
|
|
|
const char *Tag;
|
|
|
|
|
|
|
|
BasicBlock *Header;
|
|
|
|
BasicBlock *Latch;
|
|
|
|
|
|
|
|
// `Latch's terminator instruction is `LatchBr', and it's `LatchBrExitIdx'th
|
|
|
|
// successor is `LatchExit', the exit block of the loop.
|
|
|
|
BranchInst *LatchBr;
|
|
|
|
BasicBlock *LatchExit;
|
|
|
|
unsigned LatchBrExitIdx;
|
|
|
|
|
|
|
|
Value *IndVarNext;
|
|
|
|
Value *IndVarStart;
|
|
|
|
Value *LoopExitAt;
|
|
|
|
bool IndVarIncreasing;
|
|
|
|
|
|
|
|
LoopStructure()
|
|
|
|
: Tag(""), Header(nullptr), Latch(nullptr), LatchBr(nullptr),
|
|
|
|
LatchExit(nullptr), LatchBrExitIdx(-1), IndVarNext(nullptr),
|
|
|
|
IndVarStart(nullptr), LoopExitAt(nullptr), IndVarIncreasing(false) {}
|
|
|
|
|
|
|
|
template <typename M> LoopStructure map(M Map) const {
|
|
|
|
LoopStructure Result;
|
|
|
|
Result.Tag = Tag;
|
|
|
|
Result.Header = cast<BasicBlock>(Map(Header));
|
|
|
|
Result.Latch = cast<BasicBlock>(Map(Latch));
|
|
|
|
Result.LatchBr = cast<BranchInst>(Map(LatchBr));
|
|
|
|
Result.LatchExit = cast<BasicBlock>(Map(LatchExit));
|
|
|
|
Result.LatchBrExitIdx = LatchBrExitIdx;
|
|
|
|
Result.IndVarNext = Map(IndVarNext);
|
|
|
|
Result.IndVarStart = Map(IndVarStart);
|
|
|
|
Result.LoopExitAt = Map(LoopExitAt);
|
|
|
|
Result.IndVarIncreasing = IndVarIncreasing;
|
|
|
|
return Result;
|
|
|
|
}
|
|
|
|
|
2015-02-26 16:56:04 +08:00
|
|
|
static Optional<LoopStructure> parseLoopStructure(ScalarEvolution &,
|
|
|
|
BranchProbabilityInfo &BPI,
|
|
|
|
Loop &,
|
2015-02-26 16:19:31 +08:00
|
|
|
const char *&);
|
|
|
|
};
|
|
|
|
|
2015-01-16 09:03:22 +08:00
|
|
|
/// This class is used to constrain loops to run within a given iteration space.
|
|
|
|
/// The algorithm this class implements is given a Loop and a range [Begin,
|
|
|
|
/// End). The algorithm then tries to break out a "main loop" out of the loop
|
|
|
|
/// it is given in a way that the "main loop" runs with the induction variable
|
|
|
|
/// in a subset of [Begin, End). The algorithm emits appropriate pre and post
|
|
|
|
/// loops to run any remaining iterations. The pre loop runs any iterations in
|
|
|
|
/// which the induction variable is < Begin, and the post loop runs any
|
|
|
|
/// iterations in which the induction variable is >= End.
|
|
|
|
///
|
|
|
|
class LoopConstrainer {
|
|
|
|
// The representation of a clone of the original loop we started out with.
|
|
|
|
struct ClonedLoop {
|
|
|
|
// The cloned blocks
|
|
|
|
std::vector<BasicBlock *> Blocks;
|
|
|
|
|
|
|
|
// `Map` maps values in the clonee into values in the cloned version
|
|
|
|
ValueToValueMapTy Map;
|
|
|
|
|
|
|
|
// An instance of `LoopStructure` for the cloned loop
|
|
|
|
LoopStructure Structure;
|
|
|
|
};
|
|
|
|
|
|
|
|
// Result of rewriting the range of a loop. See changeIterationSpaceEnd for
|
|
|
|
// more details on what these fields mean.
|
|
|
|
struct RewrittenRangeInfo {
|
|
|
|
BasicBlock *PseudoExit;
|
|
|
|
BasicBlock *ExitSelector;
|
|
|
|
std::vector<PHINode *> PHIValuesAtPseudoExit;
|
2015-02-26 16:19:31 +08:00
|
|
|
PHINode *IndVarEnd;
|
2015-01-16 09:03:22 +08:00
|
|
|
|
2015-02-26 16:19:31 +08:00
|
|
|
RewrittenRangeInfo()
|
|
|
|
: PseudoExit(nullptr), ExitSelector(nullptr), IndVarEnd(nullptr) {}
|
2015-01-16 09:03:22 +08:00
|
|
|
};
|
|
|
|
|
|
|
|
// Calculated subranges we restrict the iteration space of the main loop to.
|
|
|
|
// See the implementation of `calculateSubRanges' for more details on how
|
2015-02-26 16:19:31 +08:00
|
|
|
// these fields are computed. `LowLimit` is None if there is no restriction
|
|
|
|
// on low end of the restricted iteration space of the main loop. `HighLimit`
|
|
|
|
// is None if there is no restriction on high end of the restricted iteration
|
|
|
|
// space of the main loop.
|
|
|
|
|
2015-01-16 09:03:22 +08:00
|
|
|
struct SubRanges {
|
2015-02-26 16:19:31 +08:00
|
|
|
Optional<const SCEV *> LowLimit;
|
|
|
|
Optional<const SCEV *> HighLimit;
|
2015-01-16 09:03:22 +08:00
|
|
|
};
|
|
|
|
|
|
|
|
// A utility function that does a `replaceUsesOfWith' on the incoming block
|
|
|
|
// set of a `PHINode' -- replaces instances of `Block' in the `PHINode's
|
|
|
|
// incoming block list with `ReplaceBy'.
|
|
|
|
static void replacePHIBlock(PHINode *PN, BasicBlock *Block,
|
|
|
|
BasicBlock *ReplaceBy);
|
|
|
|
|
|
|
|
// Compute a safe set of limits for the main loop to run in -- effectively the
|
|
|
|
// intersection of `Range' and the iteration space of the original loop.
|
2015-01-22 16:29:18 +08:00
|
|
|
// Return None if unable to compute the set of subranges.
|
2015-01-16 09:03:22 +08:00
|
|
|
//
|
2015-02-26 16:19:31 +08:00
|
|
|
Optional<SubRanges> calculateSubRanges() const;
|
2015-01-16 09:03:22 +08:00
|
|
|
|
|
|
|
// Clone `OriginalLoop' and return the result in CLResult. The IR after
|
|
|
|
// running `cloneLoop' is well formed except for the PHI nodes in CLResult --
|
|
|
|
// the PHI nodes say that there is an incoming edge from `OriginalPreheader`
|
|
|
|
// but there is no such edge.
|
|
|
|
//
|
|
|
|
void cloneLoop(ClonedLoop &CLResult, const char *Tag) const;
|
|
|
|
|
2016-08-14 09:04:46 +08:00
|
|
|
// Create the appropriate loop structure needed to describe a cloned copy of
|
|
|
|
// `Original`. The clone is described by `VM`.
|
|
|
|
Loop *createClonedLoopStructure(Loop *Original, Loop *Parent,
|
|
|
|
ValueToValueMapTy &VM);
|
|
|
|
|
2015-01-16 09:03:22 +08:00
|
|
|
// Rewrite the iteration space of the loop denoted by (LS, Preheader). The
|
|
|
|
// iteration space of the rewritten loop ends at ExitLoopAt. The start of the
|
|
|
|
// iteration space is not changed. `ExitLoopAt' is assumed to be slt
|
|
|
|
// `OriginalHeaderCount'.
|
|
|
|
//
|
|
|
|
// If there are iterations left to execute, control is made to jump to
|
|
|
|
// `ContinuationBlock', otherwise they take the normal loop exit. The
|
|
|
|
// returned `RewrittenRangeInfo' object is populated as follows:
|
|
|
|
//
|
|
|
|
// .PseudoExit is a basic block that unconditionally branches to
|
|
|
|
// `ContinuationBlock'.
|
|
|
|
//
|
|
|
|
// .ExitSelector is a basic block that decides, on exit from the loop,
|
|
|
|
// whether to branch to the "true" exit or to `PseudoExit'.
|
|
|
|
//
|
|
|
|
// .PHIValuesAtPseudoExit are PHINodes in `PseudoExit' that compute the value
|
|
|
|
// for each PHINode in the loop header on taking the pseudo exit.
|
|
|
|
//
|
|
|
|
// After changeIterationSpaceEnd, `Preheader' is no longer a legitimate
|
|
|
|
// preheader because it is made to branch to the loop header only
|
|
|
|
// conditionally.
|
|
|
|
//
|
|
|
|
RewrittenRangeInfo
|
|
|
|
changeIterationSpaceEnd(const LoopStructure &LS, BasicBlock *Preheader,
|
|
|
|
Value *ExitLoopAt,
|
|
|
|
BasicBlock *ContinuationBlock) const;
|
|
|
|
|
|
|
|
// The loop denoted by `LS' has `OldPreheader' as its preheader. This
|
|
|
|
// function creates a new preheader for `LS' and returns it.
|
|
|
|
//
|
2015-02-26 16:19:31 +08:00
|
|
|
BasicBlock *createPreheader(const LoopStructure &LS, BasicBlock *OldPreheader,
|
|
|
|
const char *Tag) const;
|
2015-01-16 09:03:22 +08:00
|
|
|
|
|
|
|
// `ContinuationBlockAndPreheader' was the continuation block for some call to
|
|
|
|
// `changeIterationSpaceEnd' and is the preheader to the loop denoted by `LS'.
|
|
|
|
// This function rewrites the PHI nodes in `LS.Header' to start with the
|
|
|
|
// correct value.
|
|
|
|
void rewriteIncomingValuesForPHIs(
|
2015-02-26 16:19:31 +08:00
|
|
|
LoopStructure &LS, BasicBlock *ContinuationBlockAndPreheader,
|
2015-01-16 09:03:22 +08:00
|
|
|
const LoopConstrainer::RewrittenRangeInfo &RRI) const;
|
|
|
|
|
|
|
|
// Even though we do not preserve any passes at this time, we at least need to
|
|
|
|
// keep the parent loop structure consistent. The `LPPassManager' seems to
|
|
|
|
// verify this after running a loop pass. This function adds the list of
|
2015-02-06 22:43:49 +08:00
|
|
|
// blocks denoted by BBs to this loops parent loop if required.
|
|
|
|
void addToParentLoopIfNeeded(ArrayRef<BasicBlock *> BBs);
|
2015-01-16 09:03:22 +08:00
|
|
|
|
|
|
|
// Some global state.
|
|
|
|
Function &F;
|
|
|
|
LLVMContext &Ctx;
|
|
|
|
ScalarEvolution &SE;
|
2016-08-03 03:31:54 +08:00
|
|
|
DominatorTree &DT;
|
2016-08-14 09:04:46 +08:00
|
|
|
LPPassManager &LPM;
|
2016-08-14 09:04:50 +08:00
|
|
|
LoopInfo &LI;
|
2015-01-16 09:03:22 +08:00
|
|
|
|
|
|
|
// Information about the original loop we started out with.
|
|
|
|
Loop &OriginalLoop;
|
|
|
|
const SCEV *LatchTakenCount;
|
|
|
|
BasicBlock *OriginalPreheader;
|
|
|
|
|
|
|
|
// The preheader of the main loop. This may or may not be different from
|
|
|
|
// `OriginalPreheader'.
|
|
|
|
BasicBlock *MainLoopPreheader;
|
|
|
|
|
|
|
|
// The range we need to run the main loop in.
|
|
|
|
InductiveRangeCheck::Range Range;
|
|
|
|
|
|
|
|
// The structure of the main loop (see comment at the beginning of this class
|
|
|
|
// for a definition)
|
|
|
|
LoopStructure MainLoopStructure;
|
|
|
|
|
|
|
|
public:
|
2016-08-14 09:04:46 +08:00
|
|
|
LoopConstrainer(Loop &L, LoopInfo &LI, LPPassManager &LPM,
|
|
|
|
const LoopStructure &LS, ScalarEvolution &SE,
|
|
|
|
DominatorTree &DT, InductiveRangeCheck::Range R)
|
2015-02-26 16:19:31 +08:00
|
|
|
: F(*L.getHeader()->getParent()), Ctx(L.getHeader()->getContext()),
|
2016-08-14 09:04:50 +08:00
|
|
|
SE(SE), DT(DT), LPM(LPM), LI(LI), OriginalLoop(L),
|
2016-08-14 09:04:46 +08:00
|
|
|
LatchTakenCount(nullptr), OriginalPreheader(nullptr),
|
|
|
|
MainLoopPreheader(nullptr), Range(R), MainLoopStructure(LS) {}
|
2015-01-16 09:03:22 +08:00
|
|
|
|
|
|
|
// Entry point for the algorithm. Returns true on success.
|
|
|
|
bool run();
|
|
|
|
};
|
|
|
|
|
2015-06-23 17:49:53 +08:00
|
|
|
}
|
2015-01-16 09:03:22 +08:00
|
|
|
|
|
|
|
void LoopConstrainer::replacePHIBlock(PHINode *PN, BasicBlock *Block,
|
|
|
|
BasicBlock *ReplaceBy) {
|
|
|
|
for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
|
|
|
|
if (PN->getIncomingBlock(i) == Block)
|
|
|
|
PN->setIncomingBlock(i, ReplaceBy);
|
|
|
|
}
|
|
|
|
|
2015-02-26 16:19:31 +08:00
|
|
|
static bool CanBeSMax(ScalarEvolution &SE, const SCEV *S) {
|
|
|
|
APInt SMax =
|
|
|
|
APInt::getSignedMaxValue(cast<IntegerType>(S->getType())->getBitWidth());
|
|
|
|
return SE.getSignedRange(S).contains(SMax) &&
|
|
|
|
SE.getUnsignedRange(S).contains(SMax);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool CanBeSMin(ScalarEvolution &SE, const SCEV *S) {
|
|
|
|
APInt SMin =
|
|
|
|
APInt::getSignedMinValue(cast<IntegerType>(S->getType())->getBitWidth());
|
|
|
|
return SE.getSignedRange(S).contains(SMin) &&
|
|
|
|
SE.getUnsignedRange(S).contains(SMin);
|
|
|
|
}
|
2015-01-16 09:03:22 +08:00
|
|
|
|
2015-02-26 16:19:31 +08:00
|
|
|
Optional<LoopStructure>
|
2015-02-26 16:56:04 +08:00
|
|
|
LoopStructure::parseLoopStructure(ScalarEvolution &SE, BranchProbabilityInfo &BPI,
|
|
|
|
Loop &L, const char *&FailureReason) {
|
2016-08-14 09:04:31 +08:00
|
|
|
if (!L.isLoopSimplifyForm()) {
|
|
|
|
FailureReason = "loop not in LoopSimplify form";
|
2016-08-14 07:36:35 +08:00
|
|
|
return None;
|
2016-08-14 09:04:31 +08:00
|
|
|
}
|
2015-01-16 09:03:22 +08:00
|
|
|
|
2015-02-26 16:19:31 +08:00
|
|
|
BasicBlock *Latch = L.getLoopLatch();
|
2016-08-14 07:36:35 +08:00
|
|
|
assert(Latch && "Simplified loops only have one latch!");
|
|
|
|
|
2016-08-14 09:04:36 +08:00
|
|
|
if (Latch->getTerminator()->getMetadata(ClonedLoopTag)) {
|
|
|
|
FailureReason = "loop has already been cloned";
|
|
|
|
return None;
|
|
|
|
}
|
|
|
|
|
2015-02-26 16:19:31 +08:00
|
|
|
if (!L.isLoopExiting(Latch)) {
|
2015-01-16 09:03:22 +08:00
|
|
|
FailureReason = "no loop latch";
|
2015-02-26 16:19:31 +08:00
|
|
|
return None;
|
2015-01-16 09:03:22 +08:00
|
|
|
}
|
|
|
|
|
2015-02-26 16:19:31 +08:00
|
|
|
BasicBlock *Header = L.getHeader();
|
|
|
|
BasicBlock *Preheader = L.getLoopPreheader();
|
2015-01-16 09:03:22 +08:00
|
|
|
if (!Preheader) {
|
|
|
|
FailureReason = "no preheader";
|
2015-02-26 16:19:31 +08:00
|
|
|
return None;
|
2015-01-16 09:03:22 +08:00
|
|
|
}
|
|
|
|
|
2016-06-24 02:03:26 +08:00
|
|
|
BranchInst *LatchBr = dyn_cast<BranchInst>(Latch->getTerminator());
|
2015-02-26 16:19:31 +08:00
|
|
|
if (!LatchBr || LatchBr->isUnconditional()) {
|
|
|
|
FailureReason = "latch terminator not conditional branch";
|
|
|
|
return None;
|
|
|
|
}
|
2015-01-16 09:03:22 +08:00
|
|
|
|
2015-02-26 16:19:31 +08:00
|
|
|
unsigned LatchBrExitIdx = LatchBr->getSuccessor(0) == Header ? 1 : 0;
|
|
|
|
|
2015-02-26 16:56:04 +08:00
|
|
|
BranchProbability ExitProbability =
|
|
|
|
BPI.getEdgeProbability(LatchBr->getParent(), LatchBrExitIdx);
|
|
|
|
|
2016-07-22 08:40:56 +08:00
|
|
|
if (!SkipProfitabilityChecks &&
|
|
|
|
ExitProbability > BranchProbability(1, MaxExitProbReciprocal)) {
|
2015-02-26 16:56:04 +08:00
|
|
|
FailureReason = "short running loop, not profitable";
|
|
|
|
return None;
|
|
|
|
}
|
|
|
|
|
2015-02-26 16:19:31 +08:00
|
|
|
ICmpInst *ICI = dyn_cast<ICmpInst>(LatchBr->getCondition());
|
|
|
|
if (!ICI || !isa<IntegerType>(ICI->getOperand(0)->getType())) {
|
|
|
|
FailureReason = "latch terminator branch not conditional on integral icmp";
|
|
|
|
return None;
|
|
|
|
}
|
|
|
|
|
|
|
|
const SCEV *LatchCount = SE.getExitCount(&L, Latch);
|
2015-01-16 09:03:22 +08:00
|
|
|
if (isa<SCEVCouldNotCompute>(LatchCount)) {
|
|
|
|
FailureReason = "could not compute latch count";
|
2015-02-26 16:19:31 +08:00
|
|
|
return None;
|
2015-01-16 09:03:22 +08:00
|
|
|
}
|
|
|
|
|
2015-02-26 16:19:31 +08:00
|
|
|
ICmpInst::Predicate Pred = ICI->getPredicate();
|
|
|
|
Value *LeftValue = ICI->getOperand(0);
|
|
|
|
const SCEV *LeftSCEV = SE.getSCEV(LeftValue);
|
|
|
|
IntegerType *IndVarTy = cast<IntegerType>(LeftValue->getType());
|
|
|
|
|
|
|
|
Value *RightValue = ICI->getOperand(1);
|
|
|
|
const SCEV *RightSCEV = SE.getSCEV(RightValue);
|
|
|
|
|
|
|
|
// We canonicalize `ICI` such that `LeftSCEV` is an add recurrence.
|
|
|
|
if (!isa<SCEVAddRecExpr>(LeftSCEV)) {
|
|
|
|
if (isa<SCEVAddRecExpr>(RightSCEV)) {
|
|
|
|
std::swap(LeftSCEV, RightSCEV);
|
|
|
|
std::swap(LeftValue, RightValue);
|
|
|
|
Pred = ICmpInst::getSwappedPredicate(Pred);
|
|
|
|
} else {
|
|
|
|
FailureReason = "no add recurrences in the icmp";
|
|
|
|
return None;
|
|
|
|
}
|
|
|
|
}
|
2015-01-16 09:03:22 +08:00
|
|
|
|
2015-03-25 03:29:22 +08:00
|
|
|
auto HasNoSignedWrap = [&](const SCEVAddRecExpr *AR) {
|
|
|
|
if (AR->getNoWrapFlags(SCEV::FlagNSW))
|
|
|
|
return true;
|
2015-01-16 09:03:22 +08:00
|
|
|
|
2015-02-26 16:19:31 +08:00
|
|
|
IntegerType *Ty = cast<IntegerType>(AR->getType());
|
|
|
|
IntegerType *WideTy =
|
|
|
|
IntegerType::get(Ty->getContext(), Ty->getBitWidth() * 2);
|
2015-01-16 09:03:22 +08:00
|
|
|
|
2015-02-26 16:19:31 +08:00
|
|
|
const SCEVAddRecExpr *ExtendAfterOp =
|
|
|
|
dyn_cast<SCEVAddRecExpr>(SE.getSignExtendExpr(AR, WideTy));
|
2015-03-25 03:29:22 +08:00
|
|
|
if (ExtendAfterOp) {
|
|
|
|
const SCEV *ExtendedStart = SE.getSignExtendExpr(AR->getStart(), WideTy);
|
|
|
|
const SCEV *ExtendedStep =
|
|
|
|
SE.getSignExtendExpr(AR->getStepRecurrence(SE), WideTy);
|
|
|
|
|
|
|
|
bool NoSignedWrap = ExtendAfterOp->getStart() == ExtendedStart &&
|
|
|
|
ExtendAfterOp->getStepRecurrence(SE) == ExtendedStep;
|
|
|
|
|
|
|
|
if (NoSignedWrap)
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
// We may have proved this when computing the sign extension above.
|
|
|
|
return AR->getNoWrapFlags(SCEV::FlagNSW) != SCEV::FlagAnyWrap;
|
|
|
|
};
|
2015-01-16 09:03:22 +08:00
|
|
|
|
2015-03-25 03:29:22 +08:00
|
|
|
auto IsInductionVar = [&](const SCEVAddRecExpr *AR, bool &IsIncreasing) {
|
|
|
|
if (!AR->isAffine())
|
|
|
|
return false;
|
2015-02-26 16:19:31 +08:00
|
|
|
|
2015-03-25 03:29:22 +08:00
|
|
|
// Currently we only work with induction variables that have been proved to
|
|
|
|
// not wrap. This restriction can potentially be lifted in the future.
|
2015-02-26 16:19:31 +08:00
|
|
|
|
2015-03-25 03:29:22 +08:00
|
|
|
if (!HasNoSignedWrap(AR))
|
2015-02-26 16:19:31 +08:00
|
|
|
return false;
|
|
|
|
|
|
|
|
if (const SCEVConstant *StepExpr =
|
|
|
|
dyn_cast<SCEVConstant>(AR->getStepRecurrence(SE))) {
|
|
|
|
ConstantInt *StepCI = StepExpr->getValue();
|
|
|
|
if (StepCI->isOne() || StepCI->isMinusOne()) {
|
|
|
|
IsIncreasing = StepCI->isOne();
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
2015-01-16 09:03:22 +08:00
|
|
|
|
|
|
|
return false;
|
2015-02-26 16:19:31 +08:00
|
|
|
};
|
|
|
|
|
|
|
|
// `ICI` is interpreted as taking the backedge if the *next* value of the
|
|
|
|
// induction variable satisfies some constraint.
|
|
|
|
|
|
|
|
const SCEVAddRecExpr *IndVarNext = cast<SCEVAddRecExpr>(LeftSCEV);
|
|
|
|
bool IsIncreasing = false;
|
|
|
|
if (!IsInductionVar(IndVarNext, IsIncreasing)) {
|
|
|
|
FailureReason = "LHS in icmp not induction variable";
|
|
|
|
return None;
|
2015-01-16 09:03:22 +08:00
|
|
|
}
|
|
|
|
|
2015-02-26 16:19:31 +08:00
|
|
|
ConstantInt *One = ConstantInt::get(IndVarTy, 1);
|
|
|
|
// TODO: generalize the predicates here to also match their unsigned variants.
|
|
|
|
if (IsIncreasing) {
|
|
|
|
bool FoundExpectedPred =
|
|
|
|
(Pred == ICmpInst::ICMP_SLT && LatchBrExitIdx == 1) ||
|
|
|
|
(Pred == ICmpInst::ICMP_SGT && LatchBrExitIdx == 0);
|
|
|
|
|
|
|
|
if (!FoundExpectedPred) {
|
|
|
|
FailureReason = "expected icmp slt semantically, found something else";
|
|
|
|
return None;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (LatchBrExitIdx == 0) {
|
|
|
|
if (CanBeSMax(SE, RightSCEV)) {
|
|
|
|
// TODO: this restriction is easily removable -- we just have to
|
|
|
|
// remember that the icmp was an slt and not an sle.
|
|
|
|
FailureReason = "limit may overflow when coercing sle to slt";
|
|
|
|
return None;
|
|
|
|
}
|
|
|
|
|
2016-06-24 02:03:26 +08:00
|
|
|
IRBuilder<> B(Preheader->getTerminator());
|
2015-02-26 16:19:31 +08:00
|
|
|
RightValue = B.CreateAdd(RightValue, One);
|
|
|
|
}
|
|
|
|
|
|
|
|
} else {
|
|
|
|
bool FoundExpectedPred =
|
|
|
|
(Pred == ICmpInst::ICMP_SGT && LatchBrExitIdx == 1) ||
|
|
|
|
(Pred == ICmpInst::ICMP_SLT && LatchBrExitIdx == 0);
|
|
|
|
|
|
|
|
if (!FoundExpectedPred) {
|
|
|
|
FailureReason = "expected icmp sgt semantically, found something else";
|
|
|
|
return None;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (LatchBrExitIdx == 0) {
|
|
|
|
if (CanBeSMin(SE, RightSCEV)) {
|
|
|
|
// TODO: this restriction is easily removable -- we just have to
|
|
|
|
// remember that the icmp was an sgt and not an sge.
|
|
|
|
FailureReason = "limit may overflow when coercing sge to sgt";
|
|
|
|
return None;
|
|
|
|
}
|
|
|
|
|
2016-06-24 02:03:26 +08:00
|
|
|
IRBuilder<> B(Preheader->getTerminator());
|
2015-02-26 16:19:31 +08:00
|
|
|
RightValue = B.CreateSub(RightValue, One);
|
|
|
|
}
|
2015-01-16 09:03:22 +08:00
|
|
|
}
|
|
|
|
|
2015-02-26 16:19:31 +08:00
|
|
|
const SCEV *StartNext = IndVarNext->getStart();
|
|
|
|
const SCEV *Addend = SE.getNegativeSCEV(IndVarNext->getStepRecurrence(SE));
|
|
|
|
const SCEV *IndVarStart = SE.getAddExpr(StartNext, Addend);
|
|
|
|
|
2015-01-16 09:03:22 +08:00
|
|
|
BasicBlock *LatchExit = LatchBr->getSuccessor(LatchBrExitIdx);
|
|
|
|
|
2015-02-26 16:19:31 +08:00
|
|
|
assert(SE.getLoopDisposition(LatchCount, &L) ==
|
2015-01-16 09:03:22 +08:00
|
|
|
ScalarEvolution::LoopInvariant &&
|
|
|
|
"loop variant exit count doesn't make sense!");
|
|
|
|
|
2015-02-26 16:19:31 +08:00
|
|
|
assert(!L.contains(LatchExit) && "expected an exit block!");
|
2015-03-10 10:37:25 +08:00
|
|
|
const DataLayout &DL = Preheader->getModule()->getDataLayout();
|
|
|
|
Value *IndVarStartV =
|
|
|
|
SCEVExpander(SE, DL, "irce")
|
2016-06-24 02:03:26 +08:00
|
|
|
.expandCodeFor(IndVarStart, IndVarTy, Preheader->getTerminator());
|
2015-02-26 16:19:31 +08:00
|
|
|
IndVarStartV->setName("indvar.start");
|
|
|
|
|
|
|
|
LoopStructure Result;
|
2015-01-16 09:03:22 +08:00
|
|
|
|
2015-02-26 16:19:31 +08:00
|
|
|
Result.Tag = "main";
|
|
|
|
Result.Header = Header;
|
|
|
|
Result.Latch = Latch;
|
|
|
|
Result.LatchBr = LatchBr;
|
|
|
|
Result.LatchExit = LatchExit;
|
|
|
|
Result.LatchBrExitIdx = LatchBrExitIdx;
|
|
|
|
Result.IndVarStart = IndVarStartV;
|
|
|
|
Result.IndVarNext = LeftValue;
|
|
|
|
Result.IndVarIncreasing = IsIncreasing;
|
|
|
|
Result.LoopExitAt = RightValue;
|
2015-01-16 09:03:22 +08:00
|
|
|
|
|
|
|
FailureReason = nullptr;
|
|
|
|
|
2015-02-26 16:19:31 +08:00
|
|
|
return Result;
|
2015-01-16 09:03:22 +08:00
|
|
|
}
|
|
|
|
|
2015-01-22 16:29:18 +08:00
|
|
|
Optional<LoopConstrainer::SubRanges>
|
2015-02-26 16:19:31 +08:00
|
|
|
LoopConstrainer::calculateSubRanges() const {
|
2015-01-16 09:03:22 +08:00
|
|
|
IntegerType *Ty = cast<IntegerType>(LatchTakenCount->getType());
|
|
|
|
|
2015-01-22 17:32:02 +08:00
|
|
|
if (Range.getType() != Ty)
|
2015-01-22 16:29:18 +08:00
|
|
|
return None;
|
|
|
|
|
2015-01-16 09:03:22 +08:00
|
|
|
LoopConstrainer::SubRanges Result;
|
|
|
|
|
|
|
|
// I think we can be more aggressive here and make this nuw / nsw if the
|
|
|
|
// addition that feeds into the icmp for the latch's terminating branch is nuw
|
|
|
|
// / nsw. In any case, a wrapping 2's complement addition is safe.
|
|
|
|
ConstantInt *One = ConstantInt::get(Ty, 1);
|
2015-02-26 16:19:31 +08:00
|
|
|
const SCEV *Start = SE.getSCEV(MainLoopStructure.IndVarStart);
|
|
|
|
const SCEV *End = SE.getSCEV(MainLoopStructure.LoopExitAt);
|
|
|
|
|
|
|
|
bool Increasing = MainLoopStructure.IndVarIncreasing;
|
2015-03-17 08:42:16 +08:00
|
|
|
|
2015-02-26 16:19:31 +08:00
|
|
|
// We compute `Smallest` and `Greatest` such that [Smallest, Greatest) is the
|
|
|
|
// range of values the induction variable takes.
|
2015-03-17 08:42:16 +08:00
|
|
|
|
|
|
|
const SCEV *Smallest = nullptr, *Greatest = nullptr;
|
|
|
|
|
|
|
|
if (Increasing) {
|
|
|
|
Smallest = Start;
|
|
|
|
Greatest = End;
|
|
|
|
} else {
|
|
|
|
// These two computations may sign-overflow. Here is why that is okay:
|
|
|
|
//
|
|
|
|
// We know that the induction variable does not sign-overflow on any
|
|
|
|
// iteration except the last one, and it starts at `Start` and ends at
|
|
|
|
// `End`, decrementing by one every time.
|
|
|
|
//
|
|
|
|
// * if `Smallest` sign-overflows we know `End` is `INT_SMAX`. Since the
|
|
|
|
// induction variable is decreasing we know that that the smallest value
|
|
|
|
// the loop body is actually executed with is `INT_SMIN` == `Smallest`.
|
|
|
|
//
|
|
|
|
// * if `Greatest` sign-overflows, we know it can only be `INT_SMIN`. In
|
|
|
|
// that case, `Clamp` will always return `Smallest` and
|
|
|
|
// [`Result.LowLimit`, `Result.HighLimit`) = [`Smallest`, `Smallest`)
|
|
|
|
// will be an empty range. Returning an empty range is always safe.
|
|
|
|
//
|
|
|
|
|
|
|
|
Smallest = SE.getAddExpr(End, SE.getSCEV(One));
|
|
|
|
Greatest = SE.getAddExpr(Start, SE.getSCEV(One));
|
|
|
|
}
|
2015-02-26 16:19:31 +08:00
|
|
|
|
|
|
|
auto Clamp = [this, Smallest, Greatest](const SCEV *S) {
|
|
|
|
return SE.getSMaxExpr(Smallest, SE.getSMinExpr(Greatest, S));
|
|
|
|
};
|
2015-01-16 09:03:22 +08:00
|
|
|
|
|
|
|
// In some cases we can prove that we don't need a pre or post loop
|
|
|
|
|
|
|
|
bool ProvablyNoPreloop =
|
2015-02-26 16:19:31 +08:00
|
|
|
SE.isKnownPredicate(ICmpInst::ICMP_SLE, Range.getBegin(), Smallest);
|
|
|
|
if (!ProvablyNoPreloop)
|
|
|
|
Result.LowLimit = Clamp(Range.getBegin());
|
2015-01-16 09:03:22 +08:00
|
|
|
|
|
|
|
bool ProvablyNoPostLoop =
|
2015-02-26 16:19:31 +08:00
|
|
|
SE.isKnownPredicate(ICmpInst::ICMP_SLE, Greatest, Range.getEnd());
|
|
|
|
if (!ProvablyNoPostLoop)
|
|
|
|
Result.HighLimit = Clamp(Range.getEnd());
|
2015-01-16 09:03:22 +08:00
|
|
|
|
|
|
|
return Result;
|
|
|
|
}
|
|
|
|
|
|
|
|
void LoopConstrainer::cloneLoop(LoopConstrainer::ClonedLoop &Result,
|
|
|
|
const char *Tag) const {
|
|
|
|
for (BasicBlock *BB : OriginalLoop.getBlocks()) {
|
|
|
|
BasicBlock *Clone = CloneBasicBlock(BB, Result.Map, Twine(".") + Tag, &F);
|
|
|
|
Result.Blocks.push_back(Clone);
|
|
|
|
Result.Map[BB] = Clone;
|
|
|
|
}
|
|
|
|
|
|
|
|
auto GetClonedValue = [&Result](Value *V) {
|
|
|
|
assert(V && "null values not in domain!");
|
|
|
|
auto It = Result.Map.find(V);
|
|
|
|
if (It == Result.Map.end())
|
|
|
|
return V;
|
|
|
|
return static_cast<Value *>(It->second);
|
|
|
|
};
|
|
|
|
|
2016-08-14 09:04:36 +08:00
|
|
|
auto *ClonedLatch =
|
|
|
|
cast<BasicBlock>(GetClonedValue(OriginalLoop.getLoopLatch()));
|
|
|
|
ClonedLatch->getTerminator()->setMetadata(ClonedLoopTag,
|
|
|
|
MDNode::get(Ctx, {}));
|
|
|
|
|
2015-01-16 09:03:22 +08:00
|
|
|
Result.Structure = MainLoopStructure.map(GetClonedValue);
|
|
|
|
Result.Structure.Tag = Tag;
|
|
|
|
|
|
|
|
for (unsigned i = 0, e = Result.Blocks.size(); i != e; ++i) {
|
|
|
|
BasicBlock *ClonedBB = Result.Blocks[i];
|
|
|
|
BasicBlock *OriginalBB = OriginalLoop.getBlocks()[i];
|
|
|
|
|
|
|
|
assert(Result.Map[OriginalBB] == ClonedBB && "invariant!");
|
|
|
|
|
|
|
|
for (Instruction &I : *ClonedBB)
|
|
|
|
RemapInstruction(&I, Result.Map,
|
2016-04-07 08:26:43 +08:00
|
|
|
RF_NoModuleLevelChanges | RF_IgnoreMissingLocals);
|
2015-01-16 09:03:22 +08:00
|
|
|
|
|
|
|
// Exit blocks will now have one more predecessor and their PHI nodes need
|
|
|
|
// to be edited to reflect that. No phi nodes need to be introduced because
|
|
|
|
// the loop is in LCSSA.
|
|
|
|
|
2016-08-14 06:00:09 +08:00
|
|
|
for (auto *SBB : successors(OriginalBB)) {
|
|
|
|
if (OriginalLoop.contains(SBB))
|
2015-01-16 09:03:22 +08:00
|
|
|
continue; // not an exit block
|
|
|
|
|
2016-08-14 06:00:09 +08:00
|
|
|
for (Instruction &I : *SBB) {
|
2016-08-14 06:00:12 +08:00
|
|
|
auto *PN = dyn_cast<PHINode>(&I);
|
|
|
|
if (!PN)
|
2015-01-16 09:03:22 +08:00
|
|
|
break;
|
|
|
|
|
|
|
|
Value *OldIncoming = PN->getIncomingValueForBlock(OriginalBB);
|
|
|
|
PN->addIncoming(GetClonedValue(OldIncoming), ClonedBB);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
LoopConstrainer::RewrittenRangeInfo LoopConstrainer::changeIterationSpaceEnd(
|
2015-02-26 16:19:31 +08:00
|
|
|
const LoopStructure &LS, BasicBlock *Preheader, Value *ExitSubloopAt,
|
2015-01-16 09:03:22 +08:00
|
|
|
BasicBlock *ContinuationBlock) const {
|
|
|
|
|
|
|
|
// We start with a loop with a single latch:
|
|
|
|
//
|
|
|
|
// +--------------------+
|
|
|
|
// | |
|
|
|
|
// | preheader |
|
|
|
|
// | |
|
|
|
|
// +--------+-----------+
|
|
|
|
// | ----------------\
|
|
|
|
// | / |
|
|
|
|
// +--------v----v------+ |
|
|
|
|
// | | |
|
|
|
|
// | header | |
|
|
|
|
// | | |
|
|
|
|
// +--------------------+ |
|
|
|
|
// |
|
|
|
|
// ..... |
|
|
|
|
// |
|
|
|
|
// +--------------------+ |
|
|
|
|
// | | |
|
|
|
|
// | latch >----------/
|
|
|
|
// | |
|
|
|
|
// +-------v------------+
|
|
|
|
// |
|
|
|
|
// |
|
|
|
|
// | +--------------------+
|
|
|
|
// | | |
|
|
|
|
// +---> original exit |
|
|
|
|
// | |
|
|
|
|
// +--------------------+
|
|
|
|
//
|
|
|
|
// We change the control flow to look like
|
|
|
|
//
|
|
|
|
//
|
|
|
|
// +--------------------+
|
|
|
|
// | |
|
|
|
|
// | preheader >-------------------------+
|
|
|
|
// | | |
|
|
|
|
// +--------v-----------+ |
|
|
|
|
// | /-------------+ |
|
|
|
|
// | / | |
|
|
|
|
// +--------v--v--------+ | |
|
|
|
|
// | | | |
|
|
|
|
// | header | | +--------+ |
|
|
|
|
// | | | | | |
|
|
|
|
// +--------------------+ | | +-----v-----v-----------+
|
|
|
|
// | | | |
|
|
|
|
// | | | .pseudo.exit |
|
|
|
|
// | | | |
|
|
|
|
// | | +-----------v-----------+
|
|
|
|
// | | |
|
|
|
|
// ..... | | |
|
|
|
|
// | | +--------v-------------+
|
|
|
|
// +--------------------+ | | | |
|
|
|
|
// | | | | | ContinuationBlock |
|
|
|
|
// | latch >------+ | | |
|
|
|
|
// | | | +----------------------+
|
|
|
|
// +---------v----------+ |
|
|
|
|
// | |
|
|
|
|
// | |
|
|
|
|
// | +---------------^-----+
|
|
|
|
// | | |
|
|
|
|
// +-----> .exit.selector |
|
|
|
|
// | |
|
|
|
|
// +----------v----------+
|
|
|
|
// |
|
|
|
|
// +--------------------+ |
|
|
|
|
// | | |
|
|
|
|
// | original exit <----+
|
|
|
|
// | |
|
|
|
|
// +--------------------+
|
|
|
|
//
|
|
|
|
|
|
|
|
RewrittenRangeInfo RRI;
|
|
|
|
|
2016-08-17 09:16:17 +08:00
|
|
|
BasicBlock *BBInsertLocation = LS.Latch->getNextNode();
|
2015-01-16 09:03:22 +08:00
|
|
|
RRI.ExitSelector = BasicBlock::Create(Ctx, Twine(LS.Tag) + ".exit.selector",
|
2016-08-17 09:16:17 +08:00
|
|
|
&F, BBInsertLocation);
|
2015-01-16 09:03:22 +08:00
|
|
|
RRI.PseudoExit = BasicBlock::Create(Ctx, Twine(LS.Tag) + ".pseudo.exit", &F,
|
2016-08-17 09:16:17 +08:00
|
|
|
BBInsertLocation);
|
2015-01-16 09:03:22 +08:00
|
|
|
|
2016-06-24 02:03:26 +08:00
|
|
|
BranchInst *PreheaderJump = cast<BranchInst>(Preheader->getTerminator());
|
2015-02-26 16:19:31 +08:00
|
|
|
bool Increasing = LS.IndVarIncreasing;
|
2015-01-16 09:03:22 +08:00
|
|
|
|
|
|
|
IRBuilder<> B(PreheaderJump);
|
|
|
|
|
|
|
|
// EnterLoopCond - is it okay to start executing this `LS'?
|
2015-02-26 16:19:31 +08:00
|
|
|
Value *EnterLoopCond = Increasing
|
|
|
|
? B.CreateICmpSLT(LS.IndVarStart, ExitSubloopAt)
|
|
|
|
: B.CreateICmpSGT(LS.IndVarStart, ExitSubloopAt);
|
|
|
|
|
2015-01-16 09:03:22 +08:00
|
|
|
B.CreateCondBr(EnterLoopCond, LS.Header, RRI.PseudoExit);
|
|
|
|
PreheaderJump->eraseFromParent();
|
|
|
|
|
2015-02-26 16:19:31 +08:00
|
|
|
LS.LatchBr->setSuccessor(LS.LatchBrExitIdx, RRI.ExitSelector);
|
2015-01-16 09:03:22 +08:00
|
|
|
B.SetInsertPoint(LS.LatchBr);
|
2015-02-26 16:19:31 +08:00
|
|
|
Value *TakeBackedgeLoopCond =
|
|
|
|
Increasing ? B.CreateICmpSLT(LS.IndVarNext, ExitSubloopAt)
|
|
|
|
: B.CreateICmpSGT(LS.IndVarNext, ExitSubloopAt);
|
|
|
|
Value *CondForBranch = LS.LatchBrExitIdx == 1
|
|
|
|
? TakeBackedgeLoopCond
|
|
|
|
: B.CreateNot(TakeBackedgeLoopCond);
|
2015-01-16 09:03:22 +08:00
|
|
|
|
2015-02-26 16:19:31 +08:00
|
|
|
LS.LatchBr->setCondition(CondForBranch);
|
2015-01-16 09:03:22 +08:00
|
|
|
|
|
|
|
B.SetInsertPoint(RRI.ExitSelector);
|
|
|
|
|
|
|
|
// IterationsLeft - are there any more iterations left, given the original
|
|
|
|
// upper bound on the induction variable? If not, we branch to the "real"
|
|
|
|
// exit.
|
2015-02-26 16:19:31 +08:00
|
|
|
Value *IterationsLeft = Increasing
|
|
|
|
? B.CreateICmpSLT(LS.IndVarNext, LS.LoopExitAt)
|
|
|
|
: B.CreateICmpSGT(LS.IndVarNext, LS.LoopExitAt);
|
2015-01-16 09:03:22 +08:00
|
|
|
B.CreateCondBr(IterationsLeft, RRI.PseudoExit, LS.LatchExit);
|
|
|
|
|
|
|
|
BranchInst *BranchToContinuation =
|
|
|
|
BranchInst::Create(ContinuationBlock, RRI.PseudoExit);
|
|
|
|
|
|
|
|
// We emit PHI nodes into `RRI.PseudoExit' that compute the "latest" value of
|
|
|
|
// each of the PHI nodes in the loop header. This feeds into the initial
|
|
|
|
// value of the same PHI nodes if/when we continue execution.
|
|
|
|
for (Instruction &I : *LS.Header) {
|
2016-08-14 06:00:12 +08:00
|
|
|
auto *PN = dyn_cast<PHINode>(&I);
|
|
|
|
if (!PN)
|
2015-01-16 09:03:22 +08:00
|
|
|
break;
|
|
|
|
|
|
|
|
PHINode *NewPHI = PHINode::Create(PN->getType(), 2, PN->getName() + ".copy",
|
|
|
|
BranchToContinuation);
|
|
|
|
|
|
|
|
NewPHI->addIncoming(PN->getIncomingValueForBlock(Preheader), Preheader);
|
|
|
|
NewPHI->addIncoming(PN->getIncomingValueForBlock(LS.Latch),
|
|
|
|
RRI.ExitSelector);
|
|
|
|
RRI.PHIValuesAtPseudoExit.push_back(NewPHI);
|
|
|
|
}
|
|
|
|
|
2015-02-26 16:19:31 +08:00
|
|
|
RRI.IndVarEnd = PHINode::Create(LS.IndVarNext->getType(), 2, "indvar.end",
|
|
|
|
BranchToContinuation);
|
|
|
|
RRI.IndVarEnd->addIncoming(LS.IndVarStart, Preheader);
|
|
|
|
RRI.IndVarEnd->addIncoming(LS.IndVarNext, RRI.ExitSelector);
|
|
|
|
|
2015-01-16 09:03:22 +08:00
|
|
|
// The latch exit now has a branch from `RRI.ExitSelector' instead of
|
|
|
|
// `LS.Latch'. The PHI nodes need to be updated to reflect that.
|
|
|
|
for (Instruction &I : *LS.LatchExit) {
|
|
|
|
if (PHINode *PN = dyn_cast<PHINode>(&I))
|
|
|
|
replacePHIBlock(PN, LS.Latch, RRI.ExitSelector);
|
|
|
|
else
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
return RRI;
|
|
|
|
}

void LoopConstrainer::rewriteIncomingValuesForPHIs(
    LoopStructure &LS, BasicBlock *ContinuationBlock,
    const LoopConstrainer::RewrittenRangeInfo &RRI) const {

  unsigned PHIIndex = 0;
  for (Instruction &I : *LS.Header) {
    auto *PN = dyn_cast<PHINode>(&I);
    if (!PN)
      break;

    for (unsigned i = 0, e = PN->getNumIncomingValues(); i < e; ++i)
      if (PN->getIncomingBlock(i) == ContinuationBlock)
        PN->setIncomingValue(i, RRI.PHIValuesAtPseudoExit[PHIIndex++]);
  }

  LS.IndVarStart = RRI.IndVarEnd;
}

BasicBlock *LoopConstrainer::createPreheader(const LoopStructure &LS,
                                             BasicBlock *OldPreheader,
                                             const char *Tag) const {

  BasicBlock *Preheader = BasicBlock::Create(Ctx, Tag, &F, LS.Header);
  BranchInst::Create(LS.Header, Preheader);

  for (Instruction &I : *LS.Header) {
    auto *PN = dyn_cast<PHINode>(&I);
    if (!PN)
      break;

    for (unsigned i = 0, e = PN->getNumIncomingValues(); i < e; ++i)
      replacePHIBlock(PN, OldPreheader, Preheader);
  }

  return Preheader;
}

void LoopConstrainer::addToParentLoopIfNeeded(ArrayRef<BasicBlock *> BBs) {
  Loop *ParentLoop = OriginalLoop.getParentLoop();
  if (!ParentLoop)
    return;

  for (BasicBlock *BB : BBs)
    ParentLoop->addBasicBlockToLoop(BB, LI);
}

Loop *LoopConstrainer::createClonedLoopStructure(Loop *Original, Loop *Parent,
                                                 ValueToValueMapTy &VM) {
  Loop &New = LPM.addLoop(Parent);

  // Add all of the blocks in Original to the new loop.
  for (auto *BB : Original->blocks())
    if (LI.getLoopFor(BB) == Original)
      New.addBasicBlockToLoop(cast<BasicBlock>(VM[BB]), LI);

  // Add all of the subloops to the new loop.
  for (Loop *SubLoop : *Original)
    createClonedLoopStructure(SubLoop, &New, VM);

  return &New;
}

bool LoopConstrainer::run() {
  BasicBlock *Preheader = nullptr;
  LatchTakenCount = SE.getExitCount(&OriginalLoop, MainLoopStructure.Latch);
  Preheader = OriginalLoop.getLoopPreheader();
  assert(!isa<SCEVCouldNotCompute>(LatchTakenCount) && Preheader != nullptr &&
         "preconditions!");

  OriginalPreheader = Preheader;
  MainLoopPreheader = Preheader;

  Optional<SubRanges> MaybeSR = calculateSubRanges();
  if (!MaybeSR.hasValue()) {
    DEBUG(dbgs() << "irce: could not compute subranges\n");
    return false;
  }

  SubRanges SR = MaybeSR.getValue();
  bool Increasing = MainLoopStructure.IndVarIncreasing;
  IntegerType *IVTy =
      cast<IntegerType>(MainLoopStructure.IndVarNext->getType());

  SCEVExpander Expander(SE, F.getParent()->getDataLayout(), "irce");
  Instruction *InsertPt = OriginalPreheader->getTerminator();

  // It would have been better to make `PreLoop' and `PostLoop'
  // `Optional<ClonedLoop>'s, but `ValueToValueMapTy' does not have a copy
  // constructor.
  ClonedLoop PreLoop, PostLoop;
  bool NeedsPreLoop =
      Increasing ? SR.LowLimit.hasValue() : SR.HighLimit.hasValue();
  bool NeedsPostLoop =
      Increasing ? SR.HighLimit.hasValue() : SR.LowLimit.hasValue();
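
  // Illustration only: for an increasing induction variable the pre loop (if
  // any) runs the iterations below SR.LowLimit and the post loop runs the
  // iterations from SR.HighLimit onwards, leaving a main loop in between that
  // needs no range checks.  For a decreasing induction variable the roles of
  // the two limits are swapped.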

  Value *ExitPreLoopAt = nullptr;
  Value *ExitMainLoopAt = nullptr;
  const SCEVConstant *MinusOneS =
      cast<SCEVConstant>(SE.getConstant(IVTy, -1, true /* isSigned */));

  if (NeedsPreLoop) {
    const SCEV *ExitPreLoopAtSCEV = nullptr;

    if (Increasing)
      ExitPreLoopAtSCEV = *SR.LowLimit;
    else {
      if (CanBeSMin(SE, *SR.HighLimit)) {
        DEBUG(dbgs() << "irce: could not prove no-overflow when computing "
                     << "preloop exit limit.  HighLimit = " << *(*SR.HighLimit)
                     << "\n");
        return false;
      }
      ExitPreLoopAtSCEV = SE.getAddExpr(*SR.HighLimit, MinusOneS);
    }

    ExitPreLoopAt = Expander.expandCodeFor(ExitPreLoopAtSCEV, IVTy, InsertPt);
    ExitPreLoopAt->setName("exit.preloop.at");
  }

  if (NeedsPostLoop) {
    const SCEV *ExitMainLoopAtSCEV = nullptr;

    if (Increasing)
      ExitMainLoopAtSCEV = *SR.HighLimit;
    else {
      if (CanBeSMin(SE, *SR.LowLimit)) {
        DEBUG(dbgs() << "irce: could not prove no-overflow when computing "
                     << "mainloop exit limit.  LowLimit = " << *(*SR.LowLimit)
                     << "\n");
        return false;
      }
      ExitMainLoopAtSCEV = SE.getAddExpr(*SR.LowLimit, MinusOneS);
    }

    ExitMainLoopAt = Expander.expandCodeFor(ExitMainLoopAtSCEV, IVTy, InsertPt);
    ExitMainLoopAt->setName("exit.mainloop.at");
  }
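
  // Worked example (illustration only): for the loop in the file header the
  // induction variable increases from 0, SR.HighLimit corresponds to
  // smin(n, len) and there is no low limit, so we emit no pre loop and the
  // main loop exits at %exit.mainloop.at = smin(n, len), matching the "limit"
  // in the header comment.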

  // We clone these ahead of time so that we don't have to deal with changing
  // and temporarily invalid IR as we transform the loops.
  if (NeedsPreLoop)
    cloneLoop(PreLoop, "preloop");
  if (NeedsPostLoop)
    cloneLoop(PostLoop, "postloop");

  RewrittenRangeInfo PreLoopRRI;

  if (NeedsPreLoop) {
    Preheader->getTerminator()->replaceUsesOfWith(MainLoopStructure.Header,
                                                  PreLoop.Structure.Header);

    MainLoopPreheader =
        createPreheader(MainLoopStructure, Preheader, "mainloop");
    PreLoopRRI = changeIterationSpaceEnd(PreLoop.Structure, Preheader,
                                         ExitPreLoopAt, MainLoopPreheader);
    rewriteIncomingValuesForPHIs(MainLoopStructure, MainLoopPreheader,
                                 PreLoopRRI);
  }

  BasicBlock *PostLoopPreheader = nullptr;
  RewrittenRangeInfo PostLoopRRI;

  if (NeedsPostLoop) {
    PostLoopPreheader =
        createPreheader(PostLoop.Structure, Preheader, "postloop");
    PostLoopRRI = changeIterationSpaceEnd(MainLoopStructure, MainLoopPreheader,
                                          ExitMainLoopAt, PostLoopPreheader);
    rewriteIncomingValuesForPHIs(PostLoop.Structure, PostLoopPreheader,
                                 PostLoopRRI);
  }

  BasicBlock *NewMainLoopPreheader =
      MainLoopPreheader != Preheader ? MainLoopPreheader : nullptr;
  BasicBlock *NewBlocks[] = {PostLoopPreheader,        PreLoopRRI.PseudoExit,
                             PreLoopRRI.ExitSelector,  PostLoopRRI.PseudoExit,
                             PostLoopRRI.ExitSelector, NewMainLoopPreheader};

  // Some of the above may be nullptr, filter them out before passing to
  // addToParentLoopIfNeeded.
  auto NewBlocksEnd =
      std::remove(std::begin(NewBlocks), std::end(NewBlocks), nullptr);

  addToParentLoopIfNeeded(makeArrayRef(std::begin(NewBlocks), NewBlocksEnd));

  DT.recalculate(F);

  if (!PreLoop.Blocks.empty()) {
    auto *L = createClonedLoopStructure(
        &OriginalLoop, OriginalLoop.getParentLoop(), PreLoop.Map);
    formLCSSARecursively(*L, DT, &LI, &SE);
    simplifyLoop(L, &DT, &LI, &SE, nullptr, true);
    // Pre loops are slow paths; we do not need to perform any loop
    // optimizations on them.
    DisableAllLoopOptsOnLoop(*L);
  }

  if (!PostLoop.Blocks.empty()) {
    auto *L = createClonedLoopStructure(
        &OriginalLoop, OriginalLoop.getParentLoop(), PostLoop.Map);
    formLCSSARecursively(*L, DT, &LI, &SE);
    simplifyLoop(L, &DT, &LI, &SE, nullptr, true);
    // Post loops are slow paths; we do not need to perform any loop
    // optimizations on them.
    DisableAllLoopOptsOnLoop(*L);
  }

  formLCSSARecursively(OriginalLoop, DT, &LI, &SE);
  simplifyLoop(&OriginalLoop, &DT, &LI, &SE, nullptr, true);

  return true;
}

/// Computes and returns a range of values for the induction variable (IndVar)
/// in which the range check can be safely elided.  If it cannot compute such a
/// range, returns None.
Optional<InductiveRangeCheck::Range>
InductiveRangeCheck::computeSafeIterationSpace(
    ScalarEvolution &SE, const SCEVAddRecExpr *IndVar) const {
  // IndVar is of the form "A + B * I" (where "I" is the canonical induction
  // variable, that may or may not exist as a real llvm::Value in the loop) and
  // this inductive range check is a range check on the "C + D * I" ("C" is
  // getOffset() and "D" is getScale()).  We rewrite the value being range
  // checked to "M + N * IndVar" where "N" = "D * B^(-1)" and "M" = "C - N * A".
  // Currently we support this only for "B" = "D" = { 1 or -1 }, but the code
  // can be generalized as needed.
  //
  // The actual inequalities we solve are of the form
  //
  //   0 <= M + 1 * IndVar < L given L >= 0  (i.e. N == 1)
  //
  // The inequality is satisfied by -M <= IndVar < (L - M) [^1].  All additions
  // and subtractions are twos-complement wrapping and comparisons are signed.
  //
  // Proof:
  //
  // If there exists IndVar such that -M <= IndVar < (L - M) then it follows
  // that -M <= (-M + L) [== Eq. 1].  Since L >= 0, if (-M + L) sign-overflows
  // then (-M + L) < (-M).  Hence by [Eq. 1], (-M + L) could not have
  // overflown.
  //
  // This means IndVar = t + (-M) for t in [0, L).  Hence (IndVar + M) = t.
  // Hence 0 <= (IndVar + M) < L.
  //
  // [^1]: Note that the solution does _not_ apply if L < 0; consider values
  // M = 127, IndVar = 126 and L = -2 in an i8 world.
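
  // Concrete example (illustration only): for the loop in the file header,
  // the range check is "0 <= i && i < len" and the induction variable is
  // {0,+,1}, so A = 0, B = 1, C = 0, D = 1.  Then M = C - A = 0 and the safe
  // range computed below is [-M, len - M) = [0, len).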

  if (!IndVar->isAffine())
    return None;

  const SCEV *A = IndVar->getStart();
  const SCEVConstant *B = dyn_cast<SCEVConstant>(IndVar->getStepRecurrence(SE));
  if (!B)
    return None;

  const SCEV *C = getOffset();
  const SCEVConstant *D = dyn_cast<SCEVConstant>(getScale());
  if (D != B)
    return None;

  ConstantInt *ConstD = D->getValue();
  if (!(ConstD->isMinusOne() || ConstD->isOne()))
    return None;

  const SCEV *M = SE.getMinusSCEV(C, A);

  const SCEV *Begin = SE.getNegativeSCEV(M);
  const SCEV *UpperLimit = nullptr;

  // We strengthen "0 <= I" to "0 <= I < INT_SMAX" and "I < L" to "0 <= I < L".
  // We can potentially do much better here.
  if (Value *V = getLength()) {
    UpperLimit = SE.getSCEV(V);
  } else {
    assert(Kind == InductiveRangeCheck::RANGE_CHECK_LOWER && "invariant!");
    unsigned BitWidth = cast<IntegerType>(IndVar->getType())->getBitWidth();
    UpperLimit = SE.getConstant(APInt::getSignedMaxValue(BitWidth));
  }

  const SCEV *End = SE.getMinusSCEV(UpperLimit, M);
  return InductiveRangeCheck::Range(Begin, End);
}

static Optional<InductiveRangeCheck::Range>
IntersectRange(ScalarEvolution &SE,
               const Optional<InductiveRangeCheck::Range> &R1,
               const InductiveRangeCheck::Range &R2) {
  if (!R1.hasValue())
    return R2;
  auto &R1Value = R1.getValue();

  // TODO: we could widen the smaller range and have this work; but for now we
  // bail out to keep things simple.
  if (R1Value.getType() != R2.getType())
    return None;

  const SCEV *NewBegin = SE.getSMaxExpr(R1Value.getBegin(), R2.getBegin());
  const SCEV *NewEnd = SE.getSMinExpr(R1Value.getEnd(), R2.getEnd());

  return InductiveRangeCheck::Range(NewBegin, NewEnd);
}
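
// For illustration only: intersecting the safe ranges [0, len1) and [0, len2)
// of two range checks in the same loop yields [smax(0, 0), smin(len1, len2)),
// i.e. [0, smin(len1, len2)), which is exactly the iteration space in which
// both checks can be elided.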

bool InductiveRangeCheckElimination::runOnLoop(Loop *L, LPPassManager &LPM) {
  if (skipLoop(L))
    return false;

  if (L->getBlocks().size() >= LoopSizeCutoff) {
    DEBUG(dbgs() << "irce: giving up constraining loop, too large\n";);
    return false;
  }

  BasicBlock *Preheader = L->getLoopPreheader();
  if (!Preheader) {
    DEBUG(dbgs() << "irce: loop has no preheader, leaving\n");
    return false;
  }

  LLVMContext &Context = Preheader->getContext();
  SmallVector<InductiveRangeCheck, 16> RangeChecks;
  ScalarEvolution &SE = getAnalysis<ScalarEvolutionWrapperPass>().getSE();
  BranchProbabilityInfo &BPI =
      getAnalysis<BranchProbabilityInfoWrapperPass>().getBPI();

  for (auto BBI : L->getBlocks())
    if (BranchInst *TBI = dyn_cast<BranchInst>(BBI->getTerminator()))
      InductiveRangeCheck::extractRangeChecksFromBranch(TBI, L, SE, BPI,
                                                        RangeChecks);
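
  // Illustration only: in the example from the file header, the conditional
  // branch implementing "if (0 <= i && i < len)" is the kind of check
  // collected here; its failing edge (the throw_out_of_bounds() path) is
  // expected to be rarely taken, which is why branch probability info is
  // consulted.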

  if (RangeChecks.empty())
    return false;

  auto PrintRecognizedRangeChecks = [&](raw_ostream &OS) {
    OS << "irce: looking at loop "; L->print(OS);
    OS << "irce: loop has " << RangeChecks.size()
       << " inductive range checks: \n";
    for (InductiveRangeCheck &IRC : RangeChecks)
      IRC.print(OS);
  };

  DEBUG(PrintRecognizedRangeChecks(dbgs()));

  if (PrintRangeChecks)
    PrintRecognizedRangeChecks(errs());

  const char *FailureReason = nullptr;
  Optional<LoopStructure> MaybeLoopStructure =
      LoopStructure::parseLoopStructure(SE, BPI, *L, FailureReason);
  if (!MaybeLoopStructure.hasValue()) {
    DEBUG(dbgs() << "irce: could not parse loop structure: " << FailureReason
                 << "\n";);
    return false;
  }
  LoopStructure LS = MaybeLoopStructure.getValue();
  bool Increasing = LS.IndVarIncreasing;
  const SCEV *MinusOne =
      SE.getConstant(LS.IndVarNext->getType(), Increasing ? -1 : 1, true);
  const SCEVAddRecExpr *IndVar =
      cast<SCEVAddRecExpr>(SE.getAddExpr(SE.getSCEV(LS.IndVarNext), MinusOne));
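
  // Note (illustration only): IndVar here is the SCEV for the induction
  // variable *before* the increment on the current iteration.  E.g. if
  // SE.getSCEV(LS.IndVarNext) is {1,+,1} for an increasing loop, adding -1
  // gives {0,+,1}, which is the value the range checks actually test.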

  Optional<InductiveRangeCheck::Range> SafeIterRange;
  Instruction *ExprInsertPt = Preheader->getTerminator();

  SmallVector<InductiveRangeCheck, 4> RangeChecksToEliminate;

  IRBuilder<> B(ExprInsertPt);
  for (InductiveRangeCheck &IRC : RangeChecks) {
    auto Result = IRC.computeSafeIterationSpace(SE, IndVar);
    if (Result.hasValue()) {
      auto MaybeSafeIterRange =
          IntersectRange(SE, SafeIterRange, Result.getValue());
      if (MaybeSafeIterRange.hasValue()) {
        RangeChecksToEliminate.push_back(IRC);
        SafeIterRange = MaybeSafeIterRange.getValue();
      }
    }
  }

  if (!SafeIterRange.hasValue())
    return false;

  auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  LoopConstrainer LC(*L, getAnalysis<LoopInfoWrapperPass>().getLoopInfo(), LPM,
                     LS, SE, DT, SafeIterRange.getValue());
  bool Changed = LC.run();

  if (Changed) {
    auto PrintConstrainedLoopInfo = [L]() {
      dbgs() << "irce: in function ";
      dbgs() << L->getHeader()->getParent()->getName() << ": ";
      dbgs() << "constrained ";
      L->print(dbgs());
    };

    DEBUG(PrintConstrainedLoopInfo());

    if (PrintChangedLoops)
      PrintConstrainedLoopInfo();

    // Optimize away the now-redundant range checks.

    for (InductiveRangeCheck &IRC : RangeChecksToEliminate) {
      ConstantInt *FoldedRangeCheck = IRC.getPassingDirection()
                                          ? ConstantInt::getTrue(Context)
                                          : ConstantInt::getFalse(Context);
      IRC.getCheckUse()->set(FoldedRangeCheck);
    }
  }

  return Changed;
}

Pass *llvm::createInductiveRangeCheckEliminationPass() {
  return new InductiveRangeCheckElimination;
}