//===-- LoopUtils.cpp - Loop Utility functions -------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines common loop utility functions.
//
//===----------------------------------------------------------------------===//
|
2016-07-27 01:52:02 +08:00
|
|
|
#include "llvm/Transforms/Utils/LoopUtils.h"
|
[LPM] Factor all of the loop analysis usage updates into a common helper
routine.
We were getting this wrong in small ways and generally being very
inconsistent about it across loop passes. Instead, let's have a common
place where we do this. One minor downside is that this will require
some analyses like SCEV in more places than they are strictly needed.
However, this seems benign as these analyses are complete no-ops, and
without this consistency we can in many cases end up with the legacy
pass manager scheduling deciding to split up a loop pass pipeline in
order to run the function analysis half-way through. It is very, very
annoying to fix these without just being very pedantic across the board.
The only loop passes I've not updated here are ones that use
AU.setPreservesAll() such as IVUsers (an analysis) and the pass printer.
They seemed less relevant.
With this patch, almost all of the problems in PR24804 around loop pass
pipelines are fixed. The one remaining issue is that we run simplify-cfg
and instcombine in the middle of the loop pass pipeline. We've recently
added some loop variants of these passes that would seem substantially
cleaner to use, but this at least gets us much closer to the previous
state. Notably, the seven loop pass managers is down to three.
I've not updated the loop passes using LoopAccessAnalysis because that
analysis hasn't been fully wired into LoopSimplify/LCSSA, and it isn't
clear that those transforms want to support those forms anyways. They
all run late anyways, so this is harmless. Similarly, LSR is left alone
because it already carefully manages its forms and doesn't need to get
fused into a single loop pass manager with a bunch of other loop passes.
LoopReroll didn't use loop simplified form previously, and I've updated
the test case to match the trivially different output.
Finally, I've also factored all the pass initialization for the passes
that use this technique as well, so that should be done regularly and
reliably.
Thanks to James for the help reviewing and thinking about this stuff,
and Ben for help thinking about it as well!
Differential Revision: http://reviews.llvm.org/D17435
llvm-svn: 261316
2016-02-19 18:45:18 +08:00
|
|
|
#include "llvm/Analysis/AliasAnalysis.h"
|
|
|
|
#include "llvm/Analysis/BasicAliasAnalysis.h"
|
|
|
|
#include "llvm/Analysis/GlobalsModRef.h"
|
2016-07-27 01:52:02 +08:00
|
|
|
#include "llvm/Analysis/GlobalsModRef.h"
|
|
|
|
#include "llvm/Analysis/LoopInfo.h"
|
2016-10-28 20:57:20 +08:00
|
|
|
#include "llvm/Analysis/LoopPass.h"
|
2015-11-25 02:57:06 +08:00
|
|
|
#include "llvm/Analysis/ScalarEvolution.h"
|
2016-07-27 01:52:02 +08:00
|
|
|
#include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"
|
2016-05-10 15:33:35 +08:00
|
|
|
#include "llvm/Analysis/ScalarEvolutionExpander.h"
|
2015-11-25 02:57:06 +08:00
|
|
|
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
|
[LPM] Factor all of the loop analysis usage updates into a common helper
routine.
We were getting this wrong in small ways and generally being very
inconsistent about it across loop passes. Instead, let's have a common
place where we do this. One minor downside is that this will require
some analyses like SCEV in more places than they are strictly needed.
However, this seems benign as these analyses are complete no-ops, and
without this consistency we can in many cases end up with the legacy
pass manager scheduling deciding to split up a loop pass pipeline in
order to run the function analysis half-way through. It is very, very
annoying to fix these without just being very pedantic across the board.
The only loop passes I've not updated here are ones that use
AU.setPreservesAll() such as IVUsers (an analysis) and the pass printer.
They seemed less relevant.
With this patch, almost all of the problems in PR24804 around loop pass
pipelines are fixed. The one remaining issue is that we run simplify-cfg
and instcombine in the middle of the loop pass pipeline. We've recently
added some loop variants of these passes that would seem substantially
cleaner to use, but this at least gets us much closer to the previous
state. Notably, the seven loop pass managers is down to three.
I've not updated the loop passes using LoopAccessAnalysis because that
analysis hasn't been fully wired into LoopSimplify/LCSSA, and it isn't
clear that those transforms want to support those forms anyways. They
all run late anyways, so this is harmless. Similarly, LSR is left alone
because it already carefully manages its forms and doesn't need to get
fused into a single loop pass manager with a bunch of other loop passes.
LoopReroll didn't use loop simplified form previously, and I've updated
the test case to match the trivially different output.
Finally, I've also factored all the pass initialization for the passes
that use this technique as well, so that should be done regularly and
reliably.
Thanks to James for the help reviewing and thinking about this stuff,
and Ben for help thinking about it as well!
Differential Revision: http://reviews.llvm.org/D17435
llvm-svn: 261316
2016-02-19 18:45:18 +08:00
|
|
|
#include "llvm/IR/Dominators.h"
|
2015-04-20 12:38:33 +08:00
|
|
|
#include "llvm/IR/Instructions.h"
|
2015-11-25 02:57:06 +08:00
|
|
|
#include "llvm/IR/Module.h"
|
2015-04-20 12:38:33 +08:00
|
|
|
#include "llvm/IR/PatternMatch.h"
|
|
|
|
#include "llvm/IR/ValueHandle.h"
|
[LPM] Factor all of the loop analysis usage updates into a common helper
routine.
We were getting this wrong in small ways and generally being very
inconsistent about it across loop passes. Instead, let's have a common
place where we do this. One minor downside is that this will require
some analyses like SCEV in more places than they are strictly needed.
However, this seems benign as these analyses are complete no-ops, and
without this consistency we can in many cases end up with the legacy
pass manager scheduling deciding to split up a loop pass pipeline in
order to run the function analysis half-way through. It is very, very
annoying to fix these without just being very pedantic across the board.
The only loop passes I've not updated here are ones that use
AU.setPreservesAll() such as IVUsers (an analysis) and the pass printer.
They seemed less relevant.
With this patch, almost all of the problems in PR24804 around loop pass
pipelines are fixed. The one remaining issue is that we run simplify-cfg
and instcombine in the middle of the loop pass pipeline. We've recently
added some loop variants of these passes that would seem substantially
cleaner to use, but this at least gets us much closer to the previous
state. Notably, the seven loop pass managers is down to three.
I've not updated the loop passes using LoopAccessAnalysis because that
analysis hasn't been fully wired into LoopSimplify/LCSSA, and it isn't
clear that those transforms want to support those forms anyways. They
all run late anyways, so this is harmless. Similarly, LSR is left alone
because it already carefully manages its forms and doesn't need to get
fused into a single loop pass manager with a bunch of other loop passes.
LoopReroll didn't use loop simplified form previously, and I've updated
the test case to match the trivially different output.
Finally, I've also factored all the pass initialization for the passes
that use this technique as well, so that should be done regularly and
reliably.
Thanks to James for the help reviewing and thinking about this stuff,
and Ben for help thinking about it as well!
Differential Revision: http://reviews.llvm.org/D17435
llvm-svn: 261316
2016-02-19 18:45:18 +08:00
|
|
|
#include "llvm/Pass.h"
|
2015-04-20 12:38:33 +08:00
|
|
|
#include "llvm/Support/Debug.h"
|
|
|
|
|
|
|
|
using namespace llvm;
|
|
|
|
using namespace llvm::PatternMatch;
|
|
|
|
|
|
|
|
#define DEBUG_TYPE "loop-utils"
|
|
|
|
|
2015-06-17 02:07:34 +08:00
|
|
|
/// Returns true if every operand of \p I is an instruction contained in
/// \p Set. A non-instruction operand makes the dyn_cast yield null, which
/// is never a member of \p Set, so such operands also produce false.
bool RecurrenceDescriptor::areAllUsesIn(Instruction *I,
                                        SmallPtrSetImpl<Instruction *> &Set) {
  for (const Use &U : I->operands()) {
    auto *OpInst = dyn_cast<Instruction>(U.get());
    if (!Set.count(OpInst))
      return false;
  }
  return true;
}
|
|
|
|
|
2015-08-27 22:12:17 +08:00
|
|
|
/// Returns true if \p Kind is one of the integer recurrence kinds
/// (add, mul, or, and, xor, integer min/max); all other kinds,
/// including the floating-point ones, yield false.
bool RecurrenceDescriptor::isIntegerRecurrenceKind(RecurrenceKind Kind) {
  switch (Kind) {
  case RK_IntegerAdd:
  case RK_IntegerMult:
  case RK_IntegerOr:
  case RK_IntegerAnd:
  case RK_IntegerXor:
  case RK_IntegerMinMax:
    return true;
  default:
    return false;
  }
}
|
|
|
|
|
|
|
|
/// Returns true if \p Kind denotes a floating-point recurrence. Defined by
/// exclusion: any real recurrence kind that is not an integer kind is a
/// floating-point kind.
bool RecurrenceDescriptor::isFloatingPointRecurrenceKind(RecurrenceKind Kind) {
  if (Kind == RK_NoRecurrence)
    return false;
  return !isIntegerRecurrenceKind(Kind);
}
|
|
|
|
|
|
|
|
/// Returns true if \p Kind is an arithmetic recurrence (integer or
/// floating-point add/mul); the bitwise and min/max kinds yield false.
bool RecurrenceDescriptor::isArithmeticRecurrenceKind(RecurrenceKind Kind) {
  switch (Kind) {
  case RK_IntegerAdd:
  case RK_IntegerMult:
  case RK_FloatAdd:
  case RK_FloatMult:
    return true;
  default:
    return false;
  }
}
|
|
|
|
|
|
|
|
/// If \p Phi has a single use that is an AND with a power-of-two-minus-one
/// mask (i.e. `x & (2^n - 1)` in either operand order), return the AND
/// instruction and shrink \p RT to an n-bit integer type; the AND is recorded
/// in \p CI (casts to ignore in the cost model) and \p Phi in \p Visited.
/// Otherwise \p Phi is returned unchanged and no sets are modified.
Instruction *
RecurrenceDescriptor::lookThroughAnd(PHINode *Phi, Type *&RT,
                                     SmallPtrSetImpl<Instruction *> &Visited,
                                     SmallPtrSetImpl<Instruction *> &CI) {
  // Only look through the AND when it is the Phi's sole user; otherwise the
  // unmasked value escapes and narrowing would be unsound.
  if (!Phi->hasOneUse())
    return Phi;

  const APInt *M = nullptr;
  Instruction *I, *J = cast<Instruction>(Phi->use_begin()->getUser());

  // Matches either I & 2^x-1 or 2^x-1 & I. If we find a match, we update RT
  // with a new integer type of the corresponding bit width.
  if (match(J, m_CombineOr(m_And(m_Instruction(I), m_APInt(M)),
                           m_And(m_APInt(M), m_Instruction(I))))) {
    // M+1 must be an exact power of two for the mask to define a bit width;
    // exactLogBase2 returns a negative value otherwise.
    int32_t Bits = (*M + 1).exactLogBase2();
    if (Bits > 0) {
      RT = IntegerType::get(Phi->getContext(), Bits);
      Visited.insert(Phi);
      CI.insert(J);
      return J;
    }
  }
  return Phi;
}
|
|
|
|
|
|
|
|
/// Walks the reduction expression backwards from \p Exit to \p Start and
/// verifies that every operand feeding into the reduction (i.e. not itself a
/// visited reduction operation) is a single-use sign- or zero-extend whose
/// source is no wider than the reduction type \p RT. All such extends must
/// agree in signedness, which is reported through \p IsSigned. Extends whose
/// source width equals the reduction width are collected in \p CI so the cost
/// model can skip them. Returns false as soon as any requirement is violated.
bool RecurrenceDescriptor::getSourceExtensionKind(
    Instruction *Start, Instruction *Exit, Type *RT, bool &IsSigned,
    SmallPtrSetImpl<Instruction *> &Visited,
    SmallPtrSetImpl<Instruction *> &CI) {

  SmallVector<Instruction *, 8> Worklist;
  bool FoundOneOperand = false;
  unsigned DstSize = RT->getPrimitiveSizeInBits();
  Worklist.push_back(Exit);

  // Traverse the instructions in the reduction expression, beginning with the
  // exit value.
  while (!Worklist.empty()) {
    Instruction *I = Worklist.pop_back_val();
    for (Use &U : I->operands()) {

      // Terminate the traversal if the operand is not an instruction, or we
      // reach the starting value.
      Instruction *J = dyn_cast<Instruction>(U.get());
      if (!J || J == Start)
        continue;

      // Otherwise, investigate the operation if it is also in the expression.
      if (Visited.count(J)) {
        Worklist.push_back(J);
        continue;
      }

      // If the operand is not in Visited, it is not a reduction operation, but
      // it does feed into one. Make sure it is either a single-use sign- or
      // zero-extend instruction.
      CastInst *Cast = dyn_cast<CastInst>(J);
      bool IsSExtInst = isa<SExtInst>(J);
      if (!Cast || !Cast->hasOneUse() || !(isa<ZExtInst>(J) || IsSExtInst))
        return false;

      // Ensure the source type of the extend is no larger than the reduction
      // type. It is not necessary for the types to be identical.
      unsigned SrcSize = Cast->getSrcTy()->getPrimitiveSizeInBits();
      if (SrcSize > DstSize)
        return false;

      // Furthermore, ensure that all such extends are of the same kind.
      if (FoundOneOperand) {
        if (IsSigned != IsSExtInst)
          return false;
      } else {
        FoundOneOperand = true;
        IsSigned = IsSExtInst;
      }

      // Lastly, if the source type of the extend matches the reduction type,
      // add the extend to CI so that we can avoid accounting for it in the
      // cost model.
      if (SrcSize == DstSize)
        CI.insert(Cast);
    }
  }
  return true;
}
|
|
|
|
|
2015-06-17 02:07:34 +08:00
|
|
|
/// Attempts to recognize \p Phi as the header PHI of a reduction of kind
/// \p Kind within \p TheLoop. On success, fills \p RedDes with the start
/// value, exit instruction, min/max kind, possible unsafe-algebra
/// instruction, (possibly narrowed) recurrence type, signedness, and the
/// cast instructions to ignore in costing, and returns true. Returns false
/// if the use/def structure does not form a legal single-exit reduction
/// cycle. \p HasFunNoNaNAttr gates recognition of float min/max patterns.
bool RecurrenceDescriptor::AddReductionVar(PHINode *Phi, RecurrenceKind Kind,
                                           Loop *TheLoop, bool HasFunNoNaNAttr,
                                           RecurrenceDescriptor &RedDes) {
  if (Phi->getNumIncomingValues() != 2)
    return false;

  // Reduction variables are only found in the loop header block.
  if (Phi->getParent() != TheLoop->getHeader())
    return false;

  // Obtain the reduction start value from the value that comes from the loop
  // preheader.
  Value *RdxStart = Phi->getIncomingValueForBlock(TheLoop->getLoopPreheader());

  // ExitInstruction is the single value which is used outside the loop.
  // We only allow for a single reduction value to be used outside the loop.
  // This includes users of the reduction, variables (which form a cycle
  // which ends in the phi node).
  Instruction *ExitInstruction = nullptr;
  // Indicates that we found a reduction operation in our scan.
  bool FoundReduxOp = false;

  // We start with the PHI node and scan for all of the users of this
  // instruction. All users must be instructions that can be used as reduction
  // variables (such as ADD). We must have a single out-of-block user. The cycle
  // must include the original PHI.
  bool FoundStartPHI = false;

  // To recognize min/max patterns formed by a icmp select sequence, we store
  // the number of instruction we saw from the recognized min/max pattern,
  // to make sure we only see exactly the two instructions.
  unsigned NumCmpSelectPatternInst = 0;
  InstDesc ReduxDesc(false, nullptr);

  // Data used for determining if the recurrence has been type-promoted.
  Type *RecurrenceType = Phi->getType();
  SmallPtrSet<Instruction *, 4> CastInsts;
  Instruction *Start = Phi;
  bool IsSigned = false;

  SmallPtrSet<Instruction *, 8> VisitedInsts;
  SmallVector<Instruction *, 8> Worklist;

  // Return early if the recurrence kind does not match the type of Phi. If the
  // recurrence kind is arithmetic, we attempt to look through AND operations
  // resulting from the type promotion performed by InstCombine. Vector
  // operations are not limited to the legal integer widths, so we may be able
  // to evaluate the reduction in the narrower width.
  if (RecurrenceType->isFloatingPointTy()) {
    if (!isFloatingPointRecurrenceKind(Kind))
      return false;
  } else {
    if (!isIntegerRecurrenceKind(Kind))
      return false;
    if (isArithmeticRecurrenceKind(Kind))
      Start = lookThroughAnd(Phi, RecurrenceType, VisitedInsts, CastInsts);
  }

  Worklist.push_back(Start);
  VisitedInsts.insert(Start);

  // A value in the reduction can be used:
  //  - By the reduction:
  //      - Reduction operation:
  //        - One use of reduction value (safe).
  //        - Multiple use of reduction value (not safe).
  //      - PHI:
  //        - All uses of the PHI must be the reduction (safe).
  //        - Otherwise, not safe.
  //  - By instructions outside of the loop (safe).
  //      * One value may have several outside users, but all outside
  //        uses must be of the same value.
  //  - By an instruction that is not part of the reduction (not safe).
  //    This is either:
  //      * An instruction type other than PHI or the reduction operation.
  //      * A PHI in the header other than the initial PHI.
  while (!Worklist.empty()) {
    Instruction *Cur = Worklist.back();
    Worklist.pop_back();

    // No Users.
    // If the instruction has no users then this is a broken chain and can't be
    // a reduction variable.
    if (Cur->use_empty())
      return false;

    bool IsAPhi = isa<PHINode>(Cur);

    // A header PHI use other than the original PHI.
    if (Cur != Phi && IsAPhi && Cur->getParent() == Phi->getParent())
      return false;

    // Reductions of instructions such as Div, and Sub is only possible if the
    // LHS is the reduction variable.
    if (!Cur->isCommutative() && !IsAPhi && !isa<SelectInst>(Cur) &&
        !isa<ICmpInst>(Cur) && !isa<FCmpInst>(Cur) &&
        !VisitedInsts.count(dyn_cast<Instruction>(Cur->getOperand(0))))
      return false;

    // Any reduction instruction must be of one of the allowed kinds. We ignore
    // the starting value (the Phi or an AND instruction if the Phi has been
    // type-promoted).
    if (Cur != Start) {
      ReduxDesc = isRecurrenceInstr(Cur, Kind, ReduxDesc, HasFunNoNaNAttr);
      if (!ReduxDesc.isRecurrence())
        return false;
    }

    // A reduction operation must only have one use of the reduction value.
    if (!IsAPhi && Kind != RK_IntegerMinMax && Kind != RK_FloatMinMax &&
        hasMultipleUsesOf(Cur, VisitedInsts))
      return false;

    // All inputs to a PHI node must be a reduction value.
    if (IsAPhi && Cur != Phi && !areAllUsesIn(Cur, VisitedInsts))
      return false;

    // Count the compare and the select of a min/max pattern so we can verify
    // below that exactly two such instructions were seen.
    if (Kind == RK_IntegerMinMax &&
        (isa<ICmpInst>(Cur) || isa<SelectInst>(Cur)))
      ++NumCmpSelectPatternInst;
    if (Kind == RK_FloatMinMax && (isa<FCmpInst>(Cur) || isa<SelectInst>(Cur)))
      ++NumCmpSelectPatternInst;

    // Check whether we found a reduction operator.
    FoundReduxOp |= !IsAPhi && Cur != Start;

    // Process users of current instruction. Push non-PHI nodes after PHI nodes
    // onto the stack. This way we are going to have seen all inputs to PHI
    // nodes once we get to them.
    SmallVector<Instruction *, 8> NonPHIs;
    SmallVector<Instruction *, 8> PHIs;
    for (User *U : Cur->users()) {
      Instruction *UI = cast<Instruction>(U);

      // Check if we found the exit user.
      BasicBlock *Parent = UI->getParent();
      if (!TheLoop->contains(Parent)) {
        // If we already know this instruction is used externally, move on to
        // the next user.
        if (ExitInstruction == Cur)
          continue;

        // Exit if you find multiple values used outside or if the header phi
        // node is being used. In this case the user uses the value of the
        // previous iteration, in which case we would loose "VF-1" iterations of
        // the reduction operation if we vectorize.
        if (ExitInstruction != nullptr || Cur == Phi)
          return false;

        // The instruction used by an outside user must be the last instruction
        // before we feed back to the reduction phi. Otherwise, we loose VF-1
        // operations on the value.
        if (!is_contained(Phi->operands(), Cur))
          return false;

        ExitInstruction = Cur;
        continue;
      }

      // Process instructions only once (termination). Each reduction cycle
      // value must only be used once, except by phi nodes and min/max
      // reductions which are represented as a cmp followed by a select.
      InstDesc IgnoredVal(false, nullptr);
      if (VisitedInsts.insert(UI).second) {
        if (isa<PHINode>(UI))
          PHIs.push_back(UI);
        else
          NonPHIs.push_back(UI);
      } else if (!isa<PHINode>(UI) &&
                 ((!isa<FCmpInst>(UI) && !isa<ICmpInst>(UI) &&
                   !isa<SelectInst>(UI)) ||
                  !isMinMaxSelectCmpPattern(UI, IgnoredVal).isRecurrence()))
        return false;

      // Remember that we completed the cycle.
      if (UI == Phi)
        FoundStartPHI = true;
    }
    Worklist.append(PHIs.begin(), PHIs.end());
    Worklist.append(NonPHIs.begin(), NonPHIs.end());
  }

  // This means we have seen one but not the other instruction of the
  // pattern or more than just a select and cmp.
  if ((Kind == RK_IntegerMinMax || Kind == RK_FloatMinMax) &&
      NumCmpSelectPatternInst != 2)
    return false;

  if (!FoundStartPHI || !FoundReduxOp || !ExitInstruction)
    return false;

  // If we think Phi may have been type-promoted, we also need to ensure that
  // all source operands of the reduction are either SExtInsts or ZEstInsts. If
  // so, we will be able to evaluate the reduction in the narrower bit width.
  if (Start != Phi)
    if (!getSourceExtensionKind(Start, ExitInstruction, RecurrenceType,
                                IsSigned, VisitedInsts, CastInsts))
      return false;

  // We found a reduction var if we have reached the original phi node and we
  // only have a single instruction with out-of-loop users.

  // The ExitInstruction(Instruction which is allowed to have out-of-loop users)
  // is saved as part of the RecurrenceDescriptor.

  // Save the description of this reduction variable.
  RecurrenceDescriptor RD(
      RdxStart, ExitInstruction, Kind, ReduxDesc.getMinMaxKind(),
      ReduxDesc.getUnsafeAlgebraInst(), RecurrenceType, IsSigned, CastInsts);
  RedDes = RD;

  return true;
}
|
|
|
|
|
|
|
|
/// Returns true if the instruction is a Select(ICmp(X, Y), X, Y) instruction
/// pattern corresponding to a min(X, Y) or max(X, Y).
///
/// Given the cmp of such a pair, returns the select with the min/max kind
/// inherited from \p Prev; given the select itself, classifies the concrete
/// min/max flavor (signed/unsigned/float, ordered/unordered). Any input that
/// does not complete the pattern yields a non-recurrence InstDesc.
RecurrenceDescriptor::InstDesc
RecurrenceDescriptor::isMinMaxSelectCmpPattern(Instruction *I, InstDesc &Prev) {

  assert((isa<ICmpInst>(I) || isa<FCmpInst>(I) || isa<SelectInst>(I)) &&
         "Expect a select instruction");
  Instruction *Cmp = nullptr;
  SelectInst *Select = nullptr;

  // We must handle the select(cmp()) as a single instruction. Advance to the
  // select.
  if ((Cmp = dyn_cast<ICmpInst>(I)) || (Cmp = dyn_cast<FCmpInst>(I))) {
    if (!Cmp->hasOneUse() || !(Select = dyn_cast<SelectInst>(*I->user_begin())))
      return InstDesc(false, I);
    return InstDesc(Select, Prev.getMinMaxKind());
  }

  // Only handle single use cases for now.
  if (!(Select = dyn_cast<SelectInst>(I)))
    return InstDesc(false, I);
  if (!(Cmp = dyn_cast<ICmpInst>(I->getOperand(0))) &&
      !(Cmp = dyn_cast<FCmpInst>(I->getOperand(0))))
    return InstDesc(false, I);
  if (!Cmp->hasOneUse())
    return InstDesc(false, I);

  Value *CmpLeft;
  Value *CmpRight;

  // Look for a min/max pattern.
  // Note: both ordered and unordered float matches map to the same
  // MRK_FloatMin/MRK_FloatMax kinds.
  if (m_UMin(m_Value(CmpLeft), m_Value(CmpRight)).match(Select))
    return InstDesc(Select, MRK_UIntMin);
  else if (m_UMax(m_Value(CmpLeft), m_Value(CmpRight)).match(Select))
    return InstDesc(Select, MRK_UIntMax);
  else if (m_SMax(m_Value(CmpLeft), m_Value(CmpRight)).match(Select))
    return InstDesc(Select, MRK_SIntMax);
  else if (m_SMin(m_Value(CmpLeft), m_Value(CmpRight)).match(Select))
    return InstDesc(Select, MRK_SIntMin);
  else if (m_OrdFMin(m_Value(CmpLeft), m_Value(CmpRight)).match(Select))
    return InstDesc(Select, MRK_FloatMin);
  else if (m_OrdFMax(m_Value(CmpLeft), m_Value(CmpRight)).match(Select))
    return InstDesc(Select, MRK_FloatMax);
  else if (m_UnordFMin(m_Value(CmpLeft), m_Value(CmpRight)).match(Select))
    return InstDesc(Select, MRK_FloatMin);
  else if (m_UnordFMax(m_Value(CmpLeft), m_Value(CmpRight)).match(Select))
    return InstDesc(Select, MRK_FloatMax);

  return InstDesc(false, I);
}
|
|
|
|
|
2015-06-17 06:59:45 +08:00
|
|
|
/// Classifies instruction \p I as a recurrence operation of kind \p Kind.
/// Returns an InstDesc that is a recurrence only when the opcode matches the
/// requested kind; PHIs pass through the previous descriptor's state, and
/// cmp/select instructions are delegated to the min/max pattern matcher.
/// For floating-point ops, the first instruction lacking unsafe-algebra
/// flags is recorded as the unsafe (unvectorizable) algebra instruction.
RecurrenceDescriptor::InstDesc
RecurrenceDescriptor::isRecurrenceInstr(Instruction *I, RecurrenceKind Kind,
                                        InstDesc &Prev, bool HasFunNoNaNAttr) {
  bool FP = I->getType()->isFloatingPointTy();
  Instruction *UAI = Prev.getUnsafeAlgebraInst();
  if (!UAI && FP && !I->hasUnsafeAlgebra())
    UAI = I; // Found an unsafe (unvectorizable) algebra instruction.

  switch (I->getOpcode()) {
  default:
    return InstDesc(false, I);
  case Instruction::PHI:
    return InstDesc(I, Prev.getMinMaxKind(), Prev.getUnsafeAlgebraInst());
  case Instruction::Sub:
  case Instruction::Add:
    return InstDesc(Kind == RK_IntegerAdd, I);
  case Instruction::Mul:
    return InstDesc(Kind == RK_IntegerMult, I);
  case Instruction::And:
    return InstDesc(Kind == RK_IntegerAnd, I);
  case Instruction::Or:
    return InstDesc(Kind == RK_IntegerOr, I);
  case Instruction::Xor:
    return InstDesc(Kind == RK_IntegerXor, I);
  case Instruction::FMul:
    return InstDesc(Kind == RK_FloatMult, I, UAI);
  case Instruction::FSub:
  case Instruction::FAdd:
    return InstDesc(Kind == RK_FloatAdd, I, UAI);
  case Instruction::FCmp:
  case Instruction::ICmp:
  case Instruction::Select:
    // Float min/max is only recognized when NaNs are known absent
    // (function-level "no-nans-fp-math" attribute).
    if (Kind != RK_IntegerMinMax &&
        (!HasFunNoNaNAttr || Kind != RK_FloatMinMax))
      return InstDesc(false, I);
    return isMinMaxSelectCmpPattern(I, Prev);
  }
}
|
|
|
|
|
2015-06-17 02:07:34 +08:00
|
|
|
bool RecurrenceDescriptor::hasMultipleUsesOf(
|
2015-04-20 12:38:33 +08:00
|
|
|
Instruction *I, SmallPtrSetImpl<Instruction *> &Insts) {
|
|
|
|
unsigned NumUses = 0;
|
|
|
|
for (User::op_iterator Use = I->op_begin(), E = I->op_end(); Use != E;
|
|
|
|
++Use) {
|
|
|
|
if (Insts.count(dyn_cast<Instruction>(*Use)))
|
|
|
|
++NumUses;
|
|
|
|
if (NumUses > 1)
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
2015-06-17 02:07:34 +08:00
|
|
|
/// Returns true if \p Phi is the header PHI of a reduction of any supported
/// kind in \p TheLoop, trying each kind in turn via AddReductionVar. On
/// success, \p RedDes is populated with the recognized reduction's
/// description. Float min/max recognition is gated on the function's
/// "no-nans-fp-math" attribute.
bool RecurrenceDescriptor::isReductionPHI(PHINode *Phi, Loop *TheLoop,
                                          RecurrenceDescriptor &RedDes) {

  BasicBlock *Header = TheLoop->getHeader();
  Function &F = *Header->getParent();
  bool HasFunNoNaNAttr =
      F.getFnAttribute("no-nans-fp-math").getValueAsString() == "true";

  if (AddReductionVar(Phi, RK_IntegerAdd, TheLoop, HasFunNoNaNAttr, RedDes)) {
    DEBUG(dbgs() << "Found an ADD reduction PHI." << *Phi << "\n");
    return true;
  }
  if (AddReductionVar(Phi, RK_IntegerMult, TheLoop, HasFunNoNaNAttr, RedDes)) {
    DEBUG(dbgs() << "Found a MUL reduction PHI." << *Phi << "\n");
    return true;
  }
  if (AddReductionVar(Phi, RK_IntegerOr, TheLoop, HasFunNoNaNAttr, RedDes)) {
    DEBUG(dbgs() << "Found an OR reduction PHI." << *Phi << "\n");
    return true;
  }
  if (AddReductionVar(Phi, RK_IntegerAnd, TheLoop, HasFunNoNaNAttr, RedDes)) {
    DEBUG(dbgs() << "Found an AND reduction PHI." << *Phi << "\n");
    return true;
  }
  if (AddReductionVar(Phi, RK_IntegerXor, TheLoop, HasFunNoNaNAttr, RedDes)) {
    DEBUG(dbgs() << "Found a XOR reduction PHI." << *Phi << "\n");
    return true;
  }
  if (AddReductionVar(Phi, RK_IntegerMinMax, TheLoop, HasFunNoNaNAttr,
                      RedDes)) {
    DEBUG(dbgs() << "Found a MINMAX reduction PHI." << *Phi << "\n");
    return true;
  }
  if (AddReductionVar(Phi, RK_FloatMult, TheLoop, HasFunNoNaNAttr, RedDes)) {
    DEBUG(dbgs() << "Found an FMult reduction PHI." << *Phi << "\n");
    return true;
  }
  if (AddReductionVar(Phi, RK_FloatAdd, TheLoop, HasFunNoNaNAttr, RedDes)) {
    DEBUG(dbgs() << "Found an FAdd reduction PHI." << *Phi << "\n");
    return true;
  }
  if (AddReductionVar(Phi, RK_FloatMinMax, TheLoop, HasFunNoNaNAttr, RedDes)) {
    DEBUG(dbgs() << "Found an float MINMAX reduction PHI." << *Phi << "\n");
    return true;
  }
  // Not a reduction of known type.
  return false;
}
|
|
|
|
|
2016-02-20 01:56:08 +08:00
|
|
|
/// Returns true if \p Phi is a first-order recurrence: a header PHI whose
/// latch-incoming value is a non-PHI instruction defined inside the loop,
/// where that previous value dominates every user of the PHI. The dominance
/// condition guarantees the vectorizer never needs the initial value before
/// the first iteration.
bool RecurrenceDescriptor::isFirstOrderRecurrence(PHINode *Phi, Loop *TheLoop,
                                                  DominatorTree *DT) {

  // Ensure the phi node is in the loop header and has two incoming values.
  if (Phi->getParent() != TheLoop->getHeader() ||
      Phi->getNumIncomingValues() != 2)
    return false;

  // Ensure the loop has a preheader and a single latch block. The loop
  // vectorizer will need the latch to set up the next iteration of the loop.
  auto *Preheader = TheLoop->getLoopPreheader();
  auto *Latch = TheLoop->getLoopLatch();
  if (!Preheader || !Latch)
    return false;

  // Ensure the phi node's incoming blocks are the loop preheader and latch.
  if (Phi->getBasicBlockIndex(Preheader) < 0 ||
      Phi->getBasicBlockIndex(Latch) < 0)
    return false;

  // Get the previous value. The previous value comes from the latch edge while
  // the initial value comes form the preheader edge.
  auto *Previous = dyn_cast<Instruction>(Phi->getIncomingValueForBlock(Latch));
  if (!Previous || !TheLoop->contains(Previous) || isa<PHINode>(Previous))
    return false;

  // Ensure every user of the phi node is dominated by the previous value. The
  // dominance requirement ensures the loop vectorizer will not need to
  // vectorize the initial value prior to the first iteration of the loop.
  for (User *U : Phi->users())
    if (auto *I = dyn_cast<Instruction>(U))
      if (!DT->dominates(Previous, I))
        return false;

  return true;
}
|
|
|
|
|
2015-04-20 12:38:33 +08:00
|
|
|
/// This function returns the identity element (or neutral element) for
|
|
|
|
/// the operation K.
|
2015-06-17 02:07:34 +08:00
|
|
|
Constant *RecurrenceDescriptor::getRecurrenceIdentity(RecurrenceKind K,
|
|
|
|
Type *Tp) {
|
2015-04-20 12:38:33 +08:00
|
|
|
switch (K) {
|
|
|
|
case RK_IntegerXor:
|
|
|
|
case RK_IntegerAdd:
|
|
|
|
case RK_IntegerOr:
|
|
|
|
// Adding, Xoring, Oring zero to a number does not change it.
|
|
|
|
return ConstantInt::get(Tp, 0);
|
|
|
|
case RK_IntegerMult:
|
|
|
|
// Multiplying a number by 1 does not change it.
|
|
|
|
return ConstantInt::get(Tp, 1);
|
|
|
|
case RK_IntegerAnd:
|
|
|
|
// AND-ing a number with an all-1 value does not change it.
|
|
|
|
return ConstantInt::get(Tp, -1, true);
|
|
|
|
case RK_FloatMult:
|
|
|
|
// Multiplying a number by 1 does not change it.
|
|
|
|
return ConstantFP::get(Tp, 1.0L);
|
|
|
|
case RK_FloatAdd:
|
|
|
|
// Adding zero to a number does not change it.
|
|
|
|
return ConstantFP::get(Tp, 0.0L);
|
|
|
|
default:
|
2015-06-17 02:07:34 +08:00
|
|
|
llvm_unreachable("Unknown recurrence kind");
|
2015-04-20 12:38:33 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-06-17 02:07:34 +08:00
|
|
|
/// This function translates the recurrence kind to an LLVM binary operator.
|
|
|
|
unsigned RecurrenceDescriptor::getRecurrenceBinOp(RecurrenceKind Kind) {
|
2015-04-20 12:38:33 +08:00
|
|
|
switch (Kind) {
|
|
|
|
case RK_IntegerAdd:
|
|
|
|
return Instruction::Add;
|
|
|
|
case RK_IntegerMult:
|
|
|
|
return Instruction::Mul;
|
|
|
|
case RK_IntegerOr:
|
|
|
|
return Instruction::Or;
|
|
|
|
case RK_IntegerAnd:
|
|
|
|
return Instruction::And;
|
|
|
|
case RK_IntegerXor:
|
|
|
|
return Instruction::Xor;
|
|
|
|
case RK_FloatMult:
|
|
|
|
return Instruction::FMul;
|
|
|
|
case RK_FloatAdd:
|
|
|
|
return Instruction::FAdd;
|
|
|
|
case RK_IntegerMinMax:
|
|
|
|
return Instruction::ICmp;
|
|
|
|
case RK_FloatMinMax:
|
|
|
|
return Instruction::FCmp;
|
|
|
|
default:
|
2015-06-17 02:07:34 +08:00
|
|
|
llvm_unreachable("Unknown recurrence operation");
|
2015-04-20 12:38:33 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-06-17 06:59:45 +08:00
|
|
|
/// Emit a min/max operation corresponding to MinMaxRecurrenceKind \p RK,
/// implemented as a compare of \p Left and \p Right followed by a select of
/// the winning operand.
Value *RecurrenceDescriptor::createMinMaxOp(IRBuilder<> &Builder,
                                            MinMaxRecurrenceKind RK,
                                            Value *Left, Value *Right) {
  // Map the recurrence kind to the compare predicate that picks the winner.
  // ICMP_NE is a dead initializer; every reachable case overwrites it.
  CmpInst::Predicate P = CmpInst::ICMP_NE;
  switch (RK) {
  default:
    llvm_unreachable("Unknown min/max recurrence kind");
  case MRK_UIntMin:
    P = CmpInst::ICMP_ULT;
    break;
  case MRK_UIntMax:
    P = CmpInst::ICMP_UGT;
    break;
  case MRK_SIntMin:
    P = CmpInst::ICMP_SLT;
    break;
  case MRK_SIntMax:
    P = CmpInst::ICMP_SGT;
    break;
  case MRK_FloatMin:
    P = CmpInst::FCMP_OLT;
    break;
  case MRK_FloatMax:
    P = CmpInst::FCMP_OGT;
    break;
  }

  // We only match FP sequences with unsafe algebra, so we can unconditionally
  // set it on any generated instructions. The RAII guard restores the
  // builder's previous fast-math flags when this function returns.
  IRBuilder<>::FastMathFlagGuard FMFG(Builder);
  FastMathFlags FMF;
  FMF.setUnsafeAlgebra();
  Builder.setFastMathFlags(FMF);

  // Emit the compare: FP compare for the float kinds, integer otherwise.
  Value *Cmp;
  if (RK == MRK_FloatMin || RK == MRK_FloatMax)
    Cmp = Builder.CreateFCmp(P, Left, Right, "rdx.minmax.cmp");
  else
    Cmp = Builder.CreateICmp(P, Left, Right, "rdx.minmax.cmp");

  // Select Left when the predicate holds, i.e. when Left is the min/max.
  Value *Select = Builder.CreateSelect(Cmp, Left, Right, "rdx.minmax.select");
  return Select;
}
|
2015-04-23 16:29:20 +08:00
|
|
|
|
2015-08-27 17:53:00 +08:00
|
|
|
/// Construct an induction descriptor and assert the class invariants for the
/// given induction kind: the start value, step SCEV, and (for FP inductions)
/// the original FAdd/FSub must all be mutually consistent.
InductionDescriptor::InductionDescriptor(Value *Start, InductionKind K,
                                         const SCEV *Step, BinaryOperator *BOp)
    : StartValue(Start), IK(K), Step(Step), InductionBinOp(BOp) {
  assert(IK != IK_NoInduction && "Not an induction");

  // Start value type should match the induction kind and the value
  // itself should not be null.
  assert(StartValue && "StartValue is null");
  assert((IK != IK_PtrInduction || StartValue->getType()->isPointerTy()) &&
         "StartValue is not a pointer for pointer induction");
  assert((IK != IK_IntInduction || StartValue->getType()->isIntegerTy()) &&
         "StartValue is not an integer for integer induction");

  // Check the Step Value. It should be non-zero integer value.
  assert((!getConstIntStepValue() || !getConstIntStepValue()->isZero()) &&
         "Step value is zero");

  // Pointer inductions are only supported with a compile-time constant step
  // (see isInductionPHI, which rejects non-constant pointer strides).
  assert((IK != IK_PtrInduction || getConstIntStepValue()) &&
         "Step value should be constant for pointer induction");

  // Integer/pointer inductions step by an integer amount; FP inductions step
  // by an FP amount and carry the FAdd/FSub that produces the recurrence.
  assert((IK == IK_FpInduction || Step->getType()->isIntegerTy()) &&
         "StepValue is not an integer");

  assert((IK != IK_FpInduction || Step->getType()->isFloatingPointTy()) &&
         "StepValue is not FP for FpInduction");
  assert((IK != IK_FpInduction || (InductionBinOp &&
          (InductionBinOp->getOpcode() == Instruction::FAdd ||
           InductionBinOp->getOpcode() == Instruction::FSub))) &&
         "Binary opcode should be specified for FP induction");
}
|
|
|
|
|
|
|
|
int InductionDescriptor::getConsecutiveDirection() const {
|
2016-05-10 15:33:35 +08:00
|
|
|
ConstantInt *ConstStep = getConstIntStepValue();
|
|
|
|
if (ConstStep && (ConstStep->isOne() || ConstStep->isMinusOne()))
|
|
|
|
return ConstStep->getSExtValue();
|
2015-08-27 17:53:00 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2016-05-10 15:33:35 +08:00
|
|
|
/// Returns the step as a ConstantInt when the step SCEV is a constant whose
/// underlying value is a ConstantInt, and null otherwise.
ConstantInt *InductionDescriptor::getConstIntStepValue() const {
  if (auto *SC = dyn_cast<SCEVConstant>(Step))
    return dyn_cast<ConstantInt>(SC->getValue());
  return nullptr;
}
|
|
|
|
|
|
|
|
/// Materialize the value of this induction at \p Index, i.e. the equivalent
/// of Start + Index * Step (or Start -/+ Index * Step for FP inductions),
/// emitting code at the builder's current insertion point.
Value *InductionDescriptor::transform(IRBuilder<> &B, Value *Index,
                                      ScalarEvolution *SE,
                                      const DataLayout& DL) const {

  SCEVExpander Exp(*SE, DL, "induction");
  assert(Index->getType() == Step->getType() &&
         "Index type does not match StepValue type");
  switch (IK) {
  case IK_IntInduction: {
    assert(Index->getType() == StartValue->getType() &&
           "Index type does not match StartValue type");

    // FIXME: Theoretically, we can call getAddExpr() of ScalarEvolution
    // and calculate (Start + Index * Step) for all cases, without
    // special handling for "isOne" and "isMinusOne".
    // But in the real life the result code getting worse. We mix SCEV
    // expressions and ADD/SUB operations and receive redundant
    // intermediate values being calculated in different ways and
    // Instcombine is unable to reduce them all.

    // Fast paths for unit strides: emit a plain add/sub instead of going
    // through the SCEV expander.
    if (getConstIntStepValue() &&
        getConstIntStepValue()->isMinusOne())
      return B.CreateSub(StartValue, Index);
    if (getConstIntStepValue() &&
        getConstIntStepValue()->isOne())
      return B.CreateAdd(StartValue, Index);
    // General case: expand Start + Index * Step via SCEV.
    const SCEV *S = SE->getAddExpr(SE->getSCEV(StartValue),
                                   SE->getMulExpr(Step, SE->getSCEV(Index)));
    return Exp.expandCodeFor(S, StartValue->getType(), &*B.GetInsertPoint());
  }
  case IK_PtrInduction: {
    // Expand the byte-scaled index (Index * Step) and apply it with a GEP,
    // which performs the element-size scaling.
    assert(isa<SCEVConstant>(Step) &&
           "Expected constant step for pointer induction");
    const SCEV *S = SE->getMulExpr(SE->getSCEV(Index), Step);
    Index = Exp.expandCodeFor(S, Index->getType(), &*B.GetInsertPoint());
    return B.CreateGEP(nullptr, StartValue, Index);
  }
  case IK_FpInduction: {
    assert(Step->getType()->isFloatingPointTy() && "Expected FP Step value");
    assert(InductionBinOp &&
           (InductionBinOp->getOpcode() == Instruction::FAdd ||
            InductionBinOp->getOpcode() == Instruction::FSub) &&
           "Original bin op should be defined for FP induction");

    // FP steps are recorded as SCEVUnknown (see isFPInductionPHI); recover
    // the underlying IR value.
    Value *StepValue = cast<SCEVUnknown>(Step)->getValue();

    // Floating point operations had to be 'fast' to enable the induction.
    FastMathFlags Flags;
    Flags.setUnsafeAlgebra();

    Value *MulExp = B.CreateFMul(StepValue, Index);
    if (isa<Instruction>(MulExp))
      // We have to check, the MulExp may be a constant.
      cast<Instruction>(MulExp)->setFastMathFlags(Flags);

    // Replay the original FAdd/FSub: Start op (Step * Index).
    Value *BOp = B.CreateBinOp(InductionBinOp->getOpcode() , StartValue,
                               MulExp, "induction");
    if (isa<Instruction>(BOp))
      cast<Instruction>(BOp)->setFastMathFlags(Flags);

    return BOp;
  }
  case IK_NoInduction:
    return nullptr;
  }
  llvm_unreachable("invalid enum");
}
|
|
|
|
|
2016-07-24 15:24:54 +08:00
|
|
|
/// Recognize a floating-point induction phi of the form
///   phi = [Start, preheader], [phi fadd/fsub Addend, latch]
/// with a loop-invariant Addend, and populate \p D on success.
bool InductionDescriptor::isFPInductionPHI(PHINode *Phi, const Loop *TheLoop,
                                           ScalarEvolution *SE,
                                           InductionDescriptor &D) {

  // Here we only handle FP induction variables.
  assert(Phi->getType()->isFloatingPointTy() && "Unexpected Phi type");

  // Induction phis live in the loop header.
  if (TheLoop->getHeader() != Phi->getParent())
    return false;

  // The loop may have multiple entrances or multiple exits; we can analyze
  // this phi if it has a unique entry value and a unique backedge value.
  if (Phi->getNumIncomingValues() != 2)
    return false;
  // Classify the two incoming values: the one defined inside the loop is the
  // backedge value, the other is the start value.
  Value *BEValue = nullptr, *StartValue = nullptr;
  if (TheLoop->contains(Phi->getIncomingBlock(0))) {
    BEValue = Phi->getIncomingValue(0);
    StartValue = Phi->getIncomingValue(1);
  } else {
    assert(TheLoop->contains(Phi->getIncomingBlock(1)) &&
           "Unexpected Phi node in the loop");
    BEValue = Phi->getIncomingValue(1);
    StartValue = Phi->getIncomingValue(0);
  }

  BinaryOperator *BOp = dyn_cast<BinaryOperator>(BEValue);
  if (!BOp)
    return false;

  // Extract the step operand. FAdd is commutative, so the phi may be either
  // operand; FSub only matches phi - Addend (Addend - phi is not an
  // induction).
  Value *Addend = nullptr;
  if (BOp->getOpcode() == Instruction::FAdd) {
    if (BOp->getOperand(0) == Phi)
      Addend = BOp->getOperand(1);
    else if (BOp->getOperand(1) == Phi)
      Addend = BOp->getOperand(0);
  } else if (BOp->getOpcode() == Instruction::FSub)
    if (BOp->getOperand(0) == Phi)
      Addend = BOp->getOperand(1);

  if (!Addend)
    return false;

  // The addend should be loop invariant
  if (auto *I = dyn_cast<Instruction>(Addend))
    if (TheLoop->contains(I))
      return false;

  // FP Step has unknown SCEV
  const SCEV *Step = SE->getUnknown(Addend);
  D = InductionDescriptor(StartValue, IK_FpInduction, Step, BOp);
  return true;
}
|
|
|
|
|
|
|
|
bool InductionDescriptor::isInductionPHI(PHINode *Phi, const Loop *TheLoop,
|
2016-05-05 23:20:39 +08:00
|
|
|
PredicatedScalarEvolution &PSE,
|
|
|
|
InductionDescriptor &D,
|
|
|
|
bool Assume) {
|
|
|
|
Type *PhiTy = Phi->getType();
|
2016-07-24 15:24:54 +08:00
|
|
|
|
|
|
|
// Handle integer and pointer inductions variables.
|
|
|
|
// Now we handle also FP induction but not trying to make a
|
|
|
|
// recurrent expression from the PHI node in-place.
|
|
|
|
|
|
|
|
if (!PhiTy->isIntegerTy() && !PhiTy->isPointerTy() &&
|
|
|
|
!PhiTy->isFloatTy() && !PhiTy->isDoubleTy() && !PhiTy->isHalfTy())
|
2016-05-05 23:20:39 +08:00
|
|
|
return false;
|
|
|
|
|
2016-07-24 15:24:54 +08:00
|
|
|
if (PhiTy->isFloatingPointTy())
|
|
|
|
return isFPInductionPHI(Phi, TheLoop, PSE.getSE(), D);
|
|
|
|
|
2016-05-05 23:20:39 +08:00
|
|
|
const SCEV *PhiScev = PSE.getSCEV(Phi);
|
|
|
|
const auto *AR = dyn_cast<SCEVAddRecExpr>(PhiScev);
|
|
|
|
|
|
|
|
// We need this expression to be an AddRecExpr.
|
|
|
|
if (Assume && !AR)
|
|
|
|
AR = PSE.getAsAddRec(Phi);
|
|
|
|
|
|
|
|
if (!AR) {
|
|
|
|
DEBUG(dbgs() << "LV: PHI is not a poly recurrence.\n");
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2016-07-24 15:24:54 +08:00
|
|
|
return isInductionPHI(Phi, TheLoop, PSE.getSE(), D, AR);
|
2016-05-05 23:20:39 +08:00
|
|
|
}
|
|
|
|
|
2016-07-24 15:24:54 +08:00
|
|
|
/// Recognize an integer or pointer induction phi from its SCEV add recurrence
/// (\p Expr when provided, otherwise computed here) and populate \p D.
/// Pointer inductions additionally require a constant step that is a multiple
/// of the pointee size; the recorded step is in element units.
bool InductionDescriptor::isInductionPHI(PHINode *Phi, const Loop *TheLoop,
                                         ScalarEvolution *SE,
                                         InductionDescriptor &D,
                                         const SCEV *Expr) {
  Type *PhiTy = Phi->getType();
  // We only handle integer and pointer inductions variables.
  if (!PhiTy->isIntegerTy() && !PhiTy->isPointerTy())
    return false;

  // Check that the PHI is consecutive.
  const SCEV *PhiScev = Expr ? Expr : SE->getSCEV(Phi);
  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PhiScev);

  if (!AR) {
    DEBUG(dbgs() << "LV: PHI is not a poly recurrence.\n");
    return false;
  }

  if (AR->getLoop() != TheLoop) {
    // FIXME: We should treat this as a uniform. Unfortunately, we
    // don't currently know how to handled uniform PHIs.
    DEBUG(dbgs() << "LV: PHI is a recurrence with respect to an outer loop.\n");
    return false;
  }

  Value *StartValue =
    Phi->getIncomingValueForBlock(AR->getLoop()->getLoopPreheader());
  const SCEV *Step = AR->getStepRecurrence(*SE);
  // Calculate the pointer stride and check if it is consecutive.
  // The stride may be a constant or a loop invariant integer value.
  const SCEVConstant *ConstStep = dyn_cast<SCEVConstant>(Step);
  if (!ConstStep && !SE->isLoopInvariant(Step, TheLoop))
    return false;

  // Integer inductions record the step as-is.
  if (PhiTy->isIntegerTy()) {
    D = InductionDescriptor(StartValue, IK_IntInduction, Step);
    return true;
  }

  assert(PhiTy->isPointerTy() && "The PHI must be a pointer");
  // Pointer induction should be a constant.
  if (!ConstStep)
    return false;

  ConstantInt *CV = ConstStep->getValue();
  Type *PointerElementType = PhiTy->getPointerElementType();
  // The pointer stride cannot be determined if the pointer element type is not
  // sized.
  if (!PointerElementType->isSized())
    return false;

  const DataLayout &DL = Phi->getModule()->getDataLayout();
  int64_t Size = static_cast<int64_t>(DL.getTypeAllocSize(PointerElementType));
  // Zero-sized element types (e.g. empty structs) cannot define a stride.
  if (!Size)
    return false;

  // The byte stride must be an exact multiple of the element size; the
  // recorded step is the stride in elements, signed.
  int64_t CVSize = CV->getSExtValue();
  if (CVSize % Size)
    return false;
  auto *StepValue = SE->getConstant(CV->getType(), CVSize / Size,
                                    true /* signed */);
  D = InductionDescriptor(StartValue, IK_PtrInduction, StepValue);
  return true;
}
|
2015-08-19 13:40:42 +08:00
|
|
|
|
|
|
|
/// \brief Returns the instructions that use values defined in the loop.
|
|
|
|
SmallVector<Instruction *, 8> llvm::findDefsUsedOutsideOfLoop(Loop *L) {
|
|
|
|
SmallVector<Instruction *, 8> UsedOutside;
|
|
|
|
|
|
|
|
for (auto *Block : L->getBlocks())
|
|
|
|
// FIXME: I believe that this could use copy_if if the Inst reference could
|
|
|
|
// be adapted into a pointer.
|
|
|
|
for (auto &Inst : *Block) {
|
|
|
|
auto Users = Inst.users();
|
2016-08-12 05:15:00 +08:00
|
|
|
if (any_of(Users, [&](User *U) {
|
2015-08-19 13:40:42 +08:00
|
|
|
auto *Use = cast<Instruction>(U);
|
|
|
|
return !L->contains(Use->getParent());
|
|
|
|
}))
|
|
|
|
UsedOutside.push_back(&Inst);
|
|
|
|
}
|
|
|
|
|
|
|
|
return UsedOutside;
|
|
|
|
}
|
[LPM] Factor all of the loop analysis usage updates into a common helper
routine.
We were getting this wrong in small ways and generally being very
inconsistent about it across loop passes. Instead, let's have a common
place where we do this. One minor downside is that this will require
some analyses like SCEV in more places than they are strictly needed.
However, this seems benign as these analyses are complete no-ops, and
without this consistency we can in many cases end up with the legacy
pass manager scheduling deciding to split up a loop pass pipeline in
order to run the function analysis half-way through. It is very, very
annoying to fix these without just being very pedantic across the board.
The only loop passes I've not updated here are ones that use
AU.setPreservesAll() such as IVUsers (an analysis) and the pass printer.
They seemed less relevant.
With this patch, almost all of the problems in PR24804 around loop pass
pipelines are fixed. The one remaining issue is that we run simplify-cfg
and instcombine in the middle of the loop pass pipeline. We've recently
added some loop variants of these passes that would seem substantially
cleaner to use, but this at least gets us much closer to the previous
state. Notably, the seven loop pass managers is down to three.
I've not updated the loop passes using LoopAccessAnalysis because that
analysis hasn't been fully wired into LoopSimplify/LCSSA, and it isn't
clear that those transforms want to support those forms anyways. They
all run late anyways, so this is harmless. Similarly, LSR is left alone
because it already carefully manages its forms and doesn't need to get
fused into a single loop pass manager with a bunch of other loop passes.
LoopReroll didn't use loop simplified form previously, and I've updated
the test case to match the trivially different output.
Finally, I've also factored all the pass initialization for the passes
that use this technique as well, so that should be done regularly and
reliably.
Thanks to James for the help reviewing and thinking about this stuff,
and Ben for help thinking about it as well!
Differential Revision: http://reviews.llvm.org/D17435
llvm-svn: 261316
2016-02-19 18:45:18 +08:00
|
|
|
|
|
|
|
/// Declare the analysis requirements and preservations shared by all loop
/// passes. Intended to be called from a loop pass's getAnalysisUsage(); the
/// set below is what keeps the legacy pass manager from splitting a loop pass
/// pipeline to recompute function analyses mid-stream.
void llvm::getLoopAnalysisUsage(AnalysisUsage &AU) {
  // By definition, all loop passes need the LoopInfo analysis and the
  // Dominator tree it depends on. Because they all participate in the loop
  // pass manager, they must also preserve these.
  AU.addRequired<DominatorTreeWrapperPass>();
  AU.addPreserved<DominatorTreeWrapperPass>();
  AU.addRequired<LoopInfoWrapperPass>();
  AU.addPreserved<LoopInfoWrapperPass>();

  // We must also preserve LoopSimplify and LCSSA. We locally access their IDs
  // here because users shouldn't directly get them from this header.
  extern char &LoopSimplifyID;
  extern char &LCSSAID;
  AU.addRequiredID(LoopSimplifyID);
  AU.addPreservedID(LoopSimplifyID);
  AU.addRequiredID(LCSSAID);
  AU.addPreservedID(LCSSAID);
  // This is used in the LPPassManager to perform LCSSA verification on passes
  // which preserve lcssa form
  AU.addRequired<LCSSAVerificationPass>();
  AU.addPreserved<LCSSAVerificationPass>();

  // Loop passes are designed to run inside of a loop pass manager which means
  // that any function analyses they require must be required by the first loop
  // pass in the manager (so that it is computed before the loop pass manager
  // runs) and preserved by all loop passes in the manager. To make this
  // reasonably robust, the set needed for most loop passes is maintained here.
  // If your loop pass requires an analysis not listed here, you will need to
  // carefully audit the loop pass manager nesting structure that results.
  AU.addRequired<AAResultsWrapperPass>();
  AU.addPreserved<AAResultsWrapperPass>();
  AU.addPreserved<BasicAAWrapperPass>();
  AU.addPreserved<GlobalsAAWrapperPass>();
  AU.addPreserved<SCEVAAWrapperPass>();
  AU.addRequired<ScalarEvolutionWrapperPass>();
  AU.addPreserved<ScalarEvolutionWrapperPass>();
}
|
|
|
|
|
|
|
|
/// Manually defined generic "LoopPass" dependency initialization. This is used
/// to initialize the exact set of passes from above in \c
/// getLoopAnalysisUsage. It can be used within a loop pass's initialization
/// with:
///
/// INITIALIZE_PASS_DEPENDENCY(LoopPass)
///
/// As-if "LoopPass" were a pass.
void llvm::initializeLoopPassPass(PassRegistry &Registry) {
  // NOTE: keep this list in sync with the analyses declared in
  // getLoopAnalysisUsage above.
  INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
  INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
  INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
  INITIALIZE_PASS_DEPENDENCY(LCSSAWrapperPass)
  INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
  INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass)
  INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
  INITIALIZE_PASS_DEPENDENCY(SCEVAAWrapperPass)
  INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
}
|
2016-04-22 01:33:17 +08:00
|
|
|
|
2016-04-23 03:10:05 +08:00
|
|
|
/// \brief Find string metadata for loop
|
|
|
|
///
|
|
|
|
/// If it has a value (e.g. {"llvm.distribute", 1} return the value as an
|
|
|
|
/// operand or null otherwise. If the string metadata is not found return
|
|
|
|
/// Optional's not-a-value.
|
|
|
|
Optional<const MDOperand *> llvm::findStringMetadataForLoop(Loop *TheLoop,
|
|
|
|
StringRef Name) {
|
2016-04-22 01:33:17 +08:00
|
|
|
MDNode *LoopID = TheLoop->getLoopID();
|
2016-04-23 03:10:05 +08:00
|
|
|
// Return none if LoopID is false.
|
2016-04-22 01:33:17 +08:00
|
|
|
if (!LoopID)
|
2016-04-23 03:10:05 +08:00
|
|
|
return None;
|
2016-04-22 01:33:20 +08:00
|
|
|
|
|
|
|
// First operand should refer to the loop id itself.
|
|
|
|
assert(LoopID->getNumOperands() > 0 && "requires at least one operand");
|
|
|
|
assert(LoopID->getOperand(0) == LoopID && "invalid loop id");
|
|
|
|
|
2016-04-22 01:33:17 +08:00
|
|
|
// Iterate over LoopID operands and look for MDString Metadata
|
|
|
|
for (unsigned i = 1, e = LoopID->getNumOperands(); i < e; ++i) {
|
|
|
|
MDNode *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
|
|
|
|
if (!MD)
|
|
|
|
continue;
|
|
|
|
MDString *S = dyn_cast<MDString>(MD->getOperand(0));
|
|
|
|
if (!S)
|
|
|
|
continue;
|
|
|
|
// Return true if MDString holds expected MetaData.
|
|
|
|
if (Name.equals(S->getString()))
|
2016-04-23 03:10:05 +08:00
|
|
|
switch (MD->getNumOperands()) {
|
|
|
|
case 1:
|
|
|
|
return nullptr;
|
|
|
|
case 2:
|
|
|
|
return &MD->getOperand(1);
|
|
|
|
default:
|
|
|
|
llvm_unreachable("loop metadata has 0 or 1 operand");
|
|
|
|
}
|
2016-04-22 01:33:17 +08:00
|
|
|
}
|
2016-04-23 03:10:05 +08:00
|
|
|
return None;
|
2016-04-22 01:33:17 +08:00
|
|
|
}
|
2016-06-11 04:03:17 +08:00
|
|
|
|
|
|
|
/// Returns true if the instruction in a loop is guaranteed to execute at least
|
|
|
|
/// once.
|
|
|
|
bool llvm::isGuaranteedToExecute(const Instruction &Inst,
|
|
|
|
const DominatorTree *DT, const Loop *CurLoop,
|
|
|
|
const LoopSafetyInfo *SafetyInfo) {
|
|
|
|
// We have to check to make sure that the instruction dominates all
|
|
|
|
// of the exit blocks. If it doesn't, then there is a path out of the loop
|
|
|
|
// which does not execute this instruction, so we can't hoist it.
|
|
|
|
|
|
|
|
// If the instruction is in the header block for the loop (which is very
|
|
|
|
// common), it is always guaranteed to dominate the exit blocks. Since this
|
|
|
|
// is a common case, and can save some work, check it now.
|
|
|
|
if (Inst.getParent() == CurLoop->getHeader())
|
|
|
|
// If there's a throw in the header block, we can't guarantee we'll reach
|
|
|
|
// Inst.
|
|
|
|
return !SafetyInfo->HeaderMayThrow;
|
|
|
|
|
|
|
|
// Somewhere in this loop there is an instruction which may throw and make us
|
|
|
|
// exit the loop.
|
|
|
|
if (SafetyInfo->MayThrow)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// Get the exit blocks for the current loop.
|
|
|
|
SmallVector<BasicBlock *, 8> ExitBlocks;
|
|
|
|
CurLoop->getExitBlocks(ExitBlocks);
|
|
|
|
|
|
|
|
// Verify that the block dominates each of the exit blocks of the loop.
|
|
|
|
for (BasicBlock *ExitBlock : ExitBlocks)
|
|
|
|
if (!DT->dominates(Inst.getParent(), ExitBlock))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// As a degenerate case, if the loop is statically infinite then we haven't
|
|
|
|
// proven anything since there are no exit blocks.
|
|
|
|
if (ExitBlocks.empty())
|
|
|
|
return false;
|
|
|
|
|
[LICM] Make isGuaranteedToExecute more accurate.
Summary:
Make isGuaranteedToExecute use the
isGuaranteedToTransferExecutionToSuccessor helper, and make that helper
a bit more accurate.
There's a potential performance impact here from assuming that arbitrary
calls might not return. This probably has little impact on loads and
stores to a pointer because most things alias analysis can reason about
are dereferenceable anyway. The other impacts, like less aggressive
hoisting of sdiv by a variable and less aggressive hoisting around
volatile memory operations, are unlikely to matter for real code.
This also impacts SCEV, which uses the same helper. It's a minor
improvement there because we can tell that, for example, memcpy always
returns normally. Strictly speaking, it's also introducing
a bug, but it's not any worse than everywhere else we assume readonly
functions terminate.
Fixes http://llvm.org/PR27857.
Reviewers: hfinkel, reames, chandlerc, sanjoy
Subscribers: broune, llvm-commits
Differential Revision: http://reviews.llvm.org/D21167
llvm-svn: 272489
2016-06-12 05:48:25 +08:00
|
|
|
// FIXME: In general, we have to prove that the loop isn't an infinite loop.
|
|
|
|
// See http::llvm.org/PR24078 . (The "ExitBlocks.empty()" check above is
|
|
|
|
// just a special case of this.)
|
2016-06-11 04:03:17 +08:00
|
|
|
return true;
|
|
|
|
}
|
2016-11-17 09:17:02 +08:00
|
|
|
|
|
|
|
/// Returns an estimated trip count for \p L derived from branch-weight
/// profile metadata on the latch branch, or None when the loop shape or
/// profile data does not allow one to be computed.
Optional<unsigned> llvm::getLoopEstimatedTripCount(Loop *L) {
  // Only support loops with a unique exiting block, and a latch.
  if (!L->getExitingBlock())
    return None;

  // Bug fix: the original code dereferenced getLoopLatch()->getTerminator()
  // unconditionally even though the comment above requires a latch;
  // getLoopLatch() returns null for loops with multiple latch predecessors,
  // which would crash here. Bail out instead.
  BasicBlock *Latch = L->getLoopLatch();
  if (!Latch)
    return None;

  // Get the branch weights for the the loop's backedge.
  BranchInst *LatchBR = dyn_cast<BranchInst>(Latch->getTerminator());
  if (!LatchBR || LatchBR->getNumSuccessors() != 2)
    return None;

  assert((LatchBR->getSuccessor(0) == L->getHeader() ||
          LatchBR->getSuccessor(1) == L->getHeader()) &&
         "At least one edge out of the latch must go to the header");

  // To estimate the number of times the loop body was executed, we want to
  // know the number of times the backedge was taken, vs. the number of times
  // we exited the loop.
  uint64_t TrueVal, FalseVal;
  if (!LatchBR->extractProfMetadata(TrueVal, FalseVal))
    return None;

  // A zero weight on either edge means the profile recorded no executions
  // along it; report an estimate of zero rather than dividing by zero.
  if (!TrueVal || !FalseVal)
    return 0;

  // Divide the count of the backedge by the count of the edge exiting the loop,
  // rounding to nearest.
  if (LatchBR->getSuccessor(0) == L->getHeader())
    return (TrueVal + (FalseVal / 2)) / FalseVal;
  else
    return (FalseVal + (TrueVal / 2)) / TrueVal;
}
|