//===- LoopInfo.cpp - Natural Loop Calculator -----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the LoopInfo class that is used to identify natural loops
// and determine the loop depth of various nodes of the CFG. Note that the
// loops identified may actually be several natural loops that share the same
// header node... not just a single natural loop.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/LoopInfo.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/IVDescriptors.h"
#include "llvm/Analysis/LoopInfoImpl.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/LoopNestAnalysis.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRPrintingPasses.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/PrintPasses.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>

using namespace llvm;

// Explicitly instantiate methods in LoopInfoImpl.h for IR-level Loops.
template class llvm::LoopBase<BasicBlock, Loop>;
template class llvm::LoopInfoBase<BasicBlock, Loop>;

// Always verify loopinfo if expensive checking is enabled.
#ifdef EXPENSIVE_CHECKS
bool llvm::VerifyLoopInfo = true;
#else
bool llvm::VerifyLoopInfo = false;
#endif
static cl::opt<bool, true>
    VerifyLoopInfoX("verify-loop-info", cl::location(VerifyLoopInfo),
                    cl::Hidden, cl::desc("Verify loop info (time consuming)"));

//===----------------------------------------------------------------------===//
// Loop implementation
//

bool Loop::isLoopInvariant(const Value *V) const {
  if (const Instruction *I = dyn_cast<Instruction>(V))
    return !contains(I);
  return true; // All non-instructions are loop invariant.
}

bool Loop::hasLoopInvariantOperands(const Instruction *I) const {
  return all_of(I->operands(), [this](Value *V) { return isLoopInvariant(V); });
}
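
// Usage sketch (illustrative; assumes a pass that already has a Loop *L and is
// inspecting an Instruction &I from one of the loop's blocks):
//
//   bool CanHoist = L->hasLoopInvariantOperands(&I);
//   // CanHoist is true when every operand of I is defined outside of L, or is
//   // a non-instruction such as a constant or a function argument.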

bool Loop::makeLoopInvariant(Value *V, bool &Changed, Instruction *InsertPt,
                             MemorySSAUpdater *MSSAU) const {
  if (Instruction *I = dyn_cast<Instruction>(V))
    return makeLoopInvariant(I, Changed, InsertPt, MSSAU);
  return true; // All non-instructions are loop-invariant.
}

bool Loop::makeLoopInvariant(Instruction *I, bool &Changed,
                             Instruction *InsertPt,
                             MemorySSAUpdater *MSSAU) const {
  // Test if the value is already loop-invariant.
  if (isLoopInvariant(I))
    return true;
  if (!isSafeToSpeculativelyExecute(I))
    return false;
  if (I->mayReadFromMemory())
    return false;
  // EH block instructions are immobile.
  if (I->isEHPad())
    return false;
  // Determine the insertion point, unless one was given.
  if (!InsertPt) {
    BasicBlock *Preheader = getLoopPreheader();
    // Without a preheader, hoisting is not feasible.
    if (!Preheader)
      return false;
    InsertPt = Preheader->getTerminator();
  }
  // Don't hoist instructions with loop-variant operands.
  for (Value *Operand : I->operands())
    if (!makeLoopInvariant(Operand, Changed, InsertPt, MSSAU))
      return false;

  // Hoist.
  I->moveBefore(InsertPt);
  if (MSSAU)
    if (auto *MUD = MSSAU->getMemorySSA()->getMemoryAccess(I))
      MSSAU->moveToPlace(MUD, InsertPt->getParent(),
                         MemorySSA::BeforeTerminator);

  // There is a possibility of hoisting this instruction above some arbitrary
  // condition. Any metadata defined on it can be control dependent on this
  // condition. Conservatively strip it here so that we don't give any wrong
  // information to the optimizer.
  I->dropUnknownNonDebugMetadata();

  Changed = true;
  return true;
}
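
// Usage sketch (illustrative; assumes a pass with a Loop *L, an Instruction *I
// from the loop, and an optional MemorySSAUpdater *MSSAU):
//
//   bool Changed = false;
//   if (L->makeLoopInvariant(I, Changed, /*InsertPt=*/nullptr, MSSAU))
//     ; // I is now loop-invariant (hoisted to the preheader if necessary).
//   if (Changed)
//     ; // The IR was modified, so dependent analyses may need updating.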

bool Loop::getIncomingAndBackEdge(BasicBlock *&Incoming,
                                  BasicBlock *&Backedge) const {
  BasicBlock *H = getHeader();

  Incoming = nullptr;
  Backedge = nullptr;
  pred_iterator PI = pred_begin(H);
  assert(PI != pred_end(H) && "Loop must have at least one backedge!");
  Backedge = *PI++;
  if (PI == pred_end(H))
    return false; // dead loop
  Incoming = *PI++;
  if (PI != pred_end(H))
    return false; // multiple backedges?

  if (contains(Incoming)) {
    if (contains(Backedge))
      return false;
    std::swap(Incoming, Backedge);
  } else if (!contains(Backedge))
    return false;

  assert(Incoming && Backedge && "expected non-null incoming and backedges");
  return true;
}

PHINode *Loop::getCanonicalInductionVariable() const {
  BasicBlock *H = getHeader();

  BasicBlock *Incoming = nullptr, *Backedge = nullptr;
  if (!getIncomingAndBackEdge(Incoming, Backedge))
    return nullptr;

  // Loop over all of the PHI nodes, looking for a canonical indvar.
  for (BasicBlock::iterator I = H->begin(); isa<PHINode>(I); ++I) {
    PHINode *PN = cast<PHINode>(I);
    if (ConstantInt *CI =
            dyn_cast<ConstantInt>(PN->getIncomingValueForBlock(Incoming)))
      if (CI->isZero())
        if (Instruction *Inc =
                dyn_cast<Instruction>(PN->getIncomingValueForBlock(Backedge)))
          if (Inc->getOpcode() == Instruction::Add && Inc->getOperand(0) == PN)
            if (ConstantInt *CI = dyn_cast<ConstantInt>(Inc->getOperand(1)))
              if (CI->isOne())
                return PN;
  }
  return nullptr;
}
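
// The pattern matched above corresponds to IR of roughly this shape (sketch;
// block and value names are illustrative):
//
//   header:
//     %iv = phi i32 [ 0, %preheader ], [ %iv.next, %latch ]
//     ...
//   latch:
//     %iv.next = add i32 %iv, 1
//     br i1 %cond, label %header, label %exit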

/// Get the latch condition instruction.
ICmpInst *Loop::getLatchCmpInst() const {
  if (BasicBlock *Latch = getLoopLatch())
    if (BranchInst *BI = dyn_cast_or_null<BranchInst>(Latch->getTerminator()))
      if (BI->isConditional())
        return dyn_cast<ICmpInst>(BI->getCondition());

  return nullptr;
}

/// Return the final value of the loop induction variable if found.
static Value *findFinalIVValue(const Loop &L, const PHINode &IndVar,
                               const Instruction &StepInst) {
  ICmpInst *LatchCmpInst = L.getLatchCmpInst();
  if (!LatchCmpInst)
    return nullptr;

  Value *Op0 = LatchCmpInst->getOperand(0);
  Value *Op1 = LatchCmpInst->getOperand(1);
  if (Op0 == &IndVar || Op0 == &StepInst)
    return Op1;

  if (Op1 == &IndVar || Op1 == &StepInst)
    return Op0;

  return nullptr;
}

Optional<Loop::LoopBounds> Loop::LoopBounds::getBounds(const Loop &L,
                                                       PHINode &IndVar,
                                                       ScalarEvolution &SE) {
  InductionDescriptor IndDesc;
  if (!InductionDescriptor::isInductionPHI(&IndVar, &L, &SE, IndDesc))
    return None;

  Value *InitialIVValue = IndDesc.getStartValue();
  Instruction *StepInst = IndDesc.getInductionBinOp();
  if (!InitialIVValue || !StepInst)
    return None;

  const SCEV *Step = IndDesc.getStep();
  Value *StepInstOp1 = StepInst->getOperand(1);
  Value *StepInstOp0 = StepInst->getOperand(0);
  Value *StepValue = nullptr;
  if (SE.getSCEV(StepInstOp1) == Step)
    StepValue = StepInstOp1;
  else if (SE.getSCEV(StepInstOp0) == Step)
    StepValue = StepInstOp0;

  Value *FinalIVValue = findFinalIVValue(L, IndVar, *StepInst);
  if (!FinalIVValue)
    return None;

  return LoopBounds(L, *InitialIVValue, *StepInst, StepValue, *FinalIVValue,
                    SE);
}

using Direction = Loop::LoopBounds::Direction;

ICmpInst::Predicate Loop::LoopBounds::getCanonicalPredicate() const {
  BasicBlock *Latch = L.getLoopLatch();
  assert(Latch && "Expecting valid latch");

  BranchInst *BI = dyn_cast_or_null<BranchInst>(Latch->getTerminator());
  assert(BI && BI->isConditional() && "Expecting conditional latch branch");

  ICmpInst *LatchCmpInst = dyn_cast<ICmpInst>(BI->getCondition());
  assert(LatchCmpInst &&
         "Expecting the latch compare instruction to be a CmpInst");

  // Need to invert the predicate when the first successor is not the loop
  // header.
  ICmpInst::Predicate Pred = (BI->getSuccessor(0) == L.getHeader())
                                 ? LatchCmpInst->getPredicate()
                                 : LatchCmpInst->getInversePredicate();

  if (LatchCmpInst->getOperand(0) == &getFinalIVValue())
    Pred = ICmpInst::getSwappedPredicate(Pred);

  // Need to flip the strictness of the predicate when the latch compare
  // instruction is not using StepInst.
  if (LatchCmpInst->getOperand(0) == &getStepInst() ||
      LatchCmpInst->getOperand(1) == &getStepInst())
    return Pred;

  // Cannot flip the strictness of NE and EQ.
  if (Pred != ICmpInst::ICMP_NE && Pred != ICmpInst::ICMP_EQ)
    return ICmpInst::getFlippedStrictnessPredicate(Pred);

  Direction D = getDirection();
  if (D == Direction::Increasing)
    return ICmpInst::ICMP_SLT;

  if (D == Direction::Decreasing)
    return ICmpInst::ICMP_SGT;

  // If we cannot determine the direction, we are unable to find the canonical
  // predicate.
  return ICmpInst::BAD_ICMP_PREDICATE;
}

Direction Loop::LoopBounds::getDirection() const {
  if (const SCEVAddRecExpr *StepAddRecExpr =
          dyn_cast<SCEVAddRecExpr>(SE.getSCEV(&getStepInst())))
    if (const SCEV *StepRecur = StepAddRecExpr->getStepRecurrence(SE)) {
      if (SE.isKnownPositive(StepRecur))
        return Direction::Increasing;
      if (SE.isKnownNegative(StepRecur))
        return Direction::Decreasing;
    }

  return Direction::Unknown;
}

Optional<Loop::LoopBounds> Loop::getBounds(ScalarEvolution &SE) const {
  if (PHINode *IndVar = getInductionVariable(SE))
    return LoopBounds::getBounds(*this, *IndVar, SE);

  return None;
}
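
// Example of the loop shape these bounds utilities recognize (illustrative
// pseudo LLVM IR modeling `for (int i = lb; i < ub; i += step)`):
//
//   beforeloop:
//     guardcmp = (lb < ub)
//     if (guardcmp) goto preheader; else goto afterloop
//   preheader:
//   loop:
//     i1 = phi [{lb, preheader}, {i2, latch}]
//     <loop body>
//     i2 = i1 + step
//   latch:
//     cmp = (i2 < ub)
//     if (cmp) goto loop
//   exit:
//   afterloop:
//
// For such a loop, getBounds(SE) yields getInitialIVValue() == lb,
// getStepInst() == (i2 = i1 + step), getStepValue() == step,
// getFinalIVValue() == ub, getCanonicalPredicate() == '<', and
// getDirection() == Increasing; getInductionVariable(SE) returns i1.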

PHINode *Loop::getInductionVariable(ScalarEvolution &SE) const {
  if (!isLoopSimplifyForm())
    return nullptr;

  BasicBlock *Header = getHeader();
  assert(Header && "Expected a valid loop header");
  ICmpInst *CmpInst = getLatchCmpInst();
  if (!CmpInst)
    return nullptr;

  Instruction *LatchCmpOp0 = dyn_cast<Instruction>(CmpInst->getOperand(0));
  Instruction *LatchCmpOp1 = dyn_cast<Instruction>(CmpInst->getOperand(1));

  for (PHINode &IndVar : Header->phis()) {
    InductionDescriptor IndDesc;
    if (!InductionDescriptor::isInductionPHI(&IndVar, this, &SE, IndDesc))
      continue;

    Instruction *StepInst = IndDesc.getInductionBinOp();

    // case 1:
    // IndVar = phi[{InitialValue, preheader}, {StepInst, latch}]
    // StepInst = IndVar + step
    // cmp = StepInst < FinalValue
    if (StepInst == LatchCmpOp0 || StepInst == LatchCmpOp1)
      return &IndVar;

    // case 2:
    // IndVar = phi[{InitialValue, preheader}, {StepInst, latch}]
    // StepInst = IndVar + step
    // cmp = IndVar < FinalValue
    if (&IndVar == LatchCmpOp0 || &IndVar == LatchCmpOp1)
      return &IndVar;
  }

  return nullptr;
}

bool Loop::getInductionDescriptor(ScalarEvolution &SE,
                                  InductionDescriptor &IndDesc) const {
  if (PHINode *IndVar = getInductionVariable(SE))
    return InductionDescriptor::isInductionPHI(IndVar, this, &SE, IndDesc);

  return false;
}

bool Loop::isAuxiliaryInductionVariable(PHINode &AuxIndVar,
                                        ScalarEvolution &SE) const {
  // Located in the loop header.
  BasicBlock *Header = getHeader();
  if (AuxIndVar.getParent() != Header)
    return false;

  // No uses outside of the loop.
  for (User *U : AuxIndVar.users())
    if (const Instruction *I = dyn_cast<Instruction>(U))
      if (!contains(I))
        return false;

  InductionDescriptor IndDesc;
  if (!InductionDescriptor::isInductionPHI(&AuxIndVar, this, &SE, IndDesc))
    return false;

  // The step instruction opcode should be add or sub.
  if (IndDesc.getInductionOpcode() != Instruction::Add &&
      IndDesc.getInductionOpcode() != Instruction::Sub)
    return false;

  // Incremented by a loop invariant step for each loop iteration.
  return SE.isLoopInvariant(IndDesc.getStep(), this);
}
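
// Illustrative C-level example: in
//
//   for (int i = lb; i < ub; ++i)
//     j += step;   // j defined before the loop and not used after it
//
// the counter j qualifies as an auxiliary induction variable as long as it
// remains a header PHI with no uses outside the loop and `step` is loop
// invariant, which is exactly what the checks above require.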

// A loop is "guarded" when it has the form:
//
//   GuardBB:
//     br cond1, Preheader, ExitSucc   <== guard branch
//   Preheader:
//     br Header
//   Header:
//     ...
//     br Latch
//   Latch:
//     br cond2, Header, ExitBlock
//   ExitBlock:
//     br ExitSucc
//   ExitSucc:
BranchInst *Loop::getLoopGuardBranch() const {
  if (!isLoopSimplifyForm())
    return nullptr;

  BasicBlock *Preheader = getLoopPreheader();
  assert(Preheader && getLoopLatch() &&
         "Expecting a loop with valid preheader and latch");

  // Loop should be in rotate form.
  if (!isRotatedForm())
    return nullptr;

  // Disallow loops with more than one unique exit block, as we do not verify
  // that GuardOtherSucc post dominates all exit blocks.
  BasicBlock *ExitFromLatch = getUniqueExitBlock();
  if (!ExitFromLatch)
    return nullptr;

  BasicBlock *GuardBB = Preheader->getUniquePredecessor();
  if (!GuardBB)
    return nullptr;

  assert(GuardBB->getTerminator() && "Expecting valid guard terminator");

  BranchInst *GuardBI = dyn_cast<BranchInst>(GuardBB->getTerminator());
  if (!GuardBI || GuardBI->isUnconditional())
    return nullptr;

  BasicBlock *GuardOtherSucc = (GuardBI->getSuccessor(0) == Preheader)
                                   ? GuardBI->getSuccessor(1)
                                   : GuardBI->getSuccessor(0);

  // Check if ExitFromLatch (or any BasicBlock which is an empty unique
  // successor of ExitFromLatch) is equal to GuardOtherSucc. If
  // skipEmptyBlockUntil returns GuardOtherSucc, then the guard branch for the
  // loop is GuardBI (return GuardBI), otherwise return nullptr.
  if (&LoopNest::skipEmptyBlockUntil(ExitFromLatch, GuardOtherSucc,
                                     /*CheckUniquePred=*/true) ==
      GuardOtherSucc)
    return GuardBI;
  else
    return nullptr;
}

bool Loop::isCanonical(ScalarEvolution &SE) const {
  InductionDescriptor IndDesc;
  if (!getInductionDescriptor(SE, IndDesc))
    return false;

  ConstantInt *Init = dyn_cast_or_null<ConstantInt>(IndDesc.getStartValue());
  if (!Init || !Init->isZero())
    return false;

  if (IndDesc.getInductionOpcode() != Instruction::Add)
    return false;

  ConstantInt *Step = IndDesc.getConstIntStepValue();
  if (!Step || !Step->isOne())
    return false;

  return true;
}
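
// In C-level terms, a canonical induction variable starts at zero and is
// bumped by one each iteration, e.g. (illustrative):
//
//   for (int i = 0; i < n; ++i)
//     ...
//
// An induction that starts at an arbitrary lower bound or advances by a wider
// step, as in the earlier example, is not canonical.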

// Check that 'BB' doesn't have any uses outside of the loop 'L'.
static bool isBlockInLCSSAForm(const Loop &L, const BasicBlock &BB,
                               const DominatorTree &DT) {
  for (const Instruction &I : BB) {
    // Tokens can't be used in PHI nodes and live-out tokens prevent loop
    // optimizations, so for the purposes of LCSSA form, we can ignore them.
    if (I.getType()->isTokenTy())
      continue;

    for (const Use &U : I.uses()) {
      const Instruction *UI = cast<Instruction>(U.getUser());
      const BasicBlock *UserBB = UI->getParent();

      // For practical purposes, we consider that the use in a PHI
      // occurs in the respective predecessor block. For more info,
      // see the `phi` doc in LangRef and the LCSSA doc.
      if (const PHINode *P = dyn_cast<PHINode>(UI))
        UserBB = P->getIncomingBlock(U);

      // Check the current block, as a fast-path, before checking whether
      // the use is anywhere in the loop. Most values are used in the same
      // block they are defined in. Also, blocks not reachable from the
      // entry are special; uses in them don't need to go through PHIs.
      if (UserBB != &BB && !L.contains(UserBB) &&
          DT.isReachableFromEntry(UserBB))
        return false;
    }
  }
  return true;
}

bool Loop::isLCSSAForm(const DominatorTree &DT) const {
  // For each block we check that it doesn't have any uses outside of this
  // loop.
  return all_of(this->blocks(), [&](const BasicBlock *BB) {
    return isBlockInLCSSAForm(*this, *BB, DT);
  });
}
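
// In LCSSA form, every value defined inside the loop that is used after the
// loop is funneled through a PHI in an exit block, e.g. (illustrative IR):
//
//   loop:
//     %v = add i32 %a, %b
//     ...
//   exit:
//     %v.lcssa = phi i32 [ %v, %loop ]
//     ; users outside the loop refer to %v.lcssa rather than %v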

bool Loop::isRecursivelyLCSSAForm(const DominatorTree &DT,
                                  const LoopInfo &LI) const {
  // For each block we check that it doesn't have any uses outside of its
  // innermost loop. This process will transitively guarantee that the current
  // loop and all of the nested loops are in LCSSA form.
  return all_of(this->blocks(), [&](const BasicBlock *BB) {
    return isBlockInLCSSAForm(*LI.getLoopFor(BB), *BB, DT);
  });
}

bool Loop::isLoopSimplifyForm() const {
  // Normal-form loops have a preheader, a single backedge, and all of their
  // exits have all their predecessors inside the loop.
  return getLoopPreheader() && getLoopLatch() && hasDedicatedExits();
}

// Routines that reform the loop CFG and split edges often fail on indirectbr.
bool Loop::isSafeToClone() const {
  // Return false if any loop blocks contain indirectbrs, or there are any
  // calls to noduplicate functions.
  // FIXME: it should be ok to clone CallBrInst's if we correctly update the
  // operand list to reflect the newly cloned labels.
  for (BasicBlock *BB : this->blocks()) {
    if (isa<IndirectBrInst>(BB->getTerminator()) ||
        isa<CallBrInst>(BB->getTerminator()))
      return false;

    for (Instruction &I : *BB)
      if (auto *CB = dyn_cast<CallBase>(&I))
        if (CB->cannotDuplicate())
          return false;
  }
  return true;
}

MDNode *Loop::getLoopID() const {
  MDNode *LoopID = nullptr;

  // Go through the latch blocks and check the terminator for the metadata.
  SmallVector<BasicBlock *, 4> LatchesBlocks;
  getLoopLatches(LatchesBlocks);
  for (BasicBlock *BB : LatchesBlocks) {
    Instruction *TI = BB->getTerminator();
    MDNode *MD = TI->getMetadata(LLVMContext::MD_loop);

    if (!MD)
      return nullptr;

    if (!LoopID)
      LoopID = MD;
    else if (MD != LoopID)
      return nullptr;
  }
  if (!LoopID || LoopID->getNumOperands() == 0 ||
      LoopID->getOperand(0) != LoopID)
    return nullptr;
  return LoopID;
}
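
// The loop ID returned here is the self-referential node attached to the latch
// terminator via !llvm.loop, e.g. (illustrative):
//
//   br i1 %cond, label %header, label %exit, !llvm.loop !0
//   ...
//   !0 = distinct !{!0, !1}
//   !1 = !{!"llvm.loop.unroll.disable"}
//
// The first operand must be the node itself, which is what the final check
// above enforces.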

void Loop::setLoopID(MDNode *LoopID) const {
  assert((!LoopID || LoopID->getNumOperands() > 0) &&
         "Loop ID needs at least one operand");
  assert((!LoopID || LoopID->getOperand(0) == LoopID) &&
         "Loop ID should refer to itself");

  SmallVector<BasicBlock *, 4> LoopLatches;
  getLoopLatches(LoopLatches);
  for (BasicBlock *BB : LoopLatches)
    BB->getTerminator()->setMetadata(LLVMContext::MD_loop, LoopID);
}

void Loop::setLoopAlreadyUnrolled() {
  LLVMContext &Context = getHeader()->getContext();

  MDNode *DisableUnrollMD =
      MDNode::get(Context, MDString::get(Context, "llvm.loop.unroll.disable"));
  MDNode *LoopID = getLoopID();
  MDNode *NewLoopID = makePostTransformationMetadata(
      Context, LoopID, {"llvm.loop.unroll."}, {DisableUnrollMD});
  setLoopID(NewLoopID);
}
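
// Illustrative effect on the loop metadata (assuming
// makePostTransformationMetadata drops options matching the
// "llvm.loop.unroll." prefix and appends the new node):
//
//   before: !0 = distinct !{!0, !{!"llvm.loop.unroll.count", i32 4}}
//   after:  !1 = distinct !{!1, !{!"llvm.loop.unroll.disable"}}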

void Loop::setLoopMustProgress() {
  LLVMContext &Context = getHeader()->getContext();

  MDNode *MustProgress = findOptionMDForLoop(this, "llvm.loop.mustprogress");

  if (MustProgress)
    return;

  MDNode *MustProgressMD =
      MDNode::get(Context, MDString::get(Context, "llvm.loop.mustprogress"));
  MDNode *LoopID = getLoopID();
  MDNode *NewLoopID =
      makePostTransformationMetadata(Context, LoopID, {}, {MustProgressMD});
  setLoopID(NewLoopID);
}

bool Loop::isAnnotatedParallel() const {
  MDNode *DesiredLoopIdMetadata = getLoopID();

  if (!DesiredLoopIdMetadata)
    return false;

  MDNode *ParallelAccesses =
      findOptionMDForLoop(this, "llvm.loop.parallel_accesses");
  SmallPtrSet<MDNode *, 4>
      ParallelAccessGroups; // For scalable 'contains' check.
  if (ParallelAccesses) {
    for (const MDOperand &MD : drop_begin(ParallelAccesses->operands())) {
      MDNode *AccGroup = cast<MDNode>(MD.get());
      assert(isValidAsAccessGroup(AccGroup) &&
             "List item must be an access group");
      ParallelAccessGroups.insert(AccGroup);
    }
  }

  // The loop branch contains the parallel loop metadata. In order to ensure
  // that any parallel-loop-unaware optimization pass hasn't added loop-carried
  // dependencies (thus converted the loop back to a sequential loop), check
  // that all the memory instructions in the loop belong to an access group
  // that is parallel to this loop.
  for (BasicBlock *BB : this->blocks()) {
    for (Instruction &I : *BB) {
      if (!I.mayReadOrWriteMemory())
        continue;

      if (MDNode *AccessGroup = I.getMetadata(LLVMContext::MD_access_group)) {
        auto ContainsAccessGroup = [&ParallelAccessGroups](MDNode *AG) -> bool {
          if (AG->getNumOperands() == 0) {
            assert(isValidAsAccessGroup(AG) && "Item must be an access group");
            return ParallelAccessGroups.count(AG);
          }

          for (const MDOperand &AccessListItem : AG->operands()) {
            MDNode *AccGroup = cast<MDNode>(AccessListItem.get());
            assert(isValidAsAccessGroup(AccGroup) &&
                   "List item must be an access group");
            if (ParallelAccessGroups.count(AccGroup))
              return true;
          }
          return false;
        };

        if (ContainsAccessGroup(AccessGroup))
          continue;
      }

      // The memory instruction can refer to the loop identifier metadata
      // directly or indirectly through another list metadata (in case of
      // nested parallel loops). The loop identifier metadata refers to
      // itself so we can check both cases with the same routine.
      MDNode *LoopIdMD =
          I.getMetadata(LLVMContext::MD_mem_parallel_loop_access);

      if (!LoopIdMD)
        return false;

      if (!llvm::is_contained(LoopIdMD->operands(), DesiredLoopIdMetadata))
        return false;
    }
  }
  return true;
}
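
// Shape of the metadata this walks (illustrative):
//
//   store i32 %x, i32* %p, !llvm.access.group !2
//   br i1 %c, label %header, label %exit, !llvm.loop !0
//   ...
//   !0 = distinct !{!0, !1}
//   !1 = !{!"llvm.loop.parallel_accesses", !2}
//   !2 = distinct !{}
//
// Every memory access in the loop must name an access group listed in
// llvm.loop.parallel_accesses (or, for the older form, reference the loop ID
// through !llvm.mem.parallel_loop_access) for the loop to be considered
// parallel.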
|
|
|
|
|
2017-09-20 09:12:09 +08:00
|
|
|
DebugLoc Loop::getStartLoc() const { return getLocRange().getStart(); }
|
2016-11-08 19:18:59 +08:00
|
|
|
|
|
|
|
Loop::LocRange Loop::getLocRange() const {
|
  // If we have a debug location in the loop ID, then use it.
  if (MDNode *LoopID = getLoopID()) {
    DebugLoc Start;
    // We use the first DebugLoc in the loop ID as the start location of the
    // loop, and if there is a second DebugLoc we use it as the end location.
    for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
      if (DILocation *L = dyn_cast<DILocation>(LoopID->getOperand(i))) {
        if (!Start)
          Start = DebugLoc(L);
        else
          return LocRange(Start, DebugLoc(L));
      }
    }

    if (Start)
      return LocRange(Start);
  }

  // Try the pre-header first.
  if (BasicBlock *PHeadBB = getLoopPreheader())
    if (DebugLoc DL = PHeadBB->getTerminator()->getDebugLoc())
      return LocRange(DL);

  // If we have no pre-header or there are no instructions with debug
  // info in it, try the header.
  if (BasicBlock *HeadBB = getHeader())
    return LocRange(HeadBB->getTerminator()->getDebugLoc());

  return LocRange();
}
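
// Illustrative loop ID shape (assumed, e.g. as a frontend might emit it with
// debug info enabled) that getLocRange() consumes:
//   !0 = distinct !{!0, !1, !2}
//   !1 = !DILocation(line: 5, column: 3, scope: !s)  ; loop start
//   !2 = !DILocation(line: 9, column: 1, scope: !s)  ; loop end
// With both locations present the range covers lines 5-9; with only !1 it
// holds just the start; otherwise the preheader/header fallbacks above apply.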

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void Loop::dump() const { print(dbgs()); }

LLVM_DUMP_METHOD void Loop::dumpVerbose() const {
  print(dbgs(), /*Verbose=*/true);
}
#endif

//===----------------------------------------------------------------------===//
// UnloopUpdater implementation
//

namespace {
/// Find the new parent loop for all blocks within the "unloop" whose last
/// backedge has just been removed.
class UnloopUpdater {
  Loop &Unloop;
  LoopInfo *LI;

  LoopBlocksDFS DFS;

  // Map unloop's immediate subloops to their nearest reachable parents. Nested
  // loops within these subloops will not change parents. However, an immediate
  // subloop's new parent will be the nearest loop reachable from either its own
  // exits *or* any of its nested loop's exits.
  DenseMap<Loop *, Loop *> SubloopParents;

  // Flag the presence of an irreducible backedge whose destination is a block
  // directly contained by the original unloop.
  bool FoundIB;

public:
  UnloopUpdater(Loop *UL, LoopInfo *LInfo)
      : Unloop(*UL), LI(LInfo), DFS(UL), FoundIB(false) {}

  void updateBlockParents();

  void removeBlocksFromAncestors();

  void updateSubloopParents();

protected:
  Loop *getNearestLoop(BasicBlock *BB, Loop *BBLoop);
};
} // end anonymous namespace

/// Update the parent loop for all blocks that are directly contained within the
/// original "unloop".
void UnloopUpdater::updateBlockParents() {
  if (Unloop.getNumBlocks()) {
    // Perform a post order CFG traversal of all blocks within this loop,
    // propagating the nearest loop from successors to predecessors.
    LoopBlocksTraversal Traversal(DFS, LI);
    for (BasicBlock *POI : Traversal) {

      Loop *L = LI->getLoopFor(POI);
      Loop *NL = getNearestLoop(POI, L);

      if (NL != L) {
        // For reducible loops, NL is now an ancestor of Unloop.
        assert((NL != &Unloop && (!NL || NL->contains(&Unloop))) &&
               "uninitialized successor");
        LI->changeLoopFor(POI, NL);
      } else {
        // Or the current block is part of a subloop, in which case its parent
        // is unchanged.
        assert((FoundIB || Unloop.contains(L)) && "uninitialized successor");
      }
    }
  }
  // Each irreducible loop within the unloop induces a round of iteration using
  // the DFS result cached by Traversal.
  bool Changed = FoundIB;
  for (unsigned NIters = 0; Changed; ++NIters) {
    assert(NIters < Unloop.getNumBlocks() && "runaway iterative algorithm");

    // Iterate over the postorder list of blocks, propagating the nearest loop
    // from successors to predecessors as before.
    Changed = false;
    for (LoopBlocksDFS::POIterator POI = DFS.beginPostorder(),
                                   POE = DFS.endPostorder();
         POI != POE; ++POI) {

      Loop *L = LI->getLoopFor(*POI);
      Loop *NL = getNearestLoop(*POI, L);
      if (NL != L) {
        assert(NL != &Unloop && (!NL || NL->contains(&Unloop)) &&
               "uninitialized successor");
        LI->changeLoopFor(*POI, NL);
        Changed = true;
      }
    }
  }
}

/// Remove unloop's blocks from all ancestors below their new parents.
void UnloopUpdater::removeBlocksFromAncestors() {
  // Remove all unloop's blocks (including those in nested subloops) from
  // ancestors below the new parent loop.
  for (BasicBlock *BB : Unloop.blocks()) {
    Loop *OuterParent = LI->getLoopFor(BB);
    if (Unloop.contains(OuterParent)) {
      while (OuterParent->getParentLoop() != &Unloop)
        OuterParent = OuterParent->getParentLoop();
      OuterParent = SubloopParents[OuterParent];
    }
    // Remove blocks from former ancestors except Unloop itself, which will be
    // deleted.
    for (Loop *OldParent = Unloop.getParentLoop(); OldParent != OuterParent;
         OldParent = OldParent->getParentLoop()) {
      assert(OldParent && "new loop is not an ancestor of the original");
      OldParent->removeBlockFromLoop(BB);
    }
  }
}

/// Update the parent loop for all subloops directly nested within unloop.
void UnloopUpdater::updateSubloopParents() {
  while (!Unloop.isInnermost()) {
    Loop *Subloop = *std::prev(Unloop.end());
    Unloop.removeChildLoop(std::prev(Unloop.end()));

    assert(SubloopParents.count(Subloop) && "DFS failed to visit subloop");
    if (Loop *Parent = SubloopParents[Subloop])
      Parent->addChildLoop(Subloop);
    else
      LI->addTopLevelLoop(Subloop);
  }
}

/// Return the nearest parent loop among this block's successors. If a successor
/// is a subloop header, consider its parent to be the nearest parent of the
/// subloop's exits.
///
/// For subloop blocks, simply update SubloopParents and return the unchanged
/// BBLoop.
Loop *UnloopUpdater::getNearestLoop(BasicBlock *BB, Loop *BBLoop) {

  // Initially for blocks directly contained by Unloop, NearLoop == Unloop and
  // is considered uninitialized.
  Loop *NearLoop = BBLoop;

  Loop *Subloop = nullptr;
  if (NearLoop != &Unloop && Unloop.contains(NearLoop)) {
    Subloop = NearLoop;
    // Find the subloop ancestor that is directly contained within Unloop.
    while (Subloop->getParentLoop() != &Unloop) {
      Subloop = Subloop->getParentLoop();
      assert(Subloop && "subloop is not an ancestor of the original loop");
    }
    // Get the current nearest parent of the Subloop exits, initially Unloop.
    NearLoop = SubloopParents.insert({Subloop, &Unloop}).first->second;
  }

  succ_iterator I = succ_begin(BB), E = succ_end(BB);
  if (I == E) {
    assert(!Subloop && "subloop blocks must have a successor");
    NearLoop = nullptr; // unloop blocks may now exit the function.
  }
  for (; I != E; ++I) {
    if (*I == BB)
      continue; // self loops are uninteresting

    Loop *L = LI->getLoopFor(*I);
    if (L == &Unloop) {
      // This successor has not been processed. This path must lead to an
      // irreducible backedge.
      assert((FoundIB || !DFS.hasPostorder(*I)) && "should have seen IB");
      FoundIB = true;
    }
    if (L != &Unloop && Unloop.contains(L)) {
      // Successor is in a subloop.
      if (Subloop)
        continue; // Branching within subloops. Ignore it.

      // BB branches from the original into a subloop header.
      assert(L->getParentLoop() == &Unloop && "cannot skip into nested loops");

      // Get the current nearest parent of the Subloop's exits.
      L = SubloopParents[L];
      // L could be Unloop if the only exit was an irreducible backedge.
    }
    if (L == &Unloop) {
      continue;
    }
    // Handle critical edges from Unloop into a sibling loop.
    if (L && !L->contains(&Unloop)) {
      L = L->getParentLoop();
    }
    // Remember the nearest parent loop among successors or subloop exits.
    if (NearLoop == &Unloop || !NearLoop || NearLoop->contains(L))
      NearLoop = L;
  }
  if (Subloop) {
    SubloopParents[Subloop] = NearLoop;
    return BBLoop;
  }
  return NearLoop;
}

LoopInfo::LoopInfo(const DomTreeBase<BasicBlock> &DomTree) { analyze(DomTree); }

bool LoopInfo::invalidate(Function &F, const PreservedAnalyses &PA,
                          FunctionAnalysisManager::Invalidator &) {
  // Check whether the analysis, all analyses on functions, or the function's
  // CFG have been preserved.
  auto PAC = PA.getChecker<LoopAnalysis>();
  return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>() ||
           PAC.preservedSet<CFGAnalyses>());
}

void LoopInfo::erase(Loop *Unloop) {
  assert(!Unloop->isInvalid() && "Loop has already been erased!");

  auto InvalidateOnExit = make_scope_exit([&]() { destroy(Unloop); });

  // First handle the special case of no parent loop to simplify the algorithm.
  if (Unloop->isOutermost()) {
    // Since Unloop had no parent, its blocks are no longer in a loop.
    for (BasicBlock *BB : Unloop->blocks()) {
      // Don't reparent blocks in subloops.
      if (getLoopFor(BB) != Unloop)
        continue;

      // Blocks no longer have a parent but are still referenced by Unloop until
      // the Unloop object is deleted.
      changeLoopFor(BB, nullptr);
    }

    // Remove the loop from the top-level LoopInfo object.
    for (iterator I = begin();; ++I) {
      assert(I != end() && "Couldn't find loop");
      if (*I == Unloop) {
        removeLoop(I);
        break;
      }
    }

    // Move all of the subloops to the top-level.
    while (!Unloop->isInnermost())
      addTopLevelLoop(Unloop->removeChildLoop(std::prev(Unloop->end())));

    return;
  }

  // Update the parent loop for all blocks within the loop. Blocks within
  // subloops will not change parents.
  UnloopUpdater Updater(Unloop, this);
  Updater.updateBlockParents();

  // Remove blocks from former ancestor loops.
  Updater.removeBlocksFromAncestors();

  // Add direct subloops as children in their new parent loop.
  Updater.updateSubloopParents();

  // Remove unloop from its parent loop.
  Loop *ParentLoop = Unloop->getParentLoop();
  for (Loop::iterator I = ParentLoop->begin();; ++I) {
    assert(I != ParentLoop->end() && "Couldn't find loop");
    if (*I == Unloop) {
      ParentLoop->removeChildLoop(I);
      break;
    }
  }
}
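
// Hedged usage sketch (caller-side, not part of this file): a transformation
// that has just rewritten the CFG so that a loop L no longer loops would
// typically follow up with
//   LI.erase(L);
// which reparents L's blocks and subloops as above and then destroys L via
// the scope_exit, so L must not be dereferenced afterwards.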

bool
LoopInfo::wouldBeOutOfLoopUseRequiringLCSSA(const Value *V,
                                            const BasicBlock *ExitBB) const {
  if (V->getType()->isTokenTy())
    // We can't form PHIs of token type, so the definition of LCSSA excludes
    // values of that type.
    return false;

  const Instruction *I = dyn_cast<Instruction>(V);
  if (!I)
    return false;
  const Loop *L = getLoopFor(I->getParent());
  if (!L)
    return false;
  if (L->contains(ExitBB))
    // Could be an exit bb of a subloop and contained in defining loop
    return false;

  // We found a (new) out-of-loop use location, for a value defined in-loop.
  // (Note that because of LCSSA, we don't have to account for values defined
  // in sibling loops. Such values will have LCSSA phis of their own in the
  // common parent loop.)
  return true;
}
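
// Illustrative case (simplified IR): %v is defined inside the loop and would
// be used in %exit, which the defining loop does not contain:
//   loop:
//     %v = add i32 %iv, 1
//     br i1 %c, label %loop, label %exit
//   exit:                      ; an LCSSA phi [ %v, %loop ] would be required
// wouldBeOutOfLoopUseRequiringLCSSA(%v, %exit) returns true for this shape.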

AnalysisKey LoopAnalysis::Key;

LoopInfo LoopAnalysis::run(Function &F, FunctionAnalysisManager &AM) {
  // FIXME: Currently we create a LoopInfo from scratch for every function.
  // This may prove to be too wasteful due to deallocating and re-allocating
  // memory each time for the underlying map and vector data structures. At
  // some point it may prove worthwhile to use a freelist and recycle LoopInfo
  // objects. I don't want to add that kind of complexity until the scope of
  // the problem is better understood.
  LoopInfo LI;
  LI.analyze(AM.getResult<DominatorTreeAnalysis>(F));
  return LI;
}

PreservedAnalyses LoopPrinterPass::run(Function &F,
                                       FunctionAnalysisManager &AM) {
  AM.getResult<LoopAnalysis>(F).print(OS);
  return PreservedAnalyses::all();
}

void llvm::printLoop(Loop &L, raw_ostream &OS, const std::string &Banner) {

  if (forcePrintModuleIR()) {
    // handling -print-module-scope
    OS << Banner << " (loop: ";
    L.getHeader()->printAsOperand(OS, false);
    OS << ")\n";

    // printing whole module
    OS << *L.getHeader()->getModule();
    return;
  }

  OS << Banner;

  auto *PreHeader = L.getLoopPreheader();
  if (PreHeader) {
    OS << "\n; Preheader:";
    PreHeader->print(OS);
    OS << "\n; Loop:";
  }

  for (auto *Block : L.blocks())
    if (Block)
      Block->print(OS);
    else
      OS << "Printing <null> block";

  SmallVector<BasicBlock *, 8> ExitBlocks;
  L.getExitBlocks(ExitBlocks);
  if (!ExitBlocks.empty()) {
    OS << "\n; Exit blocks";
    for (auto *Block : ExitBlocks)
      if (Block)
        Block->print(OS);
      else
        OS << "Printing <null> block";
  }
}

MDNode *llvm::findOptionMDForLoopID(MDNode *LoopID, StringRef Name) {
  // No loop metadata node, no loop properties.
  if (!LoopID)
    return nullptr;

  // First operand should refer to the metadata node itself, for legacy reasons.
  assert(LoopID->getNumOperands() > 0 && "requires at least one operand");
  assert(LoopID->getOperand(0) == LoopID && "invalid loop id");

  // Iterate over the metadata node operands and look for MDString metadata.
  for (unsigned i = 1, e = LoopID->getNumOperands(); i < e; ++i) {
    MDNode *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
    if (!MD || MD->getNumOperands() < 1)
      continue;
    MDString *S = dyn_cast<MDString>(MD->getOperand(0));
    if (!S)
      continue;
    // Return the operand node if MDString holds expected metadata.
    if (Name.equals(S->getString()))
      return MD;
  }

  // Loop property not found.
  return nullptr;
}
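
// Illustrative loop ID (simplified) for the lookup above:
//   !0 = distinct !{!0, !1, !2}
//   !1 = !{!"llvm.loop.unroll.disable"}
//   !2 = !{!"llvm.loop.vectorize.width", i32 4}
// findOptionMDForLoopID(!0, "llvm.loop.vectorize.width") returns !2, while a
// name with no matching operand returns nullptr.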

MDNode *llvm::findOptionMDForLoop(const Loop *TheLoop, StringRef Name) {
  return findOptionMDForLoopID(TheLoop->getLoopID(), Name);
}

/// Find string metadata for loop
///
/// If the option has a value (e.g. {"llvm.distribute", 1}), return the value
/// as an operand, or null otherwise. If the string metadata is not found,
/// return Optional's not-a-value.
Optional<const MDOperand *> llvm::findStringMetadataForLoop(const Loop *TheLoop,
                                                            StringRef Name) {
  MDNode *MD = findOptionMDForLoop(TheLoop, Name);
  if (!MD)
    return None;
  switch (MD->getNumOperands()) {
  case 1:
    return nullptr;
  case 2:
    return &MD->getOperand(1);
  default:
    llvm_unreachable("loop metadata has 0 or 1 operand");
  }
}
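
// Hedged usage sketch, reusing the loop ID sketched above: a caller such as
//   Optional<const MDOperand *> Op =
//       findStringMetadataForLoop(L, "llvm.loop.vectorize.width");
// gets an engaged Optional whose contained pointer refers to the i32 4
// operand; for a bare option like "llvm.loop.unroll.disable" the contained
// pointer is null, and for an absent option the Optional itself is None.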

Optional<bool> llvm::getOptionalBoolLoopAttribute(const Loop *TheLoop,
                                                  StringRef Name) {
  MDNode *MD = findOptionMDForLoop(TheLoop, Name);
  if (!MD)
    return None;
  switch (MD->getNumOperands()) {
  case 1:
    // When the value is absent it is interpreted as 'attribute set'.
    return true;
  case 2:
    if (ConstantInt *IntMD =
            mdconst::extract_or_null<ConstantInt>(MD->getOperand(1).get()))
      return IntMD->getZExtValue();
    return true;
  }
  llvm_unreachable("unexpected number of options");
}

bool llvm::getBooleanLoopAttribute(const Loop *TheLoop, StringRef Name) {
  return getOptionalBoolLoopAttribute(TheLoop, Name).getValueOr(false);
}

llvm::Optional<int> llvm::getOptionalIntLoopAttribute(const Loop *TheLoop,
                                                      StringRef Name) {
  const MDOperand *AttrMD =
      findStringMetadataForLoop(TheLoop, Name).getValueOr(nullptr);
  if (!AttrMD)
    return None;

  ConstantInt *IntMD = mdconst::extract_or_null<ConstantInt>(AttrMD->get());
  if (!IntMD)
    return None;

  return IntMD->getSExtValue();
}

int llvm::getIntLoopAttribute(const Loop *TheLoop, StringRef Name,
                              int Default) {
  return getOptionalIntLoopAttribute(TheLoop, Name).getValueOr(Default);
}
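
// Worked example (operand value assumed for illustration): with a loop ID
// operand !{!"llvm.loop.unroll.count", i32 8},
//   getOptionalIntLoopAttribute(L, "llvm.loop.unroll.count") yields 8, and
//   getIntLoopAttribute(L, "llvm.loop.unroll.count", /*Default=*/1) returns 8;
// without that operand the latter falls back to the supplied default of 1.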

static const char *LLVMLoopMustProgress = "llvm.loop.mustprogress";

bool llvm::hasMustProgress(const Loop *L) {
  return getBooleanLoopAttribute(L, LLVMLoopMustProgress);
}

bool llvm::isMustProgress(const Loop *L) {
  return L->getHeader()->getParent()->mustProgress() || hasMustProgress(L);
}
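
// The property can come from two places (sketched, simplified): either the
// enclosing function carries the `mustprogress` attribute, or the loop ID has
// an operand !{!"llvm.loop.mustprogress"}. isMustProgress() accepts both;
// hasMustProgress() checks only the loop-metadata form.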

bool llvm::isValidAsAccessGroup(MDNode *Node) {
  return Node->getNumOperands() == 0 && Node->isDistinct();
}

MDNode *llvm::makePostTransformationMetadata(LLVMContext &Context,
                                             MDNode *OrigLoopID,
                                             ArrayRef<StringRef> RemovePrefixes,
                                             ArrayRef<MDNode *> AddAttrs) {
  // First remove any existing loop metadata related to this transformation.
  SmallVector<Metadata *, 4> MDs;

  // Reserve first location for self reference to the LoopID metadata node.
  MDs.push_back(nullptr);

  // Remove metadata for the transformation that has been applied or that became
  // outdated.
  if (OrigLoopID) {
    for (unsigned i = 1, ie = OrigLoopID->getNumOperands(); i < ie; ++i) {
      bool IsVectorMetadata = false;
      Metadata *Op = OrigLoopID->getOperand(i);
      if (MDNode *MD = dyn_cast<MDNode>(Op)) {
        const MDString *S = dyn_cast<MDString>(MD->getOperand(0));
        if (S)
          IsVectorMetadata =
              llvm::any_of(RemovePrefixes, [S](StringRef Prefix) -> bool {
                return S->getString().startswith(Prefix);
              });
      }
      if (!IsVectorMetadata)
        MDs.push_back(Op);
    }
  }

  // Add metadata to avoid reapplying a transformation, such as
  // llvm.loop.unroll.disable and llvm.loop.isvectorized.
  MDs.append(AddAttrs.begin(), AddAttrs.end());

  MDNode *NewLoopID = MDNode::getDistinct(Context, MDs);
  // Replace the temporary node with a self-reference.
  NewLoopID->replaceOperandWith(0, NewLoopID);
  return NewLoopID;
}
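
// Hedged before/after sketch (operands invented for illustration): given
//   OrigLoopID     = !{self, !{!"llvm.loop.unroll.count", i32 4}, !DbgLoc}
//   RemovePrefixes = {"llvm.loop.unroll."}
//   AddAttrs       = {!{!"llvm.loop.unroll.disable"}}
// the result is a fresh distinct node whose operands are its self-reference,
// !DbgLoc, and the unroll.disable marker: the stale unroll.count request is
// dropped while unrelated operands survive.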

//===----------------------------------------------------------------------===//
// LoopInfo implementation
//

LoopInfoWrapperPass::LoopInfoWrapperPass() : FunctionPass(ID) {
  initializeLoopInfoWrapperPassPass(*PassRegistry::getPassRegistry());
}

char LoopInfoWrapperPass::ID = 0;
INITIALIZE_PASS_BEGIN(LoopInfoWrapperPass, "loops", "Natural Loop Information",
                      true, true)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_END(LoopInfoWrapperPass, "loops", "Natural Loop Information",
                    true, true)

bool LoopInfoWrapperPass::runOnFunction(Function &) {
  releaseMemory();
  LI.analyze(getAnalysis<DominatorTreeWrapperPass>().getDomTree());
  return false;
}

void LoopInfoWrapperPass::verifyAnalysis() const {
  // LoopInfoWrapperPass is a FunctionPass, but verifying every loop in the
  // function each time verifyAnalysis is called is very expensive, so full
  // verification is off by default and must be enabled with the
  // -verify-loop-info option. In order to perform some checking by default,
  // LoopPass has been taught to call verifyLoop manually during loop pass
  // sequences.
  if (VerifyLoopInfo) {
    auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    LI.verify(DT);
  }
}

void LoopInfoWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequiredTransitive<DominatorTreeWrapperPass>();
}

void LoopInfoWrapperPass::print(raw_ostream &OS, const Module *) const {
  LI.print(OS);
}

PreservedAnalyses LoopVerifierPass::run(Function &F,
                                        FunctionAnalysisManager &AM) {
  LoopInfo &LI = AM.getResult<LoopAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  LI.verify(DT);
  return PreservedAnalyses::all();
}

//===----------------------------------------------------------------------===//
// LoopBlocksDFS implementation
//

/// Traverse the loop blocks and store the DFS result.
/// Useful for clients that just want the final DFS result and don't need to
/// visit blocks during the initial traversal.
void LoopBlocksDFS::perform(LoopInfo *LI) {
  LoopBlocksTraversal Traversal(*this, LI);
  for (LoopBlocksTraversal::POTIterator POI = Traversal.begin(),
                                        POE = Traversal.end();
       POI != POE; ++POI)
    ;
}