//===-- LoopUnroll.cpp - Loop unroller pass -------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass implements a simple loop unroller.  It works best when loops have
// been canonicalized by the -indvars pass, allowing it to determine the trip
// counts of loops easily.
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Metadata.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/UnrollLoop.h"
#include <climits>

using namespace llvm;

#define DEBUG_TYPE "loop-unroll"

static cl::opt<unsigned>
    UnrollThreshold("unroll-threshold", cl::init(150), cl::Hidden,
                    cl::desc("The baseline cost threshold for loop unrolling"));

static cl::opt<unsigned> UnrollPercentDynamicCostSavedThreshold(
    "unroll-percent-dynamic-cost-saved-threshold", cl::init(20), cl::Hidden,
    cl::desc("The percentage of estimated dynamic cost which must be saved by "
             "unrolling to allow unrolling up to the max threshold."));

static cl::opt<unsigned> UnrollDynamicCostSavingsDiscount(
    "unroll-dynamic-cost-savings-discount", cl::init(2000), cl::Hidden,
    cl::desc("This is the amount discounted from the total unroll cost when "
             "the unrolled form has a high dynamic cost savings (triggered by "
             "the '-unroll-percent-dynamic-cost-saved-threshold' flag)."));

static cl::opt<unsigned> UnrollMaxIterationsCountToAnalyze(
    "unroll-max-iteration-count-to-analyze", cl::init(0), cl::Hidden,
    cl::desc("Don't allow loop unrolling to simulate more than this number of "
             "iterations when checking full unroll profitability"));

static cl::opt<unsigned>
    UnrollCount("unroll-count", cl::init(0), cl::Hidden,
                cl::desc("Use this unroll count for all loops including those with "
                         "unroll_count pragma values, for testing purposes"));

static cl::opt<bool>
    UnrollAllowPartial("unroll-allow-partial", cl::init(false), cl::Hidden,
                       cl::desc("Allows loops to be partially unrolled until "
                                "-unroll-threshold loop size is reached."));

static cl::opt<bool>
    UnrollRuntime("unroll-runtime", cl::ZeroOrMore, cl::init(false), cl::Hidden,
                  cl::desc("Unroll loops with run-time trip counts"));

static cl::opt<unsigned>
    PragmaUnrollThreshold("pragma-unroll-threshold", cl::init(16 * 1024),
                          cl::Hidden,
                          cl::desc("Unrolled size limit for loops with an unroll(full) or "
                                   "unroll_count pragma."));

namespace {
class LoopUnroll : public LoopPass {
public:
  static char ID; // Pass ID, replacement for typeid
  LoopUnroll(int T = -1, int C = -1, int P = -1, int R = -1) : LoopPass(ID) {
    CurrentThreshold = (T == -1) ? UnrollThreshold : unsigned(T);
    CurrentPercentDynamicCostSavedThreshold =
        UnrollPercentDynamicCostSavedThreshold;
    CurrentDynamicCostSavingsDiscount = UnrollDynamicCostSavingsDiscount;
    CurrentCount = (C == -1) ? UnrollCount : unsigned(C);
    CurrentAllowPartial = (P == -1) ? UnrollAllowPartial : (bool)P;
    CurrentRuntime = (R == -1) ? UnrollRuntime : (bool)R;
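
    // Record which settings were supplied explicitly, either via constructor
    // argument or on the command line; explicit settings take precedence over
    // the target's unrolling preferences.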
    UserThreshold = (T != -1) || (UnrollThreshold.getNumOccurrences() > 0);
    UserPercentDynamicCostSavedThreshold =
        (UnrollPercentDynamicCostSavedThreshold.getNumOccurrences() > 0);
    UserDynamicCostSavingsDiscount =
        (UnrollDynamicCostSavingsDiscount.getNumOccurrences() > 0);
    UserAllowPartial = (P != -1) ||
                       (UnrollAllowPartial.getNumOccurrences() > 0);
    UserRuntime = (R != -1) || (UnrollRuntime.getNumOccurrences() > 0);
    UserCount = (C != -1) || (UnrollCount.getNumOccurrences() > 0);

    initializeLoopUnrollPass(*PassRegistry::getPassRegistry());
  }

  /// A magic value for use with the Threshold parameter to indicate
  /// that the loop unroll should be performed regardless of how much
  /// code expansion would result.
  static const unsigned NoThreshold = UINT_MAX;

  // Threshold to use when optsize is specified (and there is no
  // explicit -unroll-threshold).
  static const unsigned OptSizeUnrollThreshold = 50;

  // Default unroll count for loops with run-time trip count if
  // -unroll-count is not set.
  static const unsigned UnrollRuntimeCount = 8;

  unsigned CurrentCount;
  unsigned CurrentThreshold;
  unsigned CurrentPercentDynamicCostSavedThreshold;
  unsigned CurrentDynamicCostSavingsDiscount;
  bool CurrentAllowPartial;
  bool CurrentRuntime;

  // Flags for whether the 'current' settings are user-specified.
  bool UserCount;
  bool UserThreshold;
  bool UserPercentDynamicCostSavedThreshold;
  bool UserDynamicCostSavingsDiscount;
  bool UserAllowPartial;
  bool UserRuntime;

  bool runOnLoop(Loop *L, LPPassManager &LPM) override;

  /// This transformation requires natural loop information & requires that
  /// loop preheaders be inserted into the CFG...
  ///
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
    AU.addPreserved<LoopInfoWrapperPass>();
    AU.addRequiredID(LoopSimplifyID);
    AU.addPreservedID(LoopSimplifyID);
    AU.addRequiredID(LCSSAID);
    AU.addPreservedID(LCSSAID);
    AU.addRequired<ScalarEvolution>();
    AU.addPreserved<ScalarEvolution>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    // FIXME: Loop unroll requires LCSSA. And LCSSA requires dom info.
    // If loop unroll does not preserve dom info then LCSSA pass on next
    // loop will receive invalid dom info.
    // For now, recreate dom info, if loop is unrolled.
    AU.addPreserved<DominatorTreeWrapperPass>();
  }

  // Fill in the UnrollingPreferences parameter with values from the
  // TargetTransformationInfo.
  void getUnrollingPreferences(Loop *L, const TargetTransformInfo &TTI,
                               TargetTransformInfo::UnrollingPreferences &UP) {
    UP.Threshold = CurrentThreshold;
    UP.PercentDynamicCostSavedThreshold =
        CurrentPercentDynamicCostSavedThreshold;
    UP.DynamicCostSavingsDiscount = CurrentDynamicCostSavingsDiscount;
    UP.OptSizeThreshold = OptSizeUnrollThreshold;
    UP.PartialThreshold = CurrentThreshold;
    UP.PartialOptSizeThreshold = OptSizeUnrollThreshold;
    UP.Count = CurrentCount;
    UP.MaxCount = UINT_MAX;
    UP.Partial = CurrentAllowPartial;
    UP.Runtime = CurrentRuntime;
    UP.AllowExpensiveTripCount = false;
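    // Finally, give the target a chance to adjust the preferences
    // populated above.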
    TTI.getUnrollingPreferences(L, UP);
  }

  // Select and return an unroll count based on parameters from
  // user, unroll preferences, unroll pragmas, or a heuristic.
  // SetExplicitly is set to true if the unroll count is set by
  // the user or a pragma rather than selected heuristically.
  unsigned
  selectUnrollCount(const Loop *L, unsigned TripCount, bool PragmaFullUnroll,
                    unsigned PragmaCount,
                    const TargetTransformInfo::UnrollingPreferences &UP,
                    bool &SetExplicitly);

  // Select threshold values used to limit unrolling based on a
  // total unrolled size.  Parameters Threshold and PartialThreshold
  // are set to the maximum unrolled size for fully and partially
  // unrolled loops respectively.
  void selectThresholds(const Loop *L, bool HasPragma,
                        const TargetTransformInfo::UnrollingPreferences &UP,
                        unsigned &Threshold, unsigned &PartialThreshold,
                        unsigned &PercentDynamicCostSavedThreshold,
                        unsigned &DynamicCostSavingsDiscount) {
    // Determine the current unrolling threshold.  While this is
    // normally set from UnrollThreshold, it is overridden to a
    // smaller value if the current function is marked as
    // optimize-for-size, and the unroll threshold was not user
    // specified.
    Threshold = UserThreshold ? CurrentThreshold : UP.Threshold;
    PartialThreshold = UserThreshold ? CurrentThreshold : UP.PartialThreshold;
    PercentDynamicCostSavedThreshold =
        UserPercentDynamicCostSavedThreshold
            ? CurrentPercentDynamicCostSavedThreshold
            : UP.PercentDynamicCostSavedThreshold;
    DynamicCostSavingsDiscount = UserDynamicCostSavingsDiscount
                                     ? CurrentDynamicCostSavingsDiscount
                                     : UP.DynamicCostSavingsDiscount;

    if (!UserThreshold &&
        // FIXME: Use Function::optForSize().
        L->getHeader()->getParent()->hasFnAttribute(
            Attribute::OptimizeForSize)) {
      Threshold = UP.OptSizeThreshold;
      PartialThreshold = UP.PartialOptSizeThreshold;
    }
    if (HasPragma) {
      // If the loop has an unrolling pragma, we want to be more
      // aggressive with unrolling limits.  Set thresholds to at
      // least the PragmaUnrollThreshold value which is larger than the
      // default limits.
      if (Threshold != NoThreshold)
        Threshold = std::max<unsigned>(Threshold, PragmaUnrollThreshold);
      if (PartialThreshold != NoThreshold)
        PartialThreshold =
            std::max<unsigned>(PartialThreshold, PragmaUnrollThreshold);
    }
  }

  bool canUnrollCompletely(Loop *L, unsigned Threshold,
                           unsigned PercentDynamicCostSavedThreshold,
                           unsigned DynamicCostSavingsDiscount,
                           uint64_t UnrolledCost, uint64_t RolledDynamicCost);
};
}

char LoopUnroll::ID = 0;
INITIALIZE_PASS_BEGIN(LoopUnroll, "loop-unroll", "Unroll loops", false, false)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_DEPENDENCY(LCSSA)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
INITIALIZE_PASS_END(LoopUnroll, "loop-unroll", "Unroll loops", false, false)

Pass *llvm::createLoopUnrollPass(int Threshold, int Count, int AllowPartial,
                                 int Runtime) {
  return new LoopUnroll(Threshold, Count, AllowPartial, Runtime);
}

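// Convenience wrapper: keep the default threshold and count, but explicitly
// disable partial and run-time unrolling.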
Pass *llvm::createSimpleLoopUnrollPass() {
  return llvm::createLoopUnrollPass(-1, -1, 0, 0);
}

namespace {
// This class is used to get an estimate of the optimization effects that we
// could get from complete loop unrolling. It comes from the fact that some
// loads might be replaced with concrete constant values and that could trigger
// a chain of instruction simplifications.
//
// E.g. we might have:
//   int a[] = {0, 1, 0};
//   v = 0;
//   for (i = 0; i < 3; i++)
//     v += b[i]*a[i];
// If we completely unroll the loop, we would get:
//   v = b[0]*a[0] + b[1]*a[1] + b[2]*a[2]
// Which then will be simplified to:
//   v = b[0]* 0 + b[1]* 1 + b[2]* 0
// And finally:
//   v = b[1]
class UnrolledInstAnalyzer : private InstVisitor<UnrolledInstAnalyzer, bool> {
  typedef InstVisitor<UnrolledInstAnalyzer, bool> Base;
  friend class InstVisitor<UnrolledInstAnalyzer, bool>;
  struct SimplifiedAddress {
    Value *Base = nullptr;
    ConstantInt *Offset = nullptr;
  };

public:
  UnrolledInstAnalyzer(unsigned Iteration,
                       DenseMap<Value *, Constant *> &SimplifiedValues,
                       const Loop *L, ScalarEvolution &SE)
      : Iteration(Iteration), SimplifiedValues(SimplifiedValues), L(L), SE(SE) {
    IterationNumber = SE.getConstant(APInt(64, Iteration));
  }

  // Allow access to the initial visit method.
  using Base::visit;

private:
  /// \brief A cache of pointer bases and constant-folded offsets corresponding
  /// to GEP (or derived from GEP) instructions.
  ///
  /// In order to find the base pointer one needs to perform non-trivial
  /// traversal of the corresponding SCEV expression, so it's good to have the
  /// results saved.
  DenseMap<Value *, SimplifiedAddress> SimplifiedAddresses;

  /// \brief Number of currently simulated iteration.
  ///
  /// If an expression is ConstAddress+Constant, then the Constant is
  /// Start + Iteration*Step, where Start and Step could be obtained from
  /// the corresponding SCEV expression.
  unsigned Iteration;

  /// \brief SCEV expression corresponding to number of currently simulated
  /// iteration.
  const SCEV *IterationNumber;

  /// \brief A Value->Constant map for keeping values that we managed to
  /// constant-fold on the given iteration.
  ///
  /// While we walk the loop instructions, we build up and maintain a mapping
  /// of simplified values specific to this iteration.  The idea is to
  /// propagate any special information we have about loads that can be
  /// replaced with constants after complete unrolling, and account for likely
  /// simplifications post-unrolling.
  DenseMap<Value *, Constant *> &SimplifiedValues;

  const Loop *L;
  ScalarEvolution &SE;

  /// \brief Try to simplify instruction \param I using its SCEV expression.
  ///
  /// The idea is that some AddRec expressions become constants, which then
  /// could trigger folding of other instructions. However, that only happens
  /// for expressions whose start value is also constant, which isn't always
  /// the case. In another common and important case the start value is just
  /// some address (i.e. SCEVUnknown) - in this case we compute the offset and
  /// save it along with the base address instead.
  bool simplifyInstWithSCEV(Instruction *I) {
    if (!SE.isSCEVable(I->getType()))
      return false;

    const SCEV *S = SE.getSCEV(I);
    if (auto *SC = dyn_cast<SCEVConstant>(S)) {
      SimplifiedValues[I] = SC->getValue();
      return true;
    }

    auto *AR = dyn_cast<SCEVAddRecExpr>(S);
    if (!AR)
      return false;

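    // IterationNumber pins the AddRec to the concrete iteration being
    // simulated, so evaluateAtIteration can fold it down to a plain SCEV.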
    const SCEV *ValueAtIteration = AR->evaluateAtIteration(IterationNumber, SE);
    // Check if the AddRec expression becomes a constant.
    if (auto *SC = dyn_cast<SCEVConstant>(ValueAtIteration)) {
      SimplifiedValues[I] = SC->getValue();
      return true;
    }

    // Check if the offset from the base address becomes a constant.
    auto *Base = dyn_cast<SCEVUnknown>(SE.getPointerBase(S));
    if (!Base)
      return false;
    auto *Offset =
        dyn_cast<SCEVConstant>(SE.getMinusSCEV(ValueAtIteration, Base));
    if (!Offset)
      return false;
    SimplifiedAddress Address;
    Address.Base = Base->getValue();
    Address.Offset = Offset->getValue();
    SimplifiedAddresses[I] = Address;
    return true;
  }

  /// Base case for the instruction visitor.
  bool visitInstruction(Instruction &I) {
    return simplifyInstWithSCEV(&I);
  }

  /// Try to simplify binary operator I.
  ///
  /// TODO: Probably it's worth hoisting the code for estimating the
  /// simplification effects to a separate class, since we have very similar
  /// code in InlineCost already.
  bool visitBinaryOperator(BinaryOperator &I) {
    Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
    if (!isa<Constant>(LHS))
      if (Constant *SimpleLHS = SimplifiedValues.lookup(LHS))
        LHS = SimpleLHS;
    if (!isa<Constant>(RHS))
      if (Constant *SimpleRHS = SimplifiedValues.lookup(RHS))
        RHS = SimpleRHS;

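    // Try to fold the operation over the (possibly simplified) operands,
    // dispatching to the FP-aware helper so fast-math flags are respected.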
    Value *SimpleV = nullptr;
    const DataLayout &DL = I.getModule()->getDataLayout();
    if (auto FI = dyn_cast<FPMathOperator>(&I))
      SimpleV =
          SimplifyFPBinOp(I.getOpcode(), LHS, RHS, FI->getFastMathFlags(), DL);
    else
      SimpleV = SimplifyBinOp(I.getOpcode(), LHS, RHS, DL);

    if (Constant *C = dyn_cast_or_null<Constant>(SimpleV))
      SimplifiedValues[&I] = C;

    if (SimpleV)
      return true;
    return Base::visitBinaryOperator(I);
  }

  /// Try to fold load I.
  bool visitLoad(LoadInst &I) {
    Value *AddrOp = I.getPointerOperand();

    auto AddressIt = SimplifiedAddresses.find(AddrOp);
    if (AddressIt == SimplifiedAddresses.end())
      return false;
    ConstantInt *SimplifiedAddrOp = AddressIt->second.Offset;

    auto *GV = dyn_cast<GlobalVariable>(AddressIt->second.Base);
    // We're only interested in loads that can be completely folded to a
    // constant.
    if (!GV || !GV->hasInitializer())
      return false;

    ConstantDataSequential *CDS =
        dyn_cast<ConstantDataSequential>(GV->getInitializer());
    if (!CDS)
      return false;

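    // Map the constant byte offset to an element index in the initializer.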
    int ElemSize = CDS->getElementType()->getPrimitiveSizeInBits() / 8U;
    assert(SimplifiedAddrOp->getValue().getActiveBits() < 64 &&
           "Unexpectedly large index value.");
    int64_t Index = SimplifiedAddrOp->getSExtValue() / ElemSize;
    if (Index >= CDS->getNumElements()) {
      // FIXME: For now we conservatively ignore out of bound accesses, but
      // we're allowed to perform the optimization in this case.
      return false;
    }

    Constant *CV = CDS->getElementAsConstant(Index);
    assert(CV && "Constant expected.");
    SimplifiedValues[&I] = CV;

    return true;
  }

  bool visitCastInst(CastInst &I) {
    // Propagate constants through casts.
    Constant *COp = dyn_cast<Constant>(I.getOperand(0));
    if (!COp)
      COp = SimplifiedValues.lookup(I.getOperand(0));
    if (COp)
      if (Constant *C =
              ConstantExpr::getCast(I.getOpcode(), COp, I.getType())) {
        SimplifiedValues[&I] = C;
        return true;
      }

    return Base::visitCastInst(I);
  }

  bool visitCmpInst(CmpInst &I) {
    Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);

    // First try to handle simplified comparisons.
    if (!isa<Constant>(LHS))
      if (Constant *SimpleLHS = SimplifiedValues.lookup(LHS))
        LHS = SimpleLHS;
    if (!isa<Constant>(RHS))
      if (Constant *SimpleRHS = SimplifiedValues.lookup(RHS))
        RHS = SimpleRHS;

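    // If both operands have a simplified (base + constant offset) form and
    // share the same base pointer, compare their constant offsets instead.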
    if (!isa<Constant>(LHS) && !isa<Constant>(RHS)) {
      auto SimplifiedLHS = SimplifiedAddresses.find(LHS);
      if (SimplifiedLHS != SimplifiedAddresses.end()) {
        auto SimplifiedRHS = SimplifiedAddresses.find(RHS);
        if (SimplifiedRHS != SimplifiedAddresses.end()) {
          SimplifiedAddress &LHSAddr = SimplifiedLHS->second;
          SimplifiedAddress &RHSAddr = SimplifiedRHS->second;
          if (LHSAddr.Base == RHSAddr.Base) {
            LHS = LHSAddr.Offset;
            RHS = RHSAddr.Offset;
          }
        }
      }
    }

    if (Constant *CLHS = dyn_cast<Constant>(LHS)) {
      if (Constant *CRHS = dyn_cast<Constant>(RHS)) {
        if (Constant *C = ConstantExpr::getCompare(I.getPredicate(), CLHS, CRHS)) {
          SimplifiedValues[&I] = C;
          return true;
        }
      }
    }

    return Base::visitCmpInst(I);
  }
};
} // namespace
|
2015-02-05 10:34:00 +08:00
|
|
|
|
|
|
|
|
2015-05-23 01:41:35 +08:00
|
|
|
namespace {
|
|
|
|
struct EstimatedUnrollCost {
|
  /// \brief The estimated cost after unrolling.
  int UnrolledCost;

  /// \brief The estimated dynamic cost of executing the instructions in the
  /// rolled form.
  int RolledDynamicCost;
};
} // namespace
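
// Sketch of how these two estimates are meant to be consumed (named after
// this pass's unroll heuristic flags; the exact caller logic may differ):
// UnrolledCost is checked against the full-unroll threshold, and that
// threshold may be extended by DynamicCostSavingsDiscount when the relative
// dynamic savings are large enough, roughly:
//
//   (RolledDynamicCost - UnrolledCost) * 100
//       >= PercentDynamicCostSavedThreshold * RolledDynamicCost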

/// \brief Figure out if the loop is worth full unrolling.
///
/// Complete loop unrolling can make some loads constant, and we need to know
/// if that would expose any further optimization opportunities. This routine
/// estimates this optimization. It computes the cost of the unrolled loop
/// (UnrolledCost) and the dynamic cost of the original loop
/// (RolledDynamicCost). By dynamic cost we mean that we won't count the costs
/// of blocks that are known not to be executed (i.e. if we have a branch in
/// the loop and we know that at the given iteration its condition would be
/// resolved to true, we won't add up the cost of the 'false'-block).
/// \returns Optional value holding the RolledDynamicCost and UnrolledCost. If
/// the analysis fails (no benefit is expected from the unrolling, or the loop
/// is too big to analyze), the returned value is None.
static Optional<EstimatedUnrollCost>
analyzeLoopUnrollCost(const Loop *L, unsigned TripCount, DominatorTree &DT,
                      ScalarEvolution &SE, const TargetTransformInfo &TTI,
                      int MaxUnrolledLoopSize) {
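  // Hypothetical example (not from the original source) of the "dynamic cost"
  // notion: in
  //
  //   for (int i = 0; i < 4; ++i)
  //     if (i == 0) f(); else g();
  //
  // every simulated iteration resolves 'i == 0' to a constant, so only the
  // block actually taken on that iteration contributes to RolledDynamicCost;
  // the not-taken block is skipped entirely.
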
  // We want to be able to scale offsets by the trip count and add more offsets
  // to them without checking for overflows, and we already don't want to
  // analyze *massive* trip counts, so we force the max to be reasonably small.
  assert(UnrollMaxIterationsCountToAnalyze < (INT_MAX / 2) &&
         "The unroll iterations max is too large!");

  // Don't simulate loops with a big or unknown trip count.
  if (!UnrollMaxIterationsCountToAnalyze || !TripCount ||
      TripCount > UnrollMaxIterationsCountToAnalyze)
    return None;

  SmallSetVector<BasicBlock *, 16> BBWorklist;
  DenseMap<Value *, Constant *> SimplifiedValues;
  SmallVector<std::pair<Value *, Constant *>, 4> SimplifiedInputValues;

  // The estimated cost of the unrolled form of the loop. We try to estimate
  // this by simplifying as much as we can while computing the estimate.
  int UnrolledCost = 0;

  // We also track the estimated dynamic (that is, actually executed) cost in
  // the rolled form. This helps identify cases when the savings from unrolling
  // aren't just exposing dead control flows, but actually reducing the number
  // of dynamically executed instructions due to the simplifications which we
  // expect to occur after unrolling.
  int RolledDynamicCost = 0;

  // Ensure that we don't violate the loop structure invariants relied on by
  // this analysis.
  assert(L->isLoopSimplifyForm() && "Must put loop into normal form first.");
  assert(L->isLCSSAForm(DT) &&
         "Must have loops in LCSSA form to track live-out values.");

  DEBUG(dbgs() << "Starting LoopUnroll profitability analysis...\n");

  // Simulate execution of each iteration of the loop, counting the
  // instructions which would be simplified.
  // Since the same load can take different values on different iterations,
  // we literally have to go through all of the loop's iterations.
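  // For example (hypothetical source, not from the original file): with a
  // constant array 'static const int a[] = {0, 1, 2, 3};', the load of 'a[i]'
  // folds to a different constant on each simulated iteration.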
  for (unsigned Iteration = 0; Iteration < TripCount; ++Iteration) {
    DEBUG(dbgs() << " Analyzing iteration " << Iteration << "\n");

    // Prepare for the iteration by collecting any simplified entry or backedge
    // inputs.
    for (Instruction &I : *L->getHeader()) {
      auto *PHI = dyn_cast<PHINode>(&I);
      if (!PHI)
        break;

      // The loop header PHI nodes must have exactly two inputs: one from the
      // loop preheader and one from the loop latch.
      assert(
          PHI->getNumIncomingValues() == 2 &&
          "Must have an incoming value only for the preheader and the latch.");

      Value *V = PHI->getIncomingValueForBlock(
          Iteration == 0 ? L->getLoopPreheader() : L->getLoopLatch());
      Constant *C = dyn_cast<Constant>(V);
      if (Iteration != 0 && !C)
        C = SimplifiedValues.lookup(V);
      if (C)
        SimplifiedInputValues.push_back({PHI, C});
    }
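
    // Illustration (hypothetical IR, not from the original source): for an
    // induction PHI such as
    //
    //   %i = phi i32 [ 0, %preheader ], [ %i.next, %latch ]
    //
    // iteration 0 records {%i -> 0} from the preheader input; on iteration N
    // the latch input %i.next is looked up in SimplifiedValues, so %i keeps
    // resolving to a constant as long as %i.next simplified on iteration N-1.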

    // Now clear and re-populate the map for the next iteration.
    SimplifiedValues.clear();
    while (!SimplifiedInputValues.empty())
      SimplifiedValues.insert(SimplifiedInputValues.pop_back_val());

    UnrolledInstAnalyzer Analyzer(Iteration, SimplifiedValues, L, SE);

    BBWorklist.clear();
    BBWorklist.insert(L->getHeader());
    // Note that we *must not* cache the size; this loop grows the worklist.
    for (unsigned Idx = 0; Idx != BBWorklist.size(); ++Idx) {
      BasicBlock *BB = BBWorklist[Idx];

      // Visit all instructions in the given basic block and try to simplify
      // them. We don't change the actual IR, just count optimization
      // opportunities.
      for (Instruction &I : *BB) {
        int InstCost = TTI.getUserCost(&I);

        // Visit the instruction to analyze its loop cost after unrolling,
        // and if the visitor returns false, include this instruction in the
        // unrolled cost.
        if (!Analyzer.visit(I))
          UnrolledCost += InstCost;
        else {
          DEBUG(dbgs() << " " << I
                       << " would be simplified if loop is unrolled.\n");
          // Keep the 'else' body non-empty even in builds where DEBUG
          // expands to nothing.
          (void)0;
        }

        // Also track this instruction's expected cost when executing the
        // rolled loop form.
        RolledDynamicCost += InstCost;

        // If the unrolled body turns out to be too big, bail out.
        if (UnrolledCost > MaxUnrolledLoopSize) {
          DEBUG(dbgs() << " Exceeded threshold; exiting.\n"
                       << " UnrolledCost: " << UnrolledCost
                       << ", MaxUnrolledLoopSize: " << MaxUnrolledLoopSize
                       << "\n");
          return None;
        }
      }

      TerminatorInst *TI = BB->getTerminator();

      // Add in the live successors by first checking whether we have a
      // terminator that may be simplified based on the values simplified by
      // this call.
      if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
        if (BI->isConditional()) {
          if (Constant *SimpleCond =
                  SimplifiedValues.lookup(BI->getCondition())) {
            BasicBlock *Succ = nullptr;
            // Just take the first successor if the condition is undef.
            if (isa<UndefValue>(SimpleCond))
              Succ = BI->getSuccessor(0);
            else
              Succ = BI->getSuccessor(
                  cast<ConstantInt>(SimpleCond)->isZero() ? 1 : 0);
            if (L->contains(Succ))
              BBWorklist.insert(Succ);
            continue;
          }
        }
      } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
        if (Constant *SimpleCond =
                SimplifiedValues.lookup(SI->getCondition())) {
          BasicBlock *Succ = nullptr;
          // Just take the first successor if the condition is undef.
          if (isa<UndefValue>(SimpleCond))
            Succ = SI->getSuccessor(0);
          else
            Succ = SI->findCaseValue(cast<ConstantInt>(SimpleCond))
                       .getCaseSuccessor();
          if (L->contains(Succ))
            BBWorklist.insert(Succ);
          continue;
        }
      }
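
      // For instance (hypothetical, not from the original source): if BB ends
      // in 'br i1 %c, label %body, label %exit' and %c simplified to 'false'
      // on this iteration, only successor 1 (%exit) is treated as live, and
      // since it lies outside the loop nothing is queued for this terminator.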

      // Add BB's successors to the worklist.
      for (BasicBlock *Succ : successors(BB))
        if (L->contains(Succ))
          BBWorklist.insert(Succ);
    }

    // If we found no optimization opportunities on the first iteration, we
    // won't find them on later ones either.
    if (UnrolledCost == RolledDynamicCost) {
      DEBUG(dbgs() << " No opportunities found; exiting.\n"
                   << " UnrolledCost: " << UnrolledCost << "\n");
      return None;
    }
  }

  DEBUG(dbgs() << "Analysis finished:\n"
               << "UnrolledCost: " << UnrolledCost << ", "
               << "RolledDynamicCost: " << RolledDynamicCost << "\n");
  return {{UnrolledCost, RolledDynamicCost}};
}

/// ApproximateLoopSize - Approximate the size of the loop.
static unsigned ApproximateLoopSize(const Loop *L, unsigned &NumCalls,
                                    bool &NotDuplicatable,
                                    const TargetTransformInfo &TTI,
                                    AssumptionCache *AC) {
  SmallPtrSet<const Value *, 32> EphValues;
  CodeMetrics::collectEphemeralValues(L, AC, EphValues);
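
  // EphValues now holds the loop's ephemeral values -- values that exist only
  // to feed @llvm.assume intrinsics -- so analyzeBasicBlock below can skip
  // them: they carry no real runtime cost and vanish once the assumptions are
  // dropped.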

  CodeMetrics Metrics;
  for (Loop::block_iterator I = L->block_begin(), E = L->block_end();
       I != E; ++I)
    Metrics.analyzeBasicBlock(*I, TTI, EphValues);
  NumCalls = Metrics.NumInlineCandidates;
  NotDuplicatable = Metrics.notDuplicatable;

  unsigned LoopSize = Metrics.NumInsts;

  // Don't allow an estimate of size zero. This would allow unrolling of loops
  // with huge iteration counts, which is a compile time problem even if it's
  // not a problem for code quality. Also, the code using this size may assume
  // that each loop has at least three instructions (likely a conditional
  // branch, a comparison feeding that branch, and some kind of loop increment
  // feeding that comparison instruction).
  LoopSize = std::max(LoopSize, 3u);

  return LoopSize;
}

// Returns the loop hint metadata node with the given name (for example,
// "llvm.loop.unroll.count"). If no such metadata node exists, then nullptr is
// returned.
static MDNode *GetUnrollMetadataForLoop(const Loop *L, StringRef Name) {
  if (MDNode *LoopID = L->getLoopID())
    return GetUnrollMetadata(LoopID, Name);
  return nullptr;
}
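
// Shape of the metadata being queried here (a sketch; see the LLVM language
// reference for the authoritative form):
//
//   br i1 %cond, label %header, label %exit, !llvm.loop !0
//   !0 = !{!0, !1}  ; self-referential loop ID
//   !1 = !{!"llvm.loop.unroll.count", i32 4}
//
// For this loop, GetUnrollMetadataForLoop(L, "llvm.loop.unroll.count")
// returns !1.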

// Returns true if the loop has an unroll(full) pragma.
static bool HasUnrollFullPragma(const Loop *L) {
  return GetUnrollMetadataForLoop(L, "llvm.loop.unroll.full");
}

// Returns true if the loop has an unroll(disable) pragma.
static bool HasUnrollDisablePragma(const Loop *L) {
  return GetUnrollMetadataForLoop(L, "llvm.loop.unroll.disable");
}

// Returns true if the loop has a runtime unroll(disable) pragma.
static bool HasRuntimeUnrollDisablePragma(const Loop *L) {
  return GetUnrollMetadataForLoop(L, "llvm.loop.unroll.runtime.disable");
}

// If the loop has an unroll_count pragma, return the (necessarily
// positive) value from the pragma. Otherwise return 0.
static unsigned UnrollCountPragmaValue(const Loop *L) {
  MDNode *MD = GetUnrollMetadataForLoop(L, "llvm.loop.unroll.count");
  if (MD) {
    assert(MD->getNumOperands() == 2 &&
           "Unroll count hint metadata should have two operands.");
    unsigned Count =
        mdconst::extract<ConstantInt>(MD->getOperand(1))->getZExtValue();
    assert(Count >= 1 && "Unroll count must be positive.");
    return Count;
  }
  return 0;
}

// Remove existing unroll metadata and add unroll disable metadata to
// indicate the loop has already been unrolled. This prevents a loop
// from being unrolled more than is directed by a pragma if the loop
// unrolling pass is run more than once (which it generally is).
static void SetLoopAlreadyUnrolled(Loop *L) {
  MDNode *LoopID = L->getLoopID();
  if (!LoopID) return;

  // First remove any existing loop unrolling metadata.
  SmallVector<Metadata *, 4> MDs;
  // Reserve first location for self reference to the LoopID metadata node.
  MDs.push_back(nullptr);
  for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
    bool IsUnrollMetadata = false;
    MDNode *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
    if (MD) {
      const MDString *S = dyn_cast<MDString>(MD->getOperand(0));
      IsUnrollMetadata = S && S->getString().startswith("llvm.loop.unroll.");
    }
    if (!IsUnrollMetadata)
      MDs.push_back(LoopID->getOperand(i));
  }

  // Add unroll(disable) metadata to disable future unrolling.
  LLVMContext &Context = L->getHeader()->getContext();
  SmallVector<Metadata *, 1> DisableOperands;
  DisableOperands.push_back(MDString::get(Context, "llvm.loop.unroll.disable"));
  MDNode *DisableNode = MDNode::get(Context, DisableOperands);
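  // DisableNode is now the one-operand node !{!"llvm.loop.unroll.disable"};
  // combined with the filtering loop above, the net effect is to replace any
  // existing llvm.loop.unroll.* hints with a single explicit disable marker.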
MDs.push_back(DisableNode);
|
2014-07-19 05:04:33 +08:00
|
|
|
|
IR: Split Metadata from Value
llvm-svn: 223802
2014-12-10 02:38:53 +08:00
|
|
|
MDNode *NewLoopID = MDNode::get(Context, MDs);
|
2014-07-19 05:04:33 +08:00
|
|
|
// Set operand 0 to refer to the loop id itself.
|
|
|
|
NewLoopID->replaceOperandWith(0, NewLoopID);
|
|
|
|
L->setLoopID(NewLoopID);
|
|
|
|
}
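Pulled out of the blame interleaving, the metadata construction above amounts to the standalone sketch below. The function name is invented, and any step above that seeds MDs with the loop's existing metadata operands is elided:

#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Metadata.h"
using namespace llvm;

static MDNode *makeUnrollDisableLoopID(LLVMContext &Context) {
  SmallVector<Metadata *, 4> MDs;
  MDs.push_back(nullptr); // slot 0 is reserved for the self-reference
  SmallVector<Metadata *, 1> DisableOperands;
  DisableOperands.push_back(
      MDString::get(Context, "llvm.loop.unroll.disable"));
  MDs.push_back(MDNode::get(Context, DisableOperands));
  MDNode *NewLoopID = MDNode::get(Context, MDs);
  // Loop IDs are distinct by self-reference: operand 0 points at the node.
  NewLoopID->replaceOperandWith(0, NewLoopID);
  return NewLoopID;
}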
|
|
|
|
|
[Unroll] Rework the naming and structure of the new unroll heuristics.
llvm-svn: 239164
2015-06-06 01:01:43 +08:00
|
|
|
bool LoopUnroll::canUnrollCompletely(Loop *L, unsigned Threshold,
|
|
|
|
unsigned PercentDynamicCostSavedThreshold,
|
|
|
|
unsigned DynamicCostSavingsDiscount,
|
2015-06-06 13:24:10 +08:00
|
|
|
uint64_t UnrolledCost,
|
|
|
|
uint64_t RolledDynamicCost) {
|
2015-05-13 01:20:03 +08:00
|
|
|
|
|
|
|
if (Threshold == NoThreshold) {
|
|
|
|
DEBUG(dbgs() << " Can fully unroll, because no threshold is set.\n");
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
[Unroll] Rework the naming and structure of the new unroll heuristics.
llvm-svn: 239164
2015-06-06 01:01:43 +08:00
|
|
|
if (UnrolledCost <= Threshold) {
|
|
|
|
DEBUG(dbgs() << " Can fully unroll, because unrolled cost: "
|
|
|
|
<< UnrolledCost << " < " << Threshold << "\n");
|
2015-05-13 01:20:03 +08:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
[Unroll] Rework the naming and structure of the new unroll heuristics.
llvm-svn: 239164
2015-06-06 01:01:43 +08:00
|
|
|
assert(UnrolledCost && "UnrolledCost can't be 0 at this point.");
|
|
|
|
assert(RolledDynamicCost >= UnrolledCost &&
|
|
|
|
"Cannot have a higher unrolled cost than a rolled cost!");
|
|
|
|
|
|
|
|
// Compute the percentage of the dynamic cost in the rolled form that is
|
|
|
|
// saved when unrolled. If unrolling dramatically reduces the estimated
|
|
|
|
// dynamic cost of the loop, we use a higher threshold to allow more
|
|
|
|
// unrolling.
|
|
|
|
unsigned PercentDynamicCostSaved =
|
|
|
|
(uint64_t)(RolledDynamicCost - UnrolledCost) * 100ull / RolledDynamicCost;
|
|
|
|
|
|
|
|
if (PercentDynamicCostSaved >= PercentDynamicCostSavedThreshold &&
|
|
|
|
(int64_t)UnrolledCost - (int64_t)DynamicCostSavingsDiscount <=
|
|
|
|
(int64_t)Threshold) {
|
|
|
|
DEBUG(dbgs() << " Can fully unroll, because unrolling will reduce the "
|
|
|
|
"expected dynamic cost by " << PercentDynamicCostSaved
|
|
|
|
<< "% (threshold: " << PercentDynamicCostSavedThreshold
|
|
|
|
<< "%)\n"
|
|
|
|
<< " and the unrolled cost (" << UnrolledCost
|
|
|
|
<< ") is less than the max threshold ("
|
|
|
|
<< DynamicCostSavingsDiscount << ").\n");
|
2015-05-13 01:20:03 +08:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
DEBUG(dbgs() << " Too large to fully unroll:\n");
|
[Unroll] Rework the naming and structure of the new unroll heuristics.
llvm-svn: 239164
2015-06-06 01:01:43 +08:00
|
|
|
DEBUG(dbgs() << " Threshold: " << Threshold << "\n");
|
|
|
|
DEBUG(dbgs() << " Max threshold: " << DynamicCostSavingsDiscount << "\n");
|
|
|
|
DEBUG(dbgs() << " Percent cost saved threshold: "
|
|
|
|
<< PercentDynamicCostSavedThreshold << "%\n");
|
|
|
|
DEBUG(dbgs() << " Unrolled cost: " << UnrolledCost << "\n");
|
|
|
|
DEBUG(dbgs() << " Rolled dynamic cost: " << RolledDynamicCost << "\n");
|
|
|
|
DEBUG(dbgs() << " Percent cost saved: " << PercentDynamicCostSaved
|
|
|
|
<< "\n");
|
2015-05-13 01:20:03 +08:00
|
|
|
return false;
|
|
|
|
}
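Distilled from the function above, the decision can be read as the helper below; the name and the sample numbers are illustrative only (not tuned defaults), and the NoThreshold early-out is omitted:

#include <cstdint>

static bool wouldFullyUnroll(uint64_t UnrolledCost, uint64_t RolledDynamicCost,
                             unsigned Threshold,
                             unsigned PercentDynamicCostSavedThreshold,
                             unsigned DynamicCostSavingsDiscount) {
  if (UnrolledCost <= Threshold)
    return true; // cheap enough on its own
  // Same formula as above: percent of the rolled dynamic cost saved.
  unsigned PercentDynamicCostSaved =
      (RolledDynamicCost - UnrolledCost) * 100ull / RolledDynamicCost;
  // High savings buy a discount against the static threshold.
  return PercentDynamicCostSaved >= PercentDynamicCostSavedThreshold &&
         (int64_t)UnrolledCost - (int64_t)DynamicCostSavingsDiscount <=
             (int64_t)Threshold;
}

// For example, wouldFullyUnroll(120, 400, 75, 50, 100) is true: unrolling
// saves (400 - 120) * 100 / 400 = 70% >= 50%, and 120 - 100 = 20 <= 75.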
|
|
|
|
|
2014-06-17 07:53:02 +08:00
|
|
|
unsigned LoopUnroll::selectUnrollCount(
|
2014-07-24 01:31:37 +08:00
|
|
|
const Loop *L, unsigned TripCount, bool PragmaFullUnroll,
|
2014-06-17 07:53:02 +08:00
|
|
|
unsigned PragmaCount, const TargetTransformInfo::UnrollingPreferences &UP,
|
|
|
|
bool &SetExplicitly) {
|
|
|
|
SetExplicitly = true;
|
|
|
|
|
|
|
|
// User-specified count (either as a command-line option or
|
|
|
|
// constructor parameter) has highest precedence.
|
|
|
|
unsigned Count = UserCount ? CurrentCount : 0;
|
|
|
|
|
|
|
|
// If there is no user-specified count, unroll pragmas have the next
|
|
|
|
// highest precedence.
|
|
|
|
if (Count == 0) {
|
|
|
|
if (PragmaCount) {
|
|
|
|
Count = PragmaCount;
|
2014-07-24 01:31:37 +08:00
|
|
|
} else if (PragmaFullUnroll) {
|
2014-06-17 07:53:02 +08:00
|
|
|
Count = TripCount;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (Count == 0)
|
|
|
|
Count = UP.Count;
|
|
|
|
|
|
|
|
if (Count == 0) {
|
|
|
|
SetExplicitly = false;
|
|
|
|
if (TripCount == 0)
|
|
|
|
// Runtime trip count.
|
|
|
|
Count = UnrollRuntimeCount;
|
|
|
|
else
|
|
|
|
// Conservative heuristic: if we know the trip count, see if we can
|
|
|
|
// completely unroll (subject to the threshold, checked below); otherwise
|
|
|
|
// try to find the greatest factor of the trip count which is still under
|
|
|
|
// threshold value.
|
|
|
|
Count = TripCount;
|
|
|
|
}
|
|
|
|
if (TripCount && Count > TripCount)
|
|
|
|
return TripCount;
|
|
|
|
return Count;
|
|
|
|
}
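As a usage illustration of the precedence above: with no -unroll-count on the command line, the pragma below makes selectUnrollCount() return 4 and set SetExplicitly. The source is hypothetical:

void scale(float *a, int n) {
  // Clang lowers this to "llvm.loop.unroll.count" loop metadata, which
  // UnrollCountPragmaValue(L) reads back as PragmaCount == 4.
#pragma clang loop unroll_count(4)
  for (int i = 0; i < n; ++i)
    a[i] *= 2.0f;
}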
|
|
|
|
|
2007-03-07 09:38:05 +08:00
|
|
|
bool LoopUnroll::runOnLoop(Loop *L, LPPassManager &LPM) {
|
2014-02-06 08:07:05 +08:00
|
|
|
if (skipOptnoneFunction(L))
|
|
|
|
return false;
|
|
|
|
|
2015-02-01 20:01:35 +08:00
|
|
|
Function &F = *L->getHeader()->getParent();
|
|
|
|
|
2015-08-04 04:32:27 +08:00
|
|
|
auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
|
2015-01-17 22:16:18 +08:00
|
|
|
LoopInfo *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
|
2011-08-12 07:36:16 +08:00
|
|
|
ScalarEvolution *SE = &getAnalysis<ScalarEvolution>();
|
[PM] Change the core design of the TTI analysis to use a polymorphic
type erased interface and a single analysis pass rather than an
extremely complex analysis group.
The end result is that the TTI analysis can contain a type erased
implementation that supports the polymorphic TTI interface. We can build
one from a target-specific implementation or from a dummy one in the IR.
I've also factored all of the code into "mix-in"-able base classes,
including CRTP base classes to facilitate calling back up to the most
specialized form when delegating horizontally across the surface. These
aren't as clean as I would like and I'm planning to work on cleaning
some of this up, but I wanted to start by putting it into the right form.
There are a number of reasons for this change, and this particular
design. The first and foremost reason is that an analysis group is
complete overkill, and the chaining delegation strategy was so opaque,
confusing, and high-overhead that TTI was suffering greatly for it.
Several of the TTI functions had failed to be implemented in all places
because the chaining-based delegation provided no checking for
this. A few other functions were implemented with incorrect delegation.
The message to me was very clear working on this -- the delegation and
analysis group structure was too confusing to be useful here.
The other reason, of course, is that this is a *much* more natural fit for
the new pass manager. This will lay the ground work for a type-erased
per-function info object that can look up the correct subtarget and even
cache it.
Yet another benefit is that this will significantly simplify the
interaction of the pass managers and the TargetMachine. See the future
work below.
The downside of this change is that it is very, very verbose. I'm going
to work to improve that, but it is somewhat of an implementation necessity
in C++ to do type erasure. =/ I discussed this design really extensively
with Eric and Hal prior to going down this path, and afterward showed
them the result. No one was really thrilled with it, but there doesn't
seem to be a substantially better alternative. Using a base class and
virtual method dispatch would make the code much shorter, but as
discussed in the update to the programmer's manual and elsewhere,
a polymorphic interface feels like the more principled approach even if
this is perhaps the least compelling example of it. ;]
Ultimately, there is still a lot more to be done here, but this was the
huge chunk that I couldn't really split things out of because this was
the interface change to TTI. I've tried to minimize all the other parts
of this. The follow up work should include at least:
1) Improving the TargetMachine interface by having it directly return
a TTI object. Because we have a non-pass object with value semantics
and an internal type erasure mechanism, we can narrow the interface
of the TargetMachine to *just* do what we need: build and return
a TTI object that we can then insert into the pass pipeline.
2) Make the TTI object be fully specialized for a particular function.
This will include splitting off a minimal form of it which is
sufficient for the inliner and the old pass manager.
3) Add a new pass manager analysis which produces TTI objects from the
target machine for each function. This may actually be done as part
of #2 in order to use the new analysis to implement #2.
4) Work on narrowing the API between TTI and the targets so that it is
easier to understand and less verbose to type erase.
5) Work on narrowing the API between TTI and its clients so that it is
easier to understand and less verbose to forward.
6) Try to improve the CRTP-based delegation. I feel like this code is
just a bit messy and exacerbating the complexity of implementing
the TTI in each target.
Many thanks to Eric and Hal for their help here. I ended up blocked on
this somewhat more abruptly than I expected, and so I appreciate getting
it sorted out very quickly.
Differential Revision: http://reviews.llvm.org/D7293
llvm-svn: 227669
2015-01-31 11:43:40 +08:00
|
|
|
const TargetTransformInfo &TTI =
|
2015-02-01 20:01:35 +08:00
|
|
|
getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
|
|
|
|
auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
|
2007-05-12 04:53:41 +08:00
|
|
|
|
2007-05-08 23:19:19 +08:00
|
|
|
BasicBlock *Header = L->getHeader();
|
2010-01-05 09:27:44 +08:00
|
|
|
DEBUG(dbgs() << "Loop Unroll: F[" << Header->getParent()->getName()
|
2009-07-25 08:23:56 +08:00
|
|
|
<< "] Loop %" << Header->getName() << "\n");
|
2011-07-23 08:29:16 +08:00
|
|
|
|
2014-06-17 07:53:02 +08:00
|
|
|
if (HasUnrollDisablePragma(L)) {
|
|
|
|
return false;
|
2014-04-02 02:50:30 +08:00
|
|
|
}
|
2014-07-24 01:31:37 +08:00
|
|
|
bool PragmaFullUnroll = HasUnrollFullPragma(L);
|
2014-06-17 07:53:02 +08:00
|
|
|
unsigned PragmaCount = UnrollCountPragmaValue(L);
|
2014-07-24 01:31:37 +08:00
|
|
|
bool HasPragma = PragmaFullUnroll || PragmaCount > 0;
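Completing the picture from the unroll_count example earlier, the other two pragma forms probed above look like this (hypothetical source):

void f(int *a, int n) {
#pragma clang loop unroll(disable) // HasUnrollDisablePragma -> give up early
  for (int i = 0; i < n; ++i)
    a[i]++;

#pragma clang loop unroll(full)    // PragmaFullUnroll
  for (int i = 0; i < 8; ++i)
    a[i] *= 2;
}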
|
2014-06-17 07:53:02 +08:00
|
|
|
|
|
|
|
TargetTransformInfo::UnrollingPreferences UP;
|
2015-02-01 22:37:03 +08:00
|
|
|
getUnrollingPreferences(L, TTI, UP);
|
2004-04-18 13:20:17 +08:00
|
|
|
|
2011-08-12 07:36:16 +08:00
|
|
|
// Find the trip count, and the trip multiple if the count is not available
|
|
|
|
unsigned TripCount = 0;
|
2011-07-23 08:33:05 +08:00
|
|
|
unsigned TripMultiple = 1;
|
2014-10-11 08:12:11 +08:00
|
|
|
// If there are multiple exiting blocks but one of them is the latch, use the
|
|
|
|
// latch for the trip count estimation. Otherwise insist on a single exiting
|
|
|
|
// block for the trip count estimation.
|
|
|
|
BasicBlock *ExitingBlock = L->getLoopLatch();
|
|
|
|
if (!ExitingBlock || !L->isLoopExiting(ExitingBlock))
|
|
|
|
ExitingBlock = L->getExitingBlock();
|
|
|
|
if (ExitingBlock) {
|
|
|
|
TripCount = SE->getSmallConstantTripCount(L, ExitingBlock);
|
|
|
|
TripMultiple = SE->getSmallConstantTripMultiple(L, ExitingBlock);
|
2011-08-12 07:36:16 +08:00
|
|
|
}
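For intuition about what these ScalarEvolution queries yield, consider two illustrative functions (not taken from the test suite):

void known(float *a) {
  for (int i = 0; i < 16; ++i) // SE proves TripCount == 16 here
    a[i] = 0.0f;
}

void unknown(float *a, int n) {
  for (int i = 0; i < n; ++i)  // TripCount == 0; TripMultiple is a known
    a[i] = 0.0f;               // divisor of the real count, often just 1
}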
|
2013-09-12 03:25:43 +08:00
|
|
|
|
2014-06-17 07:53:02 +08:00
|
|
|
// Select an initial unroll count. This may be reduced later based
|
|
|
|
// on size thresholds.
|
|
|
|
bool CountSetExplicitly;
|
2014-07-24 01:31:37 +08:00
|
|
|
unsigned Count = selectUnrollCount(L, TripCount, PragmaFullUnroll,
|
|
|
|
PragmaCount, UP, CountSetExplicitly);
|
2014-06-17 07:53:02 +08:00
|
|
|
|
|
|
|
unsigned NumInlineCandidates;
|
|
|
|
bool notDuplicatable;
|
|
|
|
unsigned LoopSize =
|
2015-01-04 20:03:27 +08:00
|
|
|
ApproximateLoopSize(L, NumInlineCandidates, notDuplicatable, TTI, &AC);
|
2014-06-17 07:53:02 +08:00
|
|
|
DEBUG(dbgs() << " Loop Size = " << LoopSize << "\n");
|
2015-01-10 08:30:55 +08:00
|
|
|
|
|
|
|
// When computing the unrolled size, note that the conditional branch on the
|
|
|
|
// backedge and the comparison feeding it are not replicated like the rest of
|
|
|
|
// the loop body (which is why 2 is subtracted).
|
|
|
|
uint64_t UnrolledSize = (uint64_t)(LoopSize-2) * Count + 2;
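A quick worked instance of this estimate, with made-up numbers:

// LoopSize = 10, Count = 4: the 8 replicable instructions appear four
// times and a single compare-and-branch pair survives.
static_assert((10 - 2) * 4 + 2 == 34, "unrolled-size estimate");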
|
2014-06-17 07:53:02 +08:00
|
|
|
if (notDuplicatable) {
|
|
|
|
DEBUG(dbgs() << " Not unrolling loop which contains non-duplicatable"
|
|
|
|
<< " instructions.\n");
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
if (NumInlineCandidates != 0) {
|
|
|
|
DEBUG(dbgs() << " Not unrolling loop with inlinable calls.\n");
|
|
|
|
return false;
|
|
|
|
}
|
2011-12-09 14:19:40 +08:00
|
|
|
|
2014-06-17 07:53:02 +08:00
|
|
|
unsigned Threshold, PartialThreshold;
|
[Unroll] Rework the naming and structure of the new unroll heuristics.
llvm-svn: 239164
2015-06-06 01:01:43 +08:00
|
|
|
unsigned PercentDynamicCostSavedThreshold;
|
|
|
|
unsigned DynamicCostSavingsDiscount;
|
2015-02-05 10:34:00 +08:00
|
|
|
selectThresholds(L, HasPragma, UP, Threshold, PartialThreshold,
|
[Unroll] Rework the naming and structure of the new unroll heuristics.
llvm-svn: 239164
2015-06-06 01:01:43 +08:00
|
|
|
PercentDynamicCostSavedThreshold,
|
|
|
|
DynamicCostSavingsDiscount);
|
2014-06-17 07:53:02 +08:00
|
|
|
|
|
|
|
// Given Count, TripCount, and the thresholds, determine the type of
|
|
|
|
// unrolling which is to be performed.
|
|
|
|
enum { Full = 0, Partial = 1, Runtime = 2 };
|
|
|
|
int Unrolling;
|
|
|
|
if (TripCount && Count == TripCount) {
|
2015-05-13 01:20:03 +08:00
|
|
|
Unrolling = Partial;
|
|
|
|
// If the loop is really small, we don't need to run an expensive analysis.
|
[Unroll] Rework the naming and structure of the new unroll heuristics.
llvm-svn: 239164
2015-06-06 01:01:43 +08:00
|
|
|
if (canUnrollCompletely(L, Threshold, 100, DynamicCostSavingsDiscount,
|
|
|
|
UnrolledSize, UnrolledSize)) {
|
2014-06-17 07:53:02 +08:00
|
|
|
Unrolling = Full;
|
2015-05-13 01:20:03 +08:00
|
|
|
} else {
|
|
|
|
// The loop isn't that small, but we can still fully unroll it if that
|
|
|
|
// helps to remove a significant number of instructions.
|
|
|
|
// To check that, run additional analysis on the loop.
|
2015-08-04 04:32:27 +08:00
|
|
|
if (Optional<EstimatedUnrollCost> Cost =
|
|
|
|
analyzeLoopUnrollCost(L, TripCount, DT, *SE, TTI,
|
|
|
|
Threshold + DynamicCostSavingsDiscount))
|
[Unroll] Rework the naming and structure of the new unroll heuristics.
llvm-svn: 239164
2015-06-06 01:01:43 +08:00
|
|
|
if (canUnrollCompletely(L, Threshold, PercentDynamicCostSavedThreshold,
|
|
|
|
DynamicCostSavingsDiscount, Cost->UnrolledCost,
|
|
|
|
Cost->RolledDynamicCost)) {
|
2015-05-23 01:41:35 +08:00
|
|
|
Unrolling = Full;
|
|
|
|
}
|
2014-06-17 07:53:02 +08:00
|
|
|
}
|
|
|
|
} else if (TripCount && Count < TripCount) {
|
|
|
|
Unrolling = Partial;
|
|
|
|
} else {
|
|
|
|
Unrolling = Runtime;
|
2007-05-12 04:53:41 +08:00
|
|
|
}
|
2007-03-03 07:31:34 +08:00
|
|
|
|
2014-06-17 07:53:02 +08:00
|
|
|
// Reduce count based on the type of unrolling and the threshold values.
|
|
|
|
unsigned OriginalCount = Count;
|
2015-07-14 02:26:27 +08:00
|
|
|
bool AllowRuntime =
|
|
|
|
(PragmaCount > 0) || (UserRuntime ? CurrentRuntime : UP.Runtime);
|
|
|
|
// Don't runtime-unroll when the pragma disables it or requests a full unroll.
|
|
|
|
if (HasRuntimeUnrollDisablePragma(L) || PragmaFullUnroll) {
|
2015-03-09 14:14:18 +08:00
|
|
|
AllowRuntime = false;
|
|
|
|
}
|
2014-06-17 07:53:02 +08:00
|
|
|
if (Unrolling == Partial) {
|
|
|
|
bool AllowPartial = UserAllowPartial ? CurrentAllowPartial : UP.Partial;
|
|
|
|
if (!AllowPartial && !CountSetExplicitly) {
|
|
|
|
DEBUG(dbgs() << " will not try to unroll partially because "
|
|
|
|
<< "-unroll-allow-partial not given\n");
|
2012-12-21 00:04:27 +08:00
|
|
|
return false;
|
|
|
|
}
|
2014-06-17 07:53:02 +08:00
|
|
|
if (PartialThreshold != NoThreshold && UnrolledSize > PartialThreshold) {
|
|
|
|
// Reduce unroll count to a factor of TripCount for partial unrolling.
|
2015-01-10 08:30:55 +08:00
|
|
|
Count = (std::max(PartialThreshold, 3u)-2) / (LoopSize-2);
|
2014-06-17 07:53:02 +08:00
|
|
|
while (Count != 0 && TripCount % Count != 0)
|
|
|
|
Count--;
|
|
|
|
}
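Worked through with illustrative numbers: PartialThreshold = 100 and LoopSize = 12 bound the count at (100 - 2) / (12 - 2) = 9; with TripCount = 12 the loop above then steps down 9, 8, 7 and stops at 6, the largest divisor of the trip count under the bound:

static_assert((100u - 2) / (12u - 2) == 9, "initial partial-unroll bound");
// 12 % 9, 12 % 8, 12 % 7 are all nonzero; 12 % 6 == 0, so Count = 6.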
|
|
|
|
} else if (Unrolling == Runtime) {
|
|
|
|
if (!AllowRuntime && !CountSetExplicitly) {
|
|
|
|
DEBUG(dbgs() << " will not try to unroll loop with runtime trip count "
|
|
|
|
<< "-unroll-runtime not given\n");
|
2010-09-10 04:02:23 +08:00
|
|
|
return false;
|
2010-02-06 07:21:31 +08:00
|
|
|
}
|
2014-06-17 07:53:02 +08:00
|
|
|
// Reduce unroll count to be the largest power-of-two factor of
|
|
|
|
// the original count which satisfies the threshold limit.
|
|
|
|
while (Count != 0 && UnrolledSize > PartialThreshold) {
|
|
|
|
Count >>= 1;
|
2015-01-10 08:30:55 +08:00
|
|
|
UnrolledSize = (LoopSize-2) * Count + 2;
|
2014-06-17 07:53:02 +08:00
|
|
|
}
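For example (numbers invented), starting from Count = 8 with LoopSize = 20 and PartialThreshold = 100:

// (20 - 2) * 8 + 2 == 146 > 100, so Count halves to 4, and
// (20 - 2) * 4 + 2 == 74 <= 100 fits: the loop is runtime-unrolled by 4.
static_assert((20 - 2) * 4 + 2 == 74, "size after one halving step");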
|
|
|
|
if (Count > UP.MaxCount)
|
|
|
|
Count = UP.MaxCount;
|
|
|
|
DEBUG(dbgs() << " partially unrolling with count: " << Count << "\n");
|
|
|
|
}
|
|
|
|
|
|
|
|
if (HasPragma) {
|
2014-07-24 04:05:44 +08:00
|
|
|
if (PragmaCount != 0)
|
|
|
|
// If the loop has an unroll count pragma, mark the loop as unrolled to
|
|
|
|
// prevent unrolling beyond the count requested by the pragma.
|
|
|
|
SetLoopAlreadyUnrolled(L);
|
2014-07-19 05:04:33 +08:00
|
|
|
|
2014-06-17 07:53:02 +08:00
|
|
|
// Emit optimization remarks if we are unable to unroll the loop
|
|
|
|
// as directed by a pragma.
|
|
|
|
DebugLoc LoopLoc = L->getStartLoc();
|
|
|
|
Function *F = Header->getParent();
|
|
|
|
LLVMContext &Ctx = F->getContext();
|
2014-07-24 01:31:37 +08:00
|
|
|
if (PragmaFullUnroll && PragmaCount == 0) {
|
2014-06-17 07:53:02 +08:00
|
|
|
if (TripCount && Count != TripCount) {
|
|
|
|
emitOptimizationRemarkMissed(
|
|
|
|
Ctx, DEBUG_TYPE, *F, LoopLoc,
|
2014-07-24 01:31:37 +08:00
|
|
|
"Unable to fully unroll loop as directed by unroll(full) pragma "
|
2014-06-17 07:53:02 +08:00
|
|
|
"because unrolled size is too large.");
|
|
|
|
} else if (!TripCount) {
|
|
|
|
emitOptimizationRemarkMissed(
|
|
|
|
Ctx, DEBUG_TYPE, *F, LoopLoc,
|
2014-07-24 01:31:37 +08:00
|
|
|
"Unable to fully unroll loop as directed by unroll(full) pragma "
|
2014-06-17 07:53:02 +08:00
|
|
|
"because loop has a runtime trip count.");
|
2009-08-13 11:00:57 +08:00
|
|
|
}
|
2014-06-17 07:53:02 +08:00
|
|
|
} else if (PragmaCount > 0 && Count != OriginalCount) {
|
|
|
|
emitOptimizationRemarkMissed(
|
|
|
|
Ctx, DEBUG_TYPE, *F, LoopLoc,
|
|
|
|
"Unable to unroll loop the number of times directed by "
|
|
|
|
"unroll_count pragma because unrolled size is too large.");
|
2007-05-12 04:53:41 +08:00
|
|
|
}
|
2004-04-18 13:20:17 +08:00
|
|
|
}
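These remarks reach users through clang's remark machinery; the filter matches the pass name passed to emitOptimizationRemarkMissed, which here is DEBUG_TYPE ("loop-unroll"). An illustrative invocation:

// clang -O2 -Rpass-missed=loop-unroll file.c
// prints, e.g., "remark: Unable to fully unroll loop as directed by
// unroll(full) pragma because unrolled size is too large."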
|
2005-04-22 07:48:37 +08:00
|
|
|
|
2014-06-17 07:53:02 +08:00
|
|
|
if (Unrolling != Full && Count < 2) {
|
|
|
|
// Partial unrolling by 1 is a nop. For full unrolling, a factor
|
|
|
|
// of 1 makes sense because loop control can be eliminated.
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2008-05-14 08:24:14 +08:00
|
|
|
// Unroll the loop.
|
2015-04-14 11:20:38 +08:00
|
|
|
if (!UnrollLoop(L, Count, TripCount, AllowRuntime, UP.AllowExpensiveTripCount,
|
|
|
|
TripMultiple, LI, this, &LPM, &AC))
|
2008-05-14 08:24:14 +08:00
|
|
|
return false;
|
2004-04-18 13:20:17 +08:00
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|