//===--------- ScopInfo.cpp - Create Scops from LLVM IR ------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Create a polyhedral description for a static control flow region.
//
// This pass creates a polyhedral description of the Scops detected by the
// Scop detection, derived from their LLVM-IR code.
//
// This representation is shared among several tools in the polyhedral
// community, e.g. CLooG, Pluto, LooPo, and Graphite.
//
//===----------------------------------------------------------------------===//

#include "polly/ScopInfo.h"
#include "polly/LinkAllPasses.h"
#include "polly/Options.h"
#include "polly/Support/GICHelper.h"
#include "polly/Support/SCEVValidator.h"
#include "polly/Support/ScopHelper.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/RegionIterator.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/Support/Debug.h"
#include "isl/aff.h"
#include "isl/constraint.h"
#include "isl/local_space.h"
#include "isl/map.h"
#include "isl/options.h"
#include "isl/printer.h"
#include "isl/schedule.h"
#include "isl/schedule_node.h"
#include "isl/set.h"
#include "isl/union_map.h"
#include "isl/union_set.h"
#include "isl/val.h"
#include <sstream>
#include <string>
#include <vector>

using namespace llvm;
using namespace polly;

#define DEBUG_TYPE "polly-scops"

STATISTIC(ScopFound, "Number of valid Scops");
STATISTIC(RichScopFound, "Number of Scops containing a loop");

// The maximal number of basic sets we allow to be created during domain
// construction. More complex scops will result in very high compile time and
// are also unlikely to result in good code.
static int const MaxConjunctsInDomain = 20;

static cl::opt<bool> ModelReadOnlyScalars(
    "polly-analyze-read-only-scalars",
    cl::desc("Model read-only scalar values in the scop description"),
    cl::Hidden, cl::ZeroOrMore, cl::init(true), cl::cat(PollyCategory));

// Multiplicative reductions can be disabled separately as these kinds of
// operations can overflow easily. Additive reductions and bit operations
// are, in contrast, pretty stable.
static cl::opt<bool> DisableMultiplicativeReductions(
    "polly-disable-multiplicative-reductions",
    cl::desc("Disable multiplicative reductions"), cl::Hidden, cl::ZeroOrMore,
    cl::init(false), cl::cat(PollyCategory));

static cl::opt<unsigned> RunTimeChecksMaxParameters(
    "polly-rtc-max-parameters",
    cl::desc("The maximal number of parameters allowed in RTCs."), cl::Hidden,
    cl::ZeroOrMore, cl::init(8), cl::cat(PollyCategory));

static cl::opt<unsigned> RunTimeChecksMaxArraysPerGroup(
    "polly-rtc-max-arrays-per-group",
    cl::desc("The maximal number of arrays to compare in each alias group."),
    cl::Hidden, cl::ZeroOrMore, cl::init(20), cl::cat(PollyCategory));

static cl::opt<std::string> UserContextStr(
    "polly-context", cl::value_desc("isl parameter set"),
    cl::desc("Provide additional constraints on the context parameters"),
    cl::init(""), cl::cat(PollyCategory));

static cl::opt<bool> DetectReductions("polly-detect-reductions",
                                      cl::desc("Detect and exploit reductions"),
                                      cl::Hidden, cl::ZeroOrMore,
                                      cl::init(true), cl::cat(PollyCategory));

static cl::opt<int> MaxDisjunctsAssumed(
    "polly-max-disjuncts-assumed",
    cl::desc("The maximal number of disjuncts we allow in the assumption "
             "context (this bounds compile time)"),
    cl::Hidden, cl::ZeroOrMore, cl::init(150), cl::cat(PollyCategory));

static cl::opt<bool> IgnoreIntegerWrapping(
    "polly-ignore-integer-wrapping",
    cl::desc("Do not build run-time checks to prove the absence of integer "
             "wrapping"),
    cl::Hidden, cl::ZeroOrMore, cl::init(false), cl::cat(PollyCategory));

//===----------------------------------------------------------------------===//

// Create a sequence of two schedules. Either argument may be null and is
// interpreted as the empty schedule. Can also return null if both schedules
// are empty.
static __isl_give isl_schedule *
combineInSequence(__isl_take isl_schedule *Prev,
                  __isl_take isl_schedule *Succ) {
  if (!Prev)
    return Succ;
  if (!Succ)
    return Prev;

  return isl_schedule_sequence(Prev, Succ);
}

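/// @brief Bound dimension @p dim of set @p S by the constant range @p Range.
///
/// For a regular range, the signed minimum and maximum are intersected as
/// lower and upper bound. For a sign-wrapped range, the result is instead the
/// union of the values above the lower bound and the values below the
/// (exclusive) upper bound.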
static __isl_give isl_set *addRangeBoundsToSet(__isl_take isl_set *S,
                                               const ConstantRange &Range,
                                               int dim,
                                               enum isl_dim_type type) {
  isl_val *V;
  isl_ctx *ctx = isl_set_get_ctx(S);

  bool useLowerUpperBound = Range.isSignWrappedSet() && !Range.isFullSet();
  const auto LB = useLowerUpperBound ? Range.getLower() : Range.getSignedMin();
  V = isl_valFromAPInt(ctx, LB, true);
  isl_set *SLB = isl_set_lower_bound_val(isl_set_copy(S), type, dim, V);

  const auto UB = useLowerUpperBound ? Range.getUpper() : Range.getSignedMax();
  V = isl_valFromAPInt(ctx, UB, true);
  if (useLowerUpperBound)
    V = isl_val_sub_ui(V, 1);
  isl_set *SUB = isl_set_upper_bound_val(S, type, dim, V);

  if (useLowerUpperBound)
    return isl_set_union(SLB, SUB);
  else
    return isl_set_intersect(SLB, SUB);
}

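/// @brief Find the ScopArrayInfo a loaded base pointer originates from.
///
/// If @p BasePtr is a LoadInst contained in the region of @p S, return the
/// ScopArrayInfo of the array this indirect base pointer is loaded from;
/// otherwise return nullptr.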
static const ScopArrayInfo *identifyBasePtrOriginSAI(Scop *S, Value *BasePtr) {
  LoadInst *BasePtrLI = dyn_cast<LoadInst>(BasePtr);
  if (!BasePtrLI)
    return nullptr;

  if (!S->getRegion().contains(BasePtrLI))
    return nullptr;

  ScalarEvolution &SE = *S->getSE();

  auto *OriginBaseSCEV =
      SE.getPointerBase(SE.getSCEV(BasePtrLI->getPointerOperand()));
  if (!OriginBaseSCEV)
    return nullptr;

  auto *OriginBaseSCEVUnknown = dyn_cast<SCEVUnknown>(OriginBaseSCEV);
  if (!OriginBaseSCEVUnknown)
    return nullptr;

  return S->getScopArrayInfo(OriginBaseSCEVUnknown->getValue(),
                             ScopArrayInfo::MK_Array);
}

ScopArrayInfo::ScopArrayInfo(Value *BasePtr, Type *ElementType, isl_ctx *Ctx,
                             ArrayRef<const SCEV *> Sizes, enum MemoryKind Kind,
                             const DataLayout &DL, Scop *S)
    : BasePtr(BasePtr), ElementType(ElementType), Kind(Kind), DL(DL), S(*S) {
  std::string BasePtrName =
      getIslCompatibleName("MemRef_", BasePtr, Kind == MK_PHI ? "__phi" : "");
  Id = isl_id_alloc(Ctx, BasePtrName.c_str(), this);

  updateSizes(Sizes, ElementType);
  BasePtrOriginSAI = identifyBasePtrOriginSAI(S, BasePtr);
  if (BasePtrOriginSAI)
    const_cast<ScopArrayInfo *>(BasePtrOriginSAI)->addDerivedSAI(this);
}

__isl_give isl_space *ScopArrayInfo::getSpace() const {
  auto Space =
      isl_space_set_alloc(isl_id_get_ctx(Id), 0, getNumberOfDimensions());
  Space = isl_space_set_tuple_id(Space, isl_dim_set, isl_id_copy(Id));
  return Space;
}

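/// @brief Update the element type and dimension sizes of this array.
///
/// Returns false if @p NewSizes conflict with the already recorded sizes in a
/// shared dimension, and true otherwise. The recorded sizes are only replaced
/// if the new list covers more dimensions than the old one.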
bool ScopArrayInfo::updateSizes(ArrayRef<const SCEV *> NewSizes,
                                Type *NewElementType) {
  auto OldElementSize = DL.getTypeAllocSizeInBits(ElementType);
  auto NewElementSize = DL.getTypeAllocSizeInBits(NewElementType);

  if (NewElementSize != OldElementSize) {
    // Keep the smaller type as canonical element type if the allocation sizes
    // are multiples of each other; otherwise use an integer type whose size
    // is the GCD of both allocation sizes.
    if (OldElementSize % NewElementSize == 0 &&
        NewElementSize < OldElementSize) {
      ElementType = NewElementType;
    } else {
      auto GCD = GreatestCommonDivisor64(NewElementSize, OldElementSize);
      ElementType = IntegerType::get(ElementType->getContext(), GCD);
    }
  }

  int SharedDims = std::min(NewSizes.size(), DimensionSizes.size());
  int ExtraDimsNew = NewSizes.size() - SharedDims;
  int ExtraDimsOld = DimensionSizes.size() - SharedDims;
  for (int i = 0; i < SharedDims; i++)
    if (NewSizes[i + ExtraDimsNew] != DimensionSizes[i + ExtraDimsOld])
      return false;

  if (DimensionSizes.size() >= NewSizes.size())
    return true;

  DimensionSizes.clear();
  DimensionSizes.insert(DimensionSizes.begin(), NewSizes.begin(),
                        NewSizes.end());
  for (isl_pw_aff *Size : DimensionSizesPw)
    isl_pw_aff_free(Size);
  DimensionSizesPw.clear();
  for (const SCEV *Expr : DimensionSizes) {
    isl_pw_aff *Size = S.getPwAff(Expr);
    DimensionSizesPw.push_back(Size);
  }
  return true;
}

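// Example: an array first accessed through i32 loads and later through i64
// loads keeps a 32-bit canonical element type (the allocation sizes are
// multiples of each other), whereas mixing i32 and i48 accesses yields the
// GCD type i16.
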
ScopArrayInfo::~ScopArrayInfo() {
  isl_id_free(Id);
  for (isl_pw_aff *Size : DimensionSizesPw)
    isl_pw_aff_free(Size);
}

std::string ScopArrayInfo::getName() const { return isl_id_get_name(Id); }

int ScopArrayInfo::getElemSizeInBytes() const {
  return DL.getTypeAllocSize(ElementType);
}

isl_id *ScopArrayInfo::getBasePtrId() const { return isl_id_copy(Id); }

void ScopArrayInfo::dump() const { print(errs()); }

void ScopArrayInfo::print(raw_ostream &OS, bool SizeAsPwAff) const {
  OS.indent(8) << *getElementType() << " " << getName();
  if (getNumberOfDimensions() > 0)
    OS << "[*]";
  for (unsigned u = 1; u < getNumberOfDimensions(); u++) {
    OS << "[";

    if (SizeAsPwAff) {
      auto Size = getDimensionSizePw(u);
      OS << " " << Size << " ";
      isl_pw_aff_free(Size);
    } else {
      OS << *getDimensionSize(u);
    }

    OS << "]";
  }

  OS << ";";

  if (BasePtrOriginSAI)
    OS << " [BasePtrOrigin: " << BasePtrOriginSAI->getName() << "]";

  OS << " // Element size " << getElemSizeInBytes() << "\n";
}

const ScopArrayInfo *
ScopArrayInfo::getFromAccessFunction(__isl_keep isl_pw_multi_aff *PMA) {
  isl_id *Id = isl_pw_multi_aff_get_tuple_id(PMA, isl_dim_out);
  assert(Id && "Output dimension didn't have an ID");
  return getFromId(Id);
}

const ScopArrayInfo *ScopArrayInfo::getFromId(isl_id *Id) {
  void *User = isl_id_get_user(Id);
  const ScopArrayInfo *SAI = static_cast<ScopArrayInfo *>(User);
  isl_id_free(Id);
  return SAI;
}

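/// @brief Lift this access to the dimensionality and element size of its
/// ScopArrayInfo.
///
/// Accesses derived with fewer dimensions than the canonical array are padded
/// with leading zero subscripts; accesses whose element type is larger than
/// the canonical element type are turned into ranges over the corresponding
/// smaller elements.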
void MemoryAccess::updateDimensionality() {
  auto ArraySpace = getScopArrayInfo()->getSpace();
  auto AccessSpace = isl_space_range(isl_map_get_space(AccessRelation));

  auto DimsArray = isl_space_dim(ArraySpace, isl_dim_set);
  auto DimsAccess = isl_space_dim(AccessSpace, isl_dim_set);
  auto DimsMissing = DimsArray - DimsAccess;

  auto Map = isl_map_from_domain_and_range(
      isl_set_universe(AccessSpace),
      isl_set_universe(isl_space_copy(ArraySpace)));

  for (unsigned i = 0; i < DimsMissing; i++)
    Map = isl_map_fix_si(Map, isl_dim_out, i, 0);

  for (unsigned i = DimsMissing; i < DimsArray; i++)
    Map = isl_map_equate(Map, isl_dim_in, i - DimsMissing, isl_dim_out, i);

  AccessRelation = isl_map_apply_range(AccessRelation, Map);

  // Introduce multi-element accesses in case the type loaded by this memory
  // access is larger than the canonical element type of the array.
  //
  // An access ((float *)A)[i] to an array char *A is modeled as
  // { [i] -> A[o] : 4i <= o <= 4i + 3 }
  unsigned ArrayElemSize = getScopArrayInfo()->getElemSizeInBytes();
  if (ElemBytes > ArrayElemSize) {
    assert(ElemBytes % ArrayElemSize == 0 &&
           "Loaded element size should be a multiple of the canonical element "
           "size");
    auto Map = isl_map_from_domain_and_range(
        isl_set_universe(isl_space_copy(ArraySpace)),
        isl_set_universe(isl_space_copy(ArraySpace)));
    for (unsigned i = 0; i < DimsArray - 1; i++)
      Map = isl_map_equate(Map, isl_dim_in, i, isl_dim_out, i);

    isl_ctx *Ctx;
    isl_constraint *C;
    isl_local_space *LS;

    LS = isl_local_space_from_space(isl_map_get_space(Map));
    Ctx = isl_map_get_ctx(Map);
    int Num = ElemBytes / getScopArrayInfo()->getElemSizeInBytes();

    C = isl_constraint_alloc_inequality(isl_local_space_copy(LS));
    C = isl_constraint_set_constant_val(C, isl_val_int_from_si(Ctx, Num - 1));
    C = isl_constraint_set_coefficient_si(C, isl_dim_in,
                                          DimsArray - 1 - DimsMissing, Num);
    C = isl_constraint_set_coefficient_si(C, isl_dim_out, DimsArray - 1, -1);
    Map = isl_map_add_constraint(Map, C);

    C = isl_constraint_alloc_inequality(LS);
    C = isl_constraint_set_coefficient_si(C, isl_dim_in,
                                          DimsArray - 1 - DimsMissing, -Num);
    C = isl_constraint_set_coefficient_si(C, isl_dim_out, DimsArray - 1, 1);
    C = isl_constraint_set_constant_val(C, isl_val_int_from_si(Ctx, 0));
    Map = isl_map_add_constraint(Map, C);
    AccessRelation = isl_map_apply_range(AccessRelation, Map);
  }

  isl_space_free(ArraySpace);

  assumeNoOutOfBound();
}

const std::string
MemoryAccess::getReductionOperatorStr(MemoryAccess::ReductionType RT) {
  switch (RT) {
  case MemoryAccess::RT_NONE:
    llvm_unreachable("Requested a reduction operator string for a memory "
                     "access which isn't a reduction");
  case MemoryAccess::RT_ADD:
    return "+";
  case MemoryAccess::RT_MUL:
    return "*";
  case MemoryAccess::RT_BOR:
    return "|";
  case MemoryAccess::RT_BXOR:
    return "^";
  case MemoryAccess::RT_BAND:
    return "&";
  }
  llvm_unreachable("Unknown reduction type");
  return "";
}

/// @brief Return the reduction type for a given binary operator.
static MemoryAccess::ReductionType getReductionType(const BinaryOperator *BinOp,
                                                    const Instruction *Load) {
  if (!BinOp)
    return MemoryAccess::RT_NONE;
  switch (BinOp->getOpcode()) {
  case Instruction::FAdd:
    if (!BinOp->hasUnsafeAlgebra())
      return MemoryAccess::RT_NONE;
    // Fall through
  case Instruction::Add:
    return MemoryAccess::RT_ADD;
  case Instruction::Or:
    return MemoryAccess::RT_BOR;
  case Instruction::Xor:
    return MemoryAccess::RT_BXOR;
  case Instruction::And:
    return MemoryAccess::RT_BAND;
  case Instruction::FMul:
    if (!BinOp->hasUnsafeAlgebra())
      return MemoryAccess::RT_NONE;
    // Fall through
  case Instruction::Mul:
    if (DisableMultiplicativeReductions)
      return MemoryAccess::RT_NONE;
    return MemoryAccess::RT_MUL;
  default:
    return MemoryAccess::RT_NONE;
  }
}

/// @brief Derive the individual index expressions from a GEP instruction.
///
/// This function optimistically assumes the GEP references into a fixed size
/// array. If this is actually true, this function returns a list of array
/// subscript expressions as SCEV as well as a list of integers describing
/// the size of the individual array dimensions. Both lists have either equal
/// length, or the size list is one element shorter in case there is no known
/// size available for the outermost array dimension.
///
/// @param GEP The GetElementPtr instruction to analyze.
///
/// @return A tuple with the subscript expressions and the dimension sizes.
static std::tuple<std::vector<const SCEV *>, std::vector<int>>
getIndexExpressionsFromGEP(GetElementPtrInst *GEP, ScalarEvolution &SE) {
  std::vector<const SCEV *> Subscripts;
  std::vector<int> Sizes;

  Type *Ty = GEP->getPointerOperandType();

  bool DroppedFirstDim = false;

  for (unsigned i = 1; i < GEP->getNumOperands(); i++) {

    const SCEV *Expr = SE.getSCEV(GEP->getOperand(i));

    if (i == 1) {
      if (auto PtrTy = dyn_cast<PointerType>(Ty)) {
        Ty = PtrTy->getElementType();
      } else if (auto ArrayTy = dyn_cast<ArrayType>(Ty)) {
        Ty = ArrayTy->getElementType();
      } else {
        Subscripts.clear();
        Sizes.clear();
        break;
      }
      if (auto Const = dyn_cast<SCEVConstant>(Expr))
        if (Const->getValue()->isZero()) {
          DroppedFirstDim = true;
          continue;
        }
      Subscripts.push_back(Expr);
      continue;
    }

    auto ArrayTy = dyn_cast<ArrayType>(Ty);
    if (!ArrayTy) {
      Subscripts.clear();
      Sizes.clear();
      break;
    }

    Subscripts.push_back(Expr);
    if (!(DroppedFirstDim && i == 2))
      Sizes.push_back(ArrayTy->getNumElements());

    Ty = ArrayTy->getElementType();
  }

  return std::make_tuple(Subscripts, Sizes);
}

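// Example: for a GEP computing &A[0][i][j] on a pointer to a
// [100 x [50 x float]] array, getIndexExpressionsFromGEP returns the
// subscripts {i, j} and the sizes {50}: the constant-zero first index is
// dropped together with the size of the outermost dimension, for which no
// bound is needed.
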
MemoryAccess::~MemoryAccess() {
  isl_id_free(Id);
  isl_map_free(AccessRelation);
  isl_map_free(NewAccessRelation);
}

const ScopArrayInfo *MemoryAccess::getScopArrayInfo() const {
  isl_id *ArrayId = getArrayId();
  void *User = isl_id_get_user(ArrayId);
  const ScopArrayInfo *SAI = static_cast<ScopArrayInfo *>(User);
  isl_id_free(ArrayId);
  return SAI;
}

__isl_give isl_id *MemoryAccess::getArrayId() const {
  return isl_map_get_tuple_id(AccessRelation, isl_dim_out);
}

__isl_give isl_map *MemoryAccess::getAddressFunction() const {
  return isl_map_lexmin(getAccessRelation());
}

__isl_give isl_pw_multi_aff *MemoryAccess::applyScheduleToAccessRelation(
    __isl_take isl_union_map *USchedule) const {
  isl_map *Schedule, *ScheduledAccRel;
  isl_union_set *UDomain;

  UDomain = isl_union_set_from_set(getStatement()->getDomain());
  USchedule = isl_union_map_intersect_domain(USchedule, UDomain);
  Schedule = isl_map_from_union_map(USchedule);
  ScheduledAccRel = isl_map_apply_domain(getAddressFunction(), Schedule);
  return isl_pw_multi_aff_from_map(ScheduledAccRel);
}

__isl_give isl_map *MemoryAccess::getOriginalAccessRelation() const {
  return isl_map_copy(AccessRelation);
}

std::string MemoryAccess::getOriginalAccessRelationStr() const {
  return stringFromIslObj(AccessRelation);
}

__isl_give isl_space *MemoryAccess::getOriginalAccessRelationSpace() const {
  return isl_map_get_space(AccessRelation);
}

__isl_give isl_map *MemoryAccess::getNewAccessRelation() const {
  return isl_map_copy(NewAccessRelation);
}

std::string MemoryAccess::getNewAccessRelationStr() const {
  return stringFromIslObj(NewAccessRelation);
}

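/// @brief Build a universe map from the statement domain to a single,
/// unconstrained set dimension.
///
/// This serves as a starting point for accesses whose subscripts cannot be
/// expressed precisely, e.g. the non-affine case in buildAccessRelation.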
__isl_give isl_basic_map *
MemoryAccess::createBasicAccessMap(ScopStmt *Statement) {
  isl_space *Space = isl_space_set_alloc(Statement->getIslCtx(), 0, 1);
  Space = isl_space_align_params(Space, Statement->getDomainSpace());

  return isl_basic_map_from_domain_and_range(
      isl_basic_set_universe(Statement->getDomainSpace()),
      isl_basic_set_universe(Space));
}

// Formalize no out-of-bound access assumption
//
// When delinearizing array accesses we optimistically assume that the
// delinearized accesses do not access out of bound locations (the subscript
// expression of each array dimension evaluates, for each statement instance
// that is executed, to a value that is greater than or equal to zero and
// strictly smaller than the size of the corresponding dimension). The only
// exception is the outermost dimension, for which we do not need to assume
// any upper bound. At this point we formalize this assumption to ensure that
// at code generation time the relevant run-time checks can be generated.
//
// To find the set of constraints necessary to avoid out of bound accesses, we
// first build the set of data locations that are not within array bounds. We
// then apply the reverse access relation to obtain the set of iterations that
// may contain invalid accesses and reduce this set of iterations to the ones
// that are actually executed by intersecting them with the domain of the
// statement. If we now project out all loop dimensions, we obtain a set of
// parameters that may cause statement instances to be executed that may
// possibly yield out of bound memory accesses. The complement of these
// constraints is the set of constraints that needs to be assumed to ensure
// such statement instances are never executed.
void MemoryAccess::assumeNoOutOfBound() {
  auto *SAI = getScopArrayInfo();
  isl_space *Space = isl_space_range(getOriginalAccessRelationSpace());
  isl_set *Outside = isl_set_empty(isl_space_copy(Space));
  for (int i = 1, Size = isl_space_dim(Space, isl_dim_set); i < Size; ++i) {
    isl_local_space *LS = isl_local_space_from_space(isl_space_copy(Space));
    isl_pw_aff *Var =
        isl_pw_aff_var_on_domain(isl_local_space_copy(LS), isl_dim_set, i);
    isl_pw_aff *Zero = isl_pw_aff_zero_on_domain(LS);

    isl_set *DimOutside;

    DimOutside = isl_pw_aff_lt_set(isl_pw_aff_copy(Var), Zero);
    isl_pw_aff *SizeE = SAI->getDimensionSizePw(i);
    SizeE = isl_pw_aff_add_dims(SizeE, isl_dim_in,
                                isl_space_dim(Space, isl_dim_set));
    SizeE = isl_pw_aff_set_tuple_id(SizeE, isl_dim_in,
                                    isl_space_get_tuple_id(Space, isl_dim_set));

    DimOutside = isl_set_union(DimOutside, isl_pw_aff_le_set(SizeE, Var));

    Outside = isl_set_union(Outside, DimOutside);
  }

  Outside = isl_set_apply(Outside, isl_map_reverse(getAccessRelation()));
  Outside = isl_set_intersect(Outside, Statement->getDomain());
  Outside = isl_set_params(Outside);

  // Remove divs to avoid the construction of overly complicated assumptions.
  // Doing so increases the set of parameter combinations that are assumed to
  // not appear. This is always safe, but may make the resulting run-time
  // check bail out more often than strictly necessary.
  Outside = isl_set_remove_divs(Outside);
  Outside = isl_set_complement(Outside);
  Statement->getParent()->addAssumption(
      INBOUNDS, Outside,
      getAccessInstruction() ? getAccessInstruction()->getDebugLoc() : nullptr);
  isl_space_free(Space);
}

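/// @brief Tighten the access relation using SCEV's signed range of the
/// accessed pointer.
///
/// The byte-offset range relative to the base pointer is divided by
/// @p ElementSize and intersected with the range of the access relation;
/// sign-wrapped ranges are handled by addRangeBoundsToSet.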
void MemoryAccess::computeBoundsOnAccessRelation(unsigned ElementSize) {
  ScalarEvolution *SE = Statement->getParent()->getSE();

  Value *Ptr = MemAccInst(getAccessInstruction()).getPointerOperand();
  if (!Ptr || !SE->isSCEVable(Ptr->getType()))
    return;

  auto *PtrSCEV = SE->getSCEV(Ptr);
  if (isa<SCEVCouldNotCompute>(PtrSCEV))
    return;

  auto *BasePtrSCEV = SE->getPointerBase(PtrSCEV);
  if (BasePtrSCEV && !isa<SCEVCouldNotCompute>(BasePtrSCEV))
    PtrSCEV = SE->getMinusSCEV(PtrSCEV, BasePtrSCEV);

  const ConstantRange &Range = SE->getSignedRange(PtrSCEV);
  if (Range.isFullSet())
    return;

  bool isWrapping = Range.isSignWrappedSet();
  unsigned BW = Range.getBitWidth();
  const auto One = APInt(BW, 1);
  const auto LB = isWrapping ? Range.getLower() : Range.getSignedMin();
  const auto UB = isWrapping ? (Range.getUpper() - One) : Range.getSignedMax();

  auto Min = LB.sdiv(APInt(BW, ElementSize));
  auto Max = UB.sdiv(APInt(BW, ElementSize)) + One;

  isl_set *AccessRange = isl_map_range(isl_map_copy(AccessRelation));
  AccessRange =
      addRangeBoundsToSet(AccessRange, ConstantRange(Min, Max), 0, isl_dim_set);
  AccessRelation = isl_map_intersect_range(AccessRelation, AccessRange);
}

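/// @brief Fold negative inner subscripts into the next outer dimension.
///
/// For each dimension i+1, instances with a negative subscript are remapped
/// by (roughly) borrowing one element from dimension i and adding the
/// parametric size of dimension i+1, so that delinearized subscripts such as
/// A[i][j - 1] remain within the modeled bounds.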
__isl_give isl_map *MemoryAccess::foldAccess(__isl_take isl_map *AccessRelation,
                                             ScopStmt *Statement) {
  int Size = Subscripts.size();

  for (int i = Size - 2; i >= 0; --i) {
    isl_space *Space;
    isl_map *MapOne, *MapTwo;
    isl_pw_aff *DimSize = Statement->getPwAff(Sizes[i]);

    isl_space *SpaceSize = isl_pw_aff_get_space(DimSize);
    isl_pw_aff_free(DimSize);
    isl_id *ParamId = isl_space_get_dim_id(SpaceSize, isl_dim_param, 0);

    Space = isl_map_get_space(AccessRelation);
    Space = isl_space_map_from_set(isl_space_range(Space));
    Space = isl_space_align_params(Space, SpaceSize);

    int ParamLocation = isl_space_find_dim_by_id(Space, isl_dim_param, ParamId);
    isl_id_free(ParamId);

    MapOne = isl_map_universe(isl_space_copy(Space));
    for (int j = 0; j < Size; ++j)
      MapOne = isl_map_equate(MapOne, isl_dim_in, j, isl_dim_out, j);
    MapOne = isl_map_lower_bound_si(MapOne, isl_dim_in, i + 1, 0);

    MapTwo = isl_map_universe(isl_space_copy(Space));
    for (int j = 0; j < Size; ++j)
      if (j < i || j > i + 1)
        MapTwo = isl_map_equate(MapTwo, isl_dim_in, j, isl_dim_out, j);

    isl_local_space *LS = isl_local_space_from_space(Space);
    isl_constraint *C;
    C = isl_equality_alloc(isl_local_space_copy(LS));
    C = isl_constraint_set_constant_si(C, -1);
    C = isl_constraint_set_coefficient_si(C, isl_dim_in, i, 1);
    C = isl_constraint_set_coefficient_si(C, isl_dim_out, i, -1);
    MapTwo = isl_map_add_constraint(MapTwo, C);
    C = isl_equality_alloc(LS);
    C = isl_constraint_set_coefficient_si(C, isl_dim_in, i + 1, 1);
    C = isl_constraint_set_coefficient_si(C, isl_dim_out, i + 1, -1);
    C = isl_constraint_set_coefficient_si(C, isl_dim_param, ParamLocation, 1);
    MapTwo = isl_map_add_constraint(MapTwo, C);
    MapTwo = isl_map_upper_bound_si(MapTwo, isl_dim_in, i + 1, -1);

    MapOne = isl_map_union(MapOne, MapTwo);
    AccessRelation = isl_map_apply_range(AccessRelation, MapOne);
  }
  return AccessRelation;
}

/// @brief Check if @p Expr is divisible by @p Size.
static bool isDivisible(const SCEV *Expr, unsigned Size, ScalarEvolution &SE) {

  // Only one factor needs to be divisible.
  if (auto *MulExpr = dyn_cast<SCEVMulExpr>(Expr)) {
    for (auto *FactorExpr : MulExpr->operands())
      if (isDivisible(FactorExpr, Size, SE))
        return true;
    return false;
  }

  // For other n-ary expressions (Add, AddRec, Max,...) all operands need
  // to be divisible.
  if (auto *NAryExpr = dyn_cast<SCEVNAryExpr>(Expr)) {
    for (auto *OpExpr : NAryExpr->operands())
      if (!isDivisible(OpExpr, Size, SE))
        return false;
    return true;
  }

  auto *SizeSCEV = SE.getConstant(Expr->getType(), Size);
  auto *UDivSCEV = SE.getUDivExpr(Expr, SizeSCEV);
  auto *MulSCEV = SE.getMulExpr(UDivSCEV, SizeSCEV);
  return MulSCEV == Expr;
}

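// Example: with Size == 4, isDivisible returns true for '8 * %n' because one
// factor (8) is divisible, and false for '4 * %n + 2' because the constant
// operand 2 of the addition is not a multiple of 4.
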
void MemoryAccess::buildAccessRelation(const ScopArrayInfo *SAI) {
  assert(!AccessRelation && "AccessRelation already built");

  isl_ctx *Ctx = isl_id_get_ctx(Id);
  isl_id *BaseAddrId = SAI->getBasePtrId();

  if (!isAffine()) {
    // We overapproximate non-affine accesses with a possible access to the
    // whole array. For read accesses it does not make a difference whether an
    // access must or may happen. However, for write accesses it is important
    // to differentiate between writes that must happen and writes that may
    // happen.
    AccessRelation = isl_map_from_basic_map(createBasicAccessMap(Statement));
    AccessRelation =
        isl_map_set_tuple_id(AccessRelation, isl_dim_out, BaseAddrId);

    computeBoundsOnAccessRelation(getElemSizeInBytes());
    return;
  }

  Scop &S = *getStatement()->getParent();
  isl_space *Space = isl_space_alloc(Ctx, 0, Statement->getNumIterators(), 0);
  AccessRelation = isl_map_universe(Space);

  for (int i = 0, Size = Subscripts.size(); i < Size; ++i) {
    isl_pw_aff *Affine = Statement->getPwAff(Subscripts[i]);

    if (Size == 1) {
      // For the non-delinearized arrays, divide the access function of the
      // last subscript by the size of the elements in the array.
      //
      // A stride one array access in C expressed as A[i] is expressed in
      // LLVM-IR as something like A[i * elementsize]. This hides the fact that
      // two subsequent values of 'i' index two values that are stored next to
      // each other in memory. By this division we make this characteristic
      // obvious again. However, if the index is not divisible by the element
      // size we will bail out.
      isl_val *v = isl_val_int_from_si(Ctx, getElemSizeInBytes());
      Affine = isl_pw_aff_scale_down_val(Affine, v);

      if (!isDivisible(Subscripts[0], getElemSizeInBytes(), *S.getSE()))
        S.invalidate(ALIGNMENT, AccessInstruction->getDebugLoc());
    }

    isl_map *SubscriptMap = isl_map_from_pw_aff(Affine);

    AccessRelation = isl_map_flat_range_product(AccessRelation, SubscriptMap);
  }

  if (Sizes.size() >= 1 && !isa<SCEVConstant>(Sizes[0]))
    AccessRelation = foldAccess(AccessRelation, Statement);

  Space = Statement->getDomainSpace();
  AccessRelation = isl_map_set_tuple_id(
      AccessRelation, isl_dim_in, isl_space_get_tuple_id(Space, isl_dim_set));
  AccessRelation =
      isl_map_set_tuple_id(AccessRelation, isl_dim_out, BaseAddrId);

  AccessRelation = isl_map_gist_domain(AccessRelation, Statement->getDomain());
  isl_space_free(Space);
}

MemoryAccess::MemoryAccess(ScopStmt *Stmt, Instruction *AccessInst,
                           AccessType Type, Value *BaseAddress,
                           unsigned ElemBytes, bool Affine,
                           ArrayRef<const SCEV *> Subscripts,
                           ArrayRef<const SCEV *> Sizes, Value *AccessValue,
                           ScopArrayInfo::MemoryKind Kind, StringRef BaseName)
    : Kind(Kind), AccType(Type), RedType(RT_NONE), Statement(Stmt),
      BaseAddr(BaseAddress), BaseName(BaseName), ElemBytes(ElemBytes),
      Sizes(Sizes.begin(), Sizes.end()), AccessInstruction(AccessInst),
      AccessValue(AccessValue), IsAffine(Affine),
      Subscripts(Subscripts.begin(), Subscripts.end()), AccessRelation(nullptr),
      NewAccessRelation(nullptr) {

  std::string IdName = "__polly_array_ref";
  Id = isl_id_alloc(Stmt->getParent()->getIslCtx(), IdName.c_str(), this);
}


void MemoryAccess::realignParams() {
  isl_space *ParamSpace = Statement->getParent()->getParamSpace();
  AccessRelation = isl_map_align_params(AccessRelation, ParamSpace);
}

const std::string MemoryAccess::getReductionOperatorStr() const {
  return MemoryAccess::getReductionOperatorStr(getReductionType());
}

__isl_give isl_id *MemoryAccess::getId() const { return isl_id_copy(Id); }

raw_ostream &polly::operator<<(raw_ostream &OS,
                               MemoryAccess::ReductionType RT) {
  if (RT == MemoryAccess::RT_NONE)
    OS << "NONE";
  else
    OS << MemoryAccess::getReductionOperatorStr(RT);
  return OS;
}

void MemoryAccess::print(raw_ostream &OS) const {
  switch (AccType) {
  case READ:
    OS.indent(12) << "ReadAccess :=\t";
    break;
  case MUST_WRITE:
    OS.indent(12) << "MustWriteAccess :=\t";
    break;
  case MAY_WRITE:
    OS.indent(12) << "MayWriteAccess :=\t";
    break;
  }
  OS << "[Reduction Type: " << getReductionType() << "] ";
  OS << "[Scalar: " << isScalarKind() << "]\n";
  OS.indent(16) << getOriginalAccessRelationStr() << ";\n";
  if (hasNewAccessRelation())
    OS.indent(11) << "new: " << getNewAccessRelationStr() << ";\n";
}

void MemoryAccess::dump() const { print(errs()); }

// Create a map in the space of the provided set domain that maps each
// element of the set domain to another element of the same set domain.
// The mapping is limited to all points that are equal in all but the last
// dimension and for which the last dimension of the input is strictly
// smaller than the last dimension of the output.
//
//   getEqualAndLarger(set[i0, i1, ..., iX]):
//
//   set[i0, i1, ..., iX] -> set[o0, o1, ..., oX]
//     : i0 = o0, i1 = o1, ..., i(X-1) = o(X-1), iX < oX
//
static isl_map *getEqualAndLarger(isl_space *setDomain) {
  isl_space *Space = isl_space_map_from_set(setDomain);
  isl_map *Map = isl_map_universe(Space);
  unsigned lastDimension = isl_map_dim(Map, isl_dim_in) - 1;

  // Set all but the last dimension to be equal for the input and output
  //
  //   input[i0, i1, ..., iX] -> output[o0, o1, ..., oX]
  //     : i0 = o0, i1 = o1, ..., i(X-1) = o(X-1)
  for (unsigned i = 0; i < lastDimension; ++i)
    Map = isl_map_equate(Map, isl_dim_in, i, isl_dim_out, i);

  // Set the last dimension of the input to be strictly smaller than the
  // last dimension of the output.
  //
  //   input[?,?,?,...,iX] -> output[?,?,?,...,oX] : iX < oX
  Map = isl_map_order_lt(Map, isl_dim_in, lastDimension, isl_dim_out,
                         lastDimension);
  return Map;
}
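
// For illustration, in a hypothetical two-dimensional schedule space this
// yields
//   { set[i0, i1] -> set[o0, o1] : i0 = o0 and i1 < o1 },
// i.e. all pairs of points that agree on every dimension except the
// innermost one, where the output point comes strictly later.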

__isl_give isl_set *
MemoryAccess::getStride(__isl_take const isl_map *Schedule) const {
  isl_map *S = const_cast<isl_map *>(Schedule);
  isl_map *AccessRelation = getAccessRelation();
  isl_space *Space = isl_space_range(isl_map_get_space(S));
  isl_map *NextScatt = getEqualAndLarger(Space);

  S = isl_map_reverse(S);
  NextScatt = isl_map_lexmin(NextScatt);

  NextScatt = isl_map_apply_range(NextScatt, isl_map_copy(S));
  NextScatt = isl_map_apply_range(NextScatt, isl_map_copy(AccessRelation));
  NextScatt = isl_map_apply_domain(NextScatt, S);
  NextScatt = isl_map_apply_domain(NextScatt, AccessRelation);

  isl_set *Deltas = isl_map_deltas(NextScatt);
  return Deltas;
}
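
// A sketch of the result (hypothetical access): for the schedule
// { Stmt[i] -> [i] } and the access relation of A[2i], two lexicographically
// consecutive schedule points access elements at distance two, so the
// returned delta set is { [2] }; for an access to a fixed element such as
// A[5] it is { [0] }.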

bool MemoryAccess::isStrideX(__isl_take const isl_map *Schedule,
                             int StrideWidth) const {
  isl_set *Stride, *StrideX;
  bool IsStrideX;

  Stride = getStride(Schedule);
  StrideX = isl_set_universe(isl_set_get_space(Stride));
  for (unsigned i = 0; i < isl_set_dim(StrideX, isl_dim_set) - 1; i++)
    StrideX = isl_set_fix_si(StrideX, isl_dim_set, i, 0);
  StrideX = isl_set_fix_si(StrideX, isl_dim_set,
                           isl_set_dim(StrideX, isl_dim_set) - 1, StrideWidth);
  IsStrideX = isl_set_is_subset(Stride, StrideX);

  isl_set_free(StrideX);
  isl_set_free(Stride);

  return IsStrideX;
}

bool MemoryAccess::isStrideZero(const isl_map *Schedule) const {
  return isStrideX(Schedule, 0);
}

bool MemoryAccess::isStrideOne(const isl_map *Schedule) const {
  return isStrideX(Schedule, 1);
}
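
// Usage sketch (hypothetical caller): the schedule passed in is consumed, as
// the __isl_take annotation on isStrideX indicates.
//   isl_map *Sched = MA->getStatement()->getSchedule();
//   if (MA->isStrideOne(Sched))
//     ; // consecutive elements per schedule step, e.g. vectorizable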

void MemoryAccess::setNewAccessRelation(isl_map *NewAccess) {
  isl_map_free(NewAccessRelation);
  NewAccessRelation = NewAccess;
}

//===----------------------------------------------------------------------===//

isl_map *ScopStmt::getSchedule() const {
  isl_set *Domain = getDomain();
  if (isl_set_is_empty(Domain)) {
    isl_set_free(Domain);
    return isl_map_from_aff(
        isl_aff_zero_on_domain(isl_local_space_from_space(getDomainSpace())));
  }
  auto *Schedule = getParent()->getSchedule();
  Schedule = isl_union_map_intersect_domain(
      Schedule, isl_union_set_from_set(isl_set_copy(Domain)));
  if (isl_union_map_is_empty(Schedule)) {
    isl_set_free(Domain);
    isl_union_map_free(Schedule);
    return isl_map_from_aff(
        isl_aff_zero_on_domain(isl_local_space_from_space(getDomainSpace())));
  }
  auto *M = isl_map_from_union_map(Schedule);
  M = isl_map_coalesce(M);
  M = isl_map_gist_domain(M, Domain);
  M = isl_map_coalesce(M);
  return M;
}
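
// For example (a sketch): for a statement with domain
// { Stmt_bb[i] : 0 <= i < N } and a SCoP schedule containing
// { Stmt_bb[i] -> [i, 0] }, this returns { Stmt_bb[i] -> [i, 0] },
// coalesced and simplified with respect to the statement's domain.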

__isl_give isl_pw_aff *ScopStmt::getPwAff(const SCEV *E) {
  return getParent()->getPwAff(E, isBlockStmt() ? getBasicBlock()
                                                : getRegion()->getEntry());
}

void ScopStmt::restrictDomain(__isl_take isl_set *NewDomain) {
  assert(isl_set_is_subset(NewDomain, Domain) &&
         "New domain is not a subset of old domain!");
  isl_set_free(Domain);
  Domain = NewDomain;
}

void ScopStmt::buildAccessRelations() {
  Scop &S = *getParent();
  for (MemoryAccess *Access : MemAccs) {
    Type *ElementType = Access->getAccessValue()->getType();

    ScopArrayInfo::MemoryKind Ty;
    if (Access->isPHIKind())
      Ty = ScopArrayInfo::MK_PHI;
    else if (Access->isExitPHIKind())
      Ty = ScopArrayInfo::MK_ExitPHI;
    else if (Access->isValueKind())
      Ty = ScopArrayInfo::MK_Value;
    else
      Ty = ScopArrayInfo::MK_Array;

    auto *SAI = S.getOrCreateScopArrayInfo(Access->getBaseAddr(), ElementType,
                                           Access->Sizes, Ty);
    Access->buildAccessRelation(SAI);
  }
}

void ScopStmt::addAccess(MemoryAccess *Access) {
  Instruction *AccessInst = Access->getAccessInstruction();

  if (Access->isArrayKind()) {
    MemoryAccessList &MAL = InstructionToAccess[AccessInst];
    MAL.emplace_front(Access);
  } else if (Access->isValueKind() && Access->isWrite()) {
    Instruction *AccessVal = cast<Instruction>(Access->getAccessValue());
    assert(Parent.getStmtForBasicBlock(AccessVal->getParent()) == this);
    assert(!ValueWrites.lookup(AccessVal));

    ValueWrites[AccessVal] = Access;
  } else if (Access->isValueKind() && Access->isRead()) {
    Value *AccessVal = Access->getAccessValue();
    assert(!ValueReads.lookup(AccessVal));

    ValueReads[AccessVal] = Access;
  } else if (Access->isAnyPHIKind() && Access->isWrite()) {
    PHINode *PHI = cast<PHINode>(Access->getBaseAddr());
    assert(!PHIWrites.lookup(PHI));

    PHIWrites[PHI] = Access;
  }

  MemAccs.push_back(Access);
}

void ScopStmt::realignParams() {
  for (MemoryAccess *MA : *this)
    MA->realignParams();

  Domain = isl_set_align_params(Domain, Parent.getParamSpace());
}

/// @brief Add @p BSet to the set @p User if @p BSet is bounded.
static isl_stat collectBoundedParts(__isl_take isl_basic_set *BSet,
                                    void *User) {
  isl_set **BoundedParts = static_cast<isl_set **>(User);
  if (isl_basic_set_is_bounded(BSet))
    *BoundedParts = isl_set_union(*BoundedParts, isl_set_from_basic_set(BSet));
  else
    isl_basic_set_free(BSet);
  return isl_stat_ok;
}

/// @brief Return the bounded parts of @p S.
static __isl_give isl_set *collectBoundedParts(__isl_take isl_set *S) {
  isl_set *BoundedParts = isl_set_empty(isl_set_get_space(S));
  isl_set_foreach_basic_set(S, collectBoundedParts, &BoundedParts);
  isl_set_free(S);
  return BoundedParts;
}
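
// For example (hypothetical basic sets): { [i] : 0 <= i < N } is bounded,
// because the parameter N acts as a symbolic constant, and is kept;
// { [i] : 0 <= i } is unbounded and is dropped.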

/// @brief Compute the (un)bounded parts of @p S with respect to dimension
///        @p Dim.
///
/// @returns A separation of @p S into first an unbounded then a bounded
///          subset, both with regards to the dimension @p Dim.
static std::pair<__isl_give isl_set *, __isl_give isl_set *>
partitionSetParts(__isl_take isl_set *S, unsigned Dim) {
  for (unsigned u = 0, e = isl_set_n_dim(S); u < e; u++)
    S = isl_set_lower_bound_si(S, isl_dim_set, u, 0);

  unsigned NumDimsS = isl_set_n_dim(S);
  isl_set *OnlyDimS = isl_set_copy(S);

  // Remove dimensions that are greater than Dim as they are not interesting.
  assert(NumDimsS >= Dim + 1);
  OnlyDimS =
      isl_set_project_out(OnlyDimS, isl_dim_set, Dim + 1, NumDimsS - Dim - 1);

  // Create artificial parametric upper bounds for dimensions smaller than Dim
  // as we are not interested in them.
  OnlyDimS = isl_set_insert_dims(OnlyDimS, isl_dim_param, 0, Dim);
  for (unsigned u = 0; u < Dim; u++) {
    isl_constraint *C = isl_inequality_alloc(
        isl_local_space_from_space(isl_set_get_space(OnlyDimS)));
    C = isl_constraint_set_coefficient_si(C, isl_dim_param, u, 1);
    C = isl_constraint_set_coefficient_si(C, isl_dim_set, u, -1);
    OnlyDimS = isl_set_add_constraint(OnlyDimS, C);
  }

  // Collect all bounded parts of OnlyDimS.
  isl_set *BoundedParts = collectBoundedParts(OnlyDimS);

  // Create the dimensions greater than Dim again.
  BoundedParts = isl_set_insert_dims(BoundedParts, isl_dim_set, Dim + 1,
                                     NumDimsS - Dim - 1);

  // Remove the artificial upper bound parameters again.
  BoundedParts = isl_set_remove_dims(BoundedParts, isl_dim_param, 0, Dim);

  isl_set *UnboundedParts = isl_set_subtract(S, isl_set_copy(BoundedParts));
  return std::make_pair(UnboundedParts, BoundedParts);
}
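
// A sketch of the behavior (hypothetical set): for
// S = { [i, j] : 0 <= i and 0 <= j <= M } and Dim = 1 the inner dimension is
// bounded by the parameter M, so the pair (empty, S) is returned; without
// the j <= M constraint the pair (S, empty) is returned instead.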

/// @brief Set the dimension Ids from @p From in @p To.
static __isl_give isl_set *setDimensionIds(__isl_keep isl_set *From,
                                           __isl_take isl_set *To) {
  for (unsigned u = 0, e = isl_set_n_dim(From); u < e; u++) {
    isl_id *DimId = isl_set_get_dim_id(From, isl_dim_set, u);
    To = isl_set_set_dim_id(To, isl_dim_set, u, DimId);
  }
  return To;
}

/// @brief Create the conditions under which @p L @p Pred @p R is true.
static __isl_give isl_set *buildConditionSet(ICmpInst::Predicate Pred,
                                             __isl_take isl_pw_aff *L,
                                             __isl_take isl_pw_aff *R) {
  switch (Pred) {
  case ICmpInst::ICMP_EQ:
    return isl_pw_aff_eq_set(L, R);
  case ICmpInst::ICMP_NE:
    return isl_pw_aff_ne_set(L, R);
  case ICmpInst::ICMP_SLT:
    return isl_pw_aff_lt_set(L, R);
  case ICmpInst::ICMP_SLE:
    return isl_pw_aff_le_set(L, R);
  case ICmpInst::ICMP_SGT:
    return isl_pw_aff_gt_set(L, R);
  case ICmpInst::ICMP_SGE:
    return isl_pw_aff_ge_set(L, R);
  case ICmpInst::ICMP_ULT:
    return isl_pw_aff_lt_set(L, R);
  case ICmpInst::ICMP_UGT:
    return isl_pw_aff_gt_set(L, R);
  case ICmpInst::ICMP_ULE:
    return isl_pw_aff_le_set(L, R);
  case ICmpInst::ICMP_UGE:
    return isl_pw_aff_ge_set(L, R);
  default:
    llvm_unreachable("Non integer predicate not supported");
  }
}
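
// For example (hypothetical affine functions): with L = { [i] -> [(i)] } and
// R = { [i] -> [(100)] }, buildConditionSet(ICmpInst::ICMP_SLT, L, R)
// produces { [i] : i < 100 }.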

/// @brief Create the conditions under which @p L @p Pred @p R is true.
///
/// Helper function that will make sure the dimensions of the result have the
/// same isl_id's as the @p Domain.
static __isl_give isl_set *buildConditionSet(ICmpInst::Predicate Pred,
                                             __isl_take isl_pw_aff *L,
                                             __isl_take isl_pw_aff *R,
                                             __isl_keep isl_set *Domain) {
  isl_set *ConsequenceCondSet = buildConditionSet(Pred, L, R);
  return setDimensionIds(Domain, ConsequenceCondSet);
}

/// @brief Build the condition sets for the switch @p SI in the @p Domain.
///
/// This will fill @p ConditionSets with the conditions under which control
/// will be moved from @p SI to its successors. Hence, @p ConditionSets will
/// have as many elements as @p SI has successors.
static void
buildConditionSets(Scop &S, SwitchInst *SI, Loop *L, __isl_keep isl_set *Domain,
                   SmallVectorImpl<__isl_give isl_set *> &ConditionSets) {

  Value *Condition = getConditionFromTerminator(SI);
  assert(Condition && "No condition for switch");

  ScalarEvolution &SE = *S.getSE();
  BasicBlock *BB = SI->getParent();
  isl_pw_aff *LHS, *RHS;
  LHS = S.getPwAff(SE.getSCEVAtScope(Condition, L), BB);

  unsigned NumSuccessors = SI->getNumSuccessors();
  ConditionSets.resize(NumSuccessors);
  for (auto &Case : SI->cases()) {
    unsigned Idx = Case.getSuccessorIndex();
    ConstantInt *CaseValue = Case.getCaseValue();

    RHS = S.getPwAff(SE.getSCEV(CaseValue), BB);
    isl_set *CaseConditionSet =
        buildConditionSet(ICmpInst::ICMP_EQ, isl_pw_aff_copy(LHS), RHS, Domain);
    ConditionSets[Idx] = isl_set_coalesce(
        isl_set_intersect(CaseConditionSet, isl_set_copy(Domain)));
  }

  assert(ConditionSets[0] == nullptr && "Default condition set was set");
  isl_set *ConditionSetUnion = isl_set_copy(ConditionSets[1]);
  for (unsigned u = 2; u < NumSuccessors; u++)
    ConditionSetUnion =
        isl_set_union(ConditionSetUnion, isl_set_copy(ConditionSets[u]));
  ConditionSets[0] = setDimensionIds(
      Domain, isl_set_subtract(isl_set_copy(Domain), ConditionSetUnion));

  S.markAsOptimized();
  isl_pw_aff_free(LHS);
}
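
// For illustration (hypothetical IR): for a switch over the value i with
// cases 1 and 2 and a default successor, assuming the domain { [i] }, the
// case successors receive
//   ConditionSets[1] = { [i] : i = 1 } and ConditionSets[2] = { [i] : i = 2 },
// while the default successor (index 0) receives the domain minus the union
// of all case condition sets.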

/// @brief Build the condition sets for the branch condition @p Condition in
/// the @p Domain.
///
/// This will fill @p ConditionSets with the conditions under which control
/// will be moved from @p TI to its successors. Hence, @p ConditionSets will
/// have as many elements as @p TI has successors. If @p TI is nullptr the
/// context under which @p Condition is true/false will be returned as the
/// new elements of @p ConditionSets.
static void
buildConditionSets(Scop &S, Value *Condition, TerminatorInst *TI, Loop *L,
                   __isl_keep isl_set *Domain,
                   SmallVectorImpl<__isl_give isl_set *> &ConditionSets) {

  isl_set *ConsequenceCondSet = nullptr;
  if (auto *CCond = dyn_cast<ConstantInt>(Condition)) {
    if (CCond->isZero())
      ConsequenceCondSet = isl_set_empty(isl_set_get_space(Domain));
    else
      ConsequenceCondSet = isl_set_universe(isl_set_get_space(Domain));
  } else if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Condition)) {
    auto Opcode = BinOp->getOpcode();
    assert(Opcode == Instruction::And || Opcode == Instruction::Or);

    buildConditionSets(S, BinOp->getOperand(0), TI, L, Domain, ConditionSets);
    buildConditionSets(S, BinOp->getOperand(1), TI, L, Domain, ConditionSets);

    isl_set_free(ConditionSets.pop_back_val());
    isl_set *ConsCondPart0 = ConditionSets.pop_back_val();
    isl_set_free(ConditionSets.pop_back_val());
    isl_set *ConsCondPart1 = ConditionSets.pop_back_val();

    if (Opcode == Instruction::And)
      ConsequenceCondSet = isl_set_intersect(ConsCondPart0, ConsCondPart1);
    else
      ConsequenceCondSet = isl_set_union(ConsCondPart0, ConsCondPart1);
  } else {
    auto *ICond = dyn_cast<ICmpInst>(Condition);
    assert(ICond &&
           "Condition of exiting branch was neither constant nor ICmp!");

    ScalarEvolution &SE = *S.getSE();
    BasicBlock *BB = TI ? TI->getParent() : nullptr;
    isl_pw_aff *LHS, *RHS;
    LHS = S.getPwAff(SE.getSCEVAtScope(ICond->getOperand(0), L), BB);
    RHS = S.getPwAff(SE.getSCEVAtScope(ICond->getOperand(1), L), BB);
    ConsequenceCondSet =
        buildConditionSet(ICond->getPredicate(), LHS, RHS, Domain);
  }

  // If no terminator was given we are only looking for parameter constraints
  // under which @p Condition is true/false.
  if (!TI)
    ConsequenceCondSet = isl_set_params(ConsequenceCondSet);

  assert(ConsequenceCondSet);
  isl_set *AlternativeCondSet =
      isl_set_complement(isl_set_copy(ConsequenceCondSet));

  ConditionSets.push_back(isl_set_coalesce(
      isl_set_intersect(ConsequenceCondSet, isl_set_copy(Domain))));
  ConditionSets.push_back(isl_set_coalesce(
      isl_set_intersect(AlternativeCondSet, isl_set_copy(Domain))));
}
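
// For example (hypothetical condition): for the branch condition
// (i < N && j > 0) over the domain { [i, j] }, the two recursive calls yield
// { [i, j] : i < N } and { [i, j] : j > 0 }; the consequence set is their
// intersection and the alternative set its complement, each intersected with
// the domain before being appended to ConditionSets.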

/// @brief Build the condition sets for the terminator @p TI in the @p Domain.
///
/// This will fill @p ConditionSets with the conditions under which control
/// will be moved from @p TI to its successors. Hence, @p ConditionSets will
/// have as many elements as @p TI has successors.
static void
buildConditionSets(Scop &S, TerminatorInst *TI, Loop *L,
                   __isl_keep isl_set *Domain,
                   SmallVectorImpl<__isl_give isl_set *> &ConditionSets) {

  if (SwitchInst *SI = dyn_cast<SwitchInst>(TI))
    return buildConditionSets(S, SI, L, Domain, ConditionSets);

  assert(isa<BranchInst>(TI) && "Terminator was neither branch nor switch.");

  if (TI->getNumSuccessors() == 1) {
    ConditionSets.push_back(isl_set_copy(Domain));
    return;
  }

  Value *Condition = getConditionFromTerminator(TI);
  assert(Condition && "No condition for Terminator");

  return buildConditionSets(S, Condition, TI, L, Domain, ConditionSets);
}

void ScopStmt::buildDomain() {
  isl_id *Id;

  Id = isl_id_alloc(getIslCtx(), getBaseName(), this);

  Domain = getParent()->getDomainConditions(this);
  Domain = isl_set_set_tuple_id(Domain, Id);
}

/// @brief Derive assumptions about the parameters from a GEP instruction.
///
/// If a GEP instruction references into a fixed-size array, e.g. an access
/// A[i][j] into an array A[100x100], LLVM-IR does not guarantee that the
/// subscripts always compute values that are within array bounds. We
/// therefore derive the set of parameter values for which all accesses are
/// within bounds and assume that the scop is only ever executed with this
/// set of parameter values. For
///
///   void foo(float A[][20], long n, long m) {
///     for (long i = 0; i < n; i++)
///       for (long j = 0; j < m; j++)
///         A[i][j] = ...
///
/// the accesses stay within bounds only if m is at most 20 or the outer loop
/// never executes, hence we assume: n <= 0 or m <= 20.
void ScopStmt::deriveAssumptionsFromGEP(GetElementPtrInst *GEP) {
  isl_ctx *Ctx = Parent.getIslCtx();
  isl_local_space *LSpace = isl_local_space_from_space(getDomainSpace());
  Type *Ty = GEP->getPointerOperandType();
  ScalarEvolution &SE = *Parent.getSE();
  ScopDetection &SD = Parent.getSD();

  // The set of loads that are required to be invariant.
  auto &ScopRIL = *SD.getRequiredInvariantLoads(&Parent.getRegion());

  std::vector<const SCEV *> Subscripts;
  std::vector<int> Sizes;

  std::tie(Subscripts, Sizes) = getIndexExpressionsFromGEP(GEP, SE);

  if (auto *PtrTy = dyn_cast<PointerType>(Ty)) {
    Ty = PtrTy->getElementType();
  }

  int IndexOffset = Subscripts.size() - Sizes.size();

  assert(IndexOffset <= 1 && "Unexpected large index offset");

  for (size_t i = 0; i < Sizes.size(); i++) {
    auto Expr = Subscripts[i + IndexOffset];
    auto Size = Sizes[i];

    InvariantLoadsSetTy AccessILS;
    if (!isAffineExpr(&Parent.getRegion(), Expr, SE, nullptr, &AccessILS))
      continue;

    bool NonAffine = false;
    for (LoadInst *LInst : AccessILS)
      if (!ScopRIL.count(LInst))
        NonAffine = true;

    if (NonAffine)
      continue;

    isl_pw_aff *AccessOffset = getPwAff(Expr);
    AccessOffset =
        isl_pw_aff_set_tuple_id(AccessOffset, isl_dim_in, getDomainId());

    isl_pw_aff *DimSize = isl_pw_aff_from_aff(isl_aff_val_on_domain(
        isl_local_space_copy(LSpace), isl_val_int_from_si(Ctx, Size)));

    isl_set *OutOfBound = isl_pw_aff_ge_set(AccessOffset, DimSize);
    OutOfBound = isl_set_intersect(getDomain(), OutOfBound);
    OutOfBound = isl_set_params(OutOfBound);
    isl_set *InBound = isl_set_complement(OutOfBound);
    isl_set *Executed = isl_set_params(getDomain());

    // A => B == !A or B
    isl_set *InBoundIfExecuted =
        isl_set_union(isl_set_complement(Executed), InBound);

    InBoundIfExecuted = isl_set_coalesce(InBoundIfExecuted);
    Parent.addAssumption(INBOUNDS, InBoundIfExecuted, GEP->getDebugLoc());
  }

  isl_local_space_free(LSpace);
}

void ScopStmt::deriveAssumptions(BasicBlock *Block) {
  for (Instruction &Inst : *Block)
    if (auto *GEP = dyn_cast<GetElementPtrInst>(&Inst))
      deriveAssumptionsFromGEP(GEP);
}

void ScopStmt::collectSurroundingLoops() {
  for (unsigned u = 0, e = isl_set_n_dim(Domain); u < e; u++) {
    isl_id *DimId = isl_set_get_dim_id(Domain, isl_dim_set, u);
    NestLoops.push_back(static_cast<Loop *>(isl_id_get_user(DimId)));
    isl_id_free(DimId);
  }
}

ScopStmt::ScopStmt(Scop &parent, Region &R)
    : Parent(parent), Domain(nullptr), BB(nullptr), R(&R), Build(nullptr) {

  BaseName = getIslCompatibleName("Stmt_", R.getNameStr(), "");
}

ScopStmt::ScopStmt(Scop &parent, BasicBlock &bb)
    : Parent(parent), Domain(nullptr), BB(&bb), R(nullptr), Build(nullptr) {

  BaseName = getIslCompatibleName("Stmt_", &bb, "");
}

void ScopStmt::init() {
  assert(!Domain && "init must be called only once");

  buildDomain();
  collectSurroundingLoops();
  buildAccessRelations();

  if (BB) {
    deriveAssumptions(BB);
  } else {
    for (BasicBlock *Block : R->blocks()) {
      deriveAssumptions(Block);
    }
  }

  if (DetectReductions)
    checkForReductions();
}

/// @brief Collect loads which might form a reduction chain with @p StoreMA.
///
/// Check if the stored value for @p StoreMA is a binary operator with one or
/// two loads as operands. If the binary operator is commutative & associative,
/// used only once (by @p StoreMA) and its load operands are also used only
/// once, we have found a possible reduction chain. It starts at an operand
/// load and includes the binary operator and @p StoreMA.
///
/// Note: We allow only one use to ensure the load and binary operator cannot
/// escape this block or into any other store except @p StoreMA.
void ScopStmt::collectCandiateReductionLoads(
    MemoryAccess *StoreMA, SmallVectorImpl<MemoryAccess *> &Loads) {
  auto *Store = dyn_cast<StoreInst>(StoreMA->getAccessInstruction());
  if (!Store)
    return;

  // Skip if there is not exactly one binary operator between the load and
  // the store.
  auto *BinOp = dyn_cast<BinaryOperator>(Store->getValueOperand());
  if (!BinOp)
    return;

  // Skip if the binary operator has multiple uses.
  if (BinOp->getNumUses() != 1)
    return;

  // Skip if the opcode of the binary operator is not commutative/associative.
  if (!BinOp->isCommutative() || !BinOp->isAssociative())
    return;

  // Skip if the binary operator is not in the block of the store.
  if (BinOp->getParent() != Store->getParent())
    return;

  // Skip if it is a multiplicative reduction and we disabled them.
  if (DisableMultiplicativeReductions &&
      (BinOp->getOpcode() == Instruction::Mul ||
       BinOp->getOpcode() == Instruction::FMul))
    return;

  // Check the binary operator operands for a candidate load.
  auto *PossibleLoad0 = dyn_cast<LoadInst>(BinOp->getOperand(0));
  auto *PossibleLoad1 = dyn_cast<LoadInst>(BinOp->getOperand(1));
  if (!PossibleLoad0 && !PossibleLoad1)
    return;

  // A load is only a candidate if it cannot escape (thus has only this use).
  if (PossibleLoad0 && PossibleLoad0->getNumUses() == 1)
    if (PossibleLoad0->getParent() == Store->getParent())
      Loads.push_back(&getArrayAccessFor(PossibleLoad0));
  if (PossibleLoad1 && PossibleLoad1->getNumUses() == 1)
    if (PossibleLoad1->getParent() == Store->getParent())
      Loads.push_back(&getArrayAccessFor(PossibleLoad1));
}
|
|
|
|
|
|
|
|
/// @brief Check for reductions in this ScopStmt.
///
/// Iterate over all store memory accesses and check for valid binary
/// reduction-like chains. For all candidates we check if they have the same
/// base address and there are no other accesses which overlap with them. The
/// base address check rules out impossible reduction candidates early. The
/// overlap check, together with the "only one user" check in
/// collectCandiateReductionLoads, guarantees that none of the intermediate
/// results will escape during execution of the loop nest. We basically check
/// here that no other memory access can access the same memory as the
/// potential reduction.
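///
/// Illustration (added example, not from the original comment): in
///
///   for (long i = 0; i < N; i++) {
///     sum += A[i];
///     B[i] = sum;   // extra load of 'sum' overlaps the reduction accesses
///   }
///
/// the second statement also accesses the memory of 'sum', so the candidate
/// pair for 'sum += A[i]' is not marked as reduction-like.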
void ScopStmt::checkForReductions() {
  SmallVector<MemoryAccess *, 2> Loads;
  SmallVector<std::pair<MemoryAccess *, MemoryAccess *>, 4> Candidates;

  // First collect candidate load-store reduction chains by iterating over all
  // stores and collecting possible reduction loads.
  for (MemoryAccess *StoreMA : MemAccs) {
    if (StoreMA->isRead())
      continue;

    Loads.clear();
    collectCandiateReductionLoads(StoreMA, Loads);
    for (MemoryAccess *LoadMA : Loads)
      Candidates.push_back(std::make_pair(LoadMA, StoreMA));
  }

  // Then check each possible candidate pair.
  for (const auto &CandidatePair : Candidates) {
    bool Valid = true;
    isl_map *LoadAccs = CandidatePair.first->getAccessRelation();
    isl_map *StoreAccs = CandidatePair.second->getAccessRelation();

    // Skip those with obviously unequal base addresses.
    if (!isl_map_has_equal_space(LoadAccs, StoreAccs)) {
      isl_map_free(LoadAccs);
      isl_map_free(StoreAccs);
      continue;
    }

    // And check if the remaining accesses overlap with other memory accesses.
    isl_map *AllAccsRel = isl_map_union(LoadAccs, StoreAccs);
    AllAccsRel = isl_map_intersect_domain(AllAccsRel, getDomain());
    isl_set *AllAccs = isl_map_range(AllAccsRel);

    for (MemoryAccess *MA : MemAccs) {
      if (MA == CandidatePair.first || MA == CandidatePair.second)
        continue;

      isl_map *AccRel =
          isl_map_intersect_domain(MA->getAccessRelation(), getDomain());
      isl_set *Accs = isl_map_range(AccRel);

      if (isl_set_has_equal_space(AllAccs, Accs)) {
        isl_set *OverlapAccs = isl_set_intersect(Accs, isl_set_copy(AllAccs));
        Valid = Valid && isl_set_is_empty(OverlapAccs);
        isl_set_free(OverlapAccs);
      } else {
        isl_set_free(Accs);
      }
    }

    isl_set_free(AllAccs);
    if (!Valid)
      continue;

    const LoadInst *Load =
        dyn_cast<const LoadInst>(CandidatePair.first->getAccessInstruction());
    MemoryAccess::ReductionType RT =
        getReductionType(dyn_cast<BinaryOperator>(Load->user_back()), Load);

    // If no overlapping access was found we mark the load and store as
    // reduction like.
    CandidatePair.first->markAsReductionLike(RT);
    CandidatePair.second->markAsReductionLike(RT);
  }
}

std::string ScopStmt::getDomainStr() const { return stringFromIslObj(Domain); }

std::string ScopStmt::getScheduleStr() const {
  auto *S = getSchedule();
  auto Str = stringFromIslObj(S);
  isl_map_free(S);
  return Str;
}

unsigned ScopStmt::getNumParams() const { return Parent.getNumParams(); }

unsigned ScopStmt::getNumIterators() const { return NestLoops.size(); }

const char *ScopStmt::getBaseName() const { return BaseName.c_str(); }

const Loop *ScopStmt::getLoopForDimension(unsigned Dimension) const {
  return NestLoops[Dimension];
}

isl_ctx *ScopStmt::getIslCtx() const { return Parent.getIslCtx(); }

__isl_give isl_set *ScopStmt::getDomain() const { return isl_set_copy(Domain); }

__isl_give isl_space *ScopStmt::getDomainSpace() const {
  return isl_set_get_space(Domain);
}

__isl_give isl_id *ScopStmt::getDomainId() const {
  return isl_set_get_tuple_id(Domain);
}

ScopStmt::~ScopStmt() { isl_set_free(Domain); }

void ScopStmt::print(raw_ostream &OS) const {
  OS << "\t" << getBaseName() << "\n";
  OS.indent(12) << "Domain :=\n";

  if (Domain) {
    OS.indent(16) << getDomainStr() << ";\n";
  } else
    OS.indent(16) << "n/a\n";

  OS.indent(12) << "Schedule :=\n";

  if (Domain) {
    OS.indent(16) << getScheduleStr() << ";\n";
  } else
    OS.indent(16) << "n/a\n";

  for (MemoryAccess *Access : MemAccs)
    Access->print(OS);
}

void ScopStmt::dump() const { print(dbgs()); }

void ScopStmt::removeMemoryAccesses(MemoryAccessList &InvMAs) {
  // Remove all memory accesses in @p InvMAs from this statement together
  // with all scalar accesses that were caused by them. MK_Value READs have no
  // access instruction, hence they would not be removed by this function.
  // However, this function is only used for invariant LoadInst accesses,
  // whose arguments are always affine, hence synthesizable, and therefore
  // there are no MK_Value READ accesses to be removed.
  for (MemoryAccess *MA : InvMAs) {
    auto Predicate = [&](MemoryAccess *Acc) {
      return Acc->getAccessInstruction() == MA->getAccessInstruction();
    };
    MemAccs.erase(std::remove_if(MemAccs.begin(), MemAccs.end(), Predicate),
                  MemAccs.end());
    InstructionToAccess.erase(MA->getAccessInstruction());
  }
}

//===----------------------------------------------------------------------===//
/// Scop class implementation

void Scop::setContext(__isl_take isl_set *NewContext) {
  NewContext = isl_set_align_params(NewContext, isl_set_get_space(Context));
  isl_set_free(Context);
  Context = NewContext;
}

/// @brief Remap parameter values but keep AddRecs valid wrt. invariant loads.
struct SCEVSensitiveParameterRewriter
    : public SCEVVisitor<SCEVSensitiveParameterRewriter, const SCEV *> {
  ValueToValueMap &VMap;
  ScalarEvolution &SE;

public:
  SCEVSensitiveParameterRewriter(ValueToValueMap &VMap, ScalarEvolution &SE)
      : VMap(VMap), SE(SE) {}

  static const SCEV *rewrite(const SCEV *E, ScalarEvolution &SE,
                             ValueToValueMap &VMap) {
    SCEVSensitiveParameterRewriter SSPR(VMap, SE);
    return SSPR.visit(E);
  }

  const SCEV *visit(const SCEV *E) {
    return SCEVVisitor<SCEVSensitiveParameterRewriter, const SCEV *>::visit(E);
  }

  const SCEV *visitConstant(const SCEVConstant *E) { return E; }

  const SCEV *visitTruncateExpr(const SCEVTruncateExpr *E) {
    return SE.getTruncateExpr(visit(E->getOperand()), E->getType());
  }

  const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *E) {
    return SE.getZeroExtendExpr(visit(E->getOperand()), E->getType());
  }

  const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *E) {
    return SE.getSignExtendExpr(visit(E->getOperand()), E->getType());
  }

  const SCEV *visitAddExpr(const SCEVAddExpr *E) {
    SmallVector<const SCEV *, 4> Operands;
    for (int i = 0, e = E->getNumOperands(); i < e; ++i)
      Operands.push_back(visit(E->getOperand(i)));
    return SE.getAddExpr(Operands);
  }

  const SCEV *visitMulExpr(const SCEVMulExpr *E) {
    SmallVector<const SCEV *, 4> Operands;
    for (int i = 0, e = E->getNumOperands(); i < e; ++i)
      Operands.push_back(visit(E->getOperand(i)));
    return SE.getMulExpr(Operands);
  }

  const SCEV *visitSMaxExpr(const SCEVSMaxExpr *E) {
    SmallVector<const SCEV *, 4> Operands;
    for (int i = 0, e = E->getNumOperands(); i < e; ++i)
      Operands.push_back(visit(E->getOperand(i)));
    return SE.getSMaxExpr(Operands);
  }

  const SCEV *visitUMaxExpr(const SCEVUMaxExpr *E) {
    SmallVector<const SCEV *, 4> Operands;
    for (int i = 0, e = E->getNumOperands(); i < e; ++i)
      Operands.push_back(visit(E->getOperand(i)));
    return SE.getUMaxExpr(Operands);
  }

  const SCEV *visitUDivExpr(const SCEVUDivExpr *E) {
    return SE.getUDivExpr(visit(E->getLHS()), visit(E->getRHS()));
  }

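  // Added explanatory note: the AddRec below is rewritten as
  // "visit(start) + {0,+,visit(step)}<L>" instead of being visited as a
  // whole. This keeps the recurrence anchored at its original loop L even
  // when the start expression contains an invariant load that gets remapped,
  // which is what "keep AddRecs valid wrt. invariant loads" refers to.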
  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *E) {
    auto *Start = visit(E->getStart());
    auto *AddRec = SE.getAddRecExpr(SE.getConstant(E->getType(), 0),
                                    visit(E->getStepRecurrence(SE)),
                                    E->getLoop(), SCEV::FlagAnyWrap);
    return SE.getAddExpr(Start, AddRec);
  }

  const SCEV *visitUnknown(const SCEVUnknown *E) {
    if (auto *NewValue = VMap.lookup(E->getValue()))
      return SE.getUnknown(NewValue);
    return E;
  }
};

const SCEV *Scop::getRepresentingInvariantLoadSCEV(const SCEV *S) {
  return SCEVSensitiveParameterRewriter::rewrite(S, *SE, InvEquivClassVMap);
}

void Scop::addParams(std::vector<const SCEV *> NewParameters) {
  for (const SCEV *Parameter : NewParameters) {
    Parameter = extractConstantFactor(Parameter, *SE).second;

    // Normalize the SCEV to get the representing element for an invariant
    // load.
    Parameter = getRepresentingInvariantLoadSCEV(Parameter);

    if (ParameterIds.find(Parameter) != ParameterIds.end())
      continue;

    int dimension = Parameters.size();

    Parameters.push_back(Parameter);
    ParameterIds[Parameter] = dimension;
  }
}

__isl_give isl_id *Scop::getIdForParam(const SCEV *Parameter) {
  // Normalize the SCEV to get the representing element for an invariant load.
  Parameter = getRepresentingInvariantLoadSCEV(Parameter);

  ParamIdType::const_iterator IdIter = ParameterIds.find(Parameter);

  if (IdIter == ParameterIds.end())
    return nullptr;

  std::string ParameterName = "p_" + utostr(IdIter->second);

  if (const SCEVUnknown *ValueParameter = dyn_cast<SCEVUnknown>(Parameter)) {
    Value *Val = ValueParameter->getValue();

    // If this parameter references a specific Value and this value has a name
    // we use this name as it is likely to be unique and more useful than just
    // a number.
    if (Val->hasName())
      ParameterName = Val->getName();
    else if (LoadInst *LI = dyn_cast<LoadInst>(Val)) {
      auto LoadOrigin = LI->getPointerOperand()->stripInBoundsOffsets();
      if (LoadOrigin->hasName()) {
        ParameterName += "_loaded_from_";
        ParameterName += LoadOrigin->getName();
      }
    }
  }

  return isl_id_alloc(getIslCtx(), ParameterName.c_str(),
                      const_cast<void *>((const void *)Parameter));
}

isl_set *Scop::addNonEmptyDomainConstraints(isl_set *C) const {
  isl_set *DomainContext = isl_union_set_params(getDomains());
  return isl_set_intersect_params(C, DomainContext);
}

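// Added illustration (not from the original source): the wrapping context
// collects parameter combinations under which some modeled affine expression
// would wrap around its machine-integer width; its complement, computed
// below, is the boundary context under which all modeled computations stay
// within bounds.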
void Scop::buildBoundaryContext() {
  if (IgnoreIntegerWrapping) {
    BoundaryContext = isl_set_universe(getParamSpace());
    return;
  }

  BoundaryContext = Affinator.getWrappingContext();

  // The isl_set_complement operation used to create the boundary context
  // can possibly become very expensive. We bound the compile time of
  // this operation by setting a compute out.
  //
  // TODO: We can probably avoid isl_set_complement here and generate the
  //       BoundaryContext directly during AST generation.
  long MaxOpsOld = isl_ctx_get_max_operations(getIslCtx());
  isl_ctx_reset_operations(getIslCtx());
  isl_ctx_set_max_operations(getIslCtx(), 300000);
  isl_options_set_on_error(getIslCtx(), ISL_ON_ERROR_CONTINUE);

  BoundaryContext = isl_set_complement(BoundaryContext);

  if (isl_ctx_last_error(getIslCtx()) == isl_error_quota) {
    isl_set_free(BoundaryContext);
    BoundaryContext = isl_set_empty(getParamSpace());
  }

  isl_options_set_on_error(getIslCtx(), ISL_ON_ERROR_ABORT);
  isl_ctx_reset_operations(getIslCtx());
  isl_ctx_set_max_operations(getIslCtx(), MaxOpsOld);
  BoundaryContext = isl_set_gist_params(BoundaryContext, getContext());
  trackAssumption(WRAPPING, BoundaryContext, DebugLoc());
}

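// Added illustration (not from the original source): a user assumption such
// as
//
//   __builtin_assume(N > 0);
//
// in the input program arrives here as a call to llvm.assume. If its
// condition is an affine constraint over SCoP parameters, it is turned into
// an isl set, e.g. [N] -> { : N > 0 }, and intersected with the context
// below.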
void Scop::addUserAssumptions(AssumptionCache &AC) {
  auto *R = &getRegion();
  auto &F = *R->getEntry()->getParent();
  for (auto &Assumption : AC.assumptions()) {
    auto *CI = dyn_cast_or_null<CallInst>(Assumption);
    if (!CI || CI->getNumArgOperands() != 1)
      continue;
    if (!DT.dominates(CI->getParent(), R->getEntry()))
      continue;

    auto *Val = CI->getArgOperand(0);
    std::vector<const SCEV *> Params;
    if (!isAffineParamConstraint(Val, R, *SE, Params)) {
      emitOptimizationRemarkAnalysis(F.getContext(), DEBUG_TYPE, F,
                                     CI->getDebugLoc(),
                                     "Non-affine user assumption ignored.");
      continue;
    }

    addParams(Params);

    auto *L = LI.getLoopFor(CI->getParent());
    SmallVector<isl_set *, 2> ConditionSets;
    buildConditionSets(*this, Val, nullptr, L, Context, ConditionSets);
    assert(ConditionSets.size() == 2);
    isl_set_free(ConditionSets[1]);

    auto *AssumptionCtx = ConditionSets[0];
    emitOptimizationRemarkAnalysis(
        F.getContext(), DEBUG_TYPE, F, CI->getDebugLoc(),
        "Use user assumption: " + stringFromIslObj(AssumptionCtx));
    Context = isl_set_intersect(Context, AssumptionCtx);
  }
}

void Scop::addUserContext() {
  if (UserContextStr.empty())
    return;

  isl_set *UserContext = isl_set_read_from_str(IslCtx, UserContextStr.c_str());
  isl_space *Space = getParamSpace();
  if (isl_space_dim(Space, isl_dim_param) !=
      isl_set_dim(UserContext, isl_dim_param)) {
    auto SpaceStr = isl_space_to_str(Space);
    errs() << "Error: the context provided in -polly-context does not have "
           << "the same number of dimensions as the computed context. Due to "
           << "this mismatch, the -polly-context option is ignored. Please "
           << "provide the context in the parameter space: " << SpaceStr
           << ".\n";
    free(SpaceStr);
    isl_set_free(UserContext);
    isl_space_free(Space);
    return;
  }

  for (unsigned i = 0; i < isl_space_dim(Space, isl_dim_param); i++) {
    auto NameContext = isl_set_get_dim_name(Context, isl_dim_param, i);
    auto NameUserContext = isl_set_get_dim_name(UserContext, isl_dim_param, i);

    if (strcmp(NameContext, NameUserContext) != 0) {
      auto SpaceStr = isl_space_to_str(Space);
      errs() << "Error: the name of dimension " << i
             << " provided in -polly-context "
             << "is '" << NameUserContext << "', but the name in the computed "
             << "context is '" << NameContext
             << "'. Due to this name mismatch, "
             << "the -polly-context option is ignored. Please provide "
             << "the context in the parameter space: " << SpaceStr << ".\n";
      free(SpaceStr);
      isl_set_free(UserContext);
      isl_space_free(Space);
      return;
    }

    UserContext =
        isl_set_set_dim_id(UserContext, isl_dim_param, i,
                           isl_space_get_dim_id(Space, isl_dim_param, i));
  }

  Context = isl_set_intersect(Context, UserContext);
  isl_space_free(Space);
}

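// Added illustration (not from the original source): if two loads read the
// same pointer %p with the same type, the second load is mapped to the first
// in InvEquivClassVMap, so parameters derived from either load end up
// referring to a single representing invariant load.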
void Scop::buildInvariantEquivalenceClasses() {
  DenseMap<std::pair<const SCEV *, Type *>, LoadInst *> EquivClasses;

  const InvariantLoadsSetTy &RIL = *SD.getRequiredInvariantLoads(&getRegion());
  for (LoadInst *LInst : RIL) {
    const SCEV *PointerSCEV = SE->getSCEV(LInst->getPointerOperand());

    Type *Ty = LInst->getType();
    LoadInst *&ClassRep = EquivClasses[std::make_pair(PointerSCEV, Ty)];
    if (ClassRep) {
      InvEquivClassVMap[LInst] = ClassRep;
      continue;
    }

    ClassRep = LInst;
    InvariantEquivClasses.emplace_back(PointerSCEV, MemoryAccessList(), nullptr,
                                       Ty);
  }
}

void Scop::buildContext() {
  isl_space *Space = isl_space_params_alloc(IslCtx, 0);
  Context = isl_set_universe(isl_space_copy(Space));
  AssumedContext = isl_set_universe(Space);
}

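// Added illustration (not from the original source): a parameter of type i8
// contributes the bounds -128 <= p <= 127 to the context; tighter bounds are
// added when ScalarEvolution can prove a smaller signed range.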
void Scop::addParameterBounds() {
  for (const auto &ParamID : ParameterIds) {
    int dim = ParamID.second;

    ConstantRange SRange = SE->getSignedRange(ParamID.first);

    Context = addRangeBoundsToSet(Context, SRange, dim, isl_dim_param);
  }
}

void Scop::realignParams() {
  // Add all parameters into a common model.
  isl_space *Space = isl_space_params_alloc(IslCtx, ParameterIds.size());

  for (const auto &ParamID : ParameterIds) {
    const SCEV *Parameter = ParamID.first;
    isl_id *id = getIdForParam(Parameter);
    Space = isl_space_set_dim_id(Space, isl_dim_param, ParamID.second, id);
  }

  // Align the parameters of all data structures to the model.
  Context = isl_set_align_params(Context, Space);

  for (ScopStmt &Stmt : *this)
    Stmt.realignParams();
}

static __isl_give isl_set *
simplifyAssumptionContext(__isl_take isl_set *AssumptionContext,
                          const Scop &S) {
  // If we have modeled all blocks in the SCoP that have side effects we can
  // simplify the context with the constraints that are needed for anything to
  // be executed at all. However, if we have error blocks in the SCoP we
  // already assumed some parameter combinations cannot occur and removed them
  // from the domains, thus we cannot use the remaining domain to simplify the
  // assumptions.
  if (!S.hasErrorBlock()) {
    isl_set *DomainParameters = isl_union_set_params(S.getDomains());
    AssumptionContext =
        isl_set_gist_params(AssumptionContext, DomainParameters);
  }

  AssumptionContext = isl_set_gist_params(AssumptionContext, S.getContext());
  return AssumptionContext;
}

void Scop::simplifyContexts() {
  // The parameter constraints of the iteration domains give us a set of
  // constraints that need to hold for all cases where at least a single
  // statement iteration is executed in the whole scop. We now simplify the
  // assumed context under the assumption that such constraints hold and at
  // least a single statement iteration is executed. For cases where no
  // statement instances are executed, the assumptions we have taken about
  // the executed code do not matter and can be changed.
  //
  // WARNING: This only holds if the assumptions we have taken do not reduce
  //          the set of statement instances that are executed. Otherwise we
  //          may run into a case where the iteration domains suggest that
  //          for a certain set of parameter constraints no code is executed,
  //          but in the original program some computation would have been
  //          performed. In such a case, modifying the run-time conditions and
  //          possibly influencing the run-time check may cause certain scops
  //          to not be executed.
  //
  // Example:
  //
  //   When delinearizing the following code:
  //
  //     for (long i = 0; i < 100; i++)
  //       for (long j = 0; j < m; j++)
  //         A[i+p][j] = 1.0;
  //
  //   we assume that the condition m <= 0 or (m >= 1 and p >= 0) holds as
  //   otherwise we would access out-of-bound data. Now, knowing that code is
  //   only executed for the case m >= 1, it is sufficient to assume p >= 0.
  AssumedContext = simplifyAssumptionContext(AssumedContext, *this);
  BoundaryContext = simplifyAssumptionContext(BoundaryContext, *this);
}

/// @brief Add the minimal/maximal access in @p Set to @p User.
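///
/// Illustration (added, not from the original comment): for an access set
/// { A[i] : 0 <= i < N } the pair (A[0], A[N]) is recorded; the maximal
/// access is bumped by one below so the pair delimits the half-open range
/// [Min, Max) of touched memory.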
static isl_stat buildMinMaxAccess(__isl_take isl_set *Set, void *User) {
  Scop::MinMaxVectorTy *MinMaxAccesses = (Scop::MinMaxVectorTy *)User;
  isl_pw_multi_aff *MinPMA, *MaxPMA;
  isl_pw_aff *LastDimAff;
  isl_aff *OneAff;
  unsigned Pos;

  // Restrict the number of parameters involved in the access as the lexmin/
  // lexmax computation will take too long if this number is high.
  //
  // Experiments with a simple test case using an i7 4800MQ:
  //
  //  #Parameters involved | Time (in sec)
  //            6          |     0.01
  //            7          |     0.04
  //            8          |     0.12
  //            9          |     0.40
  //           10          |     1.54
  //           11          |     6.78
  //           12          |    30.38
  //
  if (isl_set_n_param(Set) > RunTimeChecksMaxParameters) {
    unsigned InvolvedParams = 0;
    for (unsigned u = 0, e = isl_set_n_param(Set); u < e; u++)
      if (isl_set_involves_dims(Set, isl_dim_param, u, 1))
        InvolvedParams++;

    if (InvolvedParams > RunTimeChecksMaxParameters) {
      isl_set_free(Set);
      return isl_stat_error;
    }
  }

  Set = isl_set_remove_divs(Set);

  MinPMA = isl_set_lexmin_pw_multi_aff(isl_set_copy(Set));
  MaxPMA = isl_set_lexmax_pw_multi_aff(isl_set_copy(Set));

  MinPMA = isl_pw_multi_aff_coalesce(MinPMA);
  MaxPMA = isl_pw_multi_aff_coalesce(MaxPMA);

  // Adjust the last dimension of the maximal access by one as we want to
  // enclose the accessed memory region by MinPMA and MaxPMA. The pointer
  // we test during code generation might now point after the end of the
  // allocated array but we will never dereference it anyway.
  assert(isl_pw_multi_aff_dim(MaxPMA, isl_dim_out) &&
         "Assumed at least one output dimension");
  Pos = isl_pw_multi_aff_dim(MaxPMA, isl_dim_out) - 1;
  LastDimAff = isl_pw_multi_aff_get_pw_aff(MaxPMA, Pos);
  OneAff = isl_aff_zero_on_domain(
      isl_local_space_from_space(isl_pw_aff_get_domain_space(LastDimAff)));
  OneAff = isl_aff_add_constant_si(OneAff, 1);
  LastDimAff = isl_pw_aff_add(LastDimAff, isl_pw_aff_from_aff(OneAff));
  MaxPMA = isl_pw_multi_aff_set_pw_aff(MaxPMA, Pos, LastDimAff);

  MinMaxAccesses->push_back(std::make_pair(MinPMA, MaxPMA));

  isl_set_free(Set);
  return isl_stat_ok;
}

static __isl_give isl_set *getAccessDomain(MemoryAccess *MA) {
  isl_set *Domain = MA->getStatement()->getDomain();
  Domain = isl_set_project_out(Domain, isl_dim_set, 0, isl_set_n_dim(Domain));
  return isl_set_reset_tuple_id(Domain);
}

/// @brief Wrapper function to calculate minimal/maximal accesses to each
/// array.
static bool calculateMinMaxAccess(__isl_take isl_union_map *Accesses,
                                  __isl_take isl_union_set *Domains,
                                  Scop::MinMaxVectorTy &MinMaxAccesses) {

  Accesses = isl_union_map_intersect_domain(Accesses, Domains);
  isl_union_set *Locations = isl_union_map_range(Accesses);
  Locations = isl_union_set_coalesce(Locations);
  Locations = isl_union_set_detect_equalities(Locations);
  bool Valid = (0 == isl_union_set_foreach_set(Locations, buildMinMaxAccess,
                                               &MinMaxAccesses));
  isl_union_set_free(Locations);
  return Valid;
}

/// @brief Helper to treat non-affine regions and basic blocks the same.
///
///{

/// @brief Return the block that is the representing block for @p RN.
static inline BasicBlock *getRegionNodeBasicBlock(RegionNode *RN) {
  return RN->isSubRegion() ? RN->getNodeAs<Region>()->getEntry()
                           : RN->getNodeAs<BasicBlock>();
}

/// @brief Return the @p idx'th block that is executed after @p RN.
static inline BasicBlock *
getRegionNodeSuccessor(RegionNode *RN, TerminatorInst *TI, unsigned idx) {
  if (RN->isSubRegion()) {
    assert(idx == 0);
    return RN->getNodeAs<Region>()->getExit();
  }
  return TI->getSuccessor(idx);
}

/// @brief Return the smallest loop surrounding @p RN.
static inline Loop *getRegionNodeLoop(RegionNode *RN, LoopInfo &LI) {
  if (!RN->isSubRegion())
    return LI.getLoopFor(RN->getNodeAs<BasicBlock>());

  Region *NonAffineSubRegion = RN->getNodeAs<Region>();
  Loop *L = LI.getLoopFor(NonAffineSubRegion->getEntry());
  while (L && NonAffineSubRegion->contains(L))
    L = L->getParentLoop();
  return L;
}

static inline unsigned getNumBlocksInRegionNode(RegionNode *RN) {
  if (!RN->isSubRegion())
    return 1;

  Region *R = RN->getNodeAs<Region>();
  return std::distance(R->block_begin(), R->block_end());
}

static bool containsErrorBlock(RegionNode *RN, const Region &R, LoopInfo &LI,
                               const DominatorTree &DT) {
  if (!RN->isSubRegion())
    return isErrorBlock(*RN->getNodeAs<BasicBlock>(), R, LI, DT);
  for (BasicBlock *BB : RN->getNodeAs<Region>()->blocks())
    if (isErrorBlock(*BB, R, LI, DT))
      return true;
  return false;
}

///}

static inline __isl_give isl_set *addDomainDimId(__isl_take isl_set *Domain,
                                                 unsigned Dim, Loop *L) {
  Domain = isl_set_lower_bound_si(Domain, isl_dim_set, Dim, -1);
  isl_id *DimId =
      isl_id_alloc(isl_set_get_ctx(Domain), nullptr, static_cast<void *>(L));
  return isl_set_set_dim_id(Domain, isl_dim_set, Dim, DimId);
}

isl_set *Scop::getDomainConditions(ScopStmt *Stmt) {
  BasicBlock *BB = Stmt->isBlockStmt() ? Stmt->getBasicBlock()
                                       : Stmt->getRegion()->getEntry();
  return getDomainConditions(BB);
}

isl_set *Scop::getDomainConditions(BasicBlock *BB) {
  assert(DomainMap.count(BB) && "Requested BB did not have a domain");
  return isl_set_copy(DomainMap[BB]);
}

void Scop::removeErrorBlockDomains() {
  auto removeDomains = [this](BasicBlock *Start) {
    auto BBNode = DT.getNode(Start);
    for (auto ErrorChild : depth_first(BBNode)) {
      auto ErrorChildBlock = ErrorChild->getBlock();
      auto CurrentDomain = DomainMap[ErrorChildBlock];
      auto Empty = isl_set_empty(isl_set_get_space(CurrentDomain));
      DomainMap[ErrorChildBlock] = Empty;
      isl_set_free(CurrentDomain);
    }
  };

  SmallVector<Region *, 4> Todo = {&R};

  while (!Todo.empty()) {
    auto SubRegion = Todo.back();
    Todo.pop_back();

    if (!SD.isNonAffineSubRegion(SubRegion, &getRegion())) {
      for (auto &Child : *SubRegion)
        Todo.push_back(Child.get());
      continue;
    }
    if (containsErrorBlock(SubRegion->getNode(), getRegion(), LI, DT))
      removeDomains(SubRegion->getEntry());
  }

  for (auto BB : R.blocks())
    if (isErrorBlock(*BB, R, LI, DT))
      removeDomains(BB);
}

void Scop::buildDomains(Region *R) {

  bool IsOnlyNonAffineRegion = SD.isNonAffineSubRegion(R, R);
  auto *EntryBB = R->getEntry();
  auto *L = IsOnlyNonAffineRegion ? nullptr : LI.getLoopFor(EntryBB);
  int LD = getRelativeLoopDepth(L);
  auto *S = isl_set_universe(isl_space_set_alloc(getIslCtx(), 0, LD + 1));

  while (LD-- >= 0) {
    S = addDomainDimId(S, LD + 1, L);
    L = L->getParentLoop();
  }

  DomainMap[EntryBB] = S;

  if (IsOnlyNonAffineRegion)
    return;

  buildDomainsWithBranchConstraints(R);
  propagateDomainConstraints(R);

  // Error blocks and blocks dominated by them have been assumed to never be
  // executed. Representing them in the Scop does not add any value. In fact,
  // it is likely to cause issues during construction of the ScopStmts. The
  // contents of error blocks have not been verified to be expressible and
  // will cause problems when building up a ScopStmt for them.
  // Furthermore, basic blocks dominated by error blocks may reference
  // instructions in the error block which, if the error block is not modeled,
  // can themselves not be constructed properly.
  removeErrorBlockDomains();
}

void Scop::buildDomainsWithBranchConstraints(Region *R) {
  auto &BoxedLoops = *SD.getBoxedLoops(&getRegion());

  // To create the domain for each block in R we iterate over all blocks and
  // subregions in R and propagate the conditions under which the current
  // region element is executed. To this end we iterate in reverse post order
  // over R as it ensures that we first visit all predecessors of a region
  // node (either a basic block or a subregion) before we visit the region
  // node itself. Initially, only the domain for the SCoP region entry block
  // is set and from there we propagate the current domain to all successors,
  // however we add the condition that the successor is actually executed
  // next. As we are only interested in non-loop carried constraints here we
  // can simply skip loop back edges.

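  // Added illustration (not from the original comment): for a diamond
  //
  //   entry -> (c ? then : else) -> merge
  //
  // the traversal visits entry first, gives 'then' the condition set {c} and
  // 'else' the set {!c}, and unions both sets again when 'merge' is reached
  // as the successor of 'then' and 'else'.
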
  ReversePostOrderTraversal<Region *> RTraversal(R);
  for (auto *RN : RTraversal) {

    // Recurse for affine subregions but go on for basic blocks and non-affine
    // subregions.
    if (RN->isSubRegion()) {
      Region *SubRegion = RN->getNodeAs<Region>();
      if (!SD.isNonAffineSubRegion(SubRegion, &getRegion())) {
        buildDomainsWithBranchConstraints(SubRegion);
        continue;
      }
    }

    if (containsErrorBlock(RN, getRegion(), LI, DT))
      HasErrorBlock = true;

    BasicBlock *BB = getRegionNodeBasicBlock(RN);
    TerminatorInst *TI = BB->getTerminator();

    if (isa<UnreachableInst>(TI))
      continue;

    isl_set *Domain = DomainMap.lookup(BB);
    if (!Domain) {
      DEBUG(dbgs() << "\tSkip: " << BB->getName()
                   << ", it is only reachable from error blocks.\n");
      continue;
    }

    DEBUG(dbgs() << "\tVisit: " << BB->getName() << " : " << Domain << "\n");

    Loop *BBLoop = getRegionNodeLoop(RN, LI);
    int BBLoopDepth = getRelativeLoopDepth(BBLoop);

    // Build the condition sets for the successor nodes of the current region
    // node. If it is a non-affine subregion we will always execute the single
    // exit node, hence the single entry node domain is the condition set. For
    // basic blocks we use the helper function buildConditionSets.
    SmallVector<isl_set *, 8> ConditionSets;
    if (RN->isSubRegion())
      ConditionSets.push_back(isl_set_copy(Domain));
    else
      buildConditionSets(*this, TI, BBLoop, Domain, ConditionSets);

    // Now iterate over the successors and set their initial domain based on
    // their condition set. We skip back edges here and have to be careful
    // when we leave a loop not to keep constraints over a dimension that
    // doesn't exist anymore.
    assert(RN->isSubRegion() || TI->getNumSuccessors() == ConditionSets.size());
    for (unsigned u = 0, e = ConditionSets.size(); u < e; u++) {
      isl_set *CondSet = ConditionSets[u];
      BasicBlock *SuccBB = getRegionNodeSuccessor(RN, TI, u);

      // Skip back edges.
      if (DT.dominates(SuccBB, BB)) {
        isl_set_free(CondSet);
        continue;
      }

      // Do not adjust the number of dimensions if we enter a boxed loop or
      // are in a non-affine subregion or if the surrounding loop stays the
      // same.
      Loop *SuccBBLoop = LI.getLoopFor(SuccBB);
      while (BoxedLoops.count(SuccBBLoop))
        SuccBBLoop = SuccBBLoop->getParentLoop();

      if (BBLoop != SuccBBLoop) {

        // Check if the edge to SuccBB is a loop entry or exit edge. If so
        // adjust the dimensionality accordingly. Lastly, if we leave a loop
        // and enter a new one we need to drop the old constraints.
        int SuccBBLoopDepth = getRelativeLoopDepth(SuccBBLoop);
        unsigned LoopDepthDiff = std::abs(BBLoopDepth - SuccBBLoopDepth);
        if (BBLoopDepth > SuccBBLoopDepth) {
          CondSet = isl_set_project_out(CondSet, isl_dim_set,
                                        isl_set_n_dim(CondSet) - LoopDepthDiff,
                                        LoopDepthDiff);
        } else if (SuccBBLoopDepth > BBLoopDepth) {
          assert(LoopDepthDiff == 1);
          CondSet = isl_set_add_dims(CondSet, isl_dim_set, 1);
          CondSet = addDomainDimId(CondSet, SuccBBLoopDepth, SuccBBLoop);
        } else if (BBLoopDepth >= 0) {
          assert(LoopDepthDiff <= 1);
          CondSet = isl_set_project_out(CondSet, isl_dim_set, BBLoopDepth, 1);
          CondSet = isl_set_add_dims(CondSet, isl_dim_set, 1);
          CondSet = addDomainDimId(CondSet, SuccBBLoopDepth, SuccBBLoop);
        }
      }

      // Set the domain for the successor or merge it with an existing domain
      // in case there are multiple paths (without loop back edges) to the
      // successor block.
      isl_set *&SuccDomain = DomainMap[SuccBB];
      if (!SuccDomain)
        SuccDomain = CondSet;
      else
        SuccDomain = isl_set_union(SuccDomain, CondSet);

      SuccDomain = isl_set_coalesce(SuccDomain);
      if (isl_set_n_basic_set(SuccDomain) > MaxConjunctsInDomain) {
        auto *Empty = isl_set_empty(isl_set_get_space(SuccDomain));
        isl_set_free(SuccDomain);
        SuccDomain = Empty;
        invalidate(ERROR_DOMAINCONJUNCTS, DebugLoc());
      }
      DEBUG(dbgs() << "\tSet SuccBB: " << SuccBB->getName() << " : "
                   << SuccDomain << "\n");
    }
  }
}

/// @brief Return the domain for @p BB wrt @p DomainMap.
///
/// This helper function will lookup @p BB in @p DomainMap but also handle the
/// case where @p BB is contained in a non-affine subregion using the region
/// tree obtained by @p RI.
static __isl_give isl_set *
getDomainForBlock(BasicBlock *BB, DenseMap<BasicBlock *, isl_set *> &DomainMap,
                  RegionInfo &RI) {
  auto DIt = DomainMap.find(BB);
  if (DIt != DomainMap.end())
    return isl_set_copy(DIt->getSecond());

  Region *R = RI.getRegionFor(BB);
  while (R->getEntry() == BB)
    R = R->getParent();
  return getDomainForBlock(R->getEntry(), DomainMap, RI);
}

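// Added illustration (not from the original source): when constraints are
// pulled across a loop exit edge, a predecessor domain such as
// { [i0, i1] : 0 <= i1 < 8 } is projected down to the exit block's loop
// depth, e.g. to { [i0] }, so no constraints over the vanished inner
// dimension survive.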
void Scop::propagateDomainConstraints(Region *R) {
  // Iterate over the region R and propagate the domain constraints from the
  // predecessors to the current node. In contrast to the
  // buildDomainsWithBranchConstraints function, this one will pull the domain
  // information from the predecessors instead of pushing it to the
  // successors. Additionally, we assume the domains to be already present in
  // the domain map here. However, we iterate again in reverse post order so
  // we know all predecessors have been visited before a block or non-affine
  // subregion is visited.

  // The set of boxed loops (loops in non-affine subregions) for this SCoP.
  auto &BoxedLoops = *SD.getBoxedLoops(&getRegion());

  ReversePostOrderTraversal<Region *> RTraversal(R);
  for (auto *RN : RTraversal) {

    // Recurse for affine subregions but go on for basic blocks and non-affine
    // subregions.
    if (RN->isSubRegion()) {
      Region *SubRegion = RN->getNodeAs<Region>();
      if (!SD.isNonAffineSubRegion(SubRegion, &getRegion())) {
        propagateDomainConstraints(SubRegion);
        continue;
      }
    }

    // Get the domain for the current block and check if it was initialized
    // or not. The only way it was not is if this block is only reachable via
    // error blocks, thus will not be executed under the assumptions we make.
    // Such blocks have to be skipped as their predecessors might not have
    // domains either. It would not benefit us to compute the domain anyway,
    // only the domains of the error blocks that are reachable from non-error
    // blocks are needed to generate assumptions.
    BasicBlock *BB = getRegionNodeBasicBlock(RN);
    isl_set *&Domain = DomainMap[BB];
    if (!Domain) {
      DEBUG(dbgs() << "\tSkip: " << BB->getName()
                   << ", it is only reachable from error blocks.\n");
      DomainMap.erase(BB);
      continue;
    }
    DEBUG(dbgs() << "\tVisit: " << BB->getName() << " : " << Domain << "\n");

    Loop *BBLoop = getRegionNodeLoop(RN, LI);
    int BBLoopDepth = getRelativeLoopDepth(BBLoop);

    isl_set *PredDom = isl_set_empty(isl_set_get_space(Domain));
    for (auto *PredBB : predecessors(BB)) {

      // Skip backedges.
      if (DT.dominates(BB, PredBB))
        continue;

      isl_set *PredBBDom = nullptr;

      // Handle the SCoP entry block with its outside predecessors.
      if (!getRegion().contains(PredBB))
        PredBBDom = isl_set_universe(isl_set_get_space(PredDom));

      if (!PredBBDom) {
        // Determine the loop depth of the predecessor and adjust its domain
        // to the domain of the current block. This can mean we have to:
        //   o) Drop a dimension if this block is the exit of a loop, not the
        //      header of a new loop and the predecessor was part of the loop.
        //   o) Add an unconstrained new dimension if this block is the header
        //      of a loop and the predecessor is not part of it.
        //   o) Drop the information about the innermost loop dimension when
        //      the predecessor and the current block are surrounded by
        //      different loops in the same depth.
        PredBBDom = getDomainForBlock(PredBB, DomainMap, *R->getRegionInfo());
        Loop *PredBBLoop = LI.getLoopFor(PredBB);
        while (BoxedLoops.count(PredBBLoop))
          PredBBLoop = PredBBLoop->getParentLoop();

        int PredBBLoopDepth = getRelativeLoopDepth(PredBBLoop);
        unsigned LoopDepthDiff = std::abs(BBLoopDepth - PredBBLoopDepth);
        if (BBLoopDepth < PredBBLoopDepth)
          PredBBDom = isl_set_project_out(
              PredBBDom, isl_dim_set, isl_set_n_dim(PredBBDom) - LoopDepthDiff,
              LoopDepthDiff);
        else if (PredBBLoopDepth < BBLoopDepth) {
          assert(LoopDepthDiff == 1);
          PredBBDom = isl_set_add_dims(PredBBDom, isl_dim_set, 1);
        } else if (BBLoop != PredBBLoop && BBLoopDepth >= 0) {
          assert(LoopDepthDiff <= 1);
          PredBBDom = isl_set_drop_constraints_involving_dims(
              PredBBDom, isl_dim_set, BBLoopDepth, 1);
        }
      }

      PredDom = isl_set_union(PredDom, PredBBDom);
    }

    // Under the union of all predecessor conditions we can reach this block.
    Domain = isl_set_coalesce(isl_set_intersect(Domain, PredDom));

    if (BBLoop && BBLoop->getHeader() == BB && getRegion().contains(BBLoop))
      addLoopBoundsToHeaderDomain(BBLoop);

    // Add assumptions for error blocks.
    if (containsErrorBlock(RN, getRegion(), LI, DT)) {
      IsOptimized = true;
      isl_set *DomPar = isl_set_params(isl_set_copy(Domain));
      addAssumption(ERRORBLOCK, isl_set_complement(DomPar),
                    BB->getTerminator()->getDebugLoc());
    }
  }
}

/// @brief Create a map from SetSpace -> SetSpace where the dimension @p Dim
/// is incremented by one and all other dimensions are equal, e.g.,
///             [i0, i1, i2, i3] -> [i0, i1, i2 + 1, i3]
/// if @p Dim is 2 and @p SetSpace has 4 dimensions.
static __isl_give isl_map *
createNextIterationMap(__isl_take isl_space *SetSpace, unsigned Dim) {
  auto *MapSpace = isl_space_map_from_set(SetSpace);
  auto *NextIterationMap = isl_map_universe(isl_space_copy(MapSpace));
  for (unsigned u = 0; u < isl_map_n_in(NextIterationMap); u++)
    if (u != Dim)
      NextIterationMap =
          isl_map_equate(NextIterationMap, isl_dim_in, u, isl_dim_out, u);
  auto *C = isl_constraint_alloc_equality(isl_local_space_from_space(MapSpace));
  C = isl_constraint_set_constant_si(C, 1);
  C = isl_constraint_set_coefficient_si(C, isl_dim_in, Dim, 1);
  C = isl_constraint_set_coefficient_si(C, isl_dim_out, Dim, -1);
  NextIterationMap = isl_map_add_constraint(NextIterationMap, C);
  return NextIterationMap;
}

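// Added illustration (not from the original source): for a header domain
// { [i] : i >= 0 } of a loop whose latch takes the backedge exactly for
// i < N - 1, the code below trims the domain to { [i] : 0 <= i < N } and
// emits an assumption excluding the parameter values for which the loop
// would never terminate.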
void Scop::addLoopBoundsToHeaderDomain(Loop *L) {
  int LoopDepth = getRelativeLoopDepth(L);
  assert(LoopDepth >= 0 && "Loop in region should have at least depth one");

  BasicBlock *HeaderBB = L->getHeader();
  assert(DomainMap.count(HeaderBB));
  isl_set *&HeaderBBDom = DomainMap[HeaderBB];

  isl_map *NextIterationMap =
      createNextIterationMap(isl_set_get_space(HeaderBBDom), LoopDepth);

  isl_set *UnionBackedgeCondition =
      isl_set_empty(isl_set_get_space(HeaderBBDom));

  SmallVector<llvm::BasicBlock *, 4> LatchBlocks;
  L->getLoopLatches(LatchBlocks);

  for (BasicBlock *LatchBB : LatchBlocks) {

    // If the latch is only reachable via error statements we skip it.
    isl_set *LatchBBDom = DomainMap.lookup(LatchBB);
    if (!LatchBBDom)
      continue;

    isl_set *BackedgeCondition = nullptr;

    TerminatorInst *TI = LatchBB->getTerminator();
    BranchInst *BI = dyn_cast<BranchInst>(TI);
    if (BI && BI->isUnconditional())
      BackedgeCondition = isl_set_copy(LatchBBDom);
    else {
      SmallVector<isl_set *, 8> ConditionSets;
      int idx = BI->getSuccessor(0) != HeaderBB;
      buildConditionSets(*this, TI, L, LatchBBDom, ConditionSets);

      // Free the non back edge condition set as we do not need it.
      isl_set_free(ConditionSets[1 - idx]);

      BackedgeCondition = ConditionSets[idx];
    }

    int LatchLoopDepth = getRelativeLoopDepth(LI.getLoopFor(LatchBB));
    assert(LatchLoopDepth >= LoopDepth);
    BackedgeCondition =
        isl_set_project_out(BackedgeCondition, isl_dim_set, LoopDepth + 1,
                            LatchLoopDepth - LoopDepth);
    UnionBackedgeCondition =
        isl_set_union(UnionBackedgeCondition, BackedgeCondition);
  }

  isl_map *ForwardMap = isl_map_lex_le(isl_set_get_space(HeaderBBDom));
  for (int i = 0; i < LoopDepth; i++)
    ForwardMap = isl_map_equate(ForwardMap, isl_dim_in, i, isl_dim_out, i);

  isl_set *UnionBackedgeConditionComplement =
      isl_set_complement(UnionBackedgeCondition);
  UnionBackedgeConditionComplement = isl_set_lower_bound_si(
      UnionBackedgeConditionComplement, isl_dim_set, LoopDepth, 0);
  UnionBackedgeConditionComplement =
      isl_set_apply(UnionBackedgeConditionComplement, ForwardMap);
  HeaderBBDom = isl_set_subtract(HeaderBBDom, UnionBackedgeConditionComplement);
  HeaderBBDom = isl_set_apply(HeaderBBDom, NextIterationMap);

  auto Parts = partitionSetParts(HeaderBBDom, LoopDepth);
  HeaderBBDom = Parts.second;

  // Check if there is a <nsw> tagged AddRec for this loop and if so do not
  // add the bounded assumptions to the context as they are already implied
  // by the <nsw> tag.
  if (Affinator.hasNSWAddRecForLoop(L)) {
    isl_set_free(Parts.first);
    return;
  }

  isl_set *UnboundedCtx = isl_set_params(Parts.first);
  isl_set *BoundedCtx = isl_set_complement(UnboundedCtx);
  addAssumption(INFINITELOOP, BoundedCtx,
                HeaderBB->getTerminator()->getDebugLoc());
}

void Scop::buildAliasChecks(AliasAnalysis &AA) {
  if (!PollyUseRuntimeAliasChecks)
    return;

  if (buildAliasGroups(AA))
    return;

  // If a problem occurs while building the alias groups we need to delete
  // this SCoP and pretend it wasn't valid in the first place. To this end
  // we make the assumed context infeasible.
  invalidate(ALIASING, DebugLoc());

  DEBUG(dbgs() << "\n\nNOTE: Run time checks for " << getNameStr()
               << " could not be created as the number of parameters involved "
                  "is too high. The SCoP will be "
                  "dismissed.\nUse:\n\t--polly-rtc-max-parameters=X\nto adjust "
                  "the maximal number of parameters but be advised that the "
                  "compile time might increase exponentially.\n\n");
}

bool Scop::buildAliasGroups(AliasAnalysis &AA) {
|
2014-09-18 19:17:17 +08:00
|
|
|
// To create sound alias checks we perform the following steps:
|
2015-02-25 00:00:29 +08:00
|
|
|
// o) Use the alias analysis and an alias set tracker to build alias sets
|
2014-09-18 19:17:17 +08:00
|
|
|
// for all memory accesses inside the SCoP.
|
|
|
|
// o) For each alias set we then map the aliasing pointers back to the
|
2015-02-25 00:00:29 +08:00
|
|
|
// memory accesses we know, thus obtain groups of memory accesses which
|
2014-09-18 19:17:17 +08:00
|
|
|
// might alias.
|
2014-10-01 20:42:37 +08:00
|
|
|
// o) We divide each group based on the domains of the minimal/maximal
|
2015-02-25 00:00:29 +08:00
|
|
|
// accesses. That means two minimal/maximal accesses are only in a group
|
2014-10-01 20:42:37 +08:00
|
|
|
// if their access domains intersect, otherwise they are in different
|
|
|
|
// ones.
|
2015-07-24 01:04:54 +08:00
|
|
|
// o) We partition each group into read only and non read only accesses.
|
2015-02-25 00:00:29 +08:00
|
|
|
// o) For each group with more than one base pointer we then compute minimal
|
2015-07-24 01:04:54 +08:00
|
|
|
// and maximal accesses to each array of a group in read only and non
|
|
|
|
// read only partitions separately.
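  //
  // Hypothetical example: if accesses to arrays A and B may alias, A is
  // written and B is only read, they form one group; the non read only
  // partition holds the accesses to A, the read only partition those to B,
  // and the runtime check compares the minimal/maximal accessed addresses
  // of A against those of B.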
  using AliasGroupTy = SmallVector<MemoryAccess *, 4>;

  AliasSetTracker AST(AA);

  DenseMap<Value *, MemoryAccess *> PtrToAcc;
  DenseSet<Value *> HasWriteAccess;
  for (ScopStmt &Stmt : *this) {

    // Skip statements with an empty domain as they will never be executed.
    isl_set *StmtDomain = Stmt.getDomain();
    bool StmtDomainEmpty = isl_set_is_empty(StmtDomain);
    isl_set_free(StmtDomain);
    if (StmtDomainEmpty)
      continue;

    for (MemoryAccess *MA : Stmt) {
      if (MA->isScalarKind())
        continue;
      if (!MA->isRead())
        HasWriteAccess.insert(MA->getBaseAddr());
      MemAccInst Acc(MA->getAccessInstruction());
      PtrToAcc[Acc.getPointerOperand()] = MA;
      AST.add(Acc);
    }
  }

  SmallVector<AliasGroupTy, 4> AliasGroups;
  for (AliasSet &AS : AST) {
    if (AS.isMustAlias() || AS.isForwardingAliasSet())
      continue;
    AliasGroupTy AG;
    for (auto PR : AS)
      AG.push_back(PtrToAcc[PR.getValue()]);
    assert(AG.size() > 1 &&
           "Alias groups should contain at least two accesses");
    AliasGroups.push_back(std::move(AG));
  }

  // Split the alias groups based on their domain.
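  // Accesses whose domains are disjoint in the parameters, e.g. the
  // illustrative domains { S1[i] : p >= 1 } and { S2[i] : p <= 0 }, can never
  // be executed in the same scop execution, so they end up in separate groups
  // and no runtime check is emitted for such a pair.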
  for (unsigned u = 0; u < AliasGroups.size(); u++) {
    AliasGroupTy NewAG;
    AliasGroupTy &AG = AliasGroups[u];
    AliasGroupTy::iterator AGI = AG.begin();
    isl_set *AGDomain = getAccessDomain(*AGI);
    while (AGI != AG.end()) {
      MemoryAccess *MA = *AGI;
      isl_set *MADomain = getAccessDomain(MA);
      if (isl_set_is_disjoint(AGDomain, MADomain)) {
        NewAG.push_back(MA);
        AGI = AG.erase(AGI);
        isl_set_free(MADomain);
      } else {
        AGDomain = isl_set_union(AGDomain, MADomain);
        AGI++;
      }
    }
    if (NewAG.size() > 1)
      AliasGroups.push_back(std::move(NewAG));
    isl_set_free(AGDomain);
  }

  auto &F = *getRegion().getEntry()->getParent();
  MapVector<const Value *, SmallPtrSet<MemoryAccess *, 8>> ReadOnlyPairs;
  SmallPtrSet<const Value *, 4> NonReadOnlyBaseValues;
  for (AliasGroupTy &AG : AliasGroups) {
    NonReadOnlyBaseValues.clear();
    ReadOnlyPairs.clear();

    if (AG.size() < 2) {
      AG.clear();
      continue;
    }

    for (auto II = AG.begin(); II != AG.end();) {
      emitOptimizationRemarkAnalysis(
          F.getContext(), DEBUG_TYPE, F,
          (*II)->getAccessInstruction()->getDebugLoc(),
          "Possibly aliasing pointer, use restrict keyword.");

      Value *BaseAddr = (*II)->getBaseAddr();
      if (HasWriteAccess.count(BaseAddr)) {
        NonReadOnlyBaseValues.insert(BaseAddr);
        II++;
      } else {
        ReadOnlyPairs[BaseAddr].insert(*II);
        II = AG.erase(II);
      }
    }

    // If we don't have read only pointers, check if there are at least two
    // non read only pointers; otherwise clear the alias group.
    if (ReadOnlyPairs.empty() && NonReadOnlyBaseValues.size() <= 1) {
      AG.clear();
      continue;
    }

    // If we don't have non read only pointers clear the alias group.
    if (NonReadOnlyBaseValues.empty()) {
      AG.clear();
      continue;
    }

    // Calculate minimal and maximal accesses for non read only accesses.
    MinMaxAliasGroups.emplace_back();
    MinMaxVectorPairTy &pair = MinMaxAliasGroups.back();
    MinMaxVectorTy &MinMaxAccessesNonReadOnly = pair.first;
    MinMaxVectorTy &MinMaxAccessesReadOnly = pair.second;
    MinMaxAccessesNonReadOnly.reserve(AG.size());

    isl_union_map *Accesses = isl_union_map_empty(getParamSpace());

    // AG contains only non read only accesses.
    for (MemoryAccess *MA : AG)
      Accesses = isl_union_map_add_map(Accesses, MA->getAccessRelation());

    bool Valid = calculateMinMaxAccess(Accesses, getDomains(),
                                       MinMaxAccessesNonReadOnly);

    // Bail out if the number of values we need to compare is too large.
    // This is important as the number of comparisons grows quadratically with
    // the number of values we need to compare.
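    // With k arrays in a group, for instance, on the order of k * (k - 1) / 2
    // pairwise bound comparisons end up in the generated runtime check.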
    if (!Valid || (MinMaxAccessesNonReadOnly.size() + !ReadOnlyPairs.empty() >
                   RunTimeChecksMaxArraysPerGroup))
      return false;

    // Calculate minimal and maximal accesses for read only accesses.
    MinMaxAccessesReadOnly.reserve(ReadOnlyPairs.size());
    Accesses = isl_union_map_empty(getParamSpace());

    for (const auto &ReadOnlyPair : ReadOnlyPairs)
      for (MemoryAccess *MA : ReadOnlyPair.second)
        Accesses = isl_union_map_add_map(Accesses, MA->getAccessRelation());

    Valid =
        calculateMinMaxAccess(Accesses, getDomains(), MinMaxAccessesReadOnly);

    if (!Valid)
      return false;
  }

  return true;
}

/// @brief Get the smallest loop that contains @p R but is not in @p R.
static Loop *getLoopSurroundingRegion(Region &R, LoopInfo &LI) {
  // Start with the smallest loop containing the entry and expand that
  // loop until it contains all blocks in the region. If there is a loop
  // containing all blocks in the region, check if it is itself contained;
  // if so, take the parent loop, as it will be the smallest loop containing
  // the region but not contained by it.
  Loop *L = LI.getLoopFor(R.getEntry());
  while (L) {
    bool AllContained = true;
    for (auto *BB : R.blocks())
      AllContained &= L->contains(BB);
    if (AllContained)
      break;
    L = L->getParentLoop();
  }

  return L ? (R.contains(L) ? L->getParentLoop() : L) : nullptr;
}

static unsigned getMaxLoopDepthInRegion(const Region &R, LoopInfo &LI,
                                        ScopDetection &SD) {

  const ScopDetection::BoxedLoopsSetTy *BoxedLoops = SD.getBoxedLoops(&R);

  unsigned MinLD = INT_MAX, MaxLD = 0;
  for (BasicBlock *BB : R.blocks()) {
    if (Loop *L = LI.getLoopFor(BB)) {
      if (!R.contains(L))
        continue;
      if (BoxedLoops && BoxedLoops->count(L))
        continue;
      unsigned LD = L->getLoopDepth();
      MinLD = std::min(MinLD, LD);
      MaxLD = std::max(MaxLD, LD);
    }
  }

  // Handle the case that there is no loop in the SCoP first.
  if (MaxLD == 0)
    return 1;

  assert(MinLD >= 1 && "Minimal loop depth should be at least one");
  assert(MaxLD >= MinLD &&
         "Maximal loop depth was smaller than minimal loop depth?");
  return MaxLD - MinLD + 1;
}

Scop::Scop(Region &R, AccFuncMapType &AccFuncMap, ScopDetection &SD,
           ScalarEvolution &ScalarEvolution, DominatorTree &DT, LoopInfo &LI,
           isl_ctx *Context, unsigned MaxLoopDepth)
    : LI(LI), DT(DT), SE(&ScalarEvolution), SD(SD), R(R),
      AccFuncMap(AccFuncMap), IsOptimized(false),
      HasSingleExitEdge(R.getExitingBlock()), HasErrorBlock(false),
      MaxLoopDepth(MaxLoopDepth), IslCtx(Context), Context(nullptr),
      Affinator(this), AssumedContext(nullptr), BoundaryContext(nullptr),
      Schedule(nullptr) {
  buildContext();
}

void Scop::init(AliasAnalysis &AA, AssumptionCache &AC) {
  addUserAssumptions(AC);
  buildInvariantEquivalenceClasses();

  buildDomains(&R);

  // Remove empty and ignored statements.
  // Exit early in case there are no executable statements left in this scop.
  simplifySCoP(true);
  if (Stmts.empty())
    return;

  // The ScopStmts now have enough information to initialize themselves.
  for (ScopStmt &Stmt : Stmts)
    Stmt.init();

  buildSchedule();

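  // If the assumed context is already infeasible, further simplification is
  // pointless; hasFeasibleRuntimeContext() will reject this scop anyway.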
  if (isl_set_is_empty(AssumedContext))
    return;

  updateAccessDimensionality();
  realignParams();
  addParameterBounds();
  addUserContext();
  buildBoundaryContext();
  simplifyContexts();
  buildAliasChecks(AA);

  hoistInvariantLoads();
  simplifySCoP(false);
}

Scop::~Scop() {
  isl_set_free(Context);
  isl_set_free(AssumedContext);
  isl_set_free(BoundaryContext);
  isl_schedule_free(Schedule);

  for (auto It : DomainMap)
    isl_set_free(It.second);

  // Free the alias groups.
  for (MinMaxVectorPairTy &MinMaxAccessPair : MinMaxAliasGroups) {
    for (MinMaxAccessTy &MMA : MinMaxAccessPair.first) {
      isl_pw_multi_aff_free(MMA.first);
      isl_pw_multi_aff_free(MMA.second);
    }
    for (MinMaxAccessTy &MMA : MinMaxAccessPair.second) {
      isl_pw_multi_aff_free(MMA.first);
      isl_pw_multi_aff_free(MMA.second);
    }
  }

  for (const auto &IAClass : InvariantEquivClasses)
    isl_set_free(std::get<2>(IAClass));
}

void Scop::updateAccessDimensionality() {
  for (auto &Stmt : *this)
    for (auto &Access : Stmt)
      Access->updateDimensionality();
}
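
// Remove statements that cannot contribute to the final result: statements
// without memory accesses, statements with an empty domain, ignored
// statements (in the first pass), and read-only statements (in the second
// pass, once invariant load hoisting has taken place).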
void Scop::simplifySCoP(bool RemoveIgnoredStmts) {
  for (auto StmtIt = Stmts.begin(), StmtEnd = Stmts.end(); StmtIt != StmtEnd;) {
    ScopStmt &Stmt = *StmtIt;
    RegionNode *RN = Stmt.isRegionStmt()
                         ? Stmt.getRegion()->getNode()
                         : getRegion().getBBNode(Stmt.getBasicBlock());

    bool RemoveStmt = StmtIt->isEmpty();
    if (!RemoveStmt)
      RemoveStmt = isl_set_is_empty(DomainMap[getRegionNodeBasicBlock(RN)]);
    if (!RemoveStmt)
      RemoveStmt = (RemoveIgnoredStmts && isIgnored(RN));

    // Remove read only statements only after invariant load hoisting.
    if (!RemoveStmt && !RemoveIgnoredStmts) {
      bool OnlyRead = true;
      for (MemoryAccess *MA : Stmt) {
        if (MA->isRead())
          continue;

        OnlyRead = false;
        break;
      }

      RemoveStmt = OnlyRead;
    }

    if (RemoveStmt) {
      // Remove the statement because it is unnecessary.
      if (Stmt.isRegionStmt())
        for (BasicBlock *BB : Stmt.getRegion()->blocks())
          StmtMap.erase(BB);
      else
        StmtMap.erase(Stmt.getBasicBlock());

      StmtIt = Stmts.erase(StmtIt);
      continue;
    }

    StmtIt++;
  }
}

const InvariantEquivClassTy *Scop::lookupInvariantEquivClass(Value *Val) const {
  LoadInst *LInst = dyn_cast<LoadInst>(Val);
  if (!LInst)
    return nullptr;

  if (Value *Rep = InvEquivClassVMap.lookup(LInst))
    LInst = cast<LoadInst>(Rep);

  Type *Ty = LInst->getType();
  const SCEV *PointerSCEV = SE->getSCEV(LInst->getPointerOperand());
  for (auto &IAClass : InvariantEquivClasses)
    if (PointerSCEV == std::get<0>(IAClass) && Ty == std::get<3>(IAClass))
      return &IAClass;

  return nullptr;
}

void Scop::addInvariantLoads(ScopStmt &Stmt, MemoryAccessList &InvMAs) {

  // Get the context under which the statement is executed.
  isl_set *DomainCtx = isl_set_params(Stmt.getDomain());
  DomainCtx = isl_set_remove_redundancies(DomainCtx);
  DomainCtx = isl_set_detect_equalities(DomainCtx);
  DomainCtx = isl_set_coalesce(DomainCtx);

  // Project out all parameters that relate to loads in the statement.
  // Otherwise we could have cyclic dependences on the constraints under which
  // the hoisted loads are executed and could not determine an order in which
  // to pre-load them. This happens because not only lower bounds are part of
  // the domain but also upper bounds.
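  // A hypothetical instance of such a cycle: the value of a hoisted load %a
  // occurs as a parameter in the execution context of a hoisted load %b,
  // while the value of %b in turn constrains the domain of the statement
  // containing %a. Eliminating load-derived parameters breaks the cycle.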
  for (MemoryAccess *MA : InvMAs) {
    Instruction *AccInst = MA->getAccessInstruction();
    if (SE->isSCEVable(AccInst->getType())) {
      SetVector<Value *> Values;
      for (const SCEV *Parameter : Parameters) {
        Values.clear();
        findValues(Parameter, Values);
        if (!Values.count(AccInst))
          continue;

        if (isl_id *ParamId = getIdForParam(Parameter)) {
          int Dim = isl_set_find_dim_by_id(DomainCtx, isl_dim_param, ParamId);
          DomainCtx = isl_set_eliminate(DomainCtx, isl_dim_param, Dim, 1);
          isl_id_free(ParamId);
        }
      }
    }
  }

  for (MemoryAccess *MA : InvMAs) {
    // Check for another invariant access that accesses the same location as
    // MA and, if found, consolidate them. Otherwise create a new equivalence
    // class at the end of InvariantEquivClasses.
    LoadInst *LInst = cast<LoadInst>(MA->getAccessInstruction());
    Type *Ty = LInst->getType();
    const SCEV *PointerSCEV = SE->getSCEV(LInst->getPointerOperand());

    bool Consolidated = false;
    for (auto &IAClass : InvariantEquivClasses) {
      if (PointerSCEV != std::get<0>(IAClass) || Ty != std::get<3>(IAClass))
        continue;

      Consolidated = true;

      // Add MA to the list of accesses that are in this class.
      auto &MAs = std::get<1>(IAClass);
      MAs.push_front(MA);

      // Unify the execution context of the class and this statement.
      isl_set *&IAClassDomainCtx = std::get<2>(IAClass);
      if (IAClassDomainCtx)
        IAClassDomainCtx = isl_set_coalesce(
            isl_set_union(IAClassDomainCtx, isl_set_copy(DomainCtx)));
      else
        IAClassDomainCtx = isl_set_copy(DomainCtx);
      break;
    }

    if (Consolidated)
      continue;

    // If we did not consolidate MA, thus did not find an equivalence class
    // for it, we create a new one.
    InvariantEquivClasses.emplace_back(PointerSCEV, MemoryAccessList{MA},
                                       isl_set_copy(DomainCtx), Ty);
  }

  isl_set_free(DomainCtx);
}

bool Scop::isHoistableAccess(MemoryAccess *Access,
                             __isl_keep isl_union_map *Writes) {
  // TODO: Loads that are not loop carried, hence are in a statement with
  //       zero iterators, are by construction invariant, though we
  //       currently "hoist" them anyway. This is necessary because we allow
  //       them to be treated as parameters (e.g., in conditions) and our code
  //       generation would otherwise use the old value.

  auto &Stmt = *Access->getStatement();
  BasicBlock *BB =
      Stmt.isBlockStmt() ? Stmt.getBasicBlock() : Stmt.getRegion()->getEntry();

  if (Access->isScalarKind() || Access->isWrite() || !Access->isAffine())
    return false;

  // Skip accesses that have an invariant base pointer which is defined but
  // not loaded inside the SCoP. This can happen, e.g., if a readnone call
  // returns a pointer that is used as a base address. However, as we want
  // to hoist indirect pointers, we allow the base pointer to be defined in
  // the region if it is also a memory access. Each ScopArrayInfo object
  // that has a base pointer origin has a base pointer that is loaded and
  // that is invariant, thus it will be hoisted too. However, if there is
  // no base pointer origin we check that the base pointer is defined
  // outside the region.
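  // Hypothetical example of the first case: %base = call i8* @get_buf()
  // with a readnone callee inside the region, used as the base of a load.
  // The base pointer is defined in the region but is not itself a hoistable
  // load, so the access must not be hoisted.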
  const ScopArrayInfo *SAI = Access->getScopArrayInfo();
  while (auto *BasePtrOriginSAI = SAI->getBasePtrOriginSAI())
    SAI = BasePtrOriginSAI;

  if (auto *BasePtrInst = dyn_cast<Instruction>(SAI->getBasePtr()))
    if (R.contains(BasePtrInst))
      return false;

  // Skip accesses in non-affine subregions as they might not be executed
  // under the same condition as the entry of the non-affine subregion.
  if (BB != Access->getAccessInstruction()->getParent())
    return false;

  isl_map *AccessRelation = Access->getAccessRelation();

  // Skip accesses that have an empty access relation. These can be caused
  // by multiple offsets with a type cast in-between that cause the overall
  // byte offset to be not divisible by the new type's size.
  if (isl_map_is_empty(AccessRelation)) {
    isl_map_free(AccessRelation);
    return false;
  }
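
  // Only accesses that are not loop carried, i.e., whose access relation
  // does not depend on any of the statement's iterators, can be treated as
  // invariant; the check below tests exactly this.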
  if (isl_map_involves_dims(AccessRelation, isl_dim_in, 0,
                            Stmt.getNumIterators())) {
    isl_map_free(AccessRelation);
    return false;
  }

  AccessRelation = isl_map_intersect_domain(AccessRelation, Stmt.getDomain());
  isl_set *AccessRange = isl_map_range(AccessRelation);

  isl_union_map *Written = isl_union_map_intersect_range(
      isl_union_map_copy(Writes), isl_union_set_from_set(AccessRange));
  bool IsWritten = !isl_union_map_is_empty(Written);
  isl_union_map_free(Written);

  if (IsWritten)
    return false;

  return true;
}
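
// Verify that all required invariant loads have actually been hoisted; if
// one of them is still modeled as an array access, the scop is invalidated.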
void Scop::verifyInvariantLoads() {
  auto &RIL = *SD.getRequiredInvariantLoads(&getRegion());
  for (LoadInst *LI : RIL) {
    assert(LI && getRegion().contains(LI));
    ScopStmt *Stmt = getStmtForBasicBlock(LI->getParent());
    if (Stmt && Stmt->getArrayAccessOrNULLFor(LI)) {
      invalidate(INVARIANTLOAD, LI->getDebugLoc());
      return;
    }
  }
}

void Scop::hoistInvariantLoads() {
  isl_union_map *Writes = getWrites();
  for (ScopStmt &Stmt : *this) {

    MemoryAccessList InvariantAccesses;

    for (MemoryAccess *Access : Stmt)
      if (isHoistableAccess(Access, Writes))
        InvariantAccesses.push_front(Access);

    // We always inserted invariant accesses at the front, but we need them
    // sorted in a "natural order". The statements are already sorted in
    // reverse post order and that suffices for the accesses too. The reason
    // we require an order in the first place are the dependences between
    // invariant loads that can be caused by indirect loads.
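    // Hypothetical indirect-load chain: %p = load @Table; %v = load %p. %v
    // can only be preloaded once %p has been preloaded, so the original
    // order of the accesses has to be preserved.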
    InvariantAccesses.reverse();

    // Transfer the memory accesses from the statement to the SCoP.
    Stmt.removeMemoryAccesses(InvariantAccesses);
    addInvariantLoads(Stmt, InvariantAccesses);
  }
  isl_union_map_free(Writes);

  verifyInvariantLoads();
}

const ScopArrayInfo *
Scop::getOrCreateScopArrayInfo(Value *BasePtr, Type *ElementType,
                               ArrayRef<const SCEV *> Sizes,
                               ScopArrayInfo::MemoryKind Kind) {
  auto &SAI = ScopArrayInfoMap[std::make_pair(BasePtr, Kind)];
  if (!SAI) {
    auto &DL = getRegion().getEntry()->getModule()->getDataLayout();
    SAI.reset(new ScopArrayInfo(BasePtr, ElementType, getIslCtx(), Sizes, Kind,
                                DL, this));
  } else {
    // In case of mismatching array sizes, we bail out by setting the run-time
    // context to false.
    if (!SAI->updateSizes(Sizes, ElementType))
      invalidate(DELINEARIZATION, DebugLoc());
  }
  return SAI.get();
}

const ScopArrayInfo *Scop::getScopArrayInfo(Value *BasePtr,
                                            ScopArrayInfo::MemoryKind Kind) {
  auto *SAI = ScopArrayInfoMap[std::make_pair(BasePtr, Kind)].get();
  assert(SAI && "No ScopArrayInfo available for this base pointer");
  return SAI;
}

std::string Scop::getContextStr() const { return stringFromIslObj(Context); }

std::string Scop::getAssumedContextStr() const {
  return stringFromIslObj(AssumedContext);
}

std::string Scop::getBoundaryContextStr() const {
  return stringFromIslObj(BoundaryContext);
}

std::string Scop::getNameStr() const {
  std::string ExitName, EntryName;
  raw_string_ostream ExitStr(ExitName);
  raw_string_ostream EntryStr(EntryName);

  R.getEntry()->printAsOperand(EntryStr, false);
  EntryStr.str();

  if (R.getExit()) {
    R.getExit()->printAsOperand(ExitStr, false);
    ExitStr.str();
  } else
    ExitName = "FunctionExit";

  return EntryName + "---" + ExitName;
}

__isl_give isl_set *Scop::getContext() const { return isl_set_copy(Context); }

__isl_give isl_space *Scop::getParamSpace() const {
  return isl_set_get_space(Context);
}

__isl_give isl_set *Scop::getAssumedContext() const {
  return isl_set_copy(AssumedContext);
}

__isl_give isl_set *Scop::getRuntimeCheckContext() const {
  isl_set *RuntimeCheckContext = getAssumedContext();
  RuntimeCheckContext =
      isl_set_intersect(RuntimeCheckContext, getBoundaryContext());
  RuntimeCheckContext = simplifyAssumptionContext(RuntimeCheckContext, *this);
  return RuntimeCheckContext;
}
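
// A scop is only useful if its runtime check context is satisfiable for at
// least one parameter valuation that also yields a non-empty iteration
// domain; otherwise the optimized code version could never execute.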
bool Scop::hasFeasibleRuntimeContext() const {
  isl_set *RuntimeCheckContext = getRuntimeCheckContext();
  RuntimeCheckContext = addNonEmptyDomainConstraints(RuntimeCheckContext);
  bool IsFeasible = !isl_set_is_empty(RuntimeCheckContext);
  isl_set_free(RuntimeCheckContext);
  return IsFeasible;
}

static std::string toString(AssumptionKind Kind) {
  switch (Kind) {
  case ALIASING:
    return "No-aliasing";
  case INBOUNDS:
    return "Inbounds";
  case WRAPPING:
    return "No-overflows";
  case ALIGNMENT:
    return "Alignment";
  case ERRORBLOCK:
    return "No-error";
  case INFINITELOOP:
    return "Finite loop";
  case INVARIANTLOAD:
    return "Invariant load";
  case DELINEARIZATION:
    return "Delinearization";
  case ERROR_DOMAINCONJUNCTS:
    return "Low number of domain conjuncts";
  }
  llvm_unreachable("Unknown AssumptionKind!");
}

void Scop::trackAssumption(AssumptionKind Kind, __isl_keep isl_set *Set,
                           DebugLoc Loc) {
  if (isl_set_is_subset(Context, Set))
    return;

  if (isl_set_is_subset(AssumedContext, Set))
    return;

  auto &F = *getRegion().getEntry()->getParent();
  std::string Msg = toString(Kind) + " assumption:\t" + stringFromIslObj(Set);
  emitOptimizationRemarkAnalysis(F.getContext(), DEBUG_TYPE, F, Loc, Msg);
}

void Scop::addAssumption(AssumptionKind Kind, __isl_take isl_set *Set,
                         DebugLoc Loc) {
  trackAssumption(Kind, Set, Loc);
  AssumedContext = isl_set_intersect(AssumedContext, Set);

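  // If the assumed context accumulates too many disjuncts, we give up and
  // assume the empty context instead; the scop then has no feasible runtime
  // check context and will be dismissed.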
  int NSets = isl_set_n_basic_set(AssumedContext);
  if (NSets >= MaxDisjunctsAssumed) {
    isl_space *Space = isl_set_get_space(AssumedContext);
    isl_set_free(AssumedContext);
    AssumedContext = isl_set_empty(Space);
  }

  AssumedContext = isl_set_coalesce(AssumedContext);
}

void Scop::invalidate(AssumptionKind Kind, DebugLoc Loc) {
  addAssumption(Kind, isl_set_empty(getParamSpace()), Loc);
}

__isl_give isl_set *Scop::getBoundaryContext() const {
  return isl_set_copy(BoundaryContext);
}

void Scop::printContext(raw_ostream &OS) const {
  OS << "Context:\n";

  if (!Context) {
    OS.indent(4) << "n/a\n\n";
    return;
  }

  OS.indent(4) << getContextStr() << "\n";

  OS.indent(4) << "Assumed Context:\n";
  if (!AssumedContext) {
    OS.indent(4) << "n/a\n\n";
    return;
  }

  OS.indent(4) << getAssumedContextStr() << "\n";

  OS.indent(4) << "Boundary Context:\n";
  if (!BoundaryContext) {
    OS.indent(4) << "n/a\n\n";
    return;
  }

  OS.indent(4) << getBoundaryContextStr() << "\n";

  for (const SCEV *Parameter : Parameters) {
    int Dim = ParameterIds.find(Parameter)->second;
    OS.indent(4) << "p" << Dim << ": " << *Parameter << "\n";
  }
}

void Scop::printAliasAssumptions(raw_ostream &OS) const {
  int noOfGroups = 0;
  for (const MinMaxVectorPairTy &Pair : MinMaxAliasGroups) {
    if (Pair.second.size() == 0)
      noOfGroups += 1;
    else
      noOfGroups += Pair.second.size();
  }

  OS.indent(4) << "Alias Groups (" << noOfGroups << "):\n";
  if (MinMaxAliasGroups.empty()) {
    OS.indent(8) << "n/a\n";
    return;
  }

  for (const MinMaxVectorPairTy &Pair : MinMaxAliasGroups) {

    // If the group has no read only accesses print the write accesses.
    if (Pair.second.empty()) {
      OS.indent(8) << "[[";
      for (const MinMaxAccessTy &MMANonReadOnly : Pair.first) {
        OS << " <" << MMANonReadOnly.first << ", " << MMANonReadOnly.second
           << ">";
      }
      OS << " ]]\n";
    }

    for (const MinMaxAccessTy &MMAReadOnly : Pair.second) {
      OS.indent(8) << "[[";
      OS << " <" << MMAReadOnly.first << ", " << MMAReadOnly.second << ">";
      for (const MinMaxAccessTy &MMANonReadOnly : Pair.first) {
        OS << " <" << MMANonReadOnly.first << ", " << MMANonReadOnly.second
           << ">";
      }
      OS << " ]]\n";
    }
  }
}

void Scop::printStatements(raw_ostream &OS) const {
  OS << "Statements {\n";

  for (const ScopStmt &Stmt : *this)
    OS.indent(4) << Stmt;

  OS.indent(4) << "}\n";
}

void Scop::printArrayInfo(raw_ostream &OS) const {
  OS << "Arrays {\n";

  for (auto &Array : arrays())
    Array.second->print(OS);

  OS.indent(4) << "}\n";

  OS.indent(4) << "Arrays (Bounds as pw_affs) {\n";

  for (auto &Array : arrays())
    Array.second->print(OS, /* SizeAsPwAff */ true);

  OS.indent(4) << "}\n";
}

void Scop::print(raw_ostream &OS) const {
  OS.indent(4) << "Function: " << getRegion().getEntry()->getParent()->getName()
               << "\n";
  OS.indent(4) << "Region: " << getNameStr() << "\n";
  OS.indent(4) << "Max Loop Depth: " << getMaxLoopDepth() << "\n";
  OS.indent(4) << "Invariant Accesses: {\n";
  for (const auto &IAClass : InvariantEquivClasses) {
    const auto &MAs = std::get<1>(IAClass);
    if (MAs.empty()) {
      OS.indent(12) << "Class Pointer: " << *std::get<0>(IAClass) << "\n";
    } else {
      MAs.front()->print(OS);
      OS.indent(12) << "Execution Context: " << std::get<2>(IAClass) << "\n";
    }
  }
  OS.indent(4) << "}\n";
  printContext(OS.indent(4));
  printArrayInfo(OS.indent(4));
  printAliasAssumptions(OS);
  printStatements(OS.indent(4));
}

void Scop::dump() const { print(dbgs()); }

isl_ctx *Scop::getIslCtx() const { return IslCtx; }

__isl_give isl_pw_aff *Scop::getPwAff(const SCEV *E, BasicBlock *BB) {
  return Affinator.getPwAff(E, BB);
}

__isl_give isl_union_set *Scop::getDomains() const {
  isl_union_set *Domain = isl_union_set_empty(getParamSpace());

  for (const ScopStmt &Stmt : *this)
    Domain = isl_union_set_add_set(Domain, Stmt.getDomain());

  return Domain;
}

__isl_give isl_union_map *
Scop::getAccessesOfType(std::function<bool(MemoryAccess &)> Predicate) {
  isl_union_map *Accesses = isl_union_map_empty(getParamSpace());

  for (ScopStmt &Stmt : *this) {
    for (MemoryAccess *MA : Stmt) {
      if (!Predicate(*MA))
        continue;

      isl_set *Domain = Stmt.getDomain();
      isl_map *AccessDomain = MA->getAccessRelation();
      AccessDomain = isl_map_intersect_domain(AccessDomain, Domain);
      Accesses = isl_union_map_add_map(Accesses, AccessDomain);
    }
  }
  return isl_union_map_coalesce(Accesses);
}

__isl_give isl_union_map *Scop::getMustWrites() {
  return getAccessesOfType([](MemoryAccess &MA) { return MA.isMustWrite(); });
}

__isl_give isl_union_map *Scop::getMayWrites() {
  return getAccessesOfType([](MemoryAccess &MA) { return MA.isMayWrite(); });
}

__isl_give isl_union_map *Scop::getWrites() {
  return getAccessesOfType([](MemoryAccess &MA) { return MA.isWrite(); });
}

__isl_give isl_union_map *Scop::getReads() {
  return getAccessesOfType([](MemoryAccess &MA) { return MA.isRead(); });
}

__isl_give isl_union_map *Scop::getAccesses() {
  return getAccessesOfType([](MemoryAccess &MA) { return true; });
}

__isl_give isl_union_map *Scop::getSchedule() const {
  auto Tree = getScheduleTree();
  auto S = isl_schedule_get_map(Tree);
  isl_schedule_free(Tree);
  return S;
}

__isl_give isl_schedule *Scop::getScheduleTree() const {
  return isl_schedule_intersect_domain(isl_schedule_copy(Schedule),
                                       getDomains());
}

void Scop::setSchedule(__isl_take isl_union_map *NewSchedule) {
  auto *S = isl_schedule_from_domain(getDomains());
  S = isl_schedule_insert_partial_schedule(
      S, isl_multi_union_pw_aff_from_union_map(NewSchedule));
  isl_schedule_free(Schedule);
  Schedule = S;
}

void Scop::setScheduleTree(__isl_take isl_schedule *NewSchedule) {
  isl_schedule_free(Schedule);
  Schedule = NewSchedule;
}
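
// Restrict the domain of each statement to its intersection with @p Domain;
// returns true if any statement domain actually changed.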
bool Scop::restrictDomains(__isl_take isl_union_set *Domain) {
  bool Changed = false;
  for (ScopStmt &Stmt : *this) {
    isl_union_set *StmtDomain = isl_union_set_from_set(Stmt.getDomain());
    isl_union_set *NewStmtDomain = isl_union_set_intersect(
        isl_union_set_copy(StmtDomain), isl_union_set_copy(Domain));

    if (isl_union_set_is_subset(StmtDomain, NewStmtDomain)) {
      isl_union_set_free(StmtDomain);
      isl_union_set_free(NewStmtDomain);
      continue;
    }

    Changed = true;

    isl_union_set_free(StmtDomain);
    NewStmtDomain = isl_union_set_coalesce(NewStmtDomain);

    if (isl_union_set_is_empty(NewStmtDomain)) {
      Stmt.restrictDomain(isl_set_empty(Stmt.getDomainSpace()));
      isl_union_set_free(NewStmtDomain);
    } else
      Stmt.restrictDomain(isl_set_from_union_set(NewStmtDomain));
  }
  isl_union_set_free(Domain);
  return Changed;
}

ScalarEvolution *Scop::getSE() const { return SE; }

bool Scop::isIgnored(RegionNode *RN) {
  BasicBlock *BB = getRegionNodeBasicBlock(RN);
  ScopStmt *Stmt = getStmtForRegionNode(RN);

  // If there is no stmt, then it already has been removed.
  if (!Stmt)
    return true;

  // Check if there are accesses contained.
  if (Stmt->isEmpty())
    return true;

  // Check for reachability via non-error blocks.
  if (!DomainMap.count(BB))
    return true;

  // Check if error blocks are contained.
  if (containsErrorBlock(RN, getRegion(), LI, DT))
    return true;

  return false;
}

struct MapToDimensionDataTy {
  int N;
  isl_union_pw_multi_aff *Res;
};

// @brief Create a function that maps the elements of 'Set' to its N-th
//        dimension and add it to User->Res.
//
// @param Set       The input set.
// @param User->N   The dimension to map to.
// @param User->Res The isl_union_pw_multi_aff to which to add the result.
//
// @returns isl_stat_ok if no error occurred, otherwise isl_stat_error.
static isl_stat mapToDimension_AddSet(__isl_take isl_set *Set, void *User) {
  struct MapToDimensionDataTy *Data = (struct MapToDimensionDataTy *)User;
  int Dim;
  isl_space *Space;
  isl_pw_multi_aff *PMA;

  Dim = isl_set_dim(Set, isl_dim_set);
  Space = isl_set_get_space(Set);
  PMA = isl_pw_multi_aff_project_out_map(Space, isl_dim_set, Data->N,
                                         Dim - Data->N);
  if (Data->N > 1)
    PMA = isl_pw_multi_aff_drop_dims(PMA, isl_dim_out, 0, Data->N - 1);
  Data->Res = isl_union_pw_multi_aff_add_pw_multi_aff(Data->Res, PMA);

  isl_set_free(Set);

  return isl_stat_ok;
}

// @brief Create an isl_multi_union_aff that defines an identity mapping
//        from the elements of USet to their N-th dimension.
//
// # Example:
//
//   Domain: { A[i,j]; B[i,j,k] }
//   N: 1
//
//   Resulting Mapping: { A[i,j] -> [(j)]; B[i,j,k] -> [(j)] }
//
// @param USet A union set describing the elements for which to generate a
//             mapping.
// @param N    The dimension to map to.
// @returns    A mapping from USet to its N-th dimension.
static __isl_give isl_multi_union_pw_aff *
mapToDimension(__isl_take isl_union_set *USet, int N) {
  assert(N >= 0);
  assert(USet);
  assert(!isl_union_set_is_empty(USet));

  struct MapToDimensionDataTy Data;

  auto *Space = isl_union_set_get_space(USet);
  auto *PwAff = isl_union_pw_multi_aff_empty(Space);

  Data = {N, PwAff};

  auto Res = isl_union_set_foreach_set(USet, &mapToDimension_AddSet, &Data);

  (void)Res;

  assert(Res == isl_stat_ok);

  isl_union_set_free(USet);
  return isl_multi_union_pw_aff_from_union_pw_multi_aff(Data.Res);
}

void Scop::addScopStmt(BasicBlock *BB, Region *R) {
  if (BB) {
    Stmts.emplace_back(*this, *BB);
    auto Stmt = &Stmts.back();
    StmtMap[BB] = Stmt;
  } else {
    assert(R && "Either a basic block or a region expected.");
    Stmts.emplace_back(*this, *R);
    auto Stmt = &Stmts.back();
    for (BasicBlock *BB : R->blocks())
      StmtMap[BB] = Stmt;
  }
}
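
// Drive schedule construction from the outermost loop that surrounds the
// region: after the traversal exactly one stack element remains, holding the
// complete schedule of the scop.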
void Scop::buildSchedule() {
  Loop *L = getLoopSurroundingRegion(getRegion(), LI);
  LoopStackTy LoopStack({LoopStackElementTy(L, nullptr, 0)});
  buildSchedule(getRegion().getNode(), LoopStack);
  assert(LoopStack.size() == 1 && LoopStack.back().L == L);
  Schedule = LoopStack[0].Schedule;
}
|
|
|
|
|
2016-02-01 19:54:13 +08:00
|
|
|
/// To generate a schedule for the elements in a Region we traverse the Region
|
|
|
|
/// in reverse-post-order and add the contained RegionNodes in traversal order
|
|
|
|
/// to the schedule of the loop that is currently at the top of the LoopStack.
|
|
|
|
/// For loop-free codes, this results in a correct sequential ordering.
|
|
|
|
///
|
|
|
|
/// Example:
|
|
|
|
/// bb1(0)
|
|
|
|
/// / \.
|
|
|
|
/// bb2(1) bb3(2)
|
|
|
|
/// \ / \.
|
|
|
|
/// bb4(3) bb5(4)
|
|
|
|
/// \ /
|
|
|
|
/// bb6(5)
|
|
|
|
///
|
|
|
|
/// Including loops requires additional processing. Whenever a loop header is
|
|
|
|
/// encountered, the corresponding loop is added to the @p LoopStack. Starting
|
|
|
|
/// from an empty schedule, we first process all RegionNodes that are within
|
|
|
|
/// this loop and complete the sequential schedule at this loop-level before
|
|
|
|
/// processing about any other nodes. To implement this
|
|
|
|
/// loop-nodes-first-processing, the reverse post-order traversal is
|
|
|
|
/// insufficient. Hence, we additionally check if the traversal yields
|
|
|
|
/// sub-regions or blocks that are outside the last loop on the @p LoopStack.
|
|
|
|
/// These region-nodes are then queue and only traverse after the all nodes
|
|
|
|
/// within the current loop have been processed.
void Scop::buildSchedule(Region *R, LoopStackTy &LoopStack) {
  Loop *OuterScopLoop = getLoopSurroundingRegion(getRegion(), LI);

  ReversePostOrderTraversal<Region *> RTraversal(R);
  std::deque<RegionNode *> WorkList(RTraversal.begin(), RTraversal.end());
  std::deque<RegionNode *> DelayList;
  bool LastRNWaiting = false;

  // Iterate over the region @p R in reverse post-order but queue
  // sub-regions/blocks iff they are not part of the last encountered but not
  // completely traversed loop. The variable LastRNWaiting is a flag that
  // indicates that we queued the last sub-region/block from the reverse
  // post-order iterator. If it is set we have to explore the next
  // sub-region/block from the iterator (if any) to guarantee progress. If it
  // is not set we first try the next queued sub-regions/blocks.
  while (!WorkList.empty() || !DelayList.empty()) {
    RegionNode *RN;

    if ((LastRNWaiting && !WorkList.empty()) || DelayList.empty()) {
      RN = WorkList.front();
      WorkList.pop_front();
      LastRNWaiting = false;
    } else {
      RN = DelayList.front();
      DelayList.pop_front();
    }

    Loop *L = getRegionNodeLoop(RN, LI);
    if (!getRegion().contains(L))
      L = OuterScopLoop;

    Loop *LastLoop = LoopStack.back().L;
    if (LastLoop != L) {
      if (!LastLoop->contains(L)) {
        LastRNWaiting = true;
        DelayList.push_back(RN);
        continue;
      }
      LoopStack.push_back({L, nullptr, 0});
    }
    buildSchedule(RN, LoopStack);
  }
}
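
/// Process a single RegionNode: recurse into affine subregions, append the
/// domain of the node's statement (if any) to the schedule of the innermost
/// open loop, and fold every loop that is completed by this node into its
/// parent's schedule.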
void Scop::buildSchedule(RegionNode *RN, LoopStackTy &LoopStack) {
  if (RN->isSubRegion()) {
    auto *LocalRegion = RN->getNodeAs<Region>();
    if (!SD.isNonAffineSubRegion(LocalRegion, &getRegion())) {
      buildSchedule(LocalRegion, LoopStack);
      return;
    }
  }

  auto &LoopData = LoopStack.back();
  LoopData.NumBlocksProcessed += getNumBlocksInRegionNode(RN);

  if (auto *Stmt = getStmtForRegionNode(RN)) {
    auto *UDomain = isl_union_set_from_set(Stmt->getDomain());
    auto *StmtSchedule = isl_schedule_from_domain(UDomain);
    LoopData.Schedule = combineInSequence(LoopData.Schedule, StmtSchedule);
  }

  // Check if we just processed the last node in this loop. If we did, finalize
  // the loop by:
  //
  //   - adding new schedule dimensions
  //   - folding the resulting schedule into the parent loop schedule
  //   - dropping the loop schedule from the LoopStack.
  //
  // Then continue to check surrounding loops, which might also have been
  // completed by this node.
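  //
  // Illustrative walk-through (added; assumes a 2-deep loop nest): when the
  // inner loop's last block has been processed, its schedule is popped from
  // the LoopStack, a partial schedule for the inner loop dimension is
  // inserted via mapToDimension, and the result is appended in sequence to
  // the schedule of the enclosing loop.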
  while (LoopData.L &&
         LoopData.NumBlocksProcessed == LoopData.L->getNumBlocks()) {
    auto Schedule = LoopData.Schedule;
    auto NumBlocksProcessed = LoopData.NumBlocksProcessed;

    LoopStack.pop_back();
    auto &NextLoopData = LoopStack.back();

    if (Schedule) {
      auto *Domain = isl_schedule_get_domain(Schedule);
      auto *MUPA = mapToDimension(Domain, LoopStack.size());
      Schedule = isl_schedule_insert_partial_schedule(Schedule, MUPA);
      NextLoopData.Schedule =
          combineInSequence(NextLoopData.Schedule, Schedule);
    }

    NextLoopData.NumBlocksProcessed += NumBlocksProcessed;
    LoopData = NextLoopData;
  }
}

ScopStmt *Scop::getStmtForBasicBlock(BasicBlock *BB) const {
  auto StmtMapIt = StmtMap.find(BB);
  if (StmtMapIt == StmtMap.end())
    return nullptr;
  return StmtMapIt->second;
}

ScopStmt *Scop::getStmtForRegionNode(RegionNode *RN) const {
  return getStmtForBasicBlock(getRegionNodeBasicBlock(RN));
}

int Scop::getRelativeLoopDepth(const Loop *L) const {
  Loop *OuterLoop =
      L ? R.outermostLoopInRegion(const_cast<Loop *>(L)) : nullptr;
  if (!OuterLoop)
    return -1;
  return L->getLoopDepth() - OuterLoop->getLoopDepth();
}

void ScopInfo::buildPHIAccesses(PHINode *PHI, Region &R,
                                Region *NonAffineSubRegion, bool IsExitBlock) {

  // PHI nodes that are in the exit block of the region, hence if IsExitBlock
  // is true, are not modeled as ordinary PHI nodes as they are not part of the
  // region. However, we model the operands in the predecessor blocks that are
  // part of the region as regular scalar accesses.

  // If we can synthesize a PHI we can skip it, however only if it is in
  // the region. If it is not it can only be in the exit block of the region.
  // In this case we model the operands but not the PHI itself.
  if (!IsExitBlock && canSynthesize(PHI, LI, SE, &R))
    return;

  // PHI nodes are modeled as if they had been demoted prior to the SCoP
  // detection. Hence, the PHI is a load of a new memory location in which the
  // incoming value was written at the end of the incoming basic block.
  bool OnlyNonAffineSubRegionOperands = true;
  for (unsigned u = 0; u < PHI->getNumIncomingValues(); u++) {
    Value *Op = PHI->getIncomingValue(u);
    BasicBlock *OpBB = PHI->getIncomingBlock(u);

    // Do not build scalar dependences inside a non-affine subregion.
    if (NonAffineSubRegion && NonAffineSubRegion->contains(OpBB))
      continue;

    OnlyNonAffineSubRegionOperands = false;
    ensurePHIWrite(PHI, OpBB, Op, IsExitBlock);
  }

  if (!OnlyNonAffineSubRegionOperands && !IsExitBlock) {
    addPHIReadAccess(PHI);
  }
}

void ScopInfo::buildScalarDependences(Instruction *Inst) {
  assert(!isa<PHINode>(Inst));

  // Pull-in required operands.
  for (Use &Op : Inst->operands())
    ensureValueRead(Op.get(), Inst->getParent());
}
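
/// Create a value write for every definition inside the SCoP that is still
/// used after the SCoP, either by an instruction outside of it or by a PHI
/// node in the SCoP's exit block.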
void ScopInfo::buildEscapingDependences(Instruction *Inst) {
  Region *R = &scop->getRegion();

  // Check for uses of this instruction outside the scop. Because we do not
  // iterate over such instructions and therefore have not "ensured" the
  // existence of a write, we must detect such uses here.
  for (Use &U : Inst->uses()) {
    Instruction *UI = dyn_cast<Instruction>(U.getUser());
    if (!UI)
      continue;

    BasicBlock *UseParent = getUseBlock(U);
    BasicBlock *UserParent = UI->getParent();

    // An escaping value is either used by an instruction not within the scop,
    // or (when the scop region's exit needs to be simplified) by a PHI in the
    // scop's exit block. This is because region simplification before code
    // generation inserts new basic blocks before the PHI such that its
    // incoming blocks are not in the scop anymore.
    if (!R->contains(UseParent) ||
        (isa<PHINode>(UI) && UserParent == R->getExit() &&
         R->getExitingBlock())) {
      // At least one escaping use found.
      ensureValueWrite(Inst);
      break;
    }
  }
}

extern MapInsnToMemAcc InsnToMemAcc;
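
/// Try to model @p Inst as a multi-dimensional access into an array with
/// compile-time constant dimension sizes, recovering subscripts and sizes
/// from the GEP (possibly hidden behind a size-preserving bitcast) that
/// computes the accessed address. Returns true on success. For example
/// (added illustration), an access into `float A[512][512]` can be modeled
/// as A[i][j] instead of the flattened A[512 * i + j].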
bool ScopInfo::buildAccessMultiDimFixed(
    MemAccInst Inst, Loop *L, Region *R,
    const ScopDetection::BoxedLoopsSetTy *BoxedLoops,
    const InvariantLoadsSetTy &ScopRIL) {
  Value *Val = Inst.getValueOperand();
  Type *SizeType = Val->getType();
  unsigned ElementSize = DL->getTypeAllocSize(SizeType);
  Value *Address = Inst.getPointerOperand();
  const SCEV *AccessFunction = SE->getSCEVAtScope(Address, L);
  const SCEVUnknown *BasePointer =
      dyn_cast<SCEVUnknown>(SE->getPointerBase(AccessFunction));
  enum MemoryAccess::AccessType Type =
      Inst.isLoad() ? MemoryAccess::READ : MemoryAccess::MUST_WRITE;

  if (isa<GetElementPtrInst>(Address) || isa<BitCastInst>(Address)) {
    auto NewAddress = Address;
    if (auto *BitCast = dyn_cast<BitCastInst>(Address)) {
      auto Src = BitCast->getOperand(0);
      auto SrcTy = Src->getType();
      auto DstTy = BitCast->getType();
      if (SrcTy->getPrimitiveSizeInBits() == DstTy->getPrimitiveSizeInBits())
        NewAddress = Src;
    }

    if (auto *GEP = dyn_cast<GetElementPtrInst>(NewAddress)) {
      std::vector<const SCEV *> Subscripts;
      std::vector<int> Sizes;
      std::tie(Subscripts, Sizes) = getIndexExpressionsFromGEP(GEP, *SE);
      auto BasePtr = GEP->getOperand(0);

      std::vector<const SCEV *> SizesSCEV;

      for (auto Subscript : Subscripts) {
        InvariantLoadsSetTy AccessILS;
        if (!isAffineExpr(R, Subscript, *SE, nullptr, &AccessILS))
          return false;

        for (LoadInst *LInst : AccessILS)
          if (!ScopRIL.count(LInst))
            return false;
      }

      if (!Sizes.empty()) {
        for (auto V : Sizes)
          SizesSCEV.push_back(SE->getSCEV(ConstantInt::get(
              IntegerType::getInt64Ty(BasePtr->getContext()), V)));

        addArrayAccess(Inst, Type, BasePointer->getValue(), ElementSize, true,
                       Subscripts, SizesSCEV, Val);
        return true;
      }
    }
  }
  return false;
}
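
/// Try to model @p Inst as a multi-dimensional access into an array with
/// parametric dimension sizes, using the delinearization results that scop
/// detection stored in InsnToMemAcc. Returns true on success.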
bool ScopInfo::buildAccessMultiDimParam(
    MemAccInst Inst, Loop *L, Region *R,
    const ScopDetection::BoxedLoopsSetTy *BoxedLoops,
    const InvariantLoadsSetTy &ScopRIL) {
  Value *Address = Inst.getPointerOperand();
  Value *Val = Inst.getValueOperand();
  Type *SizeType = Val->getType();
  unsigned ElementSize = DL->getTypeAllocSize(SizeType);
  enum MemoryAccess::AccessType Type =
      Inst.isLoad() ? MemoryAccess::READ : MemoryAccess::MUST_WRITE;

  const SCEV *AccessFunction = SE->getSCEVAtScope(Address, L);
  const SCEVUnknown *BasePointer =
      dyn_cast<SCEVUnknown>(SE->getPointerBase(AccessFunction));

  assert(BasePointer && "Could not find base pointer");
  AccessFunction = SE->getMinusSCEV(AccessFunction, BasePointer);

  auto AccItr = InsnToMemAcc.find(Inst);
  if (PollyDelinearize && AccItr != InsnToMemAcc.end()) {
    std::vector<const SCEV *> Sizes(
        AccItr->second.Shape->DelinearizedSizes.begin(),
        AccItr->second.Shape->DelinearizedSizes.end());

    // Remove the element size. This information is already provided by the
    // ElementSize parameter. In case the element size of this access and the
    // element size used for delinearization differs the delinearization is
    // incorrect. Hence, we invalidate the scop.
    //
    // TODO: Handle delinearization with differing element sizes.
    auto DelinearizedSize =
        cast<SCEVConstant>(Sizes.back())->getAPInt().getSExtValue();
    Sizes.pop_back();
    if (ElementSize != DelinearizedSize)
      scop->invalidate(DELINEARIZATION, Inst.getDebugLoc());

    addArrayAccess(Inst, Type, BasePointer->getValue(), ElementSize, true,
                   AccItr->second.DelinearizedSubscripts, Sizes, Val);
    return true;
  }
  return false;
}
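
/// Model @p Inst as a single-dimensional access with the base pointer
/// subtracted from the access function. If the remaining access function is
/// not affine, a write is conservatively modeled as a MAY_WRITE.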
void ScopInfo::buildAccessSingleDim(
    MemAccInst Inst, Loop *L, Region *R,
    const ScopDetection::BoxedLoopsSetTy *BoxedLoops,
    const InvariantLoadsSetTy &ScopRIL) {
  Value *Address = Inst.getPointerOperand();
  Value *Val = Inst.getValueOperand();
  Type *SizeType = Val->getType();
  unsigned ElementSize = DL->getTypeAllocSize(SizeType);
  enum MemoryAccess::AccessType Type =
      Inst.isLoad() ? MemoryAccess::READ : MemoryAccess::MUST_WRITE;

  const SCEV *AccessFunction = SE->getSCEVAtScope(Address, L);
  const SCEVUnknown *BasePointer =
      dyn_cast<SCEVUnknown>(SE->getPointerBase(AccessFunction));

  assert(BasePointer && "Could not find base pointer");
  AccessFunction = SE->getMinusSCEV(AccessFunction, BasePointer);

  // Check if the access depends on a loop contained in a non-affine subregion.
  bool isVariantInNonAffineLoop = false;
  if (BoxedLoops) {
    SetVector<const Loop *> Loops;
    findLoops(AccessFunction, Loops);
    for (const Loop *L : Loops)
      if (BoxedLoops->count(L))
        isVariantInNonAffineLoop = true;
  }

  InvariantLoadsSetTy AccessILS;
  bool IsAffine =
      !isVariantInNonAffineLoop &&
      isAffineExpr(R, AccessFunction, *SE, BasePointer->getValue(), &AccessILS);

  for (LoadInst *LInst : AccessILS)
    if (!ScopRIL.count(LInst))
      IsAffine = false;

  if (!IsAffine && Type == MemoryAccess::MUST_WRITE)
    Type = MemoryAccess::MAY_WRITE;

  addArrayAccess(Inst, Type, BasePointer->getValue(), ElementSize, IsAffine,
                 {AccessFunction}, {}, Val);
}
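
/// Build a memory access for @p Inst, trying the most precise model first: a
/// multi-dimensional access with fixed sizes, then one with parametric sizes,
/// and finally the single-dimensional fallback.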
void ScopInfo::buildMemoryAccess(
    MemAccInst Inst, Loop *L, Region *R,
    const ScopDetection::BoxedLoopsSetTy *BoxedLoops,
    const InvariantLoadsSetTy &ScopRIL) {

  if (buildAccessMultiDimFixed(Inst, L, R, BoxedLoops, ScopRIL))
    return;

  if (buildAccessMultiDimParam(Inst, L, R, BoxedLoops, ScopRIL))
    return;

  buildAccessSingleDim(Inst, L, R, BoxedLoops, ScopRIL);
}

void ScopInfo::buildAccessFunctions(Region &R, Region &SR) {

  if (SD->isNonAffineSubRegion(&SR, &R)) {
    for (BasicBlock *BB : SR.blocks())
      buildAccessFunctions(R, *BB, &SR);
    return;
  }

  for (auto I = SR.element_begin(), E = SR.element_end(); I != E; ++I)
    if (I->isSubRegion())
      buildAccessFunctions(R, *I->getNodeAs<Region>());
    else
      buildAccessFunctions(R, *I->getNodeAs<BasicBlock>());
}

void ScopInfo::buildStmts(Region &R, Region &SR) {

  if (SD->isNonAffineSubRegion(&SR, &R)) {
    scop->addScopStmt(nullptr, &SR);
    return;
  }

  for (auto I = SR.element_begin(), E = SR.element_end(); I != E; ++I)
    if (I->isSubRegion())
      buildStmts(R, *I->getNodeAs<Region>());
    else
      scop->addScopStmt(I->getNodeAs<BasicBlock>(), nullptr);
}

void ScopInfo::buildAccessFunctions(Region &R, BasicBlock &BB,
                                    Region *NonAffineSubRegion,
                                    bool IsExitBlock) {
  // We do not build access functions for error blocks, as they may contain
  // instructions we cannot model.
  if (isErrorBlock(BB, R, *LI, *DT) && !IsExitBlock)
    return;

  Loop *L = LI->getLoopFor(&BB);

  // The set of loops contained in non-affine subregions that are part of R.
  const ScopDetection::BoxedLoopsSetTy *BoxedLoops = SD->getBoxedLoops(&R);

  // The set of loads that are required to be invariant.
  auto &ScopRIL = *SD->getRequiredInvariantLoads(&R);

  for (Instruction &Inst : BB) {
    PHINode *PHI = dyn_cast<PHINode>(&Inst);
    if (PHI)
      buildPHIAccesses(PHI, R, NonAffineSubRegion, IsExitBlock);

    // For the exit block we stop modeling after the last PHI node.
    if (!PHI && IsExitBlock)
      break;

    // TODO: At this point we only know that elements of ScopRIL have to be
    //       invariant and will be hoisted for the SCoP to be processed. Though,
    //       there might be other invariant accesses that will be hoisted and
    //       that would allow us to make a non-affine access affine.
    if (auto MemInst = MemAccInst::dyn_cast(Inst))
      buildMemoryAccess(MemInst, L, &R, BoxedLoops, ScopRIL);

    if (isIgnoredIntrinsic(&Inst))
      continue;

    if (!PHI)
      buildScalarDependences(&Inst);
    if (!IsExitBlock)
      buildEscapingDependences(&Inst);
  }
}

MemoryAccess *ScopInfo::addMemoryAccess(BasicBlock *BB, Instruction *Inst,
                                        MemoryAccess::AccessType Type,
                                        Value *BaseAddress, unsigned ElemBytes,
                                        bool Affine, Value *AccessValue,
                                        ArrayRef<const SCEV *> Subscripts,
                                        ArrayRef<const SCEV *> Sizes,
                                        ScopArrayInfo::MemoryKind Kind) {
  ScopStmt *Stmt = scop->getStmtForBasicBlock(BB);

  // Do not create a memory access for anything not in the SCoP. It would be
  // ignored anyway.
  if (!Stmt)
    return nullptr;

  AccFuncSetType &AccList = AccFuncMap[BB];
  Value *BaseAddr = BaseAddress;
  std::string BaseName = getIslCompatibleName("MemRef_", BaseAddr, "");

  bool isKnownMustAccess = false;

  // Accesses in single-basic-block statements are always executed.
  if (Stmt->isBlockStmt())
    isKnownMustAccess = true;

  if (Stmt->isRegionStmt()) {
    // Accesses that dominate the exit block of a non-affine region are always
    // executed. In non-affine regions there may exist accesses that do not
    // dominate the exit: MK_Value writes always dominate the exit, and MK_PHI
    // writes do so only if there is at most one PHI_WRITE in the non-affine
    // region.
    if (DT->dominates(BB, Stmt->getRegion()->getExit()))
      isKnownMustAccess = true;
  }

  // Non-affine PHI writes do not "happen" at a particular instruction, but
  // after exiting the statement. Therefore they are guaranteed to execute and
  // overwrite the old value.
  if (Kind == ScopArrayInfo::MK_PHI || Kind == ScopArrayInfo::MK_ExitPHI)
    isKnownMustAccess = true;

  if (!isKnownMustAccess && Type == MemoryAccess::MUST_WRITE)
    Type = MemoryAccess::MAY_WRITE;

  AccList.emplace_back(Stmt, Inst, Type, BaseAddress, ElemBytes, Affine,
                       Subscripts, Sizes, AccessValue, Kind, BaseName);
  Stmt->addAccess(&AccList.back());
  return &AccList.back();
}

void ScopInfo::addArrayAccess(MemAccInst MemAccInst,
                              MemoryAccess::AccessType Type, Value *BaseAddress,
                              unsigned ElemBytes, bool IsAffine,
                              ArrayRef<const SCEV *> Subscripts,
                              ArrayRef<const SCEV *> Sizes,
                              Value *AccessValue) {
  assert(MemAccInst.isLoad() == (Type == MemoryAccess::READ));
  addMemoryAccess(MemAccInst.getParent(), MemAccInst, Type, BaseAddress,
                  ElemBytes, IsAffine, AccessValue, Subscripts, Sizes,
                  ScopArrayInfo::MK_Array);
}
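
/// Ensure that the value defined by @p Value is written to memory in its
/// defining statement, creating at most one MK_Value write access per
/// definition.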
void ScopInfo::ensureValueWrite(Instruction *Value) {
  ScopStmt *Stmt = scop->getStmtForBasicBlock(Value->getParent());

  // Value not defined within this SCoP.
  if (!Stmt)
    return;

  // Do not process further if the value is already written.
  if (Stmt->lookupValueWriteOf(Value))
    return;

  addMemoryAccess(Value->getParent(), Value, MemoryAccess::MUST_WRITE, Value,
                  1, true, Value, ArrayRef<const SCEV *>(),
                  ArrayRef<const SCEV *>(), ScopArrayInfo::MK_Value);
}
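
/// Ensure an up-to-date copy of @p Value is available in the statement that
/// contains @p UserBB: create an MK_Value read access unless the value can be
/// synthesized, is a required invariant load, or is defined within the same
/// statement.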
void ScopInfo::ensureValueRead(Value *Value, BasicBlock *UserBB) {

  // There cannot be an "access" for literal constants. BasicBlock references
  // (jump destinations) also never change.
  if ((isa<Constant>(Value) && !isa<GlobalVariable>(Value)) ||
      isa<BasicBlock>(Value))
    return;

  // If the instruction can be synthesized and the user is in the region we do
  // not need to add a value dependence.
  Region &ScopRegion = scop->getRegion();
  if (canSynthesize(Value, LI, SE, &ScopRegion))
    return;

  // Do not build scalar dependences for required invariant loads as we will
  // hoist them later on anyway or drop the SCoP if we cannot.
  auto ScopRIL = SD->getRequiredInvariantLoads(&ScopRegion);
  if (ScopRIL->count(dyn_cast<LoadInst>(Value)))
    return;

  // Determine the ScopStmt containing the value's definition and use. There is
  // no defining ScopStmt if the value is a function argument, a global value,
  // or defined outside the SCoP.
  Instruction *ValueInst = dyn_cast<Instruction>(Value);
  ScopStmt *ValueStmt =
      ValueInst ? scop->getStmtForBasicBlock(ValueInst->getParent()) : nullptr;

  ScopStmt *UserStmt = scop->getStmtForBasicBlock(UserBB);

  // We do not model uses outside the scop.
  if (!UserStmt)
    return;

  // Add MemoryAccess for invariant values only if requested.
  if (!ModelReadOnlyScalars && !ValueStmt)
    return;

  // Ignore use-def chains within the same ScopStmt.
  if (ValueStmt == UserStmt)
    return;

  // Do not create another MemoryAccess for reloading the value if one already
  // exists.
  if (UserStmt->lookupValueReadOf(Value))
    return;

  addMemoryAccess(UserBB, nullptr, MemoryAccess::READ, Value, 1, true, Value,
                  ArrayRef<const SCEV *>(), ArrayRef<const SCEV *>(),
                  ScopArrayInfo::MK_Value);
  if (ValueInst)
    ensureValueWrite(ValueInst);
}
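
/// Create a write access for the operand of @p PHI incoming from
/// @p IncomingBlock. Multiple incoming values from the same statement are
/// accumulated in a single PHI write access.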
void ScopInfo::ensurePHIWrite(PHINode *PHI, BasicBlock *IncomingBlock,
                              Value *IncomingValue, bool IsExitBlock) {
  ScopStmt *IncomingStmt = scop->getStmtForBasicBlock(IncomingBlock);
  if (!IncomingStmt)
    return;

  // Take care that the incoming value is available in the incoming block. This
  // must be done before the check for multiple PHI writes because each of the
  // multiple exiting edges of a subregion can provide the effectively written
  // value of the subregion. As such, all of them must be made available in the
  // subregion statement.
  ensureValueRead(IncomingValue, IncomingBlock);

  // Do not add more than one MemoryAccess per PHINode and ScopStmt.
  if (MemoryAccess *Acc = IncomingStmt->lookupPHIWriteOf(PHI)) {
    assert(Acc->getAccessInstruction() == PHI);
    Acc->addIncoming(IncomingBlock, IncomingValue);
    return;
  }

  MemoryAccess *Acc = addMemoryAccess(
      IncomingStmt->isBlockStmt() ? IncomingBlock
                                  : IncomingStmt->getRegion()->getEntry(),
      PHI, MemoryAccess::MUST_WRITE, PHI, 1, true, PHI,
      ArrayRef<const SCEV *>(), ArrayRef<const SCEV *>(),
      IsExitBlock ? ScopArrayInfo::MK_ExitPHI : ScopArrayInfo::MK_PHI);
  assert(Acc);
  Acc->addIncoming(IncomingBlock, IncomingValue);
}

void ScopInfo::addPHIReadAccess(PHINode *PHI) {
  addMemoryAccess(PHI->getParent(), PHI, MemoryAccess::READ, PHI, 1, true, PHI,
                  ArrayRef<const SCEV *>(), ArrayRef<const SCEV *>(),
                  ScopArrayInfo::MK_PHI);
}

void ScopInfo::buildScop(Region &R, AssumptionCache &AC) {
  unsigned MaxLoopDepth = getMaxLoopDepthInRegion(R, *LI, *SD);
  scop = new Scop(R, AccFuncMap, *SD, *SE, *DT, *LI, ctx, MaxLoopDepth);

  buildStmts(R, R);
  buildAccessFunctions(R, R);

  // In case the region does not have an exiting block we will later (during
  // code generation) split the exit block. This will move potential PHI nodes
  // from the current exit block into the new region exiting block. Hence, PHI
  // nodes that are at this point not part of the region will become part of
  // it. To handle these PHI nodes later we will now model their operands as
  // scalar accesses. Note that we do not model anything in the exit block if
  // we have an exiting block in the region, as there will not be any splitting
  // later.
  if (!R.getExitingBlock())
    buildAccessFunctions(R, *R.getExit(), nullptr, /* IsExitBlock */ true);

  scop->init(*AA, AC);
}

void ScopInfo::print(raw_ostream &OS, const Module *) const {
  if (!scop) {
    OS << "Invalid Scop!\n";
    return;
  }

  scop->print(OS);
}

void ScopInfo::clear() {
  AccFuncMap.clear();
  if (scop) {
    delete scop;
    scop = nullptr;
  }
}

//===----------------------------------------------------------------------===//
ScopInfo::ScopInfo() : RegionPass(ID), scop(nullptr) {
  ctx = isl_ctx_alloc();
  isl_options_set_on_error(ctx, ISL_ON_ERROR_ABORT);
}

ScopInfo::~ScopInfo() {
  clear();
  isl_ctx_free(ctx);
}

void ScopInfo::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<LoopInfoWrapperPass>();
  AU.addRequired<RegionInfoPass>();
  AU.addRequired<DominatorTreeWrapperPass>();
  AU.addRequiredTransitive<ScalarEvolutionWrapperPass>();
  AU.addRequiredTransitive<ScopDetection>();
  AU.addRequired<AAResultsWrapperPass>();
  AU.addRequired<AssumptionCacheTracker>();
  AU.setPreservesAll();
}

bool ScopInfo::runOnRegion(Region *R, RGPassManager &RGM) {
  SD = &getAnalysis<ScopDetection>();

  if (!SD->isMaxRegionInScop(*R))
    return false;

  Function *F = R->getEntry()->getParent();
  SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
  LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
  DL = &F->getParent()->getDataLayout();
  DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(*F);

  DebugLoc Beg, End;
  getDebugLocations(R, Beg, End);
  std::string Msg = "SCoP begins here.";
  emitOptimizationRemarkAnalysis(F->getContext(), DEBUG_TYPE, *F, Beg, Msg);

  buildScop(*R, AC);

  DEBUG(scop->print(dbgs()));

  if (scop->isEmpty() || !scop->hasFeasibleRuntimeContext()) {
    Msg = "SCoP ends here but was dismissed.";
    delete scop;
    scop = nullptr;
  } else {
    Msg = "SCoP ends here.";
    ++ScopFound;
    if (scop->getMaxLoopDepth() > 0)
      ++RichScopFound;
  }

  emitOptimizationRemarkAnalysis(F->getContext(), DEBUG_TYPE, *F, End, Msg);

  return false;
}

char ScopInfo::ID = 0;

Pass *polly::createScopInfoPass() { return new ScopInfo(); }

INITIALIZE_PASS_BEGIN(ScopInfo, "polly-scops",
                      "Polly - Create polyhedral description of Scops", false,
                      false);
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass);
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker);
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass);
INITIALIZE_PASS_DEPENDENCY(RegionInfoPass);
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass);
INITIALIZE_PASS_DEPENDENCY(ScopDetection);
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass);
INITIALIZE_PASS_END(ScopInfo, "polly-scops",
                    "Polly - Create polyhedral description of Scops", false,
                    false)