llvm-project/polly/lib/Support/ScopHelper.cpp


//===- ScopHelper.cpp - Some Helper Functions for Scop. ------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Small functions that help with Scop and LLVM-IR.
//
//===----------------------------------------------------------------------===//
#include "polly/Support/ScopHelper.h"
#include "polly/Options.h"
#include "polly/ScopInfo.h"
#include "polly/Support/SCEVValidator.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/RegionInfo.h"
#include "llvm/Analysis/RegionInfoImpl.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
using namespace llvm;
using namespace polly;
#define DEBUG_TYPE "polly-scop-helper"

static cl::opt<bool> PollyAllowErrorBlocks(
    "polly-allow-error-blocks",
    cl::desc("Allow to speculate on the execution of 'error blocks'."),
    cl::Hidden, cl::init(true), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::list<std::string> DebugFunctions(
    "polly-debug-func",
    cl::desc("Allow calls to the specified functions in SCoPs even if their "
             "side-effects are unknown. This can be used to do debug output in "
             "Polly-transformed code."),
    cl::Hidden, cl::ZeroOrMore, cl::CommaSeparated, cl::cat(PollyCategory));
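
// Usage sketch: to keep calls to debug-output functions inside SCoPs, one
// could run e.g.
//
//   opt -polly-debug-func=printf,trace_iter ...
//
// where "trace_iter" stands for a hypothetical user-provided tracing function;
// calls to the listed functions are then treated as known debug output.
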
// Ensures that there is just one predecessor to the entry node from outside the
// region.
// The identity of the region entry node is preserved.
static void simplifyRegionEntry(Region *R, DominatorTree *DT, LoopInfo *LI,
                                RegionInfo *RI) {
  BasicBlock *EnteringBB = R->getEnteringBlock();
  BasicBlock *Entry = R->getEntry();

  // Before (one of):
  //
  //                       \    /            //
  //                      EnteringBB         //
  //                        |    \------>    //
  //   \   /                |                //
  //   Entry <--\         Entry <--\         //
  //   /   \    /         /   \    /         //
  //        ....               ....          //

  // Create single entry edge if the region has multiple entry edges.
  if (!EnteringBB) {
    SmallVector<BasicBlock *, 4> Preds;
    for (BasicBlock *P : predecessors(Entry))
      if (!R->contains(P))
        Preds.push_back(P);

    BasicBlock *NewEntering =
        SplitBlockPredecessors(Entry, Preds, ".region_entering", DT, LI);

    if (RI) {
      // The exit block of preceding regions must be changed to NewEntering.
      for (BasicBlock *ExitPred : predecessors(NewEntering)) {
        Region *RegionOfPred = RI->getRegionFor(ExitPred);
        if (RegionOfPred->getExit() != Entry)
          continue;

        while (!RegionOfPred->isTopLevelRegion() &&
               RegionOfPred->getExit() == Entry) {
          RegionOfPred->replaceExit(NewEntering);
          RegionOfPred = RegionOfPred->getParent();
        }
      }

      // Make all ancestors that had Entry as entry use NewEntering instead;
      // there might be edges to it.
      Region *AncestorR = R->getParent();
      RI->setRegionFor(NewEntering, AncestorR);
      while (!AncestorR->isTopLevelRegion() &&
             AncestorR->getEntry() == Entry) {
        AncestorR->replaceEntry(NewEntering);
        AncestorR = AncestorR->getParent();
      }
    }

    EnteringBB = NewEntering;
  }

  assert(R->getEnteringBlock() == EnteringBB);

  // After:
  //
  //    \    /       //
  //  EnteringBB     //
  //      |          //
  //      |          //
  //    Entry <--\   //
  //    /   \    /   //
  //         ....    //
}

// Ensure that the region has a single block that branches to the exit node.
static void simplifyRegionExit(Region *R, DominatorTree *DT, LoopInfo *LI,
                               RegionInfo *RI) {
  BasicBlock *ExitBB = R->getExit();
  BasicBlock *ExitingBB = R->getExitingBlock();

  // Before:
  //
  //   (Region)   ______/  //
  //      \  |   /         //
  //       ExitBB          //
  //       /    \          //

  if (!ExitingBB) {
    SmallVector<BasicBlock *, 4> Preds;
    for (BasicBlock *P : predecessors(ExitBB))
      if (R->contains(P))
        Preds.push_back(P);

    //  Preds[0] Preds[1]      otherBB //
    //         \  |  ________/         //
    //          \ | /                  //
    //            BB                   //
    ExitingBB =
        SplitBlockPredecessors(ExitBB, Preds, ".region_exiting", DT, LI);
    // Preds[0] Preds[1]      otherBB  //
    //        \  /           /         //
    // BB.region_exiting    /          //
    //                  \  /           //
    //                   BB            //

    if (RI)
      RI->setRegionFor(ExitingBB, R);

    // Change the exit of nested regions, but not the region itself.
    R->replaceExitRecursive(ExitingBB);
    R->replaceExit(ExitBB);
  }

  assert(ExitingBB == R->getExitingBlock());

  // After:
  //
  //     \   /                //
  //    ExitingBB     _____/  //
  //          \      /        //
  //           ExitBB         //
  //           /    \         //
}

void polly::simplifyRegion(Region *R, DominatorTree *DT, LoopInfo *LI,
                           RegionInfo *RI) {
  assert(R && !R->isTopLevelRegion());
  assert(!RI || RI == R->getRegionInfo());
  assert((!RI || DT) &&
         "RegionInfo requires DominatorTree to be updated as well");

  simplifyRegionEntry(R, DT, LI, RI);
  simplifyRegionExit(R, DT, LI, RI);
  assert(R->isSimple());
}
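
// Usage sketch: a pass that versions or rewrites a SCoP would typically
// normalize the region first, e.g.
//
//   simplifyRegion(&R, &DT, &LI, &RI);
//   assert(R.getEnteringBlock() && R.getExitingBlock());
//
// after which the region has exactly one entering and one exiting edge, while
// the entry block itself (and thus all ScopStmt block references) is unchanged.
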
// Split the block into two successive blocks.
//
// Like llvm::SplitBlock, but also preserves RegionInfo
static BasicBlock *splitBlock(BasicBlock *Old, Instruction *SplitPt,
                              DominatorTree *DT, llvm::LoopInfo *LI,
                              RegionInfo *RI) {
  assert(Old && SplitPt);

  // Before:
  //
  //  \   /  //
  //   Old   //
  //  /   \  //

  BasicBlock *NewBlock = llvm::SplitBlock(Old, SplitPt, DT, LI);

  if (RI) {
    Region *R = RI->getRegionFor(Old);
    RI->setRegionFor(NewBlock, R);
  }

  // After:
  //
  //   \   /    //
  //    Old     //
  //     |      //
  //  NewBlock  //
  //  /     \   //

  return NewBlock;
}

void polly::splitEntryBlockForAlloca(BasicBlock *EntryBlock, DominatorTree *DT,
                                     LoopInfo *LI, RegionInfo *RI) {
  // Find the first non-alloca instruction. Every basic block has a non-alloca
  // instruction, as every well-formed basic block has a terminator.
  BasicBlock::iterator I = EntryBlock->begin();
  while (isa<AllocaInst>(I))
    ++I;

  // splitBlock updates DT, LI and RI.
  splitBlock(EntryBlock, &*I, DT, LI, RI);
}
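
// Illustrative IR effect: for an entry block
//
//   entry:
//     %a = alloca i32
//     %b = alloca i32
//     store i32 0, i32* %a
//     br label %body
//
// the split happens at the store (the first non-alloca instruction), so all
// allocas stay together in the function's entry block and keep dominating
// every use.
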
void polly::splitEntryBlockForAlloca(BasicBlock *EntryBlock, Pass *P) {
  auto *DTWP = P->getAnalysisIfAvailable<DominatorTreeWrapperPass>();
  auto *DT = DTWP ? &DTWP->getDomTree() : nullptr;
  auto *LIWP = P->getAnalysisIfAvailable<LoopInfoWrapperPass>();
  auto *LI = LIWP ? &LIWP->getLoopInfo() : nullptr;
  RegionInfoPass *RIP = P->getAnalysisIfAvailable<RegionInfoPass>();
  RegionInfo *RI = RIP ? &RIP->getRegionInfo() : nullptr;

  // splitBlock updates DT, LI and RI.
  polly::splitEntryBlockForAlloca(EntryBlock, DT, LI, RI);
}

/// The SCEVExpander will __not__ generate any code for an existing SDiv/SRem
/// instruction but just use it, if it is referenced as a SCEVUnknown. We want
/// however to generate new code if the instruction is in the analyzed region
/// and we generate code outside/in front of that region. Hence, we generate the
/// code for the SDiv/SRem operands in front of the analyzed region and then
/// create a new SDiv/SRem operation there too.
struct ScopExpander : SCEVVisitor<ScopExpander, const SCEV *> {
  friend struct SCEVVisitor<ScopExpander, const SCEV *>;

  explicit ScopExpander(const Region &R, ScalarEvolution &SE,
                        const DataLayout &DL, const char *Name, ValueMapT *VMap,
                        BasicBlock *RTCBB)
      : Expander(SCEVExpander(SE, DL, Name)), SE(SE), Name(Name), R(R),
        VMap(VMap), RTCBB(RTCBB) {}

  Value *expandCodeFor(const SCEV *E, Type *Ty, Instruction *I) {
    // If we generate code in the region we will immediately fall back to the
    // SCEVExpander, otherwise we will stop at all unknowns in the SCEV and if
    // needed replace them by copies computed in the entering block.
    if (!R.contains(I))
      E = visit(E);
    return Expander.expandCodeFor(E, Ty, I);
  }

  const SCEV *visit(const SCEV *E) {
    // Cache the expansion results for intermediate SCEV expressions. A SCEV
    // expression can refer to an operand multiple times (e.g. "x*x"), so
    // a naive visitor takes exponential time.
    if (SCEVCache.count(E))
      return SCEVCache[E];

    const SCEV *Result = SCEVVisitor::visit(E);
    SCEVCache[E] = Result;

    return Result;
  }

private:
  SCEVExpander Expander;
  ScalarEvolution &SE;
  const char *Name;
  const Region &R;
  ValueMapT *VMap;
  BasicBlock *RTCBB;
  DenseMap<const SCEV *, const SCEV *> SCEVCache;

  const SCEV *visitGenericInst(const SCEVUnknown *E, Instruction *Inst,
                               Instruction *IP) {
    if (!Inst || !R.contains(Inst))
      return E;

    assert(!Inst->mayThrow() && !Inst->mayReadOrWriteMemory() &&
           !isa<PHINode>(Inst));

    auto *InstClone = Inst->clone();
    for (auto &Op : Inst->operands()) {
      assert(SE.isSCEVable(Op->getType()));
      auto *OpSCEV = SE.getSCEV(Op);
      auto *OpClone = expandCodeFor(OpSCEV, Op->getType(), IP);
      InstClone->replaceUsesOfWith(Op, OpClone);
    }

    InstClone->setName(Name + Inst->getName());
    InstClone->insertBefore(IP);
    return SE.getSCEV(InstClone);
  }

  const SCEV *visitUnknown(const SCEVUnknown *E) {

    // If a value mapping was given, check whether the underlying value is
    // remapped.
    Value *NewVal = VMap ? VMap->lookup(E->getValue()) : nullptr;
    if (NewVal) {
      auto *NewE = SE.getSCEV(NewVal);

      // While the mapped value might be different, its SCEV representation
      // might not be. Check this before recursing to avoid endless recursion.
      if (E != NewE)
        return visit(NewE);
    }

    Instruction *Inst = dyn_cast<Instruction>(E->getValue());
    Instruction *IP;
    if (Inst && !R.contains(Inst))
      IP = Inst;
    else if (Inst && RTCBB->getParent() == Inst->getFunction())
      IP = RTCBB->getTerminator();
    else
      IP = RTCBB->getParent()->getEntryBlock().getTerminator();

    if (!Inst || (Inst->getOpcode() != Instruction::SRem &&
                  Inst->getOpcode() != Instruction::SDiv))
      return visitGenericInst(E, Inst, IP);

    const SCEV *LHSScev = SE.getSCEV(Inst->getOperand(0));
    const SCEV *RHSScev = SE.getSCEV(Inst->getOperand(1));

    if (!SE.isKnownNonZero(RHSScev))
      RHSScev = SE.getUMaxExpr(RHSScev, SE.getConstant(E->getType(), 1));

    Value *LHS = expandCodeFor(LHSScev, E->getType(), IP);
    Value *RHS = expandCodeFor(RHSScev, E->getType(), IP);

    Inst = BinaryOperator::Create((Instruction::BinaryOps)Inst->getOpcode(),
                                  LHS, RHS, Inst->getName() + Name, IP);
    return SE.getSCEV(Inst);
  }
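
  // Illustrative expansion (names illustrative): if the analyzed region
  // contains
  //   %q = sdiv i64 %a, %n
  // and %q is referenced by a runtime check built in front of the region, the
  // operands are expanded at the insertion point and the division is recreated
  // there, roughly as
  //   %q.copy = sdiv i64 %a.copy, umax(%n.copy, 1)
  // The umax guard keeps the hoisted division from faulting on paths where the
  // original division never executes.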

  /// The following functions will just traverse the SCEV and rebuild it with
  /// the new operands returned by the traversal.
  ///
  ///{
  const SCEV *visitConstant(const SCEVConstant *E) { return E; }
  const SCEV *visitTruncateExpr(const SCEVTruncateExpr *E) {
    return SE.getTruncateExpr(visit(E->getOperand()), E->getType());
  }
  const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *E) {
    return SE.getZeroExtendExpr(visit(E->getOperand()), E->getType());
  }
  const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *E) {
    return SE.getSignExtendExpr(visit(E->getOperand()), E->getType());
  }
  const SCEV *visitUDivExpr(const SCEVUDivExpr *E) {
    auto *RHSScev = visit(E->getRHS());
    if (!SE.isKnownNonZero(RHSScev))
      RHSScev = SE.getUMaxExpr(RHSScev, SE.getConstant(E->getType(), 1));

    return SE.getUDivExpr(visit(E->getLHS()), RHSScev);
  }
  const SCEV *visitAddExpr(const SCEVAddExpr *E) {
    SmallVector<const SCEV *, 4> NewOps;
    for (const SCEV *Op : E->operands())
      NewOps.push_back(visit(Op));
    return SE.getAddExpr(NewOps);
  }
  const SCEV *visitMulExpr(const SCEVMulExpr *E) {
    SmallVector<const SCEV *, 4> NewOps;
    for (const SCEV *Op : E->operands())
      NewOps.push_back(visit(Op));
    return SE.getMulExpr(NewOps);
  }
  const SCEV *visitUMaxExpr(const SCEVUMaxExpr *E) {
    SmallVector<const SCEV *, 4> NewOps;
    for (const SCEV *Op : E->operands())
      NewOps.push_back(visit(Op));
    return SE.getUMaxExpr(NewOps);
  }
  const SCEV *visitSMaxExpr(const SCEVSMaxExpr *E) {
    SmallVector<const SCEV *, 4> NewOps;
    for (const SCEV *Op : E->operands())
      NewOps.push_back(visit(Op));
    return SE.getSMaxExpr(NewOps);
  }
  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *E) {
    SmallVector<const SCEV *, 4> NewOps;
    for (const SCEV *Op : E->operands())
      NewOps.push_back(visit(Op));
    return SE.getAddRecExpr(NewOps, E->getLoop(), E->getNoWrapFlags());
  }
  ///}
};

Value *polly::expandCodeFor(Scop &S, ScalarEvolution &SE, const DataLayout &DL,
                            const char *Name, const SCEV *E, Type *Ty,
                            Instruction *IP, ValueMapT *VMap,
                            BasicBlock *RTCBB) {
  ScopExpander Expander(S.getRegion(), SE, DL, Name, VMap, RTCBB);
  return Expander.expandCodeFor(E, Ty, IP);
}
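
// Usage sketch (names illustrative): code generation expands SCEVs for runtime
// checks in front of the SCoP, e.g.
//
//   Value *UB = expandCodeFor(S, SE, DL, "polly", UpperBoundSCEV,
//                             Builder.getInt64Ty(),
//                             &*Builder.GetInsertPoint(), nullptr, RTCBB);
//
// Unknowns defined inside the region are recomputed at the insertion point
// instead of being referenced directly, so the check is valid before the
// region executes.
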
bool polly::isErrorBlock(BasicBlock &BB, const Region &R, LoopInfo &LI,
                         const DominatorTree &DT) {
  if (!PollyAllowErrorBlocks)
    return false;

  if (isa<UnreachableInst>(BB.getTerminator()))
    return true;

  if (LI.isLoopHeader(&BB))
    return false;

  // Basic blocks that are always executed are not considered error blocks,
  // as their execution cannot be a rare event.
  bool DominatesAllPredecessors = true;
  if (R.isTopLevelRegion()) {
    for (BasicBlock &I : *R.getEntry()->getParent())
      if (isa<ReturnInst>(I.getTerminator()) && !DT.dominates(&BB, &I))
        DominatesAllPredecessors = false;
  } else {
    for (auto Pred : predecessors(R.getExit()))
      if (R.contains(Pred) && !DT.dominates(&BB, Pred))
        DominatesAllPredecessors = false;
  }

  if (DominatesAllPredecessors)
    return false;

  for (Instruction &Inst : BB)
    if (CallInst *CI = dyn_cast<CallInst>(&Inst)) {
      if (isDebugCall(CI))
        continue;

      if (isIgnoredIntrinsic(CI))
        continue;

      // memset, memcpy and memmove are modeled intrinsics.
      if (isa<MemSetInst>(CI) || isa<MemTransferInst>(CI))
        continue;

      if (!CI->doesNotAccessMemory())
        return true;
      if (CI->doesNotReturn())
        return true;
    }

  return false;
}
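
// Illustrative classification: a block such as
//
//   if.then:
//     call void @abort()
//     unreachable
//
// is an error block (its execution must be a rare event), whereas a loop
// header, or a block that is executed on every run of the region (it dominates
// all region exits), never is.
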
Value *polly::getConditionFromTerminator(TerminatorInst *TI) {
  if (BranchInst *BR = dyn_cast<BranchInst>(TI)) {
    if (BR->isUnconditional())
      return ConstantInt::getTrue(Type::getInt1Ty(TI->getContext()));

    return BR->getCondition();
  }

  if (SwitchInst *SI = dyn_cast<SwitchInst>(TI))
    return SI->getCondition();

  return nullptr;
}

static bool hasVariantIndex(GetElementPtrInst *Gep, Loop *L, Region &R,
                            ScalarEvolution &SE) {
  for (const Use &Val : llvm::drop_begin(Gep->operands(), 1)) {
    const SCEV *PtrSCEV = SE.getSCEVAtScope(Val, L);
    Loop *OuterLoop = R.outermostLoopInRegion(L);
    if (!SE.isLoopInvariant(PtrSCEV, OuterLoop))
      return true;
  }
  return false;
}

bool polly::isHoistableLoad(LoadInst *LInst, Region &R, LoopInfo &LI,
                            ScalarEvolution &SE, const DominatorTree &DT,
                            const InvariantLoadsSetTy &KnownInvariantLoads) {
  Loop *L = LI.getLoopFor(LInst->getParent());
  auto *Ptr = LInst->getPointerOperand();

  // A LoadInst is hoistable if the address it is loading from is also
  // invariant; in this case: another invariant load (whether that address
  // is also not written to has to be checked separately).
  // TODO: This only checks for a LoadInst->GetElementPtrInst->LoadInst
  // pattern generated by the Chapel frontend, but generally this applies
  // for any chain of instructions that does not also depend on any
  // induction variable.
  if (auto *GepInst = dyn_cast<GetElementPtrInst>(Ptr)) {
    if (!hasVariantIndex(GepInst, L, R, SE)) {
      if (auto *DecidingLoad =
              dyn_cast<LoadInst>(GepInst->getPointerOperand())) {
        if (KnownInvariantLoads.count(DecidingLoad))
          return true;
      }
    }
  }

  const SCEV *PtrSCEV = SE.getSCEVAtScope(Ptr, L);

  while (L && R.contains(L)) {
    if (!SE.isLoopInvariant(PtrSCEV, L))
      return false;
    L = L->getParentLoop();
  }

  for (auto *User : Ptr->users()) {
    auto *UserI = dyn_cast<Instruction>(User);
    if (!UserI || !R.contains(UserI))
      continue;
    if (!UserI->mayWriteToMemory())
      continue;

    auto &BB = *UserI->getParent();
    if (DT.dominates(&BB, LInst->getParent()))
      return false;

    bool DominatesAllPredecessors = true;
    if (R.isTopLevelRegion()) {
      for (BasicBlock &I : *R.getEntry()->getParent())
        if (isa<ReturnInst>(I.getTerminator()) && !DT.dominates(&BB, &I))
          DominatesAllPredecessors = false;
    } else {
      for (auto Pred : predecessors(R.getExit()))
        if (R.contains(Pred) && !DT.dominates(&BB, Pred))
          DominatesAllPredecessors = false;
    }

    if (!DominatesAllPredecessors)
      continue;

    return false;
  }

  return true;
}
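
// Illustrative hoistable pattern (the Chapel-style chain mentioned above;
// names illustrative):
//
//   %base = load double*, double** %desc                ; known invariant load
//   %gep  = getelementptr double, double* %base, i64 4  ; no variant index
//   %val  = load double, double* %gep                   ; => hoistable
//
// The address %gep is invariant because it is derived from another invariant
// load with only loop-invariant indices.
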
bool polly::isIgnoredIntrinsic(const Value *V) {
  if (auto *IT = dyn_cast<IntrinsicInst>(V)) {
    switch (IT->getIntrinsicID()) {
    // Lifetime markers are supported/ignored.
    case llvm::Intrinsic::lifetime_start:
    case llvm::Intrinsic::lifetime_end:
    // Invariant markers are supported/ignored.
    case llvm::Intrinsic::invariant_start:
    case llvm::Intrinsic::invariant_end:
    // Some misc annotations are supported/ignored.
    case llvm::Intrinsic::var_annotation:
    case llvm::Intrinsic::ptr_annotation:
    case llvm::Intrinsic::annotation:
    case llvm::Intrinsic::donothing:
    case llvm::Intrinsic::assume:
    // Some debug info intrinsics are supported/ignored.
    case llvm::Intrinsic::dbg_value:
    case llvm::Intrinsic::dbg_declare:
      return true;
    default:
      break;
    }
  }
  return false;
}

bool polly::canSynthesize(const Value *V, const Scop &S, ScalarEvolution *SE,
                          Loop *Scope) {
  if (!V || !SE->isSCEVable(V->getType()))
    return false;

  const InvariantLoadsSetTy &ILS = S.getRequiredInvariantLoads();
  if (const SCEV *Scev = SE->getSCEVAtScope(const_cast<Value *>(V), Scope))
    if (!isa<SCEVCouldNotCompute>(Scev))
      if (!hasScalarDepsInsideRegion(Scev, &S.getRegion(), Scope, false, ILS))
        return true;

  return false;
}
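
// Example: a canonical induction variable increment "add i64 %i, 1" has the
// affine SCEV {1,+,1}<%loop> and can be recomputed from the schedule, so
// canSynthesize returns true and no scalar write/read is needed for it; an
// arbitrary load result cannot be recomputed, unless it is one of the required
// invariant loads.
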
llvm::BasicBlock *polly::getUseBlock(const llvm::Use &U) {
  Instruction *UI = dyn_cast<Instruction>(U.getUser());
  if (!UI)
    return nullptr;

  if (PHINode *PHI = dyn_cast<PHINode>(UI))
    return PHI->getIncomingBlock(U);

  return UI->getParent();
}
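
// Example: for "%p = phi i32 [ %v, %then ], [ 0, %else ]", the use of %v is
// attributed to %then, the incoming block, rather than to the block containing
// the PHI itself.
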
std::tuple<std::vector<const SCEV *>, std::vector<int>>
polly::getIndexExpressionsFromGEP(GetElementPtrInst *GEP, ScalarEvolution &SE) {
  std::vector<const SCEV *> Subscripts;
  std::vector<int> Sizes;

  Type *Ty = GEP->getPointerOperandType();

  bool DroppedFirstDim = false;

  for (unsigned i = 1; i < GEP->getNumOperands(); i++) {

    const SCEV *Expr = SE.getSCEV(GEP->getOperand(i));

    if (i == 1) {
      if (auto *PtrTy = dyn_cast<PointerType>(Ty)) {
        Ty = PtrTy->getElementType();
      } else if (auto *ArrayTy = dyn_cast<ArrayType>(Ty)) {
        Ty = ArrayTy->getElementType();
      } else {
        Subscripts.clear();
        Sizes.clear();
        break;
      }
      if (auto *Const = dyn_cast<SCEVConstant>(Expr))
        if (Const->getValue()->isZero()) {
          DroppedFirstDim = true;
          continue;
        }
      Subscripts.push_back(Expr);
      continue;
    }

    auto *ArrayTy = dyn_cast<ArrayType>(Ty);
    if (!ArrayTy) {
      Subscripts.clear();
      Sizes.clear();
      break;
    }

    Subscripts.push_back(Expr);
    if (!(DroppedFirstDim && i == 2))
      Sizes.push_back(ArrayTy->getNumElements());

    Ty = ArrayTy->getElementType();
  }

  return std::make_tuple(Subscripts, Sizes);
}
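
// Worked example: for
//
//   %gep = getelementptr [8 x [16 x float]], [8 x [16 x float]]* %A,
//                        i64 0, i64 %i, i64 %j
//
// the leading zero index is dropped, yielding Subscripts = {%i, %j} and
// Sizes = {16}; the outermost dimension's size (8) is not needed to
// delinearize the access.
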
llvm::Loop *polly::getFirstNonBoxedLoopFor(llvm::Loop *L, llvm::LoopInfo &LI,
                                           const BoxedLoopsSetTy &BoxedLoops) {
  while (BoxedLoops.count(L))
    L = L->getParentLoop();
  return L;
}

llvm::Loop *polly::getFirstNonBoxedLoopFor(llvm::BasicBlock *BB,
                                           llvm::LoopInfo &LI,
                                           const BoxedLoopsSetTy &BoxedLoops) {
  Loop *L = LI.getLoopFor(BB);
  return getFirstNonBoxedLoopFor(L, LI, BoxedLoops);
}

bool polly::isDebugCall(Instruction *Inst) {
  auto *CI = dyn_cast<CallInst>(Inst);
  if (!CI)
    return false;

  Function *CF = CI->getCalledFunction();
  if (!CF)
    return false;

  return std::find(DebugFunctions.begin(), DebugFunctions.end(),
                   CF->getName()) != DebugFunctions.end();
}

static bool hasDebugCall(BasicBlock *BB) {
  for (Instruction &Inst : *BB) {
    if (isDebugCall(&Inst))
      return true;
  }
  return false;
}

bool polly::hasDebugCall(ScopStmt *Stmt) {
  // Quick skip if no debug functions have been defined.
  if (DebugFunctions.empty())
    return false;

  if (!Stmt)
    return false;

  for (Instruction *Inst : Stmt->getInstructions())
    if (isDebugCall(Inst))
      return true;

  if (Stmt->isRegionStmt()) {
    for (BasicBlock *RBB : Stmt->getRegion()->blocks())
      if (RBB != Stmt->getEntryBlock() && ::hasDebugCall(RBB))
        return true;
  }

  return false;
}
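
// Usage sketch: with -polly-debug-func=printf, a statement containing
//   call i32 (i8*, ...) @printf(i8* %fmt, i64 %i)
// is reported by hasDebugCall; transformations can then, for instance, refrain
// from removing or reordering such statements so the debug output is kept
// intact.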