forked from OSchip/llvm-project
LoopFusion: Creates private MemRefs which are used only by operations in the fused loop.

*) Enables reduction of private memref size based on the MemRef region accessed by the fused slice.
*) Enables maximal fusion by creating a private memref to break a fusion-preventing dependence.
*) Adds a maximal fusion flag to enable fusing as much as possible (though it still fuses the minimum cost computation slice).

PiperOrigin-RevId: 229936698
This commit is contained in:
parent 24e5a72dac
commit c4237ae990
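To make the transformation concrete, here is a minimal before/after sketch in the same MLIR syntax as the tests below; the shapes, names, and constant are illustrative only and are not taken from the diff:

// Before fusion: the producer and consumer loops communicate through the
// full-sized memref '%m'.
%m = alloc() : memref<100xf32>
%cf7 = constant 7.0 : f32
for %i0 = 0 to 17 {
  store %cf7, %m[%i0] : memref<100xf32>
}
for %i1 = 0 to 17 {
  %v0 = load %m[%i1] : memref<100xf32>
}

// After fusion: the producer slice is inserted into the consumer loop and
// redirected to a private memref '%p', shrunk to the 17 elements the slice
// actually writes, which breaks the dependence on '%m'.
%p = alloc() : memref<17xf32>
%cf7 = constant 7.0 : f32
for %i1 = 0 to 17 {
  %a = affine_apply (d0) -> (d0) (%i1)
  store %cf7, %p[%a] : memref<17xf32>
  %v0 = load %p[%i1] : memref<17xf32>
}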
@ -32,6 +32,7 @@
|
|||
#include "mlir/StandardOps/StandardOps.h"
|
||||
#include "mlir/Transforms/LoopUtils.h"
|
||||
#include "mlir/Transforms/Passes.h"
|
||||
#include "mlir/Transforms/Utils.h"
|
||||
#include "llvm/ADT/DenseMap.h"
|
||||
#include "llvm/ADT/DenseSet.h"
|
||||
#include "llvm/ADT/SetVector.h"
|
||||
|
@ -45,6 +46,10 @@ using llvm::SetVector;
|
|||
|
||||
using namespace mlir;
|
||||
|
||||
static llvm::cl::opt<bool>
|
||||
clMaximalLoopFusion("fusion-maximal", llvm::cl::Hidden,
|
||||
llvm::cl::desc("Enables maximal loop fusion."));
|
||||
|
||||
namespace {
|
||||
|
||||
/// Loop fusion pass. This pass currently supports a greedy fusion policy,
|
||||
|
@ -95,6 +100,7 @@ public:
|
|||
// MemRefDependenceGraph is a graph data structure where graph nodes are
|
||||
// top-level instructions in a Function which contain load/store ops, and edges
|
||||
// are memref dependences between the nodes.
|
||||
// TODO(andydavis) Add a more flexible dependence graph representation.
|
||||
// TODO(andydavis) Add a depth parameter to dependence graph construction.
|
||||
struct MemRefDependenceGraph {
|
||||
public:
|
||||
|
@ -147,6 +153,9 @@ public:
|
|||
DenseMap<unsigned, SmallVector<Edge, 2>> inEdges;
|
||||
// Map from node id to list of output edges.
|
||||
DenseMap<unsigned, SmallVector<Edge, 2>> outEdges;
|
||||
// Map from memref to a count on the dependence edges associated with that
|
||||
// memref.
|
||||
DenseMap<Value *, unsigned> memrefEdgeCount;
|
||||
|
||||
MemRefDependenceGraph() {}
|
||||
|
||||
|
@ -161,6 +170,32 @@ public:
|
|||
return &it->second;
|
||||
}
|
||||
|
||||
// Remove node 'id' (and its associated edges) from graph.
|
||||
void removeNode(unsigned id) {
|
||||
// Remove each edge in 'inEdges[id]'.
|
||||
if (inEdges.count(id) > 0) {
|
||||
SmallVector<Edge, 2> oldInEdges = inEdges[id];
|
||||
for (auto &inEdge : oldInEdges) {
|
||||
removeEdge(inEdge.id, id, inEdge.memref);
|
||||
}
|
||||
}
|
||||
// Remove each edge in 'outEdges[id]'.
|
||||
if (outEdges.count(id) > 0) {
|
||||
SmallVector<Edge, 2> oldOutEdges = outEdges[id];
|
||||
for (auto &outEdge : oldOutEdges) {
|
||||
removeEdge(id, outEdge.id, outEdge.memref);
|
||||
}
|
||||
}
|
||||
// Erase remaining node state.
|
||||
inEdges.erase(id);
|
||||
outEdges.erase(id);
|
||||
nodes.erase(id);
|
||||
}
|
||||
|
||||
bool hasOutEdges(unsigned id) {
|
||||
return outEdges.count(id) > 0 && !outEdges[id].empty();
|
||||
}
|
||||
|
||||
// Returns true iff there is an edge from node 'srcId' to node 'dstId' for
|
||||
// 'memref'. Returns false otherwise.
|
||||
bool hasEdge(unsigned srcId, unsigned dstId, Value *memref) {
|
||||
|
@ -181,6 +216,7 @@ public:
|
|||
if (!hasEdge(srcId, dstId, memref)) {
|
||||
outEdges[srcId].push_back({dstId, memref});
|
||||
inEdges[dstId].push_back({srcId, memref});
|
||||
memrefEdgeCount[memref]++;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -188,6 +224,8 @@ public:
|
|||
void removeEdge(unsigned srcId, unsigned dstId, Value *memref) {
|
||||
assert(inEdges.count(dstId) > 0);
|
||||
assert(outEdges.count(srcId) > 0);
|
||||
assert(memrefEdgeCount.count(memref) > 0);
|
||||
memrefEdgeCount[memref]--;
|
||||
// Remove 'srcId' from 'inEdges[dstId]'.
|
||||
for (auto it = inEdges[dstId].begin(); it != inEdges[dstId].end(); ++it) {
|
||||
if ((*it).id == srcId && (*it).memref == memref) {
|
||||
|
@ -224,43 +262,36 @@ public:
|
|||
return outEdgeCount;
|
||||
}
|
||||
|
||||
// Returns the min node id of all output edges from node 'id'.
|
||||
unsigned getMinOutEdgeNodeId(unsigned id) {
|
||||
// Returns the min node id across all outgoing edges from node 'id', skipping
|
||||
// edges with 'memrefToSkip'.
|
||||
unsigned getMinOutEdgeNodeId(unsigned id, Value *memrefToSkip) {
|
||||
unsigned minId = std::numeric_limits<unsigned>::max();
|
||||
if (outEdges.count(id) > 0)
|
||||
for (auto &outEdge : outEdges[id])
|
||||
minId = std::min(minId, outEdge.id);
|
||||
if (outEdge.memref != memrefToSkip)
|
||||
minId = std::min(minId, outEdge.id);
|
||||
return minId;
|
||||
}
|
||||
|
||||
// Updates edge mappings from node 'srcId' to node 'dstId' and removes
|
||||
// state associated with node 'srcId'.
|
||||
void updateEdgesAndRemoveSrcNode(unsigned srcId, unsigned dstId) {
|
||||
// Updates edge mappings from node 'srcId' to node 'dstId'.
|
||||
void updateEdges(unsigned srcId, unsigned dstId) {
|
||||
// For each edge in 'inEdges[srcId]': add new edge remapping to 'dstId'.
|
||||
if (inEdges.count(srcId) > 0) {
|
||||
SmallVector<Edge, 2> oldInEdges = inEdges[srcId];
|
||||
for (auto &inEdge : oldInEdges) {
|
||||
// Remove edge from 'inEdge.id' to 'srcId'.
|
||||
removeEdge(inEdge.id, srcId, inEdge.memref);
|
||||
// Add edge from 'inEdge.id' to 'dstId'.
|
||||
addEdge(inEdge.id, dstId, inEdge.memref);
|
||||
}
|
||||
}
|
||||
// For each edge in 'outEdges[srcId]': add new edge remaping to 'dstId'.
|
||||
// For each edge in 'outEdges[srcId]': remove edge from 'srcId' to 'dstId'.
|
||||
if (outEdges.count(srcId) > 0) {
|
||||
SmallVector<Edge, 2> oldOutEdges = outEdges[srcId];
|
||||
for (auto &outEdge : oldOutEdges) {
|
||||
// Remove edge from 'srcId' to 'outEdge.id'.
|
||||
removeEdge(srcId, outEdge.id, outEdge.memref);
|
||||
// Add edge from 'dstId' to 'outEdge.id' (if 'outEdge.id' != 'dstId').
|
||||
if (outEdge.id != dstId)
|
||||
addEdge(dstId, outEdge.id, outEdge.memref);
|
||||
// Remove any out edges from 'srcId' to 'dstId' across memrefs.
|
||||
if (outEdge.id == dstId)
|
||||
removeEdge(srcId, outEdge.id, outEdge.memref);
|
||||
}
|
||||
}
|
||||
// Remove 'srcId' from graph state.
|
||||
inEdges.erase(srcId);
|
||||
outEdges.erase(srcId);
|
||||
nodes.erase(srcId);
|
||||
}
|
||||
|
||||
// Adds ops in 'loads' and 'stores' to node at 'id'.
|
||||
|
@ -273,6 +304,12 @@ public:
|
|||
node->stores.push_back(storeOpInst);
|
||||
}
|
||||
|
||||
void clearNodeLoadAndStores(unsigned id) {
|
||||
Node *node = getNode(id);
|
||||
node->loads.clear();
|
||||
node->stores.clear();
|
||||
}
|
||||
|
||||
void print(raw_ostream &os) const {
|
||||
os << "\nMemRefDependenceGraph\n";
|
||||
os << "\nNodes:\n";
|
||||
|
@ -614,6 +651,82 @@ static bool getSliceUnion(const ComputationSliceState &sliceStateA,
|
|||
return true;
|
||||
}
|
||||
|
||||
// Creates and returns a private (single-user) memref for fused loop rooted
|
||||
// at 'forInst', with (potentially reduced) memref size based on the
|
||||
// MemRefRegion written to by 'srcStoreOpInst'.
|
||||
static Value *createPrivateMemRef(ForInst *forInst,
|
||||
OperationInst *srcStoreOpInst) {
|
||||
// Create builder to insert alloc op just before 'forInst'.
|
||||
FuncBuilder b(forInst);
|
||||
// Builder to create constants at the top level.
|
||||
FuncBuilder top(forInst->getFunction());
|
||||
// Create new memref type based on slice bounds.
|
||||
auto *oldMemRef = srcStoreOpInst->cast<StoreOp>()->getMemRef();
|
||||
auto oldMemRefType = oldMemRef->getType().cast<MemRefType>();
|
||||
unsigned rank = oldMemRefType.getRank();
|
||||
|
||||
// Compute MemRefRegion for 'srcStoreOpInst'.
|
||||
MemRefRegion region;
|
||||
getMemRefRegion(srcStoreOpInst, 0, ®ion);
|
||||
SmallVector<int, 4> newShape;
|
||||
std::vector<SmallVector<int64_t, 4>> lbs;
|
||||
lbs.reserve(rank);
|
||||
// Query 'region' for 'newShape' and lower bounds of MemRefRegion accessed
|
||||
// by 'srcStoreOpInst'.
|
||||
Optional<int64_t> numElements =
|
||||
region.getBoundingConstantSizeAndShape(&newShape, &lbs);
|
||||
assert(numElements.hasValue());
|
||||
|
||||
// Build 'rank' AffineExprs from MemRefRegion 'lbs'
|
||||
const FlatAffineConstraints *cst = region.getConstraints();
|
||||
SmallVector<AffineExpr, 4> offsets;
|
||||
offsets.reserve(rank);
|
||||
for (unsigned d = 0; d < rank; ++d) {
|
||||
AffineExpr offset = top.getAffineConstantExpr(0);
|
||||
for (unsigned j = 0, e = cst->getNumCols() - rank - 1; j < e; j++) {
|
||||
offset = offset + lbs[d][j] * top.getAffineDimExpr(j);
|
||||
}
|
||||
offset = offset + lbs[d][cst->getNumCols() - 1 - rank];
|
||||
offsets.push_back(offset);
|
||||
}
|
||||
|
||||
// Create 'newMemRefType' using 'newShape' from MemRefRegion accessed
|
||||
// by 'srcStoreOpInst'.
|
||||
auto newMemRefType = b.getMemRefType(newShape, oldMemRefType.getElementType(),
|
||||
{}, oldMemRefType.getMemorySpace());
|
||||
// Gather alloc operands for the dynamic dimensions of the memref.
|
||||
SmallVector<Value *, 4> allocOperands;
|
||||
unsigned dynamicDimCount = 0;
|
||||
for (auto dimSize : oldMemRefType.getShape()) {
|
||||
if (dimSize == -1)
|
||||
allocOperands.push_back(
|
||||
b.create<DimOp>(forInst->getLoc(), oldMemRef, dynamicDimCount++));
|
||||
}
|
||||
|
||||
// Create new private memref for fused loop 'forInst'.
|
||||
Value *newMemRef =
|
||||
b.create<AllocOp>(forInst->getLoc(), newMemRefType, allocOperands);
|
||||
|
||||
// Build an AffineMap to remap access functions based on lower bound offsets.
|
||||
SmallVector<AffineExpr, 4> remapExprs;
|
||||
remapExprs.reserve(rank);
|
||||
unsigned zeroOffsetCount = 0;
|
||||
for (unsigned i = 0; i < rank; i++) {
|
||||
if (auto constExpr = offsets[i].dyn_cast<AffineConstantExpr>())
|
||||
if (constExpr.getValue() == 0)
|
||||
++zeroOffsetCount;
|
||||
auto dimExpr = b.getAffineDimExpr(i);
|
||||
remapExprs.push_back(dimExpr - offsets[i]);
|
||||
}
|
||||
auto indexRemap = zeroOffsetCount == rank
|
||||
? AffineMap::Null()
|
||||
: b.getAffineMap(rank, 0, remapExprs, {});
|
||||
// Replace all users of 'oldMemRef' with 'newMemRef'.
|
||||
assert(replaceAllMemRefUsesWith(oldMemRef, newMemRef, {}, indexRemap, {},
|
||||
&*forInst->getBody()->begin()));
|
||||
return newMemRef;
|
||||
}
|
||||
|
||||
// Checks the profitability of fusing a backwards slice of the loop nest
|
||||
// surrounding 'srcOpInst' into the loop nest surrounding 'dstOpInsts'.
|
||||
// Returns true if it is profitable to fuse the candidate loop nests. Returns
|
||||
|
@ -744,10 +857,12 @@ static bool isFusionProfitable(OperationInst *srcOpInst,
|
|||
<< " minFusedLoopNestComputeCost: "
|
||||
<< minFusedLoopNestComputeCost << "\n");
|
||||
|
||||
// Do not fuse if fused loop would increase the total cost of the computation.
|
||||
// Do not fuse if fused loop would increase the total cost of the computation,
|
||||
// unless 'clMaximalLoopFusion' flag is set.
|
||||
// TODO(andydavis) Use locality/reduction in slice memref size/opportunity
|
||||
// for load/store forwarding in cost model.
|
||||
if (minFusedLoopNestComputeCost > srcLoopNestCost + dstLoopNestCost)
|
||||
if (!clMaximalLoopFusion &&
|
||||
minFusedLoopNestComputeCost > srcLoopNestCost + dstLoopNestCost)
|
||||
return false;
|
||||
// Update return parameter 'sliceState' with 'bestSliceState'.
|
||||
ComputationSliceState *bestSliceState = &sliceStates[bestDstLoopDepth - 1];
|
||||
|
@ -835,9 +950,13 @@ public:
|
|||
|
||||
SmallVector<OperationInst *, 4> loads = dstNode->loads;
|
||||
SmallVector<OperationInst *, 4> dstLoadOpInsts;
|
||||
DenseSet<Value *> visitedMemrefs;
|
||||
while (!loads.empty()) {
|
||||
// Get memref of load on top of the stack.
|
||||
auto *memref = loads.back()->cast<LoadOp>()->getMemRef();
|
||||
if (visitedMemrefs.count(memref) > 0)
|
||||
continue;
|
||||
visitedMemrefs.insert(memref);
|
||||
// Move all loads in 'loads' accessing 'memref' to 'dstLoadOpInsts'.
|
||||
moveLoadsAccessingMemrefTo(memref, &loads, &dstLoadOpInsts);
|
||||
// Skip if no input edges along which to fuse.
|
||||
|
@ -855,16 +974,13 @@ public:
|
|||
// Skip if 'srcNode' has more than one store to 'memref'.
|
||||
if (srcNode->getStoreOpCount(memref) != 1)
|
||||
continue;
|
||||
// Skip 'srcNode' if it has out edges on 'memref' other than 'dstId'.
|
||||
if (mdg->getOutEdgeCount(srcNode->id, memref) != 1)
|
||||
continue;
|
||||
// Skip 'srcNode' if it has in dependence edges. NOTE: This is overly
|
||||
// TODO(andydavis) Track dependence type with edges, and just check
|
||||
// for WAW dependence edge here.
|
||||
if (mdg->getInEdgeCount(srcNode->id, memref) != 0)
|
||||
continue;
|
||||
// Skip if 'srcNode' has out edges to other memrefs after 'dstId'.
|
||||
if (mdg->getMinOutEdgeNodeId(srcNode->id) != dstId)
|
||||
if (mdg->getMinOutEdgeNodeId(srcNode->id, memref) < dstId)
|
||||
continue;
|
||||
// Get unique 'srcNode' store op.
|
||||
auto *srcStoreOpInst = srcNode->stores.front();
|
||||
|
@ -878,27 +994,66 @@ public:
|
|||
auto *sliceLoopNest = mlir::insertBackwardComputationSlice(
|
||||
srcStoreOpInst, dstLoadOpInsts[0], dstLoopDepth, &sliceState);
|
||||
if (sliceLoopNest != nullptr) {
|
||||
// Remove edges between 'srcNode' and 'dstNode' and remove 'srcNode'
|
||||
mdg->updateEdgesAndRemoveSrcNode(srcNode->id, dstNode->id);
|
||||
// Record all load/store accesses in 'sliceLoopNest' at 'dstPos'.
|
||||
LoopNestStateCollector collector;
|
||||
collector.walkForInst(sliceLoopNest);
|
||||
mdg->addToNode(dstId, collector.loadOpInsts,
|
||||
collector.storeOpInsts);
|
||||
// Add new load ops to current Node load op list 'loads' to
|
||||
// continue fusing based on new operands.
|
||||
for (auto *loadOpInst : collector.loadOpInsts)
|
||||
loads.push_back(loadOpInst);
|
||||
// Promote single iteration loops to single IV value.
|
||||
for (auto *forInst : collector.forInsts) {
|
||||
// Update edges between 'srcNode' and 'dstNode'.
|
||||
mdg->updateEdges(srcNode->id, dstNode->id);
|
||||
|
||||
// Collect slice loop stats.
|
||||
LoopNestStateCollector sliceCollector;
|
||||
sliceCollector.walkForInst(sliceLoopNest);
|
||||
// Promote single iteration slice loops to single IV value.
|
||||
for (auto *forInst : sliceCollector.forInsts) {
|
||||
promoteIfSingleIteration(forInst);
|
||||
}
|
||||
// Remove old src loop nest.
|
||||
cast<ForInst>(srcNode->inst)->erase();
|
||||
|
||||
// Create private memref for 'memref' in 'dstForInst'.
|
||||
auto *dstForInst = cast<ForInst>(dstNode->inst);
|
||||
SmallVector<OperationInst *, 4> storesForMemref;
|
||||
for (auto *storeOpInst : sliceCollector.storeOpInsts) {
|
||||
if (storeOpInst->cast<StoreOp>()->getMemRef() == memref)
|
||||
storesForMemref.push_back(storeOpInst);
|
||||
}
|
||||
assert(storesForMemref.size() == 1);
|
||||
auto *newMemRef =
|
||||
createPrivateMemRef(dstForInst, storesForMemref[0]);
|
||||
visitedMemrefs.insert(newMemRef);
|
||||
|
||||
// Collect dst loop stats after memref privatization transformation.
|
||||
LoopNestStateCollector dstLoopCollector;
|
||||
dstLoopCollector.walkForInst(dstForInst);
|
||||
|
||||
// Add new load ops to current Node load op list 'loads' to
|
||||
// continue fusing based on new operands.
|
||||
for (auto *loadOpInst : dstLoopCollector.loadOpInsts) {
|
||||
auto *loadMemRef = loadOpInst->cast<LoadOp>()->getMemRef();
|
||||
if (visitedMemrefs.count(loadMemRef) == 0)
|
||||
loads.push_back(loadOpInst);
|
||||
}
|
||||
|
||||
// Clear and add back loads and stores
|
||||
mdg->clearNodeLoadAndStores(dstNode->id);
|
||||
mdg->addToNode(dstId, dstLoopCollector.loadOpInsts,
|
||||
dstLoopCollector.storeOpInsts);
|
||||
// Remove old src loop nest if it no longer has users.
|
||||
if (!mdg->hasOutEdges(srcNode->id)) {
|
||||
mdg->removeNode(srcNode->id);
|
||||
cast<ForInst>(srcNode->inst)->erase();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// Clean up any allocs with no users.
|
||||
for (auto &pair : mdg->memrefEdgeCount) {
|
||||
if (pair.second > 0)
|
||||
continue;
|
||||
auto *memref = pair.first;
|
||||
// Use list expected to match the dep graph info.
|
||||
assert(memref->use_empty());
|
||||
auto *inst = memref->getDefiningInst();
|
||||
auto *opInst = dyn_cast_or_null<OperationInst>(inst);
|
||||
if (opInst && opInst->isa<AllocOp>())
|
||||
opInst->erase();
|
||||
}
|
||||
}
|
||||
};
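As an aside on how the new flag would typically be exercised, a RUN line along the following lines could drive the tests below in maximal-fusion mode. This is a hedged sketch: it assumes the pass is registered as -loop-fusion (as in the existing loop-fusion tests), while -fusion-maximal is the flag added above:

// RUN: mlir-opt %s -loop-fusion -fusion-maximal | FileCheck %s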
|
||||
|
||||
|
|
|
@ -61,13 +61,13 @@ func @should_fuse_reduction_to_pointwise() {
|
|||
// CHECK: for %i0 = 0 to 10 {
|
||||
// CHECK-NEXT: %3 = affine_apply [[MAP0]](%i0)
|
||||
// CHECK-NEXT: for %i1 = 0 to 10 {
|
||||
// CHECK-NEXT: %4 = load %1[%3] : memref<10xf32>
|
||||
// CHECK-NEXT: %4 = load %2[%3] : memref<10xf32>
|
||||
// CHECK-NEXT: %5 = load %0[%3, %i1] : memref<10x10xf32>
|
||||
// CHECK-NEXT: %6 = addf %4, %5 : f32
|
||||
// CHECK-NEXT: store %6, %1[%3] : memref<10xf32>
|
||||
// CHECK-NEXT: store %6, %2[%3] : memref<10xf32>
|
||||
// CHECK-NEXT: }
|
||||
// CHECK-NEXT: %7 = load %1[%i0] : memref<10xf32>
|
||||
// CHECK-NEXT: store %7, %2[%i0] : memref<10xf32>
|
||||
// CHECK-NEXT: %7 = load %2[%i0] : memref<10xf32>
|
||||
// CHECK-NEXT: store %7, %1[%i0] : memref<10xf32>
|
||||
// CHECK-NEXT: }
|
||||
// CHECK-NEXT: return
|
||||
return
|
||||
|
@ -75,38 +75,45 @@ func @should_fuse_reduction_to_pointwise() {
|
|||
|
||||
// -----
|
||||
|
||||
// CHECK: [[MAP_SHIFT_MINUS_ONE:#map[0-9]+]] = (d0) -> (d0 - 1)
|
||||
// CHECK: [[MAP_SHIFT_MINUS_ONE_R1:#map[0-9]+]] = (d0) -> (d0 - 1)
|
||||
// CHECK: [[MAP_SHIFT_BY_ONE:#map[0-9]+]] = (d0, d1) -> (d0 + 1, d1 + 1)
|
||||
// CHECK: [[MAP_SHIFT_MINUS_ONE_R2:#map[0-9]+]] = (d0, d1) -> (d0 - 1, d1 - 1)
|
||||
|
||||
// CHECK-LABEL: func @should_fuse_loop_nests_with_shifts() {
|
||||
func @should_fuse_loop_nests_with_shifts() {
|
||||
%a = alloc() : memref<10x10xf32>
|
||||
%cf7 = constant 7.0 : f32
|
||||
|
||||
for %i0 = 0 to 10 {
|
||||
for %i1 = 0 to 10 {
|
||||
for %i0 = 0 to 9 {
|
||||
for %i1 = 0 to 9 {
|
||||
%a0 = affine_apply (d0, d1) -> (d0 + 1, d1 + 1) (%i0, %i1)
|
||||
store %cf7, %a[%a0#0, %a0#1] : memref<10x10xf32>
|
||||
}
|
||||
}
|
||||
for %i2 = 0 to 10 {
|
||||
for %i3 = 0 to 10 {
|
||||
for %i2 = 1 to 10 {
|
||||
for %i3 = 1 to 10 {
|
||||
%v0 = load %a[%i2, %i3] : memref<10x10xf32>
|
||||
}
|
||||
}
|
||||
|
||||
// The cost of fusing the src loop nest at dst loop depth 1 is less expensive
|
||||
// than fusing at dst loop depth 2, because at dst loop depth 1, we are
|
||||
// able to reduce the trip count around the %i1 loop by one (because the
|
||||
// dst loop never reads the last element written by the src loop).
|
||||
// CHECK: for %i0 = 0 to 10 {
|
||||
// CHECK-NEXT: %1 = affine_apply [[MAP_SHIFT_MINUS_ONE]](%i0)
|
||||
// CHECK-NEXT: for %i1 = 0 to 9 {
|
||||
// CHECK-NEXT: %2 = affine_apply [[MAP_SHIFT_BY_ONE]](%1, %i1)
|
||||
// CHECK-NEXT: store %cst, %0[%2#0, %2#1] : memref<10x10xf32>
|
||||
// CHECK-NEXT: }
|
||||
// CHECK-NEXT: for %i2 = 0 to 10 {
|
||||
// CHECK-NEXT: %3 = load %0[%i0, %i2] : memref<10x10xf32>
|
||||
// Source slice affine apply sequence:
|
||||
// *) First two affine apply's map from the dst to src iteration space.
|
||||
// *) Third affine apply is access function around src store.
|
||||
// *) Fourth affine apply shifts the store's access function by '-1', because
|
||||
// of the offset induced by reducing the memref shape from 10x10 to 9x9.
|
||||
// *) Fifth affine apply shifts the load's access function by '-1', because
|
||||
// of the offset induced by reducing the memref shape from 10x10 to 9x9.
|
||||
// NOTE: Should create a private memref with reduced shape 9x9xf32.
|
||||
// CHECK: %0 = alloc() : memref<9x9xf32>
|
||||
// CHECK-NEXT: for %i0 = 1 to 10 {
|
||||
// CHECK-NEXT: for %i1 = 1 to 10 {
|
||||
// CHECK-NEXT: %1 = affine_apply [[MAP_SHIFT_MINUS_ONE_R1]](%i0)
|
||||
// CHECK-NEXT: %2 = affine_apply [[MAP_SHIFT_MINUS_ONE_R1]](%i1)
|
||||
// CHECK-NEXT: %3 = affine_apply [[MAP_SHIFT_BY_ONE]](%1, %2)
|
||||
// CHECK-NEXT: %4 = affine_apply [[MAP_SHIFT_MINUS_ONE_R2]](%3#0, %3#1)
|
||||
// CHECK-NEXT: store %cst, %0[%4#0, %4#1] : memref<9x9xf32>
|
||||
// CHECK-NEXT: %5 = affine_apply [[MAP_SHIFT_MINUS_ONE_R2]](%i0, %i1)
|
||||
// CHECK-NEXT: %6 = load %0[%5#0, %5#1] : memref<9x9xf32>
|
||||
// CHECK-NEXT: }
|
||||
// CHECK-NEXT: }
|
||||
// CHECK-NEXT: return
|
||||
|
@ -139,17 +146,19 @@ func @should_fuse_loop_nest() {
|
|||
%v1 = load %b[%i4, %i5] : memref<10x10xf32>
|
||||
}
|
||||
}
|
||||
|
||||
// CHECK: for %i0 = 0 to 10 {
|
||||
// Expecting private memref for '%b' first, then private memref for '%a'.
|
||||
// CHECK: [[NEWB:%[0-9]+]] = alloc() : memref<10x10xf32>
|
||||
// CHECK-NEXT: [[NEWA:%[0-9]+]] = alloc() : memref<10x10xf32>
|
||||
// CHECK-NEXT: for %i0 = 0 to 10 {
|
||||
// CHECK-NEXT: for %i1 = 0 to 10 {
|
||||
// CHECK-NEXT: %2 = affine_apply [[MAP_ID]](%i1)
|
||||
// CHECK-NEXT: %3 = affine_apply [[MAP_ID]](%i0)
|
||||
// CHECK-NEXT: store %cst, %0[%2, %3] : memref<10x10xf32>
|
||||
// CHECK-NEXT: store %cst, [[NEWA]][%2, %3] : memref<10x10xf32>
|
||||
// CHECK-NEXT: %4 = affine_apply [[MAP_ID]](%i0)
|
||||
// CHECK-NEXT: %5 = affine_apply [[MAP_ID]](%i1)
|
||||
// CHECK-NEXT: %6 = load %0[%5, %4] : memref<10x10xf32>
|
||||
// CHECK-NEXT: store %6, %1[%4, %5] : memref<10x10xf32>
|
||||
// CHECK-NEXT: %7 = load %1[%i0, %i1] : memref<10x10xf32>
|
||||
// CHECK-NEXT: %6 = load [[NEWA]][%5, %4] : memref<10x10xf32>
|
||||
// CHECK-NEXT: store %6, [[NEWB]][%4, %5] : memref<10x10xf32>
|
||||
// CHECK-NEXT: %7 = load [[NEWB]][%i0, %i1] : memref<10x10xf32>
|
||||
// CHECK-NEXT: }
|
||||
// CHECK-NEXT: }
|
||||
// CHECK-NEXT: return
|
||||
|
@ -180,14 +189,16 @@ func @should_fuse_across_intermediate_loop_with_no_deps() {
|
|||
}
|
||||
|
||||
// Should fuse first loop (past second loop with no dependences) into third.
|
||||
// Note that fusion creates a private memref '%2' for the fused loop nest.
|
||||
// CHECK: for %i0 = 0 to 10 {
|
||||
// CHECK-NEXT: store %cst, %2[%i0] : memref<10xf32>
|
||||
// CHECK-NEXT: store %cst, %1[%i0] : memref<10xf32>
|
||||
// CHECK-NEXT: }
|
||||
// CHECK: %2 = alloc() : memref<10xf32>
|
||||
// CHECK: for %i1 = 0 to 10 {
|
||||
// CHECK-NEXT: %3 = affine_apply [[MAP0]](%i1)
|
||||
// CHECK-NEXT: %4 = load %0[%3] : memref<10xf32>
|
||||
// CHECK-NEXT: store %4, %1[%3] : memref<10xf32>
|
||||
// CHECK-NEXT: %5 = load %1[%i1] : memref<10xf32>
|
||||
// CHECK-NEXT: store %4, %2[%3] : memref<10xf32>
|
||||
// CHECK-NEXT: %5 = load %2[%i1] : memref<10xf32>
|
||||
// CHECK-NEXT: }
|
||||
// CHECK-NEXT: return
|
||||
return
|
||||
|
@ -216,13 +227,16 @@ func @should_fuse_all_loops() {
|
|||
}
|
||||
|
||||
// Should fuse first and second loops into third.
|
||||
// CHECK: for %i0 = 0 to 10 {
|
||||
// Expecting private memref for '%b' first, then private memref for '%a'.
|
||||
// CHECK: [[NEWB:%[0-9]+]] = alloc() : memref<10xf32>
|
||||
// CHECK-NEXT: [[NEWA:%[0-9]+]] = alloc() : memref<10xf32>
|
||||
// CHECK-NEXT: for %i0 = 0 to 10 {
|
||||
// CHECK-NEXT: %2 = affine_apply [[MAP0]](%i0)
|
||||
// CHECK-NEXT: store %cst, %0[%2] : memref<10xf32>
|
||||
// CHECK-NEXT: store %cst, [[NEWA]][%2] : memref<10xf32>
|
||||
// CHECK-NEXT: %3 = affine_apply [[MAP0]](%i0)
|
||||
// CHECK-NEXT: store %cst, %1[%3] : memref<10xf32>
|
||||
// CHECK-NEXT: %4 = load %0[%i0] : memref<10xf32>
|
||||
// CHECK-NEXT: %5 = load %1[%i0] : memref<10xf32>
|
||||
// CHECK-NEXT: store %cst, [[NEWB]][%3] : memref<10xf32>
|
||||
// CHECK-NEXT: %4 = load [[NEWA]][%i0] : memref<10xf32>
|
||||
// CHECK-NEXT: %5 = load [[NEWB]][%i0] : memref<10xf32>
|
||||
// CHECK-NEXT: }
|
||||
// CHECK-NEXT: return
|
||||
return
|
||||
|
@ -252,14 +266,16 @@ func @should_fuse_first_and_second_loops() {
|
|||
}
|
||||
|
||||
// Should fuse first loop into the second (last loop should not be fused).
|
||||
// CHECK: for %i0 = 0 to 10 {
|
||||
// Should create private memref '%2' for fused loop.
|
||||
// CHECK: %2 = alloc() : memref<10xf32>
|
||||
// CHECK-NEXT: for %i0 = 0 to 10 {
|
||||
// CHECK-NEXT: %3 = affine_apply [[MAP0]](%i0)
|
||||
// CHECK-NEXT: store %cst, %0[%3] : memref<10xf32>
|
||||
// CHECK-NEXT: %4 = load %0[%i0] : memref<10xf32>
|
||||
// CHECK-NEXT: store %cst, %1[%i0] : memref<10xf32>
|
||||
// CHECK-NEXT: store %cst, %2[%3] : memref<10xf32>
|
||||
// CHECK-NEXT: %4 = load %2[%i0] : memref<10xf32>
|
||||
// CHECK-NEXT: store %cst, %0[%i0] : memref<10xf32>
|
||||
// CHECK-NEXT: }
|
||||
// CHECK: for %i1 = 0 to 10 {
|
||||
// CHECK-NEXT: %5 = load %2[%i1] : memref<10xf32>
|
||||
// CHECK-NEXT: %5 = load %1[%i1] : memref<10xf32>
|
||||
// CHECK-NEXT: }
|
||||
// CHECK-NEXT: return
|
||||
|
||||
|
@ -310,39 +326,10 @@ func @should_not_fuse_would_create_cycle() {
|
|||
}
|
||||
|
||||
// -----
|
||||
// CHECK: #map0 = (d0) -> (d0)
|
||||
|
||||
// CHECK-LABEL: func @should_not_fuse_raw_dep_would_be_violated() {
|
||||
func @should_not_fuse_raw_dep_would_be_violated() {
|
||||
%m = alloc() : memref<10xf32>
|
||||
%cf7 = constant 7.0 : f32
|
||||
|
||||
for %i0 = 0 to 10 {
|
||||
store %cf7, %m[%i0] : memref<10xf32>
|
||||
}
|
||||
for %i1 = 0 to 10 {
|
||||
%v0 = load %m[%i1] : memref<10xf32>
|
||||
}
|
||||
for %i2 = 0 to 10 {
|
||||
%v1 = load %m[%i2] : memref<10xf32>
|
||||
}
|
||||
// Fusing loop %i0 to %i2 would violate the RAW dependence between %i0 and %i1
|
||||
// CHECK: for %i0 = 0 to 10 {
|
||||
// CHECK-NEXT: store %cst, %0[%i0] : memref<10xf32>
|
||||
// CHECK-NEXT: }
|
||||
// CHECK: for %i1 = 0 to 10 {
|
||||
// CHECK-NEXT: %1 = load %0[%i1] : memref<10xf32>
|
||||
// CHECK-NEXT: }
|
||||
// CHECK: for %i2 = 0 to 10 {
|
||||
// CHECK-NEXT: %2 = load %0[%i2] : memref<10xf32>
|
||||
// CHECK-NEXT: }
|
||||
// CHECK-NEXT: return
|
||||
return
|
||||
}
|
||||
|
||||
// -----
|
||||
|
||||
// CHECK-LABEL: func @should_not_fuse_waw_dep_would_be_violated() {
|
||||
func @should_not_fuse_waw_dep_would_be_violated() {
|
||||
// CHECK-LABEL: func @should_fuse_across_waw_dep_with_private_memref() {
|
||||
func @should_fuse_across_waw_dep_with_private_memref() {
|
||||
%m = alloc() : memref<10xf32>
|
||||
%cf7 = constant 7.0 : f32
|
||||
|
||||
|
@ -362,8 +349,11 @@ func @should_not_fuse_waw_dep_would_be_violated() {
|
|||
// CHECK: for %i1 = 0 to 10 {
|
||||
// CHECK-NEXT: store %cst, %0[%i1] : memref<10xf32>
|
||||
// CHECK-NEXT: }
|
||||
// CHECK: for %i2 = 0 to 10 {
|
||||
// CHECK-NEXT: %1 = load %0[%i2] : memref<10xf32>
|
||||
// CHECK: %1 = alloc() : memref<10xf32>
|
||||
// CHECK-NEXT: for %i2 = 0 to 10 {
|
||||
// CHECK-NEXT: %2 = affine_apply #map0(%i2)
|
||||
// CHECK-NEXT: store %cst, %1[%2] : memref<10xf32>
|
||||
// CHECK-NEXT: %3 = load %1[%i2] : memref<10xf32>
|
||||
// CHECK-NEXT: }
|
||||
// CHECK-NEXT: return
|
||||
return
|
||||
|
@ -404,8 +394,8 @@ func @should_not_fuse_war_dep_would_be_violated() {
|
|||
|
||||
// -----
|
||||
|
||||
// CHECK-LABEL: func @should_not_fuse_if_top_level_access() {
|
||||
func @should_not_fuse_if_top_level_access() {
|
||||
// CHECK-LABEL: func @should_fuse_with_private_memref_if_top_level_access() {
|
||||
func @should_fuse_with_private_memref_if_top_level_access() {
|
||||
%m = alloc() : memref<10xf32>
|
||||
%cf7 = constant 7.0 : f32
|
||||
|
||||
|
@ -422,8 +412,11 @@ func @should_not_fuse_if_top_level_access() {
|
|||
// CHECK: for %i0 = 0 to 10 {
|
||||
// CHECK-NEXT: store %cst, %0[%i0] : memref<10xf32>
|
||||
// CHECK-NEXT: }
|
||||
// CHECK: for %i1 = 0 to 10 {
|
||||
// CHECK-NEXT: %1 = load %0[%i1] : memref<10xf32>
|
||||
// CHECK: %1 = alloc() : memref<10xf32>
|
||||
// CHECK-NEXT: for %i1 = 0 to 10 {
|
||||
// CHECK-NEXT: %2 = affine_apply #map0(%i1)
|
||||
// CHECK-NEXT: store %cst, %1[%2] : memref<10xf32>
|
||||
// CHECK-NEXT: %3 = load %1[%i1] : memref<10xf32>
|
||||
// CHECK-NEXT: }
|
||||
return
|
||||
}
|
||||
|
@ -625,12 +618,14 @@ func @fuse_reshape_16_4_64() {
|
|||
|
||||
// -----
|
||||
|
||||
// TODO(b/123072438) Re-enable this test once the MemRefRegion bug is fixed.
|
||||
// All three loop nests below (the 6-d one and the two 2-d ones) are fused
// into a single 2-d loop nest.
|
||||
// CHECK-LABEL: func @R6_to_R2_reshape
|
||||
// xCHECK-LABEL: func @R6_to_R2_reshape
|
||||
func @R6_to_R2_reshape_square() -> memref<64x9xi32> {
|
||||
%in = alloc() : memref<2x2x3x3x16x1xi32>
|
||||
%out = alloc() : memref<64x9xi32>
|
||||
%live_out = alloc() : memref<64x9xi32>
|
||||
|
||||
// Initialize input.
|
||||
for %i0 = 0 to 2 {
|
||||
|
@ -670,35 +665,38 @@ func @R6_to_R2_reshape_square() -> memref<64x9xi32> {
|
|||
for %j = 0 to 9 {
|
||||
%a = load %out[%i, %j] : memref<64x9xi32>
|
||||
%b = muli %a, %a : i32
|
||||
store %b, %out[%i, %j] : memref<64x9xi32>
|
||||
store %b, %live_out[%i, %j] : memref<64x9xi32>
|
||||
}
|
||||
}
|
||||
return %out : memref<64x9xi32>
|
||||
return %live_out : memref<64x9xi32>
|
||||
}
|
||||
// Everything above is fused to a single 2-d loop nest, and the 6-d tensor %in
|
||||
// is eliminated if -memref-dataflow-opt is also supplied.
|
||||
//
|
||||
// CHECK: for %i0 = 0 to 64 {
|
||||
// CHECK-NEXT: for %i1 = 0 to 9 {
|
||||
// CHECK-NEXT: %2 = affine_apply #map0(%i0, %i1)
|
||||
// CHECK-NEXT: %3 = affine_apply #map1(%i0, %i1)
|
||||
// CHECK-NEXT: %4 = affine_apply #map2(%i0, %i1)
|
||||
// CHECK-NEXT: %5 = affine_apply #map3(%i0, %i1)
|
||||
// CHECK-NEXT: %6 = affine_apply #map4(%i0, %i1)
|
||||
// CHECK-NEXT: %7 = "foo"(%2, %3, %4, %5, %6, %c0) : (index, index, index, index, index, index) -> i32
|
||||
// CHECK-NEXT: store %7, %0[%2, %3, %4, %5, %6, %c0] : memref<2x2x3x3x16x1xi32>
|
||||
// CHECK-NEXT: %8 = affine_apply #map5(%i0)
|
||||
// CHECK-NEXT: %9 = affine_apply #map5(%i1)
|
||||
// CHECK-NEXT: %10 = affine_apply #map6(%8, %9)
|
||||
// CHECK-NEXT: %11 = affine_apply #map7(%10)
|
||||
// CHECK-NEXT: %12 = load %0[%11#0, %11#1, %11#3, %11#4, %11#2, %11#5] : memref<2x2x3x3x16x1xi32>
|
||||
// CHECK-NEXT: store %12, %1[%8, %9] : memref<64x9xi32>
|
||||
// CHECK-NEXT: %13 = load %1[%i0, %i1] : memref<64x9xi32>
|
||||
// CHECK-NEXT: %14 = muli %13, %13 : i32
|
||||
// CHECK-NEXT: store %14, %1[%i0, %i1] : memref<64x9xi32>
|
||||
// CHECK-NEXT: }
|
||||
// CHECK-NEXT: }
|
||||
// CHECK-NEXT: return %1 : memref<64x9xi32>
|
||||
// xCHECK: %0 = alloc() : memref<64x9xi32>
|
||||
// xCHECK-NEXT: %1 = alloc() : memref<64x9xi32>
|
||||
// xCHECK-NEXT: %2 = alloc() : memref<2x2x3x3x16x1xi32>
|
||||
// xCHECK-NEXT: for %i0 = 0 to 64 {
|
||||
// xCHECK-NEXT: for %i1 = 0 to 9 {
|
||||
// xCHECK-NEXT: %3 = affine_apply #map0(%i0, %i1)
|
||||
// xCHECK-NEXT: %4 = affine_apply #map1(%i0, %i1)
|
||||
// xCHECK-NEXT: %5 = affine_apply #map2(%i0, %i1)
|
||||
// xCHECK-NEXT: %6 = affine_apply #map3(%i0, %i1)
|
||||
// xCHECK-NEXT: %7 = affine_apply #map4(%i0, %i1)
|
||||
// xCHECK-NEXT: %8 = "foo"(%3, %4, %5, %6, %7, %c0) : (index, index, index, index, index, index) -> i32
|
||||
// xCHECK-NEXT: store %8, %2[%3, %4, %5, %6, %7, %c0] : memref<2x2x3x3x16x1xi32>
|
||||
// xCHECK-NEXT: %9 = affine_apply #map5(%i0)
|
||||
// xCHECK-NEXT: %10 = affine_apply #map5(%i1)
|
||||
// xCHECK-NEXT: %11 = affine_apply #map6(%8, %9)
|
||||
// xCHECK-NEXT: %12 = affine_apply #map7(%10)
|
||||
// xCHECK-NEXT: %13 = load %2[%12#0, %12#1, %12#3, %12#4, %12#2, %12#5] : memref<2x2x3x3x16x1xi32>
|
||||
// xCHECK-NEXT: store %12, %1[%9, %10] : memref<64x9xi32>
|
||||
// xCHECK-NEXT: %14 = load %1[%i0, %i1] : memref<64x9xi32>
|
||||
// xCHECK-NEXT: %15 = muli %14, %14 : i32
|
||||
// xCHECK-NEXT: store %15, %0[%i0, %i1] : memref<64x9xi32>
|
||||
// xCHECK-NEXT: }
|
||||
// xCHECK-NEXT: }
|
||||
// xCHECK-NEXT: return %0 : memref<64x9xi32>
|
||||
|
||||
// -----
|
||||
|
||||
|
@ -867,10 +865,13 @@ func @fusion_at_depth0_not_currently_supported() {
|
|||
for %i1 = 0 to 10 {
|
||||
%1 = load %0[%c0] : memref<10xf32>
|
||||
}
|
||||
// CHECK:for %i0 = 0 to 10 {
|
||||
// NOTE: Should shrink the memref size to the 1 element accessed by the load in
// the dst loop nest, and make the store in the slice store to the same element.
|
||||
// CHECK: %0 = alloc() : memref<1xf32>
|
||||
// CHECK-NEXT: for %i0 = 0 to 10 {
|
||||
// CHECK-NEXT: %1 = affine_apply #map0()[%c0]
|
||||
// CHECK-NEXT: store %cst, %0[%1] : memref<10xf32>
|
||||
// CHECK-NEXT: %2 = load %0[%c0] : memref<10xf32>
|
||||
// CHECK-NEXT: store %cst, %0[%1] : memref<1xf32>
|
||||
// CHECK-NEXT: %2 = load %0[%c0] : memref<1xf32>
|
||||
// CHECK-NEXT: }
|
||||
// CHECK-NEXT: return
|
||||
return
|
||||
|
@ -954,7 +955,7 @@ func @should_fuse_deep_loop_nests() {
|
|||
// CHECK-NEXT: }
|
||||
// CHECK-NEXT: for %i6 = 0 to 16 {
|
||||
// CHECK-NEXT: for %i7 = 0 to 10 {
|
||||
// CHECK-NEXT: store %cst, %1[%3, %4, %5, %6, %i6, %i7] : memref<2x2x3x3x16x10xf32, 2>
|
||||
// CHECK-NEXT: store %cst, %2[%3, %4, %5, %6, %i6, %i7] : memref<2x2x3x3x16x10xf32, 2>
|
||||
// CHECK-NEXT: }
|
||||
// CHECK-NEXT: }
|
||||
// CHECK-NEXT: for %i8 = 0 to 3 {
|
||||
|
@ -968,7 +969,7 @@ func @should_fuse_deep_loop_nests() {
|
|||
// CHECK-NEXT: }
|
||||
// CHECK-NEXT: for %i14 = 0 to 16 {
|
||||
// CHECK-NEXT: for %i15 = 0 to 10 {
|
||||
// CHECK-NEXT: %9 = load %1[%i2, %i3, %i0, %i1, %i14, %i15] : memref<2x2x3x3x16x10xf32, 2>
|
||||
// CHECK-NEXT: %9 = load %2[%i2, %i3, %i0, %i1, %i14, %i15] : memref<2x2x3x3x16x10xf32, 2>
|
||||
// CHECK-NEXT: }
|
||||
// CHECK-NEXT: }
|
||||
// CHECK-NEXT: }
|
||||
|
@ -1014,16 +1015,20 @@ func @should_fuse_at_depth1_and_reduce_slice_trip_count() {
|
|||
// redundant computation and reduces costs.
|
||||
// 2) Inserting the sliceable src loop %i2 at depth 1, we can still reduce
|
||||
// its trip count to 16 (from 256) reducing costs.
|
||||
// CHECK: for %i0 = 0 to 4 {
|
||||
// NOTE: The size of the private memref created for the fused loop nest
// is reduced from the original shape of 4x256 to 4x16 because of the
// data accessed by the load.
|
||||
// CHECK: %1 = alloc() : memref<4x16xf32>
|
||||
// CHECK-NEXT: for %i0 = 0 to 4 {
|
||||
// CHECK-NEXT: %2 = affine_apply #map0(%i0)
|
||||
// CHECK-NEXT: for %i1 = 0 to 256 {
|
||||
// CHECK-NEXT: %3 = load %1[%2, %i1] : memref<4x256xf32>
|
||||
// CHECK-NEXT: %3 = load %0[%2, %i1] : memref<4x256xf32>
|
||||
// CHECK-NEXT: }
|
||||
// CHECK-NEXT: for %i2 = 0 to 16 {
|
||||
// CHECK-NEXT: store %cst, %0[%2, %i2] : memref<4x256xf32>
|
||||
// CHECK-NEXT: store %cst, %1[%2, %i2] : memref<4x16xf32>
|
||||
// CHECK-NEXT: }
|
||||
// CHECK-NEXT: for %i3 = 0 to 16 {
|
||||
// CHECK-NEXT: %4 = load %0[%i0, %i3] : memref<4x256xf32>
|
||||
// CHECK-NEXT: %4 = load %1[%i0, %i3] : memref<4x16xf32>
|
||||
// CHECK-NEXT: }
|
||||
// CHECK-NEXT: }
|
||||
// CHECK-NEXT: return
|
||||
|
@ -1052,16 +1057,18 @@ func @should_fuse_at_depth1_with_trip_count_20() {
|
|||
}
|
||||
}
|
||||
}
|
||||
// CHECK: for %i0 = 0 to 5 {
|
||||
// NOTE: The size of the private memref created for fusion is shrunk to 20xf32
|
||||
// CHECK: %0 = alloc() : memref<20xf32>
|
||||
// CHECK-NEXT: for %i0 = 0 to 5 {
|
||||
// CHECK-NEXT: for %i1 = 0 to 20 {
|
||||
// CHECK-NEXT: store %cst, %0[%i1] : memref<100xf32>
|
||||
// CHECK-NEXT: store %cst, %0[%i1] : memref<20xf32>
|
||||
// CHECK-NEXT: }
|
||||
// CHECK-NEXT: for %i2 = 0 to 10 {
|
||||
// CHECK-NEXT: %1 = load %0[%i2] : memref<100xf32>
|
||||
// CHECK-NEXT: %1 = load %0[%i2] : memref<20xf32>
|
||||
// CHECK-NEXT: }
|
||||
// CHECK-NEXT: for %i3 = 0 to 10 {
|
||||
// CHECK-NEXT: for %i4 = 0 to 20 {
|
||||
// CHECK-NEXT: %2 = load %0[%i4] : memref<100xf32>
|
||||
// CHECK-NEXT: %2 = load %0[%i4] : memref<20xf32>
|
||||
// CHECK-NEXT: }
|
||||
// CHECK-NEXT: }
|
||||
// CHECK-NEXT: }
|
||||
|
@ -1091,19 +1098,57 @@ func @should_fuse_at_depth1_with_trip_count_19() {
|
|||
}
|
||||
}
|
||||
}
|
||||
// CHECK: for %i0 = 0 to 5 {
|
||||
// NOTE: The size of the private memref created for fusion is shrunk to 19xf32
|
||||
// CHECK: %0 = alloc() : memref<19xf32>
|
||||
// CHECK-NEXT: for %i0 = 0 to 5 {
|
||||
// CHECK-NEXT: for %i1 = 0 to 19 {
|
||||
// CHECK-NEXT: store %cst, %0[%i1] : memref<100xf32>
|
||||
// CHECK-NEXT: store %cst, %0[%i1] : memref<19xf32>
|
||||
// CHECK-NEXT: }
|
||||
// CHECK-NEXT: for %i2 = 0 to 19 {
|
||||
// CHECK-NEXT: %1 = load %0[%i2] : memref<100xf32>
|
||||
// CHECK-NEXT: %1 = load %0[%i2] : memref<19xf32>
|
||||
// CHECK-NEXT: }
|
||||
// CHECK-NEXT: for %i3 = 0 to 10 {
|
||||
// CHECK-NEXT: for %i4 = 0 to 10 {
|
||||
// CHECK-NEXT: %2 = load %0[%i4] : memref<100xf32>
|
||||
// CHECK-NEXT: %2 = load %0[%i4] : memref<19xf32>
|
||||
// CHECK-NEXT: }
|
||||
// CHECK-NEXT: }
|
||||
// CHECK-NEXT: }
|
||||
// CHECK-NEXT: return
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
// -----
|
||||
// CHECK: #map0 = (d0) -> (d0)
|
||||
|
||||
// CHECK-LABEL: func @should_fuse_with_private_memrefs_with_diff_shapes() {
|
||||
func @should_fuse_with_private_memrefs_with_diff_shapes() {
|
||||
%m = alloc() : memref<100xf32>
|
||||
%cf7 = constant 7.0 : f32
|
||||
|
||||
for %i0 = 0 to 100 {
|
||||
store %cf7, %m[%i0] : memref<100xf32>
|
||||
}
|
||||
for %i1 = 0 to 17 {
|
||||
%v0 = load %m[%i1] : memref<100xf32>
|
||||
}
|
||||
for %i2 = 0 to 82 {
|
||||
%v1 = load %m[%i2] : memref<100xf32>
|
||||
}
|
||||
// Should create two new private memrefs customized to the shapes accessed
|
||||
// by loops %i1 and %i2.
|
||||
// CHECK: %0 = alloc() : memref<17xf32>
|
||||
// CHECK-NEXT: for %i0 = 0 to 17 {
|
||||
// CHECK-NEXT: %1 = affine_apply #map0(%i0)
|
||||
// CHECK-NEXT: store %cst, %0[%1] : memref<17xf32>
|
||||
// CHECK-NEXT: %2 = load %0[%i0] : memref<17xf32>
|
||||
// CHECK-NEXT: }
|
||||
// CHECK-NEXT: %3 = alloc() : memref<82xf32>
|
||||
// CHECK-NEXT: for %i1 = 0 to 82 {
|
||||
// CHECK-NEXT: %4 = affine_apply #map0(%i1)
|
||||
// CHECK-NEXT: store %cst, %3[%4] : memref<82xf32>
|
||||
// CHECK-NEXT: %5 = load %3[%i1] : memref<82xf32>
|
||||
// CHECK-NEXT: }
|
||||
// CHECK-NEXT: return
|
||||
return
|
||||
}
|