//===- PipelineDataTransfer.cpp --- Pass for pipelining data movement ---*-===//
//
// Copyright 2019 The MLIR Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// =============================================================================
//
// This file implements a pass to pipeline data transfers.
//
//===----------------------------------------------------------------------===//

#include "mlir/Transforms/Passes.h"
#include "mlir/Analysis/AffineAnalysis.h"
#include "mlir/Analysis/LoopAnalysis.h"
#include "mlir/Analysis/Utils.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/StmtVisitor.h"
#include "mlir/Pass.h"
#include "mlir/StandardOps/StandardOps.h"
#include "mlir/Transforms/LoopUtils.h"
#include "mlir/Transforms/Utils.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/Support/Debug.h"

#define DEBUG_TYPE "pipeline-data-transfer"

using namespace mlir;

namespace {

struct PipelineDataTransfer : public FunctionPass,
                              StmtWalker<PipelineDataTransfer> {
  PipelineDataTransfer() : FunctionPass(&PipelineDataTransfer::passID) {}
  PassResult runOnMLFunction(MLFunction *f) override;
  PassResult runOnForStmt(ForStmt *forStmt);

  // Collect all 'for' statements.
  void visitForStmt(ForStmt *forStmt) { forStmts.push_back(forStmt); }
  std::vector<ForStmt *> forStmts;

  static char passID;
};

} // end anonymous namespace

char PipelineDataTransfer::passID = 0;

/// Creates a pass to pipeline explicit movement of data across levels of the
/// memory hierarchy.
FunctionPass *mlir::createPipelineDataTransferPass() {
  return new PipelineDataTransfer();
}

// Returns the position of the tag memref operand given a DMA statement.
// Temporary utility: will be replaced when DmaStart/DmaFinish abstract ops are
// added. TODO(b/117228571)
static unsigned getTagMemRefPos(const OperationStmt &dmaStmt) {
  assert(dmaStmt.isa<DmaStartOp>() || dmaStmt.isa<DmaWaitOp>());
  if (dmaStmt.isa<DmaStartOp>()) {
    // Second to last operand.
    return dmaStmt.getNumOperands() - 2;
  }
  // First operand for a dma finish statement.
  return 0;
}

/// Doubles the buffer of the supplied memref on the specified 'for' statement
/// by adding a leading dimension of size two to the memref. Replaces all uses
/// of the old memref by the new one while indexing the newly added dimension
/// by the loop IV of the specified 'for' statement modulo 2. Returns false if
/// such a replacement cannot be performed.
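///
/// Illustrative sketch only (names and types are hypothetical), for a loop
/// with IV %i:
///   %buf = alloc() : memref<256xf32, 1>
///   ... = load %buf[%idx] : memref<256xf32, 1>
/// is rewritten so that uses become:
///   %buf = alloc() : memref<2x256xf32, 1>
///   %ivMod2 = affine_apply (d0) -> (d0 mod 2) (%i)
///   ... = load %buf[%ivMod2, %idx] : memref<2x256xf32, 1>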
static bool doubleBuffer(MLValue *oldMemRef, ForStmt *forStmt) {
  MLFuncBuilder bInner(forStmt, forStmt->begin());
  bInner.setInsertionPoint(forStmt, forStmt->begin());

  // Doubles the shape with a leading dimension extent of 2.
  auto doubleShape = [&](MemRefType oldMemRefType) -> MemRefType {
    // Add the leading dimension in the shape for the double buffer.
    ArrayRef<int> shape = oldMemRefType.getShape();
    SmallVector<int, 4> shapeSizes(shape.begin(), shape.end());
    shapeSizes.insert(shapeSizes.begin(), 2);
    auto newMemRefType =
        bInner.getMemRefType(shapeSizes, oldMemRefType.getElementType(), {},
                             oldMemRefType.getMemorySpace());
    return newMemRefType;
  };

  auto oldMemRefType = oldMemRef->getType().cast<MemRefType>();
  auto newMemRefType = doubleShape(oldMemRefType);

  // Put together alloc operands for the dynamic dimensions of the memref.
  MLFuncBuilder bOuter(forStmt);
  SmallVector<SSAValue *, 4> allocOperands;
  unsigned dynamicDimCount = 0;
  for (auto dimSize : oldMemRefType.getShape()) {
    if (dimSize == -1)
      allocOperands.push_back(bOuter.create<DimOp>(forStmt->getLoc(),
                                                   oldMemRef,
                                                   dynamicDimCount++));
  }

  // Create and place the alloc right before the 'for' statement.
  // TODO(mlir-team): we are assuming scoped allocation here, and aren't
  // inserting a dealloc -- this isn't the right thing.
  SSAValue *newMemRef =
      bOuter.create<AllocOp>(forStmt->getLoc(), newMemRefType, allocOperands);

  // Create 'iv mod 2' value to index the leading dimension.
  auto d0 = bInner.getAffineDimExpr(0);
  auto modTwoMap =
      bInner.getAffineMap(/*dimCount=*/1, /*symbolCount=*/0, {d0 % 2}, {});
  auto ivModTwoOp =
      bInner.create<AffineApplyOp>(forStmt->getLoc(), modTwoMap, forStmt);

  // replaceAllMemRefUsesWith will always succeed unless the forStmt body has
  // non-dereferencing uses of the memref.
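  // (For example - illustrative, not exhaustive - a use of the memref as a
  // call operand or a return value would be non-dereferencing, unlike
  // load/store/DMA operand uses, which this rewrite knows how to re-index.)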
  if (!replaceAllMemRefUsesWith(oldMemRef, cast<MLValue>(newMemRef),
                                ivModTwoOp->getResult(0), AffineMap::Null(),
                                {}, &*forStmt->begin())) {
    LLVM_DEBUG(llvm::dbgs()
                   << "memref replacement for double buffering failed\n";);
    ivModTwoOp->getOperation()->erase();
    return false;
  }
  return true;
}

/// Returns success if the IR is in a valid state.
PassResult PipelineDataTransfer::runOnMLFunction(MLFunction *f) {
  // Do a post order walk so that inner loop DMAs are processed first. This is
  // necessary since 'for' statements nested within would otherwise become
  // invalid (erased) when the outer loop is pipelined (the pipelined one gets
  // deleted and replaced by a prologue, a new steady-state loop and an
  // epilogue).
  forStmts.clear();
  walkPostOrder(f);
  bool ret = false;
  for (auto *forStmt : forStmts) {
    ret = ret | runOnForStmt(forStmt);
  }
  return ret ? failure() : success();
}

// Check if tags of the dma start op and dma wait op match.
static bool checkTagMatch(OpPointer<DmaStartOp> startOp,
                          OpPointer<DmaWaitOp> waitOp) {
  if (startOp->getTagMemRef() != waitOp->getTagMemRef())
    return false;
  auto startIndices = startOp->getTagIndices();
  auto waitIndices = waitOp->getTagIndices();
  // Both of these have the same number of indices since they correspond to
  // the same tag memref.
  for (auto it = startIndices.begin(), wIt = waitIndices.begin(),
            e = startIndices.end();
       it != e; ++it, ++wIt) {
    // Keep it simple for now, just checking if the indices match.
    // TODO(mlir-team): this would in general need to check that there is no
    // intervening write to the same tag location, i.e., memory last
    // write/data flow analysis. This is, however, sufficient for now since
    // the DMA generation pass or the input for it will always have start/wait
    // with matching tags (same SSA operand indices).
    if (*it != *wIt)
      return false;
  }
  return true;
}

// Identify matching DMA start/finish statements to overlap computation with.
static void findMatchingStartFinishStmts(
    ForStmt *forStmt,
    SmallVectorImpl<std::pair<OperationStmt *, OperationStmt *>>
        &startWaitPairs) {
  // Collect outgoing DMA statements - needed to check for dependences below.
  SmallVector<OpPointer<DmaStartOp>, 4> outgoingDmaOps;
  for (auto &stmt : *forStmt) {
    auto *opStmt = dyn_cast<OperationStmt>(&stmt);
    if (!opStmt)
      continue;
    OpPointer<DmaStartOp> dmaStartOp;
    if ((dmaStartOp = opStmt->dyn_cast<DmaStartOp>()) &&
        dmaStartOp->isSrcMemorySpaceFaster())
      outgoingDmaOps.push_back(dmaStartOp);
  }

  SmallVector<OperationStmt *, 4> dmaStartStmts, dmaFinishStmts;
  for (auto &stmt : *forStmt) {
    auto *opStmt = dyn_cast<OperationStmt>(&stmt);
    if (!opStmt)
      continue;
    // Collect DMA finish statements.
    if (opStmt->isa<DmaWaitOp>()) {
      dmaFinishStmts.push_back(opStmt);
      continue;
    }
    OpPointer<DmaStartOp> dmaStartOp;
    if (!(dmaStartOp = opStmt->dyn_cast<DmaStartOp>()))
      continue;
    // Only DMAs incoming into higher memory spaces are pipelined for now.
    // TODO(bondhugula): handle outgoing DMA pipelining.
    if (!dmaStartOp->isDestMemorySpaceFaster())
      continue;

    // Check for dependence with outgoing DMAs; this check is conservative.
    // TODO(andydavis,bondhugula): use the dependence analysis to check for
    // dependences between an incoming and outgoing DMA in the same iteration.
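    // For instance (hypothetical names), if an outgoing DMA in the loop
    // writes %bufA back to slow memory while this incoming DMA reads %bufA
    // from slow memory, pipelining could reorder the read with respect to the
    // write, so such a start statement is skipped.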
    auto it = outgoingDmaOps.begin();
    for (; it != outgoingDmaOps.end(); ++it) {
      if ((*it)->getDstMemRef() == dmaStartOp->getSrcMemRef())
        break;
    }
    if (it != outgoingDmaOps.end())
      continue;

    // We only double buffer if the buffer is not live out of loop.
    const MLValue *memref =
        cast<MLValue>(dmaStartOp->getOperand(dmaStartOp->getFasterMemPos()));
    bool escapingUses = false;
    for (const auto &use : memref->getUses()) {
      if (!dominates(*forStmt->begin(), *use.getOwner())) {
        LLVM_DEBUG(llvm::dbgs()
                       << "can't pipeline: buffer is live out of loop\n";);
        escapingUses = true;
        break;
      }
    }
    if (!escapingUses)
      dmaStartStmts.push_back(opStmt);
  }

  // For each start statement, we look for a matching finish statement.
  for (auto *dmaStartStmt : dmaStartStmts) {
    for (auto *dmaFinishStmt : dmaFinishStmts) {
      if (checkTagMatch(dmaStartStmt->cast<DmaStartOp>(),
                        dmaFinishStmt->cast<DmaWaitOp>())) {
        startWaitPairs.push_back({dmaStartStmt, dmaFinishStmt});
        break;
      }
    }
  }
}

/// Overlap DMA transfers with computation in this loop. If successful,
/// 'forStmt' is deleted, and a prologue, a new pipelined loop, and epilogue
/// are inserted right before where it was.
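///
/// Rough schematic only (pseudocode, not actual output), with DMA starts
/// shifted by 0 and everything else by 1:
///   for %i { dma_start(%i); dma_wait(%i); compute(%i); }
/// becomes, approximately:
///   dma_start(first)                           // prologue
///   loop over the remaining iterations {       // steady state
///     dma_start(current); dma_wait(previous); compute(previous);
///   }
///   dma_wait(last); compute(last)              // epilogue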
PassResult PipelineDataTransfer::runOnForStmt(ForStmt *forStmt) {
  auto mayBeConstTripCount = getConstantTripCount(*forStmt);
  if (!mayBeConstTripCount.hasValue()) {
    LLVM_DEBUG(llvm::dbgs() << "unknown trip count loop\n");
    return success();
  }

  SmallVector<std::pair<OperationStmt *, OperationStmt *>, 4> startWaitPairs;
  findMatchingStartFinishStmts(forStmt, startWaitPairs);

  if (startWaitPairs.empty()) {
    LLVM_DEBUG(llvm::dbgs() << "No dma start/finish pairs\n";);
    return success();
  }

  // Double the buffers for the higher memory space memrefs.
  // Identify memrefs to replace by scanning through all DMA start statements.
  // A DMA start statement has two memrefs - the one from the higher level of
  // memory hierarchy is the one to double buffer.
  // TODO(bondhugula): check whether double-buffering is even necessary.
  // TODO(bondhugula): make this work with different layouts: assuming here
  // that the dimension we are adding for the double buffering is the
  // outermost dimension.
  for (auto &pair : startWaitPairs) {
    auto *dmaStartStmt = pair.first;
    MLValue *oldMemRef = cast<MLValue>(dmaStartStmt->getOperand(
        dmaStartStmt->cast<DmaStartOp>()->getFasterMemPos()));
    if (!doubleBuffer(oldMemRef, forStmt)) {
      // Normally, double buffering should not fail because we already checked
      // that there are no uses outside.
      LLVM_DEBUG(llvm::dbgs() << "double buffering failed for: \n";);
      LLVM_DEBUG(dmaStartStmt->dump());
      // IR still in a valid state.
      return success();
    }
    // If the old memref has no more uses, remove its 'dead' alloc if it was
    // alloc'ed. (Note: DMA buffers are rarely function live-in, but a 'dim'
    // operation could have been used on it if it was dynamically shaped, in
    // order to create the double buffer above.)
    if (oldMemRef->use_empty())
      if (auto *allocStmt = oldMemRef->getDefiningStmt())
        allocStmt->erase();
  }

  // Double the buffers for tag memrefs.
  for (auto &pair : startWaitPairs) {
    auto *dmaFinishStmt = pair.second;
    MLValue *oldTagMemRef = cast<MLValue>(
        dmaFinishStmt->getOperand(getTagMemRefPos(*dmaFinishStmt)));
    if (!doubleBuffer(oldTagMemRef, forStmt)) {
      LLVM_DEBUG(llvm::dbgs() << "tag double buffering failed\n";);
      return success();
    }
    // If the old tag has no more uses, remove its 'dead' alloc if it was
    // alloc'ed.
    if (oldTagMemRef->use_empty())
      if (auto *allocStmt = oldTagMemRef->getDefiningStmt())
        allocStmt->erase();
  }

  // Double buffering would have invalidated all the old DMA start/wait stmts.
  startWaitPairs.clear();
  findMatchingStartFinishStmts(forStmt, startWaitPairs);

  // Store the shift for each statement for later lookup (the affine_apply ops
  // feeding a DMA start get the same shift as the start itself).
  DenseMap<const Statement *, unsigned> stmtShiftMap;
  for (auto &pair : startWaitPairs) {
    auto *dmaStartStmt = pair.first;
    assert(dmaStartStmt->isa<DmaStartOp>());
    stmtShiftMap[dmaStartStmt] = 0;
    // Set shifts for DMA start stmt's affine operand computation slices to 0.
    if (auto *slice = mlir::createAffineComputationSlice(dmaStartStmt)) {
      stmtShiftMap[slice] = 0;
    } else {
      // If a slice wasn't created, the reachable affine_apply ops from its
      // operands are the ones that go with it.
      SmallVector<OperationStmt *, 4> affineApplyStmts;
      SmallVector<MLValue *, 4> operands(dmaStartStmt->getOperands());
      getReachableAffineApplyOps(operands, affineApplyStmts);
      for (const auto *stmt : affineApplyStmts) {
        stmtShiftMap[stmt] = 0;
      }
    }
  }
  // Everything else (including compute ops and dma finish) is shifted by one.
  for (const auto &stmt : *forStmt) {
    if (stmtShiftMap.find(&stmt) == stmtShiftMap.end()) {
      stmtShiftMap[&stmt] = 1;
    }
  }

  // Gather the shifts stored in the map, in statement order.
  std::vector<uint64_t> shifts(forStmt->getStatements().size());
  unsigned s = 0;
  for (auto &stmt : *forStmt) {
    assert(stmtShiftMap.find(&stmt) != stmtShiftMap.end());
    shifts[s++] = stmtShiftMap[&stmt];
    LLVM_DEBUG(
        // Tagging statements with shifts for debugging purposes.
        if (auto *opStmt = dyn_cast<OperationStmt>(&stmt)) {
          MLFuncBuilder b(opStmt);
          opStmt->setAttr(b.getIdentifier("shift"),
                          b.getIntegerAttr(shifts[s - 1]));
        });
  }

  if (!isStmtwiseShiftValid(*forStmt, shifts)) {
    // Violates dependences.
    LLVM_DEBUG(llvm::dbgs() << "Shifts invalid - unexpected\n";);
    return success();
  }

  if (stmtBodySkew(forStmt, shifts)) {
    LLVM_DEBUG(llvm::dbgs() << "stmt body skewing failed - unexpected\n";);
    return success();
  }

  return success();
}

static PassRegistration<PipelineDataTransfer> pass(
    "pipeline-data-transfer",
    "Pipeline non-blocking data transfers between explicitly managed levels of "
    "the memory hierarchy");