//===- PipelineDataTransfer.cpp --- Pass for pipelining data movement ---*-===//
//
// Copyright 2019 The MLIR Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// =============================================================================
//
// This file implements a pass to pipeline data transfers.
//
//===----------------------------------------------------------------------===//

#include "mlir/Transforms/Passes.h"
|
|
|
|
|
2019-02-02 08:42:18 +08:00
|
|
|
#include "mlir/AffineOps/AffineOps.h"
|
2018-10-13 05:54:54 +08:00
|
|
|
#include "mlir/Analysis/AffineAnalysis.h"
|
2018-10-19 02:14:26 +08:00
|
|
|
#include "mlir/Analysis/LoopAnalysis.h"
|
|
|
|
#include "mlir/Analysis/Utils.h"
|
2018-10-05 08:15:30 +08:00
|
|
|
#include "mlir/IR/Builders.h"
|
2019-02-20 09:17:46 +08:00
|
|
|
#include "mlir/Pass/Pass.h"
|
2019-03-02 05:48:24 +08:00
|
|
|
#include "mlir/StandardOps/Ops.h"
|
2018-09-29 03:17:26 +08:00
|
|
|
#include "mlir/Transforms/LoopUtils.h"
|
2018-10-05 08:15:30 +08:00
|
|
|
#include "mlir/Transforms/Utils.h"
|
|
|
|
#include "llvm/ADT/DenseMap.h"
|
2018-10-19 02:14:26 +08:00
|
|
|
#include "llvm/Support/Debug.h"
|
2019-05-04 02:07:37 +08:00
|
|
|
#define DEBUG_TYPE "affine-pipeline-data-transfer"
|
2018-09-29 03:17:26 +08:00
|
|
|
|
|
|
|
using namespace mlir;

namespace {

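/// Pipelines DMA transfers within 'affine.for' loops: the buffers used by
/// matching DMA start/finish pairs are double buffered, and the loop body is
/// shifted so that data movement overlaps with computation.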
struct PipelineDataTransfer : public FunctionPass<PipelineDataTransfer> {
  void runOnFunction() override;
  void runOnAffineForOp(AffineForOp forOp);

  std::vector<AffineForOp> forOps;
};

} // end anonymous namespace

/// Creates a pass to pipeline explicit movement of data across levels of the
/// memory hierarchy.
FunctionPassBase *mlir::createPipelineDataTransferPass() {
  return new PipelineDataTransfer();
}

// Returns the position of the tag memref operand given a DMA operation.
// Temporary utility: will be replaced when DmaStart/DmaFinish abstract op's
// are added. TODO(b/117228571)
static unsigned getTagMemRefPos(Operation &dmaInst) {
  assert(isa<DmaStartOp>(dmaInst) || isa<DmaWaitOp>(dmaInst));
  if (isa<DmaStartOp>(dmaInst)) {
    // Second to last operand.
    return dmaInst.getNumOperands() - 2;
  }
  // First operand for a dma finish operation.
  return 0;
}

/// Doubles the buffer of the supplied memref on the specified 'affine.for'
/// operation by adding a leading dimension of size two to the memref.
/// Replaces all uses of the old memref by the new one while indexing the newly
/// added dimension by the loop IV of the specified 'affine.for' operation
/// modulo 2. Returns false if such a replacement cannot be performed.
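///
/// For example (an illustrative sketch, assuming a unit loop step): a buffer
/// of type 'memref<256xf32>' becomes 'memref<2x256xf32>', and an access
/// 'A[%i]' inside the loop becomes 'A[%i mod 2, %i]'.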
static bool doubleBuffer(Value *oldMemRef, AffineForOp forOp) {
  auto *forBody = forOp.getBody();
  OpBuilder bInner(forBody, forBody->begin());

  // Doubles the shape with a leading dimension extent of 2.
  auto doubleShape = [&](MemRefType oldMemRefType) -> MemRefType {
    // Add the leading dimension in the shape for the double buffer.
    ArrayRef<int64_t> oldShape = oldMemRefType.getShape();
    SmallVector<int64_t, 4> newShape(1 + oldMemRefType.getRank());
    newShape[0] = 2;
    std::copy(oldShape.begin(), oldShape.end(), newShape.begin() + 1);
    auto newMemRefType =
        bInner.getMemRefType(newShape, oldMemRefType.getElementType(), {},
                             oldMemRefType.getMemorySpace());
    return newMemRefType;
  };

  auto oldMemRefType = oldMemRef->getType().cast<MemRefType>();
  auto newMemRefType = doubleShape(oldMemRefType);

  // The double buffer is allocated right before 'forInst'.
  auto *forInst = forOp.getOperation();
  OpBuilder bOuter(forInst);
  // Put together alloc operands for any dynamic dimensions of the memref.
  SmallVector<Value *, 4> allocOperands;
  unsigned dynamicDimCount = 0;
  for (auto dimSize : oldMemRefType.getShape()) {
    if (dimSize == -1)
      allocOperands.push_back(bOuter.create<DimOp>(forInst->getLoc(), oldMemRef,
                                                   dynamicDimCount++));
  }

  // Create and place the alloc right before the 'affine.for' operation.
  Value *newMemRef =
      bOuter.create<AllocOp>(forInst->getLoc(), newMemRefType, allocOperands);

  // Create 'iv mod 2' value to index the leading dimension.
  auto d0 = bInner.getAffineDimExpr(0);
  int64_t step = forOp.getStep();
  auto modTwoMap = bInner.getAffineMap(/*dimCount=*/1, /*symbolCount=*/0,
                                       {d0.floorDiv(step) % 2});
  auto ivModTwoOp = bInner.create<AffineApplyOp>(forOp.getLoc(), modTwoMap,
                                                 forOp.getInductionVar());
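  // Note: for a loop with step 's', iteration 'iv' thus selects buffer
  // (iv floordiv s) mod 2, so successive iterations alternate between the
  // two halves of the double buffer.
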
  // replaceAllMemRefUsesWith will always succeed unless the forOp body has
  // non-dereferencing uses of the memref (dealloc's are fine though).
  if (!replaceAllMemRefUsesWith(oldMemRef, newMemRef,
                                /*extraIndices=*/{ivModTwoOp},
                                /*indexRemap=*/AffineMap(),
                                /*extraOperands=*/{},
                                /*domInstFilter=*/&*forOp.getBody()->begin())) {
    LLVM_DEBUG(
        forOp.emitError("memref replacement for double buffering failed"));
    ivModTwoOp.erase();
    return false;
  }
  // Insert the dealloc op right after the for loop.
  bOuter.setInsertionPoint(forInst->getBlock(),
                           std::next(Block::iterator(forInst)));
  bOuter.create<DeallocOp>(forInst->getLoc(), newMemRef);

  return true;
}

/// Collects all 'affine.for' ops in post order and attempts to pipeline the
/// DMAs within each of them, innermost loops first.
void PipelineDataTransfer::runOnFunction() {
  // Do a post order walk so that inner loop DMAs are processed first. This is
  // necessary since 'affine.for' operations nested within would otherwise
  // become invalid (erased) when the outer loop is pipelined (the pipelined
  // one gets deleted and replaced by a prologue, a new steady-state loop and
  // an epilogue).
  forOps.clear();
  getFunction().walk<AffineForOp>(
      [&](AffineForOp forOp) { forOps.push_back(forOp); });
  for (auto forOp : forOps)
    runOnAffineForOp(forOp);
}

// Check whether the tags of the DMA start op and the DMA wait op match.
static bool checkTagMatch(DmaStartOp startOp, DmaWaitOp waitOp) {
  if (startOp.getTagMemRef() != waitOp.getTagMemRef())
    return false;
  auto startIndices = startOp.getTagIndices();
  auto waitIndices = waitOp.getTagIndices();
  // Both of these have the same number of indices since they correspond to
  // the same tag memref.
  for (auto it = startIndices.begin(), wIt = waitIndices.begin(),
            e = startIndices.end();
       it != e; ++it, ++wIt) {
    // Keep it simple for now, just checking if the indices match.
    // TODO(mlir-team): this would in general need to check if there is no
    // intervening write writing to the same tag location, i.e., memory last
    // write/data flow analysis. This is however sufficient/powerful enough
    // for now since the DMA generation pass or the input for it will always
    // have start/wait with matching tags (same SSA operand indices).
    if (*it != *wIt)
      return false;
  }
  return true;
}

// Identify matching DMA start/finish operations to overlap computation with.
static void findMatchingStartFinishInsts(
    AffineForOp forOp,
    SmallVectorImpl<std::pair<Operation *, Operation *>> &startWaitPairs) {

  // Collect outgoing DMA operations - needed to check for dependences below.
  SmallVector<DmaStartOp, 4> outgoingDmaOps;
  for (auto &op : *forOp.getBody()) {
    auto dmaStartOp = dyn_cast<DmaStartOp>(op);
    if (dmaStartOp && dmaStartOp.isSrcMemorySpaceFaster())
      outgoingDmaOps.push_back(dmaStartOp);
  }

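  // (An outgoing DMA here is one whose source lives in the faster memory
  // space; these are recorded so that incoming DMAs that read a memref
  // written by an outgoing DMA are conservatively skipped below.)
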
  SmallVector<Operation *, 4> dmaStartInsts, dmaFinishInsts;
  for (auto &op : *forOp.getBody()) {
    // Collect DMA finish operations.
    if (isa<DmaWaitOp>(op)) {
      dmaFinishInsts.push_back(&op);
      continue;
    }
    auto dmaStartOp = dyn_cast<DmaStartOp>(op);
    if (!dmaStartOp)
      continue;

    // Only DMAs incoming into higher memory spaces are pipelined for now.
    // TODO(bondhugula): handle outgoing DMA pipelining.
    if (!dmaStartOp.isDestMemorySpaceFaster())
      continue;

    // Check for dependence with outgoing DMAs. Doing this conservatively.
    // TODO(andydavis,bondhugula): use the dependence analysis to check for
    // dependences between an incoming and outgoing DMA in the same iteration.
    auto it = outgoingDmaOps.begin();
    for (; it != outgoingDmaOps.end(); ++it) {
      if (it->getDstMemRef() == dmaStartOp.getSrcMemRef())
        break;
    }
    if (it != outgoingDmaOps.end())
      continue;

    // We only double buffer if the buffer is not live out of the loop.
    auto *memref = dmaStartOp.getOperand(dmaStartOp.getFasterMemPos());
    bool escapingUses = false;
    for (auto *user : memref->getUsers()) {
      // We can double buffer regardless of dealloc's outside the loop.
      if (isa<DeallocOp>(user))
        continue;
      if (!forOp.getBody()->findAncestorInstInBlock(*user)) {
        LLVM_DEBUG(llvm::dbgs()
                   << "can't pipeline: buffer is live out of loop\n";);
        escapingUses = true;
        break;
      }
    }
    if (!escapingUses)
      dmaStartInsts.push_back(&op);
  }

  // For each start operation, we look for a matching finish operation.
  for (auto *dmaStartInst : dmaStartInsts) {
    for (auto *dmaFinishInst : dmaFinishInsts) {
      if (checkTagMatch(cast<DmaStartOp>(dmaStartInst),
                        cast<DmaWaitOp>(dmaFinishInst))) {
        startWaitPairs.push_back({dmaStartInst, dmaFinishInst});
        break;
      }
    }
  }
}

/// Overlaps DMA transfers with computation in this loop. If successful,
/// 'forOp' is deleted, and a prologue, a new pipelined loop, and an epilogue
/// are inserted right before where it was.
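///
/// Schematically (an illustrative sketch, not the literal output): a body of
/// { dma.start(i); dma.wait(i); compute(i); } over iterations 0..N-1 becomes
///   prologue:     dma.start(0)
///   steady state: for i in 0..N-2 { dma.start(i+1); dma.wait(i); compute(i) }
///   epilogue:     dma.wait(N-1); compute(N-1)
/// with the buffers double buffered so the in-flight iterations don't clash.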
void PipelineDataTransfer::runOnAffineForOp(AffineForOp forOp) {
  auto mayBeConstTripCount = getConstantTripCount(forOp);
  if (!mayBeConstTripCount.hasValue()) {
    LLVM_DEBUG(forOp.emitRemark("won't pipeline due to unknown trip count"));
    return;
  }

  SmallVector<std::pair<Operation *, Operation *>, 4> startWaitPairs;
  findMatchingStartFinishInsts(forOp, startWaitPairs);

  if (startWaitPairs.empty()) {
    LLVM_DEBUG(forOp.emitRemark("No dma start/finish pairs\n"));
    return;
  }

  // Double the buffers for the higher memory space memref's.
  // Identify memref's to replace by scanning through all DMA start
  // operations. A DMA start operation has two memref's - the one from the
  // higher level of memory hierarchy is the one to double buffer.
  // TODO(bondhugula): check whether double-buffering is even necessary.
  // TODO(bondhugula): make this work with different layouts: assuming here
  // that the dimension we are adding for the double buffering is the
  // outermost dimension.
for (auto &pair : startWaitPairs) {
|
2018-12-29 08:05:35 +08:00
|
|
|
auto *dmaStartInst = pair.first;
|
|
|
|
Value *oldMemRef = dmaStartInst->getOperand(
|
2019-05-12 08:57:32 +08:00
|
|
|
cast<DmaStartOp>(dmaStartInst).getFasterMemPos());
|
2019-02-02 08:42:18 +08:00
|
|
|
if (!doubleBuffer(oldMemRef, forOp)) {
|
2018-10-19 02:14:26 +08:00
|
|
|
// Normally, double buffering should not fail because we already checked
|
|
|
|
// that there are no uses outside.
|
|
|
|
LLVM_DEBUG(llvm::dbgs() << "double buffering failed for: \n";);
|
2018-12-29 08:05:35 +08:00
|
|
|
LLVM_DEBUG(dmaStartInst->dump());
|
2018-10-23 04:44:31 +08:00
|
|
|
// IR still in a valid state.
|
2019-03-01 06:50:42 +08:00
|
|
|
return;
|
2018-10-13 05:54:54 +08:00
|
|
|
}
|
2018-11-21 07:07:37 +08:00
|
|
|
// If the old memref has no more uses, remove its 'dead' alloc if it was
|
2018-12-11 03:39:31 +08:00
|
|
|
// alloc'ed. (note: DMA buffers are rarely function live-in; but a 'dim'
|
|
|
|
// operation could have been used on it if it was dynamically shaped in
|
2019-02-12 08:33:53 +08:00
|
|
|
// order to create the double buffer above.)
|
|
|
|
// '-canonicalize' does this in a more general way, but we'll anyway do the
|
|
|
|
// simple/common case so that the output / test cases looks clear.
|
2019-03-27 08:05:09 +08:00
|
|
|
if (auto *allocInst = oldMemRef->getDefiningOp()) {
|
2019-02-12 08:33:53 +08:00
|
|
|
if (oldMemRef->use_empty()) {
|
2018-12-29 08:05:35 +08:00
|
|
|
allocInst->erase();
|
2019-02-12 08:33:53 +08:00
|
|
|
} else if (oldMemRef->hasOneUse()) {
|
2019-05-19 02:09:07 +08:00
|
|
|
if (auto dealloc = dyn_cast<DeallocOp>(*oldMemRef->user_begin())) {
|
|
|
|
dealloc.erase();
|
2019-03-27 08:05:09 +08:00
|
|
|
oldMemRef->getDefiningOp()->erase();
|
2019-02-12 08:33:53 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2018-10-05 08:15:30 +08:00
|
|
|
}
|
|
|
|
|
  // Double the buffers for tag memrefs.
  for (auto &pair : startWaitPairs) {
    auto *dmaFinishInst = pair.second;
    Value *oldTagMemRef =
        dmaFinishInst->getOperand(getTagMemRefPos(*dmaFinishInst));
    if (!doubleBuffer(oldTagMemRef, forOp)) {
      LLVM_DEBUG(llvm::dbgs() << "tag double buffering failed\n";);
      return;
    }
    // If the old tag has no more uses, remove its 'dead' alloc if it was
    // alloc'ed.
    if (oldTagMemRef->use_empty())
      if (auto *allocInst = oldTagMemRef->getDefiningOp())
        allocInst->erase();
  }

  // Double buffering would have invalidated all the old DMA start/wait insts.
  startWaitPairs.clear();
  findMatchingStartFinishInsts(forOp, startWaitPairs);

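  // Shift scheme: DMA start ops (and the affine.apply ops computing their
  // operands) keep a shift of 0, while every other op gets a shift of 1, so
  // that the body skewing below issues each iteration's incoming DMA one
  // iteration ahead of the computation that consumes it.
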
  // Store the shift for each operation for later lookup for AffineApplyOp's.
  DenseMap<Operation *, unsigned> instShiftMap;
for (auto &pair : startWaitPairs) {
|
2018-12-29 08:05:35 +08:00
|
|
|
auto *dmaStartInst = pair.first;
|
2019-05-12 09:59:54 +08:00
|
|
|
assert(isa<DmaStartOp>(dmaStartInst));
|
2018-12-29 08:05:35 +08:00
|
|
|
instShiftMap[dmaStartInst] = 0;
|
2019-03-28 05:02:02 +08:00
|
|
|
// Set shifts for DMA start op's affine operand computation slices to 0.
|
2019-03-25 10:53:05 +08:00
|
|
|
SmallVector<AffineApplyOp, 4> sliceOps;
|
2019-01-26 06:06:32 +08:00
|
|
|
mlir::createAffineComputationSlice(dmaStartInst, &sliceOps);
|
|
|
|
if (!sliceOps.empty()) {
|
|
|
|
for (auto sliceOp : sliceOps) {
|
2019-03-27 08:05:09 +08:00
|
|
|
instShiftMap[sliceOp.getOperation()] = 0;
|
2019-01-26 06:06:32 +08:00
|
|
|
}
|
2018-10-13 05:54:54 +08:00
|
|
|
} else {
|
2019-02-07 03:08:18 +08:00
|
|
|
// If a slice wasn't created, the reachable affine.apply op's from its
|
2018-10-19 02:14:26 +08:00
|
|
|
// operands are the ones that go with it.
|
2019-03-28 05:02:02 +08:00
|
|
|
SmallVector<Operation *, 4> affineApplyInsts;
|
2018-12-29 08:05:35 +08:00
|
|
|
SmallVector<Value *, 4> operands(dmaStartInst->getOperands());
|
|
|
|
getReachableAffineApplyOps(operands, affineApplyInsts);
|
2019-03-28 05:02:02 +08:00
|
|
|
for (auto *op : affineApplyInsts) {
|
|
|
|
instShiftMap[op] = 0;
|
2018-10-19 02:14:26 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
  // Everything else (including compute ops and dma finish) is shifted by one.
  for (auto &op : *forOp.getBody()) {
    if (instShiftMap.find(&op) == instShiftMap.end()) {
      instShiftMap[&op] = 1;
    }
  }

  // Get the shifts stored in the map.
  std::vector<uint64_t> shifts(forOp.getBody()->getOperations().size());
  unsigned s = 0;
  for (auto &op : *forOp.getBody()) {
    assert(instShiftMap.find(&op) != instShiftMap.end());
    shifts[s++] = instShiftMap[&op];

    // Tag operations with their shifts for debugging purposes.
    LLVM_DEBUG({
      OpBuilder b(&op);
      op.setAttr("shift", b.getI64IntegerAttr(shifts[s - 1]));
    });
  }

  if (!isInstwiseShiftValid(forOp, shifts)) {
    // Violates dependences.
    LLVM_DEBUG(llvm::dbgs() << "Shifts invalid - unexpected\n";);
    return;
  }

  if (failed(instBodySkew(forOp, shifts))) {
    LLVM_DEBUG(llvm::dbgs() << "op body skewing failed - unexpected\n";);
    return;
  }
}

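// Registers the pass for mlir-opt-style drivers; as a usage sketch:
//   mlir-opt -affine-pipeline-data-transfer <input.mlir>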
static PassRegistration<PipelineDataTransfer> pass(
    "affine-pipeline-data-transfer",
    "Pipeline non-blocking data transfers between explicitly managed levels of "
    "the memory hierarchy");