[mlir][NFC] MemRef cleanup: Remove helper functions

Remove `getDynOperands` and `createOrFoldDimOp` from MemRef.h to decouple MemRef a bit from Tensor. These two functions are used in other dialects/transforms, so copies are now provided by (or inlined into) the dialects that use them.

Differential Revision: https://reviews.llvm.org/D105260
This commit is contained in:
Matthias Springer 2021-07-05 10:04:01 +09:00
parent 9e24979d73
commit 2c115ecc41
13 changed files with 73 additions and 40 deletions

View File

@ -42,6 +42,14 @@ void applyPermutationToVector(SmallVector<T, N> &inVec,
inVec = auxVec;
}
/// Helper function that creates a memref::DimOp or tensor::DimOp depending on
/// the type of `source`.
Value createOrFoldDimOp(OpBuilder &b, Location loc, Value source, int64_t dim);
/// Given an operation, retrieves the value of each dynamic dimension through
/// constructing the necessary DimOp operators.
SmallVector<Value, 4> getDynOperands(Location loc, Value val, OpBuilder &b);
/// If `size` comes from an AffineMinOp and one of the values of AffineMinOp
/// is a constant then return a new value set to the smallest such constant.
/// If `size` comes from a ConstantOp, return the constant.

View File

@ -30,14 +30,6 @@ raw_ostream &operator<<(raw_ostream &os, Range &range);
/// with `b` at location `loc`.
SmallVector<Range, 8> getOrCreateRanges(OffsetSizeAndStrideOpInterface op,
OpBuilder &b, Location loc);
/// Given an operation, retrieves the value of each dynamic dimension through
/// constructing the necessary DimOp operators.
SmallVector<Value, 4> getDynOperands(Location loc, Value val, OpBuilder &b);
// Helper function that creates a memref::DimOp or tensor::DimOp depending on
// the type of `source`.
Value createOrFoldDimOp(OpBuilder &b, Location loc, Value source, int64_t dim);
} // namespace mlir
//===----------------------------------------------------------------------===//

View File

@ -31,6 +31,10 @@ class VectorTransferOpInterface;
namespace vector {
class TransferWriteOp;
class TransferReadOp;
/// Helper function that creates a memref::DimOp or tensor::DimOp depending on
/// the type of `source`.
Value createOrFoldDimOp(OpBuilder &b, Location loc, Value source, int64_t dim);
} // namespace vector
/// Return the number of elements of basis, `0` if empty.

View File

@ -166,7 +166,7 @@ static Value generateInBoundsCheck(
Location loc = xferOp.getLoc();
ImplicitLocOpBuilder lb(xferOp.getLoc(), b);
if (!xferOp.isDimInBounds(0) && !isBroadcast) {
Value memrefDim = createOrFoldDimOp(b, loc, xferOp.source(), *dim);
Value memrefDim = vector::createOrFoldDimOp(b, loc, xferOp.source(), *dim);
AffineExpr d0, d1;
bindDims(xferOp.getContext(), d0, d1);
Value base = xferOp.indices()[dim.getValue()];

View File

@ -191,6 +191,17 @@ SmallVector<Value, 4> mlir::linalg::applyMapToValues(OpBuilder &b, Location loc,
return res;
}
/// Build a memref::DimOp or tensor::DimOp querying dimension `dim` of
/// `source`, selecting the op by the type of `source`. Uses createOrFold,
/// so the dim op may be folded away at construction time.
static Value createOrFoldDimOp(OpBuilder &b, Location loc, Value source,
                               int64_t dim) {
  Type sourceType = source.getType();
  // Ranked and unranked memrefs are queried with memref::DimOp.
  if (sourceType.isa<UnrankedMemRefType, MemRefType>())
    return b.createOrFold<memref::DimOp>(loc, source, dim);
  // Ranked and unranked tensors are queried with tensor::DimOp.
  if (sourceType.isa<UnrankedTensorType, RankedTensorType>())
    return b.createOrFold<tensor::DimOp>(loc, source, dim);
  llvm_unreachable("Expected MemRefType or TensorType");
}
SmallVector<Value, 4> LinalgOp::createFlatListOfOperandDims(OpBuilder &b,
Location loc) {
SmallVector<Value, 4> res;

View File

@ -108,6 +108,7 @@
#include "PassDetail.h"
#include "mlir/Dialect/Linalg/IR/LinalgOps.h"
#include "mlir/Dialect/Linalg/Passes.h"
#include "mlir/Dialect/Linalg/Utils/Utils.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/SCF/SCF.h"
#include "mlir/Dialect/Utils/StaticValueUtils.h"

View File

@ -66,7 +66,7 @@ getOrCreateOperandsMatchingResultTypes(OpBuilder &b, Operation *op) {
Value firstOperand = operands.front();
auto rankedTensorType = t.cast<RankedTensorType>();
auto staticShape = llvm::to_vector<4>(rankedTensorType.getShape());
auto dynamicShape = getDynOperands(loc, firstOperand, b);
auto dynamicShape = linalg::getDynOperands(loc, firstOperand, b);
res.push_back(b.create<linalg::InitTensorOp>(
loc, dynamicShape, staticShape, rankedTensorType.getElementType()));

View File

@ -157,6 +157,28 @@ static void unpackRanges(ArrayRef<Range> ranges, SmallVectorImpl<Value> &lbs,
namespace mlir {
namespace linalg {
/// Helper that builds a memref::DimOp or tensor::DimOp for dimension `dim`
/// of `source`, chosen by whether `source` is memref- or tensor-typed.
/// createOrFold is used, so the op may fold on construction.
Value createOrFoldDimOp(OpBuilder &b, Location loc, Value source, int64_t dim) {
  Type ty = source.getType();
  if (ty.isa<UnrankedMemRefType, MemRefType>())
    return b.createOrFold<memref::DimOp>(loc, source, dim);
  if (ty.isa<UnrankedTensorType, RankedTensorType>())
    return b.createOrFold<tensor::DimOp>(loc, source, dim);
  llvm_unreachable("Expected MemRefType or TensorType");
}
/// Return one Value per dynamic dimension of the shaped value `val`, each
/// materialized by a dim op (memref or tensor, depending on the type of
/// `val`). Static dimensions contribute no operand.
SmallVector<Value, 4> getDynOperands(Location loc, Value val, OpBuilder &b) {
  SmallVector<Value, 4> dynOperands;
  ArrayRef<int64_t> shape = val.getType().cast<ShapedType>().getShape();
  for (int64_t i = 0, e = shape.size(); i < e; ++i) {
    // Only dynamic extents need a runtime operand.
    if (shape[i] == ShapedType::kDynamicSize)
      dynOperands.push_back(createOrFoldDimOp(b, loc, val, i));
  }
  return dynOperands;
}
/// If `size` comes from an AffineMinOp and one of the values of AffineMinOp
/// is a constant then return a new value set to the smallest such constant.
/// Otherwise return nullptr.

View File

@ -33,29 +33,6 @@ struct MemRefInlinerInterface : public DialectInlinerInterface {
};
} // end anonymous namespace
// Collect a dim-op-produced Value for every dynamic dimension of `val`;
// static dimensions are skipped.
SmallVector<Value, 4> mlir::getDynOperands(Location loc, Value val,
                                           OpBuilder &b) {
  SmallVector<Value, 4> dynOperands;
  ArrayRef<int64_t> shape = val.getType().cast<ShapedType>().getShape();
  for (int64_t i = 0, e = shape.size(); i < e; ++i)
    if (shape[i] == ShapedType::kDynamicSize)
      dynOperands.push_back(createOrFoldDimOp(b, loc, val, i));
  return dynOperands;
}
// Build a memref::DimOp or tensor::DimOp for dimension `dim` of `source`,
// dispatching on the type of `source`; createOrFold lets the op fold away.
// TODO: Move helper function out of MemRef dialect.
Value mlir::createOrFoldDimOp(OpBuilder &b, Location loc, Value source,
                              int64_t dim) {
  Type srcTy = source.getType();
  if (srcTy.isa<UnrankedMemRefType, MemRefType>())
    return b.createOrFold<memref::DimOp>(loc, source, dim);
  if (srcTy.isa<UnrankedTensorType, RankedTensorType>())
    return b.createOrFold<tensor::DimOp>(loc, source, dim);
  llvm_unreachable("Expected MemRefType or TensorType");
}
void mlir::memref::MemRefDialect::initialize() {
addOperations<DmaStartOp, DmaWaitOp,
#define GET_OP_LIST

View File

@ -332,7 +332,7 @@ static bool genBuffers(Merger &merger, CodeGen &codegen,
// Find lower and upper bound in current dimension.
Value up;
if (shape[d] == MemRefType::kDynamicSize) {
up = createOrFoldDimOp(rewriter, loc, t->get(), d);
up = rewriter.create<tensor::DimOp>(loc, t->get(), d);
args.push_back(up);
} else {
up = rewriter.create<ConstantIndexOp>(loc, shape[d]);

View File

@ -2330,7 +2330,7 @@ static Value createInBoundsCond(OpBuilder &b,
Value sum =
makeComposedAffineApply(b, loc, d0 + vs, xferOp.indices()[indicesIdx]);
Value cond = createFoldedSLE(
b, sum, createOrFoldDimOp(b, loc, xferOp.source(), indicesIdx));
b, sum, vector::createOrFoldDimOp(b, loc, xferOp.source(), indicesIdx));
if (!cond)
return;
// Conjunction over all dims for which we are in-bounds.
@ -2415,8 +2415,8 @@ static Value createSubViewIntersection(OpBuilder &b,
auto isaWrite = isa<vector::TransferWriteOp>(xferOp);
xferOp.zipResultAndIndexing([&](int64_t resultIdx, int64_t indicesIdx) {
using MapList = ArrayRef<ArrayRef<AffineExpr>>;
Value dimMemRef =
createOrFoldDimOp(b, xferOp.getLoc(), xferOp.source(), indicesIdx);
Value dimMemRef = vector::createOrFoldDimOp(b, xferOp.getLoc(),
xferOp.source(), indicesIdx);
Value dimAlloc = lb.create<memref::DimOp>(alloc, resultIdx);
Value index = xferOp.indices()[indicesIdx];
AffineExpr i, j, k;
@ -3954,7 +3954,8 @@ public:
unsigned vecWidth = vtp.getNumElements();
unsigned lastIndex = llvm::size(xferOp.indices()) - 1;
Value off = xferOp.indices()[lastIndex];
Value dim = createOrFoldDimOp(rewriter, loc, xferOp.source(), lastIndex);
Value dim =
vector::createOrFoldDimOp(rewriter, loc, xferOp.source(), lastIndex);
Value mask = buildVectorComparison(
rewriter, xferOp, enableIndexOptimizations, vecWidth, dim, &off);

View File

@ -13,7 +13,9 @@
#include "mlir/Dialect/Vector/VectorUtils.h"
#include "mlir/Analysis/LoopAnalysis.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/Dialect/Vector/VectorOps.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/IntegerSet.h"
@ -27,6 +29,17 @@
using namespace mlir;
/// Build a dim op for dimension `dim` of `source`: memref::DimOp for
/// memref-typed values, tensor::DimOp for tensor-typed values. Uses
/// createOrFold so the op may be folded at construction.
Value mlir::vector::createOrFoldDimOp(OpBuilder &b, Location loc, Value source,
                                      int64_t dim) {
  Type sourceType = source.getType();
  if (sourceType.isa<UnrankedMemRefType, MemRefType>())
    return b.createOrFold<memref::DimOp>(loc, source, dim);
  if (sourceType.isa<UnrankedTensorType, RankedTensorType>())
    return b.createOrFold<tensor::DimOp>(loc, source, dim);
  llvm_unreachable("Expected MemRefType or TensorType");
}
/// Return the number of elements of basis, `0` if empty.
int64_t mlir::computeMaxLinearIndex(ArrayRef<int64_t> basis) {
if (basis.empty())

View File

@ -85,8 +85,12 @@ static bool doubleBuffer(Value oldMemRef, AffineForOp forOp) {
// The double buffer is allocated right before 'forOp'.
OpBuilder bOuter(forOp);
// Put together alloc operands for any dynamic dimensions of the memref.
auto allocOperands = getDynOperands(forOp.getLoc(), oldMemRef, bOuter);
SmallVector<Value, 4> allocOperands;
for (auto dim : llvm::enumerate(oldMemRefType.getShape())) {
if (dim.value() == ShapedType::kDynamicSize)
allocOperands.push_back(bOuter.createOrFold<memref::DimOp>(
forOp.getLoc(), oldMemRef, dim.index()));
}
// Create and place the alloc right before the 'affine.for' operation.
Value newMemRef = bOuter.create<memref::AllocOp>(