[mlir][Linalg] Relax PadTensor tiling constraints and expose it to strategies.

Reviewed By: ThomasRaoux

Differential Revision: https://reviews.llvm.org/D117334
Nicolas Vasilache 2022-01-17 17:07:46 +00:00 committed by Nicolas Vasilache
parent d96a5042d6
commit 8a8f0a00b2
4 changed files with 16 additions and 1 deletion

@@ -46,6 +46,9 @@ bool skipUnitDimReshape(const OpResult &producer, OpOperand &consumer);
 //===----------------------------------------------------------------------===//
 using LinalgLoops = SmallVector<Operation *, 4>;
 
+void populatePadTensorTilingPatterns(RewritePatternSet &patterns,
+                                     const LinalgTilingOptions &options);
+
 /// Populate patterns for vectorizing low-D convolution ops. This is a step in
 /// progressive lowering for convolution ops, it assume high-D convolution ops
 /// were decomposed previously.
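
For reference, callers outside the strategy pass can now wire these patterns up themselves. The sketch below is illustrative only: the helper name, the tile sizes, and the greedy-rewrite driver setup are assumptions, not part of this change.

  // Minimal sketch of a standalone user of the newly exposed hook.
  #include "mlir/Dialect/Linalg/Transforms/Transforms.h"
  #include "mlir/IR/BuiltinOps.h"
  #include "mlir/Transforms/GreedyPatternRewriteDriver.h"

  using namespace mlir;

  // Hypothetical helper: greedily tile the linalg.pad_tensor ops in `funcOp`.
  static void tilePadOpsGreedily(FuncOp funcOp) {
    MLIRContext *ctx = funcOp.getContext();
    linalg::LinalgTilingOptions tilingOptions;
    tilingOptions.setTileSizes({2, 3}); // illustrative tile sizes
    RewritePatternSet patterns(ctx);
    linalg::populatePadTensorTilingPatterns(patterns, tilingOptions);
    (void)applyPatternsAndFoldGreedily(funcOp, std::move(patterns));
  }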

@@ -100,6 +100,8 @@ struct LinalgStrategyTilePass
                                              filter);
     else
       tilingPattern.add<LinalgTilingPattern>(ctx, options, filter);
+    if (anchorOpName == linalg::PadTensorOp::getOperationName())
+      populatePadTensorTilingPatterns(tilingPattern, options);
     (void)applyPatternsAndFoldGreedily(funcOp, std::move(tilingPattern));
   }
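
With the anchor check above, a codegen strategy can target linalg.pad_tensor by name. The sketch below assumes the CodegenStrategy API at this revision (tile(opName, options) and configurePassPipeline); the function name and tile sizes are illustrative.

  // Sketch: build a pass pipeline whose tiling step anchors on PadTensorOp.
  #include "mlir/Dialect/Linalg/Transforms/CodegenStrategy.h"
  #include "mlir/Pass/PassManager.h"

  using namespace mlir;

  // Hypothetical pipeline builder; `pm` is expected to nest on builtin.func.
  void buildPadTilingPipeline(OpPassManager &pm, MLIRContext *ctx) {
    linalg::CodegenStrategy strategy;
    strategy.tile(linalg::PadTensorOp::getOperationName(),
                  linalg::LinalgTilingOptions().setTileSizes({2, 3}));
    // Appends the configured LinalgStrategyTilePass to `pm`.
    strategy.configurePassPipeline(pm, ctx);
  }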

@@ -354,7 +354,9 @@ static LogicalResult tilePadTensorOp(RewriterBase &builder, PadTensorOp op,
   int64_t rank = op.getResultType().getRank();
   SmallVector<Value> tileSizes =
       options.tileSizeComputationFunction(builder, op);
-  assert(static_cast<int64_t>(tileSizes.size()) == rank);
+  // Normalize untiled padding dimensions to 0.
+  Value zero = builder.create<arith::ConstantIndexOp>(loc, 0);
+  tileSizes.append(rank - tileSizes.size(), zero);
   // Compute lower and upper bounds of the loop nest.
   SmallVector<Range> ranges = op.getIterationDomain(builder);
   SmallVector<Value> lbs, dims, allDims, steps;
@@ -490,6 +492,12 @@ static void insertTilingPatterns(RewritePatternSet &patterns,
   patterns.add<PadTensorOpTilingPattern>(ctx, options);
 }
 
+void mlir::linalg::populatePadTensorTilingPatterns(
+    RewritePatternSet &patterns, const LinalgTilingOptions &options) {
+  auto *ctx = patterns.getContext();
+  patterns.add<PadTensorOpTilingPattern>(ctx, options);
+}
+
 static void applyExtractSliceOfPadTensorSwapPattern(FuncOp funcOp) {
   MLIRContext *ctx = funcOp.getContext();
   RewritePatternSet patterns(ctx);
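
The dropped assert is the actual relaxation: tile sizes no longer have to cover every dimension of the pad op, and trailing dimensions default to a tile size of 0, i.e. they stay untiled. A minimal sketch of what is now accepted (the rank-2 pad op and `ctx` are assumed):

  // A rank-2 linalg.pad_tensor tiled along only its outermost dimension; the
  // missing second tile size is normalized to 0 (untiled) by tilePadTensorOp.
  linalg::LinalgTilingOptions outerDimOnly;
  outerDimOnly.setTileSizes({2}); // fewer tile sizes than the result rank
  RewritePatternSet padTilingPatterns(ctx);
  linalg::populatePadTensorTilingPatterns(padTilingPatterns, outerDimOnly);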

@@ -2,6 +2,8 @@
 // RUN: FileCheck %s -check-prefix=TILE2
 // RUN: mlir-opt %s -linalg-tile="tile-sizes=0,3" -resolve-shaped-type-result-dims -cse -split-input-file | \
 // RUN: FileCheck %s -check-prefix=TILE1
+// This test only checks that tiling does not crash.
+// RUN: mlir-opt %s -linalg-tile="tile-sizes=2" -resolve-shaped-type-result-dims -cse -split-input-file
 
 // TILE2-DAG: #[[MAP0:.*]] = affine_map<()[s0] -> (s0 + 8)>
 // TILE2-DAG: #[[MAP1:.*]] = affine_map<()[s0] -> (s0 + 7)>