[mlir][linalg] update fusion to support linalg index operations.

The patch updates the linalg fusion pass to add the tile offsets to the indices.

Differential Revision: https://reviews.llvm.org/D100456
This commit is contained in:
Tobias Gysi 2021-04-14 15:18:41 +00:00
parent 4d195f1b4d
commit ce82843f72
4 changed files with 174 additions and 45 deletions

View File

@@ -188,6 +188,27 @@ static LinalgOp fuse(OpBuilder &builder, LinalgOp producer,
SmallPtrSet<Operation *, 1>{newIndex});
}
}
// When the producer has index semantics, we have to transform the indices of
// the producer according to the tiling of the consumer, i.e. offset them by
// the values computed in `loopRanges`.
if (producer.hasIndexSemantics()) {
assert(clonedOp->getNumRegions() == 1 &&
clonedOp->getRegion(0).getBlocks().size() == 1 &&
"expected producer to have one block.");
// Shift all indices by the tile offset.
Block &block = clonedOp->getRegion(0).front();
for (IndexOp indexOp : block.getOps<IndexOp>()) {
OpBuilder::InsertionGuard g(builder);
builder.setInsertionPointAfter(indexOp);
AffineExpr index, offset;
bindDims(builder.getContext(), index, offset);
AffineApplyOp applyOp = builder.create<AffineApplyOp>(
indexOp.getLoc(), index + offset,
ValueRange{indexOp.getResult(), loopRanges[indexOp.dim()].offset});
indexOp.getResult().replaceAllUsesExcept(
applyOp, SmallPtrSet<Operation *, 1>{applyOp});
}
}
return clonedOp;
}

View File

@@ -232,8 +232,7 @@ transformIndexOps(OpBuilder &b, LinalgOp op, SmallVectorImpl<Value> &ivs,
"expected linalg operation to have one block.");
Block &block = op->getRegion(0).front();
-  for (IndexOp indexOp :
-       llvm::make_early_inc_range(block.getOps<linalg::IndexOp>())) {
+  for (IndexOp indexOp : block.getOps<linalg::IndexOp>()) {
auto rangeIndex = loopIndexToRangeIndex.find(indexOp.dim());
if (rangeIndex == loopIndexToRangeIndex.end())
continue;

View File

@@ -62,6 +62,70 @@ func @fuse_indexed_generic_consumer(%A: memref<?x?xf32>,
// -----
#map = affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2)>
#id_2d = affine_map<(d0, d1) -> (d0, d1)>
#pointwise_2d_trait = {
indexing_maps = [#id_2d, #id_2d, #id_2d],
iterator_types = ["parallel", "parallel"]
}
// Fusion test: an elementwise producer (C = A + B, no index semantics) followed
// by a tiled consumer that reads %C through a subview. The consumer uses
// linalg.index and adds the tile offsets (%arg2, %arg3) itself via addi, so
// fusion should not need to insert affine.apply offset corrections here.
func @fuse_indexed_consumer(%A: memref<?x?xf32>,
%B: memref<?x?xf32>,
%C: memref<?x?xf32>,
%D: memref<?x?xf32>) {
// Producer: pointwise addf writing into %C.
linalg.generic #pointwise_2d_trait
ins(%A, %B: memref<?x?xf32>, memref<?x?xf32>)
outs(%C : memref<?x?xf32>) {
^bb0(%e: f32, %arg5: f32, %arg6: f32): // no predecessors
%2 = addf %e, %arg5 : f32
linalg.yield %2 : f32
}
// Tile sizes 10x25 over the dynamic dimensions of %C / %D.
%c1 = constant 1 : index
%c0 = constant 0 : index
%c25 = constant 25 : index
%c10 = constant 10 : index
%0 = memref.dim %C, %c0 : memref<?x?xf32>
%1 = memref.dim %C, %c1 : memref<?x?xf32>
%2 = memref.dim %D, %c0 : memref<?x?xf32>
%3 = memref.dim %D, %c1 : memref<?x?xf32>
// Manually tiled consumer loop nest; fusion is expected to hoist the
// producer into this nest (operating on the %4 subview of %C).
scf.for %arg2 = %c0 to %0 step %c10 {
scf.for %arg3 = %c0 to %1 step %c25 {
%4 = memref.subview %C[%arg2, %arg3][%c10, %c25][%c1, %c1] :
memref<?x?xf32> to memref<?x?xf32, #map>
%5 = memref.subview %D[%arg2, %arg3][%c10, %c25][%c1, %c1] :
memref<?x?xf32> to memref<?x?xf32, #map>
linalg.generic {
indexing_maps = [#id_2d, #id_2d],
iterator_types = ["parallel", "parallel"]}
ins(%4 : memref<?x?xf32, #map>)
outs(%5 : memref<?x?xf32, #map>) {
^bb0(%arg4: f32, %arg5: f32):
// Recover global indices by adding the tile offsets to the local
// linalg.index values, then combine them as f32.
%idx0 = linalg.index 0 : index
%idx1 = linalg.index 1 : index
%6 = addi %idx0, %arg2 : index
%7 = addi %idx1, %arg3 : index
%8 = index_cast %6 : index to i32
%9 = sitofp %8 : i32 to f32
%10 = index_cast %7 : index to i32
%11 = sitofp %10 : i32 to f32
%12 = addf %9, %11 : f32
linalg.yield %12 : f32
}
}
}
return
}
// CHECK-LABEL: func @fuse_indexed_consumer
// CHECK: scf.for
// CHECK: scf.for
// CHECK-NOT: scf.for
// CHECK: linalg.generic
// CHECK-NOT: affine.apply
// CHECK: addf
// CHECK: linalg.generic
// CHECK: index_cast
// -----
#map = affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2)>
#id_2d = affine_map<(d0, d1) -> (d0, d1)>
#pointwise_2d_trait = {
@@ -124,6 +124,56 @@ func @fuse_indexed_generic_producer(%A: memref<?x?xf32>,
// -----
#map = affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2)>
// Fusion test: a producer with index semantics (linalg.index) feeding a tiled
// consumer. After fusion, the producer's indices must be shifted by the tile
// offsets of the scf.parallel loop. Note the producer's indexing map is
// transposed ((i, j) -> (j, i)), so loop dims map crosswise onto memref dims.
func @fuse_indexed_producer(%A: memref<?x?xindex>,
%B: memref<?x?xindex>) {
%c1 = constant 1 : index
%c0 = constant 0 : index
%c25 = constant 25 : index
%c10 = constant 10 : index
// Producer: fills %A with idx0 + idx1 through a transposed map.
linalg.generic {
indexing_maps = [affine_map<(i, j) -> (j, i)>],
iterator_types = ["parallel", "parallel"]}
outs(%A : memref<?x?xindex>) {
^bb0(%a: index): // no predecessors
%idx0 = linalg.index 0 : index
%idx1 = linalg.index 1 : index
%0 = addi %idx0, %idx1 : index
linalg.yield %0 : index
}
%A_X = memref.dim %A, %c0 : memref<?x?xindex>
%A_Y = memref.dim %A, %c1 : memref<?x?xindex>
// Manually tiled consumer (copy %A -> %B) over both dims; fusion pulls the
// producer in and offsets its linalg.index results by the loop IVs.
scf.parallel (%arg2, %arg3) = (%c0, %c0) to (%A_X, %A_Y) step (%c10, %c25) {
%A_view = memref.subview %A[%arg2, %arg3][%c10, %c25][%c1, %c1] :
memref<?x?xindex> to memref<?x?xindex, #map>
%B_view = memref.subview %B[%arg2, %arg3][%c10, %c25][%c1, %c1] :
memref<?x?xindex> to memref<?x?xindex, #map>
linalg.generic {
indexing_maps = [affine_map<(i, j) -> (i, j)>,
affine_map<(i, j) -> (i, j)>],
iterator_types = ["parallel", "parallel"]}
ins(%A_view : memref<?x?xindex, #map>)
outs(%B_view : memref<?x?xindex, #map>) {
^bb0(%a: index, %b: index):
linalg.yield %a : index
}
}
return
}
// CHECK: [[$MAP:#[a-zA-Z0-9_]*]] = affine_map<(d0, d1) -> (d0 + d1)>
// CHECK-LABEL: func @fuse_indexed_producer
// CHECK: scf.parallel ([[I:%.*]], [[J:%.*]]) =
// CHECK: linalg.generic
// CHECK: [[idx0:%.*]] = linalg.index 0 : index
// CHECK: [[i_new:%.*]] = affine.apply [[$MAP]]([[idx0]], [[J]])
// CHECK: [[idx1:%.*]] = linalg.index 1 : index
// CHECK: [[j_new:%.*]] = affine.apply [[$MAP]]([[idx1]], [[I]])
// CHECK: [[sum:%.*]] = addi [[i_new]], [[j_new]] : index
// CHECK: linalg.yield [[sum]] : index
// CHECK: linalg.generic
// -----
#map = affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2)>
#id_2d = affine_map<(d0, d1) -> (d0, d1)>
#pointwise_2d_trait = {
@@ -192,49 +306,48 @@ func @fuse_indexed_generic_producer_tile_second_dim_only(%A: memref<?x?xf32>,
// -----
#map = affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2)>
#id_2d = affine_map<(d0, d1) -> (d0, d1)>
#pointwise_2d_trait = {
indexing_maps = [#id_2d],
iterator_types = ["parallel", "parallel"]
}
func @index_op(%A: memref<?x?xindex>,
%B: memref<?x?xindex>) {
linalg.generic #pointwise_2d_trait
outs(%B : memref<?x?xindex>) {
^bb0(%arg6: index): // no predecessors
%2 = constant 0 : index
linalg.yield %2 : index
}
// Fusion test: producer with index semantics fused into a consumer tiled only
// along the second dimension. Only the index of the tiled dimension should be
// offset after fusion; the untiled dimension's index stays unchanged.
func @fuse_indexed_producer_tiled_second_dim_only(%A: memref<?x?xindex>,
%B: memref<?x?xindex>) {
%c1 = constant 1 : index
%c0 = constant 0 : index
%c25 = constant 25 : index
%c10 = constant 10 : index
// Producer: fills %A with idx0 + idx1 (identity indexing map).
linalg.generic {
indexing_maps = [affine_map<(i, j) -> (i, j)>],
iterator_types = ["parallel", "parallel"]}
outs(%A : memref<?x?xindex>) {
^bb0(%a: index): // no predecessors
%idx0 = linalg.index 0 : index
%idx1 = linalg.index 1 : index
%0 = addi %idx0, %idx1 : index
linalg.yield %0 : index
}
%A_X = memref.dim %A, %c0 : memref<?x?xindex>
%A_Y = memref.dim %A, %c1 : memref<?x?xindex>
// Consumer tiled only along dim 1 (step %c25); dim 0 spans the full %A_X,
// so the subview offset for dim 0 is the constant %c0.
scf.parallel (%arg3) = (%c0) to (%A_Y) step (%c25) {
%A_view = memref.subview %A[%c0, %arg3][%A_X, %c25][%c1, %c1] :
memref<?x?xindex> to memref<?x?xindex, #map>
%B_view = memref.subview %B[%c0, %arg3][%A_X, %c25][%c1, %c1] :
memref<?x?xindex> to memref<?x?xindex, #map>
linalg.generic {
indexing_maps = [affine_map<(i, j) -> (i, j)>,
affine_map<(i, j) -> (i, j)>],
iterator_types = ["parallel", "parallel"]}
ins(%A_view : memref<?x?xindex, #map>)
outs(%B_view : memref<?x?xindex, #map>) {
^bb0(%a: index, %b: index):
linalg.yield %a : index
}
}
return
}
// CHECK-LABEL: func @index_op
// CHECK: linalg.generic
// CHECK: scf.for
// CHECK: scf.for
// CHECK-NOT: scf.for
// CHECK: linalg.generic
// CHECK: [[$MAP:#[a-zA-Z0-9_]*]] = affine_map<(d0, d1) -> (d0 + d1)>
// CHECK-LABEL: func @fuse_indexed_producer_tiled_second_dim_only
// CHECK: scf.parallel ([[J:%.*]]) =
// CHECK: linalg.generic
// CHECK: [[idx0:%.*]] = linalg.index 0 : index
// CHECK: [[idx1:%.*]] = linalg.index 1 : index
// CHECK: [[j_new:%.*]] = affine.apply [[$MAP]]([[idx1]], [[J]])
// CHECK: [[sum:%.*]] = addi [[idx0]], [[j_new]] : index
// CHECK: linalg.yield [[sum]] : index
// CHECK: linalg.generic

View File

@@ -126,10 +126,6 @@ static LogicalResult fuseLinalgOpsGreedily(FuncOp f) {
// Save original Linalg ops, we only want to make a pass over those.
SmallVector<LinalgOp, 8> linalgOps;
f.walk([&](LinalgOp op) {
-    // TODO: remove hasIndexSemantics check once index ops are supported.
-    if (op.hasIndexSemantics())
-      return;
// TODO: support multi-results.
if (op->getNumResults() <= 1)
linalgOps.push_back(op);