[mlir][linalg] Remove IndexedGenericOp support from LinalgBufferize...

after introducing the IndexedGenericOp to GenericOp canonicalization (https://reviews.llvm.org/D101612).

Differential Revision: https://reviews.llvm.org/D102308
Tobias Gysi 2021-05-12 12:00:08 +00:00
parent 7d0a81ca38
commit c6b96ae06f
2 changed files with 9 additions and 40 deletions
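
For context, the canonicalization added in D101612 rewrites every `linalg.indexed_generic` into an equivalent `linalg.generic` that recovers the iteration indices with `linalg.index` ops, so bufferization only needs to handle the generic form. A minimal hand-written sketch of that rewrite (illustrative only, not taken from this commit):

    #map0 = affine_map<(d0) -> (d0)>

    // Before: the index arrives as a leading block argument.
    %0 = linalg.indexed_generic {
        indexing_maps = [#map0, #map0],
        iterator_types = ["parallel"]
      } ins(%arg0 : tensor<4xi32>) outs(%arg0 : tensor<4xi32>) {
    ^bb0(%i: index, %in: i32, %out: i32):
      %i_i32 = index_cast %i : index to i32
      %sum = addi %in, %i_i32 : i32
      linalg.yield %sum : i32
    } -> tensor<4xi32>

    // After: a plain generic; the index is materialized in the body,
    // so the indexed form no longer needs dedicated bufferization support.
    %1 = linalg.generic {
        indexing_maps = [#map0, #map0],
        iterator_types = ["parallel"]
      } ins(%arg0 : tensor<4xi32>) outs(%arg0 : tensor<4xi32>) {
    ^bb0(%in: i32, %out: i32):
      %i = linalg.index 0 : index
      %i_i32 = index_cast %i : index to i32
      %sum = addi %in, %i_i32 : i32
      linalg.yield %sum : i32
    } -> tensor<4xi32>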

mlir/lib/Dialect/Linalg/Transforms/Bufferize.cpp

@@ -71,18 +71,17 @@ allocateBuffersForResults(Location loc, LinalgOp linalgOp, ValueRange outputs,
   return success();
 }
 
-/// Specialization for `linalg::GenericOp` and `linalg::IndexedGenericOp`.
+/// Specialization for `linalg::GenericOp`.
 /// A pattern to convert Generic Linalg operations which work on tensors to
 /// use buffers. BufferPlacement pass should be later used to move
 /// Alloc operations to the correct positions and insert the missing Dealloc
 /// operations in the correct places.
-template <typename GenericOpTy>
 static void
 finalizeBufferAllocationForGenericOp(ConversionPatternRewriter &rewriter,
-                                     GenericOpTy genericOp, ValueRange inputs,
+                                     GenericOp genericOp, ValueRange inputs,
                                      ValueRange outputs) {
   // Generate a new linalg operation that works on buffers.
-  auto newGenericOp = rewriter.create<GenericOpTy>(
+  auto newGenericOp = rewriter.create<GenericOp>(
       genericOp.getLoc(),
       /*resultTensorTypes=*/llvm::None,
       /*inputs=*/inputs,
@@ -116,7 +115,6 @@ static void finalizeBufferAllocation(ConversionPatternRewriter &rewriter,
                                      linalg::LinalgOp linalgOp,
                                      ValueRange inputs, ValueRange outputs) {
   assert(!isa<linalg::GenericOp>(linalgOp.getOperation()));
-  assert(!isa<linalg::IndexedGenericOp>(linalgOp.getOperation()));
   SmallVector<Value, 8> newOperands = inputs;
   newOperands.append(outputs.begin(), outputs.end());
   auto otherOperands = linalgOp.getAssumedNonShapedOperands();
@@ -195,6 +193,10 @@ public:
   LogicalResult
   matchAndRewrite(LinalgOp op, ArrayRef<Value> operands,
                   ConversionPatternRewriter &rewriter) const final {
+    // Canonicalize indexed generic operations before bufferization.
+    if (isa<IndexedGenericOp>(op))
+      return failure();
+
     // GenericOpAdaptor below expects an `operand_segment_sizes` attribute.
     if (!op->hasAttr("operand_segment_sizes"))
       return failure();
@@ -215,15 +217,8 @@ public:
 
     // Delegate to the linalg generic pattern.
     if (auto genericOp = dyn_cast<linalg::GenericOp>(*op)) {
-      finalizeBufferAllocationForGenericOp<GenericOp>(
-          rewriter, genericOp, adaptor.inputs(), newOutputBuffers);
-      return success();
-    }
-
-    // Delegate to the linalg indexed generic pattern.
-    if (auto genericOp = dyn_cast<linalg::IndexedGenericOp>(*op)) {
-      finalizeBufferAllocationForGenericOp<IndexedGenericOp>(
-          rewriter, genericOp, adaptor.inputs(), newOutputBuffers);
+      finalizeBufferAllocationForGenericOp(rewriter, genericOp,
+                                           adaptor.inputs(), newOutputBuffers);
       return success();
     }
 
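
With the early bailout above, the bufferization pattern deliberately leaves `linalg.indexed_generic` untouched and relies on canonicalization to rewrite it first. A hypothetical test sketch of the resulting two-step pipeline (the pass flags are assumed from the in-tree passes of the time; this test is not part of the commit):

    // RUN: mlir-opt %s -canonicalize -linalg-bufferize | FileCheck %s

    #map0 = affine_map<(d0) -> (d0)>

    // Hypothetical example: canonicalization turns the indexed_generic into
    // a generic, which the bufferization pattern then lowers to buffers.
    // CHECK-LABEL: func @indexed_via_generic
    // CHECK: %[[ALLOC:.*]] = memref.alloc() : memref<4xi32>
    // CHECK: linalg.generic
    // CHECK-SAME: outs(%[[ALLOC]] : memref<4xi32>)
    // CHECK: linalg.index 0
    func @indexed_via_generic(%arg0: tensor<4xi32>) -> tensor<4xi32> {
      %0 = linalg.indexed_generic {
          indexing_maps = [#map0, #map0],
          iterator_types = ["parallel"]
        } ins(%arg0 : tensor<4xi32>) outs(%arg0 : tensor<4xi32>) {
      ^bb0(%i: index, %in: i32, %out: i32):
        %i_i32 = index_cast %i : index to i32
        %sum = addi %in, %i_i32 : i32
        linalg.yield %sum : i32
      } -> tensor<4xi32>
      return %0 : tensor<4xi32>
    }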

mlir/test/Dialect/Linalg/bufferize.mlir

@@ -91,32 +91,6 @@ func @multiple_results(%arg0: tensor<4xf32>) -> (tensor<4xf32>, tensor<4xf32>) {
 
 // -----
 
-#map0 = affine_map<(d0) -> (d0)>
-
-// CHECK-LABEL: func @multiple_results_indexed
-// CHECK: %[[RESULT0:.*]] = memref.alloc() : memref<4xi32>
-// CHECK: %[[RESULT1:.*]] = memref.alloc() : memref<4xi32>
-// CHECK: linalg.generic
-// CHECK-SAME: ins(%{{.*}} : memref<4xi32>)
-// CHECK-SAME: outs(%[[RESULT0]], %[[RESULT1]] : memref<4xi32>, memref<4xi32>)
-// CHECK-NEXT: ^bb0(%{{.*}}: i32, %{{.*}}: i32, %{{.*}}: i32):
-func @multiple_results_indexed(%arg0: tensor<4xi32>)
-    -> (tensor<4xi32>, tensor<4xi32>) {
-  %0, %1 = linalg.indexed_generic {
-    indexing_maps = [#map0, #map0, #map0],
-    iterator_types = ["parallel"]
-  } ins(%arg0 : tensor<4xi32>)
-    outs (%arg0, %arg0 : tensor<4xi32>, tensor<4xi32>) {
-  ^bb0(%i: index, %gen_arg1: i32, %out1: i32, %out2: i32):
-    %i_i32 = index_cast %i : index to i32
-    %tmp1 = addi %gen_arg1, %i_i32 : i32
-    linalg.yield %tmp1, %tmp1 : i32, i32
-  } -> tensor<4xi32>, tensor<4xi32>
-}
-
-// -----
-
 #map_2d = affine_map<(d0, d1) -> (d0, d1)>
 
 // Check that the allocs properly consider the different shapes of the output