[mlir] Extract DestinationStyleOpInterface from LinalgStructuredInterface.
There are several use cases where a destination-style operation needs an interface that contains a subset of the methods from LinalgStructuredInterface. In this change, we move all such methods to a new interface and add forwarding methods to LinalgStructuredInterface to make the change less invasive. It may be possible to refactor the code later to get rid of some or all of the forwarding methods. This change also removes the cloneWithMapper interface methods, as they are not used anywhere.

RFC: https://discourse.llvm.org/t/rfc-interface-for-destination-style-ops/64056

Differential Revision: https://reviews.llvm.org/D132125
parent 0df7e1b0e5
commit d9cbefc4c7
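As context for the diff below: the extracted subset covers the destination-style queries (input/output operand accessors, tied operand/result lookups, and the verifier invariants shown in the last C++ hunk), while LinalgStructuredInterface keeps its existing API surface by forwarding. A minimal sketch of the forwarding pattern, written as plain C++ for illustration only (the commit expresses this in the interfaces' tablegen declarations; the two methods chosen here are just examples):

// Illustrative sketch, not verbatim from the commit: forwarding methods on
// LinalgStructuredInterface delegating to the extracted interface. Any op
// implementing LinalgStructuredInterface is expected to also implement
// DestinationStyleOpInterface (see the trait lists below), so the cast holds.
int64_t getNumInputs() {
  return cast<DestinationStyleOpInterface>(this->getOperation()).getNumInputs();
}
int64_t getNumOutputs() {
  return cast<DestinationStyleOpInterface>(this->getOperation()).getNumOutputs();
}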
@@ -57,6 +57,9 @@ LogicalResult verifyFillInterface(Operation *op);
 /// Verify that `op` conforms to the invariants of StructuredOpInterface
 LogicalResult verifyStructuredOpInterface(Operation *op);
 
+/// Verify that `op` conforms to the invariants of DestinationStyleOpInterface
+LogicalResult verifyDestinationStyleOpInterface(Operation *op);
+
 } // namespace detail
 } // namespace linalg
 } // namespace mlir
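These detail hooks are what interface verification ultimately calls. A sketch of the wiring, under the assumption that the new interface's ODS verify clause mirrors the existing verifyStructuredOpInterface one (the interface definition itself likely sits in the suppressed tablegen diff below):

// Assumed wiring, mirroring the existing structured-op hook: the interface's
// ODS `verify` clause expands to a C++ call into the detail namespace.
LogicalResult verify(Operation *op) {
  return mlir::linalg::detail::verifyDestinationStyleOpInterface(op);
}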
File diff suppressed because it is too large
@@ -28,6 +28,7 @@ class LinalgStructuredBase_Op<string mnemonic, list<Trait> props>
     : Op<Linalg_Dialect, mnemonic, !listconcat([
       SingleBlockImplicitTerminator<"YieldOp">,
       DeclareOpInterfaceMethods<MemoryEffectsOpInterface>,
+      DestinationStyleOpInterface,
       LinalgStructuredInterface,
       RegionBranchOpInterface,
       ReifyRankedShapedTypeOpInterface], props)> {
@@ -633,22 +633,6 @@ LinalgOp::reifyResultShapes(OpBuilder &b,
 
 LogicalResult mlir::linalg::detail::verifyStructuredOpInterface(Operation *op) {
   LinalgOp linalgOp = cast<LinalgOp>(op);
-  // Expect at least one output operand.
-  // This means an op that constructs a tensor out of indices cannot be a
-  // LinalgOp at the moment. For now this will have to be a special op until we
-  // have output shape operands that are not tensors.
-  int64_t numInputs = linalgOp.getNumInputs();
-  int64_t numOutputs = linalgOp.getNumOutputs();
-  if (numOutputs == 0)
-    return op->emitOpError("expected at least one output operand");
-  if (failed(OpTrait::impl::verifyNOperands(op, numInputs + numOutputs)))
-    return failure();
-  // Verify the number of results matches the number of output tensors.
-  if (op->getNumResults() != linalgOp.getOutputTensorOperands().size())
-    return op->emitOpError("expected the number of results (")
-           << op->getNumResults()
-           << ") to be equal to the number of output tensors ("
-           << linalgOp.getOutputTensorOperands().size() << ")";
 
   // Check all iterator types are known.
   auto iteratorTypesRange =
@@ -699,26 +683,6 @@ LogicalResult mlir::linalg::detail::verifyStructuredOpInterface(Operation *op) {
   SmallVector<unsigned> redDims;
   linalgOp.getReductionDims(redDims);
 
-  // Simplifying assumption: either full tensor or full buffer mode.
-  // This allows simpler verification of output operands vs result types
-  // without premature tracking of which operand is what in mixed-mode.
-  // TODO: relax when mixed-mode needs to pass verification.
-  if (!linalgOp.getOutputBufferOperands().empty() &&
-      !linalgOp.getOutputTensorOperands().empty())
-    return op->emitOpError(
-        "expected output operands to all have tensor type or "
-        "all have buffer type");
-
-  for (OpOperand *opOperand : linalgOp.getOutputTensorOperands()) {
-    OpResult result = linalgOp.getTiedOpResult(opOperand);
-    if (result.getType() != opOperand->get().getType())
-      return op->emitOpError("expected type of operand #")
-             << opOperand->getOperandNumber() << " ("
-             << opOperand->get().getType() << ")"
-             << " to match type of corresponding result (" << result.getType()
-             << ")";
-  }
-
   // Output tensor indexing map may not depend on reduction indices.
   for (OpOperand *opOperand : linalgOp.getOutputOperands()) {
     AffineMap indexingMap = linalgOp.getTiedIndexingMap(opOperand);
@@ -740,36 +704,9 @@ LogicalResult mlir::linalg::detail::verifyStructuredOpInterface(Operation *op) {
     }
   }
 
-  // Check the region has exactly one block.
-  if (linalgOp->getNumRegions() != 1 ||
-      !llvm::hasSingleElement(linalgOp->getRegion(0)))
-    return op->emitOpError("expects to have 1 region with 1 block");
-
   if (!linalgOp.getShapesToLoopsMap())
     return op->emitOpError("expected the shape-to-loops map to be non-null");
 
-  // Simplifying assumption: bbargs match 1-1 with shape operands elemental
-  // types.
-  // TODO: once ranked shape types are plugged in, we may want to drop the
-  // corresponding bbargs, that can never be read from. This will be subject to
-  // consistency discussions (i.e. what to do with output tensors whose bbarg is
-  // not used).
-  Block &block = linalgOp->getRegion(0).front();
-
-  if (linalgOp.getNumInputsAndOutputs() != block.getNumArguments())
-    return op->emitOpError("expected as many non-induction variable region "
-                           "arguments as the number of input/output operands");
-
-  for (OpOperand *opOperand : linalgOp.getInputAndOutputOperands()) {
-    Type elementType = getElementTypeOrSelf(opOperand->get());
-    Type argType = block.getArgument(opOperand->getOperandNumber()).getType();
-    if (elementType != argType)
-      return op->emitOpError("expected type of bb argument #")
-             << opOperand->getOperandNumber() << " (" << argType << ")"
-             << " to match element or self type of the corresponding operand ("
-             << elementType << ")";
-  }
-
   // Check if given shapes match to inferred shapes.
   SmallVector<int64_t, 4> endLoopRangeValues = linalgOp.getStaticLoopRanges();
   SmallVector<int64_t, 4> startLoopRangeValues(endLoopRangeValues.size(), 0);
@@ -835,3 +772,75 @@ LogicalResult mlir::linalg::detail::verifyStructuredOpInterface(Operation *op) {
 
   return success();
 }
+
+LogicalResult
+mlir::linalg::detail::verifyDestinationStyleOpInterface(Operation *op) {
+  DestinationStyleOpInterface dstStyleOp =
+      cast<DestinationStyleOpInterface>(op);
+
+  // Expect at least one output operand.
+  // This means an op that constructs a tensor out of indices cannot be a
+  // LinalgOp at the moment. For now this will have to be a special op until we
+  // have output shape operands that are not tensors.
+  int64_t numInputs = dstStyleOp.getNumInputs();
+  int64_t numOutputs = dstStyleOp.getNumOutputs();
+  if (numOutputs == 0)
+    return op->emitOpError("expected at least one output operand");
+  if (failed(OpTrait::impl::verifyNOperands(op, numInputs + numOutputs)))
+    return failure();
+  // Verify the number of results matches the number of output tensors.
+  if (op->getNumResults() != dstStyleOp.getOutputTensorOperands().size())
+    return op->emitOpError("expected the number of results (")
+           << op->getNumResults()
+           << ") to be equal to the number of output tensors ("
+           << dstStyleOp.getOutputTensorOperands().size() << ")";
+
+  // Simplifying assumption: either full tensor or full buffer mode.
+  // This allows simpler verification of output operands vs result types
+  // without premature tracking of which operand is what in mixed-mode.
+  // TODO: relax when mixed-mode needs to pass verification.
+  if (!dstStyleOp.getOutputBufferOperands().empty() &&
+      !dstStyleOp.getOutputTensorOperands().empty())
+    return op->emitOpError(
+        "expected output operands to all have tensor type or "
+        "all have buffer type");
+
+  for (OpOperand *opOperand : dstStyleOp.getOutputTensorOperands()) {
+    OpResult result = dstStyleOp.getTiedOpResult(opOperand);
+    if (result.getType() != opOperand->get().getType())
+      return op->emitOpError("expected type of operand #")
+             << opOperand->getOperandNumber() << " ("
+             << opOperand->get().getType() << ")"
+             << " to match type of corresponding result (" << result.getType()
+             << ")";
+  }
+
+  // Check the region has exactly one block.
+  if (dstStyleOp->getNumRegions() != 1 ||
+      !llvm::hasSingleElement(dstStyleOp->getRegion(0)))
+    return op->emitOpError("expects to have 1 region with 1 block");
+
+  // Simplifying assumption: bbargs match 1-1 with shape operands elemental
+  // types.
+  // TODO: once ranked shape types are plugged in, we may want to drop the
+  // corresponding bbargs, that can never be read from. This will be subject to
+  // consistency discussions (i.e. what to do with output tensors whose bbarg is
+  // not used).
+  Block &block = dstStyleOp->getRegion(0).front();
+
+  if (dstStyleOp.getNumInputsAndOutputs() != block.getNumArguments())
+    return op->emitOpError("expected as many non-induction variable region "
+                           "arguments as the number of input/output operands");
+
+  for (OpOperand *opOperand : dstStyleOp.getInputAndOutputOperands()) {
+    Type elementType = getElementTypeOrSelf(opOperand->get());
+    Type argType = block.getArgument(opOperand->getOperandNumber()).getType();
+    if (elementType != argType)
+      return op->emitOpError("expected type of bb argument #")
+             << opOperand->getOperandNumber() << " (" << argType << ")"
+             << " to match element or self type of the corresponding operand ("
+             << elementType << ")";
+  }
+
+  return success();
+}
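With the checks factored out as above, an operation outside the Linalg structured family can reuse the same destination-style invariants. A hypothetical downstream use (MyDpsOp is an invented name, not part of this commit; interface implementers normally get this via the interface's verify clause rather than a hand-written verifier):

// Hypothetical example: a standalone op delegating its verification to the
// extracted hook, which enforces at least one output operand, results tied
// 1-1 to output tensor operands, all-tensor or all-buffer outputs, a
// single-block region, and bbarg/operand element type agreement.
LogicalResult MyDpsOp::verify() {
  return mlir::linalg::detail::verifyDestinationStyleOpInterface(getOperation());
}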
@@ -2741,7 +2741,8 @@ def TestLinalgConvOpNotLinalgOp : TEST_Op<"conv_op_not_linalg_op", [
 
 def TestLinalgConvOp :
     TEST_Op<"linalg_conv_op", [AttrSizedOperandSegments, SingleBlock,
-      LinalgStructuredInterface, LinalgConvolutionOpInterface]> {
+      DestinationStyleOpInterface, LinalgStructuredInterface,
+      LinalgConvolutionOpInterface]> {
 
   let arguments = (ins Variadic<AnyType>:$inputs,
                        Variadic<AnyType>:$outputs);
@@ -2799,7 +2800,8 @@ def TestLinalgFillOpNotLinalgOp : TEST_Op<"fill_op_not_linalg_op", [
 
 def TestLinalgFillOp :
     TEST_Op<"linalg_fill_op", [AttrSizedOperandSegments, SingleBlock,
-      LinalgStructuredInterface, LinalgFillOpInterface]> {
+      DestinationStyleOpInterface, LinalgStructuredInterface,
+      LinalgFillOpInterface]> {
 
   let arguments = (ins Variadic<AnyType>:$inputs,
                        Variadic<AnyType>:$outputs);