//===- Operation.cpp - Operation support code -----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mlir/IR/Operation.h"

#include "mlir/IR/BlockAndValueMapping.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Dialect.h"
#include "mlir/IR/OpImplementation.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/TypeUtilities.h"
#include "mlir/Interfaces/FoldInterfaces.h"
#include "llvm/ADT/StringExtras.h"
#include <numeric>

using namespace mlir;

//===----------------------------------------------------------------------===//
// OperationName
//===----------------------------------------------------------------------===//

/// Form the OperationName for an op with the specified string. This is either
/// a reference to an AbstractOperation if one is known, or a uniqued
/// Identifier if not.
OperationName::OperationName(StringRef name, MLIRContext *context) {
  if (auto *op = AbstractOperation::lookup(name, context))
    representation = op;
  else
    representation = Identifier::get(name, context);
}

/// Return the name of the dialect this operation is registered to.
StringRef OperationName::getDialectNamespace() const {
  if (Dialect *dialect = getDialect())
    return dialect->getNamespace();
  return getStringRef().split('.').first;
}

/// Return the operation name with dialect name stripped, if it has one.
StringRef OperationName::stripDialect() const {
  return getStringRef().split('.').second;
}

/// Return the name of this operation. This always succeeds.
StringRef OperationName::getStringRef() const {
  return getIdentifier().strref();
}

/// Return the name of this operation as an identifier. This always succeeds.
Identifier OperationName::getIdentifier() const {
  if (auto *op = representation.dyn_cast<const AbstractOperation *>())
    return op->name;
  return representation.get<Identifier>();
}
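
// Usage sketch (illustrative only; assumes `ctx` is an MLIRContext and
// "foo.add" names a hypothetical op):
//
//   OperationName name("foo.add", ctx);
//   assert(name.getDialectNamespace() == "foo");
//   assert(name.stripDialect() == "add");
//   assert(name.getStringRef() == "foo.add");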

OperationName OperationName::getFromOpaquePointer(const void *pointer) {
  return OperationName(
      RepresentationUnion::getFromOpaqueValue(const_cast<void *>(pointer)));
}

//===----------------------------------------------------------------------===//
// Operation
//===----------------------------------------------------------------------===//

/// Create a new Operation with the specified fields.
Operation *Operation::create(Location location, OperationName name,
                             TypeRange resultTypes, ValueRange operands,
                             ArrayRef<NamedAttribute> attributes,
                             BlockRange successors, unsigned numRegions) {
  return create(location, name, resultTypes, operands,
                DictionaryAttr::get(location.getContext(), attributes),
                successors, numRegions);
}

/// Create a new Operation from an operation state.
Operation *Operation::create(const OperationState &state) {
  return create(state.location, state.name, state.types, state.operands,
                state.attributes.getDictionary(state.getContext()),
                state.successors, state.regions);
}
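
// Usage sketch (illustrative only; "foo.add" is a hypothetical op name and
// `builder`, `loc`, `lhs`, and `rhs` are assumed to exist in the caller):
//
//   OperationState state(loc, "foo.add");
//   state.addOperands({lhs, rhs});
//   state.addTypes(builder.getI32Type());
//   Operation *add = Operation::create(state);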

/// Create a new Operation with the specified fields.
Operation *Operation::create(Location location, OperationName name,
                             TypeRange resultTypes, ValueRange operands,
                             DictionaryAttr attributes, BlockRange successors,
                             RegionRange regions) {
  unsigned numRegions = regions.size();
  Operation *op = create(location, name, resultTypes, operands, attributes,
                         successors, numRegions);
  for (unsigned i = 0; i < numRegions; ++i)
    if (regions[i])
      op->getRegion(i).takeBody(*regions[i]);
  return op;
}

/// Overload of create that takes an existing DictionaryAttr to avoid
/// unnecessarily uniquing a list of attributes.
Operation *Operation::create(Location location, OperationName name,
                             TypeRange resultTypes, ValueRange operands,
                             DictionaryAttr attributes, BlockRange successors,
                             unsigned numRegions) {
  assert(llvm::all_of(resultTypes, [](Type t) { return t; }) &&
         "unexpected null result type");

  // We only need to allocate additional memory for a subset of results.
  unsigned numTrailingResults = OpResult::getNumTrailing(resultTypes.size());
  unsigned numInlineResults = OpResult::getNumInline(resultTypes.size());
  unsigned numSuccessors = successors.size();
  unsigned numOperands = operands.size();
  unsigned numResults = resultTypes.size();

  // If the operation is known to have no operands, don't allocate an operand
  // storage.
  bool needsOperandStorage = true;
  if (operands.empty()) {
    if (const AbstractOperation *abstractOp = name.getAbstractOperation())
      needsOperandStorage = !abstractOp->hasTrait<OpTrait::ZeroOperands>();
  }

  // Compute the byte size for the operation and the operand storage. This takes
  // into account the size of the operation, its trailing objects, and its
  // prefixed objects.
  size_t byteSize =
      totalSizeToAlloc<BlockOperand, Region, detail::OperandStorage>(
          numSuccessors, numRegions, needsOperandStorage ? 1 : 0) +
      detail::OperandStorage::additionalAllocSize(numOperands);
  size_t prefixByteSize = llvm::alignTo(
      Operation::prefixAllocSize(numTrailingResults, numInlineResults),
      alignof(Operation));
  char *mallocMem = reinterpret_cast<char *>(malloc(byteSize + prefixByteSize));
  void *rawMem = mallocMem + prefixByteSize;

  // Create the new Operation.
  Operation *op =
      ::new (rawMem) Operation(location, name, numResults, numSuccessors,
                               numRegions, attributes, needsOperandStorage);

  assert((numSuccessors == 0 || op->mightHaveTrait<OpTrait::IsTerminator>()) &&
         "unexpected successors in a non-terminator operation");

  // Initialize the results.
  auto resultTypeIt = resultTypes.begin();
  for (unsigned i = 0; i < numInlineResults; ++i, ++resultTypeIt)
    new (op->getInlineOpResult(i)) detail::InlineOpResult(*resultTypeIt, i);
  for (unsigned i = 0; i < numTrailingResults; ++i, ++resultTypeIt) {
    new (op->getOutOfLineOpResult(i))
        detail::OutOfLineOpResult(*resultTypeIt, i);
  }

  // Initialize the regions.
  for (unsigned i = 0; i != numRegions; ++i)
    new (&op->getRegion(i)) Region(op);

  // Initialize the operands.
  if (needsOperandStorage)
    new (&op->getOperandStorage()) detail::OperandStorage(op, operands);

  // Initialize the successors.
  auto blockOperands = op->getBlockOperands();
  for (unsigned i = 0; i != numSuccessors; ++i)
    new (&blockOperands[i]) BlockOperand(op, successors[i]);

  return op;
}
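
// Illustrative picture of the allocation performed above (a sketch, not
// normative; the trailing objects follow the order given to totalSizeToAlloc):
//
//   [results (prefix)][Operation][BlockOperands][Regions][OperandStorage]
//   ^ mallocMem       ^ rawMem == op
//
// Results live in "prefixed" storage placed before the Operation itself,
// which is why destroy() subtracts the aligned prefix size before calling
// free().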

Operation::Operation(Location location, OperationName name, unsigned numResults,
                     unsigned numSuccessors, unsigned numRegions,
                     DictionaryAttr attributes, bool hasOperandStorage)
    : location(location), numResults(numResults), numSuccs(numSuccessors),
      numRegions(numRegions), hasOperandStorage(hasOperandStorage), name(name),
      attrs(attributes) {
  assert(attributes && "unexpected null attribute dictionary");
#ifndef NDEBUG
  if (!getDialect() && !getContext()->allowsUnregisteredDialects())
    llvm::report_fatal_error(
        name.getStringRef() +
        " created with unregistered dialect. If this is intended, please call "
        "allowUnregisteredDialects() on the MLIRContext, or use "
        "-allow-unregistered-dialect with the MLIR tool used.");
#endif
}

// Operations are deleted through the destroy() member because they are
// allocated via malloc.
Operation::~Operation() {
  assert(block == nullptr && "operation destroyed but still in a block");
#ifndef NDEBUG
  if (!use_empty()) {
    {
      InFlightDiagnostic diag =
          emitOpError("operation destroyed but still has uses");
      for (Operation *user : getUsers())
        diag.attachNote(user->getLoc()) << "- use: " << *user << "\n";
    }
    llvm::report_fatal_error("operation destroyed but still has uses");
  }
#endif
  // Explicitly run the destructors for the operands.
  if (hasOperandStorage)
    getOperandStorage().~OperandStorage();

  // Explicitly run the destructors for the successors.
  for (auto &successor : getBlockOperands())
    successor.~BlockOperand();

  // Explicitly destroy the regions.
  for (auto &region : getRegions())
    region.~Region();
}

/// Destroy this operation or one of its subclasses.
void Operation::destroy() {
  // Operations may have additional prefixed allocation, which needs to be
  // accounted for here when computing the address to free.
  char *rawMem = reinterpret_cast<char *>(this) -
                 llvm::alignTo(prefixAllocSize(), alignof(Operation));
  this->~Operation();
  free(rawMem);
}

/// Return true if this operation is a proper ancestor of the `other`
/// operation.
bool Operation::isProperAncestor(Operation *other) {
  while ((other = other->getParentOp()))
    if (this == other)
      return true;
  return false;
}

/// Replace any uses of 'from' with 'to' within this operation.
void Operation::replaceUsesOfWith(Value from, Value to) {
  if (from == to)
    return;
  for (auto &operand : getOpOperands())
    if (operand.get() == from)
      operand.set(to);
}

/// Replace the current operands of this operation with the ones provided in
/// 'operands'.
void Operation::setOperands(ValueRange operands) {
  if (LLVM_LIKELY(hasOperandStorage))
    return getOperandStorage().setOperands(this, operands);
  assert(operands.empty() && "setting operands without an operand storage");
}

/// Replace the operands beginning at 'start' and ending at 'start' + 'length'
/// with the ones provided in 'operands'. 'operands' may be smaller or larger
/// than the range pointed to by 'start'+'length'.
void Operation::setOperands(unsigned start, unsigned length,
                            ValueRange operands) {
  assert((start + length) <= getNumOperands() &&
         "invalid operand range specified");
  if (LLVM_LIKELY(hasOperandStorage))
    return getOperandStorage().setOperands(this, start, length, operands);
  assert(operands.empty() && "setting operands without an operand storage");
}

/// Insert the given operands into the operand list at the given 'index'.
void Operation::insertOperands(unsigned index, ValueRange operands) {
  if (LLVM_LIKELY(hasOperandStorage))
    return setOperands(index, /*length=*/0, operands);
  assert(operands.empty() && "inserting operands without an operand storage");
}
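
// Worked example (illustrative): if `op` currently has operands [a, b, c, d],
// then op->setOperands(/*start=*/1, /*length=*/2, {x}) yields [a, x, d], and
// a subsequent op->insertOperands(/*index=*/0, {y}) yields [y, a, x, d].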

//===----------------------------------------------------------------------===//
// Diagnostics
//===----------------------------------------------------------------------===//

/// Emit an error about fatal conditions with this operation, reporting up to
/// any diagnostic handlers that may be listening.
InFlightDiagnostic Operation::emitError(const Twine &message) {
  InFlightDiagnostic diag = mlir::emitError(getLoc(), message);
  if (getContext()->shouldPrintOpOnDiagnostic()) {
    diag.attachNote(getLoc())
        .append("see current operation: ")
        .appendOp(*this, OpPrintingFlags().printGenericOpForm());
  }
  return diag;
}

/// Emit a warning about this operation, reporting up to any diagnostic
/// handlers that may be listening.
InFlightDiagnostic Operation::emitWarning(const Twine &message) {
  InFlightDiagnostic diag = mlir::emitWarning(getLoc(), message);
  if (getContext()->shouldPrintOpOnDiagnostic())
    diag.attachNote(getLoc()) << "see current operation: " << *this;
  return diag;
}

/// Emit a remark about this operation, reporting up to any diagnostic
/// handlers that may be listening.
InFlightDiagnostic Operation::emitRemark(const Twine &message) {
  InFlightDiagnostic diag = mlir::emitRemark(getLoc(), message);
  if (getContext()->shouldPrintOpOnDiagnostic())
    diag.attachNote(getLoc()) << "see current operation: " << *this;
  return diag;
}
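
// Usage sketch (illustrative): from a verifier or pass,
//
//   if (op->getNumOperands() == 0)
//     return op->emitError("expected at least one operand");
//
// The returned InFlightDiagnostic converts to failure(), so it can be
// returned directly from functions yielding LogicalResult.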

//===----------------------------------------------------------------------===//
// Operation Ordering
//===----------------------------------------------------------------------===//

constexpr unsigned Operation::kInvalidOrderIdx;
constexpr unsigned Operation::kOrderStride;

/// Given an operation 'other' that is within the same parent block, return
/// whether the current operation is before 'other' in the operation list
/// of the parent block.
/// Note: This function has an average complexity of O(1), but worst case may
/// take O(N) where N is the number of operations within the parent block.
bool Operation::isBeforeInBlock(Operation *other) {
  assert(block && "Operations without parent blocks have no order.");
  assert(other && other->block == block &&
         "Expected other operation to have the same parent block.");
  // If the order of the block is already invalid, directly recompute the
  // ordering of the parent block.
  if (!block->isOpOrderValid()) {
    block->recomputeOpOrder();
  } else {
    // Update the order of either operation if necessary.
    updateOrderIfNecessary();
    other->updateOrderIfNecessary();
  }

  return orderIndex < other->orderIndex;
}

/// Update the order index of this operation if necessary, potentially
/// recomputing the order of the parent block.
void Operation::updateOrderIfNecessary() {
  assert(block && "expected valid parent");

  // If the order is valid for this operation, there is nothing to do.
  if (hasValidOrder())
    return;
  Operation *blockFront = &block->front();
  Operation *blockBack = &block->back();

  // This method is expected to only be invoked on blocks with more than one
  // operation.
  assert(blockFront != blockBack && "expected more than one operation");

  // If the operation is at the end of the block.
  if (this == blockBack) {
    Operation *prevNode = getPrevNode();
    if (!prevNode->hasValidOrder())
      return block->recomputeOpOrder();

    // Add the stride to the previous operation.
    orderIndex = prevNode->orderIndex + kOrderStride;
    return;
  }

  // If this is the first operation, try to use the next operation to compute
  // the ordering.
  if (this == blockFront) {
    Operation *nextNode = getNextNode();
    if (!nextNode->hasValidOrder())
      return block->recomputeOpOrder();
    // There is no order to give this operation.
    if (nextNode->orderIndex == 0)
      return block->recomputeOpOrder();

    // If we can't use the stride, just take the middle value left. This is
    // safe because we know there is at least one valid index to assign to.
    if (nextNode->orderIndex <= kOrderStride)
      orderIndex = (nextNode->orderIndex / 2);
    else
      orderIndex = kOrderStride;
    return;
  }

  // Otherwise, this operation is between two others. Place this operation in
  // the middle of the previous and next if possible.
  Operation *prevNode = getPrevNode(), *nextNode = getNextNode();
  if (!prevNode->hasValidOrder() || !nextNode->hasValidOrder())
    return block->recomputeOpOrder();
  unsigned prevOrder = prevNode->orderIndex, nextOrder = nextNode->orderIndex;

  // Check to see if there is a valid order between the two.
  if (prevOrder + 1 == nextOrder)
    return block->recomputeOpOrder();
  orderIndex = prevOrder + ((nextOrder - prevOrder) / 2);
}
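
// Worked example (illustrative, assuming kOrderStride == 5): a freshly
// renumbered block assigns indices 0, 5, 10, ... to its operations. Appending
// after an op with index 10 assigns 10 + kOrderStride = 15; inserting between
// indices 5 and 10 assigns 5 + (10 - 5) / 2 = 7. Only when two neighbors hold
// adjacent indices (e.g. 7 and 8) must the whole block be renumbered via
// recomputeOpOrder().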

//===----------------------------------------------------------------------===//
// ilist_traits for Operation
//===----------------------------------------------------------------------===//

auto llvm::ilist_detail::SpecificNodeAccess<
    typename llvm::ilist_detail::compute_node_options<
        ::mlir::Operation>::type>::getNodePtr(pointer N) -> node_type * {
  return NodeAccess::getNodePtr<OptionsT>(N);
}

auto llvm::ilist_detail::SpecificNodeAccess<
    typename llvm::ilist_detail::compute_node_options<
        ::mlir::Operation>::type>::getNodePtr(const_pointer N)
    -> const node_type * {
  return NodeAccess::getNodePtr<OptionsT>(N);
}

auto llvm::ilist_detail::SpecificNodeAccess<
    typename llvm::ilist_detail::compute_node_options<
        ::mlir::Operation>::type>::getValuePtr(node_type *N) -> pointer {
  return NodeAccess::getValuePtr<OptionsT>(N);
}

auto llvm::ilist_detail::SpecificNodeAccess<
    typename llvm::ilist_detail::compute_node_options<
        ::mlir::Operation>::type>::getValuePtr(const node_type *N)
    -> const_pointer {
  return NodeAccess::getValuePtr<OptionsT>(N);
}

void llvm::ilist_traits<::mlir::Operation>::deleteNode(Operation *op) {
  op->destroy();
}

Block *llvm::ilist_traits<::mlir::Operation>::getContainingBlock() {
  size_t Offset(size_t(&((Block *)nullptr->*Block::getSublistAccess(nullptr))));
  iplist<Operation> *Anchor(static_cast<iplist<Operation> *>(this));
  return reinterpret_cast<Block *>(reinterpret_cast<char *>(Anchor) - Offset);
}
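
// Note (illustrative): getContainingBlock() recovers the owning Block from its
// embedded operation list by computing the byte offset of that list member
// within Block and subtracting it from the list's own address -- effectively a
// hand-rolled offsetof() for a member reachable only via a pointer-to-member.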

/// This is a trait method invoked when an operation is added to a block. We
/// keep the block pointer up to date.
void llvm::ilist_traits<::mlir::Operation>::addNodeToList(Operation *op) {
  assert(!op->getBlock() && "already in an operation block!");
  op->block = getContainingBlock();

  // Invalidate the order on the operation.
  op->orderIndex = Operation::kInvalidOrderIdx;
}

/// This is a trait method invoked when an operation is removed from a block.
/// We keep the block pointer up to date.
void llvm::ilist_traits<::mlir::Operation>::removeNodeFromList(Operation *op) {
  assert(op->block && "not already in an operation block!");
  op->block = nullptr;
}

/// This is a trait method invoked when an operation is moved from one block
/// to another. We keep the block pointer up to date.
void llvm::ilist_traits<::mlir::Operation>::transferNodesFromList(
    ilist_traits<Operation> &otherList, op_iterator first, op_iterator last) {
  Block *curParent = getContainingBlock();

  // Invalidate the ordering of the parent block.
  curParent->invalidateOpOrder();

  // If we are transferring operations within the same block, the block
  // pointer doesn't need to be updated.
  if (curParent == otherList.getContainingBlock())
    return;

  // Update the 'block' member of each operation.
  for (; first != last; ++first)
    first->block = curParent;
}

/// Remove this operation (and its descendants) from its Block and delete
/// all of them.
void Operation::erase() {
  if (auto *parent = getBlock())
    parent->getOperations().erase(this);
  else
    destroy();
}

/// Remove the operation from its parent block, but don't delete it.
void Operation::remove() {
  if (Block *parent = getBlock())
    parent->getOperations().remove(this);
}

/// Unlink this operation from its current block and insert it right before
/// `existingOp` which may be in the same or another block in the same
/// function.
void Operation::moveBefore(Operation *existingOp) {
  moveBefore(existingOp->getBlock(), existingOp->getIterator());
}

/// Unlink this operation from its current basic block and insert it right
/// before `iterator` in the specified basic block.
void Operation::moveBefore(Block *block,
                           llvm::iplist<Operation>::iterator iterator) {
  block->getOperations().splice(iterator, getBlock()->getOperations(),
                                getIterator());
}

/// Unlink this operation from its current block and insert it right after
/// `existingOp` which may be in the same or another block in the same function.
void Operation::moveAfter(Operation *existingOp) {
  moveAfter(existingOp->getBlock(), existingOp->getIterator());
}

/// Unlink this operation from its current block and insert it right after
/// `iterator` in the specified block.
void Operation::moveAfter(Block *block,
                          llvm::iplist<Operation>::iterator iterator) {
  assert(iterator != block->end() && "cannot move after end of block");
  moveBefore(&*std::next(iterator));
}
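
// Usage sketch (illustrative): hoisting `op` directly before the terminator of
// a block `b`:
//
//   op->moveBefore(b->getTerminator());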

/// This drops all operand uses from this operation, which is an essential
/// step in breaking cyclic dependences between references when they are to
/// be deleted.
void Operation::dropAllReferences() {
  for (auto &op : getOpOperands())
    op.drop();

  for (auto &region : getRegions())
    region.dropAllReferences();

  for (auto &dest : getBlockOperands())
    dest.drop();
}

/// This drops all uses of any values defined by this operation or its nested
/// regions, wherever they are located.
void Operation::dropAllDefinedValueUses() {
  dropAllUses();

  for (auto &region : getRegions())
    for (auto &block : region)
      block.dropAllDefinedValueUses();
}

void Operation::setSuccessor(Block *block, unsigned index) {
  assert(index < getNumSuccessors());
  getBlockOperands()[index].set(block);
}

/// Attempt to fold this operation using the Op's registered foldHook.
LogicalResult Operation::fold(ArrayRef<Attribute> operands,
                              SmallVectorImpl<OpFoldResult> &results) {
  // If we have a registered operation definition matching this one, use it to
  // try to constant fold the operation.
  auto *abstractOp = getAbstractOperation();
  if (abstractOp && succeeded(abstractOp->foldHook(this, operands, results)))
    return success();

  // Otherwise, fall back on the dialect hook to handle it.
  Dialect *dialect = getDialect();
  if (!dialect)
    return failure();

  auto *interface = dialect->getRegisteredInterface<DialectFoldInterface>();
  if (!interface)
    return failure();

  return interface->fold(this, operands, results);
}
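
// Usage sketch (illustrative): folding an op whose operands may be known
// constants. `constOperands` holds one Attribute per operand (null when that
// operand is not constant):
//
//   SmallVector<OpFoldResult, 2> results;
//   if (succeeded(op->fold(constOperands, results))) {
//     // Each entry is either a Value to reuse or an Attribute to materialize
//     // as a constant; an empty `results` means the op folded in place.
//   }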

/// Emit an error with the op name prefixed, like "'dim' op " which is
/// convenient for verifiers.
InFlightDiagnostic Operation::emitOpError(const Twine &message) {
  return emitError() << "'" << getName() << "' op " << message;
}

//===----------------------------------------------------------------------===//
// Operation Cloning
//===----------------------------------------------------------------------===//

/// Create a deep copy of this operation but keep the operation regions empty.
/// Operands are remapped using `mapper` (if present), and `mapper` is updated
/// to contain the results.
Operation *Operation::cloneWithoutRegions(BlockAndValueMapping &mapper) {
  SmallVector<Value, 8> operands;
  SmallVector<Block *, 2> successors;

  // Remap the operands.
  operands.reserve(getNumOperands());
  for (auto opValue : getOperands())
    operands.push_back(mapper.lookupOrDefault(opValue));

  // Remap the successors.
  successors.reserve(getNumSuccessors());
  for (Block *successor : getSuccessors())
    successors.push_back(mapper.lookupOrDefault(successor));

  // Create the new operation.
  auto *newOp = create(getLoc(), getName(), getResultTypes(), operands, attrs,
                       successors, getNumRegions());

  // Remember the mapping of any results.
  for (unsigned i = 0, e = getNumResults(); i != e; ++i)
    mapper.map(getResult(i), newOp->getResult(i));

  return newOp;
}

Operation *Operation::cloneWithoutRegions() {
  BlockAndValueMapping mapper;
  return cloneWithoutRegions(mapper);
}

/// Create a deep copy of this operation, remapping any operands that use
/// values outside of the operation using the map that is provided (leaving
/// them alone if no entry is present). Replaces references to cloned
/// sub-operations to the corresponding operation that is copied, and adds
/// those mappings to the map.
Operation *Operation::clone(BlockAndValueMapping &mapper) {
  auto *newOp = cloneWithoutRegions(mapper);

  // Clone the regions.
  for (unsigned i = 0; i != numRegions; ++i)
    getRegion(i).cloneInto(&newOp->getRegion(i), mapper);

  return newOp;
}

Operation *Operation::clone() {
  BlockAndValueMapping mapper;
  return clone(mapper);
}
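
// Usage sketch (illustrative): cloning `op` while substituting one operand.
// `oldVal` and `newVal` are assumed to be Values visible at the insertion
// point:
//
//   BlockAndValueMapping mapping;
//   mapping.map(oldVal, newVal);
//   Operation *copy = op->clone(mapping);
//   someBlock->push_back(copy);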

//===----------------------------------------------------------------------===//
// OpState trait class.
//===----------------------------------------------------------------------===//

// The fallback for the parser is to reject the custom assembly form.
ParseResult OpState::parse(OpAsmParser &parser, OperationState &result) {
  return parser.emitError(parser.getNameLoc(), "has no custom assembly form");
}

// The fallback for the printer is to print in the generic assembly form.
void OpState::print(Operation *op, OpAsmPrinter &p) { p.printGenericOp(op); }

// Print the operation name, eliding the dialect prefix when it matches the
// given default dialect.
void OpState::printOpName(Operation *op, OpAsmPrinter &p,
                          StringRef defaultDialect) {
  StringRef name = op->getName().getStringRef();
  if (name.startswith((defaultDialect + ".").str()))
    name = name.drop_front(defaultDialect.size() + 1);
  // TODO: remove this special case (and update test/IR/parser.mlir)
  else if ((defaultDialect.empty() || defaultDialect == "builtin") &&
           name.startswith("std."))
    name = name.drop_front(4);
  p.getStream() << name;
}
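
// Worked example (illustrative): with defaultDialect == "foo", an op named
// "foo.add" prints as "add", while "bar.add" still prints as "bar.add".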

/// Emit an error about fatal conditions with this operation, reporting up to
/// any diagnostic handlers that may be listening.
InFlightDiagnostic OpState::emitError(const Twine &message) {
  return getOperation()->emitError(message);
}

/// Emit an error with the op name prefixed, like "'dim' op " which is
/// convenient for verifiers.
InFlightDiagnostic OpState::emitOpError(const Twine &message) {
  return getOperation()->emitOpError(message);
}

/// Emit a warning about this operation, reporting up to any diagnostic
/// handlers that may be listening.
InFlightDiagnostic OpState::emitWarning(const Twine &message) {
  return getOperation()->emitWarning(message);
}

/// Emit a remark about this operation, reporting up to any diagnostic
/// handlers that may be listening.
InFlightDiagnostic OpState::emitRemark(const Twine &message) {
  return getOperation()->emitRemark(message);
}

//===----------------------------------------------------------------------===//
// Op Trait implementations
//===----------------------------------------------------------------------===//

OpFoldResult OpTrait::impl::foldIdempotent(Operation *op) {
  auto *argumentOp = op->getOperand(0).getDefiningOp();
  if (argumentOp && op->getName() == argumentOp->getName()) {
    // Replace the outer operation's result with the inner operation's result.
    return op->getOperand(0);
  }

  return {};
}

OpFoldResult OpTrait::impl::foldInvolution(Operation *op) {
  auto *argumentOp = op->getOperand(0).getDefiningOp();
  if (argumentOp && op->getName() == argumentOp->getName()) {
    // Replace the outer involution's output with the inner operation's input.
    return argumentOp->getOperand(0);
  }

  return {};
}
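
// Worked example (illustrative): for an idempotent op f, f(f(x)) folds to
// f(x), so foldIdempotent returns the inner op's result (the outer op's
// operand). For an involution g (e.g. a hypothetical "foo.not"), g(g(x))
// folds all the way to x, so foldInvolution returns the inner op's own
// operand instead.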

LogicalResult OpTrait::impl::verifyZeroOperands(Operation *op) {
  if (op->getNumOperands() != 0)
    return op->emitOpError() << "requires zero operands";
  return success();
}

LogicalResult OpTrait::impl::verifyOneOperand(Operation *op) {
  if (op->getNumOperands() != 1)
    return op->emitOpError() << "requires a single operand";
  return success();
}
|
|
|
|
|
2019-04-03 04:09:34 +08:00
|
|
|
LogicalResult OpTrait::impl::verifyNOperands(Operation *op,
|
|
|
|
unsigned numOperands) {
|
2018-10-10 06:04:27 +08:00
|
|
|
if (op->getNumOperands() != numOperands) {
|
2019-05-04 02:40:57 +08:00
|
|
|
return op->emitOpError() << "expected " << numOperands
|
|
|
|
<< " operands, but found " << op->getNumOperands();
|
2018-10-10 06:04:27 +08:00
|
|
|
}
|
2019-04-03 04:09:34 +08:00
|
|
|
return success();
|
2018-09-27 12:18:42 +08:00
|
|
|
}
|
|
|
|
|
2019-04-03 04:09:34 +08:00
|
|
|
LogicalResult OpTrait::impl::verifyAtLeastNOperands(Operation *op,
|
|
|
|
unsigned numOperands) {
|
2018-09-27 12:18:42 +08:00
|
|
|
if (op->getNumOperands() < numOperands)
|
2019-05-04 02:40:57 +08:00
|
|
|
return op->emitOpError()
|
2021-09-14 16:42:50 +08:00
|
|
|
<< "expected " << numOperands << " or more operands, but found "
|
|
|
|
<< op->getNumOperands();
|
2019-04-03 04:09:34 +08:00
|
|
|
return success();
|
2018-09-27 12:18:42 +08:00
|
|
|
}
|
|
|
|
|
2018-11-07 07:37:39 +08:00
|
|
|
/// If this is a vector type or a tensor type, return the scalar element type
/// that it is built around; otherwise return the type unmodified.
static Type getTensorOrVectorElementType(Type type) {
  if (auto vec = type.dyn_cast<VectorType>())
    return vec.getElementType();

  // Look through tensor<vector<...>> to find the underlying element type.
  if (auto tensor = type.dyn_cast<TensorType>())
    return getTensorOrVectorElementType(tensor.getElementType());
  return type;
}

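// For example, getTensorOrVectorElementType(tensor<4xvector<8xf32>>) recurses
// through the tensor to yield f32, while a scalar type such as i32 is
// returned unchanged.
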
LogicalResult OpTrait::impl::verifyIsIdempotent(Operation *op) {
  // FIXME: Add back a check that the operation has no side effects. Adding it
  // currently would make the shared library build fail, since it would create
  // a cyclic dependency of IR on SideEffectInterfaces.
  return success();
}

LogicalResult OpTrait::impl::verifyIsInvolution(Operation *op) {
  // FIXME: Add back a check that the operation has no side effects. Adding it
  // currently would make the shared library build fail, since it would create
  // a cyclic dependency of IR on SideEffectInterfaces.
  return success();
}

LogicalResult
OpTrait::impl::verifyOperandsAreSignlessIntegerLike(Operation *op) {
  for (auto opType : op->getOperandTypes()) {
    auto type = getTensorOrVectorElementType(opType);
    if (!type.isSignlessIntOrIndex())
      return op->emitOpError() << "requires an integer or index type";
  }
  return success();
}

LogicalResult OpTrait::impl::verifyOperandsAreFloatLike(Operation *op) {
  for (auto opType : op->getOperandTypes()) {
    auto type = getTensorOrVectorElementType(opType);
    if (!type.isa<FloatType>())
      return op->emitOpError("requires a float type");
  }
  return success();
}

LogicalResult OpTrait::impl::verifySameTypeOperands(Operation *op) {
  // Zero or one operands always trivially have the "same" type.
  unsigned nOperands = op->getNumOperands();
  if (nOperands < 2)
    return success();

  auto type = op->getOperand(0).getType();
  for (auto opType : llvm::drop_begin(op->getOperandTypes(), 1))
    if (opType != type)
      return op->emitOpError() << "requires all operands to have the same type";
  return success();
}

LogicalResult OpTrait::impl::verifyZeroRegion(Operation *op) {
  if (op->getNumRegions() != 0)
    return op->emitOpError() << "requires zero regions";
  return success();
}

LogicalResult OpTrait::impl::verifyOneRegion(Operation *op) {
  if (op->getNumRegions() != 1)
    return op->emitOpError() << "requires one region";
  return success();
}

LogicalResult OpTrait::impl::verifyNRegions(Operation *op,
                                            unsigned numRegions) {
  if (op->getNumRegions() != numRegions)
    return op->emitOpError() << "expected " << numRegions << " regions";
  return success();
}

LogicalResult OpTrait::impl::verifyAtLeastNRegions(Operation *op,
                                                   unsigned numRegions) {
  if (op->getNumRegions() < numRegions)
    return op->emitOpError() << "expected " << numRegions << " or more regions";
  return success();
}

LogicalResult OpTrait::impl::verifyZeroResult(Operation *op) {
  if (op->getNumResults() != 0)
    return op->emitOpError() << "requires zero results";
  return success();
}

LogicalResult OpTrait::impl::verifyOneResult(Operation *op) {
  if (op->getNumResults() != 1)
    return op->emitOpError() << "requires one result";
  return success();
}

LogicalResult OpTrait::impl::verifyNResults(Operation *op,
                                            unsigned numResults) {
  if (op->getNumResults() != numResults)
    return op->emitOpError() << "expected " << numResults << " results";
  return success();
}

LogicalResult OpTrait::impl::verifyAtLeastNResults(Operation *op,
                                                   unsigned numResults) {
  if (op->getNumResults() < numResults)
    return op->emitOpError()
           << "expected " << numResults << " or more results";
  return success();
}

LogicalResult OpTrait::impl::verifySameOperandsShape(Operation *op) {
  if (failed(verifyAtLeastNOperands(op, 1)))
    return failure();

  if (failed(verifyCompatibleShapes(op->getOperandTypes())))
    return op->emitOpError() << "requires the same shape for all operands";

  return success();
}

LogicalResult OpTrait::impl::verifySameOperandsAndResultShape(Operation *op) {
  if (failed(verifyAtLeastNOperands(op, 1)) ||
      failed(verifyAtLeastNResults(op, 1)))
    return failure();

  SmallVector<Type, 8> types(op->getOperandTypes());
  types.append(llvm::to_vector<4>(op->getResultTypes()));

  if (failed(verifyCompatibleShapes(types)))
    return op->emitOpError()
           << "requires the same shape for all operands and results";

  return success();
}

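// Note that "same shape" above means *compatible* shapes: a dynamic dimension
// is treated as compatible with any static size, so tensor<?xf32> and
// tensor<4xf32> pass the check, while tensor<3xf32> and tensor<4xf32> do not.
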
LogicalResult OpTrait::impl::verifySameOperandsElementType(Operation *op) {
  if (failed(verifyAtLeastNOperands(op, 1)))
    return failure();
  auto elementType = getElementTypeOrSelf(op->getOperand(0));

  for (auto operand : llvm::drop_begin(op->getOperands(), 1)) {
    if (getElementTypeOrSelf(operand) != elementType)
      return op->emitOpError("requires the same element type for all operands");
  }

  return success();
}

LogicalResult
OpTrait::impl::verifySameOperandsAndResultElementType(Operation *op) {
  if (failed(verifyAtLeastNOperands(op, 1)) ||
      failed(verifyAtLeastNResults(op, 1)))
    return failure();

  auto elementType = getElementTypeOrSelf(op->getResult(0));

  // Verify that each result's element type matches the first result's.
  for (auto result : llvm::drop_begin(op->getResults(), 1)) {
    if (getElementTypeOrSelf(result) != elementType)
      return op->emitOpError(
          "requires the same element type for all operands and results");
  }

  // Verify that each operand's element type matches the first result's.
  for (auto operand : op->getOperands()) {
    if (getElementTypeOrSelf(operand) != elementType)
      return op->emitOpError(
          "requires the same element type for all operands and results");
  }

  return success();
}

LogicalResult OpTrait::impl::verifySameOperandsAndResultType(Operation *op) {
  if (failed(verifyAtLeastNOperands(op, 1)) ||
      failed(verifyAtLeastNResults(op, 1)))
    return failure();

  auto type = op->getResult(0).getType();
  auto elementType = getElementTypeOrSelf(type);
  for (auto resultType : llvm::drop_begin(op->getResultTypes())) {
    if (getElementTypeOrSelf(resultType) != elementType ||
        failed(verifyCompatibleShape(resultType, type)))
      return op->emitOpError()
             << "requires the same type for all operands and results";
  }
  for (auto opType : op->getOperandTypes()) {
    if (getElementTypeOrSelf(opType) != elementType ||
        failed(verifyCompatibleShape(opType, type)))
      return op->emitOpError()
             << "requires the same type for all operands and results";
  }
  return success();
}

LogicalResult OpTrait::impl::verifyIsTerminator(Operation *op) {
  Block *block = op->getBlock();
  // Verify that the operation is at the end of its parent block.
  if (!block || &block->back() != op)
    return op->emitOpError("must be the last operation in the parent block");
  return success();
}

static LogicalResult verifyTerminatorSuccessors(Operation *op) {
  auto *parent = op->getParentRegion();

  // Verify that the successor blocks live in the same region as the
  // terminator itself.
  for (Block *succ : op->getSuccessors())
    if (succ->getParent() != parent)
      return op->emitError("reference to block defined in another region");
  return success();
}

LogicalResult OpTrait::impl::verifyZeroSuccessor(Operation *op) {
  if (op->getNumSuccessors() != 0) {
    return op->emitOpError("requires 0 successors but found ")
           << op->getNumSuccessors();
  }
  return success();
}

LogicalResult OpTrait::impl::verifyOneSuccessor(Operation *op) {
  if (op->getNumSuccessors() != 1) {
    return op->emitOpError("requires 1 successor but found ")
           << op->getNumSuccessors();
  }
  return verifyTerminatorSuccessors(op);
}

LogicalResult OpTrait::impl::verifyNSuccessors(Operation *op,
                                               unsigned numSuccessors) {
  if (op->getNumSuccessors() != numSuccessors) {
    return op->emitOpError("requires ")
           << numSuccessors << " successors but found "
           << op->getNumSuccessors();
  }
  return verifyTerminatorSuccessors(op);
}

LogicalResult OpTrait::impl::verifyAtLeastNSuccessors(Operation *op,
                                                      unsigned numSuccessors) {
  if (op->getNumSuccessors() < numSuccessors) {
    return op->emitOpError("requires at least ")
           << numSuccessors << " successors but found "
           << op->getNumSuccessors();
  }
  return verifyTerminatorSuccessors(op);
}

LogicalResult OpTrait::impl::verifyResultsAreBoolLike(Operation *op) {
  for (auto resultType : op->getResultTypes()) {
    auto elementType = getTensorOrVectorElementType(resultType);
    bool isBoolType = elementType.isInteger(1);
    if (!isBoolType)
      return op->emitOpError() << "requires a bool result type";
  }

  return success();
}

LogicalResult OpTrait::impl::verifyResultsAreFloatLike(Operation *op) {
  for (auto resultType : op->getResultTypes())
    if (!getTensorOrVectorElementType(resultType).isa<FloatType>())
      return op->emitOpError() << "requires a floating point type";

  return success();
}

LogicalResult
OpTrait::impl::verifyResultsAreSignlessIntegerLike(Operation *op) {
  for (auto resultType : op->getResultTypes())
    if (!getTensorOrVectorElementType(resultType).isSignlessIntOrIndex())
      return op->emitOpError() << "requires an integer or index type";
  return success();
}

LogicalResult OpTrait::impl::verifyValueSizeAttr(Operation *op,
                                                 StringRef attrName,
                                                 StringRef valueGroupName,
                                                 size_t expectedCount) {
  auto sizeAttr = op->getAttrOfType<DenseIntElementsAttr>(attrName);
  if (!sizeAttr)
    return op->emitOpError("requires 1D i32 elements attribute '")
           << attrName << "'";

  auto sizeAttrType = sizeAttr.getType();
  if (sizeAttrType.getRank() != 1 ||
      !sizeAttrType.getElementType().isInteger(32))
    return op->emitOpError("requires 1D i32 elements attribute '")
           << attrName << "'";

  if (llvm::any_of(sizeAttr.getValues<APInt>(), [](const APInt &element) {
        return !element.isNonNegative();
      }))
    return op->emitOpError("'")
           << attrName << "' attribute cannot have negative elements";

  size_t totalCount = std::accumulate(
      sizeAttr.begin(), sizeAttr.end(), 0,
      [](unsigned all, APInt one) { return all + one.getZExtValue(); });

  if (totalCount != expectedCount)
    return op->emitOpError()
           << valueGroupName << " count (" << expectedCount
           << ") does not match with the total size (" << totalCount
           << ") specified in attribute '" << attrName << "'";
  return success();
}

LogicalResult OpTrait::impl::verifyOperandSizeAttr(Operation *op,
                                                   StringRef attrName) {
  return verifyValueSizeAttr(op, attrName, "operand", op->getNumOperands());
}

LogicalResult OpTrait::impl::verifyResultSizeAttr(Operation *op,
                                                  StringRef attrName) {
  return verifyValueSizeAttr(op, attrName, "result", op->getNumResults());
}

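// As an illustration (assuming the conventional segment attribute name): an
// op using the AttrSizedOperandSegments trait with
//   operand_segment_sizes = dense<[2, 0, 1]> : vector<3xi32>
// declares three variadic operand groups of sizes 2, 0, and 1;
// verifyOperandSizeAttr then checks that the sum (3) matches the op's actual
// operand count.
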
LogicalResult OpTrait::impl::verifyNoRegionArguments(Operation *op) {
  for (Region &region : op->getRegions()) {
    if (region.empty())
      continue;

    if (region.getNumArguments() != 0) {
      if (op->getNumRegions() > 1)
        return op->emitOpError("region #")
               << region.getRegionNumber() << " should have no arguments";
      return op->emitOpError("region should have no arguments");
    }
  }
  return success();
}

LogicalResult OpTrait::impl::verifyElementwise(Operation *op) {
  auto isMappableType = [](Type type) {
    return type.isa<VectorType, TensorType>();
  };
  auto resultMappableTypes = llvm::to_vector<1>(
      llvm::make_filter_range(op->getResultTypes(), isMappableType));
  auto operandMappableTypes = llvm::to_vector<2>(
      llvm::make_filter_range(op->getOperandTypes(), isMappableType));

  // If the op only has scalar operand/result types, then we have nothing to
  // check.
  if (resultMappableTypes.empty() && operandMappableTypes.empty())
    return success();

  if (!resultMappableTypes.empty() && operandMappableTypes.empty())
    return op->emitOpError("if a result is non-scalar, then at least one "
                           "operand must be non-scalar");

  assert(!operandMappableTypes.empty());

  if (resultMappableTypes.empty())
    return op->emitOpError("if an operand is non-scalar, then there must be at "
                           "least one non-scalar result");

  if (resultMappableTypes.size() != op->getNumResults())
    return op->emitOpError(
        "if an operand is non-scalar, then all results must be non-scalar");

  SmallVector<Type, 4> types = llvm::to_vector<2>(
      llvm::concat<Type>(operandMappableTypes, resultMappableTypes));
  TypeID expectedBaseTy = types.front().getTypeID();
  if (!llvm::all_of(types,
                    [&](Type t) { return t.getTypeID() == expectedBaseTy; }) ||
      failed(verifyCompatibleShapes(types))) {
    return op->emitOpError() << "all non-scalar operands/results must have the "
                                "same shape and base type";
  }

  return success();
}

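// Rough intuition for the above (hypothetical op name): an elementwise
// `my.mulf` may mix f32 scalars with tensor<4xf32> operands/results, but
// mixing tensor<4xf32> with vector<4xf32> (different base type) or with
// tensor<8xf32> (incompatible shape) is rejected.
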
/// Check for any values used by regions attached to the specified
/// "IsIsolatedFromAbove" operation that are defined outside of it.
LogicalResult OpTrait::impl::verifyIsIsolatedFromAbove(Operation *isolatedOp) {
  assert(isolatedOp->hasTrait<OpTrait::IsIsolatedFromAbove>() &&
         "Intended to check IsolatedFromAbove ops");

  // List of regions to analyze. Each region is processed independently, with
  // respect to the common `limit` region, so we can look at them in any order.
  // Therefore, use a simple vector and push/pop back the current region.
  SmallVector<Region *, 8> pendingRegions;
  for (auto &region : isolatedOp->getRegions()) {
    pendingRegions.push_back(&region);

    // Traverse all operations in the region.
    while (!pendingRegions.empty()) {
      for (Operation &op : pendingRegions.pop_back_val()->getOps()) {
        for (Value operand : op.getOperands()) {
          // The operand should be non-null here if the IR is well-formed, but
          // we don't assert since this function is called from the verifier
          // and so may run on invalid IR.
          if (!operand)
            return op.emitOpError("operation's operand is null");

          // Check that any value used by an operation in this region is
          // defined within the isolated region itself.
          auto *operandRegion = operand.getParentRegion();
          if (!region.isAncestor(operandRegion)) {
            return op.emitOpError("using value defined outside the region")
                       .attachNote(isolatedOp->getLoc())
                   << "required by region isolation constraints";
          }
        }

        // Schedule any regions in the operation for further checking. Don't
        // recurse into other IsolatedFromAbove ops, because they will check
        // themselves.
        if (op.getNumRegions() &&
            !op.hasTrait<OpTrait::IsIsolatedFromAbove>()) {
          for (Region &subRegion : op.getRegions())
            pendingRegions.push_back(&subRegion);
        }
      }
    }
  }

  return success();
}

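// For instance, function-like ops are typically IsolatedFromAbove: operations
// nested in a function body may not use SSA values defined outside of it. A
// non-isolated region op such as a loop body, by contrast, may freely
// reference values from enclosing scopes.
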
bool OpTrait::hasElementwiseMappableTraits(Operation *op) {
  return op->hasTrait<Elementwise>() && op->hasTrait<Scalarizable>() &&
         op->hasTrait<Vectorizable>() && op->hasTrait<Tensorizable>();
}

//===----------------------------------------------------------------------===//
// BinaryOp implementation
//===----------------------------------------------------------------------===//

// These functions are out-of-line implementations of the methods in BinaryOp,
// which avoids them being template instantiated/duplicated.

void impl::buildBinaryOp(OpBuilder &builder, OperationState &result, Value lhs,
                         Value rhs) {
  assert(lhs.getType() == rhs.getType());
  result.addOperands({lhs, rhs});
  result.types.push_back(lhs.getType());
}

ParseResult impl::parseOneResultSameOperandTypeOp(OpAsmParser &parser,
                                                  OperationState &result) {
  SmallVector<OpAsmParser::OperandType, 2> ops;
  Type type;
  // If the operand list is enclosed in parentheses, then we have the generic
  // form (see the fallback in `printOneResultOp`).
  llvm::SMLoc loc = parser.getCurrentLocation();
  if (!parser.parseOptionalLParen()) {
    if (parser.parseOperandList(ops) || parser.parseRParen() ||
        parser.parseOptionalAttrDict(result.attributes) ||
        parser.parseColon() || parser.parseType(type))
      return failure();
    auto fnType = type.dyn_cast<FunctionType>();
    if (!fnType) {
      parser.emitError(loc, "expected function type");
      return failure();
    }
    if (parser.resolveOperands(ops, fnType.getInputs(), loc, result.operands))
      return failure();
    result.addTypes(fnType.getResults());
    return success();
  }
  return failure(parser.parseOperandList(ops) ||
                 parser.parseOptionalAttrDict(result.attributes) ||
                 parser.parseColonType(type) ||
                 parser.resolveOperands(ops, type, result.operands) ||
                 parser.addTypeToList(type, result.types));
}

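// The two accepted forms thus look like (illustrative op name):
//   %0 = my.add %a, %b : i32                    // shorthand, one type for all
//   %0 = my.add(%a, %b) : (i32, i32) -> i32     // parenthesized fallback form
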
void impl::printOneResultOp(Operation *op, OpAsmPrinter &p) {
  assert(op->getNumResults() == 1 && "op should have one result");

  // If not all the operand and result types are the same, just use the
  // generic assembly form to avoid omitting information in printing.
  auto resultType = op->getResult(0).getType();
  if (llvm::any_of(op->getOperandTypes(),
                   [&](Type type) { return type != resultType; })) {
    p.printGenericOp(op, /*printOpName=*/false);
    return;
  }

  p << ' ';
  p.printOperands(op->getOperands());
  p.printOptionalAttrDict(op->getAttrs());
  // Now we can output only one type for all operands and the result.
  p << " : " << resultType;
}

//===----------------------------------------------------------------------===//
// CastOp implementation
//===----------------------------------------------------------------------===//

/// Attempt to fold the given cast operation.
LogicalResult
impl::foldCastInterfaceOp(Operation *op, ArrayRef<Attribute> attrOperands,
                          SmallVectorImpl<OpFoldResult> &foldResults) {
  OperandRange operands = op->getOperands();
  if (operands.empty())
    return failure();
  ResultRange results = op->getResults();

  // Check for the case where the input and output types match 1-1.
  if (operands.getTypes() == results.getTypes()) {
    foldResults.append(operands.begin(), operands.end());
    return success();
  }

  return failure();
}

/// Attempt to verify the given cast operation.
LogicalResult impl::verifyCastInterfaceOp(
    Operation *op, function_ref<bool(TypeRange, TypeRange)> areCastCompatible) {
  auto resultTypes = op->getResultTypes();
  if (llvm::empty(resultTypes))
    return op->emitOpError()
           << "expected at least one result for cast operation";

  auto operandTypes = op->getOperandTypes();
  if (!areCastCompatible(operandTypes, resultTypes)) {
    InFlightDiagnostic diag = op->emitOpError("operand type");
    if (llvm::empty(operandTypes))
      diag << "s []";
    else if (llvm::size(operandTypes) == 1)
      diag << " " << *operandTypes.begin();
    else
      diag << "s " << operandTypes;
    return diag << " and result type" << (resultTypes.size() == 1 ? " " : "s ")
                << resultTypes << " are cast incompatible";
  }

  return success();
}

void impl::buildCastOp(OpBuilder &builder, OperationState &result, Value source,
                       Type destType) {
  result.addOperands(source);
  result.addTypes(destType);
}

ParseResult impl::parseCastOp(OpAsmParser &parser, OperationState &result) {
  OpAsmParser::OperandType srcInfo;
  Type srcType, dstType;
  return failure(parser.parseOperand(srcInfo) ||
                 parser.parseOptionalAttrDict(result.attributes) ||
                 parser.parseColonType(srcType) ||
                 parser.resolveOperand(srcInfo, srcType, result.operands) ||
                 parser.parseKeywordType("to", dstType) ||
                 parser.addTypeToList(dstType, result.types));
}

void impl::printCastOp(Operation *op, OpAsmPrinter &p) {
  p << ' ' << op->getOperand(0);
  p.printOptionalAttrDict(op->getAttrs());
  p << " : " << op->getOperand(0).getType() << " to "
    << op->getResult(0).getType();
}

Value impl::foldCastOp(Operation *op) {
  // Identity casts fold to their input value.
  if (op->getOperand(0).getType() == op->getResult(0).getType())
    return op->getOperand(0);
  return nullptr;
}

LogicalResult
impl::verifyCastOp(Operation *op,
                   function_ref<bool(Type, Type)> areCastCompatible) {
  auto opType = op->getOperand(0).getType();
  auto resType = op->getResult(0).getType();
  if (!areCastCompatible(opType, resType))
    return op->emitError("operand type ")
           << opType << " and result type " << resType
           << " are cast incompatible";

  return success();
}

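// The parse/print pair above corresponds to cast syntax of the form
// (illustrative op name):
//   %1 = my.cast %0 : i32 to i64
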
//===----------------------------------------------------------------------===//
// Misc. utils
//===----------------------------------------------------------------------===//

/// Insert an operation, generated by `buildTerminatorOp`, at the end of the
/// region's only block if it does not have a terminator already. If the region
/// is empty, insert a new block first. `buildTerminatorOp` should return the
/// terminator operation to insert.
void impl::ensureRegionTerminator(
    Region &region, OpBuilder &builder, Location loc,
    function_ref<Operation *(OpBuilder &, Location)> buildTerminatorOp) {
  OpBuilder::InsertionGuard guard(builder);
  if (region.empty())
    builder.createBlock(&region);

  Block &block = region.back();
  if (!block.empty() && block.back().hasTrait<OpTrait::IsTerminator>())
    return;

  builder.setInsertionPointToEnd(&block);
  builder.insert(buildTerminatorOp(builder, loc));
}

/// Create a simple OpBuilder and forward to the OpBuilder version of this
/// function.
void impl::ensureRegionTerminator(
    Region &region, Builder &builder, Location loc,
    function_ref<Operation *(OpBuilder &, Location)> buildTerminatorOp) {
  OpBuilder opBuilder(builder.getContext());
  ensureRegionTerminator(region, opBuilder, loc, buildTerminatorOp);
}
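
// A minimal usage sketch (assuming a hypothetical `MyYieldOp` terminator):
//   impl::ensureRegionTerminator(
//       op.getRegion(), builder, op.getLoc(),
//       [](OpBuilder &b, Location loc) {
//         return b.create<MyYieldOp>(loc).getOperation();
//       });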