2021-02-21 14:46:27 +08:00
|
|
|
//===- mlir-linalg-ods-yaml-gen.cpp - Linalg ODS generation from yaml ----===//
|
|
|
|
//
|
|
|
|
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
|
|
|
// See https://llvm.org/LICENSE.txt for license information.
|
|
|
|
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//
|
|
|
|
// This file implements an ODS (and C++) generator from a YAML form
|
|
|
|
// derived from the mathematical expression of linalg named ops. Typically a
|
|
|
|
// math oriented DSL will be used to export the essential representation to
|
|
|
|
// this form, and maintaining the SOT at the math level (versus recreating it
|
|
|
|
// in MLIR) is deemed to have systemic value.
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
#include "mlir/IR/AffineMap.h"
|
|
|
|
#include "mlir/IR/MLIRContext.h"
|
|
|
|
#include "mlir/Parser.h"
|
|
|
|
#include "mlir/Support/FileUtilities.h"
|
|
|
|
#include "mlir/Support/LLVM.h"
|
|
|
|
#include "llvm/ADT/Optional.h"
|
|
|
|
#include "llvm/ADT/StringRef.h"
|
|
|
|
#include "llvm/Support/CommandLine.h"
|
|
|
|
#include "llvm/Support/Debug.h"
|
|
|
|
#include "llvm/Support/FormatVariadic.h"
|
|
|
|
#include "llvm/Support/ToolOutputFile.h"
|
|
|
|
#include "llvm/Support/YAMLTraits.h"
|
|
|
|
|
|
|
|
using namespace mlir;
|
|
|
|
|
|
|
|
using llvm::yaml::Input;
|
|
|
|
using llvm::yaml::MappingTraits;
|
|
|
|
using llvm::yaml::ScalarEnumerationTraits;
|
|
|
|
using llvm::yaml::ScalarTraits;
|
|
|
|
|
|
|
|
#define DEBUG_TYPE "linalg-ods-gen"
|
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// Mapping structs (correspond to data types in the YAML description).
|
|
|
|
// TODO: Since this is a schema/part of the contract, it should be moved to
|
|
|
|
// a real header.
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
namespace {
|
|
|
|
|
|
|
|
/// Context threaded through the YAML `IO` object so that custom scalar
/// parsers (e.g. the affine map parser below) can reach the MLIRContext.
struct LinalgYAMLContext {
  MLIRContext *mlirContext;
};
|
|
|
|
|
|
|
|
/// Metadata for one op definition: assembly name, generated C++ class name,
/// optional documentation, and the op interfaces it implements.
struct LinalgOpMetadata {
  // Assembly mnemonic of the op (e.g. "matmul").
  std::string name;
  // Name of the generated ODS/C++ op class.
  std::string cppClassName;
  // Optional summary/description text emitted into the ODS documentation.
  Optional<std::string> doc;
  // Names of op interfaces the generated op declares.
  SmallVector<std::string> implements;
};
|
|
|
|
|
|
|
|
/// Wrapper around an AffineMapAttr that is (de)serialized to/from its textual
/// attribute form by the `ScalarTraits<SerializedAffineMap>` specialization.
struct SerializedAffineMap {
  AffineMapAttr affineMapAttr;

  // Convenience accessor for the underlying AffineMap.
  AffineMap affineMap() { return affineMapAttr.getValue(); }
};
|
|
|
|
|
2022-02-14 20:12:15 +08:00
|
|
|
/// How a named operand is used by the op: as a tensor/scalar input, a tensor
/// output, or an index attribute consumed by the indexing maps.
enum class LinalgOperandDefUsage { Input, Output, IndexAttr };
|
2021-02-21 14:46:27 +08:00
|
|
|
|
2021-06-15 21:32:12 +08:00
|
|
|
/// Description of a single named operand (tensor, scalar, or attribute).
/// See the MappingTraits specialization below for the YAML field semantics.
struct LinalgOperandDef {
  // Unique operand name within the op.
  std::string name;
  // How the operand is used (input, output, or index attribute).
  LinalgOperandDefUsage usage;
  // Symbolic type variable bound to the element/self type. Optional in the
  // schema; expected for input/output operands.
  Optional<std::string> typeVar;
  // Map from op symbols to the operand's shape (tensor operands only).
  Optional<SerializedAffineMap> shapeMap;
  // Map from op symbols to index attribute symbols (index attrs only).
  Optional<SerializedAffineMap> indexAttrMap;
  // Optional default values for index attribute operands.
  Optional<SmallVector<int64_t>> defaultVals;
};
|
|
|
|
|
|
|
|
/// Iterator kinds supported by structured ops.
enum class LinalgIteratorTypeDef {
  parallel,
  reduction,
};
|
|
|
|
|
|
|
|
/// How the op's indexing maps are specified. Currently only a static list of
/// serialized AffineMaps is supported (see MappingTraits below).
struct LinalgIndexingMapsConfig {
  Optional<SmallVector<SerializedAffineMap>> staticIndexingMaps;
};
|
|
|
|
|
|
|
|
struct ScalarExpression;
|
|
|
|
|
2022-01-07 20:37:52 +08:00
|
|
|
/// Application of a named arithmetic function (e.g. add, mul) to a list of
/// scalar operand expressions.
struct ScalarArithFn {
  std::string fnName;
  // NOTE: Must be pure heap allocated container (not SmallVector)
  // due to recursive data type.
  std::vector<ScalarExpression> operands;
};
|
|
|
|
|
2022-01-07 20:23:11 +08:00
|
|
|
/// Application of a named type conversion function that converts its single
/// operand expression to the type bound to `typeVar`.
struct ScalarTypeFn {
  std::string fnName;
  // Symbolic type variable naming the target type of the conversion.
  std::string typeVar;
  // NOTE: This must be of arity 1, but to break the self-referential cycle,
  // we use a heap allocated vector.
  std::vector<ScalarExpression> operands;
};
|
|
|
|
|
2021-02-21 14:46:27 +08:00
|
|
|
/// A scalar expression node. Exactly one of the optional members is expected
/// to be set by the YAML document: an argument reference, a constant, an
/// iteration index, an arithmetic function, or a type conversion function.
struct ScalarExpression {
  Optional<std::string> arg;
  Optional<std::string> constant;
  Optional<int64_t> index;
  Optional<ScalarArithFn> arithFn;
  Optional<ScalarTypeFn> typeFn;
};
|
|
|
|
|
|
|
|
/// Assignment of a scalar expression to a named output argument.
struct ScalarAssign {
  // Name of the output argument being assigned.
  std::string arg;
  // Expression computing the assigned value.
  ScalarExpression value;
};
|
|
|
|
|
|
|
|
/// Full configuration of a structured op: named operands, indexing maps,
/// iterator types, and the scalar assignments forming the op body.
struct LinalgStructuredOpConfig {
  SmallVector<LinalgOperandDef> args;
  LinalgIndexingMapsConfig indexingMaps;
  SmallVector<LinalgIteratorTypeDef> iteratorTypes;
  std::vector<ScalarAssign> assignments;
};
|
|
|
|
|
|
|
|
/// One YAML document: op metadata plus (currently) an optional structured op
/// configuration.
struct LinalgOpConfig {
  Optional<LinalgOpMetadata> metadata;
  Optional<LinalgStructuredOpConfig> structuredOp;
};
|
|
|
|
|
|
|
|
} // namespace
|
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// Mapping traits.
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2021-06-15 21:32:12 +08:00
|
|
|
LLVM_YAML_IS_SEQUENCE_VECTOR(LinalgOperandDef)
|
2021-03-02 05:48:22 +08:00
|
|
|
LLVM_YAML_IS_SEQUENCE_VECTOR(SerializedAffineMap)
|
|
|
|
LLVM_YAML_IS_SEQUENCE_VECTOR(LinalgIteratorTypeDef)
|
|
|
|
LLVM_YAML_IS_SEQUENCE_VECTOR(ScalarAssign)
|
|
|
|
LLVM_YAML_IS_SEQUENCE_VECTOR(ScalarExpression)
|
|
|
|
LLVM_YAML_IS_DOCUMENT_LIST_VECTOR(LinalgOpConfig)
|
2021-02-21 14:46:27 +08:00
|
|
|
|
|
|
|
namespace llvm {
|
|
|
|
namespace yaml {
|
|
|
|
|
|
|
|
/// Top-level type containing op metadata and one of a concrete op type.
/// Currently, the only defined op type is `structured_op` (maps to
/// `LinalgStructuredOpConfig`).
template <>
struct MappingTraits<LinalgOpConfig> {
  static void mapping(IO &io, LinalgOpConfig &info) {
    // Both sections are optional; the key names are the YAML schema contract.
    io.mapOptional("metadata", info.metadata);
    io.mapOptional("structured_op", info.structuredOp);
  }
};
|
|
|
|
|
|
|
|
/// A structured op models (at most) a single contraction by modeling
///   - A list of named arguments (`LinalgOperandDef`), which can be inputs,
///     outputs, or index attributes.
///   - List of indexing maps (see `LinalgIndexingMaps`).
///   - Iterator types (see `LinalgIteratorTypeDef`).
///   - List of scalar level assignment (see `ScalarAssign`).
template <>
struct MappingTraits<LinalgStructuredOpConfig> {
  static void mapping(IO &io, LinalgStructuredOpConfig &info) {
    // All four sections are mandatory parts of the schema.
    io.mapRequired("args", info.args);
    io.mapRequired("indexing_maps", info.indexingMaps);
    io.mapRequired("iterator_types", info.iteratorTypes);
    io.mapRequired("assignments", info.assignments);
  }
};
|
|
|
|
|
2021-06-24 17:56:16 +08:00
|
|
|
/// Maps a named tensor, scalar or attribute argument to an operation,
/// consisting of:
///   - `name`: Must be unique within the operation.
///   - `usage`: How the argument is used (input, output, attribute, etc).
///   - `type_var`: The symbolic type variable that binds to the element or
///     self type of the tensor or scalar argument, respectively.
///   - `shape_map`: An optional AffineMap from all op symbols to the shape of
///     the argument. Only tensor arguments have a `shape_map`. Each shape
///     must be normalized over the same list of symbols and have no dimension
///     inputs.
///   - `index_attr_map`: An optional AffineMap from all op symbols to the
///     index attribute symbols. During op creation these symbols are replaced
///     by the corresponding `name` index attribute values. Only index
///     attribute arguments have an `index_attr_map`.
///   - `default_vals`: An optional default initialization for index attribute
///     arguments.
template <>
struct MappingTraits<LinalgOperandDef> {
  static void mapping(IO &io, LinalgOperandDef &info) {
    io.mapRequired("name", info.name);
    io.mapRequired("usage", info.usage);
    // The remaining fields are usage-dependent and therefore optional.
    io.mapOptional("type_var", info.typeVar);
    io.mapOptional("shape_map", info.shapeMap);
    io.mapOptional("index_attr_map", info.indexAttrMap);
    io.mapOptional("default_vals", info.defaultVals);
  }
};
|
|
|
|
|
|
|
|
/// Usage enum for a named argument. The string values are the spellings
/// accepted in the YAML documents.
template <>
struct ScalarEnumerationTraits<LinalgOperandDefUsage> {
  static void enumeration(IO &io, LinalgOperandDefUsage &value) {
    io.enumCase(value, "Input", LinalgOperandDefUsage::Input);
    io.enumCase(value, "Output", LinalgOperandDefUsage::Output);
    io.enumCase(value, "IndexAttr", LinalgOperandDefUsage::IndexAttr);
  }
};
|
|
|
|
|
|
|
|
/// Iterator type enum. The string values are the spellings accepted in the
/// YAML documents.
template <>
struct ScalarEnumerationTraits<LinalgIteratorTypeDef> {
  static void enumeration(IO &io, LinalgIteratorTypeDef &value) {
    io.enumCase(value, "parallel", LinalgIteratorTypeDef::parallel);
    io.enumCase(value, "reduction", LinalgIteratorTypeDef::reduction);
  }
};
|
|
|
|
|
|
|
|
/// Metadata about the op (name, C++ name, and documentation).
template <>
struct MappingTraits<LinalgOpMetadata> {
  static void mapping(IO &io, LinalgOpMetadata &info) {
    io.mapRequired("name", info.name);
    io.mapRequired("cpp_class_name", info.cppClassName);
    io.mapOptional("doc", info.doc);
    io.mapOptional("implements", info.implements);
  }
};
|
|
|
|
|
|
|
|
/// How the ops indexing maps are produced. Must be one of:
///   - static_indexing_maps: A static list of AffineMaps, possibly with
///     some symbols that bind to attributes of the op. Each indexing map must
///     be normalized over the same list of dimensions, and its symbols must
///     match the symbols for argument shapes.
template <>
struct MappingTraits<LinalgIndexingMapsConfig> {
  static void mapping(IO &io, LinalgIndexingMapsConfig &info) {
    io.mapOptional("static_indexing_maps", info.staticIndexingMaps);
  }
};
|
|
|
|
|
|
|
|
/// Models an assignment to a named output.
///   - The `arg` name must match a named output.
///   - The `value` is a scalar expression for computing the value to
///     assign (see `ScalarExpression`).
template <>
struct MappingTraits<ScalarAssign> {
  static void mapping(IO &io, ScalarAssign &info) {
    io.mapRequired("arg", info.arg);
    io.mapRequired("value", info.value);
  }
};
|
|
|
|
|
|
|
|
/// A scalar expression (RHS of an assignment). Must be one of:
///   - `scalar_arg`: An operation argument.
///   - `scalar_const`: A constant definition.
///   - `scalar_index`: An iteration index.
///   - `arith_fn`: A named arithmetic function (see `ScalarArithFn`).
///   - `type_fn`: A named type conversion function (see `ScalarTypeFn`).
template <>
struct MappingTraits<ScalarExpression> {
  static void mapping(IO &io, ScalarExpression &info) {
    // All keys are optional at the schema level; exactly one is expected to
    // be present in a well-formed document.
    io.mapOptional("scalar_arg", info.arg);
    io.mapOptional("scalar_const", info.constant);
    io.mapOptional("scalar_index", info.index);
    io.mapOptional("arith_fn", info.arithFn);
    io.mapOptional("type_fn", info.typeFn);
  }
};
|
|
|
|
|
|
|
|
/// A scalar expression that evaluates a named function.
/// Functions are generally "math" level and type polymorphic. Builtin
/// functions include:
///   - `add(lhs, rhs)`
///   - `mul(lhs, rhs)`
template <>
struct MappingTraits<ScalarArithFn> {
  static void mapping(IO &io, ScalarArithFn &info) {
    io.mapRequired("fn_name", info.fnName);
    io.mapRequired("operands", info.operands);
  }
};
|
|
|
|
|
2022-01-07 20:23:11 +08:00
|
|
|
/// A scalar expression that applies a named type conversion function to its
/// single operand, targeting the type bound to `type_var`.
template <>
struct MappingTraits<ScalarTypeFn> {
  static void mapping(IO &io, ScalarTypeFn &info) {
    io.mapRequired("fn_name", info.fnName);
    io.mapRequired("type_var", info.typeVar);
    io.mapRequired("operands", info.operands);
  }
};
|
|
|
|
|
2021-02-21 14:46:27 +08:00
|
|
|
/// Helper mapping which accesses an AffineMapAttr as a serialized string of
/// the same.
template <>
struct ScalarTraits<SerializedAffineMap> {
  static void output(const SerializedAffineMap &value, void *rawYamlContext,
                     raw_ostream &out) {
    assert(value.affineMapAttr);
    value.affineMapAttr.print(out);
  }
  static StringRef input(StringRef scalar, void *rawYamlContext,
                         SerializedAffineMap &value) {
    assert(rawYamlContext);
    // The YAML context carries the MLIRContext needed by the attr parser.
    auto *yamlContext = static_cast<LinalgYAMLContext *>(rawYamlContext);
    if (auto attr = mlir::parseAttribute(scalar, yamlContext->mlirContext)
                        .dyn_cast_or_null<AffineMapAttr>())
      value.affineMapAttr = attr;
    // NOTE(review): if parsing fails but `value` already holds an attribute
    // from an earlier parse, this branch is skipped and the stale value is
    // silently kept instead of reporting an error — confirm this is intended.
    else if (!value.affineMapAttr || !value.affineMapAttr.isa<AffineMapAttr>())
      return "could not parse as an affine map attribute";
    // Empty StringRef signals success to the YAML framework.
    return StringRef();
  }
  static QuotingType mustQuote(StringRef) { return QuotingType::None; }
};
|
|
|
|
|
|
|
|
} // namespace yaml
|
|
|
|
} // namespace llvm
|
|
|
|
|
|
|
|
namespace {
|
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// Generation utilities
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
/// State for one generation run: the MLIRContext, the current source
/// location (used for diagnostics), and the optional output streams for the
/// ODS and C++ definition files. A null stream disables that output kind.
class GenerationContext {
public:
  GenerationContext(MLIRContext *context, raw_ostream *odsOut,
                    raw_ostream *defnOut)
      : context(context), loc(UnknownLoc::get(context)), odsOut(odsOut),
        defnOut(defnOut) {}

  MLIRContext *getContext() { return context; }

  void setLoc(Location loc) { this->loc = loc; }
  Location getLoc() { return loc; }

  // Whether the corresponding output stream was provided.
  bool shouldGenerateOds() { return odsOut; }
  bool shouldGenerateDefns() { return defnOut; }

  // Stream accessors; only valid when the matching should* query is true.
  raw_ostream &odss() {
    assert(odsOut && "ODS stream not defined");
    return *odsOut;
  }

  raw_ostream &defns() {
    assert(defnOut && "Definition stream not defined");
    return *defnOut;
  }

private:
  MLIRContext *context;
  Location loc;
  raw_ostream *odsOut;
  raw_ostream *defnOut;
};
|
|
|
|
|
|
|
|
} // namespace
|
|
|
|
|
|
|
|
/// Returns a C++ expression string that, when compiled into generated code,
/// re-parses `self` as an AffineMapAttr using `contextName` as the
/// MLIRContext expression.
static std::string generateCppExpression(SerializedAffineMap self,
                                         StringRef contextName) {
  // Serialize the attribute into its textual form first.
  std::string mapAsString;
  llvm::raw_string_ostream mapStream(mapAsString);
  self.affineMapAttr.print(mapStream);
  mapStream.flush();

  static const char exprFormat[] =
      R"FMT(mlir::parseAttribute("{0}", {1}).cast<AffineMapAttr>().getValue())FMT";
  return llvm::formatv(exprFormat, mapAsString, contextName);
}
|
|
|
|
|
|
|
|
template <typename Container>
|
|
|
|
static std::string interleaveToString(Container &container,
|
|
|
|
StringRef separator) {
|
|
|
|
std::string result;
|
|
|
|
llvm::raw_string_ostream ss(result);
|
|
|
|
llvm::interleave(container, ss, separator);
|
|
|
|
ss.flush();
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Returns the position of the argument named `name` in `args`, or None when
/// no argument carries that name.
static Optional<int>
findTensorDefArgIndex(StringRef name, SmallVectorImpl<LinalgOperandDef> &args) {
  for (int i = 0, e = args.size(); i != e; ++i) {
    if (args[i].name == name)
      return i;
  }
  return None;
}
|
|
|
|
|
2021-05-19 21:10:28 +08:00
|
|
|
// Try to map the TypeVar to a predefined or an argument type.
|
|
|
|
static Optional<std::string>
|
2021-06-15 21:32:12 +08:00
|
|
|
findTypeValue(StringRef typeVar, SmallVectorImpl<LinalgOperandDef> &args) {
|
2021-05-19 21:10:28 +08:00
|
|
|
// Handle all predefined types.
|
|
|
|
if (typeVar == "I32")
|
|
|
|
return std::string("helper.getIntegerType(32)");
|
|
|
|
if (typeVar == "I64")
|
|
|
|
return std::string("helper.getIntegerType(64)");
|
|
|
|
if (typeVar == "F32")
|
|
|
|
return std::string("helper.getFloat32Type()");
|
|
|
|
if (typeVar == "F64")
|
|
|
|
return std::string("helper.getFloat64Type()");
|
|
|
|
|
|
|
|
// Search all argument types.
|
2021-12-24 06:13:06 +08:00
|
|
|
for (const auto &it : llvm::enumerate(args)) {
|
2022-02-14 20:12:15 +08:00
|
|
|
if (it.value().usage != LinalgOperandDefUsage::Input &&
|
|
|
|
it.value().usage != LinalgOperandDefUsage::Output)
|
|
|
|
continue;
|
|
|
|
if (it.value().typeVar.getValue() == typeVar)
|
2021-05-19 21:10:28 +08:00
|
|
|
return llvm::formatv("block.getArgument({0}).getType()", it.index())
|
|
|
|
.str();
|
[mlir][linalg] Add symbolic type conversion to linalg named ops.
This enables this kind of construct in the DSL to generate a named op that is polymorphic over numeric type variables `T` and `U`, generating the correct arithmetic casts at construction time:
```
@tc_def_op
def polymorphic_matmul(A=TensorDef(T1, S.M, S.K),
B=TensorDef(T2, S.K, S.N),
C=TensorDef(U, S.M, S.N, output=True)):
implements(ContractionOpInterface)
C[D.m, D.n] += cast(U, A[D.m, D.k]) * cast(U, B[D.k, D.n])
```
Presently, this only supports type variables that are bound to the element type of one of the arguments, although a further extension that allows binding a type variable to an attribute would allow some more expressiveness and may be useful for some formulations. This is left to a future patch. In addition, this patch does not yet materialize the verifier support which ensures that types are bound correctly (for such simple examples, failing to do so will yield IR that fails verification, it just won't yet fail with a precise error).
Note that the full grid of extensions/truncation/int<->float conversions are supported, but many of them are lossy and higher level code needs to be mindful of numerics (it is not the job of this level).
As-is, this should be sufficient for most integer matmul scenarios we work with in typical quantization schemes.
Differential Revision: https://reviews.llvm.org/D97603
2021-02-27 10:01:15 +08:00
|
|
|
}
|
2021-05-19 21:10:28 +08:00
|
|
|
|
[mlir][linalg] Add symbolic type conversion to linalg named ops.
This enables this kind of construct in the DSL to generate a named op that is polymorphic over numeric type variables `T` and `U`, generating the correct arithmetic casts at construction time:
```
@tc_def_op
def polymorphic_matmul(A=TensorDef(T1, S.M, S.K),
B=TensorDef(T2, S.K, S.N),
C=TensorDef(U, S.M, S.N, output=True)):
implements(ContractionOpInterface)
C[D.m, D.n] += cast(U, A[D.m, D.k]) * cast(U, B[D.k, D.n])
```
Presently, this only supports type variables that are bound to the element type of one of the arguments, although a further extension that allows binding a type variable to an attribute would allow some more expressiveness and may be useful for some formulations. This is left to a future patch. In addition, this patch does not yet materialize the verifier support which ensures that types are bound correctly (for such simple examples, failing to do so will yield IR that fails verification, it just won't yet fail with a precise error).
Note that the full grid of extensions/truncation/int<->float conversions are supported, but many of them are lossy and higher level code needs to be mindful of numerics (it is not the job of this level).
As-is, this should be sufficient for most integer matmul scenarios we work with in typical quantization schemes.
Differential Revision: https://reviews.llvm.org/D97603
2021-02-27 10:01:15 +08:00
|
|
|
return None;
|
|
|
|
}
|
|
|
|
|
2021-06-15 21:32:12 +08:00
|
|
|
/// Looks up the assignment whose target argument is `name`. Returns nullptr
/// when no assignment writes that argument.
static ScalarAssign *findAssignment(StringRef name,
                                    std::vector<ScalarAssign> &assignments) {
  for (size_t i = 0, e = assignments.size(); i != e; ++i) {
    if (assignments[i].arg == name)
      return &assignments[i];
  }
  return nullptr;
}
|
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// Templates
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
// A single line banner format, emitted above each generated section.
// Parameters:
// {0}: Single line comment
static const char bannerFormat[] = R"FMT(
//===----------------------------------------------------------------------===//
// {0}
//===----------------------------------------------------------------------===//
)FMT";
|
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// Named generic op generation.
|
|
|
|
// These ops map at most a single contraction that complies with the limitations
|
|
|
|
// of a linalg.generic.
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
// Template for Linalg named ops' ODS definitions. Parameters:
// {0}: ODS/C++ op name
// {1}: assembly op mnemonic
// {2}: op interface list
// {3}: documentation (summary + description)
// {4}: op attribute list
// {5}: builder methods taking standalone attribute parameters
// {6}: additional methods for attributes used by indexing maps
// Note: `{{` sequences inside the template are literal braces escaped for
// llvm::formatv.
static const char structuredOpOdsHeaderFormat[] = R"FMT(
//===----------------------------------------------------------------------===//
// Op definition for {0}
//===----------------------------------------------------------------------===//

def {0} : LinalgStructuredBase_Op<"{1}", !listconcat([AttrSizedOperandSegments],
  /*extraInterfaces=*/[{2}])> {
    {3}
    let arguments = (ins
      Variadic<AnyType>:$inputs,
      Variadic<AnyShaped>:$outputs{4}
    );
    let results = (outs Variadic<AnyRankedTensor>:$result_tensors);
    let regions = (region AnyRegion:$region);

    let skipDefaultBuilders = 1;
    let builders = [
      OpBuilder<
      (ins "ValueRange":$inputs, "ValueRange":$outputs,
            CArg<"ArrayRef<NamedAttribute>", "{{}">:$attributes),
      [{{
        $_state.addOperands(inputs);
        $_state.addOperands(outputs);
        SmallVector<Type> resultTensorTypes;
        copy_if(outputs.getTypes(),
                std::back_inserter(resultTensorTypes),
                [](Type type) {{ return type.isa<RankedTensorType>(); });
        $_state.addTypes(resultTensorTypes);
        $_state.addAttribute(
          "operand_segment_sizes",
          $_builder.getI32VectorAttr({{
            static_cast<int32_t>(inputs.size()),
            static_cast<int32_t>(outputs.size())}));
        $_state.addAttributes(attributes);
        createAndFillStructuredOpRegion<{0}>(
          $_builder,
          $_state,
          TypeRange(inputs),
          TypeRange(outputs));
      }]>,
      OpBuilder<
      (ins "TypeRange":$resultTensorTypes, "ValueRange":$inputs,
           "ValueRange":$outputs,
           CArg<"ArrayRef<NamedAttribute>", "{{}">:$attributes),
      [{{
        $_state.addOperands(inputs);
        $_state.addOperands(outputs);
        $_state.addTypes(resultTensorTypes);
        $_state.addAttributes(attributes);
        $_state.addAttribute(
          "operand_segment_sizes",
          $_builder.getI32VectorAttr({{
            static_cast<int32_t>(inputs.size()),
            static_cast<int32_t>(outputs.size())}));
        createAndFillStructuredOpRegion<{0}>(
          $_builder,
          $_state,
          TypeRange(inputs),
          TypeRange(outputs));
      }]>,
      OpBuilder<
      (ins "TypeRange":$resultTensorTypes, "ValueRange":$operands,
           CArg<"ArrayRef<NamedAttribute>", "{{}">:$attributes),
      [{{
        $_state.addOperands(operands);
        $_state.addAttributes(attributes);
        $_state.addTypes(resultTensorTypes);
        (void)$_state.addRegion();
      }]>
      {5}
    ];
    let hasCustomAssemblyFormat = 1;
    let hasFolder = 1;

    let extraClassDeclaration = structuredOpsBaseDecls # [{{
      // Auto-generated.
      ArrayAttr iterator_types();
      ArrayAttr indexing_maps();
      static void regionBuilder(ImplicitLocOpBuilder &b,
                                Block &block, ArrayRef<NamedAttribute> attrs);
      static std::function<void(ImplicitLocOpBuilder &,
                                Block &, ArrayRef<NamedAttribute>)>
      getRegionBuilder() {{
        return regionBuilder;
      }

      // Generic methods.
      static unsigned getNumRegionArgs();
      std::string getLibraryCallName();
      {6}
    }];
}
)FMT";
|
|
|
|
|
2021-06-24 17:56:16 +08:00
|
|
|
// Builder method taking attribute parameters. Appended to the op's builder
// list when the op declares index attributes. Parameters:
// {0}: Class name
// {1}: Comma interleaved attribute parameters
// {2}: Attribute initialization
// Note: "{{" in the template escapes a literal "{" for llvm::formatv.
static const char structuredOpBuilderFormat[] = R"FMT(
    , OpBuilder<
      (ins "TypeRange":$resultTensorTypes, "ValueRange":$inputs,
           "ValueRange":$outputs, {1},
           CArg<"ArrayRef<NamedAttribute>", "{{}">:$attributes),
      [{{
        $_state.addOperands(inputs);
        $_state.addOperands(outputs);
        $_state.addTypes(resultTensorTypes);
        $_state.addAttribute(
          "operand_segment_sizes",
          $_builder.getI32VectorAttr({{
            static_cast<int32_t>(inputs.size()),
            static_cast<int32_t>(outputs.size())}));
        createAndFillStructuredOpRegion<{0}>(
          $_builder,
          $_state,
          TypeRange(inputs),
          TypeRange(outputs));
        {2}
        $_state.addAttributes(attributes);
      }]>
)FMT";
|
|
|
|
|
2022-02-11 16:20:37 +08:00
|
|
|
// The iterator_types() method for structured ops with a fixed-rank iteration
// domain. Parameters:
// {0}: Class name
// {1}: Comma interleaved iterator type names.
static const char structuredOpIteratorTypesFormat[] =
    R"FMT(
ArrayAttr {0}::iterator_types() {{
  return Builder(getContext()).getStrArrayAttr(SmallVector<StringRef>{{ {1} });
}
)FMT";
|
|
|
|
|
2022-02-11 16:20:37 +08:00
|
|
|
// The iterator_types() method for rank polymorphic structured ops: all loops
// are parallel and the loop count follows the rank of output operand zero.
// Parameters:
// {0}: Class name
static const char rankPolyStructuredOpIteratorTypesFormat[] =
    R"FMT(
ArrayAttr {0}::iterator_types() {{
  int64_t rank = getRank(getOutputOperand(0));
  return Builder(getContext()).getStrArrayAttr(
      SmallVector<StringRef>(rank, getParallelIteratorTypeName()));
}
)FMT";
|
|
|
|
|
|
|
|
// The indexing_maps() method for structured ops. The generated method
// memoizes the computed maps in the "linalg.memoized_indexing_maps" attribute
// so repeated queries avoid re-binding the symbols. Parameters:
// {0}: Class name
// {1}: Comma-separated list of dimension variable names.
// {2}: Statements
static const char structuredOpIndexingMapsFormat[] = R"FMT(
ArrayAttr {0}::indexing_maps() {{
  static const char memoizeAttr[] = "linalg.memoized_indexing_maps";
  ArrayAttr cached = getOperation()->getAttrOfType<ArrayAttr>(memoizeAttr);
  if (cached)
    return cached;

  MLIRContext *context = getContext();
  auto symbolBindings = getSymbolBindings(*this);
  SmallVector<AffineMap> maps;
  {2}
  cached = Builder(context).getAffineMapArrayAttr(maps);
  getOperation()->setAttr(memoizeAttr, cached);
  return cached;
}
)FMT";
|
|
|
|
|
|
|
|
// The indexing_maps() method for rank polymorphic structured ops: scalar
// operands get a zero-result map and tensor operands the multi-dim identity.
// Parameters:
// {0}: Class name
static const char rankPolyStructuredOpIndexingMapsFormat[] = R"FMT(
ArrayAttr {0}::indexing_maps() {{
  MLIRContext *context = getContext();
  AffineMap scalarMap = AffineMap::get(getNumParallelLoops(), 0, context);
  AffineMap tensorMap = AffineMap::getMultiDimIdentityMap(
      getNumParallelLoops(), context);
  SmallVector<AffineMap> indexingMaps;
  for (OpOperand *opOperand : getInputAndOutputOperands())
    indexingMaps.push_back(isScalar(opOperand) ? scalarMap : tensorMap);
  return Builder(getContext()).getAffineMapArrayAttr(indexingMaps);
}
)FMT";
|
|
|
|
|
2021-06-15 02:09:43 +08:00
|
|
|
// Implementations of fold and getEffects.
// Parameters:
// {0}: Class name
// Declared `static` for internal linkage, consistent with every other
// file-local format-string constant in this file.
static const char structuredOpFoldersFormat[] = R"FMT(
LogicalResult {0}::fold(ArrayRef<Attribute>,
                        SmallVectorImpl<OpFoldResult> &) {{
  return foldMemRefCast(*this);
}
void {0}::getEffects(SmallVectorImpl<
    SideEffects::EffectInstance<MemoryEffects::Effect> >&effects) {{
  SmallVector<Value> inputBuffers = getInputBufferOperands();
  SmallVector<Value> outputBuffers = getOutputBufferOperands();
  getGenericEffectsImpl(effects,
    getOperation()->getResults(), inputBuffers, outputBuffers);
}
)FMT";
|
|
|
|
|
2022-02-08 09:54:04 +08:00
|
|
|
// Implementation of parse/print, delegating to the shared
// parseNamedStructuredOp/printNamedStructuredOp helpers.
// Parameters:
// {0}: Class name
static const char structuredOpParserFormat[] = R"FMT(
ParseResult {0}::parse(OpAsmParser &parser, OperationState &result) {{
  return ::parseNamedStructuredOp<{0}>(parser, result);
}
void {0}::print(OpAsmPrinter &p) {{
  ::printNamedStructuredOp(p, *this);
}
)FMT";
|
|
|
|
|
2021-02-21 14:46:27 +08:00
|
|
|
// Emits the ODS (TableGen) definition for one named structured op into the
// generation context's ODS stream. Succeeds immediately (emitting nothing)
// when ODS generation is disabled.
static LogicalResult generateNamedGenericOpOds(LinalgOpConfig &opConfig,
                                               GenerationContext &genContext) {
  if (!genContext.shouldGenerateOds())
    return success();

  raw_ostream &os = genContext.odss();

  // Fragments spliced into structuredOpOdsHeaderFormat below; each stays
  // empty unless the corresponding feature is present on the op.
  std::string interfaceNameList;
  std::string attrList;
  std::string attrMethods;
  std::string attrBuilder;

  std::string doc;
  if (opConfig.metadata->doc) {
    static const char structuredOpDocFmt[] = R"FMT(
  let summary = [{ {0} }];
  let description = [{
    {1}
  }];
)FMT";
    // First line of the doc string becomes the summary; the remainder the
    // description.
    StringRef summary, description;
    std::tie(summary, description) =
        StringRef(*opConfig.metadata->doc).trim().split('\n');
    doc = llvm::formatv(structuredOpDocFmt, summary.trim(), description.trim());
  }

  interfaceNameList = interleaveToString(opConfig.metadata->implements, ", ");

  // Assemble the attribute specific logic required for the op definition.
  if (llvm::any_of(opConfig.structuredOp->args, [](LinalgOperandDef &arg) {
        return arg.usage == LinalgOperandDefUsage::IndexAttr;
      })) {
    SmallVector<std::string> attrDefs;
    SmallVector<std::string> attrParams;
    SmallVector<std::string> attrStmts;
    for (LinalgOperandDef &arg : opConfig.structuredOp->args) {
      if (arg.usage != LinalgOperandDefUsage::IndexAttr)
        continue;
      // Index attributes carry both an affine map and per-result defaults;
      // the attribute's shape is the map's result count.
      assert(arg.indexAttrMap.hasValue());
      assert(arg.defaultVals.hasValue());
      size_t size = arg.indexAttrMap->affineMap().getNumResults();
      assert(arg.defaultVals.getValue().size() == size);
      static const char typeFmt[] = "RankedI64ElementsAttr<[{0}]>";
      static const char defFmt[] = "DefaultValuedAttr<{0}, \"{1}\">:${2}";
      static const char paramFmt[] = "\"Attribute\":${0}";
      static const char stmtFmt[] = "$_state.addAttribute(\"{0}\", {0});";
      // Render the defaults as a braced int64_t list, e.g.
      // "{ static_cast<int64_t>(1), static_cast<int64_t>(2) }".
      std::string defaultVals;
      llvm::raw_string_ostream ss(defaultVals);
      ss << "{ ";
      llvm::interleave(
          arg.defaultVals.getValue(), ss,
          [&](int64_t val) { ss << "static_cast<int64_t>(" << val << ")"; },
          ", ");
      ss << " }";
      attrDefs.push_back(llvm::formatv(defFmt, llvm::formatv(typeFmt, size),
                                       ss.str(), arg.name));
      attrParams.push_back(llvm::formatv(paramFmt, arg.name));
      attrStmts.push_back(llvm::formatv(stmtFmt, arg.name));
    }
    attrList = ",\n" + llvm::join(attrDefs, ",\n");
    attrMethods = R"(
      bool hasDynamicIndexingMaps();
      LogicalResult verifyIndexingMapRequiredAttributes();
    )";
    attrBuilder = llvm::formatv(
        structuredOpBuilderFormat, opConfig.metadata->cppClassName,
        llvm::join(attrParams, ", "), llvm::join(attrStmts, "\n"));
  }

  // Positional arguments must match the {N} placeholders of
  // structuredOpOdsHeaderFormat exactly.
  os << llvm::formatv(structuredOpOdsHeaderFormat,
                      opConfig.metadata->cppClassName, opConfig.metadata->name,
                      interfaceNameList, doc, attrList, attrBuilder,
                      attrMethods);

  return success();
}
|
|
|
|
|
|
|
|
static LogicalResult
|
|
|
|
generateNamedGenericOpDefns(LinalgOpConfig &opConfig,
|
|
|
|
GenerationContext &genContext) {
|
|
|
|
if (!genContext.shouldGenerateDefns())
|
|
|
|
return success();
|
|
|
|
|
|
|
|
raw_ostream &os = genContext.defns();
|
2021-05-19 21:10:28 +08:00
|
|
|
StringRef className = opConfig.metadata->cppClassName;
|
2021-02-21 14:46:27 +08:00
|
|
|
|
|
|
|
// Implementation banner.
|
|
|
|
std::string bannerComment = llvm::formatv("Implementation of {0}", className);
|
|
|
|
os << llvm::formatv(bannerFormat, bannerComment);
|
|
|
|
|
2021-06-24 17:56:16 +08:00
|
|
|
// Compute the number of scalar and tensor arguments.
|
|
|
|
int64_t numOfArgs =
|
|
|
|
llvm::count_if(opConfig.structuredOp->args, [](LinalgOperandDef &arg) {
|
2022-02-14 20:12:15 +08:00
|
|
|
return arg.usage != LinalgOperandDefUsage::IndexAttr;
|
2021-06-24 17:56:16 +08:00
|
|
|
});
|
|
|
|
|
2022-02-11 16:20:37 +08:00
|
|
|
// An operation that accesses only scalars and scalar/rank zero tensors is
|
|
|
|
// rank polymorphic. We implement rank polymorphism by generating different
|
|
|
|
// indexing maps and iterators that match the rank of the first output tensor.
|
|
|
|
// An operation is rank polymorphic if the iteration domain has rank zero.
|
|
|
|
bool isRankPolymorphic = opConfig.structuredOp->iteratorTypes.empty();
|
|
|
|
|
|
|
|
// Generate the iterator_types() method.
|
|
|
|
if (!isRankPolymorphic) {
|
2021-02-21 14:46:27 +08:00
|
|
|
std::string iteratorsStr;
|
|
|
|
llvm::raw_string_ostream ss(iteratorsStr);
|
|
|
|
llvm::interleaveComma(opConfig.structuredOp->iteratorTypes, ss,
|
|
|
|
[&](LinalgIteratorTypeDef it) {
|
|
|
|
switch (it) {
|
|
|
|
case LinalgIteratorTypeDef::parallel:
|
|
|
|
ss << "getParallelIteratorTypeName()";
|
|
|
|
break;
|
|
|
|
case LinalgIteratorTypeDef::reduction:
|
|
|
|
ss << "getReductionIteratorTypeName()";
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
});
|
|
|
|
ss.flush();
|
|
|
|
os << llvm::formatv(structuredOpIteratorTypesFormat, className,
|
|
|
|
iteratorsStr);
|
2022-02-11 16:20:37 +08:00
|
|
|
} else {
|
|
|
|
os << llvm::formatv(rankPolyStructuredOpIteratorTypesFormat, className);
|
2021-02-21 14:46:27 +08:00
|
|
|
}
|
|
|
|
|
2022-02-11 16:20:37 +08:00
|
|
|
// Generating the indexing_maps() method.
|
2021-02-21 14:46:27 +08:00
|
|
|
if (auto &staticMaps =
|
|
|
|
opConfig.structuredOp->indexingMaps.staticIndexingMaps) {
|
|
|
|
if (staticMaps->empty())
|
|
|
|
return emitError(genContext.getLoc()) << "op has no indexing maps";
|
2022-02-11 16:20:37 +08:00
|
|
|
if (!isRankPolymorphic) {
|
|
|
|
AffineMap firstMap = staticMaps->front().affineMap();
|
|
|
|
|
|
|
|
// Symbol bindings.
|
|
|
|
{
|
|
|
|
// For each symbol, generate a declaration for it, either with an
|
|
|
|
// AffineSymbolExpr or an AffineConstantExpr (if the symbol derives from
|
|
|
|
// an attribute).
|
|
|
|
// TODO: Possibly lift into a top-level method.
|
|
|
|
static const char structuredOpSymbolBindingsFormat[] = R"FMT(
|
2021-02-21 14:46:27 +08:00
|
|
|
static SmallVector<AffineExpr> getSymbolBindings({0} self) {
|
|
|
|
MLIRContext *context = self.getContext();
|
|
|
|
SmallVector<AffineExpr> exprs;
|
|
|
|
{1}
|
|
|
|
return exprs;
|
|
|
|
}
|
|
|
|
)FMT";
|
|
|
|
|
2022-02-11 16:20:37 +08:00
|
|
|
unsigned symbolCount = firstMap.getNumSymbols();
|
|
|
|
SmallVector<std::string> symbolBindings;
|
|
|
|
for (unsigned i = 0; i < symbolCount; ++i) {
|
|
|
|
symbolBindings.push_back(llvm::formatv(
|
|
|
|
" exprs.push_back(getAffineSymbolExpr({0}, context));", i));
|
|
|
|
}
|
2021-06-24 17:56:16 +08:00
|
|
|
|
2022-02-11 16:20:37 +08:00
|
|
|
// Access an index attribute. Parameters:
|
|
|
|
// {0}: Attribute name
|
|
|
|
// {1}: Symbol position
|
|
|
|
// {2}: Attribute index
|
|
|
|
static const char structuredOpAccessAttrFormat[] = R"FMT(
|
2021-11-09 08:05:55 +08:00
|
|
|
int64_t cst{1} = self.{0}().getValues<int64_t>()[{2}];
|
2021-06-24 17:56:16 +08:00
|
|
|
exprs.push_back(getAffineConstantExpr(cst{1}, context));
|
|
|
|
)FMT";
|
2022-02-11 16:20:37 +08:00
|
|
|
// Update all symbol bindings mapped to an attribute.
|
|
|
|
for (LinalgOperandDef &arg : opConfig.structuredOp->args) {
|
2022-02-14 20:12:15 +08:00
|
|
|
if (arg.usage != LinalgOperandDefUsage::IndexAttr)
|
2022-02-11 16:20:37 +08:00
|
|
|
continue;
|
2022-02-14 20:12:15 +08:00
|
|
|
assert(arg.indexAttrMap.hasValue());
|
2022-02-11 16:20:37 +08:00
|
|
|
for (auto &en :
|
2022-02-14 20:12:15 +08:00
|
|
|
llvm::enumerate(arg.indexAttrMap->affineMap().getResults())) {
|
2022-02-11 16:20:37 +08:00
|
|
|
if (auto symbol = en.value().dyn_cast<AffineSymbolExpr>()) {
|
|
|
|
symbolBindings[symbol.getPosition()] =
|
|
|
|
llvm::formatv(structuredOpAccessAttrFormat, arg.name,
|
|
|
|
symbol.getPosition(), en.index());
|
|
|
|
}
|
2021-06-24 17:56:16 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-02-11 16:20:37 +08:00
|
|
|
std::string symbolBindingsStr;
|
|
|
|
llvm::raw_string_ostream symbolBindingsSs(symbolBindingsStr);
|
|
|
|
llvm::interleave(symbolBindings, symbolBindingsSs, "\n");
|
|
|
|
symbolBindingsSs.flush();
|
2021-02-21 14:46:27 +08:00
|
|
|
|
2022-02-11 16:20:37 +08:00
|
|
|
os << llvm::formatv(structuredOpSymbolBindingsFormat, className,
|
|
|
|
symbolBindingsStr);
|
|
|
|
}
|
2021-02-27 05:11:02 +08:00
|
|
|
|
2022-02-11 16:20:37 +08:00
|
|
|
// Indexing maps.
|
|
|
|
{
|
|
|
|
unsigned dimCount = firstMap.getNumDims();
|
|
|
|
|
|
|
|
// Generate a comma-separated list of dim identifiers to be passed to
|
|
|
|
// bindDims, ensuring that AffineExpr identifiers are bound in the right
|
|
|
|
// order to the proper AffineDimExpr.
|
|
|
|
// This results in vars in scope like: d0, d1, d2...
|
|
|
|
SmallVector<unsigned> dimIndices;
|
|
|
|
for (unsigned i = 0; i < dimCount; ++i)
|
|
|
|
dimIndices.push_back(i);
|
|
|
|
std::string dimIdentsStr;
|
|
|
|
llvm::raw_string_ostream dimIdentsSs(dimIdentsStr);
|
|
|
|
llvm::interleaveComma(dimIndices, dimIdentsSs,
|
|
|
|
[&](unsigned i) { dimIdentsSs << "d" << i; });
|
|
|
|
dimIdentsSs.flush();
|
|
|
|
|
|
|
|
// Statements to add and simplify each affine map.
|
|
|
|
SmallVector<std::string> stmts;
|
|
|
|
for (auto &indexingMap : *staticMaps) {
|
|
|
|
// TODO: Assert that dim and symbol count match the first.
|
|
|
|
stmts.push_back(
|
|
|
|
llvm::formatv("maps.push_back({0});",
|
|
|
|
generateCppExpression(indexingMap, "context")));
|
|
|
|
stmts.push_back(llvm::formatv(
|
|
|
|
"maps.back() = "
|
|
|
|
"simplifyAffineMap(maps.back().replaceDimsAndSymbols({{}, "
|
|
|
|
"symbolBindings, {0}, 0));",
|
|
|
|
dimCount));
|
|
|
|
}
|
2021-02-21 14:46:27 +08:00
|
|
|
|
2022-02-11 16:20:37 +08:00
|
|
|
// TODO: This needs to be memoized and/or converted to non-parser based
|
|
|
|
// C++ codegen prior to real use.
|
|
|
|
os << llvm::formatv(structuredOpIndexingMapsFormat, className,
|
|
|
|
dimIdentsStr, interleaveToString(stmts, "\n "));
|
2021-02-21 14:46:27 +08:00
|
|
|
}
|
2022-02-11 16:20:37 +08:00
|
|
|
} else {
|
|
|
|
os << llvm::formatv(rankPolyStructuredOpIndexingMapsFormat, className);
|
2021-02-21 14:46:27 +08:00
|
|
|
}
|
|
|
|
} else {
|
|
|
|
return emitError(genContext.getLoc())
|
|
|
|
<< "generating code for non static indexing maps not currently "
|
|
|
|
"supported";
|
|
|
|
}
|
|
|
|
|
|
|
|
// getNumRegionArgs()
|
|
|
|
{
|
|
|
|
// Generates a getNumRegionArgs() method. Parameters:
|
|
|
|
// {0}: Class name
|
|
|
|
// {1}: Number of region args
|
|
|
|
static const char structuredOpGetNumRegionArgsFormat[] = R"FMT(
|
|
|
|
unsigned {0}::getNumRegionArgs() {{ return {1}; }
|
|
|
|
)FMT";
|
|
|
|
os << llvm::formatv(structuredOpGetNumRegionArgsFormat, className,
|
2021-06-24 17:56:16 +08:00
|
|
|
numOfArgs);
|
2021-02-21 14:46:27 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// getLibraryCallName()
|
|
|
|
{
|
|
|
|
// Generates a getLibraryCallName method. Parameters:
|
|
|
|
// {0}: Class name
|
|
|
|
static const char structuredOpGetLibraryCallFormat[] = R"FMT(
|
|
|
|
std::string {0}::getLibraryCallName() {{
|
|
|
|
return generateLibraryCallName(getOperation());
|
|
|
|
}
|
|
|
|
)FMT";
|
|
|
|
os << llvm::formatv(structuredOpGetLibraryCallFormat, className);
|
|
|
|
}
|
|
|
|
|
2021-06-24 17:56:16 +08:00
|
|
|
// hasDynamicIndexingMaps() and verifyIndexingMapRequiredAttributes()
|
|
|
|
if (llvm::any_of(opConfig.structuredOp->args, [](LinalgOperandDef &arg) {
|
2022-02-14 20:12:15 +08:00
|
|
|
return arg.usage == LinalgOperandDefUsage::IndexAttr;
|
2021-06-24 17:56:16 +08:00
|
|
|
})) {
|
|
|
|
std::vector<std::string> attrVerifications;
|
|
|
|
for (LinalgOperandDef &arg : opConfig.structuredOp->args) {
|
2022-02-14 20:12:15 +08:00
|
|
|
if (arg.usage != LinalgOperandDefUsage::IndexAttr)
|
2021-06-24 17:56:16 +08:00
|
|
|
continue;
|
2022-02-14 20:12:15 +08:00
|
|
|
assert(arg.indexAttrMap.hasValue());
|
2021-06-24 17:56:16 +08:00
|
|
|
// Verify index attribute. Parameters:
|
|
|
|
// {0}: Attribute name
|
|
|
|
// {1}: Attribute size
|
|
|
|
static const char attrFmt[] = R"FMT(
|
|
|
|
if (auto attr = op->getAttrOfType<DenseElementsAttr>("{0}")) {{
|
|
|
|
if (!attr.getType().getElementType().isInteger(64))
|
2022-02-14 20:12:15 +08:00
|
|
|
return op->emitError("incorrect element type for index attribute '{0}'");
|
2021-06-24 17:56:16 +08:00
|
|
|
if (attr.getType().getShape() != ArrayRef<int64_t>{{ {1} })
|
2022-02-14 20:12:15 +08:00
|
|
|
return op->emitError("incorrect shape for index attribute '{0}'");
|
2021-06-24 17:56:16 +08:00
|
|
|
}
|
|
|
|
)FMT";
|
|
|
|
attrVerifications.push_back(llvm::formatv(
|
2022-02-14 20:12:15 +08:00
|
|
|
attrFmt, arg.name, arg.indexAttrMap->affineMap().getNumResults()));
|
2021-06-24 17:56:16 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// Generates the verifyIndexingMapRequiredAttributes method. Parameters:
|
|
|
|
// {0}: Class name
|
|
|
|
// {1}: Attribute verification
|
|
|
|
static const char structuredOpVerifyIndexingMapRequiredAttributes[] = R"FMT(
|
|
|
|
bool {0}::hasDynamicIndexingMaps() {{ return true; }
|
|
|
|
LogicalResult {0}::verifyIndexingMapRequiredAttributes() {{
|
|
|
|
Operation *op = getOperation();
|
|
|
|
{1}
|
|
|
|
return success();
|
|
|
|
}
|
|
|
|
)FMT";
|
|
|
|
os << llvm::formatv(structuredOpVerifyIndexingMapRequiredAttributes,
|
|
|
|
className, llvm::join(attrVerifications, "\n"));
|
|
|
|
}
|
|
|
|
|
2021-02-21 14:46:27 +08:00
|
|
|
// regionBuilder()
|
|
|
|
{
|
|
|
|
// Generates a regionBuilder method. Parameters.
|
|
|
|
// {0}: Class name
|
2021-05-19 21:10:28 +08:00
|
|
|
// {1}: Number of args
|
|
|
|
// {2}: Statements
|
2021-02-21 14:46:27 +08:00
|
|
|
static const char structuredOpRegionBuilderFormat[] = R"FMT(
|
2022-02-14 21:02:11 +08:00
|
|
|
void {0}::regionBuilder(ImplicitLocOpBuilder &b,
|
|
|
|
Block &block, ArrayRef<NamedAttribute> attrs) {{
|
2021-05-19 21:10:28 +08:00
|
|
|
assert({1} > 0 && block.getNumArguments() == {1} &&
|
|
|
|
"{0} regionBuilder expects {1} (>=0) args");
|
|
|
|
RegionBuilderHelper helper(block.getArgument(0).getContext(), block);
|
2021-02-21 14:46:27 +08:00
|
|
|
SmallVector<Value> yields;
|
2021-05-19 21:10:28 +08:00
|
|
|
{2}
|
2021-02-21 14:46:27 +08:00
|
|
|
helper.yieldOutputs(yields);
|
|
|
|
}
|
|
|
|
)FMT";
|
|
|
|
auto &args = opConfig.structuredOp->args;
|
|
|
|
auto &assignments = opConfig.structuredOp->assignments;
|
|
|
|
size_t generatedAssignmentCount = 0;
|
|
|
|
int localCounter = 0;
|
|
|
|
SmallVector<std::string> stmts;
|
2021-06-15 21:32:12 +08:00
|
|
|
for (LinalgOperandDef &arg : args) {
|
2022-02-14 20:12:15 +08:00
|
|
|
if (arg.usage != LinalgOperandDefUsage::Output)
|
2021-02-21 14:46:27 +08:00
|
|
|
continue;
|
|
|
|
|
|
|
|
// Find the assignment that correlates with the argument.
|
|
|
|
ScalarAssign *assignment = findAssignment(arg.name, assignments);
|
|
|
|
if (!assignment)
|
|
|
|
return emitError(genContext.getLoc())
|
|
|
|
<< "no assignment found for output argument " << arg.name;
|
|
|
|
++generatedAssignmentCount;
|
|
|
|
|
|
|
|
// Recursively generate the expression.
|
|
|
|
std::function<Optional<std::string>(ScalarExpression &)>
|
|
|
|
generateExpression =
|
|
|
|
[&](ScalarExpression &expression) -> Optional<std::string> {
|
[mlir][linalg] Add symbolic type conversion to linalg named ops.
This enables this kind of construct in the DSL to generate a named op that is polymorphic over numeric type variables `T` and `U`, generating the correct arithmetic casts at construction time:
```
@tc_def_op
def polymorphic_matmul(A=TensorDef(T1, S.M, S.K),
B=TensorDef(T2, S.K, S.N),
C=TensorDef(U, S.M, S.N, output=True)):
implements(ContractionOpInterface)
C[D.m, D.n] += cast(U, A[D.m, D.k]) * cast(U, B[D.k, D.n])
```
Presently, this only supports type variables that are bound to the element type of one of the arguments, although a further extension that allows binding a type variable to an attribute would allow some more expressiveness and may be useful for some formulations. This is left to a future patch. In addition, this patch does not yet materialize the verifier support which ensures that types are bound correctly (for such simple examples, failing to do so will yield IR that fails verification, it just won't yet fail with a precise error).
Note that the full grid of extensions/truncation/int<->float conversions are supported, but many of them are lossy and higher level code needs to be mindful of numerics (it is not the job of this level).
As-is, this should be sufficient for most integer matmul scenarios we work with in typical quantization schemes.
Differential Revision: https://reviews.llvm.org/D97603
2021-02-27 10:01:15 +08:00
|
|
|
if (expression.arg) {
|
|
|
|
// Argument reference.
|
|
|
|
Optional<int> argIndex = findTensorDefArgIndex(*expression.arg, args);
|
2021-02-21 14:46:27 +08:00
|
|
|
if (!argIndex) {
|
|
|
|
emitError(genContext.getLoc())
|
2021-05-19 21:10:28 +08:00
|
|
|
<< "scalar argument not defined on the op: " << *expression.arg;
|
2021-02-21 14:46:27 +08:00
|
|
|
return None;
|
|
|
|
}
|
|
|
|
return std::string(
|
|
|
|
llvm::formatv("block.getArgument({0})", *argIndex));
|
2021-05-19 21:10:28 +08:00
|
|
|
}
|
|
|
|
if (expression.constant) {
|
|
|
|
std::string cppIdent = llvm::formatv("value{0}", ++localCounter);
|
|
|
|
stmts.push_back(
|
|
|
|
llvm::formatv(R"FMT(Value {0} = helper.constant("{1}");)FMT",
|
|
|
|
cppIdent, expression.constant));
|
|
|
|
return cppIdent;
|
|
|
|
}
|
|
|
|
if (expression.index) {
|
|
|
|
// Access an iteration index.
|
|
|
|
std::string cppIdent = llvm::formatv("value{0}", ++localCounter);
|
|
|
|
stmts.push_back(llvm::formatv("Value {0} = helper.index({1});",
|
|
|
|
cppIdent, *expression.index));
|
|
|
|
return cppIdent;
|
|
|
|
}
|
2022-01-07 20:37:52 +08:00
|
|
|
if (expression.arithFn) {
|
[mlir][linalg] Add symbolic type conversion to linalg named ops.
This enables this kind of construct in the DSL to generate a named op that is polymorphic over numeric type variables `T` and `U`, generating the correct arithmetic casts at construction time:
```
@tc_def_op
def polymorphic_matmul(A=TensorDef(T1, S.M, S.K),
B=TensorDef(T2, S.K, S.N),
C=TensorDef(U, S.M, S.N, output=True)):
implements(ContractionOpInterface)
C[D.m, D.n] += cast(U, A[D.m, D.k]) * cast(U, B[D.k, D.n])
```
Presently, this only supports type variables that are bound to the element type of one of the arguments, although a further extension that allows binding a type variable to an attribute would allow some more expressiveness and may be useful for some formulations. This is left to a future patch. In addition, this patch does not yet materialize the verifier support which ensures that types are bound correctly (for such simple examples, failing to do so will yield IR that fails verification, it just won't yet fail with a precise error).
Note that the full grid of extensions/truncation/int<->float conversions are supported, but many of them are lossy and higher level code needs to be mindful of numerics (it is not the job of this level).
As-is, this should be sufficient for most integer matmul scenarios we work with in typical quantization schemes.
Differential Revision: https://reviews.llvm.org/D97603
2021-02-27 10:01:15 +08:00
|
|
|
// Apply function.
|
2021-02-21 14:46:27 +08:00
|
|
|
// Recursively generate operands.
|
|
|
|
SmallVector<std::string> operandCppValues;
|
2022-01-07 20:37:52 +08:00
|
|
|
for (ScalarExpression &operand : expression.arithFn->operands) {
|
2021-02-21 14:46:27 +08:00
|
|
|
auto operandCppValue = generateExpression(operand);
|
|
|
|
if (!operandCppValue)
|
|
|
|
return None;
|
|
|
|
operandCppValues.push_back(*operandCppValue);
|
|
|
|
}
|
|
|
|
std::string cppIdent = llvm::formatv("value{0}", ++localCounter);
|
|
|
|
stmts.push_back(
|
2022-01-07 20:37:52 +08:00
|
|
|
llvm::formatv("Value {0} = helper.arithfn__{1}({2});", cppIdent,
|
|
|
|
expression.arithFn->fnName,
|
2021-02-21 14:46:27 +08:00
|
|
|
interleaveToString(operandCppValues, ", ")));
|
|
|
|
return cppIdent;
|
2021-05-19 21:10:28 +08:00
|
|
|
}
|
2022-01-07 20:23:11 +08:00
|
|
|
if (expression.typeFn) {
|
[mlir][linalg] Add symbolic type conversion to linalg named ops.
This enables this kind of construct in the DSL to generate a named op that is polymorphic over numeric type variables `T` and `U`, generating the correct arithmetic casts at construction time:
```
@tc_def_op
def polymorphic_matmul(A=TensorDef(T1, S.M, S.K),
B=TensorDef(T2, S.K, S.N),
C=TensorDef(U, S.M, S.N, output=True)):
implements(ContractionOpInterface)
C[D.m, D.n] += cast(U, A[D.m, D.k]) * cast(U, B[D.k, D.n])
```
Presently, this only supports type variables that are bound to the element type of one of the arguments, although a further extension that allows binding a type variable to an attribute would allow some more expressiveness and may be useful for some formulations. This is left to a future patch. In addition, this patch does not yet materialize the verifier support which ensures that types are bound correctly (for such simple examples, failing to do so will yield IR that fails verification, it just won't yet fail with a precise error).
Note that the full grid of extensions/truncation/int<->float conversions are supported, but many of them are lossy and higher level code needs to be mindful of numerics (it is not the job of this level).
As-is, this should be sufficient for most integer matmul scenarios we work with in typical quantization schemes.
Differential Revision: https://reviews.llvm.org/D97603
2021-02-27 10:01:15 +08:00
|
|
|
// Symbolic cast.
|
|
|
|
// Operands must be arity 1.
|
2022-01-07 20:23:11 +08:00
|
|
|
if (expression.typeFn->operands.size() != 1) {
|
[mlir][linalg] Add symbolic type conversion to linalg named ops.
This enables this kind of construct in the DSL to generate a named op that is polymorphic over numeric type variables `T` and `U`, generating the correct arithmetic casts at construction time:
```
@tc_def_op
def polymorphic_matmul(A=TensorDef(T1, S.M, S.K),
B=TensorDef(T2, S.K, S.N),
C=TensorDef(U, S.M, S.N, output=True)):
implements(ContractionOpInterface)
C[D.m, D.n] += cast(U, A[D.m, D.k]) * cast(U, B[D.k, D.n])
```
Presently, this only supports type variables that are bound to the element type of one of the arguments, although a further extension that allows binding a type variable to an attribute would allow some more expressiveness and may be useful for some formulations. This is left to a future patch. In addition, this patch does not yet materialize the verifier support which ensures that types are bound correctly (for such simple examples, failing to do so will yield IR that fails verification, it just won't yet fail with a precise error).
Note that the full grid of extensions/truncation/int<->float conversions are supported, but many of them are lossy and higher level code needs to be mindful of numerics (it is not the job of this level).
As-is, this should be sufficient for most integer matmul scenarios we work with in typical quantization schemes.
Differential Revision: https://reviews.llvm.org/D97603
2021-02-27 10:01:15 +08:00
|
|
|
emitError(genContext.getLoc())
|
2022-01-07 20:23:11 +08:00
|
|
|
<< "type conversion operand arity must be 1";
|
[mlir][linalg] Add symbolic type conversion to linalg named ops.
This enables this kind of construct in the DSL to generate a named op that is polymorphic over numeric type variables `T` and `U`, generating the correct arithmetic casts at construction time:
```
@tc_def_op
def polymorphic_matmul(A=TensorDef(T1, S.M, S.K),
B=TensorDef(T2, S.K, S.N),
C=TensorDef(U, S.M, S.N, output=True)):
implements(ContractionOpInterface)
C[D.m, D.n] += cast(U, A[D.m, D.k]) * cast(U, B[D.k, D.n])
```
Presently, this only supports type variables that are bound to the element type of one of the arguments, although a further extension that allows binding a type variable to an attribute would allow some more expressiveness and may be useful for some formulations. This is left to a future patch. In addition, this patch does not yet materialize the verifier support which ensures that types are bound correctly (for such simple examples, failing to do so will yield IR that fails verification, it just won't yet fail with a precise error).
Note that the full grid of extensions/truncation/int<->float conversions are supported, but many of them are lossy and higher level code needs to be mindful of numerics (it is not the job of this level).
As-is, this should be sufficient for most integer matmul scenarios we work with in typical quantization schemes.
Differential Revision: https://reviews.llvm.org/D97603
2021-02-27 10:01:15 +08:00
|
|
|
return None;
|
|
|
|
}
|
|
|
|
Optional<std::string> operandCppValue =
|
2022-01-07 20:23:11 +08:00
|
|
|
generateExpression(expression.typeFn->operands[0]);
|
[mlir][linalg] Add symbolic type conversion to linalg named ops.
This enables this kind of construct in the DSL to generate a named op that is polymorphic over numeric type variables `T` and `U`, generating the correct arithmetic casts at construction time:
```
@tc_def_op
def polymorphic_matmul(A=TensorDef(T1, S.M, S.K),
B=TensorDef(T2, S.K, S.N),
C=TensorDef(U, S.M, S.N, output=True)):
implements(ContractionOpInterface)
C[D.m, D.n] += cast(U, A[D.m, D.k]) * cast(U, B[D.k, D.n])
```
Presently, this only supports type variables that are bound to the element type of one of the arguments, although a further extension that allows binding a type variable to an attribute would allow some more expressiveness and may be useful for some formulations. This is left to a future patch. In addition, this patch does not yet materialize the verifier support which ensures that types are bound correctly (for such simple examples, failing to do so will yield IR that fails verification, it just won't yet fail with a precise error).
Note that the full grid of extensions/truncation/int<->float conversions are supported, but many of them are lossy and higher level code needs to be mindful of numerics (it is not the job of this level).
As-is, this should be sufficient for most integer matmul scenarios we work with in typical quantization schemes.
Differential Revision: https://reviews.llvm.org/D97603
2021-02-27 10:01:15 +08:00
|
|
|
if (!operandCppValue)
|
|
|
|
return None;
|
|
|
|
|
2021-05-19 21:10:28 +08:00
|
|
|
Optional<std::string> typeCppValue =
|
2022-01-07 20:23:11 +08:00
|
|
|
findTypeValue(expression.typeFn->typeVar, args);
|
2021-05-19 21:10:28 +08:00
|
|
|
if (!typeCppValue) {
|
[mlir][linalg] Add symbolic type conversion to linalg named ops.
This enables this kind of construct in the DSL to generate a named op that is polymorphic over numeric type variables `T` and `U`, generating the correct arithmetic casts at construction time:
```
@tc_def_op
def polymorphic_matmul(A=TensorDef(T1, S.M, S.K),
B=TensorDef(T2, S.K, S.N),
C=TensorDef(U, S.M, S.N, output=True)):
implements(ContractionOpInterface)
C[D.m, D.n] += cast(U, A[D.m, D.k]) * cast(U, B[D.k, D.n])
```
Presently, this only supports type variables that are bound to the element type of one of the arguments, although a further extension that allows binding a type variable to an attribute would allow some more expressiveness and may be useful for some formulations. This is left to a future patch. In addition, this patch does not yet materialize the verifier support which ensures that types are bound correctly (for such simple examples, failing to do so will yield IR that fails verification, it just won't yet fail with a precise error).
Note that the full grid of extensions/truncation/int<->float conversions are supported, but many of them are lossy and higher level code needs to be mindful of numerics (it is not the job of this level).
As-is, this should be sufficient for most integer matmul scenarios we work with in typical quantization schemes.
Differential Revision: https://reviews.llvm.org/D97603
2021-02-27 10:01:15 +08:00
|
|
|
emitError(genContext.getLoc())
|
2022-01-07 20:23:11 +08:00
|
|
|
<< "type variable " << expression.typeFn->typeVar
|
|
|
|
<< ", used in a type conversion, must map to a predefined or "
|
2021-05-19 21:10:28 +08:00
|
|
|
<< "an argument type but it does not";
|
[mlir][linalg] Add symbolic type conversion to linalg named ops.
This enables this kind of construct in the DSL to generate a named op that is polymorphic over numeric type variables `T` and `U`, generating the correct arithmetic casts at construction time:
```
@tc_def_op
def polymorphic_matmul(A=TensorDef(T1, S.M, S.K),
B=TensorDef(T2, S.K, S.N),
C=TensorDef(U, S.M, S.N, output=True)):
implements(ContractionOpInterface)
C[D.m, D.n] += cast(U, A[D.m, D.k]) * cast(U, B[D.k, D.n])
```
Presently, this only supports type variables that are bound to the element type of one of the arguments, although a further extension that allows binding a type variable to an attribute would allow some more expressiveness and may be useful for some formulations. This is left to a future patch. In addition, this patch does not yet materialize the verifier support which ensures that types are bound correctly (for such simple examples, failing to do so will yield IR that fails verification, it just won't yet fail with a precise error).
Note that the full grid of extensions/truncation/int<->float conversions are supported, but many of them are lossy and higher level code needs to be mindful of numerics (it is not the job of this level).
As-is, this should be sufficient for most integer matmul scenarios we work with in typical quantization schemes.
Differential Revision: https://reviews.llvm.org/D97603
2021-02-27 10:01:15 +08:00
|
|
|
return None;
|
|
|
|
}
|
|
|
|
std::string cppIdent = llvm::formatv("value{0}", ++localCounter);
|
2021-10-07 14:26:38 +08:00
|
|
|
stmts.push_back(
|
2022-01-07 20:23:11 +08:00
|
|
|
llvm::formatv("Value {0} = helper.typefn__{1}({2}, {3});",
|
|
|
|
cppIdent, expression.typeFn->fnName,
|
|
|
|
typeCppValue.getValue(), *operandCppValue));
|
[mlir][linalg] Add symbolic type conversion to linalg named ops.
This enables this kind of construct in the DSL to generate a named op that is polymorphic over numeric type variables `T` and `U`, generating the correct arithmetic casts at construction time:
```
@tc_def_op
def polymorphic_matmul(A=TensorDef(T1, S.M, S.K),
B=TensorDef(T2, S.K, S.N),
C=TensorDef(U, S.M, S.N, output=True)):
implements(ContractionOpInterface)
C[D.m, D.n] += cast(U, A[D.m, D.k]) * cast(U, B[D.k, D.n])
```
Presently, this only supports type variables that are bound to the element type of one of the arguments, although a further extension that allows binding a type variable to an attribute would allow some more expressiveness and may be useful for some formulations. This is left to a future patch. In addition, this patch does not yet materialize the verifier support which ensures that types are bound correctly (for such simple examples, failing to do so will yield IR that fails verification, it just won't yet fail with a precise error).
Note that the full grid of extensions/truncation/int<->float conversions are supported, but many of them are lossy and higher level code needs to be mindful of numerics (it is not the job of this level).
As-is, this should be sufficient for most integer matmul scenarios we work with in typical quantization schemes.
Differential Revision: https://reviews.llvm.org/D97603
2021-02-27 10:01:15 +08:00
|
|
|
return cppIdent;
|
2021-02-21 14:46:27 +08:00
|
|
|
}
|
2021-05-19 21:10:28 +08:00
|
|
|
emitError(genContext.getLoc()) << "unknown ScalarExpression type";
|
|
|
|
return None;
|
2021-02-21 14:46:27 +08:00
|
|
|
};
|
|
|
|
Optional<std::string> cppValue = generateExpression(assignment->value);
|
|
|
|
if (!cppValue)
|
|
|
|
return failure();
|
|
|
|
stmts.push_back(llvm::formatv("yields.push_back({0});", cppValue));
|
|
|
|
}
|
|
|
|
|
|
|
|
if (generatedAssignmentCount != assignments.size())
|
|
|
|
return emitError(genContext.getLoc())
|
|
|
|
<< "mismatched number of assignments vs output arguments";
|
|
|
|
|
2021-05-19 21:10:28 +08:00
|
|
|
os << llvm::formatv(structuredOpRegionBuilderFormat, className, numOfArgs,
|
2021-02-21 14:46:27 +08:00
|
|
|
interleaveToString(stmts, "\n "));
|
|
|
|
}
|
|
|
|
|
2022-02-08 09:54:04 +08:00
|
|
|
// Parser and printer.
|
|
|
|
os << llvm::formatv(structuredOpParserFormat, className);
|
|
|
|
|
2021-02-21 14:46:27 +08:00
|
|
|
// Canonicalizers and folders.
|
2021-06-15 02:09:43 +08:00
|
|
|
os << llvm::formatv(structuredOpFoldersFormat, className);
|
2021-02-21 14:46:27 +08:00
|
|
|
|
|
|
|
return success();
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Generates all requested artifacts (ODS declaration and C++ definitions)
/// for a single parsed op configuration. Only structured (linalg named) ops
/// are currently supported; any other config kind is diagnosed at the
/// generation location.
static LogicalResult generateOp(LinalgOpConfig &opConfig,
                                GenerationContext &genContext) {
  // Switch on op type being generated.
  if (!opConfig.structuredOp)
    return emitError(genContext.getLoc()) << "unsupported operation type";

  // Emit the ODS declaration first; the C++ definitions are only attempted
  // when the declaration was produced successfully.
  if (failed(generateNamedGenericOpOds(opConfig, genContext)))
    return failure();
  return generateNamedGenericOpDefns(opConfig, genContext);
}
|
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// Command line options and main
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
// Command line flags. The tool consumes a YAML op description and, depending
// on which output flags are set, emits ODS declarations and/or the C++
// implementation.

// Input YAML file; defaults to "-" (stdin).
static llvm::cl::opt<std::string>
    inputFilename(llvm::cl::Positional, llvm::cl::desc("<input file>"),
                  llvm::cl::init("-"), llvm::cl::value_desc("YAML filename"));

// Destination for the generated ODS (TableGen) declarations; empty means the
// ODS output is not produced.
static llvm::cl::opt<std::string>
    outputOdsDeclFilename("o-ods-decl", llvm::cl::desc("ODS output filename"),
                          llvm::cl::value_desc("filename"), llvm::cl::init(""));

// Destination for the generated C++ implementation; empty means the C++
// output is not produced.
static llvm::cl::opt<std::string>
    outputCppImplFilename("o-impl",
                          llvm::cl::desc("C++ implementation file name"),
                          llvm::cl::value_desc("filename"), llvm::cl::init(""));
|
|
|
|
|
|
|
|
int main(int argc, char **argv) {
|
|
|
|
llvm::cl::ParseCommandLineOptions(argc, argv, "Linalg ODS Gen from YAML");
|
|
|
|
|
|
|
|
// Set up the input file.
|
|
|
|
std::string errorMessage;
|
|
|
|
std::unique_ptr<llvm::MemoryBuffer> file =
|
|
|
|
mlir::openInputFile(inputFilename, &errorMessage);
|
|
|
|
if (!file) {
|
|
|
|
llvm::errs() << errorMessage << "\n";
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
MLIRContext mlirContext;
|
|
|
|
LinalgYAMLContext yamlContext{&mlirContext};
|
|
|
|
|
|
|
|
std::vector<LinalgOpConfig> opConfigs;
|
|
|
|
|
|
|
|
// Parse input.
|
|
|
|
Input yin(file->getBuffer(), &yamlContext);
|
|
|
|
yin >> opConfigs;
|
|
|
|
|
|
|
|
if (yin.error())
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
// Open output files.
|
|
|
|
std::unique_ptr<llvm::ToolOutputFile> outputOdsDecl;
|
|
|
|
if (!outputOdsDeclFilename.empty()) {
|
|
|
|
outputOdsDecl = openOutputFile(outputOdsDeclFilename, &errorMessage);
|
|
|
|
if (!outputOdsDecl) {
|
|
|
|
llvm::errs() << errorMessage << "\n";
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
std::unique_ptr<llvm::ToolOutputFile> outputCppImpl;
|
|
|
|
if (!outputCppImplFilename.empty()) {
|
|
|
|
outputCppImpl = openOutputFile(outputCppImplFilename, &errorMessage);
|
|
|
|
if (!outputCppImpl) {
|
|
|
|
llvm::errs() << errorMessage << "\n";
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!outputOdsDecl && !outputCppImpl) {
|
|
|
|
llvm::errs() << "error: No output files specified\n";
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Generate.
|
|
|
|
GenerationContext genContext(&mlirContext,
|
|
|
|
outputOdsDecl ? &outputOdsDecl->os() : nullptr,
|
|
|
|
outputCppImpl ? &outputCppImpl->os() : nullptr);
|
|
|
|
|
|
|
|
for (auto &opConfig : opConfigs) {
|
|
|
|
if (!opConfig.metadata) {
|
|
|
|
emitError(genContext.getLoc())
|
|
|
|
<< "missing operation metadata on subsequent op";
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
genContext.setLoc(NameLoc::get(
|
2021-11-17 01:21:15 +08:00
|
|
|
StringAttr::get(&mlirContext, opConfig.metadata->cppClassName)));
|
2021-02-21 14:46:27 +08:00
|
|
|
if (failed(generateOp(opConfig, genContext))) {
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (outputOdsDecl)
|
|
|
|
outputOdsDecl->keep();
|
|
|
|
if (outputCppImpl)
|
|
|
|
outputCppImpl->keep();
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|