2021-02-21 14:46:27 +08:00
|
|
|
//===- mlir-linalg-ods-yaml-gen.cpp - Linalg ODS generation from yaml ----===//
|
|
|
|
//
|
|
|
|
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
|
|
|
// See https://llvm.org/LICENSE.txt for license information.
|
|
|
|
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//
|
|
|
|
// This file implements an ODS (and C++) generator from a YAML form
|
|
|
|
// derived from the mathematical expression of linalg named ops. Typically a
|
|
|
|
// math oriented DSL will be used to export the essential representation to
|
|
|
|
// this form, and maintaining the SOT at the math level (versus recreating it
|
|
|
|
// in MLIR) is deemed to have systemic value.
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
#include "mlir/IR/AffineMap.h"
|
|
|
|
#include "mlir/IR/MLIRContext.h"
|
|
|
|
#include "mlir/Parser.h"
|
|
|
|
#include "mlir/Support/FileUtilities.h"
|
|
|
|
#include "mlir/Support/LLVM.h"
|
|
|
|
#include "llvm/ADT/Optional.h"
|
|
|
|
#include "llvm/ADT/StringRef.h"
|
|
|
|
#include "llvm/Support/CommandLine.h"
|
|
|
|
#include "llvm/Support/Debug.h"
|
|
|
|
#include "llvm/Support/FormatVariadic.h"
|
|
|
|
#include "llvm/Support/ToolOutputFile.h"
|
|
|
|
#include "llvm/Support/YAMLTraits.h"
|
|
|
|
|
|
|
|
using namespace mlir;
|
|
|
|
|
|
|
|
using llvm::yaml::Input;
|
|
|
|
using llvm::yaml::IO;
|
|
|
|
using llvm::yaml::MappingTraits;
|
|
|
|
using llvm::yaml::ScalarEnumerationTraits;
|
|
|
|
using llvm::yaml::ScalarTraits;
|
|
|
|
|
|
|
|
#define DEBUG_TYPE "linalg-ods-gen"
|
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// Mapping structs (correspond to data types in the YAML description).
|
|
|
|
// TODO: Since this is a schema/part of the contract, it should be moved to
|
|
|
|
// a real header.
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
namespace {
|
|
|
|
|
|
|
|
/// State passed to the YAML parser via the yaml::Input context pointer;
/// carries the MLIRContext needed to parse affine map attributes
/// (see ScalarTraits<SerializedAffineMap>::input).
struct LinalgYAMLContext {
  MLIRContext *mlirContext;
};
|
|
|
|
|
|
|
|
/// Op metadata shared by all op kinds (maps to the `metadata` YAML key):
/// - `name`: assembly mnemonic of the op.
/// - `cppClassName`: ODS/C++ class name to generate.
/// - `doc`: optional documentation (summary + description).
/// - `implements`: optional list of op interface names the op declares.
struct LinalgOpMetadata {
  std::string name;
  std::string cppClassName;
  Optional<std::string> doc;
  SmallVector<std::string> implements;
};
|
|
|
|
|
|
|
|
/// Wrapper around an AffineMapAttr that round-trips through the attribute's
/// textual form in YAML (see ScalarTraits<SerializedAffineMap> below).
struct SerializedAffineMap {
  AffineMapAttr affineMapAttr;

  /// Convenience accessor for the underlying AffineMap.
  AffineMap affineMap() { return affineMapAttr.getValue(); }
};
|
|
|
|
|
2021-06-15 21:32:12 +08:00
|
|
|
/// Whether a named operand is consumed (input) or produced (output) by the op.
enum class LinalgOperandDefUsage { input, output };
|
2021-02-21 14:46:27 +08:00
|
|
|
|
2021-06-15 21:32:12 +08:00
|
|
|
/// A named tensor- or scalar-argument of an op. Field semantics are
/// documented on MappingTraits<LinalgOperandDef> below.
struct LinalgOperandDef {
  std::string name;
  LinalgOperandDefUsage usage;
  // Optional: only tensor-arguments carry a shape map.
  Optional<SerializedAffineMap> shape;
  std::string typeVar;
};
|
|
|
|
|
|
|
|
/// YAML mirror of the linalg iterator types.
enum class LinalgIteratorTypeDef {
  parallel,
  reduction,
};
|
|
|
|
|
|
|
|
/// How the op's indexing maps are produced. Currently only a static list of
/// maps is supported (see MappingTraits<LinalgIndexingMapsConfig>).
struct LinalgIndexingMapsConfig {
  Optional<SmallVector<SerializedAffineMap>> staticIndexingMaps;
};
|
|
|
|
|
|
|
|
// Forward-declared: ScalarApply/ScalarSymbolicCast and ScalarExpression are
// mutually recursive.
struct ScalarExpression;

/// Application of a named function to a list of operand expressions
/// (maps to the `scalar_apply` YAML key).
struct ScalarApply {
  std::string fnName;
  // NOTE: Must be pure heap allocated container (not SmallVector)
  // due to recursive data type.
  std::vector<ScalarExpression> operands;
};
|
|
|
|
|
[mlir][linalg] Add symbolic type conversion to linalg named ops.
This enables this kind of construct in the DSL to generate a named op that is polymorphic over numeric type variables `T` and `U`, generating the correct arithmetic casts at construction time:
```
@tc_def_op
def polymorphic_matmul(A=TensorDef(T1, S.M, S.K),
B=TensorDef(T2, S.K, S.N),
C=TensorDef(U, S.M, S.N, output=True)):
implements(ContractionOpInterface)
C[D.m, D.n] += cast(U, A[D.m, D.k]) * cast(U, B[D.k, D.n])
```
Presently, this only supports type variables that are bound to the element type of one of the arguments, although a further extension that allows binding a type variable to an attribute would allow some more expressiveness and may be useful for some formulations. This is left to a future patch. In addition, this patch does not yet materialize the verifier support which ensures that types are bound correctly (for such simple examples, failing to do so will yield IR that fails verification, it just won't yet fail with a precise error).
Note that the full grid of extensions/truncation/int<->float conversions are supported, but many of them are lossy and higher level code needs to be mindful of numerics (it is not the job of this level).
As-is, this should be sufficient for most integer matmul scenarios we work with in typical quantization schemes.
Differential Revision: https://reviews.llvm.org/D97603
2021-02-27 10:01:15 +08:00
|
|
|
/// Cast of an operand expression to the type bound to `typeVar`
/// (maps to the `symbolic_cast` YAML key).
struct ScalarSymbolicCast {
  std::string typeVar;
  // NOTE: This must be of arity 1, but to break the self-referential cycle,
  // we use a heap allocated vector.
  std::vector<ScalarExpression> operands;
};
|
|
|
|
|
2021-02-21 14:46:27 +08:00
|
|
|
/// Tagged-union-style expression node: exactly one of the optional fields is
/// expected to be present (see MappingTraits<ScalarExpression> for the
/// corresponding YAML keys).
struct ScalarExpression {
  Optional<std::string> arg;     // YAML key `scalar_arg`.
  Optional<std::string> constant; // YAML key `scalar_const`.
  Optional<int64_t> index;        // YAML key `scalar_index`.
  Optional<ScalarApply> apply;    // YAML key `scalar_apply`.
  Optional<ScalarSymbolicCast> symbolicCast; // YAML key `symbolic_cast`.
};
|
|
|
|
|
|
|
|
/// Assignment of the scalar expression `value` to the output named `arg`.
struct ScalarAssign {
  std::string arg;
  ScalarExpression value;
};
|
|
|
|
|
|
|
|
/// Payload of a `structured_op` document. Field semantics are documented on
/// MappingTraits<LinalgStructuredOpConfig> below.
struct LinalgStructuredOpConfig {
  SmallVector<LinalgOperandDef> args;
  LinalgIndexingMapsConfig indexingMaps;
  SmallVector<LinalgIteratorTypeDef> iteratorTypes;
  std::vector<ScalarAssign> assignments;
};
|
|
|
|
|
|
|
|
/// One YAML document: optional metadata plus an optional concrete op payload
/// (currently only `structured_op`).
struct LinalgOpConfig {
  Optional<LinalgOpMetadata> metadata;
  Optional<LinalgStructuredOpConfig> structuredOp;
};
|
|
|
|
|
|
|
|
} // namespace
|
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// Mapping traits.
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2021-06-15 21:32:12 +08:00
|
|
|
LLVM_YAML_IS_SEQUENCE_VECTOR(LinalgOperandDef)
|
2021-03-02 05:48:22 +08:00
|
|
|
LLVM_YAML_IS_SEQUENCE_VECTOR(SerializedAffineMap)
|
|
|
|
LLVM_YAML_IS_SEQUENCE_VECTOR(LinalgIteratorTypeDef)
|
|
|
|
LLVM_YAML_IS_SEQUENCE_VECTOR(ScalarAssign)
|
|
|
|
LLVM_YAML_IS_SEQUENCE_VECTOR(ScalarExpression)
|
|
|
|
LLVM_YAML_IS_DOCUMENT_LIST_VECTOR(LinalgOpConfig)
|
2021-02-21 14:46:27 +08:00
|
|
|
|
|
|
|
namespace llvm {
|
|
|
|
namespace yaml {
|
|
|
|
|
|
|
|
/// Top-level type containing op metadata and one of a concrete op type.
|
|
|
|
/// Currently, the only defined op type is `structured_op` (maps to
|
|
|
|
/// `LinalgStructuredOpConfig`).
|
|
|
|
template <>
struct MappingTraits<LinalgOpConfig> {
  static void mapping(IO &io, LinalgOpConfig &info) {
    // Both keys are optional: a document may carry metadata, an op, or both.
    io.mapOptional("metadata", info.metadata);
    io.mapOptional("structured_op", info.structuredOp);
  }
};
|
|
|
|
|
|
|
|
/// A structured op models (at most) a single contraction by modeling
|
2021-06-15 21:32:12 +08:00
|
|
|
/// - A list of named arguments (`LinalgOperandDef`), which can be inputs or
|
|
|
|
/// outputs.
|
2021-02-21 14:46:27 +08:00
|
|
|
/// - List of indexing maps (see `LinalgIndexingMaps`).
|
|
|
|
/// - Iterator types (see `LinalgIteratorTypeDef`).
|
|
|
|
/// - List of scalar level assignment (see `ScalarAssign`).
|
|
|
|
template <>
struct MappingTraits<LinalgStructuredOpConfig> {
  static void mapping(IO &io, LinalgStructuredOpConfig &info) {
    // All keys are mandatory for a structured op definition.
    io.mapRequired("args", info.args);
    io.mapRequired("indexing_maps", info.indexingMaps);
    io.mapRequired("iterator_types", info.iteratorTypes);
    io.mapRequired("assignments", info.assignments);
  }
};
|
|
|
|
|
2021-06-15 21:32:12 +08:00
|
|
|
/// Maps a named tensor- or scalar-argument to an operation, consisting of:
|
2021-02-21 14:46:27 +08:00
|
|
|
/// - `name`: Must be unique within the operation.
|
|
|
|
/// - `usage`: How the argument is used (input, output, etc).
|
2021-06-15 21:32:12 +08:00
|
|
|
/// - `shape`: An optional AffineMap from all op symbols to the shape of the
|
|
|
|
/// argument. Only tensor-arguments have a shape. Each shape must be
|
|
|
|
/// normalized over the same list of symbols and have no dimension inputs.
|
|
|
|
/// - `type_var`: The symbolic type variable that binds to the element or self
|
|
|
|
/// type of the tensor- or scalar-argument, respectively.
|
2021-02-21 14:46:27 +08:00
|
|
|
template <>
struct MappingTraits<LinalgOperandDef> {
  static void mapping(IO &io, LinalgOperandDef &info) {
    io.mapRequired("name", info.name);
    io.mapRequired("usage", info.usage);
    // Optional: scalar-arguments have no shape.
    io.mapOptional("shape", info.shape);
    io.mapRequired("type_var", info.typeVar);
  }
};
|
|
|
|
|
|
|
|
/// Usage enum for a named argument.
|
|
|
|
template <>
struct ScalarEnumerationTraits<LinalgOperandDefUsage> {
  static void enumeration(IO &io, LinalgOperandDefUsage &value) {
    // Maps the YAML `usage` scalar to/from the enum.
    io.enumCase(value, "input", LinalgOperandDefUsage::input);
    io.enumCase(value, "output", LinalgOperandDefUsage::output);
  }
};
|
|
|
|
|
|
|
|
/// Iterator type enum.
|
|
|
|
template <>
struct ScalarEnumerationTraits<LinalgIteratorTypeDef> {
  static void enumeration(IO &io, LinalgIteratorTypeDef &value) {
    // Maps the YAML iterator-type scalar to/from the enum.
    io.enumCase(value, "parallel", LinalgIteratorTypeDef::parallel);
    io.enumCase(value, "reduction", LinalgIteratorTypeDef::reduction);
  }
};
|
|
|
|
|
|
|
|
/// Metadata about the op (name, C++ name, and documentation).
|
|
|
|
template <>
struct MappingTraits<LinalgOpMetadata> {
  static void mapping(IO &io, LinalgOpMetadata &info) {
    io.mapRequired("name", info.name);
    io.mapRequired("cpp_class_name", info.cppClassName);
    // Documentation and interface list are optional.
    io.mapOptional("doc", info.doc);
    io.mapOptional("implements", info.implements);
  }
};
|
|
|
|
|
|
|
|
/// How the ops indexing maps are produced. Must be one of:
|
|
|
|
/// - static_indexing_maps: A static list of AffineMaps, possibly with
|
|
|
|
/// some symbols that bind to attributes of the op. Each indexing map must
|
|
|
|
/// be normalized over the same list of dimensions, and its symbols must
|
|
|
|
/// match the symbols for argument shapes.
|
|
|
|
template <>
struct MappingTraits<LinalgIndexingMapsConfig> {
  static void mapping(IO &io, LinalgIndexingMapsConfig &info) {
    // Optional so that future, non-static map strategies can be added.
    io.mapOptional("static_indexing_maps", info.staticIndexingMaps);
  }
};
|
|
|
|
|
|
|
|
/// Models an assignment to a named output.
|
2021-06-15 21:32:12 +08:00
|
|
|
/// - The `arg` name must match a named output.
|
2021-02-21 14:46:27 +08:00
|
|
|
/// - The `value` is a scalar expression for computing the value to
|
|
|
|
/// assign (see `ScalarExpression`).
|
|
|
|
template <>
struct MappingTraits<ScalarAssign> {
  static void mapping(IO &io, ScalarAssign &info) {
    io.mapRequired("arg", info.arg);
    io.mapRequired("value", info.value);
  }
};
|
|
|
|
|
|
|
|
/// A scalar expression (RHS of an assignment). Must be one of:
|
|
|
|
/// - `scalar_arg`: Name of an argument to the op.
|
|
|
|
/// - `scalar_apply`: Result of evaluating a named function (see
|
|
|
|
/// `ScalarApply`).
|
[mlir][linalg] Add symbolic type conversion to linalg named ops.
This enables this kind of construct in the DSL to generate a named op that is polymorphic over numeric type variables `T` and `U`, generating the correct arithmetic casts at construction time:
```
@tc_def_op
def polymorphic_matmul(A=TensorDef(T1, S.M, S.K),
B=TensorDef(T2, S.K, S.N),
C=TensorDef(U, S.M, S.N, output=True)):
implements(ContractionOpInterface)
C[D.m, D.n] += cast(U, A[D.m, D.k]) * cast(U, B[D.k, D.n])
```
Presently, this only supports type variables that are bound to the element type of one of the arguments, although a further extension that allows binding a type variable to an attribute would allow some more expressiveness and may be useful for some formulations. This is left to a future patch. In addition, this patch does not yet materialize the verifier support which ensures that types are bound correctly (for such simple examples, failing to do so will yield IR that fails verification, it just won't yet fail with a precise error).
Note that the full grid of extensions/truncation/int<->float conversions are supported, but many of them are lossy and higher level code needs to be mindful of numerics (it is not the job of this level).
As-is, this should be sufficient for most integer matmul scenarios we work with in typical quantization schemes.
Differential Revision: https://reviews.llvm.org/D97603
2021-02-27 10:01:15 +08:00
|
|
|
/// - `symbolic_cast`: Cast to a symbolic TypeVar bound elsewhere.
|
2021-02-21 14:46:27 +08:00
|
|
|
template <>
struct MappingTraits<ScalarExpression> {
  static void mapping(IO &io, ScalarExpression &info) {
    // All keys are optional; exactly one variant is expected per expression.
    io.mapOptional("scalar_arg", info.arg);
    io.mapOptional("scalar_const", info.constant);
    io.mapOptional("scalar_index", info.index);
    io.mapOptional("scalar_apply", info.apply);
    io.mapOptional("symbolic_cast", info.symbolicCast);
  }
};
|
|
|
|
|
|
|
|
/// A scalar expression that evaluates a named function.
|
|
|
|
/// Functions are generally "math" level and type polymorphic. Builtin
|
|
|
|
/// functions include:
|
|
|
|
/// - `add(lhs, rhs)`
|
|
|
|
/// - `mul(lhs, rhs)`
|
|
|
|
template <>
struct MappingTraits<ScalarApply> {
  static void mapping(IO &io, ScalarApply &info) {
    io.mapRequired("fn_name", info.fnName);
    io.mapRequired("operands", info.operands);
  }
};
|
|
|
|
|
[mlir][linalg] Add symbolic type conversion to linalg named ops.
This enables this kind of construct in the DSL to generate a named op that is polymorphic over numeric type variables `T` and `U`, generating the correct arithmetic casts at construction time:
```
@tc_def_op
def polymorphic_matmul(A=TensorDef(T1, S.M, S.K),
B=TensorDef(T2, S.K, S.N),
C=TensorDef(U, S.M, S.N, output=True)):
implements(ContractionOpInterface)
C[D.m, D.n] += cast(U, A[D.m, D.k]) * cast(U, B[D.k, D.n])
```
Presently, this only supports type variables that are bound to the element type of one of the arguments, although a further extension that allows binding a type variable to an attribute would allow some more expressiveness and may be useful for some formulations. This is left to a future patch. In addition, this patch does not yet materialize the verifier support which ensures that types are bound correctly (for such simple examples, failing to do so will yield IR that fails verification, it just won't yet fail with a precise error).
Note that the full grid of extensions/truncation/int<->float conversions are supported, but many of them are lossy and higher level code needs to be mindful of numerics (it is not the job of this level).
As-is, this should be sufficient for most integer matmul scenarios we work with in typical quantization schemes.
Differential Revision: https://reviews.llvm.org/D97603
2021-02-27 10:01:15 +08:00
|
|
|
/// Cast to a symbolic type variable (see `ScalarSymbolicCast`).
template <>
struct MappingTraits<ScalarSymbolicCast> {
  static void mapping(IO &io, ScalarSymbolicCast &info) {
    io.mapRequired("type_var", info.typeVar);
    // NOTE(review): arity-1 is assumed by the data type but not enforced
    // here — malformed YAML with more operands parses successfully.
    io.mapRequired("operands", info.operands);
  }
};
|
|
|
|
|
2021-02-21 14:46:27 +08:00
|
|
|
/// Helper mapping which accesses an AffineMapAttr as a serialized string of
|
|
|
|
/// the same.
|
|
|
|
template <>
struct ScalarTraits<SerializedAffineMap> {
  /// Serializes by printing the attribute's textual form.
  static void output(const SerializedAffineMap &value, void *rawYamlContext,
                     raw_ostream &out) {
    assert(value.affineMapAttr);
    value.affineMapAttr.print(out);
  }
  /// Parses `scalar` back into an AffineMapAttr using the MLIRContext carried
  /// in the YAML context. Per ScalarTraits convention, returns an empty
  /// StringRef on success and an error message otherwise.
  static StringRef input(StringRef scalar, void *rawYamlContext,
                         SerializedAffineMap &value) {
    assert(rawYamlContext);
    auto *yamlContext = static_cast<LinalgYAMLContext *>(rawYamlContext);
    if (auto attr = mlir::parseAttribute(scalar, yamlContext->mlirContext)
                        .dyn_cast_or_null<AffineMapAttr>())
      value.affineMapAttr = attr;
    // Only report an error if parsing failed AND no valid attribute was
    // already present on `value`.
    else if (!value.affineMapAttr || !value.affineMapAttr.isa<AffineMapAttr>())
      return "could not parse as an affine map attribute";
    return StringRef();
  }
  // Affine map strings contain no YAML-special characters needing quotes.
  static QuotingType mustQuote(StringRef) { return QuotingType::None; }
};
|
|
|
|
|
|
|
|
} // namespace yaml
|
|
|
|
} // namespace llvm
|
|
|
|
|
|
|
|
namespace {
|
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// Generation utilities
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
/// Carries the state needed by the generators: the MLIRContext, a location
/// for diagnostics, and the (possibly null) ODS / C++-definition output
/// streams. A null stream means that kind of output is disabled.
class GenerationContext {
public:
  GenerationContext(MLIRContext *context, raw_ostream *odsOut,
                    raw_ostream *defnOut)
      : context(context), loc(UnknownLoc::get(context)), odsOut(odsOut),
        defnOut(defnOut) {}

  MLIRContext *getContext() { return context; }

  // Location used when emitting diagnostics; starts as UnknownLoc.
  void setLoc(Location loc) { this->loc = loc; }
  Location getLoc() { return loc; }

  // Generation of each output kind is enabled iff its stream is non-null.
  bool shouldGenerateOds() { return odsOut; }
  bool shouldGenerateDefns() { return defnOut; }

  /// ODS output stream; only valid when shouldGenerateOds() is true.
  raw_ostream &odss() {
    assert(odsOut && "ODS stream not defined");
    return *odsOut;
  }

  /// C++ definition output stream; only valid when shouldGenerateDefns().
  raw_ostream &defns() {
    assert(defnOut && "Definition stream not defined");
    return *defnOut;
  }

private:
  MLIRContext *context;
  Location loc;
  raw_ostream *odsOut;
  raw_ostream *defnOut;
};
|
|
|
|
|
|
|
|
} // namespace
|
|
|
|
|
|
|
|
/// Emits a C++ expression that re-parses `self`'s affine map attribute at
/// runtime from its printed string form, using the MLIRContext named by
/// `contextName` in the generated code.
static std::string generateCppExpression(SerializedAffineMap self,
                                         StringRef contextName) {
  // Render the attribute to its textual form first.
  std::string attrText;
  {
    llvm::raw_string_ostream os(attrText);
    self.affineMapAttr.print(os);
    os.flush();
  }

  return llvm::formatv(
      R"FMT(mlir::parseAttribute("{0}", {1}).cast<AffineMapAttr>().getValue())FMT",
      attrText, contextName);
}
|
|
|
|
|
|
|
|
template <typename Container>
|
|
|
|
static std::string interleaveToString(Container &container,
|
|
|
|
StringRef separator) {
|
|
|
|
std::string result;
|
|
|
|
llvm::raw_string_ostream ss(result);
|
|
|
|
llvm::interleave(container, ss, separator);
|
|
|
|
ss.flush();
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Returns the position of the argument named `name` in `args`, or None if
/// no argument has that name.
static Optional<int>
findTensorDefArgIndex(StringRef name, SmallVectorImpl<LinalgOperandDef> &args) {
  for (int i = 0, e = args.size(); i < e; ++i) {
    if (args[i].name == name)
      return i;
  }
  return None;
}
|
|
|
|
|
2021-05-19 21:10:28 +08:00
|
|
|
// Try to map the TypeVar to a predefined or an argument type.
|
|
|
|
static Optional<std::string>
|
2021-06-15 21:32:12 +08:00
|
|
|
findTypeValue(StringRef typeVar, SmallVectorImpl<LinalgOperandDef> &args) {
|
2021-05-19 21:10:28 +08:00
|
|
|
// Handle all predefined types.
|
|
|
|
if (typeVar == "I32")
|
|
|
|
return std::string("helper.getIntegerType(32)");
|
|
|
|
if (typeVar == "I64")
|
|
|
|
return std::string("helper.getIntegerType(64)");
|
|
|
|
if (typeVar == "F32")
|
|
|
|
return std::string("helper.getFloat32Type()");
|
|
|
|
if (typeVar == "F64")
|
|
|
|
return std::string("helper.getFloat64Type()");
|
|
|
|
|
|
|
|
// Search all argument types.
|
[mlir][linalg] Add symbolic type conversion to linalg named ops.
This enables this kind of construct in the DSL to generate a named op that is polymorphic over numeric type variables `T` and `U`, generating the correct arithmetic casts at construction time:
```
@tc_def_op
def polymorphic_matmul(A=TensorDef(T1, S.M, S.K),
B=TensorDef(T2, S.K, S.N),
C=TensorDef(U, S.M, S.N, output=True)):
implements(ContractionOpInterface)
C[D.m, D.n] += cast(U, A[D.m, D.k]) * cast(U, B[D.k, D.n])
```
Presently, this only supports type variables that are bound to the element type of one of the arguments, although a further extension that allows binding a type variable to an attribute would allow some more expressiveness and may be useful for some formulations. This is left to a future patch. In addition, this patch does not yet materialize the verifier support which ensures that types are bound correctly (for such simple examples, failing to do so will yield IR that fails verification, it just won't yet fail with a precise error).
Note that the full grid of extensions/truncation/int<->float conversions are supported, but many of them are lossy and higher level code needs to be mindful of numerics (it is not the job of this level).
As-is, this should be sufficient for most integer matmul scenarios we work with in typical quantization schemes.
Differential Revision: https://reviews.llvm.org/D97603
2021-02-27 10:01:15 +08:00
|
|
|
for (auto it : llvm::enumerate(args)) {
|
2021-06-15 21:32:12 +08:00
|
|
|
if (it.value().typeVar == typeVar)
|
2021-05-19 21:10:28 +08:00
|
|
|
return llvm::formatv("block.getArgument({0}).getType()", it.index())
|
|
|
|
.str();
|
[mlir][linalg] Add symbolic type conversion to linalg named ops.
This enables this kind of construct in the DSL to generate a named op that is polymorphic over numeric type variables `T` and `U`, generating the correct arithmetic casts at construction time:
```
@tc_def_op
def polymorphic_matmul(A=TensorDef(T1, S.M, S.K),
B=TensorDef(T2, S.K, S.N),
C=TensorDef(U, S.M, S.N, output=True)):
implements(ContractionOpInterface)
C[D.m, D.n] += cast(U, A[D.m, D.k]) * cast(U, B[D.k, D.n])
```
Presently, this only supports type variables that are bound to the element type of one of the arguments, although a further extension that allows binding a type variable to an attribute would allow some more expressiveness and may be useful for some formulations. This is left to a future patch. In addition, this patch does not yet materialize the verifier support which ensures that types are bound correctly (for such simple examples, failing to do so will yield IR that fails verification, it just won't yet fail with a precise error).
Note that the full grid of extensions/truncation/int<->float conversions are supported, but many of them are lossy and higher level code needs to be mindful of numerics (it is not the job of this level).
As-is, this should be sufficient for most integer matmul scenarios we work with in typical quantization schemes.
Differential Revision: https://reviews.llvm.org/D97603
2021-02-27 10:01:15 +08:00
|
|
|
}
|
2021-05-19 21:10:28 +08:00
|
|
|
|
[mlir][linalg] Add symbolic type conversion to linalg named ops.
This enables this kind of construct in the DSL to generate a named op that is polymorphic over numeric type variables `T` and `U`, generating the correct arithmetic casts at construction time:
```
@tc_def_op
def polymorphic_matmul(A=TensorDef(T1, S.M, S.K),
B=TensorDef(T2, S.K, S.N),
C=TensorDef(U, S.M, S.N, output=True)):
implements(ContractionOpInterface)
C[D.m, D.n] += cast(U, A[D.m, D.k]) * cast(U, B[D.k, D.n])
```
Presently, this only supports type variables that are bound to the element type of one of the arguments, although a further extension that allows binding a type variable to an attribute would allow some more expressiveness and may be useful for some formulations. This is left to a future patch. In addition, this patch does not yet materialize the verifier support which ensures that types are bound correctly (for such simple examples, failing to do so will yield IR that fails verification, it just won't yet fail with a precise error).
Note that the full grid of extensions/truncation/int<->float conversions are supported, but many of them are lossy and higher level code needs to be mindful of numerics (it is not the job of this level).
As-is, this should be sufficient for most integer matmul scenarios we work with in typical quantization schemes.
Differential Revision: https://reviews.llvm.org/D97603
2021-02-27 10:01:15 +08:00
|
|
|
return None;
|
|
|
|
}
|
|
|
|
|
2021-06-15 21:32:12 +08:00
|
|
|
/// Returns the assignment whose target output is `name`, or nullptr if no
/// such assignment exists.
static ScalarAssign *findAssignment(StringRef name,
                                    std::vector<ScalarAssign> &assignments) {
  for (size_t i = 0; i < assignments.size(); ++i) {
    if (assignments[i].arg == name)
      return &assignments[i];
  }
  return nullptr;
}
|
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// Templates
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
// A single-line section banner, used to delimit generated output sections.
// Parameters:
// {0}: Single line comment
static const char bannerFormat[] = R"FMT(
//===----------------------------------------------------------------------===//
// {0}
//===----------------------------------------------------------------------===//
)FMT";
|
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
// Named generic op generation.
// These ops map at most a single contraction that complies with the limitations
// of a linalg.generic.
//===----------------------------------------------------------------------===//

// Template for Linalg named ops' ODS definitions. Parameters:
// {0}: ODS/C++ op name
// {1}: assembly op mnemonic
// {2}: op interface list
// {3}: documentation (summary + description)
// {4}: op attribute list
// {5}: the number of arguments for the op region
// {6}: builder methods taking standalone attribute parameters
// {7}: additional methods for attributes used by indexing maps
// Note: TableGen's own substitution uses `{...}` too, hence the doubled
// `{{` / `[{{` escapes wherever a literal brace must survive llvm::formatv.
static const char structuredOpOdsHeaderFormat[] = R"FMT(
//===----------------------------------------------------------------------===//
// Op definition for {0}
//===----------------------------------------------------------------------===//

def {0} : LinalgStructuredBase_Op<"{1}", !listconcat([
  AttrSizedOperandSegments,
  DeclareOpInterfaceMethods<MemoryEffectsOpInterface>,
  SingleBlockImplicitTerminator<"YieldOp">],
  /*extraInterfaces=*/[{2}])> {
    {3}
    let arguments = (ins
      Variadic<AnyType>:$inputs,
      Variadic<AnyShaped>:$outputs{4}
    );
    let results = (outs Variadic<AnyRankedTensor>:$result_tensors);
    let regions = (region AnyRegion:$region);

    let skipDefaultBuilders = 1;
    let builders = [
      OpBuilder<
      (ins "ValueRange":$inputs, "ValueRange":$outputs),
      [{{
        $_state.addOperands(inputs);
        $_state.addOperands(outputs);
        $_state.addAttribute(
          "operand_segment_sizes",
          $_builder.getI32VectorAttr({{
            static_cast<int32_t>(inputs.size()),
            static_cast<int32_t>(outputs.size())}));
        createAndFillStructuredOpRegion<{0}>(
          $_builder,
          $_state,
          TypeRange(inputs),
          TypeRange(outputs));
      }]>,
      OpBuilder<
      (ins "TypeRange":$resultTensorTypes, "ValueRange":$inputs,
           "ValueRange":$outputs),
      [{{
        $_state.addOperands(inputs);
        $_state.addOperands(outputs);
        $_state.addTypes(resultTensorTypes);
        $_state.addAttribute(
          "operand_segment_sizes",
          $_builder.getI32VectorAttr({{
            static_cast<int32_t>(inputs.size()),
            static_cast<int32_t>(outputs.size())}));
        createAndFillStructuredOpRegion<{0}>(
          $_builder,
          $_state,
          TypeRange(inputs),
          TypeRange(outputs));
      }]>,
      OpBuilder<
      (ins "TypeRange":$resultTensorTypes, "ValueRange":$operands,
           CArg<"ArrayRef<NamedAttribute>", "{{}">:$attributes),
      [{{
        $_state.addOperands(operands);
        $_state.addAttributes(attributes);
        $_state.addTypes(resultTensorTypes);
        (void)$_state.addRegion();
      }]>
      {6}
    ];
    let printer = [{{ return ::printNamedStructuredOp(p, *this); }];
    let parser = [{{
      return ::parseNamedStructuredOp<{0}>(parser, result);
    }];
    let hasFolder = 1;

    let extraClassDeclaration = structuredOpsBaseDecls # [{{
      // Auto-generated.
      ArrayAttr iterator_types();
      ArrayAttr indexing_maps();
      static void regionBuilder(
        ImplicitLocOpBuilder &b, Block &block, ValueRange captures);
      static std::function<
        void(ImplicitLocOpBuilder &b, Block &, ValueRange)>
      getRegionBuilder() {{
        return regionBuilder;
      }

      // Generic methods.
      static unsigned getNumRegionArgs();
      std::string getLibraryCallName();
      {7}
    }];
}
)FMT";
|
|
|
|
|
|
|
|
// The iterator_types() method implementation. Parameters:
// {0}: Class name
// {1}: Comma interleaved iterator type names.
// The generated body rebuilds the StrArrayAttr on every call; callers are
// expected to be tolerant of that cost.
static const char structuredOpIteratorTypesFormat[] =
    R"FMT(
ArrayAttr {0}::iterator_types() {
  return Builder(getContext()).getStrArrayAttr(SmallVector<StringRef>{{ {1} });
}
)FMT";
|
|
|
|
|
2021-06-15 02:09:43 +08:00
|
|
|
// Implementations of fold and getEffects.
// Parameters:
// {0}: Class name
// Declared `static` for consistency with the other format-string constants in
// this file (bannerFormat, structuredOpOdsHeaderFormat, ...).
static const char structuredOpFoldersFormat[] = R"FMT(
LogicalResult {0}::fold(ArrayRef<Attribute>,
                        SmallVectorImpl<OpFoldResult> &) {{
  return foldMemRefCast(*this);
}
void {0}::getEffects(SmallVectorImpl<
    SideEffects::EffectInstance<MemoryEffects::Effect> >&effects) {{
  SmallVector<Value> inputBuffers = getInputBufferOperands();
  SmallVector<Value> outputBuffers = getOutputBufferOperands();
  getGenericEffectsImpl(effects,
    getOperation()->getResults(), inputBuffers, outputBuffers);
}
)FMT";
|
|
|
|
|
|
|
|
/// Emits the ODS (TableGen) definition for one structured op into the ODS
/// output stream. A context without an ODS stream is not an error: the
/// function simply has nothing to do and returns success.
static LogicalResult generateNamedGenericOpOds(LinalgOpConfig &opConfig,
                                               GenerationContext &genContext) {
  if (!genContext.shouldGenerateOds())
    return success();

  raw_ostream &os = genContext.odss();

  // Attribute support is not wired through yet, so all attribute-related
  // template parameters expand to the empty string.
  std::string attrList;
  std::string attrMethods;
  std::string attrBuilder;

  // Render the optional documentation: the first line of the doc metadata
  // becomes the op summary, the remainder becomes the description.
  std::string doc;
  if (opConfig.metadata->doc) {
    const char *docFmt = R"FMT(
  let summary = [{ {0} }];
  let description = [{
    {1}
  }];
)FMT";
    StringRef trimmedDoc = StringRef(*opConfig.metadata->doc).trim();
    auto summaryAndDesc = trimmedDoc.split('\n');
    doc = llvm::formatv(docFmt, summaryAndDesc.first.trim(),
                        summaryAndDesc.second.trim());
  }

  std::string interfaceNameList =
      interleaveToString(opConfig.metadata->implements, ", ");

  os << llvm::formatv(
      structuredOpOdsHeaderFormat, opConfig.metadata->cppClassName,
      opConfig.metadata->name, interfaceNameList, doc, attrList,
      opConfig.structuredOp->args.size(), attrBuilder, attrMethods);

  return success();
}
|
|
|
|
|
|
|
|
/// Emits the C++ method implementations for one structured op into the C++
/// output stream: iterator_types(), getSymbolBindings(), indexing_maps(),
/// getNumRegionArgs(), getLibraryCallName(), regionBuilder(), and the
/// fold/getEffects boilerplate. A context without a C++ stream is not an
/// error; the function returns success without emitting anything.
static LogicalResult
generateNamedGenericOpDefns(LinalgOpConfig &opConfig,
                            GenerationContext &genContext) {
  if (!genContext.shouldGenerateDefns())
    return success();

  raw_ostream &os = genContext.defns();
  StringRef className = opConfig.metadata->cppClassName;

  // Implementation banner.
  std::string bannerComment = llvm::formatv("Implementation of {0}", className);
  os << llvm::formatv(bannerFormat, bannerComment);

  // Reference iterators: map each YAML iterator kind to the corresponding
  // accessor call emitted into the generated iterator_types() body.
  {
    std::string iteratorsStr;
    llvm::raw_string_ostream ss(iteratorsStr);
    llvm::interleaveComma(opConfig.structuredOp->iteratorTypes, ss,
                          [&](LinalgIteratorTypeDef it) {
                            switch (it) {
                            case LinalgIteratorTypeDef::parallel:
                              ss << "getParallelIteratorTypeName()";
                              break;
                            case LinalgIteratorTypeDef::reduction:
                              ss << "getReductionIteratorTypeName()";
                              break;
                            }
                          });
    ss.flush();
    os << llvm::formatv(structuredOpIteratorTypesFormat, className,
                        iteratorsStr);
  }

  // Static indexing maps. Only statically-known maps are supported; the
  // dynamic case is rejected below.
  if (auto &staticMaps =
          opConfig.structuredOp->indexingMaps.staticIndexingMaps) {
    if (staticMaps->empty())
      return emitError(genContext.getLoc()) << "op has no indexing maps";
    // The first map is used as the reference for dim/symbol counts.
    AffineMap firstMap = staticMaps->front().affineMap();

    // Symbol bindings.
    {
      // For each symbol, generate a declaration for it, either with an
      // AffineSymbolExpr or an AffineConstantExpr (if the symbol derives from
      // an attribute).
      // TODO: Implement attribute constants.
      // TODO: Possibly lift into a top-level method.
      static const char structuredOpSymbolBindingsFormat[] = R"FMT(
static SmallVector<AffineExpr> getSymbolBindings({0} self) {
  MLIRContext *context = self.getContext();
  SmallVector<AffineExpr> exprs;
{1}
  return exprs;
}
)FMT";

      unsigned symbolCount = firstMap.getNumSymbols();
      SmallVector<std::string> symbolBindings;
      for (unsigned i = 0; i < symbolCount; ++i) {
        // TODO: Switch and emit constants for attribute bound symbols.
        symbolBindings.push_back(llvm::formatv(
            "  exprs.push_back(getAffineSymbolExpr({0}, context));", i));
      }
      std::string symbolBindingsStr;
      llvm::raw_string_ostream symbolBindingsSs(symbolBindingsStr);
      llvm::interleave(symbolBindings, symbolBindingsSs, "\n");
      symbolBindingsSs.flush();

      os << llvm::formatv(structuredOpSymbolBindingsFormat, className,
                          symbolBindingsStr);
    }

    // Indexing maps.
    {
      // Parameters:
      // {0}: Class name
      // {1}: Comma-separated list of dimension variable names.
      // {2}: Statements
      // NOTE(review): the template below references only {0} and {2}; the
      // dim-identifier list passed as {1} is currently unused — confirm
      // whether it is kept intentionally for future bindDims-based codegen.
      static const char structuredOpIndexingMapsFormat[] = R"FMT(
ArrayAttr {0}::indexing_maps() {
  static const char memoizeAttr[] = "linalg.memoized_indexing_maps";
  ArrayAttr cached = getOperation()->getAttrOfType<ArrayAttr>(memoizeAttr);
  if (cached)
    return cached;

  MLIRContext *context = getContext();
  auto symbolBindings = getSymbolBindings(*this);
  SmallVector<AffineMap> maps;
{2}
  cached = Builder(context).getAffineMapArrayAttr(maps);
  getOperation()->setAttr(memoizeAttr, cached);
  return cached;
}
)FMT";

      unsigned dimCount = firstMap.getNumDims();

      // Generate a comma-separated list of dim identifiers to be passed to
      // bindDims, ensuring that AffineExpr identifiers are bound in the right
      // order to the proper AffineDimExpr.
      // This results in vars in scope like: d0, d1, d2...
      SmallVector<unsigned> dimIndices;
      for (unsigned i = 0; i < dimCount; ++i)
        dimIndices.push_back(i);
      std::string dimIdentsStr;
      llvm::raw_string_ostream dimIdentsSs(dimIdentsStr);
      llvm::interleaveComma(dimIndices, dimIdentsSs,
                            [&](unsigned i) { dimIdentsSs << "d" << i; });
      dimIdentsSs.flush();

      // Statements to add and simplify each affine map. Each map is first
      // parsed from its serialized form, then its symbols are substituted
      // with the bindings generated above and the result simplified.
      SmallVector<std::string> stmts;
      for (auto &indexingMap : *staticMaps) {
        // TODO: Assert that dim and symbol count match the first.
        stmts.push_back(
            llvm::formatv("maps.push_back({0});",
                          generateCppExpression(indexingMap, "context")));
        stmts.push_back(llvm::formatv(
            "maps.back() = "
            "simplifyAffineMap(maps.back().replaceDimsAndSymbols({{}, "
            "symbolBindings, {0}, 0));",
            dimCount));
      }

      // TODO: This needs to be memoized and/or converted to non-parser based
      // C++ codegen prior to real use.
      os << llvm::formatv(structuredOpIndexingMapsFormat, className,
                          dimIdentsStr, interleaveToString(stmts, "\n  "));
    }
  } else {
    return emitError(genContext.getLoc())
           << "generating code for non static indexing maps not currently "
              "supported";
  }

  // getNumRegionArgs()
  {
    // Generates a getNumRegionArgs() method. Parameters:
    // {0}: Class name
    // {1}: Number of region args
    static const char structuredOpGetNumRegionArgsFormat[] = R"FMT(
unsigned {0}::getNumRegionArgs() {{ return {1}; }
)FMT";
    os << llvm::formatv(structuredOpGetNumRegionArgsFormat, className,
                        opConfig.structuredOp->args.size());
  }

  // getLibraryCallName()
  {
    // Generates a getLibraryCallName method. Parameters:
    // {0}: Class name
    static const char structuredOpGetLibraryCallFormat[] = R"FMT(
std::string {0}::getLibraryCallName() {{
  return generateLibraryCallName(getOperation());
}
)FMT";
    os << llvm::formatv(structuredOpGetLibraryCallFormat, className);
  }

  // regionBuilder()
  {
    // Generates a regionBuilder method. Parameters.
    // {0}: Class name
    // {1}: Number of args
    // {2}: Statements
    // NOTE(review): the generated assert checks `{1} > 0` while its message
    // says "(>=0)" — confirm which bound is intended.
    static const char structuredOpRegionBuilderFormat[] = R"FMT(
void {0}::regionBuilder(
    ImplicitLocOpBuilder &b, Block &block, ValueRange captures) {{
  assert({1} > 0 && block.getNumArguments() == {1} &&
         "{0} regionBuilder expects {1} (>=0) args");
  RegionBuilderHelper helper(block.getArgument(0).getContext(), block);
  SmallVector<Value> yields;
  {2}
  helper.yieldOutputs(yields);
}
)FMT";
    auto &args = opConfig.structuredOp->args;
    auto &assignments = opConfig.structuredOp->assignments;
    size_t generatedAssignmentCount = 0;
    // Monotone counter used to mint unique C++ locals (value1, value2, ...)
    // in the generated region body.
    int localCounter = 0;
    SmallVector<std::string> stmts;
    for (LinalgOperandDef &arg : args) {
      if (arg.usage != LinalgOperandDefUsage::output)
        continue;

      // Find the assignment that correlates with the argument.
      ScalarAssign *assignment = findAssignment(arg.name, assignments);
      if (!assignment)
        return emitError(genContext.getLoc())
               << "no assignment found for output argument " << arg.name;
      ++generatedAssignmentCount;

      // Recursively generate the expression. The lambda appends statements to
      // `stmts` as a side effect and returns the C++ identifier holding the
      // expression's value, or None after emitting a diagnostic on error.
      std::function<Optional<std::string>(ScalarExpression &)>
          generateExpression =
              [&](ScalarExpression &expression) -> Optional<std::string> {
        if (expression.arg) {
          // Argument reference.
          Optional<int> argIndex = findTensorDefArgIndex(*expression.arg, args);
          if (!argIndex) {
            emitError(genContext.getLoc())
                << "scalar argument not defined on the op: " << *expression.arg;
            return None;
          }
          return std::string(
              llvm::formatv("block.getArgument({0})", *argIndex));
        }
        if (expression.constant) {
          std::string cppIdent = llvm::formatv("value{0}", ++localCounter);
          stmts.push_back(
              llvm::formatv(R"FMT(Value {0} = helper.constant("{1}");)FMT",
                            cppIdent, expression.constant));
          return cppIdent;
        }
        if (expression.index) {
          // Access an iteration index.
          std::string cppIdent = llvm::formatv("value{0}", ++localCounter);
          stmts.push_back(llvm::formatv("Value {0} = helper.index({1});",
                                        cppIdent, *expression.index));
          return cppIdent;
        }
        if (expression.apply) {
          // Apply function.
          // Recursively generate operands.
          SmallVector<std::string> operandCppValues;
          for (ScalarExpression &operand : expression.apply->operands) {
            auto operandCppValue = generateExpression(operand);
            if (!operandCppValue)
              return None;
            operandCppValues.push_back(*operandCppValue);
          }
          std::string cppIdent = llvm::formatv("value{0}", ++localCounter);
          stmts.push_back(
              llvm::formatv("Value {0} = helper.applyfn__{1}({2});", cppIdent,
                            expression.apply->fnName,
                            interleaveToString(operandCppValues, ", ")));
          return cppIdent;
        }
        if (expression.symbolicCast) {
          // Symbolic cast.
          // Operands must be arity 1.
          if (expression.symbolicCast->operands.size() != 1) {
            emitError(genContext.getLoc())
                << "symbolic_cast operand arity must be 1";
            return None;
          }
          Optional<std::string> operandCppValue =
              generateExpression(expression.symbolicCast->operands[0]);
          if (!operandCppValue)
            return None;

          // The cast's type variable must resolve to a predefined type or the
          // element type of one of the op's arguments.
          Optional<std::string> typeCppValue =
              findTypeValue(expression.symbolicCast->typeVar, args);
          if (!typeCppValue) {
            emitError(genContext.getLoc())
                << "type variable " << expression.symbolicCast->typeVar
                << ", used in a symbolic cast must map to a predefined or "
                << "an argument type but it does not";
            return None;
          }
          std::string cppIdent = llvm::formatv("value{0}", ++localCounter);
          stmts.push_back(llvm::formatv("Value {0} = helper.cast({1}, {2});",
                                        cppIdent, typeCppValue.getValue(),
                                        *operandCppValue));
          return cppIdent;
        }
        emitError(genContext.getLoc()) << "unknown ScalarExpression type";
        return None;
      };
      Optional<std::string> cppValue = generateExpression(assignment->value);
      if (!cppValue)
        return failure();
      stmts.push_back(llvm::formatv("yields.push_back({0});", cppValue));
    }

    // Every assignment must have been consumed by exactly one output
    // argument; a mismatch indicates a malformed op definition.
    if (generatedAssignmentCount != assignments.size())
      return emitError(genContext.getLoc())
             << "mismatched number of assignments vs output arguments";

    int64_t numOfArgs = args.size();
    os << llvm::formatv(structuredOpRegionBuilderFormat, className, numOfArgs,
                        interleaveToString(stmts, "\n  "));
  }

  // Canonicalizers and folders.
  os << llvm::formatv(structuredOpFoldersFormat, className);

  return success();
}
|
|
|
|
|
|
|
|
/// Dispatches generation for a single op configuration. Only structured ops
/// are currently supported; anything else is rejected with a diagnostic.
/// ODS generation runs first and, as in the combined form, a failure there
/// short-circuits C++ generation.
static LogicalResult generateOp(LinalgOpConfig &opConfig,
                                GenerationContext &genContext) {
  // Switch on op type being generated.
  if (!opConfig.structuredOp)
    return emitError(genContext.getLoc()) << "unsupported operation type";
  if (failed(generateNamedGenericOpOds(opConfig, genContext)))
    return failure();
  return generateNamedGenericOpDefns(opConfig, genContext);
}
|
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
// Command line options and main
//===----------------------------------------------------------------------===//

// Positional YAML input file; defaults to "-" (read from stdin).
static llvm::cl::opt<std::string>
    inputFilename(llvm::cl::Positional, llvm::cl::desc("<input file>"),
                  llvm::cl::init("-"), llvm::cl::value_desc("YAML filename"));

// Output path for the generated ODS (.td) declarations; empty disables ODS
// output.
static llvm::cl::opt<std::string>
    outputOdsDeclFilename("o-ods-decl", llvm::cl::desc("ODS output filename"),
                          llvm::cl::value_desc("filename"), llvm::cl::init(""));

// Output path for the generated C++ implementations; empty disables C++
// output.
static llvm::cl::opt<std::string>
    outputCppImplFilename("o-impl",
                          llvm::cl::desc("C++ implementation file name"),
                          llvm::cl::value_desc("filename"), llvm::cl::init(""));
|
|
|
|
|
|
|
|
int main(int argc, char **argv) {
|
|
|
|
llvm::cl::ParseCommandLineOptions(argc, argv, "Linalg ODS Gen from YAML");
|
|
|
|
|
|
|
|
// Set up the input file.
|
|
|
|
std::string errorMessage;
|
|
|
|
std::unique_ptr<llvm::MemoryBuffer> file =
|
|
|
|
mlir::openInputFile(inputFilename, &errorMessage);
|
|
|
|
if (!file) {
|
|
|
|
llvm::errs() << errorMessage << "\n";
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
MLIRContext mlirContext;
|
|
|
|
LinalgYAMLContext yamlContext{&mlirContext};
|
|
|
|
|
|
|
|
std::vector<LinalgOpConfig> opConfigs;
|
|
|
|
|
|
|
|
// Parse input.
|
|
|
|
Input yin(file->getBuffer(), &yamlContext);
|
|
|
|
yin >> opConfigs;
|
|
|
|
|
|
|
|
if (yin.error())
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
// Open output files.
|
|
|
|
std::unique_ptr<llvm::ToolOutputFile> outputOdsDecl;
|
|
|
|
if (!outputOdsDeclFilename.empty()) {
|
|
|
|
outputOdsDecl = openOutputFile(outputOdsDeclFilename, &errorMessage);
|
|
|
|
if (!outputOdsDecl) {
|
|
|
|
llvm::errs() << errorMessage << "\n";
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
std::unique_ptr<llvm::ToolOutputFile> outputCppImpl;
|
|
|
|
if (!outputCppImplFilename.empty()) {
|
|
|
|
outputCppImpl = openOutputFile(outputCppImplFilename, &errorMessage);
|
|
|
|
if (!outputCppImpl) {
|
|
|
|
llvm::errs() << errorMessage << "\n";
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!outputOdsDecl && !outputCppImpl) {
|
|
|
|
llvm::errs() << "error: No output files specified\n";
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Generate.
|
|
|
|
GenerationContext genContext(&mlirContext,
|
|
|
|
outputOdsDecl ? &outputOdsDecl->os() : nullptr,
|
|
|
|
outputCppImpl ? &outputCppImpl->os() : nullptr);
|
|
|
|
|
|
|
|
for (auto &opConfig : opConfigs) {
|
|
|
|
if (!opConfig.metadata) {
|
|
|
|
emitError(genContext.getLoc())
|
|
|
|
<< "missing operation metadata on subsequent op";
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
genContext.setLoc(NameLoc::get(
|
2021-05-19 21:10:28 +08:00
|
|
|
Identifier::get(opConfig.metadata->cppClassName, &mlirContext)));
|
2021-02-21 14:46:27 +08:00
|
|
|
if (failed(generateOp(opConfig, genContext))) {
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (outputOdsDecl)
|
|
|
|
outputOdsDecl->keep();
|
|
|
|
if (outputCppImpl)
|
|
|
|
outputCppImpl->keep();
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|