[mlir][sparse] sparse tensor type encoding migration (new home, new builders)

(1) migrates the encoding from TensorDialect into the new SparseTensorDialect
(2) replaces dictionary-based storage and builders with struct-like data

Reviewed By: mehdi_amini

Differential Revision: https://reviews.llvm.org/D101669
commit 0a29219931
parent 8fc5f07fc0
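For orientation (not part of the diff): after this change the encoding lives in the new `sparse_tensor` dialect and is attached to a ranked tensor type as in the round-trip test added further down; the CSR example from that test is reproduced here as a quick reference.

    #CSR = #sparse_tensor.encoding<{
      dimLevelType = [ "dense", "compressed" ],
      dimOrdering = affine_map<(i,j) -> (i,j)>,
      pointerBitWidth = 64,
      indexBitWidth = 64
    }>

    func private @sparse_2d_tensor(tensor<?x?xf32, #CSR>)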
@@ -1,2 +1,7 @@
add_mlir_dialect(SparseTensorOps sparse_tensor)
add_mlir_doc(SparseTensorOps SparseTensorOps Dialects/ -gen-dialect-doc)

set(LLVM_TARGET_DEFINITIONS SparseTensorAttrDefs.td)
mlir_tablegen(SparseTensorAttrDefs.h.inc -gen-attrdef-decls)
mlir_tablegen(SparseTensorAttrDefs.cpp.inc -gen-attrdef-defs)
add_public_tablegen_target(MLIRSparseTensorAttrDefsIncGen)
@@ -13,8 +13,12 @@
#include "mlir/IR/Dialect.h"
#include "mlir/IR/OpDefinition.h"
#include "mlir/IR/OpImplementation.h"
#include "mlir/IR/TensorEncoding.h"
#include "mlir/Interfaces/SideEffectInterfaces.h"

#define GET_ATTRDEF_CLASSES
#include "mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.h.inc"

#define GET_OP_CLASSES
#include "mlir/Dialect/SparseTensor/IR/SparseTensorOps.h.inc"
@@ -1,4 +1,4 @@
//===-- TensorAttrDefs.td - Tensor Attributes Definitions --*- tablegen -*-===//
//===-- SparseTensorAttrDefs.td - attributes definitions ---*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -6,38 +6,66 @@
//
//===----------------------------------------------------------------------===//

#ifndef TENSOR_ATTRDEFS
#define TENSOR_ATTRDEFS
#ifndef SPARSETENSOR_ATTRDEFS
#define SPARSETENSOR_ATTRDEFS

include "mlir/Dialect/Tensor/IR/TensorBase.td"
include "mlir/Dialect/SparseTensor/IR/SparseTensorBase.td"
include "mlir/IR/TensorEncoding.td"

// All of the Tensor attributes will extend this class.
class Tensor_Attr<string name,
                  list<Trait> traits = []> : AttrDef<Tensor_Dialect, name, traits>;
class SparseTensor_Attr<string name,
                        list<Trait> traits = []>
    : AttrDef<SparseTensor_Dialect, name, traits>;

// Sparse tensor encoding attribute.
def SparseTensorEncodingAttr : Tensor_Attr<"SparseTensorEncoding",
def SparseTensorEncodingAttr : SparseTensor_Attr<"SparseTensorEncoding",
    [ DeclareAttrInterfaceMethods<VerifiableTensorEncoding> ] > {
  let mnemonic = "sparse";
  let mnemonic = "encoding";

  let description = [{
    An attribute to encode "TACO"-style information (see tensor-compiler.org)
    on the sparsity of tensors. The semantics are defined by means of the
    methods getDimLevelType(), getDimOrdering(), getPointerType(), and
    getIndexType(), documented below. The encoding is eventually used by
    a `sparse compiler` pass to generate sparse code fully automatically
    on sparsity properties of tensors. The encoding is eventually used by a
    `sparse compiler` pass to generate sparse code fully automatically
    for all tensor expressions that involve tensors with a sparse encoding.
    Compiler passes that run before this sparse compiler pass need to be
    aware of the semantics of tensor types with such an encoding.
  }];

  // All data is stored in a dictionary, interpreted by the methods below.
  // Data in sparse tensor encoding.
  let parameters = (
    ins
    "DictionaryAttr":$dict
    // A dimension level type for each dimension of a tensor type.
    // The choices are `dense` (dimension should be stored in its entirety),
    // `compressed` (only non-zero regions or elements should be stored),
    // or `singleton` (no sibling elements for parent).
    ArrayRefParameter<
      "SparseTensorEncodingAttr::DimLevelType",
      "Per-dimension level type"
    >: $dimLevelType,
    // A dimension order on the indices of this tensor type.
    // Unlike dense storage, most sparse storage schemes do not provide
    // fast random access. This affine map specifies the order of
    // dimensions that should be supported by the sparse storage scheme
    // (e.g. (i,j) -> (i,j) requests 2-d row-wise and (i,j) -> (j,i)
    // requests 2-d column-wise storage).
    // TODO: block structure with higher-dim inputs
    "AffineMap":$dimOrdering,
    // The required bit width for pointer storage. A narrow width reduces
    // the memory footprint of overhead storage, as long as the width
    // suffices to define the total required range (viz. the maximum
    // number of stored entries over all indirection dimensions). The choices
    // are `8`, `16`, `32`, `64`, or `0` for a native width.
    "unsigned":$pointerBitWidth,
    // The required bit width for index storage. A narrow width reduces
    // the memory footprint of overhead storage, as long as the width
    // suffices to define the total required range (viz. the maximum
    // value of each tensor index over all dimensions). The choices are `8`,
    // `16`, `32`, `64`, or `0` for a native width.
    "unsigned":$indexBitWidth
  );

  let genVerifyDecl = 1;

  let extraClassDeclaration = [{
    // Dimension level types that define sparse tensors:
    //   Dense - dimension is dense, every entry is stored
@@ -46,37 +74,7 @@ def SparseTensorEncodingAttr : Tensor_Attr<"SparseTensorEncoding",
    enum class DimLevelType {
      Dense, Compressed, Singleton
    };

    // Returns the dimension level type in the given dimension `dim`
    // of this tensor type. The choices, defined by the `DimLevelType`
    // enum, are `dense` (the dimension should be stored in its entirety),
    // `compressed` (only non-zero regions or elements should be stored),
    // or `singleton` (no sibling elements for parent).
    DimLevelType getDimLevelType(unsigned dim) const;

    // Returns the dimension order of this tensor type as an AffineMap.
    // Unlike dense storage, most sparse storage schemes do not provide
    // fast random access. This affine map specifies the order of
    // dimensions that should be supported by the sparse storage scheme
    // (e.g. (i,j) -> (i,j) requests 2-d row-wise and (i,j) -> (j,i)
    // requests 2-d column-wise storage).
    // TODO: block structure with higher-dim inputs
    AffineMap getDimOrdering() const;

    // Returns the required bit width for pointer storage. A narrow width
    // reduces the memory footprint of overhead storage, as long as the
    // width suffices to define the total required range (viz. the maximum
    // number of stored entries over all indirection dimensions). The choices
    // are `8`, `16`, `32`, `64`, or `0` for a native width.
    unsigned getPointerBitWidth() const;

    // Returns the required bit width for index storage. A narrow width
    // reduces the memory footprint of overhead storage, as long as the
    // width suffices to define the total required range (viz. the maximum
    // value of each tensor index over all dimensions). The choices are `8`,
    // `16`, `32`, `64`, or `0` for a native width.
    unsigned getIndexBitWidth() const;
  }];
}

#endif // LLVMIR_ATTRDEFS
#endif // SPARSETENSOR_ATTRDEFS
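As an aside (not part of the diff), the dimension level types and the dimension ordering combine into familiar sparse formats. A minimal sketch in the syntax of the tests below; the `#SparseVector` and `#CSC` labels are invented here for illustration, and only the `dense`/`compressed` combinations actually appear in this patch's tests. Elided bit widths default to `0`, the native width.

    // Sparse vector: a single compressed dimension, native bit widths.
    #SparseVector = #sparse_tensor.encoding<{
      dimLevelType = [ "compressed" ]
    }>

    // Column-wise 2-d storage: same level types as CSR, but the ordering
    // map (i,j) -> (j,i) requests column-major traversal.
    #CSC = #sparse_tensor.encoding<{
      dimLevelType = [ "dense", "compressed" ],
      dimOrdering = affine_map<(i,j) -> (j,i)>
    }>

    func private @sparse_vector(tensor<1024xf64, #SparseVector>)
    func private @sparse_matrix_csc(tensor<?x?xf32, #CSC>)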
@@ -9,6 +9,7 @@
#ifndef SPARSETENSOR_OPS
#define SPARSETENSOR_OPS

include "mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td"
include "mlir/Dialect/SparseTensor/IR/SparseTensorBase.td"
include "mlir/Interfaces/SideEffectInterfaces.td"
@@ -1,7 +1,2 @@
add_mlir_dialect(TensorOps tensor)
add_mlir_doc(TensorOps TensorOps Dialects/ -gen-dialect-doc)

set(LLVM_TARGET_DEFINITIONS TensorAttrDefs.td)
mlir_tablegen(TensorAttrDefs.h.inc -gen-attrdef-decls)
mlir_tablegen(TensorAttrDefs.cpp.inc -gen-attrdef-defs)
add_public_tablegen_target(MLIRTensorAttrDefsIncGen)
@@ -13,7 +13,6 @@
#include "mlir/IR/Dialect.h"
#include "mlir/IR/OpDefinition.h"
#include "mlir/IR/OpImplementation.h"
#include "mlir/IR/TensorEncoding.h"
#include "mlir/Interfaces/CastInterfaces.h"
#include "mlir/Interfaces/ControlFlowInterfaces.h"
#include "mlir/Interfaces/SideEffectInterfaces.h"
@@ -24,13 +23,6 @@

#include "mlir/Dialect/Tensor/IR/TensorOpsDialect.h.inc"

//===----------------------------------------------------------------------===//
// Tensor Dialect Attributes
//===----------------------------------------------------------------------===//

#define GET_ATTRDEF_CLASSES
#include "mlir/Dialect/Tensor/IR/TensorAttrDefs.h.inc"

//===----------------------------------------------------------------------===//
// Tensor Dialect Operations
//===----------------------------------------------------------------------===//
@@ -10,7 +10,6 @@
#define TENSOR_OPS

include "mlir/Dialect/Tensor/IR/TensorBase.td"
include "mlir/Dialect/Tensor/IR/TensorAttrDefs.td"
include "mlir/Interfaces/CastInterfaces.td"
include "mlir/Interfaces/ControlFlowInterfaces.td"
include "mlir/Interfaces/SideEffectInterfaces.td"
@@ -5,9 +5,11 @@ add_mlir_dialect_library(MLIRSparseTensor
  ${MLIR_MAIN_INCLUDE_DIR}/mlir/Dialect/SparseTensor

  DEPENDS
  MLIRSparseTensorAttrDefsIncGen
  MLIRSparseTensorOpsIncGen

  LINK_LIBS PUBLIC
  MLIRDialect
  MLIRIR
  MLIRSupport
)
@@ -9,12 +9,184 @@
#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"

#include "mlir/IR/Builders.h"
#include "mlir/IR/DialectImplementation.h"
#include "mlir/IR/OpImplementation.h"
#include "llvm/ADT/TypeSwitch.h"

using namespace mlir;
using namespace mlir::sparse_tensor;

//===----------------------------------------------------------------------===//
// TensorDialect Attribute Methods
//===----------------------------------------------------------------------===//

#define GET_ATTRDEF_CLASSES
#include "mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.cpp.inc"

static bool acceptBitWidth(unsigned bitWidth) {
  switch (bitWidth) {
  case 0:
  case 8:
  case 16:
  case 32:
  case 64:
    return true;
  default:
    return false;
  }
}

Attribute SparseTensorEncodingAttr::parse(MLIRContext *context,
                                          DialectAsmParser &parser, Type type) {
  if (failed(parser.parseLess()))
    return {};
  // Parse the data as a dictionary.
  DictionaryAttr dict;
  if (failed(parser.parseAttribute(dict)))
    return {};
  if (failed(parser.parseGreater()))
    return {};
  // Process the data from the parsed dictionary value into struct-like data.
  SmallVector<SparseTensorEncodingAttr::DimLevelType, 4> dlt;
  AffineMap map = {};
  unsigned ptr = 0;
  unsigned ind = 0;
  for (const NamedAttribute &attr : dict) {
    if (attr.first == "dimLevelType") {
      auto arrayAttr = attr.second.dyn_cast<ArrayAttr>();
      if (!arrayAttr) {
        parser.emitError(parser.getNameLoc(),
                         "expected an array for dimension level types");
        return {};
      }
      for (unsigned i = 0, e = arrayAttr.size(); i < e; i++) {
        auto strAttr = arrayAttr[i].dyn_cast<StringAttr>();
        if (!strAttr) {
          parser.emitError(parser.getNameLoc(),
                           "expected a string value in dimension level types");
          return {};
        }
        auto strVal = strAttr.getValue();
        if (strVal == "dense") {
          dlt.push_back(SparseTensorEncodingAttr::DimLevelType::Dense);
        } else if (strVal == "compressed") {
          dlt.push_back(SparseTensorEncodingAttr::DimLevelType::Compressed);
        } else if (strVal == "singleton") {
          dlt.push_back(SparseTensorEncodingAttr::DimLevelType::Singleton);
        } else {
          parser.emitError(parser.getNameLoc(),
                           "unexpected dimension level type: ")
              << strVal;
          return {};
        }
      }
    } else if (attr.first == "dimOrdering") {
      auto affineAttr = attr.second.dyn_cast<AffineMapAttr>();
      if (!affineAttr) {
        parser.emitError(parser.getNameLoc(),
                         "expected an affine map for dimension ordering");
        return {};
      }
      map = affineAttr.getValue();
    } else if (attr.first == "pointerBitWidth") {
      auto intAttr = attr.second.dyn_cast<IntegerAttr>();
      if (!intAttr) {
        parser.emitError(parser.getNameLoc(),
                         "expected an integral pointer bitwidth");
        return {};
      }
      ptr = intAttr.getInt();
    } else if (attr.first == "indexBitWidth") {
      auto intAttr = attr.second.dyn_cast<IntegerAttr>();
      if (!intAttr) {
        parser.emitError(parser.getNameLoc(),
                         "expected an integral index bitwidth");
        return {};
      }
      ind = intAttr.getInt();
    } else {
      parser.emitError(parser.getNameLoc(), "unexpected key: ")
          << attr.first.str();
      return {};
    }
  }
  // Construct struct-like storage for attribute.
  return parser.getChecked<SparseTensorEncodingAttr>(context, dlt, map, ptr,
                                                     ind);
}

void SparseTensorEncodingAttr::print(DialectAsmPrinter &printer) const {
  // Print the struct-like storage in dictionary fashion.
  printer << "encoding<{ dimLevelType = [ ";
  for (unsigned i = 0, e = getDimLevelType().size(); i < e; i++) {
    switch (getDimLevelType()[i]) {
    case DimLevelType::Dense:
      printer << "\"dense\"";
      break;
    case DimLevelType::Compressed:
      printer << "\"compressed\"";
      break;
    case DimLevelType::Singleton:
      printer << "\"singleton\"";
      break;
    }
    if (i != e - 1)
      printer << ", ";
  }
  printer << " ]";
  if (getDimOrdering())
    printer << ", dimOrdering = affine_map<" << getDimOrdering() << ">";
  printer << ", pointerBitWidth = " << getPointerBitWidth()
          << ", indexBitWidth = " << getIndexBitWidth() << " }>";
}

LogicalResult SparseTensorEncodingAttr::verify(
    function_ref<InFlightDiagnostic()> emitError,
    ArrayRef<DimLevelType> dimLevelType, AffineMap dimOrdering,
    unsigned pointerBitWidth, unsigned indexBitWidth) {
  if (!acceptBitWidth(pointerBitWidth))
    return emitError() << "unexpected pointer bitwidth: " << pointerBitWidth;
  if (!acceptBitWidth(indexBitWidth))
    return emitError() << "unexpected index bitwidth: " << indexBitWidth;
  if (dimOrdering) {
    if (!dimOrdering.isPermutation())
      return emitError()
             << "expected a permutation affine map for dimension ordering";
    if (dimOrdering.getNumResults() != dimLevelType.size())
      return emitError() << "unexpected mismatch in ordering and dimension "
                            "level types size";
  }
  return success();
}

LogicalResult SparseTensorEncodingAttr::verifyEncoding(
    ArrayRef<int64_t> shape, Type elementType,
    function_ref<InFlightDiagnostic()> emitError) const {
  // Check structural integrity.
  if (failed(verify(emitError, getDimLevelType(), getDimOrdering(),
                    getPointerBitWidth(), getIndexBitWidth())))
    return failure();
  // Check integrity with tensor type specifics. Dimension ordering is optional,
  // but we always should have dimension level types for the full rank.
  unsigned size = shape.size();
  if (getDimOrdering() && getDimOrdering().getNumResults() != size)
    return emitError() << "expected an affine map of size " << size
                       << " for dimension ordering";
  if (getDimLevelType().size() != size)
    return emitError() << "expected an array of size " << size
                       << " for dimension level types";
  return success();
}

//===----------------------------------------------------------------------===//
// TensorDialect Methods
//===----------------------------------------------------------------------===//

void SparseTensorDialect::initialize() {
  addAttributes<
#define GET_ATTRDEF_LIST
#include "mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.cpp.inc"
      >();
  addOperations<
#define GET_OP_LIST
#include "mlir/Dialect/SparseTensor/IR/SparseTensorOps.cpp.inc"
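For illustration (not part of the diff): the printer above always spells out the bit widths, so an encoding written with defaults elided round-trips to the fully expanded form that the new round-trip test checks. A sketch based on that test:

    // Written with pointer/index bit widths elided (they default to 0, i.e. native width):
    func private @sparse_1d_tensor(tensor<32xf64, #sparse_tensor.encoding<{dimLevelType = ["compressed"]}>>)

    // Printed back by SparseTensorEncodingAttr::print as:
    //   tensor<32xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ], pointerBitWidth = 0, indexBitWidth = 0 }>>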
@@ -23,3 +195,23 @@ void SparseTensorDialect::initialize() {

#define GET_OP_CLASSES
#include "mlir/Dialect/SparseTensor/IR/SparseTensorOps.cpp.inc"

Attribute SparseTensorDialect::parseAttribute(DialectAsmParser &parser,
                                              Type type) const {
  StringRef attrTag;
  if (failed(parser.parseKeyword(&attrTag)))
    return Attribute();
  Attribute attr;
  auto parseResult =
      generatedAttributeParser(getContext(), parser, attrTag, type, attr);
  if (parseResult.hasValue())
    return attr;
  parser.emitError(parser.getNameLoc(), "unknown sparse tensor attribute");
  return Attribute();
}

void SparseTensorDialect::printAttribute(Attribute attr,
                                         DialectAsmPrinter &printer) const {
  if (succeeded(generatedAttributePrinter(attr, printer)))
    return;
}
@@ -7,7 +7,6 @@ add_mlir_dialect_library(MLIRTensor

  DEPENDS
  MLIRTensorOpsIncGen
  MLIRTensorAttrDefsIncGen

  LINK_COMPONENTS
  Core
@@ -7,142 +7,11 @@
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/IR/DialectImplementation.h"
#include "mlir/Transforms/InliningUtils.h"
#include "llvm/ADT/TypeSwitch.h"

using namespace mlir;
using namespace mlir::tensor;

//===----------------------------------------------------------------------===//
// TableGen'd Attributes Methods
//===----------------------------------------------------------------------===//

#define GET_ATTRDEF_CLASSES
#include "mlir/Dialect/Tensor/IR/TensorAttrDefs.cpp.inc"

// Dictionary keys.
static constexpr StringRef getSparseDimLevelTypeAttrName() {
  return "sparseDimLevelType";
}
static constexpr StringRef getSparseDimOrderingAttrName() {
  return "sparseDimOrdering";
}
static constexpr StringRef getSparsePointerBitWidthAttrName() {
  return "sparsePointerBitWidth";
}
static constexpr StringRef getSparseIndexBitWidthAttrName() {
  return "sparseIndexBitWidth";
}

// Dictionary values.
static constexpr StringRef getDenseDimLevelTypeVal() { return "dense"; }
static constexpr StringRef getCompressedDimLevelTypeVal() {
  return "compressed";
}
static constexpr StringRef getSingletonDimLevelTypeVal() { return "singleton"; }

Attribute SparseTensorEncodingAttr::parse(MLIRContext *context,
                                          DialectAsmParser &parser, Type type) {
  if (failed(parser.parseLess()))
    return {};
  DictionaryAttr dict;
  if (failed(parser.parseAttribute(dict)))
    return {};
  if (failed(parser.parseGreater()))
    return {};
  return SparseTensorEncodingAttr::get(context, dict);
}

void SparseTensorEncodingAttr::print(DialectAsmPrinter &printer) const {
  printer << "sparse<" << getDict() << ">";
}

LogicalResult SparseTensorEncodingAttr::verifyEncoding(
    llvm::ArrayRef<int64_t> shape, Type elementType,
    llvm::function_ref<mlir::InFlightDiagnostic()> emitError) const {
  unsigned size = shape.size();
  for (const NamedAttribute &attr : getDict()) {
    if (attr.first == getSparseDimLevelTypeAttrName()) {
      // Dimension level type verification.
      auto arrayAttr = attr.second.dyn_cast<ArrayAttr>();
      if (!arrayAttr || size != static_cast<int64_t>(arrayAttr.size()))
        return emitError() << "expected an array of size " << size
                           << " for dimension level types";
      for (unsigned i = 0; i < size; i++) {
        auto strAttr = arrayAttr[i].dyn_cast<StringAttr>();
        if (!strAttr)
          return emitError()
                 << "expected string value in dimension level types";
        auto strVal = strAttr.getValue();
        if (strVal != getDenseDimLevelTypeVal() &&
            strVal != getCompressedDimLevelTypeVal() &&
            strVal != getSingletonDimLevelTypeVal())
          return emitError() << "unexpected dimension level type: " << strAttr;
      }
    } else if (attr.first == getSparseDimOrderingAttrName()) {
      // Dimension order verification.
      auto affineAttr = attr.second.dyn_cast<AffineMapAttr>();
      if (!affineAttr)
        return emitError() << "expected an affine map for dimension ordering";
      AffineMap map = affineAttr.getValue();
      if (size != map.getNumResults() || !map.isPermutation())
        return emitError() << "expected a permutation affine map of size "
                           << size << " for dimension ordering";
    } else if (attr.first == getSparsePointerBitWidthAttrName() ||
               attr.first == getSparseIndexBitWidthAttrName()) {
      // Pointer or index bitwidth verification.
      auto intAttr = attr.second.dyn_cast<IntegerAttr>();
      if (!intAttr)
        return emitError() << "expected an integral bitwidth";
      switch (intAttr.getInt()) {
      case 0:
      case 8:
      case 16:
      case 32:
      case 64:
        continue;
      default:
        return emitError() << "unexpected bitwidth: " << intAttr.getInt();
      }
    } else {
      return emitError() << "unexpected key: " << attr.first.str();
    }
  }
  return success();
}

SparseTensorEncodingAttr::DimLevelType
SparseTensorEncodingAttr::getDimLevelType(unsigned dim) const {
  if (auto value = getDict().get(getSparseDimLevelTypeAttrName())) {
    auto strVal =
        value.dyn_cast<ArrayAttr>()[dim].cast<StringAttr>().getValue();
    if (strVal == getCompressedDimLevelTypeVal())
      return DimLevelType::Compressed;
    if (strVal == getSingletonDimLevelTypeVal())
      return DimLevelType::Singleton;
  }
  return DimLevelType::Dense;
}

AffineMap SparseTensorEncodingAttr::getDimOrdering() const {
  if (auto value = getDict().get(getSparseDimOrderingAttrName()))
    return value.cast<AffineMapAttr>().getValue();
  return {};
}

unsigned SparseTensorEncodingAttr::getPointerBitWidth() const {
  if (auto value = getDict().get(getSparsePointerBitWidthAttrName()))
    return value.cast<IntegerAttr>().getInt();
  return 0;
}

unsigned SparseTensorEncodingAttr::getIndexBitWidth() const {
  if (auto value = getDict().get(getSparseIndexBitWidthAttrName()))
    return value.cast<IntegerAttr>().getInt();
  return 0;
}

//===----------------------------------------------------------------------===//
// TensorDialect Dialect Interfaces
//===----------------------------------------------------------------------===//
@@ -166,33 +35,9 @@ struct TensorInlinerInterface : public DialectInlinerInterface {
//===----------------------------------------------------------------------===//

void TensorDialect::initialize() {
  addAttributes<
#define GET_ATTRDEF_LIST
#include "mlir/Dialect/Tensor/IR/TensorAttrDefs.cpp.inc"
      >();
  addOperations<
#define GET_OP_LIST
#include "mlir/Dialect/Tensor/IR/TensorOps.cpp.inc"
      >();
  addInterfaces<TensorInlinerInterface>();
}

Attribute TensorDialect::parseAttribute(DialectAsmParser &parser,
                                        Type type) const {
  StringRef attrTag;
  if (failed(parser.parseKeyword(&attrTag)))
    return Attribute();
  Attribute attr;
  auto parseResult =
      generatedAttributeParser(getContext(), parser, attrTag, type, attr);
  if (parseResult.hasValue())
    return attr;
  parser.emitError(parser.getNameLoc(), "unknown tensor attribute");
  return Attribute();
}

void TensorDialect::printAttribute(::mlir::Attribute attr,
                                   ::mlir::DialectAsmPrinter &printer) const {
  if (succeeded(generatedAttributePrinter(attr, printer)))
    return;
}
@@ -0,0 +1,56 @@
// RUN: mlir-opt <%s -split-input-file -verify-diagnostics

// -----

#a = #sparse_tensor.encoding<{dimLevelType = ["dense", "compressed"]}>
func private @tensor_size_mismatch(%arg0: tensor<8xi32, #a>) -> () // expected-error {{expected an array of size 1 for dimension level types}}

// -----

#a = #sparse_tensor.encoding<{dimLevelType = ["dense", "compressed"], dimOrdering = affine_map<(i) -> (i)>}> // expected-error {{unexpected mismatch in ordering and dimension level types size}}
func private @tensor_sizes_mismatch(%arg0: tensor<8xi32, #a>) -> ()

// -----

#a = #sparse_tensor.encoding<{dimLevelType = [1]}> // expected-error {{expected a string value in dimension level types}}
func private @tensor_type_mismatch(%arg0: tensor<8xi32, #a>) -> ()

// -----

#a = #sparse_tensor.encoding<{dimLevelType = ["strange"]}> // expected-error {{unexpected dimension level type: strange}}
func private @tensor_value_mismatch(%arg0: tensor<8xi32, #a>) -> ()

// -----

#a = #sparse_tensor.encoding<{dimOrdering = "wrong"}> // expected-error {{expected an affine map for dimension ordering}}
func private @tensor_order_mismatch(%arg0: tensor<16x32xf32, #a>) -> ()

// -----

#a = #sparse_tensor.encoding<{dimOrdering = affine_map<(i,j) -> (i,i)>}> // expected-error {{expected a permutation affine map for dimension ordering}}
func private @tensor_no_permutation(%arg0: tensor<16x32xf32, #a>) -> ()

// -----

#a = #sparse_tensor.encoding<{pointerBitWidth = "x"}> // expected-error {{expected an integral pointer bitwidth}}
func private @tensor_no_int_ptr(%arg0: tensor<16x32xf32, #a>) -> ()

// -----

#a = #sparse_tensor.encoding<{pointerBitWidth = 42}> // expected-error {{unexpected pointer bitwidth: 42}}
func private @tensor_invalid_int_ptr(%arg0: tensor<16x32xf32, #a>) -> ()

// -----

#a = #sparse_tensor.encoding<{indexBitWidth = "not really"}> // expected-error {{expected an integral index bitwidth}}
func private @tensor_no_int_index(%arg0: tensor<16x32xf32, #a>) -> ()

// -----

#a = #sparse_tensor.encoding<{indexBitWidth = 128}> // expected-error {{unexpected index bitwidth: 128}}
func private @tensor_invalid_int_index(%arg0: tensor<16x32xf32, #a>) -> ()

// -----

#a = #sparse_tensor.encoding<{key = 1}> // expected-error {{unexpected key: key}}
func private @tensor_invalid_key(%arg0: tensor<16x32xf32, #a>) -> ()
@@ -0,0 +1,16 @@
// RUN: mlir-opt <%s | mlir-opt | FileCheck %s

// CHECK-LABEL: func private @sparse_1d_tensor(
// CHECK-SAME: tensor<32xf64, #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ], pointerBitWidth = 0, indexBitWidth = 0 }>>)
func private @sparse_1d_tensor(tensor<32xf64, #sparse_tensor.encoding<{dimLevelType = ["compressed"]}>>)

#CSR = #sparse_tensor.encoding<{
  dimLevelType = [ "dense", "compressed" ],
  dimOrdering = affine_map<(i,j) -> (i,j)>,
  pointerBitWidth = 64,
  indexBitWidth = 64
}>

// CHECK-LABEL: func private @sparse_2d_tensor(
// CHECK-SAME: tensor<?x?xf32, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d0, d1)>, pointerBitWidth = 64, indexBitWidth = 64 }>>)
func private @sparse_2d_tensor(tensor<?x?xf32, #CSR>)
@@ -1,46 +0,0 @@
// RUN: mlir-opt <%s -split-input-file -verify-diagnostics

// -----

#a = #tensor.sparse<{sparseDimLevelType = [1,2]}>
func private @tensor_size_mismatch(%arg0: tensor<8xi32, #a>) -> () // expected-error {{expected an array of size 1 for dimension level types}}

// -----

#a = #tensor.sparse<{sparseDimLevelType = [1]}>
func private @tensor_type_mismatch(%arg0: tensor<8xi32, #a>) -> () // expected-error {{expected string value in dimension level types}}

// -----

#a = #tensor.sparse<{sparseDimLevelType = ["strange"]}>
func private @tensor_value_mismatch(%arg0: tensor<8xi32, #a>) -> () // expected-error {{unexpected dimension level type: "strange"}}

// -----

#a = #tensor.sparse<{sparseDimOrdering = "wrong"}>
func private @tensor_order_mismatch(%arg0: tensor<8xi32, #a>) -> () // expected-error {{expected an affine map for dimension ordering}}

// -----

#a = #tensor.sparse<{sparseDimOrdering = affine_map<(i,j) -> (i,i)>}>
func private @tensor_no_permutation(%arg0: tensor<16x32xf32, #a>) -> () // expected-error {{expected a permutation affine map of size 2 for dimension ordering}}

// -----

#a = #tensor.sparse<{sparsePointerBitWidth = 42}>
func private @tensor_invalid_int_ptr(%arg0: tensor<16x32xf32, #a>) -> () // expected-error {{unexpected bitwidth: 42}}

// -----

#a = #tensor.sparse<{sparseIndexBitWidth = "not really"}>
func private @tensor_no_int_index(%arg0: tensor<16x32xf32, #a>) -> () // expected-error {{expected an integral bitwidth}}

// -----

#a = #tensor.sparse<{sparseIndexBitWidth = 128}>
func private @tensor_invalid_int_index(%arg0: tensor<16x32xf32, #a>) -> () // expected-error {{unexpected bitwidth: 128}}

// -----

#a = #tensor.sparse<{key = 1}>
func private @tensor_invalid_key(%arg0: tensor<16x32xf32, #a>) -> () // expected-error {{unexpected key: key}}
@@ -1,14 +0,0 @@
// RUN: mlir-opt <%s | mlir-opt | FileCheck %s

// CHECK: func private @sparse_1d_tensor(tensor<32xf64, #tensor.sparse<{sparseDimLevelType = ["compressed"]}>>)
func private @sparse_1d_tensor(tensor<32xf64, #tensor.sparse<{sparseDimLevelType = ["compressed"]}>>)

#CSR = #tensor.sparse<{
  sparseDimLevelType = [ "dense", "compressed" ],
  sparseDimOrdering = affine_map<(i,j) -> (i,j)>,
  sparseIndexBitWidth = 64,
  sparsePointerBitWidth = 64
}>

// CHECK: func private @sparse_2d_tensor(tensor<?x?xf32, #tensor.sparse<{sparseDimLevelType = ["dense", "compressed"], sparseDimOrdering = affine_map<(d0, d1) -> (d0, d1)>, sparseIndexBitWidth = 64 : i64, sparsePointerBitWidth = 64 : i64}>>)
func private @sparse_2d_tensor(tensor<?x?xf32, #CSR>)
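To summarize the migration at the IR level (not part of the diff), a side-by-side sketch derived from the deleted test above and the added round-trip test; the `#a_old` and `#a_new` labels are purely illustrative:

    // Old TensorDialect encoding, removed by this patch:
    #a_old = #tensor.sparse<{
      sparseDimLevelType = [ "dense", "compressed" ],
      sparseDimOrdering = affine_map<(i,j) -> (i,j)>,
      sparsePointerBitWidth = 64,
      sparseIndexBitWidth = 64
    }>

    // New SparseTensorDialect equivalent:
    #a_new = #sparse_tensor.encoding<{
      dimLevelType = [ "dense", "compressed" ],
      dimOrdering = affine_map<(i,j) -> (i,j)>,
      pointerBitWidth = 64,
      indexBitWidth = 64
    }>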