[mlir][sparse] add init sparse tensor operation

This is the first step towards supporting general sparse tensors as the output
of operations. The init sparse tensor operation materializes an empty sparse
tensor of a given shape and sparsity into a subsequent computation (similar to
its init tensor counterpart for dense tensors).

Example:
  %c = sparse_tensor.init [%d1, %d2] : tensor<?x?xf32, #SparseMatrix>
  %0 = linalg.matmul
    ins(%a, %b: tensor<?x?xf32>, tensor<?x?xf32>)
    outs(%c: tensor<?x?xf32, #SparseMatrix>) -> tensor<?x?xf32, #SparseMatrix>

Reviewed By: bixia

Differential Revision: https://reviews.llvm.org/D111684
commit 35517a251d (parent 58917054c2)
Author: Aart Bik
Date:   2021-10-12 15:08:27 -07:00

4 changed files with 103 additions and 14 deletions

mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td

@@ -31,16 +31,16 @@ class SparseTensor_Op<string mnemonic, list<OpTrait> traits = []>
 def SparseTensor_NewOp : SparseTensor_Op<"new", []>,
     Arguments<(ins AnyType:$source)>,
     Results<(outs TensorOf<[AnyType]>:$result)> {
-  string summary = "Constructs a new sparse tensor";
+  string summary = "Materializes a new sparse tensor from given source";
   string description = [{
-    Constructs a sparse tensor value with contents taken from an opaque
-    pointer provided by `source`. For targets that have access to a file
-    system, for example, this pointer may be a filename (or file) of a sparse
+    Materializes a sparse tensor with contents taken from an opaque pointer
+    provided by `source`. For targets that have access to a file system,
+    for example, this pointer may be a filename (or file) of a sparse
     tensor in a particular external storage format. The form of the operation
     is kept deliberately very general to allow for alternative implementations
     in the future, such as pointers to buffers or runnable initialization
-    code. The operation is provided as an anchor that materializes a fully
-    typed sparse tensor values into a computation.
+    code. The operation is provided as an anchor that materializes a properly
+    typed sparse tensor with initial contents into a computation.

     Example:
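
For context, a minimal sketch of typical `new` usage, combining the assembly
format above with the round-trip test at the end of this commit (the
`#SparseVector` name and the `%src` pointer are assumptions, not part of the
diff):

```mlir
#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}>

// Materialize a fully typed sparse tensor from an opaque source pointer,
// e.g. a filename handle produced by the runtime support library.
%t = sparse_tensor.new %src : !llvm.ptr<i8> to tensor<128xf64, #SparseVector>
```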
@@ -51,6 +51,28 @@ def SparseTensor_NewOp : SparseTensor_Op<"new", []>,
   let assemblyFormat = "$source attr-dict `:` type($source) `to` type($result)";
 }

+def SparseTensor_InitOp : SparseTensor_Op<"init", []>,
+    Arguments<(ins Variadic<Index>:$sizes)>,
+    Results<(outs AnyTensor:$result)> {
+  string summary = "Materializes an empty sparse tensor";
+  string description = [{
+    Materializes an empty sparse tensor with given shape (either static
+    or dynamic). The operation is provided as an anchor that materializes
+    a properly typed sparse tensor into the output clause of a subsequent
+    operation that yields a sparse tensor as the result.
+
+    Example:
+
+    ```mlir
+    %c = sparse_tensor.init [%d1, %d2] : tensor<?x?xf32, #SparseMatrix>
+    %0 = linalg.matmul
+      ins(%a, %b: tensor<?x?xf32>, tensor<?x?xf32>)
+      outs(%c: tensor<?x?xf32, #SparseMatrix>) -> tensor<?x?xf32, #SparseMatrix>
+    ```
+  }];
+
+  let assemblyFormat = "`[` $sizes `]` attr-dict `:` type($result)";
+}
+
 def SparseTensor_ConvertOp : SparseTensor_Op<"convert",
     [NoSideEffect, SameOperandsAndResultType]>,
     Arguments<(ins AnyTensor:$source)>,
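
Beyond the dynamic-shape example in the description, a hedged sketch with a
mixed static/dynamic shape (the `#CSR` name and the constants are assumptions):

```mlir
#CSR = #sparse_tensor.encoding<{dimLevelType = ["dense", "compressed"]}>

// One index operand per dimension, static or dynamic; the verifier added
// later in this commit requires the static dimension 10 to be backed by
// a matching constant.
%d0 = constant 8 : index
%d1 = constant 10 : index
%t  = sparse_tensor.init [%d0, %d1] : tensor<?x10xf64, #CSR>
```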
@@ -89,8 +111,8 @@ def SparseTensor_ReleaseOp : SparseTensor_Op<"release", []>,
     Arguments<(ins AnyTensor:$tensor)> {
   string description = [{
     Releases the underlying sparse storage scheme for a tensor that
-    materialized earlier through a `new` operator or a non-trivial
-    `convert` operator with an annotated tensor type as destination.
+    materialized earlier through a `new` operator, `init` operator, or a
+    non-trivial `convert` operator with an annotated tensor type as destination.
     This operation should only be called once for any materialized tensor.
     Also, after this operation, any subsequent `memref` querying operation
     on the tensor returns undefined results.
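
To make that lifetime contract concrete, a minimal sketch of the intended
pairing, assuming `release` keeps its usual `$tensor : type` form and that
`#SparseMatrix`, `%d1`, and `%d2` are defined elsewhere:

```mlir
// Storage materialized by `init` must be released exactly once.
%t = sparse_tensor.init [%d1, %d2] : tensor<?x?xf64, #SparseMatrix>
// ... use %t, e.g. as the output operand of a sparse kernel ...
sparse_tensor.release %t : tensor<?x?xf64, #SparseMatrix>
```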
@@ -177,9 +199,9 @@ def SparseTensor_ToValuesOp : SparseTensor_Op<"values", [NoSideEffect]>,
 def SparseTensor_ToTensorOp : SparseTensor_Op<"tensor", [NoSideEffect]>,
     Arguments<(ins Variadic<AnyStridedMemRefOfRank<1>>:$memrefs)>,
     Results<(outs AnyTensor:$result)> {
-  let summary = "Reconstructs tensor from arrays(s)";
+  let summary = "Rematerializes tensor from array(s)";
   let description = [{
-    Reconstructs the sparse tensor from the sparse storage scheme array(s).
+    Rematerializes the sparse tensor from the sparse storage scheme array(s).
     This is similar to the `memref.load` operation in the sense that it
     provides a bridge between a bufferized world view and a tensor world
     view. Unlike the `memref.load` operation, however, this sparse operation
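
For reference, a hedged sketch of this op, modeled on the single-memref form
in the tests changed below (the `#SparseVector` name and `%vals` are
assumptions; the operand list is abbreviated to the values array):

```mlir
// Bridge from a bufferized storage array back to the tensor world; the
// result type must carry a sparse encoding to pass verification.
%t = sparse_tensor.tensor %vals : memref<?xf64> to tensor<128xf64, #SparseVector>
```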

mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp

@@ -213,6 +213,27 @@ static LogicalResult verify(NewOp op) {
   return success();
 }

+static LogicalResult verify(InitOp op) {
+  if (!getSparseTensorEncoding(op.result().getType()))
+    return op.emitError("expected a sparse tensor result");
+  RankedTensorType ttp = op.getType().cast<RankedTensorType>();
+  unsigned rank = ttp.getRank();
+  if (rank != op.sizes().size())
+    return op.emitError("unexpected mismatch between tensor rank and sizes: ")
+           << rank << " vs. " << op.sizes().size();
+  auto shape = ttp.getShape();
+  for (unsigned i = 0; i < rank; i++) {
+    if (shape[i] == ShapedType::kDynamicSize)
+      continue;
+    auto constantOp = op.sizes()[i].getDefiningOp<ConstantOp>();
+    if (!constantOp ||
+        constantOp.getValue().cast<IntegerAttr>().getInt() != shape[i])
+      return op.emitError("unexpected mismatch with static dimension size ")
+             << shape[i];
+  }
+  return success();
+}
+
 static LogicalResult verify(ConvertOp op) {
   if (auto tp1 = op.source().getType().dyn_cast<RankedTensorType>()) {
     if (auto tp2 = op.dest().getType().dyn_cast<RankedTensorType>()) {
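
Note that this verifier only constrains static dimensions; a sketch of a case
it accepts, assuming `%n` is a function argument of type `index` and
`#SparseMatrix` is defined elsewhere:

```mlir
// The dynamic `?` dimension accepts any index value (no constant
// required); the static dimension 10 must match a constant operand.
%c10 = constant 10 : index
%0 = sparse_tensor.init [%n, %c10] : tensor<?x10xf32, #SparseMatrix>
```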
@@ -221,8 +242,8 @@ static LogicalResult verify(ConvertOp op) {
       auto shape2 = tp2.getShape();
       for (unsigned d = 0, rank = tp1.getRank(); d < rank; d++) {
         if (shape1[d] != shape2[d])
-          return op.emitError()
-                 << "unexpected conversion mismatch in dimension " << d;
+          return op.emitError("unexpected conversion mismatch in dimension ")
+                 << d;
       }
       return success();
     }
@@ -276,7 +297,7 @@ static LogicalResult verify(ToValuesOp op) {

 static LogicalResult verify(ToTensorOp op) {
   if (!getSparseTensorEncoding(op.result().getType()))
-    return op.emitError("expected a sparse tensor as result");
+    return op.emitError("expected a sparse tensor result");
   return success();
 }

mlir/test/Dialect/SparseTensor/invalid.mlir

@@ -16,6 +16,36 @@ func @invalid_release_dense(%arg0: tensor<4xi32>) {

 // -----

+func @invalid_init_dense(%arg0: index, %arg1: index) -> tensor<?x?xf32> {
+  // expected-error@+1 {{expected a sparse tensor result}}
+  %0 = sparse_tensor.init [%arg0, %arg1] : tensor<?x?xf32>
+  return %0 : tensor<?x?xf32>
+}
+
+// -----
+
+#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}>
+
+func @invalid_init_rank(%arg0: index) -> tensor<?xf32, #SparseVector> {
+  // expected-error@+1 {{unexpected mismatch between tensor rank and sizes: 1 vs. 2}}
+  %0 = sparse_tensor.init [%arg0, %arg0] : tensor<?xf32, #SparseVector>
+  return %0 : tensor<?xf32, #SparseVector>
+}
+
+// -----
+
+#SparseMatrix = #sparse_tensor.encoding<{dimLevelType = ["compressed", "compressed"]}>
+
+func @invalid_init_size() -> tensor<?x10xf32, #SparseMatrix> {
+  %c10 = constant 10 : index
+  %c20 = constant 20 : index
+  // expected-error@+1 {{unexpected mismatch with static dimension size 10}}
+  %0 = sparse_tensor.init [%c10, %c20] : tensor<?x10xf32, #SparseMatrix>
+  return %0 : tensor<?x10xf32, #SparseMatrix>
+}
+
+// -----
+
 func @invalid_pointers_dense(%arg0: tensor<128xf64>) -> memref<?xindex> {
   %c = arith.constant 0 : index
   // expected-error@+1 {{expected a sparse tensor to get pointers}}
@@ -115,7 +145,7 @@ func @mismatch_values_types(%arg0: tensor<?xf64, #SparseVector>) -> memref<?xf32>
 // -----

 func @sparse_to_unannotated_tensor(%arg0: memref<?xf64>) -> tensor<16x32xf64> {
-  // expected-error@+1 {{expected a sparse tensor as result}}
+  // expected-error@+1 {{expected a sparse tensor result}}
   %0 = sparse_tensor.tensor %arg0 : memref<?xf64> to tensor<16x32xf64>
   return %0 : tensor<16x32xf64>
 }

mlir/test/Dialect/SparseTensor/roundtrip.mlir

@@ -13,6 +13,22 @@ func @sparse_new(%arg0: !llvm.ptr<i8>) -> tensor<128xf64, #SparseVector> {

 // -----

+#SparseMatrix = #sparse_tensor.encoding<{dimLevelType = ["compressed", "compressed"]}>
+
+// CHECK-LABEL: func @sparse_init()
+// CHECK-DAG: %[[C16:.*]] = constant 16 : index
+// CHECK-DAG: %[[C32:.*]] = constant 32 : index
+// CHECK: %[[T:.*]] = sparse_tensor.init[%[[C16]], %[[C32]]] : tensor<?x32xf64, #{{.*}}>
+// CHECK: return %[[T]] : tensor<?x32xf64, #{{.*}}>
+func @sparse_init() -> tensor<?x32xf64, #SparseMatrix> {
+  %d1 = constant 16 : index
+  %d2 = constant 32 : index
+  %0 = sparse_tensor.init [%d1, %d2] : tensor<?x32xf64, #SparseMatrix>
+  return %0 : tensor<?x32xf64, #SparseMatrix>
+}
+
+// -----
+
 #SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}>

 // CHECK-LABEL: func @sparse_release(