From f981121a66a40dfa8c60b6cb8f594f4fb97408f2 Mon Sep 17 00:00:00 2001
From: Aart Bik
Date: Wed, 31 Aug 2022 20:31:24 -0700
Subject: [PATCH] [mlir][sparse] refined doc of sparse tensor ops

Reviewed By: bixia

Differential Revision: https://reviews.llvm.org/D133086
---
 .../Dialect/SparseTensor/IR/SparseTensorOps.td | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td
index 25bc16fec96c..3e1564f201cc 100644
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td
@@ -100,8 +100,8 @@ def SparseTensor_ToPointersOp : SparseTensor_Op<"pointers", [NoSideEffect]>,
     `bufferization.to_memref` operation in the sense that it provides a bridge
     between a tensor world view and a bufferized world view. Unlike the
     `bufferization.to_memref` operation, however, this sparse operation actually
-    lowers into a call into a support library to obtain access to the
-    pointers array.
+    lowers into code that extracts the pointers array from the sparse storage
+    scheme (either by calling a support library or through direct code).
 
     Example:
 
@@ -125,8 +125,8 @@ def SparseTensor_ToIndicesOp : SparseTensor_Op<"indices", [NoSideEffect]>,
     `bufferization.to_memref` operation in the sense that it provides a bridge
     between a tensor world view and a bufferized world view. Unlike the
     `bufferization.to_memref` operation, however, this sparse operation actually
-    lowers into a call into a support library to obtain access to the
-    indices array.
+    lowers into code that extracts the indices array from the sparse storage
+    scheme (either by calling a support library or through direct code).
 
     Example:
 
@@ -150,8 +150,8 @@ def SparseTensor_ToValuesOp : SparseTensor_Op<"values", [NoSideEffect]>,
     the `bufferization.to_memref` operation in the sense that it provides a
     bridge between a tensor world view and a bufferized world view. Unlike the
     `bufferization.to_memref` operation, however, this sparse operation actually
-    lowers into a call into a support library to obtain access to the
-    values array.
+    lowers into code that extracts the values array from the sparse storage
+    scheme (either by calling a support library or through direct code).
 
     Example:
 
@@ -195,8 +195,9 @@ def SparseTensor_ConcatenateOp : SparseTensor_Op<"concatenate", []>,
 // Sparse Tensor Management Operations. These operations are "impure" in the
 // sense that they do not properly operate on SSA values. Instead, the behavior
 // is solely defined by side-effects. These operations provide a bridge between
-// the code generator and the support library. The semantics of these operations
-// may be refined over time as our sparse abstractions evolve.
+// "sparsification" on one hand and a support library or actual code generation
+// on the other hand. The semantics of these operations may be refined over time
+// as our sparse abstractions evolve.
 //===----------------------------------------------------------------------===//
 
 def SparseTensor_LexInsertOp : SparseTensor_Op<"lex_insert", []>,
@@ -675,5 +676,4 @@ def SparseTensor_StorageSetOp : SparseTensor_Op<"storage_set", []>,
   let hasVerifier = 1;
 }
 
-
 #endif // SPARSETENSOR_OPS
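
For context, the three accessor ops whose documentation this patch rewords appear in IR roughly as follows. This is a minimal sketch, not part of the patch: the `#CSR` encoding alias, the function name, and the SSA value names are illustrative assumptions, while the op syntax mirrors the `Example:` sections already present in SparseTensorOps.td.

```mlir
// Hypothetical CSR-style encoding: dense rows, compressed columns.
#CSR = #sparse_tensor.encoding<{
  dimLevelType = [ "dense", "compressed" ]
}>

func.func @extract_csr(%t: tensor<64x64xf64, #CSR>) {
  %c1 = arith.constant 1 : index
  // Pointers array of the compressed dimension (dimension 1).
  %ptrs = sparse_tensor.pointers %t, %c1
    : tensor<64x64xf64, #CSR> to memref<?xindex>
  // Indices array of the compressed dimension.
  %idxs = sparse_tensor.indices %t, %c1
    : tensor<64x64xf64, #CSR> to memref<?xindex>
  // Values array, independent of dimension.
  %vals = sparse_tensor.values %t
    : tensor<64x64xf64, #CSR> to memref<?xf64>
  return
}
```

Whether such ops resolve to calls into a runtime support library or to direct code that reads the sparse storage scheme is exactly the distinction the reworded documentation makes explicit.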