From a6d92a971175d727873a9e7644913ee02d7232a8 Mon Sep 17 00:00:00 2001
From: Aart Bik
Date: Wed, 28 Apr 2021 18:15:11 -0700
Subject: [PATCH] [mlir][sparse] migrate sparse operations into new sparse
 tensor dialect

This is the very first step toward removing the glue and clutter from
linalg and replacing it with proper sparse tensor types. This revision
migrates the LinalgSparseOps into SparseTensorOps of a sparse tensor
dialect. This also provides a new home for sparse tensor related
transformations.

NOTE: the actual replacement with sparse tensor types (and removal of
linalg glue/clutter) will follow, but I am trying to keep the amount of
changes per revision manageable.

Reviewed By: bixia

Differential Revision: https://reviews.llvm.org/D101488
---
 mlir/include/mlir/Dialect/CMakeLists.txt      |   1 +
 .../mlir/Dialect/Linalg/IR/CMakeLists.txt     |   6 -
 .../mlir/Dialect/Linalg/IR/LinalgOps.h        |   3 -
 .../Dialect/Linalg/Transforms/Transforms.h    |   3 -
 .../mlir/Dialect/SparseTensor/CMakeLists.txt  |   1 +
 .../Dialect/SparseTensor/IR/CMakeLists.txt    |   2 +
 .../Dialect/SparseTensor/IR/SparseTensor.h    |  23 +++
 .../SparseTensor/IR/SparseTensorBase.td       |  29 +++
 .../IR/SparseTensorOps.td}                    |  57 ++----
 .../SparseTensor/Transforms/Transforms.h      |  23 +++
 mlir/include/mlir/InitAllDialects.h           |   2 +
 mlir/lib/Dialect/CMakeLists.txt               |   1 +
 mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp      |   3 -
 mlir/lib/Dialect/Linalg/IR/LinalgTypes.cpp    |   4 -
 .../Dialect/Linalg/Transforms/CMakeLists.txt  |   1 -
 .../Linalg/Transforms/Sparsification.cpp      |  16 +-
 mlir/lib/Dialect/SparseTensor/CMakeLists.txt  |   2 +
 .../Dialect/SparseTensor/IR/CMakeLists.txt    |  13 ++
 .../SparseTensor/IR/SparseTensorDialect.cpp   |  25 +++
 .../SparseTensor/Transforms/CMakeLists.txt    |  14 ++
 .../Transforms/SparseTensorLowering.cpp}      |  39 ++--
 mlir/test/Dialect/Linalg/sparse_1d.mlir       | 148 +++++++--------
 mlir/test/Dialect/Linalg/sparse_2d.mlir       | 172 +++++++++---------
 mlir/test/Dialect/Linalg/sparse_3d.mlir       | 138 +++++++-------
 mlir/test/Dialect/Linalg/sparse_lower.mlir    |  10 +-
 mlir/test/Dialect/Linalg/sparse_nd.mlir       |  10 +-
 mlir/test/Dialect/Linalg/sparse_vector.mlir   |   4 +-
 .../lowering.mlir}                            |  24 +--
 .../roundtrip.mlir}                           |  16 +-
 .../SparseTensor}/CPU/frostt-example.mlir     |   0
 .../SparseTensor}/CPU/lit.local.cfg           |   0
 .../CPU/matrix-market-example.mlir            |   0
 .../SparseTensor}/CPU/sparse_matvec.mlir      |   2 +-
 .../CPU/sparse_sampled_matmul.mlir            |   2 +-
 .../SparseTensor}/CPU/sparse_sum.mlir         |   2 +-
 mlir/test/lib/Transforms/CMakeLists.txt       |   2 +
 .../lib/Transforms/TestSparsification.cpp     |  15 +-
 mlir/test/mlir-opt/commandline.mlir           |   1 +
 38 files changed, 458 insertions(+), 356 deletions(-)
 create mode 100644 mlir/include/mlir/Dialect/SparseTensor/CMakeLists.txt
 create mode 100644 mlir/include/mlir/Dialect/SparseTensor/IR/CMakeLists.txt
 create mode 100644 mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensor.h
 create mode 100644 mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorBase.td
 rename mlir/include/mlir/Dialect/{Linalg/IR/LinalgSparseOps.td => SparseTensor/IR/SparseTensorOps.td} (62%)
 create mode 100644 mlir/include/mlir/Dialect/SparseTensor/Transforms/Transforms.h
 create mode 100644 mlir/lib/Dialect/SparseTensor/CMakeLists.txt
 create mode 100644 mlir/lib/Dialect/SparseTensor/IR/CMakeLists.txt
 create mode 100644 mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
 create mode 100644 mlir/lib/Dialect/SparseTensor/Transforms/CMakeLists.txt
 rename mlir/lib/Dialect/{Linalg/Transforms/SparseLowering.cpp => SparseTensor/Transforms/SparseTensorLowering.cpp} (77%)
 rename
mlir/test/Dialect/{Linalg/sparse_lower_calls.mlir => SparseTensor/lowering.mlir} (74%) rename mlir/test/Dialect/{Linalg/sparse_roundtrip.mlir => SparseTensor/roundtrip.mlir} (70%) rename mlir/test/Integration/{Sparse => Dialect/SparseTensor}/CPU/frostt-example.mlir (100%) rename mlir/test/Integration/{Sparse => Dialect/SparseTensor}/CPU/lit.local.cfg (100%) rename mlir/test/Integration/{Sparse => Dialect/SparseTensor}/CPU/matrix-market-example.mlir (100%) rename mlir/test/Integration/{Sparse => Dialect/SparseTensor}/CPU/sparse_matvec.mlir (98%) rename mlir/test/Integration/{Sparse => Dialect/SparseTensor}/CPU/sparse_sampled_matmul.mlir (98%) rename mlir/test/Integration/{Sparse => Dialect/SparseTensor}/CPU/sparse_sum.mlir (97%) diff --git a/mlir/include/mlir/Dialect/CMakeLists.txt b/mlir/include/mlir/Dialect/CMakeLists.txt index 9b9c80baaa3c..2d6d04a52a9d 100644 --- a/mlir/include/mlir/Dialect/CMakeLists.txt +++ b/mlir/include/mlir/Dialect/CMakeLists.txt @@ -17,6 +17,7 @@ add_subdirectory(PDLInterp) add_subdirectory(Quant) add_subdirectory(SCF) add_subdirectory(Shape) +add_subdirectory(SparseTensor) add_subdirectory(SPIRV) add_subdirectory(StandardOps) add_subdirectory(Tensor) diff --git a/mlir/include/mlir/Dialect/Linalg/IR/CMakeLists.txt b/mlir/include/mlir/Dialect/Linalg/IR/CMakeLists.txt index eb4d6fd7c8eb..6056c5e5259e 100644 --- a/mlir/include/mlir/Dialect/Linalg/IR/CMakeLists.txt +++ b/mlir/include/mlir/Dialect/Linalg/IR/CMakeLists.txt @@ -80,12 +80,6 @@ add_public_tablegen_target(MLIRLinalgStructuredOpsIncGen) add_dependencies(MLIRLinalgStructuredOpsIncGen LinalgOdsGen) add_dependencies(mlir-headers MLIRLinalgStructuredOpsIncGen) -set(LLVM_TARGET_DEFINITIONS LinalgSparseOps.td) -mlir_tablegen(LinalgSparseOps.h.inc -gen-op-decls) -mlir_tablegen(LinalgSparseOps.cpp.inc -gen-op-defs) -add_public_tablegen_target(MLIRLinalgSparseOpsIncGen) -add_dependencies(mlir-headers MLIRLinalgSparseOpsIncGen) - set(LLVM_TARGET_DEFINITIONS LinalgInterfaces.td) mlir_tablegen(LinalgInterfaces.h.inc -gen-op-interface-decls) mlir_tablegen(LinalgInterfaces.cpp.inc -gen-op-interface-defs) diff --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.h b/mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.h index ee04dc2fee22..2d4e0bf0b2b5 100644 --- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.h +++ b/mlir/include/mlir/Dialect/Linalg/IR/LinalgOps.h @@ -127,7 +127,4 @@ class IndexedGenericOp; #define GET_OP_CLASSES #include "mlir/Dialect/Linalg/IR/LinalgStructuredOps.h.inc" -#define GET_OP_CLASSES -#include "mlir/Dialect/Linalg/IR/LinalgSparseOps.h.inc" - #endif // MLIR_DIALECT_LINALG_LINALGOPS_H_ diff --git a/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h b/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h index 52f7425cbfef..d76ccd91fdbf 100644 --- a/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h +++ b/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h @@ -1129,9 +1129,6 @@ void populateSparsificationPatterns( RewritePatternSet &patterns, const SparsificationOptions &options = SparsificationOptions()); -/// Sets up sparsification conversion rules with the given options. 
-void populateSparsificationConversionPatterns(RewritePatternSet &patterns); - } // namespace linalg } // namespace mlir diff --git a/mlir/include/mlir/Dialect/SparseTensor/CMakeLists.txt b/mlir/include/mlir/Dialect/SparseTensor/CMakeLists.txt new file mode 100644 index 000000000000..f33061b2d87c --- /dev/null +++ b/mlir/include/mlir/Dialect/SparseTensor/CMakeLists.txt @@ -0,0 +1 @@ +add_subdirectory(IR) diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/CMakeLists.txt b/mlir/include/mlir/Dialect/SparseTensor/IR/CMakeLists.txt new file mode 100644 index 000000000000..bb13cb9edb70 --- /dev/null +++ b/mlir/include/mlir/Dialect/SparseTensor/IR/CMakeLists.txt @@ -0,0 +1,2 @@ +add_mlir_dialect(SparseTensorOps sparse_tensor) +add_mlir_doc(SparseTensorOps SparseTensorOps Dialects/ -gen-dialect-doc) diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensor.h b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensor.h new file mode 100644 index 000000000000..c5a466e9d7db --- /dev/null +++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensor.h @@ -0,0 +1,23 @@ +//===- SparseTensor.h - Sparse tensor dialect -------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef MLIR_DIALECT_SPARSETENSOR_IR_SPARSETENSOR_H_ +#define MLIR_DIALECT_SPARSETENSOR_IR_SPARSETENSOR_H_ + +#include "mlir/IR/BuiltinTypes.h" +#include "mlir/IR/Dialect.h" +#include "mlir/IR/OpDefinition.h" +#include "mlir/IR/OpImplementation.h" +#include "mlir/Interfaces/SideEffectInterfaces.h" + +#define GET_OP_CLASSES +#include "mlir/Dialect/SparseTensor/IR/SparseTensorOps.h.inc" + +#include "mlir/Dialect/SparseTensor/IR/SparseTensorOpsDialect.h.inc" + +#endif // MLIR_DIALECT_SPARSETENSOR_IR_SPARSETENSOR_H_ diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorBase.td b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorBase.td new file mode 100644 index 000000000000..cdf4e75b7cb3 --- /dev/null +++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorBase.td @@ -0,0 +1,29 @@ +//===- SparseTensorBase.td - Sparse tensor dialect base ----*- tablegen -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef SPARSETENSOR_BASE +#define SPARSETENSOR_BASE + +include "mlir/IR/OpBase.td" + +def SparseTensor_Dialect : Dialect { + let name = "sparse_tensor"; + let cppNamespace = "::mlir::sparse_tensor"; + let description = [{ + The `sparse tensor` dialect is intended to hold primitives that + form a bridge between high-level operations on sparse tensors + and lower-level operations on the actual sparse storage schemes + consisting of pointers, indices, and values. This bridge + simplifies a `sparse compiler` pass by postponing actual + code generation for the supported primitives to a later phase, + either by generating calls into a runtime support library + or by further lowering the primitives into actual code. 
+ }]; +} + +#endif // SPARSETENSOR_BASE diff --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgSparseOps.td b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td similarity index 62% rename from mlir/include/mlir/Dialect/Linalg/IR/LinalgSparseOps.td rename to mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td index f0653c462aae..5a2b1b129df1 100644 --- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgSparseOps.td +++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td @@ -1,49 +1,27 @@ -//===- LinalgSparseOps.td - Linalg dialect sparse ops ------*- tablegen -*-===// +//===- SparseTensorOps.td - Sparse tensor dialect ops ------*- tablegen -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// -// -// The following operations bootstrap working with sparse tensors solely -// within the Linalg dialect. They provide temporary bridges between a -// future SparseTensorType (now an opaque pointer), the actual TensorType, -// and MemRef arrays underlying an actual sparse storage scheme in memory. -// -// Lacking a proper sparse tensor type, the 'sparse_tensor' operation -// provides a bridge between an opaque pointer and a regular tensor type -// just to simplify feeding the value into a Linalg op. The operation -// simply disappears during lowering. -// -// The other operations form the bridge between the opaque pointer and -// the actual storage of pointers, indices, and values. These operations -// resemble 'buffer_cast' in the sense that they map tensors to -// their bufferized memrefs, but they lower into actual calls since -// sparse storage does not bufferize into a single memrefs, as dense -// tensors do, but into a hierarchical storage scheme where pointers -// access memrefs with indices and eventually into values. -// -// TODO: introduce SparseTensorType as first class citizen in MLIR -// -//===----------------------------------------------------------------------===// -#ifndef LINALG_SPARSE_OPS -#define LINALG_SPARSE_OPS +#ifndef SPARSETENSOR_OPS +#define SPARSETENSOR_OPS -include "mlir/Dialect/Linalg/IR/LinalgBase.td" +include "mlir/Dialect/SparseTensor/IR/SparseTensorBase.td" include "mlir/Interfaces/SideEffectInterfaces.td" // Base class. 
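(Aside: the four renamed ops that follow are meant to compose into exactly the bridge the dialect description above sketches: an opaque pointer is first viewed as an ordinary tensor, and that tensor then yields the pointers, indices, and values arrays of its underlying storage scheme. Here is a minimal sketch of that composition in the new `sparse_tensor` syntax; the `!llvm.ptr<i8>` alias, the `64x64xf64` shape, and the memref element types are illustrative assumptions modeled on the examples embedded in the op definitions, not something this patch pins down.)

```mlir
// Illustrative only: composing the bridge primitives end to end.
!SparseTensor = type !llvm.ptr<i8>

func @bridge(%arg0: !SparseTensor) {
  %c1 = constant 1 : index
  // View the opaque runtime pointer as a regular tensor; this cast
  // simply disappears during lowering.
  %0 = sparse_tensor.fromPtr %arg0 : !SparseTensor to tensor<64x64xf64>
  // Extract the storage arrays of the sparse scheme at dimension 1.
  %1 = sparse_tensor.pointers %0, %c1 : tensor<64x64xf64> to memref<?xindex>
  %2 = sparse_tensor.indices %0, %c1 : tensor<64x64xf64> to memref<?xindex>
  %3 = sparse_tensor.values %0 : tensor<64x64xf64> to memref<?xf64>
  return
}
```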
-class Linalg_SparseOp traits = []> - : Op { +class SparseTensor_Op traits = []> + : Op { let printer = [{ return ::print(p, *this); }]; let verifier = ?; let parser = [{ return ::parse$cppClass(parser, result); }]; } -def Linalg_SparseTensorFromPointerOp : - Linalg_SparseOp<"sparse_tensor">, +// TODO: remove me +def SparseTensor_FromPointerOp : SparseTensor_Op<"fromPtr">, Arguments<(ins AnyType:$ptr)>, Results<(outs AnyTensor:$result)> { let summary = "Views an opaque sparse tensor pointer as a tensor"; @@ -60,14 +38,13 @@ def Linalg_SparseTensorFromPointerOp : ```mlir !SparseTensor = type !llvm.ptr - %0 = linalg.sparse_tensor %arg0 : !SparseTensor to tensor<64x64xf64> + %0 = sparse_tensor.fromPtr %arg0 : !SparseTensor to tensor<64x64xf64> ``` }]; let assemblyFormat = "$ptr attr-dict `:` type($ptr) `to` type($result)"; } -def Linalg_SparseTensorToPointersMemRefOp : - Linalg_SparseOp<"sparse_pointers", [NoSideEffect]>, +def SparseTensor_ToPointersOp : SparseTensor_Op<"pointers", [NoSideEffect]>, Arguments<(ins AnyTensor:$tensor, Index:$dim)>, Results<(outs AnyStridedMemRefOfRank<1>:$result)> { let summary = "Extract pointers array at given dimension from a tensor"; @@ -83,15 +60,14 @@ def Linalg_SparseTensorToPointersMemRefOp : Example: ```mlir - %1 = linalg.sparse_pointers %0, %c1 : tensor<64x64xf64> to memref + %1 = sparse_tensor.pointers %0, %c1 : tensor<64x64xf64> to memref ``` }]; let assemblyFormat = "$tensor `,` $dim attr-dict `:` type($tensor)" " `to` type($result)"; } -def Linalg_SparseTensorToIndicesMemRefOp : - Linalg_SparseOp<"sparse_indices", [NoSideEffect]>, +def SparseTensor_ToIndicesOp : SparseTensor_Op<"indices", [NoSideEffect]>, Arguments<(ins AnyTensor:$tensor, Index:$dim)>, Results<(outs AnyStridedMemRefOfRank<1>:$result)> { let summary = "Extract indices array at given dimension from a tensor"; @@ -107,15 +83,14 @@ def Linalg_SparseTensorToIndicesMemRefOp : Example: ```mlir - %1 = linalg.sparse_indices %0, %c1 : tensor<64x64xf64> to memref + %1 = sparse_tensor.indices %0, %c1 : tensor<64x64xf64> to memref ``` }]; let assemblyFormat = "$tensor `,` $dim attr-dict `:` type($tensor)" " `to` type($result)"; } -def Linalg_SparseTensorToValuesMemRefOp : - Linalg_SparseOp<"sparse_values", [NoSideEffect]>, +def SparseTensor_ToValuesOp : SparseTensor_Op<"values", [NoSideEffect]>, Arguments<(ins AnyTensor:$tensor)>, Results<(outs AnyStridedMemRefOfRank<1>:$result)> { let summary = "Extract numerical values array from a tensor"; @@ -131,10 +106,10 @@ def Linalg_SparseTensorToValuesMemRefOp : Example: ```mlir - %1 = linalg.sparse_values %0 : tensor<64x64xf64> to memref + %1 = sparse_tensor.values %0 : tensor<64x64xf64> to memref ``` }]; let assemblyFormat = "$tensor attr-dict `:` type($tensor) `to` type($result)"; } -#endif // LINALG_SPARSE_OPS +#endif // SPARSETENSOR_OPS diff --git a/mlir/include/mlir/Dialect/SparseTensor/Transforms/Transforms.h b/mlir/include/mlir/Dialect/SparseTensor/Transforms/Transforms.h new file mode 100644 index 000000000000..0ae4b0bf0817 --- /dev/null +++ b/mlir/include/mlir/Dialect/SparseTensor/Transforms/Transforms.h @@ -0,0 +1,23 @@ +//===- Transforms.h - Sparse tensor transformations -------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_TRANSFORMS_H_ +#define MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_TRANSFORMS_H_ + +#include "mlir/IR/PatternMatch.h" + +namespace mlir { +namespace sparse_tensor { + +/// Sets up sparsification conversion rules with the given options. +void populateSparsificationConversionPatterns(RewritePatternSet &patterns); + +} // namespace sparse_tensor +} // namespace mlir + +#endif // MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_TRANSFORMS_H_ diff --git a/mlir/include/mlir/InitAllDialects.h b/mlir/include/mlir/InitAllDialects.h index b718ada0875c..e44f2e8f1ae0 100644 --- a/mlir/include/mlir/InitAllDialects.h +++ b/mlir/include/mlir/InitAllDialects.h @@ -37,6 +37,7 @@ #include "mlir/Dialect/SDBM/SDBMDialect.h" #include "mlir/Dialect/SPIRV/IR/SPIRVDialect.h" #include "mlir/Dialect/Shape/IR/Shape.h" +#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h" #include "mlir/Dialect/StandardOps/IR/Ops.h" #include "mlir/Dialect/Tensor/IR/Tensor.h" #include "mlir/Dialect/Tosa/IR/TosaOps.h" @@ -74,6 +75,7 @@ inline void registerAllDialects(DialectRegistry ®istry) { ROCDL::ROCDLDialect, SDBMDialect, shape::ShapeDialect, + sparse_tensor::SparseTensorDialect, tensor::TensorDialect, tosa::TosaDialect, x86vector::X86VectorDialect>(); diff --git a/mlir/lib/Dialect/CMakeLists.txt b/mlir/lib/Dialect/CMakeLists.txt index ce4cb0a4e55d..f5124f7d138f 100644 --- a/mlir/lib/Dialect/CMakeLists.txt +++ b/mlir/lib/Dialect/CMakeLists.txt @@ -18,6 +18,7 @@ add_subdirectory(Quant) add_subdirectory(SCF) add_subdirectory(SDBM) add_subdirectory(Shape) +add_subdirectory(SparseTensor) add_subdirectory(SPIRV) add_subdirectory(StandardOps) add_subdirectory(Tensor) diff --git a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp index 750d818bd261..41703210a154 100644 --- a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp +++ b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp @@ -2384,9 +2384,6 @@ struct FoldTensorCastOp; #define GET_OP_CLASSES #include "mlir/Dialect/Linalg/IR/LinalgStructuredOps.cpp.inc" -#define GET_OP_CLASSES -#include "mlir/Dialect/Linalg/IR/LinalgSparseOps.cpp.inc" - /// Return the dims that are `iteratorTypeName` loops in the LinalgOp `op`. /// Assumes `op` is a LinalgOp. void mlir::linalg::getDimsOfType(Operation *op, StringRef iteratorTypeName, diff --git a/mlir/lib/Dialect/Linalg/IR/LinalgTypes.cpp b/mlir/lib/Dialect/Linalg/IR/LinalgTypes.cpp index 2288f734dcb5..0337365761a2 100644 --- a/mlir/lib/Dialect/Linalg/IR/LinalgTypes.cpp +++ b/mlir/lib/Dialect/Linalg/IR/LinalgTypes.cpp @@ -99,10 +99,6 @@ void mlir::linalg::LinalgDialect::initialize() { #define GET_OP_LIST #include "mlir/Dialect/Linalg/IR/LinalgStructuredOps.cpp.inc" >(); - addOperations< -#define GET_OP_LIST -#include "mlir/Dialect/Linalg/IR/LinalgSparseOps.cpp.inc" - >(); // Fill the Linalg-specific OpName to RegionBuilder map. 
addNamedOpBuilders< diff --git a/mlir/lib/Dialect/Linalg/Transforms/CMakeLists.txt b/mlir/lib/Dialect/Linalg/Transforms/CMakeLists.txt index 74e4f42bfee0..9e633cff7bf5 100644 --- a/mlir/lib/Dialect/Linalg/Transforms/CMakeLists.txt +++ b/mlir/lib/Dialect/Linalg/Transforms/CMakeLists.txt @@ -11,7 +11,6 @@ add_mlir_dialect_library(MLIRLinalgTransforms Interchange.cpp Loops.cpp Promotion.cpp - SparseLowering.cpp Sparsification.cpp Tiling.cpp Transforms.cpp diff --git a/mlir/lib/Dialect/Linalg/Transforms/Sparsification.cpp b/mlir/lib/Dialect/Linalg/Transforms/Sparsification.cpp index 99024c32104f..5570cbaf9e85 100644 --- a/mlir/lib/Dialect/Linalg/Transforms/Sparsification.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/Sparsification.cpp @@ -45,6 +45,7 @@ #include "mlir/Dialect/Linalg/Transforms/Transforms.h" #include "mlir/Dialect/Linalg/Utils/Utils.h" #include "mlir/Dialect/SCF/SCF.h" +#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h" #include "mlir/Dialect/StandardOps/IR/Ops.h" #include "mlir/IR/Matchers.h" @@ -360,7 +361,7 @@ static void findSparseAnnotations(Merger &merger, linalg::GenericOp op) { /// Returns true if tensor was set up with sparse storage scheme. static bool linkedSparse(linalg::GenericOp op, unsigned tensor) { if (tensor < op.getNumInputs()) - return isa_and_nonnull( + return isa_and_nonnull( op.getInput(tensor).getDefiningOp()); return false; } @@ -576,12 +577,10 @@ static void genBuffers(Merger &merger, CodeGen &codegen, dynShape, genIntType(rewriter, codegen.options.indType)); Value dim = rewriter.create(loc, d); // Generate sparse primitives to obtains pointer and indices. - codegen.pointers[t][i] = - rewriter.create( - loc, ptrTp, tensor, dim); - codegen.indices[t][i] = - rewriter.create(loc, indTp, - tensor, dim); + codegen.pointers[t][i] = rewriter.create( + loc, ptrTp, tensor, dim); + codegen.indices[t][i] = rewriter.create( + loc, indTp, tensor, dim); } // Find lower and upper bound in current dimension. Value up; @@ -608,8 +607,7 @@ static void genBuffers(Merger &merger, CodeGen &codegen, auto dynShape = {ShapedType::kDynamicSize}; auto sparseTp = MemRefType::get(dynShape, tensorType.getElementType()); codegen.buffers[t] = - rewriter.create(loc, sparseTp, - tensor); + rewriter.create(loc, sparseTp, tensor); } } } diff --git a/mlir/lib/Dialect/SparseTensor/CMakeLists.txt b/mlir/lib/Dialect/SparseTensor/CMakeLists.txt new file mode 100644 index 000000000000..9f57627c321f --- /dev/null +++ b/mlir/lib/Dialect/SparseTensor/CMakeLists.txt @@ -0,0 +1,2 @@ +add_subdirectory(IR) +add_subdirectory(Transforms) diff --git a/mlir/lib/Dialect/SparseTensor/IR/CMakeLists.txt b/mlir/lib/Dialect/SparseTensor/IR/CMakeLists.txt new file mode 100644 index 000000000000..4cc81a3e3b39 --- /dev/null +++ b/mlir/lib/Dialect/SparseTensor/IR/CMakeLists.txt @@ -0,0 +1,13 @@ +add_mlir_dialect_library(MLIRSparseTensor + SparseTensorDialect.cpp + + ADDITIONAL_HEADER_DIRS + ${MLIR_MAIN_INCLUDE_DIR}/mlir/Dialect/SparseTensor + + DEPENDS + MLIRSparseTensorOpsIncGen + + LINK_LIBS PUBLIC + MLIRDialect + MLIRIR + ) diff --git a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp new file mode 100644 index 000000000000..90db6b2d4b5f --- /dev/null +++ b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp @@ -0,0 +1,25 @@ +//===- SparseTensorDialect.cpp - Sparse tensor dialect implementation -----===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 
+// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h" + +#include "mlir/IR/Builders.h" +#include "mlir/IR/OpImplementation.h" + +using namespace mlir; +using namespace mlir::sparse_tensor; + +void SparseTensorDialect::initialize() { + addOperations< +#define GET_OP_LIST +#include "mlir/Dialect/SparseTensor/IR/SparseTensorOps.cpp.inc" + >(); +} + +#define GET_OP_CLASSES +#include "mlir/Dialect/SparseTensor/IR/SparseTensorOps.cpp.inc" diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/CMakeLists.txt b/mlir/lib/Dialect/SparseTensor/Transforms/CMakeLists.txt new file mode 100644 index 000000000000..63623b460704 --- /dev/null +++ b/mlir/lib/Dialect/SparseTensor/Transforms/CMakeLists.txt @@ -0,0 +1,14 @@ +add_mlir_dialect_library(MLIRSparseTensorTransforms + SparseTensorLowering.cpp + + ADDITIONAL_HEADER_DIRS + ${MLIR_MAIN_INCLUDE_DIR}/mlir/Dialect/SparseTensor + + LINK_LIBS PUBLIC + MLIRIR + MLIRLLVMIR + MLIRPass + MLIRStandard + MLIRSparseTensor + MLIRTransforms +) diff --git a/mlir/lib/Dialect/Linalg/Transforms/SparseLowering.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorLowering.cpp similarity index 77% rename from mlir/lib/Dialect/Linalg/Transforms/SparseLowering.cpp rename to mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorLowering.cpp index b1efd24d48e2..d87cc61e5bd8 100644 --- a/mlir/lib/Dialect/Linalg/Transforms/SparseLowering.cpp +++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorLowering.cpp @@ -1,14 +1,25 @@ -//===- SparseLowering.cpp - Lowers sparse primitives to library calls. ---===// +//===- SparseTensorLowering.cpp - Sparse tensor primitives lowering -------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// +// +// Lower sparse tensor primitives to calls into a runtime support library. +// Note that this is a current implementation choice to keep the lowering +// simple. In principle, these primitives could also be lowered to actual +// elaborate IR code that implements the primitives on the selected sparse +// tensor storage schemes. +// +//===----------------------------------------------------------------------===// #include "mlir/Dialect/LLVMIR/LLVMTypes.h" -#include "mlir/Dialect/Linalg/IR/LinalgOps.h" -#include "mlir/Dialect/Linalg/Transforms/Transforms.h" +#include "mlir/Dialect/MemRef/IR/MemRef.h" +#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h" +#include "mlir/Dialect/SparseTensor/Transforms/Transforms.h" +#include "mlir/Dialect/StandardOps/IR/Ops.h" +#include "mlir/Transforms/DialectConversion.h" using namespace mlir; @@ -32,11 +43,10 @@ static FlatSymbolRefAttr getFunc(Operation *op, StringRef name, Type result, /// Sparse conversion rule to remove opaque pointer cast. 
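(Aside: the conversion patterns that follow, beginning with the opaque-pointer cast removal noted just above, implement the library-call choice described in the file header. A hedged before/after sketch of what the rewrite produces is given below; the `@sparsePointers64` and `@sparseValuesF64` callee names are assumptions for illustration only, since the real names are assembled by the `getFunc` helper from the result element type.)

```mlir
// Before conversion (illustrative): primitives applied to a tensor
// viewed from an opaque pointer %p of type !llvm.ptr<i8>.
%t = sparse_tensor.fromPtr %p : !llvm.ptr<i8> to tensor<64x64xf64>
%a = sparse_tensor.pointers %t, %c1 : tensor<64x64xf64> to memref<?xi64>
%v = sparse_tensor.values %t : tensor<64x64xf64> to memref<?xf64>

// After conversion (illustrative): the fromPtr cast folds away and each
// primitive becomes a call into the runtime support library, taking the
// opaque pointer itself; callee names here are assumed, not verbatim.
%a = call @sparsePointers64(%p, %c1) : (!llvm.ptr<i8>, index) -> memref<?xi64>
%v = call @sparseValuesF64(%p) : (!llvm.ptr<i8>) -> memref<?xf64>
```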
class TensorFromPointerConverter - : public OpConversionPattern { + : public OpConversionPattern { using OpConversionPattern::OpConversionPattern; LogicalResult - matchAndRewrite(linalg::SparseTensorFromPointerOp op, - ArrayRef operands, + matchAndRewrite(sparse_tensor::FromPointerOp op, ArrayRef operands, ConversionPatternRewriter &rewriter) const override { rewriter.replaceOp(op, operands[0]); return success(); @@ -62,12 +72,11 @@ public: /// Sparse conversion rule for pointer accesses. class TensorToPointersConverter - : public OpConversionPattern { + : public OpConversionPattern { public: using OpConversionPattern::OpConversionPattern; LogicalResult - matchAndRewrite(linalg::SparseTensorToPointersMemRefOp op, - ArrayRef operands, + matchAndRewrite(sparse_tensor::ToPointersOp op, ArrayRef operands, ConversionPatternRewriter &rewriter) const override { Type resType = op.getType(); Type eltType = resType.cast().getElementType(); @@ -90,12 +99,11 @@ public: /// Sparse conversion rule for index accesses. class TensorToIndicesConverter - : public OpConversionPattern { + : public OpConversionPattern { public: using OpConversionPattern::OpConversionPattern; LogicalResult - matchAndRewrite(linalg::SparseTensorToIndicesMemRefOp op, - ArrayRef operands, + matchAndRewrite(sparse_tensor::ToIndicesOp op, ArrayRef operands, ConversionPatternRewriter &rewriter) const override { Type resType = op.getType(); Type eltType = resType.cast().getElementType(); @@ -118,12 +126,11 @@ public: /// Sparse conversion rule for value accesses. class TensorToValuesConverter - : public OpConversionPattern { + : public OpConversionPattern { public: using OpConversionPattern::OpConversionPattern; LogicalResult - matchAndRewrite(linalg::SparseTensorToValuesMemRefOp op, - ArrayRef operands, + matchAndRewrite(sparse_tensor::ToValuesOp op, ArrayRef operands, ConversionPatternRewriter &rewriter) const override { Type resType = op.getType(); Type eltType = resType.cast().getElementType(); @@ -150,7 +157,7 @@ public: /// Populates the given patterns list with conversion rules required for /// the sparsification of linear algebra operations. 
-void linalg::populateSparsificationConversionPatterns( +void sparse_tensor::populateSparsificationConversionPatterns( RewritePatternSet &patterns) { patterns.add, %argb: f32, %argx: tensor<32xf32>) -> tensor< // CHECK: %[[VAL_4:.*]] = constant 0 : index // CHECK: %[[VAL_5:.*]] = constant true // CHECK: %[[VAL_6:.*]] = constant 1 : index -// CHECK: %[[VAL_7:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_4]] : tensor<32xf32> to memref -// CHECK: %[[VAL_8:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_4]] : tensor<32xf32> to memref -// CHECK: %[[VAL_9:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32xf32> to memref +// CHECK: %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_4]] : tensor<32xf32> to memref +// CHECK: %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_4]] : tensor<32xf32> to memref +// CHECK: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32> to memref // CHECK: %[[VAL_10:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32xf32> // CHECK: %[[VAL_11:.*]] = memref.alloc() : memref<32xf32> // CHECK: linalg.copy(%[[VAL_10]], %[[VAL_11]]) : memref<32xf32>, memref<32xf32> @@ -148,9 +148,9 @@ func @add_s(%arga: tensor<32xf32>, %argb: f32, %argx: tensor<32xf32>) -> tensor< // CHECK-SAME: %[[VAL_1:.*]]: tensor<32xf32>) -> tensor<32xf32> { // CHECK: %[[VAL_2:.*]] = constant 0 : index // CHECK: %[[VAL_3:.*]] = constant 1 : index -// CHECK: %[[VAL_4:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_2]] : tensor<32xf32> to memref -// CHECK: %[[VAL_5:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_2]] : tensor<32xf32> to memref -// CHECK: %[[VAL_6:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32xf32> to memref +// CHECK: %[[VAL_4:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_2]] : tensor<32xf32> to memref +// CHECK: %[[VAL_5:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_2]] : tensor<32xf32> to memref +// CHECK: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32> to memref // CHECK: %[[VAL_7:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32xf32> // CHECK: %[[VAL_8:.*]] = memref.alloc() : memref<32xf32> // CHECK: linalg.copy(%[[VAL_7]], %[[VAL_8]]) : memref<32xf32>, memref<32xf32> @@ -189,9 +189,9 @@ func @repeated_add_s(%arga: tensor<32xf32>, %argx: tensor<32xf32>) -> tensor<32x // CHECK-SAME: %[[VAL_2:.*]]: tensor<32xf32>) -> tensor<32xf32> { // CHECK: %[[VAL_3:.*]] = constant 0 : index // CHECK: %[[VAL_4:.*]] = constant 1 : index -// CHECK: %[[VAL_5:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_3]] : tensor<32xf32> to memref -// CHECK: %[[VAL_6:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_3]] : tensor<32xf32> to memref -// CHECK: %[[VAL_7:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32xf32> to memref +// CHECK: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_3]] : tensor<32xf32> to memref +// CHECK: %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_3]] : tensor<32xf32> to memref +// CHECK: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32> to memref // CHECK: %[[VAL_8:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32xf32> // CHECK: %[[VAL_9:.*]] = memref.alloc() : memref<32xf32> // CHECK: linalg.copy(%[[VAL_8]], %[[VAL_9]]) : memref<32xf32>, memref<32xf32> @@ -320,9 +320,9 @@ func @mul_dd(%arga: tensor<32xf32>, %argb: tensor<32xf32>, %argx: tensor<32xf32> // CHECK: %[[VAL_5:.*]] = constant true // CHECK: %[[VAL_6:.*]] = constant 1 : index // CHECK: %[[VAL_7:.*]] = memref.buffer_cast %[[VAL_0]] : memref<32xf32> -// CHECK: %[[VAL_8:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_4]] : tensor<32xf32> to 
memref -// CHECK: %[[VAL_9:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_4]] : tensor<32xf32> to memref -// CHECK: %[[VAL_10:.*]] = linalg.sparse_values %[[VAL_1]] : tensor<32xf32> to memref +// CHECK: %[[VAL_8:.*]] = sparse_tensor.pointers %[[VAL_1]], %[[VAL_4]] : tensor<32xf32> to memref +// CHECK: %[[VAL_9:.*]] = sparse_tensor.indices %[[VAL_1]], %[[VAL_4]] : tensor<32xf32> to memref +// CHECK: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32xf32> to memref // CHECK: %[[VAL_11:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32xf32> // CHECK: %[[VAL_12:.*]] = memref.alloc() : memref<32xf32> // CHECK: linalg.copy(%[[VAL_11]], %[[VAL_12]]) : memref<32xf32>, memref<32xf32> @@ -378,9 +378,9 @@ func @add_ds(%arga: tensor<32xf32>, %argb: tensor<32xf32>, %argx: tensor<32xf32> // CHECK: %[[VAL_3:.*]] = constant 0 : index // CHECK: %[[VAL_4:.*]] = constant 1 : index // CHECK: %[[VAL_5:.*]] = memref.buffer_cast %[[VAL_0]] : memref<32xf32> -// CHECK: %[[VAL_6:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_3]] : tensor<32xf32> to memref -// CHECK: %[[VAL_7:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_3]] : tensor<32xf32> to memref -// CHECK: %[[VAL_8:.*]] = linalg.sparse_values %[[VAL_1]] : tensor<32xf32> to memref +// CHECK: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_1]], %[[VAL_3]] : tensor<32xf32> to memref +// CHECK: %[[VAL_7:.*]] = sparse_tensor.indices %[[VAL_1]], %[[VAL_3]] : tensor<32xf32> to memref +// CHECK: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32xf32> to memref // CHECK: %[[VAL_9:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32xf32> // CHECK: %[[VAL_10:.*]] = memref.alloc() : memref<32xf32> // CHECK: linalg.copy(%[[VAL_9]], %[[VAL_10]]) : memref<32xf32>, memref<32xf32> @@ -430,9 +430,9 @@ func @mul_ds(%arga: tensor<32xf32>, %argb: tensor<32xf32>, %argx: tensor<32xf32> // CHECK: %[[VAL_4:.*]] = constant 0 : index // CHECK: %[[VAL_5:.*]] = constant true // CHECK: %[[VAL_6:.*]] = constant 1 : index -// CHECK: %[[VAL_7:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_4]] : tensor<32xf32> to memref -// CHECK: %[[VAL_8:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_4]] : tensor<32xf32> to memref -// CHECK: %[[VAL_9:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32xf32> to memref +// CHECK: %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_4]] : tensor<32xf32> to memref +// CHECK: %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_4]] : tensor<32xf32> to memref +// CHECK: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32> to memref // CHECK: %[[VAL_10:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32xf32> // CHECK: %[[VAL_11:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32xf32> // CHECK: %[[VAL_12:.*]] = memref.alloc() : memref<32xf32> @@ -488,9 +488,9 @@ func @add_sd(%arga: tensor<32xf32>, %argb: tensor<32xf32>, %argx: tensor<32xf32> // CHECK-SAME: %[[VAL_2:.*2]]: tensor<32xf32>) -> tensor<32xf32> { // CHECK: %[[VAL_3:.*]] = constant 0 : index // CHECK: %[[VAL_4:.*]] = constant 1 : index -// CHECK: %[[VAL_5:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_3]] : tensor<32xf32> to memref -// CHECK: %[[VAL_6:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_3]] : tensor<32xf32> to memref -// CHECK: %[[VAL_7:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32xf32> to memref +// CHECK: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_3]] : tensor<32xf32> to memref +// CHECK: %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_3]] : tensor<32xf32> to memref +// CHECK: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : 
tensor<32xf32> to memref // CHECK: %[[VAL_8:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32xf32> // CHECK: %[[VAL_9:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32xf32> // CHECK: %[[VAL_10:.*]] = memref.alloc() : memref<32xf32> @@ -539,12 +539,12 @@ func @mul_sd(%arga: tensor<32xf32>, %argb: tensor<32xf32>, %argx: tensor<32xf32> // CHECK-SAME: %[[VAL_2:.*2]]: tensor<32xf32>) -> tensor<32xf32> { // CHECK: %[[VAL_3:.*]] = constant 0 : index // CHECK: %[[VAL_4:.*]] = constant 1 : index -// CHECK: %[[VAL_5:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_3]] : tensor<32xf32> to memref -// CHECK: %[[VAL_6:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_3]] : tensor<32xf32> to memref -// CHECK: %[[VAL_7:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32xf32> to memref -// CHECK: %[[VAL_8:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_3]] : tensor<32xf32> to memref -// CHECK: %[[VAL_9:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_3]] : tensor<32xf32> to memref -// CHECK: %[[VAL_10:.*]] = linalg.sparse_values %[[VAL_1]] : tensor<32xf32> to memref +// CHECK: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_3]] : tensor<32xf32> to memref +// CHECK: %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_3]] : tensor<32xf32> to memref +// CHECK: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32> to memref +// CHECK: %[[VAL_8:.*]] = sparse_tensor.pointers %[[VAL_1]], %[[VAL_3]] : tensor<32xf32> to memref +// CHECK: %[[VAL_9:.*]] = sparse_tensor.indices %[[VAL_1]], %[[VAL_3]] : tensor<32xf32> to memref +// CHECK: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32xf32> to memref // CHECK: %[[VAL_11:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32xf32> // CHECK: %[[VAL_12:.*]] = memref.alloc() : memref<32xf32> // CHECK: linalg.copy(%[[VAL_11]], %[[VAL_12]]) : memref<32xf32>, memref<32xf32> @@ -623,12 +623,12 @@ func @add_ss(%arga: tensor<32xf32>, %argb: tensor<32xf32>, %argx: tensor<32xf32> // CHECK-SAME: %[[VAL_2:.*2]]: tensor<32xf32>) -> tensor<32xf32> { // CHECK: %[[VAL_3:.*]] = constant 0 : index // CHECK: %[[VAL_4:.*]] = constant 1 : index -// CHECK: %[[VAL_5:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_3]] : tensor<32xf32> to memref -// CHECK: %[[VAL_6:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_3]] : tensor<32xf32> to memref -// CHECK: %[[VAL_7:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32xf32> to memref -// CHECK: %[[VAL_8:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_3]] : tensor<32xf32> to memref -// CHECK: %[[VAL_9:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_3]] : tensor<32xf32> to memref -// CHECK: %[[VAL_10:.*]] = linalg.sparse_values %[[VAL_1]] : tensor<32xf32> to memref +// CHECK: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_3]] : tensor<32xf32> to memref +// CHECK: %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_3]] : tensor<32xf32> to memref +// CHECK: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32> to memref +// CHECK: %[[VAL_8:.*]] = sparse_tensor.pointers %[[VAL_1]], %[[VAL_3]] : tensor<32xf32> to memref +// CHECK: %[[VAL_9:.*]] = sparse_tensor.indices %[[VAL_1]], %[[VAL_3]] : tensor<32xf32> to memref +// CHECK: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32xf32> to memref // CHECK: %[[VAL_11:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32xf32> // CHECK: %[[VAL_12:.*]] = memref.alloc() : memref<32xf32> // CHECK: linalg.copy(%[[VAL_11]], %[[VAL_12]]) : memref<32xf32>, memref<32xf32> @@ -701,12 +701,12 @@ func @mul_ss(%arga: tensor<32xf32>, %argb: tensor<32xf32>, %argx: 
tensor<32xf32> // CHECK-SAME: %[[VAL_3:.*3]]: tensor<16xf32>) -> tensor<16xf32> { // CHECK: %[[VAL_4:.*]] = constant 0 : index // CHECK: %[[VAL_5:.*]] = constant 1 : index -// CHECK: %[[VAL_6:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_4]] : tensor<16xf32> to memref -// CHECK: %[[VAL_7:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_4]] : tensor<16xf32> to memref -// CHECK: %[[VAL_8:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<16xf32> to memref -// CHECK: %[[VAL_9:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_4]] : tensor<16xf32> to memref -// CHECK: %[[VAL_10:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_4]] : tensor<16xf32> to memref -// CHECK: %[[VAL_11:.*]] = linalg.sparse_values %[[VAL_1]] : tensor<16xf32> to memref +// CHECK: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_4]] : tensor<16xf32> to memref +// CHECK: %[[VAL_7:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_4]] : tensor<16xf32> to memref +// CHECK: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<16xf32> to memref +// CHECK: %[[VAL_9:.*]] = sparse_tensor.pointers %[[VAL_1]], %[[VAL_4]] : tensor<16xf32> to memref +// CHECK: %[[VAL_10:.*]] = sparse_tensor.indices %[[VAL_1]], %[[VAL_4]] : tensor<16xf32> to memref +// CHECK: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<16xf32> to memref // CHECK: %[[VAL_12:.*]] = memref.buffer_cast %[[VAL_3]] : memref<16xf32> // CHECK: %[[VAL_13:.*]] = memref.alloc() : memref<16xf32> // CHECK: linalg.copy(%[[VAL_12]], %[[VAL_13]]) : memref<16xf32>, memref<16xf32> @@ -794,12 +794,12 @@ func @two_way_inv(%arga: tensor<16xf32>, %argb: tensor<16xf32>, %argc: f32, %arg // CHECK-SAME: %[[VAL_3:.*3]]: tensor<16xf32>) -> tensor<16xf32> { // CHECK: %[[VAL_4:.*]] = constant 0 : index // CHECK: %[[VAL_5:.*]] = constant 1 : index -// CHECK: %[[VAL_6:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_4]] : tensor<16xf32> to memref -// CHECK: %[[VAL_7:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_4]] : tensor<16xf32> to memref -// CHECK: %[[VAL_8:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<16xf32> to memref -// CHECK: %[[VAL_9:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_4]] : tensor<16xf32> to memref -// CHECK: %[[VAL_10:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_4]] : tensor<16xf32> to memref -// CHECK: %[[VAL_11:.*]] = linalg.sparse_values %[[VAL_1]] : tensor<16xf32> to memref +// CHECK: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_4]] : tensor<16xf32> to memref +// CHECK: %[[VAL_7:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_4]] : tensor<16xf32> to memref +// CHECK: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<16xf32> to memref +// CHECK: %[[VAL_9:.*]] = sparse_tensor.pointers %[[VAL_1]], %[[VAL_4]] : tensor<16xf32> to memref +// CHECK: %[[VAL_10:.*]] = sparse_tensor.indices %[[VAL_1]], %[[VAL_4]] : tensor<16xf32> to memref +// CHECK: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<16xf32> to memref // CHECK: %[[VAL_12:.*]] = memref.buffer_cast %[[VAL_3]] : memref<16xf32> // CHECK: %[[VAL_13:.*]] = memref.alloc() : memref<16xf32> // CHECK: linalg.copy(%[[VAL_12]], %[[VAL_13]]) : memref<16xf32>, memref<16xf32> @@ -898,8 +898,8 @@ func @two_way_inv_alt(%arga: tensor<16xf32>, // CHECK-SAME: %[[VAL_1:.*]]: tensor) -> tensor { // CHECK: %[[VAL_2:.*]] = constant 0 : index // CHECK: %[[VAL_3:.*]] = constant 1 : index -// CHECK: %[[VAL_4:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_2]] : tensor to memref -// CHECK: %[[VAL_5:.*]] = linalg.sparse_values %[[VAL_0]] : tensor to memref +// CHECK: %[[VAL_4:.*]] = 
sparse_tensor.pointers %[[VAL_0]], %[[VAL_2]] : tensor to memref +// CHECK: %[[VAL_5:.*]] = sparse_tensor.values %[[VAL_0]] : tensor to memref // CHECK: %[[VAL_6:.*]] = memref.buffer_cast %[[VAL_1]] : memref // CHECK: %[[VAL_7:.*]] = memref.alloc() : memref // CHECK: linalg.copy(%[[VAL_6]], %[[VAL_7]]) : memref, memref @@ -947,12 +947,12 @@ func @sum_reduction(%arga: tensor, %argx: tensor) -> tensor { // CHECK-SAME: %[[VAL_2:.*2]]: tensor) -> tensor { // CHECK: %[[VAL_3:.*]] = constant 0 : index // CHECK: %[[VAL_4:.*]] = constant 1 : index -// CHECK: %[[VAL_5:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_3]] : tensor<16xf32> to memref -// CHECK: %[[VAL_6:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_3]] : tensor<16xf32> to memref -// CHECK: %[[VAL_7:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<16xf32> to memref -// CHECK: %[[VAL_8:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_3]] : tensor<16xf32> to memref -// CHECK: %[[VAL_9:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_3]] : tensor<16xf32> to memref -// CHECK: %[[VAL_10:.*]] = linalg.sparse_values %[[VAL_1]] : tensor<16xf32> to memref +// CHECK: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_3]] : tensor<16xf32> to memref +// CHECK: %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_3]] : tensor<16xf32> to memref +// CHECK: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<16xf32> to memref +// CHECK: %[[VAL_8:.*]] = sparse_tensor.pointers %[[VAL_1]], %[[VAL_3]] : tensor<16xf32> to memref +// CHECK: %[[VAL_9:.*]] = sparse_tensor.indices %[[VAL_1]], %[[VAL_3]] : tensor<16xf32> to memref +// CHECK: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<16xf32> to memref // CHECK: %[[VAL_11:.*]] = memref.buffer_cast %[[VAL_2]] : memref // CHECK: %[[VAL_12:.*]] = memref.alloc() : memref // CHECK: linalg.copy(%[[VAL_11]], %[[VAL_12]]) : memref, memref @@ -1062,13 +1062,13 @@ func @sum_reduction_ss(%arga: tensor<16xf32>, // CHECK-SAME: %[[VAL_3:.*3]]: tensor) -> tensor { // CHECK: %[[VAL_4:.*]] = constant 0 : index // CHECK: %[[VAL_5:.*]] = constant 1 : index -// CHECK: %[[VAL_6:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_4]] : tensor<16xf32> to memref -// CHECK: %[[VAL_7:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_4]] : tensor<16xf32> to memref -// CHECK: %[[VAL_8:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<16xf32> to memref +// CHECK: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_4]] : tensor<16xf32> to memref +// CHECK: %[[VAL_7:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_4]] : tensor<16xf32> to memref +// CHECK: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<16xf32> to memref // CHECK: %[[VAL_9:.*]] = memref.buffer_cast %[[VAL_1]] : memref -// CHECK: %[[VAL_10:.*]] = linalg.sparse_pointers %[[VAL_2]], %[[VAL_4]] : tensor<16xf32> to memref -// CHECK: %[[VAL_11:.*]] = linalg.sparse_indices %[[VAL_2]], %[[VAL_4]] : tensor<16xf32> to memref -// CHECK: %[[VAL_12:.*]] = linalg.sparse_values %[[VAL_2]] : tensor<16xf32> to memref +// CHECK: %[[VAL_10:.*]] = sparse_tensor.pointers %[[VAL_2]], %[[VAL_4]] : tensor<16xf32> to memref +// CHECK: %[[VAL_11:.*]] = sparse_tensor.indices %[[VAL_2]], %[[VAL_4]] : tensor<16xf32> to memref +// CHECK: %[[VAL_12:.*]] = sparse_tensor.values %[[VAL_2]] : tensor<16xf32> to memref // CHECK: %[[VAL_13:.*]] = memref.buffer_cast %[[VAL_3]] : memref // CHECK: %[[VAL_14:.*]] = memref.alloc() : memref // CHECK: linalg.copy(%[[VAL_13]], %[[VAL_14]]) : memref, memref @@ -1189,13 +1189,13 @@ func @sum_reduction_inv(%arga: tensor<16xf32>, // CHECK: 
%[[VAL_6:.*]] = constant true // CHECK: %[[VAL_7:.*]] = constant 1 : index // CHECK: %[[VAL_8:.*]] = memref.buffer_cast %[[VAL_0]] : memref -// CHECK: %[[VAL_9:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_5]] : tensor to memref -// CHECK: %[[VAL_10:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_5]] : tensor to memref -// CHECK: %[[VAL_11:.*]] = linalg.sparse_values %[[VAL_1]] : tensor to memref +// CHECK: %[[VAL_9:.*]] = sparse_tensor.pointers %[[VAL_1]], %[[VAL_5]] : tensor to memref +// CHECK: %[[VAL_10:.*]] = sparse_tensor.indices %[[VAL_1]], %[[VAL_5]] : tensor to memref +// CHECK: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_1]] : tensor to memref // CHECK: %[[VAL_12:.*]] = memref.buffer_cast %[[VAL_2]] : memref -// CHECK: %[[VAL_13:.*]] = linalg.sparse_pointers %[[VAL_3]], %[[VAL_5]] : tensor to memref -// CHECK: %[[VAL_14:.*]] = linalg.sparse_indices %[[VAL_3]], %[[VAL_5]] : tensor to memref -// CHECK: %[[VAL_15:.*]] = linalg.sparse_values %[[VAL_3]] : tensor to memref +// CHECK: %[[VAL_13:.*]] = sparse_tensor.pointers %[[VAL_3]], %[[VAL_5]] : tensor to memref +// CHECK: %[[VAL_14:.*]] = sparse_tensor.indices %[[VAL_3]], %[[VAL_5]] : tensor to memref +// CHECK: %[[VAL_15:.*]] = sparse_tensor.values %[[VAL_3]] : tensor to memref // CHECK: %[[VAL_16:.*]] = memref.dim %[[VAL_4]], %[[VAL_5]] : tensor // CHECK: %[[VAL_17:.*]] = memref.buffer_cast %[[VAL_4]] : memref // CHECK: %[[VAL_18:.*]] = memref.alloc(%[[VAL_16]]) : memref @@ -1371,15 +1371,15 @@ func @four_tensors_op(%arga: tensor, // CHECK-SAME: %[[VAL_3:.*3]]: tensor) -> tensor { // CHECK: %[[VAL_4:.*]] = constant 0 : index // CHECK: %[[VAL_5:.*]] = constant 1 : index -// CHECK: %[[VAL_6:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_4]] : tensor to memref -// CHECK: %[[VAL_7:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_4]] : tensor to memref -// CHECK: %[[VAL_8:.*]] = linalg.sparse_values %[[VAL_0]] : tensor to memref -// CHECK: %[[VAL_9:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_4]] : tensor to memref -// CHECK: %[[VAL_10:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_4]] : tensor to memref -// CHECK: %[[VAL_11:.*]] = linalg.sparse_values %[[VAL_1]] : tensor to memref -// CHECK: %[[VAL_12:.*]] = linalg.sparse_pointers %[[VAL_2]], %[[VAL_4]] : tensor to memref -// CHECK: %[[VAL_13:.*]] = linalg.sparse_indices %[[VAL_2]], %[[VAL_4]] : tensor to memref -// CHECK: %[[VAL_14:.*]] = linalg.sparse_values %[[VAL_2]] : tensor to memref +// CHECK: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_4]] : tensor to memref +// CHECK: %[[VAL_7:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_4]] : tensor to memref +// CHECK: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor to memref +// CHECK: %[[VAL_9:.*]] = sparse_tensor.pointers %[[VAL_1]], %[[VAL_4]] : tensor to memref +// CHECK: %[[VAL_10:.*]] = sparse_tensor.indices %[[VAL_1]], %[[VAL_4]] : tensor to memref +// CHECK: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_1]] : tensor to memref +// CHECK: %[[VAL_12:.*]] = sparse_tensor.pointers %[[VAL_2]], %[[VAL_4]] : tensor to memref +// CHECK: %[[VAL_13:.*]] = sparse_tensor.indices %[[VAL_2]], %[[VAL_4]] : tensor to memref +// CHECK: %[[VAL_14:.*]] = sparse_tensor.values %[[VAL_2]] : tensor to memref // CHECK: %[[VAL_15:.*]] = memref.buffer_cast %[[VAL_3]] : memref // CHECK: %[[VAL_16:.*]] = memref.alloc() : memref // CHECK: linalg.copy(%[[VAL_15]], %[[VAL_16]]) : memref, memref diff --git a/mlir/test/Dialect/Linalg/sparse_2d.mlir b/mlir/test/Dialect/Linalg/sparse_2d.mlir index b9e14e3afb8e..4303c9dfb41d 100644 
--- a/mlir/test/Dialect/Linalg/sparse_2d.mlir +++ b/mlir/test/Dialect/Linalg/sparse_2d.mlir @@ -110,9 +110,9 @@ func @mul_dd(%arga: tensor<32x16xf32>, %argb: tensor<32x16xf32>, %argx: tensor<3 // CHECK: %[[VAL_5:.*]] = constant 0 : index // CHECK: %[[VAL_6:.*]] = constant true // CHECK: %[[VAL_7:.*]] = constant 1 : index -// CHECK: %[[VAL_8:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_7]] : tensor<32x16xf32> to memref -// CHECK: %[[VAL_9:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_7]] : tensor<32x16xf32> to memref -// CHECK: %[[VAL_10:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_8:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_7]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_9:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_7]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32> to memref // CHECK: %[[VAL_11:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32x16xf32> // CHECK: %[[VAL_12:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32x16xf32> // CHECK: %[[VAL_13:.*]] = memref.alloc() : memref<32x16xf32> @@ -172,9 +172,9 @@ func @add_ds(%arga: tensor<32x16xf32>, %argb: tensor<32x16xf32>, %argx: tensor<3 // CHECK: %[[VAL_3:.*]] = constant 32 : index // CHECK: %[[VAL_4:.*]] = constant 0 : index // CHECK: %[[VAL_5:.*]] = constant 1 : index -// CHECK: %[[VAL_6:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_5]] : tensor<32x16xf32> to memref -// CHECK: %[[VAL_7:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_5]] : tensor<32x16xf32> to memref -// CHECK: %[[VAL_8:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_5]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_7:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_5]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32> to memref // CHECK: %[[VAL_9:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32x16xf32> // CHECK: %[[VAL_10:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32x16xf32> // CHECK: %[[VAL_11:.*]] = memref.alloc() : memref<32x16xf32> @@ -229,9 +229,9 @@ func @mul_ds(%arga: tensor<32x16xf32>, %argb: tensor<32x16xf32>, %argx: tensor<3 // CHECK: %[[VAL_5:.*]] = constant true // CHECK: %[[VAL_6:.*]] = constant 0 : index // CHECK: %[[VAL_7:.*]] = constant 1 : index -// CHECK: %[[VAL_8:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_6]] : tensor<32x16xf32> to memref -// CHECK: %[[VAL_9:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_6]] : tensor<32x16xf32> to memref -// CHECK: %[[VAL_10:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_8:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_6]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_9:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_6]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32> to memref // CHECK: %[[VAL_11:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32x16xf32> // CHECK: %[[VAL_12:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32x16xf32> // CHECK: %[[VAL_13:.*]] = memref.alloc() : memref<32x16xf32> @@ -296,9 +296,9 @@ func @add_sd(%arga: tensor<32x16xf32>, %argb: tensor<32x16xf32>, %argx: tensor<3 // CHECK: %[[VAL_3:.*]] = constant 16 : index // CHECK: %[[VAL_4:.*]] = constant 0 : index // CHECK: %[[VAL_5:.*]] = constant 1 : index -// CHECK: %[[VAL_6:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref -// 
CHECK: %[[VAL_7:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref -// CHECK: %[[VAL_8:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_7:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32> to memref // CHECK: %[[VAL_9:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32x16xf32> // CHECK: %[[VAL_10:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32x16xf32> // CHECK: %[[VAL_11:.*]] = memref.alloc() : memref<32x16xf32> @@ -354,11 +354,11 @@ func @mul_sd(%arga: tensor<32x16xf32>, %argb: tensor<32x16xf32>, %argx: tensor<3 // CHECK: %[[VAL_5:.*]] = constant true // CHECK: %[[VAL_6:.*]] = constant 0 : index // CHECK: %[[VAL_7:.*]] = constant 1 : index -// CHECK: %[[VAL_8:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_6]] : tensor<32x16xf32> to memref -// CHECK: %[[VAL_9:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_6]] : tensor<32x16xf32> to memref -// CHECK: %[[VAL_10:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_7]] : tensor<32x16xf32> to memref -// CHECK: %[[VAL_11:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_7]] : tensor<32x16xf32> to memref -// CHECK: %[[VAL_12:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_8:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_6]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_9:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_6]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_10:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_7]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_11:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_7]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_12:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32> to memref // CHECK: %[[VAL_13:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32x16xf32> // CHECK: %[[VAL_14:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32x16xf32> // CHECK: %[[VAL_15:.*]] = memref.alloc() : memref<32x16xf32> @@ -446,11 +446,11 @@ func @add_ss(%arga: tensor<32x16xf32>, %argb: tensor<32x16xf32>, %argx: tensor<3 // CHECK-SAME: %[[VAL_2:.*2]]: tensor<32x16xf32>) -> tensor<32x16xf32> { // CHECK: %[[VAL_3:.*]] = constant 0 : index // CHECK: %[[VAL_4:.*]] = constant 1 : index -// CHECK: %[[VAL_5:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16xf32> to memref -// CHECK: %[[VAL_6:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16xf32> to memref -// CHECK: %[[VAL_7:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref -// CHECK: %[[VAL_8:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref -// CHECK: %[[VAL_9:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref +// CHECK: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32> to memref // CHECK: %[[VAL_10:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32x16xf32> // CHECK: %[[VAL_11:.*]] = memref.buffer_cast %[[VAL_2]] : 
memref<32x16xf32>
 // CHECK: %[[VAL_12:.*]] = memref.alloc() : memref<32x16xf32>
@@ -505,16 +505,16 @@ func @mul_ss(%arga: tensor<32x16xf32>, %argb: tensor<32x16xf32>, %argx: tensor<3
 // CHECK-SAME: %[[VAL_2:.*2]]: tensor<32x16xf32>) -> tensor<32x16xf32> {
 // CHECK: %[[VAL_3:.*]] = constant 0 : index
 // CHECK: %[[VAL_4:.*]] = constant 1 : index
-// CHECK: %[[VAL_5:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
-// CHECK: %[[VAL_6:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
-// CHECK: %[[VAL_7:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
-// CHECK: %[[VAL_8:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
-// CHECK: %[[VAL_9:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16xf32> to memref<?xf32>
-// CHECK: %[[VAL_10:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
-// CHECK: %[[VAL_11:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
-// CHECK: %[[VAL_12:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
-// CHECK: %[[VAL_13:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
-// CHECK: %[[VAL_14:.*]] = linalg.sparse_values %[[VAL_1]] : tensor<32x16xf32> to memref<?xf32>
+// CHECK: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32> to memref<?xf32>
+// CHECK: %[[VAL_10:.*]] = sparse_tensor.pointers %[[VAL_1]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_11:.*]] = sparse_tensor.indices %[[VAL_1]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_12:.*]] = sparse_tensor.pointers %[[VAL_1]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_13:.*]] = sparse_tensor.indices %[[VAL_1]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_14:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x16xf32> to memref<?xf32>
 // CHECK: %[[VAL_15:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32x16xf32>
 // CHECK: %[[VAL_16:.*]] = memref.alloc() : memref<32x16xf32>
 // CHECK: linalg.copy(%[[VAL_15]], %[[VAL_16]]) : memref<32x16xf32>, memref<32x16xf32>
@@ -670,16 +670,16 @@ func @add_ss_ss(%arga: tensor<32x16xf32>, %argb: tensor<32x16xf32>, %argx: tenso
 // CHECK-SAME: %[[VAL_2:.*2]]: tensor<32x16xf32>) -> tensor<32x16xf32> {
 // CHECK: %[[VAL_3:.*]] = constant 0 : index
 // CHECK: %[[VAL_4:.*]] = constant 1 : index
-// CHECK: %[[VAL_5:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
-// CHECK: %[[VAL_6:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
-// CHECK: %[[VAL_7:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
-// CHECK: %[[VAL_8:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
-// CHECK: %[[VAL_9:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16xf32> to memref<?xf32>
-// CHECK: %[[VAL_10:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
-// CHECK: %[[VAL_11:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
-// CHECK: %[[VAL_12:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
-// CHECK: %[[VAL_13:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
-// CHECK: %[[VAL_14:.*]] = linalg.sparse_values %[[VAL_1]] : tensor<32x16xf32> to memref<?xf32>
+// CHECK: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32> to memref<?xf32>
+// CHECK: %[[VAL_10:.*]] = sparse_tensor.pointers %[[VAL_1]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_11:.*]] = sparse_tensor.indices %[[VAL_1]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_12:.*]] = sparse_tensor.pointers %[[VAL_1]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_13:.*]] = sparse_tensor.indices %[[VAL_1]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_14:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x16xf32> to memref<?xf32>
 // CHECK: %[[VAL_15:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32x16xf32>
 // CHECK: %[[VAL_16:.*]] = memref.alloc() : memref<32x16xf32>
 // CHECK: linalg.copy(%[[VAL_15]], %[[VAL_16]]) : memref<32x16xf32>, memref<32x16xf32>
@@ -782,16 +782,16 @@ func @mul_ss_ss(%arga: tensor<32x16xf32>, %argb: tensor<32x16xf32>, %argx: tenso
 // CHECK-SAME: %[[VAL_2:.*2]]: tensor<32x16xf32>) -> tensor<32x16xf32> {
 // CHECK: %[[VAL_3:.*]] = constant 0 : index
 // CHECK: %[[VAL_4:.*]] = constant 1 : index
-// CHECK: %[[VAL_5:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
-// CHECK: %[[VAL_6:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
-// CHECK: %[[VAL_7:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
-// CHECK: %[[VAL_8:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
-// CHECK: %[[VAL_9:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16xf32> to memref<?xf32>
-// CHECK: %[[VAL_10:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
-// CHECK: %[[VAL_11:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
-// CHECK: %[[VAL_12:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
-// CHECK: %[[VAL_13:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
-// CHECK: %[[VAL_14:.*]] = linalg.sparse_values %[[VAL_1]] : tensor<32x16xf32> to memref<?xf32>
+// CHECK: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32> to memref<?xf32>
+// CHECK: %[[VAL_10:.*]] = sparse_tensor.pointers %[[VAL_1]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_11:.*]] = sparse_tensor.indices %[[VAL_1]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_12:.*]] = sparse_tensor.pointers %[[VAL_1]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_13:.*]] = sparse_tensor.indices %[[VAL_1]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_14:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x16xf32> to memref<?xf32>
 // CHECK: %[[VAL_15:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32x16xf32>
 // CHECK: %[[VAL_16:.*]] = memref.alloc() : memref<32x16xf32>
 // CHECK: linalg.copy(%[[VAL_15]], %[[VAL_16]]) : memref<32x16xf32>, memref<32x16xf32>
@@ -947,16 +947,16 @@ func @add_sd_ds(%arga: tensor<32x16xf32>, %argb: tensor<32x16xf32>, %argx: tenso
 // CHECK-SAME: %[[VAL_2:.*2]]: tensor<32x16xf32>) -> tensor<32x16xf32> {
 // CHECK: %[[VAL_3:.*]] = constant 0 : index
 // CHECK: %[[VAL_4:.*]] = constant 1 : index
-// CHECK: %[[VAL_5:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
-// CHECK: %[[VAL_6:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
-// CHECK: %[[VAL_7:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
-// CHECK: %[[VAL_8:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
-// CHECK: %[[VAL_9:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16xf32> to memref<?xf32>
-// CHECK: %[[VAL_10:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
-// CHECK: %[[VAL_11:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
-// CHECK: %[[VAL_12:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
-// CHECK: %[[VAL_13:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
-// CHECK: %[[VAL_14:.*]] = linalg.sparse_values %[[VAL_1]] : tensor<32x16xf32> to memref<?xf32>
+// CHECK: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32> to memref<?xf32>
+// CHECK: %[[VAL_10:.*]] = sparse_tensor.pointers %[[VAL_1]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_11:.*]] = sparse_tensor.indices %[[VAL_1]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_12:.*]] = sparse_tensor.pointers %[[VAL_1]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_13:.*]] = sparse_tensor.indices %[[VAL_1]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_14:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x16xf32> to memref<?xf32>
 // CHECK: %[[VAL_15:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32x16xf32>
 // CHECK: %[[VAL_16:.*]] = memref.alloc() : memref<32x16xf32>
 // CHECK: linalg.copy(%[[VAL_15]], %[[VAL_16]]) : memref<32x16xf32>, memref<32x16xf32>
@@ -1060,9 +1060,9 @@ func @mul_sd_ds(%arga: tensor<32x16xf32>, %argb: tensor<32x16xf32>, %argx: tenso
 // CHECK: %[[VAL_3:.*]] = constant 16 : index
 // CHECK: %[[VAL_4:.*]] = constant 0 : index
 // CHECK: %[[VAL_5:.*]] = constant 1 : index
-// CHECK: %[[VAL_6:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_5]] : tensor<16x32xf32> to memref<?xindex>
-// CHECK: %[[VAL_7:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_5]] : tensor<16x32xf32> to memref<?xindex>
-// CHECK: %[[VAL_8:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<16x32xf32> to memref<?xf32>
+// CHECK: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_5]] : tensor<16x32xf32> to memref<?xindex>
+// CHECK: %[[VAL_7:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_5]] : tensor<16x32xf32> to memref<?xindex>
+// CHECK: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<16x32xf32> to memref<?xf32>
 // CHECK: %[[VAL_9:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32xf32>
 // CHECK: %[[VAL_10:.*]] = memref.buffer_cast %[[VAL_2]] : memref<16xf32>
 // CHECK: %[[VAL_11:.*]] = memref.alloc() : memref<16xf32>
@@ -1116,8 +1116,8 @@ func @matvec(%argA: tensor<16x32xf32>, %argb: tensor<32xf32>, %argx: tensor<16xf
 // CHECK: %[[VAL_2:.*]] = constant 10 : index
 // CHECK: %[[VAL_3:.*]] = constant 0 : index
 // CHECK: %[[VAL_4:.*]] = constant 1 : index
-// CHECK: %[[VAL_5:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_4]] : tensor<10x20xf32> to memref<?xindex>
-// CHECK: %[[VAL_6:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<10x20xf32> to memref<?xf32>
+// CHECK: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_4]] : tensor<10x20xf32> to memref<?xindex>
+// CHECK: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<10x20xf32> to memref<?xf32>
 // CHECK: %[[VAL_7:.*]] = memref.buffer_cast %[[VAL_1]] : memref<f32>
 // CHECK: %[[VAL_8:.*]] = memref.alloc() : memref<f32>
 // CHECK: linalg.copy(%[[VAL_7]], %[[VAL_8]]) : memref<f32>, memref<f32>
@@ -1166,9 +1166,9 @@ func @sum_reduction(%arga: tensor<10x20xf32>, %argx: tensor<f32>) -> tensor<f32>
 // CHECK-DAG: %[[VAL_3:.*]] = constant 0 : index
 // CHECK-DAG: %[[VAL_4:.*]] = constant 1 : index
 // CHECK-DAG: %[[VAL_2:.*]] = constant 2.000000e+00 : f64
-// CHECK: %[[VAL_5:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_4]] : tensor<?x?xf64> to memref<?xindex>
-// CHECK: %[[VAL_6:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_4]] : tensor<?x?xf64> to memref<?xindex>
-// CHECK: %[[VAL_7:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<?x?xf64> to memref<?xf64>
+// CHECK: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_4]] : tensor<?x?xf64> to memref<?xindex>
+// CHECK: %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_4]] : tensor<?x?xf64> to memref<?xindex>
+// CHECK: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<?x?xf64> to memref<?xf64>
 // CHECK: %[[VAL_8:.*]] = memref.dim %[[VAL_1]], %[[VAL_3]] : tensor<?x?xf64>
 // CHECK: %[[VAL_9:.*]] = memref.dim %[[VAL_1]], %[[VAL_4]] : tensor<?x?xf64>
 // CHECK: %[[VAL_10:.*]] = memref.buffer_cast %[[VAL_1]] : memref<?x?xf64>
@@ -1224,11 +1224,11 @@ func @scale(%arga: tensor<?x?xf64>, %argx: tensor<?x?xf64>) -> tensor<?x?xf64> {
 // CHECK-SAME: %[[VAL_3:.*3]]: tensor<?x?xf32>) -> tensor<?x?xf32> {
 // CHECK: %[[VAL_4:.*]] = constant 0 : index
 // CHECK: %[[VAL_5:.*]] = constant 1 : index
-// CHECK: %[[VAL_6:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_4]] : tensor<?x?xf32> to memref<?xindex>
-// CHECK: %[[VAL_7:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_4]] : tensor<?x?xf32> to memref<?xindex>
-// CHECK: %[[VAL_8:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_5]] : tensor<?x?xf32> to memref<?xindex>
-// CHECK: %[[VAL_9:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_5]] : tensor<?x?xf32> to memref<?xindex>
-// CHECK: %[[VAL_10:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<?x?xf32> to memref<?xf32>
+// CHECK: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_4]] : tensor<?x?xf32> to memref<?xindex>
+// CHECK: %[[VAL_7:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_4]] : tensor<?x?xf32> to memref<?xindex>
+// CHECK: %[[VAL_8:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_5]] : tensor<?x?xf32> to memref<?xindex>
+// CHECK: %[[VAL_9:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_5]] : tensor<?x?xf32> to memref<?xindex>
+// CHECK: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<?x?xf32> to memref<?xf32>
 // CHECK: %[[VAL_11:.*]] = memref.buffer_cast %[[VAL_1]] : memref<?x?xf32>
 // CHECK: %[[VAL_12:.*]] = memref.dim %[[VAL_2]], %[[VAL_4]] : tensor<?x?xf32>
 // CHECK: %[[VAL_13:.*]] = memref.buffer_cast %[[VAL_2]] : memref<?x?xf32>
@@ -1308,17 +1308,17 @@ func @sampled_dense_dense(%args: tensor<?x?xf32>,
 // CHECK: %[[VAL_6:.*]] = constant 0 : index
 // CHECK: %[[VAL_7:.*]] = constant true
 // CHECK: %[[VAL_8:.*]] = constant 1 : index
-// CHECK: %[[VAL_9:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_6]] : tensor<?x?xf32> to memref<?xindex>
-// CHECK: %[[VAL_10:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_6]] : tensor<?x?xf32> to memref<?xindex>
-// CHECK: %[[VAL_11:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_8]] : tensor<?x?xf32> to memref<?xindex>
-// CHECK: %[[VAL_12:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_8]] : tensor<?x?xf32> to memref<?xindex>
-// CHECK: %[[VAL_13:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<?x?xf32> to memref<?xf32>
-// CHECK: %[[VAL_14:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_8]] : tensor<?x?xf32> to memref<?xindex>
-// CHECK: %[[VAL_15:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_8]] : tensor<?x?xf32> to memref<?xindex>
-// CHECK: %[[VAL_16:.*]] = linalg.sparse_values %[[VAL_1]] : tensor<?x?xf32> to memref<?xf32>
-// CHECK: %[[VAL_17:.*]] = linalg.sparse_pointers %[[VAL_2]], %[[VAL_8]] : tensor<?x?xf32> to memref<?xindex>
-// CHECK: %[[VAL_18:.*]] = linalg.sparse_indices %[[VAL_2]], %[[VAL_8]] : tensor<?x?xf32> to memref<?xindex>
-// CHECK: %[[VAL_19:.*]] = linalg.sparse_values %[[VAL_2]] : tensor<?x?xf32> to memref<?xf32>
+// CHECK: %[[VAL_9:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_6]] : tensor<?x?xf32> to memref<?xindex>
+// CHECK: %[[VAL_10:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_6]] : tensor<?x?xf32> to memref<?xindex>
+// CHECK: %[[VAL_11:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_8]] : tensor<?x?xf32> to memref<?xindex>
+// CHECK: %[[VAL_12:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_8]] : tensor<?x?xf32> to memref<?xindex>
+// CHECK: %[[VAL_13:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<?x?xf32> to memref<?xf32>
+// CHECK: %[[VAL_14:.*]] = sparse_tensor.pointers %[[VAL_1]], %[[VAL_8]] : tensor<?x?xf32> to memref<?xindex>
+// CHECK: %[[VAL_15:.*]] = sparse_tensor.indices %[[VAL_1]], %[[VAL_8]] : tensor<?x?xf32> to memref<?xindex>
+// CHECK: %[[VAL_16:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<?x?xf32> to memref<?xf32>
+// CHECK: %[[VAL_17:.*]] = sparse_tensor.pointers %[[VAL_2]], %[[VAL_8]] : tensor<?x?xf32> to memref<?xindex>
+// CHECK: %[[VAL_18:.*]] = sparse_tensor.indices %[[VAL_2]], %[[VAL_8]] : tensor<?x?xf32> to memref<?xindex>
+// CHECK: %[[VAL_19:.*]] = sparse_tensor.values %[[VAL_2]] : tensor<?x?xf32> to memref<?xf32>
 // CHECK: %[[VAL_20:.*]] = memref.buffer_cast %[[VAL_3]] : memref
 // CHECK: %[[VAL_21:.*]] = memref.buffer_cast %[[VAL_4]] : memref
 // CHECK: %[[VAL_22:.*]] = memref.dim %[[VAL_5]], %[[VAL_6]] : tensor
diff --git a/mlir/test/Dialect/Linalg/sparse_3d.mlir b/mlir/test/Dialect/Linalg/sparse_3d.mlir
index cecc09ee7e7d..41ad5757924d 100644
--- a/mlir/test/Dialect/Linalg/sparse_3d.mlir
+++ b/mlir/test/Dialect/Linalg/sparse_3d.mlir
@@ -118,9 +118,9 @@ func @mul_ddd(%arga: tensor<32x16x8xf32>, %argb: tensor<32x16x8xf32>, %argx: ten
 // CHECK: %[[VAL_7:.*]] = constant 0 : index
 // CHECK: %[[VAL_8:.*]] = constant true
 // CHECK: %[[VAL_9:.*]] = constant 1 : index
-// CHECK: %[[VAL_10:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_11:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_12:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
+// CHECK: %[[VAL_10:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_11:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_12:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
 // CHECK: %[[VAL_13:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32x16x8xf32>
 // CHECK: %[[VAL_14:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32x16x8xf32>
 // CHECK: %[[VAL_15:.*]] = memref.alloc() : memref<32x16x8xf32>
@@ -186,9 +186,9 @@ func @add_dds(%arga: tensor<32x16x8xf32>, %argb: tensor<32x16x8xf32>, %argx: ten
 // CHECK: %[[VAL_5:.*]] = constant 16 : index
 // CHECK: %[[VAL_6:.*]] = constant 0 : index
 // CHECK: %[[VAL_7:.*]] = constant 1 : index
-// CHECK: %[[VAL_8:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_9:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_10:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
+// CHECK: %[[VAL_8:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_9:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
 // CHECK: %[[VAL_11:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32x16x8xf32>
 // CHECK: %[[VAL_12:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32x16x8xf32>
 // CHECK: %[[VAL_13:.*]] = memref.alloc() : memref<32x16x8xf32>
@@ -248,9 +248,9 @@ func @mul_dds(%arga: tensor<32x16x8xf32>, %argb: tensor<32x16x8xf32>, %argx: ten
 // CHECK: %[[VAL_6:.*]] = constant true
 // CHECK: %[[VAL_7:.*]] = constant 0 : index
 // CHECK: %[[VAL_8:.*]] = constant 1 : index
-// CHECK: %[[VAL_9:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_8]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_10:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_8]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_11:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
+// CHECK: %[[VAL_9:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_8]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_10:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_8]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
 // CHECK: %[[VAL_12:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32x16x8xf32>
 // CHECK: %[[VAL_13:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32x16x8xf32>
 // CHECK: %[[VAL_14:.*]] = memref.alloc() : memref<32x16x8xf32>
@@ -319,9 +319,9 @@ func @add_dsd(%arga: tensor<32x16x8xf32>, %argb: tensor<32x16x8xf32>, %argx: ten
 // CHECK: %[[VAL_4:.*]] = constant 8 : index
 // CHECK: %[[VAL_5:.*]] = constant 0 : index
 // CHECK: %[[VAL_6:.*]] = constant 1 : index
-// CHECK: %[[VAL_7:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_6]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_8:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_6]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_9:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
+// CHECK: %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_6]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_6]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
 // CHECK: %[[VAL_10:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32x16x8xf32>
 // CHECK: %[[VAL_11:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32x16x8xf32>
 // CHECK: %[[VAL_12:.*]] = memref.alloc() : memref<32x16x8xf32>
@@ -382,11 +382,11 @@ func @mul_dsd(%arga: tensor<32x16x8xf32>, %argb: tensor<32x16x8xf32>, %argx: ten
 // CHECK: %[[VAL_7:.*]] = constant true
 // CHECK: %[[VAL_8:.*]] = constant 0 : index
 // CHECK: %[[VAL_9:.*]] = constant 1 : index
-// CHECK: %[[VAL_10:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_9]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_11:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_9]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_12:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_13:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_14:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
+// CHECK: %[[VAL_10:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_9]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_11:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_9]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_12:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_13:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_14:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
 // CHECK: %[[VAL_15:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32x16x8xf32>
 // CHECK: %[[VAL_16:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32x16x8xf32>
 // CHECK: %[[VAL_17:.*]] = memref.alloc() : memref<32x16x8xf32>
@@ -479,11 +479,11 @@ func @add_dss(%arga: tensor<32x16x8xf32>, %argb: tensor<32x16x8xf32>, %argx: ten
 // CHECK: %[[VAL_4:.*]] = constant 32 : index
 // CHECK: %[[VAL_5:.*]] = constant 0 : index
 // CHECK: %[[VAL_6:.*]] = constant 1 : index
-// CHECK: %[[VAL_7:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_6]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_8:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_6]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_9:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_10:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_11:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
+// CHECK: %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_6]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_6]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_9:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_10:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
 // CHECK: %[[VAL_12:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32x16x8xf32>
 // CHECK: %[[VAL_13:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32x16x8xf32>
 // CHECK: %[[VAL_14:.*]] = memref.alloc() : memref<32x16x8xf32>
@@ -545,9 +545,9 @@ func @mul_dss(%arga: tensor<32x16x8xf32>, %argb: tensor<32x16x8xf32>, %argx: ten
 // CHECK: %[[VAL_6:.*]] = constant true
 // CHECK: %[[VAL_7:.*]] = constant 0 : index
 // CHECK: %[[VAL_8:.*]] = constant 1 : index
-// CHECK: %[[VAL_9:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_7]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_10:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_7]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_11:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
+// CHECK: %[[VAL_9:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_7]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_10:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_7]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
 // CHECK: %[[VAL_12:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32x16x8xf32>
 // CHECK: %[[VAL_13:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32x16x8xf32>
 // CHECK: %[[VAL_14:.*]] = memref.alloc() : memref<32x16x8xf32>
@@ -621,9 +621,9 @@ func @add_sdd(%arga: tensor<32x16x8xf32>, %argb: tensor<32x16x8xf32>, %argx: ten
 // CHECK: %[[VAL_4:.*]] = constant 8 : index
 // CHECK: %[[VAL_5:.*]] = constant 0 : index
 // CHECK: %[[VAL_6:.*]] = constant 1 : index
-// CHECK: %[[VAL_7:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_5]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_8:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_5]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_9:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
+// CHECK: %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_5]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_5]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
 // CHECK: %[[VAL_10:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32x16x8xf32>
 // CHECK: %[[VAL_11:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32x16x8xf32>
 // CHECK: %[[VAL_12:.*]] = memref.alloc() : memref<32x16x8xf32>
@@ -685,11 +685,11 @@ func @mul_sdd(%arga: tensor<32x16x8xf32>, %argb: tensor<32x16x8xf32>, %argx: ten
 // CHECK: %[[VAL_7:.*]] = constant true
 // CHECK: %[[VAL_8:.*]] = constant 0 : index
 // CHECK: %[[VAL_9:.*]] = constant 1 : index
-// CHECK: %[[VAL_10:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_8]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_11:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_8]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_12:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_13:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_14:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
+// CHECK: %[[VAL_10:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_8]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_11:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_8]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_12:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_13:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_14:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
 // CHECK: %[[VAL_15:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32x16x8xf32>
 // CHECK: %[[VAL_16:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32x16x8xf32>
 // CHECK: %[[VAL_17:.*]] = memref.alloc() : memref<32x16x8xf32>
@@ -787,11 +787,11 @@ func @add_sds(%arga: tensor<32x16x8xf32>, %argb: tensor<32x16x8xf32>, %argx: ten
 // CHECK: %[[VAL_4:.*]] = constant 16 : index
 // CHECK: %[[VAL_5:.*]] = constant 0 : index
 // CHECK: %[[VAL_6:.*]] = constant 1 : index
-// CHECK: %[[VAL_7:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_5]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_8:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_5]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_9:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_10:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_11:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
+// CHECK: %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_5]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_5]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_9:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_10:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
 // CHECK: %[[VAL_12:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32x16x8xf32>
 // CHECK: %[[VAL_13:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32x16x8xf32>
 // CHECK: %[[VAL_14:.*]] = memref.alloc() : memref<32x16x8xf32>
@@ -854,11 +854,11 @@ func @mul_sds(%arga: tensor<32x16x8xf32>, %argb: tensor<32x16x8xf32>, %argx: ten
 // CHECK: %[[VAL_6:.*]] = constant true
 // CHECK: %[[VAL_7:.*]] = constant 0 : index
 // CHECK: %[[VAL_8:.*]] = constant 1 : index
-// CHECK: %[[VAL_9:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_7]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_10:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_7]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_11:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_8]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_12:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_8]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_13:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
+// CHECK: %[[VAL_9:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_7]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_10:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_7]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_11:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_8]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_12:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_8]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_13:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
 // CHECK: %[[VAL_14:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32x16x8xf32>
 // CHECK: %[[VAL_15:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32x16x8xf32>
 // CHECK: %[[VAL_16:.*]] = memref.alloc() : memref<32x16x8xf32>
@@ -959,11 +959,11 @@ func @add_ssd(%arga: tensor<32x16x8xf32>, %argb: tensor<32x16x8xf32>, %argx: ten
 // CHECK: %[[VAL_3:.*]] = constant 8 : index
 // CHECK: %[[VAL_4:.*]] = constant 0 : index
 // CHECK: %[[VAL_5:.*]] = constant 1 : index
-// CHECK: %[[VAL_6:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_4]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_7:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_4]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_8:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_5]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_9:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_5]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_10:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
+// CHECK: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_4]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_7:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_4]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_8:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_5]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_9:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_5]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
 // CHECK: %[[VAL_11:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32x16x8xf32>
 // CHECK: %[[VAL_12:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32x16x8xf32>
 // CHECK: %[[VAL_13:.*]] = memref.alloc() : memref<32x16x8xf32>
@@ -1027,13 +1027,13 @@ func @mul_ssd(%arga: tensor<32x16x8xf32>, %argb: tensor<32x16x8xf32>, %argx: ten
 // CHECK: %[[VAL_7:.*]] = constant true
 // CHECK: %[[VAL_8:.*]] = constant 0 : index
 // CHECK: %[[VAL_9:.*]] = constant 1 : index
-// CHECK: %[[VAL_10:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_8]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_11:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_8]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_12:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_9]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_13:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_9]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_14:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_15:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_16:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
+// CHECK: %[[VAL_10:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_8]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_11:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_8]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_12:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_9]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_13:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_9]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_14:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_15:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_16:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
 // CHECK: %[[VAL_17:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32x16x8xf32>
 // CHECK: %[[VAL_18:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32x16x8xf32>
 // CHECK: %[[VAL_19:.*]] = memref.alloc() : memref<32x16x8xf32>
@@ -1158,13 +1158,13 @@ func @add_sss(%arga: tensor<32x16x8xf32>, %argb: tensor<32x16x8xf32>, %argx: ten
 // CHECK: %[[VAL_3:.*]] = constant 2 : index
 // CHECK: %[[VAL_4:.*]] = constant 0 : index
 // CHECK: %[[VAL_5:.*]] = constant 1 : index
-// CHECK: %[[VAL_6:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_4]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_7:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_4]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_8:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_5]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_9:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_5]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_10:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_11:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
-// CHECK: %[[VAL_12:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
+// CHECK: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_4]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_7:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_4]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_8:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_5]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_9:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_5]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_10:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_11:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
+// CHECK: %[[VAL_12:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
 // CHECK: %[[VAL_13:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32x16x8xf32>
 // CHECK: %[[VAL_14:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32x16x8xf32>
 // CHECK: %[[VAL_15:.*]] = memref.alloc() : memref<32x16x8xf32>
@@ -1229,9 +1229,9 @@ func @mul_sss(%arga: tensor<32x16x8xf32>, %argb: tensor<32x16x8xf32>, %argx: ten
 // CHECK: %[[VAL_4:.*]] = constant 2 : index
 // CHECK: %[[VAL_5:.*]] = constant 0 : index
 // CHECK: %[[VAL_6:.*]] = constant 1 : index
-// CHECK: %[[VAL_7:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_4]] : tensor<?x?x?xf32> to memref<?xindex>
-// CHECK: %[[VAL_8:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_4]] : tensor<?x?x?xf32> to memref<?xindex>
-// CHECK: %[[VAL_9:.*]] = linalg.sparse_values %[[VAL_1]] : tensor<?x?x?xf32> to memref<?xf32>
+// CHECK: %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_1]], %[[VAL_4]] : tensor<?x?x?xf32> to memref<?xindex>
+// CHECK: %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_1]], %[[VAL_4]] : tensor<?x?x?xf32> to memref<?xindex>
+// CHECK: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<?x?x?xf32> to memref<?xf32>
 // CHECK: %[[VAL_10:.*]] = memref.dim %[[VAL_2]], %[[VAL_5]] : tensor<?x?xf32>
 // CHECK: %[[VAL_11:.*]] = memref.buffer_cast %[[VAL_2]] : memref<?x?xf32>
 // CHECK: %[[VAL_12:.*]] = memref.buffer_cast %[[VAL_3]] : memref<?x?xf32>
@@ -1300,10 +1300,10 @@ func @kernel_3d(%arga: tensor<?x?xf32>,
 // CHECK: %[[VAL_2:.*]] = constant 2 : index
 // CHECK: %[[VAL_3:.*]] = constant 0 : index
 // CHECK: %[[VAL_4:.*]] = constant 1 : index
-// CHECK: %[[VAL_5:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_3]] : tensor<10x20x30xf32> to memref<?xindex>
-// CHECK: %[[VAL_6:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_4]] : tensor<10x20x30xf32> to memref<?xindex>
-// CHECK: %[[VAL_7:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_2]] : tensor<10x20x30xf32> to memref<?xindex>
-// CHECK: %[[VAL_8:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<10x20x30xf32> to memref<?xf32>
+// CHECK: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_3]] : tensor<10x20x30xf32> to memref<?xindex>
+// CHECK: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_4]] : tensor<10x20x30xf32> to memref<?xindex>
+// CHECK: %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_2]] : tensor<10x20x30xf32> to memref<?xindex>
+// CHECK: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<10x20x30xf32> to memref<?xf32>
 // CHECK: %[[VAL_9:.*]] = memref.buffer_cast %[[VAL_1]] : memref<f32>
 // CHECK: %[[VAL_10:.*]] = memref.alloc() : memref<f32>
 // CHECK: linalg.copy(%[[VAL_9]], %[[VAL_10]]) : memref<f32>, memref<f32>
diff --git a/mlir/test/Dialect/Linalg/sparse_lower.mlir b/mlir/test/Dialect/Linalg/sparse_lower.mlir
index 58ebd7a12041..cdbaf6d2ebec 100644
--- a/mlir/test/Dialect/Linalg/sparse_lower.mlir
+++ b/mlir/test/Dialect/Linalg/sparse_lower.mlir
@@ -41,10 +41,10 @@
 // CHECK-HIR: %[[VAL_3:.*]] = constant 64 : index
 // CHECK-HIR: %[[VAL_4:.*]] = constant 0 : index
 // CHECK-HIR: %[[VAL_5:.*]] = constant 1 : index
-// CHECK-HIR: %[[VAL_6:.*]] = linalg.sparse_tensor %[[VAL_0]] : !llvm.ptr<i8> to tensor<64x64xf64>
-// CHECK-HIR: %[[VAL_7:.*]] = linalg.sparse_pointers %[[VAL_6]], %[[VAL_5]] : tensor<64x64xf64> to memref<?xindex>
-// CHECK-HIR: %[[VAL_8:.*]] = linalg.sparse_indices %[[VAL_6]], %[[VAL_5]] : tensor<64x64xf64> to memref<?xindex>
-// CHECK-HIR: %[[VAL_9:.*]] = linalg.sparse_values %[[VAL_6]] : tensor<64x64xf64> to memref<?xf64>
+// CHECK-HIR: %[[VAL_6:.*]] = sparse_tensor.fromPtr %[[VAL_0]] : !llvm.ptr<i8> to tensor<64x64xf64>
+// CHECK-HIR: %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_6]], %[[VAL_5]] : tensor<64x64xf64> to memref<?xindex>
+// CHECK-HIR: %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_6]], %[[VAL_5]] : tensor<64x64xf64> to memref<?xindex>
+// CHECK-HIR: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_6]] : tensor<64x64xf64> to memref<?xf64>
 // CHECK-HIR: %[[VAL_10:.*]] = memref.buffer_cast %[[VAL_1]] : memref<64xf64>
 // CHECK-HIR: %[[VAL_11:.*]] = memref.buffer_cast %[[VAL_2]] : memref<64xf64>
 // CHECK-HIR: %[[VAL_12:.*]] = memref.alloc() : memref<64xf64>
@@ -168,7 +168,7 @@ !SparseTensor = type !llvm.ptr<i8>
 func @matvec(%argA: !SparseTensor,
              %argb: tensor<64xf64>,
             %argx: tensor<64xf64>) -> tensor<64xf64> {
-  %arga = linalg.sparse_tensor %argA : !SparseTensor to tensor<64x64xf64>
+  %arga = sparse_tensor.fromPtr %argA : !SparseTensor to tensor<64x64xf64>
   %0 = linalg.generic #trait_matvec
       ins(%arga, %argb : tensor<64x64xf64>, tensor<64xf64>)
      outs(%argx: tensor<64xf64>) {
diff --git a/mlir/test/Dialect/Linalg/sparse_nd.mlir b/mlir/test/Dialect/Linalg/sparse_nd.mlir
index bc282ca5f8ba..646f83f570fe 100644
--- a/mlir/test/Dialect/Linalg/sparse_nd.mlir
+++ b/mlir/test/Dialect/Linalg/sparse_nd.mlir
@@ -34,11 +34,11 @@
 // CHECK: %[[VAL_11:.*]] = constant 0 : index
 // CHECK: %[[VAL_12:.*]] = constant 1 : index
 // CHECK: %[[VAL_13:.*]] = memref.buffer_cast %[[VAL_0]] : memref<10x20x30x40x50x60x70x80xf32>
-// CHECK: %[[VAL_14:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_3]] : tensor<80x70x60x50x40x30x20x10xf32> to memref<?xindex>
-// CHECK: %[[VAL_15:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_3]] : tensor<80x70x60x50x40x30x20x10xf32> to memref<?xindex>
-// CHECK: %[[VAL_16:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_4]] : tensor<80x70x60x50x40x30x20x10xf32> to memref<?xindex>
-// CHECK: %[[VAL_17:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_4]] : tensor<80x70x60x50x40x30x20x10xf32> to memref<?xindex>
-// CHECK: %[[VAL_18:.*]] = linalg.sparse_values %[[VAL_1]] : tensor<80x70x60x50x40x30x20x10xf32> to memref<?xf32>
+// CHECK: %[[VAL_14:.*]] = sparse_tensor.pointers %[[VAL_1]], %[[VAL_3]] : tensor<80x70x60x50x40x30x20x10xf32> to memref<?xindex>
+// CHECK: %[[VAL_15:.*]] = sparse_tensor.indices %[[VAL_1]], %[[VAL_3]] : tensor<80x70x60x50x40x30x20x10xf32> to memref<?xindex>
+// CHECK: %[[VAL_16:.*]] = sparse_tensor.pointers %[[VAL_1]], %[[VAL_4]] : tensor<80x70x60x50x40x30x20x10xf32> to memref<?xindex>
+// CHECK: %[[VAL_17:.*]] = sparse_tensor.indices %[[VAL_1]], %[[VAL_4]] : tensor<80x70x60x50x40x30x20x10xf32> to memref<?xindex>
+// CHECK: %[[VAL_18:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<80x70x60x50x40x30x20x10xf32> to memref<?xf32>
 // CHECK: %[[VAL_19:.*]] = memref.buffer_cast %[[VAL_2]] : memref<10x20x30x40x50x60x70x80xf32>
 // CHECK: %[[VAL_20:.*]] = memref.alloc() : memref<10x20x30x40x50x60x70x80xf32>
 // CHECK: linalg.copy(%[[VAL_19]], %[[VAL_20]]) : memref<10x20x30x40x50x60x70x80xf32>, memref<10x20x30x40x50x60x70x80xf32>
diff --git a/mlir/test/Dialect/Linalg/sparse_vector.mlir b/mlir/test/Dialect/Linalg/sparse_vector.mlir
index b22a4ebd7df2..69a7cfaa359e 100644
--- a/mlir/test/Dialect/Linalg/sparse_vector.mlir
+++ b/mlir/test/Dialect/Linalg/sparse_vector.mlir
@@ -228,8 +228,8 @@ func @mul_s(%arga: tensor<1024xf32>, %argb: tensor<1024xf32>, %argx: tensor<1024
 //
 !SparseTensor = type !llvm.ptr<i8>
 func @mul_s_alt(%argA: !SparseTensor, %argB: !SparseTensor, %argx: tensor<1024xf32>) -> tensor<1024xf32> {
-  %arga = linalg.sparse_tensor %argA : !SparseTensor to tensor<1024xf32>
-  %argb = linalg.sparse_tensor %argB : !SparseTensor to tensor<1024xf32>
+  %arga = sparse_tensor.fromPtr %argA : !SparseTensor to tensor<1024xf32>
+  %argb = sparse_tensor.fromPtr %argB : !SparseTensor to tensor<1024xf32>
   %0 = linalg.generic #trait_mul_s
     ins(%arga, %argb: tensor<1024xf32>, tensor<1024xf32>)
    outs(%argx: tensor<1024xf32>) {
diff --git a/mlir/test/Dialect/Linalg/sparse_lower_calls.mlir b/mlir/test/Dialect/SparseTensor/lowering.mlir
similarity index 74%
rename from mlir/test/Dialect/Linalg/sparse_lower_calls.mlir
rename to mlir/test/Dialect/SparseTensor/lowering.mlir
index 6727a8472cd8..6f02c10b49b2 100644
--- a/mlir/test/Dialect/Linalg/sparse_lower_calls.mlir
+++ b/mlir/test/Dialect/SparseTensor/lowering.mlir
@@ -8,9 +8,9 @@
 // CHECK: %[[T:.*]] = call @sparsePointers64(%[[A]], %[[C]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
 // CHECK: return %[[T]] : memref<?xindex>
 func @sparse_pointers(%arg0: !SparseTensor) -> memref<?xindex> {
-  %a = linalg.sparse_tensor %arg0 : !SparseTensor to tensor<128xf64>
+  %a = sparse_tensor.fromPtr %arg0 : !SparseTensor to tensor<128xf64>
   %c = constant 1 : index
-  %0 = linalg.sparse_pointers %a, %c : tensor<128xf64> to memref<?xindex>
+  %0 = sparse_tensor.pointers %a, %c : tensor<128xf64> to memref<?xindex>
   return %0 : memref<?xindex>
 }
 
@@ -20,9 +20,9 @@ func @sparse_pointers(%arg0: !SparseTensor) -> memref<?xindex> {
 // CHECK: %[[T:.*]] = call @sparsePointers32(%[[A]], %[[C]]) : (!llvm.ptr<i8>, index) -> memref<?xi32>
 // CHECK: return %[[T]] : memref<?xi32>
 func @sparse_pointers32(%arg0: !SparseTensor) -> memref<?xi32> {
-  %a = linalg.sparse_tensor %arg0 : !SparseTensor to tensor<128xf64>
+  %a = sparse_tensor.fromPtr %arg0 : !SparseTensor to tensor<128xf64>
   %c = constant 1 : index
-  %0 = linalg.sparse_pointers %a, %c : tensor<128xf64> to memref<?xi32>
+  %0 = sparse_tensor.pointers %a, %c : tensor<128xf64> to memref<?xi32>
   return %0 : memref<?xi32>
 }
 
@@ -32,9 +32,9 @@ func @sparse_pointers32(%arg0: !SparseTensor) -> memref<?xi32> {
 // CHECK: %[[T:.*]] = call @sparseIndices64(%[[A]], %[[C]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
 // CHECK: return %[[T]] : memref<?xindex>
 func @sparse_indices(%arg0: !SparseTensor) -> memref<?xindex> {
-  %a = linalg.sparse_tensor %arg0 : !SparseTensor to tensor<128xf64>
+  %a = sparse_tensor.fromPtr %arg0 : !SparseTensor to tensor<128xf64>
   %c = constant 1 : index
-  %0 = linalg.sparse_indices %a, %c : tensor<128xf64> to memref<?xindex>
+  %0 = sparse_tensor.indices %a, %c : tensor<128xf64> to memref<?xindex>
   return %0 : memref<?xindex>
 }
 
@@ -44,9 +44,9 @@ func @sparse_indices(%arg0: !SparseTensor) -> memref<?xindex> {
 // CHECK: %[[T:.*]] = call @sparseIndices32(%[[A]], %[[C]]) : (!llvm.ptr<i8>, index) -> memref<?xi32>
 // CHECK: return %[[T]] : memref<?xi32>
 func @sparse_indices32(%arg0: !SparseTensor) -> memref<?xi32> {
-  %a = linalg.sparse_tensor %arg0 : !SparseTensor to tensor<128xf64>
+  %a = sparse_tensor.fromPtr %arg0 : !SparseTensor to tensor<128xf64>
   %c = constant 1 : index
-  %0 = linalg.sparse_indices %a, %c : tensor<128xf64> to memref<?xi32>
+  %0 = sparse_tensor.indices %a, %c : tensor<128xf64> to memref<?xi32>
   return %0 : memref<?xi32>
 }
 
@@ -55,8 +55,8 @@ func @sparse_indices32(%arg0: !SparseTensor) -> memref<?xi32> {
 // CHECK: %[[T:.*]] = call @sparseValuesF64(%[[A]]) : (!llvm.ptr<i8>) -> memref<?xf64>
 // CHECK: return %[[T]] : memref<?xf64>
 func @sparse_valuesf64(%arg0: !SparseTensor) -> memref<?xf64> {
-  %a = linalg.sparse_tensor %arg0 : !SparseTensor to tensor<128xf64>
-  %0 = linalg.sparse_values %a : tensor<128xf64> to memref<?xf64>
+  %a = sparse_tensor.fromPtr %arg0 : !SparseTensor to tensor<128xf64>
+  %0 = sparse_tensor.values %a : tensor<128xf64> to memref<?xf64>
   return %0 : memref<?xf64>
 }
 
@@ -65,7 +65,7 @@ func @sparse_valuesf64(%arg0: !SparseTensor) -> memref<?xf64> {
 // CHECK: %[[T:.*]] = call @sparseValuesF32(%[[A]]) : (!llvm.ptr<i8>) -> memref<?xf32>
 // CHECK: return %[[T]] : memref<?xf32>
 func @sparse_valuesf32(%arg0: !SparseTensor) -> memref<?xf32> {
-  %a = linalg.sparse_tensor %arg0 : !SparseTensor to tensor<128xf32>
-  %0 = linalg.sparse_values %a : tensor<128xf32> to memref<?xf32>
+  %a = sparse_tensor.fromPtr %arg0 : !SparseTensor to tensor<128xf32>
+  %0 = sparse_tensor.values %a : tensor<128xf32> to memref<?xf32>
   return %0 : memref<?xf32>
 }
diff --git a/mlir/test/Dialect/Linalg/sparse_roundtrip.mlir b/mlir/test/Dialect/SparseTensor/roundtrip.mlir
similarity index 70%
rename from mlir/test/Dialect/Linalg/sparse_roundtrip.mlir
rename to mlir/test/Dialect/SparseTensor/roundtrip.mlir
index 9f4a32fdb784..ae9481914a44 100644
--- a/mlir/test/Dialect/Linalg/sparse_roundtrip.mlir
+++ b/mlir/test/Dialect/SparseTensor/roundtrip.mlir
@@ -4,10 +4,10 @@
 
 // CHECK-LABEL: func @sparse_tensor(
 // CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
-// CHECK: %[[T:.*]] = linalg.sparse_tensor %[[A]] : !llvm.ptr<i8> to tensor<128xf64>
+// CHECK: %[[T:.*]] = sparse_tensor.fromPtr %[[A]] : !llvm.ptr<i8> to tensor<128xf64>
 // CHECK: return %[[T]] : tensor<128xf64>
 func @sparse_tensor(%arg0: !SparseTensor) -> tensor<128xf64> {
-  %0 = linalg.sparse_tensor %arg0 : !SparseTensor to tensor<128xf64>
+  %0 = sparse_tensor.fromPtr %arg0 : !SparseTensor to tensor<128xf64>
   return %0 : tensor<128xf64>
 }
 
@@ -16,11 +16,11 @@ func @sparse_tensor(%arg0: !SparseTensor) -> tensor<128xf64> {
 // CHECK-LABEL: func @sparse_pointers(
 // CHECK-SAME: %[[A:.*]]: tensor<128xf64>)
 // CHECK: %[[C:.*]] = constant 1 : index
-// CHECK: %[[T:.*]] = linalg.sparse_pointers %[[A]], %[[C]] : tensor<128xf64> to memref<?xindex>
+// CHECK: %[[T:.*]] = sparse_tensor.pointers %[[A]], %[[C]] : tensor<128xf64> to memref<?xindex>
 // CHECK: return %[[T]] : memref<?xindex>
 func @sparse_pointers(%arg0: tensor<128xf64>) -> memref<?xindex> {
   %c = constant 1 : index
-  %0 = linalg.sparse_pointers %arg0, %c : tensor<128xf64> to memref<?xindex>
+  %0 = sparse_tensor.pointers %arg0, %c : tensor<128xf64> to memref<?xindex>
   return %0 : memref<?xindex>
 }
 
@@ -29,11 +29,11 @@ func @sparse_pointers(%arg0: tensor<128xf64>) -> memref<?xindex> {
 // CHECK-LABEL: func @sparse_indices(
 // CHECK-SAME: %[[A:.*]]: tensor<128xf64>)
 // CHECK: %[[C:.*]] = constant 1 : index
-// CHECK: %[[T:.*]] = linalg.sparse_indices %[[A]], %[[C]] : tensor<128xf64> to memref<?xindex>
+// CHECK: %[[T:.*]] = sparse_tensor.indices %[[A]], %[[C]] : tensor<128xf64> to memref<?xindex>
 // CHECK: return %[[T]] : memref<?xindex>
 func @sparse_indices(%arg0: tensor<128xf64>) -> memref<?xindex> {
   %c = constant 1 : index
-  %0 = linalg.sparse_indices %arg0, %c : tensor<128xf64> to memref<?xindex>
+  %0 = sparse_tensor.indices %arg0, %c : tensor<128xf64> to memref<?xindex>
   return %0 : memref<?xindex>
 }
 
@@ -41,9 +41,9 @@ func @sparse_indices(%arg0: tensor<128xf64>) -> memref<?xindex> {
 // CHECK-LABEL: func @sparse_values(
 // CHECK-SAME: %[[A:.*]]: tensor<128xf64>)
-// CHECK: %[[T:.*]] = linalg.sparse_values %[[A]] : tensor<128xf64> to memref<?xf64>
+// CHECK: %[[T:.*]] = sparse_tensor.values %[[A]] : tensor<128xf64> to memref<?xf64>
 // CHECK: return %[[T]] : memref<?xf64>
 func @sparse_values(%arg0: tensor<128xf64>) -> memref<?xf64> {
-  %0 = linalg.sparse_values %arg0 : tensor<128xf64> to memref<?xf64>
+  %0 = sparse_tensor.values %arg0 : tensor<128xf64> to memref<?xf64>
   return %0 : memref<?xf64>
 }
diff --git a/mlir/test/Integration/Sparse/CPU/frostt-example.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/frostt-example.mlir
similarity index 100%
rename from mlir/test/Integration/Sparse/CPU/frostt-example.mlir
rename to mlir/test/Integration/Dialect/SparseTensor/CPU/frostt-example.mlir
diff --git a/mlir/test/Integration/Sparse/CPU/lit.local.cfg b/mlir/test/Integration/Dialect/SparseTensor/CPU/lit.local.cfg
similarity index 100%
rename from mlir/test/Integration/Sparse/CPU/lit.local.cfg
rename to mlir/test/Integration/Dialect/SparseTensor/CPU/lit.local.cfg
diff --git a/mlir/test/Integration/Sparse/CPU/matrix-market-example.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/matrix-market-example.mlir
similarity index 100%
rename from mlir/test/Integration/Sparse/CPU/matrix-market-example.mlir
rename to mlir/test/Integration/Dialect/SparseTensor/CPU/matrix-market-example.mlir
diff --git a/mlir/test/Integration/Sparse/CPU/sparse_matvec.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir
similarity index 98%
rename from mlir/test/Integration/Sparse/CPU/sparse_matvec.mlir
rename to mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir
index fde494760982..e78281a9da63 100644
--- a/mlir/test/Integration/Sparse/CPU/sparse_matvec.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir
@@ -56,7 +56,7 @@ module {
   func @kernel_matvec(%argA: !SparseTensor,
                       %argb: tensor,
                       %argx: tensor) -> tensor {
-    %arga = linalg.sparse_tensor %argA : !SparseTensor to tensor
+    %arga = sparse_tensor.fromPtr %argA : !SparseTensor to tensor
     %0 = linalg.generic #matvec
       ins(%arga, %argb: tensor, tensor)
      outs(%argx: tensor) {
diff --git a/mlir/test/Integration/Sparse/CPU/sparse_sampled_matmul.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir
similarity index 98%
rename from mlir/test/Integration/Sparse/CPU/sparse_sampled_matmul.mlir
rename to mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir
index 68d0ee620f9f..509402a48b0d 100644
--- a/mlir/test/Integration/Sparse/CPU/sparse_sampled_matmul.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir
@@ -47,7 +47,7 @@ module {
                               %arga: tensor,
                               %argb: tensor,
                               %argx: tensor) -> tensor {
-    %args = linalg.sparse_tensor %argS : !SparseTensor to tensor
+    %args = sparse_tensor.fromPtr %argS : !SparseTensor to tensor
     %0 = linalg.generic #trait_sampled_dense_dense
       ins(%args, %arga, %argb: tensor, tensor, tensor)
      outs(%argx: tensor) {
diff --git a/mlir/test/Integration/Sparse/CPU/sparse_sum.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum.mlir
similarity index 97%
rename from mlir/test/Integration/Sparse/CPU/sparse_sum.mlir
rename to mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum.mlir
index 6e067b336d9d..6d3dc0ee1659 100644
--- a/mlir/test/Integration/Sparse/CPU/sparse_sum.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum.mlir
@@ -41,7 +41,7 @@ module {
   //
   func @kernel_sum_reduce(%argA: !SparseTensor,
                           %argx: tensor) -> tensor {
-    %arga = linalg.sparse_tensor %argA : !SparseTensor to tensor
+    %arga = sparse_tensor.fromPtr %argA : !SparseTensor to tensor
     %0 = linalg.generic #trait_sum_reduce
       ins(%arga: tensor)
      outs(%argx: tensor) {
diff --git a/mlir/test/lib/Transforms/CMakeLists.txt b/mlir/test/lib/Transforms/CMakeLists.txt
index e75b0fefca85..d000b950ea71 100644
--- a/mlir/test/lib/Transforms/CMakeLists.txt
+++ b/mlir/test/lib/Transforms/CMakeLists.txt
@@ -64,6 +64,8 @@ add_mlir_library(MLIRTestTransforms
   MLIRSCF
   MLIRSCFTransforms
   MLIRStandardOpsTransforms
+  MLIRSparseTensor
+  MLIRSparseTensorTransforms
   MLIRTargetLLVMIRExport
   MLIRTestDialect
   MLIRTransformUtils
diff --git a/mlir/test/lib/Transforms/TestSparsification.cpp b/mlir/test/lib/Transforms/TestSparsification.cpp
index 22900f7edd1b..5a12405789ba 100644
--- a/mlir/test/lib/Transforms/TestSparsification.cpp
+++ b/mlir/test/lib/Transforms/TestSparsification.cpp
@@ -8,6 +8,8 @@
 
 #include "mlir/Dialect/LLVMIR/LLVMDialect.h"
 #include "mlir/Dialect/Linalg/Transforms/Transforms.h"
+#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
+#include "mlir/Dialect/SparseTensor/Transforms/Transforms.h"
 #include "mlir/Dialect/Vector/VectorOps.h"
 #include "mlir/Pass/Pass.h"
 #include "mlir/Transforms/GreedyPatternRewriteDriver.h"
@@ -51,7 +53,8 @@ struct TestSparsification
   /// Registers all dialects required by testing.
   void getDependentDialects(DialectRegistry &registry) const override {
     registry.insert<memref::MemRefDialect, scf::SCFDialect,
-                    vector::VectorDialect, LLVM::LLVMDialect>();
+                    sparse_tensor::SparseTensorDialect, vector::VectorDialect,
+                    LLVM::LLVMDialect>();
   }
 
   /// Returns parallelization strategy given on command line.
@@ -114,12 +117,12 @@ struct TestSparsification
     if (lower) {
       RewritePatternSet conversionPatterns(ctx);
       ConversionTarget target(*ctx);
-      target.addIllegalOp<linalg::SparseTensorFromPointerOp,
-                          linalg::SparseTensorToPointersOp,
-                          linalg::SparseTensorToIndicesOp,
-                          linalg::SparseTensorToValuesOp>();
+      target.addIllegalOp<
+          sparse_tensor::FromPointerOp, sparse_tensor::ToPointersOp,
+          sparse_tensor::ToIndicesOp, sparse_tensor::ToValuesOp>();
       target.addLegalOp<CallOp>();
-      linalg::populateSparsificationConversionPatterns(conversionPatterns);
+      sparse_tensor::populateSparsificationConversionPatterns(
+          conversionPatterns);
       if (failed(applyPartialConversion(getOperation(), target,
                                         std::move(conversionPatterns))))
         signalPassFailure();
diff --git a/mlir/test/mlir-opt/commandline.mlir b/mlir/test/mlir-opt/commandline.mlir
index e9e6fa78eadb..d58908f27118 100644
--- a/mlir/test/mlir-opt/commandline.mlir
+++ b/mlir/test/mlir-opt/commandline.mlir
@@ -22,6 +22,7 @@
 // CHECK-NEXT: scf
 // CHECK-NEXT: sdbm
 // CHECK-NEXT: shape
+// CHECK-NEXT: sparse_tensor
 // CHECK-NEXT: spv
 // CHECK-NEXT: std
 // CHECK-NEXT: tensor