[mlir][sparse] migrate sparse operations into new sparse tensor dialect

This is the very first step toward removing the glue and clutter from linalg
and replacing it with proper sparse tensor types. This revision migrates the
LinalgSparseOps into the SparseTensorOps of a new sparse tensor dialect. This
also provides a new home for sparse tensor related transformations.

NOTE: the actual replacement with sparse tensor types (and removal of the
linalg glue/clutter) will follow, but I am trying to keep the amount of
changes per revision manageable.

Reviewed By: bixia

Differential Revision: https://reviews.llvm.org/D101488
parent 0f8b6686ac
commit a6d92a9711
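The net effect of the migration is a dialect move of the bootstrap primitives with unchanged semantics: `linalg.sparse_tensor`, `linalg.sparse_pointers`, `linalg.sparse_indices`, and `linalg.sparse_values` become `sparse_tensor.fromPtr`, `sparse_tensor.pointers`, `sparse_tensor.indices`, and `sparse_tensor.values`. A minimal before/after sketch (the `%a` and `%c0` names are illustrative, not taken from the diff):

```mlir
// Before this revision: the primitive lived in the linalg dialect.
%p = linalg.sparse_pointers %a, %c0 : tensor<64x64xf64> to memref<?xindex>

// After this revision: same operation, same semantics, new dialect.
%p = sparse_tensor.pointers %a, %c0 : tensor<64x64xf64> to memref<?xindex>
```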
@@ -17,6 +17,7 @@ add_subdirectory(PDLInterp)
 add_subdirectory(Quant)
 add_subdirectory(SCF)
 add_subdirectory(Shape)
+add_subdirectory(SparseTensor)
 add_subdirectory(SPIRV)
 add_subdirectory(StandardOps)
 add_subdirectory(Tensor)
@@ -80,12 +80,6 @@ add_public_tablegen_target(MLIRLinalgStructuredOpsIncGen)
 add_dependencies(MLIRLinalgStructuredOpsIncGen LinalgOdsGen)
 add_dependencies(mlir-headers MLIRLinalgStructuredOpsIncGen)
 
-set(LLVM_TARGET_DEFINITIONS LinalgSparseOps.td)
-mlir_tablegen(LinalgSparseOps.h.inc -gen-op-decls)
-mlir_tablegen(LinalgSparseOps.cpp.inc -gen-op-defs)
-add_public_tablegen_target(MLIRLinalgSparseOpsIncGen)
-add_dependencies(mlir-headers MLIRLinalgSparseOpsIncGen)
-
 set(LLVM_TARGET_DEFINITIONS LinalgInterfaces.td)
 mlir_tablegen(LinalgInterfaces.h.inc -gen-op-interface-decls)
 mlir_tablegen(LinalgInterfaces.cpp.inc -gen-op-interface-defs)
@@ -127,7 +127,4 @@ class IndexedGenericOp;
 #define GET_OP_CLASSES
 #include "mlir/Dialect/Linalg/IR/LinalgStructuredOps.h.inc"
 
-#define GET_OP_CLASSES
-#include "mlir/Dialect/Linalg/IR/LinalgSparseOps.h.inc"
-
 #endif // MLIR_DIALECT_LINALG_LINALGOPS_H_
@@ -1129,9 +1129,6 @@ void populateSparsificationPatterns(
     RewritePatternSet &patterns,
     const SparsificationOptions &options = SparsificationOptions());
 
-/// Sets up sparsification conversion rules with the given options.
-void populateSparsificationConversionPatterns(RewritePatternSet &patterns);
-
 } // namespace linalg
 } // namespace mlir
@@ -0,0 +1 @@
+add_subdirectory(IR)
@@ -0,0 +1,2 @@
+add_mlir_dialect(SparseTensorOps sparse_tensor)
+add_mlir_doc(SparseTensorOps SparseTensorOps Dialects/ -gen-dialect-doc)
@@ -0,0 +1,23 @@
+//===- SparseTensor.h - Sparse tensor dialect -------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef MLIR_DIALECT_SPARSETENSOR_IR_SPARSETENSOR_H_
+#define MLIR_DIALECT_SPARSETENSOR_IR_SPARSETENSOR_H_
+
+#include "mlir/IR/BuiltinTypes.h"
+#include "mlir/IR/Dialect.h"
+#include "mlir/IR/OpDefinition.h"
+#include "mlir/IR/OpImplementation.h"
+#include "mlir/Interfaces/SideEffectInterfaces.h"
+
+#define GET_OP_CLASSES
+#include "mlir/Dialect/SparseTensor/IR/SparseTensorOps.h.inc"
+
+#include "mlir/Dialect/SparseTensor/IR/SparseTensorOpsDialect.h.inc"
+
+#endif // MLIR_DIALECT_SPARSETENSOR_IR_SPARSETENSOR_H_
@@ -0,0 +1,29 @@
+//===- SparseTensorBase.td - Sparse tensor dialect base ----*- tablegen -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SPARSETENSOR_BASE
+#define SPARSETENSOR_BASE
+
+include "mlir/IR/OpBase.td"
+
+def SparseTensor_Dialect : Dialect {
+  let name = "sparse_tensor";
+  let cppNamespace = "::mlir::sparse_tensor";
+  let description = [{
+    The `sparse tensor` dialect is intended to hold primitives that
+    form a bridge between high-level operations on sparse tensors
+    and lower-level operations on the actual sparse storage schemes
+    consisting of pointers, indices, and values. This bridge
+    simplifies a `sparse compiler` pass by postponing actual
+    code generation for the supported primitives to a later phase,
+    either by generating calls into a runtime support library
+    or by further lowering the primitives into actual code.
+  }];
+}
+
+#endif // SPARSETENSOR_BASE
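To make the bridge in the dialect description concrete, here is a minimal sketch of the primitives in use, assuming an opaque `!llvm.ptr<i8>` handle `%arg0` and illustrative SSA names (the individual ops are defined in SparseTensorOps.td below):

```mlir
!SparseTensor = type !llvm.ptr<i8>

// View the opaque pointer as a regular tensor so it can feed a linalg op.
%t = sparse_tensor.fromPtr %arg0 : !SparseTensor to tensor<64x64xf64>

// Expose the underlying sparse storage scheme as memrefs.
%ptrs = sparse_tensor.pointers %t, %c0 : tensor<64x64xf64> to memref<?xindex>
%inds = sparse_tensor.indices %t, %c0 : tensor<64x64xf64> to memref<?xindex>
%vals = sparse_tensor.values %t : tensor<64x64xf64> to memref<?xf64>
```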
@@ -1,49 +1,27 @@
-//===- LinalgSparseOps.td - Linalg dialect sparse ops ------*- tablegen -*-===//
+//===- SparseTensorOps.td - Sparse tensor dialect ops ------*- tablegen -*-===//
 //
 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 // See https://llvm.org/LICENSE.txt for license information.
 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 //
 //===----------------------------------------------------------------------===//
-//
-// The following operations bootstrap working with sparse tensors solely
-// within the Linalg dialect. They provide temporary bridges between a
-// future SparseTensorType (now an opaque pointer), the actual TensorType,
-// and MemRef arrays underlying an actual sparse storage scheme in memory.
-//
-// Lacking a proper sparse tensor type, the 'sparse_tensor' operation
-// provides a bridge between an opaque pointer and a regular tensor type
-// just to simplify feeding the value into a Linalg op. The operation
-// simply disappears during lowering.
-//
-// The other operations form the bridge between the opaque pointer and
-// the actual storage of pointers, indices, and values. These operations
-// resemble 'buffer_cast' in the sense that they map tensors to
-// their bufferized memrefs, but they lower into actual calls since
-// sparse storage does not bufferize into a single memrefs, as dense
-// tensors do, but into a hierarchical storage scheme where pointers
-// access memrefs with indices and eventually into values.
-//
-// TODO: introduce SparseTensorType as first class citizen in MLIR
-//
-//===----------------------------------------------------------------------===//
 
-#ifndef LINALG_SPARSE_OPS
-#define LINALG_SPARSE_OPS
+#ifndef SPARSETENSOR_OPS
+#define SPARSETENSOR_OPS
 
-include "mlir/Dialect/Linalg/IR/LinalgBase.td"
+include "mlir/Dialect/SparseTensor/IR/SparseTensorBase.td"
 include "mlir/Interfaces/SideEffectInterfaces.td"
 
 // Base class.
-class Linalg_SparseOp<string mnemonic, list<OpTrait> traits = []>
-    : Op<Linalg_Dialect, mnemonic, traits> {
+class SparseTensor_Op<string mnemonic, list<OpTrait> traits = []>
+    : Op<SparseTensor_Dialect, mnemonic, traits> {
   let printer = [{ return ::print(p, *this); }];
   let verifier = ?;
   let parser = [{ return ::parse$cppClass(parser, result); }];
 }
 
-def Linalg_SparseTensorFromPointerOp :
-    Linalg_SparseOp<"sparse_tensor">,
+// TODO: remove me
+def SparseTensor_FromPointerOp : SparseTensor_Op<"fromPtr">,
     Arguments<(ins AnyType:$ptr)>,
     Results<(outs AnyTensor:$result)> {
   let summary = "Views an opaque sparse tensor pointer as a tensor";
@@ -60,14 +38,13 @@ def Linalg_SparseTensorFromPointerOp :
     ```mlir
     !SparseTensor = type !llvm.ptr<i8>
 
-    %0 = linalg.sparse_tensor %arg0 : !SparseTensor to tensor<64x64xf64>
+    %0 = sparse_tensor.fromPtr %arg0 : !SparseTensor to tensor<64x64xf64>
     ```
   }];
   let assemblyFormat = "$ptr attr-dict `:` type($ptr) `to` type($result)";
 }
 
-def Linalg_SparseTensorToPointersMemRefOp :
-    Linalg_SparseOp<"sparse_pointers", [NoSideEffect]>,
+def SparseTensor_ToPointersOp : SparseTensor_Op<"pointers", [NoSideEffect]>,
     Arguments<(ins AnyTensor:$tensor, Index:$dim)>,
     Results<(outs AnyStridedMemRefOfRank<1>:$result)> {
   let summary = "Extract pointers array at given dimension from a tensor";
@@ -83,15 +60,14 @@ def Linalg_SparseTensorToPointersMemRefOp :
     Example:
 
     ```mlir
-    %1 = linalg.sparse_pointers %0, %c1 : tensor<64x64xf64> to memref<?xindex>
+    %1 = sparse_tensor.pointers %0, %c1 : tensor<64x64xf64> to memref<?xindex>
     ```
   }];
   let assemblyFormat = "$tensor `,` $dim attr-dict `:` type($tensor)"
                        " `to` type($result)";
 }
 
-def Linalg_SparseTensorToIndicesMemRefOp :
-    Linalg_SparseOp<"sparse_indices", [NoSideEffect]>,
+def SparseTensor_ToIndicesOp : SparseTensor_Op<"indices", [NoSideEffect]>,
     Arguments<(ins AnyTensor:$tensor, Index:$dim)>,
     Results<(outs AnyStridedMemRefOfRank<1>:$result)> {
   let summary = "Extract indices array at given dimension from a tensor";
@@ -107,15 +83,14 @@ def Linalg_SparseTensorToIndicesMemRefOp :
     Example:
 
    ```mlir
-    %1 = linalg.sparse_indices %0, %c1 : tensor<64x64xf64> to memref<?xindex>
+    %1 = sparse_tensor.indices %0, %c1 : tensor<64x64xf64> to memref<?xindex>
    ```
   }];
   let assemblyFormat = "$tensor `,` $dim attr-dict `:` type($tensor)"
                        " `to` type($result)";
 }
 
-def Linalg_SparseTensorToValuesMemRefOp :
-    Linalg_SparseOp<"sparse_values", [NoSideEffect]>,
+def SparseTensor_ToValuesOp : SparseTensor_Op<"values", [NoSideEffect]>,
    Arguments<(ins AnyTensor:$tensor)>,
    Results<(outs AnyStridedMemRefOfRank<1>:$result)> {
   let summary = "Extract numerical values array from a tensor";
@@ -131,10 +106,10 @@ def Linalg_SparseTensorToValuesMemRefOp :
     Example:
 
     ```mlir
-    %1 = linalg.sparse_values %0 : tensor<64x64xf64> to memref<?xf64>
+    %1 = sparse_tensor.values %0 : tensor<64x64xf64> to memref<?xf64>
     ```
   }];
   let assemblyFormat = "$tensor attr-dict `:` type($tensor) `to` type($result)";
 }
 
-#endif // LINALG_SPARSE_OPS
+#endif // SPARSETENSOR_OPS
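Note that `pointers` and `indices` take a dimension operand, so a kernel can walk each storage level of a multi-dimensional tensor separately. A sketch for a doubly compressed 2-d tensor, mirroring the CHECK expectations in the tests further below (names are illustrative):

```mlir
// Per-dimension access into a sparse 32x16 tensor (illustrative sketch).
%ptr0 = sparse_tensor.pointers %a, %c0 : tensor<32x16xf32> to memref<?xindex>
%idx0 = sparse_tensor.indices %a, %c0 : tensor<32x16xf32> to memref<?xindex>
%ptr1 = sparse_tensor.pointers %a, %c1 : tensor<32x16xf32> to memref<?xindex>
%idx1 = sparse_tensor.indices %a, %c1 : tensor<32x16xf32> to memref<?xindex>
%vals = sparse_tensor.values %a : tensor<32x16xf32> to memref<?xf32>
```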
@@ -0,0 +1,23 @@
+//===- Transforms.h - Sparse tensor transformations -------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_TRANSFORMS_H_
+#define MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_TRANSFORMS_H_
+
+#include "mlir/IR/PatternMatch.h"
+
+namespace mlir {
+namespace sparse_tensor {
+
+/// Sets up sparsification conversion rules with the given options.
+void populateSparsificationConversionPatterns(RewritePatternSet &patterns);
+
+} // namespace sparse_tensor
+} // namespace mlir
+
+#endif // MLIR_DIALECT_SPARSETENSOR_TRANSFORMS_TRANSFORMS_H_
@@ -37,6 +37,7 @@
 #include "mlir/Dialect/SDBM/SDBMDialect.h"
 #include "mlir/Dialect/SPIRV/IR/SPIRVDialect.h"
 #include "mlir/Dialect/Shape/IR/Shape.h"
+#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
 #include "mlir/Dialect/StandardOps/IR/Ops.h"
 #include "mlir/Dialect/Tensor/IR/Tensor.h"
 #include "mlir/Dialect/Tosa/IR/TosaOps.h"
@@ -74,6 +75,7 @@ inline void registerAllDialects(DialectRegistry &registry) {
                   ROCDL::ROCDLDialect,
                   SDBMDialect,
                   shape::ShapeDialect,
+                  sparse_tensor::SparseTensorDialect,
                   tensor::TensorDialect,
                   tosa::TosaDialect,
                   x86vector::X86VectorDialect>();
@@ -18,6 +18,7 @@ add_subdirectory(Quant)
 add_subdirectory(SCF)
 add_subdirectory(SDBM)
 add_subdirectory(Shape)
+add_subdirectory(SparseTensor)
 add_subdirectory(SPIRV)
 add_subdirectory(StandardOps)
 add_subdirectory(Tensor)
@@ -2384,9 +2384,6 @@ struct FoldTensorCastOp;
 #define GET_OP_CLASSES
 #include "mlir/Dialect/Linalg/IR/LinalgStructuredOps.cpp.inc"
 
-#define GET_OP_CLASSES
-#include "mlir/Dialect/Linalg/IR/LinalgSparseOps.cpp.inc"
-
 /// Return the dims that are `iteratorTypeName` loops in the LinalgOp `op`.
 /// Assumes `op` is a LinalgOp.
 void mlir::linalg::getDimsOfType(Operation *op, StringRef iteratorTypeName,
@@ -99,10 +99,6 @@ void mlir::linalg::LinalgDialect::initialize() {
 #define GET_OP_LIST
 #include "mlir/Dialect/Linalg/IR/LinalgStructuredOps.cpp.inc"
       >();
-  addOperations<
-#define GET_OP_LIST
-#include "mlir/Dialect/Linalg/IR/LinalgSparseOps.cpp.inc"
-      >();
 
   // Fill the Linalg-specific OpName to RegionBuilder map.
   addNamedOpBuilders<
@@ -11,7 +11,6 @@ add_mlir_dialect_library(MLIRLinalgTransforms
   Interchange.cpp
   Loops.cpp
   Promotion.cpp
-  SparseLowering.cpp
   Sparsification.cpp
   Tiling.cpp
   Transforms.cpp
@@ -45,6 +45,7 @@
 #include "mlir/Dialect/Linalg/Transforms/Transforms.h"
 #include "mlir/Dialect/Linalg/Utils/Utils.h"
 #include "mlir/Dialect/SCF/SCF.h"
+#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
 #include "mlir/Dialect/StandardOps/IR/Ops.h"
 #include "mlir/IR/Matchers.h"
@@ -360,7 +361,7 @@ static void findSparseAnnotations(Merger &merger, linalg::GenericOp op) {
 /// Returns true if tensor was set up with sparse storage scheme.
 static bool linkedSparse(linalg::GenericOp op, unsigned tensor) {
   if (tensor < op.getNumInputs())
-    return isa_and_nonnull<linalg::SparseTensorFromPointerOp>(
+    return isa_and_nonnull<sparse_tensor::FromPointerOp>(
         op.getInput(tensor).getDefiningOp());
   return false;
 }
@@ -576,12 +577,10 @@ static void genBuffers(Merger &merger, CodeGen &codegen,
           dynShape, genIntType(rewriter, codegen.options.indType));
       Value dim = rewriter.create<ConstantIndexOp>(loc, d);
       // Generate sparse primitives to obtains pointer and indices.
-      codegen.pointers[t][i] =
-          rewriter.create<linalg::SparseTensorToPointersMemRefOp>(
-              loc, ptrTp, tensor, dim);
-      codegen.indices[t][i] =
-          rewriter.create<linalg::SparseTensorToIndicesMemRefOp>(loc, indTp,
-                                                                 tensor, dim);
+      codegen.pointers[t][i] = rewriter.create<sparse_tensor::ToPointersOp>(
+          loc, ptrTp, tensor, dim);
+      codegen.indices[t][i] = rewriter.create<sparse_tensor::ToIndicesOp>(
+          loc, indTp, tensor, dim);
     }
     // Find lower and upper bound in current dimension.
     Value up;
@@ -608,8 +607,7 @@ static void genBuffers(Merger &merger, CodeGen &codegen,
       auto dynShape = {ShapedType::kDynamicSize};
       auto sparseTp = MemRefType::get(dynShape, tensorType.getElementType());
       codegen.buffers[t] =
-          rewriter.create<linalg::SparseTensorToValuesMemRefOp>(loc, sparseTp,
-                                                                tensor);
+          rewriter.create<sparse_tensor::ToValuesOp>(loc, sparseTp, tensor);
     }
   }
 }
@@ -0,0 +1,2 @@
+add_subdirectory(IR)
+add_subdirectory(Transforms)
@@ -0,0 +1,13 @@
+add_mlir_dialect_library(MLIRSparseTensor
+  SparseTensorDialect.cpp
+
+  ADDITIONAL_HEADER_DIRS
+  ${MLIR_MAIN_INCLUDE_DIR}/mlir/Dialect/SparseTensor
+
+  DEPENDS
+  MLIRSparseTensorOpsIncGen
+
+  LINK_LIBS PUBLIC
+  MLIRDialect
+  MLIRIR
+  )
@@ -0,0 +1,25 @@
+//===- SparseTensorDialect.cpp - Sparse tensor dialect implementation -----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
+
+#include "mlir/IR/Builders.h"
+#include "mlir/IR/OpImplementation.h"
+
+using namespace mlir;
+using namespace mlir::sparse_tensor;
+
+void SparseTensorDialect::initialize() {
+  addOperations<
+#define GET_OP_LIST
+#include "mlir/Dialect/SparseTensor/IR/SparseTensorOps.cpp.inc"
+      >();
+}
+
+#define GET_OP_CLASSES
+#include "mlir/Dialect/SparseTensor/IR/SparseTensorOps.cpp.inc"
@@ -0,0 +1,14 @@
+add_mlir_dialect_library(MLIRSparseTensorTransforms
+  SparseTensorLowering.cpp
+
+  ADDITIONAL_HEADER_DIRS
+  ${MLIR_MAIN_INCLUDE_DIR}/mlir/Dialect/SparseTensor
+
+  LINK_LIBS PUBLIC
+  MLIRIR
+  MLIRLLVMIR
+  MLIRPass
+  MLIRStandard
+  MLIRSparseTensor
+  MLIRTransforms
+  )
@@ -1,14 +1,25 @@
-//===- SparseLowering.cpp - Lowers sparse primitives to library calls. ---===//
+//===- SparseTensorLowering.cpp - Sparse tensor primitives lowering -------===//
 //
 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 // See https://llvm.org/LICENSE.txt for license information.
 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 //
 //===----------------------------------------------------------------------===//
+//
+// Lower sparse tensor primitives to calls into a runtime support library.
+// Note that this is a current implementation choice to keep the lowering
+// simple. In principle, these primitives could also be lowered to actual
+// elaborate IR code that implements the primitives on the selected sparse
+// tensor storage schemes.
+//
+//===----------------------------------------------------------------------===//
 
 #include "mlir/Dialect/LLVMIR/LLVMTypes.h"
 #include "mlir/Dialect/Linalg/IR/LinalgOps.h"
 #include "mlir/Dialect/Linalg/Transforms/Transforms.h"
 #include "mlir/Dialect/MemRef/IR/MemRef.h"
+#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
+#include "mlir/Dialect/SparseTensor/Transforms/Transforms.h"
 #include "mlir/Dialect/StandardOps/IR/Ops.h"
 #include "mlir/Transforms/DialectConversion.h"
 
 using namespace mlir;
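In other words, the conversion patterns below rewrite each sparse primitive into a library call rather than into inline storage-scheme code. A hedged sketch of the shape of that rewrite; the actual callee symbol is produced by `getFunc` and the runtime library, so `@sparsePointers` here is a hypothetical name for illustration only:

```mlir
// Before conversion: a sparse primitive on the (opaque) sparse tensor.
%p = sparse_tensor.pointers %t, %c0 : tensor<64x64xf64> to memref<?xindex>

// After conversion: a call into the runtime support library
// (callee name hypothetical).
%p = call @sparsePointers(%t, %c0) : (tensor<64x64xf64>, index) -> memref<?xindex>
```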
@@ -32,11 +43,10 @@ static FlatSymbolRefAttr getFunc(Operation *op, StringRef name, Type result,
 
 /// Sparse conversion rule to remove opaque pointer cast.
 class TensorFromPointerConverter
-    : public OpConversionPattern<linalg::SparseTensorFromPointerOp> {
+    : public OpConversionPattern<sparse_tensor::FromPointerOp> {
   using OpConversionPattern::OpConversionPattern;
   LogicalResult
-  matchAndRewrite(linalg::SparseTensorFromPointerOp op,
-                  ArrayRef<Value> operands,
+  matchAndRewrite(sparse_tensor::FromPointerOp op, ArrayRef<Value> operands,
                   ConversionPatternRewriter &rewriter) const override {
     rewriter.replaceOp(op, operands[0]);
     return success();
@@ -62,12 +72,11 @@ public:
 
 /// Sparse conversion rule for pointer accesses.
 class TensorToPointersConverter
-    : public OpConversionPattern<linalg::SparseTensorToPointersMemRefOp> {
+    : public OpConversionPattern<sparse_tensor::ToPointersOp> {
 public:
   using OpConversionPattern::OpConversionPattern;
   LogicalResult
-  matchAndRewrite(linalg::SparseTensorToPointersMemRefOp op,
-                  ArrayRef<Value> operands,
+  matchAndRewrite(sparse_tensor::ToPointersOp op, ArrayRef<Value> operands,
                   ConversionPatternRewriter &rewriter) const override {
     Type resType = op.getType();
     Type eltType = resType.cast<ShapedType>().getElementType();
@@ -90,12 +99,11 @@ public:
 
 /// Sparse conversion rule for index accesses.
 class TensorToIndicesConverter
-    : public OpConversionPattern<linalg::SparseTensorToIndicesMemRefOp> {
+    : public OpConversionPattern<sparse_tensor::ToIndicesOp> {
 public:
   using OpConversionPattern::OpConversionPattern;
   LogicalResult
-  matchAndRewrite(linalg::SparseTensorToIndicesMemRefOp op,
-                  ArrayRef<Value> operands,
+  matchAndRewrite(sparse_tensor::ToIndicesOp op, ArrayRef<Value> operands,
                   ConversionPatternRewriter &rewriter) const override {
     Type resType = op.getType();
     Type eltType = resType.cast<ShapedType>().getElementType();
@@ -118,12 +126,11 @@ public:
 
 /// Sparse conversion rule for value accesses.
 class TensorToValuesConverter
-    : public OpConversionPattern<linalg::SparseTensorToValuesMemRefOp> {
+    : public OpConversionPattern<sparse_tensor::ToValuesOp> {
 public:
   using OpConversionPattern::OpConversionPattern;
   LogicalResult
-  matchAndRewrite(linalg::SparseTensorToValuesMemRefOp op,
-                  ArrayRef<Value> operands,
+  matchAndRewrite(sparse_tensor::ToValuesOp op, ArrayRef<Value> operands,
                   ConversionPatternRewriter &rewriter) const override {
     Type resType = op.getType();
     Type eltType = resType.cast<ShapedType>().getElementType();
@@ -150,7 +157,7 @@ public:
 
 /// Populates the given patterns list with conversion rules required for
 /// the sparsification of linear algebra operations.
-void linalg::populateSparsificationConversionPatterns(
+void sparse_tensor::populateSparsificationConversionPatterns(
     RewritePatternSet &patterns) {
   patterns.add<TensorFromPointerConverter, TensorToDimSizeConverter,
                TensorToPointersConverter, TensorToIndicesConverter,
@@ -95,9 +95,9 @@ func @mul_d(%arga: tensor<32xf32>, %argb: f32, %argx: tensor<32xf32>) -> tensor<
 // CHECK: %[[VAL_4:.*]] = constant 0 : index
 // CHECK: %[[VAL_5:.*]] = constant true
 // CHECK: %[[VAL_6:.*]] = constant 1 : index
-// CHECK: %[[VAL_7:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_4]] : tensor<32xf32> to memref<?xindex>
-// CHECK: %[[VAL_8:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_4]] : tensor<32xf32> to memref<?xindex>
-// CHECK: %[[VAL_9:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32xf32> to memref<?xf32>
+// CHECK: %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_4]] : tensor<32xf32> to memref<?xindex>
+// CHECK: %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_4]] : tensor<32xf32> to memref<?xindex>
+// CHECK: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32> to memref<?xf32>
 // CHECK: %[[VAL_10:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32xf32>
 // CHECK: %[[VAL_11:.*]] = memref.alloc() : memref<32xf32>
 // CHECK: linalg.copy(%[[VAL_10]], %[[VAL_11]]) : memref<32xf32>, memref<32xf32>
@@ -148,9 +148,9 @@ func @add_s(%arga: tensor<32xf32>, %argb: f32, %argx: tensor<32xf32>) -> tensor<
 // CHECK-SAME: %[[VAL_1:.*]]: tensor<32xf32>) -> tensor<32xf32> {
 // CHECK: %[[VAL_2:.*]] = constant 0 : index
 // CHECK: %[[VAL_3:.*]] = constant 1 : index
-// CHECK: %[[VAL_4:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_2]] : tensor<32xf32> to memref<?xindex>
-// CHECK: %[[VAL_5:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_2]] : tensor<32xf32> to memref<?xindex>
-// CHECK: %[[VAL_6:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32xf32> to memref<?xf32>
+// CHECK: %[[VAL_4:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_2]] : tensor<32xf32> to memref<?xindex>
+// CHECK: %[[VAL_5:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_2]] : tensor<32xf32> to memref<?xindex>
+// CHECK: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32> to memref<?xf32>
 // CHECK: %[[VAL_7:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32xf32>
 // CHECK: %[[VAL_8:.*]] = memref.alloc() : memref<32xf32>
 // CHECK: linalg.copy(%[[VAL_7]], %[[VAL_8]]) : memref<32xf32>, memref<32xf32>
@@ -189,9 +189,9 @@ func @repeated_add_s(%arga: tensor<32xf32>, %argx: tensor<32xf32>) -> tensor<32x
 // CHECK-SAME: %[[VAL_2:.*]]: tensor<32xf32>) -> tensor<32xf32> {
 // CHECK: %[[VAL_3:.*]] = constant 0 : index
 // CHECK: %[[VAL_4:.*]] = constant 1 : index
-// CHECK: %[[VAL_5:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_3]] : tensor<32xf32> to memref<?xindex>
-// CHECK: %[[VAL_6:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_3]] : tensor<32xf32> to memref<?xindex>
-// CHECK: %[[VAL_7:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32xf32> to memref<?xf32>
+// CHECK: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_3]] : tensor<32xf32> to memref<?xindex>
+// CHECK: %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_3]] : tensor<32xf32> to memref<?xindex>
+// CHECK: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32> to memref<?xf32>
 // CHECK: %[[VAL_8:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32xf32>
 // CHECK: %[[VAL_9:.*]] = memref.alloc() : memref<32xf32>
 // CHECK: linalg.copy(%[[VAL_8]], %[[VAL_9]]) : memref<32xf32>, memref<32xf32>
@@ -320,9 +320,9 @@ func @mul_dd(%arga: tensor<32xf32>, %argb: tensor<32xf32>, %argx: tensor<32xf32>
 // CHECK: %[[VAL_5:.*]] = constant true
 // CHECK: %[[VAL_6:.*]] = constant 1 : index
 // CHECK: %[[VAL_7:.*]] = memref.buffer_cast %[[VAL_0]] : memref<32xf32>
-// CHECK: %[[VAL_8:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_4]] : tensor<32xf32> to memref<?xindex>
-// CHECK: %[[VAL_9:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_4]] : tensor<32xf32> to memref<?xindex>
-// CHECK: %[[VAL_10:.*]] = linalg.sparse_values %[[VAL_1]] : tensor<32xf32> to memref<?xf32>
+// CHECK: %[[VAL_8:.*]] = sparse_tensor.pointers %[[VAL_1]], %[[VAL_4]] : tensor<32xf32> to memref<?xindex>
+// CHECK: %[[VAL_9:.*]] = sparse_tensor.indices %[[VAL_1]], %[[VAL_4]] : tensor<32xf32> to memref<?xindex>
+// CHECK: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32xf32> to memref<?xf32>
 // CHECK: %[[VAL_11:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32xf32>
 // CHECK: %[[VAL_12:.*]] = memref.alloc() : memref<32xf32>
 // CHECK: linalg.copy(%[[VAL_11]], %[[VAL_12]]) : memref<32xf32>, memref<32xf32>
@@ -378,9 +378,9 @@ func @add_ds(%arga: tensor<32xf32>, %argb: tensor<32xf32>, %argx: tensor<32xf32>
 // CHECK: %[[VAL_3:.*]] = constant 0 : index
 // CHECK: %[[VAL_4:.*]] = constant 1 : index
 // CHECK: %[[VAL_5:.*]] = memref.buffer_cast %[[VAL_0]] : memref<32xf32>
-// CHECK: %[[VAL_6:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_3]] : tensor<32xf32> to memref<?xindex>
-// CHECK: %[[VAL_7:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_3]] : tensor<32xf32> to memref<?xindex>
-// CHECK: %[[VAL_8:.*]] = linalg.sparse_values %[[VAL_1]] : tensor<32xf32> to memref<?xf32>
+// CHECK: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_1]], %[[VAL_3]] : tensor<32xf32> to memref<?xindex>
+// CHECK: %[[VAL_7:.*]] = sparse_tensor.indices %[[VAL_1]], %[[VAL_3]] : tensor<32xf32> to memref<?xindex>
+// CHECK: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32xf32> to memref<?xf32>
 // CHECK: %[[VAL_9:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32xf32>
 // CHECK: %[[VAL_10:.*]] = memref.alloc() : memref<32xf32>
 // CHECK: linalg.copy(%[[VAL_9]], %[[VAL_10]]) : memref<32xf32>, memref<32xf32>
@@ -430,9 +430,9 @@ func @mul_ds(%arga: tensor<32xf32>, %argb: tensor<32xf32>, %argx: tensor<32xf32>
 // CHECK: %[[VAL_4:.*]] = constant 0 : index
 // CHECK: %[[VAL_5:.*]] = constant true
 // CHECK: %[[VAL_6:.*]] = constant 1 : index
-// CHECK: %[[VAL_7:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_4]] : tensor<32xf32> to memref<?xindex>
-// CHECK: %[[VAL_8:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_4]] : tensor<32xf32> to memref<?xindex>
-// CHECK: %[[VAL_9:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32xf32> to memref<?xf32>
+// CHECK: %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_4]] : tensor<32xf32> to memref<?xindex>
+// CHECK: %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_4]] : tensor<32xf32> to memref<?xindex>
+// CHECK: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32> to memref<?xf32>
 // CHECK: %[[VAL_10:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32xf32>
 // CHECK: %[[VAL_11:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32xf32>
 // CHECK: %[[VAL_12:.*]] = memref.alloc() : memref<32xf32>
@@ -488,9 +488,9 @@ func @add_sd(%arga: tensor<32xf32>, %argb: tensor<32xf32>, %argx: tensor<32xf32>
 // CHECK-SAME: %[[VAL_2:.*2]]: tensor<32xf32>) -> tensor<32xf32> {
 // CHECK: %[[VAL_3:.*]] = constant 0 : index
 // CHECK: %[[VAL_4:.*]] = constant 1 : index
-// CHECK: %[[VAL_5:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_3]] : tensor<32xf32> to memref<?xindex>
-// CHECK: %[[VAL_6:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_3]] : tensor<32xf32> to memref<?xindex>
-// CHECK: %[[VAL_7:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32xf32> to memref<?xf32>
+// CHECK: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_3]] : tensor<32xf32> to memref<?xindex>
+// CHECK: %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_3]] : tensor<32xf32> to memref<?xindex>
+// CHECK: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32> to memref<?xf32>
 // CHECK: %[[VAL_8:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32xf32>
 // CHECK: %[[VAL_9:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32xf32>
 // CHECK: %[[VAL_10:.*]] = memref.alloc() : memref<32xf32>
@@ -539,12 +539,12 @@ func @mul_sd(%arga: tensor<32xf32>, %argb: tensor<32xf32>, %argx: tensor<32xf32>
 // CHECK-SAME: %[[VAL_2:.*2]]: tensor<32xf32>) -> tensor<32xf32> {
 // CHECK: %[[VAL_3:.*]] = constant 0 : index
 // CHECK: %[[VAL_4:.*]] = constant 1 : index
-// CHECK: %[[VAL_5:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_3]] : tensor<32xf32> to memref<?xindex>
-// CHECK: %[[VAL_6:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_3]] : tensor<32xf32> to memref<?xindex>
-// CHECK: %[[VAL_7:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32xf32> to memref<?xf32>
-// CHECK: %[[VAL_8:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_3]] : tensor<32xf32> to memref<?xindex>
-// CHECK: %[[VAL_9:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_3]] : tensor<32xf32> to memref<?xindex>
-// CHECK: %[[VAL_10:.*]] = linalg.sparse_values %[[VAL_1]] : tensor<32xf32> to memref<?xf32>
+// CHECK: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_3]] : tensor<32xf32> to memref<?xindex>
+// CHECK: %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_3]] : tensor<32xf32> to memref<?xindex>
+// CHECK: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32> to memref<?xf32>
+// CHECK: %[[VAL_8:.*]] = sparse_tensor.pointers %[[VAL_1]], %[[VAL_3]] : tensor<32xf32> to memref<?xindex>
+// CHECK: %[[VAL_9:.*]] = sparse_tensor.indices %[[VAL_1]], %[[VAL_3]] : tensor<32xf32> to memref<?xindex>
+// CHECK: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32xf32> to memref<?xf32>
 // CHECK: %[[VAL_11:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32xf32>
 // CHECK: %[[VAL_12:.*]] = memref.alloc() : memref<32xf32>
 // CHECK: linalg.copy(%[[VAL_11]], %[[VAL_12]]) : memref<32xf32>, memref<32xf32>
@@ -623,12 +623,12 @@ func @add_ss(%arga: tensor<32xf32>, %argb: tensor<32xf32>, %argx: tensor<32xf32>
 // CHECK-SAME: %[[VAL_2:.*2]]: tensor<32xf32>) -> tensor<32xf32> {
 // CHECK: %[[VAL_3:.*]] = constant 0 : index
 // CHECK: %[[VAL_4:.*]] = constant 1 : index
-// CHECK: %[[VAL_5:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_3]] : tensor<32xf32> to memref<?xindex>
-// CHECK: %[[VAL_6:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_3]] : tensor<32xf32> to memref<?xindex>
-// CHECK: %[[VAL_7:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32xf32> to memref<?xf32>
-// CHECK: %[[VAL_8:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_3]] : tensor<32xf32> to memref<?xindex>
-// CHECK: %[[VAL_9:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_3]] : tensor<32xf32> to memref<?xindex>
-// CHECK: %[[VAL_10:.*]] = linalg.sparse_values %[[VAL_1]] : tensor<32xf32> to memref<?xf32>
+// CHECK: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_3]] : tensor<32xf32> to memref<?xindex>
+// CHECK: %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_3]] : tensor<32xf32> to memref<?xindex>
+// CHECK: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32xf32> to memref<?xf32>
+// CHECK: %[[VAL_8:.*]] = sparse_tensor.pointers %[[VAL_1]], %[[VAL_3]] : tensor<32xf32> to memref<?xindex>
+// CHECK: %[[VAL_9:.*]] = sparse_tensor.indices %[[VAL_1]], %[[VAL_3]] : tensor<32xf32> to memref<?xindex>
+// CHECK: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32xf32> to memref<?xf32>
 // CHECK: %[[VAL_11:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32xf32>
 // CHECK: %[[VAL_12:.*]] = memref.alloc() : memref<32xf32>
 // CHECK: linalg.copy(%[[VAL_11]], %[[VAL_12]]) : memref<32xf32>, memref<32xf32>
@@ -701,12 +701,12 @@ func @mul_ss(%arga: tensor<32xf32>, %argb: tensor<32xf32>, %argx: tensor<32xf32>
 // CHECK-SAME: %[[VAL_3:.*3]]: tensor<16xf32>) -> tensor<16xf32> {
 // CHECK: %[[VAL_4:.*]] = constant 0 : index
 // CHECK: %[[VAL_5:.*]] = constant 1 : index
-// CHECK: %[[VAL_6:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_4]] : tensor<16xf32> to memref<?xindex>
-// CHECK: %[[VAL_7:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_4]] : tensor<16xf32> to memref<?xindex>
-// CHECK: %[[VAL_8:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<16xf32> to memref<?xf32>
-// CHECK: %[[VAL_9:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_4]] : tensor<16xf32> to memref<?xindex>
-// CHECK: %[[VAL_10:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_4]] : tensor<16xf32> to memref<?xindex>
-// CHECK: %[[VAL_11:.*]] = linalg.sparse_values %[[VAL_1]] : tensor<16xf32> to memref<?xf32>
+// CHECK: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_4]] : tensor<16xf32> to memref<?xindex>
+// CHECK: %[[VAL_7:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_4]] : tensor<16xf32> to memref<?xindex>
+// CHECK: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<16xf32> to memref<?xf32>
+// CHECK: %[[VAL_9:.*]] = sparse_tensor.pointers %[[VAL_1]], %[[VAL_4]] : tensor<16xf32> to memref<?xindex>
+// CHECK: %[[VAL_10:.*]] = sparse_tensor.indices %[[VAL_1]], %[[VAL_4]] : tensor<16xf32> to memref<?xindex>
+// CHECK: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<16xf32> to memref<?xf32>
 // CHECK: %[[VAL_12:.*]] = memref.buffer_cast %[[VAL_3]] : memref<16xf32>
 // CHECK: %[[VAL_13:.*]] = memref.alloc() : memref<16xf32>
 // CHECK: linalg.copy(%[[VAL_12]], %[[VAL_13]]) : memref<16xf32>, memref<16xf32>
@@ -794,12 +794,12 @@ func @two_way_inv(%arga: tensor<16xf32>, %argb: tensor<16xf32>, %argc: f32, %arg
 // CHECK-SAME: %[[VAL_3:.*3]]: tensor<16xf32>) -> tensor<16xf32> {
 // CHECK: %[[VAL_4:.*]] = constant 0 : index
 // CHECK: %[[VAL_5:.*]] = constant 1 : index
-// CHECK: %[[VAL_6:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_4]] : tensor<16xf32> to memref<?xindex>
-// CHECK: %[[VAL_7:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_4]] : tensor<16xf32> to memref<?xindex>
-// CHECK: %[[VAL_8:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<16xf32> to memref<?xf32>
-// CHECK: %[[VAL_9:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_4]] : tensor<16xf32> to memref<?xindex>
-// CHECK: %[[VAL_10:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_4]] : tensor<16xf32> to memref<?xindex>
-// CHECK: %[[VAL_11:.*]] = linalg.sparse_values %[[VAL_1]] : tensor<16xf32> to memref<?xf32>
+// CHECK: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_4]] : tensor<16xf32> to memref<?xindex>
+// CHECK: %[[VAL_7:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_4]] : tensor<16xf32> to memref<?xindex>
+// CHECK: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<16xf32> to memref<?xf32>
+// CHECK: %[[VAL_9:.*]] = sparse_tensor.pointers %[[VAL_1]], %[[VAL_4]] : tensor<16xf32> to memref<?xindex>
+// CHECK: %[[VAL_10:.*]] = sparse_tensor.indices %[[VAL_1]], %[[VAL_4]] : tensor<16xf32> to memref<?xindex>
+// CHECK: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<16xf32> to memref<?xf32>
 // CHECK: %[[VAL_12:.*]] = memref.buffer_cast %[[VAL_3]] : memref<16xf32>
 // CHECK: %[[VAL_13:.*]] = memref.alloc() : memref<16xf32>
 // CHECK: linalg.copy(%[[VAL_12]], %[[VAL_13]]) : memref<16xf32>, memref<16xf32>
@@ -898,8 +898,8 @@ func @two_way_inv_alt(%arga: tensor<16xf32>,
 // CHECK-SAME: %[[VAL_1:.*]]: tensor<f32>) -> tensor<f32> {
 // CHECK: %[[VAL_2:.*]] = constant 0 : index
 // CHECK: %[[VAL_3:.*]] = constant 1 : index
-// CHECK: %[[VAL_4:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_2]] : tensor<?xf32> to memref<?xindex>
-// CHECK: %[[VAL_5:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<?xf32> to memref<?xf32>
+// CHECK: %[[VAL_4:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_2]] : tensor<?xf32> to memref<?xindex>
+// CHECK: %[[VAL_5:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<?xf32> to memref<?xf32>
 // CHECK: %[[VAL_6:.*]] = memref.buffer_cast %[[VAL_1]] : memref<f32>
 // CHECK: %[[VAL_7:.*]] = memref.alloc() : memref<f32>
 // CHECK: linalg.copy(%[[VAL_6]], %[[VAL_7]]) : memref<f32>, memref<f32>
@@ -947,12 +947,12 @@ func @sum_reduction(%arga: tensor<?xf32>, %argx: tensor<f32>) -> tensor<f32> {
 // CHECK-SAME: %[[VAL_2:.*2]]: tensor<f32>) -> tensor<f32> {
 // CHECK: %[[VAL_3:.*]] = constant 0 : index
 // CHECK: %[[VAL_4:.*]] = constant 1 : index
-// CHECK: %[[VAL_5:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_3]] : tensor<16xf32> to memref<?xindex>
-// CHECK: %[[VAL_6:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_3]] : tensor<16xf32> to memref<?xindex>
-// CHECK: %[[VAL_7:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<16xf32> to memref<?xf32>
-// CHECK: %[[VAL_8:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_3]] : tensor<16xf32> to memref<?xindex>
-// CHECK: %[[VAL_9:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_3]] : tensor<16xf32> to memref<?xindex>
-// CHECK: %[[VAL_10:.*]] = linalg.sparse_values %[[VAL_1]] : tensor<16xf32> to memref<?xf32>
+// CHECK: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_3]] : tensor<16xf32> to memref<?xindex>
+// CHECK: %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_3]] : tensor<16xf32> to memref<?xindex>
+// CHECK: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<16xf32> to memref<?xf32>
+// CHECK: %[[VAL_8:.*]] = sparse_tensor.pointers %[[VAL_1]], %[[VAL_3]] : tensor<16xf32> to memref<?xindex>
+// CHECK: %[[VAL_9:.*]] = sparse_tensor.indices %[[VAL_1]], %[[VAL_3]] : tensor<16xf32> to memref<?xindex>
+// CHECK: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<16xf32> to memref<?xf32>
 // CHECK: %[[VAL_11:.*]] = memref.buffer_cast %[[VAL_2]] : memref<f32>
 // CHECK: %[[VAL_12:.*]] = memref.alloc() : memref<f32>
 // CHECK: linalg.copy(%[[VAL_11]], %[[VAL_12]]) : memref<f32>, memref<f32>
@@ -1062,13 +1062,13 @@ func @sum_reduction_ss(%arga: tensor<16xf32>,
 // CHECK-SAME: %[[VAL_3:.*3]]: tensor<f32>) -> tensor<f32> {
 // CHECK: %[[VAL_4:.*]] = constant 0 : index
 // CHECK: %[[VAL_5:.*]] = constant 1 : index
-// CHECK: %[[VAL_6:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_4]] : tensor<16xf32> to memref<?xindex>
-// CHECK: %[[VAL_7:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_4]] : tensor<16xf32> to memref<?xindex>
-// CHECK: %[[VAL_8:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<16xf32> to memref<?xf32>
+// CHECK: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_4]] : tensor<16xf32> to memref<?xindex>
+// CHECK: %[[VAL_7:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_4]] : tensor<16xf32> to memref<?xindex>
+// CHECK: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<16xf32> to memref<?xf32>
 // CHECK: %[[VAL_9:.*]] = memref.buffer_cast %[[VAL_1]] : memref<f32>
-// CHECK: %[[VAL_10:.*]] = linalg.sparse_pointers %[[VAL_2]], %[[VAL_4]] : tensor<16xf32> to memref<?xindex>
-// CHECK: %[[VAL_11:.*]] = linalg.sparse_indices %[[VAL_2]], %[[VAL_4]] : tensor<16xf32> to memref<?xindex>
-// CHECK: %[[VAL_12:.*]] = linalg.sparse_values %[[VAL_2]] : tensor<16xf32> to memref<?xf32>
+// CHECK: %[[VAL_10:.*]] = sparse_tensor.pointers %[[VAL_2]], %[[VAL_4]] : tensor<16xf32> to memref<?xindex>
+// CHECK: %[[VAL_11:.*]] = sparse_tensor.indices %[[VAL_2]], %[[VAL_4]] : tensor<16xf32> to memref<?xindex>
+// CHECK: %[[VAL_12:.*]] = sparse_tensor.values %[[VAL_2]] : tensor<16xf32> to memref<?xf32>
 // CHECK: %[[VAL_13:.*]] = memref.buffer_cast %[[VAL_3]] : memref<f32>
 // CHECK: %[[VAL_14:.*]] = memref.alloc() : memref<f32>
 // CHECK: linalg.copy(%[[VAL_13]], %[[VAL_14]]) : memref<f32>, memref<f32>
@@ -1189,13 +1189,13 @@ func @sum_reduction_inv(%arga: tensor<16xf32>,
 // CHECK: %[[VAL_6:.*]] = constant true
 // CHECK: %[[VAL_7:.*]] = constant 1 : index
 // CHECK: %[[VAL_8:.*]] = memref.buffer_cast %[[VAL_0]] : memref<?xf64>
-// CHECK: %[[VAL_9:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_5]] : tensor<?xf64> to memref<?xindex>
-// CHECK: %[[VAL_10:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_5]] : tensor<?xf64> to memref<?xindex>
-// CHECK: %[[VAL_11:.*]] = linalg.sparse_values %[[VAL_1]] : tensor<?xf64> to memref<?xf64>
+// CHECK: %[[VAL_9:.*]] = sparse_tensor.pointers %[[VAL_1]], %[[VAL_5]] : tensor<?xf64> to memref<?xindex>
+// CHECK: %[[VAL_10:.*]] = sparse_tensor.indices %[[VAL_1]], %[[VAL_5]] : tensor<?xf64> to memref<?xindex>
+// CHECK: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<?xf64> to memref<?xf64>
 // CHECK: %[[VAL_12:.*]] = memref.buffer_cast %[[VAL_2]] : memref<?xf64>
-// CHECK: %[[VAL_13:.*]] = linalg.sparse_pointers %[[VAL_3]], %[[VAL_5]] : tensor<?xf64> to memref<?xindex>
-// CHECK: %[[VAL_14:.*]] = linalg.sparse_indices %[[VAL_3]], %[[VAL_5]] : tensor<?xf64> to memref<?xindex>
-// CHECK: %[[VAL_15:.*]] = linalg.sparse_values %[[VAL_3]] : tensor<?xf64> to memref<?xf64>
+// CHECK: %[[VAL_13:.*]] = sparse_tensor.pointers %[[VAL_3]], %[[VAL_5]] : tensor<?xf64> to memref<?xindex>
+// CHECK: %[[VAL_14:.*]] = sparse_tensor.indices %[[VAL_3]], %[[VAL_5]] : tensor<?xf64> to memref<?xindex>
+// CHECK: %[[VAL_15:.*]] = sparse_tensor.values %[[VAL_3]] : tensor<?xf64> to memref<?xf64>
 // CHECK: %[[VAL_16:.*]] = memref.dim %[[VAL_4]], %[[VAL_5]] : tensor<?xf64>
 // CHECK: %[[VAL_17:.*]] = memref.buffer_cast %[[VAL_4]] : memref<?xf64>
 // CHECK: %[[VAL_18:.*]] = memref.alloc(%[[VAL_16]]) : memref<?xf64>
@@ -1371,15 +1371,15 @@ func @four_tensors_op(%arga: tensor<?xf64>,
 // CHECK-SAME: %[[VAL_3:.*3]]: tensor<f64>) -> tensor<f64> {
 // CHECK: %[[VAL_4:.*]] = constant 0 : index
 // CHECK: %[[VAL_5:.*]] = constant 1 : index
-// CHECK: %[[VAL_6:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_4]] : tensor<?xf64> to memref<?xindex>
-// CHECK: %[[VAL_7:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_4]] : tensor<?xf64> to memref<?xindex>
-// CHECK: %[[VAL_8:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<?xf64> to memref<?xf64>
-// CHECK: %[[VAL_9:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_4]] : tensor<?xf64> to memref<?xindex>
-// CHECK: %[[VAL_10:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_4]] : tensor<?xf64> to memref<?xindex>
-// CHECK: %[[VAL_11:.*]] = linalg.sparse_values %[[VAL_1]] : tensor<?xf64> to memref<?xf64>
-// CHECK: %[[VAL_12:.*]] = linalg.sparse_pointers %[[VAL_2]], %[[VAL_4]] : tensor<?xf64> to memref<?xindex>
-// CHECK: %[[VAL_13:.*]] = linalg.sparse_indices %[[VAL_2]], %[[VAL_4]] : tensor<?xf64> to memref<?xindex>
-// CHECK: %[[VAL_14:.*]] = linalg.sparse_values %[[VAL_2]] : tensor<?xf64> to memref<?xf64>
+// CHECK: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_4]] : tensor<?xf64> to memref<?xindex>
+// CHECK: %[[VAL_7:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_4]] : tensor<?xf64> to memref<?xindex>
+// CHECK: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<?xf64> to memref<?xf64>
+// CHECK: %[[VAL_9:.*]] = sparse_tensor.pointers %[[VAL_1]], %[[VAL_4]] : tensor<?xf64> to memref<?xindex>
+// CHECK: %[[VAL_10:.*]] = sparse_tensor.indices %[[VAL_1]], %[[VAL_4]] : tensor<?xf64> to memref<?xindex>
+// CHECK: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<?xf64> to memref<?xf64>
+// CHECK: %[[VAL_12:.*]] = sparse_tensor.pointers %[[VAL_2]], %[[VAL_4]] : tensor<?xf64> to memref<?xindex>
+// CHECK: %[[VAL_13:.*]] = sparse_tensor.indices %[[VAL_2]], %[[VAL_4]] : tensor<?xf64> to memref<?xindex>
+// CHECK: %[[VAL_14:.*]] = sparse_tensor.values %[[VAL_2]] : tensor<?xf64> to memref<?xf64>
 // CHECK: %[[VAL_15:.*]] = memref.buffer_cast %[[VAL_3]] : memref<f64>
 // CHECK: %[[VAL_16:.*]] = memref.alloc() : memref<f64>
 // CHECK: linalg.copy(%[[VAL_15]], %[[VAL_16]]) : memref<f64>, memref<f64>
@@ -110,9 +110,9 @@ func @mul_dd(%arga: tensor<32x16xf32>, %argb: tensor<32x16xf32>, %argx: tensor<3
 // CHECK: %[[VAL_5:.*]] = constant 0 : index
 // CHECK: %[[VAL_6:.*]] = constant true
 // CHECK: %[[VAL_7:.*]] = constant 1 : index
-// CHECK: %[[VAL_8:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_7]] : tensor<32x16xf32> to memref<?xindex>
-// CHECK: %[[VAL_9:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_7]] : tensor<32x16xf32> to memref<?xindex>
-// CHECK: %[[VAL_10:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16xf32> to memref<?xf32>
+// CHECK: %[[VAL_8:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_7]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_9:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_7]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32> to memref<?xf32>
 // CHECK: %[[VAL_11:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32x16xf32>
 // CHECK: %[[VAL_12:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32x16xf32>
 // CHECK: %[[VAL_13:.*]] = memref.alloc() : memref<32x16xf32>
@@ -172,9 +172,9 @@ func @add_ds(%arga: tensor<32x16xf32>, %argb: tensor<32x16xf32>, %argx: tensor<3
 // CHECK: %[[VAL_3:.*]] = constant 32 : index
 // CHECK: %[[VAL_4:.*]] = constant 0 : index
 // CHECK: %[[VAL_5:.*]] = constant 1 : index
-// CHECK: %[[VAL_6:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_5]] : tensor<32x16xf32> to memref<?xindex>
-// CHECK: %[[VAL_7:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_5]] : tensor<32x16xf32> to memref<?xindex>
-// CHECK: %[[VAL_8:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16xf32> to memref<?xf32>
+// CHECK: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_5]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_7:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_5]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32> to memref<?xf32>
 // CHECK: %[[VAL_9:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32x16xf32>
 // CHECK: %[[VAL_10:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32x16xf32>
 // CHECK: %[[VAL_11:.*]] = memref.alloc() : memref<32x16xf32>
@@ -229,9 +229,9 @@ func @mul_ds(%arga: tensor<32x16xf32>, %argb: tensor<32x16xf32>, %argx: tensor<3
 // CHECK: %[[VAL_5:.*]] = constant true
 // CHECK: %[[VAL_6:.*]] = constant 0 : index
 // CHECK: %[[VAL_7:.*]] = constant 1 : index
-// CHECK: %[[VAL_8:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_6]] : tensor<32x16xf32> to memref<?xindex>
-// CHECK: %[[VAL_9:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_6]] : tensor<32x16xf32> to memref<?xindex>
-// CHECK: %[[VAL_10:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16xf32> to memref<?xf32>
+// CHECK: %[[VAL_8:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_6]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_9:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_6]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32> to memref<?xf32>
 // CHECK: %[[VAL_11:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32x16xf32>
 // CHECK: %[[VAL_12:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32x16xf32>
 // CHECK: %[[VAL_13:.*]] = memref.alloc() : memref<32x16xf32>
@@ -296,9 +296,9 @@ func @add_sd(%arga: tensor<32x16xf32>, %argb: tensor<32x16xf32>, %argx: tensor<3
 // CHECK: %[[VAL_3:.*]] = constant 16 : index
 // CHECK: %[[VAL_4:.*]] = constant 0 : index
 // CHECK: %[[VAL_5:.*]] = constant 1 : index
-// CHECK: %[[VAL_6:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
-// CHECK: %[[VAL_7:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
-// CHECK: %[[VAL_8:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16xf32> to memref<?xf32>
+// CHECK: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_7:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32> to memref<?xf32>
 // CHECK: %[[VAL_9:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32x16xf32>
 // CHECK: %[[VAL_10:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32x16xf32>
 // CHECK: %[[VAL_11:.*]] = memref.alloc() : memref<32x16xf32>
@@ -354,11 +354,11 @@ func @mul_sd(%arga: tensor<32x16xf32>, %argb: tensor<32x16xf32>, %argx: tensor<3
 // CHECK: %[[VAL_5:.*]] = constant true
 // CHECK: %[[VAL_6:.*]] = constant 0 : index
 // CHECK: %[[VAL_7:.*]] = constant 1 : index
-// CHECK: %[[VAL_8:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_6]] : tensor<32x16xf32> to memref<?xindex>
-// CHECK: %[[VAL_9:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_6]] : tensor<32x16xf32> to memref<?xindex>
-// CHECK: %[[VAL_10:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_7]] : tensor<32x16xf32> to memref<?xindex>
-// CHECK: %[[VAL_11:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_7]] : tensor<32x16xf32> to memref<?xindex>
-// CHECK: %[[VAL_12:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16xf32> to memref<?xf32>
+// CHECK: %[[VAL_8:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_6]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_9:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_6]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_10:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_7]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_11:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_7]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_12:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32> to memref<?xf32>
 // CHECK: %[[VAL_13:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32x16xf32>
 // CHECK: %[[VAL_14:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32x16xf32>
 // CHECK: %[[VAL_15:.*]] = memref.alloc() : memref<32x16xf32>
@@ -446,11 +446,11 @@ func @add_ss(%arga: tensor<32x16xf32>, %argb: tensor<32x16xf32>, %argx: tensor<3
 // CHECK-SAME: %[[VAL_2:.*2]]: tensor<32x16xf32>) -> tensor<32x16xf32> {
 // CHECK: %[[VAL_3:.*]] = constant 0 : index
 // CHECK: %[[VAL_4:.*]] = constant 1 : index
-// CHECK: %[[VAL_5:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
-// CHECK: %[[VAL_6:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
-// CHECK: %[[VAL_7:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
-// CHECK: %[[VAL_8:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
-// CHECK: %[[VAL_9:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16xf32> to memref<?xf32>
+// CHECK: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32> to memref<?xf32>
 // CHECK: %[[VAL_10:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32x16xf32>
 // CHECK: %[[VAL_11:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32x16xf32>
 // CHECK: %[[VAL_12:.*]] = memref.alloc() : memref<32x16xf32>
@@ -505,16 +505,16 @@ func @mul_ss(%arga: tensor<32x16xf32>, %argb: tensor<32x16xf32>, %argx: tensor<3
 // CHECK-SAME: %[[VAL_2:.*2]]: tensor<32x16xf32>) -> tensor<32x16xf32> {
 // CHECK: %[[VAL_3:.*]] = constant 0 : index
 // CHECK: %[[VAL_4:.*]] = constant 1 : index
-// CHECK: %[[VAL_5:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
-// CHECK: %[[VAL_6:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
-// CHECK: %[[VAL_7:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
-// CHECK: %[[VAL_8:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
-// CHECK: %[[VAL_9:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16xf32> to memref<?xf32>
-// CHECK: %[[VAL_10:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
-// CHECK: %[[VAL_11:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
-// CHECK: %[[VAL_12:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
-// CHECK: %[[VAL_13:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
-// CHECK: %[[VAL_14:.*]] = linalg.sparse_values %[[VAL_1]] : tensor<32x16xf32> to memref<?xf32>
+// CHECK: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32> to memref<?xf32>
+// CHECK: %[[VAL_10:.*]] = sparse_tensor.pointers %[[VAL_1]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_11:.*]] = sparse_tensor.indices %[[VAL_1]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_12:.*]] = sparse_tensor.pointers %[[VAL_1]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_13:.*]] = sparse_tensor.indices %[[VAL_1]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
+// CHECK: %[[VAL_14:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x16xf32> to memref<?xf32>
 // CHECK: %[[VAL_15:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32x16xf32>
 // CHECK: %[[VAL_16:.*]] = memref.alloc() : memref<32x16xf32>
 // CHECK: linalg.copy(%[[VAL_15]], %[[VAL_16]]) : memref<32x16xf32>, memref<32x16xf32>
@ -670,16 +670,16 @@ func @add_ss_ss(%arga: tensor<32x16xf32>, %argb: tensor<32x16xf32>, %argx: tenso
|
|||
// CHECK-SAME: %[[VAL_2:.*2]]: tensor<32x16xf32>) -> tensor<32x16xf32> {
|
||||
// CHECK: %[[VAL_3:.*]] = constant 0 : index
|
||||
// CHECK: %[[VAL_4:.*]] = constant 1 : index
|
||||
// CHECK: %[[VAL_5:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
|
||||
// CHECK: %[[VAL_6:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
|
||||
// CHECK: %[[VAL_7:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
|
||||
// CHECK: %[[VAL_8:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
|
||||
// CHECK: %[[VAL_9:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16xf32> to memref<?xf32>
|
||||
// CHECK: %[[VAL_10:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
|
||||
// CHECK: %[[VAL_11:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
|
||||
// CHECK: %[[VAL_12:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
|
||||
// CHECK: %[[VAL_13:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
|
||||
// CHECK: %[[VAL_14:.*]] = linalg.sparse_values %[[VAL_1]] : tensor<32x16xf32> to memref<?xf32>
|
||||
// CHECK: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
|
||||
// CHECK: %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
|
||||
// CHECK: %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
|
||||
// CHECK: %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
|
||||
// CHECK: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32> to memref<?xf32>
|
||||
// CHECK: %[[VAL_10:.*]] = sparse_tensor.pointers %[[VAL_1]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
|
||||
// CHECK: %[[VAL_11:.*]] = sparse_tensor.indices %[[VAL_1]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
|
||||
// CHECK: %[[VAL_12:.*]] = sparse_tensor.pointers %[[VAL_1]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
|
||||
// CHECK: %[[VAL_13:.*]] = sparse_tensor.indices %[[VAL_1]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
|
||||
// CHECK: %[[VAL_14:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x16xf32> to memref<?xf32>
|
||||
// CHECK: %[[VAL_15:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32x16xf32>
|
||||
// CHECK: %[[VAL_16:.*]] = memref.alloc() : memref<32x16xf32>
|
||||
// CHECK: linalg.copy(%[[VAL_15]], %[[VAL_16]]) : memref<32x16xf32>, memref<32x16xf32>
|
||||
|
@ -782,16 +782,16 @@ func @mul_ss_ss(%arga: tensor<32x16xf32>, %argb: tensor<32x16xf32>, %argx: tenso
|
|||
// CHECK-SAME: %[[VAL_2:.*2]]: tensor<32x16xf32>) -> tensor<32x16xf32> {
|
||||
// CHECK: %[[VAL_3:.*]] = constant 0 : index
|
||||
// CHECK: %[[VAL_4:.*]] = constant 1 : index
|
||||
// CHECK: %[[VAL_5:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
|
||||
// CHECK: %[[VAL_6:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
|
||||
// CHECK: %[[VAL_7:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
|
||||
// CHECK: %[[VAL_8:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
|
||||
// CHECK: %[[VAL_9:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16xf32> to memref<?xf32>
|
||||
// CHECK: %[[VAL_10:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
|
||||
// CHECK: %[[VAL_11:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
|
||||
// CHECK: %[[VAL_12:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
|
||||
// CHECK: %[[VAL_13:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
|
||||
// CHECK: %[[VAL_14:.*]] = linalg.sparse_values %[[VAL_1]] : tensor<32x16xf32> to memref<?xf32>
|
||||
// CHECK: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
|
||||
// CHECK: %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
|
||||
// CHECK: %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
|
||||
// CHECK: %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
|
||||
// CHECK: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32> to memref<?xf32>
|
||||
// CHECK: %[[VAL_10:.*]] = sparse_tensor.pointers %[[VAL_1]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
|
||||
// CHECK: %[[VAL_11:.*]] = sparse_tensor.indices %[[VAL_1]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
|
||||
// CHECK: %[[VAL_12:.*]] = sparse_tensor.pointers %[[VAL_1]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
|
||||
// CHECK: %[[VAL_13:.*]] = sparse_tensor.indices %[[VAL_1]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
|
||||
// CHECK: %[[VAL_14:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x16xf32> to memref<?xf32>
|
||||
// CHECK: %[[VAL_15:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32x16xf32>
|
||||
// CHECK: %[[VAL_16:.*]] = memref.alloc() : memref<32x16xf32>
|
||||
// CHECK: linalg.copy(%[[VAL_15]], %[[VAL_16]]) : memref<32x16xf32>, memref<32x16xf32>
|
||||
|
@ -947,16 +947,16 @@ func @add_sd_ds(%arga: tensor<32x16xf32>, %argb: tensor<32x16xf32>, %argx: tenso
|
|||
// CHECK-SAME: %[[VAL_2:.*2]]: tensor<32x16xf32>) -> tensor<32x16xf32> {
|
||||
// CHECK: %[[VAL_3:.*]] = constant 0 : index
|
||||
// CHECK: %[[VAL_4:.*]] = constant 1 : index
|
||||
// CHECK: %[[VAL_5:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
|
||||
// CHECK: %[[VAL_6:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
|
||||
// CHECK: %[[VAL_7:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
|
||||
// CHECK: %[[VAL_8:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
|
||||
// CHECK: %[[VAL_9:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16xf32> to memref<?xf32>
|
||||
// CHECK: %[[VAL_10:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
|
||||
// CHECK: %[[VAL_11:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
|
||||
// CHECK: %[[VAL_12:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
|
||||
// CHECK: %[[VAL_13:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
|
||||
// CHECK: %[[VAL_14:.*]] = linalg.sparse_values %[[VAL_1]] : tensor<32x16xf32> to memref<?xf32>
|
||||
// CHECK: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
|
||||
// CHECK: %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
|
||||
// CHECK: %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
|
||||
// CHECK: %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
|
||||
// CHECK: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16xf32> to memref<?xf32>
|
||||
// CHECK: %[[VAL_10:.*]] = sparse_tensor.pointers %[[VAL_1]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
|
||||
// CHECK: %[[VAL_11:.*]] = sparse_tensor.indices %[[VAL_1]], %[[VAL_3]] : tensor<32x16xf32> to memref<?xindex>
|
||||
// CHECK: %[[VAL_12:.*]] = sparse_tensor.pointers %[[VAL_1]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
|
||||
// CHECK: %[[VAL_13:.*]] = sparse_tensor.indices %[[VAL_1]], %[[VAL_4]] : tensor<32x16xf32> to memref<?xindex>
|
||||
// CHECK: %[[VAL_14:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<32x16xf32> to memref<?xf32>
|
||||
// CHECK: %[[VAL_15:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32x16xf32>
|
||||
// CHECK: %[[VAL_16:.*]] = memref.alloc() : memref<32x16xf32>
|
||||
// CHECK: linalg.copy(%[[VAL_15]], %[[VAL_16]]) : memref<32x16xf32>, memref<32x16xf32>
|
||||
|
@ -1060,9 +1060,9 @@ func @mul_sd_ds(%arga: tensor<32x16xf32>, %argb: tensor<32x16xf32>, %argx: tenso
|
|||
// CHECK: %[[VAL_3:.*]] = constant 16 : index
|
||||
// CHECK: %[[VAL_4:.*]] = constant 0 : index
|
||||
// CHECK: %[[VAL_5:.*]] = constant 1 : index
|
||||
// CHECK: %[[VAL_6:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_5]] : tensor<16x32xf32> to memref<?xindex>
|
||||
// CHECK: %[[VAL_7:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_5]] : tensor<16x32xf32> to memref<?xindex>
|
||||
// CHECK: %[[VAL_8:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<16x32xf32> to memref<?xf32>
|
||||
// CHECK: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_5]] : tensor<16x32xf32> to memref<?xindex>
|
||||
// CHECK: %[[VAL_7:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_5]] : tensor<16x32xf32> to memref<?xindex>
|
||||
// CHECK: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<16x32xf32> to memref<?xf32>
|
||||
// CHECK: %[[VAL_9:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32xf32>
|
||||
// CHECK: %[[VAL_10:.*]] = memref.buffer_cast %[[VAL_2]] : memref<16xf32>
|
||||
// CHECK: %[[VAL_11:.*]] = memref.alloc() : memref<16xf32>
|
||||
|
@ -1116,8 +1116,8 @@ func @matvec(%argA: tensor<16x32xf32>, %argb: tensor<32xf32>, %argx: tensor<16xf
|
|||
// CHECK: %[[VAL_2:.*]] = constant 10 : index
|
||||
// CHECK: %[[VAL_3:.*]] = constant 0 : index
|
||||
// CHECK: %[[VAL_4:.*]] = constant 1 : index
|
||||
// CHECK: %[[VAL_5:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_4]] : tensor<10x20xf32> to memref<?xindex>
|
||||
// CHECK: %[[VAL_6:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<10x20xf32> to memref<?xf32>
|
||||
// CHECK: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_4]] : tensor<10x20xf32> to memref<?xindex>
|
||||
// CHECK: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<10x20xf32> to memref<?xf32>
|
||||
// CHECK: %[[VAL_7:.*]] = memref.buffer_cast %[[VAL_1]] : memref<f32>
|
||||
// CHECK: %[[VAL_8:.*]] = memref.alloc() : memref<f32>
|
||||
// CHECK: linalg.copy(%[[VAL_7]], %[[VAL_8]]) : memref<f32>, memref<f32>
|
||||
|
@ -1166,9 +1166,9 @@ func @sum_reduction(%arga: tensor<10x20xf32>, %argx: tensor<f32>) -> tensor<f32>
|
|||
// CHECK-DAG: %[[VAL_3:.*]] = constant 0 : index
|
||||
// CHECK-DAG: %[[VAL_4:.*]] = constant 1 : index
|
||||
// CHECK-DAG: %[[VAL_2:.*]] = constant 2.000000e+00 : f64
|
||||
// CHECK: %[[VAL_5:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_4]] : tensor<?x?xf64> to memref<?xindex>
|
||||
// CHECK: %[[VAL_6:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_4]] : tensor<?x?xf64> to memref<?xindex>
|
||||
// CHECK: %[[VAL_7:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<?x?xf64> to memref<?xf64>
|
||||
// CHECK: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_4]] : tensor<?x?xf64> to memref<?xindex>
|
||||
// CHECK: %[[VAL_6:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_4]] : tensor<?x?xf64> to memref<?xindex>
|
||||
// CHECK: %[[VAL_7:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<?x?xf64> to memref<?xf64>
|
||||
// CHECK: %[[VAL_8:.*]] = memref.dim %[[VAL_1]], %[[VAL_3]] : tensor<?x?xf64>
|
||||
// CHECK: %[[VAL_9:.*]] = memref.dim %[[VAL_1]], %[[VAL_4]] : tensor<?x?xf64>
|
||||
// CHECK: %[[VAL_10:.*]] = memref.buffer_cast %[[VAL_1]] : memref<?x?xf64>
|
||||
|
@ -1224,11 +1224,11 @@ func @scale(%arga: tensor<?x?xf64>, %argx: tensor<?x?xf64>) -> tensor<?x?xf64> {
|
|||
// CHECK-SAME: %[[VAL_3:.*3]]: tensor<?x?xf32>) -> tensor<?x?xf32> {
|
||||
// CHECK: %[[VAL_4:.*]] = constant 0 : index
|
||||
// CHECK: %[[VAL_5:.*]] = constant 1 : index
|
||||
// CHECK: %[[VAL_6:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_4]] : tensor<?x?xf32> to memref<?xindex>
|
||||
// CHECK: %[[VAL_7:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_4]] : tensor<?x?xf32> to memref<?xindex>
|
||||
// CHECK: %[[VAL_8:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_5]] : tensor<?x?xf32> to memref<?xindex>
|
||||
// CHECK: %[[VAL_9:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_5]] : tensor<?x?xf32> to memref<?xindex>
|
||||
// CHECK: %[[VAL_10:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<?x?xf32> to memref<?xf32>
|
||||
// CHECK: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_4]] : tensor<?x?xf32> to memref<?xindex>
|
||||
// CHECK: %[[VAL_7:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_4]] : tensor<?x?xf32> to memref<?xindex>
|
||||
// CHECK: %[[VAL_8:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_5]] : tensor<?x?xf32> to memref<?xindex>
|
||||
// CHECK: %[[VAL_9:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_5]] : tensor<?x?xf32> to memref<?xindex>
|
||||
// CHECK: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<?x?xf32> to memref<?xf32>
|
||||
// CHECK: %[[VAL_11:.*]] = memref.buffer_cast %[[VAL_1]] : memref<?x?xf32>
|
||||
// CHECK: %[[VAL_12:.*]] = memref.dim %[[VAL_2]], %[[VAL_4]] : tensor<?x?xf32>
|
||||
// CHECK: %[[VAL_13:.*]] = memref.buffer_cast %[[VAL_2]] : memref<?x?xf32>
|
||||
|
@ -1308,17 +1308,17 @@ func @sampled_dense_dense(%args: tensor<?x?xf32>,
|
|||
// CHECK: %[[VAL_6:.*]] = constant 0 : index
|
||||
// CHECK: %[[VAL_7:.*]] = constant true
|
||||
// CHECK: %[[VAL_8:.*]] = constant 1 : index
|
||||
// CHECK: %[[VAL_9:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_6]] : tensor<?x?xf32> to memref<?xindex>
|
||||
// CHECK: %[[VAL_10:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_6]] : tensor<?x?xf32> to memref<?xindex>
|
||||
// CHECK: %[[VAL_11:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_8]] : tensor<?x?xf32> to memref<?xindex>
|
||||
// CHECK: %[[VAL_12:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_8]] : tensor<?x?xf32> to memref<?xindex>
|
||||
// CHECK: %[[VAL_13:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<?x?xf32> to memref<?xf32>
|
||||
// CHECK: %[[VAL_14:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_8]] : tensor<?x?xf32> to memref<?xindex>
|
||||
// CHECK: %[[VAL_15:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_8]] : tensor<?x?xf32> to memref<?xindex>
|
||||
// CHECK: %[[VAL_16:.*]] = linalg.sparse_values %[[VAL_1]] : tensor<?x?xf32> to memref<?xf32>
|
||||
// CHECK: %[[VAL_17:.*]] = linalg.sparse_pointers %[[VAL_2]], %[[VAL_8]] : tensor<?x?xf32> to memref<?xindex>
|
||||
// CHECK: %[[VAL_18:.*]] = linalg.sparse_indices %[[VAL_2]], %[[VAL_8]] : tensor<?x?xf32> to memref<?xindex>
|
||||
// CHECK: %[[VAL_19:.*]] = linalg.sparse_values %[[VAL_2]] : tensor<?x?xf32> to memref<?xf32>
|
||||
// CHECK: %[[VAL_9:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_6]] : tensor<?x?xf32> to memref<?xindex>
|
||||
// CHECK: %[[VAL_10:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_6]] : tensor<?x?xf32> to memref<?xindex>
|
||||
// CHECK: %[[VAL_11:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_8]] : tensor<?x?xf32> to memref<?xindex>
|
||||
// CHECK: %[[VAL_12:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_8]] : tensor<?x?xf32> to memref<?xindex>
|
||||
// CHECK: %[[VAL_13:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<?x?xf32> to memref<?xf32>
|
||||
// CHECK: %[[VAL_14:.*]] = sparse_tensor.pointers %[[VAL_1]], %[[VAL_8]] : tensor<?x?xf32> to memref<?xindex>
|
||||
// CHECK: %[[VAL_15:.*]] = sparse_tensor.indices %[[VAL_1]], %[[VAL_8]] : tensor<?x?xf32> to memref<?xindex>
|
||||
// CHECK: %[[VAL_16:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<?x?xf32> to memref<?xf32>
|
||||
// CHECK: %[[VAL_17:.*]] = sparse_tensor.pointers %[[VAL_2]], %[[VAL_8]] : tensor<?x?xf32> to memref<?xindex>
|
||||
// CHECK: %[[VAL_18:.*]] = sparse_tensor.indices %[[VAL_2]], %[[VAL_8]] : tensor<?x?xf32> to memref<?xindex>
|
||||
// CHECK: %[[VAL_19:.*]] = sparse_tensor.values %[[VAL_2]] : tensor<?x?xf32> to memref<?xf32>
|
||||
// CHECK: %[[VAL_20:.*]] = memref.buffer_cast %[[VAL_3]] : memref<?xf32>
|
||||
// CHECK: %[[VAL_21:.*]] = memref.buffer_cast %[[VAL_4]] : memref<f32>
|
||||
// CHECK: %[[VAL_22:.*]] = memref.dim %[[VAL_5]], %[[VAL_6]] : tensor<?xf32>
|
||||
|
|
|
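The hunks above and below are mechanical spelling changes: every linalg.sparse_pointers, linalg.sparse_indices, and linalg.sparse_values capture in these FileCheck tests becomes the matching sparse_tensor op, with operands and result types unchanged. As a minimal sketch of how the renamed query ops fit together (hypothetical SSA names, mirroring the CHECK lines above; not itself part of this revision), CSR-style access to dimension 1 of a sparse operand %t reads:

  %c1 = constant 1 : index
  // Positions delimiting each row's segment of stored entries.
  %ptrs = sparse_tensor.pointers %t, %c1 : tensor<32x16xf32> to memref<?xindex>
  // Column indices of the stored entries, one per segment slot.
  %inds = sparse_tensor.indices %t, %c1 : tensor<32x16xf32> to memref<?xindex>
  // The stored nonzero values themselves.
  %vals = sparse_tensor.values %t : tensor<32x16xf32> to memref<?xf32>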
@ -118,9 +118,9 @@ func @mul_ddd(%arga: tensor<32x16x8xf32>, %argb: tensor<32x16x8xf32>, %argx: ten
// CHECK: %[[VAL_7:.*]] = constant 0 : index
// CHECK: %[[VAL_8:.*]] = constant true
// CHECK: %[[VAL_9:.*]] = constant 1 : index
// CHECK: %[[VAL_10:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_11:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_12:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
// CHECK: %[[VAL_10:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_11:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_12:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
// CHECK: %[[VAL_13:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32x16x8xf32>
// CHECK: %[[VAL_14:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32x16x8xf32>
// CHECK: %[[VAL_15:.*]] = memref.alloc() : memref<32x16x8xf32>

@ -186,9 +186,9 @@ func @add_dds(%arga: tensor<32x16x8xf32>, %argb: tensor<32x16x8xf32>, %argx: ten
// CHECK: %[[VAL_5:.*]] = constant 16 : index
// CHECK: %[[VAL_6:.*]] = constant 0 : index
// CHECK: %[[VAL_7:.*]] = constant 1 : index
// CHECK: %[[VAL_8:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_9:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_10:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
// CHECK: %[[VAL_8:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_9:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
// CHECK: %[[VAL_11:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32x16x8xf32>
// CHECK: %[[VAL_12:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32x16x8xf32>
// CHECK: %[[VAL_13:.*]] = memref.alloc() : memref<32x16x8xf32>

@ -248,9 +248,9 @@ func @mul_dds(%arga: tensor<32x16x8xf32>, %argb: tensor<32x16x8xf32>, %argx: ten
// CHECK: %[[VAL_6:.*]] = constant true
// CHECK: %[[VAL_7:.*]] = constant 0 : index
// CHECK: %[[VAL_8:.*]] = constant 1 : index
// CHECK: %[[VAL_9:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_8]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_10:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_8]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_11:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
// CHECK: %[[VAL_9:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_8]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_10:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_8]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
// CHECK: %[[VAL_12:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32x16x8xf32>
// CHECK: %[[VAL_13:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32x16x8xf32>
// CHECK: %[[VAL_14:.*]] = memref.alloc() : memref<32x16x8xf32>

@ -319,9 +319,9 @@ func @add_dsd(%arga: tensor<32x16x8xf32>, %argb: tensor<32x16x8xf32>, %argx: ten
// CHECK: %[[VAL_4:.*]] = constant 8 : index
// CHECK: %[[VAL_5:.*]] = constant 0 : index
// CHECK: %[[VAL_6:.*]] = constant 1 : index
// CHECK: %[[VAL_7:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_6]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_8:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_6]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_9:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
// CHECK: %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_6]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_6]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
// CHECK: %[[VAL_10:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32x16x8xf32>
// CHECK: %[[VAL_11:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32x16x8xf32>
// CHECK: %[[VAL_12:.*]] = memref.alloc() : memref<32x16x8xf32>

@ -382,11 +382,11 @@ func @mul_dsd(%arga: tensor<32x16x8xf32>, %argb: tensor<32x16x8xf32>, %argx: ten
// CHECK: %[[VAL_7:.*]] = constant true
// CHECK: %[[VAL_8:.*]] = constant 0 : index
// CHECK: %[[VAL_9:.*]] = constant 1 : index
// CHECK: %[[VAL_10:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_9]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_11:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_9]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_12:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_13:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_14:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
// CHECK: %[[VAL_10:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_9]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_11:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_9]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_12:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_13:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_14:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
// CHECK: %[[VAL_15:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32x16x8xf32>
// CHECK: %[[VAL_16:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32x16x8xf32>
// CHECK: %[[VAL_17:.*]] = memref.alloc() : memref<32x16x8xf32>

@ -479,11 +479,11 @@ func @add_dss(%arga: tensor<32x16x8xf32>, %argb: tensor<32x16x8xf32>, %argx: ten
// CHECK: %[[VAL_4:.*]] = constant 32 : index
// CHECK: %[[VAL_5:.*]] = constant 0 : index
// CHECK: %[[VAL_6:.*]] = constant 1 : index
// CHECK: %[[VAL_7:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_6]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_8:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_6]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_9:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_10:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_11:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
// CHECK: %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_6]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_6]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_9:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_10:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
// CHECK: %[[VAL_12:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32x16x8xf32>
// CHECK: %[[VAL_13:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32x16x8xf32>
// CHECK: %[[VAL_14:.*]] = memref.alloc() : memref<32x16x8xf32>

@ -545,9 +545,9 @@ func @mul_dss(%arga: tensor<32x16x8xf32>, %argb: tensor<32x16x8xf32>, %argx: ten
// CHECK: %[[VAL_6:.*]] = constant true
// CHECK: %[[VAL_7:.*]] = constant 0 : index
// CHECK: %[[VAL_8:.*]] = constant 1 : index
// CHECK: %[[VAL_9:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_7]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_10:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_7]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_11:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
// CHECK: %[[VAL_9:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_7]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_10:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_7]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
// CHECK: %[[VAL_12:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32x16x8xf32>
// CHECK: %[[VAL_13:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32x16x8xf32>
// CHECK: %[[VAL_14:.*]] = memref.alloc() : memref<32x16x8xf32>

@ -621,9 +621,9 @@ func @add_sdd(%arga: tensor<32x16x8xf32>, %argb: tensor<32x16x8xf32>, %argx: ten
// CHECK: %[[VAL_4:.*]] = constant 8 : index
// CHECK: %[[VAL_5:.*]] = constant 0 : index
// CHECK: %[[VAL_6:.*]] = constant 1 : index
// CHECK: %[[VAL_7:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_5]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_8:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_5]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_9:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
// CHECK: %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_5]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_5]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
// CHECK: %[[VAL_10:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32x16x8xf32>
// CHECK: %[[VAL_11:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32x16x8xf32>
// CHECK: %[[VAL_12:.*]] = memref.alloc() : memref<32x16x8xf32>

@ -685,11 +685,11 @@ func @mul_sdd(%arga: tensor<32x16x8xf32>, %argb: tensor<32x16x8xf32>, %argx: ten
// CHECK: %[[VAL_7:.*]] = constant true
// CHECK: %[[VAL_8:.*]] = constant 0 : index
// CHECK: %[[VAL_9:.*]] = constant 1 : index
// CHECK: %[[VAL_10:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_8]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_11:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_8]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_12:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_13:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_14:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
// CHECK: %[[VAL_10:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_8]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_11:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_8]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_12:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_13:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_14:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
// CHECK: %[[VAL_15:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32x16x8xf32>
// CHECK: %[[VAL_16:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32x16x8xf32>
// CHECK: %[[VAL_17:.*]] = memref.alloc() : memref<32x16x8xf32>

@ -787,11 +787,11 @@ func @add_sds(%arga: tensor<32x16x8xf32>, %argb: tensor<32x16x8xf32>, %argx: ten
// CHECK: %[[VAL_4:.*]] = constant 16 : index
// CHECK: %[[VAL_5:.*]] = constant 0 : index
// CHECK: %[[VAL_6:.*]] = constant 1 : index
// CHECK: %[[VAL_7:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_5]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_8:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_5]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_9:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_10:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_11:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
// CHECK: %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_5]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_5]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_9:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_10:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_11:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
// CHECK: %[[VAL_12:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32x16x8xf32>
// CHECK: %[[VAL_13:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32x16x8xf32>
// CHECK: %[[VAL_14:.*]] = memref.alloc() : memref<32x16x8xf32>

@ -854,11 +854,11 @@ func @mul_sds(%arga: tensor<32x16x8xf32>, %argb: tensor<32x16x8xf32>, %argx: ten
// CHECK: %[[VAL_6:.*]] = constant true
// CHECK: %[[VAL_7:.*]] = constant 0 : index
// CHECK: %[[VAL_8:.*]] = constant 1 : index
// CHECK: %[[VAL_9:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_7]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_10:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_7]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_11:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_8]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_12:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_8]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_13:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
// CHECK: %[[VAL_9:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_7]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_10:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_7]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_11:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_8]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_12:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_8]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_13:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
// CHECK: %[[VAL_14:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32x16x8xf32>
// CHECK: %[[VAL_15:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32x16x8xf32>
// CHECK: %[[VAL_16:.*]] = memref.alloc() : memref<32x16x8xf32>

@ -959,11 +959,11 @@ func @add_ssd(%arga: tensor<32x16x8xf32>, %argb: tensor<32x16x8xf32>, %argx: ten
// CHECK: %[[VAL_3:.*]] = constant 8 : index
// CHECK: %[[VAL_4:.*]] = constant 0 : index
// CHECK: %[[VAL_5:.*]] = constant 1 : index
// CHECK: %[[VAL_6:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_4]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_7:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_4]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_8:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_5]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_9:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_5]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_10:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
// CHECK: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_4]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_7:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_4]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_8:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_5]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_9:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_5]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_10:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
// CHECK: %[[VAL_11:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32x16x8xf32>
// CHECK: %[[VAL_12:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32x16x8xf32>
// CHECK: %[[VAL_13:.*]] = memref.alloc() : memref<32x16x8xf32>

@ -1027,13 +1027,13 @@ func @mul_ssd(%arga: tensor<32x16x8xf32>, %argb: tensor<32x16x8xf32>, %argx: ten
// CHECK: %[[VAL_7:.*]] = constant true
// CHECK: %[[VAL_8:.*]] = constant 0 : index
// CHECK: %[[VAL_9:.*]] = constant 1 : index
// CHECK: %[[VAL_10:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_8]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_11:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_8]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_12:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_9]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_13:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_9]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_14:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_15:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_16:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
// CHECK: %[[VAL_10:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_8]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_11:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_8]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_12:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_9]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_13:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_9]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_14:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_15:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_16:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
// CHECK: %[[VAL_17:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32x16x8xf32>
// CHECK: %[[VAL_18:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32x16x8xf32>
// CHECK: %[[VAL_19:.*]] = memref.alloc() : memref<32x16x8xf32>

@ -1158,13 +1158,13 @@ func @add_sss(%arga: tensor<32x16x8xf32>, %argb: tensor<32x16x8xf32>, %argx: ten
// CHECK: %[[VAL_3:.*]] = constant 2 : index
// CHECK: %[[VAL_4:.*]] = constant 0 : index
// CHECK: %[[VAL_5:.*]] = constant 1 : index
// CHECK: %[[VAL_6:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_4]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_7:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_4]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_8:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_5]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_9:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_5]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_10:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_11:.*]] = linalg.sparse_indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_12:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
// CHECK: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_4]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_7:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_4]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_8:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_5]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_9:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_5]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_10:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_11:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_3]] : tensor<32x16x8xf32> to memref<?xindex>
// CHECK: %[[VAL_12:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x16x8xf32> to memref<?xf32>
// CHECK: %[[VAL_13:.*]] = memref.buffer_cast %[[VAL_1]] : memref<32x16x8xf32>
// CHECK: %[[VAL_14:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32x16x8xf32>
// CHECK: %[[VAL_15:.*]] = memref.alloc() : memref<32x16x8xf32>

@ -1229,9 +1229,9 @@ func @mul_sss(%arga: tensor<32x16x8xf32>, %argb: tensor<32x16x8xf32>, %argx: ten
// CHECK: %[[VAL_4:.*]] = constant 2 : index
// CHECK: %[[VAL_5:.*]] = constant 0 : index
// CHECK: %[[VAL_6:.*]] = constant 1 : index
// CHECK: %[[VAL_7:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_4]] : tensor<?x?x?xf32> to memref<?xindex>
// CHECK: %[[VAL_8:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_4]] : tensor<?x?x?xf32> to memref<?xindex>
// CHECK: %[[VAL_9:.*]] = linalg.sparse_values %[[VAL_1]] : tensor<?x?x?xf32> to memref<?xf32>
// CHECK: %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_1]], %[[VAL_4]] : tensor<?x?x?xf32> to memref<?xindex>
// CHECK: %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_1]], %[[VAL_4]] : tensor<?x?x?xf32> to memref<?xindex>
// CHECK: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<?x?x?xf32> to memref<?xf32>
// CHECK: %[[VAL_10:.*]] = memref.dim %[[VAL_2]], %[[VAL_5]] : tensor<?x?xf32>
// CHECK: %[[VAL_11:.*]] = memref.buffer_cast %[[VAL_2]] : memref<?x?xf32>
// CHECK: %[[VAL_12:.*]] = memref.buffer_cast %[[VAL_3]] : memref<?x?xf32>

@ -1300,10 +1300,10 @@ func @kernel_3d(%arga: tensor<?x?xf32>,
// CHECK: %[[VAL_2:.*]] = constant 2 : index
// CHECK: %[[VAL_3:.*]] = constant 0 : index
// CHECK: %[[VAL_4:.*]] = constant 1 : index
// CHECK: %[[VAL_5:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_3]] : tensor<10x20x30xf32> to memref<?xindex>
// CHECK: %[[VAL_6:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_4]] : tensor<10x20x30xf32> to memref<?xindex>
// CHECK: %[[VAL_7:.*]] = linalg.sparse_pointers %[[VAL_0]], %[[VAL_2]] : tensor<10x20x30xf32> to memref<?xindex>
// CHECK: %[[VAL_8:.*]] = linalg.sparse_values %[[VAL_0]] : tensor<10x20x30xf32> to memref<?xf32>
// CHECK: %[[VAL_5:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_3]] : tensor<10x20x30xf32> to memref<?xindex>
// CHECK: %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_4]] : tensor<10x20x30xf32> to memref<?xindex>
// CHECK: %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_2]] : tensor<10x20x30xf32> to memref<?xindex>
// CHECK: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<10x20x30xf32> to memref<?xf32>
// CHECK: %[[VAL_9:.*]] = memref.buffer_cast %[[VAL_1]] : memref<f32>
// CHECK: %[[VAL_10:.*]] = memref.alloc() : memref<f32>
// CHECK: linalg.copy(%[[VAL_9]], %[[VAL_10]]) : memref<f32>, memref<f32>
@ -41,10 +41,10 @@
// CHECK-HIR: %[[VAL_3:.*]] = constant 64 : index
// CHECK-HIR: %[[VAL_4:.*]] = constant 0 : index
// CHECK-HIR: %[[VAL_5:.*]] = constant 1 : index
// CHECK-HIR: %[[VAL_6:.*]] = linalg.sparse_tensor %[[VAL_0]] : !llvm.ptr<i8> to tensor<64x64xf64>
// CHECK-HIR: %[[VAL_7:.*]] = linalg.sparse_pointers %[[VAL_6]], %[[VAL_5]] : tensor<64x64xf64> to memref<?xindex>
// CHECK-HIR: %[[VAL_8:.*]] = linalg.sparse_indices %[[VAL_6]], %[[VAL_5]] : tensor<64x64xf64> to memref<?xindex>
// CHECK-HIR: %[[VAL_9:.*]] = linalg.sparse_values %[[VAL_6]] : tensor<64x64xf64> to memref<?xf64>
// CHECK-HIR: %[[VAL_6:.*]] = sparse_tensor.fromPtr %[[VAL_0]] : !llvm.ptr<i8> to tensor<64x64xf64>
// CHECK-HIR: %[[VAL_7:.*]] = sparse_tensor.pointers %[[VAL_6]], %[[VAL_5]] : tensor<64x64xf64> to memref<?xindex>
// CHECK-HIR: %[[VAL_8:.*]] = sparse_tensor.indices %[[VAL_6]], %[[VAL_5]] : tensor<64x64xf64> to memref<?xindex>
// CHECK-HIR: %[[VAL_9:.*]] = sparse_tensor.values %[[VAL_6]] : tensor<64x64xf64> to memref<?xf64>
// CHECK-HIR: %[[VAL_10:.*]] = memref.buffer_cast %[[VAL_1]] : memref<64xf64>
// CHECK-HIR: %[[VAL_11:.*]] = memref.buffer_cast %[[VAL_2]] : memref<64xf64>
// CHECK-HIR: %[[VAL_12:.*]] = memref.alloc() : memref<64xf64>

@ -168,7 +168,7 @@
!SparseTensor = type !llvm.ptr<i8>

func @matvec(%argA: !SparseTensor, %argb: tensor<64xf64>, %argx: tensor<64xf64>) -> tensor<64xf64> {
  %arga = linalg.sparse_tensor %argA : !SparseTensor to tensor<64x64xf64>
  %arga = sparse_tensor.fromPtr %argA : !SparseTensor to tensor<64x64xf64>
  %0 = linalg.generic #trait_matvec
    ins(%arga, %argb : tensor<64x64xf64>, tensor<64xf64>)
    outs(%argx: tensor<64xf64>) {
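Beyond the pointers/indices/values queries, the materialization op is renamed as well: linalg.sparse_tensor becomes sparse_tensor.fromPtr, still taking the opaque !llvm.ptr<i8> runtime handle as input. A minimal self-contained sketch of the new spelling (function name hypothetical; types and shapes as in the test above):

  !SparseTensor = type !llvm.ptr<i8>

  func @from_ptr_sketch(%argA: !SparseTensor) -> memref<?xf64> {
    // Wrap the opaque runtime handle as a tensor value ...
    %a = sparse_tensor.fromPtr %argA : !SparseTensor to tensor<64x64xf64>
    // ... so downstream ops can query its underlying sparse storage.
    %v = sparse_tensor.values %a : tensor<64x64xf64> to memref<?xf64>
    return %v : memref<?xf64>
  }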
@ -34,11 +34,11 @@
// CHECK: %[[VAL_11:.*]] = constant 0 : index
// CHECK: %[[VAL_12:.*]] = constant 1 : index
// CHECK: %[[VAL_13:.*]] = memref.buffer_cast %[[VAL_0]] : memref<10x20x30x40x50x60x70x80xf32>
// CHECK: %[[VAL_14:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_3]] : tensor<80x70x60x50x40x30x20x10xf32> to memref<?xindex>
// CHECK: %[[VAL_15:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_3]] : tensor<80x70x60x50x40x30x20x10xf32> to memref<?xindex>
// CHECK: %[[VAL_16:.*]] = linalg.sparse_pointers %[[VAL_1]], %[[VAL_4]] : tensor<80x70x60x50x40x30x20x10xf32> to memref<?xindex>
// CHECK: %[[VAL_17:.*]] = linalg.sparse_indices %[[VAL_1]], %[[VAL_4]] : tensor<80x70x60x50x40x30x20x10xf32> to memref<?xindex>
// CHECK: %[[VAL_18:.*]] = linalg.sparse_values %[[VAL_1]] : tensor<80x70x60x50x40x30x20x10xf32> to memref<?xf32>
// CHECK: %[[VAL_14:.*]] = sparse_tensor.pointers %[[VAL_1]], %[[VAL_3]] : tensor<80x70x60x50x40x30x20x10xf32> to memref<?xindex>
// CHECK: %[[VAL_15:.*]] = sparse_tensor.indices %[[VAL_1]], %[[VAL_3]] : tensor<80x70x60x50x40x30x20x10xf32> to memref<?xindex>
// CHECK: %[[VAL_16:.*]] = sparse_tensor.pointers %[[VAL_1]], %[[VAL_4]] : tensor<80x70x60x50x40x30x20x10xf32> to memref<?xindex>
// CHECK: %[[VAL_17:.*]] = sparse_tensor.indices %[[VAL_1]], %[[VAL_4]] : tensor<80x70x60x50x40x30x20x10xf32> to memref<?xindex>
// CHECK: %[[VAL_18:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<80x70x60x50x40x30x20x10xf32> to memref<?xf32>
// CHECK: %[[VAL_19:.*]] = memref.buffer_cast %[[VAL_2]] : memref<10x20x30x40x50x60x70x80xf32>
// CHECK: %[[VAL_20:.*]] = memref.alloc() : memref<10x20x30x40x50x60x70x80xf32>
// CHECK: linalg.copy(%[[VAL_19]], %[[VAL_20]]) : memref<10x20x30x40x50x60x70x80xf32>, memref<10x20x30x40x50x60x70x80xf32>

@ -228,8 +228,8 @@ func @mul_s(%arga: tensor<1024xf32>, %argb: tensor<1024xf32>, %argx: tensor<1024
//
!SparseTensor = type !llvm.ptr<i8>
func @mul_s_alt(%argA: !SparseTensor, %argB: !SparseTensor, %argx: tensor<1024xf32>) -> tensor<1024xf32> {
  %arga = linalg.sparse_tensor %argA : !SparseTensor to tensor<1024xf32>
  %argb = linalg.sparse_tensor %argB : !SparseTensor to tensor<1024xf32>
  %arga = sparse_tensor.fromPtr %argA : !SparseTensor to tensor<1024xf32>
  %argb = sparse_tensor.fromPtr %argB : !SparseTensor to tensor<1024xf32>
  %0 = linalg.generic #trait_mul_s
    ins(%arga, %argb: tensor<1024xf32>, tensor<1024xf32>)
    outs(%argx: tensor<1024xf32>) {
@ -8,9 +8,9 @@
// CHECK: %[[T:.*]] = call @sparsePointers64(%[[A]], %[[C]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
// CHECK: return %[[T]] : memref<?xindex>
func @sparse_pointers(%arg0: !SparseTensor) -> memref<?xindex> {
  %a = linalg.sparse_tensor %arg0 : !SparseTensor to tensor<128xf64>
  %a = sparse_tensor.fromPtr %arg0 : !SparseTensor to tensor<128xf64>
  %c = constant 1 : index
  %0 = linalg.sparse_pointers %a, %c : tensor<128xf64> to memref<?xindex>
  %0 = sparse_tensor.pointers %a, %c : tensor<128xf64> to memref<?xindex>
  return %0 : memref<?xindex>
}

@ -20,9 +20,9 @@ func @sparse_pointers(%arg0: !SparseTensor) -> memref<?xindex> {
// CHECK: %[[T:.*]] = call @sparsePointers32(%[[A]], %[[C]]) : (!llvm.ptr<i8>, index) -> memref<?xi32>
// CHECK: return %[[T]] : memref<?xi32>
func @sparse_pointers32(%arg0: !SparseTensor) -> memref<?xi32> {
  %a = linalg.sparse_tensor %arg0 : !SparseTensor to tensor<128xf64>
  %a = sparse_tensor.fromPtr %arg0 : !SparseTensor to tensor<128xf64>
  %c = constant 1 : index
  %0 = linalg.sparse_pointers %a, %c : tensor<128xf64> to memref<?xi32>
  %0 = sparse_tensor.pointers %a, %c : tensor<128xf64> to memref<?xi32>
  return %0 : memref<?xi32>
}

@ -32,9 +32,9 @@ func @sparse_pointers32(%arg0: !SparseTensor) -> memref<?xi32> {
// CHECK: %[[T:.*]] = call @sparseIndices64(%[[A]], %[[C]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
// CHECK: return %[[T]] : memref<?xindex>
func @sparse_indices(%arg0: !SparseTensor) -> memref<?xindex> {
  %a = linalg.sparse_tensor %arg0 : !SparseTensor to tensor<128xf64>
  %a = sparse_tensor.fromPtr %arg0 : !SparseTensor to tensor<128xf64>
  %c = constant 1 : index
  %0 = linalg.sparse_indices %a, %c : tensor<128xf64> to memref<?xindex>
  %0 = sparse_tensor.indices %a, %c : tensor<128xf64> to memref<?xindex>
  return %0 : memref<?xindex>
}

@ -44,9 +44,9 @@ func @sparse_indices(%arg0: !SparseTensor) -> memref<?xindex> {
// CHECK: %[[T:.*]] = call @sparseIndices32(%[[A]], %[[C]]) : (!llvm.ptr<i8>, index) -> memref<?xi32>
// CHECK: return %[[T]] : memref<?xi32>
func @sparse_indices32(%arg0: !SparseTensor) -> memref<?xi32> {
  %a = linalg.sparse_tensor %arg0 : !SparseTensor to tensor<128xf64>
  %a = sparse_tensor.fromPtr %arg0 : !SparseTensor to tensor<128xf64>
  %c = constant 1 : index
  %0 = linalg.sparse_indices %a, %c : tensor<128xf64> to memref<?xi32>
  %0 = sparse_tensor.indices %a, %c : tensor<128xf64> to memref<?xi32>
  return %0 : memref<?xi32>
}

@ -55,8 +55,8 @@ func @sparse_indices32(%arg0: !SparseTensor) -> memref<?xi32> {
// CHECK: %[[T:.*]] = call @sparseValuesF64(%[[A]]) : (!llvm.ptr<i8>) -> memref<?xf64>
// CHECK: return %[[T]] : memref<?xf64>
func @sparse_valuesf64(%arg0: !SparseTensor) -> memref<?xf64> {
  %a = linalg.sparse_tensor %arg0 : !SparseTensor to tensor<128xf64>
  %0 = linalg.sparse_values %a : tensor<128xf64> to memref<?xf64>
  %a = sparse_tensor.fromPtr %arg0 : !SparseTensor to tensor<128xf64>
  %0 = sparse_tensor.values %a : tensor<128xf64> to memref<?xf64>
  return %0 : memref<?xf64>
}

@ -65,7 +65,7 @@ func @sparse_valuesf64(%arg0: !SparseTensor) -> memref<?xf64> {
// CHECK: %[[T:.*]] = call @sparseValuesF32(%[[A]]) : (!llvm.ptr<i8>) -> memref<?xf32>
// CHECK: return %[[T]] : memref<?xf32>
func @sparse_valuesf32(%arg0: !SparseTensor) -> memref<?xf32> {
  %a = linalg.sparse_tensor %arg0 : !SparseTensor to tensor<128xf32>
  %0 = linalg.sparse_values %a : tensor<128xf32> to memref<?xf32>
  %a = sparse_tensor.fromPtr %arg0 : !SparseTensor to tensor<128xf32>
  %0 = sparse_tensor.values %a : tensor<128xf32> to memref<?xf32>
  return %0 : memref<?xf32>
}
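Taken together, these conversion tests pin down the lowering contract for the renamed ops: each sparse_tensor query lowers to a call into the sparse runtime support library, with the callee chosen by the requested index or element width (sparsePointers64 vs. sparsePointers32, sparseValuesF64 vs. sparseValuesF32, and so on). A hedged before/after sketch for one pointers query, reusing only names that appear in the tests above:

  // Before the conversion patterns run:
  %0 = sparse_tensor.pointers %a, %c : tensor<128xf64> to memref<?xindex>
  // After conversion (sketch): the index-width overload of the support library.
  %0 = call @sparsePointers64(%arg0, %c) : (!llvm.ptr<i8>, index) -> memref<?xindex>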
@ -4,10 +4,10 @@

// CHECK-LABEL: func @sparse_tensor(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>)
// CHECK: %[[T:.*]] = linalg.sparse_tensor %[[A]] : !llvm.ptr<i8> to tensor<128xf64>
// CHECK: %[[T:.*]] = sparse_tensor.fromPtr %[[A]] : !llvm.ptr<i8> to tensor<128xf64>
// CHECK: return %[[T]] : tensor<128xf64>
func @sparse_tensor(%arg0: !SparseTensor) -> tensor<128xf64> {
  %0 = linalg.sparse_tensor %arg0 : !SparseTensor to tensor<128xf64>
  %0 = sparse_tensor.fromPtr %arg0 : !SparseTensor to tensor<128xf64>
  return %0 : tensor<128xf64>
}

@ -16,11 +16,11 @@ func @sparse_tensor(%arg0: !SparseTensor) -> tensor<128xf64> {
// CHECK-LABEL: func @sparse_pointers(
// CHECK-SAME: %[[A:.*]]: tensor<128xf64>)
// CHECK: %[[C:.*]] = constant 1 : index
// CHECK: %[[T:.*]] = linalg.sparse_pointers %[[A]], %[[C]] : tensor<128xf64> to memref<?xindex>
// CHECK: %[[T:.*]] = sparse_tensor.pointers %[[A]], %[[C]] : tensor<128xf64> to memref<?xindex>
// CHECK: return %[[T]] : memref<?xindex>
func @sparse_pointers(%arg0: tensor<128xf64>) -> memref<?xindex> {
  %c = constant 1 : index
  %0 = linalg.sparse_pointers %arg0, %c : tensor<128xf64> to memref<?xindex>
  %0 = sparse_tensor.pointers %arg0, %c : tensor<128xf64> to memref<?xindex>
  return %0 : memref<?xindex>
}

@ -29,11 +29,11 @@ func @sparse_pointers(%arg0: tensor<128xf64>) -> memref<?xindex> {
// CHECK-LABEL: func @sparse_indices(
// CHECK-SAME: %[[A:.*]]: tensor<128xf64>)
// CHECK: %[[C:.*]] = constant 1 : index
// CHECK: %[[T:.*]] = linalg.sparse_indices %[[A]], %[[C]] : tensor<128xf64> to memref<?xindex>
// CHECK: %[[T:.*]] = sparse_tensor.indices %[[A]], %[[C]] : tensor<128xf64> to memref<?xindex>
// CHECK: return %[[T]] : memref<?xindex>
func @sparse_indices(%arg0: tensor<128xf64>) -> memref<?xindex> {
  %c = constant 1 : index
  %0 = linalg.sparse_indices %arg0, %c : tensor<128xf64> to memref<?xindex>
  %0 = sparse_tensor.indices %arg0, %c : tensor<128xf64> to memref<?xindex>
  return %0 : memref<?xindex>
}

@ -41,9 +41,9 @@ func @sparse_indices(%arg0: tensor<128xf64>) -> memref<?xindex> {

// CHECK-LABEL: func @sparse_values(
// CHECK-SAME: %[[A:.*]]: tensor<128xf64>)
// CHECK: %[[T:.*]] = linalg.sparse_values %[[A]] : tensor<128xf64> to memref<?xf64>
// CHECK: %[[T:.*]] = sparse_tensor.values %[[A]] : tensor<128xf64> to memref<?xf64>
// CHECK: return %[[T]] : memref<?xf64>
func @sparse_values(%arg0: tensor<128xf64>) -> memref<?xf64> {
  %0 = linalg.sparse_values %arg0 : tensor<128xf64> to memref<?xf64>
  %0 = sparse_tensor.values %arg0 : tensor<128xf64> to memref<?xf64>
  return %0 : memref<?xf64>
}
@ -56,7 +56,7 @@ module {
func @kernel_matvec(%argA: !SparseTensor,
                    %argb: tensor<?xi32>,
                    %argx: tensor<?xi32>) -> tensor<?xi32> {
  %arga = linalg.sparse_tensor %argA : !SparseTensor to tensor<?x?xi32>
  %arga = sparse_tensor.fromPtr %argA : !SparseTensor to tensor<?x?xi32>
  %0 = linalg.generic #matvec
    ins(%arga, %argb: tensor<?x?xi32>, tensor<?xi32>)
    outs(%argx: tensor<?xi32>) {

@ -47,7 +47,7 @@ module {
                     %arga: tensor<?x?xf32>,
                     %argb: tensor<?x?xf32>,
                     %argx: tensor<?x?xf32>) -> tensor<?x?xf32> {
  %args = linalg.sparse_tensor %argS : !SparseTensor to tensor<?x?xf32>
  %args = sparse_tensor.fromPtr %argS : !SparseTensor to tensor<?x?xf32>
  %0 = linalg.generic #trait_sampled_dense_dense
    ins(%args, %arga, %argb: tensor<?x?xf32>, tensor<?x?xf32>, tensor<?x?xf32>)
    outs(%argx: tensor<?x?xf32>) {

@ -41,7 +41,7 @@ module {
//
func @kernel_sum_reduce(%argA: !SparseTensor,
                        %argx: tensor<f64>) -> tensor<f64> {
  %arga = linalg.sparse_tensor %argA : !SparseTensor to tensor<?x?xf64>
  %arga = sparse_tensor.fromPtr %argA : !SparseTensor to tensor<?x?xf64>
  %0 = linalg.generic #trait_sum_reduce
    ins(%arga: tensor<?x?xf64>)
    outs(%argx: tensor<f64>) {
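In all three integration kernels only the prologue changes: sparse_tensor.fromPtr replaces linalg.sparse_tensor, and the linalg.generic bodies are untouched. For intuition only, a hand-written scalar reduction over the stored values of such a kernel's operand (a hypothetical sketch, not part of this revision) could be spelled:

  %vals = sparse_tensor.values %arga : tensor<?x?xf64> to memref<?xf64>
  %c0 = constant 0 : index
  %c1 = constant 1 : index
  %zero = constant 0.0 : f64
  %n = memref.dim %vals, %c0 : memref<?xf64>
  // Sum every stored entry; implicit zeros contribute nothing.
  %sum = scf.for %i = %c0 to %n step %c1 iter_args(%acc = %zero) -> (f64) {
    %v = memref.load %vals[%i] : memref<?xf64>
    %t = addf %acc, %v : f64
    scf.yield %t : f64
  }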
@ -64,6 +64,8 @@ add_mlir_library(MLIRTestTransforms
  MLIRSCF
  MLIRSCFTransforms
  MLIRStandardOpsTransforms
  MLIRSparseTensor
  MLIRSparseTensorTransforms
  MLIRTargetLLVMIRExport
  MLIRTestDialect
  MLIRTransformUtils
@ -8,6 +8,8 @@

#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/Dialect/Linalg/Transforms/Transforms.h"
#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
#include "mlir/Dialect/SparseTensor/Transforms/Transforms.h"
#include "mlir/Dialect/Vector/VectorOps.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"

@ -51,7 +53,8 @@ struct TestSparsification
  /// Registers all dialects required by testing.
  void getDependentDialects(DialectRegistry &registry) const override {
    registry.insert<memref::MemRefDialect, scf::SCFDialect,
                    vector::VectorDialect, LLVM::LLVMDialect>();
                    sparse_tensor::SparseTensorDialect, vector::VectorDialect,
                    LLVM::LLVMDialect>();
  }

  /// Returns parallelization strategy given on command line.

@ -114,12 +117,12 @@ struct TestSparsification
    if (lower) {
      RewritePatternSet conversionPatterns(ctx);
      ConversionTarget target(*ctx);
      target.addIllegalOp<linalg::SparseTensorFromPointerOp,
                          linalg::SparseTensorToPointersMemRefOp,
                          linalg::SparseTensorToIndicesMemRefOp,
                          linalg::SparseTensorToValuesMemRefOp>();
      target.addIllegalOp<
          sparse_tensor::FromPointerOp, sparse_tensor::ToPointersOp,
          sparse_tensor::ToIndicesOp, sparse_tensor::ToValuesOp>();
      target.addLegalOp<CallOp>();
      linalg::populateSparsificationConversionPatterns(conversionPatterns);
      sparse_tensor::populateSparsificationConversionPatterns(
          conversionPatterns);
      if (failed(applyPartialConversion(getOperation(), target,
                                        std::move(conversionPatterns))))
        signalPassFailure();
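With ownership moved into the sparse_tensor dialect, the test pass now marks the dialect's four ops illegal and installs sparse_tensor::populateSparsificationConversionPatterns in place of the linalg variant. The observable effect of the partial conversion is exactly the rewrite checked by the conversion tests earlier; as a sketch:

  // Illegal under this pass configuration:
  %v = sparse_tensor.values %a : tensor<128xf32> to memref<?xf32>
  // Legal replacement produced by the conversion patterns:
  %v = call @sparseValuesF32(%arg0) : (!llvm.ptr<i8>) -> memref<?xf32>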
@ -22,6 +22,7 @@
// CHECK-NEXT: scf
// CHECK-NEXT: sdbm
// CHECK-NEXT: shape
// CHECK-NEXT: sparse_tensor
// CHECK-NEXT: spv
// CHECK-NEXT: std
// CHECK-NEXT: tensor