[mlir][sparse] integration test for "simply dynamic" sparse output tensors
Reviewed By: gussmith23

Differential Revision: https://reviews.llvm.org/D104583
This commit is contained in:
parent cadfaf2df4
commit b13cbf537f

@@ -0,0 +1,77 @@
// RUN: mlir-opt %s \
// RUN:   --sparsification --sparse-tensor-conversion \
// RUN:   --convert-linalg-to-loops --convert-vector-to-scf --convert-scf-to-std \
// RUN:   --func-bufferize --tensor-constant-bufferize --tensor-bufferize \
// RUN:   --std-bufferize --finalizing-bufferize \
// RUN:   --convert-vector-to-llvm --convert-std-to-llvm | \
// RUN: TENSOR0="%mlir_integration_test_dir/data/test.mtx" \
// RUN: mlir-cpu-runner \
// RUN:   -e entry -entry-point-result=void \
// RUN:   -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s
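//
// Roughly, the pipeline above first runs sparsification to turn the
// annotated linalg kernel into sparse loops, converts sparse tensor types
// and ops into calls into the sparse runtime support, bufferizes and
// lowers the rest to LLVM, and finally JIT-executes the module with
// mlir-cpu-runner, with TENSOR0 pointing the test at its input matrix file.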

!Filename = type !llvm.ptr<i8>

#DCSR = #sparse_tensor.encoding<{
  dimLevelType = [ "compressed", "compressed" ],
  dimOrdering = affine_map<(i,j) -> (i,j)>
}>
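//
// Both dimensions are "compressed" and the dimension ordering is the
// identity, i.e. a doubly compressed sparse row (DCSR) format that only
// stores nonempty rows and their nonzero entries.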

#eltwise_mult = {
  indexing_maps = [
    affine_map<(i,j) -> (i,j)>  // X (out)
  ],
  iterator_types = ["parallel", "parallel"],
  doc = "X(i,j) *= X(i,j)"
}
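//
// The trait has no "ins" operands: the single indexing map describes the
// output X itself, which is read and updated in place. The stored values
// change, but the sparsity pattern does not, which is what makes this
// output "simply dynamic".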

//
// Integration test that lowers a kernel annotated as sparse to
// actual sparse code, initializes a matching sparse storage scheme
// from file, and runs the resulting code with the JIT compiler.
//
module {
  //
  // A kernel that multiplies a sparse matrix A with itself
  // in an element-wise fashion. In this operation, we have
  // a sparse tensor as output, but although the values of the
  // sparse tensor change, its nonzero structure remains the same.
  //
  func @kernel_eltwise_mult(%argx: tensor<?x?xf64, #DCSR> {linalg.inplaceable = true})
      -> tensor<?x?xf64, #DCSR> {
    %0 = linalg.generic #eltwise_mult
      outs(%argx: tensor<?x?xf64, #DCSR>) {
      ^bb(%x: f64):
        %0 = mulf %x, %x : f64
        linalg.yield %0 : f64
    } -> tensor<?x?xf64, #DCSR>
    return %0 : tensor<?x?xf64, #DCSR>
  }
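  //
  // Since X is marked linalg.inplaceable and its nonzero structure is
  // preserved, the sparsified code can simply sweep over the stored
  // values of X and square each one in place, roughly
  // values[k] = values[k] * values[k] for every stored entry k.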

  func private @getTensorFilename(index) -> (!Filename)
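  // Resolved at JIT time from the shared runner utils library; given
  // index 0 it is expected to return the path supplied through the
  // TENSOR0 environment variable in the RUN lines above.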

  //
  // Main driver that reads matrix from file and calls the sparse kernel.
  //
  func @entry() {
    %d0 = constant 0.0 : f64
    %c0 = constant 0 : index

    // Read the sparse matrix from file, construct sparse storage.
    %fileName = call @getTensorFilename(%c0) : (index) -> (!Filename)
    %x = sparse_tensor.new %fileName : !llvm.ptr<i8> to tensor<?x?xf64, #DCSR>

    // Call kernel.
    %0 = call @kernel_eltwise_mult(%x) : (tensor<?x?xf64, #DCSR>) -> tensor<?x?xf64, #DCSR>

    // Print the result for verification.
    //
    // CHECK: ( 1, 1.96, 4, 6.25, 9, 16.81, 16, 27.04, 25 )
    //
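    // The printed values are the element-wise squares of the input's
    // nonzero values (for example 1.96 = 1.4 * 1.4 and 6.25 = 2.5 * 2.5),
    // so only the values changed while the sparsity pattern stayed intact.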
    %m = sparse_tensor.values %0 : tensor<?x?xf64, #DCSR> to memref<?xf64>
    %v = vector.transfer_read %m[%c0], %d0: memref<?xf64>, vector<9xf64>
    vector.print %v : vector<9xf64>

    return
  }
}