[mlir][sparse] Add more integration tests for sparse_tensor.unary

Previously, the sparse_tensor.unary integration tests did not contain cases that use `linalg.index` (previously unsupported). This commit adds test cases that exercise the `linalg.index` operation.

Reviewed By: aartbik

Differential Revision: https://reviews.llvm.org/D128460
parent 03859994b3
commit 057e33ef36
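For context, here is a minimal sketch (not taken verbatim from this commit) of the pattern the new tests exercise: `linalg.index` recovers the current iteration index inside a `linalg.generic` body, and `sparse_tensor.unary` applies its `present` region only to stored entries, while an empty `absent` region leaves missing entries absent. The trait `#trait` and the operand names below are placeholders, assumed to be defined as in the tests that follow.

// Sketch only: #trait, %arga, and %xv are assumed definitions; this mirrors
// the shape of the new @vector_magnify kernel added below.
%0 = linalg.generic #trait
    ins(%arga: tensor<?xf64, #SparseVector>)
    outs(%xv: tensor<?xf64, #SparseVector>) {
  ^bb(%a: f64, %x: f64):
    %i = linalg.index 0 : index              // index of the current element
    %u = sparse_tensor.unary %a : f64 to f64
      present={
        ^bb0(%v: f64):
          %iw = arith.index_cast %i : index to i64
          %f = arith.uitofp %iw : i64 to f64
          %r = arith.mulf %v, %f : f64       // scale the stored value by i
          sparse_tensor.yield %r : f64
      }
      absent={}                              // absent entries stay absent
    linalg.yield %u : f64
} -> tensor<?xf64, #SparseVector>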
@@ -71,6 +71,30 @@ module {
     return %0 : tensor<?xf64, #SparseVector>
   }
 
+  // Performs B[i] = i * A[i].
+  func.func @vector_magnify(%arga: tensor<?xf64, #SparseVector>) -> tensor<?xf64, #SparseVector> {
+    %c = arith.constant 0 : index
+    %d = tensor.dim %arga, %c : tensor<?xf64, #SparseVector>
+    %xv = bufferization.alloc_tensor(%d) : tensor<?xf64, #SparseVector>
+    %0 = linalg.generic #trait_vec_scale
+       ins(%arga: tensor<?xf64, #SparseVector>)
+        outs(%xv: tensor<?xf64, #SparseVector>) {
+        ^bb(%a: f64, %x: f64):
+          %idx = linalg.index 0 : index
+          %1 = sparse_tensor.unary %a : f64 to f64
+            present={
+              ^bb0(%x0: f64):
+                %tmp = arith.index_cast %idx : index to i64
+                %idxf = arith.uitofp %tmp : i64 to f64
+                %ret = arith.mulf %x0, %idxf : f64
+                sparse_tensor.yield %ret : f64
+            }
+            absent={}
+          linalg.yield %1 : f64
+    } -> tensor<?xf64, #SparseVector>
+    return %0 : tensor<?xf64, #SparseVector>
+  }
+
   // Clips values to the range [3, 7].
   func.func @matrix_clip(%argx: tensor<?x?xf64, #DCSR>) -> tensor<?x?xf64, #DCSR> {
     %c0 = arith.constant 0 : index
@@ -99,6 +123,40 @@ module {
     return %0 : tensor<?x?xf64, #DCSR>
   }
 
+  // Slices the matrix and only keeps the values in the lower-right corner of the
+  // original matrix (i.e., A[d0/2 ..][d1/2 ..]); all other values are set to 99.
+  func.func @matrix_slice(%argx: tensor<?x?xf64, #DCSR>) -> tensor<?x?xf64, #DCSR> {
+    %c0 = arith.constant 0 : index
+    %c1 = arith.constant 1 : index
+    %d0 = tensor.dim %argx, %c0 : tensor<?x?xf64, #DCSR>
+    %d1 = tensor.dim %argx, %c1 : tensor<?x?xf64, #DCSR>
+    %xv = bufferization.alloc_tensor(%d0, %d1) : tensor<?x?xf64, #DCSR>
+    %0 = linalg.generic #trait_mat_scale
+       ins(%argx: tensor<?x?xf64, #DCSR>)
+        outs(%xv: tensor<?x?xf64, #DCSR>) {
+        ^bb(%a: f64, %x: f64):
+          %row = linalg.index 0 : index
+          %col = linalg.index 1 : index
+          %1 = sparse_tensor.unary %a : f64 to f64
+            present={
+              ^bb0(%x0: f64):
+                %v = arith.constant 99.0 : f64
+                %two = arith.constant 2 : index
+                %r = arith.muli %two, %row : index
+                %c = arith.muli %two, %col : index
+                %cmp1 = arith.cmpi "ult", %r, %d0 : index
+                %tmp = arith.select %cmp1, %v, %x0 : f64
+                %cmp2 = arith.cmpi "ult", %c, %d1 : index
+                %result = arith.select %cmp2, %v, %tmp : f64
+                sparse_tensor.yield %result : f64
+            }
+            absent={}
+          linalg.yield %1 : f64
+    } -> tensor<?x?xf64, #DCSR>
+    return %0 : tensor<?x?xf64, #DCSR>
+  }
+
+
   // Dumps a sparse vector of type f64.
   func.func @dump_vec_f64(%arg0: tensor<?xf64, #SparseVector>) {
     // Dump the values array to verify only sparse contents are stored.
@@ -171,10 +229,14 @@ module {
            : (tensor<?xf64, #SparseVector>) -> tensor<?xi32, #SparseVector>
     %1 = call @vector_negation(%sv1)
            : (tensor<?xf64, #SparseVector>) -> tensor<?xf64, #SparseVector>
+    %2 = call @vector_magnify(%sv1)
+           : (tensor<?xf64, #SparseVector>) -> tensor<?xf64, #SparseVector>
 
     // Call sparse matrix kernels.
-    %2 = call @matrix_clip(%sm1)
+    %3 = call @matrix_clip(%sm1)
           : (tensor<?x?xf64, #DCSR>) -> tensor<?x?xf64, #DCSR>
+    %4 = call @matrix_slice(%sm1)
+          : (tensor<?x?xf64, #DCSR>) -> tensor<?x?xf64, #DCSR>
 
     //
@@ -186,20 +248,28 @@ module {
     // CHECK-NEXT: ( 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0 )
     // CHECK-NEXT: ( -1, 1, 1, -2, 1, 1, 1, 1, 1, 1, 1, -3, 1, 1, 1, 1, 1, -4, 1, 1, -5, -6, 1, 1, 1, 1, 1, 1, -7, -8, 1, -9 )
     // CHECK-NEXT: ( -1, 1, 1, -2, 1, 1, 1, 1, 1, 1, 1, -3, 1, 1, 1, 1, 1, -4, 1, 1, -5, -6, 1, 1, 1, 1, 1, 1, -7, -8, 1, -9 )
+    // CHECK-NEXT: ( 0, 6, 33, 68, 100, 126, 196, 232, 279, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 )
+    // CHECK-NEXT: ( 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 33, 0, 0, 0, 0, 0, 68, 0, 0, 100, 126, 0, 0, 0, 0, 0, 0, 196, 232, 0, 279 )
     // CHECK-NEXT: ( 3, 3, 3, 4, 5, 6, 7, 7, 7, -1, -1, -1, -1, -1, -1, -1 )
     // CHECK-NEXT: ( ( 3, 3, 0, 0, 0, 0, 0, 0 ), ( 0, 0, 0, 0, 0, 0, 0, 3 ), ( 0, 0, 4, 0, 5, 0, 0, 6 ), ( 7, 0, 7, 7, 0, 0, 0, 0 ) )
+    // CHECK-NEXT: ( 99, 99, 99, 99, 5, 6, 99, 99, 99, -1, -1, -1, -1, -1, -1, -1 )
+    // CHECK-NEXT: ( ( 99, 99, 0, 0, 0, 0, 0, 0 ), ( 0, 0, 0, 0, 0, 0, 0, 99 ), ( 0, 0, 99, 0, 5, 0, 0, 6 ), ( 99, 0, 99, 99, 0, 0, 0, 0 ) )
     //
     call @dump_vec_f64(%sv1) : (tensor<?xf64, #SparseVector>) -> ()
     call @dump_vec_i32(%0) : (tensor<?xi32, #SparseVector>) -> ()
     call @dump_vec_f64(%1) : (tensor<?xf64, #SparseVector>) -> ()
-    call @dump_mat(%2) : (tensor<?x?xf64, #DCSR>) -> ()
+    call @dump_vec_f64(%2) : (tensor<?xf64, #SparseVector>) -> ()
+    call @dump_mat(%3) : (tensor<?x?xf64, #DCSR>) -> ()
+    call @dump_mat(%4) : (tensor<?x?xf64, #DCSR>) -> ()
 
     // Release the resources.
     sparse_tensor.release %sv1 : tensor<?xf64, #SparseVector>
     sparse_tensor.release %sm1 : tensor<?x?xf64, #DCSR>
     sparse_tensor.release %0 : tensor<?xi32, #SparseVector>
     sparse_tensor.release %1 : tensor<?xf64, #SparseVector>
-    sparse_tensor.release %2 : tensor<?x?xf64, #DCSR>
+    sparse_tensor.release %2 : tensor<?xf64, #SparseVector>
+    sparse_tensor.release %3 : tensor<?x?xf64, #DCSR>
+    sparse_tensor.release %4 : tensor<?x?xf64, #DCSR>
     return
   }
 }
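As a usage note: these SparseTensor integration tests are driven by lit RUN lines at the top of the file, outside the hunks shown here. A representative invocation is sketched below; the exact pass pipeline name and lit substitutions (e.g., the shared-library path) vary across LLVM revisions, so treat this as an assumption rather than the file's literal header.

// RUN (representative, not verbatim from this file): lower the sparse kernels,
// run them via the CPU runner, and verify output against the CHECK lines above.
// RUN: mlir-opt %s --sparse-compiler | \
// RUN: mlir-cpu-runner -e entry -entry-point-result=void \
// RUN:   -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlib_ext | \
// RUN: FileCheck %s

FileCheck matches the program's printed vectors and matrices against the // CHECK-NEXT lines added in this commit.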