// llvm-project/mlir/test/Dialect/SparseTensor/invalid.mlir
// RUN: mlir-opt %s -split-input-file -verify-diagnostics
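// Each case below is separated by `// -----`; -split-input-file parses every
// chunk independently, and -verify-diagnostics checks that exactly the
// diagnostics annotated with expected-error are emitted.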
func.func @invalid_new_dense(%arg0: !llvm.ptr<i8>) -> tensor<32xf32> {
  // expected-error@+1 {{expected a sparse tensor result}}
  %0 = sparse_tensor.new %arg0 : !llvm.ptr<i8> to tensor<32xf32>
  return %0 : tensor<32xf32>
}
// -----
func.func @invalid_release_dense(%arg0: tensor<4xi32>) {
  // expected-error@+1 {{expected a sparse tensor to release}}
  sparse_tensor.release %arg0 : tensor<4xi32>
  return
}
// -----
func.func @invalid_init_dense(%arg0: index, %arg1: index) -> tensor<?x?xf32> {
  // expected-error@+1 {{expected a sparse tensor result}}
  %0 = sparse_tensor.init [%arg0, %arg1] : tensor<?x?xf32>
  return %0 : tensor<?x?xf32>
}
// -----
#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}>
func.func @invalid_init_rank(%arg0: index) -> tensor<?xf32, #SparseVector> {
  // expected-error@+1 {{unexpected mismatch between tensor rank and sizes: 1 vs. 2}}
  %0 = sparse_tensor.init [%arg0, %arg0] : tensor<?xf32, #SparseVector>
  return %0 : tensor<?xf32, #SparseVector>
}
// -----
#SparseMatrix = #sparse_tensor.encoding<{dimLevelType = ["compressed", "compressed"]}>
func.func @invalid_init_size() -> tensor<?x10xf32, #SparseMatrix> {
  %c10 = arith.constant 10 : index
  %c20 = arith.constant 20 : index
  // expected-error@+1 {{unexpected mismatch with static dimension size 10}}
  %0 = sparse_tensor.init [%c10, %c20] : tensor<?x10xf32, #SparseMatrix>
  return %0 : tensor<?x10xf32, #SparseMatrix>
}
// -----
func.func @invalid_pointers_dense(%arg0: tensor<128xf64>) -> memref<?xindex> {
  %c = arith.constant 0 : index
  // expected-error@+1 {{expected a sparse tensor to get pointers}}
  %0 = sparse_tensor.pointers %arg0, %c : tensor<128xf64> to memref<?xindex>
  return %0 : memref<?xindex>
}
// -----
func.func @invalid_pointers_unranked(%arg0: tensor<*xf64>) -> memref<?xindex> {
  %c = arith.constant 0 : index
  // expected-error@+1 {{expected a sparse tensor to get pointers}}
  %0 = sparse_tensor.pointers %arg0, %c : tensor<*xf64> to memref<?xindex>
  return %0 : memref<?xindex>
}
// -----
#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"], pointerBitWidth=32}>
func.func @mismatch_pointers_types(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xindex> {
  %c = arith.constant 0 : index
  // expected-error@+1 {{unexpected type for pointers}}
  %0 = sparse_tensor.pointers %arg0, %c : tensor<128xf64, #SparseVector> to memref<?xindex>
  return %0 : memref<?xindex>
}
// -----
#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}>
func.func @pointers_oob(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xindex> {
  %c = arith.constant 1 : index
  // expected-error@+1 {{requested pointers dimension out of bounds}}
  %0 = sparse_tensor.pointers %arg0, %c : tensor<128xf64, #SparseVector> to memref<?xindex>
  return %0 : memref<?xindex>
}
// -----
func.func @invalid_indices_dense(%arg0: tensor<10x10xi32>) -> memref<?xindex> {
  %c = arith.constant 1 : index
  // expected-error@+1 {{expected a sparse tensor to get indices}}
  %0 = sparse_tensor.indices %arg0, %c : tensor<10x10xi32> to memref<?xindex>
  return %0 : memref<?xindex>
}
// -----
func.func @invalid_indices_unranked(%arg0: tensor<*xf64>) -> memref<?xindex> {
  %c = arith.constant 0 : index
  // expected-error@+1 {{expected a sparse tensor to get indices}}
  %0 = sparse_tensor.indices %arg0, %c : tensor<*xf64> to memref<?xindex>
  return %0 : memref<?xindex>
}
// -----
#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}>
func.func @mismatch_indices_types(%arg0: tensor<?xf64, #SparseVector>) -> memref<?xi32> {
  %c = arith.constant 0 : index
  // expected-error@+1 {{unexpected type for indices}}
  %0 = sparse_tensor.indices %arg0, %c : tensor<?xf64, #SparseVector> to memref<?xi32>
  return %0 : memref<?xi32>
}
// -----
#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}>
func.func @indices_oob(%arg0: tensor<128xf64, #SparseVector>) -> memref<?xindex> {
  %c = arith.constant 1 : index
  // expected-error@+1 {{requested indices dimension out of bounds}}
  %0 = sparse_tensor.indices %arg0, %c : tensor<128xf64, #SparseVector> to memref<?xindex>
  return %0 : memref<?xindex>
}
// -----
func.func @invalid_values_dense(%arg0: tensor<1024xf32>) -> memref<?xf32> {
  // expected-error@+1 {{expected a sparse tensor to get values}}
  %0 = sparse_tensor.values %arg0 : tensor<1024xf32> to memref<?xf32>
  return %0 : memref<?xf32>
}
// -----
#SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}>
func.func @mismatch_values_types(%arg0: tensor<?xf64, #SparseVector>) -> memref<?xf32> {
  // expected-error@+1 {{unexpected mismatch in element types}}
  %0 = sparse_tensor.values %arg0 : tensor<?xf64, #SparseVector> to memref<?xf32>
  return %0 : memref<?xf32>
}
// -----
func.func @sparse_unannotated_load(%arg0: tensor<16x32xf64>) -> tensor<16x32xf64> {
  // expected-error@+1 {{expected a sparse tensor to materialize}}
  %0 = sparse_tensor.load %arg0 : tensor<16x32xf64>
  return %0 : tensor<16x32xf64>
}
// -----
func.func @sparse_unannotated_insert(%arg0: tensor<128xf64>, %arg1: memref<?xindex>, %arg2: f64) {
  // expected-error@+1 {{expected a sparse tensor for insertion}}
  sparse_tensor.lex_insert %arg0, %arg1, %arg2 : tensor<128xf64>, memref<?xindex>, f64
  return
}
// -----
func.func @sparse_unannotated_expansion(%arg0: tensor<128xf64>) {
  // expected-error@+1 {{expected a sparse tensor for expansion}}
  %values, %filled, %added, %count = sparse_tensor.expand %arg0
    : tensor<128xf64> to memref<?xf64>, memref<?xi1>, memref<?xindex>, index
  return
}
// -----
func.func @sparse_unannotated_compression(%arg0: tensor<128xf64>, %arg1: memref<?xindex>,
                                          %arg2: memref<?xf64>, %arg3: memref<?xi1>,
                                          %arg4: memref<?xindex>, %arg5: index) {
  // expected-error@+1 {{expected a sparse tensor for compression}}
  sparse_tensor.compress %arg0, %arg1, %arg2, %arg3, %arg4, %arg5
    : tensor<128xf64>, memref<?xindex>, memref<?xf64>, memref<?xi1>, memref<?xindex>, index
}
// -----
func.func @sparse_convert_unranked(%arg0: tensor<*xf32>) -> tensor<10xf32> {
  // expected-error@+1 {{unexpected type in convert}}
  %0 = sparse_tensor.convert %arg0 : tensor<*xf32> to tensor<10xf32>
  return %0 : tensor<10xf32>
}
// -----
#DCSR = #sparse_tensor.encoding<{dimLevelType = ["compressed", "compressed"]}>
func.func @sparse_convert_rank_mismatch(%arg0: tensor<10x10xf64, #DCSR>) -> tensor<?xf64> {
  // expected-error@+1 {{unexpected conversion mismatch in rank}}
  %0 = sparse_tensor.convert %arg0 : tensor<10x10xf64, #DCSR> to tensor<?xf64>
  return %0 : tensor<?xf64>
}
// -----
#CSR = #sparse_tensor.encoding<{dimLevelType = ["dense", "compressed"]}>
func.func @sparse_convert_dim_mismatch(%arg0: tensor<10x?xf32>) -> tensor<10x10xf32, #CSR> {
  // expected-error@+1 {{unexpected conversion mismatch in dimension 1}}
  %0 = sparse_tensor.convert %arg0 : tensor<10x?xf32> to tensor<10x10xf32, #CSR>
  return %0 : tensor<10x10xf32, #CSR>
}
// -----
func.func @invalid_out_dense(%arg0: tensor<10xf64>, %arg1: !llvm.ptr<i8>) {
  // expected-error@+1 {{expected a sparse tensor for output}}
  sparse_tensor.out %arg0, %arg1 : tensor<10xf64>, !llvm.ptr<i8>
  return
}
// -----
func.func @invalid_binary_num_args_mismatch_overlap(%arg0: f64, %arg1: f64) -> f64 {
  // expected-error@+1 {{overlap region must have exactly 2 arguments}}
  %r = sparse_tensor.binary %arg0, %arg1 : f64, f64 to f64
    overlap={
      ^bb0(%x: f64):
        sparse_tensor.yield %x : f64
    }
    left={}
    right={}
  return %r : f64
}
// -----
func.func @invalid_binary_num_args_mismatch_right(%arg0: f64, %arg1: f64) -> f64 {
  // expected-error@+1 {{right region must have exactly 1 arguments}}
  %r = sparse_tensor.binary %arg0, %arg1 : f64, f64 to f64
    overlap={}
    left={}
    right={
      ^bb0(%x: f64, %y: f64):
        sparse_tensor.yield %y : f64
    }
  return %r : f64
}
// -----
func.func @invalid_binary_argtype_mismatch(%arg0: f64, %arg1: f64) -> f64 {
  // expected-error@+1 {{overlap region argument 2 type mismatch}}
  %r = sparse_tensor.binary %arg0, %arg1 : f64, f64 to f64
    overlap={
      ^bb0(%x: f64, %y: f32):
        sparse_tensor.yield %x : f64
    }
    left=identity
    right=identity
  return %r : f64
}
// -----
func.func @invalid_binary_wrong_return_type(%arg0: f64, %arg1: f64) -> f64 {
  // expected-error@+1 {{left region yield type mismatch}}
  %0 = sparse_tensor.binary %arg0, %arg1 : f64, f64 to f64
    overlap={}
    left={
      ^bb0(%x: f64):
        %1 = arith.constant 0.0 : f32
        sparse_tensor.yield %1 : f32
    }
    right=identity
  return %0 : f64
}
// -----
func.func @invalid_binary_wrong_identity_type(%arg0: i64, %arg1: f64) -> f64 {
  // expected-error@+1 {{left=identity requires first argument to have the same type as the output}}
  %0 = sparse_tensor.binary %arg0, %arg1 : i64, f64 to f64
    overlap={}
    left=identity
    right=identity
  return %0 : f64
}
// -----
func.func @invalid_unary_argtype_mismatch(%arg0: f64) -> f64 {
  // expected-error@+1 {{present region argument 1 type mismatch}}
  %r = sparse_tensor.unary %arg0 : f64 to f64
    present={
      ^bb0(%x: index):
        sparse_tensor.yield %x : index
    }
    absent={}
  return %r : f64
}
// -----
func.func @invalid_unary_num_args_mismatch(%arg0: f64) -> f64 {
  // expected-error@+1 {{absent region must have exactly 0 arguments}}
  %r = sparse_tensor.unary %arg0 : f64 to f64
    present={}
    absent={
      ^bb0(%x: f64):
        sparse_tensor.yield %x : f64
    }
  return %r : f64
}
// -----
func.func @invalid_unary_wrong_return_type(%arg0: f64) -> f64 {
  // expected-error@+1 {{present region yield type mismatch}}
  %0 = sparse_tensor.unary %arg0 : f64 to f64
    present={
      ^bb0(%x: f64):
        %1 = arith.constant 0.0 : f32
        sparse_tensor.yield %1 : f32
    }
    absent={}
  return %0 : f64
}