[mlir][NFC] Update textual references of `func` to `func.func` in Integration tests

The special-case parsing of `func` operations is being removed, so these tests now spell out the fully qualified `func.func` form.
Author: River Riddle, 2022-04-20 16:20:21 -07:00
parent c48e3a13f3
commit 87db8e4439
133 changed files with 392 additions and 392 deletions
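The change is purely mechanical: every spelling that leaned on the removed sugar (`func @...`, `func private @...`) is rewritten to the dialect-qualified `func.func` form, with signatures, attributes, and bodies left untouched. A minimal before/after sketch, modeled on the entry points that recur in the files below:

// Before: accepted only via the special-case parsing of `func`.
func private @print_memref_f32(memref<*xf32>)
func @entry() {
  return
}

// After: the fully qualified form the parser requires once the sugar is gone.
func.func private @print_memref_f32(memref<*xf32>)
func.func @entry() {
  return
}

Each hunk below follows the same pattern: the first line of a pair is the old spelling, the second its `func.func` replacement.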


@ -35,7 +35,7 @@
#map0 = affine_map<(d0, d1) -> (d0, d1)>
func @linalg_generic(%lhs: memref<?x?xf32>,
func.func @linalg_generic(%lhs: memref<?x?xf32>,
%rhs: memref<?x?xf32>,
%sum: memref<?x?xf32>) {
linalg.generic {
@ -53,7 +53,7 @@ func @linalg_generic(%lhs: memref<?x?xf32>,
return
}
func @entry() {
func.func @entry() {
%f1 = arith.constant 1.0 : f32
%f4 = arith.constant 4.0 : f32
%c0 = arith.constant 0 : index
@ -128,7 +128,7 @@ func @entry() {
return
}
func private @rtclock() -> f64
func.func private @rtclock() -> f64
func private @print_memref_f32(memref<*xf32>)
func.func private @print_memref_f32(memref<*xf32>)
attributes { llvm.emit_c_interface }


@ -56,7 +56,7 @@
#map0 = affine_map<(d0, d1) -> (d0, d1)>
func @scf_parallel(%lhs: memref<?x?xf32>,
func.func @scf_parallel(%lhs: memref<?x?xf32>,
%rhs: memref<?x?xf32>,
%sum: memref<?x?xf32>) {
%c0 = arith.constant 0 : index
@ -75,7 +75,7 @@ func @scf_parallel(%lhs: memref<?x?xf32>,
return
}
func @entry() {
func.func @entry() {
%f1 = arith.constant 1.0 : f32
%f4 = arith.constant 4.0 : f32
%c0 = arith.constant 0 : index
@ -150,7 +150,7 @@ func @entry() {
return
}
func private @rtclock() -> f64
func.func private @rtclock() -> f64
func private @print_memref_f32(memref<*xf32>)
func.func private @print_memref_f32(memref<*xf32>)
attributes { llvm.emit_c_interface }


@ -51,12 +51,12 @@
// RUN: | FileCheck %s --dump-input=always
// Suppress constant folding by introducing "dynamic" zero value at runtime.
func private @zero() -> index {
func.func private @zero() -> index {
%0 = arith.constant 0 : index
return %0 : index
}
func @entry() {
func.func @entry() {
%c0 = arith.constant 0.0 : f32
%c1 = arith.constant 1 : index
%c2 = arith.constant 2 : index
@ -132,4 +132,4 @@ func @entry() {
return
}
func private @print_memref_f32(memref<*xf32>) attributes { llvm.emit_c_interface }
func.func private @print_memref_f32(memref<*xf32>) attributes { llvm.emit_c_interface }


@ -47,7 +47,7 @@
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_async_runtime%shlibext\
// RUN: | FileCheck %s --dump-input=always
func @entry() {
func.func @entry() {
%c0 = arith.constant 0.0 : f32
%c1 = arith.constant 1 : index
%c2 = arith.constant 2 : index
@ -141,4 +141,4 @@ func @entry() {
return
}
func private @print_memref_f32(memref<*xf32>) attributes { llvm.emit_c_interface }
func.func private @print_memref_f32(memref<*xf32>) attributes { llvm.emit_c_interface }


@ -22,7 +22,7 @@
!row_major_B = type memref<${K}x${N}x!elem_type_b>
!row_major_C = type memref<${M}x${N}x!elem_type_c>
func @matmul(%a: !row_major_A, %b: !row_major_B, %c: !row_major_C)
func.func @matmul(%a: !row_major_A, %b: !row_major_B, %c: !row_major_C)
// TODO: activate manually for now.
// attributes { passthrough = [["target-cpu", "skylake-avx512"], ["prefer-vector-width", "512"]]}
{
@ -31,7 +31,7 @@ func @matmul(%a: !row_major_A, %b: !row_major_B, %c: !row_major_C)
return
}
func @print_perf(%iters: index, %total_time: f64) {
func.func @print_perf(%iters: index, %total_time: f64) {
%c2 = arith.constant 2 : index
%cM = arith.constant ${M} : index
%cN = arith.constant ${N} : index
@ -51,7 +51,7 @@ func @print_perf(%iters: index, %total_time: f64) {
return
}
func @main() {
func.func @main() {
%v0 = arith.constant 0.0 : !elem_type_a
%v1 = arith.constant 1.0 : !elem_type_a
@ -106,8 +106,8 @@ func @main() {
return
}
func private @rtclock() -> f64
func private @verifyMemRefF32(memref<*xf32>, memref<*xf32>) -> i64 attributes { llvm.emit_c_interface }
func.func private @rtclock() -> f64
func.func private @verifyMemRefF32(memref<*xf32>, memref<*xf32>) -> i64 attributes { llvm.emit_c_interface }
// TODO: init with random, run and check output.
// func private @fill_random_f32(memref<*xf32>)


@ -3,9 +3,9 @@
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext \
// RUN: | FileCheck %s
func private @print_memref_f32(memref<*xf32>)
func.func private @print_memref_f32(memref<*xf32>)
func @matmul(%A: memref<?x?xf32>, %B: memref<?x?xf32>) -> (memref<?x?xf32>) {
func.func @matmul(%A: memref<?x?xf32>, %B: memref<?x?xf32>) -> (memref<?x?xf32>) {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
%f0 = arith.constant 0.0 : f32
@ -18,7 +18,7 @@ func @matmul(%A: memref<?x?xf32>, %B: memref<?x?xf32>) -> (memref<?x?xf32>) {
return %C : memref<?x?xf32>
}
func @matvec(%A: memref<?x?xf32>, %B: memref<?x?xf32>) -> (memref<?x?xf32>) {
func.func @matvec(%A: memref<?x?xf32>, %B: memref<?x?xf32>) -> (memref<?x?xf32>) {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
%f0 = arith.constant 0.0 : f32
@ -36,7 +36,7 @@ func @matvec(%A: memref<?x?xf32>, %B: memref<?x?xf32>) -> (memref<?x?xf32>) {
return %C : memref<?x?xf32>
}
func @main() {
func.func @main() {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
%m = arith.constant 5 : index


@ -3,9 +3,9 @@
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext \
// RUN: | FileCheck %s
func private @print_memref_f32(memref<*xf32>)
func.func private @print_memref_f32(memref<*xf32>)
func @main() {
func.func @main() {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
%c2 = arith.constant 2 : index


@ -7,7 +7,7 @@
// RUN: | FileCheck %s
func @main() {
func.func @main() {
%const = arith.constant dense<[[[[-3.9058,0.9072],[-2.9470,-2.2055],[18.3946,8.2997]],[[3.4700,5.9006],[-17.2267,4.9777],[1.0450,-0.8201]]],[[[17.6996,-11.1763],[26.7775,-3.8823],[-4.2492,-5.8966]],[[2.1259,13.1794],[-10.7136,0.8428],[16.4233,9.4589]]]]> : tensor<2x2x3x2xf32>
%dynamic = tensor.cast %const: tensor<2x2x3x2xf32> to tensor<2x?x?x?xf32>
%collapsed = call @collapse_dynamic_shape(%dynamic) : (tensor<2x?x?x?xf32>) -> (tensor<2x?x?xf32>)
@ -30,9 +30,9 @@ func @main() {
return
}
func private @print_memref_f32(%ptr : tensor<*xf32>)
func.func private @print_memref_f32(%ptr : tensor<*xf32>)
func @collapse_dynamic_shape(%arg0 : tensor<2x?x?x?xf32>) -> tensor<2x?x?xf32> {
func.func @collapse_dynamic_shape(%arg0 : tensor<2x?x?x?xf32>) -> tensor<2x?x?xf32> {
%0 = tensor.collapse_shape %arg0 [[0], [1, 2], [3]]: tensor<2x?x?x?xf32> into tensor<2x?x?xf32>
return %0 : tensor<2x?x?xf32>
}


@ -9,7 +9,7 @@
#map0 = affine_map<(d0, d1)[s0] -> ((d1 - d0) ceildiv s0)>
#map1 = affine_map<(d0, d1)[s0] -> ((d0 - d1) ceildiv s0)>
func @init_and_dot(%arg0: tensor<64xf32>, %arg1: tensor<64xf32>, %arg2: tensor<f32> {linalg.inplaceable = true}) -> tensor<f32> {
func.func @init_and_dot(%arg0: tensor<64xf32>, %arg1: tensor<64xf32>, %arg2: tensor<f32> {linalg.inplaceable = true}) -> tensor<f32> {
%c64 = arith.constant 64 : index
%cst = arith.constant 0.000000e+00 : f32
%c2 = arith.constant 2 : index
@ -75,7 +75,7 @@ func @init_and_dot(%arg0: tensor<64xf32>, %arg1: tensor<64xf32>, %arg2: tensor<f
return %7 : tensor<f32>
}
func @main() {
func.func @main() {
%v0 = arith.constant 0.0 : f32
%v1 = arith.constant 1.0 : f32
%v2 = arith.constant 2.0 : f32
@ -99,4 +99,4 @@ func @main() {
return
}
func private @print_memref_f32(tensor<*xf32>) attributes { llvm.emit_c_interface }
func.func private @print_memref_f32(tensor<*xf32>) attributes { llvm.emit_c_interface }


@ -9,22 +9,22 @@
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext \
// RUN: | FileCheck %s
func private @print_memref_f32(memref<*xf32>)
func.func private @print_memref_f32(memref<*xf32>)
// Creates and returns a 1-D buffer of size %s1 filled with the value %f
func @alloc_1d_filled_f32(%s1 : index, %f : f32) -> memref<?xf32> {
func.func @alloc_1d_filled_f32(%s1 : index, %f : f32) -> memref<?xf32> {
%buf = memref.alloc(%s1) : memref<?xf32>
linalg.fill ins(%f : f32) outs(%buf : memref<?xf32>)
return %buf : memref<?xf32>
}
func @conv_1d(%arg0: memref<?xf32>, %arg1: memref<?xf32>, %arg2: memref<?xf32>) {
func.func @conv_1d(%arg0: memref<?xf32>, %arg1: memref<?xf32>, %arg2: memref<?xf32>) {
linalg.conv_1d ins (%arg0, %arg1: memref<?xf32>, memref<?xf32>)
outs (%arg2: memref<?xf32>)
return
}
func @main() {
func.func @main() {
%c3 = arith.constant 3 : index
%c6 = arith.constant 6 : index
%c8 = arith.constant 8 : index


@ -9,16 +9,16 @@
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext \
// RUN: | FileCheck %s
func private @print_memref_f32(memref<*xf32>)
func.func private @print_memref_f32(memref<*xf32>)
// Creates and returns 3-D buffer of size (%s1, %s2, %s3) filled with the value %f
func @alloc_3d_filled_f32(%s1 : index, %s2 : index, %s3 : index, %f : f32) -> memref<?x?x?xf32> {
func.func @alloc_3d_filled_f32(%s1 : index, %s2 : index, %s3 : index, %f : f32) -> memref<?x?x?xf32> {
%buf = memref.alloc(%s1, %s2, %s3) : memref<?x?x?xf32>
linalg.fill ins(%f : f32) outs(%buf : memref<?x?x?xf32>)
return %buf : memref<?x?x?xf32>
}
func @conv_1d_nwc_wcf(%arg0: memref<?x?x?xf32>, %arg1: memref<?x?x?xf32>, %arg2: memref<?x?x?xf32>) {
func.func @conv_1d_nwc_wcf(%arg0: memref<?x?x?xf32>, %arg1: memref<?x?x?xf32>, %arg2: memref<?x?x?xf32>) {
linalg.conv_1d_nwc_wcf {dilations = dense<1> : tensor<1xi64>,
strides = dense<1> : tensor<1xi64>}
ins (%arg0, %arg1: memref<?x?x?xf32>, memref<?x?x?xf32>)
@ -26,7 +26,7 @@ func @conv_1d_nwc_wcf(%arg0: memref<?x?x?xf32>, %arg1: memref<?x?x?xf32>, %arg2:
return
}
func @main() {
func.func @main() {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
%c3 = arith.constant 3 : index


@ -9,22 +9,22 @@
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext \
// RUN: | FileCheck %s
func private @print_memref_f32(memref<*xf32>)
func.func private @print_memref_f32(memref<*xf32>)
// Creates and returns a 2-D buffer of size (%s1, %s2) filled with the value %f
func @alloc_2d_filled_f32(%s1 : index, %s2 : index, %f : f32) -> memref<?x?xf32> {
func.func @alloc_2d_filled_f32(%s1 : index, %s2 : index, %f : f32) -> memref<?x?xf32> {
%buf = memref.alloc(%s1, %s2) : memref<?x?xf32>
linalg.fill ins(%f : f32) outs(%buf : memref<?x?xf32>)
return %buf : memref<?x?xf32>
}
func @conv_2d(%arg0: memref<?x?xf32>, %arg1: memref<?x?xf32>, %arg2: memref<?x?xf32>) {
func.func @conv_2d(%arg0: memref<?x?xf32>, %arg1: memref<?x?xf32>, %arg2: memref<?x?xf32>) {
linalg.conv_2d ins (%arg0, %arg1: memref<?x?xf32>, memref<?x?xf32>)
outs (%arg2: memref<?x?xf32>)
return
}
func @main() {
func.func @main() {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
%c3 = arith.constant 3 : index


@ -9,16 +9,16 @@
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext \
// RUN: | FileCheck %s
func private @print_memref_f32(memref<*xf32>)
func.func private @print_memref_f32(memref<*xf32>)
// Creates and returns 4-D buffer of size (%s1, %s2, %s3, %s4) filled with the value %f
func @alloc_4d_filled_f32(%s1 : index, %s2 : index, %s3 : index, %s4 : index, %f : f32) -> memref<?x?x?x?xf32> {
func.func @alloc_4d_filled_f32(%s1 : index, %s2 : index, %s3 : index, %s4 : index, %f : f32) -> memref<?x?x?x?xf32> {
%buf = memref.alloc(%s1, %s2, %s3, %s4) : memref<?x?x?x?xf32>
linalg.fill ins(%f : f32) outs(%buf : memref<?x?x?x?xf32>)
return %buf : memref<?x?x?x?xf32>
}
func @conv_2d_nhwc_hwcf(%arg0: memref<?x?x?x?xf32>, %arg1: memref<?x?x?x?xf32>, %arg2: memref<?x?x?x?xf32>) {
func.func @conv_2d_nhwc_hwcf(%arg0: memref<?x?x?x?xf32>, %arg1: memref<?x?x?x?xf32>, %arg2: memref<?x?x?x?xf32>) {
linalg.conv_2d_nhwc_hwcf {dilations = dense<1> : tensor<2xi64>,
strides = dense<1> : tensor<2xi64>}
ins (%arg0, %arg1: memref<?x?x?x?xf32>, memref<?x?x?x?xf32>)
@ -26,7 +26,7 @@ func @conv_2d_nhwc_hwcf(%arg0: memref<?x?x?x?xf32>, %arg1: memref<?x?x?x?xf32>,
return
}
func @main() {
func.func @main() {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
%c3 = arith.constant 3 : index


@ -9,22 +9,22 @@
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext \
// RUN: | FileCheck %s
func private @print_memref_f32(memref<*xf32>)
func.func private @print_memref_f32(memref<*xf32>)
// Creates and returns 3-D buffer of size (%s1, %s2, %s3) filled with the value %f
func @alloc_3d_filled_f32(%s1 : index, %s2 : index, %s3 : index, %f : f32) -> memref<?x?x?xf32> {
func.func @alloc_3d_filled_f32(%s1 : index, %s2 : index, %s3 : index, %f : f32) -> memref<?x?x?xf32> {
%buf = memref.alloc(%s1, %s2, %s3) : memref<?x?x?xf32>
linalg.fill ins(%f : f32) outs(%buf : memref<?x?x?xf32>)
return %buf : memref<?x?x?xf32>
}
func @conv_3d(%arg0: memref<?x?x?xf32>, %arg1: memref<?x?x?xf32>, %arg2: memref<?x?x?xf32>) {
func.func @conv_3d(%arg0: memref<?x?x?xf32>, %arg1: memref<?x?x?xf32>, %arg2: memref<?x?x?xf32>) {
linalg.conv_3d ins (%arg0, %arg1: memref<?x?x?xf32>, memref<?x?x?xf32>)
outs (%arg2: memref<?x?x?xf32>)
return
}
func @main() {
func.func @main() {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
%c3 = arith.constant 3 : index


@ -9,16 +9,16 @@
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext \
// RUN: | FileCheck %s
func private @print_memref_f32(memref<*xf32>)
func.func private @print_memref_f32(memref<*xf32>)
// Creates and returns 5-D buffer of size (%s1, %s2, %s3, %s4, %s5) filled with the value %f
func @alloc_5d_filled_f32(%s1 : index, %s2 : index, %s3 : index, %s4 : index, %s5 : index, %f : f32) -> memref<?x?x?x?x?xf32> {
func.func @alloc_5d_filled_f32(%s1 : index, %s2 : index, %s3 : index, %s4 : index, %s5 : index, %f : f32) -> memref<?x?x?x?x?xf32> {
%buf = memref.alloc(%s1, %s2, %s3, %s4, %s5) : memref<?x?x?x?x?xf32>
linalg.fill ins(%f : f32) outs(%buf : memref<?x?x?x?x?xf32>)
return %buf : memref<?x?x?x?x?xf32>
}
func @conv_3d_ndhwc_dhwcf(%arg0: memref<?x?x?x?x?xf32>, %arg1: memref<?x?x?x?x?xf32>, %arg2: memref<?x?x?x?x?xf32>) {
func.func @conv_3d_ndhwc_dhwcf(%arg0: memref<?x?x?x?x?xf32>, %arg1: memref<?x?x?x?x?xf32>, %arg2: memref<?x?x?x?x?xf32>) {
linalg.conv_3d_ndhwc_dhwcf {dilations = dense<1> : tensor<3xi64>,
strides = dense<1> : tensor<3xi64>}
ins (%arg0, %arg1: memref<?x?x?x?x?xf32>, memref<?x?x?x?x?xf32>)
@ -27,7 +27,7 @@ func @conv_3d_ndhwc_dhwcf(%arg0: memref<?x?x?x?x?xf32>, %arg1: memref<?x?x?x?x?x
}
func @main() {
func.func @main() {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
%c3 = arith.constant 3 : index


@ -7,7 +7,7 @@
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext \
// RUN: | FileCheck %s
func @main() {
func.func @main() {
%a = arith.constant dense<[1.0, 2.0, 3.0]> : tensor<3xf32>
%b = arith.constant dense<[10.0, 20.0, 30.0]> : tensor<3xf32>
@ -20,4 +20,4 @@ func @main() {
return
}
func private @print_memref_f32(%ptr : tensor<*xf32>)
func.func private @print_memref_f32(%ptr : tensor<*xf32>)


@ -7,7 +7,7 @@
// RUN: | FileCheck %s
func @main() {
func.func @main() {
%const = arith.constant dense<[[[-3.9058,0.9072],[-2.9470,-2.2055],[18.3946,8.2997],[3.4700,5.9006],[-17.2267,4.9777],[1.0450,-0.8201]],[[17.6996,-11.1763],[26.7775,-3.8823],[-4.2492,-5.8966],[2.1259,13.1794],[-10.7136,0.8428],[16.4233,9.4589]]]> : tensor<2x6x2xf32>
%dynamic = tensor.cast %const: tensor<2x6x2xf32> to tensor<2x?x?xf32>
%expanded = call @expand_dynamic_shape(%dynamic) : (tensor<2x?x?xf32>) -> (tensor<2x2x?x1x?xf32>)
@ -31,9 +31,9 @@ func @main() {
return
}
func private @print_memref_f32(%ptr : tensor<*xf32>)
func.func private @print_memref_f32(%ptr : tensor<*xf32>)
func @expand_dynamic_shape(%arg0 : tensor<2x?x?xf32>) -> tensor<2x2x?x1x?xf32> {
func.func @expand_dynamic_shape(%arg0 : tensor<2x?x?xf32>) -> tensor<2x2x?x1x?xf32> {
%0 = tensor.expand_shape %arg0 [[0], [1, 2, 3], [4]]: tensor<2x?x?xf32> into tensor<2x2x?x1x?xf32>
return %0 : tensor<2x2x?x1x?xf32>
}


@ -7,7 +7,7 @@
// RUN: | FileCheck %s
func @main() {
func.func @main() {
%const = arith.constant dense<[[[1.0, 2.0, 3.0], [2.0, 3.0, 4.0]]]> : tensor<1x2x3xf32>
%dynamic = tensor.cast %const: tensor<1x2x3xf32> to tensor<1x?x3xf32>
%offset = arith.constant 2 : index
@ -30,4 +30,4 @@ func @main() {
return
}
func private @print_memref_f32(%ptr : tensor<*xf32>)
func.func private @print_memref_f32(%ptr : tensor<*xf32>)


@ -6,7 +6,7 @@
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext,%mlir_integration_test_dir/libmlir_runner_utils%shlibext \
// RUN: | FileCheck %s
func @main() {
func.func @main() {
%const = arith.constant dense<10.0> : tensor<2xf32>
%insert_val = arith.constant dense<20.0> : tensor<1xf32>
@ -34,4 +34,4 @@ func @main() {
return
}
func private @print_memref_f32(%ptr : tensor<*xf32>)
func.func private @print_memref_f32(%ptr : tensor<*xf32>)


@ -6,7 +6,7 @@
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext \
// RUN: | FileCheck %s
func @main() {
func.func @main() {
%const = arith.constant dense<10.0> : tensor<2xf32>
%insert_val = arith.constant dense<20.0> : tensor<1xf32>
%inserted = tensor.insert_slice %insert_val into %const[0][1][1] : tensor<1xf32> into tensor<2xf32>
@ -21,4 +21,4 @@ func @main() {
return
}
func private @print_memref_f32(%ptr : tensor<*xf32>)
func.func private @print_memref_f32(%ptr : tensor<*xf32>)


@ -5,12 +5,12 @@
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext \
// RUN: | FileCheck %s
func @foo() -> tensor<4xf32> {
func.func @foo() -> tensor<4xf32> {
%0 = arith.constant dense<[1.0, 2.0, 3.0, 4.0]> : tensor<4xf32>
return %0 : tensor<4xf32>
}
func @main() {
func.func @main() {
%0 = call @foo() : () -> tensor<4xf32>
// Instead of relying on tensor_store which introduces aliasing, we rely on
@ -33,4 +33,4 @@ func @main() {
// Note that this is skipping a step and we would need at least some function
// attribute to declare that this conversion is valid (e.g. when we statically
// know that things will play nicely at the C ABI boundary).
func private @print_memref_f32(%ptr : tensor<*xf32>)
func.func private @print_memref_f32(%ptr : tensor<*xf32>)


@ -15,7 +15,7 @@
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext,%mlir_integration_test_dir/libmlir_runner_utils%shlibext \
// RUN: | FileCheck %s
func @main() {
func.func @main() {
%A = arith.constant dense<[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]> : tensor<2x3xf32>
%B = arith.constant dense<[[1.0, 2.0, 3.0, 4.0],
[5.0, 6.0, 7.0, 8.0],
@ -36,4 +36,4 @@ func @main() {
return
}
func private @print_memref_f32(%ptr : tensor<*xf32>)
func.func private @print_memref_f32(%ptr : tensor<*xf32>)


@ -19,7 +19,7 @@
/* MLIR_BEGIN
//--- input.mlir
// Performs: arg0[i, j] = arg0[i, j] + arg1[i, j]
func private @add_memref(%arg0: memref<?x?xf64>, %arg1: memref<?x?xf64>) -> i64
func.func private @add_memref(%arg0: memref<?x?xf64>, %arg1: memref<?x?xf64>) -> i64
attributes {llvm.emit_c_interface} {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index


@ -99,7 +99,7 @@ module @patterns {
// CHECK: %[[BWD:.*]] = "kernel.FcBwd"(%arg0, %[[SM]]#1, %arg2) : (tensor<2x20xf32>, tensor<2x10xf32>, tensor<20x10xf32>) -> tensor<20x10xf32>
// CHECK: return %[[SM:.*]]#0, %[[BWD]] : tensor<f32>, tensor<20x10xf32>
module @ir attributes { test.mlp_split } {
func @main(%arg0: tensor<2x20xf32>, %arg1: tensor<2xi32>, %arg2: tensor<20x10xf32>) -> (tensor<f32>, tensor<20x10xf32>) {
func.func @main(%arg0: tensor<2x20xf32>, %arg1: tensor<2xi32>, %arg2: tensor<20x10xf32>) -> (tensor<f32>, tensor<20x10xf32>) {
%0 = "tf.Const"() {value = dense<0> : tensor<1xi32>} : () -> tensor<1xi32>
%1 = "tf.Const"() {value = dense<1.000000e-01> : tensor<f32>} : () -> tensor<f32>
%2 = "tf.Const"() {value = dense<5.000000e-01> : tensor<2x1xf32>} : () -> tensor<2x1xf32>
@ -261,7 +261,7 @@ module @patterns {
// CHECK: %[[SM]]:2 = "kernel.SoftmaxCrossEntropy"(%[[FC2]]#0, %arg1) : (tensor<2x10xf32>, tensor<2xi32>) -> (tensor<f32>, tensor<2x10xf32>)
// CHECK: %[[FC1]]:3 = "kernel.FcWithBias"(%arg0, %[[FC2]]#1, %arg3, %arg2) : (tensor<2x20xf32>, tensor<2x256xf32>, tensor<20x256xf32>, tensor<256xf32>) -> (tensor<2x256xf32>, tensor<20x256xf32>, tensor<256xf32>)
module @ir attributes { test.mlp_fused } {
func @main(%arg0: tensor<2x20xf32>, %arg1: tensor<2xi32>, %arg2: tensor<256xf32>, %arg3: tensor<20x256xf32>, %arg4: tensor<256x10xf32>) -> () { // tensor<f32>, tensor<256xf32>, tensor<20x256xf32>, tensor<256x10xf32>) {
func.func @main(%arg0: tensor<2x20xf32>, %arg1: tensor<2xi32>, %arg2: tensor<256xf32>, %arg3: tensor<20x256xf32>, %arg4: tensor<256x10xf32>) -> () { // tensor<f32>, tensor<256xf32>, tensor<20x256xf32>, tensor<256x10xf32>) {
// The replacement operations fuse forward and backward pass; therefore, the
// resulting graph is not a DAG. To address this, we wrap the operations in
// a graph region.


@ -41,7 +41,7 @@ module {
//
// A kernel that assigns elements from A to X.
//
func @dense_output(%arga: tensor<?x?xf64, #SparseMatrix>) -> tensor<?x?xf64, #DenseMatrix> {
func.func @dense_output(%arga: tensor<?x?xf64, #SparseMatrix>) -> tensor<?x?xf64, #DenseMatrix> {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
%d0 = tensor.dim %arga, %c0 : tensor<?x?xf64, #SparseMatrix>
@ -56,12 +56,12 @@ module {
return %0 : tensor<?x?xf64, #DenseMatrix>
}
func private @getTensorFilename(index) -> (!Filename)
func.func private @getTensorFilename(index) -> (!Filename)
//
// Main driver that reads matrix from file and calls the kernel.
//
func @entry() {
func.func @entry() {
%d0 = arith.constant 0.0 : f64
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index


@ -45,7 +45,7 @@ module {
// Since all casts are "zero preserving" unary operations, lattice computation
// and conversion to sparse code is straightforward.
//
func @sparse_cast_s32_to_f32(%arga: tensor<10xi32, #SV>) -> tensor<10xf32> {
func.func @sparse_cast_s32_to_f32(%arga: tensor<10xi32, #SV>) -> tensor<10xf32> {
%argx = arith.constant dense<0.0> : tensor<10xf32>
%0 = linalg.generic #trait_cast
ins(%arga: tensor<10xi32, #SV>)
@ -56,7 +56,7 @@ module {
} -> tensor<10xf32>
return %0 : tensor<10xf32>
}
func @sparse_cast_u32_to_f32(%arga: tensor<10xi32, #SV>) -> tensor<10xf32> {
func.func @sparse_cast_u32_to_f32(%arga: tensor<10xi32, #SV>) -> tensor<10xf32> {
%argx = arith.constant dense<0.0> : tensor<10xf32>
%0 = linalg.generic #trait_cast
ins(%arga: tensor<10xi32, #SV>)
@ -67,7 +67,7 @@ module {
} -> tensor<10xf32>
return %0 : tensor<10xf32>
}
func @sparse_cast_f32_to_s32(%arga: tensor<10xf32, #SV>) -> tensor<10xi32> {
func.func @sparse_cast_f32_to_s32(%arga: tensor<10xf32, #SV>) -> tensor<10xi32> {
%argx = arith.constant dense<0> : tensor<10xi32>
%0 = linalg.generic #trait_cast
ins(%arga: tensor<10xf32, #SV>)
@ -78,7 +78,7 @@ module {
} -> tensor<10xi32>
return %0 : tensor<10xi32>
}
func @sparse_cast_f64_to_u32(%arga: tensor<10xf64, #SV>) -> tensor<10xi32> {
func.func @sparse_cast_f64_to_u32(%arga: tensor<10xf64, #SV>) -> tensor<10xi32> {
%argx = arith.constant dense<0> : tensor<10xi32>
%0 = linalg.generic #trait_cast
ins(%arga: tensor<10xf64, #SV>)
@ -89,7 +89,7 @@ module {
} -> tensor<10xi32>
return %0 : tensor<10xi32>
}
func @sparse_cast_f32_to_f64(%arga: tensor<10xf32, #SV>) -> tensor<10xf64> {
func.func @sparse_cast_f32_to_f64(%arga: tensor<10xf32, #SV>) -> tensor<10xf64> {
%argx = arith.constant dense<0.0> : tensor<10xf64>
%0 = linalg.generic #trait_cast
ins(%arga: tensor<10xf32, #SV>)
@ -100,7 +100,7 @@ module {
} -> tensor<10xf64>
return %0 : tensor<10xf64>
}
func @sparse_cast_f64_to_f32(%arga: tensor<10xf64, #SV>) -> tensor<10xf32> {
func.func @sparse_cast_f64_to_f32(%arga: tensor<10xf64, #SV>) -> tensor<10xf32> {
%argx = arith.constant dense<0.0> : tensor<10xf32>
%0 = linalg.generic #trait_cast
ins(%arga: tensor<10xf64, #SV>)
@ -111,7 +111,7 @@ module {
} -> tensor<10xf32>
return %0 : tensor<10xf32>
}
func @sparse_cast_s32_to_u64(%arga: tensor<10xi32, #SV>) -> tensor<10xi64> {
func.func @sparse_cast_s32_to_u64(%arga: tensor<10xi32, #SV>) -> tensor<10xi64> {
%argx = arith.constant dense<0> : tensor<10xi64>
%0 = linalg.generic #trait_cast
ins(%arga: tensor<10xi32, #SV>)
@ -122,7 +122,7 @@ module {
} -> tensor<10xi64>
return %0 : tensor<10xi64>
}
func @sparse_cast_u32_to_s64(%arga: tensor<10xi32, #SV>) -> tensor<10xi64> {
func.func @sparse_cast_u32_to_s64(%arga: tensor<10xi32, #SV>) -> tensor<10xi64> {
%argx = arith.constant dense<0> : tensor<10xi64>
%0 = linalg.generic #trait_cast
ins(%arga: tensor<10xi32, #SV>)
@ -133,7 +133,7 @@ module {
} -> tensor<10xi64>
return %0 : tensor<10xi64>
}
func @sparse_cast_i32_to_i8(%arga: tensor<10xi32, #SV>) -> tensor<10xi8> {
func.func @sparse_cast_i32_to_i8(%arga: tensor<10xi32, #SV>) -> tensor<10xi8> {
%argx = arith.constant dense<0> : tensor<10xi8>
%0 = linalg.generic #trait_cast
ins(%arga: tensor<10xi32, #SV>)
@ -144,7 +144,7 @@ module {
} -> tensor<10xi8>
return %0 : tensor<10xi8>
}
func @sparse_cast_f32_as_s32(%arga: tensor<10xf32, #SV>) -> tensor<10xi32> {
func.func @sparse_cast_f32_as_s32(%arga: tensor<10xf32, #SV>) -> tensor<10xi32> {
%argx = arith.constant dense<0> : tensor<10xi32>
%0 = linalg.generic #trait_cast
ins(%arga: tensor<10xf32, #SV>)
@ -160,7 +160,7 @@ module {
// Main driver that converts a dense tensor into a sparse tensor
// and then calls the sparse casting kernel.
//
func @entry() {
func.func @entry() {
%z = arith.constant 0 : index
%b = arith.constant 0 : i8
%i = arith.constant 0 : i32


@ -12,7 +12,7 @@
// Integration tests for conversions from sparse constants to sparse tensors.
//
module {
func @entry() {
func.func @entry() {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
%c2 = arith.constant 2 : index


@ -26,14 +26,14 @@ module {
//
// Output utilities.
//
func @dumpf64(%arg0: memref<?xf64>) {
func.func @dumpf64(%arg0: memref<?xf64>) {
%c0 = arith.constant 0 : index
%d0 = arith.constant -1.0 : f64
%0 = vector.transfer_read %arg0[%c0], %d0: memref<?xf64>, vector<25xf64>
vector.print %0 : vector<25xf64>
return
}
func @dumpidx(%arg0: memref<?xindex>) {
func.func @dumpidx(%arg0: memref<?xindex>) {
%c0 = arith.constant 0 : index
%d0 = arith.constant 0 : index
%0 = vector.transfer_read %arg0[%c0], %d0: memref<?xindex>, vector<25xindex>
@ -44,7 +44,7 @@ module {
//
// Main driver.
//
func @entry() {
func.func @entry() {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
%c2 = arith.constant 2 : index


@ -24,7 +24,7 @@ module {
// Helper method to print values array. The transfer actually
// reads more than required to verify size of buffer as well.
//
func @dump(%arg0: memref<?xf64>) {
func.func @dump(%arg0: memref<?xf64>) {
%c = arith.constant 0 : index
%d = arith.constant -1.0 : f64
%0 = vector.transfer_read %arg0[%c], %d: memref<?xf64>, vector<8xf64>
@ -32,7 +32,7 @@ module {
return
}
func @entry() {
func.func @entry() {
%t1 = arith.constant sparse<
[ [0,0], [0,1], [0,63], [1,0], [1,1], [31,0], [31,63] ],
[ 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0 ]> : tensor<32x64xf64>


@ -35,28 +35,28 @@ module {
// Helper method to print values and indices arrays. The transfer actually
// reads more than required to verify size of buffer as well.
//
func @dumpf64(%arg0: memref<?xf64>) {
func.func @dumpf64(%arg0: memref<?xf64>) {
%c = arith.constant 0 : index
%d = arith.constant -1.0 : f64
%0 = vector.transfer_read %arg0[%c], %d: memref<?xf64>, vector<8xf64>
vector.print %0 : vector<8xf64>
return
}
func @dumpi08(%arg0: memref<?xi8>) {
func.func @dumpi08(%arg0: memref<?xi8>) {
%c = arith.constant 0 : index
%d = arith.constant -1 : i8
%0 = vector.transfer_read %arg0[%c], %d: memref<?xi8>, vector<8xi8>
vector.print %0 : vector<8xi8>
return
}
func @dumpi32(%arg0: memref<?xi32>) {
func.func @dumpi32(%arg0: memref<?xi32>) {
%c = arith.constant 0 : index
%d = arith.constant -1 : i32
%0 = vector.transfer_read %arg0[%c], %d: memref<?xi32>, vector<8xi32>
vector.print %0 : vector<8xi32>
return
}
func @dumpi64(%arg0: memref<?xi64>) {
func.func @dumpi64(%arg0: memref<?xi64>) {
%c = arith.constant 0 : index
%d = arith.constant -1 : i64
%0 = vector.transfer_read %arg0[%c], %d: memref<?xi64>, vector<8xi64>
@ -64,7 +64,7 @@ module {
return
}
func @entry() {
func.func @entry() {
%c1 = arith.constant 1 : index
%t1 = arith.constant sparse<
[ [0,0], [0,1], [0,63], [1,0], [1,1], [31,0], [31,63] ],


@ -42,62 +42,62 @@ module {
//
// Utilities for output and releasing memory.
//
func @dump(%arg0: tensor<2x3x4xf64>) {
func.func @dump(%arg0: tensor<2x3x4xf64>) {
%c0 = arith.constant 0 : index
%d0 = arith.constant -1.0 : f64
%0 = vector.transfer_read %arg0[%c0, %c0, %c0], %d0: tensor<2x3x4xf64>, vector<2x3x4xf64>
vector.print %0 : vector<2x3x4xf64>
return
}
func @dumpAndRelease_234(%arg0: tensor<2x3x4xf64>) {
func.func @dumpAndRelease_234(%arg0: tensor<2x3x4xf64>) {
call @dump(%arg0) : (tensor<2x3x4xf64>) -> ()
%1 = bufferization.to_memref %arg0 : memref<2x3x4xf64>
memref.dealloc %1 : memref<2x3x4xf64>
return
}
func @dumpAndRelease_p34(%arg0: tensor<?x3x4xf64>) {
func.func @dumpAndRelease_p34(%arg0: tensor<?x3x4xf64>) {
%0 = tensor.cast %arg0 : tensor<?x3x4xf64> to tensor<2x3x4xf64>
call @dump(%0) : (tensor<2x3x4xf64>) -> ()
%1 = bufferization.to_memref %arg0 : memref<?x3x4xf64>
memref.dealloc %1 : memref<?x3x4xf64>
return
}
func @dumpAndRelease_2p4(%arg0: tensor<2x?x4xf64>) {
func.func @dumpAndRelease_2p4(%arg0: tensor<2x?x4xf64>) {
%0 = tensor.cast %arg0 : tensor<2x?x4xf64> to tensor<2x3x4xf64>
call @dump(%0) : (tensor<2x3x4xf64>) -> ()
%1 = bufferization.to_memref %arg0 : memref<2x?x4xf64>
memref.dealloc %1 : memref<2x?x4xf64>
return
}
func @dumpAndRelease_23p(%arg0: tensor<2x3x?xf64>) {
func.func @dumpAndRelease_23p(%arg0: tensor<2x3x?xf64>) {
%0 = tensor.cast %arg0 : tensor<2x3x?xf64> to tensor<2x3x4xf64>
call @dump(%0) : (tensor<2x3x4xf64>) -> ()
%1 = bufferization.to_memref %arg0 : memref<2x3x?xf64>
memref.dealloc %1 : memref<2x3x?xf64>
return
}
func @dumpAndRelease_2pp(%arg0: tensor<2x?x?xf64>) {
func.func @dumpAndRelease_2pp(%arg0: tensor<2x?x?xf64>) {
%0 = tensor.cast %arg0 : tensor<2x?x?xf64> to tensor<2x3x4xf64>
call @dump(%0) : (tensor<2x3x4xf64>) -> ()
%1 = bufferization.to_memref %arg0 : memref<2x?x?xf64>
memref.dealloc %1 : memref<2x?x?xf64>
return
}
func @dumpAndRelease_p3p(%arg0: tensor<?x3x?xf64>) {
func.func @dumpAndRelease_p3p(%arg0: tensor<?x3x?xf64>) {
%0 = tensor.cast %arg0 : tensor<?x3x?xf64> to tensor<2x3x4xf64>
call @dump(%0) : (tensor<2x3x4xf64>) -> ()
%1 = bufferization.to_memref %arg0 : memref<?x3x?xf64>
memref.dealloc %1 : memref<?x3x?xf64>
return
}
func @dumpAndRelease_pp4(%arg0: tensor<?x?x4xf64>) {
func.func @dumpAndRelease_pp4(%arg0: tensor<?x?x4xf64>) {
%0 = tensor.cast %arg0 : tensor<?x?x4xf64> to tensor<2x3x4xf64>
call @dump(%0) : (tensor<2x3x4xf64>) -> ()
%1 = bufferization.to_memref %arg0 : memref<?x?x4xf64>
memref.dealloc %1 : memref<?x?x4xf64>
return
}
func @dumpAndRelease_ppp(%arg0: tensor<?x?x?xf64>) {
func.func @dumpAndRelease_ppp(%arg0: tensor<?x?x?xf64>) {
%0 = tensor.cast %arg0 : tensor<?x?x?xf64> to tensor<2x3x4xf64>
call @dump(%0) : (tensor<2x3x4xf64>) -> ()
%1 = bufferization.to_memref %arg0 : memref<?x?x?xf64>
@ -108,7 +108,7 @@ module {
//
// Main driver.
//
func @entry() {
func.func @entry() {
//
// Initialize a 3-dim dense tensor.
//


@ -10,7 +10,7 @@ module {
//
// Sparse kernel.
//
func @sparse_dot(%a: tensor<1024xf32, #SparseVector>,
func.func @sparse_dot(%a: tensor<1024xf32, #SparseVector>,
%b: tensor<1024xf32, #SparseVector>) -> tensor<f32> {
%x = linalg.init_tensor [] : tensor<f32>
%dot = linalg.dot ins(%a, %b: tensor<1024xf32, #SparseVector>,
@ -22,7 +22,7 @@ module {
//
// Main driver.
//
func @entry() {
func.func @entry() {
// Setup two sparse vectors.
%d1 = arith.constant sparse<
[ [0], [1], [22], [23], [1022] ], [1.0, 2.0, 3.0, 4.0, 5.0]


@ -15,7 +15,7 @@
// An example of a 2D convolution with a sparse filter.
module {
func @conv2d(%input: tensor<8x8xi32>,
func.func @conv2d(%input: tensor<8x8xi32>,
%filter: tensor<3x3xi32, #DCSR>,
%output: tensor<6x6xi32>) -> tensor<6x6xi32> {
%0 = linalg.conv_2d
@ -24,7 +24,7 @@ module {
return %0 : tensor<6x6xi32>
}
func @entry() {
func.func @entry() {
%c0 = arith.constant 0 : index
%i0 = arith.constant 0 : i32


@ -44,7 +44,7 @@ module {
//
// A kernel that flattens a rank 8 tensor into a dense matrix.
//
func @kernel_flatten(%arga: tensor<7x3x3x3x3x3x5x3xf64, #SparseTensor>,
func.func @kernel_flatten(%arga: tensor<7x3x3x3x3x3x5x3xf64, #SparseTensor>,
%argx: tensor<7x3xf64> {linalg.inplaceable = true})
-> tensor<7x3xf64> {
%0 = linalg.generic #trait_flatten
@ -57,12 +57,12 @@ module {
return %0 : tensor<7x3xf64>
}
func private @getTensorFilename(index) -> (!Filename)
func.func private @getTensorFilename(index) -> (!Filename)
//
// Main driver that reads tensor from file and calls the sparse kernel.
//
func @entry() {
func.func @entry() {
%d0 = arith.constant 0.0 : f64
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index


@ -38,7 +38,7 @@ module {
//
// Kernel that uses index in the index notation (conjunction).
//
func @sparse_index_1d_conj(%arga: tensor<8xi64, #SparseVector>)
func.func @sparse_index_1d_conj(%arga: tensor<8xi64, #SparseVector>)
-> tensor<8xi64, #SparseVector> {
%d0 = arith.constant 8 : index
%init = sparse_tensor.init [%d0] : tensor<8xi64, #SparseVector>
@ -57,7 +57,7 @@ module {
//
// Kernel that uses index in the index notation (disjunction).
//
func @sparse_index_1d_disj(%arga: tensor<8xi64, #SparseVector>)
func.func @sparse_index_1d_disj(%arga: tensor<8xi64, #SparseVector>)
-> tensor<8xi64, #SparseVector> {
%d0 = arith.constant 8 : index
%init = sparse_tensor.init [%d0] : tensor<8xi64, #SparseVector>
@ -76,7 +76,7 @@ module {
//
// Kernel that uses indices in the index notation (conjunction).
//
func @sparse_index_2d_conj(%arga: tensor<3x4xi64, #SparseMatrix>)
func.func @sparse_index_2d_conj(%arga: tensor<3x4xi64, #SparseMatrix>)
-> tensor<3x4xi64, #SparseMatrix> {
%d0 = arith.constant 3 : index
%d1 = arith.constant 4 : index
@ -99,7 +99,7 @@ module {
//
// Kernel that uses indices in the index notation (disjunction).
//
func @sparse_index_2d_disj(%arga: tensor<3x4xi64, #SparseMatrix>)
func.func @sparse_index_2d_disj(%arga: tensor<3x4xi64, #SparseMatrix>)
-> tensor<3x4xi64, #SparseMatrix> {
%d0 = arith.constant 3 : index
%d1 = arith.constant 4 : index
@ -119,7 +119,7 @@ module {
return %r : tensor<3x4xi64, #SparseMatrix>
}
func @add_outer_2d(%arg0: tensor<2x3xf32, #SparseMatrix>)
func.func @add_outer_2d(%arg0: tensor<2x3xf32, #SparseMatrix>)
-> tensor<2x3xf32, #SparseMatrix> {
%c2 = arith.constant 2 : index
%c3 = arith.constant 3 : index
@ -140,7 +140,7 @@ module {
//
// Main driver.
//
func @entry() {
func.func @entry() {
%c0 = arith.constant 0 : index
%du = arith.constant -1 : i64
%df = arith.constant -1.0 : f32


@ -44,7 +44,7 @@ module {
//
// Kernel that uses index in the index notation (conjunction).
//
func @sparse_index_1d_conj(%arga: tensor<8xi64, #SparseVector>) -> tensor<8xi64> {
func.func @sparse_index_1d_conj(%arga: tensor<8xi64, #SparseVector>) -> tensor<8xi64> {
%init = linalg.init_tensor [8] : tensor<8xi64>
%r = linalg.generic #trait_1d
ins(%arga: tensor<8xi64, #SparseVector>)
@ -61,7 +61,7 @@ module {
//
// Kernel that uses index in the index notation (disjunction).
//
func @sparse_index_1d_disj(%arga: tensor<8xi64, #SparseVector>) -> tensor<8xi64> {
func.func @sparse_index_1d_disj(%arga: tensor<8xi64, #SparseVector>) -> tensor<8xi64> {
%init = linalg.init_tensor [8] : tensor<8xi64>
%r = linalg.generic #trait_1d
ins(%arga: tensor<8xi64, #SparseVector>)
@ -78,7 +78,7 @@ module {
//
// Kernel that uses indices in the index notation (conjunction).
//
func @sparse_index_2d_conj(%arga: tensor<3x4xi64, #SparseMatrix>) -> tensor<3x4xi64> {
func.func @sparse_index_2d_conj(%arga: tensor<3x4xi64, #SparseMatrix>) -> tensor<3x4xi64> {
%init = linalg.init_tensor [3,4] : tensor<3x4xi64>
%r = linalg.generic #trait_2d
ins(%arga: tensor<3x4xi64, #SparseMatrix>)
@ -98,7 +98,7 @@ module {
//
// Kernel that uses indices in the index notation (disjunction).
//
func @sparse_index_2d_disj(%arga: tensor<3x4xi64, #SparseMatrix>) -> tensor<3x4xi64> {
func.func @sparse_index_2d_disj(%arga: tensor<3x4xi64, #SparseMatrix>) -> tensor<3x4xi64> {
%init = linalg.init_tensor [3,4] : tensor<3x4xi64>
%r = linalg.generic #trait_2d
ins(%arga: tensor<3x4xi64, #SparseMatrix>)
@ -118,7 +118,7 @@ module {
//
// Main driver.
//
func @entry() {
func.func @entry() {
%c0 = arith.constant 0 : index
%du = arith.constant -1 : i64


@ -17,7 +17,7 @@ module {
//
// Computes C = A x B with all matrices dense.
//
func @matmul1(%A: tensor<4x8xf64>,
func.func @matmul1(%A: tensor<4x8xf64>,
%B: tensor<8x4xf64>) -> tensor<4x4xf64> {
%C = arith.constant dense<0.0> : tensor<4x4xf64>
%D = linalg.matmul
@ -29,7 +29,7 @@ module {
//
// Computes C = A x B with all matrices sparse (SpMSpM) in CSR.
//
func @matmul2(%A: tensor<4x8xf64, #CSR>,
func.func @matmul2(%A: tensor<4x8xf64, #CSR>,
%B: tensor<8x4xf64, #CSR>) -> tensor<4x4xf64, #CSR> {
%c4 = arith.constant 4 : index
%C = sparse_tensor.init [%c4, %c4] : tensor<4x4xf64, #CSR>
@ -42,7 +42,7 @@ module {
//
// Computes C = A x B with all matrices sparse (SpMSpM) in DCSR.
//
func @matmul3(%A: tensor<4x8xf64, #DCSR>,
func.func @matmul3(%A: tensor<4x8xf64, #DCSR>,
%B: tensor<8x4xf64, #DCSR>) -> tensor<4x4xf64, #DCSR> {
%c4 = arith.constant 4 : index
%C = sparse_tensor.init [%c4, %c4] : tensor<4x4xf64, #DCSR>
@ -55,7 +55,7 @@ module {
//
// Main driver.
//
func @entry() {
func.func @entry() {
%c0 = arith.constant 0 : index
%d1 = arith.constant -1.0 : f64


@ -36,7 +36,7 @@
module {
// Scales a sparse matrix into a new sparse matrix.
func @matrix_scale(%arga: tensor<?x?xf64, #DCSR>) -> tensor<?x?xf64, #DCSR> {
func.func @matrix_scale(%arga: tensor<?x?xf64, #DCSR>) -> tensor<?x?xf64, #DCSR> {
%s = arith.constant 2.0 : f64
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
@ -54,7 +54,7 @@ module {
}
// Scales a sparse matrix in place.
func @matrix_scale_inplace(%argx: tensor<?x?xf64, #DCSR>
func.func @matrix_scale_inplace(%argx: tensor<?x?xf64, #DCSR>
{linalg.inplaceable = true}) -> tensor<?x?xf64, #DCSR> {
%s = arith.constant 2.0 : f64
%0 = linalg.generic #trait_scale_inpl
@ -67,7 +67,7 @@ module {
}
// Adds two sparse matrices element-wise into a new sparse matrix.
func @matrix_add(%arga: tensor<?x?xf64, #DCSR>,
func.func @matrix_add(%arga: tensor<?x?xf64, #DCSR>,
%argb: tensor<?x?xf64, #DCSR>) -> tensor<?x?xf64, #DCSR> {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
@ -85,7 +85,7 @@ module {
}
// Multiplies two sparse matrices element-wise into a new sparse matrix.
func @matrix_mul(%arga: tensor<?x?xf64, #DCSR>,
func.func @matrix_mul(%arga: tensor<?x?xf64, #DCSR>,
%argb: tensor<?x?xf64, #DCSR>) -> tensor<?x?xf64, #DCSR> {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
@ -103,7 +103,7 @@ module {
}
// Dump a sparse matrix.
func @dump(%arg0: tensor<?x?xf64, #DCSR>) {
func.func @dump(%arg0: tensor<?x?xf64, #DCSR>) {
%d0 = arith.constant 0.0 : f64
%c0 = arith.constant 0 : index
%dm = sparse_tensor.convert %arg0 : tensor<?x?xf64, #DCSR> to tensor<?x?xf64>
@ -115,7 +115,7 @@ module {
}
// Driver method to call and verify matrix kernels.
func @entry() {
func.func @entry() {
%c0 = arith.constant 0 : index
%d1 = arith.constant 1.1 : f64


@ -43,7 +43,7 @@ module {
// A kernel that multiplies a sparse matrix A with a dense vector b
// into a dense vector x.
//
func @kernel_matvec(%arga: tensor<?x?xi32, #SparseMatrix>,
func.func @kernel_matvec(%arga: tensor<?x?xi32, #SparseMatrix>,
%argb: tensor<?xi32>,
%argx: tensor<?xi32> {linalg.inplaceable = true})
-> tensor<?xi32> {
@ -58,12 +58,12 @@ module {
return %0 : tensor<?xi32>
}
func private @getTensorFilename(index) -> (!Filename)
func.func private @getTensorFilename(index) -> (!Filename)
//
// Main driver that reads matrix from file and calls the sparse kernel.
//
func @entry() {
func.func @entry() {
%i0 = arith.constant 0 : i32
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index


@ -41,7 +41,7 @@ module {
// Computes Matricized Tensor Times Khatri-Rao Product (MTTKRP) kernel. See
// http://tensor-compiler.org/docs/data_analytics/index.html.
//
func @kernel_mttkrp(%argb: tensor<?x?x?xf64, #SparseTensor>,
func.func @kernel_mttkrp(%argb: tensor<?x?x?xf64, #SparseTensor>,
%argc: tensor<?x?xf64>,
%argd: tensor<?x?xf64>,
%arga: tensor<?x?xf64> {linalg.inplaceable = true})
@ -59,12 +59,12 @@ module {
return %0 : tensor<?x?xf64>
}
func private @getTensorFilename(index) -> (!Filename)
func.func private @getTensorFilename(index) -> (!Filename)
//
// Main driver that reads matrix from file and calls the sparse kernel.
//
func @entry() {
func.func @entry() {
%f0 = arith.constant 0.0 : f64
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index


@ -20,7 +20,7 @@
module {
// Sparse kernel.
func @sparse_mult_elt(
func.func @sparse_mult_elt(
%arga: tensor<32x16xf32, #DCSR>, %argb: tensor<32x16xf32, #DCSR>) -> tensor<32x16xf32, #DCSR> {
%c16 = arith.constant 16 : index
%c32 = arith.constant 32 : index
@ -36,7 +36,7 @@ module {
}
// Driver method to call and verify kernel.
func @entry() {
func.func @entry() {
%c0 = arith.constant 0 : index
%f1 = arith.constant -1.0 : f32


@ -23,7 +23,7 @@
}
module {
func @redsum(%arga: tensor<?x?x?xi32, #SparseTensor>,
func.func @redsum(%arga: tensor<?x?x?xi32, #SparseTensor>,
%argb: tensor<?x?x?xi32, #SparseTensor>)
-> tensor<?x?xi32, #SparseMatrix> {
%c0 = arith.constant 0 : index
@ -44,7 +44,7 @@ module {
}
// Driver method to call and verify tensor kernel.
func @entry() {
func.func @entry() {
%c0 = arith.constant 0 : index
%i0 = arith.constant -1 : i32


@ -41,7 +41,7 @@ module {
// a sparse tensor as output, but although the values of the
// sparse tensor change, its nonzero structure remains the same.
//
func @kernel_eltwise_mult(%argx: tensor<?x?xf64, #DCSR> {linalg.inplaceable = true})
func.func @kernel_eltwise_mult(%argx: tensor<?x?xf64, #DCSR> {linalg.inplaceable = true})
-> tensor<?x?xf64, #DCSR> {
%0 = linalg.generic #eltwise_mult
outs(%argx: tensor<?x?xf64, #DCSR>) {
@ -52,12 +52,12 @@ module {
return %0 : tensor<?x?xf64, #DCSR>
}
func private @getTensorFilename(index) -> (!Filename)
func.func private @getTensorFilename(index) -> (!Filename)
//
// Main driver that reads matrix from file and calls the sparse kernel.
//
func @entry() {
func.func @entry() {
%d0 = arith.constant 0.0 : f64
%c0 = arith.constant 0 : index


@ -18,7 +18,7 @@
// operation.
module {
func @quantized_matmul(%input1: tensor<5x3xi8>,
func.func @quantized_matmul(%input1: tensor<5x3xi8>,
%input2: tensor<3x6xi8, #DCSR>,
%output: tensor<5x6xi32>) -> tensor<5x6xi32> {
%c0 = arith.constant 0 : i32
@ -29,7 +29,7 @@ module {
return %0: tensor<5x6xi32>
}
func @entry() {
func.func @entry() {
%c0 = arith.constant 0 : index
%i0 = arith.constant 0 : i32


@ -25,7 +25,7 @@
// An example of vector reductions.
module {
func @sum_reduction_i32(%arga: tensor<32xi32, #SV>,
func.func @sum_reduction_i32(%arga: tensor<32xi32, #SV>,
%argx: tensor<i32>) -> tensor<i32> {
%0 = linalg.generic #trait_reduction
ins(%arga: tensor<32xi32, #SV>)
@ -37,7 +37,7 @@ module {
return %0 : tensor<i32>
}
func @sum_reduction_f32(%arga: tensor<32xf32, #SV>,
func.func @sum_reduction_f32(%arga: tensor<32xf32, #SV>,
%argx: tensor<f32>) -> tensor<f32> {
%0 = linalg.generic #trait_reduction
ins(%arga: tensor<32xf32, #SV>)
@ -49,7 +49,7 @@ module {
return %0 : tensor<f32>
}
func @prod_reduction_i32(%arga: tensor<32xi32, #DV>,
func.func @prod_reduction_i32(%arga: tensor<32xi32, #DV>,
%argx: tensor<i32>) -> tensor<i32> {
%0 = linalg.generic #trait_reduction
ins(%arga: tensor<32xi32, #DV>)
@ -61,7 +61,7 @@ module {
return %0 : tensor<i32>
}
func @prod_reduction_f32(%arga: tensor<32xf32, #DV>,
func.func @prod_reduction_f32(%arga: tensor<32xf32, #DV>,
%argx: tensor<f32>) -> tensor<f32> {
%0 = linalg.generic #trait_reduction
ins(%arga: tensor<32xf32, #DV>)
@ -73,7 +73,7 @@ module {
return %0 : tensor<f32>
}
func @and_reduction_i32(%arga: tensor<32xi32, #DV>,
func.func @and_reduction_i32(%arga: tensor<32xi32, #DV>,
%argx: tensor<i32>) -> tensor<i32> {
%0 = linalg.generic #trait_reduction
ins(%arga: tensor<32xi32, #DV>)
@ -85,7 +85,7 @@ module {
return %0 : tensor<i32>
}
func @or_reduction_i32(%arga: tensor<32xi32, #SV>,
func.func @or_reduction_i32(%arga: tensor<32xi32, #SV>,
%argx: tensor<i32>) -> tensor<i32> {
%0 = linalg.generic #trait_reduction
ins(%arga: tensor<32xi32, #SV>)
@ -97,7 +97,7 @@ module {
return %0 : tensor<i32>
}
func @xor_reduction_i32(%arga: tensor<32xi32, #SV>,
func.func @xor_reduction_i32(%arga: tensor<32xi32, #SV>,
%argx: tensor<i32>) -> tensor<i32> {
%0 = linalg.generic #trait_reduction
ins(%arga: tensor<32xi32, #SV>)
@ -109,19 +109,19 @@ module {
return %0 : tensor<i32>
}
func @dump_i32(%arg0 : memref<i32>) {
func.func @dump_i32(%arg0 : memref<i32>) {
%v = memref.load %arg0[] : memref<i32>
vector.print %v : i32
return
}
func @dump_f32(%arg0 : memref<f32>) {
func.func @dump_f32(%arg0 : memref<f32>) {
%v = memref.load %arg0[] : memref<f32>
vector.print %v : f32
return
}
func @entry() {
func.func @entry() {
%ri = arith.constant dense< 7 > : tensor<i32>
%rf = arith.constant dense< 2.0 > : tensor<f32>


@ -44,7 +44,7 @@ module {
//
// A kernel that computes a sampled matrix matrix multiplication.
//
func @sampled_dense_dense(%args: tensor<?x?xf32, #SparseMatrix>,
func.func @sampled_dense_dense(%args: tensor<?x?xf32, #SparseMatrix>,
%arga: tensor<?x?xf32>,
%argb: tensor<?x?xf32>,
%argx: tensor<?x?xf32> {linalg.inplaceable = true}) -> tensor<?x?xf32> {
@ -60,12 +60,12 @@ module {
return %0 : tensor<?x?xf32>
}
func private @getTensorFilename(index) -> (!Filename)
func.func private @getTensorFilename(index) -> (!Filename)
//
// Main driver that reads matrix from file and calls the sparse kernel.
//
func @entry() {
func.func @entry() {
%d0 = arith.constant 0.0 : f32
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index


@ -49,7 +49,7 @@ module {
// A kernel that computes a direct sampled matrix matrix multiplication
// (with dense result).
//
func @sampled_dd(%args: tensor<8x8xf64, #SM>,
func.func @sampled_dd(%args: tensor<8x8xf64, #SM>,
%arga: tensor<8x8xf64>,
%argb: tensor<8x8xf64>) -> tensor<8x8xf64> {
%1 = arith.constant dense<0.0> : tensor<8x8xf64>
@ -70,7 +70,7 @@ module {
// A kernel that computes an unfused sampled matrix matrix multiplication
// (with dense result).
//
func @sampled_dd_unfused(%args: tensor<8x8xf64, #SM>,
func.func @sampled_dd_unfused(%args: tensor<8x8xf64, #SM>,
%arga: tensor<8x8xf64>,
%argb: tensor<8x8xf64>) -> tensor<8x8xf64> {
// Perform dense-dense matrix matrix multiplication.
@ -98,7 +98,7 @@ module {
// A kernel that computes a direct sampled matrix matrix multiplication
// (with sparse result).
//
func @sparse_sampled_dd(%args: tensor<8x8xf64, #SM>,
func.func @sparse_sampled_dd(%args: tensor<8x8xf64, #SM>,
%arga: tensor<8x8xf64>,
%argb: tensor<8x8xf64>) -> tensor<8x8xf64, #SM> {
%c8 = arith.constant 8 : index
@ -120,7 +120,7 @@ module {
// A kernel that computes an unfused sampled matrix matrix multiplication
// (with sparse result).
//
func @sparse_sampled_dd_unfused(
func.func @sparse_sampled_dd_unfused(
%args: tensor<8x8xf64, #SM>,
%arga: tensor<8x8xf64>,
%argb: tensor<8x8xf64>) -> tensor<8x8xf64, #SM> {
@ -150,7 +150,7 @@ module {
//
// Main driver.
//
func @entry() {
func.func @entry() {
%d0 = arith.constant 0.0 : f64
%c0 = arith.constant 0 : index


@ -31,7 +31,7 @@ module {
//
// A kernel that scales a sparse matrix A by a factor of 2.0.
//
func @sparse_scale(%argx: tensor<8x8xf32, #CSR>
func.func @sparse_scale(%argx: tensor<8x8xf32, #CSR>
{linalg.inplaceable = true}) -> tensor<8x8xf32, #CSR> {
%c = arith.constant 2.0 : f32
%0 = linalg.generic #trait_scale
@ -48,7 +48,7 @@ module {
// and then calls the sparse scaling kernel with the sparse tensor
// as input argument.
//
func @entry() {
func.func @entry() {
%c0 = arith.constant 0 : index
%f0 = arith.constant 0.0 : f32


@ -40,7 +40,7 @@ module {
// A kernel that multiplies a sparse matrix A with a dense matrix B
// into a dense matrix X.
//
func @kernel_spmm(%arga: tensor<?x?xf64, #SparseMatrix>,
func.func @kernel_spmm(%arga: tensor<?x?xf64, #SparseMatrix>,
%argb: tensor<?x?xf64>,
%argx: tensor<?x?xf64> {linalg.inplaceable = true}) -> tensor<?x?xf64> {
%0 = linalg.generic #spmm
@ -54,12 +54,12 @@ module {
return %0 : tensor<?x?xf64>
}
func private @getTensorFilename(index) -> (!Filename)
func.func private @getTensorFilename(index) -> (!Filename)
//
// Main driver that reads matrix from file and calls the sparse kernel.
//
func @entry() {
func.func @entry() {
%i0 = arith.constant 0.0 : f64
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index


@ -49,7 +49,7 @@ module {
// are typically not concerned with such details, but the test ensures
// everything is working "under the hood".
//
func @entry() {
func.func @entry() {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
%d0 = arith.constant 0.0 : f64


@ -38,7 +38,7 @@ module {
//
// A kernel that sum-reduces a matrix to a single scalar.
//
func @kernel_sum_reduce(%arga: tensor<?x?xf64, #SparseMatrix>,
func.func @kernel_sum_reduce(%arga: tensor<?x?xf64, #SparseMatrix>,
%argx: tensor<f64> {linalg.inplaceable = true}) -> tensor<f64> {
%0 = linalg.generic #trait_sum_reduce
ins(%arga: tensor<?x?xf64, #SparseMatrix>)
@ -50,12 +50,12 @@ module {
return %0 : tensor<f64>
}
func private @getTensorFilename(index) -> (!Filename)
func.func private @getTensorFilename(index) -> (!Filename)
//
// Main driver that reads matrix from file and calls the sparse kernel.
//
func @entry() {
func.func @entry() {
%d0 = arith.constant 0.0 : f64
%c0 = arith.constant 0 : index


@ -21,7 +21,7 @@
module {
// Scales a sparse tensor into a new sparse tensor.
func @tensor_scale(%arga: tensor<?x?x?xf64, #ST1>) -> tensor<?x?x?xf64, #ST2> {
func.func @tensor_scale(%arga: tensor<?x?x?xf64, #ST1>) -> tensor<?x?x?xf64, #ST2> {
%s = arith.constant 2.0 : f64
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
@ -41,7 +41,7 @@ module {
}
// Driver method to call and verify tensor kernel.
func @entry() {
func.func @entry() {
%c0 = arith.constant 0 : index
%d1 = arith.constant -1.0 : f64


@ -29,7 +29,7 @@ module {
// the iteration graph. This can be avoided by converting the incoming
// matrix into a sparse column-wise matrix first.
//
func @sparse_transpose(%arga: tensor<3x4xf64, #DCSR>) -> tensor<4x3xf64, #DCSR> {
func.func @sparse_transpose(%arga: tensor<3x4xf64, #DCSR>) -> tensor<4x3xf64, #DCSR> {
%t = sparse_tensor.convert %arga : tensor<3x4xf64, #DCSR> to tensor<3x4xf64, #DCSC>
%c3 = arith.constant 3 : index
@ -51,7 +51,7 @@ module {
//
// Main driver.
//
func @entry() {
func.func @entry() {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
%c4 = arith.constant 4 : index


@ -46,7 +46,7 @@
module {
// Scales a sparse vector into a new sparse vector.
func @vector_scale(%arga: tensor<?xf64, #SparseVector>) -> tensor<?xf64, #SparseVector> {
func.func @vector_scale(%arga: tensor<?xf64, #SparseVector>) -> tensor<?xf64, #SparseVector> {
%s = arith.constant 2.0 : f64
%c = arith.constant 0 : index
%d = tensor.dim %arga, %c : tensor<?xf64, #SparseVector>
@ -62,7 +62,7 @@ module {
}
// Scales a sparse vector in place.
func @vector_scale_inplace(%argx: tensor<?xf64, #SparseVector>
func.func @vector_scale_inplace(%argx: tensor<?xf64, #SparseVector>
{linalg.inplaceable = true}) -> tensor<?xf64, #SparseVector> {
%s = arith.constant 2.0 : f64
%0 = linalg.generic #trait_scale_inpl
@ -75,7 +75,7 @@ module {
}
// Adds two sparse vectors into a new sparse vector.
func @vector_add(%arga: tensor<?xf64, #SparseVector>,
func.func @vector_add(%arga: tensor<?xf64, #SparseVector>,
%argb: tensor<?xf64, #SparseVector>) -> tensor<?xf64, #SparseVector> {
%c = arith.constant 0 : index
%d = tensor.dim %arga, %c : tensor<?xf64, #SparseVector>
@ -91,7 +91,7 @@ module {
}
// Multiplies two sparse vectors into a new sparse vector.
func @vector_mul(%arga: tensor<?xf64, #SparseVector>,
func.func @vector_mul(%arga: tensor<?xf64, #SparseVector>,
%argb: tensor<?xf64, #SparseVector>) -> tensor<?xf64, #SparseVector> {
%c = arith.constant 0 : index
%d = tensor.dim %arga, %c : tensor<?xf64, #SparseVector>
@ -107,7 +107,7 @@ module {
}
// Multiplies two sparse vectors into a new "annotated" dense vector.
func @vector_mul_d(%arga: tensor<?xf64, #SparseVector>,
func.func @vector_mul_d(%arga: tensor<?xf64, #SparseVector>,
%argb: tensor<?xf64, #SparseVector>) -> tensor<?xf64, #DenseVector> {
%c = arith.constant 0 : index
%d = tensor.dim %arga, %c : tensor<?xf64, #SparseVector>
@ -123,7 +123,7 @@ module {
}
// Sum reduces dot product of two sparse vectors.
func @vector_dotprod(%arga: tensor<?xf64, #SparseVector>,
func.func @vector_dotprod(%arga: tensor<?xf64, #SparseVector>,
%argb: tensor<?xf64, #SparseVector>,
%argx: tensor<f64> {linalg.inplaceable = true}) -> tensor<f64> {
%0 = linalg.generic #trait_dot
@ -138,7 +138,7 @@ module {
}
// Dumps a sparse vector.
func @dump(%arg0: tensor<?xf64, #SparseVector>) {
func.func @dump(%arg0: tensor<?xf64, #SparseVector>) {
// Dump the values array to verify only sparse contents are stored.
%c0 = arith.constant 0 : index
%d0 = arith.constant -1.0 : f64
@ -155,7 +155,7 @@ module {
}
// Driver method to call and verify vector kernels.
func @entry() {
func.func @entry() {
%c0 = arith.constant 0 : index
%d1 = arith.constant 1.1 : f64


@ -3,7 +3,7 @@
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s
func @transfer_read_2d(%A : memref<40xi32>, %base1: index) {
func.func @transfer_read_2d(%A : memref<40xi32>, %base1: index) {
%i42 = arith.constant -42: i32
%f = vector.transfer_read %A[%base1], %i42
{permutation_map = affine_map<(d0) -> (d0)>} :
@ -12,7 +12,7 @@ func @transfer_read_2d(%A : memref<40xi32>, %base1: index) {
return
}
func @entry() {
func.func @entry() {
%c0 = arith.constant 0: index
%c20 = arith.constant 20: i32
%c10 = arith.constant 10: i32


@ -9,7 +9,7 @@ dense<[[0.0, 1.0, 2.0],
[9.0, 10.0, 11.0],
[12.0, 13.0, 14.0]]>
func @main() {
func.func @main() {
%0 = memref.get_global @__constant_5x3xf32 : memref<5x3xf32>
/// Subview with only leading operands.
@ -60,4 +60,4 @@ func @main() {
return
}
func private @print_memref_f32(%ptr : memref<*xf32>)
func.func private @print_memref_f32(%ptr : memref<*xf32>)


@ -9,7 +9,7 @@
// Note: To run this test, your CPU must support AMX.
// Multiply full size tiles into zero destination.
func @kernel(%arg0: memref<16x32xbf16>,
func.func @kernel(%arg0: memref<16x32xbf16>,
%arg1: memref<16x32xbf16>,
%arg2: memref<16x16xf32>) {
%0 = arith.constant 0 : index
@ -21,7 +21,7 @@ func @kernel(%arg0: memref<16x32xbf16>,
return
}
func @entry() -> i32 {
func.func @entry() -> i32 {
%fu = arith.constant -1.0: f32
%c0 = arith.constant 0: index
%c1 = arith.constant 1: index


@ -6,7 +6,7 @@
// Note: To run this test, your CPU must support AMX.
// Multiply into zeroed destination.
func @kernel1(%arg0: memref<2x4xbf16>,
func.func @kernel1(%arg0: memref<2x4xbf16>,
%arg1: memref<2x4xbf16>,
%arg2: memref<2x2xf32>) {
%0 = arith.constant 0 : index
@ -19,7 +19,7 @@ func @kernel1(%arg0: memref<2x4xbf16>,
}
// Multiply and update into destination.
func @kernel2(%arg0: memref<2x4xbf16>,
func.func @kernel2(%arg0: memref<2x4xbf16>,
%arg1: memref<2x4xbf16>,
%arg2: memref<2x2xf32>) {
%0 = arith.constant 0 : index
@ -31,7 +31,7 @@ func @kernel2(%arg0: memref<2x4xbf16>,
return
}
func @entry() -> i32 {
func.func @entry() -> i32 {
%f0 = arith.constant 0.0: f32
%c0 = arith.constant 0: index
%c1 = arith.constant 1: index


@ -5,7 +5,7 @@
// Note: To run this test, your CPU must support AMX.
func @print(%arg0: memref<16x4xi32>) {
func.func @print(%arg0: memref<16x4xi32>) {
%iu = arith.constant -1: i32
%c0 = arith.constant 0: index
%c1 = arith.constant 1: index
@ -17,7 +17,7 @@ func @print(%arg0: memref<16x4xi32>) {
return
}
func @kernel1(%arg0: memref<16x16xi8>,
func.func @kernel1(%arg0: memref<16x16xi8>,
%arg1: memref<4x16xi8>,
%arg2: memref<16x4xi32>) {
%0 = arith.constant 0 : index
@ -29,7 +29,7 @@ func @kernel1(%arg0: memref<16x16xi8>,
return
}
func @kernel2(%arg0: memref<16x16xi8>,
func.func @kernel2(%arg0: memref<16x16xi8>,
%arg1: memref<4x16xi8>,
%arg2: memref<16x4xi32>) {
%0 = arith.constant 0 : index
@ -41,7 +41,7 @@ func @kernel2(%arg0: memref<16x16xi8>,
return
}
func @kernel3(%arg0: memref<16x16xi8>,
func.func @kernel3(%arg0: memref<16x16xi8>,
%arg1: memref<4x16xi8>,
%arg2: memref<16x4xi32>) {
%0 = arith.constant 0 : index
@ -53,7 +53,7 @@ func @kernel3(%arg0: memref<16x16xi8>,
return
}
func @kernel4(%arg0: memref<16x16xi8>,
func.func @kernel4(%arg0: memref<16x16xi8>,
%arg1: memref<4x16xi8>,
%arg2: memref<16x4xi32>) {
%0 = arith.constant 0 : index
@ -65,7 +65,7 @@ func @kernel4(%arg0: memref<16x16xi8>,
return
}
func @entry() -> i32 {
func.func @entry() -> i32 {
%c0 = arith.constant 0: index
// Set up memory.

View File

@ -9,7 +9,7 @@
// Note: To run this test, your CPU must support AMX.
// Multiply full size tiles into zero destination.
func @kernel(%arg0: memref<16x64xi8>,
func.func @kernel(%arg0: memref<16x64xi8>,
%arg1: memref<16x64xi8>,
%arg2: memref<16x16xi32>) {
%0 = arith.constant 0 : index
@ -21,7 +21,7 @@ func @kernel(%arg0: memref<16x64xi8>,
return
}
func @entry() -> i32 {
func.func @entry() -> i32 {
%iu = arith.constant -1: i32
%c0 = arith.constant 0: index
%c1 = arith.constant 1: index

View File

@ -6,7 +6,7 @@
// Note: To run this test, your CPU must support AMX.
// Multiply into zeroed destination.
func @kernel1(%arg0: memref<2x8xi8>,
func.func @kernel1(%arg0: memref<2x8xi8>,
%arg1: memref<2x8xi8>,
%arg2: memref<2x2xi32>) {
%0 = arith.constant 0 : index
@ -19,7 +19,7 @@ func @kernel1(%arg0: memref<2x8xi8>,
}
// Multiply and update into destination.
func @kernel2(%arg0: memref<2x8xi8>,
func.func @kernel2(%arg0: memref<2x8xi8>,
%arg1: memref<2x8xi8>,
%arg2: memref<2x2xi32>) {
%0 = arith.constant 0 : index
@ -31,7 +31,7 @@ func @kernel2(%arg0: memref<2x8xi8>,
return
}
func @entry() -> i32 {
func.func @entry() -> i32 {
%i0 = arith.constant 0: i32
%c0 = arith.constant 0: index
%c1 = arith.constant 1: index

View File

@ -5,7 +5,7 @@
// Note: To run this test, your CPU must support AMX.
func @print(%arg0: memref<4x32xf32>) {
func.func @print(%arg0: memref<4x32xf32>) {
%fu = arith.constant -1.0: f32
%c0 = arith.constant 0: index
%c1 = arith.constant 1: index
@ -17,7 +17,7 @@ func @print(%arg0: memref<4x32xf32>) {
return
}
func @kernel(%arg0: memref<4x32xf32>) {
func.func @kernel(%arg0: memref<4x32xf32>) {
%c0 = arith.constant 0: index
%c2 = arith.constant 2 : index
%c4 = arith.constant 4 : index
@ -33,7 +33,7 @@ func @kernel(%arg0: memref<4x32xf32>) {
return
}
func @entry() -> i32 {
func.func @entry() -> i32 {
%f1 = arith.constant 1.0: f32
%c0 = arith.constant 0: index
%c1 = arith.constant 1: index

View File

@ -5,13 +5,13 @@
// Note: To run this test, your CPU must support AMX.
func @tilezero(%arg0: memref<?x?xi32>, %i: index, %j: index) {
func.func @tilezero(%arg0: memref<?x?xi32>, %i: index, %j: index) {
%1 = amx.tile_zero : vector<16x16xi32>
amx.tile_store %arg0[%i, %j], %1 : memref<?x?xi32>, vector<16x16xi32>
return
}
func @entry() -> i32 {
func.func @entry() -> i32 {
%i0 = arith.constant 0: i32
%i1 = arith.constant 1: i32
%c0 = arith.constant 0: index

View File

@ -6,7 +6,7 @@
// Note: To run this test, your CPU must support SVE
// VLA memcopy
func @kernel_copy(%src : memref<?xi64>, %dst : memref<?xi64>, %size : index) {
func.func @kernel_copy(%src : memref<?xi64>, %dst : memref<?xi64>, %size : index) {
%c0 = arith.constant 0 : index
%c2 = arith.constant 2 : index
%vs = vector.vscale
@ -20,7 +20,7 @@ func @kernel_copy(%src : memref<?xi64>, %dst : memref<?xi64>, %size : index) {
}
// VLA multiply and add
func @kernel_muladd(%a : memref<?xi64>,
func.func @kernel_muladd(%a : memref<?xi64>,
%b : memref<?xi64>,
%c : memref<?xi64>,
%size : index) {
@ -40,7 +40,7 @@ func @kernel_muladd(%a : memref<?xi64>,
}
// SVE-based absolute difference
func @kernel_absdiff(%a : memref<?xi64>,
func.func @kernel_absdiff(%a : memref<?xi64>,
%b : memref<?xi64>,
%c : memref<?xi64>,
%size : index) {
@ -68,7 +68,7 @@ func @kernel_absdiff(%a : memref<?xi64>,
}
// VLA unknown bounds vector addition
func @kernel_addition(%a : memref<?xf32>,
func.func @kernel_addition(%a : memref<?xf32>,
%b : memref<?xf32>,
%c : memref<?xf32>,
%N : index) {
@ -88,7 +88,7 @@ func @kernel_addition(%a : memref<?xf32>,
return
}
func @entry() -> i32 {
func.func @entry() -> i32 {
%i0 = arith.constant 0: i64
%i1 = arith.constant 1: i64
%r0 = arith.constant 0: i32
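// The VLA kernels above step their loops by a multiple of vector.vscale, so
// one binary adapts to whatever SVE register width the hardware provides. A
// minimal sketch of that pattern follows; the function is hypothetical and
// assumes the trip count divides evenly into the step.
func.func @vla_copy(%src : memref<?xf32>, %dst : memref<?xf32>, %n : index) {
  %c0 = arith.constant 0 : index
  %c4 = arith.constant 4 : index
  // Runtime multiple of the 128-bit SVE granule; 4 f32 lanes per granule.
  %vs = vector.vscale
  %step = arith.muli %vs, %c4 : index
  scf.for %i = %c0 to %n step %step {
    %v = vector.load %src[%i] : memref<?xf32>, vector<[4]xf32>
    vector.store %v, %dst[%i] : memref<?xf32>, vector<[4]xf32>
  }
  return
}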

View File

@ -3,7 +3,7 @@
// RUN: %lli --entry-function=entry --mattr="avx" --dlopen=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s
func @entry() -> i32 {
func.func @entry() -> i32 {
%i0 = arith.constant 0 : i32
%i4 = arith.constant 4 : i32

View File

@ -3,7 +3,7 @@
// RUN: %lli --entry-function=entry --mattr="avx512bw" --dlopen=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s
func @entry() -> i32 {
func.func @entry() -> i32 {
%i0 = arith.constant 0 : i32
%a = arith.constant dense<[1., 0., 0., 2., 4., 3., 5., 7., 8., 1., 5., 5., 3., 1., 0., 7.]> : vector<16xf32>

View File

@ -3,7 +3,7 @@
// RUN: %lli --entry-function=entry --mattr="avx" --dlopen=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s
func @entry() -> i32 {
func.func @entry() -> i32 {
%i0 = arith.constant 0 : i32
%v = arith.constant dense<[0.125, 0.25, 0.5, 1.0, 2.0, 4.0, 8.0, 16.0]> : vector<8xf32>

View File

@ -32,7 +32,7 @@
}
// Sparse vector dot product of two vectors.
func @vector_dot(%v_A : vector<8xi64>, %v_B : vector<8xf64>,
func.func @vector_dot(%v_A : vector<8xi64>, %v_B : vector<8xf64>,
%v_C : vector<8xi64>, %v_D : vector<8xf64>) -> f64 {
// Compute intersection of indices.
%k0, %k1 = x86vector.avx512.vp2intersect %v_A, %v_C : vector<8xi64>
@ -51,7 +51,7 @@ func @vector_dot(%v_A : vector<8xi64>, %v_B : vector<8xf64>,
// Fill input memrefs with all zeros, so that they can be used with arbitrary
// input sizes up to 128 elements per sparse vector.
func @init_input(%m_A : memref<?xi64>, %m_B : memref<?xf64>,
func.func @init_input(%m_A : memref<?xi64>, %m_B : memref<?xf64>,
%m_C : memref<?xi64>, %m_D : memref<?xf64>) {
%c0 = arith.constant 0 : index
%v_data = arith.constant dense<0.0> : vector<128xf64>
@ -65,7 +65,7 @@ func @init_input(%m_A : memref<?xi64>, %m_B : memref<?xf64>,
return
}
func @fill_input_1(%m_A : memref<?xi64>, %m_B : memref<?xf64>,
func.func @fill_input_1(%m_A : memref<?xi64>, %m_B : memref<?xf64>,
%m_C : memref<?xi64>, %m_D : memref<?xf64>)
-> (index, index){
call @init_input(%m_A, %m_B, %m_C, %m_D)
@ -95,7 +95,7 @@ func @fill_input_1(%m_A : memref<?xi64>, %m_B : memref<?xf64>,
return %M, %N : index, index
}
func @fill_input_2(%m_A : memref<?xi64>, %m_B : memref<?xf64>,
func.func @fill_input_2(%m_A : memref<?xi64>, %m_B : memref<?xf64>,
%m_C : memref<?xi64>, %m_D : memref<?xf64>)
-> (index, index){
call @init_input(%m_A, %m_B, %m_C, %m_D)
@ -131,7 +131,7 @@ func @fill_input_2(%m_A : memref<?xi64>, %m_B : memref<?xf64>,
// Simple vector dot product implementation: Intersect every segment of size 8
// in (%m_A, %m_B) with every segment of size 8 in (%m_C, %m_D).
func @memref_dot_simple(%m_A : memref<?xi64>, %m_B : memref<?xf64>,
func.func @memref_dot_simple(%m_A : memref<?xi64>, %m_B : memref<?xf64>,
%m_C : memref<?xi64>, %m_D : memref<?xf64>,
%M : index, %N : index)
-> f64 {
@ -174,7 +174,7 @@ func @memref_dot_simple(%m_A : memref<?xi64>, %m_B : memref<?xf64>,
// that indices in %m_A and %m_C are sorted in ascending order, skip over
// segments in (%m_C, %m_D) that are known to have no intersection with the
// current segment from (%m_A, %m_B).
func @memref_dot_optimized(%m_A : memref<?xi64>, %m_B : memref<?xf64>,
func.func @memref_dot_optimized(%m_A : memref<?xi64>, %m_B : memref<?xf64>,
%m_C : memref<?xi64>, %m_D : memref<?xf64>,
%M : index, %N : index)
-> f64 {
@ -245,7 +245,7 @@ func @memref_dot_optimized(%m_A : memref<?xi64>, %m_B : memref<?xf64>,
// else a += 8, b += 8
// }
// }
func @memref_dot_while(%m_A : memref<?xi64>, %m_B : memref<?xf64>,
func.func @memref_dot_while(%m_A : memref<?xi64>, %m_B : memref<?xf64>,
%m_C : memref<?xi64>, %m_D : memref<?xf64>,
%M : index, %N : index)
-> f64 {
@ -334,7 +334,7 @@ func @memref_dot_while(%m_A : memref<?xi64>, %m_B : memref<?xf64>,
// a += (segA[7] <= segB[7]) * 8
// b += (segB[7] <= segA[7]) * 8
// }
func @memref_dot_while_branchless(%m_A : memref<?xi64>, %m_B : memref<?xf64>,
func.func @memref_dot_while_branchless(%m_A : memref<?xi64>, %m_B : memref<?xf64>,
%m_C : memref<?xi64>, %m_D : memref<?xf64>,
%M : index, %N : index)
-> f64 {
@ -391,7 +391,7 @@ func @memref_dot_while_branchless(%m_A : memref<?xi64>, %m_B : memref<?xf64>,
return %r0 : f64
}
func @entry() -> i32 {
func.func @entry() -> i32 {
// Initialize large buffers that can be used for multiple test cases of
// different sizes.
%b_A = memref.alloc() : memref<128xi64>
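// After x86vector.avx512.vp2intersect produces the two lane masks, each
// kernel above multiplies the value vectors and keeps only the intersecting
// lanes before a horizontal add. A sketch of that select step under one of
// the masks follows; it is illustrative, not the elided body of the kernels.
func.func @apply_intersection_mask(%k : vector<8xi1>, %b : vector<8xf64>,
                                   %d : vector<8xf64>) -> vector<8xf64> {
  %zero = arith.constant dense<0.0> : vector<8xf64>
  // Elementwise products of the two segments of values.
  %prod = arith.mulf %b, %d : vector<8xf64>
  // Zero out lanes whose indices did not match in both sparse operands;
  // a horizontal add of the result gives the partial dot product.
  %sel = arith.select %k, %prod, %zero : vector<8xi1>, vector<8xf64>
  return %sel : vector<8xf64>
}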

View File

@ -5,7 +5,7 @@
// Note: To run this test, your CPU must support AVX512 vp2intersect.
func @entry() -> i32 {
func.func @entry() -> i32 {
%i0 = arith.constant 0 : i32
%i1 = arith.constant 1: i32
%i2 = arith.constant 2: i32

View File

@ -3,32 +3,32 @@
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s
func @extract_element_0d(%a: vector<f32>) {
func.func @extract_element_0d(%a: vector<f32>) {
%1 = vector.extractelement %a[] : vector<f32>
// CHECK: 42
vector.print %1: f32
return
}
func @insert_element_0d(%a: f32, %b: vector<f32>) -> (vector<f32>) {
func.func @insert_element_0d(%a: f32, %b: vector<f32>) -> (vector<f32>) {
%1 = vector.insertelement %a, %b[] : vector<f32>
return %1: vector<f32>
}
func @print_vector_0d(%a: vector<f32>) {
func.func @print_vector_0d(%a: vector<f32>) {
// CHECK: ( 42 )
vector.print %a: vector<f32>
return
}
func @splat_0d(%a: f32) {
func.func @splat_0d(%a: f32) {
%1 = vector.splat %a : vector<f32>
// CHECK: ( 42 )
vector.print %1: vector<f32>
return
}
func @broadcast_0d(%a: f32) {
func.func @broadcast_0d(%a: f32) {
%1 = vector.broadcast %a : f32 to vector<f32>
// CHECK: ( 42 )
vector.print %1: vector<f32>
@ -55,7 +55,7 @@ func @broadcast_0d(%a: f32) {
return
}
func @bitcast_0d() {
func.func @bitcast_0d() {
%0 = arith.constant 42 : i32
%1 = arith.constant dense<0> : vector<i32>
%2 = vector.insertelement %0, %1[] : vector<i32>
@ -67,7 +67,7 @@ func @bitcast_0d() {
return
}
func @constant_mask_0d() {
func.func @constant_mask_0d() {
%1 = vector.constant_mask [0] : vector<i1>
// CHECK: ( 0 )
vector.print %1: vector<i1>
@ -77,7 +77,7 @@ func @constant_mask_0d() {
return
}
func @arith_cmpi_0d(%smaller : vector<i32>, %bigger : vector<i32>) {
func.func @arith_cmpi_0d(%smaller : vector<i32>, %bigger : vector<i32>) {
%0 = arith.cmpi ult, %smaller, %bigger : vector<i32>
// CHECK: ( 1 )
vector.print %0: vector<i1>
@ -93,7 +93,7 @@ func @arith_cmpi_0d(%smaller : vector<i32>, %bigger : vector<i32>) {
return
}
func @create_mask_0d(%zero : index, %one : index) {
func.func @create_mask_0d(%zero : index, %one : index) {
%zero_mask = vector.create_mask %zero : vector<i1>
// CHECK: ( 0 )
vector.print %zero_mask : vector<i1>
@ -105,7 +105,7 @@ func @create_mask_0d(%zero : index, %one : index) {
return
}
func @entry() {
func.func @entry() {
%0 = arith.constant 42.0 : f32
%1 = arith.constant dense<0.0> : vector<f32>
%2 = call @insert_element_0d(%0, %1) : (f32, vector<f32>) -> (vector<f32>)
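// The 0-D cases above combine the same few primitives; a compact round trip
// from scalar to 0-D vector and back looks like this (illustrative name).
func.func @roundtrip_0d(%f : f32) {
  // Scalar -> 0-D vector; vector.splat would work identically here.
  %v = vector.broadcast %f : f32 to vector<f32>
  // 0-D extractelement takes no position operand.
  %e = vector.extractelement %v[] : vector<f32>
  vector.print %e : f32
  return
}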

View File

@ -3,7 +3,7 @@
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s
func @entry() {
func.func @entry() {
%i = arith.constant 2147483647: i32
%l = arith.constant 9223372036854775807 : i64

View File

@ -3,7 +3,7 @@
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s
func @compress16(%base: memref<?xf32>,
func.func @compress16(%base: memref<?xf32>,
%mask: vector<16xi1>, %value: vector<16xf32>) {
%c0 = arith.constant 0: index
vector.compressstore %base[%c0], %mask, %value
@ -11,7 +11,7 @@ func @compress16(%base: memref<?xf32>,
return
}
func @compress16_at8(%base: memref<?xf32>,
func.func @compress16_at8(%base: memref<?xf32>,
%mask: vector<16xi1>, %value: vector<16xf32>) {
%c8 = arith.constant 8: index
vector.compressstore %base[%c8], %mask, %value
@ -19,7 +19,7 @@ func @compress16_at8(%base: memref<?xf32>,
return
}
func @printmem16(%A: memref<?xf32>) {
func.func @printmem16(%A: memref<?xf32>) {
%c0 = arith.constant 0: index
%c1 = arith.constant 1: index
%c16 = arith.constant 16: index
@ -36,7 +36,7 @@ func @printmem16(%A: memref<?xf32>) {
return
}
func @entry() {
func.func @entry() {
// Set up memory.
%c0 = arith.constant 0: index
%c1 = arith.constant 1: index
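// As exercised above, vector.compressstore packs the enabled lanes
// contiguously from the base index instead of storing them at their lane
// offsets. A small worked sketch with hypothetical values:
func.func @compress_demo(%buf : memref<?xf32>) {
  %c0 = arith.constant 0 : index
  %mask = arith.constant dense<[true, false, true, false]> : vector<4xi1>
  %val = arith.constant dense<[10.0, 20.0, 30.0, 40.0]> : vector<4xf32>
  // Writes buf[0] = 10 and buf[1] = 30; no other element is touched.
  vector.compressstore %buf[%c0], %mask, %val
    : memref<?xf32>, vector<4xi1>, vector<4xf32>
  return
}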

View File

@ -3,7 +3,7 @@
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s
func @entry() {
func.func @entry() {
%0 = vector.constant_mask [4] : vector<8xi1>
vector.print %0 : vector<8xi1>
// CHECK: ( 1, 1, 1, 1, 0, 0, 0, 0 )
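// vector.constant_mask fixes the enabled prefix at compile time; its runtime
// counterpart is vector.create_mask. A side-by-side sketch (illustrative):
func.func @mask_demo(%n : index) {
  // First 4 of 8 lanes enabled, known statically.
  %static = vector.constant_mask [4] : vector<8xi1>
  vector.print %static : vector<8xi1>
  // Same shape, but the enabled prefix length is a runtime value.
  %dynamic = vector.create_mask %n : vector<8xi1>
  vector.print %dynamic : vector<8xi1>
  return
}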

View File

@ -133,7 +133,7 @@
iterator_types = ["parallel", "parallel", "reduction"]
}
func @entry() {
func.func @entry() {
%f0 = arith.constant 0.0: f32
%f1 = arith.constant 1.0: f32
%f2 = arith.constant 2.0: f32
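// The trait above (two parallel iterators plus one reduction) is the classic
// matrix-multiply pattern for vector.contract. A self-contained sketch of
// such a trait in use, with illustrative shapes not taken from this diff:
#mm_accesses = [
  affine_map<(i, j, k) -> (i, k)>,
  affine_map<(i, j, k) -> (k, j)>,
  affine_map<(i, j, k) -> (i, j)>
]
#mm_trait = {
  indexing_maps = #mm_accesses,
  iterator_types = ["parallel", "parallel", "reduction"]
}
func.func @contract_demo(%a : vector<2x3xf32>, %b : vector<3x2xf32>,
                         %c : vector<2x2xf32>) -> vector<2x2xf32> {
  // %d[i][j] = %c[i][j] + sum over k of %a[i][k] * %b[k][j]
  %d = vector.contract #mm_trait %a, %b, %c
    : vector<2x3xf32>, vector<3x2xf32> into vector<2x2xf32>
  return %d : vector<2x2xf32>
}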

View File

@ -6,7 +6,7 @@
// NOTE: This is similar to test-create-mask.mlir, but with a different length,
// because the v4i1 vector specifically exposed bugs in the LLVM backend.
func @entry() {
func.func @entry() {
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index
%c2 = arith.constant 2 : index

View File

@ -3,7 +3,7 @@
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s
func @entry() {
func.func @entry() {
%cneg1 = arith.constant -1 : index
%c0 = arith.constant 0 : index
%c1 = arith.constant 1 : index

View File

@ -3,7 +3,7 @@
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s
func @expand16(%base: memref<?xf32>,
func.func @expand16(%base: memref<?xf32>,
%mask: vector<16xi1>,
%pass_thru: vector<16xf32>) -> vector<16xf32> {
%c0 = arith.constant 0: index
@ -12,7 +12,7 @@ func @expand16(%base: memref<?xf32>,
return %e : vector<16xf32>
}
func @expand16_at8(%base: memref<?xf32>,
func.func @expand16_at8(%base: memref<?xf32>,
%mask: vector<16xi1>,
%pass_thru: vector<16xf32>) -> vector<16xf32> {
%c8 = arith.constant 8: index
@ -21,7 +21,7 @@ func @expand16_at8(%base: memref<?xf32>,
return %e : vector<16xf32>
}
func @entry() {
func.func @entry() {
// Set up memory.
%c0 = arith.constant 0: index
%c1 = arith.constant 1: index

View File

@ -3,7 +3,7 @@
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s
func @entry() {
func.func @entry() {
%f0 = arith.constant 0.0: f32
%f1 = arith.constant 1.0: f32
%f2 = arith.constant 2.0: f32

View File

@ -4,7 +4,7 @@
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s
func @entry() {
func.func @entry() {
%f0 = arith.constant 0.0: f64
%f1 = arith.constant 1.0: f64
%f2 = arith.constant 2.0: f64

View File

@ -4,7 +4,7 @@
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s
func @entry() {
func.func @entry() {
%f0 = arith.constant 0.0: f64
%f1 = arith.constant 1.0: f64
%f2 = arith.constant 2.0: f64

View File

@ -3,7 +3,7 @@
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s
func @entry() {
func.func @entry() {
%f1 = arith.constant 1.0: f32
%f3 = arith.constant 3.0: f32
%f7 = arith.constant 7.0: f32

View File

@ -3,7 +3,7 @@
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s
func @gather8(%base: memref<?xf32>, %indices: vector<8xi32>,
func.func @gather8(%base: memref<?xf32>, %indices: vector<8xi32>,
%mask: vector<8xi1>, %pass_thru: vector<8xf32>) -> vector<8xf32> {
%c0 = arith.constant 0: index
%g = vector.gather %base[%c0][%indices], %mask, %pass_thru
@ -11,7 +11,7 @@ func @gather8(%base: memref<?xf32>, %indices: vector<8xi32>,
return %g : vector<8xf32>
}
func @entry() {
func.func @entry() {
// Set up memory.
%c0 = arith.constant 0: index
%c1 = arith.constant 1: index
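// vector.gather reads from arbitrary, possibly repeated offsets relative to
// the base, with masked-off lanes taking the pass-through value. A worked
// sketch with hypothetical values:
func.func @gather_demo(%base : memref<?xf32>) -> vector<4xf32> {
  %c0 = arith.constant 0 : index
  // Offsets may repeat and need not be sorted.
  %idx = arith.constant dense<[3, 0, 3, 1]> : vector<4xi32>
  %mask = arith.constant dense<[true, true, false, true]> : vector<4xi1>
  %pass = arith.constant dense<-7.0> : vector<4xf32>
  // Lane 2 is disabled, so it yields -7.0 instead of touching memory.
  %g = vector.gather %base[%c0][%idx], %mask, %pass
    : memref<?xf32>, vector<4xi32>, vector<4xi1>, vector<4xf32> into vector<4xf32>
  return %g : vector<4xf32>
}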

View File

@ -3,7 +3,7 @@
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s
func @entry() {
func.func @entry() {
%c0 = arith.constant dense<[0, 1, 2, 3]>: vector<4xindex>
%c1 = arith.constant dense<[0, 1]>: vector<2xindex>
%c2 = arith.constant 2 : index

View File

@ -3,7 +3,7 @@
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s
func @entry() {
func.func @entry() {
%f1 = arith.constant 1.0: f32
%f2 = arith.constant 2.0: f32
%f3 = arith.constant 3.0: f32

View File

@ -3,7 +3,7 @@
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s
func @maskedload16(%base: memref<?xf32>, %mask: vector<16xi1>,
func.func @maskedload16(%base: memref<?xf32>, %mask: vector<16xi1>,
%pass_thru: vector<16xf32>) -> vector<16xf32> {
%c0 = arith.constant 0: index
%ld = vector.maskedload %base[%c0], %mask, %pass_thru
@ -11,7 +11,7 @@ func @maskedload16(%base: memref<?xf32>, %mask: vector<16xi1>,
return %ld : vector<16xf32>
}
func @maskedload16_at8(%base: memref<?xf32>, %mask: vector<16xi1>,
func.func @maskedload16_at8(%base: memref<?xf32>, %mask: vector<16xi1>,
%pass_thru: vector<16xf32>) -> vector<16xf32> {
%c8 = arith.constant 8: index
%ld = vector.maskedload %base[%c8], %mask, %pass_thru
@ -19,7 +19,7 @@ func @maskedload16_at8(%base: memref<?xf32>, %mask: vector<16xi1>,
return %ld : vector<16xf32>
}
func @entry() {
func.func @entry() {
// Set up memory.
%c0 = arith.constant 0: index
%c1 = arith.constant 1: index
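// vector.maskedload is the contiguous cousin of gather: disabled lanes never
// touch memory and take the pass-through values instead. A minimal sketch
// with hypothetical values:
func.func @maskedload_demo(%base : memref<?xf32>) -> vector<4xf32> {
  %c0 = arith.constant 0 : index
  %mask = arith.constant dense<[true, true, false, false]> : vector<4xi1>
  %pass = arith.constant dense<-7.0> : vector<4xf32>
  // Result: ( base[0], base[1], -7, -7 ).
  %ld = vector.maskedload %base[%c0], %mask, %pass
    : memref<?xf32>, vector<4xi1>, vector<4xf32> into vector<4xf32>
  return %ld : vector<4xf32>
}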

View File

@ -3,7 +3,7 @@
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s
func @maskedstore16(%base: memref<?xf32>,
func.func @maskedstore16(%base: memref<?xf32>,
%mask: vector<16xi1>, %value: vector<16xf32>) {
%c0 = arith.constant 0: index
vector.maskedstore %base[%c0], %mask, %value
@ -11,7 +11,7 @@ func @maskedstore16(%base: memref<?xf32>,
return
}
func @maskedstore16_at8(%base: memref<?xf32>,
func.func @maskedstore16_at8(%base: memref<?xf32>,
%mask: vector<16xi1>, %value: vector<16xf32>) {
%c8 = arith.constant 8: index
vector.maskedstore %base[%c8], %mask, %value
@ -19,7 +19,7 @@ func @maskedstore16_at8(%base: memref<?xf32>,
return
}
func @printmem16(%A: memref<?xf32>) {
func.func @printmem16(%A: memref<?xf32>) {
%c0 = arith.constant 0: index
%c1 = arith.constant 1: index
%c16 = arith.constant 16: index
@ -36,7 +36,7 @@ func @printmem16(%A: memref<?xf32>) {
return
}
func @entry() {
func.func @entry() {
// Set up memory.
%f0 = arith.constant 0.0: f32
%c0 = arith.constant 0: index
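// Unlike vector.compressstore, vector.maskedstore keeps every enabled lane
// at its own offset and leaves the disabled lanes' memory untouched. A small
// sketch with hypothetical values:
func.func @maskedstore_demo(%buf : memref<?xf32>) {
  %c0 = arith.constant 0 : index
  %mask = arith.constant dense<[true, false, true, false]> : vector<4xi1>
  %val = arith.constant dense<[10.0, 20.0, 30.0, 40.0]> : vector<4xf32>
  // Writes buf[0] = 10 and buf[2] = 30; buf[1] and buf[3] are untouched.
  vector.maskedstore %buf[%c0], %mask, %val
    : memref<?xf32>, vector<4xi1>, vector<4xf32>
  return
}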

View File

@ -4,7 +4,7 @@
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s
func @entry() {
func.func @entry() {
%f0 = arith.constant 0.0: f64
%f1 = arith.constant 1.0: f64
%f2 = arith.constant 2.0: f64

View File

@ -4,7 +4,7 @@
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s
func @entry() {
func.func @entry() {
%f0 = arith.constant 0.0: f64
%f1 = arith.constant 1.0: f64
%f2 = arith.constant 2.0: f64

View File

@ -13,7 +13,7 @@
!vector_type_R = type vector<7xf32>
func @vector_outerproduct_splat_8x8(%fa: f32, %fb: f32, %fc: f32) -> !vector_type_C {
func.func @vector_outerproduct_splat_8x8(%fa: f32, %fb: f32, %fc: f32) -> !vector_type_C {
%a = vector.splat %fa: !vector_type_A
%b = vector.splat %fb: !vector_type_B
%c = vector.splat %fc: !vector_type_C
@ -21,20 +21,20 @@ func @vector_outerproduct_splat_8x8(%fa: f32, %fb: f32, %fc: f32) -> !vector_typ
return %d: !vector_type_C
}
func @vector_outerproduct_vec_2x3(%x : !vector_type_X,
func.func @vector_outerproduct_vec_2x3(%x : !vector_type_X,
%y : !vector_type_Y) -> !vector_type_Z {
%o = vector.outerproduct %x, %y : !vector_type_X, !vector_type_Y
return %o: !vector_type_Z
}
func @vector_outerproduct_vec_2x3_acc(%x : !vector_type_X,
func.func @vector_outerproduct_vec_2x3_acc(%x : !vector_type_X,
%y : !vector_type_Y,
%z : !vector_type_Z) -> !vector_type_Z {
%o = vector.outerproduct %x, %y, %z : !vector_type_X, !vector_type_Y
return %o: !vector_type_Z
}
func @entry() {
func.func @entry() {
%f0 = arith.constant 0.0: f32
%f1 = arith.constant 1.0: f32
%f2 = arith.constant 2.0: f32
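// For reference, vector.outerproduct computes %o[i][j] = %x[i] * %y[j], plus
// an accumulator in the three-operand form used above. A tiny worked sketch
// with illustrative values:
func.func @outerproduct_demo() {
  %x = arith.constant dense<[1.0, 2.0]> : vector<2xf32>
  %y = arith.constant dense<[10.0, 20.0, 30.0]> : vector<3xf32>
  // Prints ( ( 10, 20, 30 ), ( 20, 40, 60 ) ).
  %o = vector.outerproduct %x, %y : vector<2xf32>, vector<3xf32>
  vector.print %o : vector<2x3xf32>
  return
}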

View File

@ -13,7 +13,7 @@
!vector_type_R = type vector<7xi64>
func @vector_outerproduct_splat_8x8(%ia: i64, %ib: i64, %ic: i64) -> !vector_type_C {
func.func @vector_outerproduct_splat_8x8(%ia: i64, %ib: i64, %ic: i64) -> !vector_type_C {
%a = vector.splat %ia: !vector_type_A
%b = vector.splat %ib: !vector_type_B
%c = vector.splat %ic: !vector_type_C
@ -21,20 +21,20 @@ func @vector_outerproduct_splat_8x8(%ia: i64, %ib: i64, %ic: i64) -> !vector_typ
return %d: !vector_type_C
}
func @vector_outerproduct_vec_2x3(%x : !vector_type_X,
func.func @vector_outerproduct_vec_2x3(%x : !vector_type_X,
%y : !vector_type_Y) -> !vector_type_Z {
%o = vector.outerproduct %x, %y : !vector_type_X, !vector_type_Y
return %o: !vector_type_Z
}
func @vector_outerproduct_vec_2x3_acc(%x : !vector_type_X,
func.func @vector_outerproduct_vec_2x3_acc(%x : !vector_type_X,
%y : !vector_type_Y,
%z : !vector_type_Z) -> !vector_type_Z {
%o = vector.outerproduct %x, %y, %z : !vector_type_X, !vector_type_Y
return %o: !vector_type_Z
}
func @entry() {
func.func @entry() {
%i0 = arith.constant 0: i64
%i1 = arith.constant 1: i64
%i2 = arith.constant 2: i64

View File

@ -6,7 +6,7 @@
//
// Test various signless, signed, unsigned integer types.
//
func @entry() {
func.func @entry() {
%0 = arith.constant dense<[true, false, -1, 0, 1]> : vector<5xi1>
vector.print %0 : vector<5xi1>
// CHECK: ( 1, 0, 1, 0, 1 )

View File

@ -5,7 +5,7 @@
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s
func @entry() {
func.func @entry() {
// Construct a numerically very stable test vector.
%f1 = arith.constant 1.0: f32
%f2 = arith.constant 2.0: f32

View File

@ -3,7 +3,7 @@
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s
func @entry() {
func.func @entry() {
// Construct test vector.
%f1 = arith.constant 1.5: f32
%f2 = arith.constant 2.0: f32

View File

@ -5,7 +5,7 @@
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s
func @entry() {
func.func @entry() {
// Construct a numerically very stable test vector.
%f1 = arith.constant 1.0: f64
%f2 = arith.constant 2.0: f64

View File

@ -3,7 +3,7 @@
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s
func @entry() {
func.func @entry() {
// Construct test vector.
%f1 = arith.constant 1.5: f64
%f2 = arith.constant 2.0: f64

View File

@ -3,7 +3,7 @@
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s
func @entry() {
func.func @entry() {
// Construct test vector.
%i1 = arith.constant 1: i32
%i2 = arith.constant 2: i32

View File

@ -3,7 +3,7 @@
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s
func @entry() {
func.func @entry() {
%v = arith.constant dense<[-8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]> : vector<24xi4>
vector.print %v : vector<24xi4>
//

View File

@ -3,7 +3,7 @@
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s
func @entry() {
func.func @entry() {
// Construct test vector.
%i1 = arith.constant 1: i64
%i2 = arith.constant 2: i64

View File

@ -3,7 +3,7 @@
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s
func @entry() {
func.func @entry() {
%v0 = arith.constant dense<[-8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7]> : vector<16xi4>
%v = vector.bitcast %v0 : vector<16xi4> to vector<16xsi4>
vector.print %v : vector<16xsi4>

View File

@ -3,7 +3,7 @@
// RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \
// RUN: FileCheck %s
func @entry() {
func.func @entry() {
%v0 = arith.constant dense<[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]> : vector<16xi4>
%v = vector.bitcast %v0 : vector<16xi4> to vector<16xui4>
vector.print %v : vector<16xui4>

Some files were not shown because too many files have changed in this diff.