[mlir] Don't print `// no predecessors` on entry blocks

Entry blocks can never have predecessors, so printing this note on them is unnecessary.

Fixes #53287

Differential Revision: https://reviews.llvm.org/D117713
Author: River Riddle, 2022-01-19 12:18:30 -08:00
Parent: fabbe8d5fd
Commit: d75c3e8396
45 changed files with 180 additions and 179 deletions
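
For illustration, a hand-written before/after sketch of the printed IR (the function and values are hypothetical, not taken from this patch), mirroring the test updates below:

// Before this change, the entry block of the pad region printed a redundant note:
func @pad_example(%arg0: tensor<2x3xf32>) -> tensor<4x9xf32> {
  %cst = arith.constant 0.0 : f32
  %0 = linalg.pad_tensor %arg0 low[1, 3] high[1, 3] {
  ^bb0(%i: index, %j: index):  // no predecessors
    linalg.yield %cst : f32
  } : tensor<2x3xf32> to tensor<4x9xf32>
  return %0 : tensor<4x9xf32>
}
// After this change, the same entry block prints as just:
//   ^bb0(%i: index, %j: index):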

View File

@ -67,7 +67,7 @@ def BufferDeallocation : Pass<"buffer-deallocation", "FuncOp"> {
args_out = 1 : i64,
indexing_maps = [#map0, #map0],
iterator_types = ["parallel"]} %arg1, %1 {
^bb0(%arg3: f32, %arg4: f32): // no predecessors
^bb0(%arg3: f32, %arg4: f32):
%4 = exp %arg3 : f32
linalg.yield %4 : f32
}: memref<2xf32>, memref<2xf32>

View File

@ -104,7 +104,7 @@ def SCFForToWhileLoop
%1 = arith.cmpi slt, %i, %arg1 : index
scf.condition(%1) %i : index
} do {
^bb0(%i: index): // no predecessors
^bb0(%i: index):
%1 = arith.addi %i, %c1 : index
%2 = arith.addi %arg2, %arg2 : i32
memref.store %2, %arg0[%i] : memref<?xi32>

View File

@ -554,7 +554,7 @@ struct StaticTensorGenerate : public OpRewritePattern<GenerateOp> {
/// Canonicalizes the pattern of the form
///
/// %tensor = tensor.generate %x {
/// ^bb0(%arg0: index): // no predecessors
/// ^bb0(%arg0: index):
/// <computation>
/// yield %1 : index
/// } : tensor<?xindex>

View File

@ -2653,7 +2653,8 @@ void OperationPrinter::print(Block *block, bool printBlockArgs,
if (!block->getParent()) {
os << " // block is not in a region!";
} else if (block->hasNoPredecessors()) {
os << " // no predecessors";
if (!block->isEntryBlock())
os << " // no predecessors";
} else if (auto *pred = block->getSinglePredecessor()) {
os << " // pred: ";
printBlockName(pred);
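
To show the behavior the inner isEntryBlock() check preserves, here is a hand-written MLIR sketch (hypothetical function, not part of the patch): a non-entry block with no predecessors still gets the note, since only the first block of a region is exempted.

func @unreachable_block() {
  return
^bb1:  // no predecessors
  return
}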

View File

@ -51,7 +51,7 @@ func @wsloop(%arg0: index, %arg1: index, %arg2: index, %arg3: index, %arg4: inde
omp.parallel {
// CHECK: omp.wsloop (%[[ARG6:.*]], %[[ARG7:.*]]) : i64 = (%[[ARG0]], %[[ARG1]]) to (%[[ARG2]], %[[ARG3]]) step (%[[ARG4]], %[[ARG5]]) {
"omp.wsloop"(%arg0, %arg1, %arg2, %arg3, %arg4, %arg5) ({
^bb0(%arg6: index, %arg7: index): // no predecessors
^bb0(%arg6: index, %arg7: index):
// CHECK-DAG: %[[CAST_ARG6:.*]] = builtin.unrealized_conversion_cast %[[ARG6]] : i64 to index
// CHECK-DAG: %[[CAST_ARG7:.*]] = builtin.unrealized_conversion_cast %[[ARG7]] : i64 to index
// CHECK: "test.payload"(%[[CAST_ARG6]], %[[CAST_ARG7]]) : (index, index) -> ()

View File

@ -473,7 +473,7 @@ func @while_values(%arg0: i32, %arg1: f32) {
scf.condition(%0) %2, %3 : i64, f64
} do {
// CHECK: ^[[AFTER]](%[[ARG4:.*]]: i64, %[[ARG5:.*]]: f64):
^bb0(%arg2: i64, %arg3: f64): // no predecessors
^bb0(%arg2: i64, %arg3: f64):
// CHECK: br ^[[BEFORE]](%{{.*}}, %{{.*}} : i32, f32)
scf.yield %c0_i32, %cst : i32, f32
}

View File

@ -419,7 +419,7 @@ func @depthwise_conv(%arg0 : tensor<1x7x5x3xf32>, %arg1 : tensor<3x1x3x11xf32>,
// CHECK: [[DEPTH:%.+]] = linalg.depthwise_conv_2d_nhwc_hwcm {dilations = dense<1> : tensor<2xi64>, strides = dense<1> : tensor<2xi64>} ins(%arg0, %arg1 : tensor<1x7x5x3xf32>, tensor<3x1x3x11xf32>) outs([[FILL]] : tensor<1x5x5x3x11xf32>)
// CHECK: [[COLLAPSED:%.+]] = "tosa.reshape"([[DEPTH]]) {new_shape = [1, 5, 5, 33]}
// CHECK: [[BIAS:%.+]] = linalg.generic {indexing_maps = [#[[$MAP0]], #[[$MAP1]], #[[$MAP1]]], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%arg2, [[COLLAPSED]] : tensor<33xf32>, tensor<1x5x5x33xf32>) outs([[OUT]] : tensor<1x5x5x33xf32>) {
// CHECK: ^bb0(%arg3: f32, %arg4: f32, %arg5: f32): // no predecessors
// CHECK: ^bb0(%arg3: f32, %arg4: f32, %arg5: f32):
// CHECK: [[ADD:%.+]] = arith.addf %arg3, %arg4 : f32
// CHECK: linalg.yield [[ADD]] : f32
// CHECK: } -> tensor<1x5x5x33xf32>
@ -443,7 +443,7 @@ func @depthwise_conv_dyn(%arg0 : tensor<?x7x5x3xf32>, %arg1 : tensor<3x1x3x11xf3
// CHECK: %[[DEPTH:.+]] = linalg.depthwise_conv_2d_nhwc_hwcm {dilations = dense<1> : tensor<2xi64>, strides = dense<1> : tensor<2xi64>} ins(%arg0, %arg1 : tensor<?x7x5x3xf32>, tensor<3x1x3x11xf32>) outs(%[[FILL]] : tensor<?x5x5x3x11xf32>)
// CHECK: %[[COLLAPSED:.+]] = "tosa.reshape"(%[[DEPTH]]) {new_shape = [-1, 5, 5, 33]}
// CHECK: %[[BIAS:.+]] = linalg.generic {indexing_maps = [#[[$MAP0]], #[[$MAP1]], #[[$MAP1]]], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%arg2, %[[COLLAPSED]] : tensor<33xf32>, tensor<?x5x5x33xf32>) outs(%[[OUT]] : tensor<?x5x5x33xf32>) {
// CHECK: ^bb0(%arg3: f32, %arg4: f32, %arg5: f32): // no predecessors
// CHECK: ^bb0(%arg3: f32, %arg4: f32, %arg5: f32):
// CHECK: %[[ADD:.+]] = arith.addf %arg3, %arg4 : f32
// CHECK: linalg.yield %[[ADD]] : f32
// CHECK: } -> tensor<?x5x5x33xf32>
@ -465,7 +465,7 @@ func @depthwise_conv_strides(%arg0 : tensor<1x11x9x3xf32>, %arg1 : tensor<3x1x3x
// CHECK: [[DEPTH:%.+]] = linalg.depthwise_conv_2d_nhwc_hwcm {dilations = dense<1> : tensor<2xi64>, strides = dense<2> : tensor<2xi64>} ins(%arg0, %arg1 : tensor<1x11x9x3xf32>, tensor<3x1x3x11xf32>) outs([[FILL]] : tensor<1x5x5x3x11xf32>)
// CHECK: [[COLLAPSED:%.+]] = "tosa.reshape"([[DEPTH]]) {new_shape = [1, 5, 5, 33]}
// CHECK: [[BIAS:%.+]] = linalg.generic {indexing_maps = [#[[$MAP0]], #[[$MAP1]], #[[$MAP1]]], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%arg2, [[COLLAPSED]] : tensor<33xf32>, tensor<1x5x5x33xf32>) outs([[OUT]] : tensor<1x5x5x33xf32>) {
// CHECK: ^bb0(%arg3: f32, %arg4: f32, %arg5: f32): // no predecessors
// CHECK: ^bb0(%arg3: f32, %arg4: f32, %arg5: f32):
// CHECK: [[ADD:%.+]] = arith.addf %arg3, %arg4 : f32
// CHECK: linalg.yield [[ADD]] : f32
// CHECK: } -> tensor<1x5x5x33xf32>
@ -493,7 +493,7 @@ func @depthwise_conv_quant(%arg0 : tensor<1x12x12x4xi8>, %arg1 : tensor<3x3x4x12
// CHECK: [[DEPTH:%.+]] = linalg.depthwise_conv_2d_nhwc_hwcm_q {dilations = dense<1> : tensor<2xi64>, strides = dense<1> : tensor<2xi64>} ins([[PAD]], %arg1, [[C128]], [[C42]] : tensor<1x14x14x4xi8>, tensor<3x3x4x128xi8>, i32, i32) outs([[FILL]] : tensor<1x12x12x4x128xi32>)
// CHECK: [[COLLAPSED:%.+]] = "tosa.reshape"([[DEPTH]]) {new_shape = [1, 12, 12, 512]}
// CHECK: [[BIAS:%.+]] = linalg.generic {indexing_maps = [#[[$MAP0]], #[[$MAP1]], #[[$MAP1]]], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%arg2, [[COLLAPSED]] : tensor<512xi32>, tensor<1x12x12x512xi32>) outs([[OUT]] : tensor<1x12x12x512xi32>) {
// CHECK: ^bb0(%arg3: i32, %arg4: i32, %arg5: i32): // no predecessors
// CHECK: ^bb0(%arg3: i32, %arg4: i32, %arg5: i32):
// CHECK: [[ADD:%.+]] = arith.addi %arg3, %arg4 : i32
// CHECK: linalg.yield [[ADD]] : i32
// CHECK: } -> tensor<1x12x12x512xi32>
@ -517,7 +517,7 @@ func @depthwise_conv_quant_dilations(%arg0 : tensor<1x14x14x4xi8>, %arg1 : tenso
// CHECK: [[DEPTH:%.+]] = linalg.depthwise_conv_2d_nhwc_hwcm_q {dilations = dense<2> : tensor<2xi64>, strides = dense<1> : tensor<2xi64>} ins(%arg0, %arg1, [[C128]], [[C42]] : tensor<1x14x14x4xi8>, tensor<3x3x4x128xi8>, i32, i32) outs([[FILL]] : tensor<1x10x10x4x128xi32>)
// CHECK: [[COLLAPSED:%.+]] = "tosa.reshape"([[DEPTH]]) {new_shape = [1, 10, 10, 512]}
// CHECK: [[BIAS:%.+]] = linalg.generic {indexing_maps = [#[[$MAP0]], #[[$MAP1]], #[[$MAP1]]], iterator_types = ["parallel", "parallel", "parallel", "parallel"]} ins(%arg2, [[COLLAPSED]] : tensor<512xi32>, tensor<1x10x10x512xi32>) outs([[OUT]] : tensor<1x10x10x512xi32>) {
// CHECK: ^bb0(%arg3: i32, %arg4: i32, %arg5: i32): // no predecessors
// CHECK: ^bb0(%arg3: i32, %arg4: i32, %arg5: i32):
// CHECK: [[ADD:%.+]] = arith.addi %arg3, %arg4 : i32
// CHECK: linalg.yield [[ADD]] : i32
// CHECK: } -> tensor<1x10x10x512xi32>

View File

@ -1159,7 +1159,7 @@ func @pad_float(%arg0 : tensor<1x2xf32>) -> (tensor<4x9xf32>) {
// CHECK-DAG: [[INDEX4:%.+]] = arith.constant 4 : index
// CHECK-DAG: [[CST:%.+]] = arith.constant 0.000000e+00 : f32
// CHECK: linalg.pad_tensor %arg0 low{{\[}}%{{.*}}, [[INDEX3]]] high{{\[}}[[INDEX2]], [[INDEX4]]] {
// CHECK: ^bb0(%arg1: index, %arg2: index): // no predecessors
// CHECK: ^bb0(%arg1: index, %arg2: index):
// CHECK: linalg.yield [[CST]]
// CHECK: } : tensor<1x2xf32> to tensor<4x9xf32>
%1 = "tosa.pad"(%arg0, %0) : (tensor<1x2xf32>, tensor<2x2xi32>) -> (tensor<4x9xf32>)
@ -1195,7 +1195,7 @@ func @pad_float_explicit(%arg0 : tensor<1x2xf32>) -> (tensor<4x9xf32>) {
// CHECK-DAG: [[INDEX4:%.+]] = arith.constant 4 : index
// CHECK-DAG: [[CST:%.+]] = arith.constant 4.200000e+01 : f32
// CHECK: linalg.pad_tensor %arg0 low{{\[}}%{{.*}}, [[INDEX3]]] high{{\[}}[[INDEX2]], [[INDEX4]]] {
// CHECK: ^bb0(%arg1: index, %arg2: index): // no predecessors
// CHECK: ^bb0(%arg1: index, %arg2: index):
// CHECK: linalg.yield [[CST]]
// CHECK: } : tensor<1x2xf32> to tensor<4x9xf32>
%1 = arith.constant dense<42.0> : tensor<f32>

View File

@ -50,7 +50,7 @@ func @affine.yield() {
// CHECK-NEXT: }
//
// GENERIC: "affine.for"() ({
// GENERIC-NEXT: ^bb0(%{{.*}}: index): // no predecessors
// GENERIC-NEXT: ^bb0(%{{.*}}: index):
// GENERIC-NEXT: "affine.yield"() : () -> ()
// GENERIC-NEXT: }) {lower_bound = #map0, step = 1 : index, upper_bound = #map1} : () -> ()
affine.for %i = 0 to 10 {

View File

@ -278,7 +278,7 @@ func @pad_tensor_dynamic_shape(%arg0: tensor<4x?x2x?xf32>, %arg1: index) -> tens
%c0 = arith.constant 0 : index
%cst = arith.constant 0.0 : f32
%out = linalg.pad_tensor %arg0 low[%c0, %c0, %arg1, %c0] high[%c0, %c0, %c0, %arg1] {
^bb0(%gen_arg1: index, %gen_arg2: index, %gen_arg3: index, %gen_arg4: index): // no predecessors
^bb0(%gen_arg1: index, %gen_arg2: index, %gen_arg3: index, %gen_arg4: index):
linalg.yield %cst : f32
} : tensor<4x?x2x?xf32> to tensor<4x?x?x?xf32>
return %out : tensor<4x?x?x?xf32>

View File

@ -297,7 +297,7 @@ func @dead_linalg_tensor(%arg0 : tensor<7x7xi32>, %arg1 : tensor<7x7xf32>,
linalg.yield %3 : i32
} -> tensor<7x7xi32>
%3 = linalg.pad_tensor %arg2 low[%c0, %c0] high[%high, %high] {
^bb0(%arg9: index, %arg10: index): // no predecessors
^bb0(%arg9: index, %arg10: index):
linalg.yield %cst : f32
} : tensor<?x?xf32> to tensor<2x4xf32>
return
@ -354,7 +354,7 @@ func @pad_tensor_after_cast_different_shape(%arg0: tensor<?x64x?x?xf32>)
%cst = arith.constant 0.000000e+00 : f32
%dynamic = tensor.cast %arg0 : tensor<?x64x?x?xf32> to tensor<?x?x?x?xf32>
%padded = linalg.pad_tensor %dynamic low[0, 0, 1, 1] high[0, 0, 1, 1] {
^bb0(%arg1: index, %arg2: index, %arg3: index, %arg4: index): // no predecessors
^bb0(%arg1: index, %arg2: index, %arg3: index, %arg4: index):
linalg.yield %cst: f32
} : tensor<?x?x?x?xf32> to tensor<?x?x?x?xf32>
return %padded: tensor<?x?x?x?xf32>
@ -378,7 +378,7 @@ func @pad_tensor_after_cast_same_shape(%arg0: tensor<?x64x?x?xf32>, %padding : i
%cst = arith.constant 0.000000e+00 : f32
%dynamic = tensor.cast %arg0 : tensor<?x64x?x?xf32> to tensor<?x?x?x?xf32>
%padded = linalg.pad_tensor %dynamic low[0, %padding, 1, 1] high[0, %padding, 1, 1] {
^bb0(%arg1: index, %arg2: index, %arg3: index, %arg4: index): // no predecessors
^bb0(%arg1: index, %arg2: index, %arg3: index, %arg4: index):
linalg.yield %cst: f32
} : tensor<?x?x?x?xf32> to tensor<?x?x?x?xf32>
return %padded: tensor<?x?x?x?xf32>
@ -395,7 +395,7 @@ func @pad_tensor_of_cast(%t: tensor<8x?xf32>, %s: index) -> tensor<8x32xf32> {
%cst = arith.constant 0.000000e+00 : f32
%0 = tensor.cast %t : tensor<8x?xf32> to tensor<?x?xf32>
%1 = linalg.pad_tensor %0 low[%c0, %c0] high[%c0, %s] {
^bb0(%arg9: index, %arg10: index): // no predecessors
^bb0(%arg9: index, %arg10: index):
linalg.yield %cst : f32
} : tensor<?x?xf32> to tensor<8x32xf32>
return %1 : tensor<8x32xf32>
@ -584,7 +584,7 @@ func @tensor_pad_cast_fold(%arg0: tensor<4x4xf32>) -> tensor<4x4xf32> {
%cst = arith.constant 0.0 : f32
%0 = tensor.cast %arg0 : tensor<4x4xf32> to tensor<?x?xf32>
%1 = linalg.pad_tensor %0 low[%c0, %c0] high[%c0, %c0] {
^bb0(%arg1: index, %arg2: index): // no predecessors
^bb0(%arg1: index, %arg2: index):
linalg.yield %cst : f32
} : tensor<?x?xf32> to tensor<4x4xf32>
return %1 : tensor<4x4xf32>
@ -603,7 +603,7 @@ func @fold_pad_tensor_source_cast(%arg0: tensor<4x?xf32>) -> tensor<4x4xf32> {
%cst = arith.constant 0.0 : f32
%0 = tensor.cast %arg0 : tensor<4x?xf32> to tensor<?x?xf32>
%1 = linalg.pad_tensor %0 low[0, 0] high[0, 1] {
^bb0(%arg1: index, %arg2: index): // no predecessors
^bb0(%arg1: index, %arg2: index):
linalg.yield %cst : f32
} : tensor<?x?xf32> to tensor<4x4xf32>
return %1 : tensor<4x4xf32>

View File

@ -7,7 +7,7 @@ func @detensor_simple(%arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<f32> att
%1 = linalg.generic {indexing_maps = [#map, #map, #map], iterator_types = []}
ins(%arg1, %arg2 : tensor<f32>, tensor<f32>)
outs(%0 : tensor<f32>) {
^bb0(%arg3: f32, %arg4: f32, %arg5: f32): // no predecessors
^bb0(%arg3: f32, %arg4: f32, %arg5: f32):
%2 = arith.addf %arg3, %arg4 : f32
linalg.yield %2 : f32
} -> tensor<f32>
@ -26,7 +26,7 @@ func @detensor_op_sequence(%arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<f32
%1 = linalg.generic {indexing_maps = [#map, #map, #map], iterator_types = []}
ins(%arg1, %arg2 : tensor<f32>, tensor<f32>)
outs(%0 : tensor<f32>) {
^bb0(%arg3: f32, %arg4: f32, %arg5: f32): // no predecessors
^bb0(%arg3: f32, %arg4: f32, %arg5: f32):
%2 = arith.addf %arg3, %arg4 : f32
linalg.yield %2 : f32
} -> tensor<f32>
@ -35,7 +35,7 @@ func @detensor_op_sequence(%arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<f32
%4 = linalg.generic {indexing_maps = [#map, #map, #map], iterator_types = []}
ins(%arg1, %1 : tensor<f32>, tensor<f32>)
outs(%3 : tensor<f32>) {
^bb0(%arg3: f32, %arg4: f32, %arg5: f32): // no predecessors
^bb0(%arg3: f32, %arg4: f32, %arg5: f32):
%5 = arith.mulf %arg3, %arg4 : f32
linalg.yield %5 : f32
} -> tensor<f32>
@ -44,7 +44,7 @@ func @detensor_op_sequence(%arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<f32
%7 = linalg.generic {indexing_maps = [#map, #map, #map], iterator_types = []}
ins(%1, %4 : tensor<f32>, tensor<f32>)
outs(%6 : tensor<f32>) {
^bb0(%arg3: f32, %arg4: f32, %arg5: f32): // no predecessors
^bb0(%arg3: f32, %arg4: f32, %arg5: f32):
%5 = arith.divf %arg3, %arg4 : f32
linalg.yield %5 : f32
} -> tensor<f32>
@ -66,7 +66,7 @@ func @detensor_multiple_ops(%arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<f3
%1 = linalg.generic {indexing_maps = [#map, #map, #map], iterator_types = []}
ins(%arg1, %arg2 : tensor<f32>, tensor<f32>)
outs(%0 : tensor<f32>) {
^bb0(%arg3: f32, %arg4: f32, %arg5: f32): // no predecessors
^bb0(%arg3: f32, %arg4: f32, %arg5: f32):
%2 = arith.addf %arg3, %arg4 : f32
%3 = arith.mulf %2, %arg4 : f32
linalg.yield %3 : f32
@ -87,7 +87,7 @@ func @detensor_foreign_op(%arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<f32>
%1 = linalg.generic {indexing_maps = [#map, #map, #map], iterator_types = []}
ins(%arg1, %arg2 : tensor<f32>, tensor<f32>)
outs(%0 : tensor<f32>) {
^bb0(%arg3: f32, %arg4: f32, %arg5: f32): // no predecessors
^bb0(%arg3: f32, %arg4: f32, %arg5: f32):
%2 = "foreign.do_something"(%arg3, %arg4) {} : (f32, f32) -> f32
linalg.yield %2 : f32
} -> tensor<f32>

View File

@ -11,7 +11,7 @@ func @if_true_test(%arg0: i1, %arg1: i32) -> tensor<i32> attributes {} {
{indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []}
ins(%arg0_t : tensor<i1>)
outs(%2 : tensor<i8>) {
^bb0(%arg2: i1, %arg3: i8): // no predecessors
^bb0(%arg2: i1, %arg3: i8):
%10 = arith.extui %arg2 : i1 to i8
linalg.yield %10 : i8
} -> tensor<i8>
@ -24,7 +24,7 @@ func @if_true_test(%arg0: i1, %arg1: i32) -> tensor<i32> attributes {} {
{indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>, affine_map<() -> ()>], iterator_types = []}
ins(%arg1_t, %cst : tensor<i32>, tensor<i32>)
outs(%6 : tensor<i32>) {
^bb0(%arg2: i32, %arg3: i32, %arg4: i32): // no predecessors
^bb0(%arg2: i32, %arg3: i32, %arg4: i32):
%10 = arith.addi %arg2, %arg3 : i32
linalg.yield %10 : i32
} -> tensor<i32>

View File

@ -19,7 +19,7 @@ func @main() -> (tensor<i32>) attributes {} {
%4 = linalg.generic #attrs
ins(%2, %1 : tensor<i32>, tensor<i32>)
outs(%3 : tensor<i1>) {
^bb0(%arg0: i32, %arg1: i32, %arg2: i1): // no predecessors
^bb0(%arg0: i32, %arg1: i32, %arg2: i1):
%8 = arith.cmpi slt, %arg0, %arg1 : i32
linalg.yield %8 : i1
} -> tensor<i1>
@ -31,7 +31,7 @@ func @main() -> (tensor<i32>) attributes {} {
%8 = linalg.generic #attrs
ins(%6, %6 : tensor<i32>, tensor<i32>)
outs(%7 : tensor<i32>) {
^bb0(%arg0: i32, %arg1: i32, %arg2: i32): // no predecessors
^bb0(%arg0: i32, %arg1: i32, %arg2: i32):
%9 = arith.addi %arg0, %arg1 : i32
linalg.yield %9 : i32
} -> tensor<i32>
@ -80,7 +80,7 @@ func @main() -> (tensor<i32>) attributes {} {
%4 = linalg.generic #attrs
ins(%2, %1 : tensor<i32>, tensor<i32>)
outs(%3 : tensor<i1>) {
^bb0(%arg0: i32, %arg1: i32, %arg2: i1): // no predecessors
^bb0(%arg0: i32, %arg1: i32, %arg2: i1):
%8 = arith.cmpi slt, %arg0, %arg1 : i32
linalg.yield %8 : i1
} -> tensor<i1>
@ -92,7 +92,7 @@ func @main() -> (tensor<i32>) attributes {} {
%8 = linalg.generic #attrs
ins(%6, %6 : tensor<i32>, tensor<i32>)
outs(%7 : tensor<i32>) {
^bb0(%arg0: i32, %arg1: i32, %arg2: i32): // no predecessors
^bb0(%arg0: i32, %arg1: i32, %arg2: i32):
%9 = arith.addi %arg0, %arg1 : i32
linalg.yield %9 : i32
} -> tensor<i32>
@ -143,7 +143,7 @@ func @main() -> (tensor<i32>) attributes {} {
%4 = linalg.generic #attrs
ins(%2, %1 : tensor<i32>, tensor<i32>)
outs(%3 : tensor<i1>) {
^bb0(%arg0: i32, %arg1: i32, %arg2: i1): // no predecessors
^bb0(%arg0: i32, %arg1: i32, %arg2: i1):
%8 = arith.cmpi slt, %arg0, %arg1 : i32
linalg.yield %8 : i1
} -> tensor<i1>
@ -160,7 +160,7 @@ func @main() -> (tensor<i32>) attributes {} {
%8 = linalg.generic #attrs
ins(%6, %12 : tensor<i32>, tensor<i32>)
outs(%7 : tensor<i32>) {
^bb0(%arg0: i32, %arg1: i32, %arg2: i32): // no predecessors
^bb0(%arg0: i32, %arg1: i32, %arg2: i32):
%9 = arith.addi %arg0, %arg1 : i32
linalg.yield %9 : i32
} -> tensor<i32>

View File

@ -16,7 +16,7 @@ func @main(%farg0: tensor<i32>, %farg1: tensor<i32>) -> tensor<i32> attributes {
%2 = linalg.generic #attrs
ins(%0, %farg1 : tensor<i32>, tensor<i32>)
outs(%1 : tensor<i1>) {
^bb0(%arg0: i32, %arg1: i32, %arg2: i1): // no predecessors
^bb0(%arg0: i32, %arg1: i32, %arg2: i1):
%8 = arith.cmpi slt, %arg0, %arg1 : i32
linalg.yield %8 : i1
} -> tensor<i1>
@ -28,7 +28,7 @@ func @main(%farg0: tensor<i32>, %farg1: tensor<i32>) -> tensor<i32> attributes {
%6 = linalg.generic #attrs
ins(%4, %4 : tensor<i32>, tensor<i32>)
outs(%5 : tensor<i32>) {
^bb0(%arg0: i32, %arg1: i32, %arg2: i32): // no predecessors
^bb0(%arg0: i32, %arg1: i32, %arg2: i32):
%8 = arith.addi %arg0, %arg1 : i32
linalg.yield %8 : i32
} -> tensor<i32>

View File

@ -38,7 +38,7 @@ func @main(%farg0: tensor<10xi32>, %farg1: tensor<i32>) -> tensor<i32> attribute
%4 = linalg.generic #attrs
ins(%2, %farg1 : tensor<i32>, tensor<i32>)
outs(%3 : tensor<i1>) {
^bb0(%arg0: i32, %arg1: i32, %arg2: i1): // no predecessors
^bb0(%arg0: i32, %arg1: i32, %arg2: i1):
%8 = arith.cmpi slt, %arg0, %arg1 : i32
linalg.yield %8 : i1
} -> tensor<i1>
@ -68,7 +68,7 @@ func @main(%farg0: tensor<10xi32>, %farg1: tensor<i32>) -> tensor<i32> attribute
// DET-ALL: ^[[bb1]](%{{.*}}: tensor<10xi32>)
// DET-ALL: linalg.init_tensor [] : tensor<i32>
// DET-ALL: linalg.generic {{{.*}}} ins(%{{.*}} : tensor<10xi32>) outs(%{{.*}} : tensor<i32>) {
// DET-ALL: ^bb0(%{{.*}}: i32, %{{.*}}: i32): // no predecessors
// DET-ALL: ^bb0(%{{.*}}: i32, %{{.*}}: i32):
// DET-ALL: %{{.*}} = arith.addi %{{.*}}, %{{.*}}
// DET-ALL: linalg.yield %{{.*}} : i32
// DET-ALL: } -> tensor<i32>

View File

@ -21,7 +21,7 @@ func @main() -> () attributes {} {
%4 = linalg.generic #attrs
ins(%2, %reshaped1 : tensor<i32>, tensor<i32>)
outs(%3 : tensor<i1>) {
^bb0(%arg0: i32, %arg1: i32, %arg2: i1): // no predecessors
^bb0(%arg0: i32, %arg1: i32, %arg2: i1):
%8 = arith.cmpi slt, %arg0, %arg1 : i32
linalg.yield %8 : i1
} -> tensor<i1>
@ -33,7 +33,7 @@ func @main() -> () attributes {} {
%8 = linalg.generic #attrs
ins(%6, %6 : tensor<i32>, tensor<i32>)
outs(%7 : tensor<i32>) {
^bb0(%arg0: i32, %arg1: i32, %arg2: i32): // no predecessors
^bb0(%arg0: i32, %arg1: i32, %arg2: i32):
%9 = arith.addi %arg0, %arg1 : i32
linalg.yield %9 : i32
} -> tensor<i32>

View File

@ -156,7 +156,7 @@ func @leading_dim_1_canonicalization(%arg0: tensor<1x5xf32>, %shape: tensor<5xf3
%0 = linalg.generic #trait
ins(%arg0 : tensor<1x5xf32>)
outs(%shape : tensor<5xf32>) {
^bb0(%arg2: f32, %arg3: f32): // no predecessors
^bb0(%arg2: f32, %arg3: f32):
linalg.yield %arg2 : f32
} -> tensor<5xf32>
return %0 : tensor<5xf32>
@ -250,7 +250,7 @@ func @fold_unit_dim_tensor_reshape_op(%arg0 : tensor<5xf32>) -> tensor<2x5xf32>
%2 = linalg.generic {i64, indexing_maps = [#map1, #map0],
iterator_types = ["parallel", "parallel", "parallel"]}
ins(%arg0 : tensor<5xf32>) outs(%1 : tensor<1x2x5xf32>) {
^bb0(%arg1: f32, %arg2: f32): // no predecessors
^bb0(%arg1: f32, %arg2: f32):
linalg.yield %arg1 : f32
} -> tensor<1x2x5xf32>
%3 = tensor.collapse_shape %2 [[0, 1], [2]]
@ -338,7 +338,7 @@ func @unit_dim_for_reduction(%arg0: tensor<1x?x1x?xf32>) -> tensor<1x?xf32> {
iterator_types = ["parallel", "parallel", "reduction", "reduction"]}
ins(%arg0 : tensor<1x?x1x?xf32>)
outs(%2 : tensor<1x?xf32>) {
^bb0(%arg1: f32, %arg2: f32): // no predecessors
^bb0(%arg1: f32, %arg2: f32):
%4 = arith.addf %arg1, %arg2 : f32
linalg.yield %4 : f32
} -> tensor<1x?xf32>
@ -372,7 +372,7 @@ func @unit_dim_for_both_reduction(%arg0: tensor<1x?x1x1xf32>) -> tensor<1x1xf32>
iterator_types = ["parallel", "parallel", "reduction", "reduction"]}
ins(%arg0 : tensor<1x?x1x1xf32>)
outs(%2 : tensor<1x1xf32>) {
^bb0(%arg1: f32, %arg2: f32): // no predecessors
^bb0(%arg1: f32, %arg2: f32):
%4 = arith.addf %arg1, %arg2 : f32
linalg.yield %4 : f32
} -> tensor<1x1xf32>
@ -406,7 +406,7 @@ func @unit_dim_for_reduction_inner(%arg0: tensor<?x1x?x1xf32>) -> tensor<?x1xf32
iterator_types = ["parallel", "parallel", "reduction", "reduction"]}
ins(%arg0 : tensor<?x1x?x1xf32>)
outs(%2 : tensor<?x1xf32>) {
^bb0(%arg1: f32, %arg2: f32): // no predecessors
^bb0(%arg1: f32, %arg2: f32):
%4 = arith.addf %arg1, %arg2 : f32
linalg.yield %4 : f32
} -> tensor<?x1xf32>
@ -608,7 +608,7 @@ func @leading_dim_1_canonicalization(%arg0: memref<1x5xf32>, %shape: memref<5xf3
linalg.generic #trait
ins(%arg0 : memref<1x5xf32>)
outs(%shape : memref<5xf32>) {
^bb0(%arg2: f32, %arg3: f32): // no predecessors
^bb0(%arg2: f32, %arg3: f32):
linalg.yield %arg2 : f32
}
return %shape : memref<5xf32>
@ -702,7 +702,7 @@ func @fold_unit_dim_memref_reshape_op(%arg0 : memref<5xf32>) -> memref<2x5xf32>
linalg.generic {i64, indexing_maps = [#map1, #map0],
iterator_types = ["parallel", "parallel", "parallel"]}
ins(%arg0 : memref<5xf32>) outs(%1 : memref<1x2x5xf32>) {
^bb0(%arg1: f32, %arg2: f32): // no predecessors
^bb0(%arg1: f32, %arg2: f32):
linalg.yield %arg1 : f32
}
%3 = memref.collapse_shape %1 [[0, 1], [2]]
@ -792,7 +792,7 @@ func @input_stays_same(%arg0 : memref<?x1x?xf32, #map0>, %arg1 : f32, %shape: me
// CHECK-SAME: iterator_types = ["parallel", "parallel", "parallel"]}
// CHECK-SAME: ins(%[[ARG0]], %[[ARG1]] : memref<?x1x?xf32, #[[MAP0]]>, f32)
// CHECK-SAME: outs(%[[OUT]] : memref<?x?x?xf32>) {
// CHECK: ^bb0(%{{.*}}: f32, %[[ARG:.*]]: f32, %{{.*}}: f32): // no predecessors
// CHECK: ^bb0(%{{.*}}: f32, %[[ARG:.*]]: f32, %{{.*}}: f32):
// CHECK: linalg.yield %[[ARG]] : f32
// CHECK: }
// CHECK: return %[[ARG2]] : memref<?x1x?x1x?xf32>

View File

@ -97,7 +97,7 @@ func @leading_dim_1_canonicalization(%arg0: tensor<1x5xf32>, %shape: tensor<5xf3
%0 = linalg.generic #trait
ins(%arg0 : tensor<1x5xf32>)
outs(%shape : tensor<5xf32>) {
^bb0(%arg2: f32, %arg3: f32): // no predecessors
^bb0(%arg2: f32, %arg3: f32):
linalg.yield %arg2 : f32
} -> tensor<5xf32>
return %0 : tensor<5xf32>

View File

@ -14,7 +14,7 @@ func @add_mul_fusion(%arg0: tensor<?x?xf32>, %arg1 : tensor<?x?xf32>, %arg2 : te
%3 = linalg.generic {indexing_maps = [#map0, #map0, #map0], iterator_types = ["parallel", "parallel"]}
ins(%arg0, %arg1 : tensor<?x?xf32>, tensor<?x?xf32>)
outs(%2 : tensor<?x?xf32>) {
^bb0(%arg3: f32, %arg4: f32, %arg5: f32): // no predecessors
^bb0(%arg3: f32, %arg4: f32, %arg5: f32):
%4 = arith.addf %arg3, %arg4 : f32
linalg.yield %4 : f32
} -> tensor<?x?xf32>
@ -27,7 +27,7 @@ func @add_mul_fusion(%arg0: tensor<?x?xf32>, %arg1 : tensor<?x?xf32>, %arg2 : te
// CHECK-SAME: [[ARG0:%[a-zA-Z0-9_]*]]
// CHECK-SAME: [[ARG1:%[a-zA-Z0-9_]*]]
// CHECK-SAME: [[ARG2:%[a-zA-Z0-9_]*]]
^bb0(%arg5: f32, %arg6: f32, %arg7: f32): // no predecessors
^bb0(%arg5: f32, %arg6: f32, %arg7: f32):
// CHECK: [[T1:%[a-zA-Z0-9_]*]] = arith.addf [[ARG0]], [[ARG1]]
// CHECK-NOT: linalg.yield
// CHECK: arith.mulf [[T1]], [[ARG2]]
@ -56,7 +56,7 @@ func @scalar_add_mul_fusion(%arg0: tensor<?x?xf32>, %arg1 : f32, %arg2 : f32) ->
%3 = linalg.generic {indexing_maps = [#map0, #map1, #map0], iterator_types = ["parallel", "parallel"]}
ins(%arg0, %arg1 : tensor<?x?xf32>, f32)
outs(%2 : tensor<?x?xf32>) {
^bb0(%arg3: f32, %arg4: f32, %arg5: f32): // no predecessors
^bb0(%arg3: f32, %arg4: f32, %arg5: f32):
%4 = arith.addf %arg3, %arg4 : f32
linalg.yield %4 : f32
} -> tensor<?x?xf32>
@ -69,7 +69,7 @@ func @scalar_add_mul_fusion(%arg0: tensor<?x?xf32>, %arg1 : f32, %arg2 : f32) ->
// CHECK-SAME: [[ARG3:%[a-zA-Z0-9_]*]]
// CHECK-SAME: [[ARG4:%[a-zA-Z0-9_]*]]
// CHECK-SAME: [[ARG5:%[a-zA-Z0-9_]*]]
^bb0(%arg5: f32, %arg6: f32, %arg7: f32): // no predecessors
^bb0(%arg5: f32, %arg6: f32, %arg7: f32):
// CHECK: [[T1:%[a-zA-Z0-9_]*]] = arith.addf [[ARG3]], [[ARG4]]
// CHECK-NOT: linalg.yield
// CHECK: arith.mulf [[T1]], [[ARG5]]
@ -98,7 +98,7 @@ func @transpose_add_mul_fusion(%arg0: tensor<?x?xf32>, %arg1 : tensor<?x?xf32>,
%3 = linalg.generic {indexing_maps = [#map0, #map1, #map0], iterator_types = ["parallel", "parallel"]}
ins(%arg0, %arg1 : tensor<?x?xf32>, tensor<?x?xf32>)
outs(%2 : tensor<?x?xf32>) {
^bb0(%arg3: f32, %arg4: f32, %arg5: f32): // no predecessors
^bb0(%arg3: f32, %arg4: f32, %arg5: f32):
%4 = arith.addf %arg3, %arg4 : f32
linalg.yield %4 : f32
} -> tensor<?x?xf32>
@ -107,7 +107,7 @@ func @transpose_add_mul_fusion(%arg0: tensor<?x?xf32>, %arg1 : tensor<?x?xf32>,
%4 = linalg.generic {indexing_maps = [#map0, #map0, #map0], iterator_types = ["parallel", "parallel"]}
ins(%3, %arg2 : tensor<?x?xf32>, tensor<?x?xf32>)
outs(%2 : tensor<?x?xf32>) {
^bb0(%arg5: f32, %arg6: f32, %arg7: f32): // no predecessors
^bb0(%arg5: f32, %arg6: f32, %arg7: f32):
%5 = arith.mulf %arg5, %arg6 : f32
linalg.yield %5 : f32
} -> tensor<?x?xf32>
@ -132,7 +132,7 @@ func @add_transpose_mul_fusion(%arg0: tensor<?x?xf32>, %arg1 : tensor<?x?xf32>,
%3 = linalg.generic {indexing_maps = [#map0, #map1, #map0], iterator_types = ["parallel", "parallel"]}
ins(%arg0, %arg1 : tensor<?x?xf32>, tensor<?x?xf32>)
outs(%2 : tensor<?x?xf32>) {
^bb0(%arg3: f32, %arg4: f32, %arg5: f32): // no predecessors
^bb0(%arg3: f32, %arg4: f32, %arg5: f32):
%4 = arith.addf %arg3, %arg4 : f32
linalg.yield %4 : f32
} -> tensor<?x?xf32>
@ -141,7 +141,7 @@ func @add_transpose_mul_fusion(%arg0: tensor<?x?xf32>, %arg1 : tensor<?x?xf32>,
%4 = linalg.generic {indexing_maps = [#map1, #map0, #map0], iterator_types = ["parallel", "parallel"]}
ins(%3, %arg2 : tensor<?x?xf32>, tensor<?x?xf32>)
outs(%2 : tensor<?x?xf32>){
^bb0(%arg5: f32, %arg6: f32, %arg7: f32): // no predecessors
^bb0(%arg5: f32, %arg6: f32, %arg7: f32):
%5 = arith.mulf %arg5, %arg6 : f32
linalg.yield %5 : f32
} -> tensor<?x?xf32>
@ -166,7 +166,7 @@ func @add_broadcast_mul_fusion(%arg0: tensor<?xf32>, %arg1 : tensor<?xf32>, %arg
%2 = linalg.generic {indexing_maps = [#map2, #map2, #map2], iterator_types = ["parallel"]}
ins(%arg0, %arg1 : tensor<?xf32>, tensor<?xf32>)
outs(%1 : tensor<?xf32>) {
^bb0(%arg3: f32, %arg4: f32, %arg5: f32): // no predecessors
^bb0(%arg3: f32, %arg4: f32, %arg5: f32):
%3 = arith.addf %arg3, %arg4 : f32
linalg.yield %3 : f32
} -> tensor<?xf32>
@ -177,7 +177,7 @@ func @add_broadcast_mul_fusion(%arg0: tensor<?xf32>, %arg1 : tensor<?xf32>, %arg
%5 = linalg.generic {indexing_maps = [#map1, #map0, #map0], iterator_types = ["parallel", "parallel"]}
ins(%2, %arg2 : tensor<?xf32>, tensor<?x?xf32>)
outs(%4 : tensor<?x?xf32>){
^bb0(%arg5: f32, %arg6: f32, %arg7: f32): // no predecessors
^bb0(%arg5: f32, %arg6: f32, %arg7: f32):
%6 = arith.mulf %arg5, %arg6 : f32
linalg.yield %6 : f32
} -> tensor<?x?xf32>
@ -196,7 +196,7 @@ func @add_mul_scalar_fusion(%arg0: tensor<f32>, %arg1: tensor<f32>, %arg2: tenso
%1 = linalg.generic {indexing_maps = [#map0, #map0, #map0], iterator_types = []}
ins(%arg0, %arg1 : tensor<f32>, tensor<f32>)
outs(%0 : tensor<f32>) {
^bb0(%arg3: f32, %arg4: f32, %arg5: f32): // no predecessors
^bb0(%arg3: f32, %arg4: f32, %arg5: f32):
%2 = arith.addf %arg3, %arg4 : f32
linalg.yield %2 : f32
} -> tensor<f32>
@ -206,7 +206,7 @@ func @add_mul_scalar_fusion(%arg0: tensor<f32>, %arg1: tensor<f32>, %arg2: tenso
%2 = linalg.generic {indexing_maps = [#map0, #map0, #map0], iterator_types = []}
ins(%1, %arg2 : tensor<f32>, tensor<f32>)
outs(%0 : tensor<f32>) {
^bb0(%arg3: f32, %arg4: f32, %arg5: f32): // no predecessors
^bb0(%arg3: f32, %arg4: f32, %arg5: f32):
%3 = arith.mulf %arg3, %arg4 : f32
linalg.yield %3 : f32
} -> tensor<f32>
@ -292,7 +292,7 @@ func @producer_indexed_consumer_fusion(%arg0: tensor<?x?xi32>,
iterator_types = ["parallel", "parallel"] }
ins(%arg0, %arg1 : tensor<?x?xi32>, tensor<?x?xi32>)
outs(%2 : tensor<?x?xi32>) {
^bb0(%arg2: i32, %arg3: i32, %arg4: i32): // no predecessors
^bb0(%arg2: i32, %arg3: i32, %arg4: i32):
%10 = arith.addi %arg2, %arg3 : i32
linalg.yield %10 : i32
} -> tensor<?x?xi32>
@ -301,7 +301,7 @@ func @producer_indexed_consumer_fusion(%arg0: tensor<?x?xi32>,
iterator_types = ["parallel", "parallel"] }
ins(%3 : tensor<?x?xi32>)
outs(%2 : tensor<?x?xi32>) {
^bb0(%arg2: i32, %arg3: i32): // no predecessors
^bb0(%arg2: i32, %arg3: i32):
%idx0 = linalg.index 0 : index
%idx1 = linalg.index 1 : index
%5 = arith.index_cast %idx0 : index to i32
@ -343,7 +343,7 @@ func @indexed_producer_consumer_fusion(%arg0: tensor<?x?xi32>) -> tensor<?x?xi32
iterator_types = ["parallel", "parallel"] }
ins(%arg0 : tensor<?x?xi32>)
outs(%2 : tensor<?x?xi32>) {
^bb0(%arg4: i32, %arg5: i32): // no predecessors
^bb0(%arg4: i32, %arg5: i32):
%idx0 = linalg.index 0 : index
%idx1 = linalg.index 1 : index
%4 = arith.index_cast %idx0 : index to i32
@ -357,7 +357,7 @@ func @indexed_producer_consumer_fusion(%arg0: tensor<?x?xi32>) -> tensor<?x?xi32
iterator_types = ["parallel", "parallel"] }
ins(%3, %arg0 : tensor<?x?xi32>, tensor<?x?xi32>)
outs(%2 : tensor<?x?xi32>) {
^bb0(%arg2: i32, %arg3: i32, %arg4: i32): // no predecessors
^bb0(%arg2: i32, %arg3: i32, %arg4: i32):
%10 = arith.addi %arg2, %arg3 : i32
linalg.yield %10 : i32
} -> tensor<?x?xi32>
@ -397,7 +397,7 @@ func @indexed_producer_indexed_consumer_fusion(%arg0: tensor<?x?xi32>)
iterator_types = ["parallel", "parallel"] }
ins(%arg0 : tensor<?x?xi32>)
outs(%2 : tensor<?x?xi32>) {
^bb0(%arg2: i32, %arg3: i32): // no predecessors
^bb0(%arg2: i32, %arg3: i32):
%idx0 = linalg.index 0 : index
%idx1 = linalg.index 1 : index
%4 = arith.index_cast %idx0 : index to i32
@ -411,7 +411,7 @@ func @indexed_producer_indexed_consumer_fusion(%arg0: tensor<?x?xi32>)
iterator_types = ["parallel", "parallel"] }
ins(%3 : tensor<?x?xi32>)
outs(%2 : tensor<?x?xi32>) {
^bb0(%arg2: i32, %arg3: i32): // no predecessors
^bb0(%arg2: i32, %arg3: i32):
%idx0 = linalg.index 0 : index
%idx1 = linalg.index 1 : index
%5 = arith.index_cast %idx0 : index to i32
@ -504,7 +504,7 @@ func @scalar_generic_fusion
{indexing_maps = [affine_map<() -> ()>, affine_map<() -> ()>],
iterator_types = []}
ins(%arg1 : tensor<i32>) outs(%0 : tensor<f32>) {
^bb0(%arg2: i32, %arg3: f32): // no predecessors
^bb0(%arg2: i32, %arg3: f32):
%3 = arith.index_cast %arg2 : i32 to index
%4 = tensor.extract %arg0[%3, %c0, %c0] : tensor<5x1x1xf32>
linalg.yield %4 : f32
@ -515,7 +515,7 @@ func @scalar_generic_fusion
affine_map<(d0) -> (d0)>],
iterator_types = ["parallel"]}
ins(%1, %cst : tensor<f32>, tensor<10xf32>) outs(%2 : tensor<10xf32>) {
^bb0(%arg2: f32, %arg3: f32, %arg4: f32): // no predecessors
^bb0(%arg2: f32, %arg3: f32, %arg4: f32):
%4 = arith.mulf %arg2, %arg3 : f32
linalg.yield %4 : f32
} -> tensor<10xf32>
@ -580,7 +580,7 @@ func @consumer_with_reduction(%arg0: tensor<1x10xf32>,
iterator_types = ["parallel", "parallel"]}
ins(%arg0, %arg1 : tensor<1x10xf32>, tensor<1x10xf32>)
outs(%init : tensor<1x10xf32>) {
^bb0(%arg3: f32, %arg4: f32, %arg5: f32): // no predecessors
^bb0(%arg3: f32, %arg4: f32, %arg5: f32):
%2 = arith.addf %arg3, %arg4 : f32
linalg.yield %2 : f32
} -> tensor<1x10xf32>
@ -589,7 +589,7 @@ func @consumer_with_reduction(%arg0: tensor<1x10xf32>,
iterator_types = ["reduction"]}
ins(%0 : tensor<1x10xf32>)
outs(%arg2 : tensor<1xf32>) {
^bb0(%arg3: f32, %arg4: f32): // no predecessors
^bb0(%arg3: f32, %arg4: f32):
%2 = arith.addf %arg3, %arg4 : f32
linalg.yield %2 : f32
} -> tensor<1xf32>
@ -626,7 +626,7 @@ func @sigmoid_dynamic_dim(%0: tensor<?x1xf32>) -> tensor<?x1xf32> {
iterator_types = ["parallel", "parallel"]
}
outs(%init0 : tensor<?x1xf32>) {
^bb0(%a: f32): // no predecessors
^bb0(%a: f32):
linalg.yield %cp5 : f32
} -> tensor<?x1xf32>
%d0 = tensor.dim %0, %c0 : tensor<?x1xf32>
@ -639,7 +639,7 @@ func @sigmoid_dynamic_dim(%0: tensor<?x1xf32>) -> tensor<?x1xf32> {
}
ins(%0, %1 : tensor<?x1xf32>, tensor<?x1xf32>)
outs(%init1 : tensor<?x1xf32>) {
^bb0(%a: f32, %b: f32, %c: f32): // no predecessors
^bb0(%a: f32, %b: f32, %c: f32):
%m = arith.mulf %a, %b : f32
linalg.yield %m : f32
} -> tensor<?x1xf32>
@ -930,7 +930,7 @@ func @no_fusion_missing_reduction_shape(%arg0: tensor<f32>, %arg1: index) -> ten
indexing_maps = [#map0, #map1],
iterator_types = ["parallel", "parallel"]
} ins(%arg0 : tensor<f32>) outs(%4 : tensor<?x?xf32>) {
^bb0(%arg2: f32, %arg3: f32): // no predecessors
^bb0(%arg2: f32, %arg3: f32):
linalg.yield %arg2 : f32
} -> tensor<?x?xf32>
%6 = linalg.init_tensor [%arg1] : tensor<?xf32>
@ -939,7 +939,7 @@ func @no_fusion_missing_reduction_shape(%arg0: tensor<f32>, %arg1: index) -> ten
indexing_maps = [#map2, #map3],
iterator_types = ["parallel", "reduction"]
} ins(%5 : tensor<?x?xf32>) outs(%7 : tensor<?xf32>) {
^bb0(%arg2: f32, %arg3: f32): // no predecessors
^bb0(%arg2: f32, %arg3: f32):
%9 = arith.maxf %arg2, %arg3 : f32
linalg.yield %9 : f32
} -> tensor<?xf32>

View File

@ -13,7 +13,7 @@ func @fuse_indexed_consumer(%A: memref<?x?xf32>,
linalg.generic #pointwise_2d_trait
ins(%A, %B: memref<?x?xf32>, memref<?x?xf32>)
outs(%C : memref<?x?xf32>) {
^bb0(%e: f32, %arg5: f32, %arg6: f32): // no predecessors
^bb0(%e: f32, %arg5: f32, %arg6: f32):
%2 = arith.addf %e, %arg5 : f32
linalg.yield %2 : f32
}
@ -75,7 +75,7 @@ func @fuse_indexed_producer(%A: memref<?x?xindex>,
indexing_maps = [affine_map<(i, j) -> (j, i)>],
iterator_types = ["parallel", "parallel"]}
outs(%A : memref<?x?xindex>) {
^bb0(%a: index): // no predecessors
^bb0(%a: index):
%idx0 = linalg.index 0 : index
%idx1 = linalg.index 1 : index
%0 = arith.addi %idx0, %idx1 : index
@ -124,7 +124,7 @@ func @fuse_indexed_producer_tiled_second_dim_only(%A: memref<?x?xindex>,
indexing_maps = [affine_map<(i, j) -> (i, j)>],
iterator_types = ["parallel", "parallel"]}
outs(%A : memref<?x?xindex>) {
^bb0(%a: index): // no predecessors
^bb0(%a: index):
%idx0 = linalg.index 0 : index
%idx1 = linalg.index 1 : index
%0 = arith.addi %idx0, %idx1 : index

View File

@ -20,7 +20,7 @@ func @reshape(%A: tensor<?x16xf32>, %B: tensor<16xf32>, %init: tensor<?x112x16xf
iterator_types = ["parallel", "parallel", "parallel"]}
ins(%0, %B : tensor<?x112x16xf32>, tensor<16xf32>)
outs(%init : tensor<?x112x16xf32>) {
^bb0(%arg1: f32, %arg2: f32, %arg3: f32): // no predecessors
^bb0(%arg1: f32, %arg2: f32, %arg3: f32):
%s = arith.subf %arg1, %arg2 : f32
linalg.yield %s : f32
} -> tensor<?x112x16xf32>
@ -56,7 +56,7 @@ func @reshape_multiple(%A: tensor<12544x16xf32>, %B: tensor<12544x16xf32>,
iterator_types = ["parallel", "parallel", "parallel"]}
ins(%0, %1, %C : tensor<112x112x16xf32>, tensor<112x112x16xf32>, tensor<16xf32>)
outs(%2 : tensor<112x112x16xf32>) {
^bb0(%arg1: f32, %arg2: f32, %arg3: f32, %arg4: f32): // no predecessors
^bb0(%arg1: f32, %arg2: f32, %arg3: f32, %arg4: f32):
%s = arith.subf %arg1, %arg2 : f32
%m = arith.mulf %s, %arg3 : f32
linalg.yield %m : f32
@ -82,7 +82,7 @@ func @reshape_negative(%A: tensor<12544x16xf32>, %B: tensor<112xf32>) -> tensor<
iterator_types = ["parallel", "parallel", "parallel"]}
ins(%20, %B : tensor<112x112x16xf32>, tensor<112xf32>)
outs(%21 : tensor<112x112x16xf32>) {
^bb0(%arg1: f32, %arg2: f32, %arg3: f32): // no predecessors
^bb0(%arg1: f32, %arg2: f32, %arg3: f32):
%s = arith.subf %arg1, %arg2 : f32
linalg.yield %s : f32
} -> tensor<112x112x16xf32>
@ -107,7 +107,7 @@ func @type_correctness(%arg0 : tensor<6x5xi32>, %arg1 : tensor<5xf32>,
iterator_types = ["parallel", "parallel", "parallel"]}
ins(%25, %arg1, %arg2 : tensor<2x3x5xi32>, tensor<5xf32>, tensor<5xf32>)
outs(%26 : tensor<2x3x5xf32>) {
^bb0(%arg6: i32, %arg7: f32, %arg8: f32, %arg9: f32): // no predecessors
^bb0(%arg6: i32, %arg7: f32, %arg8: f32, %arg9: f32):
%29 = arith.sitofp %arg6 : i32 to f32
%30 = arith.addf %arg7, %cst_8 : f32
%31 = arith.divf %cst_7, %30 : f32

View File

@ -504,7 +504,7 @@ func @pointwise(%A: memref<?x?xf32, offset: 0, strides: [?, ?]>,
ins(%A, %A: memref<?x?xf32, offset: 0, strides: [?, ?]>,
memref<?x?xf32, offset: 0, strides: [?, ?]>)
outs(%B : memref<?x?xf32, offset: 0, strides: [?, ?]>) {
^bb0(%E: f32, %arg5: f32, %arg6: f32): // no predecessors
^bb0(%E: f32, %arg5: f32, %arg6: f32):
%2 = arith.addf %E, %arg5 : f32
linalg.yield %2 : f32
}
@ -525,7 +525,7 @@ func @pointwise(%A: memref<?x?xf32, offset: 0, strides: [?, ?]>,
ins(%4, %5: memref<?x?xf32, offset: ?, strides: [?, ?]>,
memref<?x?xf32, offset: ?, strides: [?, ?]>)
outs(%6 : memref<?x?xf32, offset: ?, strides: [?, ?]>) {
^bb0(%arg6: f32, %arg7: f32, %arg8: f32): // no predecessors
^bb0(%arg6: f32, %arg7: f32, %arg8: f32):
%7 = arith.mulf %arg6, %arg7 : f32
linalg.yield %7 : f32
}
@ -562,7 +562,7 @@ func @pointwise_no_view(%M: index, %N: index) {
linalg.generic #pointwise_2d_trait
ins(%A, %A : memref<?x?xf32>, memref<?x?xf32>)
outs(%B : memref<?x?xf32>) {
^bb0(%e: f32, %arg5: f32, %arg6: f32): // no predecessors
^bb0(%e: f32, %arg5: f32, %arg6: f32):
%2 = arith.addf %e, %arg5 : f32
linalg.yield %2 : f32
}
@ -583,7 +583,7 @@ func @pointwise_no_view(%M: index, %N: index) {
ins(%4, %5: memref<?x?xf32, offset: ?, strides: [?, ?]>,
memref<?x?xf32, offset: ?, strides: [?, ?]>)
outs(%6 : memref<?x?xf32, offset: ?, strides: [?, ?]>) {
^bb0(%arg6: f32, %arg7: f32, %arg8: f32): // no predecessors
^bb0(%arg6: f32, %arg7: f32, %arg8: f32):
%7 = arith.mulf %arg6, %arg7 : f32
linalg.yield %7 : f32
}
@ -618,7 +618,7 @@ func @fusion_of_three(%arg0: memref<100x10xf32>,
iterator_types = ["parallel", "parallel"]}
ins(%arg1 : memref<100xf32>)
outs(%0 : memref<100x10xf32>) {
^bb0(%arg3: f32, %arg4: f32): // no predecessors
^bb0(%arg3: f32, %arg4: f32):
linalg.yield %arg3 : f32
}
%1 = memref.alloc() {temp = true} : memref<100x10xf32>
@ -627,7 +627,7 @@ func @fusion_of_three(%arg0: memref<100x10xf32>,
iterator_types = ["parallel", "parallel"]}
ins(%arg0, %0: memref<100x10xf32>, memref<100x10xf32>)
outs(%1 : memref<100x10xf32>) {
^bb0(%arg3: f32, %arg4: f32, %arg5: f32): // no predecessors
^bb0(%arg3: f32, %arg4: f32, %arg5: f32):
%2 = arith.subf %arg3, %arg4 : f32
linalg.yield %2 : f32
}
@ -647,7 +647,7 @@ func @fusion_of_three(%arg0: memref<100x10xf32>,
iterator_types = ["parallel", "parallel"]}
ins(%6 : memref<?x?xf32, #map2>)
outs(%7 : memref<?x?xf32, #map2>) {
^bb0(%arg3: f32, %arg4: f32): // no predecessors
^bb0(%arg3: f32, %arg4: f32):
%8 = math.exp %arg3 : f32
linalg.yield %8 : f32
}

View File

@ -10,7 +10,7 @@
func @generalize_pad_tensor_static_shape(%arg0: tensor<1x28x28x1xf32>) -> tensor<1x32x32x1xf32> {
%cst = arith.constant 0.000000e+00 : f32
%0 = linalg.pad_tensor %arg0 low[0, 2, 2, 0] high[0, 2, 2, 0] {
^bb0(%arg1: index, %arg2: index, %arg3: index, %arg4: index): // no predecessors
^bb0(%arg1: index, %arg2: index, %arg3: index, %arg4: index):
linalg.yield %cst : f32
} : tensor<1x28x28x1xf32> to tensor<1x32x32x1xf32>
return %0 : tensor<1x32x32x1xf32>
@ -39,7 +39,7 @@ func @generalize_pad_tensor_dynamic_shape(%arg0: tensor<4x?x2x?xf32>, %arg1: ind
%c0 = arith.constant 0 : index
%cst = arith.constant 0.0 : f32
%out = linalg.pad_tensor %arg0 low[%c0, %c0, %arg1, %c0] high[%c0, %c0, %c0, %arg1] {
^bb0(%gen_arg1: index, %gen_arg2: index, %gen_arg3: index, %gen_arg4: index): // no predecessors
^bb0(%gen_arg1: index, %gen_arg2: index, %gen_arg3: index, %gen_arg4: index):
linalg.yield %cst : f32
} : tensor<4x?x2x?xf32> to tensor<4x?x?x?xf32>
return %out : tensor<4x?x?x?xf32>

View File

@ -30,7 +30,7 @@ func @static_size_divisible(%arg0: tensor<24x12xf32>,
// MATVEC-DAG: %[[T4:.*]] = tensor.extract_slice %[[T0]][%[[IDX0]]
%2 = tensor.extract_slice %arg1[%arg3] [4] [1] : tensor<12xf32> to tensor<4xf32>
%3 = linalg.pad_tensor %2 nofold low[%c0] high[%c0] {
^bb0(%arg5: index): // no predecessors
^bb0(%arg5: index):
linalg.yield %cst : f32
} : tensor<4xf32> to tensor<4xf32>
@ -81,11 +81,11 @@ func @static_size_not_divisible(%arg0: tensor<24x12xf32>,
%3 = tensor.extract_slice %arg1[%arg3] [%1] [1] : tensor<12xf32> to tensor<?xf32>
%4 = affine.apply #map1(%1)
%5 = linalg.pad_tensor %2 low[%c0, %c0] high[%c0, %4] {
^bb0(%arg5: index, %arg6: index): // no predecessors
^bb0(%arg5: index, %arg6: index):
linalg.yield %cst : f32
} : tensor<24x?xf32> to tensor<24x5xf32>
%6 = linalg.pad_tensor %3 low[%c0] high[%4] {
^bb0(%arg5: index): // no predecessors
^bb0(%arg5: index):
linalg.yield %cst : f32
} : tensor<?xf32> to tensor<5xf32>
@ -141,11 +141,11 @@ func @dynamic_size(%arg0: tensor<24x?xf32>,
%4 = tensor.extract_slice %arg1[%arg3] [%2] [1] : tensor<?xf32> to tensor<?xf32>
%5 = affine.apply #map1(%2)
%6 = linalg.pad_tensor %3 low[%c0, %c0] high[%c0, %5] {
^bb0(%arg5: index, %arg6: index): // no predecessors
^bb0(%arg5: index, %arg6: index):
linalg.yield %cst : f32
} : tensor<24x?xf32> to tensor<24x4xf32>
%7 = linalg.pad_tensor %4 nofold low[%c0] high[%5] {
^bb0(%arg5: index): // no predecessors
^bb0(%arg5: index):
linalg.yield %cst : f32
} : tensor<?xf32> to tensor<4xf32>
@ -177,7 +177,7 @@ func @non_constant_padding(%arg0: tensor<24x12xf32>,
// MATVEC: %[[T1:.*]] = linalg.pad_tensor %[[T0]]
%2 = tensor.extract_slice %arg1[%arg3] [4] [1] : tensor<12xf32> to tensor<4xf32>
%3 = linalg.pad_tensor %2 nofold low[%c0] high[%c0] {
^bb0(%arg5: index): // no predecessors
^bb0(%arg5: index):
%5 = arith.index_cast %arg3 : index to i32
%6 = arith.sitofp %5 : i32 to f32
linalg.yield %6 : f32
@ -214,7 +214,7 @@ func @non_constant_op_padding(%arg0: tensor<24x12xf32>,
%2 = tensor.extract_slice %arg1[%arg3] [4] [1] : tensor<12xf32> to tensor<4xf32>
%3 = tensor.extract %arg1[%arg3] : tensor<12xf32>
%4 = linalg.pad_tensor %2 nofold low[%c0] high[%c0] {
^bb0(%arg5: index): // no predecessors
^bb0(%arg5: index):
linalg.yield %3 : f32
} : tensor<4xf32> to tensor<4xf32>
@ -251,7 +251,7 @@ func @non_index_operand(%arg0: tensor<24x12xf32>,
%2 = tensor.extract_slice %arg1[%arg4] [4] [1] : tensor<12xf32> to tensor<4xf32>
%3 = arith.index_cast %arg3 : i32 to index
%4 = linalg.pad_tensor %2 nofold low[%3] high[%3] {
^bb0(%arg6: index): // no predecessors
^bb0(%arg6: index):
linalg.yield %cst : f32
} : tensor<4xf32> to tensor<4xf32>
@ -288,7 +288,7 @@ func @memory_effect(%arg0: tensor<24x12xf32>,
%2 = tensor.extract_slice %arg1[%arg4] [4] [1] : tensor<12xf32> to tensor<4xf32>
%3 = memref.load %arg3[%c0] : memref<?xindex>
%4 = linalg.pad_tensor %2 nofold low[%3] high[%3] {
^bb0(%arg6: index): // no predecessors
^bb0(%arg6: index):
linalg.yield %cst : f32
} : tensor<4xf32> to tensor<4xf32>
@ -328,7 +328,7 @@ func @index_result_loop(%arg0: tensor<24x12xf32>,
scf.yield %6 : index
}
%4 = linalg.pad_tensor %2 nofold low[%3] high[%3] {
^bb0(%arg6: index): // no predecessors
^bb0(%arg6: index):
linalg.yield %cst : f32
} : tensor<4xf32> to tensor<4xf32>
@ -373,7 +373,7 @@ func @tile_and_fuse(%arg0: tensor<12x6xf32>,
// Check the fused and padded fill op does not prevent hoisting.
%4 = linalg.pad_tensor %2 nofold low[%c0, %c0] high[%3, %c0] {
^bb0(%arg5: index, %arg6: index): // no predecessors
^bb0(%arg5: index, %arg6: index):
linalg.yield %cst : f32
} : tensor<?x24xf32> to tensor<5x24xf32>
%5 = linalg.fill(%cst, %4) : f32, tensor<5x24xf32> -> tensor<5x24xf32>
@ -394,18 +394,18 @@ func @tile_and_fuse(%arg0: tensor<12x6xf32>,
%10 = tensor.extract_slice %arg1[%arg5, 0] [3, 24] [1, 1] : tensor<6x24xf32> to tensor<3x24xf32>
%11 = tensor.extract_slice %arg6[0, 0] [%1, 24] [1, 1] : tensor<?x24xf32> to tensor<?x24xf32>
%12 = linalg.pad_tensor %9 nofold low[%c0, %c0] high[%3, %c0] {
^bb0(%arg7: index, %arg8: index): // no predecessors
^bb0(%arg7: index, %arg8: index):
linalg.yield %cst : f32
} : tensor<?x3xf32> to tensor<5x3xf32>
%13 = linalg.pad_tensor %10 nofold low[%c0, %c0] high[%c0, %c0] {
^bb0(%arg7: index, %arg8: index): // no predecessors
^bb0(%arg7: index, %arg8: index):
linalg.yield %cst : f32
} : tensor<3x24xf32> to tensor<3x24xf32>
// Check the output padding is not hoisted.
// MATMUL: %[[T8:.*]] = linalg.pad_tensor
%14 = linalg.pad_tensor %11 nofold low[%c0, %c0] high[%3, %c0] {
^bb0(%arg7: index, %arg8: index): // no predecessors
^bb0(%arg7: index, %arg8: index):
linalg.yield %cst : f32
} : tensor<?x24xf32> to tensor<5x24xf32>

View File

@ -14,7 +14,7 @@ func @inline_zerod(%arg0: tensor<4xf32>, %scalar: tensor<f32>) -> tensor<4xf32>
ins(%arg0, %scalar : tensor<4xf32>, tensor<f32>)
outs(%0 : tensor<4xf32>) {
// CHECK: ^bb0(%{{.*}}: f32, %{{.*}}: f32)
^bb0(%arg1: f32, %arg2: f32, %arg3: f32): // no predecessors
^bb0(%arg1: f32, %arg2: f32, %arg3: f32):
// CHECK: tensor.extract %[[SCALAR]][]
%2 = arith.divf %arg1, %arg2 : f32
linalg.yield %2 : f32
@ -39,7 +39,7 @@ func @inline_oned(%arg0: tensor<4xf32>, %scalar: tensor<1xf32>) -> tensor<4xf32>
ins(%arg0, %scalar : tensor<4xf32>, tensor<1xf32>)
outs(%0 : tensor<4xf32>) {
// CHECK: ^bb0(%{{.*}}: f32, %{{.*}}: f32)
^bb0(%arg1: f32, %arg2: f32, %arg3: f32): // no predecessors
^bb0(%arg1: f32, %arg2: f32, %arg3: f32):
// CHECK: tensor.extract %[[SCALAR]][%[[ZERO]]]
%2 = arith.divf %arg1, %arg2 : f32
linalg.yield %2 : f32

View File

@ -357,7 +357,7 @@ func @init_tensor_err(%arg0 : index)
func @pad_result_type(%arg0: tensor<?x2x3x4xi32>, %arg1: index, %arg2: i32) -> tensor<?x?x?x8xf32> {
// expected-error @+1 {{specified type 'tensor<?x?x?x8xf32>' does not match the inferred type 'tensor<?x?x?x9xi32>}}
%0 = linalg.pad_tensor %arg0 low[1, %arg1, 2, 2] high[1, 2, %arg1, 3] {
^bb0(%arg3: index, %arg4: index): // no predecessors
^bb0(%arg3: index, %arg4: index):
linalg.yield %arg2 : i32
} : tensor<?x2x3x4xi32> to tensor<?x?x?x8xf32>
return %0 : tensor<?x?x?x8xf32>
@ -368,7 +368,7 @@ func @pad_result_type(%arg0: tensor<?x2x3x4xi32>, %arg1: index, %arg2: i32) -> t
func @pad_number_of_block_args(%arg0: tensor<?x4xi32>, %arg1: i32) -> tensor<?x9xi32> {
// expected-error @+1 {{expected the block to have 2 arguments}}
%0 = linalg.pad_tensor %arg0 low[1, 2] high[2, 3] {
^bb0(%arg2: index, %arg3: index, %arg4: index): // no predecessors
^bb0(%arg2: index, %arg3: index, %arg4: index):
linalg.yield %arg1 : i32
} : tensor<?x4xi32> to tensor<?x9xi32>
return %0 : tensor<?x9xi32>
@ -388,7 +388,7 @@ func @pad_no_block(%arg0: tensor<?x4xi32>, %arg1: i32) -> tensor<?x9xi32> {
func @pad_block_args(%arg0: tensor<?x4xi32>, %arg1: i32) -> tensor<?x9xi32> {
// expected-error @+1 {{op expected block argument 1 to be an index}}
%0 = linalg.pad_tensor %arg0 low[1, 2] high[2, 3] {
^bb0(%arg2: i32, %arg3: i32): // no predecessors
^bb0(%arg2: i32, %arg3: i32):
linalg.yield %arg1 : i32
} : tensor<?x4xi32> to tensor<?x9xi32>
return %0 : tensor<?x9xi32>
@ -399,7 +399,7 @@ func @pad_block_args(%arg0: tensor<?x4xi32>, %arg1: i32) -> tensor<?x9xi32> {
func @pad_num_yields(%arg0: tensor<?x4xi32>, %arg1: i32) -> tensor<?x9xi32> {
// expected-error @+3 {{op expected single yield operand (got 2)}}
%0 = linalg.pad_tensor %arg0 low[1, 2] high[2, 3] {
^bb0(%arg2: index, %arg3: index): // no predecessors
^bb0(%arg2: index, %arg3: index):
linalg.yield %arg1, %arg1 : i32, i32
} : tensor<?x4xi32> to tensor<?x9xi32>
return %0 : tensor<?x9xi32>
@ -410,7 +410,7 @@ func @pad_num_yields(%arg0: tensor<?x4xi32>, %arg1: i32) -> tensor<?x9xi32> {
func @pad_yield_type(%arg0: tensor<?x4xi32>, %arg1: i8) -> tensor<?x9xi32> {
// expected-error @+3 {{op expected yield type to match shape element type}}
%0 = linalg.pad_tensor %arg0 low[1, 2] high[2, 3] {
^bb0(%arg2: index, %arg3: index): // no predecessors
^bb0(%arg2: index, %arg3: index):
linalg.yield %arg1 : i8
} : tensor<?x4xi32> to tensor<?x9xi32>
return %0 : tensor<?x9xi32>

View File

@ -573,7 +573,7 @@ func @generic_index_op_1D_reduce(%arg0: memref<?xf32>,
func @generic_const_init(%arg0: memref<?xf32>) {
%cst = arith.constant 1.0 : f32
linalg.generic #trait_const_fill outs(%arg0 : memref<?xf32>) {
^bb0(%arg1: f32): // no predecessors
^bb0(%arg1: f32):
linalg.yield %cst : f32
}
return

View File

@ -7,7 +7,7 @@ func @pad_tensor_with_memrefs(%arg0: memref<1x28x28x1xf32>) -> memref<2x31x31x3x
%cst = arith.constant 0.000000e+00 : f32
%0 = bufferization.to_tensor %arg0 : memref<1x28x28x1xf32>
%1 = linalg.pad_tensor %0 low[1, 1, 1, 2] high[0, 2, 2, 0] {
^bb0(%arg1: index, %arg2: index, %arg3: index, %arg4: index): // no predecessors
^bb0(%arg1: index, %arg2: index, %arg3: index, %arg4: index):
linalg.yield %cst : f32
} : tensor<1x28x28x1xf32> to tensor<2x31x31x3xf32>
%2 = bufferization.to_memref %1 : memref<2x31x31x3xf32>
@ -26,7 +26,7 @@ func @pad_tensor_with_memrefs(%arg0: memref<1x28x28x1xf32>) -> memref<2x31x31x3x
func @pad_tensor_no_memrefs(%arg0: tensor<1x28x28xf32>) -> tensor<2x32x32xf32> {
%cst = arith.constant 0.000000e+00 : f32
%0 = linalg.pad_tensor %arg0 low[1, 2, 2] high[0, 2, 2] {
^bb0(%arg1: index, %arg2: index, %arg3: index): // no predecessors
^bb0(%arg1: index, %arg2: index, %arg3: index):
linalg.yield %cst : f32
} : tensor<1x28x28xf32> to tensor<2x32x32xf32>
return %0 : tensor<2x32x32xf32>
@ -44,7 +44,7 @@ func @pad_tensor_no_memrefs(%arg0: tensor<1x28x28xf32>) -> tensor<2x32x32xf32> {
func @pad_tensor_detailed(%arg0: tensor<1x28x28x1xf32>) -> tensor<1x32x32x1xf32> {
%cst = arith.constant 0.000000e+00 : f32
%0 = linalg.pad_tensor %arg0 low[0, 2, 2, 0] high[0, 2, 2, 0] {
^bb0(%arg1: index, %arg2: index, %arg3: index, %arg4: index): // no predecessors
^bb0(%arg1: index, %arg2: index, %arg3: index, %arg4: index):
linalg.yield %cst : f32
} : tensor<1x28x28x1xf32> to tensor<1x32x32x1xf32>
return %0 : tensor<1x32x32x1xf32>

View File

@ -566,7 +566,7 @@ func @conv_interface_wrong_input_indexing_map(
%arg0 : tensor<?x?x?x?xf32>, %arg2 : tensor<?x?x?x?xf32>, %arg1 : tensor<?x?x?x?xf32>) -> tensor<?x?x?x?xf32> {
// expected-error @+1 {{unexpected input index map for convolutions}}
%0 = "linalg.conv_2d_nhwc_hwcf"(%arg0, %arg1, %arg2) ({
^bb0(%arg3: f32, %arg4: f32, %arg5 : f32): // no predecessors
^bb0(%arg3: f32, %arg4: f32, %arg5 : f32):
%1 = "arith.mulf"(%arg3, %arg4) : (f32, f32) -> f32
%2 = "arith.addf"(%arg5, %1) : (f32, f32) -> f32
"linalg.yield"(%2) : (f32) -> ()
@ -583,7 +583,7 @@ func @conv_interface_wrong_num_operands(
%arg0 : tensor<?x?x?x?xf32>, %arg1 : tensor<?x?x?x?x?xf32>, %arg2 : tensor<?x?x?x?xf32>) -> tensor<?x?x?x?xf32> {
// expected-error @+1 {{expected output/filter indexing maps to be projected permutations}}
%0 = "linalg.conv_2d_nhwc_hwcf"(%arg0, %arg1, %arg2) ({
^bb0(%arg3: f32, %arg4: f32, %arg5 : f32): // no predecessors
^bb0(%arg3: f32, %arg4: f32, %arg5 : f32):
%1 = "arith.mulf"(%arg3, %arg4) : (f32, f32) -> f32
%2 = "arith.addf"(%arg5, %1) : (f32, f32) -> f32
"linalg.yield"(%2) : (f32) -> ()

View File

@ -202,7 +202,7 @@ func @compose_padding(%arg0: tensor<64x64xf32>,
// MATMUL: %[[T3:.*]] = linalg.fill(%{{.*}}, %[[T2]]
%0 = tensor.extract_slice %arg0[0, 0] [%size, %size] [1, 1] : tensor<64x64xf32> to tensor<?x?xf32>
%1 = linalg.pad_tensor %0 low[0, 0] high[%iv0, %iv0] {
^bb0(%arg3: index, %arg4: index): // no predecessors
^bb0(%arg3: index, %arg4: index):
linalg.yield %cst : f32
} : tensor<?x?xf32> to tensor<64x64xf32>
%2 = linalg.fill(%cst, %1) : f32, tensor<64x64xf32> -> tensor<64x64xf32>
@ -234,7 +234,7 @@ func @different_padding_values(%arg0: tensor<64x64xf32>,
%size = affine.min #map0()[%iv0]
%0 = tensor.extract_slice %arg0[0, 0] [%size, %size] [1, 1] : tensor<64x64xf32> to tensor<?x?xf32>
%1 = linalg.pad_tensor %0 low[0, 0] high[%iv0, %iv0] {
^bb0(%arg3: index, %arg4: index): // no predecessors
^bb0(%arg3: index, %arg4: index):
linalg.yield %cst : f32
} : tensor<?x?xf32> to tensor<64x64xf32>
%2 = linalg.fill(%cst, %1) : f32, tensor<64x64xf32> -> tensor<64x64xf32>
@ -259,7 +259,7 @@ func @different_padding_dynamic_sizes(%arg0: tensor<64x64xf32>,
%size = affine.min #map0()[%iv0]
%0 = tensor.extract_slice %arg0[0, 0] [%iv0, %iv0] [1, 1] : tensor<64x64xf32> to tensor<?x?xf32>
%1 = linalg.pad_tensor %0 low[0, 0] high[%iv0, %iv0] {
^bb0(%arg3: index, %arg4: index): // no predecessors
^bb0(%arg3: index, %arg4: index):
linalg.yield %cst : f32
} : tensor<?x?xf32> to tensor<64x64xf32>
%2 = linalg.fill(%cst, %1) : f32, tensor<64x64xf32> -> tensor<64x64xf32>
@ -284,7 +284,7 @@ func @different_padding_dynamic_rank(%arg0: tensor<64x64x1xf32>,
%size = affine.min #map0()[%iv0]
%0 = tensor.extract_slice %arg0[0, 0, 0] [%size, %size, 1] [1, 1, 1] : tensor<64x64x1xf32> to tensor<?x?xf32>
%1 = linalg.pad_tensor %0 low[0, 0] high[%iv0, %iv0] {
^bb0(%arg3: index, %arg4: index): // no predecessors
^bb0(%arg3: index, %arg4: index):
linalg.yield %cst : f32
} : tensor<?x?xf32> to tensor<64x64xf32>
%2 = linalg.fill(%cst, %1) : f32, tensor<64x64xf32> -> tensor<64x64xf32>
@ -309,7 +309,7 @@ func @different_padding_static_sizes(%arg0: tensor<62x62xf32>,
%size = affine.min #map0()[%iv0]
%0 = tensor.extract_slice %arg0[0, 0] [%size, %size] [1, 1] : tensor<62x62xf32> to tensor<?x?xf32>
%1 = linalg.pad_tensor %0 low[0, 0] high[%iv0, %iv0] {
^bb0(%arg3: index, %arg4: index): // no predecessors
^bb0(%arg3: index, %arg4: index):
linalg.yield %cst : f32
} : tensor<?x?xf32> to tensor<62x62xf32>
%2 = linalg.fill(%cst, %1) : f32, tensor<62x62xf32> -> tensor<62x62xf32>

View File

@ -9,7 +9,7 @@ func @linalg_generic_sum(%lhs: memref<2x2xf32>,
iterator_types = ["parallel", "parallel"]}
ins(%lhs, %rhs : memref<2x2xf32>, memref<2x2xf32>)
outs(%sum : memref<2x2xf32>) {
^bb0(%lhs_in: f32, %rhs_in: f32, %sum_out: f32): // no predecessors
^bb0(%lhs_in: f32, %rhs_in: f32, %sum_out: f32):
%0 = arith.addf %lhs_in, %rhs_in : f32
linalg.yield %0 : f32
}

View File

@ -15,7 +15,7 @@ func @generic_op_reshape_producer_fusion(%arg0 : tensor<?x?x4x?xf32>,
iterator_types = ["parallel", "parallel", "parallel"]}
ins(%0, %arg1, %arg2 : tensor<?x?x?xf32>, tensor<?x?x?xf32>, f32)
outs(%0 : tensor<?x?x?xf32>) {
^bb0(%arg3: f32, %arg4: f32, %arg5: f32, %s: f32): // no predecessors
^bb0(%arg3: f32, %arg4: f32, %arg5: f32, %s: f32):
%1 = arith.mulf %arg3, %arg4 : f32
%2 = arith.addf %1, %arg5 : f32
linalg.yield %2 : f32
@ -58,7 +58,7 @@ func @generic_op_reshape_consumer_fusion(%arg0 : tensor<?x?xf32>,
iterator_types = ["parallel", "parallel"]}
ins(%arg0, %arg1, %arg2 : tensor<?x?xf32>, tensor<?x?xf32>, f32)
outs(%arg0 : tensor<?x?xf32>) {
^bb0(%arg3: f32, %arg4: f32, %arg5: f32, %s: f32): // no predecessors
^bb0(%arg3: f32, %arg4: f32, %arg5: f32, %s: f32):
%1 = arith.mulf %arg3, %arg4 : f32
%2 = arith.addf %1, %arg5 : f32
linalg.yield %2 : f32
@ -143,7 +143,7 @@ func @generic_op_reshape_consumer_static(%arg0: tensor<264x4xf32>)
iterator_types = ["parallel", "parallel"]}
ins(%arg0, %cst : tensor<264x4xf32>, tensor<264x4xf32>)
outs(%0 : tensor<264x4xf32>) {
^bb0(%arg1: f32, %arg2: f32, %s: f32): // no predecessors
^bb0(%arg1: f32, %arg2: f32, %s: f32):
%2 = arith.mulf %arg1, %arg2 : f32
linalg.yield %2 : f32
} -> tensor<264x4xf32>
@ -230,7 +230,7 @@ func @indexed_producer_reshape_consumer_fusion(%arg0 : tensor<?x?xi32>,
iterator_types = ["parallel", "parallel"]}
ins(%arg0, %arg1 : tensor<?x?xi32>, tensor<?x?xi32>)
outs(%arg0 : tensor<?x?xi32>) {
^bb0(%arg3: i32, %arg4: i32, %s: i32): // no predecessors
^bb0(%arg3: i32, %arg4: i32, %s: i32):
%idx0 = linalg.index 0 : index
%idx1 = linalg.index 1 : index
%1 = arith.muli %arg3, %arg4 : i32
@ -345,7 +345,7 @@ func @reshape_as_producer_projected_permutation(
iterator_types = ["parallel", "parallel", "parallel"]}
ins(%0 : tensor<264x?xi32>)
outs(%shape : tensor<264x?x4xi32>) {
^bb0(%arg1: i32, %s: i32): // no predecessors
^bb0(%arg1: i32, %s: i32):
%idx0 = linalg.index 0 : index
%idx1 = linalg.index 1 : index
%idx2 = linalg.index 2 : index
@ -401,7 +401,7 @@ func @generic_op_reshape_consumer_fusion_projected(%arg0 : tensor<?x?xf32>,
iterator_types = ["parallel", "parallel"]}
ins(%arg0, %arg1 : tensor<?x?xf32>, tensor<?x?xf32>)
outs(%arg0 : tensor<?x?xf32>) {
^bb0(%arg3: f32, %arg4: f32, %s: f32): // no predecessors
^bb0(%arg3: f32, %arg4: f32, %s: f32):
%1 = arith.mulf %arg3, %arg4 : f32
linalg.yield %1 : f32
} -> tensor<?x?xf32>
@ -439,7 +439,7 @@ func @unit_dim_reshape_expansion(%arg0 : tensor<1x5xf32>) -> tensor<5x5xf32> {
affine_map<(d0, d1) -> (d0, d1)>],
iterator_types = ["parallel", "parallel"]}
ins(%0 : tensor<5xf32>) outs(%1 : tensor<5x5xf32>) {
^bb0(%arg2: f32, %arg3: f32): // no predecessors
^bb0(%arg2: f32, %arg3: f32):
linalg.yield %arg2 : f32
} -> tensor<5x5xf32>
return %2 : tensor<5x5xf32>
@ -458,7 +458,7 @@ func @unit_dim_reshape_collapse(%arg0 : tensor<5xf32>) -> tensor<5x1x5xf32> {
affine_map<(d0, d1) -> (d0, d1)>],
iterator_types = ["parallel", "parallel"]}
ins(%arg0 : tensor<5xf32>) outs(%0 : tensor<5x5xf32>) {
^bb0(%arg2: f32, %arg3: f32): // no predecessors
^bb0(%arg2: f32, %arg3: f32):
linalg.yield %arg2 : f32
} -> tensor<5x5xf32>
%2 = tensor.expand_shape %1 [[0, 1], [2]]
@ -487,7 +487,7 @@ func @unit_dim_reshape_expansion_full
iterator_types = ["parallel", "parallel", "parallel"]}
ins(%0, %arg1 : tensor<?x2x4xf32>, tensor<?x2x4xf32>)
outs(%2 : tensor<?x2x4xf32>) {
^bb0(%arg2: f32, %arg3: f32, %arg4: f32): // no predecessors
^bb0(%arg2: f32, %arg3: f32, %arg4: f32):
%4 = arith.mulf %arg2, %arg3 : f32
linalg.yield %4 : f32
} -> tensor<?x2x4xf32>
@ -543,7 +543,7 @@ func @no_fuse_mismatched_dynamism(%arg0: tensor<1x1xi64>, %arg1: tensor<?xi64>)
iterator_types = ["parallel"]}
ins(%0, %arg1 : tensor<1xi64>, tensor<?xi64>)
outs(%1 : tensor<1xi64>) {
^bb0(%arg4: i64, %arg5: i64, %arg6: i64): // no predecessors
^bb0(%arg4: i64, %arg5: i64, %arg6: i64):
%3 = arith.addi %arg4, %arg5 : i64
linalg.yield %3 : i64
} -> tensor<1xi64>


@ -10,7 +10,7 @@ func @generic_op_reshape_producer_fusion(%arg0 : tensor<?x?x?xi32>)
iterator_types = ["parallel", "parallel", "parallel", "parallel"] }
ins(%0 : tensor<?x?x4x?xi32>)
outs(%0 : tensor<?x?x4x?xi32>) {
^bb0(%arg6: i32, %arg7 : i32): // no predecessors
^bb0(%arg6: i32, %arg7 : i32):
%idx = linalg.index 0 : index
%2 = arith.index_cast %idx : index to i32
%3 = arith.addi %arg6, %2 : i32
@ -40,7 +40,7 @@ func @generic_op_reshape_consumer_fusion(%arg0 : tensor<?x?x4x5xi32>)
indexing_maps = [#map0, #map0],
iterator_types = ["parallel", "parallel", "parallel", "parallel"] }
ins(%arg0 : tensor<?x?x4x5xi32>) outs(%arg0 : tensor<?x?x4x5xi32>) {
^bb0(%arg6: i32, %arg7: i32): // no predecessors
^bb0(%arg6: i32, %arg7: i32):
%idx = linalg.index 0 : index
%2 = arith.index_cast %idx : index to i32
%3 = arith.addi %arg6, %2 : i32
@ -75,7 +75,7 @@ func @generic_op_021_permultation_reshape_producer_fusion(%arg0 : tensor<3x35xf3
{indexing_maps = [#map2, #map3],
iterator_types = ["parallel", "parallel", "parallel"]}
ins(%0 : tensor<3x5x7xf32>) outs(%1 : tensor<3x7x5xf32>) {
^bb0(%arg2: f32, %arg3 : f32): // no predecessors
^bb0(%arg2: f32, %arg3 : f32):
linalg.yield %arg2 : f32
} -> tensor<3x7x5xf32>
return %2 : tensor<3x7x5xf32>
@ -100,7 +100,7 @@ func @generic_op_120_permutation_reshape_producer_fusion(%arg0 : tensor<3x35xf32
{indexing_maps = [#map2, #map3],
iterator_types = ["parallel", "parallel", "parallel"]}
ins(%0 : tensor<3x5x7xf32>) outs(%1 : tensor<5x7x3xf32>) {
^bb0(%arg2: f32, %arg3: f32): // no predecessors
^bb0(%arg2: f32, %arg3: f32):
linalg.yield %arg2 : f32
} -> tensor<5x7x3xf32>
return %2 : tensor<5x7x3xf32>
@ -127,7 +127,7 @@ func @generic_op_102_permultation_reshape_producer_fusion(%arg0 : tensor<3x35xf3
{indexing_maps = [#map2, #map3],
iterator_types = ["parallel", "parallel", "parallel"]}
ins(%0 : tensor<3x5x7xf32>) outs(%1 : tensor<5x3x7xf32>) {
^bb0(%arg2: f32, %arg3: f32): // no predecessors
^bb0(%arg2: f32, %arg3: f32):
linalg.yield %arg2 : f32
} -> tensor<5x3x7xf32>
return %2 : tensor<5x3x7xf32>
@ -153,7 +153,7 @@ func @generic_op_102_permultation_reshape_consumer_fusion(%arg0 : tensor<3x5x7xf
{indexing_maps = [#map0, #map1],
iterator_types = ["parallel", "parallel", "parallel"]}
ins(%arg0 : tensor<3x5x7xf32>) outs(%0 : tensor<5x3x7xf32>) {
^bb0(%arg2: f32, %arg3 : f32): // no predecessors
^bb0(%arg2: f32, %arg3 : f32):
linalg.yield %arg2 : f32
} -> tensor<5x3x7xf32>
%2 = tensor.collapse_shape %1 [[0], [1, 2]]
@ -184,7 +184,7 @@ func @generic_op_reshape_consumer_nofusion(%arg0 : tensor<?x?x?x5xf32>,
iterator_types = ["parallel", "parallel", "parallel", "parallel"]}
ins(%arg0, %arg1 : tensor<?x?x?x5xf32>, tensor<?x?x?x5xf32>)
outs(%arg0 : tensor<?x?x?x5xf32>) {
^bb0(%arg3: f32, %arg4: f32, %arg5: f32): // no predecessors
^bb0(%arg3: f32, %arg4: f32, %arg5: f32):
%1 = arith.mulf %arg3, %arg4 : f32
linalg.yield %1 : f32
} -> tensor<?x?x?x5xf32>
@ -209,7 +209,7 @@ func @generic_op_permultation_reshape_consumer_fusion_unused_dim(%arg0 : tensor<
affine_map<(d0, d1) -> (d0, d1)>],
iterator_types = ["parallel", "parallel"]}
ins(%arg0 : tensor<6x1xf32>) outs(%0 : tensor<6x1xi32>) {
^bb0(%arg3: f32, %arg4: i32): // no predecessors
^bb0(%arg3: f32, %arg4: i32):
%5 = arith.fptosi %arg3 : f32 to i32
linalg.yield %5 : i32
} -> tensor<6x1xi32>


@ -259,7 +259,7 @@ func @generic_without_inputs(%arg0 : memref<?x?x?xf32>) {
linalg.generic {indexing_maps = [#map0],
iterator_types = ["parallel", "parallel", "parallel"]}
outs(%arg0 : memref<?x?x?xf32>) {
^bb0(%arg3: f32): // no predecessors
^bb0(%arg3: f32):
%cst = arith.constant 0.000000e+00 : f32
linalg.yield %cst : f32
}
@ -322,7 +322,7 @@ func @generic_with_multiple_tensor_outputs(
iterator_types = ["reduction"]}
ins(%arg0, %arg1 : tensor<?xi32>, tensor<?xi32>)
outs(%1, %3 : tensor<i32>, tensor<i32>) {
^bb0(%arg3: i32, %arg4: i32, %arg5: i32, %arg6: i32): // no predecessors
^bb0(%arg3: i32, %arg4: i32, %arg5: i32, %arg6: i32):
%5 = arith.cmpi sge, %arg3, %arg5 : i32
%6 = select %5, %arg3, %arg5 : i32
%7 = arith.cmpi eq, %arg3, %arg5 : i32


@ -106,7 +106,7 @@ builtin.func @fuse_reduction(%arg0: tensor<24x12xf32>,
%c24 = arith.constant 24 : index
%c4 = arith.constant 4 : index
%0 = linalg.generic {indexing_maps = [#map0, #map1], iterator_types = ["parallel", "reduction", "parallel"]} ins(%arg3 : tensor<12x7x25xf32>) outs(%arg1 : tensor<12x25xf32>) {
^bb0(%arg4: f32, %arg5: f32): // no predecessors
^bb0(%arg4: f32, %arg5: f32):
%2 = arith.addf %arg4, %arg5 : f32
linalg.yield %2 : f32
} -> tensor<12x25xf32>
@ -150,7 +150,7 @@ builtin.func @fuse_transposed(%arg0: tensor<24x12xf32>,
%c24 = arith.constant 24 : index
%c4 = arith.constant 4 : index
%0 = linalg.generic {indexing_maps = [#map0, #map1], iterator_types = ["parallel", "parallel"]} ins(%arg3 : tensor<12x24xf32>) outs(%arg0 : tensor<24x12xf32>) {
^bb0(%arg4: f32, %arg5: f32): // no predecessors
^bb0(%arg4: f32, %arg5: f32):
%2 = arith.addf %arg4, %arg5 : f32
linalg.yield %2 : f32
} -> tensor<24x12xf32>
@ -219,7 +219,7 @@ builtin.func @fuse_indexed(%arg0: tensor<24x12xi32>,
%c24 = arith.constant 24 : index
%c4 = arith.constant 4 : index
%0 = linalg.generic {indexing_maps = [#map0], iterator_types = ["parallel", "parallel"]} outs(%arg1 : tensor<12x25xi32>) {
^bb0(%arg3: i32): // no predecessors
^bb0(%arg3: i32):
%6 = linalg.index 0 : index
%7 = linalg.index 1 : index
%8 = arith.addi %6, %7 : index
@ -272,7 +272,7 @@ func @fuse_outermost_reduction(%arg0: tensor<10x17xf32>,
// GENERIC-SAME: %[[IV1]]
// GENERIC: linalg.generic {{.*}} ins(%[[T2]] {{.*}} outs(%[[T3]]
%2 = linalg.generic {indexing_maps = [#map0, #map1], iterator_types = ["parallel", "reduction"]} ins(%0 : tensor<10x17xf32>) outs(%1 : tensor<10xf32>) {
^bb0(%arg2: f32, %arg3: f32): // no predecessors
^bb0(%arg2: f32, %arg3: f32):
%3 = arith.addf %arg2, %arg3 : f32
linalg.yield %3 : f32
} -> tensor<10xf32>
@ -315,7 +315,7 @@ func @fuse_non_rectangular(%arg0: tensor<10x17xf32>,
// GENERIC-SAME: , %[[UB1]]
// GENERIC: %[[T1:.*]] = linalg.fill(%{{.*}}, %[[T0]])
%1 = linalg.generic {indexing_maps = [#map0, #map1], iterator_types = ["parallel", "parallel"]} ins(%0 : tensor<10x17xf32>) outs(%arg1 : tensor<10x8xf32>) {
^bb0(%arg2: f32, %arg3: f32): // no predecessors
^bb0(%arg2: f32, %arg3: f32):
%2 = arith.addf %arg2, %arg3 : f32
linalg.yield %2 : f32
} -> tensor<10x8xf32>


@ -318,7 +318,7 @@ func @pointwise(%arg0: memref<?x?xf32, offset: ?, strides: [?, 1]>, %arg1: memre
linalg.generic #pointwise_2d_trait
ins(%arg0, %arg1 : memref<?x?xf32, offset: ?, strides: [?, 1]>, memref<?x?xf32, offset: ?, strides: [?, 1]>)
outs(%arg2 : memref<?x?xf32, offset: ?, strides: [?, 1]>) {
^bb0(%arg4: f32, %arg5: f32, %arg6: f32): // no predecessors
^bb0(%arg4: f32, %arg5: f32, %arg6: f32):
%4 = arith.addf %arg4, %arg5 : f32
linalg.yield %4 : f32
}


@ -462,7 +462,7 @@ func @generic_vectorize_broadcast_transpose(
iterator_types = ["parallel", "parallel", "parallel", "parallel"]}
ins(%B, %A, %A, %B: memref<4x4xf32>, memref<4xf32>, memref<4xf32>, memref<4x4xf32>)
outs(%C : memref<4x4x4x4xf32>) {
^bb0(%arg0: f32, %arg1: f32, %arg2: f32, %arg3: f32, %arg4: f32): // no predecessors
^bb0(%arg0: f32, %arg1: f32, %arg2: f32, %arg3: f32, %arg4: f32):
%s = arith.subf %arg0, %arg1 : f32
%a = arith.addf %arg2, %s : f32
%b = arith.addf %arg3, %a : f32
@ -775,7 +775,7 @@ func @sum_exp(%input: tensor<4x16x8xf32>, %output: tensor<4x16xf32>)
],
iterator_types = ["parallel", "parallel", "reduction"]
} ins(%input : tensor<4x16x8xf32>) outs(%output : tensor<4x16xf32>) {
^bb0(%arg0: f32, %arg1: f32): // no predecessors
^bb0(%arg0: f32, %arg1: f32):
%1 = math.exp %arg0 : f32
%2 = arith.addf %1, %arg1 : f32
linalg.yield %2 : f32
@ -811,7 +811,7 @@ func @sum_exp_2(%input: tensor<3x2xf32>, %input_2: tensor<5x4xf32>, %output: ten
],
iterator_types = ["parallel", "reduction", "reduction", "parallel"]
} ins(%input, %input_2 : tensor<3x2xf32>, tensor<5x4xf32>) outs(%output : tensor<5x2xf32>) {
^bb0(%arg0: f32, %arg1: f32, %arg2: f32): // no predecessors
^bb0(%arg0: f32, %arg1: f32, %arg2: f32):
%1 = math.exp %arg0 : f32
%2 = math.exp %arg1 : f32
%3 = arith.addf %1, %2 : f32
@ -838,7 +838,7 @@ func @red_max_2d(%arg0: tensor<4x4xf32>) -> tensor<4xf32> {
affine_map<(d0, d1) -> (d0)>],
iterator_types = ["parallel", "reduction"]}
ins(%arg0 : tensor<4x4xf32>) outs(%fill : tensor<4xf32>) {
^bb0(%in0: f32, %out0: f32): // no predecessors
^bb0(%in0: f32, %out0: f32):
%max = arith.maxf %in0, %out0 : f32
linalg.yield %max : f32
} -> tensor<4xf32>
@ -863,7 +863,7 @@ func @red_min_2d(%arg0: tensor<4x4xf32>) -> tensor<4xf32> {
affine_map<(d0, d1) -> (d0)>],
iterator_types = ["parallel", "reduction"]}
ins(%arg0 : tensor<4x4xf32>) outs(%fill : tensor<4xf32>) {
^bb0(%in0: f32, %out0: f32): // no predecessors
^bb0(%in0: f32, %out0: f32):
%min = arith.minf %out0, %in0 : f32
linalg.yield %min : f32
} -> tensor<4xf32>
@ -887,7 +887,7 @@ func @red_mul_2d(%arg0: tensor<4x4xf32>) -> tensor<4xf32> {
affine_map<(d0, d1) -> (d0)>],
iterator_types = ["parallel", "reduction"]}
ins(%arg0 : tensor<4x4xf32>) outs(%fill : tensor<4xf32>) {
^bb0(%in0: f32, %out0: f32): // no predecessors
^bb0(%in0: f32, %out0: f32):
%mul = arith.mulf %in0, %out0 : f32
linalg.yield %mul : f32
} -> tensor<4xf32>
@ -910,7 +910,7 @@ func @red_or_2d(%arg0: tensor<4x4xi1>) -> tensor<4xi1> {
affine_map<(d0, d1) -> (d0)>],
iterator_types = ["parallel", "reduction"]}
ins(%arg0 : tensor<4x4xi1>) outs(%fill : tensor<4xi1>) {
^bb0(%in0: i1, %out0: i1): // no predecessors
^bb0(%in0: i1, %out0: i1):
%or = arith.ori %in0, %out0 : i1
linalg.yield %or : i1
} -> tensor<4xi1>
@ -933,7 +933,7 @@ func @red_and_2d(%arg0: tensor<4x4xi1>) -> tensor<4xi1> {
affine_map<(d0, d1) -> (d0)>],
iterator_types = ["parallel", "reduction"]}
ins(%arg0 : tensor<4x4xi1>) outs(%fill : tensor<4xi1>) {
^bb0(%in0: i1, %out0: i1): // no predecessors
^bb0(%in0: i1, %out0: i1):
%and = arith.andi %in0, %out0 : i1
linalg.yield %and : i1
} -> tensor<4xi1>
@ -956,7 +956,7 @@ func @red_xor_2d(%arg0: tensor<4x4xi1>) -> tensor<4xi1> {
affine_map<(d0, d1) -> (d0)>],
iterator_types = ["parallel", "reduction"]}
ins(%arg0 : tensor<4x4xi1>) outs(%fill : tensor<4xi1>) {
^bb0(%in0: i1, %out0: i1): // no predecessors
^bb0(%in0: i1, %out0: i1):
%xor = arith.xori %in0, %out0 : i1
linalg.yield %xor : i1
} -> tensor<4xi1>
@ -1051,7 +1051,7 @@ func @reduce_1d(%arg0: tensor<32xf32>) -> tensor<f32> {
iterator_types = ["reduction"]}
ins(%arg0 : tensor<32xf32>)
outs(%1 : tensor<f32>) {
^bb0(%a: f32, %b: f32): // no predecessors
^bb0(%a: f32, %b: f32):
%3 = arith.addf %a, %b : f32
linalg.yield %3 : f32
} -> tensor<f32>


@ -839,7 +839,7 @@ func @while_cond_true() -> i1 {
// CHECK-NEXT: %[[cmp:.+]] = "test.condition"() : () -> i1
// CHECK-NEXT: scf.condition(%[[cmp]]) %[[cmp]] : i1
// CHECK-NEXT: } do {
// CHECK-NEXT: ^bb0(%arg0: i1): // no predecessors
// CHECK-NEXT: ^bb0(%arg0: i1):
// CHECK-NEXT: "test.use"(%[[true]]) : (i1) -> ()
// CHECK-NEXT: scf.yield
// CHECK-NEXT: }
@ -862,7 +862,7 @@ func @while_unused_arg(%x : i32, %y : f64) -> i32 {
// CHECK-NEXT: %[[cmp:.*]] = "test.condition"(%[[arg2]]) : (i32) -> i1
// CHECK-NEXT: scf.condition(%[[cmp]]) %[[arg2]] : i32
// CHECK-NEXT: } do {
// CHECK-NEXT: ^bb0(%[[post:.+]]: i32): // no predecessors
// CHECK-NEXT: ^bb0(%[[post:.+]]: i32):
// CHECK-NEXT: %[[next:.+]] = "test.use"(%[[post]]) : (i32) -> i32
// CHECK-NEXT: scf.yield %[[next]] : i32
// CHECK-NEXT: }
@ -890,7 +890,7 @@ func @while_unused_result() -> i32 {
// CHECK-NEXT: %{{.*}} = "test.get_some_value"() : () -> i64
// CHECK-NEXT: scf.condition(%[[cmp]]) %[[val]] : i32
// CHECK-NEXT: } do {
// CHECK-NEXT: ^bb0(%[[arg:.*]]: i32): // no predecessors
// CHECK-NEXT: ^bb0(%[[arg:.*]]: i32):
// CHECK-NEXT: "test.use"(%[[arg]]) : (i32) -> ()
// CHECK-NEXT: scf.yield
// CHECK-NEXT: }
@ -918,7 +918,7 @@ func @while_cmp_lhs(%arg0 : i32) {
// CHECK-NEXT: %[[cmp:.+]] = arith.cmpi ne, %[[val]], %arg0 : i32
// CHECK-NEXT: scf.condition(%[[cmp]]) %[[val]] : i32
// CHECK-NEXT: } do {
// CHECK-NEXT: ^bb0(%arg1: i32): // no predecessors
// CHECK-NEXT: ^bb0(%arg1: i32):
// CHECK-NEXT: "test.use"(%[[true]], %[[false]], %arg1) : (i1, i1, i32) -> ()
// CHECK-NEXT: scf.yield
// CHECK-NEXT: }
@ -945,7 +945,7 @@ func @while_cmp_rhs(%arg0 : i32) {
// CHECK-NEXT: %[[cmp:.+]] = arith.cmpi ne, %arg0, %[[val]] : i32
// CHECK-NEXT: scf.condition(%[[cmp]]) %[[val]] : i32
// CHECK-NEXT: } do {
// CHECK-NEXT: ^bb0(%arg1: i32): // no predecessors
// CHECK-NEXT: ^bb0(%arg1: i32):
// CHECK-NEXT: "test.use"(%[[true]], %[[false]], %arg1) : (i1, i1, i32) -> ()
// CHECK-NEXT: scf.yield
// CHECK-NEXT: }


@ -9,11 +9,11 @@
// CHECK-NOT: @sub
func @inlined_if_fn(%arg0: tensor<f32>, %arg1: tensor<f32>, %arg2: tensor<i1>) -> tensor<f32> {
%0 = "tosa.cond_if"(%arg2, %arg0, %arg1) ({
^bb0(%arg3: tensor<f32>, %arg4: tensor<f32>): // no predecessors
^bb0(%arg3: tensor<f32>, %arg4: tensor<f32>):
%1 = call @add(%arg3, %arg4) : (tensor<f32>, tensor<f32>) -> tensor<f32>
"tosa.yield"(%1) : (tensor<f32>) -> ()
}, {
^bb0(%arg3: tensor<f32>, %arg4: tensor<f32>): // no predecessors
^bb0(%arg3: tensor<f32>, %arg4: tensor<f32>):
%1 = call @sub(%arg3, %arg4) : (tensor<f32>, tensor<f32>) -> tensor<f32>
"tosa.yield"(%1) : (tensor<f32>) -> ()
}) : (tensor<i1>, tensor<f32>, tensor<f32>) -> tensor<f32>
@ -35,11 +35,11 @@ func @inlined_while_fn(%arg0: tensor<i32>, %arg1: tensor<i32>, %arg2: tensor<i32
// Check that calls are inlined and functions eliminated:
// CHECK-NOT: @while
%1:4 = "tosa.while_loop"(%arg0, %arg1, %arg2, %arg3) ({
^bb0(%arg4: tensor<i32>, %arg5: tensor<i32>, %arg6: tensor<i32>, %arg7: tensor<10xi32>): // no predecessors
^bb0(%arg4: tensor<i32>, %arg5: tensor<i32>, %arg6: tensor<i32>, %arg7: tensor<10xi32>):
%2 = call @while_cond_40(%arg4, %arg5, %arg6, %arg7) : (tensor<i32>, tensor<i32>, tensor<i32>, tensor<10xi32>) -> tensor<i1>
"tosa.yield"(%2) : (tensor<i1>) -> ()
}, {
^bb0(%arg4: tensor<i32>, %arg5: tensor<i32>, %arg6: tensor<i32>, %arg7: tensor<10xi32>): // no predecessors
^bb0(%arg4: tensor<i32>, %arg5: tensor<i32>, %arg6: tensor<i32>, %arg7: tensor<10xi32>):
%2:4 = call @while_body_50(%arg4, %arg5, %arg6, %arg7) : (tensor<i32>, tensor<i32>, tensor<i32>, tensor<10xi32>) -> (tensor<i32>, tensor<i32>, tensor<i32>, tensor<10xi32>)
"tosa.yield"(%2#0, %2#1, %2#2, %2#3) : (tensor<i32>, tensor<i32>, tensor<i32>, tensor<10xi32>) -> ()
}) : (tensor<i32>, tensor<i32>, tensor<i32>, tensor<10xi32>) -> (tensor<i32>, tensor<i32>, tensor<i32>, tensor<10xi32>)


@ -506,11 +506,11 @@ func @test_identity(%arg0: tensor<13x21x3xi32>) -> tensor<13x21x3xi32> {
// CHECK-LABEL: cond_if
func @test_cond_if(%arg0: tensor<f32>, %arg1: tensor<f32>, %arg2: tensor<i1>) -> tensor<f32> {
%0 = "tosa.cond_if"(%arg2, %arg0, %arg1) ({
^bb0(%arg3: tensor<f32>, %arg4: tensor<f32>): // no predecessors
^bb0(%arg3: tensor<f32>, %arg4: tensor<f32>):
%1 = "tosa.add"(%arg3, %arg4) : (tensor<f32>, tensor<f32>) -> tensor<f32>
"tosa.yield"(%1) : (tensor<f32>) -> ()
}, {
^bb0(%arg3: tensor<f32>, %arg4: tensor<f32>): // no predecessors
^bb0(%arg3: tensor<f32>, %arg4: tensor<f32>):
%1 = "tosa.sub"(%arg3, %arg4) : (tensor<f32>, tensor<f32>) -> tensor<f32>
"tosa.yield"(%1) : (tensor<f32>) -> ()
}) : (tensor<i1>, tensor<f32>, tensor<f32>) -> tensor<f32>
@ -522,12 +522,12 @@ func @test_cond_if(%arg0: tensor<f32>, %arg1: tensor<f32>, %arg2: tensor<i1>) ->
func @test_while_loop(%arg0: tensor<10xi32>, %arg1: tensor<i32>) {
%0 = "tosa.const"() {value = dense<0> : tensor<i32>} : () -> tensor<i32>
%1:3 = "tosa.while_loop"(%0, %0, %arg0) ({
^bb0(%arg2: tensor<i32>, %arg3: tensor<i32>, %arg4: tensor<10xi32>): // no predecessors
^bb0(%arg2: tensor<i32>, %arg3: tensor<i32>, %arg4: tensor<10xi32>):
%2 = "tosa.greater_equal"(%arg3, %arg1) : (tensor<i32>, tensor<i32>) -> tensor<i1>
%3 = "tosa.logical_not"(%2) : (tensor<i1>) -> tensor<i1>
"tosa.yield"(%3) : (tensor<i1>) -> ()
}, {
^bb0(%arg2: tensor<i32>, %arg3: tensor<i32>, %arg4: tensor<10xi32>): // no predecessors
^bb0(%arg2: tensor<i32>, %arg3: tensor<i32>, %arg4: tensor<10xi32>):
%2 = "tosa.const"() {value = dense<1> : tensor<i32>} : () -> tensor<i32>
%3 = "tosa.add"(%arg3, %2) : (tensor<i32>, tensor<i32>) -> tensor<i32>
%4 = "tosa.reshape"(%2) {new_shape = [1]} : (tensor<i32>) -> tensor<1xi32>


@ -79,7 +79,7 @@ func @named_region_has_wrong_number_of_blocks() {
// Region with single block and not terminator.
// CHECK: unregistered_without_terminator
"test.unregistered_without_terminator"() ({
^bb0: // no predecessors
^bb0:
}) : () -> ()
// -----


@ -22,7 +22,7 @@ func @init_and_dot(%arg0: tensor<64xf32>, %arg1: tensor<64xf32>, %arg2: tensor<f
%9 = tensor.extract_slice %arg1[%arg3] [2] [1] : tensor<64xf32> to tensor<2xf32>
%10 = tensor.cast %9 : tensor<2xf32> to tensor<?xf32>
%11 = linalg.pad_tensor %10 low[%c0] high[%c0] {
^bb0(%arg5: index): // no predecessors
^bb0(%arg5: index):
linalg.yield %cst : f32
} : tensor<?xf32> to tensor<2xf32>
%12 = tensor.insert_slice %11 into %arg4[%8, 0] [1, 2] [1, 1] : tensor<2xf32> into tensor<?x2xf32>
@ -39,7 +39,7 @@ func @init_and_dot(%arg0: tensor<64xf32>, %arg1: tensor<64xf32>, %arg2: tensor<f
%9 = tensor.extract_slice %arg0[%arg3] [2] [1] : tensor<64xf32> to tensor<2xf32>
%10 = tensor.cast %9 : tensor<2xf32> to tensor<?xf32>
%11 = linalg.pad_tensor %10 low[%c0] high[%c0] {
^bb0(%arg5: index): // no predecessors
^bb0(%arg5: index):
linalg.yield %cst : f32
} : tensor<?xf32> to tensor<2xf32>
%12 = tensor.insert_slice %11 into %arg4[%8, 0] [1, 2] [1, 1] : tensor<2xf32> into tensor<?x2xf32>


@ -14,7 +14,7 @@ func @main() {
%cst = arith.constant 2.3 : f32
%c0 = arith.constant 0 : index
%out = linalg.pad_tensor %dynamic low[%c0, %offset, %c0] high[%c0, %c0, %offset] {
^bb0(%gen_arg1: index, %gen_arg2: index, %gen_arg3: index): // no predecessors
^bb0(%gen_arg1: index, %gen_arg2: index, %gen_arg3: index):
linalg.yield %cst : f32
} : tensor<1x?x3xf32> to tensor<1x?x?xf32>
%unranked = tensor.cast %out: tensor<1x?x?xf32> to tensor<*xf32>


@ -361,7 +361,7 @@ def testOperationWithRegion():
op1 = Operation.create("custom.op1", regions=1)
block = op1.regions[0].blocks.append(i32, i32)
# CHECK: "custom.op1"() ({
# CHECK: ^bb0(%arg0: si32, %arg1: si32): // no predecessors
# CHECK: ^bb0(%arg0: si32, %arg1: si32):
# CHECK: "custom.terminator"() : () -> ()
# CHECK: }) : () -> ()
terminator = Operation.create("custom.terminator")