[mlir][Linalg] Make printer/parser have the same behavior.

The parser of generic op did not recognize the output from mlir-opt when there
are multiple outputs: the printer wrapped the result types in parentheses, but
the parser did not accept that form. The patch makes the two behaviors match.

Reviewed By: mravishankar

Differential Revision: https://reviews.llvm.org/D104256
This commit is contained in:
Hanhan Wang 2021-06-14 13:38:21 -07:00
parent e0c382a9d5
commit e3bc4dbe8e
6 changed files with 43 additions and 11 deletions

View File

@ -3067,9 +3067,8 @@ parseNamedStructuredOpRegion(OpAsmParser &parser, Region &region,
static ParseResult static ParseResult
parseNamedStructuredOpResults(OpAsmParser &parser, parseNamedStructuredOpResults(OpAsmParser &parser,
SmallVectorImpl<Type> &resultTypes) { SmallVectorImpl<Type> &resultTypes) {
if (succeeded(parser.parseOptionalArrow())) if (parser.parseOptionalArrowTypeList(resultTypes))
if (parser.parseTypeList(resultTypes)) return failure();
return failure();
return success(); return success();
} }

View File

@ -85,7 +85,7 @@ func @multiple_results(%arg0: tensor<4xf32>) -> (tensor<4xf32>, tensor<4xf32>) {
^bb0(%gen_arg1: f32, %out1: f32, %out2: f32): ^bb0(%gen_arg1: f32, %out1: f32, %out2: f32):
%tmp1 = math.exp %gen_arg1 : f32 %tmp1 = math.exp %gen_arg1 : f32
linalg.yield %tmp1, %tmp1 : f32, f32 linalg.yield %tmp1, %tmp1 : f32, f32
} -> tensor<4xf32>, tensor<4xf32> } -> (tensor<4xf32>, tensor<4xf32>)
return %0, %1 : tensor<4xf32>, tensor<4xf32> return %0, %1 : tensor<4xf32>, tensor<4xf32>
} }
@ -118,7 +118,7 @@ func @dynamic_results(%arg0: tensor<?x?xf32>)
^bb0(%gen_arg1: f32, %out1: f32, %out2: f32): ^bb0(%gen_arg1: f32, %out1: f32, %out2: f32):
%tmp1 = math.exp %gen_arg1 : f32 %tmp1 = math.exp %gen_arg1 : f32
linalg.yield %tmp1, %tmp1 : f32, f32 linalg.yield %tmp1, %tmp1 : f32, f32
} -> tensor<?x?xf32>, tensor<?x?xf32> } -> (tensor<?x?xf32>, tensor<?x?xf32>)
return %0, %1 : tensor<?x?xf32>, tensor<?x?xf32> return %0, %1 : tensor<?x?xf32>, tensor<?x?xf32>
} }

View File

@ -714,7 +714,7 @@ func @init_tensor_dim_of_linalg_result(%arg_0 : tensor<?xf32>,
outs(%arg_0, %arg_1 : tensor<?xf32>, tensor<?xf32>) { outs(%arg_0, %arg_1 : tensor<?xf32>, tensor<?xf32>) {
^bb0(%in: f32, %out_0: f32, %out_1: f32): ^bb0(%in: f32, %out_0: f32, %out_1: f32):
linalg.yield %in, %in : f32, f32 linalg.yield %in, %in : f32, f32
} -> tensor<?xf32>, tensor<?xf32> } -> (tensor<?xf32>, tensor<?xf32>)
%c0 = constant 0 : index %c0 = constant 0 : index
%num_elem_0 = memref.dim %0, %c0 : tensor<?xf32> %num_elem_0 = memref.dim %0, %c0 : tensor<?xf32>
@ -778,7 +778,7 @@ func @remove_no_op(%arg0 : tensor<?x?x?xf32>, %arg1 : tensor<?x?x?xf32>)
outs(%3, %3 : tensor<?x?x?xf32>, tensor<?x?x?xf32>) { outs(%3, %3 : tensor<?x?x?xf32>, tensor<?x?x?xf32>) {
^bb0(%arg2 : f32, %arg3 : f32, %arg4 : f32, %arg5 : f32): ^bb0(%arg2 : f32, %arg3 : f32, %arg4 : f32, %arg5 : f32):
linalg.yield %arg3, %arg2 : f32, f32 linalg.yield %arg3, %arg2 : f32, f32
} -> tensor<?x?x?xf32>, tensor<?x?x?xf32> } -> (tensor<?x?x?xf32>, tensor<?x?x?xf32>)
return %4, %5 : tensor<?x?x?xf32>, tensor<?x?x?xf32> return %4, %5 : tensor<?x?x?xf32>, tensor<?x?x?xf32>
} }
// CHECK-LABEL: func @remove_no_op // CHECK-LABEL: func @remove_no_op
@ -832,7 +832,7 @@ func @keep_not_noop(%arg0 : tensor<?x?xf32>, %arg1 : tensor<?x?xf32>)
outs(%2, %2 : tensor<?x?xf32>, tensor<?x?xf32>) { outs(%2, %2 : tensor<?x?xf32>, tensor<?x?xf32>) {
^bb0(%arg3: f32, %arg4 : f32, %arg5 : f32, %arg6 : f32): ^bb0(%arg3: f32, %arg4 : f32, %arg5 : f32, %arg6 : f32):
linalg.yield %arg2, %arg4 : f32, f32 linalg.yield %arg2, %arg4 : f32, f32
} -> tensor<?x?xf32>, tensor<?x?xf32> } -> (tensor<?x?xf32>, tensor<?x?xf32>)
return %3#0, %3#1 : tensor<?x?xf32>, tensor<?x?xf32> return %3#0, %3#1 : tensor<?x?xf32>, tensor<?x?xf32>
} }
// CHECK-LABEL: func @keep_not_noop // CHECK-LABEL: func @keep_not_noop

View File

@ -449,7 +449,7 @@ func @named_ops(%a3: memref<?x?x?xf32>, %b3: memref<?x?xf32>, %c3: memref<?x?x?x
func @incorrect_region_arg_count(%m: memref<?x?xf32>) { func @incorrect_region_arg_count(%m: memref<?x?xf32>) {
// expected-error @+3 {{region expects 3 args, got 2}} // expected-error @+3 {{region expects 3 args, got 2}}
%res = linalg.matmul ins(%m, %m : memref<?x?xf32>, memref<?x?xf32>) %res = linalg.matmul ins(%m, %m : memref<?x?xf32>, memref<?x?xf32>)
-> tensor<?x?xf32>, tensor<?x?xf32> -> (tensor<?x?xf32>, tensor<?x?xf32>)
return return
} }

View File

@ -424,6 +424,39 @@ func @generic_with_tensor_input_and_output(
// ----- // -----
// Regression test for the generic-op result-type syntax: with multiple tensor
// outputs, the printer emits a parenthesized type list after `->` (see the
// `} -> (tensor<i32>, tensor<i32>)` line below), and the parser must accept
// that same form so printed IR round-trips.
func @generic_with_multiple_tensor_outputs(
    %arg0: tensor<?xi32>, %arg1: tensor<?xi32>, %arg2: i32)
    -> (tensor<i32>, tensor<i32>) {
  %c0 = constant 0 : index
  // Two scalar init tensors, both filled with %arg2, used as the outs operands.
  %0 = linalg.init_tensor [] : tensor<i32>
  %1 = linalg.fill(%0, %arg2) : tensor<i32>, i32 -> tensor<i32>
  %2 = linalg.init_tensor [] : tensor<i32>
  %3 = linalg.fill(%2, %arg2) : tensor<i32>, i32 -> tensor<i32>
  // Reduction with two results (an argmax-style max plus companion value);
  // the payload logic itself is incidental — the point is the two-result
  // `} -> (...)` terminator syntax.
  %4:2 = linalg.generic {
    indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>, affine_map<(d0) -> ()>, affine_map<(d0) -> ()>],
    iterator_types = ["reduction"]}
    ins(%arg0, %arg1 : tensor<?xi32>, tensor<?xi32>)
    outs(%1, %3 : tensor<i32>, tensor<i32>) {
  ^bb0(%arg3: i32, %arg4: i32, %arg5: i32, %arg6: i32):  // no predecessors
    %5 = cmpi sge, %arg3, %arg5 : i32
    %6 = select %5, %arg3, %arg5 : i32
    %7 = cmpi eq, %arg3, %arg5 : i32
    %8 = cmpi slt, %arg4, %arg6 : i32
    %9 = select %8, %arg4, %arg6 : i32
    %10 = select %5, %arg4, %arg6 : i32
    %11 = select %7, %9, %10 : i32
    linalg.yield %6, %11 : i32, i32
  } -> (tensor<i32>, tensor<i32>)
  return %4#0, %4#1 : tensor<i32>, tensor<i32>
}
// CHECK-LABEL: func @generic_with_multiple_tensor_outputs
// CHECK: %{{.*}} = linalg.generic {
// CHECK-SAME: ins({{.*}} : tensor<?xi32>, tensor<?xi32>)
// CHECK-SAME: outs({{.*}} : tensor<i32>, tensor<i32>)
// CHECK: } -> (tensor<i32>, tensor<i32>)
// -----
#accesses_2 = [ #accesses_2 = [
affine_map<(i, j, k) -> (j, i)>, affine_map<(i, j, k) -> (j, i)>,
affine_map<(i, j, k) -> (i, k, i + j)>, affine_map<(i, j, k) -> (i, k, i + j)>,

View File

@ -386,9 +386,9 @@ func @generic_vectorize_tensor(%arg0: tensor<4x256xf32>,
// CHECK: %[[R9:.*]] = vector.transfer_write %[[TAN]], %[[ARG0]][%[[C0]], %[[C0]]] {{.*}} : vector<4x256xf32>, tensor<4x256xf32> // CHECK: %[[R9:.*]] = vector.transfer_write %[[TAN]], %[[ARG0]][%[[C0]], %[[C0]]] {{.*}} : vector<4x256xf32>, tensor<4x256xf32>
linalg.yield %6, %8, %c1_f32, %9, %10, %11, %12, %13, %14, %15 : f32, f32, linalg.yield %6, %8, %c1_f32, %9, %10, %11, %12, %13, %14, %15 : f32, f32,
f32, f32, f32, f32, f32, f32, f32, f32 f32, f32, f32, f32, f32, f32, f32, f32
} -> tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, } -> (tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>,
tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>,
tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32> tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>)
// CHECK: return %[[R0]], %[[R1]], %[[R2]], %[[R3]], %[[R4]], %[[R5]], %[[R6]], %[[R7]], %[[R8]], %[[R9]] : tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32> // CHECK: return %[[R0]], %[[R1]], %[[R2]], %[[R3]], %[[R4]], %[[R5]], %[[R6]], %[[R7]], %[[R8]], %[[R9]] : tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>
return %r#0, %r#1, %r#2, %r#3, %r#4, %r#5, %r#6, %r#7, %r#8, %r#9: return %r#0, %r#1, %r#2, %r#3, %r#4, %r#5, %r#6, %r#7, %r#8, %r#9:
tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>,