[mlir][tosa] Added div op, variadic concat. Removed placeholder. Spec v0.22 alignment.

Nearly complete alignment to spec v0.22
- Adds Div op
- Concat inputs now variadic
- Removes Placeholder op

Note: the TensorFlow-side PR https://github.com/tensorflow/tensorflow/pull/48921 deletes the Concat legalizations to avoid breaking TensorFlow CI. This change must be merged only after that TF PR has landed.

Reviewed By: rsuderman

Differential Revision: https://reviews.llvm.org/D101958
This commit is contained in:
Rob Suderman 2021-05-06 15:55:58 -07:00
parent 44ee974e2f
commit d3e987c389
3 changed files with 34 additions and 32 deletions

View File

@ -492,6 +492,28 @@ def Tosa_BitwiseXorOp : Tosa_Op<"bitwise_xor", [ResultsBroadcastableShape,
); );
} }
//===----------------------------------------------------------------------===//
// Operator: div
//===----------------------------------------------------------------------===//
// Elementwise integer divide of input1 by input2. Operands and result are
// i32 tensors of rank 0 through 4 (see Tosa_Int32TensorUpto4D); axes of
// size 1 broadcast via the ResultsBroadcastableShape trait.
// NOTE(review): the rounding mode for negative quotients is defined by the
// TOSA spec, not visible here — confirm against spec v0.22 before relying
// on a particular rounding behavior in lowerings.
def Tosa_DivOp : Tosa_Op<"div", [ResultsBroadcastableShape,
NoSideEffect]> {
// One-line summary used in generated op documentation.
let summary = "Integer divide operator";
let description = [{
Elementwise integer divide operator of input1 by input2. Axis of size 1
will be broadcast, as necessary. Rank of input tensors must match.
}];
// Both operands are constrained to i32 tensors of rank <= 4; no
// floating-point or quantized variants are accepted by this op.
let arguments = (ins
Tosa_Int32TensorUpto4D:$input1,
Tosa_Int32TensorUpto4D:$input2
);
let results = (outs
Tosa_Int32TensorUpto4D:$output
);
}
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
// Operator: logical_and // Operator: logical_and
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
@ -1206,13 +1228,12 @@ def Tosa_ConcatOp : Tosa_Op<"concat", [NoSideEffect]> {
let summary = "Concatenates tensors along one dimension."; let summary = "Concatenates tensors along one dimension.";
let description = [{ let description = [{
Concatenate two tensors along a given axis. No data conversion happens Concatenate a variadic amount of tensors along a given axis. No data
during a concat operation. conversion happens during a concat operation.
}]; }];
let arguments = (ins let arguments = (ins
Tosa_Tensor1Dto4D:$input1, Variadic<Tosa_Tensor1Dto4D>:$input1,
Tosa_Tensor1Dto4D:$input2,
I64Attr:$axis I64Attr:$axis
); );
@ -1586,26 +1607,6 @@ def Tosa_IdentityNOp: Tosa_Op<"identityn", [NoSideEffect]> {
); );
} }
//===----------------------------------------------------------------------===//
// Operator: placeholder
//===----------------------------------------------------------------------===//
// Source op with no operands: produces a tensor whose data is supplied at
// runtime (network inputs). This commit removes the op for spec v0.22
// alignment — shown here as the deleted definition.
def Tosa_PlaceholderOp : Tosa_Op<"placeholder", [NoSideEffect]> {
let summary = "Placeholder op";
let description = [{
A node where data will be inserted into the network at runtime. Generally
used for inputs to the network.
}];
// Intentionally takes no operands or attributes.
let arguments = (ins
);
let results = (outs
Tosa_Tensor1Dto4D:$output
);
}
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
// TOSA Spec Section 2.14 // TOSA Spec Section 2.14
// Operator Class: Custom Operators. // Operator Class: Custom Operators.

View File

@ -126,6 +126,8 @@ def Tosa_Tensor1Dto6D : TensorRankOf<[Tosa_AnyNumber], [1,2,3,4,5,6]>;
def Tosa_TensorUpto4D : TensorRankOf<[Tosa_AnyNumber], [0,1,2,3,4]>; def Tosa_TensorUpto4D : TensorRankOf<[Tosa_AnyNumber], [0,1,2,3,4]>;
def Tosa_TensorUpto6D : TensorRankOf<[Tosa_AnyNumber], [0,1,2,3,4,5,6]>; def Tosa_TensorUpto6D : TensorRankOf<[Tosa_AnyNumber], [0,1,2,3,4,5,6]>;
// Tensor of i32 elements with rank 0 through 4; introduced for the operands
// and result of the new tosa.div op.
def Tosa_Int32TensorUpto4D : TensorRankOf<[Tosa_Int32], [0,1,2,3,4]>;
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
// Generic scalar, vector, or tensor of a particular type. // Generic scalar, vector, or tensor of a particular type.
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//

View File

@ -101,7 +101,6 @@ func @test_arithmetic_right_shift(%arg0: tensor<13x21x1xf32>, %arg1: tensor<13x2
return %0 : tensor<13x21x3xf32> return %0 : tensor<13x21x3xf32>
} }
// ----- // -----
// CHECK-LABEL: bitwise_and // CHECK-LABEL: bitwise_and
func @test_bitwise_and(%arg0: tensor<13x21x3xi32>, %arg1: tensor<13x21x1xi32>) -> tensor<13x21x3xi32> { func @test_bitwise_and(%arg0: tensor<13x21x3xi32>, %arg1: tensor<13x21x1xi32>) -> tensor<13x21x3xi32> {
@ -123,6 +122,13 @@ func @test_bitwise_xor(%arg0: tensor<13x21x1xi32>, %arg1: tensor<13x21x3xi32>) -
return %0 : tensor<13x21x3xi32> return %0 : tensor<13x21x3xi32>
} }
// -----
// CHECK-LABEL: div
// Round-trip test: verifies tosa.div parses and passes verification with a
// size-1 axis on the first operand (13x21x1) broadcast against 13x21x3.
func @test_div(%arg0: tensor<13x21x1xi32>, %arg1: tensor<13x21x3xi32>) -> tensor<13x21x3xi32> {
%0 = "tosa.div"(%arg0, %arg1) : (tensor<13x21x1xi32>, tensor<13x21x3xi32>) -> tensor<13x21x3xi32>
return %0 : tensor<13x21x3xi32>
}
// ----- // -----
// CHECK-LABEL: logical_and // CHECK-LABEL: logical_and
func @test_logical_and(%arg0: tensor<13x21x3xi1>, %arg1: tensor<13x21x1xi1>) -> tensor<13x21x3xi1> { func @test_logical_and(%arg0: tensor<13x21x3xi1>, %arg1: tensor<13x21x1xi1>) -> tensor<13x21x3xi1> {
@ -474,13 +480,6 @@ func @test_identityn(%arg0: tensor<1xi32>, %arg1: tensor<1xi32>) -> tensor<1xi32
return %0#0 : tensor<1xi32> return %0#0 : tensor<1xi32>
} }
// -----
// CHECK-LABEL: placeholder
// Round-trip test for the (now removed) zero-operand tosa.placeholder op;
// deleted by this commit together with the op definition.
func @test_placeholder() -> tensor<1xi32> {
%0 = "tosa.placeholder"() : () -> tensor<1xi32>
return %0 : tensor<1xi32>
}
// ----- // -----
// CHECK-LABEL: cond_if // CHECK-LABEL: cond_if
func @test_cond_if(%arg0: tensor<f32>, %arg1: tensor<f32>, %arg2: tensor<i1>) -> tensor<f32> { func @test_cond_if(%arg0: tensor<f32>, %arg1: tensor<f32>, %arg2: tensor<i1>) -> tensor<f32> {