[mlir][tosa] Correct tosa.avg_pool2d for specification error

The specification stated that the output type for quantized average pool should be
an i32. Only the accumulator should be an i32; the result type should match the
input type.

Introduced in https://reviews.llvm.org/D111590

Reviewed By: sjarus, GMNGeoffrey

Differential Revision: https://reviews.llvm.org/D112484
This commit is contained in:
Robert Suderman 2021-10-25 14:30:06 -07:00 committed by Rob Suderman
parent e6a971b1ba
commit 58901a5a29
4 changed files with 45 additions and 17 deletions

View File

@ -3000,8 +3000,6 @@ public:
}
}
// Cast to output type.
rewriter.create<linalg::YieldOp>(loc, poolVal);
});

View File

@ -416,9 +416,9 @@ static LogicalResult verifyAveragePoolOp(tosa::AvgPool2dOp op) {
if (inputETy.isF32() && resultETy.isF32())
return success();
if (inputETy.isInteger(8) && resultETy.isInteger(32))
if (inputETy.isInteger(8) && resultETy.isInteger(8))
return success();
if (inputETy.isInteger(16) && resultETy.isInteger(32))
if (inputETy.isInteger(16) && resultETy.isInteger(16))
return success();
return op.emitOpError("input/output element types are incompatible.");

View File

@ -1474,14 +1474,44 @@ func @avg_pool_i8(%arg0 : tensor<1x128x128x2xi8>) -> () {
// CHECK: %[[SCALE:.+]] = "tosa.apply_scale"(%{{.+}}, %[[MULTIPLIER]], %[[SHIFT]]) {double_round = false}
// CHECK: %[[OUTZP:.+]] = arith.constant -128
// CHECK: %[[OUT:.+]] = arith.addi %[[SCALE]], %[[OUTZP]]
// CHECK: %[[MIN:.+]] = arith.constant -2147483648
// CHECK: %[[MAX:.+]] = arith.constant 2147483647
// CHECK: %[[MIN:.+]] = arith.constant -128
// CHECK: %[[MAX:.+]] = arith.constant 127
// CHECK: %[[CMP_MIN:.+]] = arith.cmpi slt, %[[OUT]], %[[MIN]]
// CHECK: %[[CLMP_MIN:.+]] = select %[[CMP_MIN]], %[[MIN]], %[[OUT]]
// CHECK: %[[CMP_MAX:.+]] = arith.cmpi slt, %[[MAX]], %[[OUT]]
// CHECK: %[[CLMP_MAX:.+]] = select %[[CMP_MAX]], %[[MAX]], %[[CLMP_MIN]]
// CHECK: linalg.yield %[[CLMP_MAX]]
%0 = "tosa.avg_pool2d"(%arg0) {kernel = [4, 4], pad = [0, 0, 0, 0], quantization_info = {input_zp = -128 : i32, output_zp = -128 : i32}, stride = [4, 4]} : (tensor<1x128x128x2xi8>) -> tensor<1x32x32x2xi32>
// CHECK: %[[TRUNC:.+]] = arith.trunci %[[CLMP_MAX]]
// CHECK: linalg.yield %[[TRUNC]]
%0 = "tosa.avg_pool2d"(%arg0) {kernel = [4, 4], pad = [0, 0, 0, 0], quantization_info = {input_zp = -128 : i32, output_zp = -128 : i32}, stride = [4, 4]} : (tensor<1x128x128x2xi8>) -> tensor<1x32x32x2xi8>
return
}
// -----
// CHECK-LABEL: @avg_pool_i16
func @avg_pool_i16(%arg0 : tensor<1x128x128x2xi16>) -> () {
// CHECK: linalg.pooling_nhwc_sum
// CHECK: linalg.generic
// CHECK: %[[INZP:.+]] = arith.constant -128
// CHECK: %[[INZP_OFF:.+]] = arith.muli %{{.+}}, %[[INZP]]
// CHECK: %[[OFFSETED:.+]] = arith.subi %arg1, %[[INZP_OFF]]
// CHECK: %[[NUMERATOR:.+]] = arith.constant 1073741825
// CHECK: %[[MULTIPLIER:.+]] = arith.divui %[[NUMERATOR]], %{{.+}}
// CHECK: %[[SHIFT:.+]] = arith.constant 30
// CHECK: %[[SCALE:.+]] = "tosa.apply_scale"(%{{.+}}, %[[MULTIPLIER]], %[[SHIFT]]) {double_round = false}
// CHECK: %[[OUTZP:.+]] = arith.constant -128
// CHECK: %[[OUT:.+]] = arith.addi %[[SCALE]], %[[OUTZP]]
// CHECK: %[[MIN:.+]] = arith.constant -32768
// CHECK: %[[MAX:.+]] = arith.constant 32767
// CHECK: %[[CMP_MIN:.+]] = arith.cmpi slt, %[[OUT]], %[[MIN]]
// CHECK: %[[CLMP_MIN:.+]] = select %[[CMP_MIN]], %[[MIN]], %[[OUT]]
// CHECK: %[[CMP_MAX:.+]] = arith.cmpi slt, %[[MAX]], %[[OUT]]
// CHECK: %[[CLMP_MAX:.+]] = select %[[CMP_MAX]], %[[MAX]], %[[CLMP_MIN]]
// CHECK: %[[TRUNC:.+]] = arith.trunci %[[CLMP_MAX]]
// CHECK: linalg.yield %[[TRUNC]]
%0 = "tosa.avg_pool2d"(%arg0) {kernel = [4, 4], pad = [0, 0, 0, 0], quantization_info = {input_zp = -128 : i32, output_zp = -128 : i32}, stride = [4, 4]} : (tensor<1x128x128x2xi16>) -> tensor<1x32x32x2xi16>
return
}

View File

@ -18,23 +18,23 @@ func @test_avg_pool2d_f32(%arg0: tensor<1x7x7x9xf32>) -> tensor<1x7x7x9xf32> {
// -----
// CHECK-LABEL: avg_pool2d_i8
func @test_avg_pool2d_i8(%arg0: tensor<1x7x7x9xi8>) -> tensor<1x7x7x9xi32> {
%0 = "tosa.avg_pool2d"(%arg0) {kernel = [2, 2], pad = [0, 1, 0, 1], stride = [1, 1]} : (tensor<1x7x7x9xi8>) -> tensor<1x7x7x9xi32>
return %0 : tensor<1x7x7x9xi32>
func @test_avg_pool2d_i8(%arg0: tensor<1x7x7x9xi8>) -> tensor<1x7x7x9xi8> {
%0 = "tosa.avg_pool2d"(%arg0) {kernel = [2, 2], pad = [0, 1, 0, 1], stride = [1, 1]} : (tensor<1x7x7x9xi8>) -> tensor<1x7x7x9xi8>
return %0 : tensor<1x7x7x9xi8>
}
// -----
// CHECK-LABEL: avg_pool2d_i16
func @test_avg_pool2d_i16(%arg0: tensor<1x7x7x9xi16>) -> tensor<1x7x7x9xi32> {
%0 = "tosa.avg_pool2d"(%arg0) {kernel = [2, 2], pad = [0, 1, 0, 1], stride = [1, 1]} : (tensor<1x7x7x9xi16>) -> tensor<1x7x7x9xi32>
return %0 : tensor<1x7x7x9xi32>
func @test_avg_pool2d_i16(%arg0: tensor<1x7x7x9xi16>) -> tensor<1x7x7x9xi16> {
%0 = "tosa.avg_pool2d"(%arg0) {kernel = [2, 2], pad = [0, 1, 0, 1], stride = [1, 1]} : (tensor<1x7x7x9xi16>) -> tensor<1x7x7x9xi16>
return %0 : tensor<1x7x7x9xi16>
}
// -----
// CHECK-LABEL: avg_pool2d_q8
func @test_avg_pool2d_q8(%arg0: tensor<1x7x7x9x!quant.uniform<i8:f32, 0.01>>) -> tensor<1x7x7x9x!quant.uniform<i32:f32, 0.01>> {
%0 = "tosa.avg_pool2d"(%arg0) {kernel = [2, 2], pad = [0, 1, 0, 1], stride = [1, 1]} : (tensor<1x7x7x9x!quant.uniform<i8:f32, 0.01>>) -> tensor<1x7x7x9x!quant.uniform<i32:f32, 0.01>>
return %0 : tensor<1x7x7x9x!quant.uniform<i32:f32, 0.01>>
func @test_avg_pool2d_q8(%arg0: tensor<1x7x7x9x!quant.uniform<i8:f32, 0.01>>) -> tensor<1x7x7x9x!quant.uniform<i8:f32, 0.01>> {
%0 = "tosa.avg_pool2d"(%arg0) {kernel = [2, 2], pad = [0, 1, 0, 1], stride = [1, 1]} : (tensor<1x7x7x9x!quant.uniform<i8:f32, 0.01>>) -> tensor<1x7x7x9x!quant.uniform<i8:f32, 0.01>>
return %0 : tensor<1x7x7x9x!quant.uniform<i8:f32, 0.01>>
}
// -----