Fix bugs in several operators.

wang_shaocong 2020-10-20 15:48:46 +08:00
parent d0a1a9b73c
commit 7d75ac047f
8 changed files with 69 additions and 21 deletions

View File: nnacl/int8/scale_int8.c

@@ -18,7 +18,7 @@
 #include "nnacl/quantization/fixed_point.h"
 
 void ScaleInnerInt8(const int8_t *in_data, int8_t *out_data, const int8_t *scale, int outer_start, int outer_end,
-                    int axis_size, int inner_size, const ScaleParameter *scale_param) {
+                    int axis_size, int inner_size, const ScaleParameter *scale_param, int max, int min) {
   for (int out = outer_start; out < outer_end; out++) {
     int out_offset = out * axis_size * inner_size;
     for (int i = 0; i < axis_size; i++) {
@@ -34,8 +34,8 @@ void ScaleInnerInt8(const int8_t *in_data, int8_t *out_data, const int8_t *scale
                                           scale_param->scale_mul_arg_.multiplier_),
         scale_param->scale_mul_arg_.right_shift_);
       int tmp = input_mul_scale + scale_param->output_zp_;
-      tmp = tmp > INT8_MAX ? INT8_MAX : tmp;
-      tmp = tmp < INT8_MIN ? INT8_MIN : tmp;
+      tmp = tmp > max ? max : tmp;
+      tmp = tmp < min ? min : tmp;
       out_data[in_offset] = tmp;
     }
   }
@@ -44,7 +44,7 @@ void ScaleInnerInt8(const int8_t *in_data, int8_t *out_data, const int8_t *scale
 
 void ScaleInnerWithBiasInt8(const int8_t *in_data, int8_t *out_data, const int8_t *scale, const int8_t *offset,
                             int outer_start, int outer_end, int axis_size, int inner_size,
-                            const ScaleParameter *scale_param) {
+                            const ScaleParameter *scale_param, int max, int min) {
   for (int out = outer_start; out < outer_end; out++) {
     int out_offset = out * axis_size * inner_size;
     for (int i = 0; i < axis_size; i++) {
@@ -63,10 +63,10 @@ void ScaleInnerWithBiasInt8(const int8_t *in_data, int8_t *out_data, const int8_
       int bias = RoundingDivideByPOT(
         SaturatingRoundingDoublingHighMul(tmp_bias * (1 << (unsigned int)scale_param->offset_mul_arg_.left_shift_),
                                           scale_param->offset_mul_arg_.multiplier_),
-        scale_param->scale_mul_arg_.right_shift_);
+        scale_param->offset_mul_arg_.right_shift_);
       int tmp = input_mul_scale + bias + scale_param->output_zp_;
-      tmp = tmp > INT8_MAX ? INT8_MAX : tmp;
-      tmp = tmp < INT8_MIN ? INT8_MIN : tmp;
+      tmp = tmp > max ? max : tmp;
+      tmp = tmp < min ? min : tmp;
       out_data[in_offset] = tmp;
     }
   }
@@ -74,21 +74,21 @@ void ScaleInnerWithBiasInt8(const int8_t *in_data, int8_t *out_data, const int8_
 }
 
 void DoScaleInt8(const int8_t *in_data, int8_t *out_data, const int8_t *scale, int task_id,
-                 const ScaleParameter *scale_param) {
+                 const ScaleParameter *scale_param, int max, int min) {
   int outer_step = UP_DIV(scale_param->outer_size_, scale_param->op_parameter_.thread_num_);
   int outer_start = task_id * outer_step;
   int outer_end = MSMIN(outer_start + outer_step, scale_param->outer_size_);
 
   ScaleInnerInt8(in_data, out_data, scale, outer_start, outer_end, scale_param->axis_size_, scale_param->inner_size_,
-                 scale_param);
+                 scale_param, max, min);
 }
 
 void DoScaleWithBiasInt8(const int8_t *in_data, int8_t *out_data, const int8_t *scale, const int8_t *offset,
-                         int task_id, const ScaleParameter *scale_param) {
+                         int task_id, const ScaleParameter *scale_param, int max, int min) {
   int outer_step = UP_DIV(scale_param->outer_size_, scale_param->op_parameter_.thread_num_);
   int outer_start = task_id * outer_step;
   int outer_end = MSMIN(outer_start + outer_step, scale_param->outer_size_);
 
   ScaleInnerWithBiasInt8(in_data, out_data, scale, offset, outer_start, outer_end, scale_param->axis_size_,
-                         scale_param->inner_size_, scale_param);
+                         scale_param->inner_size_, scale_param, max, min);
 }
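
For context, this file gets two fixes: the activation clamp bounds (max/min) are now passed in rather than hard-coded to INT8_MAX/INT8_MIN, and the bias requantization uses offset_mul_arg_.right_shift_ instead of the copy-pasted scale_mul_arg_.right_shift_. Below is a minimal sketch of the requantize-then-clamp step, assuming gemmlowp-style semantics for the fixed-point helpers; it is illustrative, not the exact nnacl source.

#include <cstdint>
#include <limits>

int32_t SaturatingRoundingDoublingHighMul(int32_t a, int32_t b) {
  // High 32 bits of a*b*2, rounded; saturate the single overflow case.
  if (a == std::numeric_limits<int32_t>::min() && b == std::numeric_limits<int32_t>::min()) {
    return std::numeric_limits<int32_t>::max();
  }
  int64_t ab = static_cast<int64_t>(a) * static_cast<int64_t>(b);
  int64_t nudge = ab >= 0 ? (1ll << 30) : (1 - (1ll << 30));
  return static_cast<int32_t>((ab + nudge) / (1ll << 31));
}

int32_t RoundingDivideByPOT(int32_t x, int exponent) {
  // Divide by 2^exponent, rounding to nearest, ties away from zero.
  int32_t mask = (1 << exponent) - 1;
  int32_t remainder = x & mask;
  int32_t threshold = (mask >> 1) + (x < 0 ? 1 : 0);
  return (x >> exponent) + (remainder > threshold ? 1 : 0);
}

int8_t RequantizeAndClamp(int32_t value, int left_shift, int32_t multiplier, int right_shift,
                          int32_t output_zp, int max, int min) {
  // Scale in the quantized domain, add the output zero point, then clamp to
  // the activation bounds this commit threads through the kernel API.
  int32_t scaled = RoundingDivideByPOT(
    SaturatingRoundingDoublingHighMul(value * (1 << static_cast<unsigned int>(left_shift)), multiplier),
    right_shift);
  int32_t tmp = scaled + output_zp;
  tmp = tmp > max ? max : tmp;
  tmp = tmp < min ? min : tmp;
  return static_cast<int8_t>(tmp);
}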

View File: nnacl/int8/scale_int8.h

@@ -23,9 +23,9 @@
 extern "C" {
 #endif
 void DoScaleInt8(const int8_t *in_data, int8_t *out_data, const int8_t *scale, int task_id,
-                 const ScaleParameter *scale_param);
+                 const ScaleParameter *scale_param, int max, int min);
 void DoScaleWithBiasInt8(const int8_t *in_data, int8_t *out_data, const int8_t *scale, const int8_t *offset,
-                         int task_id, const ScaleParameter *scale_param);
+                         int task_id, const ScaleParameter *scale_param, int max, int min);
 #ifdef __cplusplus
 }
 #endif

View File: src/runtime/kernel/arm/fp32/arithmetic.cc

@@ -15,12 +15,13 @@
  */
 #include "src/runtime/kernel/arm/fp32/arithmetic.h"
-#include "src/runtime/kernel/arm/int8/add_int8.h"
-#include "src/runtime/kernel/arm/int8/mul_int8.h"
+#include "include/errorcode.h"
 #include "schema/model_generated.h"
 #include "src/kernel_registry.h"
+#include "src/runtime/kernel/arm/int8/add_int8.h"
+#include "src/runtime/kernel/arm/int8/mul_int8.h"
 #include "src/runtime/runtime_api.h"
-#include "include/errorcode.h"
+
 using mindspore::kernel::KERNEL_ARCH::kCPU;
 using mindspore::lite::KernelRegistrar;
@@ -48,6 +49,12 @@ int ArithmeticCPUKernel::ReSize() {
   arithmeticParameter_->in_elements_num0_ = in_tensors_[0]->ElementsNum();
   arithmeticParameter_->in_elements_num1_ = in_tensors_[1]->ElementsNum();
   arithmeticParameter_->out_elements_num_ = out_tensors_[0]->ElementsNum();
+  memcpy(arithmeticParameter_->in_shape0_, static_cast<void *>(in_tensors_[0]->shape().data()),
+         in_tensors_[0]->shape().size() * sizeof(int));
+  memcpy(arithmeticParameter_->in_shape1_, static_cast<void *>(in_tensors_[1]->shape().data()),
+         in_tensors_[1]->shape().size() * sizeof(int));
+  memcpy(arithmeticParameter_->out_shape_, static_cast<void *>(out_tensors_[0]->shape().data()),
+         out_tensors_[0]->shape().size() * sizeof(int));
 
   if (arithmeticParameter_->in_elements_num0_ == 1 || arithmeticParameter_->in_elements_num1_ == 1) {
     switch (arithmeticParameter_->op_parameter_.type_) {
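
The added memcpy calls copy each tensor's shape vector into the fixed-size shape arrays of ArithmeticParameter. A sketch of the pattern with an explicit capacity guard (the committed code assumes the shape fits the destination array):

#include <cstddef>
#include <cstring>
#include <vector>

// Copy a runtime shape into a fixed-capacity int array, truncating rather
// than overflowing when the shape has more dimensions than the array holds.
void CopyShapeToParam(int *dst, std::size_t dst_capacity, const std::vector<int> &shape) {
  std::size_t n = shape.size() < dst_capacity ? shape.size() : dst_capacity;
  std::memcpy(dst, shape.data(), n * sizeof(int));
}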

View File: src/runtime/kernel/arm/int8/scale_int8.cc

@@ -15,6 +15,7 @@
  */
 #include "src/runtime/kernel/arm/int8/scale_int8.h"
 #include <string.h>
+#include <vector>
 #include "nnacl/int8/scale_int8.h"
@@ -195,9 +196,35 @@ int ScaleInt8CPUKernel::ReSize() {
 
 int ScaleInt8CPUKernel::Scale(int task_id) {
   if (has_bias_) {
-    DoScaleWithBiasInt8(input_ptr_, output_ptr_, scale_, offset_, task_id, scale_param_);
+    switch (scale_param_->activation_type_) {
+      case schema::ActivationType_RELU:
+        DoScaleWithBiasInt8(input_ptr_, output_ptr_, scale_, offset_, task_id, scale_param_, INT8_MAX, 0);
+        break;
+      case schema::ActivationType_RELU6:
+        DoScaleWithBiasInt8(input_ptr_, output_ptr_, scale_, offset_, task_id, scale_param_, 6, 0);
+        break;
+      case schema::ActivationType_NO_ACTIVATION:
+        DoScaleWithBiasInt8(input_ptr_, output_ptr_, scale_, offset_, task_id, scale_param_, INT8_MAX, INT8_MIN);
+        break;
+      default:
+        MS_LOG(ERROR) << "Scale does not support activation type " << scale_param_->activation_type_;
+        return RET_ERROR;
+    }
   } else {
-    DoScaleInt8(input_ptr_, output_ptr_, scale_, task_id, scale_param_);
+    switch (scale_param_->activation_type_) {
+      case schema::ActivationType_RELU:
+        DoScaleInt8(input_ptr_, output_ptr_, scale_, task_id, scale_param_, INT8_MAX, 0);
+        break;
+      case schema::ActivationType_RELU6:
+        DoScaleInt8(input_ptr_, output_ptr_, scale_, task_id, scale_param_, 6, 0);
+        break;
+      case schema::ActivationType_NO_ACTIVATION:
+        DoScaleInt8(input_ptr_, output_ptr_, scale_, task_id, scale_param_, INT8_MAX, INT8_MIN);
+        break;
+      default:
+        MS_LOG(ERROR) << "Scale does not support activation type " << scale_param_->activation_type_;
+        return RET_ERROR;
+    }
   }
   return RET_OK;
 }
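
The two switches above differ only in which kernel they call; the activation-to-bounds mapping is the same in both. A sketch of that mapping with stand-in enum values (illustrative, not the committed code) — note the bounds are applied to the already-requantized value, i.e. in the output's quantized domain:

#include <cstdint>
#include <utility>

enum class Act { kNone, kRelu, kRelu6 };  // stand-ins for the schema ActivationType values

// Returns {min, max} clamp bounds matching the switches above.
std::pair<int, int> ClampBounds(Act act) {
  switch (act) {
    case Act::kRelu:  return {0, INT8_MAX};         // clamp negatives to zero
    case Act::kRelu6: return {0, 6};                // upper bound 6 in the quantized domain
    default:          return {INT8_MIN, INT8_MAX};  // no activation: full int8 range
  }
}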

View File

@@ -5,3 +5,4 @@ ml_face_3d.onnx
 gts_version-RFB-320_simplified.onnx
 mnist-8.onnx
 crnn_lite_lstm_v2.onnx:32,32,32,1
+psenet_lite_mbv2.onnx:1,32,32,3

View File: tools/converter/parser/onnx/onnx_pool_parser.cc

@@ -15,6 +15,7 @@
  */
 #include "tools/converter/parser/onnx/onnx_pool_parser.h"
+#include <memory>
 
 namespace mindspore {
@@ -77,7 +78,11 @@ STATUS OnnxPoolParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::Nod
       }
     }
     if (attribute_name == "auto_pad") {
-      MS_ASSERT(false);
+      if (onnx_node_attr.s() == "SAME_UPPER") {
+        attr->padMode = schema::PadMode_SAME_UPPER;
+      } else if (onnx_node_attr.s() == "SAME_LOWER") {
+        attr->padMode = schema::PadMode_SAME_LOWER;
+      }
     }
     if (attribute_name == "pads") {
       if (onnx_node_attr.ints_size() == 4) {
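
The parser previously asserted on auto_pad; it now maps SAME_UPPER and SAME_LOWER to the corresponding pad modes. For reference, both modes yield ceil(in / stride) outputs and differ only in where the odd padding element goes; a sketch per the ONNX auto_pad spec (dilation ignored for brevity):

#include <algorithm>

// Distribute SAME padding for one spatial dimension: SAME_UPPER puts the
// extra element at the end, SAME_LOWER at the beginning.
void ComputeSamePads(int in, int kernel, int stride, bool same_upper, int *pad_begin, int *pad_end) {
  int out = (in + stride - 1) / stride;                       // ceil(in / stride)
  int total = std::max(0, (out - 1) * stride + kernel - in);  // total padding needed
  int half = total / 2;
  *pad_begin = same_upper ? half : total - half;
  *pad_end = same_upper ? total - half : half;
}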

View File: tools/converter/parser/onnx/onnx_relu_parser.cc

@@ -15,8 +15,9 @@
  */
 #include "tools/converter/parser/onnx/onnx_relu_parser.h"
-#include <vector>
+#include <memory>
+#include <vector>
 #include "securec/include/securec.h"
 
 namespace mindspore {
@@ -47,6 +48,12 @@ STATUS OnnxReluParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::Nod
     MS_LOG(DEBUG) << "onnx LeakyReluParser";
     attr->type = schema::ActivationType_LEAKY_RELU;
   }
+  for (const auto &onnx_node_attr : onnx_node.attribute()) {
+    const auto &attribute_name = onnx_node_attr.name();
+    if (attribute_name == "alpha") {
+      attr->alpha = onnx_node_attr.f();
+    }
+  }
   op->primitive->value.type = schema::PrimitiveType_Activation;
   op->primitive->value.value = attr.release();
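
The added loop reads LeakyRelu's alpha attribute, which parameterizes the negative slope; per the ONNX spec, alpha defaults to 0.01 when the attribute is absent. At runtime the activation means:

// LeakyRelu(x) = x for x >= 0, alpha * x otherwise.
inline float LeakyRelu(float x, float alpha) { return x >= 0.0f ? x : alpha * x; }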

View File: tools/converter/parser/onnx/onnx_upsample_parser.cc

@@ -15,6 +15,7 @@
  */
 #include "tools/converter/parser/onnx/onnx_upsample_parser.h"
+#include <memory>
 
 namespace mindspore {
@@ -54,7 +55,7 @@ STATUS OnnxUpsampleParser::Parse(const onnx::GraphProto &onnx_graph, const onnx:
   attr->newWidth = 1;
   attr->newHeight = 1;
   attr->alignCorners = false;
-  op->primitive->value.type = schema::PrimitiveType_Upsample;
+  op->primitive->value.type = schema::PrimitiveType_Resize;
   op->primitive->value.value = attr.release();
   return RET_OK;
 }