From 6f2d60373c84cad43f07aa40f0fd11b51b312177 Mon Sep 17 00:00:00 2001
From: sunsuodong
Date: Mon, 23 Nov 2020 14:24:34 +0800
Subject: [PATCH] fix code review

---
 mindspore/lite/nnacl/arg_min_max.c | 18 +++-----
 mindspore/lite/nnacl/fp16/crop_fp16.c | 2 +
 mindspore/lite/nnacl/fp16/matmul_fp16.c | 46 +++++++++----------
 mindspore/lite/nnacl/fp16/matmul_fp16.h | 12 ++---
 mindspore/lite/nnacl/fp32/arithmetic_fp32.c | 5 +-
 mindspore/lite/nnacl/fp32/broadcast_to_fp32.c | 5 +-
 mindspore/lite/nnacl/fp32/common_func_fp32.c | 5 +-
 .../lite/nnacl/fp32/conv_depthwise_fp32.c | 18 ++++----
 mindspore/lite/nnacl/fp32/gather_fp32.c | 10 ++--
 .../lite/nnacl/fp32/instance_norm_fp32.c | 7 ++-
 mindspore/lite/nnacl/fp32/layer_norm_fp32.c | 7 ++-
 .../nnacl/fp32/local_response_norm_fp32.c | 13 ++----
 mindspore/lite/nnacl/int8/transpose_int8.c | 2 +-
 mindspore/lite/nnacl/int8/transpose_int8.h | 2 +-
 mindspore/lite/nnacl/l2_norm.c | 3 ++
 .../lite/nnacl/minimal_filtering_generator.c | 13 ++++--
 mindspore/lite/nnacl/zeroslike.c | 2 +-
 mindspore/lite/nnacl/zeroslike.h | 2 +-
 mindspore/lite/src/ops/apply_momentum.cc | 2 +-
 mindspore/lite/src/ops/bias_add.cc | 4 --
 mindspore/lite/src/ops/bias_grad.cc | 8 +---
 mindspore/lite/src/ops/bn_grad.cc | 8 +---
 mindspore/lite/src/ops/cast.cc | 4 --
 mindspore/lite/src/ops/conv2d.cc | 8 ++++
 mindspore/lite/src/ops/deconv2d.cc | 8 ++++
 mindspore/lite/src/ops/dedepthwise_conv2d.cc | 6 +--
 mindspore/lite/src/ops/depthwise_conv2d.cc | 6 +--
 mindspore/lite/src/ops/dequant.cc | 4 --
 mindspore/lite/src/ops/full_connection.cc | 2 +-
 mindspore/lite/src/ops/fused_batchnorm.cc | 8 ++--
 mindspore/lite/src/ops/primitive_c.h | 5 +-
 .../runtime/kernel/arm/fp32/zeroslike_fp32.cc | 4 +-
 .../runtime/kernel/arm/int8/transpose_int8.cc | 2 +-
 mindspore/lite/tools/common/protobuf_utils.cc | 1 +
 .../fusion/matmul_biasadd_fusion_pass.cc | 19 ++++----
 .../graph/trans_format_insert_pass.cc | 2 +-
 .../parser/caffe/caffe_reduce_parser.cc | 1 +
 .../converter/parser/onnx/onnx_conv_parser.cc | 2 +-
 .../tools/converter/quantizer/quantizer.h | 2 +-
 39 files changed, 137 insertions(+), 141 deletions(-)

diff --git a/mindspore/lite/nnacl/arg_min_max.c b/mindspore/lite/nnacl/arg_min_max.c
index 40b4cebdf1c..527584e217e 100644
--- a/mindspore/lite/nnacl/arg_min_max.c
+++ b/mindspore/lite/nnacl/arg_min_max.c
@@ -38,17 +38,13 @@ void ArgMinMaxTopk1(const void *input, void *output, const int *shape, ArgMinMax
   int axis_count = 1;
   int after_axis_count = 1;
   GetCalcParameter(shape, param->dims_size_, param->axis_, &pre_axis_count, &axis_count, &after_axis_count);
-  switch (param->data_type_) {
-    case FLOAT_DATA_TYPE: {
-      if (param->get_max_) {
-        ArgMax(input, output, param, pre_axis_count, axis_count, after_axis_count);
-      } else {
-        ArgMin(input, output, param, pre_axis_count, axis_count, after_axis_count);
-      }
-      break;
-    }
-    default:
-      break;
+  if (param->data_type_ != FLOAT_DATA_TYPE) {
+    return;
+  }
+  if (param->get_max_) {
+    ArgMax(input, output, param, pre_axis_count, axis_count, after_axis_count);
+  } else {
+    ArgMin(input, output, param, pre_axis_count, axis_count, after_axis_count);
   }
 }
 
diff --git a/mindspore/lite/nnacl/fp16/crop_fp16.c b/mindspore/lite/nnacl/fp16/crop_fp16.c
index b44d6b9bb61..f014f03a424 100644
--- a/mindspore/lite/nnacl/fp16/crop_fp16.c
+++ b/mindspore/lite/nnacl/fp16/crop_fp16.c
@@ -35,6 +35,8 @@ void Fp16Crop(const float16_t *input, float16_t *output, int task_id, CropParame
     case 4:
       Fp16Crop4D(input, output, task_id, para);
       break;
+    default:
+      break;
   }
 }
 
diff --git
a/mindspore/lite/nnacl/fp16/matmul_fp16.c b/mindspore/lite/nnacl/fp16/matmul_fp16.c index 9b1cb4b4db0..b50bc74888d 100644 --- a/mindspore/lite/nnacl/fp16/matmul_fp16.c +++ b/mindspore/lite/nnacl/fp16/matmul_fp16.c @@ -16,16 +16,16 @@ #include "nnacl/fp16/matmul_fp16.h" -void ColMajor2Row8MajorFp16(void *src_ptr, float16_t *dst_ptr, size_t row, size_t col, bool src_float16) { +void ColMajor2Row8MajorFp16(const void *src_ptr, float16_t *dst_ptr, size_t row, size_t col, bool src_float16) { int row_c8 = row / C8NUM * C8NUM; int col_c8 = col / C8NUM * C8NUM; int ci = 0; if (src_float16) { - float16_t *src = (float16_t *)src_ptr; + const float16_t *src = (const float16_t *)src_ptr; for (; ci < col_c8; ci += C8NUM) { int ri = 0; for (; ri < row_c8; ri += C8NUM) { - float16_t *src_ptr1 = src + ci * row + ri; + const float16_t *src_ptr1 = src + ci * row + ri; float16_t *dst_ptr1 = dst_ptr + ci * row + ri * C8NUM; #ifdef ENABLE_ARM64 size_t strid_row = row * 2; @@ -93,7 +93,7 @@ void ColMajor2Row8MajorFp16(void *src_ptr, float16_t *dst_ptr, size_t row, size_ #endif } for (; ri < row; ++ri) { - float16_t *src_ptr1 = src + ci * row; + const float16_t *src_ptr1 = src + ci * row; float16_t *dst_ptr1 = dst_ptr + ci * row; for (int tc = 0; tc < C8NUM; ++tc) { dst_ptr1[ri * C8NUM + tc] = src_ptr1[tc * row + ri]; @@ -108,11 +108,11 @@ void ColMajor2Row8MajorFp16(void *src_ptr, float16_t *dst_ptr, size_t row, size_ } } } else { - float *src = (float *)src_ptr; + const float *src = (const float *)src_ptr; for (; ci < col_c8; ci += C8NUM) { int ri = 0; for (; ri < row_c8; ri += C8NUM) { - float *src_ptr1 = src + ci * row + ri; + const float *src_ptr1 = src + ci * row + ri; float16_t *dst_ptr1 = dst_ptr + ci * row + ri * C8NUM; #ifdef ENABLE_ARM64 size_t strid_row = row * 4; @@ -197,7 +197,7 @@ void ColMajor2Row8MajorFp16(void *src_ptr, float16_t *dst_ptr, size_t row, size_ #endif } for (; ri < row; ++ri) { - float *src_ptr1 = src + ci * row; + const float *src_ptr1 = src + ci * row; float16_t *dst_ptr1 = dst_ptr + ci * row; for (int tc = 0; tc < C8NUM; ++tc) { dst_ptr1[ri * C8NUM + tc] = (float16_t)(src_ptr1[tc * row + ri]); @@ -274,18 +274,18 @@ void MatVecMulFp16(const float16_t *a, const float16_t *b, float16_t *c, const f MatVecMulFp16Neon64(a, b, c, bias, (int)act_type, depth, col); } -void RowMajor2Col16MajorFp16Opt(float16_t *src_ptr, float16_t *dst_ptr, size_t row, size_t col) { +void RowMajor2Col16MajorFp16Opt(const float16_t *src_ptr, float16_t *dst_ptr, size_t row, size_t col) { size_t row_up_16 = UP_ROUND(row, C16NUM); size_t row16 = row / C16NUM * C16NUM; size_t col8 = col / C8NUM * C8NUM; - float16_t *src_r = src_ptr; + const float16_t *src_r = src_ptr; float16_t *dst_r = dst_ptr; size_t ri = 0; for (; ri < row16; ri += C16NUM) { size_t ci = 0; for (; ci < col8; ci += C8NUM) { - float16_t *src_c = src_r + ci; + const float16_t *src_c = src_r + ci; float16_t *dst_c = dst_r + ci * C16NUM; #ifdef ENABLE_ARM64 @@ -403,7 +403,7 @@ void RowMajor2Col16MajorFp16Opt(float16_t *src_ptr, float16_t *dst_ptr, size_t r #endif } for (; ci < col; ci++) { - float16_t *src_c = src_r + ci; + const float16_t *src_c = src_r + ci; float16_t *dst_c = dst_r + ci * C16NUM; for (size_t i = 0; i < C16NUM; i++) { dst_c[i] = src_c[i * col]; @@ -428,57 +428,57 @@ void RowMajor2Col16MajorFp16Opt(float16_t *src_ptr, float16_t *dst_ptr, size_t r return; } -void RowMajor2Col16MajorFp16(void *src, float16_t *dst, int row, int col, bool is_fp32_src) { +void RowMajor2Col16MajorFp16(const void *src, float16_t *dst, int row, int col, 
bool is_fp32_src) { for (int r = 0; r < row; r++) { for (int c = 0; c < col; c++) { int r_div16 = r / 16; int r_mod16 = r % 16; if (is_fp32_src) { - dst[r_div16 * 16 * col + c * 16 + r_mod16] = (float16_t)(((float *)src)[r * col + c]); + dst[r_div16 * 16 * col + c * 16 + r_mod16] = (float16_t)(((const float *)src)[r * col + c]); } else { - dst[r_div16 * 16 * col + c * 16 + r_mod16] = ((float16_t *)src)[r * col + c]; + dst[r_div16 * 16 * col + c * 16 + r_mod16] = ((const float16_t *)src)[r * col + c]; } } } } -void RowMajor2Row16MajorFp16(void *src, float16_t *dst, int row, int col, bool is_fp32_src) { +void RowMajor2Row16MajorFp16(const void *src, float16_t *dst, int row, int col, bool is_fp32_src) { for (int r = 0; r < row; r++) { for (int c = 0; c < col; c++) { int c_div16 = c / 16; int c_mod16 = c % 16; if (is_fp32_src) { - dst[c_div16 * 16 * row + r * 16 + c_mod16] = (float16_t)(((float *)src)[r * col + c]); + dst[c_div16 * 16 * row + r * 16 + c_mod16] = (float16_t)(((const float *)src)[r * col + c]); } else { - dst[c_div16 * 16 * row + r * 16 + c_mod16] = ((float16_t *)src)[r * col + c]; + dst[c_div16 * 16 * row + r * 16 + c_mod16] = ((const float16_t *)src)[r * col + c]; } } } } -void RowMajor2Row8MajorFp16(void *src, float16_t *dst, int row, int col, bool is_fp32_src) { +void RowMajor2Row8MajorFp16(const void *src, float16_t *dst, int row, int col, bool is_fp32_src) { for (int r = 0; r < row; r++) { for (int c = 0; c < col; c++) { int c_div8 = c / 8; int c_mod8 = c % 8; if (is_fp32_src) { - dst[c_div8 * 8 * row + r * 8 + c_mod8] = (float16_t)(((float *)src)[r * col + c]); + dst[c_div8 * 8 * row + r * 8 + c_mod8] = (float16_t)(((const float *)src)[r * col + c]); } else { - dst[c_div8 * 8 * row + r * 8 + c_mod8] = ((float16_t *)src)[r * col + c]; + dst[c_div8 * 8 * row + r * 8 + c_mod8] = ((const float16_t *)src)[r * col + c]; } } } } -void RowMajor2Col8MajorFp16(void *src, float16_t *dst, int row, int col, bool is_fp32_src) { +void RowMajor2Col8MajorFp16(const void *src, float16_t *dst, int row, int col, bool is_fp32_src) { for (int r = 0; r < row; r++) { for (int c = 0; c < col; c++) { int r_div8 = r / 8; int r_mod8 = r % 8; if (is_fp32_src) { - dst[r_div8 * 8 * col + c * 8 + r_mod8] = (float16_t)(((float *)src)[r * col + c]); + dst[r_div8 * 8 * col + c * 8 + r_mod8] = (float16_t)(((const float *)src)[r * col + c]); } else { - dst[r_div8 * 8 * col + c * 8 + r_mod8] = ((float16_t *)src)[r * col + c]; + dst[r_div8 * 8 * col + c * 8 + r_mod8] = ((const float16_t *)src)[r * col + c]; } } } diff --git a/mindspore/lite/nnacl/fp16/matmul_fp16.h b/mindspore/lite/nnacl/fp16/matmul_fp16.h index 26d827c6219..086a1b973dc 100644 --- a/mindspore/lite/nnacl/fp16/matmul_fp16.h +++ b/mindspore/lite/nnacl/fp16/matmul_fp16.h @@ -38,9 +38,9 @@ void MatMulFp16(const float16_t *a, const float16_t *b, float16_t *c, const floa void MatVecMulFp16(const float16_t *a, const float16_t *b, float16_t *c, const float16_t *bias, ActType act_type, int depth, int col); -void ColMajor2Row8MajorFp16(void *src_ptr, float16_t *dst_ptr, size_t row, size_t col, bool src_float16); +void ColMajor2Row8MajorFp16(const void *src_ptr, float16_t *dst_ptr, size_t row, size_t col, bool src_float16); -void RowMajor2Col16MajorFp16Opt(float16_t *src_ptr, float16_t *dst_ptr, size_t row, size_t col); +void RowMajor2Col16MajorFp16Opt(const float16_t *src_ptr, float16_t *dst_ptr, size_t row, size_t col); void MatmulFp16Neon64(const float16_t *a, const float16_t *b, float16_t *c, const float16_t *bias, int act_type, size_t depth, size_t 
row, size_t col, size_t stride, bool write_nhwc); @@ -51,13 +51,13 @@ void MatmulFp16Neon64Opt(const float16_t *a, const float16_t *b, float16_t *c, c void MatVecMulFp16Neon64(const float16_t *a, const float16_t *b, float16_t *c, const float16_t *bias, int act_type, int depth, int col); -void RowMajor2Col16MajorFp16(void *src, float16_t *dst, int row, int col, bool is_fp32_src); +void RowMajor2Col16MajorFp16(const void *src, float16_t *dst, int row, int col, bool is_fp32_src); -void RowMajor2Row16MajorFp16(void *src, float16_t *dst, int row, int col, bool is_fp32_src); +void RowMajor2Row16MajorFp16(const void *src, float16_t *dst, int row, int col, bool is_fp32_src); -void RowMajor2Row8MajorFp16(void *src, float16_t *dst, int row, int col, bool is_fp32_src); +void RowMajor2Row8MajorFp16(const void *src, float16_t *dst, int row, int col, bool is_fp32_src); -void RowMajor2Col8MajorFp16(void *src, float16_t *dst, int row, int col, bool is_fp32_src); +void RowMajor2Col8MajorFp16(const void *src, float16_t *dst, int row, int col, bool is_fp32_src); #ifdef __cplusplus } diff --git a/mindspore/lite/nnacl/fp32/arithmetic_fp32.c b/mindspore/lite/nnacl/fp32/arithmetic_fp32.c index 916f359603c..081540967e8 100644 --- a/mindspore/lite/nnacl/fp32/arithmetic_fp32.c +++ b/mindspore/lite/nnacl/fp32/arithmetic_fp32.c @@ -16,6 +16,7 @@ #include "nnacl/fp32/arithmetic_fp32.h" #include +#include #define ACCURACY_DATA 0.00000001 @@ -964,7 +965,7 @@ int ElementNotEqual(const float *input0, const float *input1, float *output, con } #endif for (; index < element_size; index++) { - output[index] = (float)(input0[index] != input1[index]); + output[index] = (float)(fabsf(input0[index] - input1[index]) > FLT_EPSILON); } return NNACL_OK; } @@ -996,7 +997,7 @@ int ElementEqual(const float *input0, const float *input1, float *output, const } #endif for (; index < element_size; index++) { - output[index] = (float)(input0[index] == input1[index]); + output[index] = (float)(fabsf(input0[index] - input1[index]) <= FLT_EPSILON); } return NNACL_OK; } diff --git a/mindspore/lite/nnacl/fp32/broadcast_to_fp32.c b/mindspore/lite/nnacl/fp32/broadcast_to_fp32.c index 2fcf17be939..73202f663fe 100644 --- a/mindspore/lite/nnacl/fp32/broadcast_to_fp32.c +++ b/mindspore/lite/nnacl/fp32/broadcast_to_fp32.c @@ -17,6 +17,7 @@ #include "nnacl/fp32/broadcast_to_fp32.h" #include #include "nnacl/op_base.h" +#include "nnacl/errorcode.h" void PadBroadcastShapeInfo(BroadcastShapeInfo *shape_info) { if (shape_info->input_shape_size_ < DIMENSION_4D) { @@ -51,7 +52,7 @@ void PadBroadcastShapeInfo(BroadcastShapeInfo *shape_info) { int BroadcastTo(const float *input, BroadcastShapeInfo *shape_info, float *output) { if (shape_info->input_shape_size_ > DIMENSION_4D || shape_info->output_shape_size_ > DIMENSION_4D) { - return -1; + return NNACL_ERR; } PadBroadcastShapeInfo(shape_info); size_t input_dim_offset[DIMENSION_4D - 1]; @@ -98,5 +99,5 @@ int BroadcastTo(const float *input, BroadcastShapeInfo *shape_info, float *outpu memcpy(out_base + output_dim_offset[0] * dim0, out_base, output_dim_offset[0]); } } - return 0; + return NNACL_OK; } diff --git a/mindspore/lite/nnacl/fp32/common_func_fp32.c b/mindspore/lite/nnacl/fp32/common_func_fp32.c index c0b0a8400c8..0edd78a2bf0 100644 --- a/mindspore/lite/nnacl/fp32/common_func_fp32.c +++ b/mindspore/lite/nnacl/fp32/common_func_fp32.c @@ -116,11 +116,10 @@ void WinogradTransRight(const float *S, const float *B, float *M, size_t w, size } #endif -union float32_bits { +typedef union float32_bits { unsigned int 
u; float f; -}; -typedef union float32_bits float32_bits; +} float32_bits; float ShortToFloat32(uint16_t src_value) { const float32_bits magic = {113 << 23}; diff --git a/mindspore/lite/nnacl/fp32/conv_depthwise_fp32.c b/mindspore/lite/nnacl/fp32/conv_depthwise_fp32.c index 54b4b174f08..7a99e5f0ea0 100644 --- a/mindspore/lite/nnacl/fp32/conv_depthwise_fp32.c +++ b/mindspore/lite/nnacl/fp32/conv_depthwise_fp32.c @@ -93,19 +93,21 @@ void InitSlidingParam(SlidingWindowParam *sliding, const ConvParameter *conv_par int top = 0; int bottom = conv_param->output_h_; - for (; left * conv_param->stride_w_ < conv_param->pad_l_; left++) { + while (left * conv_param->stride_w_ < conv_param->pad_l_) { + left++; } - for (; (right - 1) * conv_param->stride_w_ - conv_param->pad_l_ + conv_param->kernel_w_ * conv_param->dilation_w_ > + while ((right - 1) * conv_param->stride_w_ - conv_param->pad_l_ + conv_param->kernel_w_ * conv_param->dilation_w_ > conv_param->input_w_ && - right > left; - right--) { + right > left) { + right--; } - for (; top * conv_param->stride_h_ < conv_param->pad_u_; top++) { + while (top * conv_param->stride_h_ < conv_param->pad_u_) { + top++; } - for (; (bottom - 1) * conv_param->stride_h_ - conv_param->pad_u_ + conv_param->kernel_h_ * conv_param->dilation_h_ > + while ((bottom - 1) * conv_param->stride_h_ - conv_param->pad_u_ + conv_param->kernel_h_ * conv_param->dilation_h_ > conv_param->input_h_ && - bottom > top; - bottom--) { + bottom > top) { + bottom--; } sliding->left_ = left; sliding->right_ = right; diff --git a/mindspore/lite/nnacl/fp32/gather_fp32.c b/mindspore/lite/nnacl/fp32/gather_fp32.c index c899ec1fb8f..ab6b4ed3b61 100644 --- a/mindspore/lite/nnacl/fp32/gather_fp32.c +++ b/mindspore/lite/nnacl/fp32/gather_fp32.c @@ -28,11 +28,10 @@ inline int Stride(const int *shape, int rank, int index) { int Gather(float *input, int outer_size, int inner_size, int limit, const int *indices, int indices_element_size, float *output) { - int i, m; - for (m = 0; m < outer_size; ++m) { + for (int m = 0; m < outer_size; ++m) { float *inputm = input + inner_size * m * limit; float *outputm = output + inner_size * m * indices_element_size; - for (i = 0; i < indices_element_size; ++i) { + for (int i = 0; i < indices_element_size; ++i) { if (indices[i] < 0 || indices[i] > limit) { return NNACL_ERR; } @@ -44,11 +43,10 @@ int Gather(float *input, int outer_size, int inner_size, int limit, const int *i int GatherInt32(const int32_t *input, int outer_size, int inner_size, int limit, const int *indices, int indices_element_size, int32_t *output) { - int i, m; - for (m = 0; m < outer_size; ++m) { + for (int m = 0; m < outer_size; ++m) { const int32_t *inputm = input + inner_size * m * limit; int32_t *outputm = output + inner_size * m * indices_element_size; - for (i = 0; i < indices_element_size; ++i) { + for (int i = 0; i < indices_element_size; ++i) { if (indices[i] < 0 || indices[i] > limit) { return NNACL_ERR; } diff --git a/mindspore/lite/nnacl/fp32/instance_norm_fp32.c b/mindspore/lite/nnacl/fp32/instance_norm_fp32.c index 38eaf5444df..68b05d74a24 100644 --- a/mindspore/lite/nnacl/fp32/instance_norm_fp32.c +++ b/mindspore/lite/nnacl/fp32/instance_norm_fp32.c @@ -24,14 +24,13 @@ int InstanceNorm(const int outer_size, const int inner_size, const float *src_da if (src_data == NULL || dst_data == NULL || scale_data == NULL || bias_data == NULL) { return NNACL_NULL_PTR; } - int i, j; - for (j = task_id; j < outer_size; j += thread_num) { + for (int j = task_id; j < outer_size; j += thread_num) { 
int offset = (j / param->channel_) * inner_size * param->channel_; const float *src = src_data + offset; float *dst = dst_data + offset; float mean = 0.0f; float square_mean = 0.0f; - for (i = 0; i < inner_size; i++) { + for (int i = 0; i < inner_size; i++) { int idx = j % param->channel_ + i * param->channel_; mean += src[idx]; square_mean += src[idx] * src[idx]; @@ -39,7 +38,7 @@ int InstanceNorm(const int outer_size, const int inner_size, const float *src_da mean /= (float)inner_size; square_mean /= (float)inner_size; const float deno = 1 / sqrtf(square_mean - mean * mean + param->epsilon_); - for (i = 0; i < inner_size; ++i) { + for (int i = 0; i < inner_size; ++i) { int idx = j % param->channel_ + i * param->channel_; int scale_idx = (j / param->channel_) * param->channel_ + j % param->channel_; dst[idx] = ((src[idx] - mean) * deno) * scale_data[scale_idx] + bias_data[scale_idx]; diff --git a/mindspore/lite/nnacl/fp32/layer_norm_fp32.c b/mindspore/lite/nnacl/fp32/layer_norm_fp32.c index 7a957bd856d..a5cbc767e2a 100644 --- a/mindspore/lite/nnacl/fp32/layer_norm_fp32.c +++ b/mindspore/lite/nnacl/fp32/layer_norm_fp32.c @@ -27,20 +27,19 @@ int LayerNorm(const int outer_size, const int inner_size, const float *src_data, if (affine && (gamma_data == NULL || beta_data == NULL)) { return NNACL_NULL_PTR; } - int i, j; - for (j = tid; j < outer_size; j += thread_num) { + for (int j = tid; j < outer_size; j += thread_num) { const float *src = src_data + j * inner_size; float *dst = dst_data + j * inner_size; float mean = 0.0f; float square_mean = 0.0f; - for (i = 0; i < inner_size; i++) { + for (int i = 0; i < inner_size; i++) { mean += src[i]; square_mean += src[i] * src[i]; } mean /= (float)inner_size; square_mean /= (float)inner_size; const float deno = 1 / sqrtf(square_mean - mean * mean + epsilon); - for (i = 0; i < inner_size; ++i) { + for (int i = 0; i < inner_size; ++i) { dst[i] = (src[i] - mean) * deno; if (affine) { dst[i] = dst[i] * gamma_data[i] + beta_data[i]; diff --git a/mindspore/lite/nnacl/fp32/local_response_norm_fp32.c b/mindspore/lite/nnacl/fp32/local_response_norm_fp32.c index 93e2a0636af..1f49b413006 100644 --- a/mindspore/lite/nnacl/fp32/local_response_norm_fp32.c +++ b/mindspore/lite/nnacl/fp32/local_response_norm_fp32.c @@ -19,24 +19,21 @@ int LocalResponseNorm(float *input_ptr, int out_size, int channel, float *output_ptr, LocalResponseNormParameter *param) { - int i, j, k; - int left, right; - int depth_radius = param->depth_radius_; float bias = param->bias_; float alpha = param->alpha_; float beta = param->beta_; - for (i = 0; i < out_size; i++) { + for (int i = 0; i < out_size; i++) { float *in_data = input_ptr + i * channel; float *out_data = output_ptr + i * channel; - for (j = 0; j < channel; j++) { - left = MSMAX(0, j - depth_radius); - right = MSMIN(channel - 1, j + depth_radius); + for (int j = 0; j < channel; j++) { + int left = MSMAX(0, j - depth_radius); + int right = MSMIN(channel - 1, j + depth_radius); float sum = 0.0; - for (k = left; k <= right; k++) { + for (int k = left; k <= right; k++) { const float in_val = in_data[k]; sum += in_val * in_val; } diff --git a/mindspore/lite/nnacl/int8/transpose_int8.c b/mindspore/lite/nnacl/int8/transpose_int8.c index 114eed0c2a3..7f198a43b2e 100644 --- a/mindspore/lite/nnacl/int8/transpose_int8.c +++ b/mindspore/lite/nnacl/int8/transpose_int8.c @@ -148,7 +148,7 @@ void TransposeCommInt8(const int8_t *in_data, int8_t *out_data, const int *strid } } -int DoTransposeInt8(const int8_t *in_data, int8_t *out_data, int 
*input_shape, const int *output_shape, +int DoTransposeInt8(const int8_t *in_data, int8_t *out_data, const int *output_shape, TransposeParameter *transpose_param, int h_start, int h_end, int *dim_size, int *position) { if (in_data == NULL || out_data == NULL) { return NNACL_NULL_PTR; diff --git a/mindspore/lite/nnacl/int8/transpose_int8.h b/mindspore/lite/nnacl/int8/transpose_int8.h index 1379ed3c28c..c90c485bc14 100644 --- a/mindspore/lite/nnacl/int8/transpose_int8.h +++ b/mindspore/lite/nnacl/int8/transpose_int8.h @@ -25,7 +25,7 @@ extern "C" { #endif -int DoTransposeInt8(const int8_t *in_data, int8_t *out_data, int *input_shape, const int *output_shape, +int DoTransposeInt8(const int8_t *in_data, int8_t *out_data, const int *output_shape, TransposeParameter *transpose_param, int h_start, int h_end, int *dim_size, int *position); #ifdef __cplusplus diff --git a/mindspore/lite/nnacl/l2_norm.c b/mindspore/lite/nnacl/l2_norm.c index 22fc6fde0d4..8070401d555 100644 --- a/mindspore/lite/nnacl/l2_norm.c +++ b/mindspore/lite/nnacl/l2_norm.c @@ -32,6 +32,9 @@ int ThreadDivSqrtSum(const float *input_ptr, float *output_ptr, const L2NormPara bool is_relu = param->act_type_ == ActType_Relu; bool is_relu6 = param->act_type_ == ActType_Relu6; int i; + if (sqrt_sum == 0) { + return NNACL_ERRCODE_DIVISOR_ZERO; + } for (i = begin; i < end; i++) { float tmp = input_ptr[i] / sqrt_sum; if (is_relu) { diff --git a/mindspore/lite/nnacl/minimal_filtering_generator.c b/mindspore/lite/nnacl/minimal_filtering_generator.c index 14b5d731e11..f5d8a5e0777 100644 --- a/mindspore/lite/nnacl/minimal_filtering_generator.c +++ b/mindspore/lite/nnacl/minimal_filtering_generator.c @@ -23,7 +23,9 @@ void Polynomial(const float *interval, float *m, int degree) { for (int i = 0; i < degree; ++i) { float mul = 1; for (int j = 0; j < degree; ++j) { - if (i == j) continue; + if (i == j) { + continue; + } mul *= (interval[i] - interval[j]); } m[i] = mul; @@ -35,7 +37,9 @@ void DiagonalPlusMatrix(const float *matrix, float *diagonal_matrix, int degree) memset(diagonal_matrix, 0, data_num * sizeof(float)); for (int i = 0; i < degree; ++i) { for (int j = 0; j < degree; ++j) { - if (j == i) diagonal_matrix[i * (degree + 1) + j] = matrix[i]; + if (j == i) { + diagonal_matrix[i * (degree + 1) + j] = matrix[i]; + } } } diagonal_matrix[data_num - 1] = 1; @@ -207,7 +211,10 @@ int CookToomFilter(float *matrix_a, float *matrix_at, float *matrix_b, float *ma MatrixTranspose(matrix_a, matrix_at, in_unit, out_unit); // get matrix B - B(interval, matrix_bt, in_unit); + int ret = B(interval, matrix_bt, in_unit); + if (ret != NNACL_OK) { + return ret; + } MatrixTranspose(matrix_bt, matrix_b, in_unit, in_unit); MatrixMultiply(diagonal_matrix, matrix_b, matrix_bt, in_unit, in_unit, in_unit); MatrixTranspose(matrix_bt, matrix_b, in_unit, in_unit); diff --git a/mindspore/lite/nnacl/zeroslike.c b/mindspore/lite/nnacl/zeroslike.c index 93612cfd5c4..92712b6ea3e 100644 --- a/mindspore/lite/nnacl/zeroslike.c +++ b/mindspore/lite/nnacl/zeroslike.c @@ -17,4 +17,4 @@ #include #include -void ApproximateZerosLike(float *input, float *output, int number) { memset(output, 0.0, number * sizeof(float)); } +void ApproximateZerosLike(float *output, int number) { memset(output, 0.0, number * sizeof(float)); } diff --git a/mindspore/lite/nnacl/zeroslike.h b/mindspore/lite/nnacl/zeroslike.h index b1a541ffd0c..b3faa49ab92 100644 --- a/mindspore/lite/nnacl/zeroslike.h +++ b/mindspore/lite/nnacl/zeroslike.h @@ -21,7 +21,7 @@ #ifdef __cplusplus extern "C" { #endif -void 
ApproximateZerosLike(float *input, float *output, int number); +void ApproximateZerosLike(float *output, int number); #ifdef __cplusplus } #endif diff --git a/mindspore/lite/src/ops/apply_momentum.cc b/mindspore/lite/src/ops/apply_momentum.cc index 4771314f2d5..e38e032efc3 100644 --- a/mindspore/lite/src/ops/apply_momentum.cc +++ b/mindspore/lite/src/ops/apply_momentum.cc @@ -79,7 +79,7 @@ Registry ApplyMomentumRegistry(schema::PrimitiveType_ApplyMomentum, ApplyMomentu #endif int ApplyMomentum::InferShape(std::vector inputs, std::vector outputs) { - if (5 != inputs.size()) { + if (inputs.size() != 5) { MS_LOG(ERROR) << "ApplyMomentum should have at least 5 input tensors"; return RET_ERROR; } diff --git a/mindspore/lite/src/ops/bias_add.cc b/mindspore/lite/src/ops/bias_add.cc index a8863264cbb..dc8c4f933f3 100644 --- a/mindspore/lite/src/ops/bias_add.cc +++ b/mindspore/lite/src/ops/bias_add.cc @@ -54,10 +54,6 @@ int BiasAdd::UnPackAttr(const Primitive &prim, const std::vector &in attr->axis = CastToInt(prim.GetAttr("axis"), true); } this->primitive_->value.value = attr; - if (this->primitive_->value.value == nullptr) { - MS_LOG(ERROR) << "primitive value is nullptr"; - return RET_ERROR; - } } return RET_OK; } diff --git a/mindspore/lite/src/ops/bias_grad.cc b/mindspore/lite/src/ops/bias_grad.cc index eec0d79df76..85017d3561a 100644 --- a/mindspore/lite/src/ops/bias_grad.cc +++ b/mindspore/lite/src/ops/bias_grad.cc @@ -52,10 +52,6 @@ int BiasGrad::UnPackAttr(const Primitive &prim, const std::vector &i attr->axis = CastToInt(prim.GetAttr("axis"), true); } this->primitive_->value.value = attr; - if (this->primitive_->value.value == nullptr) { - MS_LOG(ERROR) << "primitive value is nullptr"; - return RET_ERROR; - } } return RET_OK; } @@ -91,11 +87,11 @@ Registry BiasGradRegistry(schema::PrimitiveType_BiasGrad, BiasGradCreator); #endif int BiasGrad::InferShape(std::vector inputs, std::vector outputs) { - if (1 != inputs.size()) { + if (inputs.size() != 1) { MS_LOG(ERROR) << "BiasGrad should have one input"; return RET_ERROR; } - if (1 != outputs.size()) { + if (outputs.size() != 1) { MS_LOG(ERROR) << "BiasGrad should have one output"; return RET_ERROR; } diff --git a/mindspore/lite/src/ops/bn_grad.cc b/mindspore/lite/src/ops/bn_grad.cc index 17916da72a9..99604e2d510 100644 --- a/mindspore/lite/src/ops/bn_grad.cc +++ b/mindspore/lite/src/ops/bn_grad.cc @@ -56,10 +56,6 @@ int BNGrad::UnPackAttr(const Primitive &prim, const std::vector &inp attr->eps = GetValue(prim.GetAttr("epsilon")); } this->primitive_->value.value = attr; - if (this->primitive_->value.value == nullptr) { - MS_LOG(ERROR) << "primitive value is nullptr"; - return RET_ERROR; - } } return RET_OK; } @@ -85,11 +81,11 @@ float BNGrad::GetEps() const { return this->primitive_->value_as_BNGrad()->eps() float BNGrad::GetMomentum() const { return this->primitive_->value_as_BNGrad()->momentum(); } #endif int BNGrad::InferShape(std::vector inputs, std::vector outputs) { - if (6 != inputs.size()) { + if (inputs.size() != 6) { MS_LOG(ERROR) << "BNGrad should have five inputs"; return RET_ERROR; } - if (3 != outputs.size()) { + if (outputs.size() != 3) { MS_LOG(ERROR) << "BNGrad should have three outputs"; return RET_ERROR; } diff --git a/mindspore/lite/src/ops/cast.cc b/mindspore/lite/src/ops/cast.cc index 5403bfa03a3..6dfe87d9d35 100644 --- a/mindspore/lite/src/ops/cast.cc +++ b/mindspore/lite/src/ops/cast.cc @@ -53,10 +53,6 @@ int Cast::UnPackAttr(const Primitive &prim, const std::vector &input attr->srcT = srcAnf->number_type(); attr->dstT 
= dstAnf->number_type(); this->primitive_->value.value = attr; - if (this->primitive_->value.value == nullptr) { - MS_LOG(ERROR) << "primitive value is nullptr"; - return RET_ERROR; - } } return RET_OK; diff --git a/mindspore/lite/src/ops/conv2d.cc b/mindspore/lite/src/ops/conv2d.cc index 6090b6f3189..2e673b1aad1 100644 --- a/mindspore/lite/src/ops/conv2d.cc +++ b/mindspore/lite/src/ops/conv2d.cc @@ -131,6 +131,10 @@ void ConvertConvWeight(const ParameterPtr ¶m_node) { void Conv2D::PopulaterConv2DMultiGroup(const Primitive &prim, schema::PrimitiveT *primitive, const int &group, const std::vector &inputs) { auto attr = std::make_unique(); + if (attr.get() == nullptr) { + MS_LOG(ERROR) << "Memory allocation failed"; + return; + } auto format = GetValue(prim.GetAttr("data_format")); if (format == "NCHW") { attr->format = schema::Format::Format_NCHW; @@ -203,6 +207,10 @@ void Conv2D::PopulaterConv2DMultiGroup(const Primitive &prim, schema::PrimitiveT void Conv2D::PopulaterConv2DSingleGroup(const Primitive &prim, schema::PrimitiveT *primitive, const int &group) { auto attr = std::make_unique(); + if (attr.get() == nullptr) { + MS_LOG(ERROR) << "Memory allocation failed"; + return; + } attr->group = group; auto format = GetValue(prim.GetAttr("data_format")); if (format == "NCHW") { diff --git a/mindspore/lite/src/ops/deconv2d.cc b/mindspore/lite/src/ops/deconv2d.cc index 109305009da..19f1f90a8f6 100644 --- a/mindspore/lite/src/ops/deconv2d.cc +++ b/mindspore/lite/src/ops/deconv2d.cc @@ -124,6 +124,10 @@ void ConvertConvWeight(const ParameterPtr ¶m_node) { void DeConv2D::PopulaterConv2DMultiGroup(const Primitive &prim, schema::PrimitiveT *primitive, const int &group, const std::vector &inputs) { auto attr = std::make_unique(); + if (attr.get() == nullptr) { + MS_LOG(ERROR) << "Memory allocation failed"; + return; + } auto format = GetValue(prim.GetAttr("data_format")); if (format == "NCHW") { attr->format = schema::Format::Format_NCHW; @@ -186,6 +190,10 @@ void DeConv2D::PopulaterConv2DMultiGroup(const Primitive &prim, schema::Primitiv void DeConv2D::PopulaterDeConv2DSingleGroup(const Primitive &prim, schema::PrimitiveT *primitive, const int &group) { auto attr = std::make_unique(); + if (attr.get() == nullptr) { + MS_LOG(ERROR) << "Memory allocation failed"; + return; + } attr->group = group; auto format = GetValue(prim.GetAttr("data_format")); if (format == "NCHW") { diff --git a/mindspore/lite/src/ops/dedepthwise_conv2d.cc b/mindspore/lite/src/ops/dedepthwise_conv2d.cc index 577b7263ac9..50c1baab6bf 100644 --- a/mindspore/lite/src/ops/dedepthwise_conv2d.cc +++ b/mindspore/lite/src/ops/dedepthwise_conv2d.cc @@ -45,7 +45,7 @@ int DeDepthwiseConv2D::GetActivationType() const { } void DeDepthwiseConv2D::SetFormat(int format) { - this->primitive_->value.AsDeDepthwiseConv2D()->format = (schema::Format)format; + this->primitive_->value.AsDeDepthwiseConv2D()->format = static_cast(format); } void DeDepthwiseConv2D::SetChannelIn(int channel_in) { this->primitive_->value.AsDeDepthwiseConv2D()->channelIn = channel_in; @@ -58,7 +58,7 @@ void DeDepthwiseConv2D::SetKernelH(int kernel_h) { this->primitive_->value.AsDeD void DeDepthwiseConv2D::SetStrideW(int stride_w) { this->primitive_->value.AsDeDepthwiseConv2D()->strideW = stride_w; } void DeDepthwiseConv2D::SetStrideH(int stride_h) { this->primitive_->value.AsDeDepthwiseConv2D()->strideH = stride_h; } void DeDepthwiseConv2D::SetPadMode(int pad_mode) { - this->primitive_->value.AsDeDepthwiseConv2D()->padMode = (schema::PadMode)pad_mode; + 
this->primitive_->value.AsDeDepthwiseConv2D()->padMode = static_cast<schema::PadMode>(pad_mode);
 }
 void DeDepthwiseConv2D::SetPadUp(int pad_up) { this->primitive_->value.AsDeDepthwiseConv2D()->padUp = pad_up; }
 void DeDepthwiseConv2D::SetPadDown(int pad_down) { this->primitive_->value.AsDeDepthwiseConv2D()->padDown = pad_down; }
@@ -70,7 +70,7 @@ void DeDepthwiseConv2D::SetDilateW(int dilate_w) { this->primitive_->value.AsDeD
 void DeDepthwiseConv2D::SetDilateH(int dilate_h) { this->primitive_->value.AsDeDepthwiseConv2D()->dilateH = dilate_h; }
 void DeDepthwiseConv2D::SetHasBias(bool has_bias) { this->primitive_->value.AsDeDepthwiseConv2D()->hasBias = has_bias; }
 void DeDepthwiseConv2D::SetActivationType(int activation_type) {
-  this->primitive_->value.AsDeDepthwiseConv2D()->activationType = (schema::ActivationType)activation_type;
+  this->primitive_->value.AsDeDepthwiseConv2D()->activationType = static_cast<schema::ActivationType>(activation_type);
 }
 
 #else
diff --git a/mindspore/lite/src/ops/depthwise_conv2d.cc b/mindspore/lite/src/ops/depthwise_conv2d.cc
index 6e5f920a3ad..fe66ec13e4e 100644
--- a/mindspore/lite/src/ops/depthwise_conv2d.cc
+++ b/mindspore/lite/src/ops/depthwise_conv2d.cc
@@ -48,7 +48,7 @@ bool DepthwiseConv2D::GetHasBias() const { return this->primitive_->value.AsDept
 int DepthwiseConv2D::GetActivationType() const { return this->primitive_->value.AsDepthwiseConv2D()->activationType; }
 
 void DepthwiseConv2D::SetFormat(int format) {
-  this->primitive_->value.AsDepthwiseConv2D()->format = (schema::Format)format;
+  this->primitive_->value.AsDepthwiseConv2D()->format = static_cast<schema::Format>(format);
 }
 void DepthwiseConv2D::SetChannelIn(int channel_in) {
   this->primitive_->value.AsDepthwiseConv2D()->channelIn = channel_in;
@@ -61,7 +61,7 @@ void DepthwiseConv2D::SetKernelH(int kernel_h) { this->primitive_->value.AsDepth
 void DepthwiseConv2D::SetStrideW(int stride_w) { this->primitive_->value.AsDepthwiseConv2D()->strideW = stride_w; }
 void DepthwiseConv2D::SetStrideH(int stride_h) { this->primitive_->value.AsDepthwiseConv2D()->strideH = stride_h; }
 void DepthwiseConv2D::SetPadMode(int pad_mode) {
-  this->primitive_->value.AsDepthwiseConv2D()->padMode = (schema::PadMode)pad_mode;
+  this->primitive_->value.AsDepthwiseConv2D()->padMode = static_cast<schema::PadMode>(pad_mode);
 }
 void DepthwiseConv2D::SetPadUp(int pad_up) { this->primitive_->value.AsDepthwiseConv2D()->padUp = pad_up; }
 void DepthwiseConv2D::SetPadDown(int pad_down) { this->primitive_->value.AsDepthwiseConv2D()->padDown = pad_down; }
@@ -71,7 +71,7 @@ void DepthwiseConv2D::SetDilateW(int dilate_w) { this->primitive_->value.AsDepth
 void DepthwiseConv2D::SetDilateH(int dilate_h) { this->primitive_->value.AsDepthwiseConv2D()->dilateH = dilate_h; }
 void DepthwiseConv2D::SetHasBias(bool has_bias) { this->primitive_->value.AsDepthwiseConv2D()->hasBias = has_bias; }
 void DepthwiseConv2D::SetActivationType(int activation_type) {
-  this->primitive_->value.AsDepthwiseConv2D()->activationType = (schema::ActivationType)activation_type;
+  this->primitive_->value.AsDepthwiseConv2D()->activationType = static_cast<schema::ActivationType>(activation_type);
 }
 
 int DepthwiseConv2D::UnPackAttr(const Primitive &prim, const std::vector &inputs) {
diff --git a/mindspore/lite/src/ops/dequant.cc b/mindspore/lite/src/ops/dequant.cc
index e9ab1bf36c7..13de8103767 100644
--- a/mindspore/lite/src/ops/dequant.cc
+++ b/mindspore/lite/src/ops/dequant.cc
@@ -40,10 +40,6 @@ int Dequant::UnPackAttr(const Primitive &prim, const std::vector &in
       return RET_ERROR;
     }
     this->primitive_->value.value = attr;
-    if (this->primitive_->value.value == nullptr) {
-      MS_LOG(ERROR) << "primitive value is nullptr";
-      return RET_ERROR;
-    }
   }
   return RET_OK;
 }
diff --git a/mindspore/lite/src/ops/full_connection.cc b/mindspore/lite/src/ops/full_connection.cc
index a6e26dc86d9..24be3ce5d15 100644
--- a/mindspore/lite/src/ops/full_connection.cc
+++ b/mindspore/lite/src/ops/full_connection.cc
@@ -32,7 +32,7 @@ void FullConnection::SetHasBias(bool has_bias) { this->primitive_->value.AsFullC
 void FullConnection::SetAxis(int axis) { this->primitive_->value.AsFullConnection()->axis = axis; }
 void FullConnection::SetUseAxis(bool use_axis) { this->primitive_->value.AsFullConnection()->useAxis = use_axis; }
 void FullConnection::SetActivationType(int activationType) {
-  this->primitive_->value.AsFullConnection()->activationType = (schema::ActivationType)activationType;
+  this->primitive_->value.AsFullConnection()->activationType = static_cast<schema::ActivationType>(activationType);
 }
 #else
 int FullConnection::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
diff --git a/mindspore/lite/src/ops/fused_batchnorm.cc b/mindspore/lite/src/ops/fused_batchnorm.cc
index aaad730cb65..f1c79306a56 100644
--- a/mindspore/lite/src/ops/fused_batchnorm.cc
+++ b/mindspore/lite/src/ops/fused_batchnorm.cc
@@ -53,10 +53,6 @@ int FusedBatchNorm::UnPackAttr(const Primitive &prim, const std::vector
     attr->epsilon = GetValue(prim.GetAttr("epsilon"));
     attr->momentum = GetValue(prim.GetAttr("momentum"));
     this->primitive_->value.value = attr;
-    if (this->primitive_->value.value == nullptr) {
-      MS_LOG(ERROR) << "new primitiveT value failed";
-      return RET_ERROR;
-    }
   }
   return RET_OK;
 }
@@ -88,7 +84,9 @@ Registry FusedBatchNormRegistry(schema::PrimitiveType_FusedBatchNorm, FusedBatch
 
 int FusedBatchNorm::InferShape(std::vector inputs_, std::vector outputs_) {
   for (size_t i = 0; i < inputs_.size(); i++) {
-    if (outputs_.size() <= i) break;
+    if (outputs_.size() <= i) {
+      break;
+    }
     outputs_.at(i)->set_shape(inputs_.at(i)->shape());
     outputs_.at(i)->set_data_type(inputs_.at(i)->data_type());
     outputs_.at(i)->set_format(inputs_.at(i)->format());
diff --git a/mindspore/lite/src/ops/primitive_c.h b/mindspore/lite/src/ops/primitive_c.h
index 4494516c155..46ee0d23d29 100644
--- a/mindspore/lite/src/ops/primitive_c.h
+++ b/mindspore/lite/src/ops/primitive_c.h
@@ -64,8 +64,7 @@ class PrimitiveC : public mindspore::Primitive {
 
   // Argument primitive is deliverd into PrimitiveC and will be deleted in ~PrimitiveC().
   // Caller should not delete primitive.
-  explicit PrimitiveC(const std::string &name, schema::PrimitiveT *primitive)
-      : Primitive(name), primitive_(primitive) {}
+  PrimitiveC(const std::string &name, schema::PrimitiveT *primitive) : Primitive(name), primitive_(primitive) {}
 
   PrimitiveC() : Primitive(""), primitive_(nullptr) {}
 
@@ -175,7 +174,7 @@ class PrimitiveC {
 
   template ::value>>
   static PrimitiveC *NewPrimitiveC(const schema::Primitive *primitive) {
-    auto primc = new T();
+    auto primc = new (std::nothrow) T();
     if (primc == nullptr) {
       MS_LOG(ERROR) << "new PrimitiveC failed";
       return nullptr;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/zeroslike_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/zeroslike_fp32.cc
index 265e639540b..9e1adef6d71 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/zeroslike_fp32.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/zeroslike_fp32.cc
@@ -30,10 +30,8 @@ namespace mindspore::kernel {
 int ZerosLikeCPUKernel::Init() { return RET_OK; }
 
 int ZerosLikeCPUKernel::Run() {
-  auto input = in_tensors_.at(0);
-  auto input_data = reinterpret_cast<float *>(input->MutableData());
   auto output_data = reinterpret_cast<float *>(out_tensors_.at(0)->MutableData());
-  ApproximateZerosLike(input_data, output_data, input->ElementsNum());
+  ApproximateZerosLike(output_data, in_tensors_.at(0)->ElementsNum());
   return RET_OK;
 }
 
diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/transpose_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/transpose_int8.cc
index c6794db43d2..dc0ed424912 100644
--- a/mindspore/lite/src/runtime/kernel/arm/int8/transpose_int8.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/int8/transpose_int8.cc
@@ -118,7 +118,7 @@ int TransposeInt8CPUKernel::DoTranspose(int task_id) {
     position = position_ + task_id * transpose_param_->num_axes_;
   }
 
-  auto ret = DoTransposeInt8(in_ptr_, out_ptr_, in_shape_, out_shape_, transpose_param_, thread_offset,
+  auto ret = DoTransposeInt8(in_ptr_, out_ptr_, out_shape_, transpose_param_, thread_offset,
                              thread_offset + num_unit_thread, dim_size, position);
   if (ret != RET_OK) {
     MS_LOG(ERROR) << "Transpose error task_id[" << task_id << "] error_code[" << ret << "]";
diff --git a/mindspore/lite/tools/common/protobuf_utils.cc b/mindspore/lite/tools/common/protobuf_utils.cc
index 206b8122417..764ecfdf71b 100644
--- a/mindspore/lite/tools/common/protobuf_utils.cc
+++ b/mindspore/lite/tools/common/protobuf_utils.cc
@@ -59,6 +59,7 @@ STATUS ReadProtoFromText(const char *file, google::protobuf::Message *message) {
   bool status = google::protobuf::TextFormat::Parse(&input, message);
   if (!status) {
     MS_LOG(ERROR) << "call [google::protobuf::TextFormat::Parse] func status fail, please check your text file.";
+    fs.close();
     return RET_ERROR;
   }
 
diff --git a/mindspore/lite/tools/converter/legacy_optimizer/fusion/matmul_biasadd_fusion_pass.cc b/mindspore/lite/tools/converter/legacy_optimizer/fusion/matmul_biasadd_fusion_pass.cc
index 6c96524cf27..759b2cc8ae5 100644
--- a/mindspore/lite/tools/converter/legacy_optimizer/fusion/matmul_biasadd_fusion_pass.cc
+++ b/mindspore/lite/tools/converter/legacy_optimizer/fusion/matmul_biasadd_fusion_pass.cc
@@ -74,7 +74,7 @@ STATUS MatMulBiasAddFusionPass::DoFusion(MetaGraphT *graph, const std::string &p
   // biasadd node the second tensor is not constant tensor, don't fusion
   auto baNodeInputIndex = baNode->inputIndex;
   if (baNodeInputIndex.size() != BIASADD_OP_INPUT_NUM) {
-    MS_LOG(ERROR) << "%s node tensors number is invalid! ";  // baNode->name.c_str());
+    MS_LOG(ERROR) << "input num is invalid! 
node: " << baNode->name.c_str(); return RET_ERROR; } MS_ASSERT(graph->allTensors.size() > baNodeInputIndex.at(BIASADD_OP_BIAS_INDEX)); @@ -88,7 +88,7 @@ STATUS MatMulBiasAddFusionPass::DoFusion(MetaGraphT *graph, const std::string &p // 1. add biasTensor for matMul auto status = AddFullConnectionBiasTensor(matMulPath, baPath, graph); if (RET_OK != status) { - MS_LOG(ERROR) << "AddFullConnectionBiasTensor failed, %d"; // status); + MS_LOG(ERROR) << "AddFullConnectionBiasTensor failed, ret: " << status; return status; } @@ -113,17 +113,16 @@ STATUS MatMulBiasAddFusionPass::DoFusion(MetaGraphT *graph, const std::string &p MergeNodeAttrFromPost(matMulNode, baNode); status = IsolateOneWayNode(graph, baPath->nodeIdx); if (status != RET_OK) { - MS_LOG(ERROR) << "IsolateOneWayNode failed, subGraph: %zu, node: %zu, error: %d"; - // baPath->subGraphIdx, baPath->nodeIdx, status); + MS_LOG(ERROR) << "IsolateOneWayNode failed, subGraph: " << baPath->subGraphIdx << ", node: " << baPath->nodeIdx + << ", ret: " << status; return status; } // 4. addTranspose node status = InsertTransposeNode(graph, matMulPath); if (status != RET_OK) { - MS_LOG(ERROR) - << "InsertTransposeNode failed, subGraph: %zu, node: %zu, error: %d"; // matMulPath->subGraphIdx, - // matMulPath->nodeIdx, status); + MS_LOG(ERROR) << "InsertTransposeNode failed, subGraph: " << matMulPath->subGraphIdx + << ", node: " << matMulPath->nodeIdx << ", ret: " << status; return status; } return RET_OK; @@ -162,7 +161,7 @@ STATUS MatMulBiasAddFusionPass::InsertTransposeNode(MetaGraphT *graph, const std matmulOpIter = InsertNode(graph, matmulOpIter, kBefore, needInsertIdx, std::move(transNode), &errorCode, TransposeOpCopyer); if (errorCode != RET_OK) { - MS_LOG(ERROR) << "InsertNode failed: %d"; // errorCode); + MS_LOG(ERROR) << "InsertNode failed: " << errorCode; return errorCode; } } @@ -187,7 +186,7 @@ STATUS MatMulBiasAddFusionPass::AddFullConnectionBiasTensor(const std::shared_pt // check biasTensor auto baWeightTensorIdxes = baNode->inputIndex; if (baWeightTensorIdxes.size() != BIASADD_OP_INPUT_NUM) { - MS_LOG(ERROR) << "%s node tensors number is invalid! "; // baNode->name.c_str()); + MS_LOG(ERROR) << "input number is invalid! 
node: " << baNode->name.c_str(); return RET_ERROR; } MS_ASSERT(graph->allTensors.size() > baWeightTensorIdxes.at(BIASADD_OP_BIAS_INDEX)); @@ -196,7 +195,7 @@ STATUS MatMulBiasAddFusionPass::AddFullConnectionBiasTensor(const std::shared_pt auto biasDims = biasTensor->dims; // if biasTensor is a scaler if (biasDims.empty() && biasTensor->data.data() == nullptr) { - MS_LOG(ERROR) << "BiasAdd node %s bias tensor is invalid"; // baNode->name.c_str()); + MS_LOG(ERROR) << "bias tensor is invalid, node: " << baNode->name.c_str(); return RET_ERROR; } if (!biasDims.empty() && biasDims.size() != BIASADD_WEIGHT_SHAPE_SIZE) { diff --git a/mindspore/lite/tools/converter/legacy_optimizer/graph/trans_format_insert_pass.cc b/mindspore/lite/tools/converter/legacy_optimizer/graph/trans_format_insert_pass.cc index a8b306290e5..6554d72ade7 100644 --- a/mindspore/lite/tools/converter/legacy_optimizer/graph/trans_format_insert_pass.cc +++ b/mindspore/lite/tools/converter/legacy_optimizer/graph/trans_format_insert_pass.cc @@ -142,7 +142,7 @@ STATUS TransOpInsertPass::Run(schema::MetaGraphT *graph) { changed = false; for (auto iter = graph->nodes.begin(); iter != graph->nodes.end(); iter++) { auto &node = *iter; - if (node == nullptr && node->primitive == nullptr) { + if (node == nullptr || node->primitive == nullptr) { MS_LOG(ERROR) << "node or primitive null"; return RET_NULL_PTR; } diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_reduce_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_reduce_parser.cc index 23334f85580..8264e36cc58 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_reduce_parser.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_reduce_parser.cc @@ -53,6 +53,7 @@ STATUS CaffeReduceParser::Parse(const caffe::LayerParameter &proto, const caffe: break; case caffe::ReductionParameter_ReductionOp_ASUM: attr->mode = schema::ReduceMode_ReduceASum; + break; default: MS_LOG(ERROR) << "reduce parse params fail, unsupported opration: " << reduce_param.operation(); return RET_ERROR; diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_conv_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_conv_parser.cc index 13e3f0130b7..3c1cbde2e8a 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_conv_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_conv_parser.cc @@ -174,7 +174,7 @@ STATUS OnnxConvParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::Nod attr->activationType = schema::ActivationType_NO_ACTIVATION; } - if (attr != nullptr && attr->group > kSingleGroup && attr->group == attr->channelIn) { + if (attr->group > kSingleGroup && attr->group == attr->channelIn) { if (!ParseGroupConvolution(attr, op)) { MS_LOG(ERROR) << "Convert Convolution to Depthwise failed"; return RET_ERROR; diff --git a/mindspore/lite/tools/converter/quantizer/quantizer.h b/mindspore/lite/tools/converter/quantizer/quantizer.h index 40e4da548ad..3bb576bd686 100644 --- a/mindspore/lite/tools/converter/quantizer/quantizer.h +++ b/mindspore/lite/tools/converter/quantizer/quantizer.h @@ -43,7 +43,7 @@ class Quantizer { public: explicit Quantizer(FuncGraphPtr graph) : funcGraph(std::move(graph)) {} - ~Quantizer() = default; + virtual ~Quantizer() = default; virtual STATUS RemoveFakeQuant();