From 0fa81a6516dc00e972e7a9f5370e9713155f500a Mon Sep 17 00:00:00 2001 From: xuanyue Date: Fri, 30 Jul 2021 11:54:34 +0800 Subject: [PATCH] code check --- .../cpu/nnacl/while_parameter.h | 27 +++++++++++++++++++ .../src/ops/compat/attr_transfer_common.cc | 5 ++-- .../src/ops/compat/attr_transfer_common.h | 2 +- .../lite/src/ops/compat/compat_register.h | 2 +- .../ops/compat/v0/broadcast_to_compat_v0.cc | 2 +- .../lite/src/ops/compat/v0/cast_compat_v0.cc | 3 ++- .../ops/compat/v0/expand_dims_compat_v0.cc | 2 +- .../lite/src/ops/compat/v0/fill_compat_v0.cc | 2 +- .../src/ops/compat/v0/gather_compat_v0.cc | 2 +- .../src/ops/compat/v0/nchw2nhwc_compat_v0.cc | 2 +- .../src/ops/compat/v0/nhwc2nchw_compat_v0.cc | 2 +- .../lite/src/ops/compat/v0/pad_compat_v0.cc | 3 ++- .../src/ops/compat/v0/permute_compat_v0.cc | 2 +- .../lite/src/ops/compat/v0/power_compat_v0.cc | 2 +- .../src/ops/compat/v0/reduce_compat_v0.cc | 2 +- .../src/ops/compat/v0/reshape_compat_v0.cc | 2 +- .../lite/src/ops/compat/v0/slice_compat_v0.cc | 3 ++- .../ops/compat/v0/strided_slice_compat_v0.cc | 2 +- .../lite/src/ops/compat/v0/tile_compat_v0.cc | 2 +- .../lite/src/ops/compat/v0/topk_compat_v0.cc | 3 ++- .../src/ops/compat/v0/transpose_compat_v0.cc | 2 +- .../populate/constant_of_shape_populate.cc | 2 +- .../lite/src/ops/populate/crop_populate.cc | 2 +- .../lite/src/ops/populate/pooling_populate.cc | 14 +++++----- .../populate/space_to_batch_nd_populate.cc | 11 +++++--- .../ops/populate/space_to_batch_populate.cc | 11 +++++--- .../lite/src/ops/populate/splice_populate.cc | 17 ++++++++---- .../v0/constant_of_shape_populate_v0.cc | 2 +- .../src/ops/populate/v0/crop_populate_v0.cc | 2 +- .../ops/populate/v0/layer_norm_populate_v0.cc | 4 +-- .../ops/populate/v0/partial_populate_v0.cc | 7 +---- .../ops/populate/v0/prior_box_populate_v0.cc | 6 ++--- .../src/ops/populate/v0/reduce_populate_v0.cc | 2 +- .../ops/populate/v0/reverse_populate_v0.cc | 2 +- .../src/ops/populate/v0/slice_populate_v0.cc | 2 +- 
.../populate/v0/space_to_batch_populate_v0.cc | 13 +++------ .../src/ops/populate/v0/split_populate_v0.cc | 2 +- .../ops/populate/v0/unsqueeze_populate_v0.cc | 4 +-- .../src/ops/populate/v0/while_populate_v0.cc | 6 +---- .../lite/src/ops/populate/while_populate.cc | 7 +---- mindspore/lite/src/runtime/inner_allocator.cc | 4 +-- .../runtime/kernel/arm/fp16/gather_fp16.cc | 6 ++++- .../tools/converter/parser/inputs_adjust.cc | 17 +++++++----- .../tools/optimizer/graph/node_infershape.cc | 3 ++- .../optimizer/graph/transpose_strategy.cc | 9 ++++--- .../optimizer/graph/unify_format_pass.cc | 9 ++++--- 46 files changed, 138 insertions(+), 100 deletions(-) create mode 100644 mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/while_parameter.h diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/while_parameter.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/while_parameter.h new file mode 100644 index 00000000000..cd4b251a533 --- /dev/null +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/while_parameter.h @@ -0,0 +1,27 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_NNACL_WHILE_PARAMETER_H_ +#define MINDSPORE_NNACL_WHILE_PARAMETER_H_ + +#include "nnacl/op_base.h" + +typedef struct WhileParemeter { + OpParameter op_parameter_; + int body_subgraph_index; + int cond_subgraph_index; +} WhileParemeter; + +#endif  // MINDSPORE_NNACL_WHILE_PARAMETER_H_ diff --git a/mindspore/lite/src/ops/compat/attr_transfer_common.cc b/mindspore/lite/src/ops/compat/attr_transfer_common.cc index ec6679a3e07..8265fedfefa 100644 --- a/mindspore/lite/src/ops/compat/attr_transfer_common.cc +++ b/mindspore/lite/src/ops/compat/attr_transfer_common.cc @@ -21,7 +21,7 @@ namespace mindspore { namespace lite { schema::Tensor *AttrToTensor(void *data, int data_size, bool is_array, TypeId type_id, - std::vector *tensor_bufs) { + std::vector *const tensor_bufs) { if (data == nullptr || tensor_bufs == nullptr) { MS_LOG(ERROR) << "the parameter of this function is nullptr."; return nullptr; } @@ -50,7 +50,8 @@ schema::Tensor *AttrToTensor(void *data, int data_size, bool is_array, TypeId ty fbb.Clear(); return nullptr; } - auto tensor_buf = reinterpret_cast(malloc(fbb.GetSize())); + size_t byte_num = fbb.GetSize(); + auto tensor_buf = reinterpret_cast(malloc(byte_num)); if (tensor_buf == nullptr) { MS_LOG(ERROR) << "malloc primitive_buf_ failed"; fbb.Clear(); diff --git a/mindspore/lite/src/ops/compat/attr_transfer_common.h b/mindspore/lite/src/ops/compat/attr_transfer_common.h index 265db8db22c..dcff0ae2e7b 100644 --- a/mindspore/lite/src/ops/compat/attr_transfer_common.h +++ b/mindspore/lite/src/ops/compat/attr_transfer_common.h @@ -27,7 +27,7 @@ namespace mindspore { namespace lite { schema::Tensor *AttrToTensor(void *data, int data_size, bool is_array, TypeId type_id, - std::vector *tensor_bufs); + std::vector *const tensor_bufs); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/compat/compat_register.h b/mindspore/lite/src/ops/compat/compat_register.h index 83019c04643..87e3f729661 100644 ---
a/mindspore/lite/src/ops/compat/compat_register.h +++ b/mindspore/lite/src/ops/compat/compat_register.h @@ -29,7 +29,7 @@ namespace mindspore { namespace lite { // compatibility, transfer attr to input tensor. typedef int (*TransferAttrFunc)(Model::Node *node, std::vector *tensor, - std::vector *tensor_bufs); + std::vector *const tensor_bufs); class CompatRegistry { public: static CompatRegistry *GetInstance() { diff --git a/mindspore/lite/src/ops/compat/v0/broadcast_to_compat_v0.cc b/mindspore/lite/src/ops/compat/v0/broadcast_to_compat_v0.cc index cb396dc10cf..b8e0c7902fb 100644 --- a/mindspore/lite/src/ops/compat/v0/broadcast_to_compat_v0.cc +++ b/mindspore/lite/src/ops/compat/v0/broadcast_to_compat_v0.cc @@ -20,7 +20,7 @@ namespace mindspore { namespace lite { int TransferBroadcastToAttr(Model::Node *node, std::vector *dst_tensors, - std::vector *tensor_bufs) { + std::vector *const tensor_bufs) { if (node == nullptr || node->primitive_ == nullptr || dst_tensors == nullptr || tensor_bufs == nullptr) { MS_LOG(ERROR) << "the parameter of this function is nullptr."; return RET_ERROR; diff --git a/mindspore/lite/src/ops/compat/v0/cast_compat_v0.cc b/mindspore/lite/src/ops/compat/v0/cast_compat_v0.cc index 4e211821ec7..3c22c874d8d 100644 --- a/mindspore/lite/src/ops/compat/v0/cast_compat_v0.cc +++ b/mindspore/lite/src/ops/compat/v0/cast_compat_v0.cc @@ -19,7 +19,8 @@ namespace mindspore { namespace lite { -int TransferCastAttr(Model::Node *node, std::vector *dst_tensors, std::vector *tensor_bufs) { +int TransferCastAttr(Model::Node *node, std::vector *dst_tensors, + std::vector *const tensor_bufs) { if (node == nullptr || node->primitive_ == nullptr || dst_tensors == nullptr || tensor_bufs == nullptr) { MS_LOG(ERROR) << "the parameter of this function is nullptr."; return RET_ERROR; diff --git a/mindspore/lite/src/ops/compat/v0/expand_dims_compat_v0.cc b/mindspore/lite/src/ops/compat/v0/expand_dims_compat_v0.cc index f2ca4983ce5..eb97b279922 100644 --- 
a/mindspore/lite/src/ops/compat/v0/expand_dims_compat_v0.cc +++ b/mindspore/lite/src/ops/compat/v0/expand_dims_compat_v0.cc @@ -20,7 +20,7 @@ namespace mindspore { namespace lite { int TransferExpandDimsAttr(Model::Node *node, std::vector *dst_tensors, - std::vector *tensor_bufs) { + std::vector *const tensor_bufs) { if (node == nullptr || dst_tensors == nullptr || tensor_bufs == nullptr) { MS_LOG(ERROR) << "the parameter of this function is nullptr."; return RET_ERROR; diff --git a/mindspore/lite/src/ops/compat/v0/fill_compat_v0.cc b/mindspore/lite/src/ops/compat/v0/fill_compat_v0.cc index f46478778dd..0d9eb526a93 100644 --- a/mindspore/lite/src/ops/compat/v0/fill_compat_v0.cc +++ b/mindspore/lite/src/ops/compat/v0/fill_compat_v0.cc @@ -20,7 +20,7 @@ namespace mindspore { namespace lite { int TransferFillToAttr(Model::Node *node, std::vector *dst_tensors, - std::vector *tensor_bufs) { + std::vector *const tensor_bufs) { if (node == nullptr || node->primitive_ == nullptr || dst_tensors == nullptr || tensor_bufs == nullptr) { MS_LOG(ERROR) << "the parameter of this function is nullptr."; return RET_ERROR; diff --git a/mindspore/lite/src/ops/compat/v0/gather_compat_v0.cc b/mindspore/lite/src/ops/compat/v0/gather_compat_v0.cc index b3b064ee06d..0547457bd07 100644 --- a/mindspore/lite/src/ops/compat/v0/gather_compat_v0.cc +++ b/mindspore/lite/src/ops/compat/v0/gather_compat_v0.cc @@ -20,7 +20,7 @@ namespace mindspore { namespace lite { int TransferGatherAttr(Model::Node *node, std::vector *dst_tensors, - std::vector *tensor_bufs) { + std::vector *const tensor_bufs) { if (node == nullptr || node->primitive_ == nullptr || dst_tensors == nullptr || tensor_bufs == nullptr) { MS_LOG(ERROR) << "the parameter of this function is nullptr."; return RET_ERROR; diff --git a/mindspore/lite/src/ops/compat/v0/nchw2nhwc_compat_v0.cc b/mindspore/lite/src/ops/compat/v0/nchw2nhwc_compat_v0.cc index 1164ebd821e..a3967ec15c7 100644 --- 
a/mindspore/lite/src/ops/compat/v0/nchw2nhwc_compat_v0.cc +++ b/mindspore/lite/src/ops/compat/v0/nchw2nhwc_compat_v0.cc @@ -20,7 +20,7 @@ namespace mindspore { namespace lite { int TransferNchw2NhwcAttr(Model::Node *node, std::vector *dst_tensors, - std::vector *tensor_bufs) { + std::vector *const tensor_bufs) { if (node == nullptr || node->primitive_ == nullptr || dst_tensors == nullptr || tensor_bufs == nullptr) { MS_LOG(ERROR) << "the parameter of this function is nullptr."; return RET_ERROR; diff --git a/mindspore/lite/src/ops/compat/v0/nhwc2nchw_compat_v0.cc b/mindspore/lite/src/ops/compat/v0/nhwc2nchw_compat_v0.cc index 0d6297a7f25..2a3d3d3024e 100644 --- a/mindspore/lite/src/ops/compat/v0/nhwc2nchw_compat_v0.cc +++ b/mindspore/lite/src/ops/compat/v0/nhwc2nchw_compat_v0.cc @@ -20,7 +20,7 @@ namespace mindspore { namespace lite { int TransferNhwc2NchwAttr(Model::Node *node, std::vector *dst_tensors, - std::vector *tensor_bufs) { + std::vector *const tensor_bufs) { if (node == nullptr || node->primitive_ == nullptr || dst_tensors == nullptr || tensor_bufs == nullptr) { MS_LOG(ERROR) << "the parameter of this function is nullptr."; return RET_ERROR; diff --git a/mindspore/lite/src/ops/compat/v0/pad_compat_v0.cc b/mindspore/lite/src/ops/compat/v0/pad_compat_v0.cc index bab8bb545e3..bed45bf9799 100644 --- a/mindspore/lite/src/ops/compat/v0/pad_compat_v0.cc +++ b/mindspore/lite/src/ops/compat/v0/pad_compat_v0.cc @@ -19,7 +19,8 @@ namespace mindspore { namespace lite { -int TransferPadAttr(Model::Node *node, std::vector *dst_tensors, std::vector *tensor_bufs) { +int TransferPadAttr(Model::Node *node, std::vector *dst_tensors, + std::vector *const tensor_bufs) { if (node == nullptr || node->primitive_ == nullptr || dst_tensors == nullptr || tensor_bufs == nullptr) { MS_LOG(ERROR) << "the parameter of this function is nullptr."; return RET_ERROR; diff --git a/mindspore/lite/src/ops/compat/v0/permute_compat_v0.cc b/mindspore/lite/src/ops/compat/v0/permute_compat_v0.cc 
index 9c59a9af232..8f53e9f0fad 100644 --- a/mindspore/lite/src/ops/compat/v0/permute_compat_v0.cc +++ b/mindspore/lite/src/ops/compat/v0/permute_compat_v0.cc @@ -20,7 +20,7 @@ namespace mindspore { namespace lite { int TransferPermuteAttr(Model::Node *node, std::vector *dst_tensors, - std::vector *tensor_bufs) { + std::vector *const tensor_bufs) { if (node == nullptr || node->primitive_ == nullptr || dst_tensors == nullptr || tensor_bufs == nullptr) { MS_LOG(ERROR) << "the parameter of this function is nullptr."; return RET_ERROR; diff --git a/mindspore/lite/src/ops/compat/v0/power_compat_v0.cc b/mindspore/lite/src/ops/compat/v0/power_compat_v0.cc index b3cca54a359..8e6403cf5fe 100644 --- a/mindspore/lite/src/ops/compat/v0/power_compat_v0.cc +++ b/mindspore/lite/src/ops/compat/v0/power_compat_v0.cc @@ -20,7 +20,7 @@ namespace mindspore { namespace lite { int TransferPowerToAttr(Model::Node *node, std::vector *dst_tensors, - std::vector *tensor_bufs) { + std::vector *const tensor_bufs) { if (node == nullptr || node->primitive_ == nullptr || dst_tensors == nullptr || tensor_bufs == nullptr) { MS_LOG(ERROR) << "the parameter of this function is nullptr."; return RET_ERROR; diff --git a/mindspore/lite/src/ops/compat/v0/reduce_compat_v0.cc b/mindspore/lite/src/ops/compat/v0/reduce_compat_v0.cc index 43b38ff08c7..5f25fc8716b 100644 --- a/mindspore/lite/src/ops/compat/v0/reduce_compat_v0.cc +++ b/mindspore/lite/src/ops/compat/v0/reduce_compat_v0.cc @@ -20,7 +20,7 @@ namespace mindspore { namespace lite { int TransferReduceToAttr(Model::Node *node, std::vector *dst_tensors, - std::vector *tensor_bufs) { + std::vector *const tensor_bufs) { if (node == nullptr || node->primitive_ == nullptr || dst_tensors == nullptr || tensor_bufs == nullptr) { MS_LOG(ERROR) << "the parameter of this function is nullptr."; return RET_ERROR; diff --git a/mindspore/lite/src/ops/compat/v0/reshape_compat_v0.cc b/mindspore/lite/src/ops/compat/v0/reshape_compat_v0.cc index 76532180d5f..07b1d501e51 
100644 --- a/mindspore/lite/src/ops/compat/v0/reshape_compat_v0.cc +++ b/mindspore/lite/src/ops/compat/v0/reshape_compat_v0.cc @@ -20,7 +20,7 @@ namespace mindspore { namespace lite { int TransferReshapeAttr(Model::Node *node, std::vector *dst_tensors, - std::vector *tensor_bufs) { + std::vector *const tensor_bufs) { if (node == nullptr || node->primitive_ == nullptr || dst_tensors == nullptr || tensor_bufs == nullptr) { MS_LOG(ERROR) << "the parameter of this function is nullptr."; return RET_ERROR; diff --git a/mindspore/lite/src/ops/compat/v0/slice_compat_v0.cc b/mindspore/lite/src/ops/compat/v0/slice_compat_v0.cc index a1e69ff6ce2..efbf3019e86 100644 --- a/mindspore/lite/src/ops/compat/v0/slice_compat_v0.cc +++ b/mindspore/lite/src/ops/compat/v0/slice_compat_v0.cc @@ -19,7 +19,8 @@ namespace mindspore { namespace lite { -int TransferSliceAttr(Model::Node *node, std::vector *dst_tensors, std::vector *tensor_bufs) { +int TransferSliceAttr(Model::Node *node, std::vector *dst_tensors, + std::vector *const tensor_bufs) { if (node == nullptr || dst_tensors == nullptr || tensor_bufs == nullptr) { MS_LOG(ERROR) << "the parameter of this function is nullptr."; return RET_ERROR; diff --git a/mindspore/lite/src/ops/compat/v0/strided_slice_compat_v0.cc b/mindspore/lite/src/ops/compat/v0/strided_slice_compat_v0.cc index 1db53a5bd30..69471b4147a 100644 --- a/mindspore/lite/src/ops/compat/v0/strided_slice_compat_v0.cc +++ b/mindspore/lite/src/ops/compat/v0/strided_slice_compat_v0.cc @@ -20,7 +20,7 @@ namespace mindspore { namespace lite { int TransferStridedSliceAttr(Model::Node *node, std::vector *dst_tensors, - std::vector *tensor_bufs) { + std::vector *const tensor_bufs) { if (node == nullptr || node->primitive_ == nullptr || dst_tensors == nullptr || tensor_bufs == nullptr) { MS_LOG(ERROR) << "the parameter of this function is nullptr."; return RET_ERROR; diff --git a/mindspore/lite/src/ops/compat/v0/tile_compat_v0.cc b/mindspore/lite/src/ops/compat/v0/tile_compat_v0.cc 
index e675f05552f..7107859c4cc 100644 --- a/mindspore/lite/src/ops/compat/v0/tile_compat_v0.cc +++ b/mindspore/lite/src/ops/compat/v0/tile_compat_v0.cc @@ -20,7 +20,7 @@ namespace mindspore { namespace lite { int TransferTileToAttr(Model::Node *node, std::vector *dst_tensors, - std::vector *tensor_bufs) { + std::vector *const tensor_bufs) { if (node == nullptr || node->primitive_ == nullptr || dst_tensors == nullptr || tensor_bufs == nullptr) { MS_LOG(ERROR) << "the parameter of this function is nullptr."; return RET_ERROR; diff --git a/mindspore/lite/src/ops/compat/v0/topk_compat_v0.cc b/mindspore/lite/src/ops/compat/v0/topk_compat_v0.cc index a3d89e7dd0c..3785abc2a32 100644 --- a/mindspore/lite/src/ops/compat/v0/topk_compat_v0.cc +++ b/mindspore/lite/src/ops/compat/v0/topk_compat_v0.cc @@ -19,7 +19,8 @@ namespace mindspore { namespace lite { -int TransferTopkAttr(Model::Node *node, std::vector *dst_tensors, std::vector *tensor_bufs) { +int TransferTopkAttr(Model::Node *node, std::vector *dst_tensors, + std::vector *const tensor_bufs) { if (node == nullptr || dst_tensors == nullptr || tensor_bufs == nullptr) { MS_LOG(ERROR) << "the parameter of this function is nullptr."; return RET_ERROR; diff --git a/mindspore/lite/src/ops/compat/v0/transpose_compat_v0.cc b/mindspore/lite/src/ops/compat/v0/transpose_compat_v0.cc index 3e722498d27..34ccee77b48 100644 --- a/mindspore/lite/src/ops/compat/v0/transpose_compat_v0.cc +++ b/mindspore/lite/src/ops/compat/v0/transpose_compat_v0.cc @@ -20,7 +20,7 @@ namespace mindspore { namespace lite { int TransferTransposeAttr(Model::Node *node, std::vector *dst_tensors, - std::vector *tensor_bufs) { + std::vector *const tensor_bufs) { if (node == nullptr || node->primitive_ == nullptr || dst_tensors == nullptr || tensor_bufs == nullptr) { MS_LOG(ERROR) << "the parameter of this function is nullptr."; return RET_ERROR; diff --git a/mindspore/lite/src/ops/populate/constant_of_shape_populate.cc 
b/mindspore/lite/src/ops/populate/constant_of_shape_populate.cc index 66f4554b4e5..25e721dee69 100644 --- a/mindspore/lite/src/ops/populate/constant_of_shape_populate.cc +++ b/mindspore/lite/src/ops/populate/constant_of_shape_populate.cc @@ -53,7 +53,7 @@ OpParameter *PopulateConstantOfShapeParameter(const void *prim) { param->value_.f32_value_ = *(prim_val->begin()); break; case kNumberTypeInt32: - param->value_.int32_value_ = *(prim_val->begin()); + param->value_.int32_value_ = static_cast(*(prim_val->begin())); break; default: MS_LOG(ERROR) << "The value of constant of shape is invalid"; diff --git a/mindspore/lite/src/ops/populate/crop_populate.cc b/mindspore/lite/src/ops/populate/crop_populate.cc index 94edf794c97..739e1f5ac60 100644 --- a/mindspore/lite/src/ops/populate/crop_populate.cc +++ b/mindspore/lite/src/ops/populate/crop_populate.cc @@ -49,7 +49,7 @@ OpParameter *PopulateCropParameter(const void *prim) { param->op_parameter_.type_ = primitive->value_type(); param->axis_ = value->axis(); - param->offset_size_ = param_offset->size(); + param->offset_size_ = static_cast(param_offset->size()); for (size_t i = 0; i < param_offset->size(); ++i) { param->offset_[i] = *(param_offset->begin() + i); } diff --git a/mindspore/lite/src/ops/populate/pooling_populate.cc b/mindspore/lite/src/ops/populate/pooling_populate.cc index 0b9530c50fb..29adecdecaa 100644 --- a/mindspore/lite/src/ops/populate/pooling_populate.cc +++ b/mindspore/lite/src/ops/populate/pooling_populate.cc @@ -20,8 +20,10 @@ using mindspore::schema::PrimitiveType_MaxPoolFusion; namespace mindspore { namespace lite { -constexpr auto kMinShapeSize = 2; -constexpr auto kMinPadSize = 4; +constexpr size_t kMinShapeSize = 2; +constexpr size_t kMinPadSize = 4; +constexpr int kOffsetTwo = 2; +constexpr int kOffsetThree = 3; OpParameter *PopulateAvgPoolParameter(const void *primitive) { auto pooling_prim = static_cast(primitive); MS_ASSERT(pooling_prim != nullptr); @@ -53,8 +55,8 @@ OpParameter 
*PopulateAvgPoolParameter(const void *primitive) { if (pad != nullptr && pad->size() >= kMinPadSize) { param->pad_u_ = static_cast(*(pad->begin())); param->pad_d_ = static_cast(*(pad->begin() + 1)); - param->pad_l_ = static_cast(*(pad->begin() + 2)); - param->pad_r_ = static_cast(*(pad->begin() + 3)); + param->pad_l_ = static_cast(*(pad->begin() + kOffsetTwo)); + param->pad_r_ = static_cast(*(pad->begin() + kOffsetThree)); } if (!param->global_) { auto kernel_size = value->kernel_size(); @@ -138,8 +140,8 @@ OpParameter *PopulateMaxPoolParameter(const void *primitive) { if (pad != nullptr && pad->size() >= kMinPadSize) { param->pad_u_ = static_cast(*(pad->begin())); param->pad_d_ = static_cast(*(pad->begin() + 1)); - param->pad_l_ = static_cast(*(pad->begin() + 2)); - param->pad_r_ = static_cast(*(pad->begin() + 3)); + param->pad_l_ = static_cast(*(pad->begin() + kOffsetTwo)); + param->pad_r_ = static_cast(*(pad->begin() + kOffsetThree)); } } diff --git a/mindspore/lite/src/ops/populate/space_to_batch_nd_populate.cc b/mindspore/lite/src/ops/populate/space_to_batch_nd_populate.cc index 33886ee8d47..141758bd9b6 100644 --- a/mindspore/lite/src/ops/populate/space_to_batch_nd_populate.cc +++ b/mindspore/lite/src/ops/populate/space_to_batch_nd_populate.cc @@ -59,10 +59,13 @@ OpParameter *PopulateSpaceToBatchNDParameter(const void *prim) { free(param); return nullptr; } - if (fb_paddings->size() == 0 || - ((*(fb_paddings->begin())) != nullptr && (*(fb_paddings->begin()))->data() != nullptr && - static_cast(fb_paddings->size() * (*(fb_paddings->begin()))->data()->size()) > - std::numeric_limits::max() / sizeof(int64_t))) { + if (fb_paddings->size() == 0 || *(fb_paddings->begin()) == nullptr || (*(fb_paddings->begin()))->data() == nullptr) { + MS_LOG(ERROR) << "exit attr is nullptr."; + free(param); + return nullptr; + } + size_t num = static_cast(fb_paddings->size() * (*(fb_paddings->begin()))->data()->size()); + if (num > std::numeric_limits::max() / sizeof(int64_t)) { 
MS_LOG(ERROR) << "The value of paddings.size() is zero or too big"; free(param); return nullptr; diff --git a/mindspore/lite/src/ops/populate/space_to_batch_populate.cc b/mindspore/lite/src/ops/populate/space_to_batch_populate.cc index 0d12f1e95b0..cfb07e9cd43 100644 --- a/mindspore/lite/src/ops/populate/space_to_batch_populate.cc +++ b/mindspore/lite/src/ops/populate/space_to_batch_populate.cc @@ -62,10 +62,13 @@ OpParameter *PopulateSpaceToBatchParameter(const void *prim) { free(param); return nullptr; } - if (fb_paddings->size() == 0 || - ((*(fb_paddings->begin())) != nullptr && (*(fb_paddings->begin()))->data() != nullptr && - static_cast(fb_paddings->size() * (*(fb_paddings->begin()))->data()->size()) > - std::numeric_limits::max() / sizeof(int64_t))) { + if (fb_paddings->size() == 0 || *(fb_paddings->begin()) == nullptr || (*(fb_paddings->begin()))->data() == nullptr) { + MS_LOG(ERROR) << "exit attr is nullptr."; + free(param); + return nullptr; + } + size_t num = static_cast(fb_paddings->size() * (*(fb_paddings->begin()))->data()->size()); + if (num > std::numeric_limits::max() / sizeof(int64_t)) { MS_LOG(ERROR) << "The value of paddings.size() is zero or too big"; free(param); return nullptr; diff --git a/mindspore/lite/src/ops/populate/splice_populate.cc b/mindspore/lite/src/ops/populate/splice_populate.cc index 97de2b0439d..2e3a8ef1efa 100644 --- a/mindspore/lite/src/ops/populate/splice_populate.cc +++ b/mindspore/lite/src/ops/populate/splice_populate.cc @@ -44,6 +44,11 @@ OpParameter *PopulateSpliceParameter(const void *prim) { return nullptr; } std::vector primitive_context(context->begin(), context->end()); + if (primitive_context.size() > std::numeric_limits::max()) { + MS_LOG(ERROR) << "size is too big."; + free(param); + return nullptr; + } param->context_dim_ = static_cast(primitive_context.size()); // malloc && memset for context @@ -57,7 +62,7 @@ OpParameter *PopulateSpliceParameter(const void *prim) { int src_to_dst_row_offset = INT32_MIN; 
memset(param->context_, 0, param->context_dim_ * sizeof(int)); for (int i = 0; i < param->context_dim_; ++i) { - param->context_[i] = primitive_context.at(i); + param->context_[i] = primitive_context[i]; src_to_dst_row_offset = std::max(src_to_dst_row_offset, std::abs(primitive_context.at(i))); } @@ -69,6 +74,12 @@ OpParameter *PopulateSpliceParameter(const void *prim) { return nullptr; } std::vector primitive_forward_indexes(forward_indexes->begin(), forward_indexes->end()); + if (primitive_forward_indexes.size() > std::numeric_limits::max()) { + MS_LOG(ERROR) << "size is too big."; + free(param->context_); + free(param); + return nullptr; + } param->forward_indexes_dim_ = static_cast(primitive_forward_indexes.size()); // malloc && memset for forward_indexes @@ -81,10 +92,6 @@ OpParameter *PopulateSpliceParameter(const void *prim) { } memset(param->forward_indexes_, 0, param->forward_indexes_dim_ * sizeof(int)); memcpy(param->forward_indexes_, primitive_forward_indexes.data(), param->forward_indexes_dim_ * sizeof(int)); - - for (int i = 0; i < param->context_dim_; ++i) { - param->context_[i] = primitive_context.at(i); - } param->output_dim_ = value->output_dim(); return reinterpret_cast(param); } diff --git a/mindspore/lite/src/ops/populate/v0/constant_of_shape_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/constant_of_shape_populate_v0.cc index 874e479121e..e11957f2322 100644 --- a/mindspore/lite/src/ops/populate/v0/constant_of_shape_populate_v0.cc +++ b/mindspore/lite/src/ops/populate/v0/constant_of_shape_populate_v0.cc @@ -52,7 +52,7 @@ OpParameter *PopulateConstantOfShapeParameter(const void *prim) { param->value_.f32_value_ = value->data()[0]; break; case kNumberTypeInt32: - param->value_.int32_value_ = value->data()[0]; + param->value_.int32_value_ = static_cast(value->data()[0]); break; default: MS_LOG(ERROR) << "The value of constant of shape is invalid"; diff --git a/mindspore/lite/src/ops/populate/v0/crop_populate_v0.cc 
b/mindspore/lite/src/ops/populate/v0/crop_populate_v0.cc index 39ef66d7a15..3dda02adf49 100644 --- a/mindspore/lite/src/ops/populate/v0/crop_populate_v0.cc +++ b/mindspore/lite/src/ops/populate/v0/crop_populate_v0.cc @@ -46,7 +46,7 @@ OpParameter *PopulateCropParameter(const void *prim) { memset(crop_param, 0, sizeof(CropParameter)); crop_param->op_parameter_.type_ = schema::PrimitiveType_Crop; crop_param->axis_ = crop_prim->axis(); - crop_param->offset_size_ = param_offset->size(); + crop_param->offset_size_ = static_cast(param_offset->size()); for (size_t i = 0; i < param_offset->size(); ++i) { crop_param->offset_[i] = *(param_offset->begin() + i); } diff --git a/mindspore/lite/src/ops/populate/v0/layer_norm_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/layer_norm_populate_v0.cc index 4782b5318fa..9c118978c23 100644 --- a/mindspore/lite/src/ops/populate/v0/layer_norm_populate_v0.cc +++ b/mindspore/lite/src/ops/populate/v0/layer_norm_populate_v0.cc @@ -41,8 +41,8 @@ OpParameter *PopulateLayerNormParameterV0(const void *prim) { } memset(layer_norm_parameter, 0, sizeof(LayerNormParameter)); layer_norm_parameter->op_parameter_.type_ = schema::PrimitiveType_LayerNormFusion; - layer_norm_parameter->begin_norm_axis_ = -normalized_shape->size(); - layer_norm_parameter->begin_params_axis_ = -normalized_shape->size(); + layer_norm_parameter->begin_norm_axis_ = -static_cast(normalized_shape->size()); + layer_norm_parameter->begin_params_axis_ = -static_cast(normalized_shape->size()); layer_norm_parameter->epsilon_ = layer_norm_prim->epsilon(); layer_norm_parameter->elementwise_affine_ = layer_norm_prim->elementwiseAffine(); diff --git a/mindspore/lite/src/ops/populate/v0/partial_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/partial_populate_v0.cc index 158701b70bf..f737b8cd0a5 100644 --- a/mindspore/lite/src/ops/populate/v0/partial_populate_v0.cc +++ b/mindspore/lite/src/ops/populate/v0/partial_populate_v0.cc @@ -16,16 +16,11 @@ #include 
"schema/model_v0_generated.h" #include "src/ops/populate/populate_register.h" +#include "nnacl/partial_fusion_parameter.h" namespace mindspore { namespace lite { namespace { -struct PartialParameter { - OpParameter op_parameter_; - int sub_graph_index_ = 0; -}; -using PartialParameter = PartialParameter; - OpParameter *PopulatePartialParameter(const void *prim) { auto *primitive = static_cast(prim); MS_ASSERT(primitive != nullptr); diff --git a/mindspore/lite/src/ops/populate/v0/prior_box_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/prior_box_populate_v0.cc index 146c34fb085..11ee22bdcc2 100644 --- a/mindspore/lite/src/ops/populate/v0/prior_box_populate_v0.cc +++ b/mindspore/lite/src/ops/populate/v0/prior_box_populate_v0.cc @@ -48,7 +48,7 @@ OpParameter *PopulatePriorBoxParameter(const void *prim) { free(prior_box_param); return nullptr; } - prior_box_param->min_sizes_size = min_sizes->size(); + prior_box_param->min_sizes_size = static_cast(min_sizes->size()); memcpy(prior_box_param->min_sizes, min_sizes->data(), min_sizes->size() * sizeof(int32_t)); auto max_sizes = prior_box_prim->max_sizes(); @@ -62,7 +62,7 @@ OpParameter *PopulatePriorBoxParameter(const void *prim) { free(prior_box_param); return nullptr; } - prior_box_param->max_sizes_size = max_sizes->size(); + prior_box_param->max_sizes_size = static_cast(max_sizes->size()); memcpy(prior_box_param->max_sizes, max_sizes->data(), max_sizes->size() * sizeof(int32_t)); auto aspect_ratios = prior_box_prim->aspect_ratios(); @@ -76,7 +76,7 @@ OpParameter *PopulatePriorBoxParameter(const void *prim) { free(prior_box_param); return nullptr; } - prior_box_param->aspect_ratios_size = aspect_ratios->size(); + prior_box_param->aspect_ratios_size = static_cast(aspect_ratios->size()); memcpy(prior_box_param->aspect_ratios, aspect_ratios->data(), aspect_ratios->size() * sizeof(float)); auto variances = prior_box_prim->variances(); diff --git a/mindspore/lite/src/ops/populate/v0/reduce_populate_v0.cc 
b/mindspore/lite/src/ops/populate/v0/reduce_populate_v0.cc index ee64d277bb4..a71ea372a28 100644 --- a/mindspore/lite/src/ops/populate/v0/reduce_populate_v0.cc +++ b/mindspore/lite/src/ops/populate/v0/reduce_populate_v0.cc @@ -37,7 +37,7 @@ OpParameter *PopulateReduceParameter(const void *prim) { memset(reduce_param, 0, sizeof(ReduceParameter)); reduce_param->op_parameter_.type_ = schema::PrimitiveType_ReduceFusion; - reduce_param->keep_dims_ = reduce_prim->keepDims(); + reduce_param->keep_dims_ = static_cast(reduce_prim->keepDims()); reduce_param->reduce_to_end_ = reduce_prim->reduceToEnd(); reduce_param->coeff = reduce_prim->coeff(); auto axisVector = reduce_prim->axes(); diff --git a/mindspore/lite/src/ops/populate/v0/reverse_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/reverse_populate_v0.cc index 87c1e32c5a4..34ed4bdefea 100644 --- a/mindspore/lite/src/ops/populate/v0/reverse_populate_v0.cc +++ b/mindspore/lite/src/ops/populate/v0/reverse_populate_v0.cc @@ -42,7 +42,7 @@ OpParameter *PopulateReverseParameter(const void *prim) { free(reverse_param); return nullptr; } - reverse_param->num_axis_ = flatAxis->size(); + reverse_param->num_axis_ = static_cast(flatAxis->size()); int i = 0; for (int flatAxi : *flatAxis) { reverse_param->axis_[i++] = flatAxi; diff --git a/mindspore/lite/src/ops/populate/v0/slice_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/slice_populate_v0.cc index d95320a9292..aa7ba1b9ba2 100644 --- a/mindspore/lite/src/ops/populate/v0/slice_populate_v0.cc +++ b/mindspore/lite/src/ops/populate/v0/slice_populate_v0.cc @@ -45,7 +45,7 @@ OpParameter *PopulateSliceParameter(const void *prim) { return nullptr; } for (size_t i = 0; i < param_axis->size(); ++i) { - slice_param->axis_[i] = param_axis->Get(i); + slice_param->axis_[i] = static_cast(param_axis->Get(i)); } } else { // use default axes diff --git a/mindspore/lite/src/ops/populate/v0/space_to_batch_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/space_to_batch_populate_v0.cc index 
1fba85e7d72..9ea85c557ee 100644 --- a/mindspore/lite/src/ops/populate/v0/space_to_batch_populate_v0.cc +++ b/mindspore/lite/src/ops/populate/v0/space_to_batch_populate_v0.cc @@ -42,31 +42,26 @@ OpParameter *PopulateSpaceToBatchParameter(const void *prim) { free(space_batch_param); return nullptr; } - space_batch_param->m_ = block_sizes->size(); - if (((size_t)block_sizes->size()) > COMM_SHAPE_SIZE) { + space_batch_param->m_ = static_cast(block_sizes->size()); + if (block_sizes->size() > COMM_SHAPE_SIZE) { MS_LOG(ERROR) << "The value of block_sizes.size() is too big,which cannot be bigger than " << COMM_SHAPE_SIZE; free(space_batch_param); return nullptr; } - memcpy(space_batch_param->block_sizes_, (block_sizes->data()), block_sizes->size() * sizeof(int)); + memcpy(space_batch_param->block_sizes_, block_sizes->data(), block_sizes->size() * sizeof(int)); auto paddings = space_to_batch_prim->paddings(); if (paddings == nullptr) { MS_LOG(ERROR) << "paddings is nullptr"; free(space_batch_param); return nullptr; } - if (((size_t)paddings->size()) > COMM_SHAPE_SIZE) { + if (paddings->size() > COMM_SHAPE_SIZE) { MS_LOG(ERROR) << "The value of paddings.size() is too big,which cannot be bigger than " << COMM_SHAPE_SIZE; free(space_batch_param); return nullptr; } memcpy(space_batch_param->paddings_, (paddings->data()), paddings->size() * sizeof(int)); - space_batch_param->m_ = block_sizes->size(); - for (int i = 0; i < space_batch_param->m_; i++) { - space_batch_param->block_sizes_[i] = block_sizes->data()[i]; - } - return reinterpret_cast(space_batch_param); } } // namespace diff --git a/mindspore/lite/src/ops/populate/v0/split_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/split_populate_v0.cc index 79cfeef7fd6..3f14f6832d1 100644 --- a/mindspore/lite/src/ops/populate/v0/split_populate_v0.cc +++ b/mindspore/lite/src/ops/populate/v0/split_populate_v0.cc @@ -53,7 +53,7 @@ OpParameter *PopulateSplitParameter(const void *prim) { auto split_sizes_vector_ = 
split_prim->sizeSplits(); if (split_sizes_vector_ != nullptr) { int i = 0; - for (auto iter = split_sizes_vector_->begin(); iter != split_sizes_vector_->end(); iter++) { + for (auto iter = split_sizes_vector_->begin(); iter != split_sizes_vector_->end(); ++iter) { split_param->split_sizes_[i++] = *iter; } split_param->split_count_ = split_param->num_split_; diff --git a/mindspore/lite/src/ops/populate/v0/unsqueeze_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/unsqueeze_populate_v0.cc index 413893df7f3..08e50be0c88 100644 --- a/mindspore/lite/src/ops/populate/v0/unsqueeze_populate_v0.cc +++ b/mindspore/lite/src/ops/populate/v0/unsqueeze_populate_v0.cc @@ -47,9 +47,9 @@ OpParameter *PopulateUnsqueezeParameter(const void *prim) { free(unsqueeze_param); return nullptr; } - unsqueeze_param->num_dim_ = flat_axis->size(); + unsqueeze_param->num_dim_ = static_cast<int>(flat_axis->size()); int i = 0; - for (auto iter = flat_axis->begin(); iter != flat_axis->end(); iter++) { + for (auto iter = flat_axis->begin(); iter != flat_axis->end(); ++iter) { unsqueeze_param->dims_[i++] = *iter; } return reinterpret_cast<OpParameter *>(unsqueeze_param); diff --git a/mindspore/lite/src/ops/populate/v0/while_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/while_populate_v0.cc index 47f8417bd80..3636522ae33 100644 --- a/mindspore/lite/src/ops/populate/v0/while_populate_v0.cc +++ b/mindspore/lite/src/ops/populate/v0/while_populate_v0.cc @@ -16,15 +16,11 @@ #include "schema/model_v0_generated.h" #include "src/ops/populate/populate_register.h" +#include "nnacl/while_parameter.h" namespace mindspore { namespace lite { namespace { -struct WhileParemeter { - OpParameter op_parameter_; - int body_subgraph_index = 0; - int cond_subgraph_index = 0; -}; using WhileParemeter = WhileParemeter; OpParameter *PopulateWhileParameter(const void *prim) { auto *primitive = static_cast<const schema::v0::Primitive *>(prim); diff --git a/mindspore/lite/src/ops/populate/while_populate.cc b/mindspore/lite/src/ops/populate/while_populate.cc index
742fba6a8c9..032bffdba82 100644 --- a/mindspore/lite/src/ops/populate/while_populate.cc +++ b/mindspore/lite/src/ops/populate/while_populate.cc @@ -14,16 +14,11 @@ * limitations under the License. */ #include "src/ops/populate/populate_register.h" +#include "nnacl/while_parameter.h" using mindspore::schema::PrimitiveType_While; namespace mindspore { namespace lite { -struct WhileParemeter { - OpParameter op_parameter_; - int body_subgraph_index = 0; - int cond_subgraph_index = 0; -}; -using WhileParemeter = WhileParemeter; OpParameter *PopulateWhileParemeter(const void *prim) { auto primitive = static_cast<const schema::Primitive *>(prim); MS_ASSERT(primitive != nullptr); diff --git a/mindspore/lite/src/runtime/inner_allocator.cc b/mindspore/lite/src/runtime/inner_allocator.cc index bc0d5223bf0..e9db6ad1300 100644 --- a/mindspore/lite/src/runtime/inner_allocator.cc +++ b/mindspore/lite/src/runtime/inner_allocator.cc @@ -19,9 +19,7 @@ #include "src/common/log_adapter.h" namespace mindspore { -std::shared_ptr<Allocator> Allocator::Create() { - return std::shared_ptr<Allocator>(new (std::nothrow) DefaultAllocator()); -} +std::shared_ptr<Allocator> Allocator::Create() { return std::make_shared<DefaultAllocator>(); } DefaultAllocator::DefaultAllocator(size_t aligned_size) { aligned_size_ = aligned_size; } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/gather_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/gather_fp16.cc index 3ae07e0c6ae..e9cbb9d2dd5 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/gather_fp16.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/gather_fp16.cc @@ -29,6 +29,9 @@ using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_Gather; namespace mindspore::kernel { +namespace { +constexpr int kSecondInput = 2; +} GatherFp16CPUKernel::~GatherFp16CPUKernel() { if (input_data_) { ms_context_->allocator->Free(input_data_); @@ -44,7 +47,8 @@ int GatherFp16CPUKernel::Init() { reinterpret_cast<float16_t *>(ms_context_->allocator->Malloc(input_tensor->ElementsNum() * sizeof(float16_t)));
Float32ToFloat16(reinterpret_cast<float *>(input_tensor->data_c()), input_data_, input_tensor->ElementsNum()); } - (reinterpret_cast<GatherParameter *>(op_parameter_))->axis_ = *(reinterpret_cast<int *>(in_tensors_.at(2)->data_c())); + (reinterpret_cast<GatherParameter *>(op_parameter_))->axis_ = + *(reinterpret_cast<int *>(in_tensors_.at(kSecondInput)->data_c())); if (!InferShapeDone()) { return RET_OK; } diff --git a/mindspore/lite/tools/converter/parser/inputs_adjust.cc b/mindspore/lite/tools/converter/parser/inputs_adjust.cc index e444a878c04..b6fe506c8b4 100644 --- a/mindspore/lite/tools/converter/parser/inputs_adjust.cc +++ b/mindspore/lite/tools/converter/parser/inputs_adjust.cc @@ -19,6 +19,11 @@ #include "ops/primitive_c.h" namespace mindspore::lite { +namespace { +constexpr int kBuildInputFlagTwo = 2; +constexpr int kBuildInputFlagThree = 3; +constexpr int kBuildInputFlagFour = 4; +} // namespace STATUS InputAdjust::AddAttrToInput(const FuncGraphPtr &func_graph, const CNodePtr &cnode, int input_num, const std::string &attr_name, int flag) { MS_ASSERT(cnode != nullptr); @@ -96,10 +101,10 @@ bool InputAdjust::Run(const FuncGraphPtr &func_graph) { } if (opt::CheckPrimitiveType(node, prim::kPrimTranspose)) { MS_LOG(INFO) << "Adjust Transpose"; - status = AddAttrToInput(func_graph, cnode, opt::kInputIndexTwo, "perm", 2); + status = AddAttrToInput(func_graph, cnode, opt::kInputIndexTwo, "perm", kBuildInputFlagTwo); } else if (opt::CheckPrimitiveType(node, prim::kPrimReshape)) { MS_LOG(INFO) << "Adjust Reshape"; - status = AddAttrToInput(func_graph, cnode, opt::kInputIndexTwo, "shape", 2); + status = AddAttrToInput(func_graph, cnode, opt::kInputIndexTwo, "shape", kBuildInputFlagTwo); } else if (opt::CheckPrimitiveType(node, prim::kPrimGather)) { MS_LOG(INFO) << "Adjust Gather"; status = AddAttrToInput(func_graph, cnode, opt::kInputIndexThree, "axis", 1); @@ -111,16 +116,16 @@ bool InputAdjust::Run(const FuncGraphPtr &func_graph) { status = AddAttrToInput(func_graph, cnode, opt::kInputIndexTwo, "k", 1); } else if
(opt::CheckPrimitiveType(node, prim::kPrimTileFusion)) { MS_LOG(INFO) << "Adjust TileFusion"; - status = AddAttrToInput(func_graph, cnode, opt::kInputIndexTwo, "multiples", 2); + status = AddAttrToInput(func_graph, cnode, opt::kInputIndexTwo, "multiples", kBuildInputFlagTwo); } else if (opt::CheckPrimitiveType(node, prim::kPrimReduceFusion)) { MS_LOG(INFO) << "Adjust ReduceFusion"; - status = AddAttrToInput(func_graph, cnode, opt::kInputIndexTwo, "axes", 2); + status = AddAttrToInput(func_graph, cnode, opt::kInputIndexTwo, "axes", kBuildInputFlagTwo); } else if (opt::CheckPrimitiveType(node, prim::kPrimPadFusion)) { MS_LOG(INFO) << "Adjust PadFusion"; - status = AddAttrToInput(func_graph, cnode, opt::kInputIndexTwo, "paddings", 3); + status = AddAttrToInput(func_graph, cnode, opt::kInputIndexTwo, "paddings", kBuildInputFlagThree); } else if (opt::CheckPrimitiveType(node, prim::kPrimPowFusion)) { MS_LOG(INFO) << "Adjust PowFuison"; - status = AddAttrToInput(func_graph, cnode, opt::kInputIndexTwo, "power", 4); + status = AddAttrToInput(func_graph, cnode, opt::kInputIndexTwo, "power", kBuildInputFlagFour); } else if (opt::CheckPrimitiveType(node, prim::kPrimResize)) { status = AddAttrToInput(func_graph, cnode, opt::kInputIndexTwo, "zoom_factor", 1); } diff --git a/mindspore/lite/tools/optimizer/graph/node_infershape.cc b/mindspore/lite/tools/optimizer/graph/node_infershape.cc index 310c62f0a2c..6e11780ff7b 100644 --- a/mindspore/lite/tools/optimizer/graph/node_infershape.cc +++ b/mindspore/lite/tools/optimizer/graph/node_infershape.cc @@ -30,6 +30,7 @@ namespace mindspore { namespace opt { namespace { +constexpr int kInputChannal = 3; constexpr size_t INITIAL_SIZE = 1024; void FreeTensors(std::vector<lite::Tensor *> *tensors) { if (tensors == nullptr) { @@ -63,7 +64,7 @@ void RectifyFormat(const CNodePtr &cnode, const std::vector<lite::Tensor *> &inp } for (auto &input : inputs) { auto shape = input->shape(); - if
(shape.size() == kInputSizeFour && shape[3] == 3 && shape[1] == -1) { + if (shape.size() == kInputSizeFour && shape[kInputIndexThree] == kInputChannal && shape[1] == -1) { input->set_format(mindspore::NHWC); } } diff --git a/mindspore/lite/tools/optimizer/graph/transpose_strategy.cc b/mindspore/lite/tools/optimizer/graph/transpose_strategy.cc index 25ef8c4ab22..6075cbcbd46 100644 --- a/mindspore/lite/tools/optimizer/graph/transpose_strategy.cc +++ b/mindspore/lite/tools/optimizer/graph/transpose_strategy.cc @@ -30,6 +30,7 @@ namespace mindspore { namespace opt { namespace { constexpr size_t kFirstInput = 1; +constexpr size_t kHalfDivisor = 2; constexpr size_t kOnnxStridedSlice = 6; const std::vector<int> NH2NC = {0, 3, 1, 2}; const std::vector<int> NC2NH = {0, 2, 3, 1}; @@ -119,16 +120,16 @@ bool TransposeStrategy::CanFusionIfInsert(const FuncGraphPtr &func_graph, const return false; } auto total_node_count = in_nodes.size() + out_nodes.size(); - bool can_insert = trans_count > total_node_count / 2; + bool can_insert = trans_count > total_node_count / kHalfDivisor; if (CheckPrimitiveType(cnode, prim::kPrimActivation)) { auto prim_act = GetValueNode<std::shared_ptr<ops::Activation>>(cnode->input(0)); MS_ASSERT(prim_act != nullptr); if (prim_act->get_activation_type() == mindspore::ActivationType::LEAKY_RELU) { - can_insert = trans_count >= total_node_count / 2; + can_insert = trans_count >= total_node_count / kHalfDivisor; } } if (CheckPrimitiveType(cnode, prim::kPrimSplit) || CheckPrimitiveType(cnode, prim::kPrimQuantDTypeCast)) { - can_insert = trans_count >= total_node_count / 2; + can_insert = trans_count >= total_node_count / kHalfDivisor; } if (!can_insert) { return can_insert; @@ -202,7 +203,7 @@ STATUS TransposeStrategy::ChangeOpAxis(const FuncGraphPtr &func_graph, const CNo auto new_axis = axis_map[axis < 0 ?
axis + kInputSizeFour : axis]; if (new_axis == 0) { offsets = {offsets[0], offsets[kInputIndexTwo], offsets[kInputIndexThree], offsets[1]}; - } else if (new_axis == 3) { + } else if (new_axis == kInputIndexThree) { offsets = {offsets[1], offsets[kInputIndexTwo], offsets[0]}; } else { offsets.push_back(0); diff --git a/mindspore/lite/tools/optimizer/graph/unify_format_pass.cc b/mindspore/lite/tools/optimizer/graph/unify_format_pass.cc index d1e57bbafa0..aa84582bf0f 100644 --- a/mindspore/lite/tools/optimizer/graph/unify_format_pass.cc +++ b/mindspore/lite/tools/optimizer/graph/unify_format_pass.cc @@ -28,6 +28,7 @@ using mindspore::lite::NCHW_SHAPE; namespace mindspore { namespace opt { namespace { +constexpr int kInputChannel = 3; const std::vector<int> NH2NC = {0, 3, 1, 2}; const std::vector<int> NC2NH = {0, 2, 3, 1}; @@ -162,7 +163,7 @@ void ConvertNcTensor2Nh(const FuncGraphPtr &func_graph, const CNodePtr &cnode, s } else if (data_info.shape_.size() == kInputSizeTwo) { new_shape = {1, 1, data_info.shape_[0], data_info.shape_[1]}; } else if (data_info.shape_.size() == kInputSizeThree) { - new_shape = {1, data_info.shape_[0], data_info.shape_[1], data_info.shape_[2]}; + new_shape = {1, data_info.shape_[0], data_info.shape_[1], data_info.shape_[kInputIndexTwo]}; } auto size = data_info.data_.size() / sizeof(float); std::vector<float> new_data(size); @@ -185,7 +186,7 @@ void ConvertNcTensor2Nh(const FuncGraphPtr &func_graph, const CNodePtr &cnode, s } auto param_node = func_graph->add_parameter(); param_node->set_name(cnode->input(index)->fullname_with_scope()); - std::vector<int64_t> shape_vec{new_shape[0], new_shape[2], new_shape[3], new_shape[1]}; + std::vector<int64_t> shape_vec{new_shape[0], new_shape[kInputIndexTwo], new_shape[kInputIndexThree], new_shape[1]}; auto tensor_info = lite::CreateTensorInfo(new_data.data(), size * sizeof(float), shape_vec, kNumberTypeFloat32); if (tensor_info == nullptr) { MS_LOG(ERROR) << "Create tensor info failed"; @@ -501,8 +502,8 @@ STATUS
UnifyFormatPass::HandleGraphInput(const FuncGraphPtr &func_graph, const C if (shape_vector.size() != kInputSizeFour) { continue; } - if (func_graph->get_inputs().size() == 1 && fmk_type_ == lite::converter::FmkType_ONNX && shape_vector[3] == 3 && - shape_vector[1] == -1) { + if (func_graph->get_inputs().size() == 1 && fmk_type_ == lite::converter::FmkType_ONNX && + shape_vector[kInputIndexThree] == kInputChannel && shape_vector[1] == -1) { continue; } std::vector<int64_t> new_dims = {shape_vector[NCHW_SHAPE::NCHW_N], shape_vector[NCHW_SHAPE::NCHW_H],