From f170cd9405c88a166f4e033bf81cb9cd5b717595 Mon Sep 17 00:00:00 2001
From: zhaozhenlong
Date: Tue, 27 Jul 2021 10:42:07 +0800
Subject: [PATCH] fix magic number

---
 mindspore/lite/src/runtime/kernel/arm/base/slice_base.cc | 3 ++-
 .../src/runtime/kernel/arm/fp32/crop_and_resize_fp32.cc | 8 ++++++--
 mindspore/lite/src/runtime/kernel/arm/fp32/cumsum_fp32.cc | 2 +-
 .../lite/tools/optimizer/fusion/conv_transform_fusion.cc | 3 ++-
 .../lite/tools/optimizer/graph/reduce_same_act_pass.cc | 5 ++++-
 mindspore/lite/tools/optimizer/graph/split_one_pass.cc | 5 ++++-
 6 files changed, 19 insertions(+), 7 deletions(-)

diff --git a/mindspore/lite/src/runtime/kernel/arm/base/slice_base.cc b/mindspore/lite/src/runtime/kernel/arm/base/slice_base.cc
index 87f3893f7cf..2b483d03ebe 100644
--- a/mindspore/lite/src/runtime/kernel/arm/base/slice_base.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/base/slice_base.cc
@@ -83,7 +83,8 @@ int SliceCPUKernel::Run() {
     return RET_NULL_PTR;
   }
   // param_ shape info has already been extended to 8d
-  if (param_->size_[5] < op_parameter_->thread_num_) {
+  constexpr size_t kDimHUnder8D = 5;
+  if (param_->size_[kDimHUnder8D] < op_parameter_->thread_num_) {
     DoSliceNoParallel(input_data, output_data, param_, lite::DataTypeSize(in_tensors_.at(0)->data_type()));
     return RET_OK;
   }
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/crop_and_resize_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/crop_and_resize_fp32.cc
index fb38d50ce0c..a5a8ef70c6b 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/crop_and_resize_fp32.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/crop_and_resize_fp32.cc
@@ -28,6 +28,10 @@ using mindspore::lite::RET_OK;
 using mindspore::schema::PrimitiveType_CropAndResize;
 namespace mindspore::kernel {
+namespace {
+constexpr size_t kBoxIndex = 1;
+constexpr size_t kBoxIdIndex = 2;
+}  // namespace
 int CropAndResizeCPUKernel::Init() {
   if (!InferShapeDone()) {
     return RET_OK;
   }
@@ -112,11 +116,11 @@ int CropAndResizeCPUKernel::RunImpl(int task_id) {
   if (input_data == nullptr) {
     return RET_NULL_PTR;
   }
-  auto boxes = reinterpret_cast<float *>(in_tensors_.at(1)->data_c());
+  auto boxes = reinterpret_cast<float *>(in_tensors_.at(kBoxIndex)->data_c());
   if (boxes == nullptr) {
     return RET_NULL_PTR;
   }
-  auto box_idx = reinterpret_cast<int32_t *>(in_tensors_.at(2)->data_c());
+  auto box_idx = reinterpret_cast<int32_t *>(in_tensors_.at(kBoxIdIndex)->data_c());
   if (box_idx == nullptr) {
     return RET_NULL_PTR;
   }
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/cumsum_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/cumsum_fp32.cc
index 642834abebc..aef65236c46 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/cumsum_fp32.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/cumsum_fp32.cc
@@ -54,7 +54,7 @@ int CumSumCPUKernel::Init() {
 }
 
 int CumSumCPUKernel::ReSize() {
-  MS_ASSERT(in_tensors_.size() == 2);
+  MS_ASSERT(in_tensors_.size() == kInputSize1);
   auto input_tensor = in_tensors_.at(0);
   auto axis_tensor = in_tensors_.at(1);
   int *axis_data = reinterpret_cast<int *>(axis_tensor->data_c());
diff --git a/mindspore/lite/tools/optimizer/fusion/conv_transform_fusion.cc b/mindspore/lite/tools/optimizer/fusion/conv_transform_fusion.cc
index 649cca73fe8..eb142df3d94 100644
--- a/mindspore/lite/tools/optimizer/fusion/conv_transform_fusion.cc
+++ b/mindspore/lite/tools/optimizer/fusion/conv_transform_fusion.cc
@@ -30,6 +30,7 @@ constexpr size_t kConvNoBiasLen = 3;
 constexpr size_t kConvWithBiasLen = 4;
 constexpr size_t kNumDim1 = 1;
 constexpr size_t kNumDim2 = 2;
+constexpr size_t kDim4D = 4;
 int GetOutChannels(const CNodePtr &conv_node) {
   MS_ASSERT(conv_node != nullptr);
   auto value_node = conv_node->input(0);
@@ -230,7 +231,7 @@ void ConvTransformFusion::CalNewWeightTensor(const CNodePtr &conv_node, const te
                                              int kernel_num, const float *trans_scale) const {
   MS_ASSERT(weight_data != nullptr);
   MS_ASSERT(trans_scale != nullptr);
-  if (weight_tensor->shape().size() > 4) {
+  if (weight_tensor->shape().size() > kDim4D) {
     MS_LOG(ERROR) << "weight tensor shape error";
     return;
   }
diff --git a/mindspore/lite/tools/optimizer/graph/reduce_same_act_pass.cc b/mindspore/lite/tools/optimizer/graph/reduce_same_act_pass.cc
index 021d9d5fe1e..90db8d61081 100644
--- a/mindspore/lite/tools/optimizer/graph/reduce_same_act_pass.cc
+++ b/mindspore/lite/tools/optimizer/graph/reduce_same_act_pass.cc
@@ -23,6 +23,9 @@
 
 namespace mindspore {
 namespace opt {
+namespace {
+constexpr size_t kMinUsersSize = 2;
+}
 bool ReduceSameActPass::Run(const FuncGraphPtr &func_graph) {
   auto node_list = TopoSort(func_graph->get_return());
   auto manager = Manage(func_graph, true);
@@ -37,7 +40,7 @@ bool ReduceSameActPass::Run(const FuncGraphPtr &func_graph) {
       continue;
     }
     auto cur_node_users = func_graph->manager()->node_users()[node];
-    if (cur_node_users.size() < 2) {
+    if (cur_node_users.size() < kMinUsersSize) {
       continue;
     }
diff --git a/mindspore/lite/tools/optimizer/graph/split_one_pass.cc b/mindspore/lite/tools/optimizer/graph/split_one_pass.cc
index 97bde69ec7f..7c7ed710dbe 100644
--- a/mindspore/lite/tools/optimizer/graph/split_one_pass.cc
+++ b/mindspore/lite/tools/optimizer/graph/split_one_pass.cc
@@ -23,6 +23,9 @@
 
 namespace mindspore {
 namespace opt {
+namespace {
+constexpr size_t kMinCnodeSize = 2;
+}  // namespace
 bool SplitOnePass::Run(const FuncGraphPtr &func_graph) {
   auto node_list = TopoSort(func_graph->get_return());
   auto manager = Manage(func_graph, true);
@@ -50,7 +53,7 @@ bool SplitOnePass::Run(const FuncGraphPtr &func_graph) {
     if (primitive_c->get_output_num() != 1) {
       continue;
     }
-    if (cnode->size() < 2) {
+    if (cnode->size() < kMinCnodeSize) {
       return false;
     }
     func_graph->manager()->Replace(node, cnode->input(1));