fix magic numbers: replace hard-coded constants with named constexpr constants

This commit is contained in:
zhaozhenlong 2021-07-27 10:42:07 +08:00
parent b2c7f80b03
commit f170cd9405
6 changed files with 19 additions and 7 deletions

View File

@ -83,7 +83,8 @@ int SliceCPUKernel::Run() {
return RET_NULL_PTR;
}
// param_ shape info has already been extended to 8d
-  if (param_->size_[5] < op_parameter_->thread_num_) {
+  constexpr size_t kDimHUnder8D = 5;
+  if (param_->size_[kDimHUnder8D] < op_parameter_->thread_num_) {
DoSliceNoParallel(input_data, output_data, param_, lite::DataTypeSize(in_tensors_.at(0)->data_type()));
return RET_OK;
}

View File

@ -28,6 +28,10 @@ using mindspore::lite::RET_OK;
using mindspore::schema::PrimitiveType_CropAndResize;
namespace mindspore::kernel {
namespace {
constexpr size_t kBoxIndex = 1;
constexpr size_t kBoxIdIndex = 2;
} // namespace
int CropAndResizeCPUKernel::Init() {
if (!InferShapeDone()) {
return RET_OK;
@ -112,11 +116,11 @@ int CropAndResizeCPUKernel::RunImpl(int task_id) {
if (input_data == nullptr) {
return RET_NULL_PTR;
}
-  auto boxes = reinterpret_cast<float *>(in_tensors_.at(1)->data_c());
+  auto boxes = reinterpret_cast<float *>(in_tensors_.at(kBoxIndex)->data_c());
if (boxes == nullptr) {
return RET_NULL_PTR;
}
-  auto box_idx = reinterpret_cast<int32_t *>(in_tensors_.at(2)->data_c());
+  auto box_idx = reinterpret_cast<int32_t *>(in_tensors_.at(kBoxIdIndex)->data_c());
if (box_idx == nullptr) {
return RET_NULL_PTR;
}

View File

@ -54,7 +54,7 @@ int CumSumCPUKernel::Init() {
}
int CumSumCPUKernel::ReSize() {
-  MS_ASSERT(in_tensors_.size() == 2);
+  MS_ASSERT(in_tensors_.size() == kInputSize1);
auto input_tensor = in_tensors_.at(0);
auto axis_tensor = in_tensors_.at(1);
int *axis_data = reinterpret_cast<int *>(axis_tensor->data_c());

View File

@ -30,6 +30,7 @@ constexpr size_t kConvNoBiasLen = 3;
constexpr size_t kConvWithBiasLen = 4;
constexpr size_t kNumDim1 = 1;
constexpr size_t kNumDim2 = 2;
constexpr size_t kDim4D = 4;
int GetOutChannels(const CNodePtr &conv_node) {
MS_ASSERT(conv_node != nullptr);
auto value_node = conv_node->input(0);
@ -230,7 +231,7 @@ void ConvTransformFusion::CalNewWeightTensor(const CNodePtr &conv_node, const te
int kernel_num, const float *trans_scale) const {
MS_ASSERT(weight_data != nullptr);
MS_ASSERT(trans_scale != nullptr);
-  if (weight_tensor->shape().size() > 4) {
+  if (weight_tensor->shape().size() > kDim4D) {
MS_LOG(ERROR) << "weight tensor shape error";
return;
}

View File

@ -23,6 +23,9 @@
namespace mindspore {
namespace opt {
namespace {
constexpr size_t kMinUsersSize = 2;
}
bool ReduceSameActPass::Run(const FuncGraphPtr &func_graph) {
auto node_list = TopoSort(func_graph->get_return());
auto manager = Manage(func_graph, true);
@ -37,7 +40,7 @@ bool ReduceSameActPass::Run(const FuncGraphPtr &func_graph) {
continue;
}
auto cur_node_users = func_graph->manager()->node_users()[node];
-    if (cur_node_users.size() < 2) {
+    if (cur_node_users.size() < kMinUsersSize) {
continue;
}

View File

@ -23,6 +23,9 @@
namespace mindspore {
namespace opt {
namespace {
constexpr size_t kMinCnodeSize = 2;
} // namespace
bool SplitOnePass::Run(const FuncGraphPtr &func_graph) {
auto node_list = TopoSort(func_graph->get_return());
auto manager = Manage(func_graph, true);
@ -50,7 +53,7 @@ bool SplitOnePass::Run(const FuncGraphPtr &func_graph) {
if (primitive_c->get_output_num() != 1) {
continue;
}
-    if (cnode->size() < 2) {
+    if (cnode->size() < kMinCnodeSize) {
return false;
}
func_graph->manager()->Replace(node, cnode->input(1));