From fa127d4fb0c2bfcf08bf54df156bcc52715ba6a3 Mon Sep 17 00:00:00 2001
From: liuxiao93
Date: Wed, 21 Apr 2021 17:50:30 +0800
Subject: [PATCH] fix codedex.

---
 .../backend/kernel_compiler/tbe/tbe_kernel_build.cc |  4 ++--
 .../optimizer/ascend/enhancer/split_n_optimizer.cc  |  2 +-
 .../ir_fission/max_pool3d_grad_grad_fission.cc      | 12 ++++++------
 3 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/mindspore/ccsrc/backend/kernel_compiler/tbe/tbe_kernel_build.cc b/mindspore/ccsrc/backend/kernel_compiler/tbe/tbe_kernel_build.cc
index de8336ca219..fa0ebe5eca9 100644
--- a/mindspore/ccsrc/backend/kernel_compiler/tbe/tbe_kernel_build.cc
+++ b/mindspore/ccsrc/backend/kernel_compiler/tbe/tbe_kernel_build.cc
@@ -198,7 +198,7 @@ bool TbeKernelJsonCreator::GenTbeSingleKernelJson(const std::shared_ptr
 }
 
 void GenNoneInputDescJson(const std::shared_ptr<OpIOInfo> &input_ptr, size_t input_i,
-                          std::vector<nlohmann::json> *input_list) {
+                          std::vector<nlohmann::json> *const input_list) {
   nlohmann::json input_desc_json;
   auto in_name = input_ptr->name();
   input_desc_json[kJName] = in_name + std::to_string(input_i);
@@ -209,7 +209,7 @@ void GenNoneInputDescJson(const std::shared_ptr<OpIOInfo> &input_ptr, size_t inp
 void TbeKernelJsonCreator::GenValidInputDescJson(const std::shared_ptr<AnfNode> &anf_node, size_t real_input_index,
                                                  bool value, const std::shared_ptr<OpIOInfo> &input_ptr,
                                                  const string &op_input_name, size_t input_i,
-                                                 std::vector<nlohmann::json> *input_list) {
+                                                 std::vector<nlohmann::json> *const input_list) {
   auto def_format = kOpFormat_NCHW;
   auto dtype = GetDeviceInputType(anf_node, real_input_index);
   auto format = GetDeviceInputFormat(anf_node, real_input_index);
diff --git a/mindspore/ccsrc/backend/optimizer/ascend/enhancer/split_n_optimizer.cc b/mindspore/ccsrc/backend/optimizer/ascend/enhancer/split_n_optimizer.cc
index 1f33f29afde..2d722aa81ce 100644
--- a/mindspore/ccsrc/backend/optimizer/ascend/enhancer/split_n_optimizer.cc
+++ b/mindspore/ccsrc/backend/optimizer/ascend/enhancer/split_n_optimizer.cc
@@ -30,7 +30,7 @@ namespace {
 using KernelWithIndex = std::pair<AnfNodePtr, size_t>;
 const std::set<std::string> InvalidOps = {kSplitOpName, kSplitVOpName, kConcatOpName};
 
-void GetSplitOutputs(const FuncGraphPtr &func_graph, const AnfNodePtr &node, std::vector<AnfNodePtr> *out_nodes) {
+void GetSplitOutputs(const FuncGraphPtr &func_graph, const AnfNodePtr &node, std::vector<AnfNodePtr> *const out_nodes) {
   MS_EXCEPTION_IF_NULL(func_graph);
   auto manager = func_graph->manager();
   MS_EXCEPTION_IF_NULL(manager);
diff --git a/mindspore/ccsrc/backend/optimizer/ascend/ir_fission/max_pool3d_grad_grad_fission.cc b/mindspore/ccsrc/backend/optimizer/ascend/ir_fission/max_pool3d_grad_grad_fission.cc
index 029bdbeaba5..c7c5d3cc5e7 100644
--- a/mindspore/ccsrc/backend/optimizer/ascend/ir_fission/max_pool3d_grad_grad_fission.cc
+++ b/mindspore/ccsrc/backend/optimizer/ascend/ir_fission/max_pool3d_grad_grad_fission.cc
@@ -36,12 +36,12 @@ tensor::TensorPtr CreateTensor(const AnfNodePtr &node) {
     MS_LOG(ERROR) << "MaxPool3DGradGrad only support NCDHW.";
   }
   MS_LOG(DEBUG) << "ksize of MaxPool3DGradGrad:" << ksize;
-  int64_t D = ksize[2];
-  int64_t H = ksize[3];
-  int64_t W = ksize[4];
+  int64_t d = ksize[2];
+  int64_t h = ksize[3];
+  int64_t w = ksize[4];
 
   // 1 create tensor
-  std::vector<int64_t> assist_shape = {1, 1, D, H, W};  // shape:NCDHW
+  std::vector<int64_t> assist_shape = {1, 1, d, h, w};  // shape:NCDHW
   TensorTypePtr tensor_type = std::make_shared<TensorType>(kFloat16);
   MS_EXCEPTION_IF_NULL(tensor_type);
   tensor::DeviceInfo device_info{kOpFormat_NDC1HWC0, tensor_type};
@@ -52,14 +52,14 @@ tensor::TensorPtr CreateTensor(const AnfNodePtr &node) {
   auto data_ptr = assist_tensor->data_c();
   MS_EXCEPTION_IF_NULL(data_ptr);
   std::vector<float16> half_data;
-  int64_t dims = 1 * 1 * D * H * W;
+  int64_t dims = 1 * 1 * d * h * w;
   int64_t counter = dims;
   for (int64_t i = 0; i < dims; i++) {
     half_data.emplace_back(float16(static_cast<float>(counter)));
     counter--;
   }
 
-  auto elem_num = dims * kFloat16Len;
+  int64_t elem_num = dims * kFloat16Len;
   auto ret_code = memcpy_s(data_ptr, static_cast<size_t>(assist_tensor->data().nbytes()), half_data.data(), elem_num);
   if (ret_code != 0) {
     MS_LOG(ERROR) << "Failed to copy data into Tensor.";