From 04aeff97e12a32e15d9b8a6b4660895644055349 Mon Sep 17 00:00:00 2001
From: Emir Haleva
Date: Mon, 19 Jul 2021 17:46:45 +0300
Subject: [PATCH] Fix coverity issues

---
 .../kernel_compiler/cpu/nnacl/fp16_grad/resize_grad.c |  4 ++--
 .../kernel_compiler/cpu/nnacl/fp32_grad/resize_grad.c |  4 ++--
 mindspore/lite/examples/unified_api/src/inference.cc  | 10 ++++++----
 .../lite/src/registry/kernel_interface_registry.cc    |  3 +--
 mindspore/lite/src/registry/register_kernel_impl.cc   |  6 ++----
 .../arm/fp16_grad/convolution_fp16_grad_input.cc      |  2 +-
 6 files changed, 14 insertions(+), 15 deletions(-)

diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16_grad/resize_grad.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16_grad/resize_grad.c
index 600fea3f8c9..48fddb910c4 100644
--- a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16_grad/resize_grad.c
+++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16_grad/resize_grad.c
@@ -79,13 +79,13 @@ void ResizeBiLinearFp16Grad(float16_t *in_addr, float16_t *out_addr, int batch_s
         size_t top_y_index = MSMAX((size_t)(floorf(in_y)), (size_t)(0));
         size_t bottom_y_index = MSMIN((size_t)(ceilf(in_y)), param->out_height_ - 1);
         float16_t y_lerp = in_y - floorf(in_y);
-        float16_t inverse_y_lerp = 1.0 - y_lerp;
+        const float16_t inverse_y_lerp = 1.0 - y_lerp;

         float16_t in_x = (float16_t)w * param->width_scale_;
         size_t left_x_index = MSMAX((size_t)(floorf(in_x)), (size_t)(0));
         size_t right_x_index = MSMIN((size_t)(ceilf(in_x)), param->out_width_ - 1);
         float16_t x_lerp = in_x - floorf(in_x);
-        float16_t inverse_x_lerp = 1.0 - x_lerp;
+        const float16_t inverse_x_lerp = 1.0 - x_lerp;

         size_t in_offset = h * (param->in_width_ * channel) + (w * channel) + c;
         size_t out_offset_top_y_left_x = top_y_index * (param->out_width_ * channel) + (left_x_index * channel) + c;
diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/resize_grad.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/resize_grad.c
index 2040a5c5a45..e73a681ca44 100644
--- a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/resize_grad.c
+++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32_grad/resize_grad.c
@@ -79,13 +79,13 @@ void ResizeBiLinearGrad(const float *in_addr, float *out_addr, int batch_size, i
         size_t top_y_index = MSMAX((size_t)(floorf(in_y)), (size_t)(0));
         size_t bottom_y_index = MSMIN((size_t)(ceilf(in_y)), param->out_height_ - 1);
         float y_lerp = in_y - floorf(in_y);
-        float inverse_y_lerp = 1.0 - y_lerp;
+        const float inverse_y_lerp = 1.0 - y_lerp;

         float in_x = (float)w * param->width_scale_;
         size_t left_x_index = MSMAX((size_t)(floorf(in_x)), (size_t)(0));
         size_t right_x_index = MSMIN((size_t)(ceilf(in_x)), param->out_width_ - 1);
         float x_lerp = in_x - floorf(in_x);
-        float inverse_x_lerp = 1.0 - x_lerp;
+        const float inverse_x_lerp = 1.0 - x_lerp;

         size_t in_offset = h * (param->in_width_ * channel) + (w * channel) + c;
         size_t out_offset_top_y_left_x = top_y_index * (param->out_width_ * channel) + (left_x_index * channel) + c;
diff --git a/mindspore/lite/examples/unified_api/src/inference.cc b/mindspore/lite/examples/unified_api/src/inference.cc
index d33f05d3c77..355871e4125 100644
--- a/mindspore/lite/examples/unified_api/src/inference.cc
+++ b/mindspore/lite/examples/unified_api/src/inference.cc
@@ -29,15 +29,13 @@ static void Usage() { std::cout << "Usage: infer -f <.ms model file>" << std::en
 static std::string ReadArgs(int argc, char *argv[]) {
   std::string infer_model_fn;
   int opt;
-  while ((opt = getopt(argc, argv, "f:h")) != -1) {
+  while ((opt = getopt(argc, argv, "f:")) != -1) {
     switch (opt) {
       case 'f':
         infer_model_fn = std::string(optarg);
         break;
-      case 'h':
       default:
-        Usage();
-        exit(-1);
+        break;
     }
   }
   return infer_model_fn;
@@ -45,6 +43,10 @@ static std::string ReadArgs(int argc, char *argv[]) {

 int main(int argc, char **argv) {
   std::string infer_model_fn = ReadArgs(argc, argv);
+  if (infer_model_fn.size() == 0) {
+    Usage();
+    return -1;
+  }
   auto context = std::make_shared<mindspore::Context>();
   auto cpu_context = std::make_shared<mindspore::CPUDeviceInfo>();
diff --git a/mindspore/lite/src/registry/kernel_interface_registry.cc b/mindspore/lite/src/registry/kernel_interface_registry.cc
index 1a79fa150bb..df5fe5ba437 100644
--- a/mindspore/lite/src/registry/kernel_interface_registry.cc
+++ b/mindspore/lite/src/registry/kernel_interface_registry.cc
@@ -134,12 +134,11 @@ int KernelInterfaceRegistry::Reg(const std::string &provider, int op_type, Kerne
   auto iter = kernel_creators_.find(provider);
   if (iter == kernel_creators_.end()) {
     kernel_creators_[provider] =
-      reinterpret_cast<KernelInterfaceCreator *>(malloc(kMaxKernelNum * sizeof(KernelInterfaceCreator)));
+      reinterpret_cast<KernelInterfaceCreator *>(calloc(kMaxKernelNum, sizeof(KernelInterfaceCreator)));
     if (kernel_creators_[provider] == nullptr) {
       MS_LOG(ERROR) << "malloc kernel dev delegate creator fail!";
       return RET_ERROR;
     }
-    memset(reinterpret_cast<void *>(kernel_creators_[provider]), 0, kMaxKernelNum * sizeof(KernelInterfaceCreator));
   }

   kernel_creators_[provider][op_type] = creator;
diff --git a/mindspore/lite/src/registry/register_kernel_impl.cc b/mindspore/lite/src/registry/register_kernel_impl.cc
index 462efac5b66..69c4906a3dc 100644
--- a/mindspore/lite/src/registry/register_kernel_impl.cc
+++ b/mindspore/lite/src/registry/register_kernel_impl.cc
@@ -70,21 +70,19 @@ int RegistryKernelImpl::RegKernel(const std::string &arch, const std::string &pr
   std::unique_lock<std::mutex> lock(lock_);
   auto iter = kernel_creators_.find(provider);
   if (iter == kernel_creators_.end()) {
-    kernel_creators_[provider][arch] = reinterpret_cast<CreateKernel *>(malloc(kKernelMaxNum * sizeof(CreateKernel)));
+    kernel_creators_[provider][arch] = reinterpret_cast<CreateKernel *>(calloc(kKernelMaxNum, sizeof(CreateKernel)));
     if (kernel_creators_[provider][arch] == nullptr) {
       MS_LOG(ERROR) << "malloc kernel creator buffer fail! provider: " << provider << ",arch:" << arch;
       return RET_ERROR;
     }
-    memset(reinterpret_cast<void *>(kernel_creators_[provider][arch]), 0, kKernelMaxNum * sizeof(CreateKernel));
   } else {
     auto iter_arch = iter->second.find(arch);
     if (iter_arch == iter->second.end()) {
-      iter->second[arch] = reinterpret_cast<CreateKernel *>(malloc(kKernelMaxNum * sizeof(CreateKernel)));
+      iter->second[arch] = reinterpret_cast<CreateKernel *>(calloc(kKernelMaxNum, sizeof(CreateKernel)));
       if (iter->second[arch] == nullptr) {
         MS_LOG(ERROR) << "malloc kernel creator buffer fail! provider: " << provider << ",arch:" << arch;
         return RET_ERROR;
       }
-      memset(reinterpret_cast<void *>(iter->second[arch]), 0, kKernelMaxNum * sizeof(CreateKernel));
     }
   }

diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/convolution_fp16_grad_input.cc b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/convolution_fp16_grad_input.cc
index b580e638fa1..df40a437633 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/convolution_fp16_grad_input.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/convolution_fp16_grad_input.cc
@@ -172,7 +172,7 @@ int ConvolutionGradInputCPUKernelFp16::Run() {
   int in_w = conv_param->input_w_;
   auto *out_dx = out_tensors_.at(0);
   auto dx_addr = reinterpret_cast<float16_t *>(out_dx->data_c());
-  memset(dx_addr, 0, sizeof(float16_t) * batch * in_ch * in_h * in_w);
+  std::memset(dx_addr, 0, sizeof(float16_t) * batch * in_ch * in_h * in_w);
   int error_code = ParallelLaunch(this->ms_context_, ConvolutionGradInputFp16Run, this, ms_context_->thread_num_);
   if (error_code != RET_OK) {
     MS_LOG(ERROR) << "bias function error error_code[" << error_code << "]";