Fix Coverity issues

This commit is contained in:
Emir Haleva 2021-07-19 17:46:45 +03:00
parent e69fa78453
commit 04aeff97e1
6 changed files with 14 additions and 15 deletions

View File

@ -79,13 +79,13 @@ void ResizeBiLinearFp16Grad(float16_t *in_addr, float16_t *out_addr, int batch_s
size_t top_y_index = MSMAX((size_t)(floorf(in_y)), (size_t)(0));
size_t bottom_y_index = MSMIN((size_t)(ceilf(in_y)), param->out_height_ - 1);
float16_t y_lerp = in_y - floorf(in_y);
float16_t inverse_y_lerp = 1.0 - y_lerp;
const float16_t inverse_y_lerp = 1.0 - y_lerp;
float16_t in_x = (float16_t)w * param->width_scale_;
size_t left_x_index = MSMAX((size_t)(floorf(in_x)), (size_t)(0));
size_t right_x_index = MSMIN((size_t)(ceilf(in_x)), param->out_width_ - 1);
float16_t x_lerp = in_x - floorf(in_x);
float16_t inverse_x_lerp = 1.0 - x_lerp;
const float16_t inverse_x_lerp = 1.0 - x_lerp;
size_t in_offset = h * (param->in_width_ * channel) + (w * channel) + c;
size_t out_offset_top_y_left_x = top_y_index * (param->out_width_ * channel) + (left_x_index * channel) + c;

View File

@ -79,13 +79,13 @@ void ResizeBiLinearGrad(const float *in_addr, float *out_addr, int batch_size, i
size_t top_y_index = MSMAX((size_t)(floorf(in_y)), (size_t)(0));
size_t bottom_y_index = MSMIN((size_t)(ceilf(in_y)), param->out_height_ - 1);
float y_lerp = in_y - floorf(in_y);
float inverse_y_lerp = 1.0 - y_lerp;
const float inverse_y_lerp = 1.0 - y_lerp;
float in_x = (float)w * param->width_scale_;
size_t left_x_index = MSMAX((size_t)(floorf(in_x)), (size_t)(0));
size_t right_x_index = MSMIN((size_t)(ceilf(in_x)), param->out_width_ - 1);
float x_lerp = in_x - floorf(in_x);
float inverse_x_lerp = 1.0 - x_lerp;
const float inverse_x_lerp = 1.0 - x_lerp;
size_t in_offset = h * (param->in_width_ * channel) + (w * channel) + c;
size_t out_offset_top_y_left_x = top_y_index * (param->out_width_ * channel) + (left_x_index * channel) + c;

View File

@ -29,15 +29,13 @@ static void Usage() { std::cout << "Usage: infer -f <.ms model file>" << std::en
static std::string ReadArgs(int argc, char *argv[]) {
std::string infer_model_fn;
int opt;
while ((opt = getopt(argc, argv, "f:h")) != -1) {
while ((opt = getopt(argc, argv, "f:")) != -1) {
switch (opt) {
case 'f':
infer_model_fn = std::string(optarg);
break;
case 'h':
default:
Usage();
exit(-1);
break;
}
}
return infer_model_fn;
@ -45,6 +43,10 @@ static std::string ReadArgs(int argc, char *argv[]) {
int main(int argc, char **argv) {
std::string infer_model_fn = ReadArgs(argc, argv);
if (infer_model_fn.size() == 0) {
Usage();
return -1;
}
auto context = std::make_shared<mindspore::Context>();
auto cpu_context = std::make_shared<mindspore::CPUDeviceInfo>();

View File

@ -134,12 +134,11 @@ int KernelInterfaceRegistry::Reg(const std::string &provider, int op_type, Kerne
auto iter = kernel_creators_.find(provider);
if (iter == kernel_creators_.end()) {
kernel_creators_[provider] =
reinterpret_cast<KernelInterfaceCreator *>(malloc(kMaxKernelNum * sizeof(KernelInterfaceCreator)));
reinterpret_cast<KernelInterfaceCreator *>(calloc(kMaxKernelNum, sizeof(KernelInterfaceCreator)));
if (kernel_creators_[provider] == nullptr) {
MS_LOG(ERROR) << "malloc kernel dev delegate creator fail!";
return RET_ERROR;
}
memset(reinterpret_cast<void *>(kernel_creators_[provider]), 0, kMaxKernelNum * sizeof(KernelInterfaceCreator));
}
kernel_creators_[provider][op_type] = creator;

View File

@ -70,21 +70,19 @@ int RegistryKernelImpl::RegKernel(const std::string &arch, const std::string &pr
std::unique_lock<std::mutex> lock(lock_);
auto iter = kernel_creators_.find(provider);
if (iter == kernel_creators_.end()) {
kernel_creators_[provider][arch] = reinterpret_cast<CreateKernel *>(malloc(kKernelMaxNum * sizeof(CreateKernel)));
kernel_creators_[provider][arch] = reinterpret_cast<CreateKernel *>(calloc(kKernelMaxNum, sizeof(CreateKernel)));
if (kernel_creators_[provider][arch] == nullptr) {
MS_LOG(ERROR) << "malloc kernel creator buffer fail! provider: " << provider << ",arch:" << arch;
return RET_ERROR;
}
memset(reinterpret_cast<void *>(kernel_creators_[provider][arch]), 0, kKernelMaxNum * sizeof(CreateKernel));
} else {
auto iter_arch = iter->second.find(arch);
if (iter_arch == iter->second.end()) {
iter->second[arch] = reinterpret_cast<CreateKernel *>(malloc(kKernelMaxNum * sizeof(CreateKernel)));
iter->second[arch] = reinterpret_cast<CreateKernel *>(calloc(kKernelMaxNum, sizeof(CreateKernel)));
if (iter->second[arch] == nullptr) {
MS_LOG(ERROR) << "malloc kernel creator buffer fail! provider: " << provider << ",arch:" << arch;
return RET_ERROR;
}
memset(reinterpret_cast<void *>(iter->second[arch]), 0, kKernelMaxNum * sizeof(CreateKernel));
}
}

View File

@ -172,7 +172,7 @@ int ConvolutionGradInputCPUKernelFp16::Run() {
int in_w = conv_param->input_w_;
auto *out_dx = out_tensors_.at(0);
auto dx_addr = reinterpret_cast<float16_t *>(out_dx->data_c());
memset(dx_addr, 0, sizeof(float16_t) * batch * in_ch * in_h * in_w);
std::memset(dx_addr, 0, sizeof(float16_t) * batch * in_ch * in_h * in_w);
int error_code = ParallelLaunch(this->ms_context_, ConvolutionGradInputFp16Run, this, ms_context_->thread_num_);
if (error_code != RET_OK) {
MS_LOG(ERROR) << "bias function error error_code[" << error_code << "]";