From 4246e0ee73ca8129fe80c0e304141ba5cd263739 Mon Sep 17 00:00:00 2001 From: yangruoqi713 Date: Mon, 23 Aug 2021 10:23:16 +0800 Subject: [PATCH] [MSLITE][DEVELOP] sync some fixed bugs --- .../cpu/nnacl/base/broadcast_to.c | 4 ++ .../cpu/nnacl/fp16/conv_depthwise_fp16.c | 2 +- .../cpu/nnacl/fp32/conv_depthwise_fp32.c | 2 +- .../cpu/nnacl/infer/conv2d_infer.c | 6 ++ .../kernel_compiler/cpu/nnacl/op_base.h | 5 ++ mindspore/lite/src/cxx_api/types.cc | 62 +++++++++---------- .../runtime/kernel/arm/fp16/gather_fp16.cc | 4 ++ 7 files changed, 52 insertions(+), 33 deletions(-) diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/broadcast_to.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/broadcast_to.c index a4ea4318d58..e89dd29a6b6 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/broadcast_to.c +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/base/broadcast_to.c @@ -74,6 +74,10 @@ void pad_input_shape(int *input_shape, int input_shape_len, int output_shape_len \ int dim_index = dim_max - 1; \ while (dim_index >= 0) { \ + if (input_shape[dim_index] == 0) { \ + free(data_temp); \ + return NNACL_ERR; \ + } \ dim_broadcast_rate = (size_t)(output_shape[dim_index] / input_shape[dim_index]); \ if (dim_broadcast_rate > 1) { \ before_dim_elements_num = accumulate(input_shape, 0, dim_index - 1); \ diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/conv_depthwise_fp16.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/conv_depthwise_fp16.c index c29c98ef41e..ded944b3d38 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/conv_depthwise_fp16.c +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp16/conv_depthwise_fp16.c @@ -416,7 +416,7 @@ void ConvDwFp16(float16_t *output_data, const float16_t *input_data, const float memcpy(dst_data + ow * conv_param->output_channel_, bias_data, conv_param->output_channel_ * sizeof(float16_t)); } for (int kh = start_kh; kh < end_kh; kh++) { - int ih = 
ih_origin + conv_param->dilation_w_ * kh; + int ih = ih_origin + conv_param->dilation_h_ * kh; const float16_t *src_kh = src + ih * conv_param->input_w_ * conv_param->input_channel_; const float16_t *weight_kh = weight_data + kh * conv_param->kernel_w_ * conv_param->output_channel_; diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/conv_depthwise_fp32.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/conv_depthwise_fp32.c index 9c194f4c8e3..857e3a8f42a 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/conv_depthwise_fp32.c +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/conv_depthwise_fp32.c @@ -55,7 +55,7 @@ void ConvDw(float *output_data, const float *input_data, const float *weight_dat conv_param->output_channel_ * (int)(sizeof(float))); } for (int kh = start_kh; kh < end_kh; kh++) { - int ih = ih_origin + conv_param->dilation_w_ * kh; + int ih = ih_origin + conv_param->dilation_h_ * kh; const float *src_kh = src + ih * conv_param->input_w_ * conv_param->input_channel_; const float *weight_kh = weight_data + kh * conv_param->kernel_w_ * conv_param->output_channel_; diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/conv2d_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/conv2d_infer.c index d043abdcfad..1db32468244 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/conv2d_infer.c +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/conv2d_infer.c @@ -24,6 +24,12 @@ int ConvInferShape(int input_h, int input_w, int *output_h, int *output_w, ConvP int dilate_w = param->dilation_w_; int dilate_h = param->dilation_h_; + if (stride_w == 0 || stride_h == 0) { + return NNACL_PARAM_INVALID; + } + if (INT_MUL_OVERFLOW(kernel_h, dilate_h) || INT_MUL_OVERFLOW(kernel_w, dilate_w)) { + return NNACL_ERRCODE_MUL_OVERFLOW; + } if (param->pad_mode_ == Pad_same) { // maybe error *output_w = ceil((float)(input_w) / (float)(stride_w)); *output_h = 
ceil((float)(input_h) / (float)(stride_h)); diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/op_base.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/op_base.h index 21995b7a007..f71c4e59011 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/op_base.h +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/op_base.h @@ -48,6 +48,11 @@ #define DOWN_ROUND(x, y) ((x) / (y) * (y)) #define MSVALID(left, x, right) (MSMIN((MSMAX(left, x)), right)) +#define INT_MUL_OVERFLOW(x, y) \ + ((x == 0) ? false \ + : ((x) > 0 ? ((y >= 0) ? (INT_MAX / (x)) < (y) : (INT_MAX / (x)) < (-1 * (y))) \ + : ((y >= 0) ? (INT_MAX / (x)) > (-1 * (y)) : (INT_MAX / (x)) > (y)))) +#define INT_ADD_OVERFLOW(x, y) (INT_MAX - (x)) < (y) #define COMM_SHAPE_SIZE 4 #define MAX_SHAPE_SIZE 8 diff --git a/mindspore/lite/src/cxx_api/types.cc b/mindspore/lite/src/cxx_api/types.cc index aac33f13c7f..65eac7e525b 100644 --- a/mindspore/lite/src/cxx_api/types.cc +++ b/mindspore/lite/src/cxx_api/types.cc @@ -78,32 +78,31 @@ MSTensor *MSTensor::CreateTensor(const std::vector &name, enum DataType ty MS_LOG(ERROR) << "data_len is error."; return nullptr; } - void *new_data = nullptr; - if (data != nullptr) { - new_data = malloc(data_len); - if (new_data == nullptr) { - MS_LOG(ERROR) << "Allocate data failed."; - return nullptr; - } - ::memcpy(new_data, data, data_len); - } - auto impl = Impl::CreateTensorImpl(CharToString(name), type, shape, new_data, data_len); - if (impl == nullptr) { - MS_LOG(ERROR) << "Allocate tensor impl failed."; - if (new_data != nullptr) { - free(new_data); - } + if (data_len > 0 && data == nullptr) { + MS_LOG(ERROR) << "Null data ptr of tensor."; return nullptr; } - auto ms_tensor = new (std::nothrow) MSTensor(impl); - if (ms_tensor == nullptr) { + auto impl = Impl::CreateTensorImpl(CharToString(name), type, shape, nullptr, data_len); + if (impl == nullptr) { MS_LOG(ERROR) << "Allocate tensor impl failed."; - if (new_data != nullptr) { - free(new_data); - }
return nullptr; } impl->set_own_data(true); + + auto ms_tensor = new (std::nothrow) MSTensor(impl); + if (ms_tensor == nullptr) { + MS_LOG(ERROR) << "Allocate tensor impl failed."; + return nullptr; + } + + if (data != nullptr) { + if (ms_tensor->MutableData() == nullptr) { + MS_LOG(ERROR) << "Allocate data failed."; + delete ms_tensor; + return nullptr; + } + ::memcpy(ms_tensor->MutableData(), data, data_len); + } return ms_tensor; } @@ -161,26 +160,27 @@ MSTensor *MSTensor::Clone() const { MS_LOG(ERROR) << "Illegal data size of tensor."; return nullptr; } - auto new_data = malloc(data_len); - if (new_data == nullptr) { - MS_LOG(ERROR) << "Allocate data failed."; - return nullptr; - } - memset(new_data, 0, data_len); - auto impl = Impl::CreateTensorImpl(this->Name(), this->DataType(), this->Shape(), new_data, data_len); + auto impl = Impl::CreateTensorImpl(this->Name(), this->DataType(), this->Shape(), nullptr, data_len); if (impl == nullptr) { MS_LOG(ERROR) << "Allocate tensor impl failed."; - free(new_data); return nullptr; } + impl->set_own_data(true); + auto ms_tensor = new (std::nothrow) MSTensor(impl); if (ms_tensor == nullptr) { MS_LOG(ERROR) << "Allocate tensor impl failed."; - free(new_data); return nullptr; } - ::memcpy(new_data, impl_->MutableData(), data_len); - impl->set_own_data(true); + + if (impl_->Data() != nullptr) { + if (ms_tensor->MutableData() == nullptr) { + MS_LOG(ERROR) << "Allocate data failed."; + delete ms_tensor; + return nullptr; + } + ::memcpy(ms_tensor->MutableData(), impl_->MutableData(), data_len); + } return ms_tensor; } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/gather_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/gather_fp16.cc index 129875c6be2..034232a3613 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/gather_fp16.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/gather_fp16.cc @@ -48,6 +48,10 @@ int GatherFp16CPUKernel::Init() { const_input_ = true; input_data_ = 
reinterpret_cast(ms_context_->allocator->Malloc(input_tensor->ElementsNum() * sizeof(float16_t))); + if (input_data_ == nullptr) { + MS_LOG(ERROR) << "Malloc failed"; + return RET_ERROR; + } Float32ToFloat16(reinterpret_cast(input_tensor->data_c()), input_data_, input_tensor->ElementsNum()); } MS_ASSERT(in_tensors_.at(kSecondInput)->data_c() != nullptr);