!22207 [MSLITE][DEVELOP] sync some fixed bugs

Merge pull request !22207 from yangruoqi713/r1.3_bug
i-robot 2021-08-23 11:06:09 +00:00 committed by Gitee
commit 5503ce674c
7 changed files with 52 additions and 33 deletions

@@ -74,6 +74,10 @@ void pad_input_shape(int *input_shape, int input_shape_len, int output_shape_len
                                                                                         \
     int dim_index = dim_max - 1;                                                        \
     while (dim_index >= 0) {                                                            \
+      if (input_shape[dim_index] == 0) {                                                \
+        free(data_temp);                                                                \
+        return NNACL_ERR;                                                               \
+      }                                                                                 \
       dim_broadcast_rate = (size_t)(output_shape[dim_index] / input_shape[dim_index]);  \
       if (dim_broadcast_rate > 1) {                                                     \
         before_dim_elements_num = accumulate(input_shape, 0, dim_index - 1);            \
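
Note: the added guard rejects a malformed input shape with a zero-sized dimension before it reaches the division, and releases the scratch buffer on the error path. A minimal standalone sketch of the same check outside the macro (the function and variable names here are illustrative, not from the NNACL sources):

#include <cstddef>
#include <cstdio>
#include <cstdlib>

// Returns false (after freeing the scratch buffer) if any input dim is 0,
// mirroring the macro's free(data_temp); return NNACL_ERR; path above.
bool ComputeBroadcastRates(const int *input_shape, const int *output_shape,
                           int shape_len, size_t *rates, void *scratch) {
  for (int dim_index = shape_len - 1; dim_index >= 0; --dim_index) {
    if (input_shape[dim_index] == 0) {  // would divide by zero below
      free(scratch);
      return false;
    }
    rates[dim_index] = (size_t)(output_shape[dim_index] / input_shape[dim_index]);
  }
  return true;
}

int main() {
  int in[4] = {1, 3, 0, 5};  // malformed: zero-sized dim 2
  int out[4] = {2, 3, 4, 5};
  size_t rates[4];
  void *scratch = malloc(16);
  if (!ComputeBroadcastRates(in, out, 4, rates, scratch)) {
    printf("invalid input shape\n");  // scratch already freed on this path
    return 1;
  }
  free(scratch);
  return 0;
}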

@@ -416,7 +416,7 @@ void ConvDwFp16(float16_t *output_data, const float16_t *input_data, const float
       memcpy(dst_data + ow * conv_param->output_channel_, bias_data, conv_param->output_channel_ * sizeof(float16_t));
     }
     for (int kh = start_kh; kh < end_kh; kh++) {
-      int ih = ih_origin + conv_param->dilation_w_ * kh;
+      int ih = ih_origin + conv_param->dilation_h_ * kh;
       const float16_t *src_kh = src + ih * conv_param->input_w_ * conv_param->input_channel_;
       const float16_t *weight_kh = weight_data + kh * conv_param->kernel_w_ * conv_param->output_channel_;

@@ -55,7 +55,7 @@ void ConvDw(float *output_data, const float *input_data, const float *weight_dat
                conv_param->output_channel_ * (int)(sizeof(float)));
     }
     for (int kh = start_kh; kh < end_kh; kh++) {
-      int ih = ih_origin + conv_param->dilation_w_ * kh;
+      int ih = ih_origin + conv_param->dilation_h_ * kh;
       const float *src_kh = src + ih * conv_param->input_w_ * conv_param->input_channel_;
       const float *weight_kh = weight_data + kh * conv_param->kernel_w_ * conv_param->output_channel_;
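
Note: both the fp16 and fp32 depthwise kernels above stepped across kernel rows with the horizontal dilation factor, so any model with dilation_h_ != dilation_w_ sampled the wrong input rows. A small self-contained check of the fixed index computation (ConvParams and the values are illustrative stand-ins, not the real nnacl ConvParameter):

#include <cassert>

struct ConvParams {  // illustrative stand-in for ConvParameter
  int dilation_h_;
  int dilation_w_;
};

// The patched row index: vertical stepping uses the vertical dilation.
int InputRow(int ih_origin, int kh, const ConvParams &p) {
  return ih_origin + p.dilation_h_ * kh;  // was: p.dilation_w_ * kh
}

int main() {
  ConvParams p{/*dilation_h_=*/2, /*dilation_w_=*/1};
  // With the old code this would be 1: kernel row 1 would read input row 1
  // instead of row 2 whenever the two dilation factors differ.
  assert(InputRow(/*ih_origin=*/0, /*kh=*/1, p) == 2);
  return 0;
}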

@@ -24,6 +24,12 @@ int ConvInferShape(int input_h, int input_w, int *output_h, int *output_w, ConvP
   int dilate_w = param->dilation_w_;
   int dilate_h = param->dilation_h_;
+  if (stride_w == 0 || stride_h == 0) {
+    return NNACL_PARAM_INVALID;
+  }
+  if (INT_MUL_OVERFLOW(kernel_h, dilate_h) || INT_MUL_OVERFLOW(kernel_w, dilate_w)) {
+    return NNACL_ERRCODE_MUL_OVERFLOW;
+  }
   if (param->pad_mode_ == Pad_same) {  // maybe error
     *output_w = ceil((float)(input_w) / (float)(stride_w));
     *output_h = ceil((float)(input_h) / (float)(stride_h));
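
Note: ConvInferShape now rejects zero strides before the ceil division and overflow-prone kernel-times-dilation products before they are used. A compact sketch of the guarded Pad_same computation (the standalone function and boolean return convention are assumptions for illustration; operands are taken as non-negative, as kernel and dilation sizes are):

#include <climits>
#include <cmath>
#include <cstdio>

// Stand-in for INT_MUL_OVERFLOW, valid for the non-negative operands used here.
static bool MulOverflows(int x, int y) { return x != 0 && y > INT_MAX / x; }

// Returns false on invalid params instead of dividing by zero or overflowing.
bool SameOutputShape(int input_h, int input_w, int stride_h, int stride_w,
                     int kernel_h, int kernel_w, int dilate_h, int dilate_w,
                     int *output_h, int *output_w) {
  if (stride_w == 0 || stride_h == 0) return false;  // guards the divisions below
  if (MulOverflows(kernel_h, dilate_h) || MulOverflows(kernel_w, dilate_w)) return false;
  *output_w = (int)std::ceil((float)input_w / (float)stride_w);
  *output_h = (int)std::ceil((float)input_h / (float)stride_h);
  return true;
}

int main() {
  int oh = 0, ow = 0;
  if (SameOutputShape(224, 224, 2, 2, 3, 3, 1, 1, &oh, &ow)) {
    printf("%d x %d\n", oh, ow);  // prints 112 x 112
  }
  return 0;
}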

@@ -48,6 +48,11 @@
 #define DOWN_ROUND(x, y) ((x) / (y) * (y))
 #define MSVALID(left, x, right) (MSMIN((MSMAX(left, x)), right))
+#define INT_MUL_OVERFLOW(x, y)                                                               \
+  ((x == 0) ? false                                                                          \
+            : ((x) > 0 ? ((y >= 0) ? (INT_MAX / (x)) < (y) : (INT_MAX / (x)) < (-1 * (y)))   \
+                       : ((y >= 0) ? (INT_MAX / (x)) > (-1 * (y)) : (INT_MAX / (x)) > (y))))
+#define INT_ADD_OVERFLOW(x, y) (INT_MAX - (x)) < (y)
 #define COMM_SHAPE_SIZE 4
 #define MAX_SHAPE_SIZE 8
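
Note: INT_MUL_OVERFLOW predicts whether x * y would exceed INT_MAX without performing the multiplication, relying on the x == 0 short-circuit so that INT_MAX / (x) is never evaluated with a zero divisor. INT_ADD_OVERFLOW's expansion is left unparenthesized, so call sites should wrap it when combining it with other operators. A small usage check, with the macros copied from the hunk for self-containment (the probe values are examples):

#include <climits>
#include <cstdio>

#define INT_MUL_OVERFLOW(x, y)                                                               \
  ((x == 0) ? false                                                                          \
            : ((x) > 0 ? ((y >= 0) ? (INT_MAX / (x)) < (y) : (INT_MAX / (x)) < (-1 * (y)))   \
                       : ((y >= 0) ? (INT_MAX / (x)) > (-1 * (y)) : (INT_MAX / (x)) > (y))))
#define INT_ADD_OVERFLOW(x, y) (INT_MAX - (x)) < (y)

int main() {
  printf("%d\n", INT_MUL_OVERFLOW(1 << 20, 1 << 12));  // 1: 2^32 overflows int32
  printf("%d\n", INT_MUL_OVERFLOW(3, 5));              // 0: fits comfortably
  printf("%d\n", INT_ADD_OVERFLOW(INT_MAX, 1));        // 1
  return 0;
}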

@@ -78,32 +78,31 @@ MSTensor *MSTensor::CreateTensor(const std::vector<char> &name, enum DataType ty
     MS_LOG(ERROR) << "data_len is error.";
     return nullptr;
   }
-  void *new_data = nullptr;
-  if (data != nullptr) {
-    new_data = malloc(data_len);
-    if (new_data == nullptr) {
-      MS_LOG(ERROR) << "Allocate data failed.";
-      return nullptr;
-    }
-    ::memcpy(new_data, data, data_len);
-  }
-  auto impl = Impl::CreateTensorImpl(CharToString(name), type, shape, new_data, data_len);
-  if (impl == nullptr) {
-    MS_LOG(ERROR) << "Allocate tensor impl failed.";
-    if (new_data != nullptr) {
-      free(new_data);
-    }
+  if (data_len > 0 && data == nullptr) {
+    MS_LOG(ERROR) << "Null data ptr of tensor.";
     return nullptr;
   }
-  auto ms_tensor = new (std::nothrow) MSTensor(impl);
-  if (ms_tensor == nullptr) {
+  auto impl = Impl::CreateTensorImpl(CharToString(name), type, shape, nullptr, data_len);
+  if (impl == nullptr) {
     MS_LOG(ERROR) << "Allocate tensor impl failed.";
-    if (new_data != nullptr) {
-      free(new_data);
-    }
     return nullptr;
   }
   impl->set_own_data(true);
+  auto ms_tensor = new (std::nothrow) MSTensor(impl);
+  if (ms_tensor == nullptr) {
+    MS_LOG(ERROR) << "Allocate tensor impl failed.";
+    return nullptr;
+  }
+  if (data != nullptr) {
+    if (ms_tensor->MutableData() == nullptr) {
+      MS_LOG(ERROR) << "Allocate data failed.";
+      delete ms_tensor;
+      return nullptr;
+    }
+    ::memcpy(ms_tensor->MutableData(), data, data_len);
+  }
   return ms_tensor;
 }
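
Note: the rewritten factory drops the manually tracked malloc whose ownership had to be juggled across every error path. The impl is created without data, marked as owning its storage via set_own_data(true), and the copy goes through MutableData(), so each failure branch is a plain return. A usage sketch, assuming the public MSTensor factory these sources implement (the header path and enum names may vary by release):

#include <cstdio>
#include <vector>
#include "include/api/types.h"

int main() {
  std::vector<float> host = {1.f, 2.f, 3.f, 4.f};
  // The tensor copies `host` into storage it owns, so the caller's buffer can
  // go away immediately; a null data pointer with data_len > 0 is now rejected.
  auto *t = mindspore::MSTensor::CreateTensor("x", mindspore::DataType::kNumberTypeFloat32,
                                              {2, 2}, host.data(), host.size() * sizeof(float));
  if (t == nullptr) return 1;
  printf("bytes: %zu\n", t->DataSize());  // prints 16
  mindspore::MSTensor::DestroyTensorPtr(t);
  return 0;
}
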
@@ -161,26 +160,27 @@ MSTensor *MSTensor::Clone() const {
     MS_LOG(ERROR) << "Illegal data size of tensor.";
     return nullptr;
   }
-  auto new_data = malloc(data_len);
-  if (new_data == nullptr) {
-    MS_LOG(ERROR) << "Allocate data failed.";
-    return nullptr;
-  }
-  memset(new_data, 0, data_len);
-  auto impl = Impl::CreateTensorImpl(this->Name(), this->DataType(), this->Shape(), new_data, data_len);
+  auto impl = Impl::CreateTensorImpl(this->Name(), this->DataType(), this->Shape(), nullptr, data_len);
   if (impl == nullptr) {
     MS_LOG(ERROR) << "Allocate tensor impl failed.";
-    free(new_data);
     return nullptr;
   }
+  impl->set_own_data(true);
   auto ms_tensor = new (std::nothrow) MSTensor(impl);
   if (ms_tensor == nullptr) {
     MS_LOG(ERROR) << "Allocate tensor impl failed.";
-    free(new_data);
     return nullptr;
   }
-  ::memcpy(new_data, impl_->MutableData(), data_len);
-  impl->set_own_data(true);
+  if (impl_->Data() != nullptr) {
+    if (ms_tensor->MutableData() == nullptr) {
+      MS_LOG(ERROR) << "Allocate data failed.";
+      delete ms_tensor;
+      return nullptr;
+    }
+    ::memcpy(ms_tensor->MutableData(), impl_->MutableData(), data_len);
+  }
   return ms_tensor;
 }
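
Note: Clone() now follows the same allocate-through-MutableData pattern and skips the copy entirely when the source tensor has no data, where the old code memcpy'd from impl_->MutableData() unconditionally. A quick deep-copy check, under the same API assumptions as the sketch above:

#include <cstdio>
#include <vector>
#include "include/api/types.h"

int main() {
  std::vector<float> host = {1.f, 2.f, 3.f, 4.f};
  auto *src = mindspore::MSTensor::CreateTensor("x", mindspore::DataType::kNumberTypeFloat32,
                                                {2, 2}, host.data(), host.size() * sizeof(float));
  if (src == nullptr) return 1;
  auto *dup = src->Clone();  // deep copy; dup owns a freshly allocated buffer
  if (dup == nullptr) return 1;
  printf("distinct buffers: %d\n", src->Data().get() != dup->Data().get());  // prints 1
  mindspore::MSTensor::DestroyTensorPtr(src);
  mindspore::MSTensor::DestroyTensorPtr(dup);
  return 0;
}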

@@ -48,6 +48,10 @@ int GatherFp16CPUKernel::Init() {
     const_input_ = true;
     input_data_ =
       reinterpret_cast<float16_t *>(ms_context_->allocator->Malloc(input_tensor->ElementsNum() * sizeof(float16_t)));
+    if (input_data_ == nullptr) {
+      MS_LOG(ERROR) << "Malloc failed";
+      return RET_ERROR;
+    }
     Float32ToFloat16(reinterpret_cast<float *>(input_tensor->data_c()), input_data_, input_tensor->ElementsNum());
   }
   MS_ASSERT(in_tensors_.at(kSecondInput)->data_c() != nullptr);
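
Note: the allocator result is now checked before Float32ToFloat16 writes through it. A minimal sketch of the fail-fast pattern (the Allocator here is a stand-in, not the actual mindspore::Allocator interface, and uint16_t stands in for float16_t on non-ARM hosts):

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstdlib>

struct Allocator {  // stand-in allocator
  void *Malloc(std::size_t size) { return malloc(size); }
  void Free(void *ptr) { free(ptr); }
};

enum { RET_OK = 0, RET_ERROR = 1 };

int PrepareFp16Input(Allocator *alloc, const float *src, int elements) {
  (void)src;  // consumed by the conversion step sketched below
  auto *dst = reinterpret_cast<uint16_t *>(alloc->Malloc(elements * sizeof(uint16_t)));
  if (dst == nullptr) {  // the added guard: fail fast instead of writing through null
    printf("Malloc failed\n");
    return RET_ERROR;
  }
  // ... Float32ToFloat16(src, dst, elements) would run here ...
  alloc->Free(dst);
  return RET_OK;
}

int main() {
  Allocator alloc;
  const float data[4] = {1.f, 2.f, 3.f, 4.f};
  return PrepareFp16Input(&alloc, data, 4);
}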