From 0e24335f679d4f3b6623cce652bac447a3733645 Mon Sep 17 00:00:00 2001
From: zhaodezan
Date: Mon, 2 Aug 2021 18:52:23 +0800
Subject: [PATCH] fix mixed int and size_t

---
 .../cpu/nnacl/infer/common_infer.c            | 90 -------------------
 .../cpu/nnacl/infer/common_infer.h            |  7 --
 .../nnacl/infer/conv2d_grad_filter_infer.c    |  5 +-
 .../infer/group_conv2d_grad_input_infer.c     |  2 +-
 .../cpu/nnacl/infer/max_min_grad_infer.c      | 10 +--
 .../cpu/nnacl/infer/prior_box_infer.c         |  8 +-
 .../cpu/nnacl/infer/slice_infer.c             |  8 +-
 .../cpu/nnacl/infer/split_infer.c             |  2 +-
 .../cpu/nnacl/infer/strided_slice_infer.c     |  4 +-
 .../nnacl/infer/tensorlist_fromtensor_infer.c |  2 +-
 .../cpu/nnacl/infer/tile_infer.c              |  5 +-
 11 files changed, 26 insertions(+), 117 deletions(-)

diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/common_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/common_infer.c
index 7969d623685..b0d0bd476c5 100644
--- a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/common_infer.c
+++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/common_infer.c
@@ -385,96 +385,6 @@ int FftInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **ou
   return NNACL_OK;
 }
 
-int VectorCInit(VectorC *vc, size_t per_malloc_size) {
-  if (per_malloc_size == 0) {
-    return NNACL_ERR;
-  }
-  vc->data_ = (int *)malloc(per_malloc_size * sizeof(int));
-  if (vc->data_ == NULL) {
-    return NNACL_ERR;
-  }
-  vc->size_ = 0;
-  vc->max_size_ = per_malloc_size;
-  vc->per_malloc_size_ = per_malloc_size;
-  return NNACL_OK;
-}
-
-int VectorCSet(VectorC *vc, const int *src_shape, size_t src_shape_size) {
-  if (src_shape_size == 0) {
-    vc->size_ = 0;
-  } else {
-    free(vc->data_);
-    if (vc->per_malloc_size_ == 0) {
-      return NNACL_ERR;
-    }
-    vc->max_size_ = (src_shape_size / vc->per_malloc_size_ + 1) * vc->per_malloc_size_;
-    vc->data_ = (int *)malloc(sizeof(int) * vc->max_size_);
-    if (vc->data_ == NULL) {
-      return NNACL_ERR;
-    }
-    for (size_t i = 0; i < src_shape_size; i++) {
-      vc->data_[i] = src_shape[i];
-    }
-    vc->size_ = src_shape_size;
-  }
-  return NNACL_OK;
-}
-
-int VectorCPush(VectorC *vc, int value) {
-  if (vc->size_ + 1 > vc->max_size_) {
-    int *tmp = (int *)malloc(vc->per_malloc_size_ * sizeof(int) + vc->max_size_ * sizeof(int));
-    if (tmp == NULL) {
-      return NNACL_ERR;
-    }
-    memcpy(tmp, vc->data_, vc->size_ * sizeof(int));
-    free(vc->data_);
-    vc->data_ = tmp;
-    vc->max_size_ = vc->max_size_ + vc->per_malloc_size_;
-  }
-  vc->data_[vc->size_] = value;
-  vc->size_++;
-  return NNACL_OK;
-}
-
-int VectorCInsert(VectorC *vc, int index, int value) {
-  if (vc->size_ + 1 > vc->max_size_) {
-    int *tmp = (int *)malloc(vc->per_malloc_size_ * sizeof(int) + vc->max_size_ * sizeof(int));
-    if (tmp == NULL) {
-      return NNACL_ERR;
-    }
-    memcpy(tmp, vc->data_, vc->size_ * sizeof(int));
-    free(vc->data_);
-    vc->data_ = tmp;
-    vc->max_size_ = vc->max_size_ + vc->per_malloc_size_;
-  }
-  memmove(vc->data_ + index + 1, vc->data_ + index, (vc->size_ - index) * sizeof(int));
-  vc->data_[index] = value;
-  vc->size_++;
-  return NNACL_OK;
-}
-
-void VectorCErase(VectorC *vc, int index) {
-  memmove(vc->data_ + index, vc->data_ + index + 1, (vc->size_ - index - 1) * sizeof(int));
-  vc->size_--;
-}
-
-bool VectorCEqual(const VectorC *vc1, const VectorC *vc2) {
-  if (vc1->size_ != vc2->size_) {
-    return false;
-  }
-  for (size_t i = 0; i < vc1->size_; i++) {
-    if (vc1->data_[i] != vc2->data_[i]) {
-      return false;
-    }
-  }
-  return true;
-}
-
-void VectorCFree(VectorC *vc) {
-  free(vc->data_);
-  vc->data_ = NULL;
-}
-
 bool InferFlag(const TensorC *const *inputs, size_t inputs_size) {
   if (inputs == NULL) {
     return false;
diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/common_infer.h b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/common_infer.h
index 3b2a9e197b2..83cdf6cd81c 100644
--- a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/common_infer.h
+++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/common_infer.h
@@ -199,13 +199,6 @@ int CommonInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC *
 int FftInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size,
                   const OpParameter *parameter);
 
-int VectorCInit(VectorC *vc, size_t per_malloc_size);
-int VectorCSet(VectorC *vc, const int *src_shape, size_t src_shape_size);
-int VectorCPush(VectorC *vc, int value);
-int VectorCInsert(VectorC *vc, int index, int value);
-void VectorCErase(VectorC *vc, int index);
-bool VectorCEqual(const VectorC *vc1, const VectorC *vc2);
-void VectorCFree(VectorC *vc);
 bool InferFlag(const TensorC *const *inputs, size_t inputs_size);
 
 #ifdef __cplusplus
diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/conv2d_grad_filter_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/conv2d_grad_filter_infer.c
index 1dfe504b2eb..bfe2e068241 100644
--- a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/conv2d_grad_filter_infer.c
+++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/conv2d_grad_filter_infer.c
@@ -31,7 +31,10 @@ int Conv2dGradFilterInferShape(const TensorC *const *inputs, size_t inputs_size,
   if (inputs[2]->shape_size_ < 1 || inputs[2]->data_ == NULL) {
     return NNACL_ERR;
   }
-  size_t filter_shape_size = inputs[2]->shape_[0];
+  if (inputs[2]->shape_[0] < 0) {
+    return NNACL_ERR;
+  }
+  size_t filter_shape_size = (size_t)(inputs[2]->shape_[0]);
   if (filter_shape_size != 4) {
     return NNACL_ERR;
   }
diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/group_conv2d_grad_input_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/group_conv2d_grad_input_infer.c
index 64ac57b30a1..6101efec7d3 100644
--- a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/group_conv2d_grad_input_infer.c
+++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/group_conv2d_grad_input_infer.c
@@ -36,7 +36,7 @@ int GroupConv2dGradInputInferShape(const TensorC *const *inputs, size_t inputs_s
     return NNACL_INPUT_TENSOR_ERROR;
   }
   int shape_[MAX_SHAPE_SIZE];
-  for (int i = 0; i < shape_size_; i++) {
+  for (size_t i = 0; i < shape_size_; i++) {
     shape_[i] = in0->shape_[i];
   }
   SetShapeArray(out, shape_, shape_size_);
diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/max_min_grad_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/max_min_grad_infer.c
index fe84e5a1a8b..37b3f387731 100644
--- a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/max_min_grad_infer.c
+++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/max_min_grad_infer.c
@@ -41,11 +41,11 @@ int MaxMinGradInferShape(const TensorC *const *inputs, size_t inputs_size, Tenso
 
   ArithmeticParameter *param = (ArithmeticParameter *)parameter;
   param->ndim_ = dy->shape_size_;
-  param->in_elements_num0_ = param->ndim_;
-  param->in_elements_num1_ = param->ndim_;
-  param->out_elements_num_ = param->ndim_;
-  int fillDimNum0 = dy->shape_size_ - x1->shape_size_;
-  int fillDimNum1 = dy->shape_size_ - x2->shape_size_;
+  param->in_elements_num0_ = (int)(param->ndim_);
+  param->in_elements_num1_ = (int)(param->ndim_);
+  param->out_elements_num_ = (int)(param->ndim_);
+  int fillDimNum0 = (int)(dy->shape_size_ - x1->shape_size_);
+  int fillDimNum1 = (int)(dy->shape_size_ - x2->shape_size_);
   int j0 = 0;
   int j1 = 0;
   for (unsigned int i = 0; i < dy->shape_size_; i++) {
diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/prior_box_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/prior_box_infer.c
index a1aaee328b6..55481331ee6 100644
--- a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/prior_box_infer.c
+++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/prior_box_infer.c
@@ -38,8 +38,8 @@ int PriorBoxInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC
 
   PriorBoxParameter *param = (PriorBoxParameter *)parameter;
   float *aspect_ratios = param->aspect_ratios;
-  size_t aspect_ratios_size = param->aspect_ratios_size;
-  for (size_t i = 0; i < aspect_ratios_size; i++) {
+  int32_t aspect_ratios_size = param->aspect_ratios_size;
+  for (int32_t i = 0; i < aspect_ratios_size; i++) {
     float ratio = aspect_ratios[i];
     if (ratio == 0) {
       return NNACL_ERR;
@@ -62,8 +62,8 @@ int PriorBoxInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC
     }
   }
 
-  size_t min_sizes_size = param->min_sizes_size;
-  size_t max_sizes_size = param->max_sizes_size;
+  int32_t min_sizes_size = param->min_sizes_size;
+  int32_t max_sizes_size = param->max_sizes_size;
   int32_t num_priors_box = min_sizes_size * different_aspect_ratios_size + max_sizes_size;
   const int kPriorBoxPoints = 4;
   const int kPriorBoxN = 1;
diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/slice_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/slice_infer.c
index 91a3121c048..b029d3a9aa5 100644
--- a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/slice_infer.c
+++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/slice_infer.c
@@ -58,7 +58,7 @@ int SliceInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **
   output->shape_size_ = input->shape_size_;
 
   /* init begin parameter */
-  size_t slice_begin_size = GetElementNum(inputs[1]);
+  int slice_begin_size = GetElementNum(inputs[1]);
   int *begin_ptr = (int *)(inputs[1]->data_);
   if (slice_begin_size != param->param_length_ || begin_ptr == NULL) {
     return NNACL_INFER_INVALID;
@@ -68,7 +68,7 @@ int SliceInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **
   }
 
   /* init size parameter */
-  size_t slice_size_size = GetElementNum(inputs[2]);
+  int slice_size_size = GetElementNum(inputs[2]);
   int *size_ptr = (int *)(inputs[2]->data_);
   if (slice_size_size != param->param_length_ || size_ptr == NULL) {
     return NNACL_INFER_INVALID;
@@ -80,12 +80,12 @@ int SliceInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **
   /* infer output shape information */
   int begin[MAX_SHAPE_SIZE];
   int size[MAX_SHAPE_SIZE];
-  for (size_t i = 0; i < param->param_length_; ++i) {
+  for (int32_t i = 0; i < param->param_length_; ++i) {
     begin[param->axis_[i]] = param->begin_[i];
     size[param->axis_[i]] = param->size_[i];
   }
 
-  for (size_t i = 0; i < param->param_length_; ++i) {
+  for (int32_t i = 0; i < param->param_length_; ++i) {
     if (size[i] < 0 && size[i] != -1) {
       return NNACL_PARAM_INVALID;
     }
diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/split_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/split_infer.c
index 533a32824f7..a0474f3fc60 100644
--- a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/split_infer.c
+++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/split_infer.c
@@ -31,7 +31,7 @@ int SplitInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **
 
   SplitParameter *param = (SplitParameter *)parameter;
 
-  size_t num_split_ = param->num_split_ == 0 ? (int)(outputs_size) : param->num_split_;
+  int num_split_ = param->num_split_ == 0 ? (int)(outputs_size) : param->num_split_;
   if (num_split_ == 0) {
     return NNACL_ERR;
   }
diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/strided_slice_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/strided_slice_infer.c
index 442d95624d3..aed280cfbe6 100644
--- a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/strided_slice_infer.c
+++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/strided_slice_infer.c
@@ -236,7 +236,7 @@ void ApplyBeginMask(StridedSliceTransferBuffer *transfer_buffer) {
 }
 
 int ApplyEndMask(StridedSliceTransferBuffer *transfer_buffer, const int *in_shape, size_t in_shape_size) {
-  for (int i = 0; i < transfer_buffer->ndim_; i++) {
+  for (size_t i = 0; i < (size_t)(transfer_buffer->ndim_); i++) {
     if (transfer_buffer->ends_mask_[i]) {
       if (i >= in_shape_size) {
         return NNACL_ERR;
@@ -296,7 +296,7 @@ void ApplyShrinkMask(StridedSliceTransferBuffer *transfer_buffer, int *output_sh
 
 int TransferBuffer2Param(const StridedSliceTransferBuffer *transfer_buffer, StridedSliceParameter *param,
                          const int *in_shape, size_t in_shape_size) {
-  if (transfer_buffer->ndim_ >= in_shape_size || param->in_shape_length_ >= in_shape_size) {
+  if (transfer_buffer->ndim_ >= (int)(in_shape_size) || param->in_shape_length_ >= (int)(in_shape_size)) {
     return NNACL_ERR;
   }
   for (int i = 0; i < transfer_buffer->ndim_; i++) {
diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/tensorlist_fromtensor_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/tensorlist_fromtensor_infer.c
index d2cf972edb9..57498b75ebe 100644
--- a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/tensorlist_fromtensor_infer.c
+++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/tensorlist_fromtensor_infer.c
@@ -48,7 +48,7 @@ int TensorListFromTensorInferShape(const TensorC *const *inputs, size_t inputs_s
   int *ele_shape_ptr = (int *)(input1->data_);
 
   vvector tensor_shape;
-  tensor_shape.size_ = dim0;
+  tensor_shape.size_ = (size_t)(dim0);
   tensor_shape.shape_ = (int **)malloc(tensor_shape.size_ * sizeof(int *));
   if (tensor_shape.shape_ == NULL) {
     return NNACL_NULL_PTR;
diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/tile_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/tile_infer.c
index ecf1db30156..7ffe90f647b 100644
--- a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/tile_infer.c
+++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/tile_infer.c
@@ -60,7 +60,10 @@ int TileInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **o
   if (data_num > (int)(input->shape_size_) || input->shape_size_ > MAX_SHAPE_SIZE) {
     return NNACL_INPUT_TENSOR_ERROR;
   }
-  multiples_size = data_num;
+  if (data_num < 0) {
+    return NNACL_ERR;
+  }
+  multiples_size = (size_t)(data_num);
   if (inputs[1]->data_type_ != kNumberTypeInt && inputs[1]->data_type_ != kNumberTypeInt32) {
     return NNACL_INPUT_TENSOR_ERROR;
   }
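
The convention the diff applies in each hunk can be summarized in a small standalone sketch. This is illustrative only and not part of the patch; demo_infer, shape_dim0, and param_length are hypothetical names. It shows the two moves the change makes: reject a negative int before widening it to size_t, and cast explicitly (assuming the value fits) when a size_t value feeds a signed int field or comparison.

/* Illustrative sketch only, not part of the patch above. */
#include <stddef.h>
#include <stdio.h>

#define DEMO_ERR (-1)
#define DEMO_OK 0

static int demo_infer(int shape_dim0, size_t param_length) {
  /* Reject a negative dimension before converting, as the patch does for
   * inputs[2]->shape_[0] and data_num. */
  if (shape_dim0 < 0) {
    return DEMO_ERR;
  }
  size_t filter_size = (size_t)shape_dim0; /* explicit int -> size_t */

  /* Explicit size_t -> int, assuming the value fits in int, mirroring
   * (int)(param->ndim_) and the (int)(in_shape_size) comparisons. */
  int out_elements = (int)param_length;

  printf("filter_size=%zu out_elements=%d\n", filter_size, out_elements);
  return DEMO_OK;
}

int main(void) { return demo_infer(4, 16) == DEMO_OK ? 0 : 1; }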