forked from mindspore-Ecosystem/mindspore
!21266 fix mixed int and size_t
Merge pull request !21266 from zhaodezan/master_fix_magic
This commit is contained in:
commit
668eab285d
|
@ -401,96 +401,6 @@ int FftInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **ou
|
|||
return NNACL_OK;
|
||||
}
|
||||
|
||||
/* Initialize a growable int vector.
 * per_malloc_size is both the initial capacity and the growth step used by
 * VectorCPush/VectorCInsert. Returns NNACL_OK on success, NNACL_ERR on a
 * zero step, an oversized request, or allocation failure. */
int VectorCInit(VectorC *vc, size_t per_malloc_size) {
  if (per_malloc_size == 0) {
    return NNACL_ERR;
  }
  /* Reject requests whose byte count would overflow size_t; (size_t)-1 is
   * SIZE_MAX without requiring <stdint.h>. */
  if (per_malloc_size > ((size_t)-1) / sizeof(int)) {
    return NNACL_ERR;
  }
  vc->data_ = (int *)malloc(per_malloc_size * sizeof(int));
  if (vc->data_ == NULL) {
    return NNACL_ERR;
  }
  vc->size_ = 0;
  vc->max_size_ = per_malloc_size;
  vc->per_malloc_size_ = per_malloc_size;
  return NNACL_OK;
}
|
||||
|
||||
/* Replace the vector's contents with src_shape[0..src_shape_size).
 * Capacity is rounded up to the next multiple of per_malloc_size_.
 * Returns NNACL_OK on success, NNACL_ERR on a zero growth step, an
 * oversized request, or allocation failure.
 * Fix vs. previous version: the new buffer is allocated BEFORE the old one
 * is freed, so on failure the vector is left unchanged instead of holding a
 * NULL data_ with stale size_/max_size_. */
int VectorCSet(VectorC *vc, const int *src_shape, size_t src_shape_size) {
  if (src_shape_size == 0) {
    vc->size_ = 0;
    return NNACL_OK;
  }
  if (vc->per_malloc_size_ == 0) {
    return NNACL_ERR;
  }
  /* Round capacity up to a whole number of growth chunks (same formula as
   * before: always at least one chunk beyond the exact fit quotient). */
  size_t new_max = (src_shape_size / vc->per_malloc_size_ + 1) * vc->per_malloc_size_;
  if (new_max > ((size_t)-1) / sizeof(int)) {
    return NNACL_ERR;
  }
  int *new_data = (int *)malloc(sizeof(int) * new_max);
  if (new_data == NULL) {
    return NNACL_ERR;  /* vector still intact */
  }
  for (size_t i = 0; i < src_shape_size; i++) {
    new_data[i] = src_shape[i];
  }
  free(vc->data_);
  vc->data_ = new_data;
  vc->max_size_ = new_max;
  vc->size_ = src_shape_size;
  return NNACL_OK;
}
|
||||
|
||||
/* Append value to the end of the vector, growing the buffer by
 * per_malloc_size_ elements when full.
 * Returns NNACL_OK on success, NNACL_ERR on allocation failure, a zero
 * growth step, or capacity overflow.
 * Fix vs. previous version: when per_malloc_size_ == 0 and the vector was
 * full, the reallocated buffer had the SAME size and data_[size_] was an
 * out-of-bounds write; that case now fails explicitly. */
int VectorCPush(VectorC *vc, int value) {
  if (vc->size_ + 1 > vc->max_size_) {
    if (vc->per_malloc_size_ == 0) {
      return NNACL_ERR;  /* zero growth step would never make room */
    }
    size_t new_max = vc->max_size_ + vc->per_malloc_size_;
    if (new_max < vc->max_size_ || new_max > ((size_t)-1) / sizeof(int)) {
      return NNACL_ERR;  /* capacity or byte-count overflow */
    }
    int *tmp = (int *)malloc(new_max * sizeof(int));
    if (tmp == NULL) {
      return NNACL_ERR;
    }
    memcpy(tmp, vc->data_, vc->size_ * sizeof(int));
    free(vc->data_);
    vc->data_ = tmp;
    vc->max_size_ = new_max;
  }
  vc->data_[vc->size_] = value;
  vc->size_++;
  return NNACL_OK;
}
|
||||
|
||||
/* Insert value at position index, shifting later elements right by one.
 * Valid indices are 0..size_ inclusive (index == size_ appends).
 * Returns NNACL_OK on success, NNACL_ERR on an out-of-range index,
 * allocation failure, a zero growth step, or capacity overflow.
 * Fixes vs. previous version: (1) an index check — a negative or
 * past-the-end index previously drove memmove out of bounds; (2) the
 * zero-growth-step case no longer writes past the buffer (same bug as
 * VectorCPush). */
int VectorCInsert(VectorC *vc, int index, int value) {
  if (index < 0 || (size_t)index > vc->size_) {
    return NNACL_ERR;
  }
  if (vc->size_ + 1 > vc->max_size_) {
    if (vc->per_malloc_size_ == 0) {
      return NNACL_ERR;  /* zero growth step would never make room */
    }
    size_t new_max = vc->max_size_ + vc->per_malloc_size_;
    if (new_max < vc->max_size_ || new_max > ((size_t)-1) / sizeof(int)) {
      return NNACL_ERR;  /* capacity or byte-count overflow */
    }
    int *tmp = (int *)malloc(new_max * sizeof(int));
    if (tmp == NULL) {
      return NNACL_ERR;
    }
    memcpy(tmp, vc->data_, vc->size_ * sizeof(int));
    free(vc->data_);
    vc->data_ = tmp;
    vc->max_size_ = new_max;
  }
  /* memmove handles the overlapping shift; count may be zero when appending. */
  memmove(vc->data_ + index + 1, vc->data_ + index, (vc->size_ - (size_t)index) * sizeof(int));
  vc->data_[index] = value;
  vc->size_++;
  return NNACL_OK;
}
|
||||
|
||||
/* Remove the element at position index, shifting later elements left.
 * Fix vs. previous version: an out-of-range index (negative, or >= size_,
 * including calls on an empty vector) previously caused an out-of-bounds
 * memmove and a size_t underflow of size_; such calls are now ignored. */
void VectorCErase(VectorC *vc, int index) {
  if (index < 0 || (size_t)index >= vc->size_) {
    return;  /* nothing valid to erase */
  }
  memmove(vc->data_ + index, vc->data_ + index + 1, (vc->size_ - (size_t)index - 1) * sizeof(int));
  vc->size_--;
}
|
||||
|
||||
bool VectorCEqual(const VectorC *vc1, const VectorC *vc2) {
|
||||
if (vc1->size_ != vc2->size_) {
|
||||
return false;
|
||||
}
|
||||
for (size_t i = 0; i < vc1->size_; i++) {
|
||||
if (vc1->data_[i] != vc2->data_[i]) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
/* Release the vector's backing storage and clear the pointer so later
 * VectorCFree calls (free(NULL)) and accidental reuse are harmless. */
void VectorCFree(VectorC *vc) {
  int *buf = vc->data_;
  vc->data_ = NULL;
  free(buf);
}
|
||||
|
||||
bool InferFlag(const TensorC *const *inputs, size_t inputs_size) {
|
||||
if (inputs == NULL) {
|
||||
return false;
|
||||
|
|
|
@ -202,13 +202,6 @@ int CommonInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC *
|
|||
int FftInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size,
|
||||
const OpParameter *parameter);
|
||||
|
||||
int VectorCInit(VectorC *vc, size_t per_malloc_size);
|
||||
int VectorCSet(VectorC *vc, const int *src_shape, size_t src_shape_size);
|
||||
int VectorCPush(VectorC *vc, int value);
|
||||
int VectorCInsert(VectorC *vc, int index, int value);
|
||||
void VectorCErase(VectorC *vc, int index);
|
||||
bool VectorCEqual(const VectorC *vc1, const VectorC *vc2);
|
||||
void VectorCFree(VectorC *vc);
|
||||
bool InferFlag(const TensorC *const *inputs, size_t inputs_size);
|
||||
|
||||
#ifdef __cplusplus
|
||||
|
|
|
@ -34,7 +34,10 @@ int Conv2dGradFilterInferShape(const TensorC *const *inputs, size_t inputs_size,
|
|||
if (inputs[2]->shape_size_ < 1 || inputs[2]->data_ == NULL) {
|
||||
return NNACL_ERR;
|
||||
}
|
||||
size_t filter_shape_size = inputs[2]->shape_[0];
|
||||
if (inputs[2]->shape_[0] < 0) {
|
||||
return NNACL_ERR;
|
||||
}
|
||||
size_t filter_shape_size = (size_t)(inputs[2]->shape_[0]);
|
||||
if (filter_shape_size != 4) {
|
||||
return NNACL_ERR;
|
||||
}
|
||||
|
|
|
@ -36,7 +36,7 @@ int GroupConv2dGradInputInferShape(const TensorC *const *inputs, size_t inputs_s
|
|||
return NNACL_INPUT_TENSOR_ERROR;
|
||||
}
|
||||
int shape_[MAX_SHAPE_SIZE];
|
||||
for (int i = 0; i < shape_size_; i++) {
|
||||
for (size_t i = 0; i < shape_size_; i++) {
|
||||
shape_[i] = in0->shape_[i];
|
||||
}
|
||||
SetShapeArray(out, shape_, shape_size_);
|
||||
|
|
|
@ -41,11 +41,11 @@ int MaxMinGradInferShape(const TensorC *const *inputs, size_t inputs_size, Tenso
|
|||
ArithmeticParameter *param = (ArithmeticParameter *)parameter;
|
||||
|
||||
param->ndim_ = dy->shape_size_;
|
||||
param->in_elements_num0_ = param->ndim_;
|
||||
param->in_elements_num1_ = param->ndim_;
|
||||
param->out_elements_num_ = param->ndim_;
|
||||
int fillDimNum0 = dy->shape_size_ - x1->shape_size_;
|
||||
int fillDimNum1 = dy->shape_size_ - x2->shape_size_;
|
||||
param->in_elements_num0_ = (int)(param->ndim_);
|
||||
param->in_elements_num1_ = (int)(param->ndim_);
|
||||
param->out_elements_num_ = (int)(param->ndim_);
|
||||
int fillDimNum0 = (int)(dy->shape_size_ - x1->shape_size_);
|
||||
int fillDimNum1 = (int)(dy->shape_size_ - x2->shape_size_);
|
||||
int j0 = 0;
|
||||
int j1 = 0;
|
||||
for (unsigned int i = 0; i < dy->shape_size_; i++) {
|
||||
|
|
|
@ -38,8 +38,8 @@ int PriorBoxInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC
|
|||
|
||||
PriorBoxParameter *param = (PriorBoxParameter *)parameter;
|
||||
float *aspect_ratios = param->aspect_ratios;
|
||||
size_t aspect_ratios_size = param->aspect_ratios_size;
|
||||
for (size_t i = 0; i < aspect_ratios_size; i++) {
|
||||
int32_t aspect_ratios_size = param->aspect_ratios_size;
|
||||
for (int32_t i = 0; i < aspect_ratios_size; i++) {
|
||||
float ratio = aspect_ratios[i];
|
||||
if (ratio == 0) {
|
||||
return NNACL_ERR;
|
||||
|
@ -62,8 +62,8 @@ int PriorBoxInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC
|
|||
}
|
||||
}
|
||||
|
||||
size_t min_sizes_size = param->min_sizes_size;
|
||||
size_t max_sizes_size = param->max_sizes_size;
|
||||
int32_t min_sizes_size = param->min_sizes_size;
|
||||
int32_t max_sizes_size = param->max_sizes_size;
|
||||
int32_t num_priors_box = min_sizes_size * different_aspect_ratios_size + max_sizes_size;
|
||||
const int kPriorBoxPoints = 4;
|
||||
const int kPriorBoxN = 1;
|
||||
|
|
|
@ -58,7 +58,7 @@ int SliceInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **
|
|||
output->shape_size_ = input->shape_size_;
|
||||
|
||||
/* init begin parameter */
|
||||
size_t slice_begin_size = GetElementNum(inputs[1]);
|
||||
int slice_begin_size = GetElementNum(inputs[1]);
|
||||
int *begin_ptr = (int *)(inputs[1]->data_);
|
||||
if (slice_begin_size != param->param_length_ || begin_ptr == NULL) {
|
||||
return NNACL_INFER_INVALID;
|
||||
|
@ -68,7 +68,7 @@ int SliceInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **
|
|||
}
|
||||
|
||||
/* init size parameter */
|
||||
size_t slice_size_size = GetElementNum(inputs[2]);
|
||||
int slice_size_size = GetElementNum(inputs[2]);
|
||||
int *size_ptr = (int *)(inputs[2]->data_);
|
||||
if (slice_size_size != param->param_length_ || size_ptr == NULL) {
|
||||
return NNACL_INFER_INVALID;
|
||||
|
@ -80,12 +80,12 @@ int SliceInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **
|
|||
/* infer output shape information */
|
||||
int begin[MAX_SHAPE_SIZE];
|
||||
int size[MAX_SHAPE_SIZE];
|
||||
for (size_t i = 0; i < param->param_length_; ++i) {
|
||||
for (int32_t i = 0; i < param->param_length_; ++i) {
|
||||
begin[param->axis_[i]] = param->begin_[i];
|
||||
size[param->axis_[i]] = param->size_[i];
|
||||
}
|
||||
|
||||
for (size_t i = 0; i < param->param_length_; ++i) {
|
||||
for (int32_t i = 0; i < param->param_length_; ++i) {
|
||||
if (size[i] < 0 && size[i] != -1) {
|
||||
return NNACL_PARAM_INVALID;
|
||||
}
|
||||
|
|
|
@ -31,7 +31,7 @@ int SplitInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **
|
|||
|
||||
SplitParameter *param = (SplitParameter *)parameter;
|
||||
|
||||
size_t num_split_ = param->num_split_ == 0 ? (int)(outputs_size) : param->num_split_;
|
||||
int num_split_ = param->num_split_ == 0 ? (int)(outputs_size) : param->num_split_;
|
||||
if (num_split_ == 0) {
|
||||
return NNACL_ERR;
|
||||
}
|
||||
|
|
|
@ -240,7 +240,7 @@ void ApplyBeginMask(StridedSliceTransferBuffer *transfer_buffer) {
|
|||
}
|
||||
|
||||
int ApplyEndMask(StridedSliceTransferBuffer *transfer_buffer, const int *in_shape, size_t in_shape_size) {
|
||||
for (int i = 0; i < transfer_buffer->ndim_; i++) {
|
||||
for (size_t i = 0; i < (size_t)(transfer_buffer->ndim_); i++) {
|
||||
if (transfer_buffer->ends_mask_[i]) {
|
||||
if (i >= in_shape_size) {
|
||||
return NNACL_ERR;
|
||||
|
@ -300,7 +300,7 @@ void ApplyShrinkMask(StridedSliceTransferBuffer *transfer_buffer, int *output_sh
|
|||
|
||||
int TransferBuffer2Param(const StridedSliceTransferBuffer *transfer_buffer, StridedSliceParameter *param,
|
||||
const int *in_shape, size_t in_shape_size) {
|
||||
if (transfer_buffer->ndim_ >= in_shape_size || param->in_shape_length_ >= in_shape_size) {
|
||||
if (transfer_buffer->ndim_ >= (int)(in_shape_size) || param->in_shape_length_ >= (int)(in_shape_size)) {
|
||||
return NNACL_ERR;
|
||||
}
|
||||
for (int i = 0; i < transfer_buffer->ndim_; i++) {
|
||||
|
|
|
@ -48,7 +48,7 @@ int TensorListFromTensorInferShape(const TensorC *const *inputs, size_t inputs_s
|
|||
int *ele_shape_ptr = (int *)(input1->data_);
|
||||
|
||||
vvector tensor_shape;
|
||||
tensor_shape.size_ = dim0;
|
||||
tensor_shape.size_ = (size_t)(dim0);
|
||||
tensor_shape.shape_ = (int **)malloc(tensor_shape.size_ * sizeof(int *));
|
||||
if (tensor_shape.shape_ == NULL) {
|
||||
return NNACL_NULL_PTR;
|
||||
|
|
|
@ -60,7 +60,10 @@ int TileInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **o
|
|||
if (data_num > (int)(input->shape_size_) || input->shape_size_ > MAX_SHAPE_SIZE) {
|
||||
return NNACL_INPUT_TENSOR_ERROR;
|
||||
}
|
||||
multiples_size = data_num;
|
||||
if (data_num < 0) {
|
||||
return NNACL_ERR;
|
||||
}
|
||||
multiples_size = (size_t)(data_num);
|
||||
if (inputs[1]->data_type_ != kNumberTypeInt && inputs[1]->data_type_ != kNumberTypeInt32) {
|
||||
return NNACL_INPUT_TENSOR_ERROR;
|
||||
}
|
||||
|
|
Loading…
Reference in New Issue