From e76bbb379341c9c1d4a77ab129430fccabae49a6 Mon Sep 17 00:00:00 2001 From: zhaodezan Date: Mon, 9 Aug 2021 14:32:35 +0800 Subject: [PATCH] codex fix 6 --- .../cpu/nnacl/infer/arithmetic_grad_infer.c | 10 ++-- .../cpu/nnacl/infer/conv2d_grad_input_infer.c | 2 +- .../cpu/nnacl/infer/reshape_infer.c | 3 ++ .../cpu/nnacl/infer/slice_infer.c | 49 ++++++++++++------- .../cpu/nnacl/infer/squeeze_infer.c | 2 +- .../nnacl/infer/tensorlist_setitem_infer.c | 2 +- 6 files changed, 42 insertions(+), 26 deletions(-) diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/arithmetic_grad_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/arithmetic_grad_infer.c index 9971a6c2cd6..83987ccfe2f 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/arithmetic_grad_infer.c +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/arithmetic_grad_infer.c @@ -55,10 +55,10 @@ int ArithmeticGradInferShape(const TensorC *const *inputs, size_t inputs_size, T if (GetElementNum(dx1) < GetElementNum(dx2)) { param->ndim_ = in_shape1_size; - param->in_elements_num0_ = param->ndim_; - param->in_elements_num1_ = param->ndim_; - param->out_elements_num_ = param->ndim_; - int fill_dim_num = in_shape1_size - in_shape0_size; // This will not work for batch! + param->in_elements_num0_ = (int)param->ndim_; + param->in_elements_num1_ = (int)param->ndim_; + param->out_elements_num_ = (int)param->ndim_; + size_t fill_dim_num = in_shape1_size - in_shape0_size; // This will not work for batch! 
int j = 0; for (unsigned int i = 0; i < in_shape1_size; i++) { if (i < fill_dim_num) { @@ -76,7 +76,7 @@ int ArithmeticGradInferShape(const TensorC *const *inputs, size_t inputs_size, T param->out_elements_num_ = param->ndim_; param->broadcasting_ = true; int j = 0; - int fill_dim_num = in_shape0_size - in_shape1_size; + size_t fill_dim_num = in_shape0_size - in_shape1_size; for (unsigned int i = 0; i < in_shape0_size; i++) { if (i < fill_dim_num) { param->in_shape1_[i] = 1; diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/conv2d_grad_input_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/conv2d_grad_input_infer.c index eb54646ef45..60609c6f0e4 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/conv2d_grad_input_infer.c +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/conv2d_grad_input_infer.c @@ -40,7 +40,7 @@ int Conv2dGradInputInferShape(const TensorC *const *inputs, size_t inputs_size, if (inputs[2]->shape_size_ < 1 || inputs[2]->data_ == NULL) { return NNACL_ERR; } - size_t data_size = inputs[2]->shape_[0]; + size_t data_size = (size_t)inputs[2]->shape_[0]; if (data_size != 4) { return NNACL_ERR; } diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/reshape_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/reshape_infer.c index bab31373001..6fc571263e0 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/reshape_infer.c +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/reshape_infer.c @@ -190,6 +190,9 @@ int ReshapeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC return NNACL_INFER_INVALID; } int shape_size = GetElementNum(shape_tensor); + if (shape_size > MAX_SHAPE_SIZE) { + return NNACL_ERR; + } int calRet = CalShapeByType(inputs, shape_size, out_shape, &out_shape_size); if (calRet != NNACL_OK) { return calRet; diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/slice_infer.c 
b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/slice_infer.c index 1a1b8743670..b2d1e6678b3 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/slice_infer.c +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/slice_infer.c @@ -32,6 +32,35 @@ static bool CheckInputsDataType(const TensorC *const *inputs, size_t inputs_size return true; } +int InitBeginAndSizeParam(const TensorC *const *inputs, SliceParameter *param) { + /* init begin parameter */ + int slice_begin_size = GetElementNum(inputs[1]); + int *begin_ptr = (int *)(inputs[1]->data_); + if (slice_begin_size != param->param_length_ || begin_ptr == NULL) { + return NNACL_INFER_INVALID; + } + if (slice_begin_size > MAX_AXIS_SIZE) { + return NNACL_ERR; + } + for (int i = 0; i < slice_begin_size; i++) { + param->begin_[i] = begin_ptr[i]; + } + + /* init size parameter */ + int slice_size_size = GetElementNum(inputs[2]); + int *size_ptr = (int *)(inputs[2]->data_); + if (slice_size_size != param->param_length_ || size_ptr == NULL) { + return NNACL_INFER_INVALID; + } + if (slice_size_size > MAX_AXIS_SIZE) { + return NNACL_ERR; + } + for (int i = 0; i < slice_size_size; i++) { + param->size_[i] = size_ptr[i]; + } + return NNACL_OK; +} + int SliceInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, OpParameter *parameter) { int ret = CheckAugmentWithMinSize(inputs, inputs_size, outputs, outputs_size, parameter, 3, 1); @@ -57,24 +86,8 @@ int SliceInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC ** param->param_length_ = (int)(input->shape_size_); output->shape_size_ = input->shape_size_; - /* init begin parameter */ - int slice_begin_size = GetElementNum(inputs[1]); - int *begin_ptr = (int *)(inputs[1]->data_); - if (slice_begin_size != param->param_length_ || begin_ptr == NULL) { - return NNACL_INFER_INVALID; - } - for (size_t i = 0; i < slice_begin_size; i++) { - param->begin_[i] = begin_ptr[i]; - } - - /*
init size parameter */ - int slice_size_size = GetElementNum(inputs[2]); - int *size_ptr = (int *)(inputs[2]->data_); - if (slice_size_size != param->param_length_ || size_ptr == NULL) { - return NNACL_INFER_INVALID; - } - for (size_t i = 0; i < slice_size_size; i++) { - param->size_[i] = size_ptr[i]; + if (InitBeginAndSizeParam(inputs, param) != NNACL_OK) { + return NNACL_ERR; } /* infer output shape information */ diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/squeeze_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/squeeze_infer.c index cf2137f8095..2d35201add1 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/squeeze_infer.c +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/squeeze_infer.c @@ -40,7 +40,7 @@ int SqueezeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC size_t out_shape_size = 0; for (size_t i = 0; i < param->axis_size_; i++) { - param->axis_[i] = param->axis_[i] >= 0 ? param->axis_[i] : param->axis_[i] + input->shape_size_; + param->axis_[i] = param->axis_[i] >= 0 ? param->axis_[i] : param->axis_[i] + (int)input->shape_size_; } if (param->axis_size_ == 0) { diff --git a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/tensorlist_setitem_infer.c b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/tensorlist_setitem_infer.c index df82bbb9b8e..24d42740ed1 100644 --- a/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/tensorlist_setitem_infer.c +++ b/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/tensorlist_setitem_infer.c @@ -107,7 +107,7 @@ int TensorListSetItemInferShape(const TensorC *const *inputs, size_t inputs_size } out_shape.shape_[index] = (int *)(value_tensor->shape_); - out_shape.shape_size_[index] = value_tensor->shape_size_; + out_shape.shape_size_[index] = (int)value_tensor->shape_size_; int ret = MallocTensorListData(output0, input0->tensors_data_type_, &out_shape); if (ret != NNACL_OK) { free(out_shape.shape_);