From 88b79e602512ea779cb8e515cf617bb3bf93203d Mon Sep 17 00:00:00 2001
From: yeyunpeng2020
Date: Tue, 11 Oct 2022 09:20:00 +0800
Subject: [PATCH] add op specification validation

---
 .../device/cpu/kernel/nnacl/fp32/space_to_batch_fp32.c      | 4 ++--
 .../plugin/device/cpu/kernel/nnacl/infer/flatten_infer.c    | 4 ++--
 .../lite/src/litert/kernel/cpu/fp32/sparse_to_dense_fp32.cc | 6 ++++++
 mindspore/lite/src/litert/kernel/cpu/fp32/topk_fp32.cc      | 5 +++++
 4 files changed, 15 insertions(+), 4 deletions(-)

diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/fp32/space_to_batch_fp32.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/fp32/space_to_batch_fp32.c
index 9451760c084..17bcedc7ed1 100644
--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/fp32/space_to_batch_fp32.c
+++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/fp32/space_to_batch_fp32.c
@@ -36,13 +36,13 @@ int DoSpaceToBatch(const void *input, void *output, SpaceToBatchParameter *param
   NNACL_CHECK_ZERO_RETURN_ERR(input_batch);
   NNACL_CHECK_ZERO_RETURN_ERR(block_shape_width);
   int copy_size = param->input_shape_[3] * param->data_type_len;
-  for (int out_b = task_id; out_b < output_batch; out_b += param->op_parameter_.thread_num_) {
+  for (int64_t out_b = task_id; out_b < output_batch; out_b += param->op_parameter_.thread_num_) {
     int in_b = out_b % input_batch;
     int shift_w = (out_b / input_batch) % block_shape_width;
     int shift_h = (out_b / input_batch) / block_shape_width;
     for (int out_h = 0; out_h < output_height; out_h++) {
       for (int out_w = 0; out_w < output_width; out_w++) {
-        int output_offset =
+        int64_t output_offset =
           out_b * param->out_stride_[0] + out_h * param->out_stride_[1] + out_w * param->out_stride_[2];
         if (out_h * block_shape_height + shift_h < padding_top ||
             out_h * block_shape_height + shift_h >= padding_top + input_height ||
diff --git a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/flatten_infer.c b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/flatten_infer.c
index 3f5cafa01db..dfaa2be3a4a 100644
--- a/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/flatten_infer.c
+++ b/mindspore/ccsrc/plugin/device/cpu/kernel/nnacl/infer/flatten_infer.c
@@ -44,10 +44,10 @@ int FlattenInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC
   // The value for axis must be in the range[-r, r], where r is
   // the rank of the input tensor.Negative value means counting
   // dimensions from the back.
-  if (abs(axis) > (int)input_shape_size) {
+  axis = axis < 0 ? (int)input_shape_size + axis : axis;
+  if (axis < 0 || axis >= (int)input_shape_size) {
     return NNACL_ERR;
   }
-  axis = axis < 0 ? (int)input_shape_size - axis : axis;
   int output_shape[2];
   output_shape[0] = axis == 0 ? 1 : input_shape[0];
   for (size_t i = 1; i < (size_t)axis; i++) {
diff --git a/mindspore/lite/src/litert/kernel/cpu/fp32/sparse_to_dense_fp32.cc b/mindspore/lite/src/litert/kernel/cpu/fp32/sparse_to_dense_fp32.cc
index e27d198ade3..a65b11f80f6 100644
--- a/mindspore/lite/src/litert/kernel/cpu/fp32/sparse_to_dense_fp32.cc
+++ b/mindspore/lite/src/litert/kernel/cpu/fp32/sparse_to_dense_fp32.cc
@@ -52,6 +52,12 @@ int SparseToDenseCPUKernel::Prepare() {
 }
 
 int SparseToDenseCPUKernel::ReSize() {
+  if (in_tensors_.at(THIRD_INPUT)->data_type() != kNumberTypeFloat16 &&
+      in_tensors_.at(THIRD_INPUT)->data_type() != kNumberTypeFloat32) {
+    MS_LOG(ERROR) << in_tensors_.at(THIRD_INPUT)->tensor_name() << " data type "
+                  << in_tensors_.at(THIRD_INPUT)->data_type() << " is not supported.";
+    return RET_ERROR;
+  }
   auto output = out_tensors_[kOutputIndex];
   int output_dim = static_cast<int>(output->shape().size());
   MS_CHECK_TRUE_MSG(output_dim <= DIMENSION_4D, RET_ERROR, "output_dim should <= 4");
diff --git a/mindspore/lite/src/litert/kernel/cpu/fp32/topk_fp32.cc b/mindspore/lite/src/litert/kernel/cpu/fp32/topk_fp32.cc
index d0e54d45c52..91267ed7d29 100644
--- a/mindspore/lite/src/litert/kernel/cpu/fp32/topk_fp32.cc
+++ b/mindspore/lite/src/litert/kernel/cpu/fp32/topk_fp32.cc
@@ -57,6 +57,11 @@ int TopKCPUKernel::Run() {
   CHECK_NULL_RETURN(output_index);
 
   if (in_tensors_.size() == C2NUM) {
+    if (in_tensors_.at(SECOND_INPUT)->data_type() != kNumberTypeInt32) {
+      MS_LOG(ERROR) << in_tensors_.at(SECOND_INPUT)->tensor_name() << " data type "
+                    << in_tensors_.at(SECOND_INPUT)->data_type() << " is not supported.";
+      return RET_ERROR;
+    }
     auto input_k = reinterpret_cast<int *>(in_tensors_.at(1)->data());
     CHECK_NULL_RETURN(input_k);
     topk_param_->k_ = input_k[0];
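Note: the following is a minimal standalone sketch, not part of the commit, of the
integer overflow that the space_to_batch_fp32.c widening guards against. The sizes
and variable names (out_b, out_stride, output_offset) are hypothetical stand-ins
for the patched loop's values; the point is only that a product of two plausible
int values can exceed INT_MAX, which is signed-overflow undefined behavior, while
the 64-bit arithmetic used after the patch stays in range.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
  /* Hypothetical sizes for a large tensor; each fits comfortably in int. */
  int out_b = 70000;      /* output batch index */
  int out_stride = 70000; /* elements per output batch */

  /* Computed in 32-bit math, out_b * out_stride would be 4,900,000,000,
   * well past INT_MAX (2,147,483,647): signed overflow, undefined
   * behavior, before the offset is ever used as an index. The patch
   * avoids this by declaring out_b (and output_offset) as int64_t, so
   * the multiplication is performed in 64-bit arithmetic; the explicit
   * cast below has the same effect. */
  int64_t output_offset = (int64_t)out_b * out_stride;
  printf("output_offset = %" PRId64 "\n", output_offset);
  return 0;
}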