!43615 add op specification validation

Merge pull request !43615 from yeyunpeng2020/master_fuzz
i-robot 2022-10-12 06:53:48 +00:00 committed by Gitee
commit a0cc9babbf
4 changed files with 15 additions and 4 deletions

@@ -36,13 +36,13 @@ int DoSpaceToBatch(const void *input, void *output, SpaceToBatchParameter *param
   NNACL_CHECK_ZERO_RETURN_ERR(input_batch);
   NNACL_CHECK_ZERO_RETURN_ERR(block_shape_width);
   int copy_size = param->input_shape_[3] * param->data_type_len;
-  for (int out_b = task_id; out_b < output_batch; out_b += param->op_parameter_.thread_num_) {
+  for (int64_t out_b = task_id; out_b < output_batch; out_b += param->op_parameter_.thread_num_) {
     int in_b = out_b % input_batch;
     int shift_w = (out_b / input_batch) % block_shape_width;
     int shift_h = (out_b / input_batch) / block_shape_width;
     for (int out_h = 0; out_h < output_height; out_h++) {
       for (int out_w = 0; out_w < output_width; out_w++) {
-        int output_offset =
+        int64_t output_offset =
           out_b * param->out_stride_[0] + out_h * param->out_stride_[1] + out_w * param->out_stride_[2];
         if (out_h * block_shape_height + shift_h < padding_top ||
             out_h * block_shape_height + shift_h >= padding_top + input_height ||
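
Why the two widenings matter: out_b, output_offset, and the strides are all plain int, so for the oversized shapes a fuzzer feeds in, the flattened offset out_b * out_stride_[0] + ... can exceed INT_MAX, and signed int overflow is undefined behavior in C. Declaring out_b and output_offset as int64_t makes the whole expression evaluate in 64 bits. A minimal standalone sketch of the failure mode (illustrative only; the shape numbers are made up and nothing here is from the commit):

  #include <cstdint>
  #include <cstdio>

  int main() {
    // Hypothetical large batch index and stride, as a fuzzed model might produce.
    int out_b = 70000;
    int stride = 40000;
    // 70000 * 40000 = 2.8e9 > INT_MAX, so an all-int computation would overflow.
    // Widening one operand first, as the patched loop does via int64_t out_b,
    // keeps the multiplication exact:
    int64_t offset = static_cast<int64_t>(out_b) * stride;
    std::printf("64-bit offset: %lld\n", static_cast<long long>(offset));
    // Truncating back to 32 bits shows the garbage an int offset would hold:
    std::printf("wrapped to 32 bits: %d\n", static_cast<int32_t>(offset));
    return 0;
  }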

@@ -44,10 +44,10 @@ int FlattenInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC
   // The value for axis must be in the range [-r, r], where r is
   // the rank of the input tensor. Negative value means counting
   // dimensions from the back.
-  if (abs(axis) > (int)input_shape_size) {
+  axis = axis < 0 ? (int)input_shape_size + axis : axis;
+  if (axis >= (int)input_shape_size) {
     return NNACL_ERR;
   }
-  axis = axis < 0 ? (int)input_shape_size + axis : axis;
   int output_shape[2];
   output_shape[0] = axis == 0 ? 1 : input_shape[0];
   for (size_t i = 1; i < (size_t)axis; i++) {
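
The reordering normalizes a negative axis before validating it, so axis = -1 is first mapped to r - 1 and only then range-checked, matching the comment's [-r, r] contract. A hedged standalone sketch of the same normalize-then-validate pattern (the helper name and the extra axis < 0 re-check for deeply negative values are mine, not the commit's):

  #include <cstdio>

  // Maps a possibly-negative axis into [0, rank) or reports a range error
  // (the real code returns NNACL_ERR; -1 stands in for it here).
  static int NormalizeAxis(int axis, int rank, int *out) {
    if (axis < 0) {
      axis += rank;  // counting dimensions from the back: -1 -> rank - 1
    }
    if (axis < 0 || axis >= rank) {
      return -1;  // still outside [0, rank) after normalization
    }
    *out = axis;
    return 0;
  }

  int main() {
    int axis = 0;
    std::printf("%d -> axis %d\n", NormalizeAxis(-1, 4, &axis), axis);  // 0 -> axis 3
    std::printf("%d (out of range)\n", NormalizeAxis(5, 4, &axis));     // -1 (out of range)
    return 0;
  }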

@@ -52,6 +52,12 @@ int SparseToDenseCPUKernel::Prepare() {
 }
 
 int SparseToDenseCPUKernel::ReSize() {
+  if (in_tensors_.at(THIRD_INPUT)->data_type() != kNumberTypeFloat16 &&
+      in_tensors_.at(THIRD_INPUT)->data_type() != kNumberTypeFloat32) {
+    MS_LOG(ERROR) << in_tensors_.at(THIRD_INPUT)->tensor_name() << " data type "
+                  << in_tensors_.at(THIRD_INPUT)->data_type() << " is not supported.";
+    return RET_ERROR;
+  }
   auto output = out_tensors_[kOutputIndex];
   int output_dim = static_cast<int>(output->shape().size());
   MS_CHECK_TRUE_MSG(output_dim <= DIMENSION_4D, RET_ERROR, "output_dim should <= 4");
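
The new guard rejects a values tensor whose element type is neither fp16 nor fp32 before ReSize goes on to use it. Note the connective: "neither A nor B" is !(A || B), i.e. != A && != B; written with || the condition would be true for every type, so even supported fp32 inputs would be rejected. A tiny self-checking sketch of just the predicate (standalone, with stand-in enum values, not the real TypeId constants):

  enum DataType { kFloat16, kFloat32, kInt32, kInt8 };

  // True exactly when t is neither of the two supported float types.
  constexpr bool IsUnsupported(DataType t) {
    return t != kFloat16 && t != kFloat32;
  }

  static_assert(IsUnsupported(kInt8), "int8 is rejected");
  static_assert(!IsUnsupported(kFloat32), "fp32 passes the guard");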

@@ -57,6 +57,11 @@ int TopKCPUKernel::Run() {
   CHECK_NULL_RETURN(output_index);
   if (in_tensors_.size() == C2NUM) {
+    if (in_tensors_.at(SECOND_INPUT)->data_type() != kNumberTypeInt32) {
+      MS_LOG(ERROR) << in_tensors_.at(SECOND_INPUT)->tensor_name() << " data type "
+                    << in_tensors_.at(SECOND_INPUT)->data_type() << " is not supported.";
+      return RET_ERROR;
+    }
     auto input_k = reinterpret_cast<int *>(in_tensors_.at(1)->data());
     CHECK_NULL_RETURN(input_k);
     topk_param_->k_ = input_k[0];
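
This mirrors the SparseToDense guard: the k tensor's type is validated before its buffer is reinterpreted as int *, so a fuzzed model that declares k as, say, float cannot make the kernel read a mis-sized value. A hedged sketch of the guarded read (types simplified and names invented here; the kNumberTypeInt32 value is a placeholder, not MindSpore's real enum):

  #include <cstdint>

  struct Tensor {
    int data_type;  // simplified stand-in for the framework's TypeId
    void *data;
  };

  constexpr int kNumberTypeInt32 = 34;  // placeholder value for illustration

  // Validates type and nullness before the cast, instead of trusting the model.
  static bool ReadK(const Tensor &t, int32_t *k_out) {
    if (t.data_type != kNumberTypeInt32 || t.data == nullptr) {
      return false;
    }
    *k_out = *static_cast<int32_t *>(t.data);
    return true;
  }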