fuzz check

This commit is contained in:
albert-yan 2022-10-11 09:54:27 +08:00
parent e2c0a72420
commit 09ace3e825
6 changed files with 24 additions and 7 deletions

View File

@ -19,7 +19,7 @@
int RankInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size,
OpParameter *parameter) {
int check_ret = CheckAugmentWithMinSize(inputs, inputs_size, outputs, outputs_size, parameter, 1, 1);
int check_ret = CheckAugmentNullSize(inputs, inputs_size, outputs, outputs_size, parameter, 1, 1);
if (check_ret != NNACL_OK) {
return check_ret;
}

View File

@ -29,8 +29,7 @@ int SparseToDenseInferShape(const TensorC *const *inputs, size_t inputs_size, Te
return NNACL_INPUT_TENSOR_ERROR;
}
const TensorC *input1 = inputs[1];
const TensorC *input2 = inputs[2];
SetDataTypeFormat(output, input2);
SetDataTypeFormat(output, input1);
if (!InferFlag(inputs, inputs_size)) {
return NNACL_INFER_INVALID;
}

View File

@ -82,6 +82,12 @@ int ArithmeticSelfCPUKernel::Prepare() {
CHECK_NOT_EQUAL_RETURN(in_tensors_.size(), 1);
CHECK_NOT_EQUAL_RETURN(out_tensors_.size(), 1);
auto out_tensor_category = out_tensors_[0]->category();
if (out_tensor_category == mindspore::lite::CONST_SCALAR || out_tensor_category == mindspore::lite::CONST_TENSOR) {
MS_LOG(ERROR) << "out_tensor category is invalid.";
return RET_ERROR;
}
if (!InferShapeDone()) {
return RET_OK;
}

View File

@ -36,6 +36,13 @@ int PadCPUKernel::Prepare() {
CHECK_NULL_RETURN(in_tensors_[0]);
CHECK_NULL_RETURN(in_tensors_[1]);
CHECK_NULL_RETURN(out_tensors_[0]);
auto input_data_type = in_tensors_[0]->data_type();
if (!(input_data_type == kNumberTypeFloat32 || input_data_type == kNumberTypeFloat ||
input_data_type == kNumberTypeFloat16)) {
MS_LOG(ERROR) << "Unsupported data type of input tensor for Pad op: " << input_data_type;
return RET_ERROR;
}
if (!InferShapeDone()) {
return RET_OK;
}

View File

@ -27,10 +27,14 @@ using mindspore::schema::PrimitiveType_PowFusion;
namespace mindspore::kernel {
int PowerCPUKernel::Prepare() {
  // Validate tensor count and input data types up front so malformed models
  // (e.g. fuzzer-generated ones) are rejected here instead of crashing later.
  MS_CHECK_TRUE_MSG(in_tensors_.size() == C2NUM, RET_ERROR, "Only support Power op with 2 inputs.");
  // Base tensor (input 0) must be a floating-point type.
  auto base_data_type = in_tensors_.at(0)->data_type();
  MS_CHECK_TRUE_MSG((base_data_type == kNumberTypeFloat32 || base_data_type == kNumberTypeFloat ||
                     base_data_type == kNumberTypeFloat16),
                    RET_ERROR, "unsupported data type of base for Power op.");
  // Exponent tensor (input 1) may be floating-point or integer.
  // NOTE: the previous revision checked the exponent twice (once under the old
  // `exp_datatype` spelling); a single check suffices.
  auto exp_data_type = in_tensors_.at(1)->data_type();
  MS_CHECK_TRUE_MSG((exp_data_type == kNumberTypeFloat32 || exp_data_type == kNumberTypeFloat ||
                     exp_data_type == kNumberTypeInt32 || exp_data_type == kNumberTypeInt),
                    RET_ERROR, "unsupported data type of exponent for Power op.");
  // At least one output tensor is required.
  CHECK_LESS_RETURN(out_tensors_.size(), 1);
  return RET_OK;
}

View File

@ -68,6 +68,7 @@ int ReverseCPUKernel::ReSize() {
free(tmp_);
tmp_ = nullptr;
}
MS_CHECK_INT_MUL_NOT_OVERFLOW(data_size_, static_cast<int>(sizeof(int)), RET_ERROR);
tmp_ = reinterpret_cast<int *>(malloc(data_size_ * static_cast<int>(sizeof(int))));
if (tmp_ == nullptr) {
MS_LOG(ERROR) << "Reverse Malloc tmp_ error!";