forked from mindspore-Ecosystem/mindspore

fix fuzz bug and element size overflow

parent 5e12f336f2, commit 3cecadc8d9

@@ -273,6 +273,9 @@ int GetChannel(const TensorC *tensor) {
 }
 
 int GetElementNum(const TensorC *tensor) {
+  if (tensor == NULL) {
+    return -1;
+  }
   if (tensor->shape_size_ == 0) {
     return 1;  // scalar mode
   }

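With the NULL check, GetElementNum degrades to an error code instead of crashing on a fuzzed graph whose tensor pointer is unset; -1 is the sentinel callers must now handle. A minimal compilable sketch of the patched behavior, with TensorC reduced to the two fields the example needs (not the full nnacl definition):

#include <cstdio>
#include <cstddef>

// Reduced stand-in for nnacl's TensorC; only the fields used here.
struct TensorC {
  size_t shape_size_;
  int shape_[8];
};

// Mirrors the patched GetElementNum: NULL-safe, -1 signals an error.
int GetElementNum(const TensorC *tensor) {
  if (tensor == nullptr) {
    return -1;  // error sentinel instead of dereferencing NULL
  }
  if (tensor->shape_size_ == 0) {
    return 1;  // scalar mode
  }
  int num = 1;
  for (size_t i = 0; i < tensor->shape_size_; i++) {
    num *= tensor->shape_[i];
  }
  return num;
}

int main() {
  TensorC t = {2, {3, 4}};
  printf("%d\n", GetElementNum(&t));       // 12
  printf("%d\n", GetElementNum(nullptr));  // -1, previously a crash
  return 0;
}
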
@@ -20,21 +20,14 @@
 int RangeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size,
                     OpParameter *parameter) {
-  int check_ret = CheckAugmentWithMinSize(inputs, inputs_size, outputs, outputs_size, parameter, 1, 1);
+  int check_ret = CheckAugmentNullSizeInputTwo(inputs, inputs_size, outputs, outputs_size, parameter, 1, C3NUM, 1);
   if (check_ret != NNACL_OK) {
     return check_ret;
   }
 
   const TensorC *input = inputs[0];
   TensorC *output = outputs[0];
-  NNACL_CHECK_NULL_RETURN_ERR(input);
-  NNACL_CHECK_NULL_RETURN_ERR(output);
-
-  if (inputs_size == C3NUM) {
-    output->data_type_ = input->data_type_;
-  } else {
-    output->data_type_ = kNumberTypeInt32;
-  }
+  output->data_type_ = inputs_size == C3NUM ? input->data_type_ : kNumberTypeInt32;
   output->format_ = input->format_;
   if (!InferFlag(inputs, inputs_size)) {
     return NNACL_INFER_INVALID;
   }

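CheckAugmentNullSizeInputTwo presumably folds the null checks into the argument validation while accepting either arity (1 or C3NUM inputs), which is what lets the explicit NNACL_CHECK_NULL_RETURN_ERR calls and the if/else collapse into one ternary. A sketch of a two-arity, null-checking validator under that assumption (CheckInputsTwoArity is a hypothetical helper, not the nnacl function):

#include <cstddef>

// Reduced stand-in for nnacl's TensorC.
struct TensorC {
  int data_type_;
};

enum { NNACL_OK = 0, NNACL_NULL_PTR = 1, NNACL_INPUT_TENSOR_ERROR = 2 };

// Accept exactly arity_a or arity_b inputs and null-check every tensor;
// assumed behavior of CheckAugmentNullSizeInputTwo, not its verbatim code.
int CheckInputsTwoArity(const TensorC *const *inputs, size_t inputs_size,
                        size_t arity_a, size_t arity_b) {
  if (inputs == nullptr) return NNACL_NULL_PTR;
  if (inputs_size != arity_a && inputs_size != arity_b) return NNACL_INPUT_TENSOR_ERROR;
  for (size_t i = 0; i < inputs_size; i++) {
    if (inputs[i] == nullptr) return NNACL_NULL_PTR;
  }
  return NNACL_OK;
}

int main() {
  TensorC a{0}, b{0}, c{0};
  const TensorC *one[] = {&a};
  const TensorC *three[] = {&a, &b, &c};
  // Both calls return NNACL_OK: 1 and 3 are the two legal arities.
  return CheckInputsTwoArity(one, 1, 1, 3) + CheckInputsTwoArity(three, 3, 1, 3);
}
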
@@ -95,7 +95,11 @@ int SliceInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **
   int begin[MAX_SHAPE_SIZE];
   int size[MAX_SHAPE_SIZE];
   for (int32_t i = 0; i < param->param_length_; ++i) {
-    MS_CHECK_TRUE_RET(param->axis_[i] < param->param_length_, NNACL_PARAM_INVALID);
+    if (param->axis_[i] < 0) {
+      MS_CHECK_INT_ADD_NOT_OVERFLOW(param->axis_[i], (int)input->shape_size_, NNACL_PARAM_INVALID);
+      param->axis_[i] += (int)input->shape_size_;
+    }
+    MS_CHECK_TRUE_RET(param->axis_[i] >= 0 && param->axis_[i] < param->param_length_, NNACL_PARAM_INVALID);
     begin[param->axis_[i]] = param->begin_[i];
     size[param->axis_[i]] = param->size_[i];
   }

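The overflow macro matters because input->shape_size_ is a size_t cast to int: a fuzzed rank can come out negative after the cast, and adding two negatives can wrap below INT_MIN, which is undefined behavior in C. A standalone sketch of the normalize-with-guard pattern, written without the nnacl macros (AddWouldOverflow and NormalizeAxis are illustrative helpers):

#include <climits>
#include <cstdio>

// True if a + b would overflow int; the condition the
// MS_CHECK_INT_ADD_NOT_OVERFLOW macro is assumed to test.
bool AddWouldOverflow(int a, int b) {
  if (b > 0 && a > INT_MAX - b) return true;
  if (b < 0 && a < INT_MIN - b) return true;
  return false;
}

// Normalize a possibly negative axis against a rank, rejecting wrap-around.
// Returns the normalized axis, or -1 on invalid input.
int NormalizeAxis(int axis, int rank) {
  if (axis < 0) {
    if (AddWouldOverflow(axis, rank)) return -1;
    axis += rank;
  }
  if (axis < 0 || axis >= rank) return -1;
  return axis;
}

int main() {
  printf("%d\n", NormalizeAxis(-1, 4));        // 3: normal wrap-around indexing
  printf("%d\n", NormalizeAxis(-1, INT_MIN));  // -1: rejected before the add wraps
  return 0;
}
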
@@ -16,6 +16,7 @@
 
 #include "nnacl/infer/strided_slice_infer.h"
 #include "nnacl/infer/infer_register.h"
+#include "nnacl/op_base.h"
 
 const size_t kStridedSliceOutputNum = 1;
 const size_t kStridedSliceInputNum = 1;

@@ -124,10 +125,9 @@ int HandleAxesInputExist(const TensorC *const *inputs, int *ndim, int *in_shape,
 
   int *stride_data = NULL;
   const TensorC *stride_tensor = inputs[4];
-  if (GetElementNum(stride_tensor) != 0) {
-    if (GetElementNum(stride_tensor) != begin_ndim) {
-      return NNACL_ERR;
-    }
+  int stride_data_num = GetElementNum(stride_tensor);
+  if (stride_data_num != 0) {
+    MS_CHECK_TRUE_RET(stride_data_num == begin_ndim, NNACL_ERR);
     stride_data = (int *)(stride_tensor->data_);
   }
 

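Reading GetElementNum once avoids re-evaluating it and keeps the result consistent across both tests. MS_CHECK_TRUE_RET is an early-return assertion macro; a plausible expansion (assumed, not the verbatim nnacl definition):

#include <cstdio>

// Assumed expansion of nnacl's MS_CHECK_TRUE_RET; the real macro may differ
// in detail, but the do/while(0) early-return shape is the usual pattern.
#define MS_CHECK_TRUE_RET(value, errcode) \
  do {                                    \
    if (!(value)) {                       \
      return (errcode);                   \
    }                                     \
  } while (0)

enum { NNACL_OK = 0, NNACL_ERR = 1 };

// The refactored shape of the check: fetch the count once, then assert it.
int CheckStride(int stride_data_num, int begin_ndim) {
  if (stride_data_num != 0) {
    MS_CHECK_TRUE_RET(stride_data_num == begin_ndim, NNACL_ERR);
  }
  return NNACL_OK;
}

int main() {
  printf("%d\n", CheckStride(4, 4));  // 0: counts agree
  printf("%d\n", CheckStride(4, 3));  // 1: mismatch rejected
  return 0;
}
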
@@ -271,9 +271,10 @@ int ApplyEllipsisMask(StridedSliceTransferBuffer *transfer_buffer, const int *in
   return NNACL_OK;
 }
 
-int TransIndexToPositive(StridedSliceTransferBuffer *transfer_buffer, const int *in_shape, size_t in_shape_size) {
+int TransIndexToPositive(StridedSliceTransferBuffer *transfer_buffer, const int *in_shape, size_t max_shape_size,
+                         size_t in_shape_size) {
   for (size_t i = 0; i < transfer_buffer->begins_size_; i++) {
-    if (i >= in_shape_size) {
+    if (i >= max_shape_size) {
       return NNACL_ERR;
     }
     if (transfer_buffer->begins_[i] < 0) {

@@ -282,6 +283,15 @@ int TransIndexToPositive(StridedSliceTransferBuffer *transfer_buffer, const int
     if (transfer_buffer->ends_[i] < 0) {
       transfer_buffer->ends_[i] += in_shape[i];
     }
+    if (i < in_shape_size) {
+      if (transfer_buffer->begins_[i] < 0 || transfer_buffer->begins_[i] > in_shape[i]) {
+        return NNACL_ERR;
+      }
+      if ((transfer_buffer->ends_[i] < 0 && transfer_buffer->ends_[i] != -1) ||
+          transfer_buffer->ends_[i] > in_shape[i]) {
+        return NNACL_ERR;
+      }
+    }
   }
   return NNACL_OK;
 }

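The new checks only validate positions that exist in the real tensor rank (i < in_shape_size); the earlier i >= max_shape_size test merely bounds the fixed-size buffers. ends_[i] == -1 stays legal, presumably as the "slice to the end" sentinel. A reduced sketch of the same validation (ValidateRange is illustrative; shape must hold at least n entries):

#include <cstdio>

// Validate begin/end indices against the tensor shape, mirroring the
// patched TransIndexToPositive logic in reduced form.
int ValidateRange(int *begins, int *ends, size_t n, const int *shape, size_t rank) {
  for (size_t i = 0; i < n; i++) {
    if (begins[i] < 0) begins[i] += shape[i];
    if (ends[i] < 0) ends[i] += shape[i];
    if (i < rank) {
      if (begins[i] < 0 || begins[i] > shape[i]) return -1;
      // -1 is tolerated as the "open end" sentinel; anything else negative,
      // or past the dimension, is rejected.
      if ((ends[i] < 0 && ends[i] != -1) || ends[i] > shape[i]) return -1;
    }
  }
  return 0;
}

int main() {
  int shape[2] = {5, 6};
  int begins[2] = {-2, 0};
  int ends[2] = {5, 6};
  printf("%d\n", ValidateRange(begins, ends, 2, shape, 2));  // 0: in range
  int bad_begins[2] = {-20, 0};  // -20 + 5 is still negative
  int bad_ends[2] = {5, 6};
  printf("%d\n", ValidateRange(bad_begins, bad_ends, 2, shape, 2));  // -1
  return 0;
}
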
@@ -426,7 +436,7 @@ int StridedSliceInferShape(const TensorC *const *inputs, size_t inputs_size, Ten
   int output_shape[MAX_SHAPE_SIZE];
   size_t output_shape_size = 0;
   ShapeSet(output_shape, &output_shape_size, in_shape, in_shape_size);
-  ret = TransIndexToPositive(&transfer_buffer, in_shape, MAX_SHAPE_SIZE);
+  ret = TransIndexToPositive(&transfer_buffer, in_shape, MAX_SHAPE_SIZE, input->shape_size_);
   if (ret != NNACL_OK) {
     return ret;
   }

@@ -21,24 +21,26 @@
 
 namespace mindspore {
 namespace lite {
-schema::Tensor *AttrToTensor(const void *data, int data_size, bool is_array, TypeId type_id,
+schema::Tensor *AttrToTensor(const void *data, size_t data_size, bool is_array, TypeId type_id,
                              std::vector<char *> *const tensor_bufs) {
   if (data == nullptr || tensor_bufs == nullptr) {
     MS_LOG(ERROR) << "the parameter of this function is nullptr.";
     return nullptr;
   }
-  auto dst_tensor = (is_array ? new (std::nothrow) Tensor(type_id, {data_size}, mindspore::NHWC, Category::CONST_TENSOR)
-                              : new (std::nothrow) Tensor(type_id, {}, mindspore::NHWC, Category::CONST_SCALAR));
-  auto dst_data = dst_tensor->MutableData();
-  if (dst_data == nullptr) {
-    MS_LOG(ERROR) << "Data from tensor is nullptr";
-    delete dst_tensor;
+  if (data_size > static_cast<size_t>(INT32_MAX)) {
+    MS_LOG(ERROR) << "the amount of data exceeds the INT32_MAX.";
+    return nullptr;
+  }
+  auto shape = is_array ? std::vector<int>{static_cast<int>(data_size)} : std::vector<int>{};
+  auto dst_tensor = (is_array ? new (std::nothrow) Tensor(type_id, shape, mindspore::NHWC, Category::CONST_TENSOR)
+                              : new (std::nothrow) Tensor(type_id, shape, mindspore::NHWC, Category::CONST_SCALAR));
+  if (dst_tensor == nullptr) {
+    MS_LOG(ERROR) << "new a tensor failed.";
     return nullptr;
   }
   std::vector<uint8_t> uint8_data;
   uint8_data.resize(dst_tensor->Size());
   memcpy(uint8_data.data(), data, dst_tensor->Size());
-  auto shape = dst_tensor->shape();
   flatbuffers::FlatBufferBuilder fbb(1024);
   auto tensor_offset =
     schema::CreateTensorDirect(fbb, NodeType_ValueNode, type_id, &shape, schema::Format_NHWC, 0, 0, &uint8_data);

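Tensor shapes in lite are std::vector<int>, so a size_t data_size above INT32_MAX would turn negative when narrowed to int; the new guard fails fast before the cast, and the null check on dst_tensor covers the std::nothrow allocation that the old code dereferenced unconditionally. A minimal sketch of the guard-then-cast pattern (MakeShape is a hypothetical helper, not a lite API):

#include <cstdint>
#include <cstdio>
#include <vector>

// Build a 1-D shape from an element count, refusing values that cannot be
// represented as a non-negative int (the vector's element type).
bool MakeShape(size_t data_size, std::vector<int> *shape) {
  if (data_size > static_cast<size_t>(INT32_MAX)) {
    return false;  // would wrap negative after the narrowing cast
  }
  *shape = {static_cast<int>(data_size)};
  return true;
}

int main() {
  std::vector<int> shape;
  printf("%d\n", MakeShape(1024, &shape));                              // 1: fits in int
  printf("%d\n", MakeShape(static_cast<size_t>(INT32_MAX) + 1, &shape));  // 0: rejected
  return 0;
}
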
@@ -26,7 +26,7 @@
 
 namespace mindspore {
 namespace lite {
-schema::Tensor *AttrToTensor(const void *data, int data_size, bool is_array, TypeId type_id,
+schema::Tensor *AttrToTensor(const void *data, size_t data_size, bool is_array, TypeId type_id,
                              std::vector<char *> *const tensor_bufs);
 }  // namespace lite
 }  // namespace mindspore

@@ -73,6 +73,9 @@ int KernelInferShape(const std::vector<lite::Tensor *> &inputs, const std::vecto
   auto ret =
     kernel_interface->Infer(&in_tensors, &out_tensors, static_cast<const schema::Primitive *>(primitive), kernel);
   if (ret == kLiteInferInvalid) {
+    for (auto output : outputs) {
+      output->set_shape({-1});
+    }
     return RET_INFER_INVALID;
   }
   if (ret != kSuccess) {

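Resetting every output to shape {-1} marks it as dynamic, so a later pass re-infers it instead of consuming whatever stale shape the tensor carried before the invalid inference. A minimal sketch of that convention (the Tensor here is a stand-in, not lite::Tensor):

#include <cstdio>
#include <vector>

// Reduced stand-in for lite::Tensor: just a shape with a setter.
struct Tensor {
  std::vector<int> shape;
  void set_shape(const std::vector<int> &s) { shape = s; }
};

// On an "infer invalid" result, mark every output as dynamic ({-1}) so a
// stale shape from a previous pass is never consumed downstream.
void MarkOutputsDynamic(const std::vector<Tensor *> &outputs) {
  for (auto output : outputs) {
    output->set_shape({-1});
  }
}

int main() {
  Tensor t{{2, 3}};
  std::vector<Tensor *> outs = {&t};
  MarkOutputsDynamic(outs);
  printf("%d\n", t.shape[0]);  // -1: shape now flagged as unknown
  return 0;
}
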
@@ -40,13 +40,13 @@ class ConcatInt8CPUKernel : public InnerKernel {
     if (output_shape != nullptr) {
       free(output_shape);
     }
-    for (std::size_t i = 0; i < in_tensors().size(); i++) {
-      int *input_shape = concat_param_->input_shapes_[i];
-      if (input_shape != nullptr) {
-        free(input_shape);
-      }
-    }
     if (concat_param_->input_shapes_ != nullptr) {
+      for (std::size_t i = 0; i < in_tensors().size(); i++) {
+        int *input_shape = concat_param_->input_shapes_[i];
+        if (input_shape != nullptr) {
+          free(input_shape);
+        }
+      }
       free(concat_param_->input_shapes_);
     }
     if (concat_param_->quant_arg_.in_args_ != nullptr) {

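The destructor fix: input_shapes_[i] was indexed even when input_shapes_ itself was never allocated, so a kernel that failed during initialization crashed on destruction. Hoisting the loop inside the null guard makes partially-initialized kernels safe to destroy. A reduced sketch (FreeInputShapes is illustrative, not the kernel's method):

#include <cstdlib>

// Reduced stand-in for ConcatParameter: an array of per-input shape buffers.
struct ConcatParameter {
  int **input_shapes_;
};

// Free each row only when the row array itself was allocated; indexing
// input_shapes_[i] through a null pointer was the original bug.
void FreeInputShapes(ConcatParameter *param, size_t input_num) {
  if (param->input_shapes_ != nullptr) {
    for (size_t i = 0; i < input_num; i++) {
      if (param->input_shapes_[i] != nullptr) {
        free(param->input_shapes_[i]);
      }
    }
    free(param->input_shapes_);
    param->input_shapes_ = nullptr;
  }
}

int main() {
  ConcatParameter p{nullptr};
  FreeInputShapes(&p, 4);  // safe: the null array is never dereferenced
  return 0;
}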