forked from mindspore-Ecosystem/mindspore
!25714 [MSLITE][DEVELOP] include stdint.h for data_type, elementNum & Size overflow check
Merge pull request !25714 from yangruoqi713/master
commit 8e72f10fc7
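The change applies one pattern across a number of CPU kernels: validate a tensor's element count or byte size before it is used as a memcpy length or an allocation size. The real MS_CHECK_GT / MS_CHECK_FALSE / CHECK_NULL_RETURN macros live in MindSpore Lite headers that are not part of this diff; the sketch below only approximates their observable effect (return an error code from the enclosing function when the check fails) so the hunks are easier to read. Error-code values are placeholders.

#include <cstddef>
#include <cstring>

// Illustrative stand-ins only: the real MS_CHECK_* macros are defined in
// MindSpore Lite headers not shown in this diff. What matters for reading
// the hunks is that a failed check returns an error code from the caller.
#define MS_CHECK_GT(value, threshold, errcode) \
  do {                                         \
    if (!((value) > (threshold))) {            \
      return (errcode);                        \
    }                                          \
  } while (0)

#define MS_CHECK_FALSE(condition, errcode) \
  do {                                     \
    if ((condition)) {                     \
      return (errcode);                    \
    }                                      \
  } while (0)

constexpr int RET_OK = 0;
constexpr int RET_ERROR = -1;  // placeholder error codes

// Hypothetical kernel step showing the intended use: reject a zero (possibly
// overflowed) byte size before it reaches memcpy.
int CopyChecked(void *dst, const void *src, size_t size) {
  MS_CHECK_FALSE(size == 0, RET_ERROR);
  std::memcpy(dst, src, size);
  return RET_OK;
}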
@@ -16,6 +16,8 @@
 #ifndef MINDSPORE_INCLUDE_API_DATA_TYPE_H_
 #define MINDSPORE_INCLUDE_API_DATA_TYPE_H_
 
+#include <stdint.h>
+
 namespace mindspore {
 enum class DataType : int {
   kTypeUnknown = 0,
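A plausible reading of the data_type change: the public header (or a translation unit that includes it in isolation) relies on fixed-width integer types such as int8_t, which are only guaranteed to be declared after <stdint.h> (or <cstdint>) is included, so depending on transitive includes can break standalone compilation. A minimal sketch of the kind of standalone-include check this enables, with the header path assumed from the include guard:

// Hypothetical standalone-include test (path assumed from the guard macro
// MINDSPORE_INCLUDE_API_DATA_TYPE_H_): with <stdint.h> pulled in by the
// header itself, this single translation unit compiles with no other
// include appearing first.
#include "include/api/data_type.h"

int main() { return static_cast<int>(mindspore::DataType::kTypeUnknown); }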
@@ -85,9 +85,11 @@ int CarryDataKernel::MoveTensorData(lite::Tensor *dst_tensor, lite::Tensor *src_
   CHECK_NULL_RETURN(src_tensor->data());
   CHECK_NULL_RETURN(dst_tensor->data());
   // need replace with increase data ref count
+  MS_CHECK_FALSE(src_tensor->Size() == 0, RET_ERROR);
   memcpy(dst_tensor->data(), src_tensor->data(), src_tensor->Size());
   return RET_OK;
 }
 
 #ifndef CONTROLFLOW_TENSORLIST_CLIP
 int CarryDataKernel::MoveTensorListData(lite::TensorList *dst_tensorlist, lite::TensorList *src_tensorlist) {
   // shape may change, because tensors.size() can be change in RunGraph
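The MoveTensorData hunk sets the pattern used in most of the hunks below: tensor->Size() is the byte count handed straight to memcpy, so a Size() of zero (for example after an element-count overflow upstream) is turned into an explicit RET_ERROR instead of a silent no-op copy. A minimal sketch of the guarded copy with hypothetical names; note that destination capacity is still not validated, matching the hunk:

#include <cstddef>
#include <cstring>

constexpr int RET_OK = 0;
constexpr int RET_ERROR = -1;  // placeholder error codes

// Hypothetical helper mirroring the guarded pattern in MoveTensorData.
int MoveRawData(void *dst, const void *src, size_t src_size) {
  if (dst == nullptr || src == nullptr) {
    return RET_ERROR;  // corresponds to the CHECK_NULL_RETURN calls
  }
  if (src_size == 0) {
    return RET_ERROR;  // corresponds to MS_CHECK_FALSE(src_tensor->Size() == 0, RET_ERROR)
  }
  std::memcpy(dst, src, src_size);
  return RET_OK;
}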
@@ -71,6 +71,7 @@ int ConstantOfShapeCPUKernel::Run() {
   CHECK_NULL_RETURN(output);
   param_->data_type_ = output->data_type();
   param_->element_size_ = output->ElementsNum();
+  MS_CHECK_GT(param_->element_size_, 0, RET_ERROR);
   output_ptr_ = output->data();
   CHECK_NULL_RETURN(output_ptr_);
 
@@ -139,6 +139,7 @@ int ConvolutionBaseCPUKernel::InitConvWeightBias() {
   }
 
   if (in_tensors_.size() == kInputSize2) {
+    MS_CHECK_FALSE(in_tensors_.at(kBiasIndex)->Size() == 0, RET_ERROR);
     memcpy(bias_data_, origin_bias_, in_tensors_.at(kBiasIndex)->Size());
   } else {
     MS_ASSERT(in_tensors_.size() == kInputSize1);
@@ -49,6 +49,7 @@ int DetectionPostProcessBaseCPUKernel::Prepare() {
   params_->selected_ = nullptr;
   params_->anchors_ = nullptr;
   auto anchor_tensor = in_tensors_.at(2);
+  MS_CHECK_GT(anchor_tensor->ElementsNum(), 0, RET_ERROR);
   CHECK_NULL_RETURN(anchor_tensor->data());
   if (anchor_tensor->data_type() == kNumberTypeInt8) {
     auto quant_param = anchor_tensor->quant_params().front();
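For context on the int8 branch above: once the anchor tensor is known to be int8, its quant_params().front() entry supplies the affine quantization parameters, and the usual dequantization is real = scale * (q - zero_point). A hedged sketch with a hypothetical parameter struct; the real MindSpore Lite quant-parameter layout may differ:

#include <cstddef>
#include <cstdint>
#include <vector>

// Hypothetical quantization parameters; field names are assumptions.
struct QuantParam {
  double scale = 1.0;
  int32_t zero_point = 0;
};

// Affine dequantization of int8 anchor values: real = scale * (q - zero_point).
std::vector<float> DequantizeAnchors(const int8_t *data, size_t count, const QuantParam &qp) {
  std::vector<float> out(count);
  for (size_t i = 0; i < count; ++i) {
    out[i] = static_cast<float>(qp.scale * (data[i] - qp.zero_point));
  }
  return out;
}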
@@ -78,6 +79,7 @@
       MS_LOG(ERROR) << "Malloc anchor failed";
       return RET_ERROR;
     }
+    MS_CHECK_FALSE(anchor_tensor->Size() == 0, RET_ERROR);
     memcpy(params_->anchors_, anchor_tensor->data(), anchor_tensor->Size());
   } else {
     MS_LOG(ERROR) << "unsupported anchor data type " << anchor_tensor->data_type();
@@ -76,6 +76,7 @@ lite::Tensor *CreateConstTensor(const lite::Tensor *tensor, const std::vector<in
     return nullptr;
   }
 
+  MS_CHECK_FALSE(new_tensor->Size() == 0, nullptr);
   uint8_t *new_tensor_data = reinterpret_cast<uint8_t *>(tensor->data()) + index * new_tensor->Size();
   memcpy(new_tensor->data(), reinterpret_cast<void *>(new_tensor_data), new_tensor->Size());
   return new_tensor;
@@ -82,7 +82,7 @@ int OneHotCPUKernel::ReSize() {
     return RET_ERROR;
   }
   inner_size_ = indices->ElementsNum() / outer_size_;
-
+  MS_CHECK_GT(inner_size_, 0, RET_ERROR);
   return RET_OK;
 }
 
@@ -62,10 +62,7 @@ int QuantDTypeCastCPUKernel::ReSize() {
   MS_ASSERT(in_tensor != nullptr);
   num_unit_ = static_cast<int>(in_tensor->ElementsNum());
   thread_n_num_ = MSMIN(thread_num_, num_unit_);
-  if (thread_n_num_ == 0) {
-    MS_LOG(ERROR) << "div zero";
-    return RET_ERROR;
-  }
+  MS_CHECK_GT(thread_n_num_, 0, RET_ERROR);
   thread_n_stride_ = UP_DIV(num_unit_, thread_n_num_);
   return RET_OK;
 }
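In the ReSize hunk above, a single MS_CHECK_GT replaces the hand-rolled division-by-zero block; the value it protects feeds UP_DIV(num_unit_, thread_n_num_), a ceiling division that sizes each thread's slice of work. A small self-contained sketch of that partitioning arithmetic, assuming UP_DIV is the usual (a + b - 1) / b:

#include <algorithm>
#include <cstdio>

// Ceiling division as commonly defined by an UP_DIV macro (assumption: the
// MindSpore Lite macro behaves the same way).
static int UpDiv(int a, int b) { return (a + b - 1) / b; }

int main() {
  const int num_unit = 10;   // elements to process
  const int thread_num = 4;  // configured thread count
  const int threads = std::min(thread_num, num_unit);  // MSMIN(...) in the kernel
  const int stride = UpDiv(num_unit, threads);          // elements per thread

  // Each task handles the half-open range [task_id * stride, task_id * stride + count).
  for (int task_id = 0; task_id < threads; ++task_id) {
    const int offset = task_id * stride;
    const int count = std::min(stride, num_unit - offset);
    std::printf("task %d: offset=%d count=%d\n", task_id, offset, count);
  }
  return 0;
}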
@@ -178,6 +175,7 @@ int QuantDTypeCastCPUKernel::Run() {
     if (int8_ptr_ == nullptr || int8_out_ptr_ == nullptr) {
       return RET_NULL_PTR;
     }
+    MS_CHECK_GT(in_tensors_[0]->ElementsNum(), 0, RET_ERROR);
     float32_ptr_ = new (std::nothrow) float[in_tensors_[0]->ElementsNum()];
     if (float32_ptr_ == nullptr) {
       MS_LOG(ERROR) << "new float[] failed";
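In the Run hunk, ElementsNum() is used directly as the length of a new float[] allocation; it is a signed count, so zero or a negative value (for example after an integer overflow while multiplying dimensions) would make the allocation size meaningless. The added MS_CHECK_GT rules that out before the new. A hedged sketch of the same guard, with hypothetical names and placeholder error codes:

#include <cstddef>
#include <cstdint>
#include <new>

constexpr int RET_OK = 0;
constexpr int RET_ERROR = -1;
constexpr int RET_NULL_PTR = -2;  // placeholder error codes

// Hypothetical helper: allocate a float staging buffer only when the element
// count is positive, mirroring the guard added before `new (std::nothrow)`.
int AllocFloatBuffer(float **out, int64_t element_num) {
  if (element_num <= 0) {
    return RET_ERROR;  // MS_CHECK_GT(in_tensors_[0]->ElementsNum(), 0, RET_ERROR)
  }
  *out = new (std::nothrow) float[static_cast<size_t>(element_num)];
  return (*out == nullptr) ? RET_NULL_PTR : RET_OK;
}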
@@ -44,6 +44,7 @@ int RandomStandardNormalCPUKernel::Run() {
   std::default_random_engine engine{static_cast<unsigned int>(random_seed)};
   std::normal_distribution<double> nums(0, 1.0);
   auto all_data_nums = out_tensors_[0]->ElementsNum();
+  MS_CHECK_GT(all_data_nums, 0, RET_ERROR);
   auto out_data = out_tensors_[0]->data();
   MS_ASSERT(out_data != nullptr);
   auto output = reinterpret_cast<float *>(out_data);
@@ -106,7 +106,7 @@ int ReduceBaseCPUKernel::Prepare() {
     MS_CHECK_FALSE_MSG((axes_tensor->data_type() != kNumberTypeInt && axes_tensor->data_type() != kNumberTypeInt32),
                        RET_ERROR, "The data type of axes tensor should be int32");
     num_axes_ = axes_tensor->ElementsNum();
-    if (axes_tensor->ElementsNum() > MAX_SHAPE_SIZE) {
+    if (num_axes_ <= 0 && num_axes_ > MAX_SHAPE_SIZE) {
       MS_LOG(ERROR) << "input axes invalid.";
       return RET_ERROR;
     }
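One thing worth flagging in the hunk above: as written, num_axes_ <= 0 && num_axes_ > MAX_SHAPE_SIZE can never be true (a value cannot be both non-positive and larger than MAX_SHAPE_SIZE), so the "input axes invalid." branch is unreachable; the intent is presumably a logical OR. A hedged sketch of the guard as it presumably should read; the MAX_SHAPE_SIZE value here is an assumption:

#include <cstdio>

constexpr int MAX_SHAPE_SIZE = 8;  // assumption about the MindSpore Lite shape cap
constexpr int RET_OK = 0;
constexpr int RET_ERROR = -1;  // placeholder error codes

// Presumed intent of the check: reject axis counts outside (0, MAX_SHAPE_SIZE].
int ValidateNumAxes(int num_axes) {
  if (num_axes <= 0 || num_axes > MAX_SHAPE_SIZE) {  // OR, not AND
    std::fprintf(stderr, "input axes invalid.\n");
    return RET_ERROR;
  }
  return RET_OK;
}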
@@ -121,6 +121,7 @@ int ReduceBaseCPUKernel::Prepare() {
         return RET_ERROR;
       }
     } else {
+      MS_CHECK_FALSE(axes_tensor->Size() == 0, RET_ERROR);
       memcpy(axes_, axes_tensor->data(), axes_tensor->Size());
     }
   } else {
@@ -44,6 +44,7 @@ int ReshapeBaseCPUKernel::Run() {
       op_parameter_->is_train_session_) {
     CHECK_NULL_RETURN(out_tensor->data());
     CHECK_NULL_RETURN(in_tensor->data());
+    MS_CHECK_FALSE(in_tensor->Size() == 0, RET_ERROR);
     memcpy(out_tensor->data(), in_tensor->data(), in_tensor->Size());
     return RET_OK;
   }
@@ -75,6 +75,7 @@ int SelectCPUKernel::Run() {
   }
   MS_ASSERT(in_tensors_.at(1)->Size() == out_tensors_.at(0)->Size());
   auto size = in_tensors_.at(1)->ElementsNum();
+  MS_CHECK_GT(size, 0, RET_ERROR);
   auto condition = static_cast<bool *>(bool_tensor->data());
   auto input1 = static_cast<float *>(in_tensors_.at(kFirstIdx)->data());
   auto input2 = static_cast<float *>(in_tensors_.at(kSecondIdx)->data());
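For context on the Select hunk: the kernel reads a bool condition tensor plus two value tensors, asserts that the first value input and the output have the same byte size, and the element count now being validated drives an element-wise selection, conventionally output[i] = condition[i] ? input1[i] : input2[i]. An illustrative sketch of that loop; the real kernel may also cover cases this ignores, such as a scalar condition:

#include <cstddef>

// Illustrative element-wise select, assuming the condition, both inputs and
// the output all hold `size` elements.
void SelectElementwise(const bool *condition, const float *input1, const float *input2,
                       float *output, size_t size) {
  for (size_t i = 0; i < size; ++i) {
    output[i] = condition[i] ? input1[i] : input2[i];
  }
}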
@@ -68,6 +68,7 @@ int StackBaseCPUKernel::ReSize() {
     copy_size_ = GetCopyNum(input0_shape, axis_, input0_shape.size()) * data_type_size_;
     outer_size_ = GetOuterSize(input0_shape, axis_);
   }
+  MS_CHECK_GT(copy_size_, 0, RET_ERROR);
   return RET_OK;
 }
 
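The common thread behind all of these guards: ElementsNum() and Size() are derived by multiplying dimensions (and an element size), so a malformed model can make that product overflow and wrap to zero or a negative value, which the new checks catch before it reaches memcpy or operator new[]. An illustrative overflow-aware element count, not the MindSpore Lite implementation:

#include <cstdint>
#include <limits>
#include <vector>

// Illustrative overflow-aware element count: returns -1 when a dimension is
// negative or when the product of dimensions would not fit in int64_t.
int64_t CheckedElementsNum(const std::vector<int> &shape) {
  int64_t count = 1;
  for (int dim : shape) {
    if (dim < 0) {
      return -1;
    }
    if (dim != 0 && count > std::numeric_limits<int64_t>::max() / dim) {
      return -1;  // multiplication would overflow
    }
    count *= dim;
  }
  return count;
}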