!30122 [MS][LITE] sync code clean and fuzz bug fix 2
Merge pull request !30122 from XianglongZeng/myms_new
commit 7a864db171
@@ -1,5 +1,5 @@
 /**
- * Copyright 2020 Huawei Technologies Co., Ltd
+ * Copyright 2020-2022 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -26,7 +26,7 @@ using mindspore::schema::PrimitiveType_PowFusion;
 namespace mindspore::kernel {
 int PowerCPUKernel::Prepare() {
-  CHECK_LESS_RETURN(in_tensors_.size(), C2NUM);
+  MS_CHECK_TRUE_MSG(in_tensors_.size() == C2NUM, RET_ERROR, "Only support Power op with 2 inputs.");
+  auto exp_datatype = in_tensors_.at(1)->data_type();
+  MS_CHECK_TRUE_MSG((exp_datatype == kNumberTypeFloat32 || exp_datatype == kNumberTypeFloat ||
+                     exp_datatype == kNumberTypeInt32 || exp_datatype == kNumberTypeInt),
@@ -70,7 +70,6 @@ int PowerCPUKernel::RunImpl(int task_id) const {
   if (len <= 0) {
     return RET_OK;
   }
-  MS_ASSERT(in_tensors_.size() == 2);
   float *exp_addr = reinterpret_cast<float *>(in_tensors_[1]->data());
   CHECK_NULL_RETURN(exp_addr);
   bool broadcast = in_tensors_[0]->shape() == in_tensors_[1]->shape() ? false : true;
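Note: the Prepare() hunk above swaps the loose CHECK_LESS_RETURN for a strict two-input check and validates the exponent tensor's data type before RunImpl reinterprets its buffer as float. A minimal standalone sketch of that guard pattern, with made-up names rather than the real MindSpore Lite API:

#include <cstdio>
#include <vector>

enum DataType { kFloat32, kInt32, kInt8 };

// Hypothetical stand-in for the kernel's input validation: exactly two inputs,
// and the exponent (input 1) must be a float32/int32 tensor.
bool ValidatePowerInputs(const std::vector<DataType> &input_types) {
  if (input_types.size() != 2) {
    std::printf("Only support Power op with 2 inputs.\n");
    return false;
  }
  const DataType exp_type = input_types[1];
  if (exp_type != kFloat32 && exp_type != kInt32) {
    std::printf("Unsupported exponent data type.\n");
    return false;
  }
  return true;
}

int main() {
  // A fuzzed model carrying an int8 exponent tensor is rejected up front
  // instead of having its buffer reinterpreted as float later.
  std::printf("%d\n", ValidatePowerInputs({kFloat32, kInt8}));  // prints 0
  return 0;
}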
@@ -1,5 +1,5 @@
 /**
- * Copyright 2021 Huawei Technologies Co., Ltd
+ * Copyright 2021-2022 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -88,9 +88,6 @@ int SubInt8CPUKernel::Prepare() {
   quant_param_->left_shift_result0_ = (1 << left_shift) * ((1 << left_shift0));
   quant_param_->left_shift_result1_ = (1 << left_shift) * ((1 << left_shift1));
 
-  MS_ASSERT(left_shift + left_shift0 == left_shift);
-  MS_ASSERT(left_shift + left_shift1 == left_shift);
-
   if (!InferShapeDone()) {
     return RET_OK;
   }
@@ -185,11 +185,13 @@ int MindirAdjust::AdjustInputDataType(AnfNodePtr anf_node) {
   MS_CHECK_TRUE_MSG(anf_node != nullptr, RET_ERROR, "anf_node is nullptr");
   auto param_node = anf_node->cast<ParameterPtr>();
   MS_CHECK_TRUE_MSG(param_node != nullptr, RET_ERROR, "param_node is nullptr");
-  auto abstract_tensor = param_node->abstract()->cast<abstract::AbstractTensorPtr>();
+  auto abstract = param_node->abstract();
+  MS_CHECK_TRUE_MSG(abstract != nullptr, RET_ERROR, "abstract is nullptr");
+  auto abstract_tensor = abstract->cast<abstract::AbstractTensorPtr>();
   MS_CHECK_TRUE_MSG(abstract_tensor != nullptr, RET_ERROR, "param node has no abstract tensor.");
   auto tensor_element = abstract_tensor->element();
   MS_CHECK_TRUE_MSG(tensor_element != nullptr, RET_ERROR, "abstract tensor's element is null.");
-  auto type_ptr = abstract_tensor->element()->GetTypeTrack();
+  auto type_ptr = tensor_element->GetTypeTrack();
   MS_CHECK_TRUE_MSG(type_ptr != nullptr, RET_ERROR, "Type pointer is null.");
   auto org_type = type_ptr->type_id();
   if (!param_node->has_default() && (org_type == kNumberTypeInt64 || org_type == kNumberTypeFloat64)) {
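This hunk breaks the chained param_node->abstract()->cast<...>() into one checked step per pointer, so a malformed MindIR parameter with a missing abstract fails with an error instead of dereferencing null. A tiny illustration of the same pattern with simplified, made-up types (not the real AnfNode/Abstract classes):

#include <cstdio>
#include <memory>

struct Element { int type_id = 0; };
struct AbstractTensor { std::shared_ptr<Element> element; };
struct Parameter { std::shared_ptr<AbstractTensor> abstract; };

// Check every link of the chain before using it.
int AdjustInputDataType(const std::shared_ptr<Parameter> &param_node) {
  if (param_node == nullptr) { std::printf("param_node is nullptr\n"); return -1; }
  auto abstract = param_node->abstract;
  if (abstract == nullptr) { std::printf("abstract is nullptr\n"); return -1; }
  auto tensor_element = abstract->element;
  if (tensor_element == nullptr) { std::printf("element is null\n"); return -1; }
  std::printf("type id: %d\n", tensor_element->type_id);  // safe to read now
  return 0;
}

int main() {
  AdjustInputDataType(std::make_shared<Parameter>());  // abstract is null, reported instead of crashing
  return 0;
}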
@@ -1,5 +1,5 @@
 /**
- * Copyright 2020 Huawei Technologies Co., Ltd
+ * Copyright 2020-2022 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -32,6 +32,7 @@ ops::PrimitiveC *TfliteArgminParser::Parse(const std::unique_ptr<tflite::Operato
   prim->set_out_max_value(false);
   prim->set_top_k(1);
 
+  MS_CHECK_TRUE_MSG(tflite_op->inputs.size() >= kInputSize1, nullptr, "argmin input size should be greater than 1.");
   const auto &axis_tensor = tflite_subgraph->tensors.at(tflite_op->inputs[1]);
   MS_CHECK_TRUE_MSG(axis_tensor != nullptr, nullptr, "axis_tensor is nullptr");
   const auto &buf_data = tflite_model->buffers.at(axis_tensor->buffer);
@@ -58,6 +58,60 @@ std::unique_ptr<tflite::ModelT> TfliteModelParser::ReadTfliteModel(const std::st
   return tflite::UnPackModel(tflite_model_buf_);
 }
 
+STATUS TfliteModelParser::TfliteOpVerify(const std::unique_ptr<tflite::SubGraphT> &subgraph,
+                                         const size_t operator_codes_size, const size_t all_tensor_size) {
+  int32_t all_tensor_num = static_cast<int32_t>(all_tensor_size);
+  for (auto &op : subgraph->operators) {
+    if (op == nullptr) {
+      MS_LOG(ERROR) << "tflite contain nullptr op.";
+      return RET_ERROR;
+    }
+    if (op->opcode_index >= operator_codes_size) {
+      MS_LOG(ERROR) << "op is not a tflite opcode";
+      return RET_ERROR;
+    }
+    if (std::any_of(op->inputs.begin(), op->inputs.end(), [&all_tensor_num](int32_t index) {
+          return index >= all_tensor_num || index + all_tensor_num < 0;
+        })) {
+      MS_LOG(ERROR) << "op input illegal.";
+      return RET_ERROR;
+    }
+    if (std::any_of(op->outputs.begin(), op->outputs.end(), [&all_tensor_num](int32_t index) {
+          return index >= all_tensor_num || index + all_tensor_num < 0;
+        })) {
+      MS_LOG(ERROR) << "op output illegal.";
+      return RET_ERROR;
+    }
+  }
+  return RET_OK;
+}
+
+STATUS TfliteModelParser::TfliteTensorVerify(const std::unique_ptr<tflite::SubGraphT> &subgraph,
+                                             const size_t model_buffers_size) {
+  for (auto &tensor : subgraph->tensors) {
+    if (tensor == nullptr) {
+      MS_LOG(ERROR) << "tflite model contain nullptr tensor.";
+      return RET_ERROR;
+    }
+    if (tensor->buffer >= model_buffers_size) {
+      MS_LOG(ERROR) << "tflite tensor buffer index beyond upper limit.";
+      return RET_ERROR;
+    }
+    if (tensor->quantization != nullptr && !tensor->quantization->scale.empty()) {
+      auto scale_size = tensor->quantization->scale.size();
+      auto zp_size = tensor->quantization->zero_point.size();
+      auto min_size = tensor->quantization->min.size();
+      auto max_size = tensor->quantization->max.size();
+      if ((zp_size != 0 && zp_size != scale_size) || (min_size != 0 && min_size != scale_size) ||
+          (max_size != 0 && max_size != scale_size)) {
+        MS_LOG(ERROR) << "The element numbers of non-empty quantization parameters must be same.";
+        return RET_ERROR;
+      }
+    }
+  }
+  return RET_OK;
+}
+
 STATUS TfliteModelParser::TfliteModelVerify() {
   if (tflite_model_->subgraphs.empty()) {
     MS_LOG(ERROR) << "tflite model does not has a main graph.";
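One detail worth noting in TfliteOpVerify above: the lambda treats an operator tensor index as legal only when it falls in [-all_tensor_num, all_tensor_num). Writing the lower bound as index + all_tensor_num < 0 rather than index < 0 presumably keeps tflite's negative indices for omitted optional inputs (such as -1) from being rejected. A standalone check of just that arithmetic, as an illustration only (not converter code):

#include <cstdint>
#include <cstdio>

// Returns true when an operator tensor index is illegal under the check above.
bool IllegalIndex(int32_t index, int32_t all_tensor_num) {
  return index >= all_tensor_num || index + all_tensor_num < 0;
}

int main() {
  const int32_t all_tensor_num = 4;
  std::printf("%d\n", IllegalIndex(3, all_tensor_num));   // 0: last valid tensor index
  std::printf("%d\n", IllegalIndex(-1, all_tensor_num));  // 0: optional-input marker stays legal
  std::printf("%d\n", IllegalIndex(4, all_tensor_num));   // 1: out of range
  std::printf("%d\n", IllegalIndex(-5, all_tensor_num));  // 1: below -all_tensor_num
  return 0;
}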
@@ -71,46 +125,34 @@ STATUS TfliteModelParser::TfliteModelVerify() {
       MS_LOG(ERROR) << "tflite contain nullptr subgraph.";
       return RET_ERROR;
     }
-    auto all_singraph_tensor_size = subgraph->tensors.size();
+    auto all_subgraph_tensor_size = subgraph->tensors.size();
     if (subgraph->inputs.empty() || subgraph->outputs.empty()) {
       MS_LOG(ERROR) << "tflite subgraph inputs or outputs is empty.";
       return RET_ERROR;
     }
-    if (std::any_of(subgraph->inputs.begin(), subgraph->inputs.end(), [&all_singraph_tensor_size](int32_t index) {
-          return index >= static_cast<int32_t>(all_singraph_tensor_size) || index < 0;
+    if (std::any_of(subgraph->inputs.begin(), subgraph->inputs.end(), [&all_subgraph_tensor_size](int32_t index) {
+          return index >= static_cast<int32_t>(all_subgraph_tensor_size) || index < 0;
         })) {
       MS_LOG(ERROR) << "tflite input illegal.";
       return RET_ERROR;
     }
-    if (std::any_of(subgraph->outputs.begin(), subgraph->outputs.end(), [&all_singraph_tensor_size](int32_t index) {
-          return index >= static_cast<int32_t>(all_singraph_tensor_size) || index < 0;
+    if (std::any_of(subgraph->outputs.begin(), subgraph->outputs.end(), [&all_subgraph_tensor_size](int32_t index) {
+          return index >= static_cast<int32_t>(all_subgraph_tensor_size) || index < 0;
         })) {
       MS_LOG(ERROR) << "tflite output illegal.";
       return RET_ERROR;
     }
-    for (auto &op : subgraph->operators) {
-      if (op == nullptr) {
-        MS_LOG(ERROR) << "tflite contain nullptr op.";
-        return RET_ERROR;
-      }
-      if (op->opcode_index >= tflite_model_operator_codes_size) {
-        MS_LOG(ERROR) << "op is not a tflite opcode";
-        return RET_ERROR;
-      }
+    auto ret = TfliteOpVerify(subgraph, tflite_model_operator_codes_size, all_subgraph_tensor_size);
+    if (ret != RET_OK) {
+      MS_LOG(ERROR) << "Tflite op verification dose not pass.";
+      return RET_ERROR;
+    }
 
-    for (auto &tensor : subgraph->tensors) {
-      if (tensor == nullptr) {
-        MS_LOG(ERROR) << "tflite model contain nullptr tensor.";
-        return RET_ERROR;
-      }
-      if (tensor->buffer >= tflite_model_buffers_size) {
-        MS_LOG(ERROR) << "tflite tensor buffer index beyond upper limit.";
-        return RET_ERROR;
-      }
+    ret = TfliteTensorVerify(subgraph, tflite_model_buffers_size);
+    if (ret != RET_OK) {
+      MS_LOG(ERROR) << "Tflite Tensor verification dose not pass.";
+      return RET_ERROR;
+    }
   }
 
   return RET_OK;
 }
@@ -353,18 +395,13 @@ STATUS TfliteModelParser::SetTensorQuantParam(const std::unique_ptr<tflite::Tens
       return RET_NULL_PTR;
     }
 
-    if (!tflite_tensor->quantization->scale.empty()) {
-      quant_param->scale = tflite_tensor->quantization->scale[i];
-    }
-
+    quant_param->scale = tflite_tensor->quantization->scale[i];
     if (!tflite_tensor->quantization->zero_point.empty()) {
       quant_param->zeroPoint = tflite_tensor->quantization->zero_point[i];
     }
-
     if (!tflite_tensor->quantization->min.empty()) {
       quant_param->min = tflite_tensor->quantization->min[i];
     }
-
     if (!tflite_tensor->quantization->max.empty()) {
       quant_param->max = tflite_tensor->quantization->max[i];
     }
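The scale, zero_point, min and max fields above are all indexed with the same i, guarded only by empty() checks; the size-consistency rule added in TfliteTensorVerify earlier in this diff (non-empty zero_point/min/max must match scale's length) is what keeps those accesses in bounds for malformed models. A small sketch of that rule on plain vectors, purely as an illustration (not the flatbuffer types):

#include <cstdint>
#include <cstdio>
#include <vector>

// Any non-empty quantization array must have the same element count as scale.
bool QuantParamsConsistent(const std::vector<float> &scale, const std::vector<int64_t> &zero_point,
                           const std::vector<float> &min, const std::vector<float> &max) {
  const size_t scale_size = scale.size();
  auto ok = [scale_size](size_t n) { return n == 0 || n == scale_size; };
  return ok(zero_point.size()) && ok(min.size()) && ok(max.size());
}

int main() {
  std::printf("%d\n", QuantParamsConsistent({0.1f, 0.2f}, {0, 0}, {}, {}));  // 1: consistent
  std::printf("%d\n", QuantParamsConsistent({0.1f, 0.2f}, {0}, {}, {}));     // 0: zero_point length differs from scale
  return 0;
}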
@@ -1,5 +1,5 @@
 /**
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2019-2022 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -73,6 +73,9 @@ class TfliteModelParser : public converter::ModelParser {
                             ops::PrimitiveC *primitive_c);
   static STATUS SetTensorQuantParam(const std::unique_ptr<tflite::TensorT> &tflite_tensor,
                                     std::vector<QuantParamT> *quant_params, int round_type = 1);
+  STATUS TfliteOpVerify(const std::unique_ptr<tflite::SubGraphT> &subgraph, const size_t operator_codes_size,
+                        const size_t all_tensor_size);
+  STATUS TfliteTensorVerify(const std::unique_ptr<tflite::SubGraphT> &subgraph, const size_t model_buffers_size);
   STATUS TfliteModelVerify();
 
  private: