forked from mindspore-Ecosystem/mindspore
!24442 fix converter fuzz issue
Merge pull request !24442 from hangq/fuzz
This commit is contained in:
commit
ceb92fd6f5
|
@ -225,6 +225,7 @@ int GenerateInTensorC(const OpParameter *const parameter, const std::vector<lite
|
|||
ret = Tensor2TensorC(input, tensor_c);
|
||||
if (ret != RET_OK) {
|
||||
MS_LOG(ERROR) << "Tensor to TensorC failed.";
|
||||
free(tensor_c);
|
||||
return ret;
|
||||
}
|
||||
in_tensor_c->emplace_back(tensor_c);
|
||||
|
|
|
@ -129,7 +129,7 @@ int MatmulSparseCPUKernel::PrepareBias() {
|
|||
return RET_OK;
|
||||
}
|
||||
|
||||
int MatmulSparseCPUKernel::Init() {
|
||||
int MatmulSparseCPUKernel::Prepare() {
|
||||
if (!InferShapeDone()) {
|
||||
return RET_ERROR;
|
||||
}
|
||||
|
@ -185,7 +185,7 @@ int MatmulSparseCPUKernel::PackInput() {
|
|||
auto stride = matrix_a_pack_size_ / task_num;
|
||||
|
||||
auto *src = reinterpret_cast<const float *>(in_tensors_[0]->data());
|
||||
for (int i = 0; i < task_num; i++) {
|
||||
for (size_t i = 0; i < task_num; i++) {
|
||||
PackNHWCToNCHWFp32(src + i * stride, a_pack_ + i * stride, params_->batch, kBlockSize, params_->deep_, 0, 0);
|
||||
}
|
||||
return RET_OK;
|
||||
|
|
|
@ -40,7 +40,7 @@ class MatmulSparseCPUKernel : public InnerKernel {
|
|||
params_ = reinterpret_cast<MatMulParameter *>(op_parameter_);
|
||||
}
|
||||
~MatmulSparseCPUKernel() override;
|
||||
int Init() override;
|
||||
int Prepare() override;
|
||||
int ReSize() override;
|
||||
int Run() override;
|
||||
int RunInstrinsics();
|
||||
|
|
|
@ -47,6 +47,10 @@ std::unique_ptr<schema::QuantParamT> CopyQuantParamT(const std::unique_ptr<schem
|
|||
|
||||
tensor::TensorPtr CreateTensorInfo(const void *data, size_t data_size, const std::vector<int64_t> &shape,
|
||||
TypeId data_type) {
|
||||
if (data_type == kTypeUnknown) {
|
||||
MS_LOG(ERROR) << "data type of tensor is unknown";
|
||||
return nullptr;
|
||||
}
|
||||
tensor::TensorPtr tensor_info = nullptr;
|
||||
if (shape.empty() && data_size == mindspore::abstract::TypeIdSize(data_type)) {
|
||||
ShapeVector scalar_shape = {1};
|
||||
|
|
|
@ -124,6 +124,7 @@ schema::MetaGraphT *Converter::Convert(const std::unique_ptr<converter::Flags> &
|
|||
if (status != RET_OK) {
|
||||
MS_LOG(ERROR) << "Transform meta graph failed " << status;
|
||||
ReturnCode::GetSingleReturnCode()->UpdateReturnCode(status);
|
||||
delete meta_graph;
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
|
|
|
@ -47,6 +47,42 @@ constexpr size_t kFcWeightSecondShapeIndex = 1;
|
|||
constexpr size_t kFcBiasFirstShapeIndex = 0;
|
||||
constexpr size_t kFcBiasSecondShapeIndex = 1;
|
||||
constexpr size_t kFcBiasThirdShapeIndex = 2;
|
||||
|
||||
// Sanity-checks a caffe model graph before conversion (fuzz hardening):
//  - each blob name may be produced at most once (graph inputs and layer
//    tops together form the provider set),
//  - each blob name may be consumed at most once across all layer bottoms,
//  - every consumed blob must have a producer.
// Returns RET_OK when the graph passes, RET_ERROR otherwise.
// NOTE(review): `caffe_weight` is accepted but not inspected here; also, a
// repeated top rejects caffe in-place layers (top == bottom) — confirm both
// are intended for this converter.
STATUS CheckCaffeModel(const caffe::NetParameter &caffe_model, const caffe::NetParameter &caffe_weight) {
  std::set<std::string> providers;
  std::set<std::string> consumers;
  for (int idx = 0; idx < caffe_model.input_size(); ++idx) {
    const auto &graph_input = caffe_model.input(idx);
    // set::insert reports through .second whether the name was newly added.
    if (!providers.insert(graph_input).second) {
      MS_LOG(ERROR) << "Top repeated";
      return RET_ERROR;
    }
  }
  for (const auto &layer : caffe_model.layers()) {
    // Every top a layer produces must be globally unique.
    for (const auto &top : layer.top()) {
      if (!providers.insert(top).second) {
        MS_LOG(ERROR) << "Top repeated";
        return RET_ERROR;
      }
    }
    // Every bottom a layer consumes must be globally unique as well.
    for (const auto &bottom : layer.bottom()) {
      if (!consumers.insert(bottom).second) {
        MS_LOG(ERROR) << "Bottom repeated";
        return RET_ERROR;
      }
    }
  }
  // Each consumed blob needs a matching producer.
  for (const auto &consumer : consumers) {
    if (providers.count(consumer) == 0) {
      MS_LOG(ERROR) << "Bottom and top mismatch";
      return RET_ERROR;
    }
  }
  return RET_OK;
}
|
||||
} // namespace
|
||||
bool IsSkipedLayer(const caffe::LayerParameter &layer) {
|
||||
if (layer.type() == "Input" || layer.type() == "Dropout" || layer.type() == "Split") {
|
||||
|
@ -90,6 +126,12 @@ api::FuncGraphPtr CaffeModelParser::Parse(const converter::ConverterParameters &
|
|||
ReturnCode::GetSingleReturnCode()->UpdateReturnCode(status);
|
||||
return nullptr;
|
||||
}
|
||||
status = CheckCaffeModel(caffe_model_, caffe_weight_);
|
||||
if (status != RET_OK) {
|
||||
MS_LOG(ERROR) << "Input caffe model error: " << status;
|
||||
ReturnCode::GetSingleReturnCode()->UpdateReturnCode(status);
|
||||
return nullptr;
|
||||
}
|
||||
res_graph_ = std::make_shared<FuncGraph>();
|
||||
MS_CHECK_TRUE_RET(res_graph_ != nullptr, nullptr);
|
||||
status = ConvertGraphInputs();
|
||||
|
|
|
@ -500,6 +500,44 @@ FuncGraphPtr OnnxModelParser::BuildBodyGraph(const onnx::NodeProto &loop_node, c
|
|||
return loop_body_graph;
|
||||
}
|
||||
|
||||
namespace {
|
||||
// Sanity-checks an onnx graph before conversion (fuzz hardening):
//  - initializer (const tensor) names must be unique,
//  - every node output name must be unique and must not collide with an
//    initializer or graph input,
//  - a node input with no known producer is only warned about, not rejected.
// Returns RET_OK on success, RET_ERROR on a malformed graph.
STATUS CheckOnnxModel(const onnx::GraphProto &onnx_graph) {
  std::set<std::string> providers;
  // Initializers provide tensors; a duplicated name is malformed.
  for (const auto &const_tensor : onnx_graph.initializer()) {
    // set::insert reports through .second whether the name was newly added.
    if (!providers.insert(const_tensor.name()).second) {
      MS_LOG(ERROR) << "const tensor repeated";
      return RET_ERROR;
    }
  }
  // Graph inputs are providers too. Duplicates with initializers are
  // tolerated here (no uniqueness check on this insert).
  for (const auto &graph_input : onnx_graph.input()) {
    providers.insert(graph_input.name());
  }
  // Each node output must be produced exactly once across the whole graph.
  for (const auto &onnx_node : onnx_graph.node()) {
    for (const auto &output : onnx_node.output()) {
      if (!providers.insert(output).second) {
        MS_LOG(ERROR) << "Output tensor repeated";
        return RET_ERROR;
      }
    }
  }
  // An unresolved node input only triggers a warning.
  for (const auto &onnx_node : onnx_graph.node()) {
    for (const auto &input : onnx_node.input()) {
      if (providers.count(input) == 0) {
        MS_LOG(WARNING) << "Can not find node input: " << input;
      }
    }
  }
  return RET_OK;
}
|
||||
} // namespace
|
||||
|
||||
api::FuncGraphPtr OnnxModelParser::Parse(const converter::ConverterParameters &flag) {
|
||||
auto model_file = flag.model_file;
|
||||
NotSupportOp::GetInstance()->set_fmk_type("ONNX");
|
||||
|
@ -581,6 +619,12 @@ STATUS OnnxModelParser::ConvertOnnxGraph(const onnx::GraphProto &onnx_graph, con
|
|||
MS_ASSERT(onnx_graph != nullptr && anf_graph != nullptr);
|
||||
MS_ASSERT(anf_nodes_map != nullptr && extra_subgraph_inputs != nullptr);
|
||||
STATUS status = RET_OK;
|
||||
status = CheckOnnxModel(onnx_graph);
|
||||
if (status != RET_OK) {
|
||||
ReturnCode::GetSingleReturnCode()->UpdateReturnCode(status);
|
||||
MS_LOG(ERROR) << "input onnx model error: " << status;
|
||||
return status;
|
||||
}
|
||||
status = ConvertConstTensors(onnx_graph, anf_graph, anf_nodes_map);
|
||||
if (RET_OK != status) {
|
||||
ReturnCode::GetSingleReturnCode()->UpdateReturnCode(status);
|
||||
|
|
Loading…
Reference in New Issue