set converted ms file permission and collect all unsupported nodes when parsing
parent a93476ba5d
commit 95c177482a
@@ -102,9 +102,13 @@ int WriteToBin(const std::string &file_path, void *data, size_t size) {
   return 0;
 }
 
-int CompareOutputData(float *output_data, float *correct_data, int data_size) {
+int CompareOutputData(float *output_data, size_t output_size, float *correct_data, size_t data_size) {
+  if (output_size != data_size) {
+    printf("compare failed, output_size %zu isn't equal to data_size %zu.\n", output_size, data_size);
+    return 0;
+  }
   float error = 0;
-  for (int i = 0; i < data_size; i++) {
+  for (size_t i = 0; i < data_size; i++) {
     float abs = fabs(output_data[i] - correct_data[i]);
     if (abs > 0.00001) {
       error += abs;
@@ -120,12 +124,12 @@ int CompareOutputData(float *output_data, float *correct_data, int data_size) {
   return 0;
 }
 
-int CompareOutput(float *output_data, std::string file_path) {
-  size_t output_size;
-  auto ground_truth = reinterpret_cast<float *>(mindspore::lite::ReadFile(file_path.c_str(), &output_size));
-  size_t output_num = output_size / sizeof(float);
-  printf("output num : %zu\n", output_num);
-  int res = CompareOutputData(output_data, ground_truth, output_num);
+int CompareOutput(float *output_data, size_t output_num, std::string file_path) {
+  size_t ground_truth_size;
+  auto ground_truth = reinterpret_cast<float *>(mindspore::lite::ReadFile(file_path.c_str(), &ground_truth_size));
+  size_t ground_truth_num = ground_truth_size / sizeof(float);
+  printf("ground truth num : %zu\n", ground_truth_num);
+  int res = CompareOutputData(output_data, output_num, ground_truth, ground_truth_num);
   delete[] ground_truth;
   return res;
 }
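The new size check guards the element-wise loop: callers now pass their own element count, so a shorter golden file is reported as a mismatch instead of causing an out-of-bounds read. A minimal standalone restatement of the checked part (local names and data; the real helper lives in the lite test utils and its tail, elided in this hunk, presumably averages and prints the error):

#include <cmath>
#include <cstddef>
#include <cstdio>

// Local restatement of the widened comparison; illustration only.
int CompareOutputData(const float *output, size_t output_size, const float *correct, size_t data_size) {
  if (output_size != data_size) {
    printf("compare failed, output_size %zu isn't equal to data_size %zu.\n", output_size, data_size);
    return 0;
  }
  float error = 0;
  for (size_t i = 0; i < data_size; i++) {
    float diff = std::fabs(output[i] - correct[i]);
    if (diff > 0.00001) {
      error += diff;  // only differences above the tolerance count, as in the hunk above
    }
  }
  printf("total error: %f\n", error);  // stand-in for the helper's elided reporting tail
  return 0;
}

int main() {
  float produced[3] = {1.0f, 2.0f, 3.0f};
  float golden[4] = {1.0f, 2.0f, 3.0f, 4.0f};
  // Counts differ: reported up front instead of reading past 'produced'.
  return CompareOutputData(produced, 3, golden, 4);
}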
@@ -50,8 +50,8 @@ void WriteToTxt(const std::string &file_path, void *data, size_t element_size) {
 
 int WriteToBin(const std::string &file_path, void *data, size_t size);
 
-int CompareOutputData(float *output_data, float *correct_data, int data_size);
-int CompareOutput(float *output_data, std::string file_path);
+int CompareOutputData(float *output_data, size_t output_num, float *correct_data, size_t data_size);
+int CompareOutput(float *output_data, size_t output_num, std::string file_path);
 
 std::string GetAndroidPackageName();
 std::string GetAndroidPackagePath();
@@ -169,6 +169,7 @@ class PrimitiveC {
     auto ret = primc->UnPackSchemaPrimitive(primitive);
     if (ret != RET_OK) {
       MS_LOG(ERROR) << "UnPackSchemaPrimitive failed";
+      delete primc;
       return nullptr;
     }
     return primc;
@@ -144,6 +144,8 @@ void CalShape(const T *data, const std::vector<Tensor *> &inputs, std::vector<in
   for (int i = 0; i < shape_size; i++) {
     if (static_cast<int>(data[i]) == -1) {
       index = i;
+    } else if (static_cast<int>(data[i]) == 0) {
+      size *= inputs[0]->shape()[i];
     } else {
       size *= data[i];
     }
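The added branch gives CalShape the usual reshape conventions: a 0 in the target shape copies the corresponding input dimension, while -1 marks the single dimension to infer from the remaining element count. A standalone sketch of those rules (helper name and driver are illustrative, not the converter's API):

#include <cstdio>
#include <vector>

// Hypothetical helper applying the two rules above.
std::vector<int> CalcReshape(const std::vector<int> &in_shape, const std::vector<int> &target) {
  int total = 1;
  for (int d : in_shape) total *= d;
  std::vector<int> out(target.size(), 0);
  int infer_index = -1;
  int known = 1;
  for (size_t i = 0; i < target.size(); ++i) {
    if (target[i] == -1) {
      infer_index = static_cast<int>(i);  // dimension to infer
    } else {
      out[i] = (target[i] == 0) ? in_shape[i] : target[i];  // 0 copies the input dim
      known *= out[i];
    }
  }
  if (infer_index >= 0) out[infer_index] = total / known;
  return out;
}

int main() {
  auto out = CalcReshape({4, 3, 2}, {0, -1});  // dim 0 copied (4), dim 1 inferred (6)
  printf("[%d, %d]\n", out[0], out[1]);
  return 0;
}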
@@ -64,6 +64,10 @@ int StridedSlice::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr
   }
   if (this->primitive_->value.value == nullptr) {
     auto attr = new (std::nothrow) schema::StridedSliceT();
+    if (attr == nullptr) {
+      MS_LOG(ERROR) << "new StridedSlice failed";
+      return RET_ERROR;
+    }
     attr->beginMask = GetValue<int>(prim.GetAttr("begin_mask"));
     attr->endMask = GetValue<int>(prim.GetAttr("end_mask"));
     attr->ellipsisMask = GetValue<int>(prim.GetAttr("ellipsis_mask"));
@@ -43,6 +43,10 @@ int Transpose::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &
   }
   if (this->primitive_->value.value == nullptr) {
     auto attr = new (std::nothrow) schema::TransposeT();
+    if (attr == nullptr) {
+      MS_LOG(ERROR) << "new TransposeT failed";
+      return RET_ERROR;
+    }
     MS_ASSERT(inputs.size() == kAnfPopulaterTwo);
     auto inputNode = inputs[kAnfPopulaterOne];
     if (inputNode->isa<ValueNode>()) {
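Both UnPackAttr hunks add the same guard: new (std::nothrow) reports allocation failure by returning nullptr rather than throwing, so the result must be checked before the first member access. A minimal standalone illustration (stand-in struct, not the schema type):

#include <cstdio>
#include <new>

struct TransposeT {  // stand-in for schema::TransposeT
  int conjugate = 0;
};

int main() {
  auto attr = new (std::nothrow) TransposeT();
  if (attr == nullptr) {  // nothrow new yields nullptr on failure instead of throwing
    printf("new TransposeT failed\n");
    return 1;
  }
  attr->conjugate = 0;  // safe: the allocation was verified first
  delete attr;
  return 0;
}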
@@ -203,7 +203,7 @@ TEST_F(TestConvolutionFp16, ConvTest1) {
   std::cout << std::endl;
 
   std::string output_path = "./test_data/conv/convfp32_out_1_28_28_32.bin";
-  lite::CompareOutput(fp32_output_data, output_path);
+  lite::CompareOutput(fp32_output_data, output_data_size, output_path);
 
   free(nhwc4_input_data);
   free(packed_input);
@@ -309,7 +309,7 @@ TEST_F(TestConvolutionFp16, ConvTest2) {
   std::cout << std::endl;
 
   std::string output_path = "./test_data/conv/convfp32_out_1_128_128_32.bin";
-  lite::CompareOutput(fp32_output_data, output_path);
+  lite::CompareOutput(fp32_output_data, output_data_size, output_path);
 
   free(packed_input);
   free(bias_data);
@@ -452,7 +452,7 @@ TEST_F(TestConvolutionFp16, Conv3x3Test1) {
   std::cout << std::endl;
 
   std::string output_path = "./test_data/conv/convfp32_out_1_28_28_32.bin";
-  lite::CompareOutput(fp32_output_data, output_path);
+  lite::CompareOutput(fp32_output_data, output_data_size, output_path);
 
   free(bias_data);
   free(tile_buffer);
@@ -570,7 +570,7 @@ TEST_F(TestConvolutionFp16, Conv3x3Test2) {
   std::cout << std::endl;
 
   std::string output_path = "./test_data/conv/convfp32_out_1_128_128_32.bin";
-  lite::CompareOutput(fp32_output_data, output_path);
+  lite::CompareOutput(fp32_output_data, output_data_size, output_path);
 
   free(bias_data);
   free(tile_buffer);
@@ -54,7 +54,7 @@ TEST_F(TestConv1x1Fp32, Input1x1PrePack1) {
 
   float out[20] = {0};
   Conv1x1InputPack(in, out, conv_param, sizeof(float));
-  EXPECT_EQ(0, lite::CompareOutputData(out, correct, 20));
+  EXPECT_EQ(0, lite::CompareOutputData(out, 20, correct, 20));
   delete conv_param;
 }
 
@@ -114,7 +114,7 @@ TEST_F(TestConv1x1Fp32, Input1x1PrePack3) {
     -5.052577, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0};
 
   Conv1x1InputPack(in, out, conv_param, sizeof(float));
-  EXPECT_EQ(0, lite::CompareOutputData(out, correct, 18));
+  EXPECT_EQ(0, lite::CompareOutputData(out, 18, correct, 18));
   delete conv_param;
 }
 
@@ -136,7 +136,7 @@ TEST_F(TestConv1x1Fp32, Input1x1PrePack4) {
     0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0};
   float out[54] = {0};
   Conv1x1InputPack(in, out, conv_param, sizeof(float));
-  EXPECT_EQ(0, lite::CompareOutputData(out, correct, 54));
+  EXPECT_EQ(0, lite::CompareOutputData(out, 54, correct, 54));
   delete conv_param;
 }
 
@@ -166,7 +166,7 @@ TEST_F(TestConv1x1Fp32, Conv1x1WeightTest1) {
   conv_param->output_channel_ = 7;
   float out[96] = {0};
   Pack1x1WeightFp32(in, out, conv_param);
-  EXPECT_EQ(0, lite::CompareOutputData(out, co, 96));
+  EXPECT_EQ(0, lite::CompareOutputData(out, 96, co, 96));
   delete conv_param;
 }
 
@@ -75,7 +75,7 @@ TEST_F(TestDeConvolutionFp32, DeConvWeightC4x4Pack1) {
     0.000, 0.000, 0.000, 0.00};
   float dst[256] = {0};
   PackDeConvWeightFp32(in, dst, 5, 6, 2 * 2);
-  EXPECT_EQ(0, lite::CompareOutputData(dst, co, 256));
+  EXPECT_EQ(0, lite::CompareOutputData(dst, 256, co, 256));
 }
 
 TEST_F(TestDeConvolutionFp32, DeConvWeightC4x4Pack2) {
@@ -90,7 +90,7 @@ TEST_F(TestDeConvolutionFp32, DeConvWeightC4x4Pack2) {
     -0.293, 18.686, 0.0873, 0, 0, 0, 0, 0, 0, 0, 0, 0};
   float dst[64] = {0};
   PackDeConvWeightFp32(in, dst, 6, 3, 2 * 1);
-  EXPECT_EQ(0, lite::CompareOutputData(dst, co, 64));
+  EXPECT_EQ(0, lite::CompareOutputData(dst, 64, co, 64));
 }
 
 TEST_F(TestDeConvolutionFp32, PostConvFuncC8Test1) {
@@ -212,7 +212,7 @@ TEST_F(TestActGradFp32, SigmoidGradFp32) {
   int res = lite::CompareRelativeOutput(output_data, output_path);
 
   EXPECT_EQ(res, 0);
-  // lite::CompareOutput(output_data, output_path);
+  // lite::CompareOutput(output_data, output_data_size, output_path);
 
   delete[] input_data;
   delete[] output_data;
@@ -58,7 +58,7 @@ TEST_F(TestBiasGradFp32, BiasGradFp32) {
   }
   std::cout << std::endl;
   std::string output_path = "./test_data/operators/biasgradfp32_1_db_7.bin";
-  lite::CompareOutput(output_data, output_path);
+  lite::CompareOutput(output_data, 7, output_path);
 
   delete[] input_data;
   delete[] output_data;
@@ -96,7 +96,7 @@ TEST_F(TestPoolingGradFp32, AvgPoolingGradFp32) {
   }
   std::cout << std::endl;
   std::string output_path = "./test_data/pooling/avgpoolgradfp32_1_dx_1_28_28_3.bin";
-  auto res = lite::CompareOutput(output_data, output_path);
+  auto res = lite::CompareOutput(output_data, output_data_size, output_path);
   EXPECT_EQ(res, 0);
 
   delete[] input_data;
@@ -152,7 +152,7 @@ TEST_F(TestPoolingGradFp32, AvgPoolingKernelGradFp32) {
   }
   std::cout << std::endl;
   std::string output_path = "./test_data/pooling/avgpoolgradfp32_1_dx_1_28_28_3.bin";
-  auto res = lite::CompareOutput(output_data, output_path);
+  auto res = lite::CompareOutput(output_data, output_data_size, output_path);
   EXPECT_EQ(res, 0);
 
   delete[] input_data;
@@ -213,7 +213,8 @@ TEST_F(TestPoolingGradFp32, AvgPoolingBatchGradFp32) {
   }
   std::cout << std::endl;
   std::string output_path = "./test_data/pooling/avgpoolgradfp32_1_dx_3_28_28_3.bin";
-  auto res = lite::CompareOutput(output_data, output_path);
+  size_t output_data_size = dx_tensor.ElementsNum();
+  auto res = lite::CompareOutput(output_data, output_data_size, output_path);
   EXPECT_EQ(res, 0);
 
   delete[] input_data;
@@ -388,7 +389,7 @@ TEST_F(TestPoolingGradFp32, MaxPoolingGradFp32) {
   }
   std::cout << std::endl;
   std::string output_path = "./test_data/pooling/maxpoolgradfp32_1_xgrad_1_28_28_3.bin";
-  auto res = lite::CompareOutput(output_data, output_path);
+  auto res = lite::CompareOutput(output_data, output_data_size, output_path);
   EXPECT_EQ(res, 0);
 
   free(pooling_param);
@@ -70,7 +70,7 @@ TEST_F(TestSoftmaxCrossEntropyFp32, SoftmaxCrossEntropyFp32) {
   printf("==================Testing Grad===============\n");
 
   std::string output_path = "./test_data/operators/sce_fp32_1_loss_1.bin";
-  lite::CompareOutput(loss, output_path);
+  lite::CompareOutput(loss, 1, output_path);
 
   ((mindspore::kernel::SparseSoftmaxCrossEntropyWithLogitsCPUKernel *)kernel_obj)->train();
   kernel_obj->Run();
@@ -81,7 +81,7 @@ TEST_F(TestSoftmaxCrossEntropyFp32, SoftmaxCrossEntropyFp32) {
   }
   std::cout << std::endl;
   std::string grad_path = "./test_data/operators/sce_fp32_1_dy_6_4.bin";
-  lite::CompareOutput(grad, grad_path);
+  lite::CompareOutput(grad, 24, grad_path);
 
   delete [] ll_labels;
   delete [] labels;
@@ -24,7 +24,7 @@
 #include "schema/inner/model_generated.h"
 #include "src/ops/primitive_c.h"
 #include "ir/func_graph.h"
-#include "tools/converter/return_code.h"
+#include "tools/converter/converter_context.h"
 
 namespace mindspore::lite {
 class AnfExporter {
@@ -47,7 +47,7 @@ class AnfExporter {
                       const std::unique_ptr<schema::MetaGraphT> &meta_graphT, schema::CNodeT *output_cnode);
   void SetGraphInputIndex(const std::unique_ptr<schema::MetaGraphT> &meta_graphT);
   int SetGraphoutputIndex(const CNodePtr &cnode, const std::unique_ptr<schema::MetaGraphT> &meta_graphT,
-                           schema::CNodeT *return_node);
+                          schema::CNodeT *return_node);
   bool IsPrimitiveCNode(const AnfNodePtr &node, schema::PrimitiveType type);
   int ConvertQuantParam(const std::unique_ptr<schema::MetaGraphT> &meta_graph,
                         const std::shared_ptr<PrimitiveC> primitive, const std::unique_ptr<schema::CNodeT> &dst_node);
@@ -202,7 +202,7 @@ PARSE_ONNXATTR_IN_SCALAR_FORM(int64, int64)
 PARSE_ONNXATTR_IN_SCALAR_FORM(uint64, uint64)
 
 int AnfImporterFromProtobuf::BuildParameterForFuncGraph(const ParameterPtr &node,
-                                                         const onnx::ValueInfoProto &value_proto) {
+                                                        const onnx::ValueInfoProto &value_proto) {
   if (node == nullptr) {
     return RET_NULL_PTR;
   }
@@ -273,7 +273,7 @@ int AnfImporterFromProtobuf::BuildParameterForFuncGraph(const ParameterPtr &node
 }
 
 int AnfImporterFromProtobuf::ImportParametersForGraph(const FuncGraphPtr &outputFuncGraph,
-                                                       const onnx::GraphProto &importProto) {
+                                                      const onnx::GraphProto &importProto) {
   if (outputFuncGraph == nullptr) {
     return RET_NULL_PTR;
   }
@@ -557,6 +557,7 @@ std::unordered_map<std::string, abstract::AbstractTensorPtr> AnfImporterFromProt
 CNodePtr AnfImporterFromProtobuf::BuildCNodeForFuncGraph(const FuncGraphPtr &outputFuncGraph,
                                                          const onnx::NodeProto &node_proto,
                                                          const schema::QuantType &quantType) {
+  static bool interrupt = false;
   if (outputFuncGraph == nullptr) {
     MS_LOG(ERROR) << "output funcgraph is nullptr";
     return nullptr;
@@ -600,13 +601,17 @@ CNodePtr AnfImporterFromProtobuf::BuildCNodeForFuncGraph(const FuncGraphPtr &out
     inputs.push_back(anfnode_build_map_[input_name]);
   }
   auto primitivec_ptr = PrimitiveC::Create(*prim, inputs, quantType);
-  if (primitivec_ptr == nullptr) {
-    MS_LOG(ERROR) << "Create PrimitiveC return nullptr, " << prim->name();
+  if (primitivec_ptr == nullptr || interrupt) {
+    interrupt = true;
+    if (primitivec_ptr == nullptr) {
+      NoSupportOp::GetInstance()->InsertOp(prim->name());
+    }
     return nullptr;
   }
   inputs.insert(inputs.begin(), NewValueNode(primitivec_ptr));
   CNodePtr cnode_ptr = outputFuncGraph->NewCNode(inputs);
   if (cnode_ptr == nullptr) {
+    interrupt = true;
     MS_LOG(ERROR) << "funcgraph new cnode failed";
     return nullptr;
   }
@@ -700,40 +705,43 @@ bool AnfImporterFromProtobuf::BuildReturnForFuncGraph(const FuncGraphPtr &output
 }
 
 int AnfImporterFromProtobuf::ImportNodesForGraph(const FuncGraphPtr &outputFuncGraph,
-                                                  const onnx::GraphProto &importProto,
-                                                  const schema::QuantType &quantType) {
+                                                 const onnx::GraphProto &importProto,
+                                                 const schema::QuantType &quantType) {
   if (outputFuncGraph == nullptr) {
     MS_LOG(ERROR) << "funcgraph is nullptr";
     return RET_NULL_PTR;
   }
   MS_LOG(INFO) << "The CNdoe size : " << importProto.node_size();
   CNodePtr cnode_ptr = nullptr;
+  int status = RET_OK;
   for (int i = 0; i < importProto.node_size(); ++i) {
     const onnx::NodeProto &node_proto = importProto.node(i);
     const std::string &node_type = node_proto.op_type();
     if (node_type == kConstantValueNode) {
-      if (!BuildValueNodeForFuncGraph(node_proto)) {
+      if (status == RET_OK && !BuildValueNodeForFuncGraph(node_proto)) {
         MS_LOG(ERROR) << "Build ValueNode for funcgraph fail at index: : " << i;
-        return RET_ERROR;
+        status = RET_ERROR;
       }
       continue;
     }
     cnode_ptr = BuildCNodeForFuncGraph(outputFuncGraph, node_proto, quantType);
     if (cnode_ptr == nullptr) {
       MS_LOG(ERROR) << "Build CNode for funcgraph fail at index: : " << i;
-      return RET_NULL_PTR;
+      status = (status == RET_OK ? RET_NULL_PTR : status);
     }
   }
 
+  if (status != RET_OK) {
+    return status;
+  }
   if (!BuildReturnForFuncGraph(outputFuncGraph, importProto, cnode_ptr)) {
     MS_LOG(ERROR) << "Build ReturnNode for funcgraph failed";
-    return RET_ERROR;
+    status = RET_ERROR;
  }
-  return RET_OK;
+  return status;
 }
 
 int AnfImporterFromProtobuf::BuildFuncGraph(const FuncGraphPtr &outputFuncGraph, const onnx::GraphProto &importProto,
-                                             const schema::QuantType &quantType) {
+                                            const schema::QuantType &quantType) {
   if (outputFuncGraph == nullptr) {
     MS_LOG(ERROR) << "fundgraph is nullptr";
     return RET_NULL_PTR;
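ImportNodesForGraph now records the first failing status and keeps iterating instead of returning at the first bad node, while the static interrupt flag in BuildCNodeForFuncGraph skips real work after a failure but still lets unsupported op names be collected. The same pattern recurs in the Caffe, ONNX, and TFLite parser hunks below. A distilled standalone sketch of the idea (enum values and driver are illustrative):

#include <cstdio>
#include <vector>

enum Status { RET_OK = 0, RET_NOT_FIND_OP = 1 };

// Visit every node; remember only the FIRST error, but never stop early,
// so every unsupported node in the model gets reported in one run.
Status ParseAll(const std::vector<bool> &supported) {
  Status status = RET_OK;
  for (size_t i = 0; i < supported.size(); ++i) {
    if (!supported[i]) {
      printf("node %zu unsupported\n", i);  // collect instead of returning
      status = (status == RET_OK ? RET_NOT_FIND_OP : status);
      continue;
    }
    if (status == RET_OK) {
      // Real parsing work happens only while no error has been seen,
      // mirroring the 'interrupt' flag in the hunks above.
    }
  }
  return status;
}

int main() {
  // Nodes 1 and 3 are unsupported: both are printed, the first error is returned.
  return ParseAll({true, false, true, false});
}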
@@ -24,6 +24,7 @@
 
 #include "include/errorcode.h"
 #include "tools/converter/parser/onnx/onnx.pb.h"
+#include "tools/converter/converter_context.h"
 #include "tools/anf_importer/anf_importer.h"
 #include "abstract/abstract_value.h"
 
@@ -47,10 +48,10 @@ class AnfImporterFromProtobuf : public AnfImporter {
   int AddReturnCNode() override { return RET_ERROR; };
   int ParseModelConfigureInfo(const onnx::ModelProto &model_proto);
   int BuildFuncGraph(const FuncGraphPtr &outputFuncGraph, const onnx::GraphProto &importProto,
-                      const schema::QuantType &quantType);
+                     const schema::QuantType &quantType);
   int ImportParametersForGraph(const FuncGraphPtr &outputFuncGraph, const onnx::GraphProto &importProto);
   int ImportNodesForGraph(const FuncGraphPtr &outputFuncGraph, const onnx::GraphProto &importProto,
-                           const schema::QuantType &quantType);
+                          const schema::QuantType &quantType);
   int BuildParameterForFuncGraph(const ParameterPtr &node, const onnx::ValueInfoProto &value_proto);
   CNodePtr BuildCNodeForFuncGraph(const FuncGraphPtr &outputFuncGraph, const onnx::NodeProto &node_proto,
                                   const schema::QuantType &quantType);
@@ -15,6 +15,8 @@
  */
 
 #include "tools/common/storage.h"
+#include <sys/stat.h>
+#include <unistd.h>
 #include "flatbuffers/flatbuffers.h"
 #include "utils/log_adapter.h"
 #include "src/common/file_utils.h"
@@ -31,7 +33,9 @@ int Storage::Save(const schema::MetaGraphT &graph, const std::string &outputPath
     MS_LOG(ERROR) << "GetBufferPointer nullptr";
     return RET_ERROR;
   }
-
+  if (access((outputPath + ".ms").c_str(), F_OK) == 0) {
+    chmod((outputPath + ".ms").c_str(), S_IWUSR);
+  }
   std::ofstream output(outputPath + ".ms", std::ofstream::binary);
   if (!output.is_open()) {
     MS_LOG(ERROR) << "Can not open output file: " << outputPath << ".ms";
@@ -40,6 +44,7 @@ int Storage::Save(const schema::MetaGraphT &graph, const std::string &outputPath
 
   output.write((const char *)content, size);
   output.close();
+  chmod((outputPath + ".ms").c_str(), S_IRUSR);
   return RET_OK;
 }
 
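This is the "converted ms file permission" half of the commit: an output left read-only by a previous run is made writable before being rewritten, and the freshly written model is then locked to owner read-only. A standalone sketch of that sequence (path and payload illustrative; the access/chmod calls mirror the hunks above):

#include <cstddef>
#include <fstream>
#include <string>
#include <sys/stat.h>
#include <unistd.h>

bool SaveReadOnly(const std::string &path, const char *data, size_t size) {
  if (access(path.c_str(), F_OK) == 0) {
    chmod(path.c_str(), S_IWUSR);  // a previous run left the file read-only; allow the rewrite
  }
  std::ofstream out(path, std::ofstream::binary);
  if (!out.is_open()) {
    return false;
  }
  out.write(data, static_cast<std::streamsize>(size));
  out.close();
  chmod(path.c_str(), S_IRUSR);  // owner read-only: the converted model stays immutable
  return true;
}

int main() {
  const char payload[] = "flatbuffer bytes";
  return SaveReadOnly("model.ms", payload, sizeof(payload)) ? 0 : 1;
}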
@@ -23,7 +23,7 @@
 #include "tools/converter/converter_flags.h"
 #include "ir/anf.h"
 #include "tools/converter/quantizer/quantizer.h"
-#include "tools/converter/return_code.h"
+#include "tools/converter/converter_context.h"
 
 namespace mindspore {
 namespace lite {
@@ -152,6 +152,7 @@ int RunConverter(int argc, const char **argv) {
       return RET_INPUT_PARAM_INVALID;
     }
   }
+  NoSupportOp::GetInstance()->PrintOps();
   status = ReturnCode::GetSingleReturnCode()->GetReturnCode();
   if (fb_graph == nullptr) {
     MS_LOG(ERROR) << "Convert model return nullptr";
@@ -25,7 +25,7 @@
 #include "tools/anf_importer/anf_importer.h"
 #include "tools/converter/converter_flags.h"
 #include "tools/converter/anf_transform.h"
-#include "tools/converter/return_code.h"
+#include "tools/converter/converter_context.h"
 
 namespace mindspore {
 namespace lite {
@@ -17,13 +17,16 @@
 #ifndef LITE_RETURN_CODE_H
 #define LITE_RETURN_CODE_H
 
+#include <string>
+#include <set>
 #include "include/errorcode.h"
+#include "utils/log_adapter.h"
 
 namespace mindspore {
 namespace lite {
 class ReturnCode {
  public:
-  ~ReturnCode() {}
+  ~ReturnCode() = default;
   static ReturnCode *GetSingleReturnCode() {
     static ReturnCode returnCode;
     return &returnCode;
@@ -33,15 +36,31 @@ class ReturnCode {
       statusCode = status;
     }
   }
-  STATUS GetReturnCode() const {
-    return statusCode;
-  }
+  STATUS GetReturnCode() const { return statusCode; }
 
  private:
   ReturnCode() { statusCode = RET_OK; }
   int statusCode;
 };
 
+class NoSupportOp {
+ public:
+  ~NoSupportOp() = default;
+  static NoSupportOp *GetInstance() {
+    static NoSupportOp noSupportOp;
+    return &noSupportOp;
+  }
+  void InsertOp(const std::string &op_name) { noSupportOps.insert(op_name); }
+  void PrintOps() const {
+    for (auto &op_name : noSupportOps) {
+      MS_LOG(ERROR) << "The op " << op_name << " hasn't been supported";
+    }
+  }
+
+ private:
+  NoSupportOp() { noSupportOps.clear(); }
+  std::set<std::string> noSupportOps;
+};
 } // namespace lite
 } // namespace mindspore
 
 #endif // LITE_RETURN_CODE_H
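NoSupportOp is the collection point for the "collect all unsupported nodes" half of the commit: each parser records op names here as it encounters them, and RunConverter prints the whole set once at the end (see the RunConverter hunk above). A standalone demo of the same set-backed singleton, using local names, showing that duplicate inserts collapse to one report:

#include <cstdio>
#include <set>
#include <string>

class UnsupportedOps {  // local stand-in for NoSupportOp above
 public:
  static UnsupportedOps *GetInstance() {
    static UnsupportedOps instance;  // function-local static: one shared set
    return &instance;
  }
  void InsertOp(const std::string &name) { ops_.insert(name); }
  void PrintOps() const {
    for (const auto &name : ops_) {
      printf("The op %s hasn't been supported\n", name.c_str());
    }
  }

 private:
  UnsupportedOps() = default;
  std::set<std::string> ops_;
};

int main() {
  // Two parsers hit "Gemm" independently; std::set keeps a single entry.
  UnsupportedOps::GetInstance()->InsertOp("Gemm");
  UnsupportedOps::GetInstance()->InsertOp("Gemm");
  UnsupportedOps::GetInstance()->InsertOp("MirrorPad");
  UnsupportedOps::GetInstance()->PrintOps();  // prints each op once
  return 0;
}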
@@ -22,7 +22,7 @@
 #include "schema/inner/model_generated.h"
 #include "tools/anf_importer/import_from_meta_graphT.h"
 #include "ir/anf.h"
-#include "tools/converter/return_code.h"
+#include "tools/converter/converter_context.h"
 
 namespace mindspore::lite {
 using namespace schema;
@@ -40,7 +40,7 @@ class ModelParser {
       return nullptr;
     }
     auto func_graph = this->Fb2Anf(meta_graph);
-    delete(meta_graph);
+    delete (meta_graph);
     return func_graph;
   }
 
@@ -84,6 +84,9 @@ schema::MetaGraphT *CaffeModelParser::ParseToFb(const std::string &modelFile, co
   if (status != RET_OK) {
     MS_LOG(ERROR) << "ParseLayer failed " << status;
     ReturnCode::GetSingleReturnCode()->UpdateReturnCode(status);
+    for (auto &tensor : tensorCache.GetCachedTensor()) {
+      delete tensor;
+    }
     return nullptr;
   }
 
@@ -179,6 +182,8 @@ STATUS CaffeModelParser::SetGraphTensorIndex(const caffe::NetParameter &proto, T
 STATUS CaffeModelParser::ParseLayer(const caffe::NetParameter &proto, const caffe::NetParameter &weight,
                                     TensorCache *tensorCache, schema::MetaGraphT *subGraphDef,
                                     const QuantType &quantType) {
+  static bool interrupt = false;
+  int status = RET_OK;
   for (int i = 0; i < proto.layer_size(); i++) {
     auto layer = proto.layer(i);
 
@@ -222,38 +227,46 @@ STATUS CaffeModelParser::ParseLayer(const caffe::NetParameter &proto, const caff
       }
       continue;
     }
-    auto status = SetOpInputIdx(layer, op.get(), tensorCache);
-    if (status != RET_OK) {
-      MS_LOG(ERROR) << "Set Op " << layer.name() << " Input Index Failed!";
-      return status;
-    }
 
     auto nodeParser = CaffeNodeParserRegistry::GetInstance()->GetNodeParser(layer.type().c_str());
-    if (nodeParser == nullptr) {
-      MS_LOG(ERROR) << "Don't support type " << layer.type() << ". for caffe op " << layer.name();
-      return RET_NULL_PTR;
+    if (nodeParser == nullptr || interrupt) {
+      interrupt = true;
+      if (nodeParser == nullptr) {
+        NoSupportOp::GetInstance()->InsertOp(layer.type());
+        status = (status == RET_OK ? RET_NOT_FIND_OP : status);
+      }
+      continue;
     }
 
     std::vector<schema::TensorT *> weightVec;
-    status = nodeParser->Parse(layer, layerP, op.get(), &weightVec);
-    if (status != RET_OK) {
+    auto status_node = nodeParser->Parse(layer, layerP, op.get(), &weightVec);
+    if (status_node != RET_OK) {
+      interrupt = true;
      MS_LOG(ERROR) << "Parse weight for " << layer.name() << " Failed!";
-      return status;
+      status = (status == RET_OK ? RET_NOT_FIND_OP : status);
+      continue;
    }
 
+    status_node = SetOpInputIdx(layer, op.get(), tensorCache);
+    if (status_node != RET_OK) {
+      MS_LOG(ERROR) << "Set Op " << layer.name() << " Input Index Failed!";
+      status = (status == RET_OK ? status_node : status);
+    }
     SetWeightTensor(weightVec, op.get(), tensorCache);
 
-    status = SetOpOutputIdx(layer, op.get(), tensorCache);
-    if (status != RET_OK) {
+    status_node = SetOpOutputIdx(layer, op.get(), tensorCache);
+    if (status_node != RET_OK) {
+      interrupt = true;
      MS_LOG(ERROR) << "Set Op " << layer.name() << " Output Index Failed!";
-      return status;
+      status = (status == RET_OK ? RET_NOT_FIND_OP : status);
+      continue;
     }
 
     // op->fmkType = FmkType_CAFFE;
     subGraphDef->nodes.emplace_back(move(op));
     }
   }
-  return RET_OK;
+  return status;
 }
 
 STATUS CaffeModelParser::GetModelInput(const caffe::NetParameter &proto, TensorCache *tensorCache) {
@@ -67,6 +67,11 @@ STATUS CaffeReduceParser::Parse(const caffe::LayerParameter &proto,
   } else {
     attr->axes = std::vector(1, 0);
   }
+  if (reduce_param.has_coeff()) {
+    attr->coeff = reduce_param.coeff();
+  } else {
+    attr->coeff = 1.0;
+  }
   attr->reduceToEnd = true;
   attr->keepDims = false;
   op->name = proto.name();
@@ -249,6 +249,7 @@ STATUS OnnxModelParser::ParseOnnxNodeToDstOp(const onnx::GraphProto &onnx_graph,
                                              schema::CNodeT *dst_op, schema::TensorT *dst_tensor,
                                              TensorCache *tensor_cache, const QuantType &quantType) {
   // change op_type() to name(), that is unique
+  static bool interrupt = false;
   dst_op->name = onnx_node.op_type() + "_" + onnx_node.output(0);
   dst_op->quantType = quantType;
   // dst_op->fmkType = FmkType_ONNX;
@@ -256,15 +257,25 @@ STATUS OnnxModelParser::ParseOnnxNodeToDstOp(const onnx::GraphProto &onnx_graph,
                 << onnx_node.input_size();
   // get the real op type
   SetOpQuantParams(onnx_graph, onnx_node, dst_op, dst_tensor, tensor_cache);
-  auto status = ParseOnnxNodeAttr(onnx_graph, onnx_node, onnx_node.op_type(), dst_op);
+  auto node_parser = OnnxNodeParserRegistry::GetInstance()->GetNodeParser(onnx_node.op_type());
+  if (node_parser == nullptr || interrupt) {
+    interrupt = true;
+    if (node_parser == nullptr) {
+      NoSupportOp::GetInstance()->InsertOp(onnx_node.op_type());
+    }
+    return RET_NOT_FIND_OP;
+  }
+  auto status = node_parser->Parse(onnx_graph, onnx_node, dst_op);
   if (status != RET_OK) {
-    MS_LOG(ERROR) << "parser onnx node attr failed";
+    interrupt = true;
+    MS_LOG(ERROR) << "parser onnx node " << onnx_node.op_type() << " attr failed";
     return status;
   }
   // set op input index
   std::vector<string> node_inputs;
   (void)node_inputs.insert(node_inputs.begin(), onnx_node.input().begin(), onnx_node.input().end());
   if (SetOpInputIndex(node_inputs, dst_op, onnx_node, tensor_cache)) {
+    interrupt = true;
     MS_LOG(ERROR) << "SetOpInputIndex failed";
     return RET_ERROR;
   }
@@ -273,6 +284,7 @@ STATUS OnnxModelParser::ParseOnnxNodeToDstOp(const onnx::GraphProto &onnx_graph,
   (void)node_outputs.insert(node_outputs.begin(), onnx_node.output().begin(), onnx_node.output().end());
 
   if (SetOpOutputIndex(node_outputs, dst_op, tensor_cache) != RET_OK) {
+    interrupt = true;
     MS_LOG(ERROR) << "SetOpOutputIndex failed";
     return RET_ERROR;
   }
@@ -340,8 +352,7 @@ STATUS OnnxModelParser::ParseOnnxNodeAttr(const onnx::GraphProto &onnx_graph, co
                                           const string &onnx_op_type, schema::CNodeT *dst_op) {
   auto node_parser = OnnxNodeParserRegistry::GetInstance()->GetNodeParser(onnx_op_type);
   if (node_parser == nullptr) {
-    MS_LOG(ERROR) << "not find " << onnx_op_type << ", node parser is nullptr";
-    return RET_NULL_PTR;
+    return RET_NOT_FIND_OP;
   }
   return node_parser->Parse(onnx_graph, onnx_node, dst_op);
 }
@@ -503,32 +514,42 @@ schema::MetaGraphT *OnnxModelParser::ParseToFb(const std::string &modelFile, con
   }
   // init op node input/output tensor, and dst_op attr
   for (const auto &onnx_node : onnx_graph.node()) {
+    int status_node = RET_OK;
    if (onnx_node.op_type() == "Constant") {
       continue;
     }
     if (onnx_node.op_type() == "Gemm") {
-      ParseOnnxGemmNode(onnx_graph, onnx_node, dst_graph.get(), &tensor_cache);
+      if (status == RET_OK) {
+        ParseOnnxGemmNode(onnx_graph, onnx_node, dst_graph.get(), &tensor_cache);
+      }
       continue;
     } else if (onnx_node.op_type() == "Int8GivenIntTensorFill" || onnx_node.op_type() == "Int8GivenTensorFill") {
-      status = ParseOnnxGivenFillNode(onnx_node, &tensor_cache);
-      if (status != RET_OK) {
-        MS_LOG(ERROR) << "ParseOnnxGivenFillNode failed: " << status;
-        ReturnCode::GetSingleReturnCode()->UpdateReturnCode(status);
-        return nullptr;
+      if (status == RET_OK) {
+        status_node = ParseOnnxGivenFillNode(onnx_node, &tensor_cache);
+        if (status_node != RET_OK) {
+          MS_LOG(ERROR) << "ParseOnnxGivenFillNode failed: " << status_node;
+          status = (status == RET_OK ? status_node : status);
+        }
       }
       continue;
     }
 
     std::unique_ptr<schema::CNodeT> dst_op = std::make_unique<schema::CNodeT>();
     std::unique_ptr<schema::TensorT> dst_tensor = std::make_unique<schema::TensorT>();
-    status = ParseOnnxNodeToDstOp(onnx_graph, onnx_node, dst_op.get(), dst_tensor.get(), &tensor_cache, quantType);
-    if (status != RET_OK) {
-      MS_LOG(ERROR) << "parse node " << onnx_node.op_type() << " failed";
-      ReturnCode::GetSingleReturnCode()->UpdateReturnCode(status);
-      return nullptr;
+    status_node = ParseOnnxNodeToDstOp(onnx_graph, onnx_node, dst_op.get(), dst_tensor.get(), &tensor_cache, quantType);
+    if (status_node != RET_OK) {
+      status = (status == RET_OK ? status_node : status);
+      continue;
     }
     dst_graph->nodes.emplace_back(std::move(dst_op));
   }
+  if (status != RET_OK) {
+    ReturnCode::GetSingleReturnCode()->UpdateReturnCode(status);
+    for (auto &tensor : tensor_cache.GetCachedTensor()) {
+      delete tensor;
+    }
+    return nullptr;
+  }
   SetAllTensors(tensor_cache, dst_graph.get());
   dst_graph->name = GetModelName(modelFile);
   return dst_graph.release();
@@ -210,13 +210,13 @@ STATUS TfliteSingleInputOpParser::Parse(const std::unique_ptr<tflite::OperatorT>
   } else if (std::strcmp(node_name, "Exp") == 0) {
     MS_LOG(DEBUG) << "parse TfliteExpParser";
     auto attr = std::make_unique<schema::ExpT>();
-    attr->base = -1;  // -1 represent base = e
-    attr->scale = 1;
-    attr->shift = 0;
     if (attr == nullptr) {
       MS_LOG(ERROR) << "new op failed";
       return RET_NULL_PTR;
     }
+    attr->base = -1;  // -1 represent base = e
+    attr->scale = 1;
+    attr->shift = 0;
     op->primitive->value.type = schema::PrimitiveType_Exp;
     op->primitive->value.value = attr.release();
   } else if (std::strcmp(node_name, "Sqrt") == 0) {
@@ -300,7 +300,7 @@ STATUS TfliteSingleInputOpParser::Parse(const std::unique_ptr<tflite::OperatorT>
     }
     op->primitive->value.type = schema::PrimitiveType_Floor;
     op->primitive->value.value = attr.release();
-  } else if (std::strcmp(node_name, "NEG") == 0) {
+  } else if (std::strcmp(node_name, "Neg") == 0) {
     MS_LOG(DEBUG) << "parse TfliteNegParser";
     auto attr = std::make_unique<schema::NegT>();
     if (attr == nullptr) {
@@ -424,7 +424,7 @@ TfliteNodeRegister g_TfliteLogParser("Log", new TfliteLogParser());
 TfliteNodeRegister g_tfliteRoundParser("Round", new TfliteRoundParser());
 TfliteNodeRegister g_TfliteCeilParser("Ceil", new TfliteCeilParser());
 TfliteNodeRegister g_tfliteFloorParser("flOOR", new TfliteFloorParser());
-TfliteNodeRegister g_tfliteNegParser("NEG", new TfliteNegParser());
+TfliteNodeRegister g_tfliteNegParser("Neg", new TfliteNegParser());
 
 TfliteNodeRegister g_tfliteEqualParser("Equal", new TfliteEqualParser());
 TfliteNodeRegister g_tfliteNotEqualParser("NotEqual", new TfliteNotEqualParser());
@@ -98,6 +98,7 @@ STATUS TfliteModelParser::ConvertOp(const std::unique_ptr<tflite::ModelT> &tflit
                                     const std::unique_ptr<tflite::SubGraphT> &tflite_subgraph,
                                     const QuantType &quant_type, schema::MetaGraphT *sub_graph) {
   int idx = 0;
+  int status = RET_OK;
   for (const auto &tflite_op : tflite_subgraph->operators) {
     auto tflite_op_type = (tflite_model->operator_codes[tflite_op->opcode_index])->builtin_code;
     auto op_type = GetMSOpType(tflite_op_type);
@@ -114,21 +115,24 @@ STATUS TfliteModelParser::ConvertOp(const std::unique_ptr<tflite::ModelT> &tflit
 
     auto node_parser = TfliteNodeParserRegistry::GetInstance()->GetNodeParser(op_type);
     if (node_parser == nullptr) {
-      MS_LOG(ERROR) << "cannot find node parser, opType: " << op_type.c_str();
-      return RET_NOT_FIND_OP;
-    }
-    int status = node_parser->Parse(tflite_op, tflite_subgraph->tensors, tflite_model->buffers, op.get(), &tensorsId,
-                                    &tensorsFormat, &tensorsIdMap);
-    if (status != RET_OK) {
-      MS_LOG(ERROR) << "node " << op_type.c_str() << " parser failed";
-      return status;
+      NoSupportOp::GetInstance()->InsertOp(op_type);
+      status = (status == RET_OK ? RET_NOT_FIND_OP : status);
+      continue;
     }
+    if (status == RET_OK) {
+      status = node_parser->Parse(tflite_op, tflite_subgraph->tensors, tflite_model->buffers, op.get(), &tensorsId,
+                                  &tensorsFormat, &tensorsIdMap);
+      if (status != RET_OK) {
+        MS_LOG(ERROR) << "node " << op_type.c_str() << " parser failed";
+        continue;
+      }
 
-    sub_graph->nodes.emplace_back(op.release());
-    opMap[sub_graph->nodes.back()->name] = sub_graph->nodes.back().get();
-    tfliteOpMap[tflite_op.get()] = sub_graph->nodes.back().get();
+      sub_graph->nodes.emplace_back(op.release());
+      opMap[sub_graph->nodes.back()->name] = sub_graph->nodes.back().get();
+      tfliteOpMap[tflite_op.get()] = sub_graph->nodes.back().get();
+    }
   }
-  return RET_OK;
+  return status;
 }
 
 STATUS TfliteModelParser::ConvertTensor(const std::unique_ptr<tflite::SubGraphT> &tflite_subgraph,
@@ -162,8 +166,8 @@ STATUS TfliteModelParser::ConvertTensor(const std::unique_ptr<tflite::SubGraphT>
     if (isConst) {
       int status = CopyConstTensorData(tflite_model_buffer, tflite_tensor.get(), tensor.get());
       if (status != RET_OK) {
-      MS_LOG(ERROR) << "obtain const tensor failed";
-      return status;
+        MS_LOG(ERROR) << "obtain const tensor failed";
+        return status;
       }
     }
     // set tensor attr
@@ -118,6 +118,7 @@ std::map<tflite::BuiltinOperator, std::string> tfMsOpTypeMap{
   {tflite::BuiltinOperator_UNPACK, "Unstack"},
   {tflite::BuiltinOperator_CUSTOM, "Custom"},
   {tflite::BuiltinOperator_MIRROR_PAD, "MirrorPad"},
+  {tflite::BuiltinOperator_NEG, "Neg"},
 };
 
 std::map<tflite::ActivationFunctionType, schema::ActivationType> tfMsActivationFunctionMap{
@@ -25,7 +25,7 @@
 #include "backend/optimizer/common/pattern_engine.h"
 #include "schema/inner/model_generated.h"
 #include "src/param_value_lite.h"
-#include "tools/converter/return_code.h"
+#include "tools/converter/converter_context.h"
 
 using PrimitiveCPtr = std::shared_ptr<mindspore::lite::PrimitiveC>;
 namespace mindspore {
@@ -124,6 +124,7 @@ int GenConvNewBias(const FuncGraphPtr &func_graph, const CNodePtr &conv_node, co
   }
   if (conv_bias_node != nullptr) {
     if (CheckIfNodeIsParam(conv_bias_node) != lite::RET_OK) {
+      delete[] add_bias_data;
       return lite::RET_INVALID_OP_ATTR;
     }
     auto conv_bias_param = conv_bias_node->cast<ParameterPtr>()->default_param();
@@ -18,7 +18,7 @@
 #define MINDSPORE_LITE_SRC_PASS_FUSION_CONV_BIASADD_FUSION_H_
 
 #include "backend/optimizer/common/optimizer.h"
-#include "tools/converter/return_code.h"
+#include "tools/converter/converter_context.h"
 
 namespace mindspore {
 namespace opt {