codegen reconstruction

zhangyanhui 2023-03-02 20:21:45 +08:00
parent d53f3b508e
commit a6f2ec187f
6 changed files with 150 additions and 96 deletions

View File

@@ -107,7 +107,9 @@ int SplitLineToMap(std::ifstream *ifs, std::map<std::string, std::map<std::strin
return RET_ERROR;
}
if (section == "model_param") {
if (model_param_infos != nullptr) {
(*model_param_infos)[model_index][key] = value;
}
} else {
(*maps)[section][key] = value;
}

View File

@@ -479,7 +479,7 @@ int ConverterImpl::InitConfigParam(const std::shared_ptr<ConverterPara> &param,
auto ret = RET_OK;
auto parse_map_ret = RET_OK;
if (!param->config_file.empty()) {
ret = config_parser.ParseConfigFile(param->config_file, model_param_infos);
ret = config_parser.ParseConfigFile(param->config_file, nullptr);
parse_map_ret = mindspore::lite::ParseConfigFile(param->config_file, &maps, model_param_infos);
} else {
ret = config_parser.ParseConfigParam(&param->config_param);

View File

@@ -53,6 +53,14 @@ int main(int argc, const char **argv) {
int multi_model_argc = 2;
if (argc <= multi_model_argc) {
if (flags.configFile.empty()) {
MS_LOG(ERROR) << "Flag missing. When in single model scenario, fmk/modelFile/outputFile flags are necessary. "
"When in multi model scenario, configFile flag is necessary.";
std::cout << "Flag missing. When in single model scenario, fmk/modelFile/outputFile flags are necessary. When "
"in multi model scenario, configFile flag is necessary."
<< std::endl;
return mindspore::kLiteParamInvalid;
}
mindspore::Converter converter;
converter.SetConfigFile(flags.configFile);
auto status = converter.Convert();

View File

@@ -106,11 +106,25 @@ typedef void (*FreeResource)();
void CodeMSModelCalcWorkspaceSize(std::ofstream &ofs, const std::unique_ptr<CoderContext> &ctx,
const Configurator &config) {
if (config.target() == kCortex_M) {
ofs << "size_t MSModelCalcWorkspaceSize(MSModelHandle model) {\n";
ofs << " size_t shape_size=0;\n"
<< " if (model == NULL) {\n"
ofs << "size_t MSModelCalcWorkspaceSize(MSModelHandle model) {\n"
<< " MicroModel *micro_model = (MicroModel *)model;\n"
<< " if (micro_model == NULL) {\n"
<< " return 0;\n"
<< " }\n"
<< " if (micro_model->calc_work_space == NULL) {\n"
<< " return 0;\n"
<< " }\n"
<< " return micro_model->calc_work_space(model);\n"
<< "}\n";
} else {
ofs << "size_t MSModelCalcWorkspaceSize(MSModelHandle model) {\n return 0;\n}\n";
}
ofs << "\n";
}
void CodeCortexCalcWorkspaceSize(std::ofstream &ofs, const std::unique_ptr<CoderContext> &ctx) {
ofs << "size_t MSModelCalcWorkspaceSize" << ctx->GetCurModelIndex() << "(MSModelHandle model) {\n"
<< "size_t shape_size = 0;\n";
std::vector<Tensor *> inputs = ctx->graph_inputs();
for (size_t i = 0; i < inputs.size(); ++i) {
ofs << " shape_size += " << inputs[i]->shape().size() << " * sizeof(int64_t);\n";
@@ -123,16 +137,26 @@ void CodeMSModelCalcWorkspaceSize(std::ofstream &ofs, const std::unique_ptr<Code
<< "(),4) + UP_ROUND(WEIGHT_BUF_SIZE,4) + shape_size + "
<< "(UP_ROUND(sizeof(MicroTensor),4) + UP_ROUND(sizeof(MicroTensor *),4)) * "
<< (ctx->graph_inputs().size() + ctx->graph_outputs().size()) << ";\n}\n";
} else {
ofs << "size_t MSModelCalcWorkspaceSize(MSModelHandle model) {\n return 0;\n}\n";
}
ofs << "\n";
}
void CodeMSModelSetWorkspace(std::ofstream &ofs, const std::unique_ptr<CoderContext> &ctx, const Configurator &config) {
ofs << "void MSModelSetWorkspace(MSModelHandle model, void *workspace, size_t workspace_size) {";
if (config.target() == kCortex_M) {
ofs << "\n";
ofs << " MicroModel *micro_model = (MicroModel *)model;\n"
<< " if (micro_model == NULL) {\n"
<< " return;\n"
<< " }\n"
<< " if (micro_model->set_work_space == NULL) {\n"
<< " return;\n"
<< " }\n"
<< " micro_model->set_work_space(model, workspace, workspace_size);\n";
}
ofs << "}\n\n";
}
void CodeCortexSetWorkspace(std::ofstream &ofs, const std::unique_ptr<CoderContext> &ctx) {
ofs << "void MSModelSetWorkspace" << ctx->GetCurModelIndex()
<< "(MSModelHandle model, void *workspace, size_t workspace_size) {\n";
ofs << cortex_set_workspace;
ofs << " micro_model->runtime_buffer = workspace;\n"
" int buffer_size = GetBufferSize"
@@ -206,8 +230,7 @@ void CodeMSModelSetWorkspace(std::ofstream &ofs, const std::unique_ptr<CoderCont
for (size_t i = 0; i < outputs.size(); ++i) {
array_tostring(outputs[i], "output", i);
}
}
ofs << "}\n\n";
ofs << "}\n";
}
void CodeMSTensorHandleArrayDestroyState(std::ofstream &ofs, const Configurator &config) {
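Note: for reference, the common wrappers these generators emit for a Cortex-M target come out roughly like the C below, assembled from the strings above. The typedefs at the top are stand-ins so the fragment is self-contained; the real MSModelHandle and MicroModel come from the generated headers (sketched after the model.h hunk further down). On non-Cortex targets the generator instead emits stubs that return 0 or do nothing.

#include <stddef.h>

/* Stand-in types so this sketch compiles on its own; the real definitions
 * live in the generated headers. */
typedef void *MSModelHandle;
typedef struct {
  void (*set_work_space)(MSModelHandle model, void *workspace, size_t workspace_size);
  size_t (*calc_work_space)(MSModelHandle model);
  /* ... build, predict, tensor arrays, free_resource ... */
} MicroModel;

/* Common entry point: validate the handle and the per-model hook, then
 * dispatch through the MicroModel function pointer. */
size_t MSModelCalcWorkspaceSize(MSModelHandle model) {
  MicroModel *micro_model = (MicroModel *)model;
  if (micro_model == NULL) {
    return 0;
  }
  if (micro_model->calc_work_space == NULL) {
    return 0;
  }
  return micro_model->calc_work_space(model);
}

void MSModelSetWorkspace(MSModelHandle model, void *workspace, size_t workspace_size) {
  MicroModel *micro_model = (MicroModel *)model;
  if (micro_model == NULL) {
    return;
  }
  if (micro_model->set_work_space == NULL) {
    return;
  }
  micro_model->set_work_space(model, workspace, workspace_size);
}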
@@ -297,12 +320,21 @@ void CodeMSModelBuildState(std::ofstream &ofs) { ofs << micro_model_build_state;
void CodeMSModelBuildCommon(std::ofstream &ofs, const Configurator &config) {
ofs << micro_model_build_implement;
ofs << R"RAW(
MicroModel *micro_model = (MicroModel *)model;
if (micro_model == NULL) {
return kMSStatusLiteNullptr;
}
if (micro_model->build == NULL) {
return kMSStatusLiteNullptr;
}
)RAW";
if (config.target() != kCortex_M) {
ofs << " IncRefCount();\n";
}
ofs << R"RAW(
MSStatus ret =
((MicroModel *)model)->build(model, model_data, data_size, model_context);
micro_model->build(model, model_data, data_size, model_context);
if (ret != kMSStatusSuccess) {
MSModelDestroy(model);
}
@@ -387,6 +419,9 @@ MSStatus MSModelPredict(MSModelHandle model, const MSTensorHandleArray inputs, M
if (micro_model == NULL) {
return kMSStatusLiteNullptr;
}
if (micro_model->predict == NULL) {
return kMSStatusLiteNullptr;
}
return micro_model->predict(model, inputs, outputs, before, after);
}
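The build and predict wrappers above apply the same guard-then-dispatch pattern. From the application side, the point of the workspace hooks is that the Cortex-M build does not pull in src/allocator.h, so the caller supplies the working memory. A hypothetical usage sketch follows; the buffer size, the SetupWorkspace name, and the already-created model handle are assumptions for illustration, and where this call sits relative to MSModelBuild follows the generated benchmark code rather than this sketch.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef void *MSModelHandle; /* stand-in; normally from the lite C API headers */

/* The common wrappers sketched earlier. */
size_t MSModelCalcWorkspaceSize(MSModelHandle model);
void MSModelSetWorkspace(MSModelHandle model, void *workspace, size_t workspace_size);

/* Hypothetical static workspace; size it from what MSModelCalcWorkspaceSize
 * reports for the actual model. */
static uint8_t g_workspace[16 * 1024];

int SetupWorkspace(MSModelHandle model) { /* 'model' assumed created via the generated C API */
  size_t needed = MSModelCalcWorkspaceSize(model);
  if (needed == 0 || needed > sizeof(g_workspace)) {
    printf("workspace request of %u bytes cannot be satisfied\n", (unsigned)needed);
    return -1;
  }
  MSModelSetWorkspace(model, g_workspace, needed);
  return 0;
}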

View File

@@ -29,7 +29,9 @@
namespace mindspore::lite::micro {
void CodeMSModelCalcWorkspaceSize(std::ofstream &ofs, const std::unique_ptr<CoderContext> &ctx,
const Configurator &config);
void CodeCortexCalcWorkspaceSize(std::ofstream &ofs, const std::unique_ptr<CoderContext> &ctx);
void CodeMSModelSetWorkspace(std::ofstream &ofs, const std::unique_ptr<CoderContext> &ctx, const Configurator &config);
void CodeCortexSetWorkspace(std::ofstream &ofs, const std::unique_ptr<CoderContext> &ctx);
void CodeMSTensorHandleArrayDestroyState(std::ofstream &ofs, const Configurator &config);
void CodeMSModelCreateDefault(std::ofstream &ofs);
void CodeMSModelCreate(std::ofstream &ofs, const std::unique_ptr<CoderContext> &ctx, const Configurator &config);

View File

@@ -47,6 +47,8 @@ typedef struct {
MSTensorHandleArray outputs;
ModelBuild build;
ModelPredict predict;
ModelSetWorkspace set_work_space;
ModelCalcWorkspaceSize calc_work_space;
FreeResource free_resource;
)RAW";
@@ -302,15 +304,9 @@ int Generator::CodeCommonModelFile() {
CodeMSModelBuildState(hofs);
CodeMSModelPredictState(hofs);
CodeFreeResourceState(hofs);
if (config_->target() == kCortex_M) {
hofs << set_workspace_state;
hofs << calc_workspace_state;
}
hofs << micro_model_define_source;
if (config_->target() == kCortex_M) {
hofs << " ModelSetWorkspace set_work_space;\n"
<< " ModelCalcWorkspaceSize calc_work_space;\n";
}
hofs << "} MicroModel;\n";
hofs << "#endif // MINDSPORE_LITE_MICRO_LIBRARY_SOURCE_MODEL_H_\n\n";
@@ -332,9 +328,9 @@ int Generator::CodeCommonModelFile() {
}
if (config_->target() != kCortex_M) {
cofs << "#include \"src/allocator.h\"\n";
}
CodeMSModelCalcWorkspaceSize(cofs, ctx_, *config_);
CodeMSModelSetWorkspace(cofs, ctx_, *config_);
}
CodeMSModelCreateDefault(cofs);
CodeMSModelBuildCommon(cofs, *config_);
cofs << model_runtime_other_source;
@@ -396,20 +392,31 @@ int Generator::CodeMSModelImplement() {
<< " MSTensorHandleArray *output,\n"
<< " const MSKernelCallBackC before,\n"
<< " const MSKernelCallBackC after);\n";
ofs << "void MSModelSetWorkspace" << ctx_->GetCurModelIndex()
<< "(MSModelHandle model, void *workspace, size_t workspace_size);\n";
ofs << "size_t MSModelCalcWorkspaceSize" << ctx_->GetCurModelIndex() << "(MSModelHandle model);\n";
ofs << "static MicroModel gModel" << ctx_->GetCurModelIndex() << " = {.runtime_buffer = NULL,\n"
<< " .train_mode = false,\n"
<< " .inputs = {" << ctx_->graph_inputs().size() << ", NULL},\n"
<< " .outputs = {" << ctx_->graph_outputs().size() << ", NULL},\n"
<< " .build = MSModelBuild" << ctx_->GetCurModelIndex() << ",\n"
<< " .predict = MSModelPredict" << ctx_->GetCurModelIndex() << ",\n"
<< " .free_resource = FreeResource" << ctx_->GetCurModelIndex() << "};\n";
<< " .predict = MSModelPredict" << ctx_->GetCurModelIndex() << ",\n";
if (config_->target() == kCortex_M) {
ofs << " .set_work_space = MSModelSetWorkspace" << ctx_->GetCurModelIndex() << ",\n"
<< " .calc_work_space = MSModelCalcWorkspaceSize" << ctx_->GetCurModelIndex()
<< ",\n";
} else {
ofs << " .set_work_space = NULL,\n"
<< " .calc_work_space = NULL,\n";
}
ofs << " .free_resource = FreeResource" << ctx_->GetCurModelIndex() << "};\n";
ofs << "MSModelHandle model" << ctx_->GetCurModelIndex() << " = &gModel" << ctx_->GetCurModelIndex() << ";\n\n";
CodeMSModelCreate(ofs, ctx_, *config_);
CodeMSModelBuild(ofs, ctx_->GetCurModelIndex(), *config_);
if (config_->target() == kCortex_M) {
CodeMSModelCalcWorkspaceSize(ofs, ctx_, *config_);
CodeMSModelSetWorkspace(ofs, ctx_, *config_);
CodeCortexCalcWorkspaceSize(ofs, ctx_);
CodeCortexSetWorkspace(ofs, ctx_);
}
if (config_->code_mode() == CodeMode::Train) {
CodeMSModelRunStep(ofs, ctx_);
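Concretely, the prelude CodeMSModelImplement() emits for model index 0 on a Cortex-M target comes out roughly as below (types as in the MicroModel sketch above). The tensor counts and the MSModelBuild0/MSModelPredict0 signatures are illustrative assumptions; a non-Cortex build sets both workspace hooks to NULL instead.

/* Forward declarations of the generated per-model entry points. */
MSStatus MSModelBuild0(MSModelHandle model, const void *model_data, size_t data_size,
                       const MSContextHandle model_context);
MSStatus MSModelPredict0(MSModelHandle model, const MSTensorHandleArray inputs,
                         MSTensorHandleArray *outputs, const MSKernelCallBackC before,
                         const MSKernelCallBackC after);
void FreeResource0();
void MSModelSetWorkspace0(MSModelHandle model, void *workspace, size_t workspace_size);
size_t MSModelCalcWorkspaceSize0(MSModelHandle model);

/* Per-model dispatch table wired into the common MSModel* wrappers. */
static MicroModel gModel0 = {.runtime_buffer = NULL,
                             .train_mode = false,
                             .inputs = {2, NULL},  /* example: two graph inputs */
                             .outputs = {1, NULL}, /* example: one graph output */
                             .build = MSModelBuild0,
                             .predict = MSModelPredict0,
                             .set_work_space = MSModelSetWorkspace0,
                             .calc_work_space = MSModelCalcWorkspaceSize0,
                             .free_resource = FreeResource0};
MSModelHandle model0 = &gModel0;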
@@ -490,7 +497,7 @@ void Generator::CodeCommonNetC(std::ofstream &ofs) {
ofs << "#include \"" << kThreadWrapper << "\"\n\n";
}
if (config_->debug_mode()) {
ofs << "#include \"" << kDebugUtils << "\"\n";
ofs << "#include \"src/" << kDebugUtils << "\"\n";
}
CodeGlobalCodeBlocks(ofs, ctx_);
CodeInputImplement(ofs, ctx_);