codegen reconstruction

zhangyanhui 2023-03-02 20:21:45 +08:00
parent d53f3b508e
commit a6f2ec187f
6 changed files with 150 additions and 96 deletions

View File

@@ -107,7 +107,9 @@ int SplitLineToMap(std::ifstream *ifs, std::map<std::string, std::map<std::strin
     return RET_ERROR;
   }
   if (section == "model_param") {
-    (*model_param_infos)[model_index][key] = value;
+    if (model_param_infos != nullptr) {
+      (*model_param_infos)[model_index][key] = value;
+    }
   } else {
     (*maps)[section][key] = value;
   }

View File

@@ -479,7 +479,7 @@ int ConverterImpl::InitConfigParam(const std::shared_ptr<ConverterPara> &param,
   auto ret = RET_OK;
   auto parse_map_ret = RET_OK;
   if (!param->config_file.empty()) {
-    ret = config_parser.ParseConfigFile(param->config_file, model_param_infos);
+    ret = config_parser.ParseConfigFile(param->config_file, nullptr);
     parse_map_ret = mindspore::lite::ParseConfigFile(param->config_file, &maps, model_param_infos);
   } else {
     ret = config_parser.ParseConfigParam(&param->config_param);

View File

@@ -53,6 +53,14 @@ int main(int argc, const char **argv) {
   int multi_model_argc = 2;
   if (argc <= multi_model_argc) {
+    if (flags.configFile.empty()) {
+      MS_LOG(ERROR) << "Flag missing. When in single model scenario, fmk/modelFile/outputFile flags are necessary. "
+                       "When in multi model scenario, configFile flag is necessary.";
+      std::cout << "Flag missing. When in single model scenario, fmk/modelFile/outputFile flags are necessary. When "
+                   "in multi model scenario, configFile flag is necessary."
+                << std::endl;
+      return mindspore::kLiteParamInvalid;
+    }
     mindspore::Converter converter;
     converter.SetConfigFile(flags.configFile);
     auto status = converter.Convert();

View File

@@ -106,45 +106,69 @@ typedef void (*FreeResource)();
 void CodeMSModelCalcWorkspaceSize(std::ofstream &ofs, const std::unique_ptr<CoderContext> &ctx,
                                   const Configurator &config) {
   if (config.target() == kCortex_M) {
-    ofs << "size_t MSModelCalcWorkspaceSize(MSModelHandle model) {\n";
-    ofs << " size_t shape_size=0;\n"
-        << " if (model == NULL) {\n"
-        << " return 0;\n"
-        << " }\n";
-    std::vector<Tensor *> inputs = ctx->graph_inputs();
-    for (size_t i = 0; i < inputs.size(); ++i) {
-      ofs << " shape_size += " << inputs[i]->shape().size() << " * sizeof(int64_t);\n";
-    }
-    std::vector<Tensor *> outputs = ctx->graph_outputs();
-    for (size_t i = 0; i < outputs.size(); ++i) {
-      ofs << " shape_size += " << outputs[i]->shape().size() << " * sizeof(int64_t);\n";
-    }
-    ofs << " return UP_ROUND(GetBufferSize" << ctx->GetCurModelIndex()
-        << "(),4) + UP_ROUND(WEIGHT_BUF_SIZE,4) + shape_size + "
-        << "(UP_ROUND(sizeof(MicroTensor),4) + UP_ROUND(sizeof(MicroTensor *),4)) * "
-        << (ctx->graph_inputs().size() + ctx->graph_outputs().size()) << ";\n}\n";
+    ofs << "size_t MSModelCalcWorkspaceSize(MSModelHandle model) {\n"
+        << " MicroModel *micro_model = (MicroModel *)model;\n"
+        << " if (micro_model == NULL) {\n"
+        << " return 0;\n"
+        << " }\n"
+        << " if (micro_model->calc_work_space == NULL) {\n"
+        << " return 0;\n"
+        << " }\n"
+        << " return micro_model->calc_work_space(model);\n"
+        << "}\n";
   } else {
     ofs << "size_t MSModelCalcWorkspaceSize(MSModelHandle model) {\n return 0;\n}\n";
   }
   ofs << "\n";
 }
+
+void CodeCortexCalcWorkspaceSize(std::ofstream &ofs, const std::unique_ptr<CoderContext> &ctx) {
+  ofs << "size_t MSModelCalcWorkspaceSize" << ctx->GetCurModelIndex() << "(MSModelHandle model) {\n"
+      << "size_t shape_size = 0;\n";
+  std::vector<Tensor *> inputs = ctx->graph_inputs();
+  for (size_t i = 0; i < inputs.size(); ++i) {
+    ofs << " shape_size += " << inputs[i]->shape().size() << " * sizeof(int64_t);\n";
+  }
+  std::vector<Tensor *> outputs = ctx->graph_outputs();
+  for (size_t i = 0; i < outputs.size(); ++i) {
+    ofs << " shape_size += " << outputs[i]->shape().size() << " * sizeof(int64_t);\n";
+  }
+  ofs << " return UP_ROUND(GetBufferSize" << ctx->GetCurModelIndex()
+      << "(),4) + UP_ROUND(WEIGHT_BUF_SIZE,4) + shape_size + "
+      << "(UP_ROUND(sizeof(MicroTensor),4) + UP_ROUND(sizeof(MicroTensor *),4)) * "
+      << (ctx->graph_inputs().size() + ctx->graph_outputs().size()) << ";\n}\n";
+}
+
 void CodeMSModelSetWorkspace(std::ofstream &ofs, const std::unique_ptr<CoderContext> &ctx, const Configurator &config) {
   ofs << "void MSModelSetWorkspace(MSModelHandle model, void *workspace, size_t workspace_size) {";
   if (config.target() == kCortex_M) {
-    ofs << "\n";
-    ofs << cortex_set_workspace;
-    ofs << " micro_model->runtime_buffer = workspace;\n"
-           " int buffer_size = GetBufferSize"
-        << ctx->GetCurModelIndex()
-        << "();\n"
-           " char* buf = workspace;\n"
-           " SetBuffer"
-        << ctx->GetCurModelIndex()
-        << "(buf);\n"
-           " buffer_size = UP_ROUND(buffer_size, 4);\n";
-    ofs << " " << ctx->weight_name() << " = (uint8_t *)&buf[buffer_size];\n";
-    ofs << R"RAW(
+    ofs << " MicroModel *micro_model = (MicroModel *)model;\n"
+        << " if (micro_model == NULL) {\n"
+        << " return;\n"
+        << " }\n"
+        << " if (micro_model->set_work_space == NULL) {\n"
+        << " return;\n"
+        << " }\n"
+        << " micro_model->set_work_space(model, workspace, workspace_size);\n";
+  }
+  ofs << "}\n\n";
+}
+
+void CodeCortexSetWorkspace(std::ofstream &ofs, const std::unique_ptr<CoderContext> &ctx) {
+  ofs << "void MSModelSetWorkspace" << ctx->GetCurModelIndex()
+      << "(MSModelHandle model, void *workspace, size_t workspace_size) {\n";
+  ofs << cortex_set_workspace;
+  ofs << " micro_model->runtime_buffer = workspace;\n"
+         " int buffer_size = GetBufferSize"
+      << ctx->GetCurModelIndex()
+      << "();\n"
+         " char* buf = workspace;\n"
+         " SetBuffer"
+      << ctx->GetCurModelIndex()
+      << "(buf);\n"
+         " buffer_size = UP_ROUND(buffer_size, 4);\n";
+  ofs << " " << ctx->weight_name() << " = (uint8_t *)&buf[buffer_size];\n";
+  ofs << R"RAW(
 buffer_size += WEIGHT_BUF_SIZE;
 buffer_size = UP_ROUND(buffer_size,4);
@@ -158,56 +182,55 @@ void CodeMSModelSetWorkspace(std::ofstream &ofs, const std::unique_ptr<CoderCont
 buffer_size = UP_ROUND(buffer_size,4);
 MicroTensor **output_tensors = (MicroTensor **)micro_model->outputs.handle_list;
 )RAW";
-    ofs << " int i;\n"
-        << " for (i = 0; i < GRAPH_INPUTS_SIZE; i++) {\n";
-    std::vector<Tensor *> inputs = ctx->graph_inputs();
-    for (size_t i = 0; i < inputs.size(); ++i) {
-      ofs << " input_tensors[i] = (MicroTensor *)&buf[buffer_size];\n"
-          << " buffer_size += sizeof(MicroTensor);\n"
-          << " buffer_size = UP_ROUND(buffer_size,4);\n";
-      ofs << " input_tensors[i]->shape = (int64_t *)&buf[buffer_size];\n"
-          << " buffer_size += " << inputs[i]->shape().size() * sizeof(int64_t) << ";\n"
-          << " buffer_size = UP_ROUND(buffer_size,4);\n";
-    }
-    ofs << " }\n";
-    ofs << " for (i = 0; i < GRAPH_OUTPUTS_SIZE; i++) {\n";
-    std::vector<Tensor *> outputs = ctx->graph_outputs();
-    for (size_t i = 0; i < outputs.size(); ++i) {
-      ofs << " output_tensors[i] = (MicroTensor *)&buf[buffer_size];\n"
-          << " buffer_size += sizeof(MicroTensor);\n"
-          << " buffer_size = UP_ROUND(buffer_size,4);\n";
-      ofs << " output_tensors[i]->shape = (int64_t *)&buf[buffer_size];\n"
-          << " buffer_size += " << outputs[i]->shape().size() * sizeof(int64_t) << ";\n"
-          << " buffer_size = UP_ROUND(buffer_size,4);\n";
-    }
-    ofs << " }\n";
-    ofs << " if (buffer_size > workspace_size) {\n"
-        << " micro_model->runtime_buffer = NULL;\n"
-        << " SetBuffer" << ctx->GetCurModelIndex() << "(NULL);\n"
-        << " return;\n"
-        << " }\n";
-    auto array_tostring = [&ofs](Tensor *tensor, const std::string &prefix, size_t index) {
-      ofs << kAlignedString << prefix << "_tensors[" << index << "]->type = " << EnumNameMSDataType(tensor->data_type())
-          << ";\n";
-      ofs << kAlignedString << prefix << "_tensors[" << index << "]->format = kMSFormatNHWC;\n";
-      ofs << kAlignedString << prefix << "_tensors[" << index << "]->ndim = " << tensor->shape().size() << ";\n";
-      size_t shape_size = tensor->shape().size();
-      for (size_t i = 0; i < shape_size; i++) {
-        ofs << kAlignedString << prefix << "_tensors[" << index << "]->shape[" << i << "]= " << tensor->shape()[i]
-            << ";\n";
-      }
-      ofs << kAlignedString << prefix << "_tensors[" << index << "]->name = \"" << tensor->tensor_name() << "\";\n";
-      ofs << kAlignedString << prefix << "_tensors[" << index << "]->data = NULL;\n";
-    };
-    for (size_t i = 0; i < inputs.size(); ++i) {
-      array_tostring(inputs[i], "input", i);
-    }
-    for (size_t i = 0; i < outputs.size(); ++i) {
-      array_tostring(outputs[i], "output", i);
-    }
-  }
-  ofs << "}\n\n";
+  ofs << " int i;\n"
+      << " for (i = 0; i < GRAPH_INPUTS_SIZE; i++) {\n";
+  std::vector<Tensor *> inputs = ctx->graph_inputs();
+  for (size_t i = 0; i < inputs.size(); ++i) {
+    ofs << " input_tensors[i] = (MicroTensor *)&buf[buffer_size];\n"
+        << " buffer_size += sizeof(MicroTensor);\n"
+        << " buffer_size = UP_ROUND(buffer_size,4);\n";
+    ofs << " input_tensors[i]->shape = (int64_t *)&buf[buffer_size];\n"
+        << " buffer_size += " << inputs[i]->shape().size() * sizeof(int64_t) << ";\n"
+        << " buffer_size = UP_ROUND(buffer_size,4);\n";
+  }
+  ofs << " }\n";
+  ofs << " for (i = 0; i < GRAPH_OUTPUTS_SIZE; i++) {\n";
+  std::vector<Tensor *> outputs = ctx->graph_outputs();
+  for (size_t i = 0; i < outputs.size(); ++i) {
+    ofs << " output_tensors[i] = (MicroTensor *)&buf[buffer_size];\n"
+        << " buffer_size += sizeof(MicroTensor);\n"
+        << " buffer_size = UP_ROUND(buffer_size,4);\n";
+    ofs << " output_tensors[i]->shape = (int64_t *)&buf[buffer_size];\n"
+        << " buffer_size += " << outputs[i]->shape().size() * sizeof(int64_t) << ";\n"
+        << " buffer_size = UP_ROUND(buffer_size,4);\n";
+  }
+  ofs << " }\n";
+  ofs << " if (buffer_size > workspace_size) {\n"
+      << " micro_model->runtime_buffer = NULL;\n"
+      << " SetBuffer" << ctx->GetCurModelIndex() << "(NULL);\n"
+      << " return;\n"
+      << " }\n";
+  auto array_tostring = [&ofs](Tensor *tensor, const std::string &prefix, size_t index) {
+    ofs << kAlignedString << prefix << "_tensors[" << index << "]->type = " << EnumNameMSDataType(tensor->data_type())
+        << ";\n";
+    ofs << kAlignedString << prefix << "_tensors[" << index << "]->format = kMSFormatNHWC;\n";
+    ofs << kAlignedString << prefix << "_tensors[" << index << "]->ndim = " << tensor->shape().size() << ";\n";
+    size_t shape_size = tensor->shape().size();
+    for (size_t i = 0; i < shape_size; i++) {
+      ofs << kAlignedString << prefix << "_tensors[" << index << "]->shape[" << i << "]= " << tensor->shape()[i]
+          << ";\n";
+    }
+    ofs << kAlignedString << prefix << "_tensors[" << index << "]->name = \"" << tensor->tensor_name() << "\";\n";
+    ofs << kAlignedString << prefix << "_tensors[" << index << "]->data = NULL;\n";
+  };
+  for (size_t i = 0; i < inputs.size(); ++i) {
+    array_tostring(inputs[i], "input", i);
+  }
+  for (size_t i = 0; i < outputs.size(); ++i) {
+    array_tostring(outputs[i], "output", i);
+  }
+  ofs << "}\n";
 }
 
 void CodeMSTensorHandleArrayDestroyState(std::ofstream &ofs, const Configurator &config) {
@@ -297,12 +320,21 @@ void CodeMSModelBuildState(std::ofstream &ofs) { ofs << micro_model_build_state;
 void CodeMSModelBuildCommon(std::ofstream &ofs, const Configurator &config) {
   ofs << micro_model_build_implement;
+  ofs << R"RAW(
+  MicroModel *micro_model = (MicroModel *)model;
+  if (micro_model == NULL) {
+    return kMSStatusLiteNullptr;
+  }
+  if (micro_model->build == NULL) {
+    return kMSStatusLiteNullptr;
+  }
+)RAW";
   if (config.target() != kCortex_M) {
     ofs << " IncRefCount();\n";
   }
   ofs << R"RAW(
   MSStatus ret =
-    ((MicroModel *)model)->build(model, model_data, data_size, model_context);
+    micro_model->build(model, model_data, data_size, model_context);
   if (ret != kMSStatusSuccess) {
     MSModelDestroy(model);
   }
@@ -387,6 +419,9 @@ MSStatus MSModelPredict(MSModelHandle model, const MSTensorHandleArray inputs, M
   if (micro_model == NULL) {
     return kMSStatusLiteNullptr;
   }
+  if (micro_model->predict == NULL) {
+    return kMSStatusLiteNullptr;
+  }
   return micro_model->predict(model, inputs, outputs, before, after);
 }

View File

@@ -29,7 +29,9 @@
 namespace mindspore::lite::micro {
 void CodeMSModelCalcWorkspaceSize(std::ofstream &ofs, const std::unique_ptr<CoderContext> &ctx,
                                   const Configurator &config);
+void CodeCortexCalcWorkspaceSize(std::ofstream &ofs, const std::unique_ptr<CoderContext> &ctx);
 void CodeMSModelSetWorkspace(std::ofstream &ofs, const std::unique_ptr<CoderContext> &ctx, const Configurator &config);
+void CodeCortexSetWorkspace(std::ofstream &ofs, const std::unique_ptr<CoderContext> &ctx);
 void CodeMSTensorHandleArrayDestroyState(std::ofstream &ofs, const Configurator &config);
 void CodeMSModelCreateDefault(std::ofstream &ofs);
 void CodeMSModelCreate(std::ofstream &ofs, const std::unique_ptr<CoderContext> &ctx, const Configurator &config);

View File

@ -47,6 +47,8 @@ typedef struct {
MSTensorHandleArray outputs; MSTensorHandleArray outputs;
ModelBuild build; ModelBuild build;
ModelPredict predict; ModelPredict predict;
ModelSetWorkspace set_work_space;
ModelCalcWorkspaceSize calc_work_space;
FreeResource free_resource; FreeResource free_resource;
)RAW"; )RAW";
@@ -302,15 +304,9 @@ int Generator::CodeCommonModelFile() {
   CodeMSModelBuildState(hofs);
   CodeMSModelPredictState(hofs);
   CodeFreeResourceState(hofs);
-  if (config_->target() == kCortex_M) {
-    hofs << set_workspace_state;
-    hofs << calc_workspace_state;
-  }
+  hofs << set_workspace_state;
+  hofs << calc_workspace_state;
   hofs << micro_model_define_source;
-  if (config_->target() == kCortex_M) {
-    hofs << " ModelSetWorkspace set_work_space;\n"
-         << " ModelCalcWorkspaceSize calc_work_space;\n";
-  }
   hofs << "} MicroModel;\n";
   hofs << "#endif // MINDSPORE_LITE_MICRO_LIBRARY_SOURCE_MODEL_H_\n\n";
@@ -332,9 +328,9 @@ int Generator::CodeCommonModelFile() {
   }
   if (config_->target() != kCortex_M) {
     cofs << "#include \"src/allocator.h\"\n";
-    CodeMSModelCalcWorkspaceSize(cofs, ctx_, *config_);
-    CodeMSModelSetWorkspace(cofs, ctx_, *config_);
   }
+  CodeMSModelCalcWorkspaceSize(cofs, ctx_, *config_);
+  CodeMSModelSetWorkspace(cofs, ctx_, *config_);
   CodeMSModelCreateDefault(cofs);
   CodeMSModelBuildCommon(cofs, *config_);
   cofs << model_runtime_other_source;
@@ -396,20 +392,31 @@ int Generator::CodeMSModelImplement() {
       << " MSTensorHandleArray *output,\n"
       << " const MSKernelCallBackC before,\n"
      << " const MSKernelCallBackC after);\n";
+  ofs << "void MSModelSetWorkspace" << ctx_->GetCurModelIndex()
+      << "(MSModelHandle model, void *workspace, size_t workspace_size);\n";
+  ofs << "size_t MSModelCalcWorkspaceSize" << ctx_->GetCurModelIndex() << "(MSModelHandle model);\n";
   ofs << "static MicroModel gModel" << ctx_->GetCurModelIndex() << " = {.runtime_buffer = NULL,\n"
       << " .train_mode = false,\n"
       << " .inputs = {" << ctx_->graph_inputs().size() << ", NULL},\n"
       << " .outputs = {" << ctx_->graph_outputs().size() << ", NULL},\n"
       << " .build = MSModelBuild" << ctx_->GetCurModelIndex() << ",\n"
-      << " .predict = MSModelPredict" << ctx_->GetCurModelIndex() << ",\n"
-      << " .free_resource = FreeResource" << ctx_->GetCurModelIndex() << "};\n";
+      << " .predict = MSModelPredict" << ctx_->GetCurModelIndex() << ",\n";
+  if (config_->target() == kCortex_M) {
+    ofs << " .set_work_space = MSModelSetWorkspace" << ctx_->GetCurModelIndex() << ",\n"
+        << " .calc_work_space = MSModelCalcWorkspaceSize" << ctx_->GetCurModelIndex()
+        << ",\n";
+  } else {
+    ofs << " .set_work_space = NULL,\n"
+        << " .calc_work_space = NULL,\n";
+  }
+  ofs << " .free_resource = FreeResource" << ctx_->GetCurModelIndex() << "};\n";
   ofs << "MSModelHandle model" << ctx_->GetCurModelIndex() << " = &gModel" << ctx_->GetCurModelIndex() << ";\n\n";
   CodeMSModelCreate(ofs, ctx_, *config_);
   CodeMSModelBuild(ofs, ctx_->GetCurModelIndex(), *config_);
   if (config_->target() == kCortex_M) {
-    CodeMSModelCalcWorkspaceSize(ofs, ctx_, *config_);
-    CodeMSModelSetWorkspace(ofs, ctx_, *config_);
+    CodeCortexCalcWorkspaceSize(ofs, ctx_);
+    CodeCortexSetWorkspace(ofs, ctx_);
   }
   if (config_->code_mode() == CodeMode::Train) {
     CodeMSModelRunStep(ofs, ctx_);
@@ -490,7 +497,7 @@ void Generator::CodeCommonNetC(std::ofstream &ofs) {
     ofs << "#include \"" << kThreadWrapper << "\"\n\n";
   }
   if (config_->debug_mode()) {
-    ofs << "#include \"" << kDebugUtils << "\"\n";
+    ofs << "#include \"src/" << kDebugUtils << "\"\n";
   }
   CodeGlobalCodeBlocks(ofs, ctx_);
   CodeInputImplement(ofs, ctx_);
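
Taken together, the hunks above move the generated common model.c from target-specific inline workspace code to dispatch through the new per-model function pointers on MicroModel. The following is a minimal, self-contained C sketch of that dispatch pattern, assembled from the diff for illustration only; the simplified typedefs and the CalcWorkspace0/SetWorkspace0 stubs are hypothetical stand-ins, not the generated code verbatim.

/* Sketch of the dispatch pattern introduced by this commit (assumptions noted above). */
#include <stddef.h>
#include <stdio.h>

typedef void *MSModelHandle;  /* simplified stand-in for the MindSpore Lite C API handle */

typedef struct MicroModel {
  void *runtime_buffer;
  /* New members added by this commit: per-model workspace hooks. */
  void (*set_work_space)(MSModelHandle model, void *workspace, size_t workspace_size);
  size_t (*calc_work_space)(MSModelHandle model);
} MicroModel;

/* Common entry points NULL-check the model and its hook, then dispatch. */
size_t MSModelCalcWorkspaceSize(MSModelHandle model) {
  MicroModel *micro_model = (MicroModel *)model;
  if (micro_model == NULL || micro_model->calc_work_space == NULL) {
    return 0;
  }
  return micro_model->calc_work_space(model);
}

void MSModelSetWorkspace(MSModelHandle model, void *workspace, size_t workspace_size) {
  MicroModel *micro_model = (MicroModel *)model;
  if (micro_model == NULL || micro_model->set_work_space == NULL) {
    return;
  }
  micro_model->set_work_space(model, workspace, workspace_size);
}

/* Hypothetical stand-ins for the generated per-model functions
 * (MSModelCalcWorkspaceSize0 / MSModelSetWorkspace0 in the real output). */
static size_t CalcWorkspace0(MSModelHandle model) {
  (void)model;
  return 1024;
}

static void SetWorkspace0(MSModelHandle model, void *workspace, size_t workspace_size) {
  (void)workspace_size;
  ((MicroModel *)model)->runtime_buffer = workspace;
}

static MicroModel gModel0 = {NULL, SetWorkspace0, CalcWorkspace0};

int main(void) {
  MSModelHandle model = &gModel0;
  printf("workspace size: %zu\n", MSModelCalcWorkspaceSize(model));
  return 0;
}

On non-Cortex-M targets the diff generates both pointers as NULL and keeps the plain stubs in the common file, so behaviour there is unchanged while the Cortex-M path now routes through the per-model implementations.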