diff --git a/include/c_api/model_c.h b/include/c_api/model_c.h
index 938d2acb7f3..dac35b2f83a 100644
--- a/include/c_api/model_c.h
+++ b/include/c_api/model_c.h
@@ -62,6 +62,11 @@ MS_API void MSModelDestroy(MSModelHandle *model);
 /// \param[in] workspace_size Define the workspace size.
 MS_API void MSModelSetWorkspace(MSModelHandle model, void *workspace, size_t workspace_size);
 
+/// \brief Calculate the workspace size required for model inference. Only valid for IoT.
+///
+/// \param[in] model Model object handle.
+MS_API size_t MSModelCalcWorkspaceSize(MSModelHandle model);
+
 /// \brief Build the model from model file buffer so that it can run on a device.
 ///
 /// \param[in] model Model object handle.
diff --git a/mindspore/lite/CMakeLists.txt b/mindspore/lite/CMakeLists.txt
index 95e915a6e48..c36e7a50302 100644
--- a/mindspore/lite/CMakeLists.txt
+++ b/mindspore/lite/CMakeLists.txt
@@ -791,6 +791,7 @@ if(MSLITE_GPU_BACKEND STREQUAL opencl)
 endif()
 
 file(GLOB FBS_FILES ${CMAKE_CURRENT_SOURCE_DIR}/schema/*.fbs)
+ms_build_flatbuffers_lite(FBS_FILES ${CMAKE_CURRENT_SOURCE_DIR}/schema/ fbs_src ${CMAKE_BINARY_DIR}/schema "")
 ms_build_flatbuffers_lite(FBS_FILES ${CMAKE_CURRENT_SOURCE_DIR}/schema/ fbs_inner_src ${CMAKE_BINARY_DIR}/schema/inner "inner")
diff --git a/mindspore/lite/src/litert/c_api/model_c.cc b/mindspore/lite/src/litert/c_api/model_c.cc
index 45531055869..f76214af228 100644
--- a/mindspore/lite/src/litert/c_api/model_c.cc
+++ b/mindspore/lite/src/litert/c_api/model_c.cc
@@ -317,6 +317,11 @@ void MSModelSetWorkspace(MSModelHandle model, void *workspace, size_t workspace_
   return;
 }
 
+size_t MSModelCalcWorkspaceSize(MSModelHandle model) {
+  MS_LOG(ERROR) << "Unsupported Feature.";
+  return 0;
+}
+
 MSStatus MSModelBuild(MSModelHandle model, const void *model_data, size_t data_size, MSModelType model_type,
                       const MSContextHandle model_context) {
   if (model == nullptr || model_data == nullptr || model_context == nullptr) {
diff --git a/mindspore/lite/test/st/scripts/run_benchmark_codegen.sh b/mindspore/lite/test/st/scripts/run_benchmark_codegen.sh
index 6cd18c7de7c..b051e4119cd 100644
--- a/mindspore/lite/test/st/scripts/run_benchmark_codegen.sh
+++ b/mindspore/lite/test/st/scripts/run_benchmark_codegen.sh
@@ -199,12 +199,13 @@ function Run_cortex_m_codegen() {
         out_data=`cat ${models_path}/input_output/output/${model_name}.ms.out.txt`
         sed -i "s/float calib_input0_data\[NET_INPUT0_SIZE\] = {};/float calib_input0_data\[NET_INPUT0_SIZE\] = {${in_data}};/g" benchmark/data.c
         sed -i "s/float calib_output0_data\[NET_OUTPUT0_SIZE\] = {};/float calib_output0_data\[NET_OUTPUT0_SIZE\] = {${out_data}};/g" benchmark/data.c
-        sed -i "s/VERSION_STR=1.8.0/VERSION_STR=${version}/g" build.sh
+        sed -i "s/VERSION_STR=.*/VERSION_STR=${version}/g" build.sh
        bash build.sh || exit 1
         cp -r ${output_file}/mindspore-lite-${version}-none-cortex-m7 ${output_file}/build/
         cd ${stm_demo_file} || exit 1
         [ -n "${stm_demo_file}" ] && rm -rf ${stm_demo_file}/build
         sed -i "s/LITE_PACK =/LITE_PACK = mindspore-lite-${version}-none-cortex-m7/g" Makefile
+        sed -i "s/  if (benchmark() == 0) {/  static char work_space\[300000\];\n  if (benchmark(work_space, 300000) == 0) {/g" Core/Src/main.c
         make >> "$4" || return 1
         continue
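
Taken together, the four changes above define the new bare-metal workflow: the application owns the inference arena and hands it to the generated model before building. A minimal sketch of that call sequence follows, assembled from the benchmark changes later in this patch; the 300000-byte static buffer mirrors the sed edit to Core/Src/main.c above, and `run_model()` is an illustrative wrapper, not part of the patch.

```c
#include <stdio.h>
#include "c_api/model_c.h"

/* Caller-owned arena; 300000 matches the STM32 demo edit above. */
static char work_space[300000];

int run_model(void) { /* hypothetical helper mirroring benchmark() */
  MSModelHandle model = MSModelCreate();
  if (model == NULL) {
    return -1;
  }
  size_t needed = MSModelCalcWorkspaceSize(model);
  if (needed > sizeof(work_space)) {
    printf("model needs %zu bytes of workspace\n", needed);
    return -1;
  }
  MSModelSetWorkspace(model, work_space, sizeof(work_space));
  /* On the codegen path the weights and graph are compiled in, so no
     model buffer and no context are passed. */
  if (MSModelBuild(model, NULL, 0, kMSModelTypeMindIR, NULL) != kMSStatusSuccess) {
    return -1;
  }
  /* ... fill inputs via MSModelGetInputs(), run MSModelPredict() ... */
  MSModelDestroy(&model);
  return 0;
}
```
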
diff --git a/mindspore/lite/tools/converter/micro/coder/generator/component/common_component.cc b/mindspore/lite/tools/converter/micro/coder/generator/component/common_component.cc
index 02b58ff25e2..bb3e433311d 100644
--- a/mindspore/lite/tools/converter/micro/coder/generator/component/common_component.cc
+++ b/mindspore/lite/tools/converter/micro/coder/generator/component/common_component.cc
@@ -22,22 +22,23 @@
 #include "coder/log.h"
 #include "include/errorcode.h"
 #include "nnacl/op_base.h"
+#include "include/c_api/model_c.h"
 
 namespace mindspore::lite::micro {
-const char model_runtime_init_source[] = R"RAW(
+const char micro_model_define_source[] = R"RAW(
 typedef struct {
   void *runtime_buffer;
   bool train_mode;  // true: train mode, false: eval mode
   MSTensorHandleArray inputs;
   MSTensorHandleArray outputs;
 } MicroModel;
+)RAW";
 
+const char model_runtime_malloc_source[] = R"RAW(
 MSModelHandle MSModelCreate() {
   MicroModel *micro_model = (MicroModel *)malloc(sizeof(MicroModel));
   if (micro_model == NULL) {
     return NULL;
   }
-)RAW";
-const char model_runtime_malloc_source[] = R"RAW(
   int buffer_size = GetBufferSize();
   void *runtime_buffer = malloc(buffer_size);
   if (runtime_buffer == NULL) {
@@ -50,63 +51,202 @@ const char model_runtime_malloc_source[] = R"RAW(
 }
 )RAW";
 
+const char handle_array_destroy[] = R"RAW(
+void MSTensorHandleArrayDestroy(MSTensorHandleArray inputs) {
+  if (inputs.handle_list == NULL) {
+    return;
+  }
+  for (size_t i = 0; i < inputs.handle_num; i++) {
+    MicroTensor *micro_tensor = inputs.handle_list[i];
+    if (!micro_tensor) {
+      continue;
+    }
+    if (micro_tensor->data) {
+      free(micro_tensor->data);
+      micro_tensor->data = NULL;
+    }
+    if (micro_tensor->shape) {
+      free(micro_tensor->shape);
+      micro_tensor->shape = NULL;
+    }
+    free(micro_tensor);
+    micro_tensor = NULL;
+  }
+  free(inputs.handle_list);
+  inputs.handle_list = NULL;
+}
+
+)RAW";
+const char cortex_set_workspace[] = R"RAW(
+  MicroModel *micro_model = (MicroModel *)model;
+  if (micro_model == NULL) {
+    return;
+  }
+  if (workspace_size < MSModelCalcWorkspaceSize(model)) {
+    return;
+  }
+  if (micro_model->inputs.handle_num != GRAPH_INPUTS_SIZE) {
+    return;
+  }
+  if (micro_model->outputs.handle_num != GRAPH_OUTPUTS_SIZE) {
+    return;
+  }
+
+  micro_model->runtime_buffer = workspace;
+  int buffer_size = GetBufferSize();
+  char* buf = workspace;
+  SetBuffer(buf);
+
+)RAW";
+void CodeMSModelCalcWorkspaceSize(std::ofstream &ofs, const std::unique_ptr<CoderContext> &ctx,
+                                  const Configurator &config) {
+  if (config.target() == kCortex_M) {
+    ofs << "size_t MSModelCalcWorkspaceSize(MSModelHandle model) {\n";
+    ofs << "  size_t shape_size = 0;\n";
+    std::vector<Tensor *> inputs = ctx->graph_inputs();
+    for (size_t i = 0; i < inputs.size(); ++i) {
+      ofs << "  shape_size += " << inputs[i]->shape().size() << " * sizeof(int64_t);\n";
+    }
+    std::vector<Tensor *> outputs = ctx->graph_outputs();
+    for (size_t i = 0; i < outputs.size(); ++i) {
+      ofs << "  shape_size += " << outputs[i]->shape().size() << " * sizeof(int64_t);\n";
+    }
+    ofs << "  return GetBufferSize() + WEIGHT_BUF_SIZE + shape_size + "
+        << "(sizeof(MicroTensor) + sizeof(MicroTensor *)) * "
+        << (ctx->graph_inputs().size() + ctx->graph_outputs().size()) << ";\n}\n";
+  } else {
+    ofs << "size_t MSModelCalcWorkspaceSize(MSModelHandle model) {\n  return 0;\n}\n";
+  }
+  ofs << "\n";
+}
+
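
To make the generator above concrete: for a hypothetical Cortex-M model with one 4-D input and one 2-D output, the emitted function would read roughly as below. Every statement follows from the `ofs` writes above; only the dimension counts are invented.

```c
/* Hypothetical output of CodeMSModelCalcWorkspaceSize (not compilable in
   isolation; GetBufferSize(), WEIGHT_BUF_SIZE and MicroTensor come from
   the rest of the generated package). */
size_t MSModelCalcWorkspaceSize(MSModelHandle model) {
  size_t shape_size = 0;
  shape_size += 4 * sizeof(int64_t); /* input shape storage */
  shape_size += 2 * sizeof(int64_t); /* output shape storage */
  return GetBufferSize() + WEIGHT_BUF_SIZE + shape_size +
         (sizeof(MicroTensor) + sizeof(MicroTensor *)) * 2;
}
```
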
+void CodeMSModelSetWorkspace(std::ofstream &ofs, const std::unique_ptr<CoderContext> &ctx, const Configurator &config) {
+  ofs << "void MSModelSetWorkspace(MSModelHandle model, void *workspace, size_t workspace_size) {\n";
+  if (config.target() == kCortex_M) {
+    ofs << cortex_set_workspace;
+    ofs << "  " << ctx->weight_name() << " = (uint8_t *)&buf[buffer_size];\n";
+    ofs << R"RAW(
+  buffer_size += WEIGHT_BUF_SIZE;
+
+  micro_model->inputs.handle_list = (MSTensorHandle *)&buf[buffer_size];
+  buffer_size += GRAPH_INPUTS_SIZE * sizeof(MicroTensor *);
+  MicroTensor **input_tensors = (MicroTensor **)micro_model->inputs.handle_list;
+
+  micro_model->outputs.handle_list = (MSTensorHandle *)&buf[buffer_size];
+  buffer_size += GRAPH_OUTPUTS_SIZE * sizeof(MicroTensor *);
+  MicroTensor **output_tensors = (MicroTensor **)micro_model->outputs.handle_list;
+)RAW";
+    ofs << "  int i;\n"
+        << "  for (i = 0; i < GRAPH_INPUTS_SIZE; i++) {\n";
+    std::vector<Tensor *> inputs = ctx->graph_inputs();
+    for (size_t i = 0; i < inputs.size(); ++i) {
+      ofs << "    input_tensors[i] = (MicroTensor *)&buf[buffer_size];\n"
+          << "    buffer_size += sizeof(MicroTensor);\n";
+      ofs << "    input_tensors[i]->shape = (int64_t *)&buf[buffer_size];\n"
+          << "    buffer_size += " << inputs[i]->shape().size() * sizeof(int64_t) << ";\n";
+    }
+    ofs << "  }\n";
+
+    ofs << "  for (i = 0; i < GRAPH_OUTPUTS_SIZE; i++) {\n";
+    std::vector<Tensor *> outputs = ctx->graph_outputs();
+    for (size_t i = 0; i < outputs.size(); ++i) {
+      ofs << "    output_tensors[i] = (MicroTensor *)&buf[buffer_size];\n"
+          << "    buffer_size += sizeof(MicroTensor);\n";
+      ofs << "    output_tensors[i]->shape = (int64_t *)&buf[buffer_size];\n"
+          << "    buffer_size += " << outputs[i]->shape().size() * sizeof(int64_t) << ";\n";
+    }
+    ofs << "  }\n";
+
+    auto array_tostring = [&ofs](Tensor *tensor, const std::string &prefix, size_t index) {
+      ofs << kAlignedString << prefix << "_tensors[" << index << "]->type = " << EnumNameMSDataType(tensor->data_type())
+          << ";\n";
+      ofs << kAlignedString << prefix << "_tensors[" << index << "]->format = kMSFormatNHWC;\n";
+      ofs << kAlignedString << prefix << "_tensors[" << index << "]->ndim = " << tensor->shape().size() << ";\n";
+      size_t shape_size = tensor->shape().size();
+      for (size_t i = 0; i < shape_size; i++) {
+        ofs << kAlignedString << prefix << "_tensors[" << index << "]->shape[" << i << "] = " << tensor->shape()[i]
+            << ";\n";
+      }
+      ofs << kAlignedString << prefix << "_tensors[" << index << "]->name = \"" << tensor->tensor_name() << "\";\n";
+      ofs << kAlignedString << prefix << "_tensors[" << index << "]->data = NULL;\n";
+    };
+    for (size_t i = 0; i < inputs.size(); ++i) {
+      array_tostring(inputs[i], "input", i);
+    }
+    for (size_t i = 0; i < outputs.size(); ++i) {
+      array_tostring(outputs[i], "output", i);
+    }
+  }
+  ofs << "}\n\n";
+}
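
For the same hypothetical one-input/one-output model, the emitted MSModelSetWorkspace carves everything out of the caller's buffer in a fixed order: runtime scratch, weight blob, handle lists, then one MicroTensor plus its shape array per tensor. `g_weight` stands in for whatever `ctx->weight_name()` returns.

```c
/* Hypothetical output of CodeMSModelSetWorkspace (sketch; relies on the
   generated net.h/weight.c definitions). */
void MSModelSetWorkspace(MSModelHandle model, void *workspace, size_t workspace_size) {
  MicroModel *micro_model = (MicroModel *)model;
  if (micro_model == NULL) {
    return;
  }
  if (workspace_size < MSModelCalcWorkspaceSize(model)) {
    return;
  }
  if (micro_model->inputs.handle_num != GRAPH_INPUTS_SIZE) {
    return;
  }
  if (micro_model->outputs.handle_num != GRAPH_OUTPUTS_SIZE) {
    return;
  }

  micro_model->runtime_buffer = workspace;
  int buffer_size = GetBufferSize();
  char *buf = workspace;
  SetBuffer(buf);

  g_weight = (uint8_t *)&buf[buffer_size]; /* name from ctx->weight_name() */
  buffer_size += WEIGHT_BUF_SIZE;

  micro_model->inputs.handle_list = (MSTensorHandle *)&buf[buffer_size];
  buffer_size += GRAPH_INPUTS_SIZE * sizeof(MicroTensor *);
  MicroTensor **input_tensors = (MicroTensor **)micro_model->inputs.handle_list;

  micro_model->outputs.handle_list = (MSTensorHandle *)&buf[buffer_size];
  buffer_size += GRAPH_OUTPUTS_SIZE * sizeof(MicroTensor *);
  MicroTensor **output_tensors = (MicroTensor **)micro_model->outputs.handle_list;
  int i;
  for (i = 0; i < GRAPH_INPUTS_SIZE; i++) {
    input_tensors[i] = (MicroTensor *)&buf[buffer_size];
    buffer_size += sizeof(MicroTensor);
    input_tensors[i]->shape = (int64_t *)&buf[buffer_size];
    buffer_size += 32; /* 4 dims x sizeof(int64_t) */
  }
  for (i = 0; i < GRAPH_OUTPUTS_SIZE; i++) {
    output_tensors[i] = (MicroTensor *)&buf[buffer_size];
    buffer_size += sizeof(MicroTensor);
    output_tensors[i]->shape = (int64_t *)&buf[buffer_size];
    buffer_size += 16; /* 2 dims x sizeof(int64_t) */
  }
  /* The array_tostring lambda then emits per-tensor metadata:
     type, format, ndim, shape values, name, and data = NULL. */
}
```
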
 void CodeMSModelCreate(std::ofstream &ofs, const std::unique_ptr<CoderContext> &ctx, const Configurator &config) {
-  ofs << model_runtime_init_source;
+  ofs << micro_model_define_source;
   if (config.target() != kCortex_M) {
     ofs << model_runtime_malloc_source;
-  } else {
-    ofs << "  micro_model->runtime_buffer = " << ctx->buffer_name() << ";\n";
-  }
-  if (config.code_mode() == CodeMode::Inference) {
-    ofs << "  micro_model->train_mode = false;\n";
-  } else if (config.code_mode() == CodeMode::Train) {
-    ofs << "  micro_model->train_mode = true;\n";
-  }
-  auto array_tostring = [&ofs](Tensor *tensor, const std::string &prefix, size_t index) {
-    ofs << kAlignedString << prefix << "_tensors[" << index << "] = malloc(sizeof(MicroTensor));\n";
-    ofs << kAlignedString << prefix << "_tensors[" << index << "]->type = " << EnumNameMSDataType(tensor->data_type())
-        << ";\n";
-    ofs << kAlignedString << prefix << "_tensors[" << index << "]->format = kMSFormatNHWC;\n";
-    ofs << kAlignedString << prefix << "_tensors[" << index << "]->ndim = " << tensor->shape().size() << ";\n";
-    size_t shape_size = tensor->shape().size();
-    ofs << kAlignedString << prefix << "_tensors[" << index << "]->shape = "
-        << "malloc(" << shape_size << " * sizeof(int64_t));\n";
-    for (size_t i = 0; i < shape_size; i++) {
-      ofs << kAlignedString << prefix << "_tensors[" << index << "]->shape[" << i << "]= " << tensor->shape()[i]
-          << ";\n";
+    if (config.code_mode() == CodeMode::Inference) {
+      ofs << "  micro_model->train_mode = false;\n";
+    } else if (config.code_mode() == CodeMode::Train) {
+      ofs << "  micro_model->train_mode = true;\n";
     }
-    ofs << kAlignedString << prefix << "_tensors[" << index << "]->name = \"" << tensor->tensor_name() << "\";\n";
-    ofs << kAlignedString << prefix << "_tensors[" << index << "]->data = NULL;\n";
-  };
-  std::vector<Tensor *> inputs = ctx->graph_inputs();
-  std::vector<Tensor *> outputs = ctx->graph_outputs();
-  if (config.code_mode() == CodeMode::Inference) {
-    outputs = ctx->graph_outputs();
-  } else if (config.code_mode() == CodeMode::Train) {
-    outputs = ctx->graph_train_outputs();
+    auto array_tostring = [&ofs](Tensor *tensor, const std::string &prefix, size_t index) {
+      ofs << kAlignedString << prefix << "_tensors[" << index << "] = malloc(sizeof(MicroTensor));\n";
+      ofs << kAlignedString << prefix << "_tensors[" << index << "]->type = " << EnumNameMSDataType(tensor->data_type())
+          << ";\n";
+      ofs << kAlignedString << prefix << "_tensors[" << index << "]->format = kMSFormatNHWC;\n";
+      ofs << kAlignedString << prefix << "_tensors[" << index << "]->ndim = " << tensor->shape().size() << ";\n";
+      size_t shape_size = tensor->shape().size();
+      ofs << kAlignedString << prefix << "_tensors[" << index << "]->shape = "
+          << "malloc(" << shape_size << " * sizeof(int64_t));\n";
+      for (size_t i = 0; i < shape_size; i++) {
+        ofs << kAlignedString << prefix << "_tensors[" << index << "]->shape[" << i << "] = " << tensor->shape()[i]
+            << ";\n";
+      }
+      ofs << kAlignedString << prefix << "_tensors[" << index << "]->name = \"" << tensor->tensor_name() << "\";\n";
+      ofs << kAlignedString << prefix << "_tensors[" << index << "]->data = NULL;\n";
+    };
+    std::vector<Tensor *> inputs = ctx->graph_inputs();
+    std::vector<Tensor *> outputs = ctx->graph_outputs();
+    if (config.code_mode() == CodeMode::Inference) {
+      outputs = ctx->graph_outputs();
+    } else if (config.code_mode() == CodeMode::Train) {
+      outputs = ctx->graph_train_outputs();
+    }
+    size_t inputs_size = inputs.size();
+    ofs << "  MSTensorHandleArray model_inputs;\n";
+    ofs << "  model_inputs.handle_num = " << inputs_size << ";\n";
+    ofs << "  MicroTensor **input_tensors = malloc(" << inputs_size << " * sizeof(MicroTensor *));\n";
+    ofs << "  model_inputs.handle_list = (MSTensorHandle *)(input_tensors);\n";
+    ofs << "  micro_model->inputs = model_inputs;\n";
+    for (size_t i = 0; i < inputs_size; ++i) {
+      Tensor *input = inputs[i];
+      array_tostring(input, "input", i);
+    }
+    size_t outputs_size = outputs.size();
+    ofs << "  MSTensorHandleArray model_outputs;\n";
+    ofs << "  model_outputs.handle_num = " << outputs_size << ";\n";
+    ofs << "  MicroTensor **output_tensors = malloc(" << outputs_size << " * sizeof(MicroTensor *));\n";
+    ofs << "  model_outputs.handle_list = (MSTensorHandle *)(output_tensors);\n";
+    ofs << "  micro_model->outputs = model_outputs;\n";
+    for (size_t i = 0; i < outputs_size; ++i) {
+      Tensor *output = outputs[i];
+      array_tostring(output, "output", i);
+    }
+    ofs << "  return (MSModelHandle)micro_model;\n";
+  } else {
+    ofs << "#define GRAPH_INPUTS_SIZE " << ctx->graph_inputs().size() << "\n";
+    ofs << "#define GRAPH_OUTPUTS_SIZE " << ctx->graph_outputs().size() << "\n";
+    ofs << "#define WEIGHT_BUF_SIZE " << ctx->weight_buffer_size() << "\n";
+    ofs << "MSModelHandle MSModelCreate() {\n";
+    ofs << "  static MicroModel model;\n";
+    ofs << "  model.runtime_buffer = NULL;\n";
+    ofs << "  model.inputs.handle_num = GRAPH_INPUTS_SIZE;\n";
+    ofs << "  model.inputs.handle_list = NULL;\n";
+    ofs << "  model.outputs.handle_num = GRAPH_OUTPUTS_SIZE;\n";
+    ofs << "  model.outputs.handle_list = NULL;\n";
+    ofs << "  model.train_mode = false;\n";
+    ofs << "  return (MSModelHandle)&model;\n";
   }
-  size_t inputs_size = inputs.size();
-  ofs << "  MSTensorHandleArray model_inputs;\n";
-  ofs << "  model_inputs.handle_num = " << inputs_size << ";\n";
-  ofs << "  MicroTensor **input_tensors = malloc(" << inputs_size << " * sizeof(MicroTensor *));\n";
-  ofs << "  model_inputs.handle_list = (MSTensorHandle *)(input_tensors);\n";
-  ofs << "  micro_model->inputs = model_inputs;\n";
-  for (size_t i = 0; i < inputs_size; ++i) {
-    Tensor *input = inputs[i];
-    array_tostring(input, "input", i);
-  }
-  size_t outputs_size = outputs.size();
-  ofs << "  MSTensorHandleArray model_outputs;\n";
-  ofs << "  model_outputs.handle_num = " << outputs_size << ";\n";
-  ofs << "  MicroTensor **output_tensors = malloc(" << outputs_size << " * sizeof(MicroTensor *));\n";
-  ofs << "  model_outputs.handle_list = (MSTensorHandle *)(output_tensors);\n";
-  ofs << "  micro_model->outputs = model_outputs;\n";
-  for (size_t i = 0; i < outputs_size; ++i) {
-    Tensor *output = outputs[i];
-    array_tostring(output, "output", i);
-  }
-  ofs << "  return (MSModelHandle)micro_model;\n";
   ofs << "}\n\n";
 }
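
On the Cortex-M branch the generator now emits a heap-free MSModelCreate: the model lives in static storage and nothing is allocated until MSModelSetWorkspace supplies the buffer. For the same hypothetical model, with an invented 25600-byte weight blob standing in for `ctx->weight_buffer_size()`, the generated prologue reads:

```c
/* Hypothetical output of the kCortex_M branch above. */
#define GRAPH_INPUTS_SIZE 1
#define GRAPH_OUTPUTS_SIZE 1
#define WEIGHT_BUF_SIZE 25600 /* illustrative */

MSModelHandle MSModelCreate() {
  static MicroModel model; /* static storage: no malloc on bare metal */
  model.runtime_buffer = NULL;
  model.inputs.handle_num = GRAPH_INPUTS_SIZE;
  model.inputs.handle_list = NULL;
  model.outputs.handle_num = GRAPH_OUTPUTS_SIZE;
  model.outputs.handle_list = NULL;
  model.train_mode = false;
  return (MSModelHandle)&model;
}
```
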
@@ -116,6 +256,9 @@ void CodeMSModelBuild(std::ofstream &ofs, const Configurator *config) {
          "                      const MSContextHandle model_context) {\n"
          "  if (model_type != kMSModelTypeMindIR) {\n"
          "    return kMSStatusLiteNotSupport;\n"
+         "  }\n"
+         "  if (((MicroModel *)model)->runtime_buffer == NULL) {\n"
+         "    return kMSStatusLiteMemoryFailed;\n"
          "  }\n";
   ofs << "  int ret = RET_OK;\n";
   if (config->target() != kCortex_M) {
@@ -139,23 +282,26 @@ void CodeMSModelBuild(std::ofstream &ofs, const Configurator *config) {
 }
 
 void CodeMSModelDestory(std::ofstream &ofs, const Configurator *config) {
-  ofs << "void MSModelDestroy(MSModelHandle *model) {\n";
-  ofs << "  if (*model) {\n"
-         "    MicroModel *micro_model = (MicroModel *)*model;\n";
   if (config->target() != kCortex_M) {
+    ofs << handle_array_destroy;
+  }
+  ofs << "void MSModelDestroy(MSModelHandle *model) {\n";
+  if (config->target() != kCortex_M) {
+    ofs << "  if (*model) {\n"
+           "    MicroModel *micro_model = (MicroModel *)*model;\n";
     ofs << "    if (micro_model->runtime_buffer) {\n"
            "      free(micro_model->runtime_buffer);\n"
            "      micro_model->runtime_buffer = NULL;\n"
            "    }\n";
-  }
-  ofs << "    MSTensorHandleArrayDestroy(micro_model->inputs);\n"
-         "    MSTensorHandleArrayDestroy(micro_model->outputs);\n"
-         "    free(*model);\n"
-         "    *model = NULL;\n"
-         "  }\n";
+    ofs << "    MSTensorHandleArrayDestroy(micro_model->inputs);\n"
+           "    MSTensorHandleArrayDestroy(micro_model->outputs);\n"
+           "    free(*model);\n"
+           "    *model = NULL;\n"
+           "  }\n";
 
-  if (config->support_parallel()) {
-    ofs << "  ClearThreadPool();\n";
+    if (config->support_parallel()) {
+      ofs << "  ClearThreadPool();\n";
+    }
   }
   ofs << "}\n";
 }
diff --git a/mindspore/lite/tools/converter/micro/coder/generator/component/common_component.h b/mindspore/lite/tools/converter/micro/coder/generator/component/common_component.h
index 08dfbc909fc..6ecf0063a10 100644
--- a/mindspore/lite/tools/converter/micro/coder/generator/component/common_component.h
+++ b/mindspore/lite/tools/converter/micro/coder/generator/component/common_component.h
@@ -27,6 +27,9 @@
 #include "tools/converter/micro/coder/config.h"
 
 namespace mindspore::lite::micro {
+void CodeMSModelCalcWorkspaceSize(std::ofstream &ofs, const std::unique_ptr<CoderContext> &ctx,
+                                  const Configurator &config);
+void CodeMSModelSetWorkspace(std::ofstream &ofs, const std::unique_ptr<CoderContext> &ctx, const Configurator &config);
 void CodeMSModelCreate(std::ofstream &ofs, const std::unique_ptr<CoderContext> &ctx, const Configurator &config);
 void CodeMSModelBuild(std::ofstream &ofs, const Configurator *config);
 void CodeMSModelDestory(std::ofstream &ofs, const Configurator *config);
diff --git a/mindspore/lite/tools/converter/micro/coder/generator/component/const_blocks/benchmark.cc b/mindspore/lite/tools/converter/micro/coder/generator/component/const_blocks/benchmark.cc
index 575bff5cf2a..38f809dd0fc 100644
--- a/mindspore/lite/tools/converter/micro/coder/generator/component/const_blocks/benchmark.cc
+++ b/mindspore/lite/tools/converter/micro/coder/generator/component/const_blocks/benchmark.cc
@@ -291,6 +291,7 @@ const char benchmark_source_cortex[] = R"RAW(/**
  * limitations under the License.
  */
 
+#include "benchmark.h"
 #include "calib_output.h"
 #include "load_input.h"
 #include "data.h"
@@ -351,7 +352,7 @@ void PrintTensorHandle(MSTensorHandle tensor) {
   }
 }
 
-int benchmark() {
+int benchmark(char *work_space, unsigned int work_space_size) {
   int ret;
   printf("========run benchmark======\n");
   printf("========Model build========\n");
@@ -360,6 +361,12 @@ int benchmark() {
     printf("MSModelCreate failed.\n");
     return kMSStatusLiteNullptr;
  }
+  size_t workspace_size = MSModelCalcWorkspaceSize(model_handle);
+  if (workspace_size > work_space_size) {
+    printf("This model inference requires %zu bytes of memory.\n", workspace_size);
+    return kMSStatusLiteError;
+  }
+  MSModelSetWorkspace(model_handle, work_space, work_space_size);
   ret = MSModelBuild(model_handle, NULL, 0, kMSModelTypeMindIR, NULL);
   if (ret != kMSStatusSuccess) {
     printf("MSModelBuildFromFile failed, ret : %d.\n", ret);
@@ -421,6 +428,7 @@ int benchmark() {
   MSModelDestroy(&model_handle);
   return kMSStatusSuccess;
 }
+
 )RAW";
 
 const char benchmark_h_cortex[] = R"RAW(/**
@@ -444,7 +452,7 @@ const char benchmark_h_cortex[] = R"RAW(/**
 #ifdef __cplusplus
 extern "C" {
 #endif
-int benchmark();
+int benchmark(char *work_space, unsigned int work_space_size);
 #ifdef __cplusplus
 }
 #endif
diff --git a/mindspore/lite/tools/converter/micro/coder/generator/component/const_blocks/cmake_lists.cc b/mindspore/lite/tools/converter/micro/coder/generator/component/const_blocks/cmake_lists.cc
index a24e54ac0f2..e1aa8766cb4 100644
--- a/mindspore/lite/tools/converter/micro/coder/generator/component/const_blocks/cmake_lists.cc
+++ b/mindspore/lite/tools/converter/micro/coder/generator/component/const_blocks/cmake_lists.cc
@@ -59,9 +59,10 @@ if("${CMAKE_BUILD_TYPE}" STREQUAL "Debug")
 else()
     message(STATUS "build benchmark release version")
     set(CMAKE_C_FLAGS "-fPIC -fPIE -D_FORTIFY_SOURCE=2 -O3 -Wall -Werror -fstack-protector-strong -Wno-attributes \
-    -Wno-deprecated-declarations -Wno-missing-braces ${CMAKE_C_FLAGS}")
+    -Wno-deprecated-declarations -Wno-incompatible-pointer-types -Wno-missing-braces ${CMAKE_C_FLAGS}")
     set(CMAKE_CXX_FLAGS "-fPIC -fPIE -D_FORTIFY_SOURCE=2 -O3 -Wall -Werror -fstack-protector-strong -Wno-attributes \
-    -Wno-deprecated-declarations -Wno-missing-braces -Wno-overloaded-virtual ${CMAKE_CXX_FLAGS}")
+    -Wno-deprecated-declarations -Wno-incompatible-pointer-types -Wno-missing-braces -Wno-overloaded-virtual \
+    ${CMAKE_CXX_FLAGS}")
     string(REPLACE "-g" "" CMAKE_C_FLAGS "${CMAKE_C_FLAGS}")
     string(REPLACE "-g" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
 endif()
diff --git
 a/mindspore/lite/tools/converter/micro/coder/generator/component/const_blocks/load_input.cc b/mindspore/lite/tools/converter/micro/coder/generator/component/const_blocks/load_input.cc
index a81c08aefd6..9a2aeaa78bc 100644
--- a/mindspore/lite/tools/converter/micro/coder/generator/component/const_blocks/load_input.cc
+++ b/mindspore/lite/tools/converter/micro/coder/generator/component/const_blocks/load_input.cc
@@ -293,16 +293,17 @@ set -e
 BASEPATH=$(cd "$(dirname $0)"; pwd)
 mkdir -p build
 
-VERSION_STR=1.8.0
+VERSION_STR=1.8.1
 MINDSPORE_FILE_NAME="mindspore-lite-${VERSION_STR}-none-cortex-m7"
 MINDSPORE_FILE="${MINDSPORE_FILE_NAME}.tar.gz"
 MINDSPORE_LITE_DOWNLOAD_URL=\
-"https://ms-release.obs.cn-north-4.myhuaweicloud.com/${VERSION_STR}/MindSpore/lite/release/cortex-m/${MINDSPORE_FILE}"
+"https://ms-release.obs.cn-north-4.myhuaweicloud.com/${VERSION_STR}/MindSpore/lite/none_cortex-m/${MINDSPORE_FILE}"
 
 if [ ! -e ${BASEPATH}/${MINDSPORE_FILE} ]; then
-  wget -c -O ${BASEPATH}/${MINDSPORE_FILE} --no-check-certificate ${MINDSPORE_LITE_DOWNLOAD_URL}
+  wget -c -O ${BASEPATH}/${MINDSPORE_FILE} --no-check-certificate ${MINDSPORE_LITE_DOWNLOAD_URL}
+fi
+if [ ! -e ${BASEPATH}/${MINDSPORE_FILE_NAME} ]; then
+  tar xzf ${BASEPATH}/${MINDSPORE_FILE} -C ${BASEPATH}/
 fi
-tar xzf ${BASEPATH}/${MINDSPORE_FILE} -C ${BASEPATH}/
-
 cd build
 cmake -DPKG_PATH=../${MINDSPORE_FILE_NAME} -DCMAKE_TOOLCHAIN_FILE=../cortex-m7.toolchain.cmake ..
 make
diff --git a/mindspore/lite/tools/converter/micro/coder/generator/component/const_blocks/mcontext.cc b/mindspore/lite/tools/converter/micro/coder/generator/component/const_blocks/mcontext.cc
index 0f86e1c0807..108065165dc 100644
--- a/mindspore/lite/tools/converter/micro/coder/generator/component/const_blocks/mcontext.cc
+++ b/mindspore/lite/tools/converter/micro/coder/generator/component/const_blocks/mcontext.cc
@@ -38,6 +38,7 @@ const char context_header[] = R"RAW(
 #define MINDSPORE_LITE_MICRO_LIBRARY_SOURCE_CONTEXT_H_
 
 #include <stdbool.h>
+#include "c_api/context_c.h"
 
 typedef struct MicroContext {
   char* vendor_name_;
@@ -69,29 +70,14 @@ const char context_source_cortex[] = R"RAW(
  */
 
 #include "context.h"
-#include "c_api/context_c.h"
 #include <stdlib.h>
 #include <string.h>
 
 MSContextHandle MSContextCreate() {
-  MicroContext *micro_context = (MicroContext *)malloc(sizeof(MicroContext));
-  if (micro_context == NULL) {
-    return NULL;
-  }
-  micro_context->enable_parallel_ = false;
-  micro_context->thread_num_ = 1;
-  micro_context->affinity_core_list_ = NULL;
-  micro_context->core_num = 0;
-  micro_context->affinity_mode = 0;
-  return micro_context;
+  return NULL;
 }
 
 void MSContextDestroy(MSContextHandle *context) {
-  MicroContext *micro_context = (MicroContext *)(*context);
-  if (micro_context) {
-    free(micro_context);
-    micro_context = NULL;
-  }
 }
 
 void MSContextSetThreadNum(MSContextHandle context, int32_t thread_num) {
@@ -127,7 +113,6 @@ const char context_source_no_parallel[] = R"RAW(
  */
 
 #include "context.h"
-#include "c_api/context_c.h"
 #include <stdlib.h>
 #include <string.h>
 
@@ -185,10 +170,9 @@ const char context_source[] = R"RAW(
  */
 
 #include "context.h"
-#include "c_api/context_c.h"
-#include "wrapper/thread/micro_core_affinity.h"
 #include <stdlib.h>
 #include <string.h>
+#include "wrapper/thread/micro_core_affinity.h"
 
 #define MAX_THREAD_NUM 4
diff --git a/mindspore/lite/tools/converter/micro/coder/generator/component/const_blocks/msession.cc b/mindspore/lite/tools/converter/micro/coder/generator/component/const_blocks/msession.cc
index 8d329ecea28..174d9447af3 100644
---
 a/mindspore/lite/tools/converter/micro/coder/generator/component/const_blocks/msession.cc
+++ b/mindspore/lite/tools/converter/micro/coder/generator/component/const_blocks/msession.cc
@@ -18,30 +18,6 @@
 namespace mindspore::lite::micro {
 
 const char model_runtime_other_source[] = R"RAW(
-void MSTensorHandleArrayDestroy(MSTensorHandleArray inputs) {
-  if (inputs.handle_list == NULL) {
-    return;
-  }
-  for (size_t i = 0; i < inputs.handle_num; i++) {
-    MicroTensor *micro_tensor = inputs.handle_list[i];
-    if (!micro_tensor) {
-      continue;
-    }
-    if (micro_tensor->data) {
-      free(micro_tensor->data);
-      micro_tensor->data = NULL;
-    }
-    if (micro_tensor->shape) {
-      free(micro_tensor->shape);
-      micro_tensor->shape = NULL;
-    }
-    free(micro_tensor);
-    micro_tensor = NULL;
-  }
-  free(inputs.handle_list);
-  inputs.handle_list = NULL;
-}
-
 MSTensorHandleArray MSModelGetInputs(const MSModelHandle model) {
   MicroModel *micro_model = (MicroModel *)model;
   return micro_model->inputs;
diff --git a/mindspore/lite/tools/converter/micro/coder/generator/component/weight_component.cc b/mindspore/lite/tools/converter/micro/coder/generator/component/weight_component.cc
index bfb2fdd0dc1..d62bc1b8289 100644
--- a/mindspore/lite/tools/converter/micro/coder/generator/component/weight_component.cc
+++ b/mindspore/lite/tools/converter/micro/coder/generator/component/weight_component.cc
@@ -172,11 +172,13 @@ void CodeWeightInitFunc(std::ofstream &ofs, const std::unique_ptr<CoderContext>
     ofs << "  }\n";
   } else {
     ofs << "int Init(void *weight_buffer, int weight_size) {\n";
+    ofs << "  if (" << ctx->weight_name() << " == NULL) {\n";
+    ofs << "    return RET_ERROR;\n  }\n";
     ofs << "  const size_t w_size = " << ctx->weight_buffer_size() << ";\n";
   }
   ofs << "  size_t " << ctx->weight_offset_name() << " = 0;\n";
   for (const auto &block : ctx->init_contents()) {
-    ofs << "{\n" << block << "}\n";
+    ofs << "\n{\n" << block << "}\n";
   }
   ofs << "  if (" << ctx->weight_size_name() << " < " << ctx->weight_offset_name() << ") {\n    return RET_ERROR;\n  }\n";
diff --git a/mindspore/lite/tools/converter/micro/coder/generator/generator.cc b/mindspore/lite/tools/converter/micro/coder/generator/generator.cc
index fbafa81837f..dd3c93ac71e 100644
--- a/mindspore/lite/tools/converter/micro/coder/generator/generator.cc
+++ b/mindspore/lite/tools/converter/micro/coder/generator/generator.cc
@@ -214,11 +214,14 @@ int Generator::CodeMSModelImplement() {
   ofs << "#include \"context.h\"\n";
   ofs << "#include \"c_api/model_c.h\"\n";
   ofs << "#include \"net.h\"\n";
-  ofs << "#include \"weight.h\"\n\n";
   if (config_->support_parallel()) {
     ofs << "#include \"" << kThreadWrapper << "\"\n";
   }
+  ofs << "#include \"weight.h\"\n\n";
+
   CodeMSModelCreate(ofs, ctx_, *config_);
+  CodeMSModelCalcWorkspaceSize(ofs, ctx_, *config_);
+  CodeMSModelSetWorkspace(ofs, ctx_, *config_);
   CodeMSModelBuild(ofs, config_);
   ofs << model_runtime_other_source;
   if (config_->code_mode() == CodeMode::Train) {
@@ -264,9 +267,8 @@ int Generator::CodeWeightFile() {
       return RET_ERROR;
     }
     cofs << "int __errno; \n";
-    cofs << "unsigned char g_buf[" << ctx_->total_buffer_size() + ctx_->weight_buffer_size() << "]; \n";
-    cofs << "unsigned char * " << ctx_->buffer_name() << " = &g_buf[0]; \n";
-    cofs << "unsigned char * " << ctx_->weight_name() << " = &g_buf[" << ctx_->total_buffer_size() << "]; \n";
+    cofs << "unsigned char * " << ctx_->buffer_name() << " = NULL; \n";
+    cofs << "unsigned char * " << ctx_->weight_name() << " = NULL; \n";
     CodeModelParamsData(cofs, ctx_->saved_weights());
   }
   CodeModelParamsForNet(hofs, cofs, ctx_, *config_);
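
Because generator.cc now emits NULL buffer pointers instead of carving them out of a static g_buf, the guard added to CodeWeightInitFunc stops a model whose MSModelSetWorkspace was never called from unpacking weights through a NULL pointer. Roughly, the generated Init() becomes the following; `g_weight`, `offset`, and `25600` are illustrative stand-ins for `ctx->weight_name()`, `ctx->weight_offset_name()`, and `ctx->weight_buffer_size()`.

```c
/* Hypothetical output of CodeWeightInitFunc for a Cortex-M target. */
int Init(void *weight_buffer, int weight_size) {
  if (g_weight == NULL) {
    return RET_ERROR; /* MSModelSetWorkspace was never called */
  }
  const size_t w_size = 25600;
  size_t offset = 0;
  /* ... per-weight unpacking blocks from ctx->init_contents() ... */
  if (w_size < offset) {
    return RET_ERROR;
  }
  return RET_OK;
}
```
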
diff --git a/mindspore/lite/tools/converter/micro/coder/utils/type_cast.h b/mindspore/lite/tools/converter/micro/coder/utils/type_cast.h
index 7753e123e64..be4aa34bb30 100644
--- a/mindspore/lite/tools/converter/micro/coder/utils/type_cast.h
+++ b/mindspore/lite/tools/converter/micro/coder/utils/type_cast.h
@@ -48,22 +48,15 @@ std::string EnumNameTarget(Target target);
  */
 template <typename T>
 std::string GetVariableTypeName() {
-  std::map<std::type_index, std::string> types_name = {{std::type_index(typeid(int)), "int"},
-                                                       {std::type_index(typeid(int32_t)), "int32_t"},
-                                                       {std::type_index(typeid(int16_t)), "int16_t"},
-                                                       {std::type_index(typeid(int8_t)), "int8_t"},
-                                                       {std::type_index(typeid(uint8_t)), "uint8_t"},
-                                                       {std::type_index(typeid(float)), "float"},
-                                                       {std::type_index(typeid(double)), "double"},
-                                                       {std::type_index(typeid(::QuantArg)), "QuantArg"},
-                                                       {std::type_index(typeid(void *)), "void *"},
-                                                       {std::type_index(typeid(std::string)), "float *"},
-                                                       {std::type_index(typeid(int *)), "int *"},
-                                                       {std::type_index(typeid(int32_t *)), "int32_t *"},
-                                                       {std::type_index(typeid(int16_t *)), "int16_t *"},
-                                                       {std::type_index(typeid(int8_t *)), "int8_t *"},
-                                                       {std::type_index(typeid(uint8_t *)), "uint8_t *"},
-                                                       {std::type_index(typeid(float *)), "float *"}};
+  std::map<std::type_index, std::string> types_name = {
+    {std::type_index(typeid(int)), "int32_t"},         {std::type_index(typeid(int32_t)), "int32_t"},
+    {std::type_index(typeid(int16_t)), "int16_t"},     {std::type_index(typeid(int8_t)), "int8_t"},
+    {std::type_index(typeid(uint8_t)), "uint8_t"},     {std::type_index(typeid(float)), "float"},
+    {std::type_index(typeid(double)), "double"},       {std::type_index(typeid(::QuantArg)), "QuantArg"},
+    {std::type_index(typeid(void *)), "void *"},       {std::type_index(typeid(std::string)), "float *"},
+    {std::type_index(typeid(int *)), "int32_t *"},     {std::type_index(typeid(int32_t *)), "int32_t *"},
+    {std::type_index(typeid(int16_t *)), "int16_t *"}, {std::type_index(typeid(int8_t *)), "int8_t *"},
+    {std::type_index(typeid(uint8_t *)), "uint8_t *"}, {std::type_index(typeid(float *)), "float *"}};
   auto item = types_name.find(std::type_index(typeid(T)));
   if (item != types_name.end()) {
     return item->second;