feature: unify the interface of micro and lite

This commit is contained in:
yangjie159 2021-03-18 15:08:00 +08:00
parent 9bac30d37f
commit 08efea8791
26 changed files with 583 additions and 75 deletions

View File

@@ -19,6 +19,7 @@ set(CODER_GENERATOR_SRC
${MICRO_DIR}/coder/generator/generator.cc
${MICRO_DIR}/coder/generator/inference/inference_generator.cc
${MICRO_DIR}/coder/generator/train/train_generator.cc
${MICRO_DIR}/coder/generator/component/component.cc
${MICRO_DIR}/coder/generator/component/benchmark_component.cc
${MICRO_DIR}/coder/generator/component/common_component.cc
${MICRO_DIR}/coder/generator/component/weight_component.cc
@@ -27,8 +28,11 @@ set(CODER_GENERATOR_SRC
${MICRO_DIR}/coder/generator/component/parallel_component.cc
${MICRO_DIR}/coder/generator/component/const_blocks/cmake_lists.cc
${MICRO_DIR}/coder/generator/component/const_blocks/debug_utils.cc
${MICRO_DIR}/coder/generator/component/const_blocks/msession.cc
${MICRO_DIR}/coder/generator/component/const_blocks/mtensor.cc
${MICRO_DIR}/coder/generator/component/const_blocks/license.cc
${MICRO_DIR}/coder/generator/component/const_blocks/load_input.cc
${MICRO_DIR}/coder/generator/component/const_blocks/benchmark.cc
${MICRO_DIR}/coder/generator/component/const_blocks/micro_tensor.cc
${MICRO_DIR}/coder/generator/component/const_blocks/thread_pool.cc
)

View File

@@ -96,7 +96,7 @@ void MemoryAllocator::RecordTensorsAddr(const std::map<Tensor *, size_t> &offset
void MemoryAllocator::AssignGraphInputs(const std::vector<Tensor *> &inputs) {
size_t num = inputs.size();
for (size_t i = 0; i < num; ++i) {
inputs_addr_.insert(std::make_pair(inputs.at(i), net_input_addr_ + std::to_string(i)));
tensors_addr_.insert(std::make_pair(inputs.at(i), net_input_addr_ + std::to_string(i)));
}
}
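With inputs_addr_ folded into tensors_addr_, graph inputs are now registered in the same map as every other tensor. Sketch of the effect (the "g_Input" prefix is hypothetical; net_input_addr_ supplies the real one):

// After AssignGraphInputs({in0, in1}) with net_input_addr_ == "g_Input":
//   tensors_addr_[in0] == "g_Input0", tensors_addr_[in1] == "g_Input1"
// Graph inputs are then found by the ordinary tensors_addr_ lookup, which is
// why the dedicated inputs_addr_ map is deleted in the next file.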

View File

@@ -104,11 +104,7 @@ class MemoryAllocator {
}
std::string type_info = wrap(type_name);
void *variable = reinterpret_cast<void *>(t);
auto item = inputs_addr_.find(variable);
if (item != inputs_addr_.end()) {
return type_info + item->second;
}
item = workspaces_addr_.find(variable);
auto item = workspaces_addr_.find(variable);
if (item != workspaces_addr_.end()) {
return type_info + wrap(item->second);
}
@@ -174,7 +170,6 @@ class MemoryAllocator {
std::map<Tensor *, std::string> origin_weights_addr_;
std::map<Tensor *, std::string> malloc_weights_addr_;
std::map<Tensor *, std::string> tensors_addr_;
std::map<void *, std::string> inputs_addr_;
std::string net_input_addr_;
std::string net_buffer_addr_;
std::string net_weight_addr_;

View File

@@ -28,6 +28,7 @@
#include "src/common/file_utils.h"
#include "src/common/utils.h"
#include "coder/coder_config.h"
#include "coder/generator/component/component.h"
namespace mindspore::lite::micro {
@@ -39,6 +40,7 @@ class CoderFlags : public virtual FlagParser {
AddFlag(&CoderFlags::code_module_name_, "moduleName", "Input code module name", "");
AddFlag(&CoderFlags::target_, "target", "generated code target, x86| ARM32M| ARM32A| ARM64", "x86");
AddFlag(&CoderFlags::code_mode_, "codeMode", "generated code mode, Inference | Train", "Inference");
AddFlag(&CoderFlags::interface_, "interface", "the interface of generated code, CPP | C", "CPP");
AddFlag(&CoderFlags::support_parallel_, "supportParallel", "whether support parallel launch, true | false", false);
AddFlag(&CoderFlags::debug_mode_, "debugMode", "dump the tensors data for debugging, true | false", false);
}
@@ -50,6 +52,7 @@ class CoderFlags : public virtual FlagParser {
std::string code_module_name_;
std::string code_path_;
std::string code_mode_;
std::string interface_;
bool debug_mode_{false};
std::string target_;
};
@@ -87,6 +90,7 @@ int Coder::Init(const CoderFlags &flags) const {
static const std::map<std::string, Target> kTargetMap = {
{"x86", kX86}, {"ARM32M", kARM32M}, {"ARM32A", kARM32A}, {"ARM64", kARM64}, {"All", kAllTargets}};
static const std::map<std::string, CodeMode> kCodeModeMap = {{"Inference", Inference}, {"Train", Train}};
static const std::map<std::string, Interface> kInterfaceMap = {{"CPP", Interface_CPP}, {"C", Interface_C}};
Configurator *config = Configurator::GetInstance();
@@ -106,7 +110,14 @@ int Coder::Init(const CoderFlags &flags) const {
});
parsers.emplace_back([&flags, config]() -> bool {
if (flags.support_parallel_ == true && config->target() == kARM32M) {
auto item = kInterfaceMap.find(flags.interface_);
MS_CHECK_TRUE_RET_BOOL(item != kInterfaceMap.end(), "unsupported interface: " + flags.interface_);
config->set_interface(item->second);
return true;
});
parsers.emplace_back([&flags, config]() -> bool {
if (flags.support_parallel_ && config->target() == kARM32M) {
MS_LOG(ERROR) << "arm32M cannot support parallel.";
return false;
}
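For context, Coder::Init validates flags through a chain of bool-returning lambdas collected in parsers; a self-contained sketch of that pattern (helper name hypothetical):

#include <functional>
#include <vector>

// Run each validation step in order; the first step returning false aborts
// initialization, which is how the parser list above is consumed.
bool RunParsers(const std::vector<std::function<bool()>> &parsers) {
  for (const auto &parse : parsers) {
    if (!parse()) {
      return false;
    }
  }
  return true;
}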
@@ -162,6 +173,7 @@ int Coder::Init(const CoderFlags &flags) const {
}
return RET_ERROR;
}
config->set_module_name(kModelName);
auto print_parameter = [](auto name, auto value) {
MS_LOG(INFO) << std::setw(20) << std::left << name << "= " << value;

View File

@@ -22,6 +22,7 @@
namespace mindspore::lite::micro {
enum Target { kX86 = 0, kARM32M = 1, kARM32A = 2, kARM64 = 3, kAllTargets = 4, kTargetUnknown = 99 };
enum CodeMode { Inference = 0, Train = 1, Code_Unknown = 99 };
enum Interface { Interface_CPP = 0, Interface_C = 1, Interface_Unknown = 99 };
inline const char *EnumNameTarget(Target target) {
switch (target) {
@@ -70,6 +71,9 @@ class Configurator {
void set_code_mode(CodeMode code_mode) { code_mode_ = code_mode; }
CodeMode code_mode() const { return code_mode_; }
void set_interface(Interface interface) { interface_ = interface; }
Interface interface() const { return interface_; }
void set_debug_mode(bool debug) { debug_mode_ = debug; }
bool debug_mode() const { return debug_mode_; }
@@ -84,6 +88,7 @@ class Configurator {
std::string code_path_;
Target target_{kTargetUnknown};
CodeMode code_mode_{Code_Unknown};
Interface interface_{Interface_CPP};
bool support_parallel_{false};
bool debug_mode_{false};
};
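The header already provides EnumNameTarget; a matching helper for the new enum, if one were wanted, could mirror it (hypothetical, not part of this commit):

inline const char *EnumNameInterface(Interface interface) {
  switch (interface) {
    case Interface_CPP:
      return "CPP";
    case Interface_C:
      return "C";
    default:
      return "Interface_Unknown";
  }
}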

View File

@@ -20,10 +20,9 @@
namespace mindspore::lite::micro {
void CodeCMakeNetLibrary(std::ofstream &ofs, const std::string &module_name, const std::unique_ptr<CoderContext> &ctx,
Target target) {
void CodeCMakeNetLibrary(std::ofstream &ofs, const std::unique_ptr<CoderContext> &ctx, const Configurator *config) {
ofs << "include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../include/)\n";
if (target == kARM32M) {
if (config->target() == kARM32M) {
ofs << "include_directories(${OP_HEADER_PATH}/CMSIS/NN/Include)\n"
<< "include_directories(${OP_HEADER_PATH}/CMSIS/DSP/Include)\n"
<< "include_directories(${OP_HEADER_PATH}/CMSIS/Core/Include)\n";
@@ -32,12 +31,15 @@ void CodeCMakeNetLibrary(std::ofstream &ofs, const std::string &module_name, con
for (const std::string &c_file : ctx->c_files()) {
ofs << " " << c_file << ".o\n";
}
ofs << " " << module_name << "_weight.c.o\n"
<< " " << module_name << ".c.o\n"
<< ")\n";
ofs << " net_weight.c.o\n"
<< " net.c.o\n";
if (config->interface() == Interface_CPP) {
ofs << " session.cc.o\n"
<< " tensor.cc.o\n";
}
ofs << ")\n";
std::set<std::string> kernel_cmake_asm_set_files = ctx->asm_files();
if (!kernel_cmake_asm_set_files.empty() && (target == kARM32A || target == kARM64)) {
if (!kernel_cmake_asm_set_files.empty() && (config->target() == kARM32A || config->target() == kARM64)) {
ofs << "set(ASSEMBLY_SRC\n";
for (const std::string &asm_file : kernel_cmake_asm_set_files) {
ofs << " " << asm_file << ".o\n";
@@ -46,9 +48,11 @@ void CodeCMakeNetLibrary(std::ofstream &ofs, const std::string &module_name, con
<< "set_property(SOURCE ${ASSEMBLY_SRC} PROPERTY LANGUAGE C)\n"
<< "list(APPEND OP_SRC ${ASSEMBLY_SRC})\n";
}
ofs << "file(GLOB NET_SRC ${CMAKE_CURRENT_SOURCE_DIR}/*.c)\n"
<< "add_library(net STATIC ${NET_SRC})\n";
ofs << "file(GLOB NET_SRC\n"
" ${CMAKE_CURRENT_SOURCE_DIR}/*.cc\n"
" ${CMAKE_CURRENT_SOURCE_DIR}/*.c\n"
" )\n"
"add_library(net STATIC ${NET_SRC})\n";
}
} // namespace mindspore::lite::micro

View File

@@ -27,8 +27,7 @@
#include "coder/context.h"
namespace mindspore::lite::micro {
void CodeCMakeNetLibrary(std::ofstream &ofs, const std::string &module_name, const std::unique_ptr<CoderContext> &ctx,
Target target);
void CodeCMakeNetLibrary(std::ofstream &ofs, const std::unique_ptr<CoderContext> &ctx, const Configurator *config);
} // namespace mindspore::lite::micro

View File

@@ -17,7 +17,9 @@
#include "coder/generator/component/common_component.h"
#include <memory>
#include "coder/generator/component/const_blocks/license.h"
#include "coder/generator/component/component.h"
#include "coder/utils/type_cast.h"
#include "coder/utils/coder_utils.h"
#include "coder/log.h"
#include "include/errorcode.h"
#include "nnacl/op_base.h"
@@ -29,6 +31,59 @@ void CodeSourceFileInclude(std::ofstream &ofs, const std::string &weight_file, c
<< "#include \"" << header << "\"\n\n";
}
void CodeSessionCompileGraph(std::ofstream &ofs, const std::unique_ptr<CoderContext> &ctx) {
std::vector<Tensor *> inputs = ctx->graph_inputs();
std::vector<Tensor *> outputs = ctx->graph_outputs();
size_t inputs_size = inputs.size();
size_t outputs_size = outputs.size();
ofs << kNameSpaceMindSpore << " {\n";
ofs << kNameSpaceLite << " {\n";
ofs << "int LiteSession::CompileGraph(lite::Model *model) {\n";
ofs << " inputs_.resize(" << inputs_size << ");\n";
for (size_t i = 0; i < inputs_size; ++i) {
Tensor *input = inputs[i];
ofs << " inputs_[" << i << "] = new (std::nothrow) MTensor(\"" << input->tensor_name() << "\", "
<< EnumNameDataType(input->data_type()) << ", " << ArrayToString(input->shape()) << ");\n";
ofs << " MS_ERROR_IF_NULL(inputs_[" << i << "]);\n";
}
ofs << " outputs_.resize(" << outputs_size << ");\n";
for (size_t i = 0; i < outputs_size; ++i) {
Tensor *output = outputs[i];
ofs << " outputs_[" << i << "] = new (std::nothrow) MTensor(\"" << output->tensor_name() << "\", "
<< EnumNameDataType(output->data_type()) << ", " << ArrayToString(output->shape()) << ");\n";
ofs << " MS_ERROR_IF_NULL(outputs_[" << i << "]);\n";
}
ofs << " for (const auto &output: outputs_) {\n"
" output_tensor_map_[output->tensor_name()] = output;\n"
" }\n";
ofs << " return RET_OK;\n";
ofs << "}\n\n";
}
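For a model with one float32 input and one float32 output, the emitted CompileGraph would look roughly like this (tensor names and shapes invented for illustration):

int LiteSession::CompileGraph(lite::Model *model) {
  inputs_.resize(1);
  inputs_[0] = new (std::nothrow) MTensor("graph_input_0", kNumberTypeFloat32, {1, 28, 28, 1});
  MS_ERROR_IF_NULL(inputs_[0]);
  outputs_.resize(1);
  outputs_[0] = new (std::nothrow) MTensor("Softmax-1", kNumberTypeFloat32, {1, 10});
  MS_ERROR_IF_NULL(outputs_[0]);
  for (const auto &output : outputs_) {
    output_tensor_map_[output->tensor_name()] = output;
  }
  return RET_OK;
}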
void CodeCopyOutputsState(std::ofstream &ofs) { ofs << "int CopyOutputsData(void **outputs, int num);\n\n"; }
void CodeCopyOutputsImplement(std::ofstream &ofs, const std::unique_ptr<CoderContext> &ctx) {
auto tensor_map = ctx->tensors_map();
std::vector<Tensor *> outputs = ctx->graph_outputs();
size_t outputs_size = outputs.size();
ofs << "int CopyOutputsData(void **outputs, int num) {\n"
" if (outputs == NULL) {\n"
" return RET_ERROR;\n"
" }\n"
<< " if (num != " << outputs_size << ") {\n"
<< " return RET_ERROR;\n"
" }\n";
for (size_t i = 0; i < outputs_size; ++i) {
Tensor *output = outputs[i];
MS_CHECK_PTR_IF_NULL(output);
ofs << " memcpy(outputs[" << i << "], " << tensor_map[output] << ", " << output->Size() << ");\n";
}
ofs << " outputs[0] = net_B;\n"
" return RET_OK;\n"
"}\n\n";
}
void CodeInputAndOutputState(std::ofstream &ofs, const std::string &module_name) {
ofs << "/**\n"
<< " * set input tensors\n"
@@ -61,7 +116,7 @@ void PrintMicroTensors(std::ofstream &ofs, std::vector<Tensor *> tensors, const
<< " " << name << "[" << i << "].ndim = " << tensor->shape().size() << ";\n"
<< " " << name << "[" << i << "].dim = dim" << i << ";\n"
<< " " << name << "[" << i << "].type = " << EnumMicroTensorDataType(tensor->data_type()) << ";\n"
<< " " << name << "[" << i << "].format = " << std::to_string(tensor->format()) << ";\n"
<< " " << name << "[" << i << "].format = " << EnumMicroTensorFormat(tensor->format()) << ";\n"
<< " " << name << "[" << i << "].data =" << item->second << ";\n";
}
}
@@ -82,7 +137,7 @@ void CodeInputAndOutputImplement(std::ofstream &ofs, const std::string &module_n
<< " return RET_ERROR;\n"
" }\n";
for (size_t i = 0; i < size; ++i) {
ofs << "\t" << ctx->input_name() + std::to_string(i) << " = inputs[" << i << "];\n";
ofs << "\t" << ctx->input_name() << i << " = inputs[" << i << "];\n";
}
ofs << " return RET_OK;\n}\n";
@@ -129,14 +184,6 @@ void CodeGraphQuantArgsImplement(std::ofstream &ofs, const std::string &module_n
<< "}\n";
}
void CodeInitWeightState(std::ofstream &ofs, const std::string &module_name) {
ofs << "/**\n"
<< " * @param weight_buffer, the address of the weight binary file\n"
<< " * @param weight_size, the size of the model file in bytes\n"
<< " **/\n"
<< "int " << module_name << "_Init(void *weight_buffer, int weight_size);\n\n";
}
void CodeManageResourceState(std::ofstream &ofs, const std::string &module_name) {
ofs << "/**\n"
<< " * get the memory space size of the inference.\n"
@@ -161,12 +208,10 @@ void CodeInitResourceImplement(std::ofstream &ofs, const std::string &module_nam
<< "}\n";
ofs << "int " << module_name << "_SetBuffer( void *buffer) {\n";
ofs << " if (buffer == NULL) {\n"
" MICRO_ERROR(\"memory buffer is NULL\");\n"
" return RET_ERROR;\n"
" }\n";
ofs << " " << ctx->buffer_name()
<< " = buffer;\n"
" return RET_OK;\n"
ofs << " " << ctx->buffer_name() << " = buffer;\n"
<< " return RET_OK;\n"
"}\n";
}

View File

@@ -28,6 +28,11 @@
namespace mindspore::lite::micro {
void CodeSourceFileInclude(std::ofstream &ofs, const std::string &weight_file, const std::string &header);
void CodeSessionCompileGraph(std::ofstream &ofs, const std::unique_ptr<CoderContext> &ctx);
void CodeCopyOutputsState(std::ofstream &ofs);
void CodeCopyOutputsImplement(std::ofstream &ofs, const std::unique_ptr<CoderContext> &ctx);
void CodeInputAndOutputState(std::ofstream &ofs, const std::string &module_name);
void CodeInputAndOutputImplement(std::ofstream &ofs, const std::string &module_name,
const std::unique_ptr<CoderContext> &ctx);
@@ -36,8 +41,6 @@ void CodeGraphQuantArgsState(std::ofstream &ofs, const std::string &module_name)
void CodeGraphQuantArgsImplement(std::ofstream &ofs, const std::string &module_name,
const std::unique_ptr<CoderContext> &ctx);
void CodeInitWeightState(std::ofstream &ofs, const std::string &module_name);
void CodeManageResourceState(std::ofstream &ofs, const std::string &module_name);
void CodeInitResourceImplement(std::ofstream &ofs, const std::string &module_name,
const std::unique_ptr<CoderContext> &ctx);

View File

@@ -0,0 +1,43 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "coder/generator/component/component.h"
namespace mindspore::lite::micro {
const char *kModelName = "net";
const char *kSession = "session";
const char *kByteType = "unsigned char *";
const char *kConstByteType = "const unsigned char *";
const char *kNameSpaceMindSpore = "namespace mindspore";
const char *kNameSpaceLite = "namespace lite";
const char *kExternCpp = R"RAW(
#ifdef __cplusplus
extern "C" {
#endif
)RAW";
const char *kEndExternCpp = R"RAW(
#ifdef __cplusplus
}
#endif
)RAW";
} // namespace mindspore::lite::micro
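These guards let one generated net header serve both the C benchmark and the new C++ session. Applied to the net interface they wrap declarations such as the following (signatures reconstructed from the calls in session_source further down, so treat them as illustrative):

#ifdef __cplusplus
extern "C" {
#endif
int net_SetInputs(const void **inputs, int num);
int net_Inference();
#ifdef __cplusplus
}
#endif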

View File

@@ -0,0 +1,37 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_MICRO_CODER_GENERATOR_COMPONENT_H_
#define MINDSPORE_LITE_MICRO_CODER_GENERATOR_COMPONENT_H_
namespace mindspore::lite::micro {
extern const char *kModelName;
extern const char *kSession;
extern const char *kByteType;
extern const char *kConstByteType;
extern const char *kNameSpaceMindSpore;
extern const char *kNameSpaceLite;
extern const char *kExternCpp;
extern const char *kEndExternCpp;
} // namespace mindspore::lite::micro
#endif // MINDSPORE_LITE_MICRO_CODER_GENERATOR_COMPONENT_H_

View File

@@ -43,7 +43,7 @@ const char *benchmark_source = R"RAW(
#include "include/ms_tensor.h"
#include "include/errorcode.h"
#include "read_file.h"
#include "load_input.h"
using namespace mindspore;
@@ -86,12 +86,13 @@ int main(int argc, const char **argv) {
for (size_t i = 0; i < inputs_num; ++i) {
inputs_size[i] = inputs[i]->Size();
}
int ret = ReadInputsFile(argv[1], inputs_binbuf, inputs_size, inputs_num);
int ret = ReadInputsFile(const_cast<char *>(argv[1]), inputs_binbuf, inputs_size, inputs_num);
if (ret != lite::RET_OK) {
return lite::RET_ERROR;
}
for (size_t i = 0; i < inputs_num; ++i) {
inputs[i]->set_data(inputs_binbuf[i]);
void *input_data = inputs[i]->MutableData();
memcpy(input_data, inputs_binbuf[i], inputs_size[i]);
}
ret = session->RunGraph();
@@ -100,7 +101,11 @@
}
auto outputs = session->GetOutputs();
std::cout << outputs.size() << std::endl;
std::cout << "output size: " << outputs.size() << std::endl;
for (const auto &item : outputs) {
auto output = item.second;
std::cout << "name: " << output->tensor_name() << ", size: " << output->Size() << std::endl;
}
std::cout << "run benchmark success" << std::endl;
delete session;
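The switch from set_data() to a memcpy into MutableData() is an ownership fix: the reworked MTensor (tensor.cc below) frees data_ in its destructor, so handing it inputs_binbuf[i] directly would free that buffer twice once the benchmark cleans up its own allocations (cleanup is outside this hunk). Copying keeps each allocation with exactly one owner:

void *input_data = inputs[i]->MutableData();           // buffer owned by the tensor
memcpy(input_data, inputs_binbuf[i], inputs_size[i]);  // benchmark keeps its own buffer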

View File

@@ -59,15 +59,19 @@ if(MICRO_BUILD_ARM32A)
add_definitions(-mfloat-abi=softfp -mfpu=neon)
endif()
include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../include)
set(CMAKE_C_FLAGS "${CMAKE_ENABLE_C99} ${CMAKE_C_FLAGS}")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++17")
if("${CMAKE_BUILD_TYPE}" STREQUAL "Debug")
message("*******************${CMAKE_BUILD_TYPE}**********")
set(CMAKE_C_FLAGS "-DDebug -g -fPIC -fPIE -fvisibility=default ${CMAKE_C_FLAGS}")
message(STATUS "build benchmark with debug info")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DDebug -g")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DDebug -g")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fvisibility=default")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fvisibility=default")
else()
set(CMAKE_C_FLAGS "-fPIC -fPIE -O3 -fstack-protector-strong -fomit-frame-pointer ${CMAKE_C_FLAGS}")
set(CMAKE_C_FLAGS_Release "${CMAKE_C_FLAGS_Release} -O3 -ffunction-sections -fdata-sections")
set(CMAKE_C_FLAGS "-fPIC -fPIE -D_FORTIFY_SOURCE=2 -O2 -Wall -Werror -fstack-protector-strong -Wno-attributes \
-Wno-deprecated-declarations -Wno-missing-braces ${CMAKE_C_FLAGS}")
set(CMAKE_CXX_FLAGS "-fPIC -fPIE -D_FORTIFY_SOURCE=2 -O2 -Wall -Werror -fstack-protector-strong -Wno-attributes \
-Wno-deprecated-declarations -Wno-missing-braces -Wno-overloaded-virtual ${CMAKE_CXX_FLAGS}")
endif()
link_directories(${MODEL_LIB_PATH})
include(benchmark.cmake)
@@ -96,6 +100,7 @@ message("operator header path: ${OP_HEADER_PATH}")
include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../include)
include_directories(${OP_HEADER_PATH})
include_directories(${HEADER_PATH})
include(net.cmake)
@@ -118,12 +123,17 @@ if(MICRO_BUILD_ARM32A)
endif()
set(CMAKE_C_FLAGS "${CMAKE_ENABLE_C99} ${CMAKE_C_FLAGS}")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++17")
if("${CMAKE_BUILD_TYPE}" STREQUAL "Debug")
set(CMAKE_C_FLAGS "-DDebug -g -fPIC -fPIE -fvisibility=default ${CMAKE_C_FLAGS}")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DDebug -g")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DDebug -g")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fvisibility=default")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fvisibility=default")
else()
set(CMAKE_C_FLAGS "-fPIC -fPIE -O3 -Werror -fstack-protector-strong -fomit-frame-pointer ${CMAKE_C_FLAGS}")
set(CMAKE_C_FLAGS_Release "${CMAKE_C_FLAGS_Release} -O3 -ffunction-sections -Werror -fdata-sections")
string(REPLACE "-g" "" CMAKE_C_FLAGS "${CMAKE_C_FLAGS}")
set(CMAKE_C_FLAGS "-fPIC -fPIE -D_FORTIFY_SOURCE=2 -O2 -Wall -Werror -fstack-protector-strong -Wno-attributes \
-Wno-deprecated-declarations -Wno-missing-braces ${CMAKE_C_FLAGS}")
set(CMAKE_CXX_FLAGS "-fPIC -fPIE -D_FORTIFY_SOURCE=2 -O2 -Wall -Werror -fstack-protector-strong -Wno-attributes \
-Wno-deprecated-declarations -Wno-missing-braces -Wno-overloaded-virtual ${CMAKE_CXX_FLAGS}")
endif()
function(create_library)

View File

@@ -37,12 +37,21 @@ const char *load_input_h = R"RAW(
#ifndef MICRO_EXAMPLE_LOAD_INPUT_LOAD_INPUT_H_
#define MICRO_EXAMPLE_LOAD_INPUT_LOAD_INPUT_H_
#ifdef __cplusplus
extern "C" {
#endif
void *ReadInputData(const char *real_input_path, int *size);
void SaveOutputData(char *final_name, unsigned char *output_data, unsigned int out_size);
int ReadInputsFile(char *path, void **buffers, const int *inputs_size, int inputs_num);
#ifdef __cplusplus
}
#endif
#endif // MICRO_EXAMPLE_LOAD_INPUT_LOAD_INPUT_H_
)RAW";

View File

@@ -0,0 +1,225 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "coder/generator/component/const_blocks/msession.h"
namespace mindspore::lite::micro {
const char *session_header = R"RAW(
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_MICRO_LIBRARY_SOURCE_SESSION_H_
#define MINDSPORE_LITE_MICRO_LIBRARY_SOURCE_SESSION_H_
#include "include/errorcode.h"
#include "include/lite_session.h"
#include "tensor.h"
namespace mindspore {
namespace lite {
#define MS_ERROR_IF_NULL(ptr) \
do { \
if ((ptr) == nullptr) { \
return mindspore::lite::RET_ERROR; \
} \
} while (0)
class LiteSession : public session::LiteSession {
public:
LiteSession() = default;
~LiteSession() override;
void BindThread(bool if_bind) override {}
int CompileGraph(lite::Model *model) override;
std::vector<tensor::MSTensor *> GetInputs() const override;
mindspore::tensor::MSTensor *GetInputsByTensorName(const std::string &tensor_name) const override { return nullptr; }
int RunGraph(const KernelCallBack &before = nullptr, const KernelCallBack &after = nullptr) override;
std::vector<tensor::MSTensor *> GetOutputsByNodeName(const std::string &node_name) const override;
std::unordered_map<std::string, mindspore::tensor::MSTensor *> GetOutputs() const override;
std::vector<std::string> GetOutputTensorNames() const override;
mindspore::tensor::MSTensor *GetOutputByTensorName(const std::string &tensor_name) const override;
int Resize(const std::vector<tensor::MSTensor *> &inputs, const std::vector<std::vector<int>> &dims) override;
int InitRuntimeBuffer();
private:
int SetInputsData(const std::vector<MTensor *> &inputs) const;
std::vector<MTensor *> inputs_;
std::vector<MTensor *> outputs_;
std::unordered_map<std::string, mindspore::tensor::MSTensor *> output_tensor_map_;
std::unordered_map<std::string, std::vector<mindspore::tensor::MSTensor *>> output_node_map_;
void *runtime_buffer_{nullptr};
};
} // namespace lite
} // namespace mindspore
#endif // MINDSPORE_LITE_MICRO_LIBRARY_SOURCE_SESSION_H_
)RAW";
const char *session_source = R"RAW(
int LiteSession::RunGraph(const KernelCallBack &before, const KernelCallBack &after) {
const void *inputs_data[inputs_.size()];
for (size_t i = 0; i < inputs_.size(); ++i) {
inputs_data[i] = inputs_[i]->MutableData();
}
net_SetInputs(inputs_data, inputs_.size());
net_Inference();
void *outputs_data[outputs_.size()];
for (size_t i = 0; i < outputs_.size(); ++i) {
outputs_data[i] = outputs_[i]->MutableData();
}
CopyOutputsData(outputs_data, outputs_.size());
return RET_OK;
}
LiteSession::~LiteSession() {
net_FreeResource();
if (runtime_buffer_ != nullptr) {
free(runtime_buffer_);
runtime_buffer_ = nullptr;
}
for (auto &input : inputs_) {
if (input == nullptr) {
continue;
}
delete input;
input = nullptr;
}
for (auto &item : output_tensor_map_) {
auto output = item.second;
if (output == nullptr) {
continue;
}
delete output;
output = nullptr;
}
}
int LiteSession::InitRuntimeBuffer() {
int buffer_size = net_GetBufferSize();
runtime_buffer_ = malloc(buffer_size);
if (runtime_buffer_ == nullptr) {
return RET_ERROR;
}
int ret = net_SetBuffer(runtime_buffer_);
if (ret != RET_OK) {
return RET_ERROR;
}
return RET_OK;
}
std::vector<tensor::MSTensor *> LiteSession::GetInputs() const {
std::vector<tensor::MSTensor *> inputs;
inputs.insert(inputs.begin(), inputs_.begin(), inputs_.end());
return inputs;
}
std::vector<tensor::MSTensor *> LiteSession::GetOutputsByNodeName(const std::string &node_name) const {
auto iter = output_node_map_.find(node_name);
if (iter == output_node_map_.end()) {
std::vector<tensor::MSTensor *> empty;
return empty;
}
return iter->second;
}
std::unordered_map<std::string, mindspore::tensor::MSTensor *> LiteSession::GetOutputs() const {
return output_tensor_map_;
}
std::vector<std::string> LiteSession::GetOutputTensorNames() const {
std::vector<std::string> output_names;
for (const auto &item : output_node_map_) {
for (const auto &output : item.second) {
output_names.emplace_back(output->tensor_name());
}
}
return output_names;
}
mindspore::tensor::MSTensor *LiteSession::GetOutputByTensorName(const std::string &tensor_name) const {
auto item = output_tensor_map_.find(tensor_name);
if (item == output_tensor_map_.end()) {
return nullptr;
}
return item->second;
}
int LiteSession::Resize(const std::vector<tensor::MSTensor *> &inputs, const std::vector<std::vector<int>> &dims) {
return RET_OK;
}
} // namespace lite
session::LiteSession *session::LiteSession::CreateSession(const lite::Context *context) {
auto *session = new (std::nothrow) lite::LiteSession();
if (session == nullptr) {
return nullptr;
}
if (session->InitRuntimeBuffer() != lite::RET_OK) {
delete session;
return nullptr;
}
return session;
}
session::LiteSession *session::LiteSession::CreateSession(const char *net_buf, size_t size,
const lite::Context *context) {
session::LiteSession *session = CreateSession(context);
if (session == nullptr) {
return nullptr;
}
int ret = session->CompileGraph(nullptr);
if (ret != lite::RET_OK) {
delete session;
return nullptr;
}
net_Init(const_cast<char *>(net_buf), size);
return session;
}
} // namespace mindspore
)RAW";
} // namespace mindspore::lite::micro
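Taken together, a caller drives the generated session like this (hedged sketch; RunGeneratedNet is a hypothetical driver and model_buf is assumed to hold the weight binary that net_Init consumes):

int RunGeneratedNet(const char *model_buf, size_t model_size) {
  mindspore::session::LiteSession *session =
      mindspore::session::LiteSession::CreateSession(model_buf, model_size, nullptr);
  if (session == nullptr) {
    return -1;
  }
  for (auto *input : session->GetInputs()) {
    void *data = input->MutableData();  // fill with input->Size() bytes
    (void)data;
  }
  session->RunGraph();
  auto outputs = session->GetOutputs();  // tensor name -> MSTensor *
  (void)outputs;
  delete session;  // frees the tensors and the runtime buffer
  return 0;
}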

View File

@@ -0,0 +1,28 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_MICRO_GENERATOR_CONST_BLOCK_SESSION_H_
#define MINDSPORE_LITE_MICRO_GENERATOR_CONST_BLOCK_SESSION_H_
namespace mindspore::lite::micro {
extern const char *session_header;
extern const char *session_source;
} // namespace mindspore::lite::micro
#endif // MINDSPORE_LITE_MICRO_GENERATOR_CONST_BLOCK_SESSION_H_

View File

@@ -19,6 +19,7 @@
namespace mindspore::lite::micro {
const char *tensor_header = R"RAW(
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
@@ -60,16 +61,16 @@ struct QuantArg {
class MTensor : public mindspore::tensor::MSTensor {
public:
MTensor() = default;
MTensor(std::string name, enum TypeId type, std::vector<int32_t> shape, void *data)
: tensor_name_(std::move(name)), data_type_(type), shape_(std::move(shape)), data_(data) {}
~MTensor() override = default;
MTensor(std::string name, enum TypeId type, std::vector<int32_t> shape)
: tensor_name_(std::move(name)), data_type_(type), shape_(std::move(shape)) {}
~MTensor() override;
TypeId data_type() const override { return data_type_; }
std::vector<int> shape() const override { return shape_; }
int DimensionSize(size_t index) const override;
int ElementsNum() const override;
size_t Size() const override;
void *MutableData() override { return data_; };
void *MutableData() override;
std::string tensor_name() const override { return tensor_name_; }
void set_tensor_name(const std::string name) override { tensor_name_ = name; }
void set_data(void *data) override { data_ = data; }
@@ -87,9 +88,11 @@ class MTensor : public mindspore::tensor::MSTensor {
#endif // MINDSPORE_LITE_MICRO_LIBRARY_SOURCE_TENSOR_H_
)RAW";
const char *tensor_source = R"RAW(
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
@@ -144,6 +147,13 @@ size_t DataTypeSize(const TypeId type) {
}
}
MTensor::~MTensor() {
if (data_ != nullptr) {
free(data_);
data_ = nullptr;
}
}
int MTensor::DimensionSize(const size_t index) const {
int dim_size = -1;
if (index < shape_.size()) {
@@ -164,6 +174,13 @@ size_t MTensor::Size() const {
size_t element_size = DataTypeSize(data_type_);
return element_size * ElementsNum();
}
void *MTensor::MutableData() {
if (data_ == nullptr) {
data_ = malloc(this->Size());
}
return data_;
}
} // namespace lite
} // namespace mindspore
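With the lazy MutableData() above, MTensor owns its buffer end to end; for example (type and shape invented):

MTensor out("output", kNumberTypeFloat32, {1, 10});
void *data = out.MutableData();  // first call mallocs Size() == 10 * 4 = 40 bytes
// ... read results out of data ...
// ~MTensor() frees the buffer when out goes out of scope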

View File

@@ -84,6 +84,14 @@ void CodeModelParamsForNet(std::ofstream &hofs, std::ofstream &cofs, const std::
cofs << "\n";
}
void CodeInitWeightState(std::ofstream &ofs, const std::string &module_name) {
ofs << "/**\n"
<< " * @param weight_buffer, the address of the weight binary file\n"
<< " * @param weight_size, the size of the model file in bytes\n"
<< " **/\n"
<< "int " << module_name << "_Init(void *weight_buffer, int weight_size);\n\n";
}
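Since module_name is now fixed to kModelName ("net"), this emits the following declaration into the generated weight header:

/**
 * @param weight_buffer, the address of the weight binary file
 * @param weight_size, the size of the model file in bytes
 **/
int net_Init(void *weight_buffer, int weight_size);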
void CodeWeightInitFunc(std::ofstream &ofs, const std::string &module_name, const std::unique_ptr<CoderContext> &ctx) {
ofs << "int " << module_name << "_Init(void *weight_buffer, int weight_size) {\n"
<< " if (weight_buffer == NULL) {\n"

View File

@@ -34,6 +34,8 @@ void CodeModelParamsData(std::ofstream &ofs, const std::map<std::string, Tensor
void SaveDataToNet(const std::map<std::string, Tensor *> &saved_weights, const std::string &net_file);
void CodeModelParamsForNet(std::ofstream &hofs, std::ofstream &cofs, const std::unique_ptr<CoderContext> &ctx);
void CodeInitWeightState(std::ofstream &ofs, const std::string &module_name);
void CodeWeightInitFunc(std::ofstream &ofs, const std::string &module_name, const std::unique_ptr<CoderContext> &ctx);
} // namespace mindspore::lite::micro

View File

@@ -15,16 +15,19 @@
*/
#include "coder/generator/generator.h"
#include <sys/stat.h>
#include <map>
#include <set>
#include <fstream>
#include "coder/generator/component/cmake_component.h"
#include "coder/generator/component/weight_component.h"
#include "coder/generator/component/common_component.h"
#include "coder/generator/component/const_blocks/micro_tensor.h"
#include "coder/generator/component/const_blocks/cmake_lists.h"
#include "coder/generator/component/const_blocks/debug_utils.h"
#include "coder/generator/component/const_blocks/load_input.h"
#include "coder/generator/component/const_blocks/thread_pool.h"
#include "coder/generator/component/const_blocks/msession.h"
#include "coder/generator/component/const_blocks/mtensor.h"
#include "coder/generator/component/const_blocks/benchmark.h"
#include "coder/generator/component/const_blocks/license.h"
#include "coder/log.h"
@@ -48,8 +51,11 @@ Generator::Generator(std::unique_ptr<CoderContext> ctx) {
this->net_inc_hfile_ = module_name + ".h";
this->net_src_cfile_ = module_name + ".c";
this->net_weight_hfile_ = module_name + "_weight.h";
this->net_main_cfile_ = module_name + "_benchmark.c";
if (config_->interface() == Interface_CPP) {
this->net_main_cfile_ = "benchmark.cc";
} else {
this->net_main_cfile_ = "benchmark.c";
}
this->net_src_file_path_ = config_->code_path() + "/src/";
this->net_inc_file_path_ = config_->code_path() + "/include/";
this->net_main_file_path_ = config_->code_path() + "/benchmark/";
@@ -80,9 +86,14 @@ int Generator::CodeBenchmarkCMakeFile() {
MS_CHECK_TRUE(!ofs.bad(), "failed to open file");
MS_LOG(INFO) << "write " << test_cmake_file;
ofs << "include_directories(${CMAKE_CURRENT_SOURCE_DIR})\n";
ofs << "include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../include/)\n";
if (config_->interface() == Interface_CPP) {
ofs << "include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../src/)\n";
ofs << "include_directories(${HEADER_PATH})\n";
} else {
ofs << "include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../include/)\n";
}
ofs << "set(SRC_FILES\n";
ofs << "\t\t" << config_->module_name() + "_benchmark.c\n";
ofs << "\t\t" << net_main_cfile_ << "\n";
ofs << "\t\tload_input.c\n";
ofs << "\t\tdebug_utils.c\n";
ofs << ")\n";
@@ -95,24 +106,32 @@ int Generator::CodeSourceCMakeFile() {
std::ofstream ofs(src_cmake_file);
MS_CHECK_TRUE(!ofs.bad(), "failed to open file");
MS_LOG(INFO) << "write " << src_cmake_file;
CodeCMakeNetLibrary(ofs, config_->module_name(), ctx_, config_->target());
CodeCMakeNetLibrary(ofs, ctx_, config_);
ofs.close();
return RET_OK;
}
int Generator::CodeStaticContent() {
std::vector<std::pair<std::string, std::string>> static_blocks = {
{net_inc_file_path_ + "microtensor.h", micro_tensor_h},
std::vector<std::pair<std::string, std::string>> const_blocks = {
{net_src_file_path_ + "CMakeLists.txt", src_cmake_lists_txt},
{net_main_file_path_ + "debug_utils.h", debug_utils_h},
{net_main_file_path_ + "debug_utils.c", debug_utils_c},
{net_main_file_path_ + "load_input.h", load_input_h},
{net_main_file_path_ + "load_input.c", load_input_c},
{net_main_file_path_ + "CMakeLists.txt", bench_cmake_lists_txt}};
if (config_->support_parallel()) {
static_blocks.emplace_back(net_inc_file_path_ + "thread_pool.h", thread_pool_h);
if (config_->interface() == Interface_CPP) {
const_blocks.emplace_back(net_src_file_path_ + "microtensor.h", micro_tensor_h);
const_blocks.emplace_back(net_src_file_path_ + "session.h", session_header);
const_blocks.emplace_back(net_src_file_path_ + "tensor.h", tensor_header);
const_blocks.emplace_back(net_src_file_path_ + "tensor.cc", tensor_source);
const_blocks.emplace_back(net_main_file_path_ + "benchmark.cc", benchmark_source);
} else {
const_blocks.emplace_back(net_inc_file_path_ + "microtensor.h", micro_tensor_h);
}
for (const auto &static_block : static_blocks) {
if (config_->support_parallel()) {
const_blocks.emplace_back(net_inc_file_path_ + "thread_pool.h", thread_pool_h);
}
for (const auto &static_block : const_blocks) {
std::string file_name = static_block.first;
std::string content = static_block.second;
MS_CHECK_RET_CODE(WriteContentToFile(file_name, content), "write file failed");
@@ -120,6 +139,19 @@ int Generator::CodeStaticContent() {
return RET_OK;
}
int Generator::CodeSessionImplement() {
std::string cfile = net_src_file_path_ + "session.cc";
std::ofstream ofs(cfile);
MS_CHECK_TRUE(!ofs.bad(), "failed to open file");
MS_LOG(INFO) << "write " << cfile;
ofs << g_hwLicense;
ofs << "#include \"session.h\"\n";
ofs << "#include \"net.h\"\n\n";
CodeSessionCompileGraph(ofs, ctx_);
ofs << session_source;
return RET_OK;
}
int Generator::CodeWeightFile() {
// weight header file
std::string hfile = net_src_file_path_ + net_weight_hfile_;
@@ -156,9 +188,13 @@ int Generator::GenerateCode() {
MS_CHECK_RET_CODE(CodeNetCFile(), "code net c file failed.");
MS_CHECK_RET_CODE(CodeWeightFile(), "code weight file failed.");
MS_CHECK_RET_CODE(CodeSourceCMakeFile(), "code net cmake file failed.");
MS_CHECK_RET_CODE(CodeBenchmarkFile(), "code benchmark file failed.");
MS_CHECK_RET_CODE(CodeBenchmarkCMakeFile(), "code benchmark cmake file failed.");
MS_CHECK_RET_CODE(CodeStaticContent(), "code static content failed.");
if (config_->interface() == Interface_CPP) {
MS_CHECK_RET_CODE(CodeSessionImplement(), "code session file failed.");
} else {
MS_CHECK_RET_CODE(CodeBenchmarkFile(), "code benchmark file failed.");
}
return RET_OK;
}
} // namespace mindspore::lite::micro

View File

@@ -34,6 +34,7 @@
#include "coder/utils/type_cast.h"
namespace mindspore::lite::micro {
class Generator {
public:
explicit Generator(std::unique_ptr<CoderContext> ctx);
@@ -66,6 +67,7 @@ class Generator {
int CodeBenchmarkCMakeFile();
int CodeSourceCMakeFile();
int CodeStaticContent();
int CodeSessionImplement();
std::string cmake_file_name_{"net.cmake"};
// the user's generated file's permission

View File

@@ -20,11 +20,18 @@
#include "coder/generator/component/common_component.h"
#include "coder/generator/component/parallel_component.h"
#include "coder/generator/component/benchmark_component.h"
#include "coder/generator/component/weight_component.h"
#include "coder/generator/component/const_blocks/license.h"
#include "coder/generator/component/component.h"
namespace mindspore::lite::micro {
int InferenceGenerator::CodeNetHFile() {
std::string net_include_file = net_inc_file_path_ + net_inc_hfile_;
std::string net_include_file;
if (config_->interface() == Interface_CPP) {
net_include_file = net_src_file_path_ + net_inc_hfile_;
} else {
net_include_file = net_inc_file_path_ + net_inc_hfile_;
}
std::ofstream ofs(net_include_file);
MS_CHECK_TRUE(!ofs.bad(), "failed to open file");
MS_LOG(INFO) << "write " << net_include_file;
@@ -33,7 +40,11 @@ int InferenceGenerator::CodeNetHFile() {
ofs << "#include \"thread_pool.h\"\n";
}
ofs << "#include \"microtensor.h\"\n\n";
ofs << kExternCpp;
CodeInputAndOutputState(ofs, config_->module_name());
if (config_->interface() == Interface_CPP) {
CodeCopyOutputsState(ofs);
}
if (is_get_quant_args_) {
CodeGraphQuantArgsState(ofs, config_->module_name());
}
@@ -45,6 +56,7 @@ int InferenceGenerator::CodeNetHFile() {
}
CodeManageResourceState(ofs, config_->module_name());
CodeInferenceState(ofs, config_->module_name());
ofs << kEndExternCpp;
return RET_OK;
}
@@ -58,6 +70,9 @@ int InferenceGenerator::CodeNetCFile() {
CodeSetGlobalThreadPoolImplement(ofs, config_->module_name());
}
CodeInputAndOutputImplement(ofs, config_->module_name(), ctx_);
if (config_->interface() == Interface_CPP) {
CodeCopyOutputsImplement(ofs, ctx_);
}
CodeInitResourceImplement(ofs, config_->module_name(), ctx_);
CodeFreeResourceImplement(ofs, config_->module_name(), ctx_);
if (is_get_quant_args_) {

View File

@@ -19,6 +19,7 @@
#include <string>
#include "coder/generator/component/common_component.h"
#include "coder/generator/component/benchmark_component.h"
#include "coder/generator/component/weight_component.h"
#include "coder/generator/component/train_component.h"
#include "coder/generator/component/const_blocks/license.h"

View File

@@ -87,6 +87,9 @@ int CoderGraph::ConvertTensors() {
"memcpy_s copy data failed!", delete dstTensor);
dstTensor->set_data(dst_data);
}
if (origin_tensor->name() != nullptr) {
dstTensor->set_tensor_name(origin_tensor->name()->str());
}
auto quant_params = origin_tensor->quantParams();
if (quant_params != nullptr) {
for (int j = 0; j < static_cast<int>(quant_params->size()); j++) {

View File

@@ -75,13 +75,6 @@ void PrintTensorData(const lite::Tensor *tensor, std::ofstream &ofs) {
}
}
template <typename T>
std::string ArrayToString(const std::vector<T> &array) {
std::string result = "{";
std::for_each(array.begin(), array.end(), [&result](const T &t) { result += std::to_string(t) + ", "; });
return result + "}";
}
std::string TensorsToString(const std::vector<Tensor *> &tensors, const std::string &is_input) {
MemoryAllocator *allocator = MemoryAllocator::GetInstance();
std::string info;

View File

@@ -35,6 +35,13 @@ std::vector<std::string> AddDumpDataInfo(const std::vector<std::string> &blocks,
void PrintTensorData(const lite::Tensor *tensor, std::ofstream &ofs);
template <typename T>
std::string ArrayToString(const std::vector<T> &array) {
std::string result;
std::for_each(array.begin(), array.end(), [&result](const T &t) { result += std::to_string(t) + ", "; });
return "{" + result + "}";
}
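A quick usage note for the relocated helper:

std::vector<int> shape{1, 28, 28, 1};
std::string s = ArrayToString(shape);  // yields "{1, 28, 28, 1, }"
// The trailing ", " is harmless where this string lands: emitted C/C++
// initializer lists permit a trailing comma.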
} // namespace mindspore::lite::micro
#endif // MINDSPORE_LITE_MICRO_CODER_UTILS_CODER_UTILS_H_