!13552 uniform interface, remove C API

From: @yangjie159
Reviewed-by: @wangchengyuan,@hangangqiang
Signed-off-by: @wangchengyuan
mindspore-ci-bot 2021-03-19 11:15:02 +08:00 committed by Gitee
commit 9a4ccaf913
35 changed files with 204 additions and 795 deletions

View File

@@ -19,8 +19,6 @@ set(CODER_GENERATOR_SRC
${MICRO_DIR}/coder/generator/generator.cc
${MICRO_DIR}/coder/generator/inference/inference_generator.cc
${MICRO_DIR}/coder/generator/train/train_generator.cc
${MICRO_DIR}/coder/generator/component/component.cc
${MICRO_DIR}/coder/generator/component/benchmark_component.cc
${MICRO_DIR}/coder/generator/component/common_component.cc
${MICRO_DIR}/coder/generator/component/weight_component.cc
${MICRO_DIR}/coder/generator/component/cmake_component.cc
@@ -33,8 +31,6 @@ set(CODER_GENERATOR_SRC
${MICRO_DIR}/coder/generator/component/const_blocks/license.cc
${MICRO_DIR}/coder/generator/component/const_blocks/load_input.cc
${MICRO_DIR}/coder/generator/component/const_blocks/benchmark.cc
${MICRO_DIR}/coder/generator/component/const_blocks/micro_tensor.cc
${MICRO_DIR}/coder/generator/component/const_blocks/thread_pool.cc
)
set(MINDSPORE_CORE

View File

@@ -27,7 +27,7 @@
#include "securec/include/securec.h"
#include "src/common/file_utils.h"
#include "src/common/utils.h"
#include "coder/coder_config.h"
#include "coder/config.h"
#include "coder/generator/component/component.h"
namespace mindspore::lite::micro {
@@ -40,7 +40,6 @@ class CoderFlags : public virtual FlagParser {
AddFlag(&CoderFlags::code_module_name_, "moduleName", "Input code module name", "");
AddFlag(&CoderFlags::target_, "target", "generated code target, x86| ARM32M| ARM32A| ARM64", "x86");
AddFlag(&CoderFlags::code_mode_, "codeMode", "generated code mode, Inference | Train", "Inference");
AddFlag(&CoderFlags::interface_, "interface", "the interface of generated code, CPP | C", "CPP");
AddFlag(&CoderFlags::support_parallel_, "supportParallel", "whether support parallel launch, true | false", false);
AddFlag(&CoderFlags::debug_mode_, "debugMode", "dump the tensors data for debugging, true | false", false);
}
@@ -52,7 +51,6 @@ class CoderFlags : public virtual FlagParser {
std::string code_module_name_;
std::string code_path_;
std::string code_mode_;
std::string interface_;
bool debug_mode_{false};
std::string target_;
};
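For orientation, a hypothetical invocation of the code generator after this change, using only the flags that survive (the binary name and exact flag spelling are assumptions, not taken from this diff):

./codegen --moduleName=net --target=x86 --codeMode=Inference --supportParallel=false --debugMode=false

The --interface flag is gone: the generator now always emits the C++ session interface.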
@@ -90,8 +88,6 @@ int Coder::Init(const CoderFlags &flags) const {
static const std::map<std::string, Target> kTargetMap = {
{"x86", kX86}, {"ARM32M", kARM32M}, {"ARM32A", kARM32A}, {"ARM64", kARM64}, {"All", kAllTargets}};
static const std::map<std::string, CodeMode> kCodeModeMap = {{"Inference", Inference}, {"Train", Train}};
static const std::map<std::string, Interface> kInterfaceMap = {{"CPP", Interface_CPP}, {"C", Interface_C}};
Configurator *config = Configurator::GetInstance();
std::vector<std::function<bool()>> parsers;
@@ -109,13 +105,6 @@ int Coder::Init(const CoderFlags &flags) const {
return true;
});
parsers.emplace_back([&flags, config]() -> bool {
auto item = kInterfaceMap.find(flags.interface_);
MS_CHECK_TRUE_RET_BOOL(item != kInterfaceMap.end(), "unsupported interface: " + flags.code_mode_);
config->set_interface(item->second);
return true;
});
parsers.emplace_back([&flags, config]() -> bool {
if (flags.support_parallel_ && config->target() == kARM32M) {
MS_LOG(ERROR) << "arm32M cannot support parallel.";

View File

@@ -22,35 +22,6 @@
namespace mindspore::lite::micro {
enum Target { kX86 = 0, kARM32M = 1, kARM32A = 2, kARM64 = 3, kAllTargets = 4, kTargetUnknown = 99 };
enum CodeMode { Inference = 0, Train = 1, Code_Unknown = 99 };
enum Interface { Interface_CPP = 0, Interface_C = 1, Interface_Unknown = 99 };
inline const char *EnumNameTarget(Target target) {
switch (target) {
case kX86:
return "kX86";
case kARM32M:
return "kARM32M";
case kARM32A:
return "kARM32A";
case kARM64:
return "kARM64";
case kAllTargets:
return "kAllTargets";
default:
return "kTargetUnknown";
}
}
inline const char *EnumNameCodeMode(CodeMode codeMode) {
switch (codeMode) {
case Inference:
return "Inference";
case Train:
return "Train";
default:
return "Code_Unknown";
}
}
class Configurator {
public:
@@ -71,9 +42,6 @@ class Configurator {
void set_code_mode(CodeMode code_mode) { code_mode_ = code_mode; }
CodeMode code_mode() const { return code_mode_; }
void set_interface(Interface interface) { interface_ = interface; }
Interface interface() const { return interface_; }
void set_debug_mode(bool debug) { debug_mode_ = debug; }
bool debug_mode() const { return debug_mode_; }
@@ -88,7 +56,6 @@ class Configurator {
std::string code_path_;
Target target_{kTargetUnknown};
CodeMode code_mode_{Code_Unknown};
Interface interface_{Interface_CPP};
bool support_parallel_{false};
bool debug_mode_{false};
};
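A minimal sketch of how a caller drives this singleton after the change; only GetInstance and the accessors shown above come from this header, the wrapper function is illustrative:

#include "coder/config.h"

using mindspore::lite::micro::Configurator;

void ConfigureForInference() {
  Configurator *config = Configurator::GetInstance();
  config->set_code_mode(mindspore::lite::micro::Inference);
  config->set_debug_mode(false);
  // set_interface()/interface() no longer exist after this commit
}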

View File

@@ -15,7 +15,7 @@
*/
#include "coder/context.h"
#include "coder/coder_config.h"
#include "coder/config.h"
#include "coder/allocator/allocator.h"
namespace mindspore::lite::micro {

View File

@@ -1,177 +0,0 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "coder/generator/component/benchmark_component.h"
#include <memory>
#include "coder/generator/component/const_blocks/license.h"
#include "coder/log.h"
#include "include/errorcode.h"
#include "nnacl/op_base.h"
namespace mindspore::lite::micro {
constexpr int kWarmUp = 3;
void CodeBenchmarkHeader(std::ofstream &ofs, const std::string &header) {
ofs << g_hwLicense;
ofs << "#include <stdio.h>\n"
"#include <string.h>\n"
"#include <stdlib.h>\n"
"#include <stdint.h>\n"
"#include \"microtensor.h\"\n"
"#include \"load_input.h\"\n"
"#include \"debug_utils.h\"\n";
ofs << "#include \"" << header << "\"\n";
}
void CodeBenchmarkUsage(std::ofstream &ofs) {
ofs << "void usage() {\n"
" printf(\n"
" \"-- mindspore micro params usage:\\n\"\n"
" \"args[0]: executable file\\n\"\n"
" \"args[1]: inputs binary file\\n\"\n"
" \"args[2]: model weight binary file\\n\"\n"
" \"args[3]: loop count for performance test\\n\"\n"
" \"args[4]: runtime thread num\\n\"\n"
" \"args[5]: runtime thread bind mode\\n\\n\");\n"
"}\n\n";
}
void CodeBenchmarkWarmup(std::ofstream &ofs, const std::string &module_name) {
ofs << "// the default number of warm-ups is 3\n"
<< "void " << module_name << "_WarmUp() {\n"
<< " for (int i = 0; i < " << kWarmUp << "; ++i) {\n"
<< " " << module_name << "_Inference();\n"
<< " }\n"
<< "}\n";
}
void CodeBenchmarkSetInputs(std::ofstream &ofs, const std::string &module_name,
const std::unique_ptr<CoderContext> &ctx) {
ofs << "int main(int argc, char **argv) {\n"
" if (argc < 2) {\n"
" MICRO_ERROR(\"input command is invalid\\n\");\n"
" usage();\n"
" return RET_ERROR;\n"
" }\n";
std::vector<Tensor *> inputs = ctx->graph_inputs();
size_t inputs_num = inputs.size();
ofs << " // input shape: ";
std::for_each(inputs.begin(), inputs.end(), [&](Tensor *t) {
ofs << "[ ";
for (int i : t->shape()) {
ofs << i << ", ";
}
ofs << "], ";
});
ofs << "\n";
ofs << " void *inputs_binbuf[" << inputs_num << "];\n";
ofs << " int inputs_size[" << inputs_num << "] = {";
for (size_t i = 0; i < inputs_num; ++i) {
Tensor *input = inputs[i];
ofs << input->Size() << ", ";
}
ofs << "};\n";
ofs << " int ret = ReadInputsFile(argv[1], inputs_binbuf, inputs_size, " << inputs_num
<< ");\n"
" if (ret != RET_OK) {\n"
" MICRO_ERROR(\"read inputs file failed\");\n"
" return RET_ERROR;\n"
" }\n";
ofs << " ret = " << module_name << "_SetInputs((const void **)inputs_binbuf, " << inputs_num
<< ");\n"
" if (ret != RET_OK) {\n"
" MICRO_ERROR(\"set inputs failed\");\n"
" return RET_ERROR;\n"
" }\n";
}
void CodeBenchmarkSetBuffer(std::ofstream &ofs, const std::string &module_name) {
ofs << " int total_buffer_size = " << module_name << "_GetBufferSize();\n";
ofs << " void *buffer = malloc(total_buffer_size);\n";
ofs << " if (buffer == NULL ){\n"
" MICRO_ERROR(\"malloc memory buffer failed\");\n"
" return RET_ERROR;\n"
" }\n";
ofs << " ret = " << module_name
<< "_SetBuffer(buffer);\n"
" if (ret != RET_OK) {\n"
" MICRO_ERROR(\"set inputs failed\");\n"
" return RET_ERROR;\n"
" }\n";
}
void CodeBenchmarkInitWeight(std::ofstream &ofs, const std::string &module_name) {
ofs << " int weight_size = 0;\n"
" void *weight_buffer = ReadInputData(argv[2], &weight_size); \n"
" if("
<< module_name
<< "_Init(weight_buffer, weight_size) != RET_OK) {\n"
" MICRO_ERROR(\"model init failed\");\n"
" "
<< module_name
<< "_FreeResource();\n"
" return RET_ERROR;\n"
" }\n"
" free(weight_buffer);\n"
" weight_buffer = NULL;\n";
}
void CodeBenchmarkInference(std::ofstream &ofs, const std::string &module_name) {
ofs << " if (argc >= 4) {\n"
<< " " << module_name << "_WarmUp();\n"
<< " uint64_t timeAvg = 0;\n"
<< " int loop_count = atoi(argv[3]);\n"
<< " printf(\"======Inference Start======\\n\");\n"
<< " printf(\"cycles: %d\\n\", loop_count);\n"
<< " for (int i = 0; i < loop_count; i++) {\n"
<< " uint64_t runBegin = GetTimeUs();\n"
<< " " << module_name << "_Inference();\n"
<< " uint64_t runEnd = GetTimeUs();\n"
<< " uint64_t time = runEnd - runBegin;\n"
<< " timeAvg += time;\n"
<< " }\n"
<< " float cunCost = (float)timeAvg / 1000.0f;\n"
<< " printf(\"=======Inference End=======\\n\");\n"
" printf(\"total time:\\t %.5fms, per time: \\t %.5fms\\n\", cunCost, cunCost/loop_count);\n"
<< " }\n";
ofs << " " << module_name << "_Inference();\n";
}
void CodeBenchmarkPrintOutputs(std::ofstream &ofs, const std::string &module_name) {
ofs << " // print model outputs \n";
ofs << " const MicroTensorList *outs = " << module_name << "_GetOutputs();\n";
ofs << " for (int i = 0; i < outs->num; ++i) {\n"
" MicroTensor *tensor = outs->tensor + i;\n"
" PrintTensorData(tensor);\n"
" }\n";
ofs << " printf(\"" << module_name << " inference success.\\n\");\n";
}
/**
* 1. free malloc memory buffer
* 2. set input and buffer to NULL, and free packed weight memory
* 3. free input binary memory
*/
void CodeBenchmarkFreeResourse(std::ofstream &ofs, const std::string &module_name, size_t inputs_num) {
ofs << " free(buffer);\n";
ofs << " " << module_name << "_FreeResource();\n";
ofs << " for (int i = 0; i < " << inputs_num << "; ++i) {\n";
ofs << " free(inputs_binbuf[i]);\n"
" }\n"
" return RET_OK;\n"
"}\n\n";
}
} // namespace mindspore::lite::micro

View File

@@ -1,50 +0,0 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_MICRO_CODER_GENERATOR_BENCHMARK_COMPONENT_H_
#define MINDSPORE_LITE_MICRO_CODER_GENERATOR_BENCHMARK_COMPONENT_H_
#include <map>
#include <string>
#include <vector>
#include <memory>
#include <fstream>
#include "src/tensor.h"
#include "coder/context.h"
namespace mindspore::lite::micro {
void CodeBenchmarkHeader(std::ofstream &ofs, const std::string &header);
void CodeBenchmarkUsage(std::ofstream &ofs);
void CodeBenchmarkWarmup(std::ofstream &ofs, const std::string &module_name);
void CodeBenchmarkSetInputs(std::ofstream &ofs, const std::string &module_name,
const std::unique_ptr<CoderContext> &ctx);
void CodeBenchmarkSetBuffer(std::ofstream &ofs, const std::string &module_name);
void CodeBenchmarkInitWeight(std::ofstream &ofs, const std::string &module_name);
void CodeBenchmarkInference(std::ofstream &ofs, const std::string &module_name);
void CodeBenchmarkPrintOutputs(std::ofstream &ofs, const std::string &module_name);
void CodeBenchmarkFreeResourse(std::ofstream &ofs, const std::string &module_name, size_t inputs_num);
} // namespace mindspore::lite::micro
#endif // MINDSPORE_LITE_MICRO_CODER_GENERATOR_BENCHMARK_COMPONENT_H_

View File

@@ -32,10 +32,11 @@ void CodeCMakeNetLibrary(std::ofstream &ofs, const std::unique_ptr<CoderContext>
ofs << " " << c_file << ".o\n";
}
ofs << " net_weight.c.o\n"
<< " net.c.o\n";
if (config->interface() == Interface_CPP) {
ofs << " session.cc.o\n"
<< " tensor.cc.o\n";
<< " net.c.o\n"
<< " session.cc.o\n"
<< " tensor.cc.o\n";
if (config->debug_mode()) {
ofs << " debug_utils.c.o\n";
}
ofs << ")\n";
std::set<std::string> kernel_cmake_asm_set_files = ctx->asm_files();

View File

@@ -23,7 +23,7 @@
#include <memory>
#include <fstream>
#include "src/tensor.h"
#include "coder/coder_config.h"
#include "coder/config.h"
#include "coder/context.h"
namespace mindspore::lite::micro {

View File

@@ -25,12 +25,6 @@
#include "nnacl/op_base.h"
namespace mindspore::lite::micro {
void CodeSourceFileInclude(std::ofstream &ofs, const std::string &weight_file, const std::string &header) {
ofs << g_hwLicense << "#include \"microtensor.h\"\n"
<< "#include \"" << weight_file << "\"\n"
<< "#include \"" << header << "\"\n\n";
}
void CodeSessionCompileGraph(std::ofstream &ofs, const std::unique_ptr<CoderContext> &ctx) {
std::vector<Tensor *> inputs = ctx->graph_inputs();
std::vector<Tensor *> outputs = ctx->graph_outputs();
@@ -84,7 +78,7 @@ void CodeCopyOutputsImplement(std::ofstream &ofs, const std::unique_ptr<CoderCon
"}\n\n";
}
void CodeInputAndOutputState(std::ofstream &ofs, const std::string &module_name) {
void CodeInputState(std::ofstream &ofs, const std::string &module_name) {
ofs << "/**\n"
<< " * set input tensors\n"
<< " * @param inputs, the input data ptr's array of the model, the tensors' count of input may be greater than "
@@ -92,37 +86,9 @@ void CodeInputAndOutputState(std::ofstream &ofs, const std::string &module_name)
<< " * @param num, the input data's number of the model.\n"
<< " **/\n"
<< "int " << module_name << "_SetInputs(const void **inputs, int num);\n\n";
ofs << "/**\n"
<< " * get output tensor of the model \n"
<< " **/\n"
<< "const MicroTensorList *" << module_name << "_GetOutputs();\n\n";
}
void PrintMicroTensors(std::ofstream &ofs, std::vector<Tensor *> tensors, const std::string &name,
const std::map<Tensor *, std::string> &tensors_map) {
for (size_t i = 0; i < tensors.size(); ++i) {
Tensor *tensor = tensors[i];
auto item = tensors_map.find(tensor);
if (item == tensors_map.end()) {
MS_LOG(ERROR) << "nonexistent tensor";
break;
}
ofs << " static int dim" << i << "[] = {";
for (size_t j = 0; j < tensor->shape().size(); ++j) {
ofs << tensor->shape()[j] << ", ";
}
ofs << "};\n"
<< " " << name << "[" << i << "].ndim = " << tensor->shape().size() << ";\n"
<< " " << name << "[" << i << "].dim = dim" << i << ";\n"
<< " " << name << "[" << i << "].type = " << EnumMicroTensorDataType(tensor->data_type()) << ";\n"
<< " " << name << "[" << i << "].format = " << EnumMicroTensorFormat(tensor->format()) << ";\n"
<< " " << name << "[" << i << "].data =" << item->second << ";\n";
}
}
void CodeInputAndOutputImplement(std::ofstream &ofs, const std::string &module_name,
const std::unique_ptr<CoderContext> &ctx) {
void CodeInputImplement(std::ofstream &ofs, const std::string &module_name, const std::unique_ptr<CoderContext> &ctx) {
// input tensors
std::vector<Tensor *> inputs = ctx->graph_inputs();
for (size_t i = 0; i < inputs.size(); ++i) {
@@ -140,20 +106,6 @@ void CodeInputAndOutputImplement(std::ofstream &ofs, const std::string &module_n
ofs << "\t" << ctx->input_name() << i << " = inputs[" << i << "];\n";
}
ofs << " return RET_OK;\n}\n";
// output tensors
std::vector<Tensor *> outputs = ctx->graph_outputs();
size_t output_num = outputs.size();
std::string output_name = ctx->output_name();
ofs << "const MicroTensorList* " << module_name << "_GetOutputs() {\n"
<< " static MicroTensor " << output_name << "[" << output_num << "] ;\n";
PrintMicroTensors(ofs, outputs, output_name, ctx->tensors_map());
ofs << " static MicroTensorList " << module_name << "_TensorArray;\n"
<< " " << module_name << "_TensorArray.num = " << output_num << ";\n"
<< " " << module_name << "_TensorArray.tensor = &" << output_name << "[0];\n"
<< " return &" << module_name << "_TensorArray; \n}\n";
}
void CodeGraphQuantArgsState(std::ofstream &ofs, const std::string &module_name) {

View File

@@ -26,16 +26,13 @@
#include "coder/context.h"
namespace mindspore::lite::micro {
void CodeSourceFileInclude(std::ofstream &ofs, const std::string &weight_file, const std::string &header);
void CodeSessionCompileGraph(std::ofstream &ofs, const std::unique_ptr<CoderContext> &ctx);
void CodeCopyOutputsState(std::ofstream &ofs);
void CodeCopyOutputsImplement(std::ofstream &ofs, const std::unique_ptr<CoderContext> &ctx);
void CodeInputAndOutputState(std::ofstream &ofs, const std::string &module_name);
void CodeInputAndOutputImplement(std::ofstream &ofs, const std::string &module_name,
const std::unique_ptr<CoderContext> &ctx);
void CodeInputState(std::ofstream &ofs, const std::string &module_name);
void CodeInputImplement(std::ofstream &ofs, const std::string &module_name, const std::unique_ptr<CoderContext> &ctx);
void CodeGraphQuantArgsState(std::ofstream &ofs, const std::string &module_name);
void CodeGraphQuantArgsImplement(std::ofstream &ofs, const std::string &module_name,

View File

@@ -1,43 +0,0 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "coder/generator/component/component.h"
namespace mindspore::lite::micro {
const char *kModelName = "net";
const char *kSession = "session";
const char *kByteType = "unsigned char *";
const char *kConstByteType = "const unsigned char *";
const char *kNameSpaceMindSpore = "namespace mindspore";
const char *kNameSpaceLite = "namespace lite";
const char *kExternCpp = R"RAW(
#ifdef __cplusplus
extern "C" {
#endif
)RAW";
const char *kEndExternCpp = R"RAW(
#ifdef __cplusplus
}
#endif
)RAW";
} // namespace mindspore::lite::micro

View File

@@ -19,18 +19,30 @@
namespace mindspore::lite::micro {
extern const char *kModelName;
constexpr auto kModelName = "net";
extern const char *kSession;
constexpr auto kSourcePath = "/src/";
extern const char *kByteType;
extern const char *kConstByteType;
constexpr auto kBenchmarkPath = "/benchmark/";
constexpr auto kBenchmarkFile = "benchmark.cc";
extern const char *kNameSpaceMindSpore;
extern const char *kNameSpaceLite;
constexpr auto kSession = "session";
constexpr auto kTensor = "tensor";
extern const char *kExternCpp;
extern const char *kEndExternCpp;
constexpr auto kNameSpaceMindSpore = "namespace mindspore";
constexpr auto kNameSpaceLite = "namespace lite";
constexpr auto kDebugUtils = "debug_utils.h";
constexpr auto kExternCpp =
"#ifdef __cplusplus\n"
"extern \"C\" {\n"
"#endif\n";
constexpr char kEndExternCpp[] =
"#ifdef __cplusplus\n"
"}\n"
"#endif\n";
} // namespace mindspore::lite::micro
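To illustrate the kExternCpp / kEndExternCpp pair: the generated net.h keeps its C declarations callable from the C++ session and benchmark code by wrapping them like this (the SetInputs declaration is reconstructed from common_component.cc with module_name equal to kModelName, i.e. "net"; treat it as a sketch, not verbatim generator output):

#ifdef __cplusplus
extern "C" {
#endif

int net_SetInputs(const void **inputs, int num);

#ifdef __cplusplus
}
#endif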

View File

@@ -19,6 +19,7 @@
namespace mindspore::lite::micro {
const char *benchmark_source = R"RAW(
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
@@ -58,6 +59,55 @@ void usage() {
"args[5]: runtime thread bind mode\n\n");
}
template <typename T>
void PrintData(void *data, size_t data_number) {
if (data == nullptr) {
return;
}
auto casted_data = static_cast<T *>(data);
for (size_t i = 0; i < 10 && i < data_number; i++) {
std::cout << std::to_string(casted_data[i]) << ", ";
}
std::cout << std::endl;
}
void TensorToString(tensor::MSTensor *tensor) {
uint8_t i = 0;
std::cout << "uint8: " << i << std::endl;
std::cout << "Name: " << tensor->tensor_name();
std::cout << ", DataType: " << tensor->data_type();
std::cout << ", Size: " << tensor->Size();
std::cout << ", Shape:";
for (auto &dim : tensor->shape()) {
std::cout << " " << dim;
}
std::cout << ", Data:" << std::endl;
switch (tensor->data_type()) {
case kNumberTypeFloat32: {
PrintData<float>(tensor->MutableData(), tensor->ElementsNum());
} break;
case kNumberTypeFloat16: {
PrintData<int16_t>(tensor->MutableData(), tensor->ElementsNum());
} break;
case kNumberTypeInt32: {
PrintData<int32_t>(tensor->MutableData(), tensor->ElementsNum());
} break;
case kNumberTypeInt16: {
PrintData<int16_t>(tensor->MutableData(), tensor->ElementsNum());
} break;
case kNumberTypeInt8: {
PrintData<int8_t>(tensor->MutableData(), tensor->ElementsNum());
} break;
case kNumberTypeUInt8: {
PrintData<uint8_t>(tensor->MutableData(), tensor->ElementsNum());
} break;
default:
std::cout << "Unsupported data type to print" << std::endl;
break;
}
}
int main(int argc, const char **argv) {
if (argc < 2) {
std::cout << "input command is invalid\n" << std::endl;
@@ -104,7 +154,7 @@ int main(int argc, const char **argv) {
std::cout << "output size: " << outputs.size() << std::endl;
for (const auto &item : outputs) {
auto output = item.second;
std::cout << "name: " << output->tensor_name() << ", size: " << output->Size() << std::endl;
TensorToString(output);
}
std::cout << "run benchmark success" << std::endl;

View File

@@ -26,7 +26,12 @@ if(NOT DEFINED MODEL_LIB)
message(FATAL_ERROR "MODEL_LIB not set")
endif()
if(NOT DEFINED HEADER_PATH)
message(FATAL_ERROR "HEADER_PATH not set")
endif()
get_filename_component(MODEL_LIB ${MODEL_LIB} ABSOLUTE BASE_DIR ${CMAKE_CURRENT_BINARY_DIR})
get_filename_component(HEADER_PATH ${HEADER_PATH} ABSOLUTE BASE_DIR ${CMAKE_CURRENT_BINARY_DIR})
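A hypothetical configure step that satisfies both required cache variables; the concrete paths and library name below are examples only, not taken from this diff:

cmake -DMODEL_LIB=../src/libnet.a -DHEADER_PATH=../include <path-to-benchmark-source>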
function(parse_lib_info lib_full_path lib_name lib_path)
string(FIND "${lib_full_path}" "/" POS REVERSE)
@@ -68,9 +73,9 @@ if("${CMAKE_BUILD_TYPE}" STREQUAL "Debug")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fvisibility=default")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fvisibility=default")
else()
set(CMAKE_C_FLAGS "-fPIC -fPIE -D_FORTIFY_SOURCE=2 -O2 -Wall -Werror -fstack-protector-strong -Wno-attributes \
set(CMAKE_C_FLAGS "-fPIC -fPIE -D_FORTIFY_SOURCE=2 -O3 -Wall -Werror -fstack-protector-strong -Wno-attributes \
-Wno-deprecated-declarations -Wno-missing-braces ${CMAKE_C_FLAGS}")
set(CMAKE_CXX_FLAGS "-fPIC -fPIE -D_FORTIFY_SOURCE=2 -O2 -Wall -Werror -fstack-protector-strong -Wno-attributes \
set(CMAKE_CXX_FLAGS "-fPIC -fPIE -D_FORTIFY_SOURCE=2 -O3 -Wall -Werror -fstack-protector-strong -Wno-attributes \
-Wno-deprecated-declarations -Wno-missing-braces -Wno-overloaded-virtual ${CMAKE_CXX_FLAGS}")
endif()
link_directories(${MODEL_LIB_PATH})
@@ -92,8 +97,13 @@ if(NOT DEFINED OP_HEADER_PATH)
message(FATAL_ERROR "OP_HEADER_PATH not set")
endif()
if(NOT DEFINED HEADER_PATH)
message(FATAL_ERROR "HEADER_PATH not set")
endif()
get_filename_component(OP_LIB ${OP_LIB} ABSOLUTE BASE_DIR ${CMAKE_CURRENT_BINARY_DIR})
get_filename_component(OP_HEADER_PATH ${OP_HEADER_PATH} ABSOLUTE BASE_DIR ${CMAKE_CURRENT_BINARY_DIR})
get_filename_component(HEADER_PATH ${HEADER_PATH} ABSOLUTE BASE_DIR ${CMAKE_CURRENT_BINARY_DIR})
message("operator lib path: ${OP_LIB}")
message("operator header path: ${OP_HEADER_PATH}")
@@ -130,9 +140,9 @@ if("${CMAKE_BUILD_TYPE}" STREQUAL "Debug")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fvisibility=default")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fvisibility=default")
else()
set(CMAKE_C_FLAGS "-fPIC -fPIE -D_FORTIFY_SOURCE=2 -O2 -Wall -Werror -fstack-protector-strong -Wno-attributes \
set(CMAKE_C_FLAGS "-fPIC -fPIE -D_FORTIFY_SOURCE=2 -O3 -Wall -Werror -fstack-protector-strong -Wno-attributes \
-Wno-deprecated-declarations -Wno-missing-braces ${CMAKE_C_FLAGS}")
set(CMAKE_CXX_FLAGS "-fPIC -fPIE -D_FORTIFY_SOURCE=2 -O2 -Wall -Werror -fstack-protector-strong -Wno-attributes \
set(CMAKE_CXX_FLAGS "-fPIC -fPIE -D_FORTIFY_SOURCE=2 -O3 -Wall -Werror -fstack-protector-strong -Wno-attributes \
-Wno-deprecated-declarations -Wno-missing-braces -Wno-overloaded-virtual ${CMAKE_CXX_FLAGS}")
endif()

View File

@@ -42,14 +42,54 @@ const char *debug_utils_h = R"RAW(
#include <sys/time.h>
#include <time.h>
#include <stdint.h>
#include "microtensor.h"
#define MICRO_INFO(content, args...) \
{ printf("[INFO] %s|%d: " #content "\r\n", __func__, __LINE__, ##args); }
#define MICRO_ERROR(content, args...) \
{ printf("[ERROR] %s|%d: " #content "\r\n", __func__, __LINE__, ##args); }
enum DataType {
DataType_DT_FLOAT = 0,
DataType_DT_FLOAT16 = 1,
DataType_DT_INT8 = 2,
DataType_DT_INT32 = 3,
DataType_DT_UINT8 = 4,
DataType_DT_INT16 = 5,
DataType_DT_UINT32 = 8,
DataType_DT_INT64 = 9,
DataType_DT_UINT16 = 10,
DataType_DT_UNDEFINED = 16,
DataType_MIN = DataType_DT_FLOAT,
DataType_MAX = DataType_DT_UNDEFINED
};
enum Format {
Format_NCHW = 0,
Format_NHWC = 1,
Format_HWKC = 2,
Format_HWCK = 3,
Format_KCHW = 4,
Format_CKHW = 5,
Format_KHWC = 6,
Format_CHWK = 7,
Format_NC4HW4 = 100,
Format_NUM_OF_FORMAT = 101,
Format_MIN = Format_NCHW,
Format_MAX = Format_NUM_OF_FORMAT
};
typedef struct {
enum DataType type;
enum Format format;
int ndim;
int *dim;
void *data;
} MicroTensor;
void PrintTensor(MicroTensor *tensor, FILE *output_file, const char *is_input);
void PrintTensorData(MicroTensor *tensor);
uint64_t GetTimeUs();
#endif // MINDSPORE_LITE_MICRO_MICRODEBUGUTIL_H_
)RAW";
@@ -259,17 +299,6 @@ void PrintTensor(MicroTensor *tensor, FILE *output_file, const char *is_input) {
(void)fflush(output_file);
}
uint64_t GetTimeUs() {
const int USEC = 1000000;
const int MSEC = 1000;
struct timespec ts = {0, 0};
if (clock_gettime(CLOCK_MONOTONIC, &ts) != 0) {
return 0;
}
uint64_t retval = (uint64_t)((ts.tv_sec * USEC) + (ts.tv_nsec / MSEC));
return retval;
}
)RAW";
} // namespace mindspore::lite::micro

View File

@@ -1,111 +0,0 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "coder/generator/component/const_blocks/micro_tensor.h"
namespace mindspore::lite::micro {
const char *micro_tensor_h = R"RAW(
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MSMICRO_TENSOR_H
#define MSMICRO_TENSOR_H
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>
#define MICRO_INFO(content, args...) \
{ printf("[INFO] %s|%d: " #content "\r\n", __func__, __LINE__, ##args); }
#define MICRO_ERROR(content, args...) \
{ printf("[ERROR] %s|%d: " #content "\r\n", __func__, __LINE__, ##args); }
enum STATUS {
RET_OK = 0,
RET_ERROR = 1,
};
enum DataType {
DataType_DT_FLOAT = 0,
DataType_DT_FLOAT16 = 1,
DataType_DT_INT8 = 2,
DataType_DT_INT32 = 3,
DataType_DT_UINT8 = 4,
DataType_DT_INT16 = 5,
DataType_DT_UINT32 = 8,
DataType_DT_INT64 = 9,
DataType_DT_UINT16 = 10,
DataType_DT_UNDEFINED = 16,
DataType_MIN = DataType_DT_FLOAT,
DataType_MAX = DataType_DT_UNDEFINED
};
enum Format {
Format_NCHW = 0,
Format_NHWC = 1,
Format_HWKC = 2,
Format_HWCK = 3,
Format_KCHW = 4,
Format_CKHW = 5,
Format_KHWC = 6,
Format_CHWK = 7,
Format_NC4HW4 = 100,
Format_NUM_OF_FORMAT = 101,
Format_MIN = Format_NCHW,
Format_MAX = Format_NUM_OF_FORMAT
};
typedef struct {
enum DataType type;
enum Format format;
int ndim;
int *dim;
void *data;
} MicroTensor;
typedef struct {
int num;
MicroTensor *tensor;
} MicroTensorList;
typedef struct {
float in_scale;
float out_scale;
int in_zero_point;
int out_zero_point;
} GraphQuantArgs;
#endif // MSMICRO_TENSOR_H
)RAW";
} // namespace mindspore::lite::micro

View File

@@ -1,25 +0,0 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_MICRO_CODER_GENERATOR_CONST_BLOCKS_MICRO_TENSOR_H_
#define MINDSPORE_LITE_MICRO_CODER_GENERATOR_CONST_BLOCKS_MICRO_TENSOR_H_
namespace mindspore::lite::micro {
extern const char *micro_tensor_h;
} // namespace mindspore::lite::micro
#endif // MINDSPORE_LITE_MICRO_CODER_GENERATOR_CONST_BLOCKS_MICRO_TENSOR_H_

View File

@@ -1,99 +0,0 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "coder/generator/component/const_blocks/thread_pool.h"
namespace mindspore::lite::micro {
const char *thread_pool_h = R"RAW(
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_SRC_RUNTIME_THREAD_POOL_H_
#define MINDSPORE_LITE_SRC_RUNTIME_THREAD_POOL_H_
#include <stdbool.h>
#define MAX_TASK_NUM (2)
// brief BindMode defined for holding bind cpu strategy argument.
typedef enum {
NO_BIND_MODE = 0, /**< no bind */
HIGHER_MODE = 1, /**< bind higher cpu first */
MID_MODE = 2 /**< bind middle cpu first */
} BindMode;
struct ThreadPool;
struct ThreadPool *CreateThreadPool(int thread_num, int mode);
/**
*
* @param session_index, support multi session
* @param job
* @param content
* @param task_num
*/
int ParallelLaunch(struct ThreadPool *thread_pool, int (*job)(void *, int), void *content, int task_num);
/**
* bind each thread to specified cpu core
* @param is_bind
* @param mode
*/
int BindThreads(struct ThreadPool *thread_pool, bool is_bind, int mode);
/**
* activate the thread pool
* @param thread_pool_id
*/
void ActivateThreadPool(struct ThreadPool *thread_pool);
/**
* deactivate the thread pool
* @param thread_pool_id
*/
void DeactivateThreadPool(struct ThreadPool *thread_pool);
/**
*
* @return current thread num
*/
int GetCurrentThreadNum(struct ThreadPool *thread_pool);
/**
* destroy thread pool, and release resource
*/
void DestroyThreadPool(struct ThreadPool *thread_pool);
#endif // MINDSPORE_LITE_SRC_RUNTIME_THREAD_POOL_H_
)RAW";
} // namespace mindspore::lite::micro
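For reference, the thread-pool API this commit stops shipping was used roughly as follows; the job body and the RunParallel wrapper are assumptions, while the calls themselves come from the header above:

static int DoWork(void *content, int task_id) {
  // partition the workload by task_id; content carries shared state
  return 0;
}

void RunParallel() {
  struct ThreadPool *pool = CreateThreadPool(MAX_TASK_NUM, NO_BIND_MODE);
  ParallelLaunch(pool, DoWork, NULL, MAX_TASK_NUM);
  DestroyThreadPool(pool);
}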

View File

@@ -1,26 +0,0 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_MICRO_CODER_GENERATOR_CONST_BLOCKS_THREAD_POOL_H_
#define MINDSPORE_LITE_MICRO_CODER_GENERATOR_CONST_BLOCKS_THREAD_POOL_H_
namespace mindspore::lite::micro {
extern const char *thread_pool_h;
} // namespace mindspore::lite::micro
#endif // MINDSPORE_LITE_MICRO_CODER_GENERATOR_CONST_BLOCKS_THREAD_POOL_H_

View File

@@ -30,8 +30,11 @@ void CodeWeightFileHeader(std::ofstream &ofs, const std::unique_ptr<CoderContext
}
ofs << "#include <stdlib.h>\n"
<< "#include <string.h>\n"
<< "#include \"microtensor.h\"\n\n"
<< "extern unsigned char *" << ctx->buffer_name() << ";\n";
ofs << "enum STATUS {\n"
" RET_OK = 0,\n"
" RET_ERROR = 1,\n"
"};\n\n";
}
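The net effect on a generated net_weight.h prologue: instead of including microtensor.h, it now declares the shared buffer and inlines the STATUS enum, roughly like this (the buffer symbol comes from ctx->buffer_name() and is model-specific; the name below is assumed):

#include <stdlib.h>
#include <string.h>

extern unsigned char *g_Buffer;

enum STATUS {
  RET_OK = 0,
  RET_ERROR = 1,
};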
void CodeModelParamsState(std::ofstream &ofs, const std::map<std::string, Tensor *> &weights) {

View File

@@ -23,7 +23,7 @@
#include <memory>
#include <fstream>
#include "src/tensor.h"
#include "coder/coder_config.h"
#include "coder/config.h"
#include "coder/context.h"
namespace mindspore::lite::micro {

View File

@@ -17,14 +17,13 @@
#include <sys/stat.h>
#include <set>
#include <fstream>
#include "coder/generator/component/component.h"
#include "coder/generator/component/cmake_component.h"
#include "coder/generator/component/weight_component.h"
#include "coder/generator/component/common_component.h"
#include "coder/generator/component/const_blocks/micro_tensor.h"
#include "coder/generator/component/const_blocks/cmake_lists.h"
#include "coder/generator/component/const_blocks/debug_utils.h"
#include "coder/generator/component/const_blocks/load_input.h"
#include "coder/generator/component/const_blocks/thread_pool.h"
#include "coder/generator/component/const_blocks/msession.h"
#include "coder/generator/component/const_blocks/mtensor.h"
#include "coder/generator/component/const_blocks/benchmark.h"
@@ -51,14 +50,8 @@ Generator::Generator(std::unique_ptr<CoderContext> ctx) {
this->net_inc_hfile_ = module_name + ".h";
this->net_src_cfile_ = module_name + ".c";
this->net_weight_hfile_ = module_name + "_weight.h";
if (config_->interface() == Interface_CPP) {
this->net_main_cfile_ = "benchmark.cc";
} else {
this->net_main_cfile_ = "benchmark.c";
}
this->net_src_file_path_ = config_->code_path() + "/src/";
this->net_inc_file_path_ = config_->code_path() + "/include/";
this->net_main_file_path_ = config_->code_path() + "/benchmark/";
this->net_src_file_path_ = config_->code_path() + kSourcePath;
this->net_main_file_path_ = config_->code_path() + kBenchmarkPath;
origin_umask_ = umask(user_umask_);
MS_LOG(DEBUG) << "origin umask: " << origin_umask_ << ", user umask: " << user_umask_;
}
@@ -86,16 +79,11 @@ int Generator::CodeBenchmarkCMakeFile() {
MS_CHECK_TRUE(!ofs.bad(), "failed to open file");
MS_LOG(INFO) << "write " << test_cmake_file;
ofs << "include_directories(${CMAKE_CURRENT_SOURCE_DIR})\n";
if (config_->interface() == Interface_CPP) {
ofs << "include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../src/)\n";
ofs << "include_directories(${HEADER_PATH})\n";
} else {
ofs << "include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../include/)\n";
}
ofs << "include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../src/)\n";
ofs << "include_directories(${HEADER_PATH})\n";
ofs << "set(SRC_FILES\n";
ofs << "\t\t" << net_main_cfile_ << "\n";
ofs << "\t\t" << kBenchmarkFile << "\n";
ofs << "\t\tload_input.c\n";
ofs << "\t\tdebug_utils.c\n";
ofs << ")\n";
ofs.close();
return RET_OK;
@@ -113,23 +101,17 @@ int Generator::CodeSourceCMakeFile() {
int Generator::CodeStaticContent() {
std::vector<std::pair<std::string, std::string>> const_blocks = {
{net_src_file_path_ + "CMakeLists.txt", src_cmake_lists_txt},
{net_main_file_path_ + "debug_utils.h", debug_utils_h},
{net_main_file_path_ + "debug_utils.c", debug_utils_c},
{net_main_file_path_ + "load_input.h", load_input_h},
{net_main_file_path_ + "load_input.c", load_input_c},
{net_main_file_path_ + "CMakeLists.txt", bench_cmake_lists_txt}};
if (config_->interface() == Interface_CPP) {
const_blocks.emplace_back(net_src_file_path_ + "microtensor.h", micro_tensor_h);
const_blocks.emplace_back(net_src_file_path_ + "session.h", session_header);
const_blocks.emplace_back(net_src_file_path_ + "tensor.h", tensor_header);
const_blocks.emplace_back(net_src_file_path_ + "tensor.cc", tensor_source);
const_blocks.emplace_back(net_main_file_path_ + "benchmark.cc", benchmark_source);
} else {
const_blocks.emplace_back(net_inc_file_path_ + "microtensor.h", micro_tensor_h);
}
if (config_->support_parallel()) {
const_blocks.emplace_back(net_inc_file_path_ + "thread_pool.h", thread_pool_h);
{net_main_file_path_ + "CMakeLists.txt", bench_cmake_lists_txt},
{net_main_file_path_ + "benchmark.cc", benchmark_source},
{net_src_file_path_ + "CMakeLists.txt", src_cmake_lists_txt},
{net_src_file_path_ + "session.h", session_header},
{net_src_file_path_ + "tensor.h", tensor_header},
{net_src_file_path_ + "tensor.cc", tensor_source}};
if (config_->debug_mode()) {
const_blocks.emplace_back(std::make_pair(net_src_file_path_ + "debug_utils.h", debug_utils_h));
const_blocks.emplace_back(std::make_pair(net_src_file_path_ + "debug_utils.c", debug_utils_c));
}
for (const auto &static_block : const_blocks) {
std::string file_name = static_block.first;
@@ -190,11 +172,7 @@ int Generator::GenerateCode() {
MS_CHECK_RET_CODE(CodeSourceCMakeFile(), "code net cmake file failed.");
MS_CHECK_RET_CODE(CodeBenchmarkCMakeFile(), "code benchmark cmake file failed.");
MS_CHECK_RET_CODE(CodeStaticContent(), "code static content failed.");
if (config_->interface() == Interface_CPP) {
MS_CHECK_RET_CODE(CodeSessionImplement(), "code session file failed.");
} else {
MS_CHECK_RET_CODE(CodeBenchmarkFile(), "code benchmark file failed.");
}
MS_CHECK_RET_CODE(CodeSessionImplement(), "code session file failed.");
return RET_OK;
}
} // namespace mindspore::lite::micro

View File

@@ -29,7 +29,7 @@
#include "include/errorcode.h"
#include "src/tensor.h"
#include "coder/log.h"
#include "coder/coder_config.h"
#include "coder/config.h"
#include "coder/context.h"
#include "coder/utils/type_cast.h"
@@ -43,7 +43,6 @@ class Generator {
int GenerateCode();
protected:
virtual int CodeBenchmarkFile() = 0;
virtual int CodeNetHFile() = 0;
virtual int CodeNetCFile() = 0;
virtual int CodeWeightFile();
@@ -56,10 +55,8 @@ class Generator {
bool is_get_quant_args_{false};
std::string net_inc_hfile_;
std::string net_src_cfile_;
std::string net_main_cfile_;
std::string net_weight_hfile_;
std::string net_inc_file_path_;
std::string net_src_file_path_;
std::string net_main_file_path_;

View File

@@ -19,7 +19,6 @@
#include <string>
#include "coder/generator/component/common_component.h"
#include "coder/generator/component/parallel_component.h"
#include "coder/generator/component/benchmark_component.h"
#include "coder/generator/component/weight_component.h"
#include "coder/generator/component/const_blocks/license.h"
#include "coder/generator/component/component.h"
@@ -27,11 +26,7 @@
namespace mindspore::lite::micro {
int InferenceGenerator::CodeNetHFile() {
std::string net_include_file;
if (config_->interface() == Interface_CPP) {
net_include_file = net_src_file_path_ + net_inc_hfile_;
} else {
net_include_file = net_inc_file_path_ + net_inc_hfile_;
}
net_include_file = net_src_file_path_ + net_inc_hfile_;
std::ofstream ofs(net_include_file);
MS_CHECK_TRUE(!ofs.bad(), "failed to open file");
MS_LOG(INFO) << "write " << net_include_file;
@@ -39,12 +34,9 @@ int InferenceGenerator::CodeNetHFile() {
if (config_->support_parallel()) {
ofs << "#include \"thread_pool.h\"\n";
}
ofs << "#include \"microtensor.h\"\n\n";
ofs << kExternCpp;
CodeInputAndOutputState(ofs, config_->module_name());
if (config_->interface() == Interface_CPP) {
CodeCopyOutputsState(ofs);
}
CodeInputState(ofs, config_->module_name());
CodeCopyOutputsState(ofs);
if (is_get_quant_args_) {
CodeGraphQuantArgsState(ofs, config_->module_name());
}
@@ -65,14 +57,17 @@ int InferenceGenerator::CodeNetCFile() {
std::ofstream ofs(net_impl_file);
MS_CHECK_TRUE(!ofs.bad(), "failed to open file");
MS_LOG(INFO) << "write " << net_impl_file;
CodeSourceFileInclude(ofs, net_weight_hfile_, net_inc_hfile_);
ofs << g_hwLicense << "\n"
<< "#include \"" << net_weight_hfile_ << "\"\n"
<< "#include \"" << net_inc_hfile_ << "\"\n\n";
if (config_->debug_mode()) {
ofs << "#include \"" << kDebugUtils << "\"\n";
}
if (config_->support_parallel()) {
CodeSetGlobalThreadPoolImplement(ofs, config_->module_name());
}
CodeInputAndOutputImplement(ofs, config_->module_name(), ctx_);
if (config_->interface() == Interface_CPP) {
CodeCopyOutputsImplement(ofs, ctx_);
}
CodeInputImplement(ofs, config_->module_name(), ctx_);
CodeCopyOutputsImplement(ofs, ctx_);
CodeInitResourceImplement(ofs, config_->module_name(), ctx_);
CodeFreeResourceImplement(ofs, config_->module_name(), ctx_);
if (is_get_quant_args_) {
@@ -82,34 +77,4 @@ int InferenceGenerator::CodeNetCFile() {
ofs.close();
return RET_OK;
}
int InferenceGenerator::CodeBenchmarkFile() {
std::string net_main_impl_file = net_main_file_path_ + net_main_cfile_;
std::ofstream ofs(net_main_impl_file);
MS_LOG(INFO) << "write " << net_main_impl_file;
MS_CHECK_TRUE(!ofs.bad(), "failed to open file");
std::vector<Tensor *> inputs = ctx_->graph_inputs();
size_t inputs_num = inputs.size();
CodeBenchmarkHeader(ofs, net_inc_hfile_);
CodeBenchmarkUsage(ofs);
CodeBenchmarkWarmup(ofs, config_->module_name());
CodeBenchmarkSetInputs(ofs, config_->module_name(), ctx_);
CodeBenchmarkSetBuffer(ofs, config_->module_name());
if (config_->target() != kARM32M) {
CodeBenchmarkInitWeight(ofs, config_->module_name());
}
if (config_->support_parallel()) {
CodeCreateThreadPool(ofs, config_->module_name());
}
CodeBenchmarkInference(ofs, config_->module_name());
CodeBenchmarkPrintOutputs(ofs, config_->module_name());
if (config_->support_parallel()) {
CodeDestroyThreadPool(ofs);
}
CodeBenchmarkFreeResourse(ofs, config_->module_name(), inputs_num);
ofs.close();
return RET_OK;
}
} // namespace mindspore::lite::micro

View File

@@ -30,8 +30,6 @@ class InferenceGenerator : public Generator {
private:
int CodeNetHFile() override;
int CodeNetCFile() override;
int CodeBenchmarkFile() override;
};
} // namespace mindspore::lite::micro
#endif // MINDSPORE_LITE_MICRO_CODER_GENERATOR_INFERENCE_GENERATOR_H_

View File

@@ -18,7 +18,6 @@
#include <vector>
#include <string>
#include "coder/generator/component/common_component.h"
#include "coder/generator/component/benchmark_component.h"
#include "coder/generator/component/weight_component.h"
#include "coder/generator/component/train_component.h"
#include "coder/generator/component/const_blocks/license.h"
@@ -35,7 +34,7 @@ void TrainGenerator::CodeGradientFunc(std::ofstream &ofs) const {
}
int TrainGenerator::CodeNetHFile() {
std::string net_include_file = net_inc_file_path_ + net_inc_hfile_;
std::string net_include_file = net_src_file_path_ + net_inc_hfile_;
std::ofstream ofs(net_include_file);
MS_CHECK_TRUE(!ofs.bad(), "failed to open file");
MS_LOG(INFO) << "write " << net_include_file;
@@ -45,7 +44,7 @@ int TrainGenerator::CodeNetHFile() {
}
ofs << "#include \"microtensor.h\"\n\n";
CodeTrainParams(ofs);
CodeInputAndOutputState(ofs, config_->module_name());
CodeInputState(ofs, config_->module_name());
if (config_->target() != kARM32M) {
CodeInitWeightState(ofs, config_->module_name());
}
@@ -61,8 +60,7 @@ int TrainGenerator::CodeNetCFile() {
std::ofstream ofs(net_impl_file);
MS_CHECK_TRUE(!ofs.bad(), "failed to open file");
MS_LOG(INFO) << "write " << net_impl_file;
CodeSourceFileInclude(ofs, net_weight_hfile_, net_inc_hfile_);
CodeInputAndOutputImplement(ofs, config_->module_name(), ctx_);
CodeInputImplement(ofs, config_->module_name(), ctx_);
CodeInitResourceImplement(ofs, config_->module_name(), ctx_);
CodeFreeResourceImplement(ofs, config_->module_name(), ctx_);
CodeFeaturesImplement(ofs, config_->module_name(), ctx_);
@@ -72,26 +70,4 @@ int TrainGenerator::CodeNetCFile() {
ofs.close();
return RET_OK;
}
int TrainGenerator::CodeBenchmarkFile() {
std::string net_main_impl_file = net_main_file_path_ + net_main_cfile_;
std::ofstream ofs(net_main_impl_file);
MS_LOG(INFO) << "write " << net_main_impl_file;
MS_CHECK_TRUE(!ofs.bad(), "failed to open file");
std::vector<Tensor *> inputs = ctx_->graph_inputs();
size_t inputs_num = inputs.size();
CodeBenchmarkHeader(ofs, net_inc_hfile_);
CodeBenchmarkUsage(ofs);
CodeBenchmarkWarmup(ofs, config_->module_name());
CodeBenchmarkSetInputs(ofs, config_->module_name(), ctx_);
CodeBenchmarkSetBuffer(ofs, config_->module_name());
if (config_->target() != kARM32M) {
CodeBenchmarkInitWeight(ofs, config_->module_name());
}
CodeBenchmarkInference(ofs, config_->module_name());
CodeBenchmarkPrintOutputs(ofs, config_->module_name());
CodeBenchmarkFreeResourse(ofs, config_->module_name(), inputs_num);
ofs.close();
return RET_OK;
}
} // namespace mindspore::lite::micro

View File

@@ -30,9 +30,6 @@ class TrainGenerator : public Generator {
private:
int CodeNetHFile() override;
int CodeNetCFile() override;
int CodeBenchmarkFile() override;
void CodeGradientFunc(std::ofstream &ofs) const;
};
} // namespace mindspore::lite::micro

View File

@@ -22,7 +22,7 @@
#include <unordered_map>
#include <vector>
#include <string>
#include "coder/coder_config.h"
#include "coder/config.h"
#include "include/context.h"
#include "include/model.h"
#include "schema/inner/model_generated.h"

View File

@@ -86,7 +86,9 @@ int ConvolutionDepthwiseINT8Coder::DoCode(CoderContext *const context) {
{"nnacl/int8/conv_depthwise_int8.h", "nnacl/int8/pack_int8.h", "wrapper/int8/convolution_depthwise_int8_wrapper.h"},
{"conv_depthwise_int8.c", "fixed_point.c", "pack_int8.c", "conv_int8.c", "winograd_transform.c",
"convolution_depthwise_int8_wrapper.c"},
{"ConvDwInt8Row.S", "ConvDwInt8PostAlign4.S", "ConvDwInt8PostAlign4PerChannel.S"});
{"ConvDwInt8Row.S", "ConvDwInt8PostAlign4.S", "ConvDwInt8PostAlign4PerChannel.S", "ConvDw3x3Int8Stride2.S",
"ConvDw3x3Int8.S", "ConvDw3x3Int8Vertical.S", "ConvDw3x3Int8Horizontal.S", "ConvDw3x3Int8Corner.S",
"MatmulOptR4Int8.S", "ConvDwInt8Center.S", "DeconvDwInt8Center.S", "DeconvDwInt8Post.S", "MatmulDpInt8Opt.S"});
nnacl::NNaclInt8Serializer code;
code.precision(kPrecision);
// call the op function

View File

@@ -16,6 +16,7 @@
#include "coder/opcoders/op_coder_register.h"
#include <utility>
#include <string>
#include "coder/utils/type_cast.h"
namespace mindspore::lite::micro {
bool CoderKey::operator<(const CoderKey rhs) const {

View File

@@ -23,7 +23,7 @@
#include <string>
#include "src/lite_kernel.h"
#include "include/model.h"
#include "coder/coder_config.h"
#include "coder/config.h"
namespace mindspore::lite::micro {
class OperatorCoder;
using CoderCreatorFunc = std::function<std::unique_ptr<OperatorCoder>(

View File

@@ -23,7 +23,7 @@
#include "schema/inner/model_generated.h"
#include "coder/graph.h"
#include "coder/context.h"
#include "coder/coder_config.h"
#include "coder/config.h"
#include "coder/allocator/allocator.h"
#include "coder/opcoders/op_coder.h"

View File

@@ -32,7 +32,7 @@ constexpr _mode_t kMicroDirMode = 0777;
constexpr __mode_t kMicroDirMode = 0777;
#endif
static std::array<std::string, 3> kWorkDirs = {"src", "include", "benchmark"};
static std::array<std::string, 3> kWorkDirs = {"src", "benchmark"};
bool DirExists(const std::string &dir_path) {
struct stat file_info;

View File

@@ -126,4 +126,21 @@ std::string EnumMicroTensorDataType(TypeId type) {
return "DataType_DT_UNDEFINED";
}
}
std::string EnumNameTarget(Target target) {
switch (target) {
case kX86:
return "kX86";
case kARM32M:
return "kARM32M";
case kARM32A:
return "kARM32A";
case kARM64:
return "kARM64";
case kAllTargets:
return "kAllTargets";
default:
return "kTargetUnknown";
}
}
} // namespace mindspore::lite::micro

View File

@@ -26,6 +26,7 @@
#include "securec/include/securec.h"
#include "src/tensor.h"
#include "nnacl/int8/quantize.h"
#include "coder/config.h"
namespace mindspore::lite::micro {
std::string EnumNameDataType(TypeId type);
@@ -36,6 +37,8 @@ std::string EnumMicroTensorFormat(schema::Format format);
std::string EnumMicroTensorDataType(TypeId type);
std::string EnumNameTarget(Target target);
/**
* @tparam T
* @param t, basic data type variable, or tensor