diff --git a/mindspore/lite/micro/coder/operator_library/CMakeLists.txt b/mindspore/lite/micro/coder/operator_library/CMakeLists.txt index 6ee3d59320c..ff0b529c16e 100644 --- a/mindspore/lite/micro/coder/operator_library/CMakeLists.txt +++ b/mindspore/lite/micro/coder/operator_library/CMakeLists.txt @@ -53,12 +53,8 @@ include(${MICRO_CMAKE_PATH}/package_wrapper.cmake) list(APPEND OP_FILES ${NNACL_OPS} ${WRAPPER_SRC} ${RUNTIME_SRC}) -if(PLATFORM_ARM64) - set(LIB_PATH "${OPERATOR_LIBRARY_PATH}/lib/arm64") -elseif(PLATFORM_ARM32) - set(LIB_PATH "${OPERATOR_LIBRARY_PATH}/lib/arm32a") -else() - set(LIB_PATH "${OPERATOR_LIBRARY_PATH}/lib/x86") +set(LIB_PATH "${OPERATOR_LIBRARY_PATH}/lib") +if(NOT PLATFORM_ARM64 AND NOT PLATFORM_ARM32) list(APPEND OP_FILES ${CMSIS_OPS}) endif() diff --git a/mindspore/lite/micro/example/mnist/benchmark/CMakeLists.txt b/mindspore/lite/micro/example/mnist/benchmark/CMakeLists.txt new file mode 100644 index 00000000000..4fb563683a8 --- /dev/null +++ b/mindspore/lite/micro/example/mnist/benchmark/CMakeLists.txt @@ -0,0 +1,60 @@ + +cmake_minimum_required(VERSION 3.14) +project(benchmark) + +if(NOT DEFINED MODEL_LIB) + message(FATAL_ERROR "MODEL_LIB not set") +endif() + +get_filename_component(MODEL_LIB ${MODEL_LIB} ABSOLUTE BASE_DIR ${CMAKE_CURRENT_BINARY_DIR}) + +function(parse_lib_info lib_full_path lib_name lib_path) + string(FIND "${lib_full_path}" "/" POS REVERSE) + math(EXPR POS "${POS} + 1") + string(SUBSTRING ${lib_full_path} 0 ${POS} path) + set(${lib_path} ${path} PARENT_SCOPE) + string(SUBSTRING ${lib_full_path} "${POS}" "-1" name) + set(${lib_name} ${name} PARENT_SCOPE) +endfunction(parse_lib_info) + +parse_lib_info(${MODEL_LIB} MODEL_LIB_NAME MODEL_LIB_PATH) + +message("project name: ${MODEL_LIB_NAME}") + +option(MICRO_BUILD_ARM64 "build android arm64" OFF) +option(MICRO_BUILD_ARM32A "build android arm32" OFF) + +if(MICRO_BUILD_ARM64 OR MICRO_BUILD_ARM32A) + add_compile_definitions(ENABLE_NEON) + add_compile_definitions(ENABLE_ARM) +endif() + +if(MICRO_BUILD_ARM64) + add_compile_definitions(ENABLE_ARM64) + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -march=armv8.2-a+dotprod") +endif() + +if(MICRO_BUILD_ARM32A) + add_compile_definitions(ENABLE_ARM32) + add_definitions(-mfloat-abi=softfp -mfpu=neon) +endif() + +set(CMAKE_C_FLAGS "${CMAKE_ENABLE_C99} ${CMAKE_C_FLAGS}") +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++17") +if("${CMAKE_BUILD_TYPE}" STREQUAL "Debug") + message(STATUS "build benchmark with debug info") + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DDebug -g") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DDebug -g") + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fvisibility=default") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fvisibility=default") +else() + set(CMAKE_C_FLAGS "-fPIC -fPIE -D_FORTIFY_SOURCE=2 -O2 -Wall -Werror -fstack-protector-strong -Wno-attributes \ + -Wno-deprecated-declarations -Wno-missing-braces ${CMAKE_C_FLAGS}") + set(CMAKE_CXX_FLAGS "-fPIC -fPIE -D_FORTIFY_SOURCE=2 -O2 -Wall -Werror -fstack-protector-strong -Wno-attributes \ + -Wno-deprecated-declarations -Wno-missing-braces -Wno-overloaded-virtual ${CMAKE_CXX_FLAGS}") +endif() +link_directories(${MODEL_LIB_PATH}) +include(benchmark.cmake) +add_executable(benchmark ${SRC_FILES}) +target_link_libraries(benchmark ${MODEL_LIB_NAME} -lm -pthread) + diff --git a/mindspore/lite/micro/example/mnist/benchmark/benchmark.cc b/mindspore/lite/micro/example/mnist/benchmark/benchmark.cc new file mode 100644 index 00000000000..5c8472adaf6 --- /dev/null +++ 
b/mindspore/lite/micro/example/mnist/benchmark/benchmark.cc @@ -0,0 +1,97 @@ + +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include <iostream> +#include <cstring> +#include <cstdio> + +#include "include/lite_session.h" +#include "include/ms_tensor.h" +#include "include/errorcode.h" + +#include "load_input.h" + +using namespace mindspore; + +void usage() { + printf( + "-- mindspore benchmark params usage:\n" + "args[0]: executable file\n" + "args[1]: inputs binary file\n" + "args[2]: model weight binary file\n" + "args[3]: loop count for performance test\n" + "args[4]: runtime thread num\n" + "args[5]: runtime thread bind mode\n\n"); +} + +int main(int argc, const char **argv) { + if (argc < 2) { + std::cout << "input command is invalid" << std::endl; + usage(); + return lite::RET_ERROR; + } + std::cout << "start run benchmark" << std::endl; + + const char *model_buffer = nullptr; + int model_size = 0; + // read the .net weight file by ReadInputData + if (argc >= 3) { + model_buffer = static_cast<const char *>(ReadInputData(argv[2], &model_size)); + } + session::LiteSession *session = mindspore::session::LiteSession::CreateSession(model_buffer, model_size, nullptr); + if (session == nullptr) { + std::cerr << "create lite session failed" << std::endl; + return lite::RET_ERROR; + } + + // set model inputs tensor data + std::vector<tensor::MSTensor *> inputs = session->GetInputs(); + size_t inputs_num = inputs.size(); + void *inputs_binbuf[inputs_num]; + int inputs_size[inputs_num]; + for (size_t i = 0; i < inputs_num; ++i) { + inputs_size[i] = inputs[i]->Size(); + } + int ret = ReadInputsFile(const_cast<char *>(argv[1]), inputs_binbuf, inputs_size, inputs_num); + if (ret != lite::RET_OK) { + return lite::RET_ERROR; + } + for (size_t i = 0; i < inputs_num; ++i) { + void *input_data = inputs[i]->MutableData(); + memcpy(input_data, inputs_binbuf[i], inputs_size[i]); + } + + ret = session->RunGraph(); + if (ret != lite::RET_OK) { + return lite::RET_ERROR; + } + + auto outputs = session->GetOutputs(); + std::cout << "output size: " << outputs.size() << std::endl; + for (const auto &item : outputs) { + auto output = item.second; + std::cout << "name: " << output->tensor_name() << ", size: " << output->Size() << std::endl; + } + + std::cout << "run benchmark success" << std::endl; + delete session; + for (size_t i = 0; i < inputs_num; ++i) { + free(inputs_binbuf[i]); + } + return lite::RET_OK; +} + diff --git a/mindspore/lite/micro/example/mnist/benchmark/benchmark.cmake b/mindspore/lite/micro/example/mnist/benchmark/benchmark.cmake new file mode 100644 index 00000000000..63ea2d62f98 --- /dev/null +++ b/mindspore/lite/micro/example/mnist/benchmark/benchmark.cmake @@ -0,0 +1,8 @@ +include_directories(${CMAKE_CURRENT_SOURCE_DIR}) +include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../src/) +include_directories(${HEADER_PATH}) +set(SRC_FILES + benchmark.cc + load_input.c + debug_utils.c +) diff --git a/mindspore/lite/micro/example/mnist/benchmark/debug_utils.c
b/mindspore/lite/micro/example/mnist/benchmark/debug_utils.c new file mode 100644 index 00000000000..289472a9ea9 --- /dev/null +++ b/mindspore/lite/micro/example/mnist/benchmark/debug_utils.c @@ -0,0 +1,216 @@ + +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include <time.h> +#include "debug_utils.h" + +#define UP_DIV(x, y) (((x) + (y) - (1)) / (y)) + +static const unsigned int kPrintNums = 20; +static const unsigned int kLineSplitNum = 44; +static const unsigned int kLineNum = 45; +unsigned int GetTensorElementSize(const MicroTensor *tensor) { + unsigned int ans = 1; + if (tensor->format == Format_NC4HW4) { + for (unsigned int i = 0; i < tensor->ndim; ++i) { + unsigned int dim = tensor->dim[i]; + if (i == 1) { + dim = UP_DIV(dim, 4) * 4; + } + ans *= dim; + } + } else { + for (unsigned int i = 0; i < tensor->ndim; ++i) { + ans *= tensor->dim[i]; + } + } + return ans; +} + +static const char *const TypeNames[] = {"DT_FLOAT", "DT_FLOAT16", "DT_INT8", "DT_INT32", "DT_UINT8", "DT_INT16", + "", "", "DT_UINT32", "DT_INT64", "DT_UINT16", "", + "", "", "", "", "DT_UNDEFINED", ""}; + +const char *EnumNameFormat(enum Format e) { + switch (e) { + case Format_NCHW: + return "NCHW"; + case Format_NHWC: + return "NHWC"; + case Format_HWKC: + return "HWKC"; + case Format_HWCK: + return "HWCK"; + case Format_KCHW: + return "KCHW"; + case Format_CKHW: + return "CKHW"; + case Format_KHWC: + return "KHWC"; + case Format_CHWK: + return "CHWK"; + case Format_NC4HW4: + return "NC4HW4"; + case Format_NUM_OF_FORMAT: + return "NUM_OF_FORMAT"; + default: + return ""; + } +} + +void PrintTensorData(MicroTensor *tensor) { + void *data = tensor->data; + unsigned int elenums = GetTensorElementSize(tensor); + if (data == NULL || elenums == 0) { + MICRO_ERROR("print tensor data failed"); + return; + } + switch (tensor->type) { + case DataType_DT_FLOAT: { + float *addr = (float *)(data); + for (int i = 0; i < elenums && i < kPrintNums; ++i) { + printf("%f, ", addr[i]); + } + break; + } + case DataType_DT_INT32: { + int32_t *addr = (int32_t *)(data); + for (int i = 0; i < elenums && i < kPrintNums; ++i) { + printf("%d, ", addr[i]); + } + break; + } + case DataType_DT_INT8: { + int8_t *addr = (int8_t *)(data); + for (int i = 0; i < elenums && i < kPrintNums; ++i) { + printf("%d, ", addr[i]); + } + break; + } + case DataType_DT_UINT32: { + uint32_t *addr = (uint32_t *)(data); + for (int i = 0; i < elenums && i < kPrintNums; ++i) { + printf("%u, ", addr[i]); + } + break; + } + case DataType_DT_UINT8: { + uint8_t *addr = (uint8_t *)(data); + for (int i = 0; i < elenums && i < kPrintNums; ++i) { + printf("%u, ", addr[i]); + } + break; + } + default: + MICRO_ERROR("unsupported data type %d", tensor->type); + } + printf("\n"); +} + +void PrintDataToFile(const void *data, const size_t elenums, const enum DataType type, FILE *file) { + if (data == NULL || elenums == 0) { + MICRO_ERROR("print tensor data to file failed"); + return; + } + switch (type) { + case 
DataType_DT_FLOAT: { + float *addr = (float *)(data); + for (int i = 0; i < elenums; ++i) { + fprintf(file, "%0.15f, ", addr[i]); + if (i % kLineNum == kLineSplitNum) { + fprintf(file, "\n"); + } + } + break; + } + case DataType_DT_INT32: { + int32_t *addr = (int32_t *)(data); + for (int i = 0; i < elenums; ++i) { + fprintf(file, "%d, ", addr[i]); + if (i % kLineNum == kLineSplitNum) { + fprintf(file, "\n"); + } + } + break; + } + case DataType_DT_INT8: { + int8_t *addr = (int8_t *)(data); + for (int i = 0; i < elenums; ++i) { + fprintf(file, "%d, ", addr[i]); + if (i % kLineNum == kLineSplitNum) { + fprintf(file, "\n"); + } + } + break; + } + case DataType_DT_UINT32: { + uint32_t *addr = (uint32_t *)(data); + for (int i = 0; i < elenums; ++i) { + fprintf(file, "%u, ", addr[i]); + if (i % kLineNum == kLineSplitNum) { + fprintf(file, "\n"); + } + } + break; + } + case DataType_DT_UINT8: { + uint8_t *addr = (uint8_t *)(data); + for (int i = 0; i < elenums; ++i) { + fprintf(file, "%u, ", addr[i]); + if (i % kLineNum == kLineSplitNum) { + fprintf(file, "\n"); + } + } + break; + } + default: + MICRO_ERROR("unsupported data type %d", type); + } + fprintf(file, "\n"); +} + +void PrintTensor(MicroTensor *tensor, FILE *output_file, const char *is_input) { + if (output_file == NULL) { + MICRO_ERROR("output file is NULL"); + return; + } + fprintf(output_file, "%s ", is_input); + for (int i = 0; i < tensor->ndim; ++i) { + fprintf(output_file, "%u, ", tensor->dim[i]); + } + fprintf(output_file, "\n"); + + const char *type = TypeNames[tensor->type]; + const char *format = EnumNameFormat(tensor->format); + unsigned int tensorSize = GetTensorElementSize(tensor); + fprintf(output_file, "%s type:%s, format:%s, elementSize: %u\n", is_input, type, format, tensorSize); + fprintf(output_file, "%s Data:\n", is_input); + PrintDataToFile(tensor->data, tensorSize, tensor->type, output_file); + (void)fflush(output_file); +} + +uint64_t GetTimeUs() { + const int USEC = 1000000; + const int MSEC = 1000; + struct timespec ts = {0, 0}; + if (clock_gettime(CLOCK_MONOTONIC, &ts) != 0) { + return 0; + } + uint64_t retval = (uint64_t)((ts.tv_sec * USEC) + (ts.tv_nsec / MSEC)); + return retval; +} + diff --git a/mindspore/lite/micro/example/mnist/benchmark/debug_utils.h b/mindspore/lite/micro/example/mnist/benchmark/debug_utils.h new file mode 100644 index 00000000000..b86810388d9 --- /dev/null +++ b/mindspore/lite/micro/example/mnist/benchmark/debug_utils.h @@ -0,0 +1,34 @@ + +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_LITE_MICRO_MICRODEBUGUTIL_H_ +#define MINDSPORE_LITE_MICRO_MICRODEBUGUTIL_H_ + +#include <stdio.h> +#include <stdint.h> +#include <stdlib.h> +#include <string.h> +#include "microtensor.h" + +void PrintTensor(MicroTensor *tensor, FILE *output_file, const char *is_input); + +void PrintTensorData(MicroTensor *tensor); + +uint64_t GetTimeUs(); + +#endif  // MINDSPORE_LITE_MICRO_MICRODEBUGUTIL_H_ + diff --git a/mindspore/lite/micro/example/mnist/benchmark/load_input.c b/mindspore/lite/micro/example/mnist/benchmark/load_input.c new file mode 100644 index 00000000000..f0baa78f7d4 --- /dev/null +++ b/mindspore/lite/micro/example/mnist/benchmark/load_input.c @@ -0,0 +1,95 @@ + +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "load_input.h" +#include <stdio.h> +#include <stdlib.h> +#include <string.h> + +void *ReadInputData(const char *real_input_path, int *size) { + if (real_input_path == NULL) { + return NULL; + } + if (strstr(real_input_path, ".bin") || strstr(real_input_path, ".net")) { + FILE *file; + file = fopen(real_input_path, "rb+"); + if (!file) { + printf("Can't find %s\n", real_input_path); + return NULL; + } + int curr_file_posi = ftell(file); + fseek(file, 0, SEEK_END); + *size = ftell(file); + unsigned char *buf = malloc((*size)); + if (buf == NULL) { + printf("malloc buf failed, file: %s\n", real_input_path); + fclose(file); + return NULL; + } + (void)memset(buf, 0, (*size)); + fseek(file, curr_file_posi, SEEK_SET); + int read_size = (int)(fread(buf, 1, *size, file)); + if (read_size != (*size)) { + printf("read file failed, total file size: %d, read_size: %d\n", (*size), read_size); + fclose(file); + free(buf); + return NULL; + } + fclose(file); + return (void *)buf; + } else { + printf("input data file should be .bin or .net\n"); + return NULL; + } +} + +void SaveOutputData(char *final_name, unsigned char *output_data, unsigned int out_size) { + FILE *output_file; + output_file = fopen(final_name, "w"); + if (output_file == NULL) { + printf("fopen output file: %s failed\n", final_name); + return; + } + unsigned char str[out_size]; + for (unsigned int i = 0; i < out_size; ++i) { + str[i] = output_data[i]; + fprintf(output_file, "%d\t", str[i]); + } + fclose(output_file); +} + +int ReadInputsFile(char *path, void **buffers, const int *inputs_size, int inputs_num) { + char *inputs_path[inputs_num]; + char *delim = ","; + char *token; + int i = 0; + while ((token = strtok_r(path, delim, &path))) { + if (i >= inputs_num) { + printf("inputs num is error, need: %d\n", inputs_num); + return -1; + } + inputs_path[i] = token; + printf("input %d: %s\n", i, inputs_path[i]); + i++; + } + + for (i = 0; i < inputs_num; ++i) { + int size = 0; + buffers[i] = ReadInputData(inputs_path[i], &size); + if (size != inputs_size[i] || buffers[i] == NULL) { + printf("size mismatch, %s, input: %d, needed: %d\n", inputs_path[i], size, inputs_size[i]); + return -1; + } + } + return 0; +} + 
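These loaders are what benchmark.cc uses to feed the model. For a model with several inputs, ReadInputsFile() expects all paths packed into one comma-separated argument and validates each buffer against its expected byte size. A minimal sketch of that convention follows; the file names, byte sizes, and the LoadTwoInputs helper are hypothetical (this MNIST example actually takes a single input):

/* Hypothetical two-input use of ReadInputsFile(): paths are passed as one
 * comma-separated, mutable string, and each loaded buffer must match the
 * expected byte size for that input. */
#include <stdio.h>
#include <stdlib.h>
#include "load_input.h"

int LoadTwoInputs(void) {
  char path[] = "input0.bin,input1.bin"; /* hypothetical file names */
  void *buffers[2] = {NULL, NULL};
  int expected_sizes[2] = {784, 40};     /* hypothetical byte sizes per input */
  if (ReadInputsFile(path, buffers, expected_sizes, 2) != 0) {
    return -1;                           /* missing file or size mismatch */
  }
  /* ... feed buffers to the model, then release them ... */
  free(buffers[0]);
  free(buffers[1]);
  return 0;
}

Note that the path string must be writable: ReadInputsFile() tokenizes it in place with strtok_r(), which is why benchmark.cc passes const_cast<char *>(argv[1]).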
diff --git a/mindspore/lite/micro/example/mnist/benchmark/load_input.h b/mindspore/lite/micro/example/mnist/benchmark/load_input.h new file mode 100644 index 00000000000..909a4ac16b1 --- /dev/null +++ b/mindspore/lite/micro/example/mnist/benchmark/load_input.h @@ -0,0 +1,36 @@ + +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MICRO_EXAMPLE_LOAD_INPUT_LOAD_INPUT_H_ +#define MICRO_EXAMPLE_LOAD_INPUT_LOAD_INPUT_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +void *ReadInputData(const char *real_input_path, int *size); + +void SaveOutputData(char *final_name, unsigned char *output_data, unsigned int out_size); + +int ReadInputsFile(char *path, void **buffers, const int *inputs_size, int inputs_num); + +#ifdef __cplusplus +} +#endif + +#endif  // MICRO_EXAMPLE_LOAD_INPUT_LOAD_INPUT_H_ + diff --git a/mindspore/lite/micro/example/mnist/mnist.sh b/mindspore/lite/micro/example/mnist/mnist.sh index 87bcc1a2d05..4c3dfaa1289 100644 --- a/mindspore/lite/micro/example/mnist/mnist.sh +++ b/mindspore/lite/micro/example/mnist/mnist.sh @@ -15,111 +15,58 @@ # ============================================================================ set -e -CURRENT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" -MINDSPORE_ROOT_DIR=${${CURRENT_DIR}%%/mindspore/lite/micro/example/mnist} +BASEPATH="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" +MINDSPORE_ROOT_DIR=${BASEPATH%%/mindspore/lite/micro/example/mnist} -OUTPUT_DIR=${1:-${MINDSPORE_ROOT_DIR}/output} -THREAD_NUM=${2:-32} -MODULE_NAME=mnist -OUTPUT_IR=Reshape-64.ir -CALIB_OUT=${CURRENT_DIR}/Reshape-64.out +echo "current dir is: ${BASEPATH}" -echo "current dir is: ${CURRENT_DIR}" -echo "packed output dir is :${OUTPUT_DIR}" +VERSION_HEADER=${MINDSPORE_ROOT_DIR}/mindspore/lite/include/version.h +INPUT_BIN=${BASEPATH}/mnist_input.bin -if [ ! -d "${OUTPUT_DIR}" ]; then - echo "folder ${OUTPUT_DIR} does not exist" - return 1 -fi - -# rm if already exist -WORKSPACE=${CURRENT_DIR}/build -rm -rf ${WORKSPACE} -mkdir ${WORKSPACE} || exit 1 -PROJECT_DIR=${WORKSPACE}/${MODULE_NAME} - -compare_output() { - local OUTPUT_FILE=$1 - local CALIB_FILE=$2 - if [[ ! -f "${OUTPUT_FILE}" || ! -f "${CALIB_FILE}" ]]; then - echo "file ${OUTPUT_FILE}, ${CALIB_FILE} does not exist, pwd $(pwd)" - exit 1 - fi - lines=$(cat ${CALIB_FILE} | wc -l) - for ((i = 1; i <= $lines; i++)); do - line1=$(awk 'NR=="'${i}'"{print $0}' ${CALIB_FILE}) - line2=$(awk 'NR=="'${i}'"{print $0}' ${OUTPUT_FILE}) - if [[ "${line1}" != "${line2}" ]]; then - echo -e "file ${OUTPUT_FILE}, ${CALIB_FILE}, compare failed!
line: ${i}" - exit 1 - fi - done - echo -e "compare success, ${OUTPUT_FILE}, ${CALIB_FILE}" +get_version() { + VERSION_MAJOR=$(grep "const int ms_version_major =" ${VERSION_HEADER} | tr -dc "[0-9]") + VERSION_MINOR=$(grep "const int ms_version_minor =" ${VERSION_HEADER} | tr -dc "[0-9]") + VERSION_REVISION=$(grep "const int ms_version_revision =" ${VERSION_HEADER} | tr -dc "[0-9]") + VERSION_STR=${VERSION_MAJOR}.${VERSION_MINOR}.${VERSION_REVISION} } +get_version +MINDSPORE_FILE_NAME="mindspore-lite-${VERSION_STR}-inference-linux-x64" +MINDSPORE_FILE="${MINDSPORE_FILE_NAME}.tar.gz" +MINDSPORE_LITE_DOWNLOAD_URL="https://ms-release.obs.cn-north-4.myhuaweicloud.com/${VERSION_STR}/MindSpore/lite/release/linux/${MINDSPORE_FILE}" -# cp oplib and codegen -cp ${OUTPUT_DIR}/mindspore-lite-*-codegen-linux-x64.tar.gz ${WORKSPACE}/ || exit 1 -cd ${WORKSPACE} || exit 1 -tar -zxf mindspore-lite-*-codegen-linux-x64.tar.gz || exit 1 -cd mindspore-lite-*-codegen-linux-x64 || exit 1 -mv operator_library/ ${WORKSPACE}/ || exit 1 -mv codegen ${WORKSPACE}/ || exit 1 -cd - -rm -r mindspore-lite-*-codegen-linux-x64 || exit 1 -rm mindspore-lite-*-codegen-linux-x64.tar.gz || exit 1 +mkdir -p build -# convert model -cp ${OUTPUT_DIR}/mindspore-lite-*-converter-linux-x64.tar.gz ${WORKSPACE}/ || exit 1 -cd ${WORKSPACE} || exit 1 -tar -zxf mindspore-lite-*-converter-linux-x64.tar.gz || exit 1 -rm mindspore-lite-*-converter-linux-x64.tar.gz || exit 1 -cd mindspore-lite-*-converter-linux-x64 || exit 1 -export LD_LIBRARY_PATH=./lib/:./third_party/protobuf/lib:./third_party/flatbuffers/lib:./third_party/glog/lib -converter/converter_lite --fmk=TFLITE \ - --modelFile=${CURRENT_DIR}/mnist.tflite \ - --outputFile=${WORKSPACE}/mnist -cd - -rm -rf mindspore-lite-*-converter-linux-x64 || exit 1 - -# generate code -${WORKSPACE}/codegen --modelPath=${WORKSPACE}/mnist.ms \ - --moduleName=${MODULE_NAME} \ - --isWeightFile=true \ - --debugMode=true -rm codegen - -if [ ! -d "${PROJECT_DIR}" ]; then - echo "folder ${PROJECT_DIR} does not exist" - return 1 +if [ ! -e ${BASEPATH}/build/${MINDSPORE_FILE} ]; then + wget -c -O ${BASEPATH}/build/${MINDSPORE_FILE} --no-check-certificate ${MINDSPORE_LITE_DOWNLOAD_URL} fi -cd ${PROJECT_DIR} || exit 1 +tar xzvf ${BASEPATH}/build/${MINDSPORE_FILE} -C ${BASEPATH}/build/ || exit 1 +rm ${BASEPATH}/build/${MINDSPORE_FILE} || exit 1 +CODEGEN_PATH=${BASEPATH}/build/${MINDSPORE_FILE_NAME}/tools/codegen +HEADER_PATH=${BASEPATH}/build/${MINDSPORE_FILE_NAME}/inference # 1. build static lib.a echo -e "building static library" -mkdir -p src/build && cd src/build || exit 1 -OP_HEADER_PATH=${WORKSPACE}/operator_library/include -OP_LIB=${WORKSPACE}/operator_library/lib/x86/libops.a +mkdir -p ${BASEPATH}/build/src && cd ${BASEPATH}/build/src || exit 1 +OP_HEADER_PATH=${CODEGEN_PATH}/operator_library/include +OP_LIB=${CODEGEN_PATH}/operator_library/lib/libops.a echo "Head Path: ${OP_HEADER_PATH}" echo "Lib Path: ${OP_LIB}" -cmake -DCMAKE_BUILD_TYPE=Debug \ - -DOP_LIB=${OP_LIB} \ - -DOP_HEADER_PATH=${OP_HEADER_PATH} .. -make -j${THREAD_NUM} +echo "Header Path: ${HEADER_PATH}" + +cmake -DCMAKE_BUILD_TYPE=Debug \ + -DOP_LIB=${OP_LIB} \ + -DOP_HEADER_PATH=${OP_HEADER_PATH} \ + -DHEADER_PATH=${HEADER_PATH} \ + ${BASEPATH}/src +make # 2. build benchmark -cd ${PROJECT_DIR}/benchmark && mkdir -p build && cd build || exit 1 -cmake -DMODEL_LIB="${PROJECT_DIR}/src/build/libnet.a" .. 
-make -j${THREAD_NUM} +mkdir -p ${BASEPATH}/build/benchmark && cd ${BASEPATH}/build/benchmark || exit 1 +cmake -DMODEL_LIB="${BASEPATH}/build/src/libnet.a" \ + -DHEADER_PATH=${HEADER_PATH} \ + ${BASEPATH}/benchmark +make -echo "net file: ${PROJECT_DIR}/src/${MODULE_NAME}.net" +echo "net file: ${BASEPATH}/src/net.net" # 3. run benchmark -./benchmark ${CURRENT_DIR}/input_1_224_224_3_uint8.bin ${PROJECT_DIR}/src/${MODULE_NAME}.net -compare_output ${OUTPUT_IR} ${CALIB_OUT} - -RET=$? -if [[ "${RET}" -eq 0 ]]; then - echo -e "run benchmark success: ${MODULE_NAME}" -else - echo -e "run benchmark failed: ${MODULE_NAME}" - exit 1 -fi \ No newline at end of file +./benchmark ${INPUT_BIN} ${BASEPATH}/src/net.net diff --git a/mindspore/lite/micro/example/mnist/mnist_input.bin b/mindspore/lite/micro/example/mnist/mnist_input.bin new file mode 100644 index 00000000000..32154910c70 Binary files /dev/null and b/mindspore/lite/micro/example/mnist/mnist_input.bin differ diff --git a/mindspore/lite/micro/example/mnist/src/CMakeLists.txt b/mindspore/lite/micro/example/mnist/src/CMakeLists.txt new file mode 100644 index 00000000000..ec9239d65f4 --- /dev/null +++ b/mindspore/lite/micro/example/mnist/src/CMakeLists.txt @@ -0,0 +1,83 @@ + +cmake_minimum_required(VERSION 3.14) +project(net) + +if(NOT DEFINED OP_LIB) + message(FATAL_ERROR "OP_LIB not set") +endif() + +if(NOT DEFINED OP_HEADER_PATH) + message(FATAL_ERROR "OP_HEADER_PATH not set") +endif() + +get_filename_component(OP_LIB ${OP_LIB} ABSOLUTE BASE_DIR ${CMAKE_CURRENT_BINARY_DIR}) +get_filename_component(OP_HEADER_PATH ${OP_HEADER_PATH} ABSOLUTE BASE_DIR ${CMAKE_CURRENT_BINARY_DIR}) + +message("operator lib path: ${OP_LIB}") +message("operator header path: ${OP_HEADER_PATH}") + +include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../include) +include_directories(${OP_HEADER_PATH}) +include_directories(${HEADER_PATH}) + +include(net.cmake) + +option(MICRO_BUILD_ARM64 "build android arm64" OFF) +option(MICRO_BUILD_ARM32A "build android arm32" OFF) + +if(MICRO_BUILD_ARM64 OR MICRO_BUILD_ARM32A) + add_compile_definitions(ENABLE_NEON) + add_compile_definitions(ENABLE_ARM) +endif() + +if(MICRO_BUILD_ARM64) + add_compile_definitions(ENABLE_ARM64) + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -march=armv8.2-a+dotprod") +endif() + +if(MICRO_BUILD_ARM32A) + add_compile_definitions(ENABLE_ARM32) + add_definitions(-mfloat-abi=softfp -mfpu=neon) +endif() + +set(CMAKE_C_FLAGS "${CMAKE_ENABLE_C99} ${CMAKE_C_FLAGS}") +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++17") +if("${CMAKE_BUILD_TYPE}" STREQUAL "Debug") + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DDebug -g") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DDebug -g") + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fvisibility=default") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fvisibility=default") +else() + set(CMAKE_C_FLAGS "-fPIC -fPIE -D_FORTIFY_SOURCE=2 -O2 -Wall -Werror -fstack-protector-strong -Wno-attributes \ + -Wno-deprecated-declarations -Wno-missing-braces ${CMAKE_C_FLAGS}") + set(CMAKE_CXX_FLAGS "-fPIC -fPIE -D_FORTIFY_SOURCE=2 -O2 -Wall -Werror -fstack-protector-strong -Wno-attributes \ + -Wno-deprecated-declarations -Wno-missing-braces -Wno-overloaded-virtual ${CMAKE_CXX_FLAGS}") +endif() + +function(create_library) + add_custom_command(TARGET net + POST_BUILD + COMMAND rm -rf tmp + COMMAND mkdir tmp + COMMAND cd tmp && ar -x ${OP_LIB} + COMMAND echo "raw static library ${library_name} size:" + COMMAND ls -lh ${library_name} + COMMAND mv ${library_name} ./tmp && cd tmp && ar -x ${library_name} + COMMENT "unzip raw static 
library ${library_name}" + ) + foreach(object_file ${OP_SRC}) + add_custom_command(TARGET net POST_BUILD COMMAND mv ./tmp/${object_file} .) + endforeach() + add_custom_command(TARGET net + POST_BUILD + COMMAND ar cr ${library_name} *.o + COMMAND ranlib ${library_name} + COMMAND echo "new static library ${library_name} size:" + COMMAND ls -lh ${library_name} + COMMAND rm -rf tmp && rm -rf *.o + COMMENT "generate specified static library ${library_name}" + ) +endfunction(create_library) +string(CONCAT library_name "lib" net ".a") +create_library() + diff --git a/mindspore/lite/micro/example/mnist/src/microtensor.h b/mindspore/lite/micro/example/mnist/src/microtensor.h new file mode 100644 index 00000000000..861d2a002bc --- /dev/null +++ b/mindspore/lite/micro/example/mnist/src/microtensor.h @@ -0,0 +1,88 @@ + +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MSMICRO_TENSOR_H +#define MSMICRO_TENSOR_H + +#include +#include +#include +#include +#include + +#define MICRO_INFO(content, args...) \ + { printf("[INFO] %s|%d: " #content "\r\n", __func__, __LINE__, ##args); } +#define MICRO_ERROR(content, args...) \ + { printf("[ERROR] %s|%d: " #content "\r\n", __func__, __LINE__, ##args); } + +enum STATUS { + RET_OK = 0, + RET_ERROR = 1, +}; + +enum DataType { + DataType_DT_FLOAT = 0, + DataType_DT_FLOAT16 = 1, + DataType_DT_INT8 = 2, + DataType_DT_INT32 = 3, + DataType_DT_UINT8 = 4, + DataType_DT_INT16 = 5, + DataType_DT_UINT32 = 8, + DataType_DT_INT64 = 9, + DataType_DT_UINT16 = 10, + DataType_DT_UNDEFINED = 16, + DataType_MIN = DataType_DT_FLOAT, + DataType_MAX = DataType_DT_UNDEFINED +}; + +enum Format { + Format_NCHW = 0, + Format_NHWC = 1, + Format_HWKC = 2, + Format_HWCK = 3, + Format_KCHW = 4, + Format_CKHW = 5, + Format_KHWC = 6, + Format_CHWK = 7, + Format_NC4HW4 = 100, + Format_NUM_OF_FORMAT = 101, + Format_MIN = Format_NCHW, + Format_MAX = Format_NUM_OF_FORMAT +}; + +typedef struct { + enum DataType type; + enum Format format; + int ndim; + int *dim; + void *data; +} MicroTensor; + +typedef struct { + int num; + MicroTensor *tensor; +} MicroTensorList; + +typedef struct { + float in_scale; + float out_scale; + int in_zero_point; + int out_zero_point; +} GraphQuantArgs; + +#endif // MSMICRO_TENSOR_H + diff --git a/mindspore/lite/micro/example/mnist/src/net.c b/mindspore/lite/micro/example/mnist/src/net.c new file mode 100644 index 00000000000..c19bc1e5a7c --- /dev/null +++ b/mindspore/lite/micro/example/mnist/src/net.c @@ -0,0 +1,184 @@ + +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
diff --git a/mindspore/lite/micro/example/mnist/src/net.c b/mindspore/lite/micro/example/mnist/src/net.c new file mode 100644 index 00000000000..c19bc1e5a7c --- /dev/null +++ b/mindspore/lite/micro/example/mnist/src/net.c @@ -0,0 +1,184 @@ + +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "microtensor.h" +#include "net_weight.h" +#include "net.h" + +static const unsigned char *net_I0 = 0; +int net_SetInputs(const void **inputs, int num) { + if (inputs == NULL) { + return RET_ERROR; + } + if (num != 1) { + return RET_ERROR; + } + net_I0 = inputs[0]; + return RET_OK; +} +const MicroTensorList *net_GetOutputs() { + static MicroTensor net_O[1]; + static int dim0[] = {1, 10, }; + net_O[0].ndim = 2; + net_O[0].dim = dim0; + net_O[0].type = DataType_DT_FLOAT; + net_O[0].format = Format_NHWC; + net_O[0].data = net_B + 56; + static MicroTensorList net_TensorArray; + net_TensorArray.num = 1; + net_TensorArray.tensor = &net_O[0]; + return &net_TensorArray; +} +int CopyOutputsData(void **outputs, int num) { + if (outputs == NULL) { + return RET_ERROR; + } + if (num != 1) { + return RET_ERROR; + } + memcpy(outputs[0], net_B+56, 40); + outputs[0] = net_B; + return RET_OK; +} + +int net_GetBufferSize() { + return 40032; +} +int net_SetBuffer(void *buffer) { + if (buffer == NULL) { + return RET_ERROR; + } + net_B = buffer; + return RET_OK; +} +void net_FreeResource() { + net_B = NULL; + net_I0 = NULL; + void *allocated[] = {net_W14, net_W15, net_W16, net_W17, net_W18, net_W19, }; + for (int i = 0; i < 6; ++i) { + free(allocated[i]); + allocated[i] = NULL; + } +} +void net_Inference() { + const int g_thread_num = 1; + { +DoQuantizeFp32ToInt8((float *)(net_I0), (int8_t *)(net_B+0), 0.007874015718698501587, 0, 784, false); + } + { +memset((int16_t *)(net_B+10928), 0, 2048); +memset((int16_t *)(net_B+12976), 0, 256); +memset((int *)(net_B+13232), 0, 6144); +memset((uint8_t *)(net_B+19376), 0, 8112); +memset((int16_t *)(net_B+27488), 0, 12544); +static QuantArg conv_param__quant_arg_in[1] = {{0.007874015718698501587, 0}}; +static QuantArg conv_param__quant_arg_w[12] = {{0.003238174133002758026, -6}, {0.003890725085511803627, -8}, {0.003394871251657605171, -7}, {0.001685356837697327137, -127}, {0.004322394262999296188, 1}, {0.002274985425174236298, -56}, {0.003617759561166167259, 17}, {0.004447745624929666519, 23}, {0.004683905746787786484, 26}, {0.004021023400127887726, 24}, {0.005650237202644348145, 11}, {0.001966834301128983498, -84}}; +static QuantArg conv_param__quant_arg_out[1] = {{0.01778890006244182587, 0}}; +static double conv_param__real_multiplier[12] = {0.001433333970799530351, 0.001722176774828924938, 0.00150269379968211614, 0.0007460003866156953226, 0.001913249346122961134, 0.001006991503636309139, 0.001601352314486244018, 0.001968734305210294733, 0.002073267527210802957, 0.00177985160945266568, 0.002501001060249878095, 0.0008705926067589928779}; +static int conv_param__left_shift[12] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; +static int conv_param__right_shift[12] = {-9, -9, -9, -10, -9, -9, -9, -8, -8, -9, -8, -10}; +static int conv_param__quant_multiplier[12] = {1575967367, 1893553389, 1652229306, 1640472199, 2103639903, 1107198867, 1760705490, 1082323130, 1139790877, 1956967540, 1374939873, 1914453388}; +static int conv_param__out_act_min[1] = {0}; +static int conv_param__out_act_max[1] = {127}; +const ConvQuantArg conv_param__conv_quant_arg = 
{(RoundingMode)(1), 2, conv_param__quant_arg_in, conv_param__quant_arg_w, conv_param__quant_arg_out, conv_param__real_multiplier, conv_param__left_shift, conv_param__right_shift, conv_param__quant_multiplier, conv_param__out_act_min, conv_param__out_act_max, 1, 12, 1, 2}; +int thread_num = MSMIN(g_thread_num, 26); +const ConvParameter conv_param_ = {{ "", 35, g_thread_num}, conv_param__conv_quant_arg, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 28, 28, 1, 1, 26, 26, 12, thread_num, 0, 0, (PadMode)(2), (ActType)(1), 0, 0, 0}; +PackInputToC8Int8((int8_t *)(net_B+0), (int16_t *)(net_B+27488), &conv_param_); +Conv3x3Int8((int16_t *)(net_B+27488), net_W10, net_W11, (int8_t *)(net_B+784), (int16_t *)(net_B+10928), (int16_t *)(net_B+12976), (int *)(net_B+13232), (uint8_t *)(net_B+19376), 0, &conv_param_); +PackNC4HW4ToNHWCInt8((uint8_t *)(net_B+19376), (int8_t *)(net_B+784), 1, 676, 12); + } + { +static QuantArg pooling_parameter_quant_in = {0.01778890006244182587, 0}; +static QuantArg pooling_parameter_quant_out = {0.01778890006244182587, 0}; +static QuantArg *pooling_parameter_quant[2] = { &pooling_parameter_quant_in, &pooling_parameter_quant_out}; +const PoolingParameter pooling_parameter = {{ "", 92, g_thread_num}, (PoolMode)(1), (RoundMode)(2), (PadMode)(2), (ActType)(0), 0, false, 2, 2, 2, 2, 26, 26, 1, 12, 13, 13, 1, 12, 0, 0, 0, 0, 0, pooling_parameter_quant, false}; +MaxPoolingInt8((int8_t *)(net_B+784), (int8_t *)(net_B+8896), (PoolingParameter *)&pooling_parameter, 0); + } + { +memset((int16_t *)(net_B+10928), 0, 4096); +memset((int16_t *)(net_B+15024), 0, 256); +memset((int *)(net_B+15280), 0, 6144); +memset((uint8_t *)(net_B+21424), 0, 1452); +memset((int16_t *)(net_B+22876), 0, 5408); +static QuantArg conv_param__quant_arg_in[1] = {{0.01778890006244182587, 0}}; +static QuantArg conv_param__quant_arg_w[12] = {{0.005374609492719173431, 33}, {0.005837683100253343582, 22}, {0.004709810949862003326, -15}, {0.003726204857230186462, 27}, {0.00318551529198884964, -8}, {0.003453079145401716232, 50}, {0.004045850131660699844, -9}, {0.003903790842741727829, 30}, {0.004003710579127073288, -10}, {0.00560879148542881012, 27}, {0.005486610345542430878, -23}, {0.003554018214344978333, 4}}; +static QuantArg conv_param__quant_arg_out[1] = {{0.07183934003114700317, 0}}; +static double conv_param__real_multiplier[12] = {0.001330863973520378732, 0.001445530533608141606, 0.001166246148374064893, 0.0009226850783705293785, 0.0007887991893445710223, 0.0008550534992628172192, 0.001001835847923064193, 0.0009666590447744700769, 0.0009914011740411567478, 0.001388852288199173826, 0.00135859773990280961, 0.0008800481219728497088}; +static int conv_param__left_shift[12] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; +static int conv_param__right_shift[12] = {-9, -9, -9, -10, -10, -10, -9, -10, -9, -9, -9, -10}; +static int conv_param__quant_multiplier[12] = {1463300414, 1589377630, 1282301201, 2029005945, 1734587761, 1880282530, 1101530164, 2125705720, 1090057119, 1527059240, 1493794012, 1935246286}; +static int conv_param__out_act_min[1] = {0}; +static int conv_param__out_act_max[1] = {127}; +const ConvQuantArg conv_param__conv_quant_arg = {(RoundingMode)(1), 2, conv_param__quant_arg_in, conv_param__quant_arg_w, conv_param__quant_arg_out, conv_param__real_multiplier, conv_param__left_shift, conv_param__right_shift, conv_param__quant_multiplier, conv_param__out_act_min, conv_param__out_act_max, 1, 12, 1, 2}; +int thread_num = MSMIN(g_thread_num, 11); +const ConvParameter conv_param_ = {{ "", 35, g_thread_num}, 
conv_param__conv_quant_arg, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 13, 13, 12, 1, 11, 11, 12, thread_num, 0, 0, (PadMode)(2), (ActType)(1), 0, 0, 0}; +PackInputToC8Int8((int8_t *)(net_B+8896), (int16_t *)(net_B+22876), &conv_param_); +Conv3x3Int8((int16_t *)(net_B+22876), net_W12, net_W13, (int8_t *)(net_B+0), (int16_t *)(net_B+10928), (int16_t *)(net_B+15024), (int *)(net_B+15280), (uint8_t *)(net_B+21424), 0, &conv_param_); +PackNC4HW4ToNHWCInt8((uint8_t *)(net_B+21424), (int8_t *)(net_B+0), 1, 121, 12); + } + { +static QuantArg pooling_parameter_quant_in = {0.07136065512895584106, 0}; +static QuantArg pooling_parameter_quant_out = {0.07136065512895584106, 0}; +static QuantArg *pooling_parameter_quant[2] = { &pooling_parameter_quant_in, &pooling_parameter_quant_out}; +const PoolingParameter pooling_parameter = {{ "", 92, g_thread_num}, (PoolMode)(1), (RoundMode)(2), (PadMode)(2), (ActType)(0), 0, false, 2, 2, 2, 2, 11, 11, 1, 12, 5, 5, 1, 12, 0, 0, 0, 0, 0, pooling_parameter_quant, false}; +MaxPoolingInt8((int8_t *)(net_B+0), (int8_t *)(net_B+1456), (PoolingParameter *)&pooling_parameter, 0); + } + { +const ReshapeQuantArg reshape_quant_arg = {{0.07136065512895584106, 0}, {0.07136065512895584106, 0}, -128, 127}; +Int8Reshape((int8_t *)(net_B+1456), (int8_t *)(net_B+0), 300, reshape_quant_arg); + } + { +int32_t tmp_weight_zp = 1; +RowMajor2Row16x4MajorInt8((int8_t *)(net_B+0)+0, (int8_t *)(net_B+10928), 1, 300); +CalcInputSums((int8_t *)(net_B+0)+0, 1, 300, tmp_weight_zp, (int *)(net_B+12144), RowMajor); +const float filter_scale[20] = {0.003479549195617437363, 0.004490676335990428925, 0.004529818892478942871, 0.002983231563121080399, 0.003455155529081821442, 0.003223794745281338692, 0.003272445406764745712, 0.003801185870543122292, 0.003679843153804540634, 0.003040234791114926338, 0.003704284550622105598, 0.003355232765898108482, 0.002904496388509869576, 0.003024494973942637444, 0.002794801956042647362, 0.004355110693722963333, 0.003499472280964255333, 0.004184196703135967255, 0.003057289868593215942, 0.003264668164774775505}; +const int filter_zp[20] = {1, 12, 3, 2, -10, -5, -11, 5, 12, 22, 16, 1, -5, 15, 13, 5, -10, -5, -6, 0}; +const int left_shift[20] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; +const int right_shift[20] = {-10, -9, -9, -10, -10, -10, -10, -9, -9, -10, -9, -10, -10, -10, -10, -9, -10, -9, -10, -10}; +const int multiplier[20] = {2108215049, 1360422072, 1372280070, 1807502393, 2093435146, 1953256619, 1982733521, 1151545365, 1114785262, 1842040025, 1122189669, 2032893316, 1759797843, 1832503464, 1693335354, 1319353429, 2120286176, 1267576078, 1852373503, 1978021333}; +const MatmulQuantParameter matmul_quant_parameter = {{0.07136065512895584106, 0}, {0, 0}, {0.258998185396194458, 0}, -128, 127, filter_scale, filter_zp, left_shift, right_shift, multiplier}; +int32_t *cur_left = matmul_quant_parameter.left_shift_ + 0; +int32_t *cur_right = matmul_quant_parameter.right_shift_ + 0; +int32_t *cur_mul = matmul_quant_parameter.quant_multiplier_ + 0; +int32_t *cur_zp = matmul_quant_parameter.filter_zp_ + 0; +MatmulInt8Opt((int8_t *)(net_B+10928), net_W15+0 + 0, (int8_t *)(net_B+304)+0+0, 1, 20, 304, (int *)(net_B+12144), net_W16+0, -128, 127, 0, cur_mul, cur_left, cur_right, 20, true, cur_zp); + } + { +int32_t tmp_weight_zp = 1; +RowMajor2Row16x4MajorInt8((int8_t *)(net_B+304)+0, (int8_t *)(net_B+10928), 1, 20); +CalcInputSums((int8_t *)(net_B+304)+0, 1, 20, tmp_weight_zp, (int *)(net_B+11056), RowMajor); +const float filter_scale[10] = 
{0.004678330849856138229, 0.005127115640789270401, 0.00471437256783246994, 0.004531511571258306503, 0.005476122256368398666, 0.004348111804574728012, 0.004803542047739028931, 0.006081215571612119675, 0.004532597027719020844, 0.004762654658406972885}; +const int filter_zp[10] = {7, -2, 9, 2, -6, 21, 16, 10, -19, 8}; +const int left_shift[10] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; +const int right_shift[10] = {-8, -8, -8, -8, -8, -8, -8, -8, -8, -8}; +const int multiplier[10] = {1242805482, 1362025788, 1252380041, 1203802750, 1454739904, 1155082292, 1276068015, 1615483838, 1204091115, 1265206260}; +const MatmulQuantParameter matmul_quant_parameter = {{0.258998185396194458, 0}, {0, 0}, {0.5359870791435241699, 0}, -128, 127, filter_scale, filter_zp, left_shift, right_shift, multiplier}; +int32_t *cur_left = matmul_quant_parameter.left_shift_ + 0; +int32_t *cur_right = matmul_quant_parameter.right_shift_ + 0; +int32_t *cur_mul = matmul_quant_parameter.quant_multiplier_ + 0; +int32_t *cur_zp = matmul_quant_parameter.filter_zp_ + 0; +MatmulInt8Opt((int8_t *)(net_B+10928), net_W18+0 + 0, (int8_t *)(net_B+0)+0+0, 1, 10, 32, (int *)(net_B+11056), net_W19+0, -128, 127, 0, cur_mul, cur_left, cur_right, 10, true, cur_zp); + } + { +DoDequantizeInt8ToFp32((int8_t *)(net_B+0), (float *)(net_B+16), 0.5359870791435241699, 0, 10); + } + { +const SoftmaxParameter softmax_parameter = {{ "", 138, g_thread_num}, 1, {1, 10}, 10, 2}; +memset((float *)(net_B+10928), 0, 4); +Softmax((float *)(net_B+16), (float *)(net_B+56), (float *)(net_B+10928), &softmax_parameter); + } +} diff --git a/mindspore/lite/micro/example/mnist/src/net.cmake b/mindspore/lite/micro/example/mnist/src/net.cmake new file mode 100644 index 00000000000..c6e47df7d4a --- /dev/null +++ b/mindspore/lite/micro/example/mnist/src/net.cmake @@ -0,0 +1,25 @@ +include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../include/) +set(OP_SRC + common_func.c.o + common_func_int8.c.o + conv3x3_int8.c.o + conv_int8.c.o + exp_fp32.c.o + fixed_point.c.o + matmul_int8.c.o + matmul_int8_wrapper.c.o + pack_int8.c.o + pooling_int8.c.o + quant_dtype_cast_int8.c.o + reshape_int8.c.o + softmax_fp32.c.o + net_weight.c.o + net.c.o + session.cc.o + tensor.cc.o +) +file(GLOB NET_SRC + ${CMAKE_CURRENT_SOURCE_DIR}/*.cc + ${CMAKE_CURRENT_SOURCE_DIR}/*.c + ) +add_library(net STATIC ${NET_SRC}) diff --git a/mindspore/lite/micro/example/mnist/src/net.h b/mindspore/lite/micro/example/mnist/src/net.h new file mode 100644 index 00000000000..ab4ab327325 --- /dev/null +++ b/mindspore/lite/micro/example/mnist/src/net.h @@ -0,0 +1,67 @@ + +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "microtensor.h" + + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * set input tensors + * @param inputs, an array of pointers to the input data of the model; the model may take more than one input tensor. + * @param num, the number of input tensors of the model.
+ **/ +int net_SetInputs(const void **inputs, int num); + +/** + * get the output tensors of the model + **/ +const MicroTensorList *net_GetOutputs(); + +int CopyOutputsData(void **outputs, int num); + +/** + * @param weight_buffer, the address of the weight binary file + * @param weight_size, the size of the weight file in bytes + **/ +int net_Init(void *weight_buffer, int weight_size); + +/** + * get the size of the memory workspace required for inference. + **/ +int net_GetBufferSize(); +/** + * set the memory workspace for inference + **/ +int net_SetBuffer(void *buffer); + +/** + * free the packed-weight memory, and set the workspace buffer and input pointers to NULL + **/ +void net_FreeResource(); +/** + * net inference function + **/ +void net_Inference(); + + +#ifdef __cplusplus +} +#endif + 
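The net_* entry points declared above can also be driven directly, without the C++ LiteSession wrapper; session.cc later in this diff performs essentially this sequence internally. A hedged sketch, assuming the contents of net.net have been loaded into weight_buf (for example via ReadInputData() from load_input.h), that input_data points to a 1x28x28x1 float input as declared in session.cc, and that the benchmark directory is on the include path for GetTimeUs(); RunMnistOnce is an illustrative helper, not part of the generated API:

/* Hedged sketch: drive the generated net_* API directly (mirrors session.cc).
 * weight_buf/weight_size hold the net.net contents; input_data is the
 * 1x28x28x1 float input. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include "net.h"
#include "debug_utils.h" /* for GetTimeUs(); lives in the benchmark directory */

int RunMnistOnce(void *weight_buf, int weight_size, const float *input_data) {
  if (net_Init(weight_buf, weight_size) != RET_OK) {
    return RET_ERROR;
  }
  void *workspace = malloc(net_GetBufferSize()); /* 40032 bytes for this model */
  if (workspace == NULL || net_SetBuffer(workspace) != RET_OK) {
    free(workspace);
    return RET_ERROR;
  }
  const void *inputs[1] = {input_data};
  if (net_SetInputs(inputs, 1) != RET_OK) {
    free(workspace);
    return RET_ERROR;
  }
  uint64_t start = GetTimeUs();
  net_Inference();
  printf("inference took %llu us\n", (unsigned long long)(GetTimeUs() - start));
  const MicroTensorList *outs = net_GetOutputs(); /* one 1x10 float tensor */
  const float *probs = (const float *)outs->tensor[0].data;
  for (int i = 0; i < 10; ++i) {
    printf("class %d: %f\n", i, probs[i]);
  }
  net_FreeResource(); /* frees packed weights; workspace stays caller-owned */
  free(workspace);
  return RET_OK;
}

As a side note on the generated tables in net.c: each real_multiplier entry is input_scale * weight_scale / output_scale; for example 0.0078740157 * 0.0032381741 / 0.0177889001 ≈ 0.0014333340, which matches conv_param__real_multiplier[0] of the first convolution.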
diff --git a/mindspore/lite/micro/example/mnist/src/net.net b/mindspore/lite/micro/example/mnist/src/net.net new file mode 100644 index 00000000000..2bde284518c Binary files /dev/null and b/mindspore/lite/micro/example/mnist/src/net.net differ diff --git a/mindspore/lite/micro/example/mnist/src/net_weight.c b/mindspore/lite/micro/example/mnist/src/net_weight.c new file mode 100644 index 00000000000..5defba035c5 --- /dev/null +++ b/mindspore/lite/micro/example/mnist/src/net_weight.c @@ -0,0 +1,103 @@ + +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "net_weight.h" + +unsigned char *net_B = 0; +int16_t net_W10[1536]; +int32_t net_W11[12]; +int16_t net_W12[3072]; +int32_t net_W13[12]; +int32_t *net_W14 = NULL; +int8_t *net_W15 = NULL; +int32_t *net_W16 = NULL; +int32_t *net_W17 = NULL; +int8_t *net_W18 = NULL; +int32_t *net_W19 = NULL; + +int net_Init(void *weight_buffer, int weight_size) { + if (weight_buffer == NULL) { + return RET_ERROR; + } + int g_thread_num = 1; + + struct ModelParameter { + void *addr; + size_t size; + size_t offset; + }; + int8_t *net_W6 = (int8_t *)weight_buffer + 9312; + int32_t *net_W7 = (int32_t *)((int8_t *)weight_buffer + 15312); + int8_t *net_W8 = (int8_t *)weight_buffer + 15392; + int32_t *net_W9 = (int32_t *)((int8_t *)weight_buffer + 15592); + + struct ModelParameter model_params[] = { + {net_W10, 3072, 0}, + {net_W11, 48, 3072}, + {net_W12, 6144, 3120}, + {net_W13, 48, 9264}, + }; + + for (int i = 0; i < 4; ++i) { + if (model_params[i].offset + model_params[i].size > (size_t)weight_size) { + return RET_ERROR; + } + memcpy(model_params[i].addr, (int8_t *)weight_buffer + model_params[i].offset, model_params[i].size); + } +{ +net_W14 = malloc(80); +if (net_W14 == NULL) { + return RET_ERROR; +} +memset(net_W14, 0, 80); +memcpy(net_W14, net_W7, 80); +net_W16 = malloc(80); +if (net_W16 == NULL) { + return RET_ERROR; +} +memset(net_W16, 0, 80); +net_W15 = malloc(6080); +if (net_W15 == NULL) { + return RET_ERROR; +} +memset(net_W15, 0, 6080); +const int init_filter_zp[20] = {1, 12, 3, 2, -10, -5, -11, 5, 12, 22, 16, 1, -5, 15, 13, 5, -10, -5, -6, 0}; +InitInt8MatrixB(net_W6, net_W16, net_W15, 1, 300, 20, 20, 304, 0, init_filter_zp, net_W14, true, true); +} +{ +net_W17 = malloc(48); +if (net_W17 == NULL) { + return RET_ERROR; +} +memset(net_W17, 0, 48); +memcpy(net_W17, net_W9, 48); +net_W19 = malloc(48); +if (net_W19 == NULL) { + return RET_ERROR; +} +memset(net_W19, 0, 48); +net_W18 = malloc(384); +if (net_W18 == NULL) { + return RET_ERROR; +} +memset(net_W18, 0, 384); +const int init_filter_zp[10] = {7, -2, 9, 2, -6, 21, 16, 10, -19, 8}; +InitInt8MatrixB(net_W8, net_W19, net_W18, 1, 20, 10, 12, 32, 0, init_filter_zp, net_W17, true, true); +} + return RET_OK; +} + diff --git a/mindspore/lite/micro/example/mnist/src/net_weight.h b/mindspore/lite/micro/example/mnist/src/net_weight.h new file mode 100644 index 00000000000..65b597cc192 --- /dev/null +++ b/mindspore/lite/micro/example/mnist/src/net_weight.h @@ -0,0 +1,43 @@ + +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +#include "nnacl/common_func.h" +#include "nnacl/errorcode.h" +#include "nnacl/fp32/softmax_fp32.h" +#include "nnacl/int8/common_func_int8.h" +#include "nnacl/int8/conv3x3_int8.h" +#include "nnacl/int8/conv_int8.h" +#include "nnacl/int8/matmul_int8.h" +#include "nnacl/int8/pooling_int8.h" +#include "nnacl/int8/quant_dtype_cast_int8.h" +#include "nnacl/int8/reshape_int8.h" +#include "wrapper/int8/matmul_int8_wrapper.h" +#include <stdlib.h> +#include <string.h> +#include "microtensor.h" + +extern unsigned char *net_B; +extern int16_t net_W10[]; +extern int32_t net_W11[]; +extern int16_t net_W12[]; +extern int32_t net_W13[]; +extern int32_t *net_W14; +extern int8_t *net_W15; +extern int32_t *net_W16; +extern int32_t *net_W17; +extern int8_t *net_W18; +extern int32_t *net_W19; diff --git a/mindspore/lite/micro/example/mnist/src/session.cc b/mindspore/lite/micro/example/mnist/src/session.cc new file mode 100644 index 00000000000..04a1170b953 --- /dev/null +++ b/mindspore/lite/micro/example/mnist/src/session.cc @@ -0,0 +1,157 @@ + +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "session.h" +#include "net.h" + +namespace mindspore { +namespace lite { +int LiteSession::CompileGraph(lite::Model *model) { + inputs_.resize(1); + inputs_[0] = new (std::nothrow) MTensor("graph_input-0", kNumberTypeFloat32, {1, 28, 28, 1, }); + MS_ERROR_IF_NULL(inputs_[0]); + outputs_.resize(1); + outputs_[0] = new (std::nothrow) MTensor("Softmax-7", kNumberTypeFloat32, {1, 10, }); + MS_ERROR_IF_NULL(outputs_[0]); + for (const auto &output : outputs_) { + output_tensor_map_[output->tensor_name()] = output; + } + return RET_OK; +} + + +int LiteSession::RunGraph(const KernelCallBack &before, const KernelCallBack &after) { + const void *inputs_data[inputs_.size()]; + for (size_t i = 0; i < inputs_.size(); ++i) { + inputs_data[i] = inputs_[i]->MutableData(); + } + net_SetInputs(inputs_data, inputs_.size()); + + net_Inference(); + + void *outputs_data[outputs_.size()]; + for (size_t i = 0; i < outputs_.size(); ++i) { + outputs_data[i] = outputs_[i]->MutableData(); + } + CopyOutputsData(outputs_data, outputs_.size()); + + return RET_OK; +} + +LiteSession::~LiteSession() { + net_FreeResource(); + if (runtime_buffer_ != nullptr) { + free(runtime_buffer_); + runtime_buffer_ = nullptr; + } + for (auto &input : inputs_) { + if (input == nullptr) { + continue; + } + delete input; + input = nullptr; + } + for (auto &item : output_tensor_map_) { + auto output = item.second; + if (output == nullptr) { + continue; + } + delete output; + output = nullptr; + } +} + +int LiteSession::InitRuntimeBuffer() { + int buffer_size = net_GetBufferSize(); + runtime_buffer_ = malloc(buffer_size); + if (runtime_buffer_ == nullptr) { + return RET_ERROR; + } + int ret = net_SetBuffer(runtime_buffer_); + if (ret != RET_OK) { + return RET_ERROR; + } + return RET_OK; +} + +std::vector<tensor::MSTensor *> LiteSession::GetInputs() const { + std::vector<tensor::MSTensor *> inputs; + inputs.insert(inputs.begin(), inputs_.begin(), 
inputs_.end()); + return inputs; +} + +std::vector<mindspore::tensor::MSTensor *> LiteSession::GetOutputsByNodeName(const std::string &node_name) const { + auto iter = output_node_map_.find(node_name); + if (iter == output_node_map_.end()) { + std::vector<mindspore::tensor::MSTensor *> empty; + return empty; + } + return iter->second; +} + +std::unordered_map<std::string, mindspore::tensor::MSTensor *> LiteSession::GetOutputs() const { + return output_tensor_map_; +} + +std::vector<std::string> LiteSession::GetOutputTensorNames() const { + std::vector<std::string> output_names; + for (const auto &item : output_node_map_) { + for (const auto &output : item.second) { + output_names.emplace_back(output->tensor_name()); + } + } + return output_names; +} + +mindspore::tensor::MSTensor *LiteSession::GetOutputByTensorName(const std::string &tensor_name) const { + auto item = output_tensor_map_.find(tensor_name); + if (item == output_tensor_map_.end()) { + return nullptr; + } + return item->second; +} + +int LiteSession::Resize(const std::vector<tensor::MSTensor *> &inputs, const std::vector<std::vector<int>> &dims) { + return RET_OK; +} + +}  // namespace lite + +session::LiteSession *session::LiteSession::CreateSession(const lite::Context *context) { + auto *session = new (std::nothrow) lite::LiteSession(); + if (session == nullptr) { + return nullptr; + } + if (session->InitRuntimeBuffer() != lite::RET_OK) { + delete session; + return nullptr; + } + return session; +} + +session::LiteSession *session::LiteSession::CreateSession(const char *net_buf, size_t size, + const lite::Context *context) { + session::LiteSession *session = CreateSession(context); + if (session == nullptr) { + return nullptr; + } + int ret = session->CompileGraph(nullptr); + if (ret != lite::RET_OK) { + delete session; + return nullptr; + } + net_Init(const_cast<char *>(net_buf), size); + return session; +} +}  // namespace mindspore + diff --git a/mindspore/lite/micro/example/mnist/src/session.h b/mindspore/lite/micro/example/mnist/src/session.h new file mode 100644 index 00000000000..161f594a175 --- /dev/null +++ b/mindspore/lite/micro/example/mnist/src/session.h @@ -0,0 +1,78 @@ + +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +#ifndef MINDSPORE_LITE_MICRO_LIBRARY_SOURCE_SESSION_H_ +#define MINDSPORE_LITE_MICRO_LIBRARY_SOURCE_SESSION_H_ + +#include "include/errorcode.h" +#include "include/lite_session.h" + +#include "tensor.h" + +namespace mindspore { +namespace lite { + +#define MS_ERROR_IF_NULL(ptr)            \ + do {                                   \ + if ((ptr) == nullptr) {              \ + return mindspore::lite::RET_ERROR; \ + }                                    \ + } while (0) + +class LiteSession : public session::LiteSession { + public: + LiteSession() = default; + + ~LiteSession() override; + + void BindThread(bool if_bind) override {} + + int CompileGraph(lite::Model *model) override; + + std::vector<tensor::MSTensor *> GetInputs() const override; + + mindspore::tensor::MSTensor *GetInputsByTensorName(const std::string &tensor_name) const override { return nullptr; } + + int RunGraph(const KernelCallBack &before = nullptr, const KernelCallBack &after = nullptr) override; + + std::vector<mindspore::tensor::MSTensor *> GetOutputsByNodeName(const std::string &node_name) const override; + + std::unordered_map<std::string, mindspore::tensor::MSTensor *> GetOutputs() const override; + + std::vector<std::string> GetOutputTensorNames() const override; + + mindspore::tensor::MSTensor *GetOutputByTensorName(const std::string &tensor_name) const override; + + int Resize(const std::vector<tensor::MSTensor *> &inputs, const std::vector<std::vector<int>> &dims) override; + + int InitRuntimeBuffer(); + + private: + int SetInputsData(const std::vector<MTensor *> &inputs) const; + std::vector<MTensor *> inputs_; + std::vector<MTensor *> outputs_; + std::unordered_map<std::string, mindspore::tensor::MSTensor *> output_tensor_map_; + std::unordered_map<std::string, std::vector<mindspore::tensor::MSTensor *>> output_node_map_; + + void *runtime_buffer_ = nullptr; +}; + +}  // namespace lite +}  // namespace mindspore + +#endif  // MINDSPORE_LITE_MICRO_LIBRARY_SOURCE_SESSION_H_ + diff --git a/mindspore/lite/micro/example/mnist/src/tensor.cc b/mindspore/lite/micro/example/mnist/src/tensor.cc new file mode 100644 index 00000000000..debe6edf94e --- /dev/null +++ b/mindspore/lite/micro/example/mnist/src/tensor.cc @@ -0,0 +1,93 @@ + + +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +#include "tensor.h" + +namespace mindspore { +namespace lite { +size_t DataTypeSize(const TypeId type) { + switch (type) { + case kNumberTypeFloat64: + return sizeof(double); + case kNumberTypeFloat: + case kNumberTypeFloat32: + return sizeof(float); + case kNumberTypeInt8: + return sizeof(int8_t); + case kNumberTypeUInt8: + return sizeof(uint8_t); + case kNumberTypeFloat16: + case kNumberTypeInt16: + return sizeof(int16_t); + case kNumberTypeInt32: + return sizeof(int32_t); + case kNumberTypeInt64: + return sizeof(int64_t); + case kNumberTypeUInt16: + return sizeof(uint16_t); + case kNumberTypeUInt32: + return sizeof(uint32_t); + case kNumberTypeUInt64: + return sizeof(uint64_t); + case kNumberTypeBool: + return sizeof(bool); + case kObjectTypeString: + return sizeof(char); + case kObjectTypeTensorType: + default: + return 0; + } +} + +MTensor::~MTensor() { + if (data_ != nullptr) { + free(data_); + data_ = nullptr; + } +} + +int MTensor::DimensionSize(const size_t index) const { + int dim_size = -1; + if (index < shape_.size()) { + dim_size = shape_[index]; + } + return dim_size; +} + +int MTensor::ElementsNum() const { + int elements = 1; + for (int i : shape_) { + elements *= i; + } + return elements; +} + +size_t MTensor::Size() const { + size_t element_size = DataTypeSize(data_type_); + return element_size * ElementsNum(); +} + +void *MTensor::MutableData() { + if (data_ == nullptr) { + data_ = malloc(this->Size()); + } + return data_; +} +} // namespace lite +} // namespace mindspore + diff --git a/mindspore/lite/micro/example/mnist/src/tensor.h b/mindspore/lite/micro/example/mnist/src/tensor.h new file mode 100644 index 00000000000..58f0f8ecf44 --- /dev/null +++ b/mindspore/lite/micro/example/mnist/src/tensor.h @@ -0,0 +1,71 @@ + + +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_LITE_MICRO_LIBRARY_SOURCE_TENSOR_H_ +#define MINDSPORE_LITE_MICRO_LIBRARY_SOURCE_TENSOR_H_ + +#include "include/ms_tensor.h" +#include <utility> +#include <vector> + +namespace mindspore { +namespace lite { +struct QuantArg { + double scale; + int32_t zeroPoint; + float var_corr{1}; + float mean_corr{0}; + bool inited; + std::vector<float> clusters{}; + int bitNum; + int roundType; + int multiplier; + int dstDtype; +}; + +class MTensor : public mindspore::tensor::MSTensor { + public: + MTensor() = default; + MTensor(std::string name, enum TypeId type, std::vector<int> shape) + : tensor_name_(std::move(name)), data_type_(type), shape_(std::move(shape)) {} + ~MTensor() override; + + TypeId data_type() const override { return data_type_; } + std::vector<int> shape() const override { return shape_; } + int DimensionSize(size_t index) const override; + int ElementsNum() const override; + size_t Size() const override; + void *MutableData() override; + std::string tensor_name() const override { return tensor_name_; } + void set_tensor_name(const std::string name) override { tensor_name_ = name; } + void set_data(void *data) override { data_ = data; } + + private: + std::string tensor_name_; + TypeId data_type_; + std::vector<int> shape_; + void *data_ = nullptr; + std::vector<QuantArg> quant_params_; +}; + +}  // namespace lite +}  // namespace mindspore + +#endif  // MINDSPORE_LITE_MICRO_LIBRARY_SOURCE_TENSOR_H_ + + diff --git a/scripts/check_clang_format.sh b/scripts/check_clang_format.sh index 4a38239acc0..d0b6da7da16 100755 --- a/scripts/check_clang_format.sh +++ b/scripts/check_clang_format.sh @@ -33,7 +33,7 @@ echo "SCRIPTS_PATH=$SCRIPTS_PATH" # print usage message function usage() { - echo "Check whether the specified source files were well formated" + echo "Check whether the specified source files were well formatted" echo "Usage:" echo "bash $0 [-a] [-c] [-l] [-h]" echo "e.g. $0 -a" @@ -97,8 +97,11 @@ fi CHECK_RESULT_FILE=__code_format_check_result__ echo "0" > "$CHECK_RESULT_FILE" -# check format of files modified in the lastest commit +# check format of files modified in the latest commit while read line; do + if [ ! -e ${line} ]; then + continue + fi BASE_NAME=$(basename "${line}") TEMP_FILE="__TEMP__${BASE_NAME}" cp "${line}" "${TEMP_FILE}" @@ -107,7 +110,7 @@ while read line; do ret=$? rm "${TEMP_FILE}" if [[ "${ret}" -ne 0 ]]; then - echo "File ${line} is not formated, please format it." + echo "File ${line} is not formatted, please format it." echo "1" > "${CHECK_RESULT_FILE}" break fi @@ -118,6 +121,6 @@ rm "${CHECK_RESULT_FILE}" rm "${CHECK_LIST_FILE}" cd "${CURRENT_PATH}" || exit 1 if [[ "X${result}" == "X0" ]]; then - echo "Check PASS: specified files are well formated!" + echo "Check PASS: specified files are well formatted!" fi exit "${result}"