micro refactor

lz 2022-02-18 15:42:11 +08:00
parent d0d932c94f
commit 1056328d94
369 changed files with 1026 additions and 8556 deletions

View File

@@ -38,7 +38,7 @@
 # Lite
 "mindspore/mindspore/lite/include/lite_utils.h" "build/include_what_you_use"
 "mindspore/mindspore/lite/nnacl/" "readability/casting"
-"mindspore/mindspore/lite/micro/coder/wrapper/" "readability/casting"
+"mindspore/mindspore/lite/tools/converter/micro/coder/wrapper/" "readability/casting"
 "mindspore/mindspore/lite/tools/converter/parser/tflite/tflite_node_parser.h" "runtime/references"
 "mindspore/mindspore/lite/tools/converter/model_parser.h" "build/namespaces"
 "mindspore/mindspore/lite/tools/converter/parser/caffe/caffe_node_parser.cc" "readability/casting"

View File

@@ -24,6 +24,7 @@ set(MSLITE_PROPOSAL_LIB_NAME libmslite_proposal)
 set(MICRO_NNIE_LIB_NAME libmicro_nnie)
 set(DPICO_ACL_ADAPTER_LIB_NAME libdpico_acl_adapter)
 set(BENCHMARK_ROOT_DIR ${RUNTIME_PKG_NAME}/tools/benchmark)
+set(MICRO_DIR ${TOP_DIR}/mindspore/lite/tools/converter/micro)
 set(MINDSPORE_LITE_TRAIN_LIB_NAME libmindspore-lite-train)
 set(BENCHMARK_TRAIN_NAME benchmark_train)
@@ -156,7 +157,7 @@ function(__install_micro_wrapper)
            COMPONENT ${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h")
     install(DIRECTORY ${NNACL_DIR}/intrinsics DESTINATION ${CODEGEN_ROOT_DIR}/include/nnacl
            COMPONENT ${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h")
-    install(DIRECTORY ${TOP_DIR}/mindspore/lite/micro/coder/wrapper DESTINATION ${CODEGEN_ROOT_DIR}/include
+    install(DIRECTORY ${MICRO_DIR}/coder/wrapper DESTINATION ${CODEGEN_ROOT_DIR}/include
            COMPONENT ${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h")
     install(TARGETS wrapper ARCHIVE DESTINATION ${CODEGEN_ROOT_DIR}/lib COMPONENT ${RUNTIME_COMPONENT_NAME})
 endfunction()
@@ -171,7 +172,6 @@ function(__install_micro_codegen)
            COMPONENT ${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h")
     install(TARGETS cmsis_nn ARCHIVE DESTINATION ${CODEGEN_ROOT_DIR}/third_party/lib
            COMPONENT ${RUNTIME_COMPONENT_NAME})
-    install(TARGETS codegen RUNTIME DESTINATION ${CODEGEN_ROOT_DIR} COMPONENT ${RUNTIME_COMPONENT_NAME})
 endfunction()
 if(WIN32)
@@ -236,7 +236,7 @@ if(PLATFORM_ARM64)
            COMPONENT ${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h" PATTERN "ops*" EXCLUDE)
     install(DIRECTORY ${TOP_DIR}/include/c_api/ DESTINATION ${RUNTIME_INC_DIR}/c_api
            COMPONENT ${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h")
-    if(NOT TARGET_MIX210)
+    if(ANDROID_NDK_TOOLCHAIN_INCLUDED OR MSLITE_ENABLE_CONVERTER)
        __install_micro_wrapper()
     endif()
     if(MSLITE_ENABLE_TOOLS)
@@ -479,7 +479,9 @@ elseif(PLATFORM_ARM32)
            COMPONENT ${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h" PATTERN "ops*" EXCLUDE)
     install(DIRECTORY ${TOP_DIR}/include/c_api/ DESTINATION ${RUNTIME_INC_DIR}/c_api
            COMPONENT ${RUNTIME_COMPONENT_NAME} FILES_MATCHING PATTERN "*.h")
-    __install_micro_wrapper()
+    if(ANDROID_NDK_TOOLCHAIN_INCLUDED OR MSLITE_ENABLE_CONVERTER)
+        __install_micro_wrapper()
+    endif()
     if(MSLITE_ENABLE_TOOLS AND NOT TARGET_OHOS_LITE)
         if(NOT MSLITE_COMPILE_TWICE)
             install(TARGETS ${BENCHMARK_NAME} RUNTIME

View File

@@ -61,6 +61,7 @@ endif()
 if(DEFINED ENV{MSLITE_ENABLE_TRAIN})
     set(MSLITE_ENABLE_TRAIN $ENV{MSLITE_ENABLE_TRAIN})
 endif()
 if(DEFINED ENV{MSLITE_ENABLE_SSE})
     set(MSLITE_ENABLE_SSE $ENV{MSLITE_ENABLE_SSE})
 endif()
@@ -599,7 +600,9 @@ if(MSLITE_MINDDATA_IMPLEMENT STREQUAL "lite_cv")
 endif()
 add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/src/ops)
-add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/micro/coder)
+if(ANDROID_NDK_TOOLCHAIN_INCLUDED)
+    add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/tools/converter/micro/coder)
+endif()
 add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/src)
 add_subdirectory(${CCSRC_DIR}/plugin/device/cpu/kernel/nnacl build)

View File

@@ -1,17 +0,0 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "coder/coder.h"
int main(int argc, const char **argv) { return mindspore::lite::micro::RunCoder(argc, argv); }

View File

@@ -1,51 +0,0 @@
#include "include/errorcode.h"
#include "include/lite_session.h"
#include "include/ms_tensor.h"
#include "mnist_input_data.h"
using namespace mindspore;
int main(void) {
while (1) {
/* USER CODE END WHILE */
SEGGER_RTT_printf(0, "***********mnist test start***********\n");
float a = 3.1415926;
SEGGER_RTT_printf(0, "output: [%d] \n", (int)(a * 10000));
const char *model_buffer = nullptr;
int model_size = 0;
session::LiteSession *session = mindspore::session::LiteSession::CreateSession(model_buffer, model_size, nullptr);
Vector<tensor::MSTensor *> inputs = session->GetInputs();
size_t inputs_num = inputs.size();
void *inputs_binbuf[inputs_num];
int inputs_size[inputs_num];
for (size_t i = 0; i < inputs_num; ++i) {
inputs_size[i] = inputs[i]->Size();
}
// MNIST has only one input tensor here, so its data array is hard-coded.
inputs_binbuf[0] = mnist_inputs_data;
for (size_t i = 0; i < inputs_num; ++i) {
void *input_data = inputs[i]->MutableData();
memcpy(input_data, inputs_binbuf[i], inputs_size[i]);
}
int ret = session->RunGraph();
if (ret != lite::RET_OK) {
return lite::RET_ERROR;
}
Vector<String> outputs_name = session->GetOutputTensorNames();
for (int i = 0; i < outputs_name.size(); ++i) {
tensor::MSTensor *output_tensor = session->GetOutputByTensorName(outputs_name[i]);
if (output_tensor == nullptr) {
return -1;
}
float *casted_data = static_cast<float *>(output_tensor->MutableData());
if (casted_data == nullptr) {
return -1;
}
for (size_t j = 0; j < 10 && j < output_tensor->ElementsNum(); j++) {
SEGGER_RTT_printf(0, "output[%d]: [%d]\n", j, (int)(casted_data[j] * 10000));
}
}
delete session;
SEGGER_RTT_printf(0, "***********mnist test end***********\n");
/* USER CODE BEGIN 3 */
}
/* USER CODE END 3 */
}

View File

@@ -1,236 +0,0 @@
##########################################################################################################################
# File automatically-generated by tool: [projectgenerator] version: [3.13.0-B3] date: [Mon Mar 22 14:29:28 CST 2021]
##########################################################################################################################
# ------------------------------------------------
# Generic Makefile (based on gcc)
#
# ChangeLog :
# 2017-02-10 - Several enhancements + project update mode
# 2015-07-22 - first version
# ------------------------------------------------
######################################
# target
######################################
TARGET = mnist_test
######################################
# building variables
######################################
# debug build?
DEBUG = 1
# optimization
OPT = -Og
#######################################
# paths
#######################################
# Build path
BUILD_DIR = build
######################################
# source
######################################
# C sources
C_SOURCES = \
Core/Src/main.c \
Core/Src/stm32f7xx_it.c \
Core/Src/stm32f7xx_hal_msp.c \
Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_cortex.c \
Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_tim.c \
Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_tim_ex.c \
Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_rcc.c \
Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_rcc_ex.c \
Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_flash.c \
Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_flash_ex.c \
Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_gpio.c \
Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_dma.c \
Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_dma_ex.c \
Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_pwr.c \
Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_pwr_ex.c \
Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal.c \
Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_i2c.c \
Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_i2c_ex.c \
Drivers/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_exti.c \
Core/Src/system_stm32f7xx.c \
SEGGER/SEGGER_RTT.c \
SEGGER/SEGGER_RTT_printf.c \
SEGGER/SEGGER_SYSVIEW.c \
SEGGER/Syscalls/SEGGER_SYSVIEW_Config_NoOS.c \
SEGGER/Syscalls/SEGGER_RTT_Syscalls_GCC.c
USER_C_SOURCES_DIR = \
mnist_stm32f746/src \
mnist_stm32f746/operator_library/kernels/CMSIS/NN/ConvolutionFunctions \
mnist_stm32f746/operator_library/kernels/CMSIS/NN/FullyConnectedFunctions \
mnist_stm32f746/operator_library/kernels/CMSIS/NN/NNSupportFunctions \
mnist_stm32f746/operator_library/kernels/CMSIS/NN/PoolingFunctions \
mnist_stm32f746/operator_library/kernels/nnacl/fp32 \
mnist_stm32f746/operator_library/kernels/nnacl/int8
USER_CC_SOURCES_DIR = \
mnist_stm32f746/src \
# ASM sources
ASM_SOURCES = \
startup_stm32f746xx.s \
SEGGER_RTT_V683b/RTT/SEGGER_RTT_ASM_ARMv7M.s
#######################################
# binaries
#######################################
PREFIX = arm-none-eabi-
# The gcc compiler bin path can be either defined in make command via GCC_PATH variable (> make GCC_PATH=xxx)
# either it can be added to the PATH environment variable.
ifdef GCC_PATH
#CC = $(GCC_PATH)/$(PREFIX)gcc
#AS = $(GCC_PATH)/$(PREFIX)gcc -x assembler-with-cpp
CC = $(GCC_PATH)/$(PREFIX)g++
AS = $(GCC_PATH)/$(PREFIX)g++ -x assembler-with-cpp
CP = $(GCC_PATH)/$(PREFIX)objcopy
SZ = $(GCC_PATH)/$(PREFIX)size
else
#CC = $(PREFIX)gcc
#AS = $(PREFIX)gcc -x assembler-with-cpp
CC = $(PREFIX)g++
AS = $(PREFIX)g++ -x assembler-with-cpp
CP = $(PREFIX)objcopy
SZ = $(PREFIX)size
endif
HEX = $(CP) -O ihex
BIN = $(CP) -O binary -S
#######################################
# CFLAGS
#######################################
# cpu
CPU = -mcpu=cortex-m7
# fpu
FPU = -mfpu=fpv5-sp-d16
# float-abi
FLOAT-ABI = -mfloat-abi=hard
# mcu
MCU = $(CPU) -mthumb $(FPU) $(FLOAT-ABI)
# macros for gcc
# AS defines
AS_DEFS =
# C defines
C_DEFS = \
-DUSE_HAL_DRIVER \
-DSTM32F746xx
# AS includes
AS_INCLUDES =
# C includes
C_INCLUDES = \
-ICore/Inc \
-IDrivers/STM32F7xx_HAL_Driver/Inc \
-IDrivers/STM32F7xx_HAL_Driver/Inc/Legacy \
-IDrivers/CMSIS/Device/ST/STM32F7xx/Include \
-ISEGGER \
-IConfig \
-ISEGGER/Syscalls \
-Imnist_stm32f746/operator_library/include \
-Imnist_stm32f746/operator_library/include/CMSIS/Core/Include \
-Imnist_stm32f746/operator_library/include/CMSIS/DSP/Include \
-Imnist_stm32f746/operator_library/include/CMSIS/NN/Include \
-Imnist_stm32f746/src \
-Imnist_stm32f746
# compile gcc flags
ASFLAGS = $(MCU) $(AS_DEFS) $(AS_INCLUDES) $(OPT) -Wall -Wno-narrowing -fpermissive -fdata-sections -ffunction-sections
CFLAGS = $(MCU) $(C_DEFS) $(C_INCLUDES) $(OPT) -Wall -Wno-narrowing -fpermissive -fdata-sections -ffunction-sections
ifeq ($(DEBUG), 1)
CFLAGS += -g -gdwarf-2 -DARM_MATH_DSP
endif
# Generate dependency information
CFLAGS += -MMD -MP -MF"$(@:%.o=%.d)"
CFLAGS += -DNOT_USE_STL
CFLAGS += -D__cpluscplus -DARM_MATH_DSP
#######################################
# LDFLAGS
#######################################
# link script
LDSCRIPT = STM32F746IGKx_FLASH.ld
# libraries
LIBS = -lc -lm -lnosys
LIBDIR =
LDFLAGS = $(MCU) -specs=nosys.specs -T$(LDSCRIPT) $(LIBDIR) $(LIBS) -Wl,-Map=$(BUILD_DIR)/$(TARGET).map,--cref -Wl,--gc-sections
# default action: build all
all: $(BUILD_DIR)/$(TARGET).elf $(BUILD_DIR)/$(TARGET).hex $(BUILD_DIR)/$(TARGET).bin
#######################################
# build the application
#######################################
USER_C_SOURCES = $(foreach dir,$(USER_C_SOURCES_DIR),$(wildcard $(dir)/*.c))
USER_CC_SOURCES = $(foreach dir,$(USER_CC_SOURCES_DIR),$(wildcard $(dir)/*.cc))
# list of objects
OBJECTS = $(addprefix $(BUILD_DIR)/,$(notdir $(USER_CC_SOURCES:.cc=.o)))
vpath %.cc $(sort $(dir $(USER_CC_SOURCES)))
OBJECTS += $(addprefix $(BUILD_DIR)/,$(notdir $(USER_C_SOURCES:.c=.o)))
vpath %.c $(sort $(dir $(USER_C_SOURCES)))
OBJECTS += $(addprefix $(BUILD_DIR)/,$(notdir $(C_SOURCES:.c=.o)))
vpath %.c $(sort $(dir $(C_SOURCES)))
# list of ASM program objects
OBJECTS += $(addprefix $(BUILD_DIR)/,$(notdir $(ASM_SOURCES:.s=.o)))
vpath %.s $(sort $(dir $(ASM_SOURCES)))
$(BUILD_DIR)/%.o: %.c Makefile | $(BUILD_DIR)
$(CC) -c $(CFLAGS) -Wa,-a,-ad,-alms=$(BUILD_DIR)/$(notdir $(<:.c=.lst)) $< -o $@
$(BUILD_DIR)/%.o: %.cc Makefile | $(BUILD_DIR)
$(CC) -c $(CFLAGS) -Wa,-a,-ad,-alms=$(BUILD_DIR)/$(notdir $(<:.cc=.lst)) $< -o $@
$(BUILD_DIR)/%.o: %.s Makefile | $(BUILD_DIR)
$(AS) -c $(CFLAGS) $< -o $@
$(BUILD_DIR)/$(TARGET).elf: $(OBJECTS) Makefile
$(CC) $(OBJECTS) $(LDFLAGS) -o $@
$(SZ) $@
$(BUILD_DIR)/%.hex: $(BUILD_DIR)/%.elf | $(BUILD_DIR)
$(HEX) $< $@
$(BUILD_DIR)/%.bin: $(BUILD_DIR)/%.elf | $(BUILD_DIR)
$(BIN) $< $@
$(BUILD_DIR):
mkdir $@
#######################################
# clean up
#######################################
clean:
-rm -fR $(BUILD_DIR)
#######################################
# dependencies
#######################################
-include $(wildcard $(BUILD_DIR)/*.d)
# *** EOF ***

View File

@@ -1,58 +0,0 @@
cmake_minimum_required(VERSION 3.14)
project(benchmark)
if(NOT DEFINED PKG_PATH)
message(FATAL_ERROR "PKG_PATH not set")
endif()
get_filename_component(PKG_PATH ${PKG_PATH} ABSOLUTE BASE_DIR ${CMAKE_CURRENT_BINARY_DIR})
set(HEADER_PATH ${PKG_PATH}/runtime)
option(MICRO_BUILD_ARM64 "build android arm64" OFF)
option(MICRO_BUILD_ARM32A "build android arm32" OFF)
add_compile_definitions(NOT_USE_STL)
if(MICRO_BUILD_ARM64 OR MICRO_BUILD_ARM32A)
add_compile_definitions(ENABLE_NEON)
add_compile_definitions(ENABLE_ARM)
endif()
if(MICRO_BUILD_ARM64)
add_compile_definitions(ENABLE_ARM64)
endif()
if(MICRO_BUILD_ARM32A)
add_compile_definitions(ENABLE_ARM32)
add_definitions(-mfloat-abi=softfp -mfpu=neon)
endif()
set(CMAKE_C_FLAGS "${CMAKE_ENABLE_C99} ${CMAKE_C_FLAGS}")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++17")
if("${CMAKE_BUILD_TYPE}" STREQUAL "Debug")
message(STATUS "build benchmark with debug info")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DDebug -g")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DDebug -g")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fvisibility=default")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fvisibility=default")
else()
message(STATUS "build benchmark release version")
set(CMAKE_C_FLAGS "-fPIC -fPIE -D_FORTIFY_SOURCE=2 -O3 -Wall -Werror -fstack-protector-strong -Wno-attributes \
-Wno-deprecated-declarations -Wno-missing-braces ${CMAKE_C_FLAGS}")
set(CMAKE_CXX_FLAGS "-fPIC -fPIE -D_FORTIFY_SOURCE=2 -O3 -Wall -Werror -fstack-protector-strong -Wno-attributes \
-Wno-deprecated-declarations -Wno-missing-braces -Wno-overloaded-virtual ${CMAKE_CXX_FLAGS}")
string(REPLACE "-g" "" CMAKE_C_FLAGS "${CMAKE_C_FLAGS}")
string(REPLACE "-g" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
endif()
add_subdirectory(src)
include_directories(${CMAKE_CURRENT_SOURCE_DIR})
include_directories(${HEADER_PATH})
set(SRC_FILES
benchmark/benchmark.cc
benchmark/load_input.c
)
add_executable(benchmark ${SRC_FILES})
target_link_libraries(benchmark net -lm -pthread)

View File

@@ -1,207 +0,0 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <iostream>
#include <string>
#include <cstring>
#include "include/lite_session.h"
#include "include/ms_tensor.h"
#include "include/errorcode.h"
#include "load_input.h"
#include "calib_output.h"
using namespace mindspore;
void usage() {
printf(
"-- mindspore benchmark params usage:\n"
"args[0]: executable file\n"
"args[1]: inputs binary file\n"
"args[2]: model weight binary file\n"
"args[3]: loop count for performance test\n"
"args[4]: calibration file\n"
"args[5]: runtime thread num\n"
"args[6]: runtime thread bind mode\n\n");
}
uint64_t GetTimeUs() {
const int USEC = 1000000;
const int MSEC = 1000;
struct timespec ts = {0, 0};
if (clock_gettime(CLOCK_MONOTONIC, &ts) != 0) {
return 0;
}
uint64_t retval = (uint64_t)((ts.tv_sec * USEC) + (ts.tv_nsec / MSEC));
return retval;
}
template <typename T>
void PrintData(void *data, size_t data_number) {
if (data == nullptr) {
return;
}
auto casted_data = static_cast<T *>(data);
for (size_t i = 0; i < 10 && i < data_number; i++) {
printf("%s, ", std::to_string(casted_data[i]).c_str());
}
printf("\n");
}
void TensorToString(tensor::MSTensor *tensor) {
printf("name: %s, ", tensor->tensor_name().c_str());
printf("DataType: %d, ", tensor->data_type());
printf("Elements: %d, ", tensor->ElementsNum());
printf("Shape: [");
for (auto &dim : tensor->shape()) {
printf("%d ", dim);
}
printf("], Data: \n");
switch (tensor->data_type()) {
case kNumberTypeFloat32: {
PrintData<float>(tensor->MutableData(), tensor->ElementsNum());
} break;
case kNumberTypeFloat16: {
PrintData<int16_t>(tensor->MutableData(), tensor->ElementsNum());
} break;
case kNumberTypeInt32: {
PrintData<int32_t>(tensor->MutableData(), tensor->ElementsNum());
} break;
case kNumberTypeInt16: {
PrintData<int16_t>(tensor->MutableData(), tensor->ElementsNum());
} break;
case kNumberTypeInt8: {
PrintData<int8_t>(tensor->MutableData(), tensor->ElementsNum());
} break;
case kNumberTypeUInt8: {
PrintData<uint8_t>(tensor->MutableData(), tensor->ElementsNum());
} break;
default:
std::cout << "Unsupported data type to print" << std::endl;
break;
}
}
int main(int argc, const char **argv) {
if (argc < 2) {
printf("input command is invalid\n");
usage();
return lite::RET_ERROR;
}
printf("=======run benchmark======\n");
const char *model_buffer = nullptr;
int model_size = 0;
// read the model weight .bin file via ReadInputData;
if (argc >= 3) {
model_buffer = static_cast<const char *>(ReadInputData(argv[2], &model_size));
}
lite::Context *context = nullptr;
if (argc >= 7) {
// config benchmark context
context = new (std::nothrow) lite::Context();
if (context == nullptr) {
return lite::RET_ERROR;
}
context->thread_num_ = atoi(argv[5]);
context->device_list_.resize(1);
context->device_list_[0] = {lite::DT_CPU, {{false, static_cast<lite::CpuBindMode>(atoi(argv[6]))}}};
printf("context: ThreadNum: %d, BindMode: %d\n", context->thread_num_,
context->device_list_[0].device_info_.cpu_device_info_.cpu_bind_mode_);
}
session::LiteSession *session = mindspore::session::LiteSession::CreateSession(model_buffer, model_size, context);
if (session == nullptr) {
printf("create lite session failed\n");
return lite::RET_ERROR;
}
delete[] model_buffer;
// set model inputs tensor data
Vector<tensor::MSTensor *> inputs = session->GetInputs();
size_t inputs_num = inputs.size();
void *inputs_binbuf[inputs_num];
int inputs_size[inputs_num];
for (size_t i = 0; i < inputs_num; ++i) {
inputs_size[i] = inputs[i]->Size();
}
int ret = ReadInputsFile(const_cast<char *>(argv[1]), inputs_binbuf, inputs_size, inputs_num);
if (ret != lite::RET_OK) {
return lite::RET_ERROR;
}
for (size_t i = 0; i < inputs_num; ++i) {
void *input_data = inputs[i]->MutableData();
memcpy(input_data, inputs_binbuf[i], inputs_size[i]);
}
if (argc >= 4) {
int loop_count = atoi(argv[3]);
printf("\nloop count: %d\n", loop_count);
uint64_t start_time = GetTimeUs();
for (int i = 0; i < loop_count; ++i) {
ret = session->RunGraph();
if (ret != lite::RET_OK) {
return lite::RET_ERROR;
}
}
uint64_t end_time = GetTimeUs();
float total_time = (float)(end_time - start_time) / 1000.0f;
printf("total time: %.5fms, per time: %.5fms\n", total_time, total_time / loop_count);
}
ret = session->RunGraph();
if (ret != lite::RET_OK) {
return lite::RET_ERROR;
}
printf("\noutputs: \n");
Vector<String> outputs_name = session->GetOutputTensorNames();
Vector<tensor::MSTensor *> outputs;
for (const auto &name : outputs_name) {
auto output = session->GetOutputByTensorName(name);
outputs.push_back(output);
TensorToString(output);
}
if (argc >= 5) {
lite::Calibrator *calibrator = new (std::nothrow) lite::Calibrator();
if (calibrator == nullptr) {
return lite::RET_NULL_PTR;
}
ret = calibrator->ReadCalibData(argv[4]);
if (ret != lite::RET_OK) {
return lite::RET_ERROR;
}
ret = calibrator->CompareOutputs(outputs);
if (ret != lite::RET_OK) {
return lite::RET_ERROR;
}
delete calibrator;
}
printf("========run success=======\n");
delete session;
session = nullptr;
if (context != nullptr) {
delete context;
context = nullptr;
}
for (size_t i = 0; i < inputs_num; ++i) {
free(inputs_binbuf[i]);
inputs_binbuf[i] = nullptr;
}
return lite::RET_OK;
}
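Reading usage() together with main() above, a full invocation of this benchmark took the form "benchmark input0.bin net.bin 10 calib.out 2 0" (file names here are illustrative): the inputs file(s), the weight file, the loop count, the calibration file, the runtime thread number, and the thread bind mode.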

View File

@@ -1,148 +0,0 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "calib_output.h"
#include <fstream>
#include <sstream>
#include <iostream>
#include <stdio.h>
#include <cmath>
namespace mindspore {
namespace lite {
constexpr float kToleranceVal = 0.0001;
#define MS_ERROR_IF_NULL(ptr) \
do { \
if ((ptr) == nullptr) { \
return mindspore::lite::RET_ERROR; \
} \
} while (0)
int Calibrator::ReadCalibData(const char *calib_data_path) {
std::ifstream in_file(calib_data_path);
if (!in_file.good()) {
printf("file is not exist, %s\n", calib_data_path);
return RET_ERROR;
}
if (!in_file.is_open()) {
printf("open file failed, %s\n", calib_data_path);
in_file.close();
return RET_ERROR;
}
while (!in_file.eof()) {
std::string line;
getline(in_file, line);
if (line.empty()) {
continue;
}
std::stringstream name_line(line);
std::string tensor_name;
size_t dim = 0;
name_line >> tensor_name >> dim;
size_t elements = 1;
for (size_t i = 0; i < dim; i++) {
size_t tmp_dim;
name_line >> tmp_dim;
elements *= tmp_dim;
}
getline(in_file, line);
std::stringstream data_line(line);
String name(tensor_name.c_str());
CalibTensor *output = new (std::nothrow) CalibTensor(name, elements);
MS_ERROR_IF_NULL(output);
float *data = output->MutableData();
MS_ERROR_IF_NULL(data);
for (size_t i = 0; i < elements; i++) {
data_line >> data[i];
}
calib_outputs_.push_back(output);
}
in_file.close();
return RET_OK;
}
template <typename T>
float CompareData(const T *output, const float *calib, size_t elements_num) {
float error = 0.;
if (output == nullptr || calib == nullptr) {
printf("output or calib is nullptr\n");
return error;
}
for (size_t i = 0; i < elements_num; ++i) {
if (std::isnan(output[i]) || std::isinf(output[i]) || std::isnan(calib[i]) || std::isinf(calib[i])) {
printf("error, output data is nan or inf\n");
return error;
}
error += std::abs(output[i] - calib[i]);
}
return error;
}
int Calibrator::CompareOutputs(const Vector<tensor::MSTensor *> &outputs) const {
if (outputs.size() != calib_outputs_.size()) {
printf("error, outputs and calibs size is mismatch\n");
return RET_ERROR;
}
float total_error = 0;
size_t outputs_num = outputs.size();
for (size_t i = 0; i < outputs_num; ++i) {
tensor::MSTensor *output = outputs[i];
MS_ERROR_IF_NULL(output);
CalibTensor *calib = calib_outputs_[i];
MS_ERROR_IF_NULL(calib);
if (output->tensor_name() != calib->tensor_name()) {
printf("error, output tensor name is not equal to calib\n");
return RET_ERROR;
}
if (output->ElementsNum() != calib->ElementsNum()) {
printf("error, output elements num is not equal to calib\n");
return RET_ERROR;
}
switch (output->data_type()) {
case TypeId::kNumberTypeFloat:
case TypeId::kNumberTypeFloat32: {
total_error += CompareData(static_cast<float *>(output->data()), calib->MutableData(), output->ElementsNum());
break;
}
case TypeId::kNumberTypeInt8: {
total_error += CompareData(static_cast<int8_t *>(output->data()), calib->MutableData(), output->ElementsNum());
break;
}
case TypeId::kNumberTypeUInt8: {
total_error += CompareData(static_cast<uint8_t *>(output->data()), calib->MutableData(), output->ElementsNum());
break;
}
case TypeId::kNumberTypeUInt:
case TypeId::kNumberTypeUInt32: {
total_error += CompareData(static_cast<int32_t *>(output->data()), calib->MutableData(), output->ElementsNum());
break;
}
default: {
printf("unsupported tensor data type\n");
}
}
}
if (total_error > kToleranceVal) {
printf("compare outputs failed, total error: %f\n", total_error);
return RET_ERROR;
}
printf("compare outputs success, total error: %f\n", total_error);
return RET_OK;
}
} // namespace lite
} // namespace mindspore

View File

@@ -1,73 +0,0 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_MICRO_CALIB_OUTPUT_H_
#define MINDSPORE_LITE_MICRO_CALIB_OUTPUT_H_
#include "include/lite_utils.h"
#include "include/ms_tensor.h"
#include "include/errorcode.h"
namespace mindspore {
namespace lite {
class CalibTensor {
public:
CalibTensor(String name, size_t elements_num) : tensor_name_(name), elements_num_(elements_num) {}
~CalibTensor() {
free(data_);
data_ = nullptr;
}
String tensor_name() const { return tensor_name_; }
int ElementsNum() const { return elements_num_; }
float *MutableData() {
if (data_ == nullptr) {
if (elements_num_ == 0 || elements_num_ > INT16_MAX) {
return nullptr;
}
data_ = static_cast<float *>(malloc(elements_num_ * sizeof(float)));
}
return data_;
}
private:
String tensor_name_;
int elements_num_{0};
float *data_{nullptr};
};
class Calibrator {
public:
Calibrator() = default;
~Calibrator() {
for (auto &calib : calib_outputs_) {
delete calib;
calib = nullptr;
}
calib_outputs_.clear();
}
int ReadCalibData(const char *calib_data_path);
int CompareOutputs(const Vector<tensor::MSTensor *> &outputs) const;
private:
Vector<CalibTensor *> calib_outputs_;
};
} // namespace lite
} // namespace mindspore
#endif // MINDSPORE_LITE_MICRO_CALIB_OUTPUT_H_
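A minimal sketch of how these two entry points fit together, mirroring the flow in benchmark.cc above. The function name and calibration path are illustrative, and Vector/String are the STL-free containers from lite_utils.h:

#include "calib_output.h"
#include "include/lite_session.h"
#include "include/errorcode.h"

// Read a calibration dump, then compare it against the session's outputs.
// Returns RET_OK when the accumulated error stays under kToleranceVal.
int CompareWithCalib(mindspore::session::LiteSession *session, const char *calib_path) {
  mindspore::lite::Calibrator calibrator;
  if (calibrator.ReadCalibData(calib_path) != mindspore::lite::RET_OK) {
    return mindspore::lite::RET_ERROR;
  }
  mindspore::Vector<mindspore::tensor::MSTensor *> outputs;
  for (const auto &name : session->GetOutputTensorNames()) {
    outputs.push_back(session->GetOutputByTensorName(name));
  }
  return calibrator.CompareOutputs(outputs);
}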

View File

@@ -1,95 +0,0 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "load_input.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
void *ReadInputData(const char *real_input_path, int *size) {
if (real_input_path == NULL) {
return NULL;
}
if (strstr(real_input_path, ".bin") || strstr(real_input_path, ".net")) {
FILE *file;
file = fopen(real_input_path, "rb+");
if (!file) {
printf("Can't find %s\n", real_input_path);
return NULL;
}
int curr_file_posi = ftell(file);
fseek(file, 0, SEEK_END);
*size = ftell(file);
unsigned char *buf = malloc((*size));
(void)memset(buf, 0, (*size));
fseek(file, curr_file_posi, SEEK_SET);
int read_size = (int)(fread(buf, 1, *size, file));
if (read_size != (*size)) {
printf("read file failed, total file size: %d, read_size: %d\n", (*size), read_size);
fclose(file);
free(buf);
return NULL;
}
fclose(file);
return (void *)buf;
} else {
printf("input data file should be .bin , .net");
return NULL;
}
}
void SaveOutputData(char *final_name, unsigned char *output_data, unsigned int out_size) {
FILE *output_file;
output_file = fopen(final_name, "w");
if (output_file == NULL) {
printf("fopen output file: %s failed\n", final_name);
return;
}
unsigned char str[out_size];
for (unsigned int i = 0; i < out_size; ++i) {
str[i] = output_data[i];
fprintf(output_file, "%d\t", str[i]);
}
fclose(output_file);
}
int ReadInputsFile(char *path, void **buffers, const int *inputs_size, int inputs_num) {
char *inputs_path[inputs_num];
char *delim = ",";
char *token;
int i = 0;
while ((token = strtok_r(path, delim, &path))) {
if (i >= inputs_num) {
printf("inputs num is error, need: %d\n", inputs_num);
return -1;
}
inputs_path[i] = token;
printf("input %d: %s\n", i, inputs_path[i]);
i++;
}
for (i = 0; i < inputs_num; ++i) {
int size = 0;
buffers[i] = ReadInputData(inputs_path[i], &size);
if (size != inputs_size[i] || buffers[i] == NULL) {
printf("size mismatch, %s, input: %d, needed: %d\n", inputs_path[i], size, inputs_size[i]);
return -1;
}
}
return 0;
}

View File

@@ -1,36 +0,0 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MICRO_EXAMPLE_LOAD_INPUT_LOAD_INPUT_H_
#define MICRO_EXAMPLE_LOAD_INPUT_LOAD_INPUT_H_
#ifdef __cplusplus
extern "C" {
#endif
void *ReadInputData(const char *real_input_path, int *size);
void SaveOutputData(char *final_name, unsigned char *output_data, unsigned int out_size);
int ReadInputsFile(char *path, void **buffers, const int *inputs_size, int inputs_num);
#ifdef __cplusplus
}
#endif
#endif // MICRO_EXAMPLE_LOAD_INPUT_LOAD_INPUT_H_
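For reference, a small hedged example of driving these C helpers (paths and sizes are made up; in benchmark.cc the sizes come from MSTensor::Size()). Note that ReadInputsFile tokenizes the path string in place with strtok_r, so the caller must pass a mutable buffer:

#include "load_input.h"

int LoadTwoInputs(void **buffers) {
  // No string literal: ReadInputsFile writes NUL separators into this buffer.
  char paths[] = "input0.bin,input1.bin";
  const int sizes[2] = {3136, 40};  // expected byte size of each input
  return ReadInputsFile(paths, buffers, sizes, 2);  // 0 on success, -1 on mismatch
}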

View File

@@ -1,134 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_INCLUDE_API_CELL_H
#define MINDSPORE_INCLUDE_API_CELL_H
#include <string>
#include <vector>
#include <map>
#include <memory>
#include "include/api/status.h"
#include "include/api/types.h"
#include "include/api/graph.h"
namespace mindspore {
class InputAndOutput;
using Input = InputAndOutput;
using Output = InputAndOutput;
class MS_API CellBase {
public:
CellBase() = default;
virtual ~CellBase() = default;
virtual std::vector<Output> Construct(const std::vector<Input> &inputs) { return {}; }
virtual std::shared_ptr<CellBase> Clone() const = 0;
virtual Status Run(const std::vector<MSTensor> &inputs, std::vector<MSTensor> *outputs) { return kSuccess; }
std::vector<Output> operator()(const std::vector<Input> &inputs) const;
};
template <class T>
class MS_API Cell : public CellBase {
public:
virtual ~Cell() = default;
std::shared_ptr<CellBase> Clone() const override { return std::make_shared<T>(static_cast<const T &>(*this)); }
};
class MS_API ParameterCell final : public Cell<ParameterCell> {
public:
ParameterCell() = default;
~ParameterCell() override = default;
ParameterCell(const ParameterCell &);
ParameterCell &operator=(const ParameterCell &);
ParameterCell(ParameterCell &&);
ParameterCell &operator=(ParameterCell &&);
explicit ParameterCell(const MSTensor &);
ParameterCell &operator=(const MSTensor &);
explicit ParameterCell(MSTensor &&);
ParameterCell &operator=(MSTensor &&);
MSTensor GetTensor() const { return tensor_; }
private:
MSTensor tensor_;
};
class MS_API OpCellBase : public CellBase {
public:
explicit OpCellBase(const std::string &name) : name_(name) {}
~OpCellBase() override = default;
const std::string &GetOpType() const { return name_; }
protected:
std::string name_;
};
template <class T>
class MS_API OpCell : public OpCellBase, public std::enable_shared_from_this<T> {
public:
explicit OpCell(const std::string &name) : OpCellBase(name) {}
~OpCell() override = default;
std::shared_ptr<CellBase> Clone() const override { return std::make_shared<T>(static_cast<const T &>(*this)); }
};
class MS_API GraphCell final : public Cell<GraphCell> {
public:
class GraphImpl;
GraphCell() = default;
~GraphCell() override = default;
explicit GraphCell(const Graph &);
explicit GraphCell(Graph &&);
explicit GraphCell(const std::shared_ptr<Graph> &);
const std::shared_ptr<Graph> &GetGraph() const { return graph_; }
Status Run(const std::vector<MSTensor> &inputs, std::vector<MSTensor> *outputs) override;
std::vector<MSTensor> GetInputs();
std::vector<MSTensor> GetOutputs();
private:
friend class Model;
friend class ModelImpl;
Status Load(uint32_t device_id);
std::shared_ptr<Graph> graph_;
std::shared_ptr<GraphImpl> executor_;
};
class MS_API InputAndOutput {
public:
InputAndOutput();
~InputAndOutput() = default;
// no explicit
InputAndOutput(const MSTensor &); // NOLINT(runtime/explicit)
InputAndOutput(MSTensor &&); // NOLINT(runtime/explicit)
InputAndOutput(const std::shared_ptr<CellBase> &, const std::vector<InputAndOutput> &, int32_t index);
int32_t GetIndex() const { return index_; }
void SetIndex(int32_t index) { index_ = index; }
private:
std::shared_ptr<CellBase> cell_;
std::vector<InputAndOutput> prev_;
int32_t index_;
};
} // namespace mindspore
#endif // MINDSPORE_INCLUDE_API_CELL_H
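A rough usage sketch built only from the declarations above; in the full API these cells are normally handed to a Model rather than run directly (Load is private and friended to Model), so treat this as illustrative:

#include <vector>
#include "include/api/cell.h"
#include "include/api/graph.h"
#include "include/api/status.h"
#include "include/api/types.h"

// Wrap a loaded Graph in a GraphCell and execute it once.
mindspore::Status RunGraphOnce(const mindspore::Graph &graph,
                               const std::vector<mindspore::MSTensor> &inputs,
                               std::vector<mindspore::MSTensor> *outputs) {
  mindspore::GraphCell cell(graph);
  return cell.Run(inputs, outputs);
}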

View File

@@ -1,455 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_INCLUDE_API_CONTEXT_H
#define MINDSPORE_INCLUDE_API_CONTEXT_H
#include <string>
#include <memory>
#include <vector>
#include <map>
#include "include/api/types.h"
#include "include/api/dual_abi_helper.h"
namespace mindspore {
enum DeviceType {
kCPU = 0,
kGPU,
kKirinNPU,
kAscend,
kAscend910,
kAscend310,
// add new type here
kInvalidDeviceType = 100,
};
class Allocator;
class Delegate;
class DeviceInfoContext;
/// \brief Context is used to store environment variables during execution.
class MS_API Context {
public:
struct Data;
Context();
~Context() = default;
/// \brief Set the number of threads at runtime. Only valid for Lite.
///
/// \param[in] thread_num the number of threads at runtime.
void SetThreadNum(int32_t thread_num);
/// \brief Get the current thread number setting. Only valid for Lite.
///
/// \return The current thread number setting.
int32_t GetThreadNum() const;
/// \brief Set the thread affinity to CPU cores. Only valid for Lite.
///
/// \param[in] mode: 0: no affinities, 1: big cores first, 2: little cores first
void SetThreadAffinity(int mode);
/// \brief Get the thread affinity of CPU cores. Only valid for Lite.
///
/// \return Thread affinity to CPU cores. 0: no affinities, 1: big cores first, 2: little cores first
int GetThreadAffinityMode() const;
/// \brief Set the thread lists to CPU cores. Only valid for Lite.
///
/// \note If core_list and mode are set by SetThreadAffinity at the same time, the core_list is effective, but the
/// mode is not effective.
///
/// \param[in] core_list: a vector of thread core lists.
void SetThreadAffinity(const std::vector<int> &core_list);
/// \brief Get the thread lists of CPU cores. Only valid for Lite.
///
/// \return core_list: a vector of thread core lists.
std::vector<int32_t> GetThreadAffinityCoreList() const;
/// \brief Set the status whether to perform model inference or training in parallel. Only valid for Lite.
///
/// \param[in] is_parallel: true, parallel; false, not in parallel.
void SetEnableParallel(bool is_parallel);
/// \brief Get the status whether to perform model inference or training in parallel. Only valid for Lite.
///
/// \return Bool value that indicates whether in parallel.
bool GetEnableParallel() const;
/// \brief Set Delegate to access third-party AI framework. Only valid for Lite.
///
/// \param[in] Pointer to the custom delegate.
void SetDelegate(const std::shared_ptr<Delegate> &delegate);
/// \brief Get the delegate of the third-party AI framework. Only valid for Lite.
///
/// \return Pointer to the custom delegate.
std::shared_ptr<Delegate> GetDelegate() const;
/// \brief Get a mutable reference of DeviceInfoContext vector in this context. Only MindSpore Lite supports
/// heterogeneous scenarios with multiple members in the vector.
///
/// \return Mutable reference of DeviceInfoContext vector in this context.
std::vector<std::shared_ptr<DeviceInfoContext>> &MutableDeviceInfo();
private:
std::shared_ptr<Data> data_;
};
/// \brief DeviceInfoContext defines different device contexts.
class MS_API DeviceInfoContext : public std::enable_shared_from_this<DeviceInfoContext> {
public:
struct Data;
DeviceInfoContext();
virtual ~DeviceInfoContext() = default;
/// \brief Get the type of this DeviceInfoContext.
///
/// \return Type of this DeviceInfoContext.
virtual enum DeviceType GetDeviceType() const = 0;
/// \brief A similar function to RTTI is provided when the -fno-rtti compilation option is turned on, which converts
/// DeviceInfoContext to a shared pointer of type T, and returns nullptr if the conversion fails.
///
/// \param T Type
/// \return A pointer of type T after conversion. If the conversion fails, it will be nullptr.
template <class T>
std::shared_ptr<T> Cast() {
static_assert(std::is_base_of<DeviceInfoContext, T>::value, "Wrong cast type.");
if (GetDeviceType() != T().GetDeviceType()) {
return nullptr;
}
return std::static_pointer_cast<T>(shared_from_this());
}
/// \brief obtain provider's name
///
/// \return provider's name.
std::string GetProvider() const;
/// \brief set provider's name.
///
/// \param[in] provider define the provider's name.
void SetProvider(const std::string &provider);
/// \brief obtain provider's device type.
///
/// \return provider's device type.
std::string GetProviderDevice() const;
/// \brief set provider's device type.
///
/// \param[in] device define the provider's device type.EG: CPU.
void SetProviderDevice(const std::string &device);
/// \brief set memory allocator.
///
/// \param[in] allocator define the memory allocator which can be defined by user.
void SetAllocator(const std::shared_ptr<Allocator> &allocator);
/// \brief obtain memory allocator.
///
/// \return memory allocator.
std::shared_ptr<Allocator> GetAllocator() const;
protected:
std::shared_ptr<Data> data_;
};
/// \brief Derived from DeviceInfoContext, The configuration of the model running on the CPU. This option is only valid
/// for MindSpore Lite.
class MS_API CPUDeviceInfo : public DeviceInfoContext {
public:
/// \brief Get the type of this DeviceInfoContext.
///
/// \return Type of this DeviceInfoContext.
enum DeviceType GetDeviceType() const override { return DeviceType::kCPU; };
/// \brief Set enables to perform the float16 inference
///
/// \param[in] is_fp16 Enable float16 inference or not.
void SetEnableFP16(bool is_fp16);
/// \brief Get enables to perform the float16 inference
///
/// \return Whether enable float16 inference.
bool GetEnableFP16() const;
};
/// \brief Derived from DeviceInfoContext, The configuration of the model running on the NPU. This option is only valid
/// for MindSpore Lite.
class MS_API KirinNPUDeviceInfo : public DeviceInfoContext {
public:
/// \brief Get the type of this DeviceInfoContext.
///
/// \return Type of this DeviceInfoContext.
enum DeviceType GetDeviceType() const override { return DeviceType::kKirinNPU; };
/// \brief Set the NPU frequency.
///
/// \param[in] frequency Can be set to 1 (low power consumption), 2 (balanced), 3 (high performance), 4 (extreme
/// performance), default as 3.
void SetFrequency(int frequency);
/// \brief Get the NPU frequency.
///
/// \return NPU frequency
int GetFrequency() const;
};
/// \brief Derived from DeviceInfoContext, The configuration of the model running on the GPU.
class MS_API GPUDeviceInfo : public DeviceInfoContext {
public:
/// \brief Get the type of this DeviceInfoContext.
///
/// \return Type of this DeviceInfoContext.
enum DeviceType GetDeviceType() const override { return DeviceType::kGPU; };
/// \brief Set device id.
///
/// \param[in] device_id The device id.
void SetDeviceID(uint32_t device_id);
/// \brief Get the device id.
///
/// \return The device id.
uint32_t GetDeviceID() const;
/// \brief Get the distribution rank id.
///
/// \return The device id.
int GetRankID() const;
/// \brief Get the distribution group size.
///
/// \return The device id.
int GetGroupSize() const;
/// \brief Set the precision mode.
///
/// \param[in] precision_mode Optional "origin", "fp16". "origin" is set as default.
inline void SetPrecisionMode(const std::string &precision_mode);
/// \brief Get the precision mode.
///
/// \return The precision mode.
inline std::string GetPrecisionMode() const;
/// \brief Set enables to perform the float16 inference
///
/// \param[in] is_fp16 Enable float16 inference or not.
void SetEnableFP16(bool is_fp16);
/// \brief Get enables to perform the float16 inference
///
/// \return Whether enable float16 inference.
bool GetEnableFP16() const;
private:
void SetPrecisionMode(const std::vector<char> &precision_mode);
std::vector<char> GetPrecisionModeChar() const;
};
void GPUDeviceInfo::SetPrecisionMode(const std::string &precision_mode) {
SetPrecisionMode(StringToChar(precision_mode));
}
std::string GPUDeviceInfo::GetPrecisionMode() const { return CharToString(GetPrecisionModeChar()); }
/// \brief Derived from DeviceInfoContext, The configuration of the model running on the Ascend310. This option is
/// invalid for MindSpore Lite.
class MS_API AscendDeviceInfo : public DeviceInfoContext {
public:
/// \brief Get the type of this DeviceInfoContext.
///
/// \return Type of this DeviceInfoContext.
enum DeviceType GetDeviceType() const override { return DeviceType::kAscend; };
/// \brief Set device id.
///
/// \param[in] device_id The device id.
void SetDeviceID(uint32_t device_id);
/// \brief Get the device id.
///
/// \return The device id.
uint32_t GetDeviceID() const;
/// \brief Set AIPP configuration file path.
///
/// \param[in] cfg_path AIPP configuration file path.
inline void SetInsertOpConfigPath(const std::string &cfg_path);
/// \brief Get AIPP configuration file path.
///
/// \return AIPP configuration file path.
inline std::string GetInsertOpConfigPath() const;
/// \brief Set format of model inputs.
///
/// \param[in] format Optional "NCHW", "NHWC", etc.
inline void SetInputFormat(const std::string &format);
/// \brief Get format of model inputs.
///
/// \return The format of model inputs.
inline std::string GetInputFormat() const;
/// \brief Set shape of model inputs.
///
/// \param[in] shape e.g. "input_op_name1: 1,2,3,4;input_op_name2: 4,3,2,1".
inline void SetInputShape(const std::string &shape);
/// \brief Get shape of model inputs.
///
/// \return The shape of model inputs.
inline std::string GetInputShape() const;
/// \brief Set shape of model inputs.
///
/// \param[in] shape e.g. {{1, {1,2,3,4}}, {2, {4,3,2,1}}} means the first input shape 1,2,3,4 and the second input
/// shape 4,3,2,1.
void SetInputShapeMap(const std::map<int, std::vector<int>> &shape);
/// \brief Get shape of model inputs.
///
/// \return The shape of model inputs.
std::map<int, std::vector<int>> GetInputShapeMap() const;
void SetDynamicBatchSize(const std::vector<size_t> &dynamic_batch_size);
inline std::string GetDynamicBatchSize() const;
/// \brief Set the dynamic image size of model inputs.
///
/// \param[in] image size hw e.g. "66,88;32,64" means h1:66,w1:88; h2:32,w2:64.
inline void SetDynamicImageSize(const std::string &dynamic_image_size);
/// \brief Get dynamic image size of model inputs.
///
/// \return The image size of model inputs.
inline std::string GetDynamicImageSize() const;
/// \brief Set type of model outputs.
///
/// \param[in] output_type FP32, UINT8 or FP16, default as FP32.
void SetOutputType(enum DataType output_type);
/// \brief Get type of model outputs.
///
/// \return The set type of model outputs.
enum DataType GetOutputType() const;
/// \brief Set precision mode of model.
///
/// \param[in] precision_mode Optional "force_fp16", "allow_fp32_to_fp16", "must_keep_origin_dtype" and
/// "allow_mix_precision", "force_fp16" is set as default
inline void SetPrecisionMode(const std::string &precision_mode);
/// \brief Get precision mode of model.
///
/// \return The set type of model outputs
inline std::string GetPrecisionMode() const;
/// \brief Set op select implementation mode.
///
/// \param[in] op_select_impl_mode Optional "high_performance" and "high_precision", "high_performance" is set as
/// default.
inline void SetOpSelectImplMode(const std::string &op_select_impl_mode);
/// \brief Get op select implementation mode.
///
/// \return The set op select implementation mode.
inline std::string GetOpSelectImplMode() const;
inline void SetFusionSwitchConfigPath(const std::string &cfg_path);
inline std::string GetFusionSwitchConfigPath() const;
// Optional "l1_optimize", "l2_optimize", "off_optimize" or "l1_and_l2_optimize", default as "l2_optimize"
inline void SetBufferOptimizeMode(const std::string &buffer_optimize_mode);
inline std::string GetBufferOptimizeMode() const;
private:
void SetInsertOpConfigPath(const std::vector<char> &cfg_path);
std::vector<char> GetInsertOpConfigPathChar() const;
void SetInputFormat(const std::vector<char> &format);
std::vector<char> GetInputFormatChar() const;
void SetInputShape(const std::vector<char> &shape);
std::vector<char> GetInputShapeChar() const;
std::vector<char> GetDynamicBatchSizeChar() const;
void SetDynamicImageSize(const std::vector<char> &dynamic_image_size);
std::vector<char> GetDynamicImageSizeChar() const;
void SetPrecisionMode(const std::vector<char> &precision_mode);
std::vector<char> GetPrecisionModeChar() const;
void SetOpSelectImplMode(const std::vector<char> &op_select_impl_mode);
std::vector<char> GetOpSelectImplModeChar() const;
void SetFusionSwitchConfigPath(const std::vector<char> &cfg_path);
std::vector<char> GetFusionSwitchConfigPathChar() const;
void SetBufferOptimizeMode(const std::vector<char> &buffer_optimize_mode);
std::vector<char> GetBufferOptimizeModeChar() const;
};
using Ascend310DeviceInfo = AscendDeviceInfo;
using Ascend910DeviceInfo = AscendDeviceInfo;
void AscendDeviceInfo::SetInsertOpConfigPath(const std::string &cfg_path) {
SetInsertOpConfigPath(StringToChar(cfg_path));
}
std::string AscendDeviceInfo::GetInsertOpConfigPath() const { return CharToString(GetInsertOpConfigPathChar()); }
void AscendDeviceInfo::SetInputFormat(const std::string &format) { SetInputFormat(StringToChar(format)); }
std::string AscendDeviceInfo::GetInputFormat() const { return CharToString(GetInputFormatChar()); }
void AscendDeviceInfo::SetInputShape(const std::string &shape) { SetInputShape(StringToChar(shape)); }
std::string AscendDeviceInfo::GetInputShape() const { return CharToString(GetInputShapeChar()); }
std::string AscendDeviceInfo::GetDynamicBatchSize() const { return CharToString(GetDynamicBatchSizeChar()); }
void AscendDeviceInfo::SetDynamicImageSize(const std::string &dynamic_image_size) {
SetDynamicImageSize(StringToChar(dynamic_image_size));
}
std::string AscendDeviceInfo::GetDynamicImageSize() const { return CharToString(GetDynamicImageSizeChar()); }
void AscendDeviceInfo::SetPrecisionMode(const std::string &precision_mode) {
SetPrecisionMode(StringToChar(precision_mode));
}
std::string AscendDeviceInfo::GetPrecisionMode() const { return CharToString(GetPrecisionModeChar()); }
void AscendDeviceInfo::SetOpSelectImplMode(const std::string &op_select_impl_mode) {
SetOpSelectImplMode(StringToChar(op_select_impl_mode));
}
std::string AscendDeviceInfo::GetOpSelectImplMode() const { return CharToString(GetOpSelectImplModeChar()); }
void AscendDeviceInfo::SetFusionSwitchConfigPath(const std::string &cfg_path) {
SetFusionSwitchConfigPath(StringToChar(cfg_path));
}
std::string AscendDeviceInfo::GetFusionSwitchConfigPath() const {
return CharToString(GetFusionSwitchConfigPathChar());
}
void AscendDeviceInfo::SetBufferOptimizeMode(const std::string &buffer_optimize_mode) {
SetBufferOptimizeMode(StringToChar(buffer_optimize_mode));
}
std::string AscendDeviceInfo::GetBufferOptimizeMode() const { return CharToString(GetBufferOptimizeModeChar()); }
} // namespace mindspore
#endif // MINDSPORE_INCLUDE_API_CONTEXT_H
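A short sketch of assembling a Lite CPU context with the setters declared above; the thread count and affinity mode are arbitrary example values:

#include <memory>
#include "include/api/context.h"

std::shared_ptr<mindspore::Context> MakeCpuContext() {
  auto context = std::make_shared<mindspore::Context>();
  context->SetThreadNum(2);
  context->SetThreadAffinity(1);  // 1: bind big cores first
  auto cpu_info = std::make_shared<mindspore::CPUDeviceInfo>();
  cpu_info->SetEnableFP16(false);  // keep float32 inference
  context->MutableDeviceInfo().push_back(cpu_info);
  return context;
}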

View File

@@ -1,43 +0,0 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_INCLUDE_API_DATA_TYPE_H_
#define MINDSPORE_INCLUDE_API_DATA_TYPE_H_
namespace mindspore {
enum class DataType : int {
kTypeUnknown = 0,
kObjectTypeString = 12,
kObjectTypeList = 13,
kObjectTypeTuple = 14,
kObjectTypeTensorType = 17,
kNumberTypeBool = 30,
kNumberTypeInt8 = 32,
kNumberTypeInt16 = 33,
kNumberTypeInt32 = 34,
kNumberTypeInt64 = 35,
kNumberTypeUInt8 = 37,
kNumberTypeUInt16 = 38,
kNumberTypeUInt32 = 39,
kNumberTypeUInt64 = 40,
kNumberTypeFloat16 = 42,
kNumberTypeFloat32 = 43,
kNumberTypeFloat64 = 44,
kNumberTypeEnd = 46,
// add new enum here
kInvalidType = INT32_MAX,
};
} // namespace mindspore
#endif // MINDSPORE_INCLUDE_API_DATA_TYPE_H_
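One common use of this enum is mapping a type id to its element width. A hedged helper (not part of the header; the include path follows the header's guard, and the 1-byte bool width is an assumption):

#include <cstddef>
#include "include/api/data_type.h"

// Byte width of one element; 0 for non-numeric or unknown types.
size_t ElementByteSize(mindspore::DataType type) {
  switch (type) {
    case mindspore::DataType::kNumberTypeBool:     // assumed 1 byte
    case mindspore::DataType::kNumberTypeInt8:
    case mindspore::DataType::kNumberTypeUInt8:
      return 1;
    case mindspore::DataType::kNumberTypeInt16:
    case mindspore::DataType::kNumberTypeUInt16:
    case mindspore::DataType::kNumberTypeFloat16:
      return 2;
    case mindspore::DataType::kNumberTypeInt32:
    case mindspore::DataType::kNumberTypeUInt32:
    case mindspore::DataType::kNumberTypeFloat32:
      return 4;
    case mindspore::DataType::kNumberTypeInt64:
    case mindspore::DataType::kNumberTypeUInt64:
    case mindspore::DataType::kNumberTypeFloat64:
      return 8;
    default:
      return 0;
  }
}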

View File

@@ -1,164 +0,0 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_INCLUDE_API_DUAL_ABI_HELPER_H_
#define MINDSPORE_INCLUDE_API_DUAL_ABI_HELPER_H_
#include <algorithm>
#include <map>
#include <memory>
#include <optional>
#include <string>
#include <set>
#include <unordered_map>
#include <utility>
#include <vector>
namespace mindspore {
inline std::vector<char> StringToChar(const std::string &s) { return std::vector<char>(s.begin(), s.end()); }
inline std::string CharToString(const std::vector<char> &c) { return std::string(c.begin(), c.end()); }
inline std::optional<std::vector<char>> OptionalStringToChar(const std::optional<std::string> &s) {
if (s == std::nullopt) return std::nullopt;
std::optional<std::vector<char>> ret = std::vector<char>(s->begin(), s->end());
return ret;
}
inline std::optional<std::string> OptionalCharToString(const std::optional<std::vector<char>> &c) {
if (c == std::nullopt) return std::nullopt;
std::optional<std::string> ret = std::string(c->begin(), c->end());
return ret;
}
inline std::pair<std::vector<char>, int32_t> PairStringToChar(const std::pair<std::string, int32_t> &s) {
return std::pair<std::vector<char>, int32_t>(std::vector<char>(s.first.begin(), s.first.end()), s.second);
}
inline std::pair<std::string, int32_t> PairCharToString(const std::pair<std::vector<char>, int32_t> &c) {
return std::pair<std::string, int32_t>(std::string(c.first.begin(), c.first.end()), c.second);
}
inline std::vector<std::vector<char>> VectorStringToChar(const std::vector<std::string> &s) {
std::vector<std::vector<char>> ret;
std::transform(s.begin(), s.end(), std::back_inserter(ret),
[](auto str) { return std::vector<char>(str.begin(), str.end()); });
return ret;
}
inline std::vector<std::string> VectorCharToString(const std::vector<std::vector<char>> &c) {
std::vector<std::string> ret;
std::transform(c.begin(), c.end(), std::back_inserter(ret),
[](auto ch) { return std::string(ch.begin(), ch.end()); });
return ret;
}
inline std::set<std::vector<char>> SetStringToChar(const std::set<std::string> &s) {
std::set<std::vector<char>> ret;
std::transform(s.begin(), s.end(), std::inserter(ret, ret.begin()),
[](auto str) { return std::vector<char>(str.begin(), str.end()); });
return ret;
}
inline std::set<std::string> SetCharToString(const std::set<std::vector<char>> &c) {
std::set<std::string> ret;
std::transform(c.begin(), c.end(), std::inserter(ret, ret.begin()),
[](auto ch) { return std::string(ch.begin(), ch.end()); });
return ret;
}
inline std::map<std::vector<char>, int32_t> MapStringToChar(const std::map<std::string, int32_t> &s) {
std::map<std::vector<char>, int32_t> ret;
std::transform(s.begin(), s.end(), std::inserter(ret, ret.begin()), [](auto str) {
return std::pair<std::vector<char>, int32_t>(std::vector<char>(str.first.begin(), str.first.end()), str.second);
});
return ret;
}
inline std::map<std::string, int32_t> MapCharToString(const std::map<std::vector<char>, int32_t> &c) {
std::map<std::string, int32_t> ret;
std::transform(c.begin(), c.end(), std::inserter(ret, ret.begin()), [](auto ch) {
return std::pair<std::string, int32_t>(std::string(ch.first.begin(), ch.first.end()), ch.second);
});
return ret;
}
inline std::map<std::vector<char>, std::vector<char>> UnorderedMapStringToChar(
const std::unordered_map<std::string, std::string> &s) {
std::map<std::vector<char>, std::vector<char>> ret;
std::transform(s.begin(), s.end(), std::inserter(ret, ret.begin()), [](auto str) {
return std::pair<std::vector<char>, std::vector<char>>(std::vector<char>(str.first.begin(), str.first.end()),
std::vector<char>(str.second.begin(), str.second.end()));
});
return ret;
}
inline std::unordered_map<std::string, std::string> UnorderedMapCharToString(
const std::map<std::vector<char>, std::vector<char>> &c) {
std::unordered_map<std::string, std::string> ret;
std::transform(c.begin(), c.end(), std::inserter(ret, ret.begin()), [](auto ch) {
return std::pair<std::string, std::string>(std::string(ch.first.begin(), ch.first.end()),
std::string(ch.second.begin(), ch.second.end()));
});
return ret;
}
inline std::vector<std::pair<std::vector<char>, std::vector<int32_t>>> ClassIndexStringToChar(
const std::vector<std::pair<std::string, std::vector<int32_t>>> &s) {
std::vector<std::pair<std::vector<char>, std::vector<int32_t>>> ret;
std::transform(s.begin(), s.end(), std::back_inserter(ret), [](auto str) {
return std::pair<std::vector<char>, std::vector<int32_t>>(std::vector<char>(str.first.begin(), str.first.end()),
str.second);
});
return ret;
}
inline std::vector<std::pair<std::string, std::vector<int32_t>>> ClassIndexCharToString(
const std::vector<std::pair<std::vector<char>, std::vector<int32_t>>> &c) {
std::vector<std::pair<std::string, std::vector<int32_t>>> ret;
std::transform(c.begin(), c.end(), std::back_inserter(ret), [](auto ch) {
return std::pair<std::string, std::vector<int32_t>>(std::string(ch.first.begin(), ch.first.end()), ch.second);
});
return ret;
}
template <class T>
inline std::map<std::vector<char>, T> PadInfoStringToChar(const std::map<std::string, T> &s_pad_info) {
std::map<std::vector<char>, T> ret;
std::transform(s_pad_info.begin(), s_pad_info.end(), std::inserter(ret, ret.begin()), [](auto str) {
return std::pair<std::vector<char>, T>(std::vector<char>(str.first.begin(), str.first.end()), str.second);
});
return ret;
}
template <class T>
inline std::map<std::string, T> PadInfoCharToString(const std::map<std::vector<char>, T> &c_pad_info) {
std::map<std::string, T> ret;
std::transform(c_pad_info.begin(), c_pad_info.end(), std::inserter(ret, ret.begin()), [](auto ch) {
return std::pair<std::string, T>(std::string(ch.first.begin(), ch.first.end()), ch.second);
});
return ret;
}
template <class T>
inline void TensorMapCharToString(const std::map<std::vector<char>, T> *c, std::unordered_map<std::string, T> *s) {
for (auto ch : *c) {
auto key = std::string(ch.first.begin(), ch.first.end());
auto val = ch.second;
s->insert(std::pair<std::string, T>(key, val));
}
}
} // namespace mindspore
#endif // MINDSPORE_INCLUDE_API_DUAL_ABI_HELPER_H_
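Taken together, these helpers implement the dual-ABI convention used by the public headers below: every entry point that would otherwise pass std::string across the library boundary passes std::vector<char> instead, with inline wrappers doing the conversion in the caller's translation unit, so binaries built against different standard-library ABIs stay link-compatible. A minimal round-trip sketch, assuming the deleted header is still on the include path:

// dual_abi_demo.cc -- illustrative only
#include <cassert>
#include <string>
#include <vector>
#include "include/api/dual_abi_helper.h"  // the header removed above

int main() {
  std::string name = "conv2d_1";
  // The char-vector form is what actually crosses the ABI boundary.
  std::vector<char> wire = mindspore::StringToChar(name);
  assert(mindspore::CharToString(wire) == name);
  return 0;
}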

View File

@ -1,46 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_INCLUDE_API_GRAPH_H
#define MINDSPORE_INCLUDE_API_GRAPH_H
#include <cstddef>
#include <vector>
#include <map>
#include <memory>
#include "include/api/status.h"
#include "include/api/types.h"
namespace mindspore {
class MS_API Graph {
public:
class GraphData;
Graph();
explicit Graph(const std::shared_ptr<GraphData> &graph_data);
explicit Graph(std::shared_ptr<GraphData> &&graph_data);
explicit Graph(std::nullptr_t);
~Graph();
enum ModelType ModelType() const;
bool operator==(std::nullptr_t) const;
bool operator!=(std::nullptr_t) const;
private:
friend class GraphCell;
friend class ModelImpl;
std::shared_ptr<GraphData> graph_data_;
};
} // namespace mindspore
#endif // MINDSPORE_INCLUDE_API_GRAPH_H

View File

@ -1,80 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_INCLUDE_API_MODEL_H
#define MINDSPORE_INCLUDE_API_MODEL_H
#include <string>
#include <vector>
#include <map>
#include <memory>
#include <utility>
#include "include/api/status.h"
#include "include/api/types.h"
#include "include/api/graph.h"
#include "include/api/context.h"
#include "include/api/cell.h"
#include "include/api/dual_abi_helper.h"
namespace mindspore {
class ModelImpl;
class MS_API Model {
public:
Model();
~Model();
Model(const Model &) = delete;
void operator=(const Model &) = delete;
Status Build(GraphCell graph, const std::shared_ptr<Context> &model_context = nullptr);
Status Resize(const std::vector<MSTensor> &inputs, const std::vector<std::vector<int64_t>> &dims);
Status Predict(const std::vector<MSTensor> &inputs, std::vector<MSTensor> *outputs);
std::vector<MSTensor> GetInputs();
inline MSTensor GetInputByTensorName(const std::string &tensor_name);
std::vector<MSTensor> GetOutputs();
inline std::vector<std::string> GetOutputTensorNames();
inline MSTensor GetOutputByTensorName(const std::string &tensor_name);
inline std::vector<MSTensor> GetOutputsByNodeName(const std::string &node_name);
static bool CheckModelSupport(enum DeviceType device_type, ModelType model_type);
private:
// api without std::string
MSTensor GetInputByTensorName(const std::vector<char> &tensor_name);
std::vector<std::vector<char>> GetOutputTensorNamesChar();
MSTensor GetOutputByTensorName(const std::vector<char> &tensor_name);
std::vector<MSTensor> GetOutputsByNodeName(const std::vector<char> &node_name);
std::shared_ptr<ModelImpl> impl_;
};
MSTensor Model::GetInputByTensorName(const std::string &tensor_name) {
return GetInputByTensorName(StringToChar(tensor_name));
}
std::vector<std::string> Model::GetOutputTensorNames() { return VectorCharToString(GetOutputTensorNamesChar()); }
MSTensor Model::GetOutputByTensorName(const std::string &tensor_name) {
return GetOutputByTensorName(StringToChar(tensor_name));
}
std::vector<MSTensor> Model::GetOutputsByNodeName(const std::string &node_name) {
return GetOutputsByNodeName(StringToChar(node_name));
}
} // namespace mindspore
#endif // MINDSPORE_INCLUDE_API_MODEL_H
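Note how each inline std::string method forwards to a private std::vector<char> overload through the dual-ABI helpers. A hedged sketch of the intended call sequence, assuming a Graph already populated by Serialization::Load (declared in the next file) and GraphCell from include/api/cell.h:

mindspore::Model model;
// Build with the default (nullptr) context; device configuration is elided.
if (model.Build(mindspore::GraphCell(graph)) != mindspore::kSuccess) {
  // handle the error
}
std::vector<mindspore::MSTensor> inputs = model.GetInputs();
// ... fill the inputs through MSTensor::MutableData() ...
std::vector<mindspore::MSTensor> outputs;
mindspore::Status ret = model.Predict(inputs, &outputs);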

View File

@ -1,47 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_INCLUDE_API_SERIALIZATION_H
#define MINDSPORE_INCLUDE_API_SERIALIZATION_H
#include <string>
#include <vector>
#include <map>
#include <memory>
#include "include/api/status.h"
#include "include/api/types.h"
#include "include/api/model.h"
#include "include/api/graph.h"
#include "include/api/dual_abi_helper.h"
namespace mindspore {
class MS_API Serialization {
public:
static Status Load(const void *model_data, size_t data_size, ModelType model_type, Graph *graph);
inline static Status Load(const std::string &file, ModelType model_type, Graph *graph);
static Status LoadCheckPoint(const std::string &ckpt_file, std::map<std::string, Buffer> *parameters);
static Status SetParameters(const std::map<std::string, Buffer> &parameters, Model *model);
static Status ExportModel(const Model &model, ModelType model_type, Buffer *model_data);
static Status ExportModel(const Model &model, ModelType model_type, const std::string &model_file);
private:
static Status Load(const std::vector<char> &file, ModelType model_type, Graph *graph);
};
Status Serialization::Load(const std::string &file, ModelType model_type, Graph *graph) {
return Load(StringToChar(file), model_type, graph);
}
} // namespace mindspore
#endif // MINDSPORE_INCLUDE_API_SERIALIZATION_H
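The inline Load(const std::string &, ...) overload again forwards through StringToChar. A minimal load sketch; "model.mindir" is a placeholder path:

mindspore::Graph graph;
mindspore::Status st = mindspore::Serialization::Load("model.mindir", mindspore::kMindIR, &graph);
if (st != mindspore::kSuccess) {
  // handle the error before handing graph to Model::Build
}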

View File

@ -1,164 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_INCLUDE_API_STATUS_H
#define MINDSPORE_INCLUDE_API_STATUS_H
#include <memory>
#include <string>
#include <vector>
#include <ostream>
#include <climits>
#include "include/api/dual_abi_helper.h"
#include "include/api/types.h"
namespace mindspore {
enum CompCode : uint32_t {
kCore = 0x00000000u,
kMD = 0x10000000u,
kME = 0x20000000u,
kMC = 0x30000000u,
kLite = 0xF0000000u,
};
enum StatusCode : uint32_t {
kSuccess = 0,
// Core
kCoreFailed = kCore | 0x1,
// MD
kMDOutOfMemory = kMD | 1,
kMDShapeMisMatch = kMD | 2,
kMDInterrupted = kMD | 3,
kMDNoSpace = kMD | 4,
kMDPyFuncException = kMD | 5,
kMDDuplicateKey = kMD | 6,
kMDPythonInterpreterFailure = kMD | 7,
kMDTDTPushFailure = kMD | 8,
kMDFileNotExist = kMD | 9,
kMDProfilingError = kMD | 10,
kMDBoundingBoxOutOfBounds = kMD | 11,
kMDBoundingBoxInvalidShape = kMD | 12,
kMDSyntaxError = kMD | 13,
kMDTimeOut = kMD | 14,
kMDBuddySpaceFull = kMD | 15,
kMDNetWorkError = kMD | 16,
kMDNotImplementedYet = kMD | 17,
// Make this error code the last one. Add new error code above it.
kMDUnexpectedError = kMD | 127,
// ME
kMEFailed = kME | 0x1,
kMEInvalidInput = kME | 0x2,
// MC
kMCFailed = kMC | 0x1,
kMCDeviceError = kMC | 0x2,
kMCInvalidInput = kMC | 0x3,
kMCInvalidArgs = kMC | 0x4,
// Lite // Common error code, range: [-1, -100)
kLiteError = kLite | (0x0FFFFFFF & -1), /**< Common error code. */
kLiteNullptr = kLite | (0x0FFFFFFF & -2), /**< NULL pointer returned.*/
kLiteParamInvalid = kLite | (0x0FFFFFFF & -3), /**< Invalid parameter.*/
kLiteNoChange = kLite | (0x0FFFFFFF & -4), /**< No change. */
kLiteSuccessExit = kLite | (0x0FFFFFFF & -5), /**< No error but exit. */
kLiteMemoryFailed = kLite | (0x0FFFFFFF & -6), /**< Fail to create memory. */
kLiteNotSupport = kLite | (0x0FFFFFFF & -7), /**< Fail to support. */
kLiteThreadPoolError = kLite | (0x0FFFFFFF & -8), /**< Error occur in thread pool. */
// Executor error code, range: [-100,-200)
kLiteOutOfTensorRange = kLite | (0x0FFFFFFF & -100), /**< Failed to check range. */
kLiteInputTensorError = kLite | (0x0FFFFFFF & -101), /**< Failed to check input tensor. */
kLiteReentrantError = kLite | (0x0FFFFFFF & -102), /**< Exist executor running. */
// Graph error code, range: [-200,-300)
kLiteGraphFileError = kLite | (0x0FFFFFFF & -200), /**< Failed to verify graph file. */
// Node error code, range: [-300,-400)
kLiteNotFindOp = kLite | (0x0FFFFFFF & -300), /**< Failed to find operator. */
kLiteInvalidOpName = kLite | (0x0FFFFFFF & -301), /**< Invalid operator name. */
kLiteInvalidOpAttr = kLite | (0x0FFFFFFF & -302), /**< Invalid operator attr. */
kLiteOpExecuteFailure = kLite | (0x0FFFFFFF & -303), /**< Failed to execute operator. */
// Tensor error code, range: [-400,-500)
kLiteFormatError = kLite | (0x0FFFFFFF & -400), /**< Failed to check tensor format. */
// InferShape error code, range: [-500,-600)
kLiteInferError = kLite | (0x0FFFFFFF & -500), /**< Failed to infer shape. */
kLiteInferInvalid = kLite | (0x0FFFFFFF & -501), /**< Invalid infer shape before runtime. */
// User input param error code, range: [-600, -700)
kLiteInputParamInvalid = kLite | (0x0FFFFFFF & -600), /**< Invalid input param by user. */
};
class MS_API Status {
public:
Status();
inline Status(enum StatusCode status_code, const std::string &status_msg = ""); // NOLINT(runtime/explicit)
inline Status(const StatusCode code, int line_of_code, const char *file_name, const std::string &extra = "");
~Status() = default;
enum StatusCode StatusCode() const;
inline std::string ToString() const;
int GetLineOfCode() const;
inline std::string GetErrDescription() const;
inline std::string SetErrDescription(const std::string &err_description);
friend std::ostream &operator<<(std::ostream &os, const Status &s);
bool operator==(const Status &other) const;
bool operator==(enum StatusCode other_code) const;
bool operator!=(const Status &other) const;
bool operator!=(enum StatusCode other_code) const;
explicit operator bool() const;
explicit operator int() const;
static Status OK();
bool IsOk() const;
bool IsError() const;
static inline std::string CodeAsString(enum StatusCode c);
private:
// api without std::string
explicit Status(enum StatusCode status_code, const std::vector<char> &status_msg);
Status(const enum StatusCode code, int line_of_code, const char *file_name, const std::vector<char> &extra);
std::vector<char> ToCString() const;
std::vector<char> GetErrDescriptionChar() const;
std::vector<char> SetErrDescription(const std::vector<char> &err_description);
static std::vector<char> CodeAsCString(enum StatusCode c);
struct Data;
std::shared_ptr<Data> data_;
};
Status::Status(enum StatusCode status_code, const std::string &status_msg)
: Status(status_code, StringToChar(status_msg)) {}
Status::Status(const enum StatusCode code, int line_of_code, const char *file_name, const std::string &extra)
: Status(code, line_of_code, file_name, StringToChar(extra)) {}
std::string Status::ToString() const { return CharToString(ToCString()); }
std::string Status::GetErrDescription() const { return CharToString(GetErrDescriptionChar()); }
std::string Status::SetErrDescription(const std::string &err_description) {
return CharToString(SetErrDescription(StringToChar(err_description)));
}
std::string Status::CodeAsString(enum StatusCode c) { return CharToString(CodeAsCString(c)); }
} // namespace mindspore
#endif // MINDSPORE_INCLUDE_API_STATUS_H
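In use, a Status compares directly against StatusCode values and carries a human-readable message. A hedged sketch, with DoInference() standing in for any API call that returns Status:

#include <iostream>

mindspore::Status st = DoInference();  // hypothetical call
if (st != mindspore::kSuccess) {
  std::cout << st.ToString() << std::endl;
}
// Statuses can also be constructed explicitly, e.g. for early returns:
mindspore::Status bad_arg(mindspore::kLiteParamInvalid, "input shape mismatch");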

View File

@ -1,137 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_INCLUDE_API_TYPES_H
#define MINDSPORE_INCLUDE_API_TYPES_H
#include <cstddef>
#include <string>
#include <vector>
#include <memory>
#include "include/api/data_type.h"
#include "include/api/dual_abi_helper.h"
#ifdef _WIN32
#define MS_API __declspec(dllexport)
#else
#define MS_API __attribute__((visibility("default")))
#endif
namespace mindspore {
enum ModelType : uint32_t {
kMindIR = 0,
kAIR = 1,
kOM = 2,
kONNX = 3,
// insert new model type here
kUnknownType = 0xFFFFFFFF
};
class MS_API MSTensor {
public:
class Impl;
static inline MSTensor *CreateTensor(const std::string &name, DataType type, const std::vector<int64_t> &shape,
const void *data, size_t data_len) noexcept;
static inline MSTensor *CreateRefTensor(const std::string &name, DataType type, const std::vector<int64_t> &shape,
const void *data, size_t data_len) noexcept;
static inline MSTensor *StringsToTensor(const std::string &name, const std::vector<std::string> &str);
static inline std::vector<std::string> TensorToStrings(const MSTensor &tensor);
static void DestroyTensorPtr(MSTensor *tensor) noexcept;
MSTensor();
explicit MSTensor(const std::shared_ptr<Impl> &impl);
inline MSTensor(const std::string &name, DataType type, const std::vector<int64_t> &shape, const void *data,
size_t data_len);
explicit MSTensor(std::nullptr_t);
~MSTensor();
inline std::string Name() const;
enum DataType DataType() const;
const std::vector<int64_t> &Shape() const;
int64_t ElementNum() const;
std::shared_ptr<const void> Data() const;
void *MutableData();
size_t DataSize() const;
bool IsDevice() const;
MSTensor *Clone() const;
bool operator==(std::nullptr_t) const;
bool operator!=(std::nullptr_t) const;
private:
// api without std::string
static MSTensor *CreateTensor(const std::vector<char> &name, enum DataType type, const std::vector<int64_t> &shape,
const void *data, size_t data_len) noexcept;
static MSTensor *CreateRefTensor(const std::vector<char> &name, enum DataType type, const std::vector<int64_t> &shape,
const void *data, size_t data_len) noexcept;
static MSTensor *CharStringsToTensor(const std::vector<char> &name, const std::vector<std::vector<char>> &str);
static std::vector<std::vector<char>> TensorToStringChars(const MSTensor &tensor);
MSTensor(const std::vector<char> &name, enum DataType type, const std::vector<int64_t> &shape, const void *data,
size_t data_len);
std::vector<char> CharName() const;
friend class ModelImpl;
std::shared_ptr<Impl> impl_;
};
class MS_API Buffer {
public:
Buffer();
Buffer(const void *data, size_t data_len);
~Buffer();
const void *Data() const;
void *MutableData();
size_t DataSize() const;
bool ResizeData(size_t data_len);
bool SetData(const void *data, size_t data_len);
Buffer Clone() const;
private:
class Impl;
std::shared_ptr<Impl> impl_;
};
MSTensor *MSTensor::CreateTensor(const std::string &name, enum DataType type, const std::vector<int64_t> &shape,
const void *data, size_t data_len) noexcept {
return CreateTensor(StringToChar(name), type, shape, data, data_len);
}
MSTensor *MSTensor::CreateRefTensor(const std::string &name, enum DataType type, const std::vector<int64_t> &shape,
const void *data, size_t data_len) noexcept {
return CreateRefTensor(StringToChar(name), type, shape, data, data_len);
}
MSTensor *MSTensor::StringsToTensor(const std::string &name, const std::vector<std::string> &str) {
return CharStringsToTensor(StringToChar(name), VectorStringToChar(str));
}
std::vector<std::string> MSTensor::TensorToStrings(const MSTensor &tensor) {
return VectorCharToString(TensorToStringChars(tensor));
}
MSTensor::MSTensor(const std::string &name, enum DataType type, const std::vector<int64_t> &shape, const void *data,
size_t data_len)
: MSTensor(StringToChar(name), type, shape, data, data_len) {}
std::string MSTensor::Name() const { return CharToString(CharName()); }
} // namespace mindspore
#endif // MINDSPORE_INCLUDE_API_TYPES_H
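CreateTensor is understood to copy the supplied buffer, while CreateRefTensor, by its name, wraps the caller's memory without copying; both return factory-owned pointers that must be released with DestroyTensorPtr. Illustrative only, with DataType::kNumberTypeFloat32 assumed from the included data_type.h:

float data[4] = {1.0f, 2.0f, 3.0f, 4.0f};
mindspore::MSTensor *t = mindspore::MSTensor::CreateTensor(
    "x", mindspore::DataType::kNumberTypeFloat32, {2, 2}, data, sizeof(data));
if (t != nullptr) {
  // ... hand the tensor to Model::Predict ...
  mindspore::MSTensor::DestroyTensorPtr(t);  // factory tensors are freed explicitly
}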

View File

@ -1,74 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_INCLUDE_CONTEXT_H_
#define MINDSPORE_LITE_INCLUDE_CONTEXT_H_
#include "include/ms_tensor.h"
#include "include/lite_utils.h"
#include "include/lite_types.h"
namespace mindspore::lite {
/// \brief CpuDeviceInfo defined for CPU's configuration information.
typedef struct {
bool enable_float16_ = false; /**< prefer to enable float16 inference */
CpuBindMode cpu_bind_mode_ = MID_CPU;
} CpuDeviceInfo;
/// \brief GpuDeviceInfo defined for GPU's configuration information.
typedef struct {
bool enable_float16_ = false; /**< prefer to enable float16 inference */
} GpuDeviceInfo;
/// \brief NpuDeviceInfo defined for NPU's configuration information.
typedef struct {
int frequency_ = 3; /**< NPU inference frequency */
} NpuDeviceInfo;
/// \brief DeviceInfo defined for backend's configuration information.
#ifdef NOT_USE_STL
// DeviceInfo() is implicitly deleted because the defaulted
// constructor of the union would be ill-formed
struct DeviceInfo {
CpuDeviceInfo cpu_device_info_;
};
#else
union DeviceInfo {
CpuDeviceInfo cpu_device_info_;
GpuDeviceInfo gpu_device_info_;
NpuDeviceInfo npu_device_info_;
};
#endif // NOT_USE_STL
/// \brief DeviceContext defined for holding backend's configuration information.
struct DeviceContext {
DeviceType device_type_ = DT_CPU;
DeviceInfo device_info_;
};
/// \brief Context defined for holding environment variables during runtime.
struct Context {
String vendor_name_;
int thread_num_ = 2; /**< thread number config for thread pool */
AllocatorPtr allocator = nullptr;
#ifndef NOT_USE_STL
DeviceContextVector device_list_ = {{DT_CPU, {false, MID_CPU}}};
#else
DeviceContextVector device_list_;
#endif // NOT_USE_STL
};
} // namespace mindspore::lite
#endif // MINDSPORE_LITE_INCLUDE_CONTEXT_H_
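Because the union's implicitly deleted default constructor (see the NOT_USE_STL comment above) rules out plain default construction of DeviceInfo, a heterogeneous device list is most easily built with aggregate initialization. A sketch under the STL build; the float16 flag is illustrative:

mindspore::lite::Context ctx;
ctx.thread_num_ = 4;
mindspore::lite::DeviceContext gpu_ctx{mindspore::lite::DT_GPU, {}};
gpu_ctx.device_info_.gpu_device_info_ = {true};  // enable_float16_
ctx.device_list_.push_back(gpu_ctx);  // the CPU entry is already present by default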

View File

@ -1,74 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_INCLUDE_ERRORCODE_H_
#define MINDSPORE_LITE_INCLUDE_ERRORCODE_H_
#include "include/lite_utils.h"
namespace mindspore {
namespace lite {
/// \brief STATUS defined for holding error code in MindSpore Lite.
using STATUS = int;
/* Success */
constexpr int RET_OK = 0; /**< No error occurs. */
/* Common error code, range: [-1, -100) */
constexpr int RET_ERROR = -1; /**< Common error code. */
constexpr int RET_NULL_PTR = -2; /**< NULL pointer returned.*/
constexpr int RET_PARAM_INVALID = -3; /**< Invalid parameter.*/
constexpr int RET_NO_CHANGE = -4; /**< No change. */
constexpr int RET_SUCCESS_EXIT = -5; /**< No error but exit. */
constexpr int RET_MEMORY_FAILED = -6; /**< Fail to create memory. */
constexpr int RET_NOT_SUPPORT = -7; /**< Fail to support. */
constexpr int RET_THREAD_POOL_ERROR = -8; /**< Error occur in thread pool. */
/* Executor error code, range: [-100,-200) */
constexpr int RET_OUT_OF_TENSOR_RANGE = -100; /**< Failed to check range. */
constexpr int RET_INPUT_TENSOR_ERROR = -101; /**< Failed to check input tensor. */
constexpr int RET_REENTRANT_ERROR = -102; /**< Exist executor running. */
/* Graph error code, range: [-200,-300) */
constexpr int RET_GRAPH_FILE_ERR = -200; /**< Failed to verify graph file. */
/* Node error code, range: [-300,-400) */
constexpr int RET_NOT_FIND_OP = -300; /**< Failed to find operator. */
constexpr int RET_INVALID_OP_NAME = -301; /**< Invalid operator name. */
constexpr int RET_INVALID_OP_ATTR = -302; /**< Invalid operator attr. */
constexpr int RET_OP_EXECUTE_FAILURE = -303; /**< Failed to execute operator. */
/* Tensor error code, range: [-400,-500) */
constexpr int RET_FORMAT_ERR = -400; /**< Failed to check tensor format. */
/* InferShape error code, range: [-500,-600) */
constexpr int RET_INFER_ERR = -500; /**< Failed to infer shape. */
constexpr int RET_INFER_INVALID = -501; /**< Invalid infer shape before runtime. */
/* User input param error code, range: [-600, -700) */
constexpr int RET_INPUT_PARAM_INVALID = -600; /**< Invalid input param by user. */
/// \brief Get the description of an error code.
///
/// \param[in] error_code Define the return status of the procedure.
///
/// \return String of errorcode info.
String GetErrorInfo(STATUS error_code);
} // namespace lite
} // namespace mindspore
#endif // MINDSPORE_LITE_INCLUDE_ERRORCODE_H_
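These legacy RET_* values dovetail with the kLite* StatusCode entries from status.h above: masking the low 28 bits of the negative code and ORing in the kLite section reproduces the code's two's-complement bit pattern. A worked check (kLite is 0xF0000000u, RET_PARAM_INVALID is -3):

#include <cstdint>

static_assert((0xF0000000u | (0x0FFFFFFFu & static_cast<uint32_t>(-3))) == static_cast<uint32_t>(-3),
              "kLiteParamInvalid keeps the bit pattern of RET_PARAM_INVALID");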

View File

@ -1,95 +0,0 @@
/**
* This is the C++ adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/).
*
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CORE_IR_DTYPE_TYPE_ID_H_
#define MINDSPORE_CORE_IR_DTYPE_TYPE_ID_H_
namespace mindspore {
//
// Supported meta type
//
enum TypeId : int {
kTypeUnknown = 0,
kMetaTypeBegin = kTypeUnknown,
kMetaTypeType, // Type
kMetaTypeAnything,
kMetaTypeObject,
kMetaTypeTypeType, // TypeType
kMetaTypeProblem,
kMetaTypeExternal,
kMetaTypeNone,
kMetaTypeNull,
kMetaTypeEllipsis,
kMetaTypeEnd,
//
// Object types
//
kObjectTypeBegin = kMetaTypeEnd,
kObjectTypeNumber,
kObjectTypeString,
kObjectTypeList,
kObjectTypeTuple,
kObjectTypeSlice,
kObjectTypeKeyword,
kObjectTypeTensorType,
kObjectTypeRowTensorType,
kObjectTypeCOOTensorType,
kObjectTypeUndeterminedType,
kObjectTypeClass,
kObjectTypeDictionary,
kObjectTypeFunction,
kObjectTypeJTagged,
kObjectTypeSymbolicKeyType,
kObjectTypeEnvType,
kObjectTypeRefKey,
kObjectTypeRef,
kObjectTypeEnd,
//
// Number Types
//
kNumberTypeBegin = kObjectTypeEnd,
kNumberTypeBool,
kNumberTypeInt,
kNumberTypeInt8,
kNumberTypeInt16,
kNumberTypeInt32,
kNumberTypeInt64,
kNumberTypeUInt,
kNumberTypeUInt8,
kNumberTypeUInt16,
kNumberTypeUInt32,
kNumberTypeUInt64,
kNumberTypeFloat,
kNumberTypeFloat16,
kNumberTypeFloat32,
kNumberTypeFloat64,
kNumberTypeComplex64,
kNumberTypeEnd,
//
// Monad Types
//
// Monad types are placed at the end of the enum
// in order to stay compatible with the types of existing models on the lite side.
kMonadTypeBegin = kNumberTypeEnd,
kObjectTypeMonad,
kObjectTypeUMonad,
kObjectTypeIOMonad,
kMonadTypeEnd
};
} // namespace mindspore
#endif // MINDSPORE_CORE_IR_DTYPE_TYPE_ID_H_
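The *Begin/*End members are sentinels, so category tests reduce to range comparisons. An illustrative helper, not part of the header:

inline bool IsNumberTypeId(mindspore::TypeId t) {
  return t > mindspore::kNumberTypeBegin && t < mindspore::kNumberTypeEnd;
}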

View File

@ -1,46 +0,0 @@
/**
* This is the C++ adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/).
*
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CORE_IR_FORMAT_H_
#define MINDSPORE_CORE_IR_FORMAT_H_
#include <cstdint>
namespace mindspore {
enum Format : int64_t {
NCHW = 0,
NHWC = 1,
NHWC4 = 2,
HWKC = 3,
HWCK = 4,
KCHW = 5,
CKHW = 6,
KHWC = 7,
CHWK = 8,
HW = 9,
HW4 = 10,
NC = 11,
NC4 = 12,
NC4HW4 = 13,
NUM_OF_FORMAT = 14,
NCDHW = 15,
NWC = 16,
NCW = 17
};
} // namespace mindspore
#endif // MINDSPORE_CORE_IR_FORMAT_H_
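The format names encode memory layout, which fixes the flat-offset arithmetic. For a tensor with dimensions N, C, H, W, the two most common layouts work out as follows (illustrative helpers):

inline size_t OffsetNCHW(size_t n, size_t c, size_t h, size_t w, size_t C, size_t H, size_t W) {
  return ((n * C + c) * H + h) * W + w;  // channels vary slowest after batch
}
inline size_t OffsetNHWC(size_t n, size_t c, size_t h, size_t w, size_t C, size_t H, size_t W) {
  return ((n * H + h) * W + w) * C + c;  // channels vary fastest
}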

View File

@ -1,125 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_INCLUDE_LITE_SESSION_H
#define MINDSPORE_LITE_INCLUDE_LITE_SESSION_H
#ifndef NOT_USE_STL
#include <unordered_map>
#endif // NOT_USE_STL
#include "include/ms_tensor.h"
#include "include/model.h"
#include "include/context.h"
namespace mindspore {
namespace session {
/// \brief LiteSession defined session in MindSpore Lite for compiling Model and forwarding model.
class MS_API LiteSession {
public:
/// \brief Static method to create a LiteSession pointer.
///
/// \param[in] context Define the context of session to be created.
///
/// \return Pointer of MindSpore Lite LiteSession.
static LiteSession *CreateSession(const lite::Context *context);
/// \brief Static method to create a LiteSession pointer which has already compiled a model.
///
/// \param[in] model_buf Define the buffer read from a model file.
/// \param[in] size Define bytes number of model buffer.
/// \param[in] context Define the context of session to be created.
///
/// \return Pointer of MindSpore Lite LiteSession.
static LiteSession *CreateSession(const char *model_buf, size_t size, const lite::Context *context);
/// \brief Destructor of MindSpore Lite LiteSession.
virtual ~LiteSession() = default;
/// \brief Attempt to bind or unbind threads in the thread pool to or from the specified cpu core.
///
/// \param[in] if_bind Define whether to bind or unbind threads.
virtual void BindThread(bool if_bind) = 0;
/// \brief Compile MindSpore Lite model.
///
/// \note CompileGraph should be called before RunGraph.
///
/// \param[in] model Define the model to be compiled.
///
/// \return STATUS as an error code of compiling graph, STATUS is defined in errorcode.h.
virtual int CompileGraph(lite::Model *model) = 0;
/// \brief Get input MindSpore Lite MSTensors of model.
///
/// \return The vector of MindSpore Lite MSTensor.
virtual Vector<tensor::MSTensor *> GetInputs() const = 0;
/// \brief Get input MindSpore Lite MSTensor of model by tensor name.
///
/// \param[in] tensor_name Define tensor name.
///
/// \return Pointer of MindSpore Lite MSTensor.
virtual mindspore::tensor::MSTensor *GetInputsByTensorName(const String &tensor_name) const = 0;
/// \brief Run session with callback.
///
/// \param[in] before Define a call_back_function to be called before running each node.
/// \param[in] after Define a call_back_function called after running each node.
///
/// \note RunGraph should be called after CompileGraph.
///
/// \return STATUS as an error code of running graph, STATUS is defined in errorcode.h.
virtual int RunGraph(const KernelCallBack &before = nullptr, const KernelCallBack &after = nullptr) = 0;
/// \brief Get output MindSpore Lite MSTensors of model by node name.
///
/// \param[in] node_name Define node name.
///
/// \note Deprecated, replace with GetOutputByTensorName
///
/// \return The vector of MindSpore Lite MSTensor.
virtual Vector<tensor::MSTensor *> GetOutputsByNodeName(const String &node_name) const = 0;
#ifndef NOT_USE_STL
/// \brief Get output MindSpore Lite MSTensors of model mapped by tensor name.
///
/// \return The map of output tensor name and MindSpore Lite MSTensor.
virtual std::unordered_map<String, mindspore::tensor::MSTensor *> GetOutputs() const = 0;
#endif
/// \brief Get name of output tensors of model compiled by this session.
///
/// \return The vector of string as output tensor names in order.
virtual Vector<String> GetOutputTensorNames() const = 0;
/// \brief Get output MindSpore Lite MSTensors of model by tensor name.
///
/// \param[in] tensor_name Define tensor name.
///
/// \return Pointer of MindSpore Lite MSTensor.
virtual mindspore::tensor::MSTensor *GetOutputByTensorName(const String &tensor_name) const = 0;
/// \brief Resize inputs shape.
///
/// \param[in] inputs Define the inputs of the model.
/// \param[in] dims Define the new shapes of the inputs.
///
/// \return STATUS as an error code of resize inputs, STATUS is defined in errorcode.h.
virtual int Resize(const Vector<tensor::MSTensor *> &inputs, const Vector<Vector<int>> &dims) = 0;
};
} // namespace session
} // namespace mindspore
#endif // MINDSPORE_LITE_INCLUDE_LITE_SESSION_H
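A hedged end-to-end sketch of the session flow these declarations imply, using Model::Import from the model.h shown further below; model_buf and model_size are placeholders for a buffer read elsewhere, and cleanup is elided:

mindspore::lite::Context context;
auto *session = mindspore::session::LiteSession::CreateSession(&context);
auto *model = mindspore::lite::Model::Import(model_buf, model_size);
if (session->CompileGraph(model) != mindspore::lite::RET_OK) {
  // handle the error
}
auto inputs = session->GetInputs();
// ... fill each input through MSTensor::MutableData() ...
if (session->RunGraph() != mindspore::lite::RET_OK) {
  // handle the error
}
auto *out = session->GetOutputByTensorName(session->GetOutputTensorNames()[0]);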

View File

@ -1,36 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_INCLUDE_LITE_TYPES_H_
#define MINDSPORE_LITE_INCLUDE_LITE_TYPES_H_
namespace mindspore::lite {
/// \brief CpuBindMode defined for holding bind cpu strategy argument.
typedef enum {
NO_BIND, /**< no bind */
HIGHER_CPU, /**< bind higher cpu first */
MID_CPU /**< bind middle cpu first */
} CpuBindMode;
/// \brief DeviceType defined for holding user's preferred backend.
typedef enum {
DT_CPU, /**< CPU device type */
DT_GPU, /**< GPU device type */
DT_NPU /**< NPU device type */
} DeviceType;
} // namespace mindspore::lite
#endif // MINDSPORE_LITE_INCLUDE_LITE_TYPES_H_

View File

@ -1,666 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_INCLUDE_LITE_UTILS_H_
#define MINDSPORE_LITE_INCLUDE_LITE_UTILS_H_
#ifndef NOT_USE_STL
#include <vector>
#include <string>
#include <memory>
#include <functional>
#else
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <stddef.h>
#include <stdio.h>
#include <float.h>
#include <new>
#endif // NOT_USE_STL
#ifndef MS_API
#ifdef _WIN32
#define MS_API __declspec(dllexport)
#else
#define MS_API __attribute__((visibility("default")))
#endif
#endif
namespace mindspore {
namespace schema {
struct Tensor;
} // namespace schema
namespace tensor {
class MSTensor;
} // namespace tensor
namespace lite {
struct DeviceContext;
} // namespace lite
#ifdef NOT_USE_STL
#define MS_C_EXCEPTION(...) exit(1)
class String {
public:
String() {
buffer_ = reinterpret_cast<char *>(malloc(sizeof(char) * 1));
if (buffer_ == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
buffer_[0] = '\0';
size_ = 0;
}
String(size_t count, char ch) {
buffer_ = reinterpret_cast<char *>(malloc(sizeof(char) * (count + 1)));
if (buffer_ == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
memset(buffer_, ch, count);
buffer_[count] = '\0';
size_ = count;
}
String(const char *s, size_t count) {
if (s == nullptr) {
buffer_ = reinterpret_cast<char *>(malloc(sizeof(char) * 1));
if (buffer_ == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
buffer_[0] = '\0';
size_ = 0;
return;
}
size_t size_s = strlen(s);
if (size_s <= count) {
size_ = size_s;
} else {
size_ = count;
}
buffer_ = reinterpret_cast<char *>(malloc(sizeof(char) * (size_ + 1)));
if (buffer_ == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
strncpy(buffer_, s, size_);
buffer_[size_] = '\0';
}
explicit String(const char *s) {
if (s == nullptr) {
buffer_ = reinterpret_cast<char *>(malloc(sizeof(char) * 1));
if (buffer_ == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
buffer_[0] = '\0';
size_ = 0;
return;
}
size_ = strlen(s);
buffer_ = reinterpret_cast<char *>(malloc(sizeof(char) * (size_ + 1)));
if (buffer_ == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
memcpy(buffer_, s, size_ + 1);
}
String(const String &other) {
buffer_ = reinterpret_cast<char *>(malloc(sizeof(char) * (other.size_ + 1)));
if (buffer_ == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
size_ = other.size_;
memcpy(buffer_, other.buffer_, size_ + 1);
}
String(const String &other, size_t pos, size_t count = npos) {
if (pos >= other.size_) {
buffer_ = reinterpret_cast<char *>(malloc(sizeof(char) * 1));
if (buffer_ == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
buffer_[0] = '\0';
size_ = 0;
} else {
if (count == npos) {
count = other.size_ - pos;
}
if (pos + count > other.size_) {
size_ = other.size_ - pos;
} else {
size_ = count;
}
buffer_ = reinterpret_cast<char *>(malloc(sizeof(char) * (size_ + 1)));
if (buffer_ == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
strncpy_s(buffer_, size_ + 1, other.buffer_ + pos, size_);
buffer_[size_] = '\0';
}
}
~String() { free(buffer_); }
String &operator=(const String &str) {
if (this == &str) {
return *this;
}
free(buffer_);
buffer_ = reinterpret_cast<char *>(malloc(sizeof(char) * (str.size_ + 1)));
if (buffer_ == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
size_ = str.size_;
memcpy(buffer_, str.buffer_, size_ + 1);
return *this;
}
String &operator=(const char *str) {
free(buffer_);
if (str == nullptr) {
buffer_ = reinterpret_cast<char *>(malloc(sizeof(char) * 1));
if (buffer_ == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
buffer_[0] = '\0';
size_ = 0;
return *this;
}
size_t size_s = strlen(str);
buffer_ = reinterpret_cast<char *>(malloc(sizeof(char) * (size_s + 1)));
if (buffer_ == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
size_ = size_s;
memcpy(buffer_, str, size_ + 1);
return *this;
}
char &at(size_t pos) {
if (pos >= size_) {
MS_C_EXCEPTION("pos out of range");
}
return buffer_[pos];
}
const char &at(size_t pos) const {
if (pos >= size_) {
MS_C_EXCEPTION("pos out of range");
}
return buffer_[pos];
}
inline char &operator[](size_t pos) {
if (pos >= size_) {
MS_C_EXCEPTION("pos out of range");
}
return this->at(pos);
}
inline const char &operator[](size_t pos) const {
if (pos >= size_) {
MS_C_EXCEPTION("pos out of range");
}
return this->at(pos);
}
char *data() noexcept { return buffer_; }
const char *data() const noexcept { return buffer_; }
const char *c_str() const noexcept { return buffer_; }
// capacity
bool empty() const noexcept { return size_ == 0; }
size_t size() const noexcept { return size_; }
size_t length() const noexcept { return size_; }
// operations
void clear() noexcept {
free(buffer_);
buffer_ = reinterpret_cast<char *>(malloc(sizeof(char) * 1));
if (buffer_ == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
buffer_[0] = '\0';
size_ = 0;
}
String &append(size_t count, const char ch) {
// append ch count times; the old body ignored count and appended once
for (size_t i = 0; i < count; ++i) {
(*this) += ch;
}
return *this;
}
String &append(const String &str) {
(*this) += str;
return *this;
}
String &append(const char *str) {
if (str == nullptr) {
return *this;
}
(*this) += str;
return *this;
}
String operator+(const String &str) const {
// return a new string instead of mutating the left operand
String ret = *this;
ret += str;
return ret;
}
String &operator+=(const String &str) {
size_t new_size = size_ + str.size_;
char *tmp = reinterpret_cast<char *>(malloc(sizeof(char) * (new_size + 1)));
if (tmp == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
memcpy(tmp, this->buffer_, size_ + 1);
strncat_s(tmp, new_size + 1, str.buffer_, str.size_);
tmp[new_size] = '\0';
free(buffer_);
buffer_ = tmp;
size_ = new_size;
return *this;
}
String &operator+=(const char *str) {
if (str == nullptr) {
return *this;
}
size_t str_size = strlen(str);
size_t new_size = size_ + str_size;
char *tmp = reinterpret_cast<char *>(malloc(sizeof(char) * (new_size + 1)));
if (tmp == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
memcpy(tmp, this->buffer_, size_ + 1);
strncat(tmp, str, str_size);
tmp[new_size] = '\0';
free(buffer_);
buffer_ = tmp;
size_ = new_size;
return *this;
}
String &operator+=(const char ch) {
char *tmp = reinterpret_cast<char *>(malloc(sizeof(char) * (size_ + 2)));
if (tmp == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
memcpy(tmp, this->buffer_, size_ + 1);
tmp[size_] = ch;
tmp[size_ + 1] = '\0';
free(buffer_);
buffer_ = tmp;
size_ += 1;
return *this;
}
int compare(const String &str) const { return strcmp(buffer_, str.buffer_); }
int compare(const char *str) const { return strcmp(buffer_, str); }
String substr(size_t pos = 0, size_t count = npos) const { return String(*this, pos, count); }
static const size_t npos = -1;
private:
size_t size_;
char *buffer_;
};
inline String operator+(const String &lhs, const char *rhs) {
String str = lhs;
str += rhs;
return str;
}
inline String operator+(const char *lhs, const String &rhs) {
// concatenate in left-to-right order; the old body appended lhs after rhs
String str(lhs);
str += rhs;
return str;
}
inline bool operator!=(const String &lhs, const String &rhs) { return lhs.compare(rhs) != 0; }
inline bool operator==(const String &lhs, const String &rhs) { return lhs.compare(rhs) == 0; }
inline bool operator==(const String &lhs, const char *rhs) { return lhs.compare(rhs) == 0; }
inline bool operator==(const char *lhs, const String &rhs) { return rhs.compare(lhs) == 0; }
inline String to_String(int32_t value) {
char tmp[sizeof(int32_t) * 4];
snprintf(tmp, sizeof(int32_t) * 4, "%d", value);
return String(tmp, strlen(tmp));
}
inline String to_String(float value) {
char tmp[FLT_MAX_10_EXP + 20];
snprintf(tmp, FLT_MAX_10_EXP + 20, "%f", value);
return String(tmp, strlen(tmp));
}
#define DEFAULT_CAPACITY 4
#define MIN(x, y) (((x) < (y)) ? (x) : (y))
template <typename T>
class Vector {
public:
Vector() {
size_ = 0;
capacity_ = DEFAULT_CAPACITY;
elem_size_ = sizeof(T);
data_ = nullptr;
}
explicit Vector(size_t size) {
size_ = size;
elem_size_ = sizeof(T);
capacity_ = (size == 0 ? DEFAULT_CAPACITY : size);
data_ = new (std::nothrow) T[capacity_];
if (data_ == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
}
Vector(size_t size, const T &value) {
size_ = size;
elem_size_ = sizeof(T);
capacity_ = (size == 0 ? DEFAULT_CAPACITY : size);
data_ = new (std::nothrow) T[capacity_];
if (data_ == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
for (int i = 0; i < static_cast<int>(size_); ++i) {
data_[i] = value;
}
}
Vector(const Vector<T> &vec) {
size_ = vec.size_;
elem_size_ = sizeof(T);
capacity_ = vec.capacity_;
data_ = new (std::nothrow) T[capacity_];
if (data_ == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
for (int i = 0; i < static_cast<int>(size_); ++i) {
data_[i] = vec.data_[i];
}
}
~Vector() {
if (data_ != nullptr) {
delete[] data_;
}
}
void clear() {
size_ = 0;
if (data_ != nullptr) {
delete[] data_;
data_ = nullptr;
}
}
void push_back(const T &elem) {
if (data_ == nullptr) {
data_ = new (std::nothrow) T[capacity_];
if (data_ == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
} else if (size_ == capacity_) {
resize(size_ + 1);
--size_;
}
data_[size_] = elem;
++size_;
}
void push_back(T &&elem) {
if (data_ == nullptr) {
data_ = new (std::nothrow) T[capacity_];
if (data_ == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
} else if (size_ == capacity_) {
resize(size_ + 1);
--size_;
}
data_[size_] = elem;
++size_;
}
void pop_back() {
if (size_ > 0) {
--size_;
} else {
MS_C_EXCEPTION("Index is out of range!");
}
}
void insert(const T &elem, size_t index) {
if (index > size_) {
MS_C_EXCEPTION("Input index is out of range!");
}
if (data_ == nullptr) {
data_ = new (std::nothrow) T[capacity_];
if (data_ == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
} else if (size_ == capacity_) {
resize(size_ + 1);  // grow the buffer; resize also bumps size_, undone below
--size_;
}
++size_;
// shift the tail one slot right; the old push_back shortcut incremented size_ twice
for (int i = static_cast<int>(size_) - 1; i > static_cast<int>(index); --i) {
data_[i] = data_[i - 1];
}
data_[index] = elem;
}
T *begin() { return data_; }
const T *begin() const { return data_; }
T *end() { return data_ + size_; }
const T *end() const { return data_ + size_; }
T &front() {
if (size_ > 0) {
return data_[0];
}
MS_C_EXCEPTION("Index is out of range!");
}
const T &front() const {
if (size_ > 0) {
return data_[0];
}
MS_C_EXCEPTION("Index is out of range!");
}
T &back() {
if (size_ > 0) {
return data_[size_ - 1];
}
MS_C_EXCEPTION("Index is out of range!");
}
const T &back() const {
if (size_ > 0) {
return data_[size_ - 1];
}
MS_C_EXCEPTION("Index is out of range!");
}
T &at(size_t index) {
if (index < size_) {
return data_[index];
}
MS_C_EXCEPTION("Input index is out of range!");
}
const T &at(size_t index) const {
if (index < size_) {
return data_[index];
}
MS_C_EXCEPTION("Input index is out of range!");
}
T &operator[](size_t index) {
if (index < size_) {
return data_[index];
}
MS_C_EXCEPTION("Input index is out of range!");
}
const T &operator[](size_t index) const {
if (index < size_) {
return data_[index];
}
MS_C_EXCEPTION("Input index is out of range!");
}
T *data() { return data_; }
const T *data() const { return data_; }
size_t size() const { return size_; }
size_t capacity() const { return capacity_; }
bool empty() const { return size_ == 0; }
void erase(size_t index) {
if (index == size_ - 1) {
--size_;
} else if (index < size_) {
for (int i = index; i < static_cast<int>(size_); ++i) {
data_[i] = data_[i + 1];
}
--size_;
} else {
MS_C_EXCEPTION("Input index is out of range!");
}
}
void resize(size_t size) {
while (size > capacity_) {
capacity_ *= 2;
}
T *tmp = data_;
data_ = new (std::nothrow) T[capacity_];
if (data_ == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
for (int i = 0; i < MIN(static_cast<int>(size), static_cast<int>(size_)); ++i) {
data_[i] = tmp[i];
}
size_ = size;
delete[] tmp;
}
void reserve(size_t capacity) {
if (capacity > capacity_) {
capacity_ = capacity;
}
}
Vector<T> &operator=(const Vector<T> &vec) {
if (this == &vec) {
return *this;
}
delete[] data_;  // release the current buffer so the assignment does not leak it
size_ = vec.size_;
elem_size_ = sizeof(T);
capacity_ = vec.capacity_;
data_ = new (std::nothrow) T[capacity_];
if (data_ == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
for (int i = 0; i < static_cast<int>(size_); ++i) {
data_[i] = vec.data_[i];
}
return *this;
}
private:
size_t size_;
size_t elem_size_;
size_t capacity_;
T *data_;
};
using TensorPtrVector = Vector<mindspore::schema::Tensor *>;
using Uint32Vector = Vector<uint32_t>;
using AllocatorPtr = void *;
using DeviceContextVector = Vector<lite::DeviceContext>;
using KernelCallBack = void (*)(void *, void *);
#else
/// \brief Allocator defined a memory pool for malloc memory and free memory dynamically.
///
/// \note List public class and interface for reference.
class Allocator;
using AllocatorPtr = std::shared_ptr<Allocator>;
using TensorPtrVector = std::vector<mindspore::schema::Tensor *>;
using Uint32Vector = std::vector<uint32_t>;
template <typename T>
using Vector = std::vector<T>;
template <typename T>
inline std::string to_string(T t) {
return std::to_string(t);
}
namespace tensor {
using String = std::string;
} // namespace tensor
namespace session {
using String = std::string;
} // namespace session
/// \brief CallBackParam defined input arguments for callBack function.
struct CallBackParam {
session::String node_name; /**< node name argument */
session::String node_type; /**< node type argument */
};
struct GPUCallBackParam : CallBackParam {
double execute_time{-1.f};
};
/// \brief KernelCallBack defined the function pointer for callBack.
using KernelCallBack = std::function<bool(Vector<tensor::MSTensor *> inputs, Vector<tensor::MSTensor *> outputs,
const CallBackParam &opInfo)>;
namespace lite {
using String = std::string;
using DeviceContextVector = std::vector<DeviceContext>;
/// \brief Set data of MSTensor from string vector.
///
/// \param[in] inputs Define the string vector to be stored.
/// \param[out] tensor Define the MSTensor to store the strings.
///
/// \return STATUS as an error code of this interface, STATUS is defined in errorcode.h.
int MS_API StringsToMSTensor(const Vector<String> &inputs, tensor::MSTensor *tensor);
/// \brief Get string vector from MSTensor.
/// \param[in] tensor Define the MSTensor storing the strings.
/// \return The vector of string.
Vector<String> MS_API MSTensorToStrings(const tensor::MSTensor *tensor);
} // namespace lite
#endif // NOT_USE_STL
} // namespace mindspore
#endif // MINDSPORE_LITE_INCLUDE_LITE_UTILS_H_
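Under the STL build, KernelCallBack is just a std::function, so per-node hooks for LiteSession::RunGraph can be written as lambdas. A sketch of a tracing callback; the session variable comes from the LiteSession sketch above:

#include <iostream>

mindspore::KernelCallBack before = [](mindspore::Vector<mindspore::tensor::MSTensor *> inputs,
                                      mindspore::Vector<mindspore::tensor::MSTensor *> outputs,
                                      const mindspore::CallBackParam &info) {
  std::cout << "running " << info.node_name << " (" << info.node_type << ")" << std::endl;
  return true;  // returning false aborts the run
};
session->RunGraph(before, nullptr);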

View File

@ -1,66 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_INCLUDE_MODEL_H_
#define MINDSPORE_LITE_INCLUDE_MODEL_H_
#include "include/lite_utils.h"
namespace mindspore::lite {
struct MS_API Model {
struct Node {
String name_;
int node_type_;
const void *primitive_;
Uint32Vector input_indices_;
Uint32Vector output_indices_;
int quant_type_;
};
using NodePtrVector = Vector<Node *>;
struct SubGraph {
String name_;
Uint32Vector input_indices_;
Uint32Vector output_indices_;
Uint32Vector node_indices_;
Uint32Vector tensor_indices_;
};
using SubGraphPtrVector = Vector<SubGraph *>;
String name_;
String version_;
TensorPtrVector all_tensors_;
NodePtrVector all_nodes_;
char *buf;
SubGraphPtrVector sub_graphs_;
/// \brief Static method to create a Model pointer.
///
/// \param[in] model_buf Define the buffer read from a model file.
/// \param[in] size Define bytes number of model buffer.
///
/// \return Pointer of MindSpore Lite Model.
static Model *Import(const char *model_buf, size_t size);
/// \brief Free the meta-graph temporary buffer.
virtual void Free() = 0;
/// \brief Free all temporary buffers, e.g. the nodes in the model.
virtual void Destroy() = 0;
/// \brief Model destructor, frees all memory.
virtual ~Model() = default;
};
} // namespace mindspore::lite
#endif // MINDSPORE_LITE_INCLUDE_MODEL_H_
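Free() and Destroy() split the lifetime: Free() drops only the meta-graph buffer, Destroy() all temporary structures. A hedged usage sketch following CompileGraph, with model_buf and model_size as placeholders:

auto *model = mindspore::lite::Model::Import(model_buf, model_size);
if (session->CompileGraph(model) == mindspore::lite::RET_OK) {
  model->Free();  // the serialized buffer is no longer needed once the graph is compiled
}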

View File

@ -1,118 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_INCLUDE_MS_TENSOR_H_
#define MINDSPORE_LITE_INCLUDE_MS_TENSOR_H_
#include "include/lite_utils.h"
#include "ir/dtype/type_id.h"
namespace mindspore {
enum Format : int64_t;
namespace tensor {
/// \brief MSTensor defined tensor in MindSpore Lite.
class MS_API MSTensor {
public:
/// \brief Constructor of MindSpore Lite MSTensor.
///
/// \return Instance of MindSpore Lite MSTensor.
MSTensor() = default;
/// \brief Destructor of MindSpore Lite MSTensor.
virtual ~MSTensor() = default;
/// \brief Create a MSTensor.
///
/// \return Pointer to an instance of MindSpore Lite MSTensor.
static MSTensor *CreateTensor(const String &name, TypeId type, const Vector<int> &shape, const void *data,
size_t data_len);
/// \brief Get memory allocator of current MSTensor.
///
/// \return Pointer of memory allocator class.
virtual AllocatorPtr allocator() const = 0;
/// \brief Get data type of the MindSpore Lite MSTensor.
///
/// \note TypeId is defined in mindspore/mindspore/include/api/type_id.h. Only number types in TypeId enum are
/// suitable for MSTensor.
///
/// \return MindSpore Lite TypeId of the MindSpore Lite MSTensor.
virtual TypeId data_type() const = 0;
/// \brief Set data type of current MSTensor.
///
/// \param[in] data_type Define data type, which is shown in type_id.h.
virtual void set_data_type(TypeId data_type) = 0;
/// \brief Set format of current MSTensor.
///
/// \param[in] format Define format of data, which is shown in format.h
virtual void set_format(mindspore::Format format) = 0;
/// \brief Get format of current MSTensor.
///
/// \return format, which is shown in format.h
virtual mindspore::Format format() const = 0;
/// \brief Get shape of the MindSpore Lite MSTensor.
///
/// \return A vector of int as the shape of the MindSpore Lite MSTensor.
virtual Vector<int> shape() const = 0;
/// \brief Set the shape of MSTensor.
virtual void set_shape(const Vector<int> &shape) = 0;
/// \brief Get the number of elements in MSTensor.
///
/// \return Number of elements in MSTensor.
virtual int ElementsNum() const = 0;
/// \brief Get byte size of data in MSTensor.
///
/// \return Byte size of data in MSTensor.
virtual size_t Size() const = 0;
/// \brief Get the name of MSTensor.
///
/// \return the name of MSTensor.
virtual String tensor_name() const = 0;
/// \brief Set the name of MSTensor.
virtual void set_tensor_name(const String &name) = 0;
/// \brief Get the pointer of data in MSTensor.
///
/// \note The data pointer can be used to both write and read data in MSTensor. The memory buffer will be
/// automatically allocated.
///
/// \return the pointer points to data in MSTensor.
virtual void *MutableData() = 0;
/// \brief Get the pointer of data in MSTensor.
///
/// \note The data pointer can be used to both write and read data in MSTensor. No memory buffer will be
/// allocated.
///
/// \return the pointer points to data in MSTensor.
virtual void *data() = 0;
/// \brief Set the data of MSTensor.
virtual void set_data(void *data) = 0;
};
} // namespace tensor
} // namespace mindspore
#endif // MINDSPORE_LITE_INCLUDE_MS_TENSOR_H_
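Per the notes above, MutableData() allocates the backing buffer on first use while data() does not, so filling an input goes through MutableData(). A short sketch, assuming a float32 input tensor from the LiteSession sketch earlier:

auto *in_tensor = session->GetInputs()[0];
auto *in_data = reinterpret_cast<float *>(in_tensor->MutableData());  // allocates if needed
for (int i = 0; i < in_tensor->ElementsNum(); ++i) {
  in_data[i] = 0.0f;  // placeholder values
}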

View File

@ -1,38 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_INCLUDE_VERSION_H_
#define MINDSPORE_LITE_INCLUDE_VERSION_H_
#include "include/lite_utils.h"
namespace mindspore {
namespace lite {
const int ms_version_major = 1;
const int ms_version_minor = 2;
const int ms_version_revision = 0;
/// \brief Global method to get a version string.
///
/// \return The version string of MindSpore Lite.
inline String Version() {
return "MindSpore Lite " + to_string(ms_version_major) + "." + to_string(ms_version_minor) + "." +
to_string(ms_version_revision);
}
} // namespace lite
} // namespace mindspore
#endif // MINDSPORE_LITE_INCLUDE_VERSION_H_

View File

@ -1,196 +0,0 @@
[... 784 float values (one 28x28 MNIST input sample) elided: the data is identical to the mnist_inputs_data array in the header file below ...]

View File

@ -1,217 +0,0 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_INPUT_DATA_H_
#define MINDSPORE_LITE_INPUT_DATA_H_
float mnist_inputs_data[] = {
-1.5255959e+00, -7.5023180e-01, -6.5398091e-01, -1.6094848e+00,
-1.0016718e-01, -6.0918891e-01, -9.7977227e-01, -1.6090963e+00,
-7.1214461e-01, 3.0372199e-01, -7.7731431e-01, -2.5145525e-01,
-2.2227049e-01, 1.6871134e+00, 2.2842517e-01, 4.6763551e-01,
-6.9697243e-01, -1.1607615e+00, 6.9954240e-01, 1.9908163e-01,
8.6569238e-01, 2.4440390e-01, -6.6291136e-01, 8.0730826e-01,
1.1016806e+00, -1.7593604e-01, -2.2455578e+00, -1.4464580e+00,
6.1155282e-02, -6.1774445e-01, -7.9806983e-01, -1.3162321e-01,
1.8793458e+00, -7.2131783e-02, 1.5777060e-01, -7.7345490e-01,
1.9905651e-01, 4.5702778e-02, 1.5295692e-01, -4.7567880e-01,
-1.1101983e-01, 2.9273525e-01, -1.5784515e-01, -2.8787140e-02,
2.3571110e+00, -1.0373387e+00, 1.5747981e+00, -6.2984723e-01,
-9.2739171e-01, 5.4514152e-01, 6.6280261e-02, -4.3704012e-01,
7.6260060e-01, 4.4151092e-01, 1.1651385e+00, 2.0153918e+00,
1.3741246e-01, 9.3864471e-01, -1.8600109e-01, -6.4463931e-01,
1.5392458e+00, -8.6958760e-01, -3.3311536e+00, -7.4787223e-01,
-2.5502462e-02, -1.0233306e+00, -5.9618515e-01, -1.0055307e+00,
-2.1060631e-01, -7.5475276e-03, 1.6734272e+00, 1.0342831e-02,
-7.0395666e-01, -1.8526579e-01, -9.9623507e-01, -8.3125526e-01,
-4.6102202e-01, -5.6008244e-01, 3.9557618e-01, -9.8227710e-01,
-5.0648659e-01, 9.9775404e-02, -6.5397340e-01, 7.3169369e-01,
-1.4343859e+00, -5.0081307e-01, 1.7163314e-01, -1.5999313e-01,
2.5463349e-01, -5.0195730e-01, -1.0412000e+00, 7.3226720e-01,
-1.0483401e+00, -4.7087720e-01, 2.9113635e-01, 1.9907043e+00,
6.6144532e-01, 1.1899205e+00, 8.1653392e-01, -9.1352361e-01,
1.3851457e+00, -8.1384623e-01, -9.2757654e-01, 1.1119633e+00,
1.3352057e+00, 6.0427362e-01, -1.0344208e-01, -1.5121692e-01,
-2.1020830e+00, -6.2002194e-01, -1.4782310e+00, -1.1334175e+00,
8.7379628e-01, -5.6025940e-01, 1.2857845e+00, 8.1682384e-01,
2.0530410e-01, 3.0510718e-01, 5.3568703e-01, -4.3118501e-01,
2.5581384e+00, -2.3336388e-01, -1.3472130e-02, 1.8606348e+00,
-1.9804063e+00, 1.7985829e+00, 1.0181159e-01, 3.4000599e-01,
7.1236455e-01, -1.7765073e+00, 3.5386458e-01, 1.1996132e+00,
-3.0299741e-01, -1.7618417e+00, 6.3484460e-01, -8.0435908e-01,
-1.6111118e+00, -1.8716129e+00, 5.4308361e-01, 6.6067863e-01,
2.2952116e+00, 6.7490596e-01, 1.7133216e+00, -1.7942734e+00,
-1.3632672e+00, -9.8321962e-01, 1.5112667e+00, 6.4187074e-01,
4.7296381e-01, -4.2859009e-01, 5.5137074e-01, -1.5473709e+00,
5.1811212e-01, 1.0653535e-01, 2.6924077e-01, 1.3247679e+00,
1.7460191e+00, 1.8549690e+00, -7.0636910e-01, 2.5570862e+00,
4.1753429e-01, -2.1271861e-01, -8.3995801e-01, -4.2001787e-01,
-6.2403631e-01, -9.7729611e-01, 8.7484282e-01, 9.8728138e-01,
3.0957633e-01, 1.5206900e+00, 1.2052339e+00, -1.8155910e+00,
-4.0346155e-01, -9.5914519e-01, -5.2077039e-03, -7.8863136e-02,
8.4365427e-01, 1.1657013e+00, 5.2693218e-01, 1.6192533e+00,
-9.6397626e-01, 1.4152038e-01, -1.6366096e-01, -3.5822257e-01,
1.7222793e+00, -3.0357561e-01, 2.3887420e-01, 1.3440012e+00,
1.0322569e-01, 1.1003542e+00, -3.4168020e-01, 9.4733888e-01,
-5.6851596e-01, 8.3759618e-01, 1.7836607e+00, -1.9542466e-01,
5.1491612e-01, -1.8474776e+00, -2.9167426e+00, -5.6732988e-01,
-5.4128021e-01, 8.9517403e-01, -8.8250703e-01, 5.3181124e-01,
-1.5457772e+00, -1.7329982e-01, 7.2824633e-01, 5.7061020e-02,
9.0551722e-01, 1.0462948e+00, -5.2059698e-01, 1.3547838e+00,
2.3519313e-01, 1.9142433e+00, 1.8364111e+00, 1.3245324e+00,
-9.6900916e-01, 1.2516364e+00, 1.2103242e+00, -5.2792060e-01,
2.1856615e-01, -5.7430726e-01, 1.4571251e+00, 1.7709557e+00,
1.6499138e+00, -4.3200457e-01, -2.7102691e-01, -1.4391626e+00,
1.2470404e+00, 1.2738512e+00, 3.9094925e-01, 3.8721049e-01,
-7.9828717e-02, 3.4172431e-01, 9.4882733e-01, -1.3839359e+00,
1.7240863e+00, -2.3647652e+00, -9.2949092e-01, 2.9362530e-01,
2.1513203e-01, 9.3846369e-01, 1.4657077e+00, -5.5647439e-01,
-7.4484080e-01, -2.0215721e-01, -2.2966790e-01, 1.3313366e-03,
3.7527591e-01, -5.8106792e-01, -5.7230884e-01, 1.0097175e+00,
-1.0564939e-01, -1.1796960e+00, -9.0779595e-02, 5.6311435e-01,
-1.2560141e+00, 8.9555502e-01, 1.6747737e-01, 7.5142086e-01,
2.4142299e+00, 1.0205840e+00, -4.4048381e-01, -1.7341677e+00,
-1.2362250e+00, 1.5785813e+00, -1.1160507e+00, 7.6777023e-01,
-5.8820677e-01, 2.1188903e+00, -5.4219025e-01, -2.4592547e+00,
-1.1108288e+00, -1.1187209e+00, 7.5799555e-01, -4.9565765e-01,
-1.9700006e-01, -3.3396218e-02, 7.1929151e-01, 1.0644146e+00,
8.3402544e-01, -1.9162164e+00, -3.4202927e-01, -6.6049206e-01,
3.1508535e-01, 1.1422518e+00, 3.0550566e-01, -5.7888174e-01,
-2.3828252e-01, -1.3541743e+00, 2.6868939e-01, 1.1455697e-01,
-1.5562972e+00, -1.0757437e+00, -8.7519461e-01, -4.7281876e-01,
9.9123681e-01, -5.8622282e-02, 1.1787646e+00, 6.2218499e-01,
7.8785008e-01, 1.3685523e+00, -8.5068983e-01, 5.1260746e-01,
1.0476325e+00, -3.1758463e-01, 1.3948506e-01, 2.3402624e+00,
-6.1160916e-01, 8.1602710e-01, 2.4772300e-01, -3.8672671e-01,
1.9948451e-01, 7.9926956e-01, -2.6190341e-01, 1.5132962e-01,
1.1981666e+00, -2.2832582e+00, -1.0129594e+00, -8.8789088e-01,
6.5221924e-01, -8.7262028e-01, 3.5253752e-02, -3.3653030e-01,
1.4023319e+00, 4.8412141e-01, -7.0304507e-01, -8.2676607e-01,
7.7439600e-01, 6.9199395e-01, -1.0184799e+00, -8.0337167e-01,
-7.0711321e-01, 7.5211829e-01, -1.9208279e-02, 1.1033330e+00,
-6.0679215e-01, -5.2522349e-01, -5.6618774e-01, 6.6039857e-04,
7.2245878e-01, 1.5263520e-01, 1.4495978e-01, -2.3442194e+00,
3.6000299e-01, 4.6668175e-01, 1.2830665e+00, 1.2678007e+00,
1.9883296e-01, 5.4408771e-01, -3.9781693e-01, -1.9291055e+00,
2.3236869e-01, 8.6146563e-01, 6.2175733e-01, -1.7811896e+00,
-7.8206092e-01, -1.4236701e+00, 1.6090765e+00, -3.2787595e-02,
8.5323340e-01, 5.5063650e-02, -1.7425371e+00, 8.7500376e-01,
-2.7188172e+00, -2.2192061e-01, 3.4208494e-01, 1.1093477e+00,
-5.7314759e-01, 9.5778459e-01, 9.8202319e-04, -1.3847686e+00,
-9.9650228e-01, 8.0734813e-01, 1.1738863e+00, -9.3984646e-01,
1.3109189e+00, -3.1670693e-01, -1.8610410e-01, -5.7646018e-01,
6.8665183e-01, 4.2086706e-01, -1.0213808e+00, 9.8856664e-01,
-5.6187165e-01, -1.5792575e-01, 1.5042593e+00, -1.3950295e+00,
8.0079097e-01, -6.6194439e-01, 1.2563107e+00, 4.9999446e-01,
-2.7133808e-01, 1.8469073e+00, -3.1249959e-02, -9.3872704e-02,
-6.1907429e-01, -6.3632655e-01, -4.2415860e-01, -2.0271668e+00,
4.0962908e-01, -1.5421267e+00, -1.0128618e+00, -2.9737514e-02,
-2.8895226e-01, 1.5219319e-01, -2.9803404e-01, -1.3135384e-01,
-6.2809873e-01, 1.1968799e+00, 6.1099350e-01, -4.5477438e-01,
-9.6037018e-01, 2.7690458e-01, -6.8010890e-01, -5.4578751e-01,
-4.5518342e-01, 3.1859580e-01, -3.5494208e-01, 6.8589437e-01,
-3.7613729e-01, -2.4106996e+00, -1.2778088e+00, -6.2887415e-02,
-9.4712764e-02, -2.3144305e+00, 5.5653399e-01, 5.0569206e-01,
-2.0759584e-01, 6.9363183e-01, 4.1949040e-01, 2.2523544e+00,
9.3852311e-01, 1.4252927e+00, 1.5083258e+00, 1.0539497e-01,
-1.6049961e+00, -1.0644839e-01, 2.4656655e-01, 6.1250836e-01,
7.3980182e-01, -1.7860015e-01, 7.8490011e-02, -4.3981805e-01,
-3.6079338e-01, -1.2617406e+00, 1.9146918e+00, -1.8612741e+00,
-9.6749123e-03, 2.6038763e-01, 2.8203353e-01, 2.5829947e-01,
-4.2654869e-01, 9.8075122e-01, 1.8588890e+00, -1.0920147e+00,
7.6300204e-01, 2.2761525e-01, -1.4569789e+00, 1.7043737e+00,
-3.2686386e+00, 4.7498712e-01, -2.1142473e+00, -1.5002301e+00,
1.0692973e+00, 1.4393831e+00, 5.0645941e-01, 8.3597529e-01,
1.1752968e+00, -3.4211743e-01, -3.8716367e-01, 5.4765379e-01,
-1.5891987e-01, -7.3604894e-01, -2.3351878e-01, -5.4039150e-01,
1.5708433e-01, -5.9762299e-01, -8.8390934e-01, 6.0767305e-01,
-3.8843614e-01, -3.1578582e-02, -5.6058836e-01, -6.5552413e-01,
7.2615027e-01, 6.7892069e-01, -4.3017429e-01, -3.8485083e-01,
-1.5082921e+00, -7.1995616e-01, -1.1909670e+00, 1.3271062e+00,
-2.1984124e+00, 2.8614265e-01, -2.0104712e-01, -2.5348804e+00,
-1.5848289e+00, 2.1679449e-01, -1.4276333e-01, 1.4274154e+00,
1.6425379e-01, -3.1606898e-01, 1.2852281e-01, -5.2765143e-01,
1.0834497e+00, 7.2746372e-01, 5.7725620e-01, 5.3688127e-01,
-4.3616110e-01, 2.7676934e-01, 2.9459488e-01, -5.6314898e-01,
5.1899290e-01, 1.3394899e+00, -2.3876244e-01, -6.7961216e-02,
-1.5035529e-01, 5.2330041e-01, -2.1156418e-01, -1.2541972e+00,
1.8176029e-02, 1.4141930e+00, -1.7437581e+00, 1.1289321e-01,
4.5267120e-01, 3.1554270e-01, -6.9010293e-01, -2.8289640e-01,
3.5618150e-01, -6.5616649e-01, 6.7499673e-01, 1.2909728e+00,
2.8768075e-01, 1.1313233e+00, -1.9227705e-03, -2.3545134e-01,
-7.7834469e-01, 1.7674841e-02, 1.1869689e+00, -5.9568787e-01,
-1.5738513e+00, 9.0094990e-01, 1.0499262e+00, 4.2925611e-01,
3.4665063e-01, 1.1960464e+00, 5.0744399e-02, -2.4047236e+00,
6.6365647e-01, -3.9687249e-01, 4.0486488e-01, 3.4154087e-01,
-5.9558362e-01, 1.1019011e+00, 5.5386519e-01, -9.5087808e-01,
-5.0393552e-01, 1.7358937e+00, 1.1365190e+00, 7.3528785e-01,
-6.3713288e-01, -8.8953024e-01, 5.9735751e-01, -6.1928016e-01,
1.2089928e+00, 8.0966818e-01, -3.7273017e-01, -5.3331411e-01,
-4.9985203e-01, 3.9947726e-02, -7.8146380e-01, 3.1946027e-01,
8.2106584e-01, 8.6431539e-01, 4.9166805e-01, 4.4538009e-01,
-8.8726664e-01, 5.2979738e-01, 2.6839951e-01, 3.5011527e-01,
-2.7225810e-01, 1.0665658e+00, -8.9532214e-01, 1.4147978e+00,
-9.1728181e-01, 8.3720893e-01, 1.4950181e+00, -8.3034581e-01,
-1.9900607e+00, -8.7786657e-01, 2.2035673e-01, -1.9547749e+00,
8.5329479e-01, -1.4188342e+00, 9.8297036e-01, -5.3868419e-01,
1.3784917e-01, 9.2474985e-01, 2.9384881e-01, 3.0301414e+00,
-1.4259109e+00, 3.3642095e-01, -6.0710046e-02, -2.7827954e+00,
1.3488874e+00, 2.6844734e-01, -1.1277022e+00, -5.9944046e-01,
-2.7945054e-01, -2.1999671e-01, 1.1315615e+00, -5.5813056e-01,
-8.4985018e-01, -5.9133893e-01, 9.1871524e-01, -1.7054160e+00,
-6.2452555e-01, -1.5477768e+00, -4.3917063e-01, -8.2900178e-01,
-4.2779538e-01, 1.2994735e+00, -1.0199753e+00, -8.5336286e-01,
-1.8470149e+00, -5.6316632e-01, -2.9311785e-01, -1.5726203e+00,
-1.0079967e+00, -1.1254747e+00, 2.0839548e+00, 2.8445369e-01,
-2.0898786e-01, 2.7948596e+00, 9.4693983e-01, 1.1613066e+00,
2.1592824e-02, 2.1849406e+00, 3.7046966e-01, 8.3229375e-01,
1.0294781e+00, -4.6743554e-01, 1.2099822e+00, -9.2927051e-01,
1.5964565e+00, -3.5177864e-02, 1.9276363e-01, 9.4458717e-01,
4.0307879e-01, 7.8339100e-01, 1.6240975e+00, -1.9683785e+00,
9.2987645e-01, 1.5981036e+00, 4.2616895e-01, 2.5072601e+00,
4.4090030e-01, -2.0394561e+00, 1.0628663e+00, 7.7601296e-01,
8.3457164e-02, 1.7073935e+00, -2.0758156e-01, -2.7201766e-01,
-6.5246433e-01, 2.3190866e+00, -3.1556660e-01, 1.2293459e+00,
1.9086858e-02, 1.6939967e+00, -9.7426087e-01, 1.0000985e-01,
1.6331865e-01, 1.1104544e+00, 6.5858930e-01, -1.8446711e-01,
-6.9782162e-01, 5.4673910e-01, -1.0919048e+00, -2.0058967e-01,
-2.1976221e-01, -7.5056171e-01, 9.1047740e-01, 1.4996040e+00,
-2.7725294e-01, 9.9202655e-02, -1.5756993e+00, 7.4856669e-01,
-2.4229655e-01, -1.8000333e-01, 9.5837879e-01, 3.7814003e-01,
1.9289158e-01, 2.4711327e-01, -3.1152922e-01, 4.4534847e-02,
-7.7046400e-01, 4.5658717e-01, -1.3150460e+00, -5.0721991e-01,
4.1748023e-01, 9.2643857e-01, 6.3569260e-01, -1.6128796e-01,
1.0286627e+00, 4.7581047e-02, 4.1486391e-01, -2.7009306e+00,
-1.5045499e+00, -1.8634710e-01, -9.3207240e-01, 3.0545831e-01,
-5.1035285e-01, 8.7927073e-01, 1.7738712e+00, -1.3286506e-01,
1.3458737e+00, -4.6432903e-01, -3.7430039e-01, 9.7058731e-01,
-1.9518436e+00, -6.4998013e-01, 1.3482264e+00, 3.0995172e-01,
-1.5216483e+00, 9.7610706e-01, 3.9083481e-01, 2.7913565e-02,
-4.1744223e-01, 1.7064806e+00, -2.5080970e-01, -3.3612009e-02,
5.8338016e-01, 1.6178854e+00, -1.3733586e+00, -8.5550433e-01,
1.5778065e+00, 1.0752751e-01, 1.1045673e+00, 5.9758538e-01,
7.1269102e-02, -5.0374931e-01, 8.0341589e-01, 1.1834451e+00,
6.3811505e-01, -5.0269210e-01, -9.9724096e-01, -5.6425828e-01,
-3.4610125e-01, 2.7074468e-01, -1.3578615e+00, -9.6113062e-01,
1.1768451e+00, 1.1981529e-01, 6.6130060e-01, 1.7996032e+00,
-1.4726470e+00, -1.4529139e+00, 2.5632006e-01, -7.5283742e-01,
1.2143371e+00, 5.3680718e-01, -5.9180927e-01, 1.1358957e+00,
1.4462845e+00, -1.1436753e+00, 7.8876835e-01, -6.7686230e-01,
-9.3259799e-01, 7.4118137e-02, 2.1128911e-01, 2.6312185e-02,
-2.2259822e-02, -1.5083861e+00, -2.7273307e+00, -8.5954350e-01,
-4.6734902e-01, 1.5499024e+00, 4.5016751e-01, 1.2971551e+00,
2.9964414e-01, -1.0238653e+00, 1.0269226e+00, -1.9246057e-01
};
#endif // MINDSPORE_LITE_INPUT_DATA_H_

View File

@ -1,5 +0,0 @@
# Ignore everything in this directory
*
# Except this file
!.gitignore

View File

@ -1,5 +0,0 @@
# Ignore everything in this directory
*
# Except this file
!.gitignore

View File

@ -1,51 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "nnacl/fp32/exp_fp32.h"
#include <math.h>
#include <string.h>
#include "nnacl/errorcode.h"
int Exp(const float *input_data, float *output_data, const ExpParameter *parameter, int task_id) {
if (parameter->scale_ == 1) {
for (size_t i = task_id; i < parameter->element_num_; i += parameter->thread_num_) {
output_data[i] = expf(input_data[i]);
}
} else {
for (size_t i = task_id; i < parameter->element_num_; i += parameter->thread_num_) {
output_data[i] = expf(input_data[i] * parameter->in_scale_);
}
}
if (parameter->out_scale_ != 1) {
for (size_t i = task_id; i < parameter->element_num_; i += parameter->thread_num_) {
output_data[i] = output_data[i] * parameter->out_scale_;
}
}
return NNACL_OK;
}
void ExpFp32(const float *src, float *dst, int num) {
int i = 0;
#ifdef ENABLE_ARM64
int count = (num / C4NUM) * C4NUM;
for (; i < count; i += C4NUM) {
simd_exp128(vld1q_f32(src + i), dst + i);
}
#endif
for (; i < num; ++i) {
simd_exp32(src[i], dst + i);
}
}
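A single-threaded driver sketch for Exp(); the ExpParameter fields are taken from their use above, other members are left value-initialized, and ExpSingleThread is a hypothetical helper, not part of nnacl:

#include "nnacl/fp32/exp_fp32.h"

void ExpSingleThread(const float *in, float *out, int n) {
  ExpParameter param{};
  param.scale_ = 1;         // fast path: plain expf(x), no input scaling
  param.in_scale_ = 1.0f;
  param.out_scale_ = 1.0f;  // a value != 1 would scale the results afterwards
  param.element_num_ = n;
  param.thread_num_ = 1;    // thread task_id handles i = task_id, task_id + thread_num_, ...
  Exp(in, out, &param, /*task_id=*/0);
}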

View File

@ -1,146 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "nnacl/fp32/softmax_fp32.h"
#include <math.h>
#include <float.h>
#include "nnacl/fp32/exp_fp32.h"
void SoftmaxNorm(const float *src, float *dst, int batch, int channel) {
int cur_batch_offset = 0;
for (int i = 0; i < batch; i++, cur_batch_offset += channel) {
int j = 0;
#ifdef ENABLE_NEON
float32x4_t max4 = vdupq_n_f32(-FLT_MAX);
int count = (channel / C4NUM) * C4NUM;
for (; j < count; j += C4NUM) {
float32x4_t input4 = vld1q_f32(src + cur_batch_offset + j);
max4 = vmaxq_f32(max4, input4);
}
#ifdef ENABLE_ARM64
float max = vmaxvq_f32(max4);
#else
float max = max4[0];
for (int m = 1; m < 4; ++m) {
max = MSMAX(max, max4[m]);
}
#endif
#else
float max = -FLT_MAX;
#endif
for (; j < channel; j++) {
float input = src[cur_batch_offset + j];
if (input > max) {
max = input;
}
}
int k = 0;
#ifdef ENABLE_NEON
int count2 = (channel / C4NUM) * C4NUM;
for (; k < count2; k += C4NUM) {
float32x4_t input4 = vld1q_f32(src + cur_batch_offset + k);
float32x4_t output4 = vsubq_f32(input4, vdupq_n_f32(max));
vst1q_f32(dst + cur_batch_offset + k, output4);
}
#endif
for (; k < channel; k++) {
int offset = cur_batch_offset + k;
dst[offset] = src[offset] - max;
}
}
}
void SumAndDiv(const float *src, float *dst, int batch, int channel) {
int cur_batch_offset = 0;
for (int i = 0; i < batch; i++, cur_batch_offset += channel) {
float sum = 0;
int j = 0;
#ifdef ENABLE_NEON
float32x4_t sum4 = vdupq_n_f32(0);
int count = (channel / C4NUM) * C4NUM;
for (; j < count; j += C4NUM) {
sum4 = vaddq_f32(sum4, vld1q_f32(src + cur_batch_offset + j));
}
#ifdef ENABLE_ARM64
sum = vaddvq_f32(sum4);
#else
sum = sum4[0] + sum4[1] + sum4[2] + sum4[3];
#endif
#endif
for (; j < channel; j++) {
sum += src[cur_batch_offset + j];
}
int k = 0;
#ifdef ENABLE_NEON
const float div = 1.0f / sum;
for (; k < count; k += C4NUM) {
vst1q_f32(dst + cur_batch_offset + k, vmulq_n_f32(vld1q_f32(src + cur_batch_offset + k), div));
}
#endif
for (; k < channel; k++) {
dst[cur_batch_offset + k] = src[cur_batch_offset + k] / sum;
}
}
}
void SoftmaxLastAxis(const float *src, float *dst, int batch, int channel) {
SoftmaxNorm(src, dst, batch, channel);
ExpFp32(dst, dst, batch * channel);
SumAndDiv(dst, dst, batch, channel);
}
// output = exp(input) / reduce_sum(exp(input), axis)
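// Note: sum_data is accumulated with +=, so the caller must zero-initialize it.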
void Softmax(const float *input_ptr, float *output_ptr, float *sum_data, const SoftmaxParameter *parameter) {
int axis = parameter->axis_;
int n_dim = parameter->n_dim_;
const int *input_shape = parameter->input_shape_;
int inner_size = 1;
int outter_size = 1;
for (int i = 0; i < axis; i++) {
outter_size *= input_shape[i];
}
for (int i = axis + 1; i < n_dim; i++) {
inner_size *= input_shape[i];
}
for (int i = 0; i < outter_size; i++) {
int outter_offset = i * input_shape[axis] * inner_size;
int sum_outter_offset = i * inner_size;
for (int k = 0; k < inner_size; k++) {
int inner_offset = outter_offset + k;
float max_data = input_ptr[inner_offset];
for (int j = 0; j < input_shape[axis]; j++) {
int axis_offset = inner_offset + j * inner_size;
max_data = max_data > input_ptr[axis_offset] ? max_data : input_ptr[axis_offset];
}
for (int j = 0; j < input_shape[axis]; j++) {
int axis_offset = inner_offset + j * inner_size;
output_ptr[axis_offset] = exp(input_ptr[axis_offset] - max_data);
sum_data[k + sum_outter_offset] += output_ptr[axis_offset];
}
}
}
for (int i = 0; i < outter_size; i++) {
int outter_offset = i * input_shape[axis] * inner_size;
int sum_outter_offset = i * inner_size;
for (int j = 0; j < input_shape[axis]; j++) {
int axis_offset = outter_offset + j * inner_size;
for (int k = 0; k < inner_size; k++) {
int inner_offset = axis_offset + k;
output_ptr[inner_offset] = output_ptr[inner_offset] / sum_data[k + sum_outter_offset];
}
}
}
}
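A small worked example of the last-axis helper (hypothetical buffers; assumes the nnacl headers are on the include path):

#include "nnacl/fp32/softmax_fp32.h"

void SoftmaxDemo() {
  float logits[6] = {1.0f, 2.0f, 3.0f, 0.5f, 0.5f, 0.5f};
  float probs[6];
  SoftmaxLastAxis(logits, probs, /*batch=*/2, /*channel=*/3);
  // Each row of probs now sums to 1: row 0 ~ {0.09, 0.24, 0.67}, row 1 = {1/3, 1/3, 1/3}.
}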

View File

@ -1,125 +0,0 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <math.h>
#include "nnacl/int8/quant_dtype_cast_int8.h"
#include "nnacl/errorcode.h"
int DoDequantizeInt8ToFp32(const int8_t *quant_values, float *real_values, float scale, int32_t zp, int size) {
if (quant_values == NULL || real_values == NULL) {
return NNACL_PARAM_INVALID;
}
for (int i = 0; i < size; ++i) {
real_values[i] = (quant_values[i] - zp) * scale;
}
return NNACL_OK;
}
int DoQuantizeFp32ToInt8(const float *real_values, int8_t *quant_values, float scale, int32_t zp, int size,
bool uint8_flag) {
if (quant_values == NULL || real_values == NULL) {
return NNACL_PARAM_INVALID;
}
if (uint8_flag) {
zp += 128;
}
const float inverse_scale = 1.0f / scale;
for (int i = 0; i < size; ++i) {
if (isinf(real_values[i])) {
// Saturate +inf/-inf to the matching end of the int8 range.
quant_values[i] = (real_values[i] > 0) ? 127 : -128;
} else {
int temp = round(real_values[i] * inverse_scale + zp);
if (uint8_flag) {
temp -= 128;
}
temp = temp < 127 ? temp : 127;
temp = temp > -128 ? temp : -128;
quant_values[i] = (int8_t)temp;
}
}
return NNACL_OK;
}
int DoDequantizeUInt8ToFp32(const uint8_t *quant_values, float *real_values, float scale, int32_t zp, int size) {
if (quant_values == NULL || real_values == NULL) {
return NNACL_PARAM_INVALID;
}
for (int i = 0; i < size; ++i) {
real_values[i] = (float)((int)quant_values[i] - zp) * scale;
}
return NNACL_OK;
}
int DoQuantizeFp32ToUInt8(const float *real_values, uint8_t *quant_values, float scale, int32_t zp, int size) {
if (quant_values == NULL || real_values == NULL) {
return NNACL_PARAM_INVALID;
}
for (int i = 0; i < size; ++i) {
if (isinf(real_values[i])) {
// Saturate +inf/-inf to the matching end of the uint8 range.
quant_values[i] = (real_values[i] > 0) ? 255 : 0;
} else {
float temp = (float)round(real_values[i] * 1.0 / scale + zp);
if (temp > 255) {
quant_values[i] = 255;
} else if (temp < 0) {
quant_values[i] = 0;
} else {
quant_values[i] = (uint8_t)temp;
}
}
}
return NNACL_OK;
}
int Int8ToUInt8(const int8_t *quant_values, uint8_t *real_values, int size) {
if (quant_values == NULL || real_values == NULL) {
return NNACL_PARAM_INVALID;
}
for (int i = 0; i < size; ++i) {
int temp = quant_values[i] + 128;
if (temp > 255) {
real_values[i] = (uint8_t)255;
} else if (temp < 0) {
real_values[i] = 0;
} else {
real_values[i] = (uint8_t)temp;
}
}
return NNACL_OK;
}
int UInt8ToInt8(const uint8_t *real_values, int8_t *quant_values, int size) {
if (quant_values == NULL || real_values == NULL) {
return NNACL_PARAM_INVALID;
}
for (int i = 0; i < size; ++i) {
int temp = real_values[i] - 128;
if (temp > 127) {
quant_values[i] = 127;
} else if (temp < -128) {
quant_values[i] = -128;
} else {
quant_values[i] = (int8_t)temp;
}
}
return NNACL_OK;
}
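A sketch of a float to int8 round trip with the helpers above (scale and zero point are hypothetical):

#include <cstdint>
#include "nnacl/int8/quant_dtype_cast_int8.h"

void QuantRoundTrip() {
  float real[4] = {-1.0f, 0.0f, 0.5f, 1.0f};
  int8_t quant[4];
  float back[4];
  const float scale = 1.0f / 128;  // with zp = 0 this maps [-1, 1) onto [-128, 127]
  DoQuantizeFp32ToInt8(real, quant, scale, /*zp=*/0, 4, /*uint8_flag=*/false);
  DoDequantizeInt8ToFp32(quant, back, scale, /*zp=*/0, 4);
  // quant = {-128, 0, 64, 127}; back ~ {-1.0, 0.0, 0.5, 0.992}.
}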

View File

@ -1,5 +0,0 @@
# Ignore everything in this directory
*
# Except this file
!.gitignore

View File

@ -1,85 +0,0 @@
cmake_minimum_required(VERSION 3.14)
project(net)
if(NOT DEFINED PKG_PATH)
message(FATAL_ERROR "PKG_PATH not set")
endif()
get_filename_component(PKG_PATH ${PKG_PATH} ABSOLUTE BASE_DIR ${CMAKE_CURRENT_BINARY_DIR})
set(OP_LIB ${PKG_PATH}/tools/codegen/operator_library/lib/libops.a)
set(OP_HEADER_PATH ${PKG_PATH}/tools/codegen/operator_library/include)
set(HEADER_PATH ${PKG_PATH}/runtime)
message("operator lib path: ${OP_LIB}")
message("operator header path: ${OP_HEADER_PATH}")
add_compile_definitions(NOT_USE_STL)
include_directories(${OP_HEADER_PATH})
include_directories(${HEADER_PATH})
include(net.cmake)
option(MICRO_BUILD_ARM64 "build android arm64" OFF)
option(MICRO_BUILD_ARM32A "build android arm32" OFF)
if(MICRO_BUILD_ARM64 OR MICRO_BUILD_ARM32A)
add_compile_definitions(ENABLE_NEON)
add_compile_definitions(ENABLE_ARM)
endif()
if(MICRO_BUILD_ARM64)
add_compile_definitions(ENABLE_ARM64)
endif()
if(MICRO_BUILD_ARM32A)
add_compile_definitions(ENABLE_ARM32)
add_definitions(-mfloat-abi=softfp -mfpu=neon)
endif()
set(CMAKE_C_FLAGS "${CMAKE_ENABLE_C99} ${CMAKE_C_FLAGS}")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++17")
if("${CMAKE_BUILD_TYPE}" STREQUAL "Debug")
message(STATUS "build net library with debug info")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DDebug -g")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DDebug -g")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fvisibility=default")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fvisibility=default")
else()
message(STATUS "build net library release version")
set(CMAKE_C_FLAGS "-fPIC -fPIE -D_FORTIFY_SOURCE=2 -O3 -Wall -Werror -fstack-protector-strong -Wno-attributes \
-Wno-deprecated-declarations -Wno-missing-braces ${CMAKE_C_FLAGS}")
set(CMAKE_CXX_FLAGS "-fPIC -fPIE -D_FORTIFY_SOURCE=2 -O3 -Wall -Werror -fstack-protector-strong -Wno-attributes \
-Wno-deprecated-declarations -Wno-missing-braces -Wno-overloaded-virtual ${CMAKE_CXX_FLAGS}")
string(REPLACE "-g" "" CMAKE_C_FLAGS "${CMAKE_C_FLAGS}")
string(REPLACE "-g" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
endif()
function(create_library)
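# Post-build repack: unpack the prebuilt operator archive (OP_LIB) and the
# freshly built net archive, then re-archive the objects listed in OP_SRC into
# a single self-contained static library.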
add_custom_command(TARGET net
POST_BUILD
COMMAND rm -rf tmp
COMMAND mkdir tmp
COMMAND cd tmp && ar -x ${OP_LIB}
COMMAND echo "raw static library ${library_name} size:"
COMMAND ls -lh ${library_name}
COMMAND mv ${library_name} ./tmp && cd tmp && ar -x ${library_name}
COMMENT "unzip raw static library ${library_name}"
)
foreach(object_file ${OP_SRC})
add_custom_command(TARGET net POST_BUILD COMMAND mv ./tmp/${object_file} .)
endforeach()
add_custom_command(TARGET net
POST_BUILD
COMMAND ar cr ${library_name} *.o
COMMAND ranlib ${library_name}
COMMAND echo "new static library ${library_name} size:"
COMMAND ls -lh ${library_name}
COMMAND rm -rf tmp && rm -rf *.o
COMMENT "generate specified static library ${library_name}"
)
endfunction(create_library)
string(CONCAT library_name "lib" net ".a")
create_library()

View File

@ -1,64 +0,0 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_LIBRARY_SOURCE_MODEL_H_
#define MINDSPORE_LITE_LIBRARY_SOURCE_MODEL_H_
#include "include/model.h"
#include "session.h"
#include <new>
#include <string.h>
namespace mindspore::lite {
class MModel : public Model {
public:
void Free() override {
if (this->buf != nullptr) {
free(this->buf);
this->buf = nullptr;
this->buf_size_ = 0;
}
}
void Destroy() override { Free(); }
~MModel() override { Destroy(); }
void set_buf_size(size_t size) { buf_size_ = size; }
size_t buf_size() const { return buf_size_; }
private:
size_t buf_size_{0};
};
Model *Model::Import(const char *model_buf, size_t size) {
MS_NULLPTR_IF_NULL(model_buf);
if (size == 0) {
return nullptr;
}
MModel *model = new (std::nothrow) MModel();
MS_NULLPTR_IF_NULL(model);
model->buf = reinterpret_cast<char *>(malloc(size));
if (model->buf == nullptr) {
delete model;
return nullptr;
}
memcpy(model->buf, model_buf, size);
model->set_buf_size(size);
return model;
}
} // namespace mindspore::lite
#endif // MINDSPORE_LITE_LIBRARY_SOURCE_MODEL_H_
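A sketch of the import/release cycle; buf and size are supplied by the caller (e.g. read from a .ms file), and ImportAndRelease is a hypothetical helper:

#include <cstddef>
#include "include/model.h"

void ImportAndRelease(const char *buf, size_t size) {
  mindspore::lite::Model *model = mindspore::lite::Model::Import(buf, size);
  if (model == nullptr) {
    return;  // null buffer, zero size, or allocation failure
  }
  // ... pass the model to LiteSession::CompileGraph() here ...
  delete model;  // ~MModel() runs Destroy()/Free() and releases the copied buffer
}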

View File

@ -1,97 +0,0 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "weight.h"
#include "net.h"
static const unsigned char *g_Input0 = 0;
int SetInputs(const void **inputs, int num) {
if (inputs == NULL) {
return RET_ERROR;
}
if (num != 1) {
return RET_ERROR;
}
g_Input0 = inputs[0];
return RET_OK;
}
int CopyOutputsData(void **outputs, int num) {
if (outputs == NULL) {
return RET_ERROR;
}
if (num != 1) {
return RET_ERROR;
}
memcpy(outputs[0], g_Buffer+32, 40);
return RET_OK;
}
int GetBufferSize() {
return 10576;
}
int SetBuffer(void *buffer) {
if (buffer == NULL) {
return RET_ERROR;
}
g_Buffer = buffer;
return RET_OK;
}
void FreeResource() {
g_Buffer = NULL;
g_Input0 = NULL;
void *allocated[] = { };
for (int i = 0; i < 0; ++i) {
free(allocated[i]);
allocated[i] = NULL;
}
}
void Inference() {
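// Generated pipeline: conv3x3 (int8) -> maxpool 2x2 -> conv3x3 (int8) -> maxpool 2x2
// -> reshape -> fully-connected 300->20 -> fully-connected 20->10 -> softmax (int8)
// -> dequantize to fp32. All activations live in the g_Buffer arena set via SetBuffer().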
{
memset((int16_t *)(g_Buffer+10144), 0, 36);
const int output_shift[12] = {-9, -9, -9, -9, -9, -9, -9, -10, -10, -9, -9, -9};
const int output_mult[12] = {1354133566, 1485574432, 1737792646, 1225484872, 1221530746, 1184403831, 1344308820, 1080459089, 1432168625, 1245831715, 1804167149, 1092395059};
arm_convolve_s8((int8_t *)(g_Input0), 28, 28, 1, 1, g_Weight1, 12, 3, 3, 0, 0, 1, 1, g_Weight2, (int8_t *)(g_Buffer+0), output_shift, output_mult, 17, 128, -128, 127, 26, 26, (int16_t *)(g_Buffer+10144));
}
{
arm_max_pool_s8(26, 26, 13, 13, 2, 2, 2, 2, 0, 0, -128, 127, 12, (int8_t *)(g_Buffer+0), NULL, (int8_t *)(g_Buffer+8112));
}
{
memset((int16_t *)(g_Buffer+10144), 0, 432);
const int output_shift[12] = {-10, -10, -10, -9, -10, -10, -10, -10, -10, -9, -9, -10};
const int output_mult[12] = {2143437276, 1710269977, 1664140445, 1275314678, 2121906679, 1591651427, 1589631258, 1721320620, 1939131746, 1186858310, 1223164752, 1583392613};
arm_convolve_s8((int8_t *)(g_Buffer+8112), 13, 13, 12, 1, g_Weight3, 12, 3, 3, 0, 0, 1, 1, g_Weight4, (int8_t *)(g_Buffer+0), output_shift, output_mult, 31, -17, -128, 127, 11, 11, (int16_t *)(g_Buffer+10144));
}
{
arm_max_pool_s8(11, 11, 5, 5, 2, 2, 2, 2, 0, 0, -128, 127, 12, (int8_t *)(g_Buffer+0), NULL, (int8_t *)(g_Buffer+1456));
}
{
memcpy((int8_t *)(g_Buffer+0), (int8_t *)(g_Buffer+1456), 300);
}
{
arm_fully_connected_s8((int8_t *)(g_Buffer+0), g_Weight6, 300, 20, 1, -31, 0, 1379728884, -8, 11, g_Weight7, (int8_t *)(g_Buffer+304), -128, 127, NULL);
}
{
arm_fully_connected_s8((int8_t *)(g_Buffer+304), g_Weight8, 20, 10, 1, -11, 0, 1282256809, -8, -20, g_Weight9, (int8_t *)(g_Buffer+0), -128, 127, NULL);
}
{
arm_softmax_s8((int8_t *)(g_Buffer+0), 1, 10, 1152553088, 27, -15, (int8_t *)(g_Buffer+16));
}
{
DoDequantizeInt8ToFp32((int8_t *)(g_Buffer+16), (float *)(g_Buffer+32), 0.00390625, -128, 10);
}
}

View File

@ -1,24 +0,0 @@
include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../include/)
include_directories(${OP_HEADER_PATH}/CMSIS/NN/Include)
include_directories(${OP_HEADER_PATH}/CMSIS/DSP/Include)
include_directories(${OP_HEADER_PATH}/CMSIS/Core/Include)
set(OP_SRC
arm_convolve_s8.c.o
arm_fully_connected_s8.c.o
arm_max_pool_s8.c.o
arm_nn_mat_mult_kernel_s8_s16.c.o
arm_nn_vec_mat_mult_t_s8.c.o
arm_q7_to_q15_with_offset.c.o
arm_softmax_s8.c.o
quant_dtype_cast_int8.c.o
weight.c.o
net.c.o
session.cc.o
tensor.cc.o
string.cc.o
)
file(GLOB NET_SRC
${CMAKE_CURRENT_SOURCE_DIR}/*.cc
${CMAKE_CURRENT_SOURCE_DIR}/*.c
)
add_library(net STATIC ${NET_SRC})

View File

@ -1,50 +0,0 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifdef __cplusplus
extern "C" {
#endif
/**
 * Set the input data of the model.
 * @param inputs, array of input data pointers; a model may have more than one input tensor.
 * @param num, number of input pointers in the array.
 **/
int SetInputs(const void **inputs, int num);
/**
 * Copy the inference results into the caller-provided output buffers.
 * @param outputs, array of output buffer pointers.
 * @param num, number of output pointers in the array.
 **/
int CopyOutputsData(void **outputs, int num);
/**
 * Get the byte size of the workspace buffer required for inference.
 **/
int GetBufferSize();
/**
 * Set the workspace buffer used during inference.
 **/
int SetBuffer(void *buffer);
/**
 * Free the memory of packed weights, and reset the workspace buffer and input pointers to NULL.
 **/
void FreeResource();
/**
 * Run the net inference.
 **/
void Inference();
#ifdef __cplusplus
}
#endif
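A sketch of the intended call order for this generated C API; RunOnce is a hypothetical driver, the buffer sizes match the MNIST net above (28x28x1 int8 in, 10 floats out), and the generated RET_OK is assumed to be 0:

#include <stdlib.h>
#include "net.h"

int RunOnce(const void *input, void *output) {
  void *workspace = malloc((size_t)GetBufferSize());
  if (workspace == NULL) {
    return -1;
  }
  if (SetBuffer(workspace) != 0) {  /* RET_OK assumed to be 0 */
    free(workspace);
    return -1;
  }
  const void *inputs[1] = {input};
  void *outputs[1] = {output};
  int ret = SetInputs(inputs, 1);
  if (ret == 0) {
    Inference();
    ret = CopyOutputsData(outputs, 1);  /* copies the 10 fp32 scores */
  }
  FreeResource();
  free(workspace);
  return ret;
}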

View File

@ -1,148 +0,0 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "session.h"
#include "mmodel.h"
#include "net.h"
#include <new>
namespace mindspore {
namespace lite {
int LiteSession::CompileGraph(lite::Model *model) {
inputs_.resize(1);
Vector<int> in_shape_0;
in_shape_0.resize(4);
in_shape_0[0] = 1;
in_shape_0[1] = 28;
in_shape_0[2] = 28;
in_shape_0[3] = 1;
inputs_[0] = new (std::nothrow) MTensor(String("graph_input-0"), kNumberTypeInt8, in_shape_0);
MS_ERROR_IF_NULL(inputs_[0]);
outputs_.resize(1);
Vector<int> out_shape_0;
out_shape_0.resize(2);
out_shape_0[0] = 1;
out_shape_0[1] = 10;
outputs_[0] = new (std::nothrow) MTensor(String("int8toft32_Softmax-7_post0/output-0"), kNumberTypeFloat32, out_shape_0);
MS_ERROR_IF_NULL(outputs_[0]);
return RET_OK;
}
int LiteSession::RunGraph(const KernelCallBack &before, const KernelCallBack &after) {
const void *inputs_data[inputs_.size()];
for (size_t i = 0; i < inputs_.size(); ++i) {
inputs_data[i] = inputs_[i]->MutableData();
}
SetInputs(inputs_data, inputs_.size());
Inference();
void *outputs_data[outputs_.size()];
for (size_t i = 0; i < outputs_.size(); ++i) {
outputs_data[i] = outputs_[i]->MutableData();
}
CopyOutputsData(outputs_data, outputs_.size());
return RET_OK;
}
LiteSession::~LiteSession() {
FreeResource();
if (runtime_buffer_ != nullptr) {
free(runtime_buffer_);
runtime_buffer_ = nullptr;
}
for (auto &input : inputs_) {
if (input == nullptr) {
continue;
}
delete input;
input = nullptr;
}
for (auto &output : outputs_) {
if (output == nullptr) {
continue;
}
delete output;
output = nullptr;
}
}
int LiteSession::InitRuntimeBuffer() {
int buffer_size = GetBufferSize();
runtime_buffer_ = malloc(buffer_size);
if (runtime_buffer_ == nullptr) {
return RET_ERROR;
}
int ret = SetBuffer(runtime_buffer_);
if (ret != RET_OK) {
return RET_ERROR;
}
return RET_OK;
}
Vector<tensor::MSTensor *> LiteSession::GetInputs() const {
Vector<tensor::MSTensor *> inputs;
for (const auto &input : inputs_) {
inputs.push_back(input);
}
return inputs;
}
Vector<tensor::MSTensor *> LiteSession::GetOutputsByNodeName(const String &node_name) const {
Vector<tensor::MSTensor *> outputs;
return outputs;
}
Vector<String> LiteSession::GetOutputTensorNames() const {
Vector<String> output_names;
for (const auto &output : outputs_) {
output_names.push_back(output->tensor_name());
}
return output_names;
}
mindspore::tensor::MSTensor *LiteSession::GetOutputByTensorName(const String &tensor_name) const {
for (const auto &output : outputs_) {
if (output->tensor_name() == tensor_name) {
return output;
}
}
return nullptr;
}
} // namespace lite
session::LiteSession *session::LiteSession::CreateSession(const lite::Context *context) {
auto *session = new (std::nothrow) lite::LiteSession();
MS_NULLPTR_IF_NULL(session);
int ret = session->InitRuntimeBuffer();
MS_NULLPTR_IF_ERROR(ret);
return session;
}
session::LiteSession *session::LiteSession::CreateSession(const char *model_buf, size_t size,
const lite::Context *context) {
session::LiteSession *session = CreateSession(context);
MS_NULLPTR_IF_NULL(session);
lite::Model *model = lite::Model::Import(model_buf, size);
MS_NULLPTR_IF_NULL(model);
int ret = session->CompileGraph(model);
delete model;
MS_NULLPTR_IF_ERROR(ret);
return session;
}
} // namespace mindspore
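An end-to-end sketch at the session level; model_buf, size and image are supplied by the caller, RunMnist is a hypothetical helper, and the output tensor name matches CompileGraph() above:

#include <cstdint>
#include <cstring>
#include "include/lite_session.h"

float RunMnist(const char *model_buf, size_t size, const int8_t *image) {
  auto *session = mindspore::session::LiteSession::CreateSession(model_buf, size, nullptr);
  if (session == nullptr) {
    return -1.0f;
  }
  auto inputs = session->GetInputs();
  memcpy(inputs[0]->MutableData(), image, inputs[0]->Size());  // 1x28x28x1 int8
  session->RunGraph();
  auto *out = session->GetOutputByTensorName("int8toft32_Softmax-7_post0/output-0");
  float score = (out != nullptr) ? static_cast<float *>(out->MutableData())[0] : -1.0f;
  delete session;
  return score;  // probability of class 0
}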

View File

@ -1,85 +0,0 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_MICRO_LIBRARY_SOURCE_SESSION_H_
#define MINDSPORE_LITE_MICRO_LIBRARY_SOURCE_SESSION_H_
#include "include/errorcode.h"
#include "include/lite_session.h"
#include "tensor.h"
namespace mindspore {
namespace lite {
#define MS_ERROR_IF_NULL(ptr) \
do { \
if ((ptr) == nullptr) { \
return mindspore::lite::RET_ERROR; \
} \
} while (0)
#define MS_NULLPTR_IF_NULL(ptr) \
do { \
if ((ptr) == nullptr) { \
return nullptr; \
} \
} while (0)
#define MS_NULLPTR_IF_ERROR(ptr) \
do { \
if ((ptr) != mindspore::lite::RET_OK) { \
return nullptr; \
} \
} while (0)
class LiteSession : public session::LiteSession {
public:
LiteSession() = default;
~LiteSession() override;
void BindThread(bool if_bind) override {}
int CompileGraph(lite::Model *model) override;
Vector<tensor::MSTensor *> GetInputs() const override;
mindspore::tensor::MSTensor *GetInputsByTensorName(const String &tensor_name) const override { return nullptr; }
int RunGraph(const KernelCallBack &before = nullptr, const KernelCallBack &after = nullptr) override;
Vector<tensor::MSTensor *> GetOutputsByNodeName(const String &node_name) const override;
Vector<String> GetOutputTensorNames() const override;
mindspore::tensor::MSTensor *GetOutputByTensorName(const String &tensor_name) const override;
int Resize(const Vector<tensor::MSTensor *> &inputs, const Vector<Vector<int>> &dims) override { return RET_ERROR; }
int InitRuntimeBuffer();
private:
Vector<MTensor *> inputs_;
Vector<MTensor *> outputs_;
void *runtime_buffer_ = nullptr;
};
} // namespace lite
} // namespace mindspore
#endif // MINDSPORE_LITE_MICRO_LIBRARY_SOURCE_SESSION_H_

View File

@ -1,307 +0,0 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifdef NOT_USE_STL
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <float.h>
#include <stdint.h>
#include "include/lite_utils.h"
namespace mindspore {
String::String() {
buffer_ = reinterpret_cast<char *>(malloc(sizeof(char) * 1));
if (buffer_ == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
buffer_[0] = '\0';
size_ = 0;
}
String::String(size_t count, char ch) {
buffer_ = reinterpret_cast<char *>(malloc(sizeof(char) * (count + 1)));
if (buffer_ == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
memset(buffer_, ch, count);
buffer_[count] = '\0';
size_ = count;
}
String::String(const char *s, size_t count) {
if (s == nullptr) {
buffer_ = reinterpret_cast<char *>(malloc(sizeof(char) * 1));
if (buffer_ == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
buffer_[0] = '\0';
size_ = 0;
return;
}
size_t size_s = strlen(s);
if (size_s <= count) {
size_ = size_s;
} else {
size_ = count;
}
buffer_ = reinterpret_cast<char *>(malloc(sizeof(char) * (size_ + 1)));
if (buffer_ == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
strncpy(buffer_, s, size_);
buffer_[size_] = '\0';
}
String::String(const char *s) {
if (s == nullptr) {
buffer_ = reinterpret_cast<char *>(malloc(sizeof(char) * 1));
if (buffer_ == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
buffer_[0] = '\0';
size_ = 0;
return;
}
size_ = strlen(s);
buffer_ = reinterpret_cast<char *>(malloc(sizeof(char) * (size_ + 1)));
if (buffer_ == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
memcpy(buffer_, s, size_ + 1);
}
String::String(const String &other) {
buffer_ = reinterpret_cast<char *>(malloc(sizeof(char) * (other.size_ + 1)));
if (buffer_ == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
size_ = other.size_;
memcpy(buffer_, other.buffer_, size_ + 1);
}
String::String(const String &other, size_t pos, size_t count) {
if (pos >= other.size_) {
buffer_ = reinterpret_cast<char *>(malloc(sizeof(char) * 1));
if (buffer_ == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
buffer_[0] = '\0';
size_ = 0;
} else {
if (count == npos) {
count = other.size_ - pos;
}
if (pos + count > other.size_) {
size_ = other.size_ - pos;
} else {
size_ = count;
}
buffer_ = reinterpret_cast<char *>(malloc(sizeof(char) * (size_ + 1)));
if (buffer_ == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
strncpy(buffer_, other.buffer_ + pos, size_);
buffer_[size_] = '\0';
}
}
String::~String() { free(buffer_); }
String &String::operator=(const String &str) {
if (this == &str) {
return *this;
}
free(buffer_);
buffer_ = reinterpret_cast<char *>(malloc(sizeof(char) * (str.size_ + 1)));
if (buffer_ == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
size_ = str.size_;
memcpy(buffer_, str.buffer_, size_ + 1);
return *this;
}
String &String::operator=(const char *str) {
free(buffer_);
if (str == nullptr) {
buffer_ = reinterpret_cast<char *>(malloc(sizeof(char) * 1));
if (buffer_ == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
buffer_[0] = '\0';
size_ = 0;
return *this;
}
size_t size_s = strlen(str);
buffer_ = reinterpret_cast<char *>(malloc(sizeof(char) * (size_s + 1)));
if (buffer_ == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
size_ = size_s;
memcpy(buffer_, str, size_ + 1);
return *this;
}
char &String::at(size_t pos) {
if (pos >= size_) {
MS_C_EXCEPTION("pos out of range");
}
return buffer_[pos];
}
const char &String::at(size_t pos) const {
if (pos >= size_) {
MS_C_EXCEPTION("pos out of range");
}
return buffer_[pos];
}
char &String::operator[](size_t pos) {
if (pos >= size_) {
MS_C_EXCEPTION("pos out of range");
}
return this->at(pos);
}
const char &String::operator[](size_t pos) const {
if (pos >= size_) {
MS_C_EXCEPTION("pos out of range");
}
return this->at(pos);
}
char *String::data() noexcept { return buffer_; }
const char *String::data() const noexcept { return buffer_; }
const char *String::c_str() const noexcept { return buffer_; }
// capacity
bool String::empty() const noexcept { return size_ == 0; }
size_t String::size() const noexcept { return size_; }
size_t String::length() const noexcept { return size_; }
// operations
void String::clear() noexcept {
free(buffer_);
buffer_ = reinterpret_cast<char *>(malloc(sizeof(char) * 1));
if (buffer_ == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
buffer_[0] = '\0';
size_ = 0;
}
// NOTE: unlike std::string, this member operator+ appends in place and returns *this.
String &String::operator+(const String &str) {
(*this) += str;
return *this;
}
String &String::operator+=(const String &str) {
size_t new_size = size_ + str.size_;
char *tmp = reinterpret_cast<char *>(malloc(sizeof(char) * (new_size + 1)));
if (tmp == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
memcpy(tmp, this->buffer_, size_ + 1);
strncat(tmp, str.buffer_, str.size_);
tmp[new_size] = '\0';
free(buffer_);
buffer_ = tmp;
size_ = new_size;
return *this;
}
String &String::operator+=(const char *str) {
if (str == nullptr) {
return *this;
}
size_t str_size = strlen(str);
size_t new_size = size_ + str_size;
char *tmp = reinterpret_cast<char *>(malloc(sizeof(char) * (new_size + 1)));
if (tmp == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
memcpy(tmp, this->buffer_, size_ + 1);
strncat(tmp, str, str_size);
tmp[new_size] = '\0';
free(buffer_);
buffer_ = tmp;
size_ = new_size;
return *this;
}
String &String::operator+=(const char ch) {
char *tmp = reinterpret_cast<char *>(malloc(sizeof(char) * (size_ + 2)));
if (tmp == nullptr) {
MS_C_EXCEPTION("malloc data failed");
}
memcpy(tmp, this->buffer_, size_ + 1);
tmp[size_] = ch;
tmp[size_ + 1] = '\0';
free(buffer_);
buffer_ = tmp;
size_ += 1;
return *this;
}
String &String::append(size_t count, const char ch) {
for (size_t i = 0; i < count; ++i) {
(*this) += ch;
}
return *this;
}
String &String::append(const String &str) {
(*this) += str;
return *this;
}
String &String::append(const char *str) {
if (str == nullptr) {
return *this;
}
(*this) += str;
return *this;
}
int String::compare(const String &str) const { return strcmp(buffer_, str.buffer_); }
int String::compare(const char *str) const { return strcmp(buffer_, str); }
String String::substr(size_t pos, size_t count) const { return String(*this, pos, count); }
String operator+(const String &lhs, const char *rhs) {
String str = lhs;
str += rhs;
return str;
}
String operator+(const char *lhs, const String &rhs) {
String str = lhs;
str += rhs;
return str;
}
bool operator!=(const String &lhs, const String &rhs) { return lhs.compare(rhs) != 0; }
bool operator==(const String &lhs, const String &rhs) { return lhs.compare(rhs) == 0; }
bool operator==(const String &lhs, const char *rhs) { return lhs.compare(rhs) == 0; }
bool operator==(const char *lhs, const String &rhs) { return rhs.compare(lhs) == 0; }
String to_String(int32_t value) {
char tmp[sizeof(int32_t) * 4];
snprintf(tmp, sizeof(int32_t) * 4, "%d", value);
return String(tmp, strlen(tmp));
}
String to_String(float value) {
char tmp[FLT_MAX_10_EXP + 20];
snprintf(tmp, FLT_MAX_10_EXP + 20, "%f", value);
return String(tmp, strlen(tmp));
}
} // namespace mindspore
#endif // NOT_USE_STL
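A short sketch of this STL-free String in use on NOT_USE_STL builds (StringDemo is a hypothetical helper):

#include "include/lite_utils.h"

void StringDemo() {
  mindspore::String name("conv");
  name += "_1";
  name.append("/out");                             // "conv_1/out"
  bool same = (name == "conv_1/out");              // true
  mindspore::String num = mindspore::to_String(42);  // "42"
  (void)same;
  (void)num;
}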

View File

@ -1,83 +0,0 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "tensor.h"
namespace mindspore {
namespace lite {
size_t DataTypeSize(const TypeId type) {
switch (type) {
case kNumberTypeFloat64:
return sizeof(double);
case kNumberTypeFloat:
case kNumberTypeFloat32:
return sizeof(float);
case kNumberTypeInt8:
return sizeof(int8_t);
case kNumberTypeUInt8:
return sizeof(uint8_t);
case kNumberTypeFloat16:
case kNumberTypeInt16:
return sizeof(int16_t);
case kNumberTypeInt32:
return sizeof(int32_t);
case kNumberTypeInt64:
return sizeof(int64_t);
case kNumberTypeUInt16:
return sizeof(uint16_t);
case kNumberTypeUInt32:
return sizeof(uint32_t);
case kNumberTypeUInt64:
return sizeof(uint64_t);
case kNumberTypeBool:
return sizeof(bool);
case kObjectTypeString:
return sizeof(char);
case kObjectTypeTensorType:
default:
return 0;
}
}
MTensor::~MTensor() {
if (data_ != nullptr) {
free(data_);
data_ = nullptr;
}
}
int MTensor::ElementsNum() const {
int elements = 1;
for (int i : shape_) {
elements *= i;
}
return elements;
}
size_t MTensor::Size() const {
size_t element_size = DataTypeSize(data_type_);
return element_size * ElementsNum();
}
void *MTensor::MutableData() {
  if (data_ == nullptr) {
    // Lazily allocate Size() bytes; the buffer is released in ~MTensor.
    data_ = malloc(this->Size());
  }
  return data_;
}
} // namespace lite
} // namespace mindspore

View File

@ -1,73 +0,0 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_MICRO_LIBRARY_SOURCE_TENSOR_H_
#define MINDSPORE_LITE_MICRO_LIBRARY_SOURCE_TENSOR_H_
#include "include/ms_tensor.h"
#include "ir/format.h"
namespace mindspore {
namespace lite {
struct LiteQuantParam {
double scale;
int32_t zeroPoint;
float var_corr{1};
float mean_corr{0};
bool inited;
Vector<float> clusters{};
int bitNum;
int roundType;
int multiplier;
int dstDtype;
};
class MTensor : public mindspore::tensor::MSTensor {
public:
MTensor() = default;
MTensor(String name, TypeId type, Vector<int> shape) : tensor_name_(name), data_type_(type), shape_(shape) {}
~MTensor() override;
void set_allocator(mindspore::Allocator *allocator) override {}
mindspore::Allocator *allocator() const override { return nullptr; }
TypeId data_type() const override { return data_type_; }
void set_data_type(TypeId data_type) override { data_type_ = data_type; }
void set_format(mindspore::Format format) override {}
mindspore::Format format() const override { return mindspore::NHWC; }
Vector<int> shape() const override { return shape_; }
void set_shape(const Vector<int> &shape) override { shape_ = shape; }
int ElementsNum() const override;
size_t Size() const override;
String tensor_name() const override { return tensor_name_; }
void set_tensor_name(const String &name) override { tensor_name_ = name; }
void *MutableData() override;
void *data() override { return data_; }
void set_data(void *data) override { data_ = data; }
Vector<LiteQuantParam> quant_params() const override { return this->quant_params_; }
void set_quant_params(const Vector<LiteQuantParam> quant_params) override { this->quant_params_ = quant_params; }
private:
String tensor_name_;
TypeId data_type_;
Vector<int> shape_;
void *data_ = nullptr;
Vector<LiteQuantParam> quant_params_;
};
} // namespace lite
} // namespace mindspore
#endif // MINDSPORE_LITE_MICRO_LIBRARY_SOURCE_TENSOR_H_
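A short, illustrative sketch of how this `MTensor` is used (not from the original sources; it assumes the no-STL `Vector` supports `push_back`, as the generated runtime's Vector does):

```cpp
// Hypothetical usage of MTensor: create a 1x28x28x1 float tensor and fill it.
#include "tensor.h"

void TensorDemo() {
  mindspore::Vector<int> shape;
  shape.push_back(1);
  shape.push_back(28);
  shape.push_back(28);
  shape.push_back(1);
  mindspore::lite::MTensor t(mindspore::String("input"), mindspore::kNumberTypeFloat32, shape);
  // Size() == ElementsNum() * DataTypeSize(kNumberTypeFloat32) == 784 * 4 bytes.
  float *buf = static_cast<float *>(t.MutableData());  // lazily mallocs Size() bytes
  if (buf != nullptr) {
    buf[0] = 0.5f;  // write input data here
  }
}  // ~MTensor frees the buffer
```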

View File

@ -1,220 +0,0 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "weight.h"
int g_thread_num = 1;
unsigned char *g_Buffer = 0;
const int8_t g_Weight1[] = {
-101, -12, 52, 40, 127, 91, 23, 66, -109, 13, -61, -7, -15, -127, -36, -16, -67, 6, 30, -83, -127, 97, 62, -82, 25, 33, 27, -120, -11, -73, -46, -26, -127, 45, 5, -60, -20, 68, 101, -94, 26, 123, -127, -115, 78,
-127, -39, 42, -106, 49, 105, -12, 24, 80, -19, -127, -106, -34, 5, -10, 55, 96, 106, -7, -13, 106, 22, -12, 108, 127, 27, 73, 7, 71, 16, -6, 96, 23, 37, 127, 75, -57, -80, -127, -96, -47, -19, -100, 30, 69,
53, 86, 66, 75, 20, -40, -63, -71, -127, -52, -127, -39, 57, -41, 45, 22, 93, 62,
};
const int32_t g_Weight2[] = {
-3147, 16811, -3738, 18629, -147, -9883, -7741, -22579, -16827, 3940, -6678, -16145,
};
const int8_t g_Weight3[] = {
-28, -29, 54, -29, -16, -13, -6, 44, 34, -41, -11, 49, 7, -22, 21, -44, -47, -6, 85, 45, 43, 16, 17, 53, 71, -14, -22, -12, -11, 51, 102, 39, 39, 6, 64, 92, -52, -3, -43, -1, 32, -23, -70, -29, -21,
-39, 47, -16, 11, -69, -12, -33, 49, -59, 25, 6, 40, -32, 87, -12, 20, -70, -86, -20, -45, -48, -7, -13, -22, -4, 108, 17, -31, 9, -13, 94, -41, -60, 30, -59, -75, 33, -38, 8, -58, 37, -19, 90, -35, -53,
84, -39, -56, 64, 36, 38, -63, 13, -73, 35, -91, -41, 72, -31, -20, 127, -13, -3, 45, 13, 3, -21, -37, 28, -47, 27, 29, -3, 56, -8, -53, -6, -36, 11, -31, -72, -17, -39, -37, 49, 30, -25, -127, -33, -48,
4, -75, -8, 101, -40, -57, 48, -38, 60, -42, 98, -77, 66, -51, -34, -15, -31, -10, 78, 116, 24, -8, 1, -102, -48, 21, 38, 44, 4, -46, 33, -55, 36, 79, -19, -30, -9, 15, 95, 27, 64, 63, 7, 44, 62,
-84, 17, -15, 85, 5, 0, 6, -57, -50, 65, 5, 17, -17, 4, 5, 1, 55, 61, -12, -38, -21, 25, -13, 39, -15, -44, 4, -68, -17, -9, -25, 36, 0, -3, 0, 22, -28, 43, 45, 16, -57, -45, -32, 32, -2,
7, 55, -25, -63, 2, -8, 5, -76, 15, 13, -62, -28, 74, -61, -41, 36, -74, -45, -77, 5, 42, 13, 12, -51, -27, -16, 56, -28, -25, -14, -37, -71, 13, -9, 4, 5, 48, 30, 3, -52, -67, -21, -24, -16, 41,
25, -54, -58, -15, -81, 63, 60, -13, -120, -87, -47, 93, -73, 16, 7, -1, 27, 65, -46, -55, -72, -31, -40, 51, 18, 4, -4, 21, -44, 50, 53, -52, -85, -75, -92, 79, 9, 67, 44, -15, -43, 57, 69, -69, -59,
-59, -74, 75, -127, 61, 29, 13, 82, 8, 7, 12, 30, 11, -43, 14, 40, -10, 4, -10, 3, 63, 36, 21, -23, 13, -45, 52, 22, -3, -18, 71, 63, 44, 14, -25, -45, -3, 12, 29, 4, 14, -5, 28, 52, 3,
-1, -28, 12, -41, -19, -17, 40, -15, 10, -22, -28, 9, 6, -3, 59, 13, -20, -6, 102, 8, -12, 7, -32, 89, -4, 59, 2, 41, 1, 24, -21, -43, -61, 64, 17, -16, -37, -12, -34, -6, 71, -25, -57, -29, -20,
-7, -18, -84, -49, -50, 107, -56, 102, -28, 24, -15, 18, -38, -48, 22, -25, 0, 127, 5, -60, -36, 90, -29, -5, -34, -71, 77, 8, -50, 14, -13, -6, 25, -38, 56, 26, -50, 14, 13, 3, -20, 36, -9, 7, -8,
-91, -3, -1, -6, 16, 1, -62, 29, 42, 14, -55, -25, -62, -11, -5, -4, -43, 24, -13, -36, 6, -43, -3, 17, -99, 37, 38, -38, 30, -24, -55, -30, -8, -7, -27, -11, -98, -46, -8, 22, 36, -35, -51, 68, -39,
38, -15, 7, -41, 9, 5, 57, -36, -2, -9, -70, -63, -37, -29, -16, -127, 26, 27, -2, 17, -49, -57, 31, -30, 37, -23, -18, -80, -17, 3, 50, -7, -38, -21, 42, -14, 25, 10, 3, -2, -39, -26, 57, 5, -42,
20, -49, -21, -29, -46, -5, 7, -38, -70, 66, 29, -34, -127, 31, -26, 7, -30, -24, -11, -33, -55, 62, 22, -7, 23, 56, -2, 38, 101, 29, -2, 37, 10, 4, -10, 12, 23, -43, 103, -39, -37, -47, 86, -40, -15,
-32, -52, 67, 3, -60, 37, -49, 19, 26, 88, -38, -85, -19, -72, 19, -63, 9, 99, 29, 5, -34, 46, -71, -1, -4, -57, 33, -20, -7, 96, -37, 49, 2, 19, 23, 24, -50, 36, -8, 42, -56, 47, -28, 25, 27,
72, 3, -29, -50, 5, 44, -8, -62, -6, -5, -44, 6, 60, 57, 8, -7, 11, 17, -100, 19, -84, 62, -37, 28, -54, -66, -97, 91, -31, 4, -11, -80, -33, -38, 38, 22, -4, -18, -3, -38, -31, 10, -4, 23, -8,
24, -102, -125, -33, -4, 11, 2, 65, -13, -113, 53, -81, 27, 45, 36, -64, -31, -97, 112, -62, -18, 47, -81, -16, -89, 51, 67, -20, 18, 48, -59, -22, 79, -29, -1, 90, 50, -127, -48, -16, -31, -1, -19, 81, -26,
-80, 15, -20, 41, 84, 75, -55, -11, -52, 45, -59, 12, 22, -37, 41, -35, 46, 81, -78, 64, 80, -36, -64, 16, -64, -40, 102, 8, -86, -54, -17, -54, -30, -15, 11, -52, -68, 14, 21, 42, 47, -60, -98, -65, -58,
24, -50, -43, -15, -105, 22, -77, 58, 0, -47, 27, -31, 14, -16, -9, -6, -58, -69, -62, 62, 73, -33, 66, 62, -43, -12, 24, -105, 46, 30, 106, 39, -57, -27, -61, -75, 62, -92, -43, -127, 8, 44, 6, 31, -79,
-115, -84, -24, -24, -68, -61, -35, -15, -51, -10, 64, -22, -99, -1, 14, -49, 0, -49, -16, -74, 40, -17, -32, 3, -16, 4, -21, 2, -46, -1, -43, -10, 39, 59, -21, -68, 15, 0, -53, -24, -40, 42, -87, 24, 13,
39, -17, -89, -44, -50, -69, -2, -65, -13, -20, 27, -24, -18, -83, -23, 68, -40, -94, 66, -15, -14, -15, 17, -51, -11, -22, -21, 86, -64, -72, 50, 46, 20, -39, -4, 31, 45, -93, -2, 127, -27, -68, 51, -33, 0,
40, 7, 53, -62, -44, 25, 116, 21, 0, -3, -8, 90, 47, -44, 17, -22, -44, 54, 72, 1, -11, -19, -4, 15, 3, -40, -26, -14, -10, 17, 15, 10, 11, -13, 23, 11, -8, -11, -23, -5, 13, -61, -23, -27, -27,
-47, 96, -30, -17, -45, -39, -38, -4, 1, 28, -17, -3, -32, 67, -21, 28, -27, -36, 5, -30, 12, 25, 9, 3, -21, 23, 5, -53, -5, -82, 3, 29, -3, -92, -47, -42, -1, -16, -60, -24, -8, -33, -53, 41, -3,
-37, 34, -2, 0, 36, 18, 32, -61, -89, -62, 17, -6, -104, 26, 6, -40, 127, -19, -66, -9, -3, 38, -42, -50, 89, -59, -51, 31, -26, 4, -78, 47, 27, 74, -37, -44, 74, -38, -35, 77, 21, 32, -74, 74, -41,
69, -80, -79, 65, -74, -33, 66, -33, 13, 45, -3, -7, -41, -4, 21, 49, -1, 4, -30, 4, 30, -10, -21, -15, -23, -13, 7, 25, 35, 18, 16, -21, 9, -6, -66, 64, -17, 22, 16, 75, 7, 12, 1, -58, 52,
9, -43, 50, -22, 44, 44, 40, 23, 21, -44, 23, 53, 29, 17, 11, -11, -4, 31, -49, 16, 20, 20, 70, 0, -36, 16, -38, 1, -67, -51, -23, -55, -59, 53, -2, -23, 10, -17, -1, 30, -35, -38, -14, -6, -23,
15, 61, -1, -35, 33, -5, 68, -57, 32, 10, -7, -44, 122, 36, 15, -18, -22, -127, -60, -72, 30, 54, 3, -8, 27, -54, 11, -35, 7, -63, 14, -62, -2, -49, -25, -21, 54, -11, -11, 10, 4, -39, -21, -33, 61,
25, -14, -3, 49, -65, 29, 11, -34, -45, -60, -26, 20, 10, 3, 29, -9, 27, 17, -54, -10, 64, -3, -22, -71, 76, -57, -50, -8, -63, -3, -87, -2, 29, 65, -57, -36, 32, -46, -85, 30, -97, 8, -82, -32, -69,
9, -46, -75, -33, -63, -16, -39, -42, -36, 17, -56, -7, -84, -36, -27, -14, -18, 0, -66, -24, 1, -17, -127, 50, -108, -27, -79, -29, -5, -6, -116, -109, -32, -49, -115, 16, -56, -66, -38, -39, 5, -24, -100, -114, 6,
1, -10, -53, 24, -44, 47, 86, 36, 37, -23, 10, -4, -6, -45, -49, -4, -36, -16, -6, 29, -8, -44, -56, 7, -5, -70, 1, -19, 39, 22, 34, 52, 57, -34, -27, 3,
};
const int32_t g_Weight4[] = {
-1881, -1752, -3047, -3475, -1192, -6212, -2717, -535, -2109, -1355, -969, -828,
};
const int8_t g_Weight6[] = {
-8, 22, 6, -9, -14, 4, -6, -1, 17, 29, 33, 5, 12, 3, 29, 8, 24, 8, 3, 7, 33, 26, 28, 6, 3, 31, -12, 26, -15, -1, -18, -4, 9, 0, -17, 28, -25, -7, -26, 29, -37, -18, 22, -4, -23,
-11, -9, 25, -34, 3, -32, 18, -7, 10, 26, -10, 17, -21, -51, 23, 25, 20, 2, 62, -13, 23, -16, 3, 20, 39, -5, 46, -24, -7, -14, 51, -16, 3, -9, -2, -4, -19, 6, 11, -14, 4, -3, 59, -12, 9,
30, -14, -14, -33, -20, -27, -39, -1, 34, -9, 5, -6, 38, -6, 10, -72, 10, -22, -52, 13, 0, 33, 16, 0, 69, -10, -22, -7, -17, 18, -4, 14, 17, -12, 10, 4, -23, 17, 49, 16, -1, 18, -21, -15, -20,
56, -8, 20, 2, 26, 6, 33, 8, 34, -13, 1, 33, 21, -53, 54, 14, 21, 6, -21, -15, 13, -8, 23, 68, 1, 10, 26, 13, -66, 31, -14, 29, -40, 5, 38, 59, 21, 26, 3, 9, -69, 24, -45, 31, -59,
-4, 16, 26, -22, 12, -38, 26, -24, 27, -40, 10, -22, -21, 43, 87, -29, 39, -41, 30, -75, 2, -26, 42, -35, -2, 33, -11, 9, -4, -15, -23, -16, -41, -25, 14, 27, 25, -28, -9, 6, 14, -11, -3, -9, 30,
15, 12, -9, 48, 14, -22, 18, 5, 33, 9, -12, 54, 27, 25, 28, -36, -25, 25, -26, -23, -3, 27, -31, -29, -76, 0, -30, -15, -66, 2, 6, -5, -60, 29, -20, -22, -40, -33, -43, 3, -11, -51, -12, -44, -6,
6, -19, -22, -9, -36, -80, -6, 13, 0, 15, -14, 34, -21, -15, 15, -27, -41, -33, 11, -20, -9, 2, -9, 7, -9, 26, 20, 17, -11, 3, -35, 10, 5, 16, -6, 0, 20, -14, 0, -1, -22, -34, -19, 7, 10,
0, 27, 5, 15, -17, -15, -10, 10, 8, 41, -14, 4, -48, -42, -2, -18, -8, 12, -1, 6, -29, 5, -22, 34, -26, -2, -10, 4, 10, 21, 1, 11, -13, 5, -13, 59, 1, 35, -3, -11, -26, 22, 36, 35, 28,
-25, -6, 16, 20, -21, 1, -14, 28, 1, -29, -32, 12, 10, -24, -7, 15, -40, 30, 11, -11, 18, 2, -40, 50, -16, -32, -36, -2, 5, -21, -18, 16, 26, 43, 4, 53, 28, -28, -2, 14, -33, 30, -14, -14, 34,
39, -22, 10, 66, -35, 8, 9, -8, 33, -41, -5, 67, 52, 12, 39, -13, 5, -23, -7, -4, -8, 7, 18, -24, -29, -27, 7, -34, 11, 23, -22, 25, 4, 20, -12, -19, -24, 15, 21, -23, 14, -17, 5, 70, -57,
34, -19, -11, -19, -5, -73, 59, 13, -56, 47, -41, -69, -26, 23, -35, 13, 19, -27, 9, -18, -37, 50, -70, 6, -24, 24, 3, 26, -43, 32, -17, 7, 25, 17, 7, 26, 26, 12, -4, -7, 1, 27, -16, 31, 17,
6, 22, -16, 16, 50, -14, -34, 6, -21, -48, 6, -36, 74, 11, -26, 17, -3, -7, -41, 9, 9, 1, -29, -24, 62, -49, 38, -31, -17, 14, 40, -58, -38, 11, 28, -14, 45, -9, 32, -6, 53, -15, -40, 16, -34,
-21, 9, -12, 38, -36, -2, 13, 11, -18, -11, -23, 26, -29, -37, 6, 34, -16, 28, -17, 49, -27, -45, -54, 27, -18, -39, -15, 22, -32, 54, 8, -32, 14, -13, -32, 23, -15, 23, 82, -39, 0, 54, -4, -55, 26,
1, 46, 9, 3, 91, 51, -88, 70, -110, 9, -27, -48, -77, 109, -44, -1, 9, -5, 0, 14, -8, -50, -17, -17, 6, -2, -16, -5, 25, -22, 56, -2, -3, 3, -14, 0, 21, 7, 33, 27, -14, 3, -3, -64, 2,
23, -20, 20, -15, -36, -28, -12, -4, 3, -7, -14, 10, 73, 7, 29, -23, -23, -17, -4, 16, 56, -27, 21, 0, 20, 3, 43, 23, 21, -1, -24, -25, -7, -1, -37, -8, -46, 23, -11, -22, -41, 8, -7, -5, -15,
34, -46, 15, -11, 29, 1, 33, -40, 12, -14, -62, -21, -25, -32, 3, 6, 2, 25, 0, -36, 8, 11, -45, 23, -16, 21, 8, 26, -32, 41, -14, -31, 9, 41, 18, 96, -41, 32, 0, -26, -55, 33, -11, 27, -12,
32, -29, -44, 3, -54, 11, -53, 35, -21, 1, -8, 45, 20, -1, -7, 41, 22, 24, 5, 38, 14, 4, -31, 17, -20, -28, -30, 31, 35, -28, 12, 3, -1, 5, -17, -19, 39, -14, 14, 8, -15, -26, -6, -14, -13,
8, -12, -21, 100, -14, -60, 42, 17, 19, -54, -9, 10, 38, 4, 1, 66, 26, -20, -18, 43, 77, -23, 21, 27, 80, 42, 37, 52, -1, -33, 14, 5, 18, -22, 75, 7, 57, 38, -17, 34, -11, -44, 32, -30, 4,
-27, 70, 18, 33, -23, 14, 22, 1, -31, 28, -23, 29, -13, -5, -15, 57, -15, -23, -28, -9, -33, 33, -4, 39, -2, 9, 3, 3, -3, 28, 5, 14, 10, 4, 2, 2, -8, -3, 17, 44, 3, 28, -32, 31, 1,
17, -6, 29, -58, 4, 17, 28, 31, 46, -14, 20, 44, -47, 20, 12, 20, -47, -12, 8, 10, -11, -51, 28, 37, -51, 0, -6, 56, -32, -53, -33, -3, -10, -51, 18, 33, -70, 12, -39, 45, -19, -64, -60, 31, -77,
-9, -18, -29, 46, 19, -1, -4, 8, -6, 7, -24, -8, 39, 1, -44, 106, 8, 31, 32, 37, -22, 15, -42, 1, 13, 11, -47, 0, -38, -8, 61, 10, -17, -20, -36, -48, -18, -10, -15, -42, 53, -13, 82, 8, -3,
-15, -11, -23, -39, 11, 3, -12, 43, -14, -13, -4, -11, -12, 27, -5, 37, -25, -33, 21, 4, 1, -64, 23, -52, 38, -61, -13, 48, -39, -49, -8, -36, 70, -9, 59, -4, 24, -30, 52, 32, -29, 27, -65, 16, 7,
4, 51, 18, 16, 30, 18, -16, 18, 44, -13, 59, 21, 19, 14, 36, -12, 12, 10, -32, 9, 96, -5, 71, -2, 28, -12, -5, -39, 39, -34, 33, -30, -35, -38, 37, 18, -34, 13, 31, 57, 3, 45, 67, 75, -13,
-67, 60, 0, -27, 9, 34, 50, 70, -30, -12, 85, 25, -31, 52, -20, 12, 4, -21, 13, 22, 9, 29, -13, 13, 20, 6, -38, 18, -6, -14, 8, -21, -6, -18, 6, -45, 18, 7, 5, -33, -25, -10, 22, -17, -23,
10, 31, 2, -43, 59, 4, -8, 8, 36, 16, 21, 51, 11, 30, -43, -8, 35, -6, -6, 68, 29, 20, 53, -4, -11, 0, -4, -2, 48, -29, 3, 59, -19, -6, -20, 41, -1, -28, -22, 40, -21, 32, -9, 9, -4,
39, -2, 6, 46, -35, -35, 31, -1, 13, -7, -12, 46, 18, -7, 5, 20, 21, 10, -2, 21, 36, -31, -2, 16, 69, 13, 38, -19, 29, 2, -28, 45, 39, -23, 5, 11, -23, 30, 40, -36, 6, 32, -48, 17, -38,
37, 13, -43, -5, -13, 3, -9, 0, -22, 13, -33, -33, -6, 8, -53, -1, -16, -20, 11, -17, -24, 1, 25, -24, -25, -31, -23, -22, 57, -50, -25, -29, -32, -21, 18, -1, 5, 9, -20, -10, 4, -10, -33, -21, -3,
-1, -3, -20, 16, -35, -18, 8, -14, 16, 7, 18, 27, -30, 14, 8, 29, -23, 8, -18, 32, -12, 5, 16, 44, 2, 24, -19, -1, -11, 18, -14, 41, -29, 12, -1, 59, -17, 26, 15, 53, 13, 17, 36, 77, -14,
-62, 13, 9, -9, 14, -13, 30, 13, -23, -26, -6, -52, -35, -15, 9, 42, -24, -27, 14, -5, -27, -10, -62, -37, 5, 3, -9, 16, 8, 14, 2, 24, 9, 43, -25, 30, 49, 9, -13, 1, 1, 32, -45, 9, 20,
39, 29, 34, 97, 17, -7, -47, -40, 29, -114, -10, 53, 23, 56, 1, -7, -21, -20, 10, 1, -9, 13, 13, -68, -37, -35, 13, -12, -52, -38, 41, 0, 51, -6, 16, -11, 3, -73, -9, -28, 8, 1, -16, 15, 12,
32, 24, -7, -49, -21, 41, -10, -54, -53, -2, 24, -16, -3, 42, -14, -42, 5, 23, 22, -28, -52, -14, -17, -38, -35, 69, -35, 39, -27, 21, 52, 22, -27, -8, -5, 22, -30, 43, -23, 46, 42, 16, 30, -39, -19,
6, 7, 24, -22, 11, 30, 34, 28, -7, -4, 32, -3, -44, 42, -16, 59, 31, -13, -45, 0, -10, -29, 9, 5, -11, 3, -21, 0, 0, -63, -7, 5, -21, -65, 27, -13, 1, -17, -32, 16, 20, -47, 50, -30, -7,
24, 48, 12, -6, 10, -12, 6, 18, -5, 21, 36, 7, 35, 48, 4, -35, -20, 7, -13, 5, 38, 0, 14, 32, -2, 11, 12, 1, 1, 25, 40, 13, -25, 6, -20, 23, -1, -57, -41, -24, 10, 24, 29, -13, -13,
11, -27, 3, -23, 15, -21, 11, 6, 0, -19, -6, 19, 4, -30, -28, 16, 21, 41, 29, -29, 23, -30, -36, 14, 4, 20, 30, -14, -2, 7, -13, -12, 28, -6, -53, -4, 14, 42, -4, 6, 12, -17, -16, -21, 2,
-1, -2, -19, -2, 34, 8, -3, 11, 5, 29, -17, -3, -11, 37, 8, 32, 14, 24, 29, 39, 9, 28, -11, 58, 8, -7, 25, -14, -34, 23, -10, 4, 24, 45, -43, 37, 8, -31, 20, -25, 10, 62, 3, 22, 47,
51, 6, 12, 32, -24, 7, -2, -1, 2, -14, 4, 5, 41, -13, 1, 13, -36, -1, -10, -14, 4, -26, 12, 28, 2, -22, 17, 42, -35, 1, -19, 11, -28, -25, -32, -29, 63, -34, 16, 47, -3, 18, 43, -41, 43,
-45, -7, 27, -30, -21, -20, 10, -23, -6, -58, -56, 0, -37, -20, 9, 70, 28, 28, 38, -10, -36, -48, -35, 21, -9, -68, 26, 15, -3, 36, -46, 4, 4, -21, -32, 8, 4, 1, 18, -2, -5, 31, -5, -37, -3,
-13, 10, -31, -25, 15, 43, 6, -4, 15, 45, -28, 40, -81, 6, -4, -38, -11, 7, -21, 16, -72, 14, 6, -53, -66, -40, -63, 11, 13, -7, -2, 17, -44, 32, -31, 18, -28, 9, -46, -35, 38, -24, -8, -16, 52,
3, -51, 23, 16, 17, -17, 24, -27, 6, -57, -2, -24, 3, 11, 15, -1, -17, -22, 54, 16, 47, -14, 25, -46, -4, 2, -13, -33, 40, -28, -52, -33, 21, -7, -52, -18, -19, -3, -19, -17, -5, 3, -21, 0, 14,
-2, -14, -38, 32, -20, -68, -15, -21, -78, -32, 12, -8, 17, -30, -62, 34, 29, -23, -23, 12, -78, -41, 33, -1, -15, 16, 14, 50, 69, -22, 20, -33, -51, -34, 34, -18, -54, -6, -67, 8, -25, -69, -47, 51, -40,
5, 7, 15, -33, 18, 2, -14, -10, 19, -3, -11, -13, 16, -30, 11, -16, -2, -16, -12, 13, -8, -6, 7, 13, 6, -41, -7, 9, 24, 24, -22, 41, 14, -8, -24, 38, 1, -18, -8, 17, -1, 29, -32, -3, -3,
2, -33, -3, -11, -32, -47, -18, -37, -17, -43, -14, -14, 4, -34, -7, 24, -7, -14, -76, 11, -27, 14, -28, 22, 19, 30, -45, 19, 33, 9, -53, -6, -35, -31, -22, -2, 15, 31, 8, 9, -16, -8, -55, -18, -29,
-35, -35, 6, -9, -4, -25, -9, -8, 2, 19, -5, -37, -6, -9, -43, 23, -27, 5, -17, -24, -19, -4, 4, -38, 18, -1, -64, -41, 0, -4, 20, 26, 7, 23, -24, -5, -20, -22, 3, 13, 68, -32, 7, 52, 27,
-27, -37, -40, -26, -5, 13, -3, 17, 5, 26, -40, -49, 11, -29, -16, -3, -11, -6, 31, -1, -45, -10, 12, -11, -23, -14, 13, -18, 9, -3, 41, -16, -1, -12, -10, -33, -11, 9, 5, 2, 16, 2, 39, 10, 13,
-13, -16, -15, 25, -31, 27, -25, -26, 17, 10, -24, 53, -29, -50, -39, -8, -48, 35, -20, 22, -24, 40, -60, 43, 45, -30, -7, 45, -60, 16, -23, 29, 30, 55, -18, -11, -2, 30, 8, 15, 8, 28, 32, 5, 14,
-13, 1, 34, -35, -7, 21, -42, 2, 3, 12, -9, -35, -27, -7, 16, 11, -12, -9, 24, 31, 0, -14, 27, 44, 19, -26, 31, 37, 18, -25, 25, -13, 66, -42, 64, 44, 47, 25, 54, 53, -4, 34, -8, 16, 20,
-35, -43, 32, 22, 43, 92, 17, 34, 33, -1, -16, -17, -24, -8, 13, -13, 40, 28, 4, 5, 32, 11, 2, 46, -2, 17, 28, 30, -34, 22, -57, 4, -8, 6, 7, 13, 79, 25, 26, -1, -18, 25, -30, 11, -29,
64, 56, -15, 68, 6, -51, -25, -20, -25, -47, 62, -17, 0, 35, -24, 94, 0, -22, -24, -4, -64, -44, 91, -16, -28, 42, -21, 69, -29, -26, -41, 29, -36, -29, 76, 16, -19, 28, -42, 64, -14, -31, -1, 39, 9,
11, 1, -22, -15, -3, -22, 21, 4, -35, 38, -23, -16, -31, 22, 4, -18, -15, 7, 18, 17, -19, -56, -5, -6, -57, -7, 29, -55, 65, -1, 8, 32, -55, -52, 29, -38, -37, 22, 6, -19, 32, -21, 25, 16, -47,
-31, -24, -12, 15, 64, 55, -9, 3, -23, 31, 34, -31, -1, 51, -42, -28, 11, 43, 6, -22, -16, 5, 8, 28, -17, 29, 15, -11, 20, 42, -46, -5, -24, -16, -44, 13, -4, 45, -2, 7, 1, -42, 43, 2, 3,
-14, -27, 23, -8, 1, 6, -37, 16, 12, -17, -27, 4, -2, -1, 1, 6, -44, -1, -7, -16, -12, -1, -7, 33, 10, -38, 18, -1, -15, 21, 0, 16, 22, -10, -16, 4, 7, -1, 23, -30, -36, -22, -9, 15, 34,
-27, 15, -5, 4, -10, -16, -22, -34, 24, -26, -29, 16, 4, 26, 12, 7, -24, -3, 26, -21, 14, 45, -15, 11, 40, -7, 17, 13, 0, 32, 9, -4, 36, 44, -29, 25, 31, -3, 53, -6, -10, 65, 13, -30, -4,
36, -3, -20, -8, 22, -36, 24, 5, -13, 16, -24, -20, -24, -32, -13, -3, 21, 8, 24, -8, -21, -20, -17, -19, 4, -49, 12, 23, 5, 14, -30, -5, 15, 5, -35, 2, 22, -9, 13, 32, -18, -16, -27, 18, 45,
-3, -1, 4, 74, -26, -9, 28, -11, 13, -27, -6, 51, 34, 13, 5, 1, -20, 31, 7, -13, 0, -10, -40, 19, -1, -22, 24, -39, -13, -10, -19, -20, -12, 13, -26, 13, -18, -22, 14, -36, 40, 18, -11, 5, -8,
70, -15, 10, -36, 37, -38, -5, 74, 38, 7, 48, 15, 30, 24, 12, 5, 58, -17, 8, 45, 77, 25, 25, 4, 72, 1, 20, 18, 65, 3, -49, -1, 24, -20, -31, -16, 5, -31, 31, -42, 32, 2, -32, 8, 34,
13, 10, -43, 27, -40, -11, -42, 16, -56, -26, 21, 22, -24, -45, -10, 17, -70, -19, -48, 11, -46, 7, -5, 30, 29, 3, -16, -12, -8, -32, 9, 3, 2, 34, 0, -9, -2, 1, -17, -10, 6, -26, -10, 47, -22,
-32, 10, 15, 73, -40, -5, 23, -30, -37, -71, 19, -27, -33, -22, 20, 58, -28, -26, 7, -62, -41, -77, -27, -21, -2, -39, -31, 4, -8, 12, -11, -14, 30, -34, -30, -5, -9, -15, -23, -25, 30, 31, -38, -8, 23,
0, -10, 58, 21, 4, -11, -5, 16, 6, -17, 19, 6, 12, 3, 34, 10, -41, -6, 27, -16, 16, -37, -8, -27, 14, -5, -3, 19, -57, -77, 14, -2, 16, -21, 30, -47, 38, -70, 25, 51, -27, -25, -12, -35, 13,
17, 46, 41, 36, -43, -19, 25, 21, -4, -4, 1, -18, 13, 2, -14, -18, -21, 10, -74, 4, 25, -33, -22, -29, 1, 6, -20, -49, -32, -10, 22, -9, -27, -4, -31, -19, -34, 21, 10, 49, -1, 23, 35, 31, -16,
23, -32, 54, -15, 19, 43, 34, 7, 4, 19, 25, 13, 5, 10, 42, 30, 22, 27, 9, 13, 18, -31, -12, -6, -21, 27, 5, 47, 39, -18, -9, 21, 41, -49, 25, 21, 6, -19, 26, 20, -15, -23, -48, -24, -19,
26, -30, -21, 45, 1, 19, -26, 9, -15, 2, -5, -1, 15, -4, -10, 8, 6, -3, -31, 44, -26, 30, -11, -26, -18, -14, -22, 44, -17, 11, -5, 4, 18, 16, -25, 12, 8, -46, -22, 15, 11, 10, 4, 12, 14,
-26, -21, 58, -67, -41, -57, 18, -53, -11, -14, -25, 1, -37, -71, 19, 74, -15, -50, 30, -23, 26, -42, 12, 12, 30, -2, 39, 76, -22, -15, -45, -28, 43, -43, 32, 53, 59, 16, 57, 14, -17, 11, -15, -31, 33,
-11, 32, 45, -11, -6, 48, -83, -18, 28, -5, 21, -1, 29, -4, 8, -62, -55, 27, -79, -13, 32, 42, 47, -1, 84, -40, -36, -72, -58, -7, 18, 17, -11, -64, 25, -2, -24, -14, 73, 55, 41, 15, 38, 49, 12,
-42, 25, -7, -39, -6, 5, 22, 51, -50, -33, 41, 3, -45, 10, -53, 18, 45, -23, -29, 32, -34, -7, -10, 45, -30, 24, -19, 27, -24, -68, -73, -2, -41, 0, 33, 40, -13, 17, -5, 34, -20, -11, -79, 45, -69,
-23, -7, 19, -10, 30, -59, -7, -19, -14, -37, -5, 2, -5, 5, -19, 14, -7, -9, -8, 24, -14, -21, 42, 1, -6, -31, 2, -13, 27, -24, -4, 59, -47, -2, -8, 5, 6, 14, 16, -34, 4, 22, 2, -1, -30,
15, 29, 1, 38, 2, 41, -36, 2, -39, 19, 15, -20, -18, 47, -20, -11, 5, 12, 23, -30, 45, -1, -15, 38, 7, 14, 42, -18, -5, -8, 2, 40, 52, -21, -44, 25, 6, -21, 28, -20, 20, -9, -3, 11, -9,
15, 3, 10, 2, 32, -26, 28, -14, -5, 2, -8, 6, -22, 27, -9, -12, -13, 1, 24, -27, -10, -50, 1, -49, 11, 15, -50, 5, 35, 15, 31, -25, -4, 5, 6, -16, -50, -24, -12, 2, 18, -13, 35, -9, -13,
-36, -27, 4, -38, -14, -6, 3, 11, 15, 30, -45, 15, -27, -20, 25, -54, 14, 3, 36, -8, 20, 37, -40, 32, 4, -41, 42, -45, 10, 28, 23, -17, -8, 34, -54, 25, -1, -14, 21, -52, 27, 7, -4, 2, -18,
-37, -1, 12, -27, -22, -19, 15, -12, -2, -52, 14, -3, 16, -14, 15, -8, 8, 19, 32, -4, 27, 15, -9, 10, 36, -12, -12, 19, 0, -3, -35, -17, 41, 31, -3, 21, 58, -44, -26, 11, -38, 10, -36, 2, 41,
54, -14, 17, 54, -55, -32, 10, -7, 38, -58, 12, 44, 23, -24, 25, 9, 3, 0, 7, 28, -16, 12, -26, -11, -21, -43, -8, -2, -12, -1, -19, 32, -7, 18, -4, -25, -7, -27, -17, 10, -40, -4, -50, 17, -50,
4, -6, -8, 22, -27, -30, 34, 8, -42, -36, -16, -44, -17, 28, -36, 25, -2, 2, -3, -20, -27, 6, -18, 3, 22, 52, -38, 39, -44, 56, -13, -21, -36, 3, 14, -17, 9, 12, -28, 6, -2, 22, 33, -22, -72,
40, -19, 14, -24, 56, 3, 15, 22, 6, 3, 14, -6, 3, -20, 15, -27, 16, 35, 27, -4, -36, -18, 21, 12, -59, -8, -16, -8, 7, -32, -21, 11, 1, -20, 12, 33, -24, 29, -53, 30, 17, -64, -38, 28, -20,
-16, 7, -9, 6, 21, 14, -64, -11, 4, 42, 25, 35, 8, 45, -13, 21, -4, 31, -37, 19, 1, 59, 35, 47, 4, 34, 19, 8, 18, -11, 8, 44, -2, -3, 63, 12, -5, 38, 38, -33, 20, -11, 37, -2, -55,
29, 1, -5, -45, 58, 30, -72, -3, -25, 9, -6, -43, -34, 9, -17, 2, -2, -41, -15, 28, 1, -7, 42, -26, -4, 0, -17, -16, 17, -12, -18, 8, 5, -29, 13, -4, 24, 0, 19, 6, 9, 11, -9, 13, -7,
-20, -19, 10, 9, -2, -8, 29, -19, 4, 5, -5, 3, -20, 6, 19, 12, 14, 10, 19, 15, 8, -3, -20, -4, -27, 22, 25, 4, 14, 0, -13, -21, -4, 39, -10, -10, 9, 1, -7, 14, 11, 24, 3, -14, 12,
25, 6, -4, -6, 22, -15, 19, -53, 31, 33, 16, -30, 59, 41, 36, 16, 60, 21, 18, -7, 50, 44, -9, -43, 15, -8, 18, 1, 21, 37, 17, -1, 25, 3, 9, -1, 7, -36, 14, -17, -10, 51, 35, -17, 19,
30, -34, -33, -45, 31, -5, 7, 34, 15, 4, -19, 23, 39, -50, -7, 13, 18, 49, -13, 6, 28, 50, -21, -24, -22, -13, -16, -15, -5, 3, 7, 26, -20, -14, -23, 31, -21, -26, 0, -61, 15, -36, 31, 61, -61,
-49, -11, -15, -19, -4, 25, -31, -17, -42, 34, 28, -28, 3, -27, -47, 12, 48, -2, 11, 36, -15, 34, -13, 7, -4, 52, -8, 15, 25, 33, 6, 19, -20, -6, -34, 33, -7, 31, -19, -42, 34, 31, -43, 84, -10,
36, -55, -26, -58, 51, -4, -43, 4, 12, -50, 25, -21, 4, 29, -56, -1, 27, -21, 29, 16, -22, 13, 14, -20, -11, -5, 16, 62, -7, -1, -10, 17, 33, -18, -8, 7, 22, -14, 4, 3, 43, -19, 2, 37, 11,
-16, 29, -58, 9, -50, 4, 2, 20, 19, -16, 14, -27, 13, 26, -28, 33, -17, -25, -22, 7, -28, 2, -36, -22, 13, -9, 5, -7, 10, 8, 13, -55, 5, 33, -19, -23, 22, -3, 34, 20, 31, 26, 61, -19, -2,
12, 21, 48, -13, -10, -5, 9, -13, 9, -22, 21, -3, 65, -1, 21, -40, -3, -9, -22, 6, 45, -39, 17, -1, 57, -40, -16, -2, 10, 19, -84, 25, 28, 0, -21, 50, 33, -79, -40, -18, -29, 29, -54, -14, 9,
13, -52, 31, 39, -80, -54, -28, 8, -10, -86, 19, -11, 13, -54, -2, 22, 13, -3, -37, 75, -15, 11, -18, 18, 14, 10, -43, 42, 5, -19, -37, 36, -16, -17, -36, 11, 34, 34, -25, 60, 14, -11, 20, -19, -30,
-37, -23, 1, 63, 32, -14, 40, -2, -11, 14, -19, -35, -46, 11, 2, 75, 18, 15, -11, -57, -33, -6, 20, -12, -11, 33, -27, 30, -18, 38, 48, 10, -21, 14, 59, 9, 3, 5, 7, 39, 2, -55, -20, 22, -18,
-52, -28, -2, 10, 52, -29, 34, 5, 7, 48, 30, 52, -38, -27, -5, -24, -17, 34, 109, 17, 8, 39, 31, -27, 79, -30, 16, -42, -25, 62, 74, 26, 35, -31, -16, 1, 5, -11, 6, -15, 5, 14, 40, -17, 53,
-29, -5, -6, 31, -2, 30, 17, -1, 25, 11, 6, 39, -29, 7, -2, -12, -32, 7, -20, 31, 1, 13, -12, -19, 7, -3, -18, 44, -63, -5, -13, -3, 23, -6, -19, 23, -46, -10, -15, 21, 9, 7, -12, -41, 8,
-36, -8, 16, -21, -3, -7, 9, 8, 14, -4, -28, -29, -21, -10, 6, -38, -38, 8, 9, 47, 4, 9, 17, -38, -33, -4, 13, 14, -5, -1, 36, -11, 20, -19, 67, -10, 19, 16, -16, 10, 10, -12, -17, 6, 6,
-61, -9, 14, -7, 19, 30, -5, 24, 21, -7, 11, 0, -14, -28, 22, 14, 57, 14, -5, 14, -3, 9, -6, 30, -10, 16, -13, 5, 16, 23, 61, -4, 25, 21, 2, 32, -52, -51, 26, -22, -2, 20, 38, 15, -4,
42, -14, 7, 26, -2, 31, 29, 1, -3, 13, 34, -13, 27, -17, 14, 34, -2, -1, -17, -16, -1, 8, 25, 12, 4, 27, 19, 41, 8, -13, 3, -9, 25, 30, -20, 23, -2, 37, 6, 28, 32, 23, 0, -10, -4,
47, -20, 19, 9, -16, 39, -23, 9, 33, -5, -23, 47, -1, 6, -2, 46, -12, 8, 12, -16, 21, 31, -14, -61, -13, -13, 41, -1, -48, 12, 30, -41, 4, -15, -3, -31, 10, 4, 43, -39, 29, -3, 45, -46, 7,
-51, 47, -22, -40, 20, 4, -15, 39, -2, 29, -14, -11, -9, 37, -17, -17, -10, -13, -2, -2, 13, -37, -20, 12, -25, -24, 11, 24, 4, -11, 59, 24, 6, 1, 32, -4, -20, -10, 2, 23, -10, 21, -20, -27, 49,
-16, 35, 33, -6, 1, 55, 5, -7, 60, 0, 9, 36, -11, -57, 57, -8, 3, 4, 21, 25, -8, 16, 44, -15, 6, -29, 25, -33, 29, 6, 12, -32, -27, -61, 45, 10, -28, 7, -27, -3, -11, -15, 53, -3, -56,
30, 18, -10, -18, 35, 15, 55, 18, 12, 50, 12, 24, -21, -34, 37, -37, 45, 40, 46, -41, 25, 54, -16, -1, -70, 16, -35, 15, 10, 5, 24, 28, 14, -2, -14, -17, -4, 27, -21, -4, -60, -1, -8, 26, 27,
-10, -19, 27, -1, 37, 26, -60, -23, -3, 35, 45, 14, -15, 72, -14, -21, 9, -28, 14, -23, -3, 24, 52, -26, -30, 61, 37, -64, 9, -112, 86, 52, -35, -26, 18, -80, -65, 12, -40, 10, -15, -54, 98, 67, -74,
-22, -30, -28, -79, -30, -59, 3, -34, 22, 0, -57, -72, -68, -14, -11, 29, 16, 20, -3, 4, 2, -6, -28, 2, -3, 9, -1, -20, -16, 11, -51, -19, -35, -30, -25, 13, -31, 17, 5, -3, 15, 30, -4, 5, -12,
-45, -1, 21, 0, 37, 21, -9, -39, 22, 11, 10, 4, -39, 4, 17, -10, 3, 7, -4, -58, -28, 42, 47, 2, 26, -12, 15, -17, -26, -18, -6, 20, 18, 7, 8, 4, 9, -4, 22, -10, 22, 7, -4, -13, -10,
-18, -27, -28, -10, -55, 10, 17, 17, -37, -3, 29, 31, 10, -91, -37, -29, -62, 26, 21, -31, -2, -20, -5, 76, 9, -19, 12, -53, -33, -2, -8, 39, 16, 31, -26, -10, -4, 9, -9, -36, -34, -45, -18, -5, 36,
-24, 3, 1, 44, 5, -18, 20, 4, -47, -58, -7, -72, -52, -3, -12, 63, -38, 6, 6, 5, -30, -39, -13, -14, -25, 55, 83, 18, -14, -18, 26, -26, 1, -48, -7, -11, 27, -35, 2, 86, 48, 9, -52, -94, 86,
-13, 20, 55, 43, 9, 11, 14, 22, -43, -31, -30, 14, -41, 3, -3, -18, -21, -6, 21, -33, 10, -25, 15, -51, 2, 0, 4, -40, -38, 11, 21, 4, -31, -1, 38, -48, -20, -8, -20, 24, 4, -36, 46, -39, -53,
13, 39, -15, -32, -29, -3, -19, -15, -59, 59, 8, -16, -29, -12, -27, 14, -10, -28, -105, 24, -32, -7, 3, -1, -2, 4, -56, -46, -5, -9, -15, -4, 10, -10, -32, -24, -21, 5, 12, -15, -24, 32, 7, -27, 3,
14, -56, 62, -33, 34, 31, -7, -28, 10, -1, 0, 5, -34, -68, 31, -13, -13, 14, -5, -52, -4, -8, -10, 25, -36, 6, 32, 18, -36, 9, -21, 11, 25, -81, 15, 42, -8, 14, -36, 11, -21, -49, -67, 5, -1,
51, -14, 21, -22, -21, 17, -50, -35, 0, -8, 5, -16, 41, -12, -18, -31, -8, -29, -45, 54, 5, 14, -23, -8, 3, -11, -19, 28, -8, 18, -45, 34, 21, 23, -37, 14, 9, -36, 7, 20, 1, 34, -12, 23, 8,
20, -9, 18, -13, -69, -36, -15, -12, -25, -42, 22, -10, -24, -65, -21, 32, -18, -19, -49, 10, 3, -23, -12, 6, 25, 13, -24, 73, -5, 9, -72, 16, -25, -17, -23, 37, 56, 20, -8, 29, 22, 14, -28, -43, -1,
-25, -12, 2, 0, 19, 2, -14, -28, 16, 20, 5, -26, -12, -4, -17, -20, -5, 10, -68, -16, -22, 1, 21, -33, -2, -6, -63, -50, -16, -27, 33, 11, -14, 0, 15, -4, 6, -34, 19, 20, 23, -4, 25, 31, -5,
-1, 21, -20, -2, 19, -2, 14, -5, -51, 28, 25, -5, -46, -34, -8, -19, 27, 23, 19, 22, -8, 7, 1, 13, -21, 27, -1, -23, -17, 8, -5, -8, 18, -15, 26, -17, -41, 48, -22, 12, 3, -34, -32, 24, -17,
-27, -12, -37, 4, 10, 9, -24, 14, -28, 28, -7, 24, 14, -59, -72, 20, -22, 39, -20, 49, -9, 52, -39, 24, 41, -32, 4, -6, -41, -16, -24, 88, 15, 49, -19, -6, -1, 14, 29, -35, 27, -3, 4, 26, -35,
-46, 41, 17, -45, -14, 28, -33, 9, -42, 7, -7, -21, -25, 24, 24, 17, -6, -16, 28, 18, 48, -59, 9, 53, 27, 31, 41, 53, 62, -6, 2, 5, 37, -33, 15, 44, 67, 36, 28, 19, 43, 29, -27, 34, -25,
10, 22, 13, 29, 69, 53, 0, 12, -4, -1, 1, -40, 20, 41, -25, -14, -18, 26, -30, -31, 17, 10, -11, 52, 0, 6, -34, 2, -28, 35, -22, 17, 35, -25, 32, -41, 26, 31, 12, -9, 16, 5, 11, 26, 20,
26, -8, 17, 43, 14, 21, -12, 28, 12, 27, -19, 25, 16, -24, 14, 11, 7, -3, 2, -16, 17, -9, 4, -32, -16, -6, -17, -6, -38, 26, -9, -34, -10, -19, -17, 8, -26, 41, -18, 26, -22, 9, 4, 10, 0,
-35, 7, 4, -12, 48, -41, 5, -2, -26, -2, 4, -37, -29, 5, -39, 18, -18, -37, -25, 27, -37, 1, -16, -32, -18, -23, -24, 24, -20, -35, -10, 9, -22, -13, -59, -34, 15, -11, 12, -21, 0, 2, -9, -20, -11,
-17, 30, -27, 36, 42, 47, -33, 32, 8, -5, -18, -2, 3, 19, -29, 40, -29, -18, 43, -10, 31, -43, 39, -28, 61, 35, 7, 13, -13, -30, 34, -52, 16, -33, 32, 9, 46, -10, 22, 4, 25, 31, 45, -12, 19,
42, -26, 31, 40, -24, 31, 1, -31, 24, 47, 1, 28, -29, -26, 78, 32, 39, -1, 68, -8, -26, 24, 28, -7, -58, -29, 32, 38, 29, -29, 35, 32, -21, -5, -18, 87, -12, 23, 91, 94, 41, 48, 10, 30, 43,
-42, 7, -13, -29, 8, -4, 4, 6, -15, 30, -14, 2, 45, -37, -41, 19, 27, -9, -19, 7, 27, -10, -27, -57, 110, -4, -5, -15, -12, 1, 22, 5, -1, -30, -48, -70, 58, -22, 85, -1, -10, 5, 21, -41, 59,
9, 9, 22, -36, 19, -56, 24, 3, -18, -12, 16, -32, 10, -5, 0, -4, -14, -6, 14, 32, 34, -5, 3, 20, 25, 3, -4, -47, -36, 33, -37, -57, 24, 0, -43, 19, -24, 35, 17, 4, -36, -4, -27, -107, 9,
-52, 13, 24, -28, -13, -3, 56, 7, 23, 17, -1, 31, 5, -56, 10, -33, 15, -1, -41, -3, -1, 7, 57, -6, 1, 44, -15, -14, -19, 37, -61, 12, -18, -34, 14, -16, -15, 22, 20, -12, -15, 19, -4, 42, -7,
-65, -18, -8, 32, -7, 50, -12, 6, -52, 11, -13, -12, -51, -35, 9, 0, -12, 27, -35, -11, -48, 5, -30, -2, -25, -19, 9, 19, -24, 24, -6, 3, 19, -16, 5, -33, 47, 2, 0, 31, 42, -7, -7, 7, -13,
-17, 1, -33, -18, -6, -9, -15, 8, 21, 10, 11, -30, 24, 3, 16, -3, 22, -6, 2, -28, 5, -5, -3, -38, 34, -8, -16, -22, 38, -13, 6, -43, -5, -16, -9, -34, 13, -28, -2, -3, 9, -30, -15, -64, 15,
-3, 30, 16, 10, 2, 21, 27, 17, 30, 27, 40, -30, -26, -27, -6, 5, -50, 20, -41, -3, 19, 50, 5, 0, 2, -25, -31, 35, -50, 34, 16, -1, -29, 5, -15, -9, -39, 19, 18, -24, 15, -14, 5, 0, -12,
-23, 21, -6, -8, 32, 34, -11, 16, -28, 9, -11, -4, 6, -5, -31, -26, 16, 25, 48, -33, 3, 7, -6, 47, -13, 7, -2, -41, -9, 68, -11, -13, 8, 36, -81, -11, -54, -21, -4, -23, 21, 22, 21, 13, -19,
14, -71, -35, -49, 33, -9, -42, 36, 7, 56, 16, 11, -4, -6, 12, -30, 22, -12, 6, 10, 4, -12, 5, 31, 10, 22, 12, 47, -33, 4, 1, 34, 25, -15, -40, -27, 67, -15, -18, -68, -36, -13, -6, -35, 23,
23, -29, -12, 37, -2, -11, -23, -25, 9, 31, -43, -53, 77, -6, 61, -55, -47, 51, 6, -1, 28, 37, -14, 1, 14, 2, 53, -74, -48, 84, -1, 20, 51, 61, 4, 25, 32, -3, 31, -9, 48, 64, 74, -52, 43,
-32, -13, -25, 34, -15, -14, 72, -14, 3, -6, -28, 1, -52, -17, -15, 49, 33, -18, 54, -26, -33, -65, -39, -40, -39, 42, 18, -42, 58, -40, 82, 7, -16, -62, 36, -70, 2, 87, 32, -15, 72, -33, 73, -5, -36,
-23, 51, -52, 24, 54, 52, -10, 53, -36, 58, -13, -34, 12, 53, 5, -26, -8, 10, 1, 8, 0, 10, -4, -38, -7, -42, -19, -49, -6, 17, -21, 35, 21, 37, -14, -33, -64, -5, 7, -39, 41, 48, -66, 48, -23,
23, -12, -54, -46, 65, 9, 20, 40, -22, -25, 50, -3, -31, 32, -21, 37, 40, -27, 51, 30, 9, 13, 9, -3, -26, 59, 7, 35, 55, -9, -80, 14, 19, 12, -25, -38, 32, -25, -43, -59, -6, 8, -15, 27, 38,
-27, 14, -18, 10, -79, -10, -51, 21, 14, 14, 31, -11, -2, 53, 1, 9, -30, 28, -38, 9, 4, -12, 1, -36, 2, -17, -7, -21, 41, -10, 11, -32, 18, -27, -8, -55, -6, -40, 41, -4, 23, 23, 34, -40, 60,
27, -22, 8, -21, 3, -19, -24, 0, -33, -18, -70, -18, -8, -29, -39, -9, 11, 3, 3, 23, -45, -18, -34, 39, 2, -26, -6, -40, 23, 26, 41, 7, -15, 10, -1, 1, 3, -28, 8, -26, -11, 32, 7, -20, 29,
-8, -28, 21, 23, -15, 34, -7, -27, 7, -12, -11, 3, -33, -13, -22, 20, -7, -32, 8, 10, -17, -17, 6, 23, 52, -9, 7, 15, -9, 0, -2, 0, 3, 3, 6, -13, -21, -16, -6, -12, -28, 18, 19, 20, -5,
13, 47, -10, 5, -8, 30, 29, -36, -15, -5, 18, -13, -18, 20, 8, 12, 6, 7, 27, 5, -17, 20, -6, -13, -34, -20, 33, -29, 28, -11, -1, 3, 3, 11, 7, 15, 72, 8, 5, -21, -9, -28, 31, -4, 7,
9, 22, -15, -8, 40, -27, -43, -25, -42, -27, -4, -32, 27, 7, -5, -42, 15, 4, -9, 1, -2, -4, -54, -29, -1, 20, -7, -33, -16, -24, -19, -42, 10, -35, -49, -64, -15, 2, -37, -59, 19, -35, -30, -56, -3,
48, 9, -43, -6, 39, -16, -5, 4, -30, 64, -5, -18, 60, -23, -48, -61, -4, 19, -33, 23, 8, 49, 35, 30, 31, 14, 10, -12, 34, -3, -25, 38, 15, 40, 44, 42, -7, 24, -9, -7, 19, 6, -21, 30, 5,
42, 41, 24, -50, -15, -18, 20, 22, -10, 18, 12, -51, 0, -10, 6, 51, 5, -1, -47, 56, -7, 16, -16, 34, 58, 21, 2, 11, 53, -24, -127, 15, -35, -3, 10, 18, 57, 100, -3, -18, 94, 28, -62, -1, -38,
7, -19, 13, 32, 35, 14, 12, 10, -18, 17, -14, -8, 13, -1, 21, 5, -1, -21, -71, 19, 25, 3, -16, 15, 10, -28, -3, -10, 3, -14, -52, 5, 8, -14, 14, -32, 38, 18, 16, -26, -41, 12, -34, 31, 34,
-36, 20, 0, 18, 0, 2, -33, 0, 8, -35, -25, 37, -20, 47, -26, -2, -19, -61, -33, 3, 8, -43, -23, 25, 36, 0, 16, -9, -31, -15, -40, 17, 55, 25, -14, 12, 1, -1, 13, 19, -13, 30, 2, 4, -1,
-14, -5, -16, -7, 5, 6, 0, -2, 12, -28, 1, -4, 7, -32, 0, -1, 16, 10, 3, 19, -4, -46, -30, 0, 13, 0, -32, 13, 22, 2, -46, 10, -6, 5, -25, 25, 35, -18, -18, 45, -15, 1, -42, -18, 11,
-5, 43, 22, 57, -27, -36, 40, -15, 25, -50, 10, 18, 24, 29, 56,
};
const int32_t g_Weight7[] = {
51, 6, -132, 36, 151, -241, -110, 192, -196, -23, -218, 283, -121, -351, -168, -267, 139, 331, 49, 173,
};
const int8_t g_Weight8[] = {
-11, 87, -61, -70, -49, 39, 21, 37, -18, 75, 15, 28, -67, -113, -58, -41, -96, 81, -17, -2, -26, -64, -16, 108, 102, -84, -50, 29, -1, 91, -78, 93, -121, -38, 28, 40, 18, -4, 14, 54, 53, -86, -53, 20, -53,
-35, 5, 26, -106, 24, -67, 58, 40, -46, -107, 9, 61, 75, -76, 60, -27, 44, 75, 75, -52, -55, 64, -12, -13, 54, 15, -62, 79, -28, 76, 84, 74, -41, -70, 28, 36, -89, 45, 12, -35, 58, -22, 84, 81, -18,
-78, -32, -127, 38, -60, -5, 13, -90, 30, -96, -100, 87, 90, -7, 84, -57, 19, -5, 29, -98, 74, 28, 18, -58, -18, -27, 72, -24, -12, -63, -13, 65, -109, -31, -15, 38, -36, 64, 25, -99, -51, -45, 59, -57, 53,
-85, 0, -16, 92, 45, 26, 25, 40, 108, -12, 74, -90, -29, -90, 68, 42, 79, -24, 19, -78, 1, -38, -101, -62, 8, 81, 55, 38, -41, 13, 30, -44, -21, -24, -17, -50, 15, -10, -38, 109, -66, 95, 38, -89, -42,
-11, -85, -9, -84, 14, 62, -2, -54, 55, -2, 61, 26, 5, 21, 41, 89, -26, -45, -67, -63,
};
const int32_t g_Weight9[] = {
30, 58, 83, -64, -12, 29, -45, -28, 48, -79,
};

View File

@ -1,36 +0,0 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "CMSIS/NN/Include/arm_nnfunctions.h"
#include "nnacl/int8/quant_dtype_cast_int8.h"
#include <stdlib.h>
#include <string.h>
extern unsigned char *g_Buffer;
enum STATUS {
RET_OK = 0,
RET_ERROR = 1,
};
extern int g_thread_num;
extern const int8_t g_Weight1[];
extern const int32_t g_Weight2[];
extern const int8_t g_Weight3[];
extern const int32_t g_Weight4[];
extern const int8_t g_Weight6[];
extern const int32_t g_Weight7[];
extern const int8_t g_Weight8[];
extern const int32_t g_Weight9[];

View File

@ -1,62 +0,0 @@
cmake_minimum_required(VERSION 3.14)
project(benchmark)
if(NOT DEFINED PKG_PATH)
message(FATAL_ERROR "PKG_PATH not set")
endif()
get_filename_component(PKG_PATH ${PKG_PATH} ABSOLUTE BASE_DIR ${CMAKE_CURRENT_BINARY_DIR})
set(HEADER_PATH ${PKG_PATH}/runtime)
option(PLATFORM_ARM64 "build android arm64" OFF)
option(PLATFORM_ARM32 "build android arm32" OFF)
add_compile_definitions(NOT_USE_STL)
if(PLATFORM_ARM64 OR PLATFORM_ARM32)
add_compile_definitions(ENABLE_NEON)
add_compile_definitions(ENABLE_ARM)
endif()
if(PLATFORM_ARM64)
add_compile_definitions(ENABLE_ARM64)
endif()
if(PLATFORM_ARM32)
add_compile_definitions(ENABLE_ARM32)
add_definitions(-mfloat-abi=softfp -mfpu=neon)
endif()
set(CMAKE_C_FLAGS "${CMAKE_ENABLE_C99} ${CMAKE_C_FLAGS}")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++17")
if("${CMAKE_BUILD_TYPE}" STREQUAL "Debug")
message(STATUS "build benchmark with debug info")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DDebug -g")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DDebug -g")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fvisibility=default")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fvisibility=default")
else()
message(STATUS "build benchmark release version")
set(CMAKE_C_FLAGS "-fPIC -fPIE -D_FORTIFY_SOURCE=2 -O3 -Wall -Werror -fstack-protector-strong -Wno-attributes \
-Wno-deprecated-declarations -Wno-missing-braces ${CMAKE_C_FLAGS}")
set(CMAKE_CXX_FLAGS "-fPIC -fPIE -D_FORTIFY_SOURCE=2 -O3 -Wall -Werror -fstack-protector-strong -Wno-attributes \
-Wno-deprecated-declarations -Wno-missing-braces -Wno-overloaded-virtual ${CMAKE_CXX_FLAGS}")
string(REPLACE "-g" "" CMAKE_C_FLAGS "${CMAKE_C_FLAGS}")
string(REPLACE "-g" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
endif()
string(APPEND CMAKE_EXE_LINKER_FLAGS " -Wl,--gc-sections")
add_subdirectory(src)
include_directories(${CMAKE_CURRENT_SOURCE_DIR})
include_directories(${HEADER_PATH})
include_directories(${HEADER_PATH}/include)
set(SRC_FILES
benchmark/benchmark.cc
benchmark/calib_output.cc
benchmark/load_input.c
)
add_executable(benchmark ${SRC_FILES})
target_link_libraries(benchmark net -lm -pthread)
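Given the `PKG_PATH` guard and the platform options above, a typical out-of-source configure of this project might look like the following (the package path is a placeholder; for Android builds, add the matching `PLATFORM_ARM64`/`PLATFORM_ARM32` option and a toolchain file):

```bash
mkdir build && cd build
# PKG_PATH must point at the extracted release package, or CMake fails fast.
cmake -DPKG_PATH=/path/to/mindspore-lite-1.2.0-linux-x64 ..
make
```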

View File

@ -1,268 +0,0 @@
# X86 Compilation and Deployment
`Linux` `IoT` `C++` `End-to-End` `Model Compilation` `Model Code Generation` `Model Deployment` `Inference Application` `Beginner` `Intermediate` `Advanced`
<!-- TOC -->
- [X86 Compilation and Deployment](#x86-compilation-and-deployment)
    - [Overview](#overview)
    - [Model Compilation Walkthrough](#model-compilation-walkthrough)
    - [Detailed Steps](#detailed-steps)
        - [Generating Code](#generating-code)
        - [Deploying the Application](#deploying-the-application)
            - [Build Dependencies](#build-dependencies)
            - [Building and Running](#building-and-running)
            - [Writing Inference Code](#writing-inference-code)
    - [More Details](#more-details)
        - [Compilation and Deployment on Android](#compilation-and-deployment-on-android)
        - [Compilation and Deployment on Arm&nbsp;Cortex-M](#compilation-and-deployment-on-armcortex-m)
<!-- /TOC -->
## Overview
This tutorial uses the inference code of an MNIST classification model as an example to walk through generating code with codegen, building it, and deploying it.
## Model Compilation Walkthrough
You can generate the inference code for the MNIST classification model, build it, and run a single inference with a one-step script. Download the [MindSpore source code](https://gitee.com/mindspore/mindspore), enter the [`mindspore/mindspore/lite/micro/example/mnist_x86`](https://gitee.com/mindspore/mindspore/tree/master/mindspore/lite/micro/example/mnist_x86) directory, and run the script `mnist.sh`; it automatically generates the model inference code, builds the project, and prints the output of a single inference.
```bash
bash mnist.sh
```
The inference result is as follows:
```text
======run benchmark======
input 0: mnist_input.bin
outputs:
name: Softmax-7, DataType: 43, Size: 40, Shape: [1 10], Data:
0.000000, 0.000000, 0.000000, 1.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000,
======run success=======
```
Alternatively, follow the **Detailed Steps** below to walk through the full flow of compiling an MNIST classification model with codegen, starting from code generation.
## Detailed Steps
Before building this project, obtain the [release package](https://www.mindspore.cn/lite/docs/zh-CN/master/use/downloads.html) for the Ubuntu-x64 CPU platform, extract it to get `mindspore-lite-{version}-linux-x64`, and copy that directory here.
> `{version}` is the version string, e.g. `1.2.0`.
For this tutorial, the prebuilt x86 release package is laid out as follows:
```text
mindspore-lite-{version}-linux-x64
└── tools
    └── codegen               # code generation tool
        ├── codegen           # executable
        ├── include           # inference framework headers
        │   ├── nnacl         # nnacl operator headers
        │   └── wrapper
        ├── lib
        │   └── libwrapper.a  # static library of operators required by MindSpore Lite CodeGen generated code
        └── third_party
            ├── include
            │   └── CMSIS     # ARM CMSIS NN operator headers
            └── lib
                └── libcmsis_nn.a  # ARM CMSIS NN operator static library
```
### Generating Code
Download the [MNIST classification network](https://download.mindspore.cn/model_zoo/official/lite/mnist_lite/mnist.ms). Use codegen from the release package to compile the MNIST classification model and generate the corresponding x86 inference code:
```bash
./codegen --codePath=. --modelPath=mnist.ms --target=x86
```
codegen generates an `mnist` directory in the current directory, containing buildable inference code for the MNIST classification model.
> For more codegen command-line options, see the [codegen usage guide](https://www.mindspore.cn/lite/docs/zh-CN/master/use/micro.html#id4).
### Deploying the Application
This section describes how to build the model inference project generated by MindSpore Lite CodeGen and deploy it on the x86 platform. The code generated above is identical to `mindspore/mindspore/lite/micro/example/mnist_x86`, so the build steps below use that directory; the same steps apply to the `mnist` directory generated by codegen.
#### Build Dependencies
- [CMake](https://cmake.org/download/) >= 3.18.3
- [GCC](https://gcc.gnu.org/releases.html) >= 7.3.0
#### Building and Running
1. **Generated project layout**
Enter the `mindspore/mindspore/lite/micro/example/mnist_x86` directory, which is preloaded with the code generated for the MNIST classification network.
The generated project is laid out as follows:
```text
mnist_x86/      # root of the generated code
├── benchmark   # benchmark code
└── src         # model inference code
```
2. **Building the code**
Compile the generated inference code together with the operator static libraries into the model inference static library, then build the `benchmark` executable.
In the project directory, create and enter a `build` directory:
```bash
mkdir build && cd build
```
Start the build:
```bash
cmake -DPKG_PATH={path to}/mindspore-lite-{version}-linux-x64 ..
make
```
> Replace `{path to}` and `{version}` with the actual values.
A successful build produces output like:
```text
Scanning dependencies of target net
[ 12%] Building C object src/CMakeFiles/net.dir/net.c.o
[ 25%] Building CXX object src/CMakeFiles/net.dir/session.cc.o
[ 37%] Building CXX object src/CMakeFiles/net.dir/tensor.cc.o
[ 50%] Building C object src/CMakeFiles/net.dir/weight.c.o
[ 62%] Linking CXX static library libnet.a
unzip raw static library libnet.a
raw static library libnet.a size:
-rw-r--r-- 1 user user 58K Mar 22 10:09 libnet.a
generate specified static library libnet.a
new static library libnet.a size:
-rw-r--r-- 1 user user 162K Mar 22 10:09 libnet.a
[ 62%] Built target net
Scanning dependencies of target benchmark
[ 75%] Building CXX object CMakeFiles/benchmark.dir/benchmark/benchmark.cc.o
[ 87%] Building C object CMakeFiles/benchmark.dir/benchmark/load_input.c.o
[100%] Linking CXX executable benchmark
[100%] Built target benchmark
```
At this point `libnet.a`, the inference library, has been generated under `mnist_x86/build/src/`, and the `benchmark` executable under `mnist_x86/build`.
3. **Deployment**
This example targets the x86 platform. The build produces the `benchmark` executable; copy it to the target Linux server to deploy it.
Run the compiled binary on the target Linux server:
```bash
./benchmark mnist_input.bin net.bin
```
> `mnist_input.bin` is in the `example/mnist_x86` directory; `net.bin` is the model weight file, in the `example/mnist_x86/src` directory.
The output is as follows:
```text
start run benchmark
input 0: mnist_input.bin
output size: 1
uint8:
Name: Softmax-7, DataType: 43, Size: 40, Shape: 1 10, Data:
0.000000, 0.000000, 0.000000, 1.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000,
run benchmark success
```
#### Writing Inference Code
The `benchmark` implementation in this tutorial mainly demonstrates how to write and call the inference interfaces of the model code generated by codegen. The calls are described below; for the complete code, see the example under [examples/mnist_x86](https://gitee.com/mindspore/mindspore/tree/master/mindspore/lite/micro/example/mnist_x86):
1. **Create the inference context and session**
The code generated in this tutorial is single-threaded, so no context is needed and it can be set to null.
```cpp
size_t model_size = 0;
Context *context = nullptr;
session::LiteSession *session = mindspore::session::LiteSession::CreateSession(model_buffer, model_size, context);
if (session == nullptr) {
std::cerr << "create lite session failed" << std::endl;
return RET_ERROR;
}
```
2. **Prepare input data**
Allocate the memory required for the input data. If the input is a persisted file, read it from the file; if the input data is already in memory, skip the read and pass the data pointer directly.
```cpp
std::vector<MSTensor *> inputs = session->GetInputs();
MSTensor *input = inputs.at(0);
if (input == nullptr) {
return RET_ERROR;
}
// Assume we have got input data in memory.
memcpy(input->MutableData(), input_buffer, input->Size());
```
3. **Run inference**
```cpp
session->RunGraph();
```
4. **Fetch the outputs after inference**
```cpp
Vector<String> outputs_name = session->GetOutputTensorNames();
for (const auto &name : outputs_name) {
auto output = session->GetOutputByTensorName(name);
// deal with output
......
}
```
5. **Release the session**
```cpp
delete session;
```
6. **End-to-end call flow**
```cpp
// Assume we have got model_buffer data in memory.
size_t model_size = 0;
Context *context = nullptr;
session::LiteSession *session = mindspore::session::LiteSession::CreateSession(model_buffer, model_size, context);
if (session == nullptr) {
std::cerr << "create lite session failed" << std::endl;
return RET_ERROR;
}
std::vector<MSTensor *> inputs = session->GetInputs();
MSTensor *input = inputs.at(0);
if (input == nullptr) {
return RET_ERROR;
}
// Assume we have got input data in memory.
memcpy(input->MutableData(), input_buffer, input->Size());
session->RunGraph();
Vector<String> outputs_name = session->GetOutputTensorNames();
for (const auto &name : outputs_name) {
auto output = session->GetOutputByTensorName(name);
// deal with output
......
}
delete session;
```
## More Details
### [Compilation and Deployment on Android](https://gitee.com/mindspore/mindspore/blob/master/mindspore/lite/micro/example/mobilenetv2/README.md#)
### [Compilation and Deployment on Arm&nbsp;Cortex-M](https://www.mindspore.cn/lite/docs/zh-CN/master/use/micro.html)

View File

@ -1,227 +0,0 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <iostream>
#include <string>
#include <cstring>
#include "lite_session.h"
#include "ms_tensor.h"
#include "errorcode.h"
#include "load_input.h"
#include "calib_output.h"
using namespace mindspore;
void usage() {
printf(
"-- mindspore benchmark params usage:\n"
"args[0]: executable file\n"
"args[1]: inputs binary file\n"
"args[2]: model weight binary file\n"
"args[3]: loop count for performance test\n"
"args[4]: calibration file\n"
"args[5]: runtime thread num\n"
"args[6]: runtime thread bind mode\n\n");
}
uint64_t GetTimeUs() {
const int USEC = 1000000;
const int MSEC = 1000;
struct timespec ts = {0, 0};
if (clock_gettime(CLOCK_MONOTONIC, &ts) != 0) {
return 0;
}
uint64_t retval = (uint64_t)((ts.tv_sec * USEC) + (ts.tv_nsec / MSEC));
return retval;
}
template <typename T>
void PrintData(void *data, size_t data_number) {
if (data == nullptr) {
return;
}
auto casted_data = static_cast<T *>(data);
for (size_t i = 0; i < 10 && i < data_number; i++) {
printf("%s, ", std::to_string(casted_data[i]).c_str());
}
printf("\n");
}
void TensorToString(tensor::MSTensor *tensor) {
printf("name: %s, ", tensor->tensor_name().c_str());
printf("DataType: %d, ", tensor->data_type());
printf("Elements: %d, ", static_cast<int>(tensor->ElementsNum()));
printf("Shape: [");
for (auto &dim : tensor->shape()) {
printf("%d ", dim);
}
printf("], Data: \n");
switch (tensor->data_type()) {
case kNumberTypeFloat32: {
PrintData<float>(tensor->MutableData(), tensor->ElementsNum());
} break;
case kNumberTypeFloat16: {
PrintData<int16_t>(tensor->MutableData(), tensor->ElementsNum());
} break;
case kNumberTypeInt32: {
PrintData<int32_t>(tensor->MutableData(), tensor->ElementsNum());
} break;
case kNumberTypeInt16: {
PrintData<int16_t>(tensor->MutableData(), tensor->ElementsNum());
} break;
case kNumberTypeInt8: {
PrintData<int8_t>(tensor->MutableData(), tensor->ElementsNum());
} break;
case kNumberTypeUInt8: {
PrintData<uint8_t>(tensor->MutableData(), tensor->ElementsNum());
} break;
default:
std::cout << "Unsupported data type to print" << std::endl;
break;
}
}
int main(int argc, const char **argv) {
if (argc < 2) {
printf("input command is invalid\n");
usage();
return lite::RET_ERROR;
}
printf("=======run benchmark======\n");
const char *model_buffer = nullptr;
int model_size = 0;
// read .bin file by ReadBinaryFile;
if (argc >= 3) {
model_buffer = static_cast<const char *>(ReadInputData(argv[2], &model_size));
}
lite::Context *context = nullptr;
if (argc >= 7) {
// config benchmark context
context = new (std::nothrow) lite::Context();
if (context == nullptr) {
return lite::RET_ERROR;
}
context->thread_num_ = atoi(argv[5]);
if (context->thread_num_ < 1) {
printf("Thread number error! It should be greater than 0\n");
return lite::RET_ERROR;
}
context->device_list_.resize(1);
context->device_list_[0].device_type_ = lite::DT_CPU;
context->device_list_[0].device_info_.cpu_device_info_.enable_float16_ = false;
lite::CpuBindMode bind_mode = static_cast<lite::CpuBindMode>(atoi(argv[6]));
if (bind_mode < lite::NO_BIND || bind_mode > lite::MID_CPU) {
printf("Thread bind mode error! 0: No bind, 1: Bind hign cpu, 2: Bind mid cpu.\n");
return lite::RET_ERROR;
}
context->device_list_[0].device_info_.cpu_device_info_.cpu_bind_mode_ = bind_mode;
printf("context: ThreadNum: %d, BindMode: %d\n", context->thread_num_,
context->device_list_[0].device_info_.cpu_device_info_.cpu_bind_mode_);
}
session::LiteSession *session = mindspore::session::LiteSession::CreateSession(model_buffer, model_size, context);
if (session == nullptr) {
printf("create lite session failed\n");
return lite::RET_ERROR;
}
delete[] model_buffer;
// set model inputs tensor data
Vector<tensor::MSTensor *> inputs = session->GetInputs();
size_t inputs_num = inputs.size();
void *inputs_binbuf[inputs_num];
int inputs_size[inputs_num];
for (size_t i = 0; i < inputs_num; ++i) {
inputs_size[i] = inputs[i]->Size();
}
int ret = ReadInputsFile(const_cast<char *>(argv[1]), inputs_binbuf, inputs_size, inputs_num);
if (ret != lite::RET_OK) {
delete session;
return lite::RET_ERROR;
}
for (size_t i = 0; i < inputs_num; ++i) {
void *input_data = inputs[i]->MutableData();
memcpy(input_data, inputs_binbuf[i], inputs_size[i]);
}
if (argc >= 4) {
int loop_count = atoi(argv[3]);
printf("\nloop count: %d\n", loop_count);
uint64_t start_time = GetTimeUs();
for (int i = 0; i < loop_count; ++i) {
ret = session->RunGraph();
if (ret != lite::RET_OK) {
delete session;
return lite::RET_ERROR;
}
}
uint64_t end_time = GetTimeUs();
float total_time = (float)(end_time - start_time) / 1000.0f;
printf("total time: %.5fms, per time: %.5fms\n", total_time, total_time / loop_count);
}
ret = session->RunGraph();
if (ret != lite::RET_OK) {
delete session;
return lite::RET_ERROR;
}
printf("\noutputs: \n");
Vector<String> outputs_name = session->GetOutputTensorNames();
Vector<tensor::MSTensor *> outputs;
for (const auto &name : outputs_name) {
auto output = session->GetOutputByTensorName(name);
outputs.push_back(output);
TensorToString(output);
}
if (argc >= 5) {
lite::Calibrator *calibrator = new (std::nothrow) lite::Calibrator();
if (calibrator == nullptr) {
delete session;
return lite::RET_NULL_PTR;
}
ret = calibrator->ReadCalibData(argv[4]);
if (ret != lite::RET_OK) {
delete session;
delete calibrator;
return lite::RET_ERROR;
}
ret = calibrator->CompareOutputs(outputs);
if (ret != lite::RET_OK) {
delete session;
delete calibrator;
return lite::RET_ERROR;
}
delete calibrator;
}
printf("========run success=======\n");
delete session;
session = nullptr;
if (context != nullptr) {
delete context;
context = nullptr;
}
for (size_t i = 0; i < inputs_num; ++i) {
free(inputs_binbuf[i]);
inputs_binbuf[i] = nullptr;
}
return lite::RET_OK;
}
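Going by `usage()` above, a full invocation with every optional argument supplied might look like this (file names are placeholders):

```bash
# inputs, weights, loop count, calibration file, thread num, bind mode (1 = bind high CPU)
./benchmark mnist_input.bin net.bin 10 mnist_calib.txt 2 1
```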

View File

@ -1,147 +0,0 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "calib_output.h"
#include <fstream>
#include <sstream>
#include <iostream>
#include <stdio.h>
#include <cmath>
namespace mindspore {
namespace lite {
constexpr float kToleranceVal = 0.0001;
#define MS_ERROR_IF_NULL(ptr) \
do { \
if ((ptr) == nullptr) { \
return mindspore::lite::RET_ERROR; \
} \
} while (0)
int Calibrator::ReadCalibData(const char *calib_data_path) {
std::ifstream in_file(calib_data_path);
if (!in_file.good()) {
printf("file is not exist, %s\n", calib_data_path);
return RET_ERROR;
}
if (!in_file.is_open()) {
printf("open file failed, %s\n", calib_data_path);
in_file.close();
return RET_ERROR;
}
while (!in_file.eof()) {
std::string line;
getline(in_file, line);
if (line.empty()) {
continue;
}
std::stringstream name_line(line);
std::string tensor_name;
size_t dim = 0;
name_line >> tensor_name >> dim;
size_t elements = 1;
for (size_t i = 0; i < dim; i++) {
size_t tmp_dim;
name_line >> tmp_dim;
elements *= tmp_dim;
}
getline(in_file, line);
std::stringstream data_line(line);
String name(tensor_name.c_str());
CalibTensor *output = new (std::nothrow) CalibTensor(name, elements);
MS_ERROR_IF_NULL(output);
float *data = output->MutableData();
MS_ERROR_IF_NULL(data);
for (size_t i = 0; i < elements; i++) {
data_line >> data[i];
}
calib_outputs_.push_back(output);
}
in_file.close();
return RET_OK;
}
template <typename T>
float CompareData(const T *output, const float *calib, size_t elements_num) {
float error = 0.;
if (output == nullptr || calib == nullptr) {
printf("output or calib is nullptr\n");
return error;
}
for (size_t i = 0; i < elements_num; ++i) {
if (std::isnan(output[i]) || std::isinf(output[i]) || std::isnan(calib[i]) || std::isinf(calib[i])) {
printf("error, output data is nan or inf\n");
return error;
}
error += std::abs(output[i] - calib[i]);
}
return error;
}
int Calibrator::CompareOutputs(const Vector<tensor::MSTensor *> &outputs) const {
if (outputs.size() != calib_outputs_.size()) {
printf("error, outputs and calibs size is mismatch\n");
return RET_ERROR;
}
float total_error = 0;
size_t outputs_num = outputs.size();
for (size_t i = 0; i < outputs_num; ++i) {
tensor::MSTensor *output = outputs[i];
MS_ERROR_IF_NULL(output);
CalibTensor *calib = calib_outputs_[i];
MS_ERROR_IF_NULL(calib);
if (output->tensor_name() != calib->tensor_name()) {
printf("warning, output tensor name is not equal to calib\n");
}
if (output->ElementsNum() != calib->ElementsNum()) {
printf("error, output elements num is not equal to calib\n");
return RET_ERROR;
}
switch (output->data_type()) {
case TypeId::kNumberTypeFloat:
case TypeId::kNumberTypeFloat32: {
total_error += CompareData(static_cast<float *>(output->data()), calib->MutableData(), output->ElementsNum());
break;
}
case TypeId::kNumberTypeInt8: {
total_error += CompareData(static_cast<int8_t *>(output->data()), calib->MutableData(), output->ElementsNum());
break;
}
case TypeId::kNumberTypeUInt8: {
total_error += CompareData(static_cast<uint8_t *>(output->data()), calib->MutableData(), output->ElementsNum());
break;
}
      case TypeId::kNumberTypeUInt:
      case TypeId::kNumberTypeUInt32: {
        total_error += CompareData(static_cast<uint32_t *>(output->data()), calib->MutableData(), output->ElementsNum());
break;
}
default: {
printf("unsupported tensor data type\n");
}
}
}
if (total_error > kToleranceVal) {
printf("compare outputs failed, total error: %f\n", total_error);
return RET_ERROR;
}
printf("compare outputs success, total error: %f\n", total_error);
return RET_OK;
}
} // namespace lite
} // namespace mindspore
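For reference, `ReadCalibData` above expects a plain-text file with two lines per output tensor: a header line carrying the tensor name, the dimension count, and each dimension, followed by a line of space-separated float values. Below is a minimal C++ sketch that emits such a file; the tensor name and the 1x10 shape mirror the generated MNIST session elsewhere in this commit, while the values themselves are purely illustrative.
```cpp
#include <fstream>

// Sketch: write a calibration file in the format parsed by Calibrator::ReadCalibData.
// Header line: <tensor_name> <dim_count> <dim_0> ... <dim_n-1>
// Data line:   dim_0 * ... * dim_n-1 space-separated float values
int main() {
  std::ofstream out("mnist.calib");
  out << "int8toft32_Softmax-7_post0/output-0 2 1 10\n";  // name, 2 dims: 1 x 10
  for (int i = 0; i < 10; ++i) {
    out << (i == 7 ? 0.91 : 0.01) << (i + 1 < 10 ? ' ' : '\n');
  }
  return 0;
}
```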

View File

@ -1,73 +0,0 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_MICRO_CALIB_OUTPUT_H_
#define MINDSPORE_LITE_MICRO_CALIB_OUTPUT_H_
#include "lite_utils.h"
#include "ms_tensor.h"
#include "errorcode.h"
namespace mindspore {
namespace lite {
class CalibTensor {
public:
CalibTensor(String name, size_t elements_num) : tensor_name_(name), elements_num_(elements_num) {}
~CalibTensor() {
free(data_);
data_ = nullptr;
}
String tensor_name() const { return tensor_name_; }
int ElementsNum() const { return elements_num_; }
float *MutableData() {
if (data_ == nullptr) {
if (elements_num_ == 0 || elements_num_ > INT16_MAX) {
return nullptr;
}
data_ = static_cast<float *>(malloc(elements_num_ * sizeof(float)));
}
return data_;
}
private:
String tensor_name_;
int elements_num_{0};
float *data_{nullptr};
};
class Calibrator {
public:
Calibrator() = default;
~Calibrator() {
for (auto &calib : calib_outputs_) {
delete calib;
calib = nullptr;
}
calib_outputs_.clear();
}
int ReadCalibData(const char *calib_data_path);
int CompareOutputs(const Vector<tensor::MSTensor *> &outputs) const;
private:
Vector<CalibTensor *> calib_outputs_;
};
} // namespace lite
} // namespace mindspore
#endif // MINDSPORE_LITE_MICRO_CALIB_OUTPUT_H_

View File

@ -1,95 +0,0 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "load_input.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
void *ReadInputData(const char *real_input_path, int *size) {
if (real_input_path == NULL) {
return NULL;
}
if (strstr(real_input_path, ".bin") || strstr(real_input_path, ".net")) {
FILE *file;
file = fopen(real_input_path, "rb");
if (!file) {
printf("Can't find %s\n", real_input_path);
return NULL;
}
int curr_file_posi = ftell(file);
fseek(file, 0, SEEK_END);
*size = ftell(file);
    unsigned char *buf = malloc(*size);
    if (buf == NULL) {
      printf("malloc buf failed\n");
      fclose(file);
      return NULL;
    }
    (void)memset(buf, 0, *size);
fseek(file, curr_file_posi, SEEK_SET);
int read_size = (int)(fread(buf, 1, *size, file));
if (read_size != (*size)) {
printf("read file failed, total file size: %d, read_size: %d\n", (*size), read_size);
fclose(file);
free(buf);
return NULL;
}
fclose(file);
return (void *)buf;
} else {
printf("input data file should be .bin , .net");
return NULL;
}
}
void SaveOutputData(char *final_name, unsigned char *output_data, unsigned int out_size) {
FILE *output_file;
output_file = fopen(final_name, "w");
if (output_file == NULL) {
printf("fopen output file: %s failed\n", final_name);
return;
}
unsigned char str[out_size];
for (unsigned int i = 0; i < out_size; ++i) {
str[i] = output_data[i];
fprintf(output_file, "%d\t", str[i]);
}
fclose(output_file);
}
int ReadInputsFile(char *path, void **buffers, const int *inputs_size, int inputs_num) {
char *inputs_path[inputs_num];
char *delim = ",";
char *token;
int i = 0;
while ((token = strtok_r(path, delim, &path))) {
if (i >= inputs_num) {
printf("inputs num is error, need: %d\n", inputs_num);
return -1;
}
inputs_path[i] = token;
printf("input %d: %s\n", i, inputs_path[i]);
i++;
}
for (i = 0; i < inputs_num; ++i) {
int size = 0;
buffers[i] = ReadInputData(inputs_path[i], &size);
if (size != inputs_size[i] || buffers[i] == NULL) {
printf("size mismatch, %s, input: %d, needed: %d\n", inputs_path[i], size, inputs_size[i]);
return -1;
}
}
return 0;
}

View File

@ -1,36 +0,0 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MICRO_EXAMPLE_LOAD_INPUT_LOAD_INPUT_H_
#define MICRO_EXAMPLE_LOAD_INPUT_LOAD_INPUT_H_
#ifdef __cplusplus
extern "C" {
#endif
void *ReadInputData(const char *real_input_path, int *size);
void SaveOutputData(char *final_name, unsigned char *output_data, unsigned int out_size);
int ReadInputsFile(char *path, void **buffers, const int *inputs_size, int inputs_num);
#ifdef __cplusplus
}
#endif
#endif // MICRO_EXAMPLE_LOAD_INPUT_LOAD_INPUT_H_

View File

@ -1,105 +0,0 @@
#!/bin/bash
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
set -e
GEN=OFF
TARBALL=""
while getopts 'r:g:' OPT
do
case "${OPT}" in
g)
GEN=$OPTARG
;;
r)
TARBALL=$OPTARG
;;
?)
echo "Usage: add -g on , -r specific release.tar.gz"
esac
done
BASEPATH="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
MINDSPORE_ROOT_DIR=${BASEPATH%%/mindspore/lite/micro/example/mnist_x86}
echo "current dir is: ${BASEPATH}"
INPUT_BIN=${BASEPATH}/mnist_input.bin
MNIST_NAME=mnist
MNIST_FILE=${MNIST_NAME}.ms
BENCHMARK_PATH=${BASEPATH}
get_version() {
local VERSION_HEADER=${MINDSPORE_ROOT_DIR}/mindspore/lite/include/version.h
local VERSION_MAJOR=$(grep "const int ms_version_major =" ${VERSION_HEADER} | tr -dc "[0-9]")
local VERSION_MINOR=$(grep "const int ms_version_minor =" ${VERSION_HEADER} | tr -dc "[0-9]")
local VERSION_REVISION=$(grep "const int ms_version_revision =" ${VERSION_HEADER} | tr -dc "[0-9]")
VERSION_STR=${VERSION_MAJOR}.${VERSION_MINOR}.${VERSION_REVISION}
}
download_mnist() {
local MNIST_DOWNLOAD_URL=https://download.mindspore.cn/model_zoo/official/lite/mnist_lite/${MNIST_FILE}
if [ ! -e ${BASEPATH}/build/${MNIST_FILE} ]; then
wget -c -O ${BASEPATH}/build/${MNIST_FILE} --no-check-certificate ${MNIST_DOWNLOAD_URL}
fi
}
gen_mnist() {
local CODEGEN_PATH=${BASEPATH}/build/${MINDSPORE_FILE_NAME}/tools/codegen
${CODEGEN_PATH}/codegen --codePath=${BASEPATH}/build --modelPath=${BASEPATH}/build/${MNIST_FILE}
}
mkdir -p ${BASEPATH}/build
get_version
MINDSPORE_FILE_NAME="mindspore-lite-${VERSION_STR}-linux-x64"
MINDSPORE_FILE="${MINDSPORE_FILE_NAME}.tar.gz"
echo "tar ball is: ${TARBALL}"
if [ -n "$TARBALL" ]; then
echo "cp file"
cp ${TARBALL} ${BASEPATH}/build/${MINDSPORE_FILE}
fi
download_inference() {
local MINDSPORE_LITE_DOWNLOAD_URL="https://ms-release.obs.cn-north-4.myhuaweicloud.com/${VERSION_STR}/MindSpore/lite/release/linux/x86_64/${MINDSPORE_FILE}"
wget -c -O ${BASEPATH}/build/${MINDSPORE_FILE} --no-check-certificate ${MINDSPORE_LITE_DOWNLOAD_URL}
}
if [ ! -e ${BASEPATH}/build/${MINDSPORE_FILE} ]; then
echo "need down inference"
download_inference
fi
tar xzvf ${BASEPATH}/build/${MINDSPORE_FILE} -C ${BASEPATH}/build/ || exit 1
#rm ${BASEPATH}/build/${MINDSPORE_FILE} || exit 1
PKG_PATH=${BASEPATH}/build/${MINDSPORE_FILE_NAME}
if [[ "${GEN}" == "ON" ]] || [[ "${GEN}" == "on" ]]; then
echo "downloading mnist.ms!"
download_mnist
echo "generating mnist"
gen_mnist
BENCHMARK_PATH=${BASEPATH}/build/${MNIST_NAME}
fi
# 1. build benchmark
rm -rf ${BASEPATH}/build/benchmark
mkdir -p ${BASEPATH}/build/benchmark && cd ${BASEPATH}/build/benchmark || exit 1
cmake -DPKG_PATH=${PKG_PATH} ${BENCHMARK_PATH}
make
# 2. run benchmark
echo "net file: ${BENCHMARK_PATH}/src/mnist.bin"
./benchmark ${INPUT_BIN} ${BENCHMARK_PATH}/src/net.bin

View File

@ -1 +0,0 @@
Binary file not shown.

View File

@ -1,101 +0,0 @@
cmake_minimum_required(VERSION 3.14)
project(net)
if(NOT DEFINED PKG_PATH)
message(FATAL_ERROR "PKG_PATH not set")
endif()
get_filename_component(PKG_PATH ${PKG_PATH} ABSOLUTE BASE_DIR ${CMAKE_CURRENT_BINARY_DIR})
set(OP_LIB ${PKG_PATH}/runtime/lib/libmindspore-lite.a)
set(WRAPPER_LIB ${PKG_PATH}/tools/codegen/lib/libwrapper.a)
set(OP_HEADER_PATH ${PKG_PATH}/tools/codegen/include)
set(HEADER_PATH ${PKG_PATH}/runtime)
message(STATUS "operator lib path: ${OP_LIB}")
message(STATUS "operator header path: ${OP_HEADER_PATH}")
add_compile_definitions(NOT_USE_STL)
include_directories(${OP_HEADER_PATH})
include_directories(${HEADER_PATH})
include_directories(${HEADER_PATH}/include)
if(NOT PLATFORM_ARM32 AND NOT PLATFORM_ARM64)
include_directories(${PKG_PATH}/tools/codegen/third_party/include)
include_directories(${PKG_PATH}/tools/codegen/third_party/include/CMSIS/Core/Include)
include_directories(${PKG_PATH}/tools/codegen/third_party/include/CMSIS/DSP/Include)
include_directories(${PKG_PATH}/tools/codegen/third_party/include/CMSIS/NN/Include)
endif()
include(net.cmake)
option(PLATFORM_ARM64 "build android arm64" OFF)
option(PLATFORM_ARM32 "build android arm32" OFF)
if(PLATFORM_ARM64 OR PLATFORM_ARM32)
add_compile_definitions(ENABLE_NEON)
add_compile_definitions(ENABLE_ARM)
endif()
if(PLATFORM_ARM64)
add_compile_definitions(ENABLE_ARM64)
endif()
if(PLATFORM_ARM32)
add_compile_definitions(ENABLE_ARM32)
add_definitions(-mfloat-abi=softfp -mfpu=neon)
endif()
set(CMAKE_C_FLAGS "${CMAKE_ENABLE_C99} ${CMAKE_C_FLAGS}")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++17")
if("${CMAKE_BUILD_TYPE}" STREQUAL "Debug")
message(STATUS "build net library with debug info")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DDebug -g")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DDebug -g")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fvisibility=default")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fvisibility=default")
else()
message(STATUS "build net library release version")
set(CMAKE_C_FLAGS "-fPIC -fPIE -D_FORTIFY_SOURCE=2 -O3 -Wall -Werror -fstack-protector-strong -Wno-attributes \
-Wno-deprecated-declarations -Wno-missing-braces ${CMAKE_C_FLAGS}")
set(CMAKE_CXX_FLAGS "-fPIC -fPIE -D_FORTIFY_SOURCE=2 -O3 -Wall -Werror -fstack-protector-strong -Wno-attributes \
-Wno-deprecated-declarations -Wno-missing-braces -Wno-overloaded-virtual ${CMAKE_CXX_FLAGS}")
string(REPLACE "-g" "" CMAKE_C_FLAGS "${CMAKE_C_FLAGS}")
string(REPLACE "-g" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
endif()
function(create_library)
add_custom_command(TARGET net
POST_BUILD
COMMAND rm -rf tmp
COMMAND mkdir tmp
COMMAND cd tmp && ar -x ${OP_LIB}
COMMAND cd tmp && ar -x ${WRAPPER_LIB}
COMMAND echo "raw static library ${library_name} size:"
COMMAND ls -lh ${library_name}
COMMAND mv ${library_name} ./tmp && cd tmp && ar -x ${library_name}
COMMENT "unzip raw static library ${library_name}"
)
if(NOT PLATFORM_ARM32 AND NOT PLATFORM_ARM64)
set(CMSIS_LIB ${PKG_PATH}/tools/codegen/third_party/lib/libcmsis_nn.a)
add_custom_command(TARGET net POST_BUILD COMMAND cd tmp && ar -x ${CMSIS_LIB})
endif()
foreach(object_file ${OP_SRC})
add_custom_command(TARGET net POST_BUILD COMMAND mv ./tmp/${object_file} .)
endforeach()
add_custom_command(TARGET net
POST_BUILD
COMMAND ar cr ${library_name} *.o
COMMAND ranlib ${library_name}
COMMAND echo "new static library ${library_name} size:"
COMMAND ls -lh ${library_name}
COMMAND rm -rf tmp && rm -rf *.o
COMMENT "generate specified static library ${library_name}"
)
endfunction(create_library)
string(CONCAT library_name "lib" net ".a")
create_library()

View File

@ -1,65 +0,0 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_LIBRARY_SOURCE_MODEL_H_
#define MINDSPORE_LITE_LIBRARY_SOURCE_MODEL_H_
#include "model.h"
#include "session.h"
#include <new>
#include <string.h>
namespace mindspore::lite {
class MModel : public Model {
public:
void Free() override {
if (this->buf != nullptr) {
free(this->buf);
this->buf = nullptr;
this->buf_size_ = 0;
}
}
void Destroy() override { Free(); }
~MModel() override { Destroy(); }
void set_buf_size(size_t size) { buf_size_ = size; }
size_t buf_size() const { return buf_size_; }
private:
size_t buf_size_{0};
};
Model *Model::Import(const char *model_buf, size_t size) {
MS_NULLPTR_IF_NULL(model_buf);
if (size == 0) {
return nullptr;
}
MModel *model = new (std::nothrow) MModel();
MS_NULLPTR_IF_NULL(model);
model->buf = reinterpret_cast<char *>(malloc(size));
if (model->buf == nullptr) {
delete model;
return nullptr;
}
memcpy(model->buf, model_buf, size);
model->set_buf_size(size);
return model;
}
} // namespace mindspore::lite
#endif // MINDSPORE_LITE_LIBRARY_SOURCE_MODEL_H_

View File

@ -1,168 +0,0 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "weight.h"
#include "net.h"
static const unsigned char *g_Input0 = 0;
int SetInputs(const void **inputs, int num) {
if (inputs == NULL) {
return RET_ERROR;
}
  if (num != 1) {
return RET_ERROR;
}
g_Input0 = inputs[0];
return RET_OK;
}
int CopyOutputsData(void **outputs, int num) {
if (outputs == NULL) {
return RET_ERROR;
}
if (num != 1) {
return RET_ERROR;
}
memcpy(outputs[0], g_Buffer + 32, 40);
return RET_OK;
}
int GetBufferSize() {
return 39248;
}
int SetBuffer(void *buffer) {
if (buffer == NULL) {
return RET_ERROR;
}
g_Buffer = buffer;
return RET_OK;
}
void FreeResource() {
  g_Buffer = NULL;
g_Input0 = NULL;
void *allocated[] = {g_Weight14, g_Weight15, g_Weight16, g_Weight17, g_Weight18, g_Weight19, };
for (int i = 0; i < 6; ++i) {
free(allocated[i]);
allocated[i] = NULL;
}
}
void Inference() {
{
memset((int16_t *)(g_Buffer + 10144), 0, 2048);
memset((int16_t *)(g_Buffer + 12192), 0, 256);
memset((int *)(g_Buffer + 12448), 0, 6144);
memset((int8_t *)(g_Buffer + 18592), 0, 8112);
memset((int16_t *)(g_Buffer + 26704), 0, 12544);
QuantArg conv_param__quant_arg_in[1] = {{0.003921568859368562698, -128}};
QuantArg conv_param__quant_arg_w[12] = {{0.005689438898116350174, 0}, {0.006241692230105400085, 0}, {0.007301395758986473083, 0}, {0.005148916970938444138, 0}, {0.005132303573191165924, 0}, {0.004976313561201095581, 0}, {0.00564815988764166832, 0}, {0.002269793068990111351, 0}, {0.0030086529441177845, 0}, {0.005234404932707548141, 0}, {0.007580270525068044662, 0}, {0.004589735530316829681, 0}};
QuantArg conv_param__quant_arg_out[1] = {{0.01811622083187103271, 17}};
double conv_param__real_multiplier[12] = {0.001231577267748737653, 0.001351122051282624588, 0.00158051323770531417, 0.001114571969708069233, 0.001110975704014940469, 0.001077209041359399825, 0.001222641776980984765, 0.0004913359221160916793, 0.0006512749113606706042, 0.001133077320583530554, 0.001640880438584302065, 0.0009935275121536731122};
int conv_param__left_shift[12] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
int conv_param__right_shift[12] = {-9, -9, -9, -9, -9, -9, -9, -10, -10, -9, -9, -9};
int conv_param__quant_multiplier[12] = {1354133526, 1485574406, 1737792683, 1225484841, 1221530705, 1184403867, 1344308850, 1080459119, 1432168676, 1245831689, 1804167122, 1092395052};
int conv_param__out_act_min[1] = {-128};
int conv_param__out_act_max[1] = {127};
ConvQuantArg conv_param__conv_quant_arg = {(RoundingMode)(2), 2, conv_param__quant_arg_in, conv_param__quant_arg_w, conv_param__quant_arg_out, conv_param__real_multiplier, conv_param__left_shift, conv_param__right_shift, conv_param__quant_multiplier, conv_param__out_act_min, conv_param__out_act_max, 1, 12, 1, 2};
int thread_num = MSMIN(g_thread_num, 26);
ConvParameter conv_param_ = {{ "", 35, g_thread_num, 0}, conv_param__conv_quant_arg, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 28, 28, 1, 1, 26, 26, 12, thread_num, 0, 0, (PadMode)(2), (ActType)(0), 0, 0, 0};
PackInputToC8Int8((int8_t *)(g_Input0), (int16_t *)(g_Buffer + 26704), &conv_param_);
Conv3x3Int8((int16_t *)(g_Buffer + 26704), g_Weight10, g_Weight11, (int8_t *)(g_Buffer + 0), (int16_t *)(g_Buffer + 10144), (int16_t *)(g_Buffer + 12192), (int *)(g_Buffer + 12448), (int8_t *)(g_Buffer + 18592), 0, &conv_param_);
PackNC4HW4ToNHWCInt8((int8_t *)(g_Buffer + 18592), (int8_t *)(g_Buffer + 0), 1, 676, 12);
}
{
static QuantArg pooling_parameter_quant_in = {0.01811622083187103271, 17};
static QuantArg pooling_parameter_quant_out = {0.01811622083187103271, 17};
static QuantArg *pooling_parameter_quant[2] = { &pooling_parameter_quant_in, &pooling_parameter_quant_out};
const PoolingParameter pooling_parameter = {{ "", 92, g_thread_num, 0}, (PoolMode)(1), (RoundMode)(2), (PadMode)(2), (ActType)(0), 0, false, 2, 2, 2, 2, 26, 26, 1, 12, 13, 13, 1, 12, 0, 0, 0, 0, 0, pooling_parameter_quant, false};
MaxPoolingInt8((int8_t *)(g_Buffer + 0), (int8_t *)(g_Buffer + 8112), (PoolingParameter *)&pooling_parameter, 0);
}
{
memset((int16_t *)(g_Buffer + 10144), 0, 4096);
memset((int16_t *)(g_Buffer + 14240), 0, 256);
memset((int *)(g_Buffer + 14496), 0, 6144);
memset((int8_t *)(g_Buffer + 20640), 0, 1452);
memset((int16_t *)(g_Buffer + 22092), 0, 5408);
QuantArg conv_param__quant_arg_in[1] = {{0.01811622083187103271, 17}};
QuantArg conv_param__quant_arg_w[12] = {{0.006381968967616558075, 0}, {0.005092236679047346115, 0}, {0.004954888485372066498, 0}, {0.007594361435621976852, 0}, {0.006317862775176763535, 0}, {0.004739056341350078583, 0}, {0.004733041394501924515, 0}, {0.005125139374285936356, 0}, {0.005773660261183977127, 0}, {0.007067613303661346436, 0}, {0.00728381425142288208, 0}, {0.004714466165751218796, 0}};
QuantArg conv_param__quant_arg_out[1] = {{0.118615470826625824, 31}};
double conv_param__real_multiplier[12] = {0.0009747224012760375951, 0.0007777407468524931162, 0.0007567634496453238277, 0.001159891919861241348, 0.0009649314419479496259, 0.0007237992569070154231, 0.0007228806183814449719, 0.0007827659621256170689, 0.0008818150205007141765, 0.001079441365823280083, 0.001112461807995879974, 0.0007200436103814696152};
int conv_param__left_shift[12] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
int conv_param__right_shift[12] = {-10, -10, -10, -9, -10, -10, -10, -10, -10, -9, -9, -10};
int conv_param__quant_multiplier[12] = {2143437228, 1710269989, 1664140425, 1275314653, 2121906681, 1591651398, 1589631291, 1721320554, 1939131737, 1186858333, 1223164693, 1583392644};
int conv_param__out_act_min[1] = {-128};
int conv_param__out_act_max[1] = {127};
ConvQuantArg conv_param__conv_quant_arg = {(RoundingMode)(1), 2, conv_param__quant_arg_in, conv_param__quant_arg_w, conv_param__quant_arg_out, conv_param__real_multiplier, conv_param__left_shift, conv_param__right_shift, conv_param__quant_multiplier, conv_param__out_act_min, conv_param__out_act_max, 1, 12, 1, 2};
int thread_num = MSMIN(g_thread_num, 11);
ConvParameter conv_param_ = {{ "", 35, g_thread_num, 0}, conv_param__conv_quant_arg, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 13, 13, 12, 1, 11, 11, 12, thread_num, 0, 0, (PadMode)(2), (ActType)(0), 0, 0, 0};
PackInputToC8Int8((int8_t *)(g_Buffer + 8112), (int16_t *)(g_Buffer + 22092), &conv_param_);
Conv3x3Int8((int16_t *)(g_Buffer + 22092), g_Weight12, g_Weight13, (int8_t *)(g_Buffer + 0), (int16_t *)(g_Buffer + 10144), (int16_t *)(g_Buffer + 14240), (int *)(g_Buffer + 14496), (int8_t *)(g_Buffer + 20640), 0, &conv_param_);
PackNC4HW4ToNHWCInt8((int8_t *)(g_Buffer + 20640), (int8_t *)(g_Buffer + 0), 1, 121, 12);
}
{
static QuantArg pooling_parameter_quant_in = {0.118615470826625824, 31};
static QuantArg pooling_parameter_quant_out = {0.118615470826625824, 31};
static QuantArg *pooling_parameter_quant[2] = { &pooling_parameter_quant_in, &pooling_parameter_quant_out};
const PoolingParameter pooling_parameter = {{ "", 92, g_thread_num, 0}, (PoolMode)(1), (RoundMode)(2), (PadMode)(2), (ActType)(0), 0, false, 2, 2, 2, 2, 11, 11, 1, 12, 5, 5, 1, 12, 0, 0, 0, 0, 0, pooling_parameter_quant, false};
MaxPoolingInt8((int8_t *)(g_Buffer + 0), (int8_t *)(g_Buffer + 1456), (PoolingParameter *)&pooling_parameter, 0);
}
{
const ReshapeQuantArg reshape_quant_arg = {{0.118615470826625824, 31}, {0.118615470826625824, 31}, -128, 127};
Int8Reshape((int8_t *)(g_Buffer + 1456), (int8_t *)(g_Buffer + 0), 300, reshape_quant_arg);
}
{
int32_t tmp_weight_zp = 0;
RowMajor2Row16x4MajorInt8((int8_t *)(g_Buffer + 0)+0, (int8_t *)(g_Buffer + 10144), 1, 300);
CalcInputSums((int8_t *)(g_Buffer + 0)+0, 1, 300, tmp_weight_zp, (int *)(g_Buffer + 11360), RowMajor);
float filter_scale[1] = {0.007667620200663805008};
int filter_zp[1] = {0};
int left_shift[1] = {0};
int right_shift[1] = {-8};
int multiplier[1] = {1379728867};
const MatmulQuantParameter matmul_quant_parameter = {{0.118615470826625824, 31}, {0, 0}, {0.3623915016651153564, 11}, -128, 127, filter_scale, filter_zp, left_shift, right_shift, multiplier};
int32_t *cur_left = matmul_quant_parameter.left_shift_;
int32_t *cur_right = matmul_quant_parameter.right_shift_;
int32_t *cur_mul = matmul_quant_parameter.quant_multiplier_ ;
int32_t *cur_zp = matmul_quant_parameter.filter_zp_ ;
MatmulInt8Opt((int8_t *)(g_Buffer + 10144), g_Weight15+0 + 0, (int8_t *)(g_Buffer + 304)+0+0, 1, 20, 304, (int *)(g_Buffer + 11360), g_Weight16+0, -128, 127, 11, cur_mul, cur_left, cur_right, 20, false, cur_zp);
}
{
int32_t tmp_weight_zp = 0;
RowMajor2Row16x4MajorInt8((int8_t *)(g_Buffer + 304)+0, (int8_t *)(g_Buffer + 10144), 1, 20);
CalcInputSums((int8_t *)(g_Buffer + 304)+0, 1, 20, tmp_weight_zp, (int *)(g_Buffer + 10272), RowMajor);
float filter_scale[1] = {0.006908571347594261169};
int filter_zp[1] = {0};
int left_shift[1] = {0};
int right_shift[1] = {-8};
int multiplier[1] = {1282256865};
const MatmulQuantParameter matmul_quant_parameter = {{0.3623915016651153564, 11}, {0, 0}, {1.073398709297180176, -20}, -128, 127, filter_scale, filter_zp, left_shift, right_shift, multiplier};
int32_t *cur_left = matmul_quant_parameter.left_shift_;
int32_t *cur_right = matmul_quant_parameter.right_shift_;
int32_t *cur_mul = matmul_quant_parameter.quant_multiplier_ ;
int32_t *cur_zp = matmul_quant_parameter.filter_zp_ ;
MatmulInt8Opt((int8_t *)(g_Buffer + 10144), g_Weight18+0 + 0, (int8_t *)(g_Buffer + 0)+0+0, 1, 10, 32, (int *)(g_Buffer + 10272), g_Weight19+0, -128, 127, -20, cur_mul, cur_left, cur_right, 10, false, cur_zp);
}
{
SoftmaxQuantArg quant_params = {{1.073398709297180176, 20}, {0.00390625, -128}, -128, 127, 1152553088, 27, 27};
const SoftmaxParameter softmax_parameter = {{ "", 138, g_thread_num, 0}, 1, {1, 10}, 10, 2};
memset((int *)(g_Buffer + 10144), 0, 40);
memset((int *)(g_Buffer + 10184), 0, 40);
SoftmaxInt8((int8_t *)(g_Buffer + 0), (int8_t *)(g_Buffer + 16), 1, (int *)(g_Buffer + 10144), (int *)(g_Buffer + 10184), &quant_params, (SoftmaxParameter *)&softmax_parameter);
}
{
DoDequantizeInt8ToFp32((int8_t *)(g_Buffer + 16), (float *)(g_Buffer + 32), 0.00390625, -128, 10);
}
}

View File

@ -1,25 +0,0 @@
include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../include/)
set(OP_SRC
common_func.c.o
common_func_int8.c.o
conv3x3_int8.c.o
conv_int8.c.o
fixed_point.c.o
matmul_int8.c.o
matmul_int8_wrapper.c.o
pack_int8.c.o
pooling_int8.c.o
quant_dtype_cast_int8.c.o
relux_int8.c.o
reshape_int8.c.o
softmax_int8.c.o
weight.c.o
net.c.o
session.cc.o
tensor.cc.o
)
file(GLOB NET_SRC
${CMAKE_CURRENT_SOURCE_DIR}/*.cc
${CMAKE_CURRENT_SOURCE_DIR}/*.c
)
add_library(net STATIC ${NET_SRC})

View File

@ -1,56 +0,0 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifdef __cplusplus
extern "C" {
#endif
/**
* set input tensors
 * @param inputs, array of input data pointers; the model may have more than one input tensor.
 * @param num, the number of model inputs.
**/
int SetInputs(const void **inputs, int num);
int CopyOutputsData(void **outputs, int num);
/**
 * @param weight_buffer, the address of the weight binary content in memory
 * @param weight_size, the size of the weight buffer in bytes
**/
int Init(void *weight_buffer, int weight_size);
/**
 * get the size of the workspace memory required for inference.
**/
int GetBufferSize();
/**
 * set the workspace memory for inference
**/
int SetBuffer(void *buffer);
/**
 * free the memory of packed weights, and reset the workspace buffer and input pointers to NULL
**/
void FreeResource();
/**
* net inference function
**/
void Inference();
#ifdef __cplusplus
}
#endif
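For orientation, the generated session code later in this commit drives these functions in a fixed order. The following is a minimal standalone sketch of that calling sequence, not the author's code: RET_OK is 0 in the generated sources, and buffer ownership and error handling are reduced to the essentials.
```cpp
#include <cstdlib>
#include "net.h"

// Sketch: Init -> GetBufferSize/SetBuffer -> SetInputs -> Inference ->
// CopyOutputsData -> FreeResource, mirroring LiteSession in this commit.
int RunOnce(void *weights, int weight_size, const void *input, void *output) {
  if (Init(weights, weight_size) != 0) {           // unpack the model weights
    return -1;
  }
  void *workspace = std::malloc(GetBufferSize());  // caller-owned scratch memory
  if (workspace == nullptr || SetBuffer(workspace) != 0) {
    std::free(workspace);
    return -1;
  }
  const void *inputs[] = {input};                  // this model has one input tensor
  SetInputs(inputs, 1);
  Inference();
  void *outputs[] = {output};
  CopyOutputsData(outputs, 1);                     // copy results out of the workspace
  FreeResource();                                  // frees packed weights, nulls the buffer pointer
  std::free(workspace);
  return 0;
}
```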

View File

@ -1,149 +0,0 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "session.h"
#include "mmodel.h"
#include "net.h"
#include <new>
namespace mindspore {
namespace lite {
int LiteSession::CompileGraph(lite::Model *model) {
inputs_.resize(1);
Vector<int32_t> in_shape_0;
in_shape_0.resize(4);
in_shape_0[0] = 1;
in_shape_0[1] = 28;
in_shape_0[2] = 28;
in_shape_0[3] = 1;
inputs_[0] = new (std::nothrow) MTensor(String("graph_input-0"), kNumberTypeInt8, in_shape_0);
MS_ERROR_IF_NULL(inputs_[0]);
outputs_.resize(1);
Vector<int32_t> out_shape_0;
out_shape_0.resize(2);
out_shape_0[0] = 1;
out_shape_0[1] = 10;
outputs_[0] = new (std::nothrow) MTensor(String("int8toft32_Softmax-7_post0/output-0"), kNumberTypeFloat32, out_shape_0);
MS_ERROR_IF_NULL(outputs_[0]);
int ret = Init(model->buf, static_cast<MModel *>(model)->buf_size());
return ret;
}
LiteSession::~LiteSession() {
FreeResource();
if (runtime_buffer_ != nullptr) {
free(runtime_buffer_);
runtime_buffer_ = nullptr;
}
for (auto &input : inputs_) {
if (input == nullptr) {
continue;
}
delete input;
input = nullptr;
}
for (auto &output : outputs_) {
if (output == nullptr) {
continue;
}
delete output;
output = nullptr;
}
}
int LiteSession::RunGraph(const KernelCallBack &before, const KernelCallBack &after) {
const void *inputs_data[inputs_.size()];
for (size_t i = 0; i < inputs_.size(); ++i) {
inputs_data[i] = inputs_[i]->MutableData();
}
SetInputs(inputs_data, inputs_.size());
Inference();
void *outputs_data[outputs_.size()];
for (size_t i = 0; i < outputs_.size(); ++i) {
outputs_data[i] = outputs_[i]->MutableData();
}
CopyOutputsData(outputs_data, outputs_.size());
return RET_OK;
}
int LiteSession::InitRuntimeBuffer() {
int buffer_size = GetBufferSize();
runtime_buffer_ = malloc(buffer_size);
if (runtime_buffer_ == nullptr) {
return RET_ERROR;
}
int ret = SetBuffer(runtime_buffer_);
if (ret != RET_OK) {
return RET_ERROR;
}
return RET_OK;
}
Vector<tensor::MSTensor *> LiteSession::GetInputs() const {
Vector<tensor::MSTensor *> inputs;
for (const auto &input : inputs_) {
inputs.push_back(input);
}
return inputs;
}
Vector<tensor::MSTensor *> LiteSession::GetOutputsByNodeName(const String &node_name) const {
Vector<tensor::MSTensor *> outputs;
return outputs;
}
Vector<String> LiteSession::GetOutputTensorNames() const {
Vector<String> output_names;
for (const auto &output : outputs_) {
output_names.push_back(output->tensor_name());
}
return output_names;
}
mindspore::tensor::MSTensor *LiteSession::GetOutputByTensorName(const String &tensor_name) const {
for (const auto &output : outputs_) {
if (output->tensor_name() == tensor_name) {
return output;
}
}
return nullptr;
}
} // namespace lite
session::LiteSession *session::LiteSession::CreateSession(const lite::Context *context) {
auto *session = new (std::nothrow) lite::LiteSession();
MS_NULLPTR_IF_NULL(session);
int ret = session->InitRuntimeBuffer();
MS_NULLPTR_IF_ERROR(ret);
return session;
}
session::LiteSession *session::LiteSession::CreateSession(const char *model_buf, size_t size,
const lite::Context *context) {
session::LiteSession *session = CreateSession(context);
MS_NULLPTR_IF_NULL(session);
lite::Model *model = lite::Model::Import(model_buf, size);
int ret = session->CompileGraph(model);
MS_NULLPTR_IF_ERROR(ret);
delete model;
return session;
}
} // namespace mindspore

View File

@ -1,86 +0,0 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_MICRO_LIBRARY_SOURCE_SESSION_H_
#define MINDSPORE_LITE_MICRO_LIBRARY_SOURCE_SESSION_H_
#include "errorcode.h"
#include "lite_session.h"
#include "tensor.h"
namespace mindspore {
namespace lite {
#define MS_ERROR_IF_NULL(ptr) \
do { \
if ((ptr) == nullptr) { \
return mindspore::lite::RET_ERROR; \
} \
} while (0)
#define MS_NULLPTR_IF_NULL(ptr) \
do { \
if ((ptr) == nullptr) { \
return nullptr; \
} \
} while (0)
#define MS_NULLPTR_IF_ERROR(ptr) \
do { \
if ((ptr) != mindspore::lite::RET_OK) { \
return nullptr; \
} \
} while (0)
class LiteSession : public session::LiteSession {
public:
LiteSession() = default;
~LiteSession() override;
void BindThread(bool if_bind) override {}
int CompileGraph(lite::Model *model) override;
Vector<tensor::MSTensor *> GetInputs() const override;
mindspore::tensor::MSTensor *GetInputsByTensorName(const String &tensor_name) const override { return nullptr; }
int RunGraph(const KernelCallBack &before = nullptr, const KernelCallBack &after = nullptr) override;
Vector<tensor::MSTensor *> GetOutputsByNodeName(const String &node_name) const override;
Vector<String> GetOutputTensorNames() const override;
mindspore::tensor::MSTensor *GetOutputByTensorName(const String &tensor_name) const override;
int Resize(const Vector<tensor::MSTensor *> &inputs, const Vector<Vector<int>> &dims) override { return RET_ERROR; }
int InitRuntimeBuffer();
private:
Vector<MTensor *> inputs_;
Vector<MTensor *> outputs_;
  void *runtime_buffer_{nullptr};
};
} // namespace lite
} // namespace mindspore
#endif // MINDSPORE_LITE_MICRO_LIBRARY_SOURCE_SESSION_H_

View File

@ -1,84 +0,0 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "tensor.h"
namespace mindspore {
namespace lite {
size_t DataTypeSize(const TypeId type) {
switch (type) {
case kNumberTypeFloat64:
return sizeof(double);
case kNumberTypeFloat:
case kNumberTypeFloat32:
return sizeof(float);
case kNumberTypeInt8:
return sizeof(int8_t);
case kNumberTypeUInt8:
return sizeof(uint8_t);
case kNumberTypeFloat16:
case kNumberTypeInt16:
return sizeof(int16_t);
case kNumberTypeInt32:
return sizeof(int32_t);
case kNumberTypeInt64:
return sizeof(int64_t);
case kNumberTypeUInt16:
return sizeof(uint16_t);
case kNumberTypeUInt32:
return sizeof(uint32_t);
case kNumberTypeUInt64:
return sizeof(uint64_t);
case kNumberTypeBool:
return sizeof(bool);
case kObjectTypeString:
return sizeof(char);
case kObjectTypeTensorType:
default:
return 0;
}
}
MTensor::~MTensor() {
if (data_ != nullptr) {
free(data_);
data_ = nullptr;
}
}
int64_t MTensor::ElementsNum() const {
int64_t elements = 1;
for (int i : shape_) {
elements *= i;
}
return elements;
}
size_t MTensor::Size() const {
size_t element_size = DataTypeSize(data_type_);
return element_size * ElementsNum();
}
void *MTensor::MutableData() {
if (data_ == nullptr) {
data_ = malloc(this->Size());
}
return data_;
}
} // namespace lite
} // namespace mindspore

View File

@ -1,75 +0,0 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_MICRO_LIBRARY_SOURCE_TENSOR_H_
#define MINDSPORE_LITE_MICRO_LIBRARY_SOURCE_TENSOR_H_
#include "ms_tensor.h"
#include "api/format.h"
namespace mindspore {
namespace lite {
struct LiteQuantParam {
double scale;
int32_t zeroPoint;
float var_corr{1};
float mean_corr{0};
bool inited;
Vector<float> clusters{};
int bitNum;
int roundType;
int multiplier;
int dstDtype;
};
class MTensor : public mindspore::tensor::MSTensor {
public:
MTensor() = default;
MTensor(String name, TypeId type, Vector<int32_t> shape) : tensor_name_(name), data_type_(type), shape_(shape) {}
~MTensor() override;
void set_allocator(AllocatorPtr allocator) override {}
AllocatorPtr allocator() const override { return nullptr; }
TypeId data_type() const override { return data_type_; }
void set_data_type(TypeId data_type) override { data_type_ = data_type; }
void set_format(mindspore::Format format) override {}
mindspore::Format format() const override { return mindspore::NHWC; }
Vector<int> shape() const override { return shape_; }
void set_shape(const Vector<int> &shape) override { shape_ = shape; }
int64_t ElementsNum() const override;
size_t Size() const override;
String tensor_name() const override { return tensor_name_; }
void set_tensor_name(const String &name) override { tensor_name_ = name; }
void *MutableData() override;
void *data() override { return data_; }
void set_data(void *data) override { data_ = data; }
Vector<LiteQuantParam> quant_params() const override { return this->quant_params_; }
void set_quant_params(const Vector<LiteQuantParam> quant_params) override { this->quant_params_ = quant_params; }
  bool IsConst() const override { return this->data_ != nullptr; }
private:
String tensor_name_;
TypeId data_type_;
Vector<int> shape_;
void *data_ = nullptr;
Vector<LiteQuantParam> quant_params_;
};
} // namespace lite
} // namespace mindspore
#endif // MINDSPORE_LITE_MICRO_LIBRARY_SOURCE_TENSOR_H_

View File

@ -1,106 +0,0 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "weight.h"
int g_thread_num = 1;
unsigned char *g_Buffer = NULL;
int16_t g_Weight10[1536];
int32_t g_Weight11[12];
int16_t g_Weight12[3072];
int32_t g_Weight13[12];
int32_t *g_Weight14 = NULL;
int8_t *g_Weight15 = NULL;
int32_t *g_Weight16 = NULL;
int32_t *g_Weight17 = NULL;
int8_t *g_Weight18 = NULL;
int32_t *g_Weight19 = NULL;
int8_t g_Weight6[6000];
int32_t g_Weight7[20];
int8_t g_Weight8[200];
int32_t g_Weight9[10];
int Init(void *weight_buffer, int weight_size) {
if (weight_buffer == NULL) {
return RET_ERROR;
}
struct ModelParameter {
void *addr;
size_t size;
size_t offset;
};
struct ModelParameter model_params[] = {
{g_Weight10, 3072, 0},
{g_Weight11, 48, 3072},
{g_Weight12, 6144, 3120},
{g_Weight13, 48, 9264},
{g_Weight6, 6000, 9312},
{g_Weight7, 80, 15312},
{g_Weight8, 200, 15392},
{g_Weight9, 40, 15592},
};
for(int i = 0; i < 8; ++i) {
if (model_params[i].offset + model_params[i].size > weight_size) {
return RET_ERROR;
}
memcpy(model_params[i].addr, (weight_buffer + model_params[i].offset), model_params[i].size);
}
{
g_Weight14 = malloc(80);
if (g_Weight14 == NULL) {
return RET_ERROR;
}
memset(g_Weight14, 0, 80);
memcpy(g_Weight14, g_Weight7, 80);
g_Weight16 = malloc(80);
if (g_Weight16 == NULL) {
return RET_ERROR;
}
memset(g_Weight16, 0, 80);
g_Weight15 = malloc(6080);
if (g_Weight15 == NULL) {
return RET_ERROR;
}
memset(g_Weight15, 0, 6080);
int init_filter_zp[1] = {0};
InitInt8MatrixB(g_Weight6, g_Weight16, g_Weight15, 1, 300, 20, 20, 304, 31, init_filter_zp, g_Weight14, true, false);
}
{
g_Weight17 = malloc(48);
if (g_Weight17 == NULL) {
return RET_ERROR;
}
memset(g_Weight17, 0, 48);
memcpy(g_Weight17, g_Weight9, 40);
g_Weight19 = malloc(48);
if (g_Weight19 == NULL) {
return RET_ERROR;
}
memset(g_Weight19, 0, 48);
g_Weight18 = malloc(384);
if (g_Weight18 == NULL) {
return RET_ERROR;
}
memset(g_Weight18, 0, 384);
int init_filter_zp[1] = {0};
InitInt8MatrixB(g_Weight8, g_Weight19, g_Weight18, 1, 20, 10, 12, 32, 11, init_filter_zp, g_Weight17, true, false);
}
return RET_OK;
}

View File

@ -1,53 +0,0 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "nnacl/common_func.h"
#include "nnacl/errorcode.h"
#include "nnacl/int8/common_func_int8.h"
#include "nnacl/int8/conv3x3_int8.h"
#include "nnacl/int8/conv_int8.h"
#include "nnacl/int8/fixed_point.h"
#include "nnacl/int8/matmul_int8.h"
#include "nnacl/int8/pooling_int8.h"
#include "nnacl/int8/quant_dtype_cast_int8.h"
#include "nnacl/int8/relux_int8.h"
#include "nnacl/int8/reshape_int8.h"
#include "nnacl/int8/softmax_int8.h"
#include "wrapper/int8/matmul_int8_wrapper.h"
#include <stdlib.h>
#include <string.h>
extern unsigned char *g_Buffer;
enum STATUS {
RET_OK = 0,
RET_ERROR = 1,
};
extern int g_thread_num;
extern int16_t g_Weight10[];
extern int32_t g_Weight11[];
extern int16_t g_Weight12[];
extern int32_t g_Weight13[];
extern int32_t *g_Weight14;
extern int8_t *g_Weight15;
extern int32_t *g_Weight16;
extern int32_t *g_Weight17;
extern int8_t *g_Weight18;
extern int32_t *g_Weight19;
extern int8_t g_Weight6[];
extern int32_t g_Weight7[];
extern int8_t g_Weight8[];
extern int32_t g_Weight9[];

View File

@ -1,149 +0,0 @@
# Android Compilation and Deployment
`Linux` `Android` `IoT` `C/C++` `End-to-End` `Model Compilation` `Model Code Generation` `Model Deployment` `Inference Application` `Beginner` `Intermediate` `Advanced`
<!-- TOC -->
- Android Compilation and Deployment
    - [Build Dependencies](#build-dependencies)
    - [Project Build](#project-build)
    - [Project Deployment](#project-deployment)
- [More Details](#more-details)
    - [Linux x86_64 Compilation and Deployment](#linux-x86_64-compilation-and-deployment)
    - [STM32F746 Compilation and Deployment](#stm32f746-compilation-and-deployment)
<!-- /TOC -->
## Android Compilation and Deployment
This tutorial uses MobileNetV2, compiled and deployed on an Android phone, as an example to walk users through the whole flow of generating code with codegen on the Android platform, building the project, and deploying it. For how to obtain converter and codegen, and for detailed parameter descriptions, see the MindSpore [build guide](https://www.mindspore.cn/lite/docs/zh-CN/master/use/build.html).
### Build Dependencies
Building and deploying for Android requires the ANDROID_NDK environment variable to be configured in advance.
- NDK 21.3
- [GCC](https://gcc.gnu.org/releases.html) >= 7.3.0
- [CMake](https://cmake.org/download/) >= 3.18.3
### Project Build
#### Quick Start
Enter the `mindspore/mindspore/lite/micro/examples/mobilenetv2` directory and run the script `mobilenetv2.sh` to automatically generate the model inference code and build the project:
```
bash mobilenetv2.sh
```
codegen compiles the [MobileNetV2 model](https://download.mindspore.cn/model_zoo/official/lite/mobilenetv2_imagenet/r1.2/mobilenetv2.ms) and generates the corresponding model inference code. The command is as follows:
```bash
./codegen --codePath=. --modelPath=mobilenetv2.ms --target=ARM64
```
For more codegen command-line options, see the [detailed codegen tool introduction](https://www.mindspore.cn/lite/docs/zh-CN/master/use/downloads.html).
#### Generated Project Description
```bash
├── mobilenetv2
└── operator_library
```
##### Operator Static Library Directory
Before building this project, obtain the [release package](https://www.mindspore.cn/lite/docs/zh-CN/master/use/downloads.html) for the Android platform in advance.
The directory layout of the Android release package is as follows:
```text
mindspore-lite-{version}-inference-android-{arch}
├── inference
│   ├── include                        # inference framework headers
│   ├── lib                            # inference framework libraries
│   │   ├── libmindspore-lite.a        # static library of the MindSpore Lite inference framework
│   │   └── libmindspore-lite.so       # shared library of the MindSpore Lite inference framework
│   ├── minddata                       # image processing library
│   │   ├── include
│   │   └── lib
│   │       └── libminddata-lite.so    # image processing shared library
│   └── third_party                    # NPU libraries
│       └── hiai_ddk
└── tools
    ├── benchmark                      # benchmark tool
    │   └── benchmark
    └── codegen                        # code generation tool
        ├── include                    # operator headers
        └── lib                        # operator static libraries
```
The generated project directory is as follows:
```bash
├── mobilenetv2        # root of the generated code
    ├── benchmark      # benchmark directory of the generated code
    └── src            # model inference code directory
```
#### Building the Generated Project
Organize the generated model inference code together with the Android operator static libraries to build the model inference static library.
Enter the project directory, then create and enter the build directory:
```bash
mkdir mobilenetv2/build && cd mobilenetv2/build
```
Start the build:
```bash
cmake -DCMAKE_BUILD_TYPE=Release \
-DCMAKE_TOOLCHAIN_FILE="${ANDROID_NDK}/build/cmake/android.toolchain.cmake" \
-DANDROID_ABI="arm64-v8a" \
-DANDROID_TOOLCHAIN_NAME="aarch64-linux-android-clang" \
-DANDROID_NATIVE_API_LEVEL="19" \
-DPLATFORM_ARM64=ON \
-DPKG_PATH={path to}/mindspore-lite-{version}-inference-android-{arch} ..
make
```
`{path to}` and `{version}` must be filled in according to the actual environment. To build for Android arm32 instead, use:
```bash
cmake -DCMAKE_BUILD_TYPE=Release \
-DCMAKE_TOOLCHAIN_FILE="${ANDROID_NDK}/build/cmake/android.toolchain.cmake" \
-DANDROID_ABI="armeabi-v7a" \
-DANDROID_TOOLCHAIN_NAME="clang" \
-DANDROID_NATIVE_API_LEVEL="19" \
-DPLATFORM_ARM32=ON \
-DPKG_PATH={path to}/mindspore-lite-{version}-inference-android-{arch} ..
make
```
At this point `libnet.a`, the inference execution library, has been generated under `mobilenetv2/build/src/`, and the `benchmark` executable under `mobilenetv2/build`. The corresponding model parameter file net.bin sits in the src directory of the generated code.
### Project Deployment
Use adb to copy the generated executable benchmark, the input file mobilenetv2_input.bin, and the model parameter file net.bin to the target Android device (a sketch of the adb commands follows below), then run:
```bash
./benchmark mobilenetv2_input.bin net.bin 100
```
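A typical push-and-run sequence might look like the sketch below; the device directory `/data/local/tmp` and the local artifact paths are illustrative assumptions, not mandated by this tutorial.
```bash
# Push the artifacts to the device, then run the benchmark (paths are illustrative)
adb push mobilenetv2/build/benchmark /data/local/tmp/
adb push mobilenetv2_input.bin /data/local/tmp/
adb push mobilenetv2/src/net.bin /data/local/tmp/
adb shell "cd /data/local/tmp && chmod +x benchmark && ./benchmark mobilenetv2_input.bin net.bin 100"
```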
#### Execution Result
```bash
=========run benchmark========
input 0: mobilenetv2_input.bin
name: Softmax-65, ,DataType: 43, Size: 4004, Shape:1 1001, Data:
0.000010,0.000010,0.000014,0.000091,0.000080,0.000717,0.000112,0.000738,0.000008,0.000003
=========run success========
```
## More Details
### [Linux x86_64 Compilation and Deployment](https://www.mindspore.cn/lite/docs/zh-CN/master/use/micro.html)
### [STM32F746 Compilation and Deployment](https://gitee.com/mindspore/mindspore/tree/master/mindspore/lite/micro/example/mnist_stm32f746)

View File

@ -1,146 +0,0 @@
#!/bin/bash
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
set -e
usage()
{
echo "Usage:"
echo "bash build.sh [-I arm64|arm32]"
echo "Options:"
echo " -I download and build for arm64 or arm32, default arm64"
}
LITE_PLATFORM="arm64"
while getopts 'I:' OPT
do
OPTARG=$(echo ${OPTARG} | tr '[A-Z]' '[a-z]')
case $OPT in
I)
if [[ "$OPTARG" == "arm64" ]]; then
LITE_PLATFORM="arm64"
elif [[ "$OPTARG" == "arm32" ]]; then
LITE_PLATFORM="arm32"
else
echo "-I parameter must be arm64 or arm32"
exit 1
fi
;;
*)
echo "Unknown option ${opt}!"
usage
exit 1
esac
done
BASEPATH="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
MINDSPORE_ROOT_DIR=${BASEPATH%%/mindspore/lite/micro/example/mobilenetv2}
echo "current dir is: ${BASEPATH}"
MOBILE_NAME=mobilenetv2
MOBILE_FILE=${MOBILE_NAME}.ms
get_version() {
local VERSION_HEADER=${MINDSPORE_ROOT_DIR}/mindspore/lite/include/version.h
local VERSION_MAJOR=$(grep "const int ms_version_major =" ${VERSION_HEADER} | tr -dc "[0-9]")
local VERSION_MINOR=$(grep "const int ms_version_minor =" ${VERSION_HEADER} | tr -dc "[0-9]")
local VERSION_REVISION=$(grep "const int ms_version_revision =" ${VERSION_HEADER} | tr -dc "[0-9]")
VERSION_STR=${VERSION_MAJOR}.${VERSION_MINOR}.${VERSION_REVISION}
}
download_inference() {
if [[ "${LITE_PLATFORM}" == "arm64" ]]; then
local ARM_NAME=aarch64
local DEVICE=gpu
else
local ARM_NAME=aarch32
local DEVICE=cpu
fi
MINDSPORE_FILE_NAME="mindspore-lite-${VERSION_STR}-android-${ARM_NAME}"
local MINDSPORE_FILE="${MINDSPORE_FILE_NAME}.tar.gz"
local MINDSPORE_LITE_DOWNLOAD_URL="https://ms-release.obs.cn-north-4.myhuaweicloud.com/${VERSION_STR}/MindSpore/lite/release/android/${DEVICE}/${MINDSPORE_FILE}"
if [ ! -e ${BASEPATH}/build/${MINDSPORE_FILE} ]; then
wget -c -O ${BASEPATH}/build/${MINDSPORE_FILE} --no-check-certificate ${MINDSPORE_LITE_DOWNLOAD_URL}
fi
tar xzvf ${BASEPATH}/build/${MINDSPORE_FILE} -C ${BASEPATH}/build/ || exit 1
rm ${BASEPATH}/build/${MINDSPORE_FILE} || exit 1
PKG_PATH=${BASEPATH}/build/${MINDSPORE_FILE_NAME}
}
download_mobile() {
local MOBILE_DOWNLOAD_URL=https://download.mindspore.cn/model_zoo/official/lite/mobilenetv2_imagenet/r1.2/${MOBILE_FILE}
if [ ! -e ${BASEPATH}/build/${MOBILE_FILE} ]; then
wget -c -O ${BASEPATH}/build/${MOBILE_FILE} --no-check-certificate ${MOBILE_DOWNLOAD_URL}
fi
}
gen_mobile() {
local CODEGEN_FILE_NAME="mindspore-lite-${VERSION_STR}-linux-x64"
local CODEGEN_FILE="${CODEGEN_FILE_NAME}.tar.gz"
local CODEGEN_LITE_DOWNLOAD_URL="https://ms-release.obs.cn-north-4.myhuaweicloud.com/${VERSION_STR}/MindSpore/lite/release/linux/x86_64/${CODEGEN_FILE}"
if [ ! -e ${BASEPATH}/build/${CODEGEN_FILE} ]; then
wget -c -O ${BASEPATH}/build/${CODEGEN_FILE} --no-check-certificate ${CODEGEN_LITE_DOWNLOAD_URL}
fi
tar xzvf ${BASEPATH}/build/${CODEGEN_FILE} -C ${BASEPATH}/build/ || exit 1
rm ${BASEPATH}/build/${CODEGEN_FILE} || exit 1
CODEGEN_PATH=${BASEPATH}/build/${CODEGEN_FILE_NAME}/tools/codegen
if [[ "${LITE_PLATFORM}" == "arm64" ]]; then
local TARGET=ARM64
else
local TARGET=ARM32A
fi
${CODEGEN_PATH}/codegen --codePath=${BASEPATH}/build --modelPath=${BASEPATH}/build/${MOBILE_FILE} --target=${TARGET}
}
mkdir -p ${BASEPATH}/build
get_version
download_inference
echo "downloading ${MOBILE_FILE}!"
download_mobile
echo "generating mobilenetv2"
gen_mobile
BENCHMARK_PATH=${BASEPATH}/build/${MOBILE_NAME}
# build benchmark
rm -rf ${BASEPATH}/build/benchmark
mkdir -p ${BASEPATH}/build/benchmark && cd ${BASEPATH}/build/benchmark || exit 1
if [[ "${LITE_PLATFORM}" == "arm64" ]]; then
echo "making arm64"
cmake -DCMAKE_BUILD_TYPE=Release \
-DCMAKE_TOOLCHAIN_FILE="${ANDROID_NDK}/build/cmake/android.toolchain.cmake" \
-DANDROID_ABI="arm64-v8a" \
-DANDROID_TOOLCHAIN_NAME="aarch64-linux-android-clang" \
-DANDROID_NATIVE_API_LEVEL="19" \
-DPLATFORM_ARM64=ON \
-DPKG_PATH=${PKG_PATH} ${BENCHMARK_PATH}
else
cmake -DCMAKE_BUILD_TYPE=Release \
-DCMAKE_TOOLCHAIN_FILE="${ANDROID_NDK}/build/cmake/android.toolchain.cmake" \
-DANDROID_ABI="armeabi-v7a" \
-DANDROID_TOOLCHAIN_NAME="clang" \
-DANDROID_NATIVE_API_LEVEL="19" \
-DPLATFORM_ARM32=ON \
-DPKG_PATH=${PKG_PATH} ${BENCHMARK_PATH}
fi
make

View File

@ -207,7 +207,7 @@ std::string RealPath(const char *path) {
char *real_path = realpath(path, resolved_path.get()); char *real_path = realpath(path, resolved_path.get());
#endif #endif
if (real_path == nullptr || strlen(real_path) == 0) { if (real_path == nullptr || strlen(real_path) == 0) {
MS_LOG(ERROR) << "file path is not valid : " << path; MS_LOG(ERROR) << "file path not exists: " << path;
return ""; return "";
} }
std::string res = resolved_path.get(); std::string res = resolved_path.get();

View File

@ -191,6 +191,7 @@ if(MSLITE_ENABLE_CONVERTER AND (NOT MSLITE_ENABLE_RUNTIME_CONVERT))
mindspore::glog mindspore::glog
preprocess_mid preprocess_mid
config_parser_mid config_parser_mid
coder_mid
) )
endif() endif()

View File

@ -0,0 +1,30 @@
[common_quant_param]
# Supports WEIGHT_QUANT or FULL_QUANT
#quant_type=WEIGHT_QUANT
# Weight quantization supports bit numbers in [0,16]; 0 means mixed-bit quantization, any other value means fixed-bit quantization
# Full quantization supports bit numbers in [1,8]
#bit_num=8
# Layers whose weight size exceeds the threshold `min_quant_weight_size` will be quantized.
#min_quant_weight_size=0
# Layers whose weight channel count exceeds the threshold `min_quant_weight_channel` will be quantized.
#min_quant_weight_channel=16
[micro_param]
# enable code-generation for MCU HW
enable_micro=true
# specify the HW target; only x86, ARM32M, ARM32A and ARM64 are supported.
target=ARM32A
# path to source code generated automatically
output_path=./
# code generation for Inference or Train
codegen_mode=Inference
# enable parallel inference or not
support_parallel=false
# enable debug
debug_mode=false
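For context, a `[micro_param]` section like the one above is meant to be handed to the converter through its config-file mechanism; the sketch below shows one plausible invocation, where the model file, output name, and config file name are illustrative assumptions rather than values taken from this commit.
```bash
# Hypothetical invocation: pass the micro config above to converter_lite
./converter_lite --fmk=TFLITE --modelFile=mobilenetv2.tflite \
  --outputFile=mobilenetv2 --configFile=micro_arm32a.cfg
```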

View File

@ -0,0 +1,30 @@
[common_quant_param]
# Supports WEIGHT_QUANT or FULL_QUANT
#quant_type=WEIGHT_QUANT
# Weight quantization supports bit numbers in [0,16]; 0 means mixed-bit quantization, any other value means fixed-bit quantization
# Full quantization supports bit numbers in [1,8]
#bit_num=8
# Layers whose weight size exceeds the threshold `min_quant_weight_size` will be quantized.
#min_quant_weight_size=0
# Layers whose weight channel count exceeds the threshold `min_quant_weight_channel` will be quantized.
#min_quant_weight_channel=16
[micro_param]
# enable code-generation for MCU HW
enable_micro=true
# specify the HW target; only x86, ARM32M, ARM32A and ARM64 are supported.
target=ARM64
# path to source code generated automatically
output_path=./
# code generation for Inference or Train
codegen_mode=Inference
# enable parallel inference or not
support_parallel=false
# enable debug
debug_mode=false

View File

@ -0,0 +1,30 @@
[common_quant_param]
# Supports WEIGHT_QUANT or FULL_QUANT
#quant_type=WEIGHT_QUANT
# Weight quantization supports bit numbers in [0,16]; 0 means mixed-bit quantization, any other value means fixed-bit quantization
# Full quantization supports bit numbers in [1,8]
#bit_num=8
# Layers whose weight size exceeds the threshold `min_quant_weight_size` will be quantized.
#min_quant_weight_size=0
# Layers whose weight channel count exceeds the threshold `min_quant_weight_channel` will be quantized.
#min_quant_weight_channel=16
[micro_param]
# enable code-generation for MCU HW
enable_micro=true
# specify the HW target; only x86, ARM32M, ARM32A and ARM64 are supported.
target=x86
# path to source code generated automatically
output_path=./
# code generation for Inference or Train
codegen_mode=Inference
# enable parallel inference or not
support_parallel=false
# enable debug
debug_mode=false

View File

@ -0,0 +1,30 @@
[common_quant_param]
# Supports WEIGHT_QUANT or FULL_QUANT
#quant_type=WEIGHT_QUANT
# Weight quantization supports bit numbers in [0,16]; 0 means mixed-bit quantization, any other value means fixed-bit quantization
# Full quantization supports bit numbers in [1,8]
#bit_num=8
# Layers whose weight size exceeds the threshold `min_quant_weight_size` will be quantized.
#min_quant_weight_size=0
# Layers whose weight channel count exceeds the threshold `min_quant_weight_channel` will be quantized.
#min_quant_weight_channel=16
[micro_param]
# enable code-generation for MCU HW
enable_micro=true
# specify the HW target; only x86, ARM32M, ARM32A and ARM64 are supported.
target=x86
# path to source code generated automatically
output_path=./
# code generation for Inference or Train
codegen_mode=Inference
# enable parallel inference or not
support_parallel=true
# enable debug
debug_mode=false

View File

@ -6,21 +6,23 @@ hiai_model_0909_kd_rot_ps_softmax.tflite
# hiai_model_normalize_object_scene_ps_20200519.tflite # hiai_model_normalize_object_scene_ps_20200519.tflite
# mtk_AADB_HADB_MBV2_model_fp32.tflite # mtk_AADB_HADB_MBV2_model_fp32.tflite
# mtk_AADB_HADB_MBV3_model_fp32.tflite # mtk_AADB_HADB_MBV3_model_fp32.tflite
mobilenet_v1_0.25_128.tflite
mobilenet_v1_0.25_160.tflite # testcases work but repeat models
mobilenet_v1_0.25_192.tflite # mobilenet_v1_0.25_128.tflite
mobilenet_v1_0.25_224.tflite # mobilenet_v1_0.25_160.tflite
mobilenet_v1_0.5_128.tflite # mobilenet_v1_0.25_192.tflite
mobilenet_v1_0.5_160.tflite # mobilenet_v1_0.25_224.tflite
mobilenet_v1_0.5_192.tflite # mobilenet_v1_0.5_128.tflite
mobilenet_v1_0.5_224.tflite # mobilenet_v1_0.5_160.tflite
mobilenet_v1_0.75_128.tflite # mobilenet_v1_0.5_192.tflite
mobilenet_v1_0.75_160.tflite # mobilenet_v1_0.5_224.tflite
mobilenet_v1_0.75_192.tflite # mobilenet_v1_0.75_128.tflite
mobilenet_v1_0.75_224.tflite # mobilenet_v1_0.75_160.tflite
mobilenet_v1_1.0_128.tflite # mobilenet_v1_0.75_192.tflite
mobilenet_v1_1.0_160.tflite # mobilenet_v1_0.75_224.tflite
mobilenet_v1_1.0_192.tflite # mobilenet_v1_1.0_128.tflite
# mobilenet_v1_1.0_160.tflite
# mobilenet_v1_1.0_192.tflite
mobilenet_v1_1.0_224.tflite mobilenet_v1_1.0_224.tflite
mobilenet_v2_1.0_224.tflite mobilenet_v2_1.0_224.tflite
# mtk_model_normalize_object_scene_ps_20200519_f32.tflite # mtk_model_normalize_object_scene_ps_20200519_f32.tflite
@ -92,11 +94,14 @@ hiai_cv_labelDetectorModel_v4.tflite
# hiai_iMaxSR_RGB.tflite # hiai_iMaxSR_RGB.tflite
hiai_label_and_video.tflite hiai_label_and_video.tflite
# hiai_lm_inference_graph.tflite # hiai_lm_inference_graph.tflite
efficientnet_lite0_fp32_2.tflite
efficientnet_lite1_fp32_2.tflite # testcases work but repeat models
efficientnet_lite2_fp32_2.tflite # efficientnet_lite0_fp32_2.tflite
efficientnet_lite3_fp32_2.tflite # efficientnet_lite1_fp32_2.tflite
# efficientnet_lite2_fp32_2.tflite
# efficientnet_lite3_fp32_2.tflite
efficientnet_lite4_fp32_2.tflite efficientnet_lite4_fp32_2.tflite
# mnasnet_0.50_224_1_metadata_1.tflite # mnasnet_0.50_224_1_metadata_1.tflite
# mnasnet_0.75_224_1_metadata_1.tflite # mnasnet_0.75_224_1_metadata_1.tflite
# mnasnet_1.0_128_1_metadata_1.tflite # mnasnet_1.0_128_1_metadata_1.tflite

View File

@ -1,8 +1,12 @@
#!/bin/bash #!/bin/bash
source ./scripts/base_functions.sh source ./scripts/base_functions.sh
# Run converter on x86 platform: function Run_x86_codegen() {
function Run_Converter() { # $1:buildPath $2:modelPath $3:models_list $4:logFile $5:resultFile $6:micro_config
local bind_mode thread_num suffix run_result
rm -rf $1
mkdir -p $1
# Unzip x86 runtime and converter # Unzip x86 runtime and converter
cd ${x86_path} || exit 1 cd ${x86_path} || exit 1
tar -zxf mindspore-lite-${version}-linux-x64.tar.gz || exit 1 tar -zxf mindspore-lite-${version}-linux-x64.tar.gz || exit 1
@ -11,60 +15,89 @@ function Run_Converter() {
cp tools/converter/converter/converter_lite ./ || exit 1 cp tools/converter/converter/converter_lite ./ || exit 1
export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:./tools/converter/lib/:./tools/converter/third_party/glog/lib export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:./tools/converter/lib/:./tools/converter/third_party/glog/lib
rm -rf ${ms_models_path}
mkdir -p ${ms_models_path}
# Prepare the config file list
local cfg_file_list=("$models_codegen_parallel_config" "$models_codegen_config")
# Convert models:
# $1:cfgFileList; $2:inModelPath; $3:outModelPath; $4:logFile; $5:resultFile;
Convert "${cfg_file_list[*]}" $models_path $ms_models_path $run_converter_log_file $run_converter_result_file
}
# Run on x86 codegen benchmark
function Run_x86_codegen() {
# $1:buildPath $2:modelPath $3:cfgFile $4:logFile $5:resultFile
local support_parallel bind_mode thread_num suffix run_result
local CODEGEN_PATH=${x86_path}/mindspore-lite-${version}-linux-x64/tools/codegen
rm -rf $1
mkdir -p $1
while read line; do while read line; do
model_name=${line} if [[ $line == \#* || $line == "" ]]; then
if [[ $model_name == \#* || $model_name == "" ]]; then continue
continue fi
fi model_info=`echo ${line} | awk -F ' ' '{print $1}'`
support_parallel="false" model_name=`echo ${model_info} | awk -F ';' '{print $1}'`
bind_mode="" model_type=${model_name##*.}
thread_num="" case $model_type in
suffix="" pb)
if [[ $3 =~ "parallel" ]]; then model_fmk="TF"
support_parallel="true" ;;
bind_mode="0" tflite)
thread_num="4" model_fmk="TFLITE"
suffix="_parallel" ;;
fi onnx)
echo ${model_name} >> "$4" model_fmk="ONNX"
${CODEGEN_PATH}/codegen --codePath=$1 --modelPath=$2/${model_name}.ms --supportParallel=${support_parallel} >> $4 ;;
# 1. build benchmark mindir)
mkdir -p $1/${model_name}/build && cd $1/${model_name}/build || exit 1 model_fmk="MINDIR"
cmake -DPKG_PATH=${x86_path}/mindspore-lite-${version}-linux-x64 $1/${model_name} >> $4 ;;
make >> $4 *)
# 2. run benchmark model_type="caffe"
echo "net file: $1/${model_name}/src/net.bin" >> $4 model_fmk="CAFFE"
echo "./benchmark ${models_path}/input_output/input/${model_name}.ms.bin $1/${model_name}/src/net.bin 1 ${models_path}/input_output/output/${model_name}.ms.out ${thread_num} ${bind_mode}" >> $4 ;;
./benchmark ${models_path}/input_output/input/${model_name}.ms.bin $1/${model_name}/src/net.bin 1 ${models_path}/input_output/output/${model_name}.ms.out ${thread_num} ${bind_mode} >> $4 esac
if [ $? = 0 ]; then # set parameters
run_result='x86_codegen'${suffix}': '${model_name}' pass'; echo ${run_result} >> $5 model_file=$2"/"${model_name}
else weight_file=""
run_result='x86_codegen'${suffix}': '${model_name}' failed'; echo ${run_result} >> $5; return 1 if [[ $model_fmk == "CAFFE" ]]; then
fi model_file=${model_file}".prototxt"
done < $3 weight_file=${model_file%.*}".caffemodel"
fi
output_file=$1"/"${model_name}
quant_type=""
config_file=$6
spec_shapes=""
train_model="false"
in_dtype="DEFAULT"
out_dtype="DEFAULT"
rm -rf $1 # start running converter
cd ${x86_path}/mindspore-lite-${version}-linux-x64/ || exit 1
echo ${model_name} >> "$4"
echo './converter_lite --fmk='${model_fmk}' --modelFile='${model_file}' --weightFile='${weight_file}' --outputFile='${output_file}\
' --inputDataType='${in_dtype}' --outputDataType='${out_dtype}' --inputShape='${spec_shapes}\
' --configFile='${config_file}' --trainModel='${train_model} >> "$4"
./converter_lite --fmk=${model_fmk} --modelFile=${model_file} --weightFile=${weight_file} --outputFile=${output_file}\
--inputDataType=${in_dtype} --outputDataType=${out_dtype} --inputShape=${spec_shapes}\
--configFile=${config_file} --trainModel=${train_model} >> "$4"
if [ $? = 0 ]; then
converter_result='converter '${model_type}''${quant_type}' '${model_name}' pass';echo ${converter_result} >> $5
else
converter_result='converter '${model_type}''${quant_type}' '${model_name}' failed';echo ${converter_result} >> $5
fi
bind_mode=""
thread_num=""
suffix=""
if [[ $3 =~ "parallel" ]]; then
bind_mode="0"
thread_num="4"
suffix="_parallel"
fi
echo ${model_name} >> "$4"
# 1. build benchmark
mkdir -p ${output_file}/build && cd ${output_file}/build || exit 1
cmake -DPKG_PATH=${x86_path}/mindspore-lite-${version}-linux-x64 ${output_file} >> $4
make >> $4
# 2. run benchmark
echo "net file: ${output_file}/src/net.bin" >> $4
echo "./benchmark ${models_path}/input_output/input/${model_name}.ms.bin ${output_file}/src/net.bin 1 ${models_path}/input_output/output/${model_name}.ms.out ${thread_num} ${bind_mode}" >> $4
./benchmark ${models_path}/input_output/input/${model_name}.ms.bin ${output_file}/src/net.bin 1 ${models_path}/input_output/output/${model_name}.ms.out ${thread_num} ${bind_mode} >> $4
if [ $? = 0 ]; then
run_result='x86_codegen'${suffix}': '${model_name}' pass'; echo ${run_result} >> $5
else
run_result='x86_codegen'${suffix}': '${model_name}' failed'; echo ${run_result} >> $5;
fi
done < $3
} }
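The case statement in this hunk derives the converter's --fmk argument from the model file extension and falls back to Caffe for anything unrecognized. The same mapping, rendered as a hedged C++ sketch; `FmkFromExtension` is a made-up name, not converter code:

#include <string>

// Illustrative C++ mirror of the script's extension-to-framework mapping.
std::string FmkFromExtension(const std::string &model_name) {
  const auto pos = model_name.rfind('.');
  const std::string ext = (pos == std::string::npos) ? "" : model_name.substr(pos + 1);
  if (ext == "pb") return "TF";
  if (ext == "tflite") return "TFLITE";
  if (ext == "onnx") return "ONNX";
  if (ext == "mindir") return "MINDIR";
  return "CAFFE";  // anything else is treated as a Caffe model (.prototxt + .caffemodel)
}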
function Run_arm_codegen() { function Run_arm_codegen() {
# $1:buildPath $2:modelPath $3:cfgFile $4:logFile $5:resultFile $6:deviceID $7:processor # $1:buildPath $2:modelPath $3:model_list $4:logFile $5:resultFile $6:deviceID $7:processor $8:micro_config $9:failNotReturn;
local package_path package_suffix target platform android_abi toolchain_name package_path run_result local package_path package_suffix target platform android_abi toolchain_name package_path run_result
echo "ANDROID_NDK: ${ANDROID_NDK}" >> $4 echo "ANDROID_NDK: ${ANDROID_NDK}" >> $4
package_path=${arm64_path} package_path=${arm64_path}
@ -84,69 +117,117 @@ function Run_arm_codegen() {
cd ${package_path} || exit 1 cd ${package_path} || exit 1
tar -zxf mindspore-lite-${version}-android-${package_suffix}.tar.gz || exit 1 tar -zxf mindspore-lite-${version}-android-${package_suffix}.tar.gz || exit 1
local PKG_PATH=${package_path}/mindspore-lite-${version}-android-${package_suffix} local PKG_PATH=${package_path}/mindspore-lite-${version}-android-${package_suffix}
local CODEGEN_PATH=${x86_path}/mindspore-lite-${version}-linux-x64/tools/codegen
# Unzip x86 runtime and converter
cd ${x86_path} || exit 1
tar -zxf mindspore-lite-${version}-linux-x64.tar.gz || exit 1
cd ${x86_path}/mindspore-lite-${version}-linux-x64/ || exit 1
cp tools/converter/converter/converter_lite ./ || exit 1
export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:./tools/converter/lib/:./tools/converter/third_party/glog/lib
rm -rf $1 rm -rf $1
mkdir -p $1 mkdir -p $1
# Run tflite converted models: # Run tflite converted models:
while read line; do while read line; do
model_name=${line} if [[ $line == \#* || $line == "" ]]; then
if [[ $model_name == \#* ]]; then continue
continue fi
fi model_info=`echo ${line} | awk -F ' ' '{print $1}'`
model_name=`echo ${model_info} | awk -F ';' '{print $1}'`
model_type=${model_name##*.}
case $model_type in
pb)
model_fmk="TF"
;;
tflite)
model_fmk="TFLITE"
;;
onnx)
model_fmk="ONNX"
;;
mindir)
model_fmk="MINDIR"
;;
*)
model_type="caffe"
model_fmk="CAFFE"
;;
esac
# set parameters
model_file=$2"/"${model_name}
weight_file=""
if [[ $model_fmk == "CAFFE" ]]; then
model_file=${model_file}".prototxt"
weight_file=${model_file%.*}".caffemodel"
fi
output_file=$1"/"${model_name}
quant_type=""
config_file=$8
spec_shapes=""
train_model="false"
in_dtype="DEFAULT"
out_dtype="DEFAULT"
{ # start running converter
echo "$7_codegen: ${model_name}" cd ${x86_path}/mindspore-lite-${version}-linux-x64/ || exit 1
echo "${CODEGEN_PATH}/codegen --codePath=$1 --modelPath=$2/${model_name}.ms --target=${target}" echo ${model_name} >> "$4"
${CODEGEN_PATH}/codegen --codePath=$1 --modelPath=$2/${model_name}.ms --target=${target} echo './converter_lite --fmk='${model_fmk}' --modelFile='${model_file}' --weightFile='${weight_file}' --outputFile='${output_file}\
} >> $4 ' --inputDataType='${in_dtype}' --outputDataType='${out_dtype}' --inputShape='${spec_shapes}\
' --configFile='${config_file}' --trainModel='${train_model} >> "$4"
./converter_lite --fmk=${model_fmk} --modelFile=${model_file} --weightFile=${weight_file} --outputFile=${output_file}\
--inputDataType=${in_dtype} --outputDataType=${out_dtype} --inputShape=${spec_shapes}\
--configFile=${config_file} --trainModel=${train_model} >> "$4"
if [ $? = 0 ]; then
converter_result='converter '${model_type}''${quant_type}' '${model_name}' pass';echo ${converter_result} >> $5
else
converter_result='converter '${model_type}''${quant_type}' '${model_name}' failed';echo ${converter_result} >> $5
fi
rm -rf $1/benchmark rm -rf $1/benchmark
mkdir -p $1/benchmark && cd $1/benchmark || exit 1 mkdir -p $1/benchmark && cd $1/benchmark || exit 1
{ {
echo "cmake -DCMAKE_BUILD_TYPE=Release echo "cmake -DCMAKE_BUILD_TYPE=Release
-DCMAKE_TOOLCHAIN_FILE=${ANDROID_NDK}/build/cmake/android.toolchain.cmake -DCMAKE_TOOLCHAIN_FILE=${ANDROID_NDK}/build/cmake/android.toolchain.cmake
-DANDROID_ABI=${android_abi} -DANDROID_ABI=${android_abi}
-DANDROID_TOOLCHAIN_NAME=${toolchain_name} -DANDROID_TOOLCHAIN_NAME=${toolchain_name}
-DANDROID_NATIVE_API_LEVEL=19 -DANDROID_NATIVE_API_LEVEL=19
-DPLATFORM_${platform}=ON -DPLATFORM_${platform}=ON
-DPKG_PATH=${PKG_PATH} $1/${model_name}" -DPKG_PATH=${PKG_PATH} $1/${model_name}"
cmake -DCMAKE_BUILD_TYPE=Release \ cmake -DCMAKE_BUILD_TYPE=Release \
-DCMAKE_TOOLCHAIN_FILE="${ANDROID_NDK}/build/cmake/android.toolchain.cmake" \ -DCMAKE_TOOLCHAIN_FILE="${ANDROID_NDK}/build/cmake/android.toolchain.cmake" \
-DANDROID_ABI=${android_abi} \ -DANDROID_ABI=${android_abi} \
-DANDROID_TOOLCHAIN_NAME=${toolchain_name} \ -DANDROID_TOOLCHAIN_NAME=${toolchain_name} \
-DANDROID_NATIVE_API_LEVEL="19" \ -DANDROID_NATIVE_API_LEVEL="19" \
-DPLATFORM_${platform}=ON \ -DPLATFORM_${platform}=ON \
-DPKG_PATH=${PKG_PATH} $1/${model_name} -DPKG_PATH=${PKG_PATH} $1/${model_name}
make -j4
} >> $4
make -j4 benchmark_dir="$1/codegen_test_$7"
} >> $4 rm -rf "$benchmark_dir"
mkdir "$benchmark_dir" && cd "$benchmark_dir" || exit 1
cp -a "$1/benchmark/benchmark" "$benchmark_dir/benchmark" || exit 1
cp -a "$1/$model_name/src/net.bin" "$benchmark_dir/net.bin" || exit 1
benchmark_dir="$1/codegen_test_$7" {
rm -rf "$benchmark_dir"
mkdir "$benchmark_dir" && cd "$benchmark_dir" || exit 1
cp -a "$1/benchmark/benchmark" "$benchmark_dir/benchmark" || exit 1
cp -a "$1/$model_name/src/net.bin" "$benchmark_dir/net.bin" || exit 1
{
echo "ls $benchmark_dir:" echo "ls $benchmark_dir:"
ls "$benchmark_dir" ls "$benchmark_dir"
} >> $4 } >> $4
# adb push all needed files to the phone # adb push all needed files to the phone
adb -s $6 push "$benchmark_dir" /data/local/tmp/ > adb_push_log.txt adb -s $6 push "$benchmark_dir" /data/local/tmp/ > adb_push_log.txt
{
{ echo "cd /data/local/tmp/codegen_test_$7"
echo "cd /data/local/tmp/codegen_test_$7" echo 'chmod 777 benchmark'
echo 'chmod 777 benchmark' echo 'chmod 777 net.bin'
echo 'chmod 777 net.bin' echo 'ls'
echo 'ls' echo './benchmark /data/local/tmp/input_output/input/'${model_name}'.ms.bin ./net.bin 1 /data/local/tmp/input_output/output/'${model_name}'.ms.out'
echo './benchmark /data/local/tmp/input_output/input/'${model_name}'.ms.bin ./net.bin 1 /data/local/tmp/input_output/output/'${model_name}'.ms.out' echo "cd .. && rm -rf codegen_test_$7"
echo "cd .. && rm -rf codegen_test_$7" } >> $4
} >> $4
{ {
echo "cd /data/local/tmp/codegen_test_$7" echo "cd /data/local/tmp/codegen_test_$7"
@ -161,7 +242,7 @@ function Run_arm_codegen() {
if [ $? = 0 ]; then if [ $? = 0 ]; then
run_result=$7'_codegen: '${model_name}' pass'; echo ${run_result} >> $5 run_result=$7'_codegen: '${model_name}' pass'; echo ${run_result} >> $5
else else
run_result=$7'_codegen: '${model_name}' failed'; echo ${run_result} >> $5; return 1 run_result=$7'_codegen: '${model_name}' failed'; echo ${run_result} >> $5;
fi fi
done < $3 done < $3
@ -204,38 +285,29 @@ file_name=$(ls ${x86_path}/*-linux-x64.tar.gz)
IFS="-" read -r -a file_name_array <<< "$file_name" IFS="-" read -r -a file_name_array <<< "$file_name"
version=${file_name_array[2]} version=${file_name_array[2]}
# Set models config filepath # Set model list files
models_codegen_config=${basepath}/../config/models_codegen.cfg models_codegen_config=${basepath}/../config/models_codegen.cfg
models_codegen_parallel_config=${basepath}/../config/models_codegen_parallel.cfg models_codegen_parallel_config=${basepath}/../config/models_codegen_parallel.cfg
#micro config
micro_x86_config=${basepath}/../config/micro/micro_x86.cfg
micro_x86_parallel_config=${basepath}/../config/micro/micro_x86_parallel.cfg
micro_arm64_config=${basepath}/../config/micro/micro_arm64.cfg
micro_arm32A_config=${basepath}/../config/micro/micro_arm32A.cfg
# Set models and build path # Set models and build path
ms_models_path=${basepath}/ms_models
build_path_x86=${basepath}/codegen_build_x86 build_path_x86=${basepath}/codegen_build_x86
build_path_parallel=${basepath}/codegen_build_parallel build_path_parallel=${basepath}/codegen_build_parallel
build_path_arm64=${basepath}/codegen_build_arm64 build_path_arm64=${basepath}/codegen_build_arm64
build_path_arm32=${basepath}/codegen_build_arm32 build_path_arm32=${basepath}/codegen_build_arm32
# Write converter result to temp file # Write converter result to temp file
run_converter_log_file=${basepath}/run_converter_log.txt run_converter_log_file=${basepath}/run_converter_log.txt
echo ' ' > ${run_converter_log_file} echo ' ' > ${run_converter_log_file}
run_converter_result_file=${basepath}/run_converter_result.txt run_converter_result_file=${basepath}/run_converter_result.txt
echo ' ' > ${run_converter_result_file} echo ' ' > ${run_converter_result_file}
# Run converter
echo "start Run converter ..."
Run_Converter
Run_converter_status=$?
# Check converter result and return value
if [[ ${Run_converter_status} = 0 ]];then
echo "Run converter success"
Print_Converter_Result $run_converter_result_file
else
echo "Run converter failed"
cat ${run_converter_log_file}
Print_Converter_Result $run_converter_result_file
exit 1
fi
# Write benchmark result to temp file # Write benchmark result to temp file
run_benchmark_result_file=${basepath}/run_benchmark_result.txt run_benchmark_result_file=${basepath}/run_benchmark_result.txt
echo ' ' > ${run_benchmark_result_file} echo ' ' > ${run_benchmark_result_file}
@ -256,31 +328,28 @@ echo "current backend is ${backend}"
if [[ $backend == "all" || $backend == "codegen" || $backend == "x86_codegen" ]]; then if [[ $backend == "all" || $backend == "codegen" || $backend == "x86_codegen" ]]; then
# Run on x86-codegen # Run on x86-codegen
echo "start Run x86 codegen ..." echo "start Run x86 codegen ..."
Run_x86_codegen ${build_path_x86} ${ms_models_path} ${models_codegen_config} ${run_x86_codegen_log_file} ${run_benchmark_result_file} & Run_x86_codegen ${build_path_x86} ${models_path} ${models_codegen_config} ${run_x86_codegen_log_file} ${run_benchmark_result_file} ${micro_x86_config}&
Run_x86_codegen_PID=$! Run_x86_codegen_PID=$!
sleep 1 sleep 1
fi fi
if [[ $backend == "all" || $backend == "codegen" || $backend == "x86_codegen" || $backend == "x86_codegen_parallel" ]]; then if [[ $backend == "all" || $backend == "codegen" || $backend == "x86_codegen" || $backend == "x86_codegen_parallel" ]]; then
# Run on x86-codegen-parallel # Run on x86-codegen-parallel
echo "start Run x86 codegen parallel ..." echo "start Run x86 codegen parallel ..."
Run_x86_codegen ${build_path_parallel} ${ms_models_path} ${models_codegen_parallel_config} ${run_x86_codegen_parallel_log_file} ${run_benchmark_result_file} & Run_x86_codegen ${build_path_parallel} ${models_path} ${models_codegen_parallel_config} ${run_x86_codegen_parallel_log_file} ${run_benchmark_result_file} ${micro_x86_parallel_config}&
# Run_x86_codegen_parallel_status=$?
Run_x86_codegen_parallel_PID=$! Run_x86_codegen_parallel_PID=$!
sleep 1 sleep 1
fi fi
if [[ $backend == "all" || $backend == "codegen" || $backend == "arm64_codegen" ]]; then if [[ $backend == "all" || $backend == "codegen" || $backend == "arm64_codegen" ]]; then
# Run on codegen # Run on codegen
echo "start Run arm64 codegen ..." echo "start Run arm64 codegen ..."
Run_arm_codegen ${build_path_arm64} ${ms_models_path} ${models_codegen_config} ${run_arm64_fp32_codegen_log_file} ${run_benchmark_result_file} ${device_id} "arm64" & Run_arm_codegen ${build_path_arm64} ${models_path} ${models_codegen_config} ${run_arm64_fp32_codegen_log_file} ${run_benchmark_result_file} ${device_id} "arm64" ${micro_arm64_config}&
# Run_arm64_codegen_status=$?
Run_arm64_codegen_PID=$! Run_arm64_codegen_PID=$!
sleep 1 sleep 1
fi fi
if [[ $backend == "all" || $backend == "codegen" || $backend == "arm32_codegen" ]]; then if [[ $backend == "all" || $backend == "codegen" || $backend == "arm32_codegen" ]]; then
# Run on arm32 codegen # Run on arm32 codegen
echo "start Run arm32 codegen ..." echo "start Run arm32 codegen ..."
Run_arm_codegen ${build_path_arm32} ${ms_models_path} ${models_codegen_config} ${run_arm32_fp32_codegen_log_file} ${run_benchmark_result_file} ${device_id} "arm32" & Run_arm_codegen ${build_path_arm32} ${models_path} ${models_codegen_config} ${run_arm32_fp32_codegen_log_file} ${run_benchmark_result_file} ${device_id} "arm32" ${micro_arm32A_config}&
# Run_arm32_codegen_status=$?
Run_arm32_codegen_PID=$! Run_arm32_codegen_PID=$!
sleep 1 sleep 1
fi fi

View File

@ -108,11 +108,7 @@ bool MetaGraphSerializer::InitPath(const std::string &output_path) {
return true; return true;
} }
bool MetaGraphSerializer::Init(const schema::MetaGraphT &graph, const std::string &output_path, bool save_together) { bool MetaGraphSerializer::Init(const schema::MetaGraphT &graph, bool save_together) {
if (!InitPath(output_path)) {
MS_LOG(ERROR) << "Init path failed";
return false;
}
// init file streams // init file streams
ChangeMod(save_model_path_); ChangeMod(save_model_path_);
model_fs_ = OpenFile(save_model_path_, std::ios::out | std::ios::binary | std::ios::trunc); model_fs_ = OpenFile(save_model_path_, std::ios::out | std::ios::binary | std::ios::trunc);
@ -248,9 +244,13 @@ int MetaGraphSerializer::Save(const schema::MetaGraphT &graph, const std::string
builder.Finish(offset); builder.Finish(offset);
schema::FinishMetaGraphBuffer(builder, offset); schema::FinishMetaGraphBuffer(builder, offset);
size_t size = builder.GetSize(); size_t size = builder.GetSize();
auto save_together = (size < kModelSizeLimit);
MetaGraphSerializer meta_graph_serializer; MetaGraphSerializer meta_graph_serializer;
if (!meta_graph_serializer.Init(graph, output_path, save_together)) { if (!meta_graph_serializer.InitPath(output_path)) {
MS_LOG(ERROR) << "Init path failed";
return RET_ERROR;
}
auto save_together = (size < kModelSizeLimit);
if (!meta_graph_serializer.Init(graph, save_together)) {
MS_LOG(ERROR) << "Init MetaGraphSerializer failed"; MS_LOG(ERROR) << "Init MetaGraphSerializer failed";
return RET_ERROR; return RET_ERROR;
} }
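After this refactor the public entry point is still `MetaGraphSerializer::Save(graph, output_path)`; the path setup and the `kModelSizeLimit` size check now both happen inside `Save` rather than in `Init`. A minimal hedged usage sketch (the include path is assumed):

#include <string>

#include "tools/converter/converter/meta_graph_serializer.h"  // include path assumed

// Hedged usage sketch: the caller supplies only the graph and an output path;
// whether weights are stored inline or split into a data file is decided
// internally by comparing the flatbuffer size against kModelSizeLimit.
int SaveModel(const mindspore::schema::MetaGraphT &graph, const std::string &output_path) {
  return mindspore::lite::MetaGraphSerializer::Save(graph, output_path);
}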

View File

@ -25,6 +25,7 @@
namespace mindspore::lite { namespace mindspore::lite {
class MetaGraphSerializer { class MetaGraphSerializer {
public: public:
// save the serialized flatbuffer model
static int Save(const schema::MetaGraphT &graph, const std::string &output_path); static int Save(const schema::MetaGraphT &graph, const std::string &output_path);
private: private:
@ -34,7 +35,7 @@ class MetaGraphSerializer {
bool InitPath(const std::string &real_output_path); bool InitPath(const std::string &real_output_path);
bool Init(const schema::MetaGraphT &graph, const std::string &output_path, bool save_together = true); bool Init(const schema::MetaGraphT &graph, bool save_together = true);
schema::ExternalDataT *AddExternalData(const char *data, size_t size); schema::ExternalDataT *AddExternalData(const char *data, size_t size);
@ -50,8 +51,14 @@ class MetaGraphSerializer {
std::string model_name_; std::string model_name_;
std::string save_model_path_; std::string save_model_path_;
std::string save_data_path_; std::string save_data_path_;
std::string code_mode_;
std::string target_;
bool support_parallel_{false};
bool debug_mode_{false};
std::fstream *model_fs_ = nullptr; std::fstream *model_fs_ = nullptr;
std::fstream *data_fs_ = nullptr; std::fstream *data_fs_ = nullptr;
friend class Coder;
}; };
} // namespace mindspore::lite } // namespace mindspore::lite

View File

@ -72,6 +72,7 @@ add_subdirectory(registry)
add_subdirectory(preprocess) add_subdirectory(preprocess)
add_subdirectory(config_parser) add_subdirectory(config_parser)
add_subdirectory(${CORE_DIR} mindspore_core) add_subdirectory(${CORE_DIR} mindspore_core)
add_subdirectory(micro/coder)
if(MSLITE_ENABLE_ACL) if(MSLITE_ENABLE_ACL)
set(MODE_ASCEND_ACL ON) set(MODE_ASCEND_ACL ON)
@ -219,6 +220,7 @@ if(MSLITE_GPU_BACKEND STREQUAL opencl)
include_directories(${SRC_DIR}/runtime/kernel/opencl) include_directories(${SRC_DIR}/runtime/kernel/opencl)
target_link_libraries(converter_lite PRIVATE opencl_kernel_mid) target_link_libraries(converter_lite PRIVATE opencl_kernel_mid)
endif() endif()
target_link_libraries(converter_lite PRIVATE target_link_libraries(converter_lite PRIVATE
ccsrc_src_mid ccsrc_src_mid
converter_src_mid converter_src_mid
@ -242,6 +244,7 @@ target_link_libraries(converter_lite PRIVATE
mindspore::protobuf mindspore::protobuf
preprocess_mid preprocess_mid
config_parser_mid config_parser_mid
coder_mid
) )
if(MSLITE_ENABLE_ACL) if(MSLITE_ENABLE_ACL)

View File

@ -3,7 +3,6 @@ file(GLOB_RECURSE CONFIG_PARSER_SRC_LIST RELATIVE ${CMAKE_CURRENT_SOURCE_DIR}
) )
set_property(SOURCE ${CONFIG_PARSER_SRC_LIST} PROPERTY COMPILE_DEFINITIONS SUBMODULE_ID=mindspore::SubModuleId::SM_LITE) set_property(SOURCE ${CONFIG_PARSER_SRC_LIST} PROPERTY COMPILE_DEFINITIONS SUBMODULE_ID=mindspore::SubModuleId::SM_LITE)
add_library(config_parser_mid OBJECT add_library(config_parser_mid OBJECT
${CONFIG_PARSER_SRC_LIST} ${CONFIG_PARSER_SRC_LIST})
)
add_dependencies(config_parser_mid fbs_src) add_dependencies(config_parser_mid fbs_src)
add_dependencies(config_parser_mid fbs_inner_src) add_dependencies(config_parser_mid fbs_inner_src)

View File

@ -29,6 +29,7 @@ constexpr auto kMixedBitWeightQuantParam = "mixed_bit_weight_quant_param";
constexpr auto kDataPreprocessParam = "data_preprocess_param"; constexpr auto kDataPreprocessParam = "data_preprocess_param";
constexpr auto kRegistry = "registry"; constexpr auto kRegistry = "registry";
constexpr auto kAclOptionParam = "acl_option_cfg_param"; constexpr auto kAclOptionParam = "acl_option_cfg_param";
constexpr auto kMicroParam = "micro_param";
} // namespace } // namespace
int ConfigFileParser::ParseConfigFile(const std::string &config_file_path) { int ConfigFileParser::ParseConfigFile(const std::string &config_file_path) {
std::map<std::string, std::map<std::string, std::string>> maps; std::map<std::string, std::map<std::string, std::string>> maps;
@ -73,6 +74,13 @@ int ConfigFileParser::ParseConfigFile(const std::string &config_file_path) {
MS_LOG(ERROR) << "ParseAclOptionCfgString failed."; MS_LOG(ERROR) << "ParseAclOptionCfgString failed.";
return ret; return ret;
} }
ret = ParseMicroParamString(maps);
(void)maps.erase(kMicroParam);
if (ret != RET_OK) {
MS_LOG(ERROR) << "ParseMicroParamString failed.";
return ret;
}
for (const auto &config_info : maps) { for (const auto &config_info : maps) {
ConverterInnerContext::GetInstance()->SetExternalUsedConfigInfos(config_info.first, config_info.second); ConverterInnerContext::GetInstance()->SetExternalUsedConfigInfos(config_info.first, config_info.second);
} }
@ -189,5 +197,19 @@ int ConfigFileParser::ParseAclOptionCfgString(const std::map<std::string, std::m
} }
return RET_OK; return RET_OK;
} }
int ConfigFileParser::ParseMicroParamString(const std::map<std::string, std::map<std::string, std::string>> &maps) {
if (maps.find(kMicroParam) != maps.end()) {
const auto &map = maps.at(kMicroParam);
std::map<std::string, std::string &> parse_map{{"target", micro_param_string_.target},
{"codegen_mode", micro_param_string_.codegen_mode},
{"output_path", micro_param_string_.output_path},
{"debug_mode", micro_param_string_.debug_mode},
{"support_parallel", micro_param_string_.support_parallel},
{"enable_micro", micro_param_string_.enable_micro}};
return SetMapData(map, parse_map, kMicroParam);
}
return RET_OK;
}
} // namespace lite } // namespace lite
} // namespace mindspore } // namespace mindspore
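`ParseMicroParamString` builds a `std::map<std::string, std::string &>` whose mapped values are references into `micro_param_string_`, so the generic `SetMapData` loop can fill any struct without per-field code. A self-contained sketch of that idiom; `DemoParams` and `FillFromSection` are illustrative stand-ins, not converter code:

#include <iostream>
#include <map>
#include <string>

struct DemoParams {  // stand-in for MicroParamString
  std::string target;
  std::string output_path;
};

// Illustrative equivalent of SetMapData: copy every recognized key from the
// parsed INI section into the field the reference map points at.
void FillFromSection(const std::map<std::string, std::string> &section,
                     const std::map<std::string, std::string &> &parse_map) {
  for (const auto &kv : section) {
    auto it = parse_map.find(kv.first);
    if (it != parse_map.end()) {
      it->second = kv.second;  // writes through the reference into the struct
    }
  }
}

int main() {
  DemoParams params;
  std::map<std::string, std::string &> parse_map{{"target", params.target},
                                                 {"output_path", params.output_path}};
  FillFromSection({{"target", "x86"}, {"output_path", "./"}}, parse_map);
  std::cout << params.target << " " << params.output_path << std::endl;  // prints: x86 ./
}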

View File

@ -78,6 +78,15 @@ struct AclOptionCfgString {
std::string dynamic_image_size; std::string dynamic_image_size;
}; };
struct MicroParamString {
std::string output_path;
std::string codegen_mode;
std::string target;
std::string support_parallel;
std::string debug_mode;
std::string enable_micro;
};
class ConfigFileParser { class ConfigFileParser {
public: public:
int ParseConfigFile(const std::string &config_file_path); int ParseConfigFile(const std::string &config_file_path);
@ -88,6 +97,7 @@ class ConfigFileParser {
FullQuantString GetFullQuantString() const { return this->full_quant_string_; } FullQuantString GetFullQuantString() const { return this->full_quant_string_; }
RegistryInfoString GetRegistryInfoString() const { return this->registry_info_string_; } RegistryInfoString GetRegistryInfoString() const { return this->registry_info_string_; }
AclOptionCfgString GetAclOptionCfgString() { return this->acl_option_cfg_string_; } AclOptionCfgString GetAclOptionCfgString() { return this->acl_option_cfg_string_; }
MicroParamString GetMicroParamString() { return this->micro_param_string_; }
private: private:
int ParseDataPreProcessString(const std::map<std::string, std::map<std::string, std::string>> &maps); int ParseDataPreProcessString(const std::map<std::string, std::map<std::string, std::string>> &maps);
@ -98,6 +108,7 @@ class ConfigFileParser {
int ParseAclOptionCfgString(const std::map<std::string, std::map<std::string, std::string>> &maps); int ParseAclOptionCfgString(const std::map<std::string, std::map<std::string, std::string>> &maps);
int SetMapData(const std::map<std::string, std::string> &input_map, int SetMapData(const std::map<std::string, std::string> &input_map,
const std::map<std::string, std::string &> &parse_map, const std::string &section); const std::map<std::string, std::string &> &parse_map, const std::string &section);
int ParseMicroParamString(const std::map<std::string, std::map<std::string, std::string>> &maps);
private: private:
DataPreProcessString data_pre_process_string_; DataPreProcessString data_pre_process_string_;
@ -106,6 +117,7 @@ class ConfigFileParser {
FullQuantString full_quant_string_; FullQuantString full_quant_string_;
RegistryInfoString registry_info_string_; RegistryInfoString registry_info_string_;
AclOptionCfgString acl_option_cfg_string_; AclOptionCfgString acl_option_cfg_string_;
MicroParamString micro_param_string_;
}; };
} // namespace lite } // namespace lite

View File

@ -0,0 +1,110 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "tools/converter/config_parser/micro_param_parser.h"
#include "tools/converter/micro/coder/config.h"
#include "tools/common/string_util.h"
#include "src/common/log_adapter.h"
#include "src/common/log_util.h"
namespace mindspore {
namespace lite {
STATUS MicroParamParser::ParseTarget(const std::string &target, micro::MicroParam *micro_param) {
MS_LOG(DEBUG) << "Micro HW target: " << target;
micro_param->target = target;
return RET_OK;
}
STATUS MicroParamParser::ParseOutputPath(const std::string &output_path, micro::MicroParam *micro_param) {
MS_LOG(DEBUG) << "Micro codegen output_path: " << output_path;
micro_param->output_path = output_path;
return RET_OK;
}
STATUS MicroParamParser::ParseCodeGenMode(const std::string &codegen_mode, micro::MicroParam *micro_param) {
MS_LOG(DEBUG) << "Micro codegen mode: " << codegen_mode;
micro_param->codegen_mode = codegen_mode;
return RET_OK;
}
STATUS MicroParamParser::ParseSupportParallel(const std::string &support_parallel, micro::MicroParam *micro_param) {
MS_LOG(DEBUG) << "Micro supports parallel: " << support_parallel;
micro_param->support_parallel = false; // default
bool is_parallel;
if (ConvertBool(support_parallel, &is_parallel)) {
micro_param->support_parallel = is_parallel;
}
return RET_OK;
}
STATUS MicroParamParser::ParseDebugMode(const std::string &debug_mode, micro::MicroParam *micro_param) {
MS_LOG(DEBUG) << "Micro enables debug mode: " << debug_mode;
micro_param->debug_mode = false; // default
bool is_debug_mode;
if (ConvertBool(debug_mode, &is_debug_mode)) {
micro_param->debug_mode = is_debug_mode;
}
return RET_OK;
}
STATUS MicroParamParser::ParseEnableMicro(const std::string &enable_micro, micro::MicroParam *micro_param) {
MS_LOG(DEBUG) << "Micro enables : " << enable_micro;
micro_param->enable_micro = false; // default
bool is_enable_micro;
if (ConvertBool(enable_micro, &is_enable_micro)) {
micro_param->enable_micro = is_enable_micro;
}
return RET_OK;
}
STATUS MicroParamParser::ParseMicroParam(const MicroParamString &micro_param_string, micro::MicroParam *micro_param) {
CHECK_NULL_RETURN(micro_param);
if (!micro_param_string.target.empty()) {
if (ParseTarget(micro_param_string.target, micro_param) != RET_OK) {
MS_LOG(ERROR) << "Parse HW target val: " << micro_param_string.target;
return RET_INPUT_PARAM_INVALID;
}
}
if (!micro_param_string.output_path.empty()) {
if (ParseOutputPath(micro_param_string.output_path, micro_param) != RET_OK) {
MS_LOG(ERROR) << "Parse output_path val " << micro_param_string.output_path;
return RET_INPUT_PARAM_INVALID;
}
}
if (!micro_param_string.codegen_mode.empty()) {
if (ParseCodeGenMode(micro_param_string.codegen_mode, micro_param) != RET_OK) {
MS_LOG(ERROR) << "Parse codegen_mode val " << micro_param_string.codegen_mode;
return RET_INPUT_PARAM_INVALID;
}
}
if (!micro_param_string.support_parallel.empty()) {
if (ParseSupportParallel(micro_param_string.support_parallel, micro_param) != RET_OK) {
MS_LOG(ERROR) << "Parse support_parallel val " << micro_param_string.support_parallel;
return RET_INPUT_PARAM_INVALID;
}
}
if (!micro_param_string.debug_mode.empty()) {
if (ParseDebugMode(micro_param_string.debug_mode, micro_param) != RET_OK) {
MS_LOG(ERROR) << "Parse debug mode val " << micro_param_string.debug_mode;
return RET_INPUT_PARAM_INVALID;
}
}
if (!micro_param_string.enable_micro.empty()) {
if (ParseEnableMicro(micro_param_string.enable_micro, micro_param) != RET_OK) {
MS_LOG(ERROR) << "Parse enable micro val " << micro_param_string.enable_micro;
return RET_INPUT_PARAM_INVALID;
}
}
return RET_OK;
}
} // namespace lite
} // namespace mindspore
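Every boolean option above follows the same defensive pattern: default to false, then overwrite only when the string parses cleanly. `ConvertBool` comes from tools/common/string_util.h and its exact semantics are not shown in this diff, so the following is an assumed equivalent, not the real helper:

#include <string>

// Assumed stand-in for lite::ConvertBool: returns true when `str` is a valid
// boolean literal and writes the parsed value through `value`.
bool ConvertBoolSketch(const std::string &str, bool *value) {
  if (value == nullptr) return false;
  if (str == "true") { *value = true; return true; }
  if (str == "false") { *value = false; return true; }
  return false;  // leave *value untouched on malformed input
}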

View File

@ -0,0 +1,41 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef LITE_MICRO_PARAM_PARSER_H
#define LITE_MICRO_PARAM_PARSER_H
#include <string>
#include "tools/converter/config_parser/config_file_parser.h"
#include "tools/converter/micro/coder/config.h"
#include "include/errorcode.h"
namespace mindspore {
namespace lite {
class MicroParamParser {
public:
STATUS ParseMicroParam(const MicroParamString &micro_param_string, micro::MicroParam *micro_param);
private:
STATUS ParseEnableMicro(const std::string &enable_micro, micro::MicroParam *micro_param);
STATUS ParseTarget(const std::string &target, micro::MicroParam *micro_param);
STATUS ParseOutputPath(const std::string &output_path, micro::MicroParam *micro_param);
STATUS ParseCodeGenMode(const std::string &codegen_mode, micro::MicroParam *micro_param);
STATUS ParseSupportParallel(const std::string &support_parallel, micro::MicroParam *micro_param);
STATUS ParseDebugMode(const std::string &debug_mode, micro::MicroParam *micro_param);
};
} // namespace lite
} // namespace mindspore
#endif // LITE_MICRO_PARAM_PARSER_H

View File

@ -32,6 +32,7 @@
#include "tools/converter/parser/parser_utils.h" #include "tools/converter/parser/parser_utils.h"
#include "tools/converter/import/mindspore_importer.h" #include "tools/converter/import/mindspore_importer.h"
#include "nnacl/op_base.h" #include "nnacl/op_base.h"
#include "tools/converter/micro/coder/coder.h"
namespace mindspore { namespace mindspore {
namespace lite { namespace lite {
namespace { namespace {
@ -211,17 +212,31 @@ int RunConverter(int argc, const char **argv) {
status = RET_ERROR; status = RET_ERROR;
return status; return status;
} }
// save graph to file // save graph to file
meta_graph->version = Version(); meta_graph->version = Version();
status = MetaGraphSerializer::Save(*meta_graph, flags->outputFile);
if (status != RET_OK) { if (flags->microParam.enable_micro) {
delete meta_graph; status = micro::Coder::MicroSourceCodeGeneration(*meta_graph, flags->outputFile, flags->microParam.codegen_mode,
oss.clear(); flags->microParam.target, flags->microParam.support_parallel,
oss << "SAVE GRAPH FAILED:" << status << " " << GetErrorInfo(status); flags->microParam.debug_mode);
MS_LOG(ERROR) << oss.str(); if (status != RET_OK) {
std::cout << oss.str() << std::endl; delete meta_graph;
return status; oss.clear();
oss << "MICRO CODEGEN FAILED:" << status << " " << GetErrorInfo(status);
MS_LOG(ERROR) << oss.str();
std::cout << oss.str() << std::endl;
return status;
}
} else {
status = MetaGraphSerializer::Save(*meta_graph, flags->outputFile);
if (status != RET_OK) {
delete meta_graph;
oss.clear();
oss << "SAVE GRAPH FAILED:" << status << " " << GetErrorInfo(status);
MS_LOG(ERROR) << oss.str();
std::cout << oss.str() << std::endl;
return status;
}
} }
delete meta_graph; delete meta_graph;
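When enable_micro=true the converter skips the .ms flatbuffer entirely and hands the graph to the code generator. A hedged wrapper showing how the [micro_param] fields line up with the call's arguments; only the signature visible above is relied on, and the wrapper itself is illustrative:

#include <string>

#include "tools/converter/micro/coder/coder.h"

// Hedged wrapper: MicroParam fields map one-to-one onto the generator's
// arguments; the int return type is assumed from the RET_OK comparison above.
int GenerateMicroCode(const mindspore::schema::MetaGraphT &graph, const std::string &output_file,
                      const mindspore::lite::micro::MicroParam &param) {
  return mindspore::lite::micro::Coder::MicroSourceCodeGeneration(
      graph, output_file, param.codegen_mode, param.target, param.support_parallel, param.debug_mode);
}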

View File

@ -31,6 +31,7 @@
#include "tools/converter/config_parser/preprocess_parser.h" #include "tools/converter/config_parser/preprocess_parser.h"
#include "tools/converter/config_parser/quant_param_parser.h" #include "tools/converter/config_parser/quant_param_parser.h"
#include "tools/converter/config_parser/acl_option_param_parser.h" #include "tools/converter/config_parser/acl_option_param_parser.h"
#include "tools/converter/config_parser/micro_param_parser.h"
namespace mindspore { namespace mindspore {
namespace converter { namespace converter {
@ -299,6 +300,13 @@ int Flags::InitConfigFile() {
return ret; return ret;
} }
(void)CheckOfflineParallelConfig(this->configFile, &parallel_split_config_); (void)CheckOfflineParallelConfig(this->configFile, &parallel_split_config_);
lite::MicroParamParser micro_param_parser;
ret = micro_param_parser.ParseMicroParam(config_file_parser.GetMicroParamString(), &this->microParam);
if (ret != RET_OK) {
MS_LOG(ERROR) << "Parse micro param failed.";
return ret;
}
return RET_OK; return RET_OK;
} }

View File

@ -27,6 +27,7 @@
#include "tools/converter/preprocess/preprocess_param.h" #include "tools/converter/preprocess/preprocess_param.h"
#include "tools/converter/quantizer/quant_params.h" #include "tools/converter/quantizer/quant_params.h"
#include "tools/converter/adapter/acl/common/acl_types.h" #include "tools/converter/adapter/acl/common/acl_types.h"
#include "micro/coder/config.h"
namespace mindspore { namespace mindspore {
namespace lite { namespace lite {
@ -90,12 +91,14 @@ class Flags : public virtual mindspore::lite::FlagParser {
std::string graphInputFormatStr; std::string graphInputFormatStr;
std::string device; std::string device;
mindspore::Format graphInputFormat = mindspore::NHWC; mindspore::Format graphInputFormat = mindspore::NHWC;
bool enable_micro = false;
lite::quant::CommonQuantParam commonQuantParam; lite::quant::CommonQuantParam commonQuantParam;
lite::quant::MixedBitWeightQuantParam mixedBitWeightQuantParam; lite::quant::MixedBitWeightQuantParam mixedBitWeightQuantParam;
lite::quant::FullQuantParam fullQuantParam; lite::quant::FullQuantParam fullQuantParam;
lite::preprocess::DataPreProcessParam dataPreProcessParam; lite::preprocess::DataPreProcessParam dataPreProcessParam;
lite::acl::AclModelOptionCfg aclModelOptionCfgParam; lite::acl::AclModelOptionCfg aclModelOptionCfgParam;
lite::micro::MicroParam microParam;
}; };
bool CheckOfflineParallelConfig(const std::string &file, ParallelSplitConfig *parallel_split_config); bool CheckOfflineParallelConfig(const std::string &file, ParallelSplitConfig *parallel_split_config);

View File

@ -126,42 +126,9 @@ set(CODER_OPCODERS_SRC
#### custom #### custom
${MICRO_DIR}/coder/opcoders/custom/custom_coder.cc ${MICRO_DIR}/coder/opcoders/custom/custom_coder.cc
) )
set(LITE_SRC
${LITE_DIR}/src/cxx_api/tensor_utils.cc
${LITE_DIR}/src/cxx_api/types.cc
${LITE_DIR}/src/cxx_api/tensor/tensor_impl.cc
${LITE_DIR}/src/common/file_utils.cc
${LITE_DIR}/src/common/graph_util.cc
${LITE_DIR}/src/common/prim_util.cc
${LITE_DIR}/src/common/string_util.cc
${LITE_DIR}/src/common/lite_utils.cc
${LITE_DIR}/src/common/tensor_util.cc
${LITE_DIR}/src/runtime/infer_manager.cc
${LITE_DIR}/src/registry/register_kernel_interface.cc
${LITE_DIR}/src/registry/kernel_interface_registry.cc
${LITE_DIR}/src/registry/register_kernel.cc
${LITE_DIR}/src/registry/register_kernel_impl.cc
${LITE_DIR}/src/lite_model.cc
${LITE_DIR}/src/ms_tensor.cc
${LITE_DIR}/src/schema_tensor_wrapper.cc
${LITE_DIR}/src/tensorlist.cc
${LITE_DIR}/src/tensor.cc
${LITE_DIR}/src/tensor_category.cc
${LITE_DIR}/src/weight_decoder.cc
${LITE_DIR}/src/huffman_decode.cc
${LITE_DIR}/src/common/log.cc
${LITE_DIR}/src/common/utils.cc
${LITE_DIR}/../core/utils/status.cc
### tools
${LITE_DIR}/tools/common/flag_parser.cc
${LITE_DIR}/tools/converter/quantizer/fse_decoder.cc
${LITE_DIR}/tools/converter/quantizer/fse_bit_stream.cc
)
if(MSLITE_ENABLE_SERVER_INFERENCE) if(MSLITE_ENABLE_SERVER_INFERENCE)
set(LITE_SRC set(LITE_SRC
${LITE_SRC} ${LITE_DIR}/src/pack_weight_manager.cc
${LITE_DIR}/src/pack_weight_manager.cc
) )
endif() endif()

View File

@ -1,5 +1,5 @@
include_directories(${LITE_DIR}/micro/coder/) include_directories(${MICRO_DIR}/coder/)
set(WRAPPER_DIR ${LITE_DIR}/micro/coder/wrapper/) set(WRAPPER_DIR ${MICRO_DIR}/coder/wrapper/)
set(WRAPPER_SRC set(WRAPPER_SRC
${WRAPPER_DIR}/base/common_wrapper.c ${WRAPPER_DIR}/base/common_wrapper.c

View File

@ -1,6 +1,6 @@
set(3RD_DIR ${TOP_DIR}/third_party) set(3RD_DIR ${TOP_DIR}/third_party)
set(LITE_DIR ${TOP_DIR}/mindspore/lite) set(LITE_DIR ${TOP_DIR}/mindspore/lite)
set(MICRO_DIR ${LITE_DIR}/micro) set(MICRO_DIR ${CMAKE_CURRENT_SOURCE_DIR}/../)
if(MSLITE_ENABLE_CONVERTER AND NOT MACHINE_LINUX_ARM64) if(MSLITE_ENABLE_CONVERTER AND NOT MACHINE_LINUX_ARM64)
set(CODEGEN_PATH ${CMAKE_BINARY_DIR}/micro/coder/codegen) set(CODEGEN_PATH ${CMAKE_BINARY_DIR}/micro/coder/codegen)
@ -10,14 +10,13 @@ endif()
#include 3rd #include 3rd
include_directories(${3RD_DIR}) include_directories(${3RD_DIR})
include_directories(${3RD_DIR}/flatbuffers/include)
#include ms #include ms
include_directories(${MICRO_DIR})
include_directories(${TOP_DIR}/) include_directories(${TOP_DIR}/)
include_directories(${TOP_DIR}/mindspore/core/) include_directories(${TOP_DIR}/mindspore/core/)
include_directories(${NNACL_DIR}/../) include_directories(${NNACL_DIR}/../)
include_directories(${LITE_DIR}) include_directories(${LITE_DIR})
include_directories(${MICRO_DIR})
#include coder #include coder
if(NOT MSVC) if(NOT MSVC)
@ -26,15 +25,10 @@ if(NOT MSVC)
add_subdirectory(wrapper) add_subdirectory(wrapper)
endif() endif()
if(MSLITE_ENABLE_CONVERTER AND NOT MACHINE_LINUX_ARM64) if(MSLITE_ENABLE_CONVERTER)
include(${MICRO_DIR}/cmake/file_list.cmake) include(${MICRO_DIR}/cmake/file_list.cmake)
set_property(SOURCE ${FILE_SET} PROPERTY COMPILE_OPTIONS -Wno-error=stringop-overflow=) set_property(SOURCE ${FILE_SET} PROPERTY COMPILE_OPTIONS -Wno-error=stringop-overflow=)
add_executable(codegen main.cc ${FILE_SET}) add_library(coder_mid OBJECT ${FILE_SET} utils/common.h)
add_dependencies(codegen fbs_src) add_dependencies(coder_mid fbs_src fbs_inner_src)
add_dependencies(codegen fbs_inner_src) target_link_libraries(coder_mid PRIVATE ${SECUREC_LIBRARY} wrapper)
target_link_libraries(codegen PRIVATE ${SECUREC_LIBRARY} wrapper_mid nnacl_mid cpu_ops_mid)
if(ENABLE_MODEL_OBF)
target_link_libraries(codegen PRIVATE
${OBF_LIB_DIR}/libmsdeobfuscator-lite.so)
endif()
endif() endif()

View File

@ -14,11 +14,11 @@
* limitations under the License. * limitations under the License.
*/ */
#include "coder/allocator/allocator.h" #include "tools/converter/micro/coder/allocator/allocator.h"
#include <string> #include <string>
#include <map> #include <map>
#include "coder/allocator/memory_manager.h" #include "tools/converter/micro/coder/allocator/memory_manager.h"
#include "coder/opcoders/op_coder.h" #include "tools/converter/micro/coder/opcoders/op_coder.h"
namespace mindspore::lite::micro { namespace mindspore::lite::micro {
namespace { namespace {

View File

@ -21,12 +21,12 @@
#include <memory> #include <memory>
#include <utility> #include <utility>
#include <string> #include <string>
#include "coder/allocator/memory_manager.h" #include "tools/converter/micro/coder/allocator/memory_manager.h"
#include "coder/log.h" #include "tools/converter/micro/coder/log.h"
#include "coder/utils/type_cast.h" #include "tools/converter/micro/coder/utils/type_cast.h"
#include "src/tensor.h" #include "src/tensor.h"
#include "src/common/log_adapter.h" #include "src/common/log_adapter.h"
#include "coder/generator/component/component.h" #include "tools/converter/micro/coder/generator/component/component.h"
namespace mindspore::lite::micro { namespace mindspore::lite::micro {
/* /*

Some files were not shown because too many files have changed in this diff.