forked from mindspore-Ecosystem/mindspore
!28562 [MS][LITE] fix micro mnist_x86 example
Merge pull request !28562 from zhengjun10/fix
This commit is contained in:
commit
dfabc2b349
|
@ -197,7 +197,7 @@ int MatMulBaseInt8Coder::DoCode(CoderContext *const context) {
|
|||
if (bias_ptr_) {
|
||||
init_code.CodeMallocExpression(bias_ptr_, bias_ptr_size_);
|
||||
init_code.CodeFunction("memset", bias_ptr_, 0, bias_ptr_size_);
|
||||
init_code.CodeFunction("memcpy", bias_ptr_, bias_tensor_, bias_ptr_size_);
|
||||
init_code.CodeFunction("memcpy", bias_ptr_, bias_tensor_, bias_tensor_->Size());
|
||||
}
|
||||
if (param_->b_const_) {
|
||||
init_code.CodeMallocExpression(weight_bias_sums_, weight_bias_sums_size_);
|
||||
|
|
|
@ -46,13 +46,17 @@ else()
|
|||
string(REPLACE "-g" "" CMAKE_C_FLAGS "${CMAKE_C_FLAGS}")
|
||||
string(REPLACE "-g" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
|
||||
endif()
|
||||
string(APPEND CMAKE_EXE_LINKER_FLAGS " -Wl,--gc-sections")
|
||||
|
||||
add_subdirectory(src)
|
||||
include_directories(${CMAKE_CURRENT_SOURCE_DIR})
|
||||
include_directories(${HEADER_PATH})
|
||||
include_directories(${HEADER_PATH}/include)
|
||||
set(SRC_FILES
|
||||
benchmark/benchmark.cc
|
||||
benchmark/calib_output.cc
|
||||
benchmark/load_input.c
|
||||
)
|
||||
add_executable(benchmark ${SRC_FILES})
|
||||
target_link_libraries(benchmark net -lm -pthread)
|
||||
|
||||
|
|
|
@ -19,9 +19,9 @@
|
|||
#include <string>
|
||||
#include <cstring>
|
||||
|
||||
#include "include/lite_session.h"
|
||||
#include "include/ms_tensor.h"
|
||||
#include "include/errorcode.h"
|
||||
#include "lite_session.h"
|
||||
#include "ms_tensor.h"
|
||||
#include "errorcode.h"
|
||||
|
||||
#include "load_input.h"
|
||||
#include "calib_output.h"
|
||||
|
@ -66,7 +66,7 @@ void PrintData(void *data, size_t data_number) {
|
|||
void TensorToString(tensor::MSTensor *tensor) {
|
||||
printf("name: %s, ", tensor->tensor_name().c_str());
|
||||
printf("DataType: %d, ", tensor->data_type());
|
||||
printf("Elements: %d, ", tensor->ElementsNum());
|
||||
printf("Elements: %d, ", static_cast<int>(tensor->ElementsNum()));
|
||||
printf("Shape: [");
|
||||
for (auto &dim : tensor->shape()) {
|
||||
printf("%d ", dim);
|
||||
|
@ -120,8 +120,19 @@ int main(int argc, const char **argv) {
|
|||
return lite::RET_ERROR;
|
||||
}
|
||||
context->thread_num_ = atoi(argv[5]);
|
||||
if (context->thread_num_ < 1) {
|
||||
printf("Thread number error! It should be greater than 0\n");
|
||||
return lite::RET_ERROR;
|
||||
}
|
||||
context->device_list_.resize(1);
|
||||
context->device_list_[0] = {lite::DT_CPU, {{false, static_cast<lite::CpuBindMode>(atoi(argv[6]))}}};
|
||||
context->device_list_[0].device_type_ = lite::DT_CPU;
|
||||
context->device_list_[0].device_info_.cpu_device_info_.enable_float16_ = false;
|
||||
lite::CpuBindMode bind_mode = static_cast<lite::CpuBindMode>(atoi(argv[6]));
|
||||
if (bind_mode < lite::NO_BIND || bind_mode > lite::MID_CPU) {
|
||||
printf("Thread bind mode error! 0: No bind, 1: Bind hign cpu, 2: Bind mid cpu.\n");
|
||||
return lite::RET_ERROR;
|
||||
}
|
||||
context->device_list_[0].device_info_.cpu_device_info_.cpu_bind_mode_ = bind_mode;
|
||||
printf("context: ThreadNum: %d, BindMode: %d\n", context->thread_num_,
|
||||
context->device_list_[0].device_info_.cpu_device_info_.cpu_bind_mode_);
|
||||
}
|
||||
|
@ -143,6 +154,7 @@ int main(int argc, const char **argv) {
|
|||
}
|
||||
int ret = ReadInputsFile(const_cast<char *>(argv[1]), inputs_binbuf, inputs_size, inputs_num);
|
||||
if (ret != lite::RET_OK) {
|
||||
delete session;
|
||||
return lite::RET_ERROR;
|
||||
}
|
||||
for (size_t i = 0; i < inputs_num; ++i) {
|
||||
|
@ -157,6 +169,7 @@ int main(int argc, const char **argv) {
|
|||
for (int i = 0; i < loop_count; ++i) {
|
||||
ret = session->RunGraph();
|
||||
if (ret != lite::RET_OK) {
|
||||
delete session;
|
||||
return lite::RET_ERROR;
|
||||
}
|
||||
}
|
||||
|
@ -166,6 +179,7 @@ int main(int argc, const char **argv) {
|
|||
}
|
||||
ret = session->RunGraph();
|
||||
if (ret != lite::RET_OK) {
|
||||
delete session;
|
||||
return lite::RET_ERROR;
|
||||
}
|
||||
|
||||
|
@ -180,14 +194,19 @@ int main(int argc, const char **argv) {
|
|||
if (argc >= 5) {
|
||||
lite::Calibrator *calibrator = new (std::nothrow) lite::Calibrator();
|
||||
if (calibrator == nullptr) {
|
||||
delete session;
|
||||
return lite::RET_NULL_PTR;
|
||||
}
|
||||
ret = calibrator->ReadCalibData(argv[4]);
|
||||
if (ret != lite::RET_OK) {
|
||||
delete session;
|
||||
delete calibrator;
|
||||
return lite::RET_ERROR;
|
||||
}
|
||||
ret = calibrator->CompareOutputs(outputs);
|
||||
if (ret != lite::RET_OK) {
|
||||
delete session;
|
||||
delete calibrator;
|
||||
return lite::RET_ERROR;
|
||||
}
|
||||
delete calibrator;
|
||||
|
@ -205,3 +224,4 @@ int main(int argc, const char **argv) {
|
|||
}
|
||||
return lite::RET_OK;
|
||||
}
|
||||
|
||||
|
|
|
@ -106,8 +106,7 @@ int Calibrator::CompareOutputs(const Vector<tensor::MSTensor *> &outputs) const
|
|||
CalibTensor *calib = calib_outputs_[i];
|
||||
MS_ERROR_IF_NULL(calib);
|
||||
if (output->tensor_name() != calib->tensor_name()) {
|
||||
printf("error, output tensor name is not equal to calib\n");
|
||||
return RET_ERROR;
|
||||
printf("warning, output tensor name is not equal to calib\n");
|
||||
}
|
||||
if (output->ElementsNum() != calib->ElementsNum()) {
|
||||
printf("error, output elements num is not equal to calib\n");
|
||||
|
|
|
@ -18,9 +18,9 @@
|
|||
#ifndef MINDSPORE_LITE_MICRO_CALIB_OUTPUT_H_
|
||||
#define MINDSPORE_LITE_MICRO_CALIB_OUTPUT_H_
|
||||
|
||||
#include "include/lite_utils.h"
|
||||
#include "include/ms_tensor.h"
|
||||
#include "include/errorcode.h"
|
||||
#include "lite_utils.h"
|
||||
#include "ms_tensor.h"
|
||||
#include "errorcode.h"
|
||||
|
||||
namespace mindspore {
|
||||
namespace lite {
|
||||
|
|
|
@ -26,7 +26,7 @@ void *ReadInputData(const char *real_input_path, int *size) {
|
|||
}
|
||||
if (strstr(real_input_path, ".bin") || strstr(real_input_path, ".net")) {
|
||||
FILE *file;
|
||||
file = fopen(real_input_path, "rb+");
|
||||
file = fopen(real_input_path, "rb");
|
||||
if (!file) {
|
||||
printf("Can't find %s\n", real_input_path);
|
||||
return NULL;
|
||||
|
|
|
@ -16,13 +16,18 @@
|
|||
set -e
|
||||
|
||||
GEN=OFF
|
||||
while getopts 'g' OPT
|
||||
TARBALL=""
|
||||
while getopts 'r:g:' OPT
|
||||
do
|
||||
case $OPT in
|
||||
case "${OPT}" in
|
||||
g)
|
||||
GEN=ON;;
|
||||
GEN=$OPTARG
|
||||
;;
|
||||
r)
|
||||
TARBALL=$OPTARG
|
||||
;;
|
||||
?)
|
||||
echo "Usage: add -g or left it empty"
|
||||
echo "Usage: add -g on , -r specific release.tar.gz"
|
||||
esac
|
||||
done
|
||||
|
||||
|
@ -44,20 +49,6 @@ get_version() {
|
|||
VERSION_STR=${VERSION_MAJOR}.${VERSION_MINOR}.${VERSION_REVISION}
|
||||
}
|
||||
|
||||
download_inference() {
|
||||
MINDSPORE_FILE_NAME="mindspore-lite-${VERSION_STR}-linux-x64"
|
||||
local MINDSPORE_FILE="${MINDSPORE_FILE_NAME}.tar.gz"
|
||||
local MINDSPORE_LITE_DOWNLOAD_URL="https://ms-release.obs.cn-north-4.myhuaweicloud.com/${VERSION_STR}/MindSpore/lite/release/linux/${MINDSPORE_FILE}"
|
||||
|
||||
if [ ! -e ${BASEPATH}/build/${MINDSPORE_FILE} ]; then
|
||||
wget -c -O ${BASEPATH}/build/${MINDSPORE_FILE} --no-check-certificate ${MINDSPORE_LITE_DOWNLOAD_URL}
|
||||
fi
|
||||
|
||||
tar xzvf ${BASEPATH}/build/${MINDSPORE_FILE} -C ${BASEPATH}/build/ || exit 1
|
||||
rm ${BASEPATH}/build/${MINDSPORE_FILE} || exit 1
|
||||
PKG_PATH=${BASEPATH}/build/${MINDSPORE_FILE_NAME}
|
||||
}
|
||||
|
||||
download_mnist() {
|
||||
local MNIST_DOWNLOAD_URL=https://download.mindspore.cn/model_zoo/official/lite/mnist_lite/${MNIST_FILE}
|
||||
|
||||
|
@ -74,9 +65,28 @@ gen_mnist() {
|
|||
mkdir -p ${BASEPATH}/build
|
||||
|
||||
get_version
|
||||
download_inference
|
||||
MINDSPORE_FILE_NAME="mindspore-lite-${VERSION_STR}-linux-x64"
|
||||
MINDSPORE_FILE="${MINDSPORE_FILE_NAME}.tar.gz"
|
||||
echo "tar ball is: ${TARBALL}"
|
||||
if [ -n "$TARBALL" ]; then
|
||||
echo "cp file"
|
||||
cp ${TARBALL} ${BASEPATH}/build/${MINDSPORE_FILE}
|
||||
fi
|
||||
|
||||
if [[ "${GEN}" == "ON" ]]; then
|
||||
download_inference() {
|
||||
local MINDSPORE_LITE_DOWNLOAD_URL="https://ms-release.obs.cn-north-4.myhuaweicloud.com/${VERSION_STR}/MindSpore/lite/release/linux/${MINDSPORE_FILE}"
|
||||
wget -c -O ${BASEPATH}/build/${MINDSPORE_FILE} --no-check-certificate ${MINDSPORE_LITE_DOWNLOAD_URL}
|
||||
}
|
||||
if [ ! -e ${BASEPATH}/build/${MINDSPORE_FILE} ]; then
|
||||
echo "need down inference"
|
||||
download_inference
|
||||
fi
|
||||
|
||||
tar xzvf ${BASEPATH}/build/${MINDSPORE_FILE} -C ${BASEPATH}/build/ || exit 1
|
||||
#rm ${BASEPATH}/build/${MINDSPORE_FILE} || exit 1
|
||||
PKG_PATH=${BASEPATH}/build/${MINDSPORE_FILE_NAME}
|
||||
|
||||
if [[ "${GEN}" == "ON" ]] || [[ "${GEN}" == "on" ]]; then
|
||||
echo "downloading mnist.ms!"
|
||||
download_mnist
|
||||
echo "generating mnist"
|
||||
|
|
|
@ -13,13 +13,14 @@ set(WRAPPER_LIB ${PKG_PATH}/tools/codegen/lib/libwrapper.a)
|
|||
set(OP_HEADER_PATH ${PKG_PATH}/tools/codegen/include)
|
||||
set(HEADER_PATH ${PKG_PATH}/runtime)
|
||||
|
||||
message("operator lib path: ${OP_LIB}")
|
||||
message("operator header path: ${OP_HEADER_PATH}")
|
||||
message(STATUS "operator lib path: ${OP_LIB}")
|
||||
message(STATUS "operator header path: ${OP_HEADER_PATH}")
|
||||
|
||||
add_compile_definitions(NOT_USE_STL)
|
||||
|
||||
include_directories(${OP_HEADER_PATH})
|
||||
include_directories(${HEADER_PATH})
|
||||
include_directories(${HEADER_PATH}/include)
|
||||
|
||||
if(NOT PLATFORM_ARM32 AND NOT PLATFORM_ARM64)
|
||||
include_directories(${PKG_PATH}/tools/codegen/third_party/include)
|
||||
|
@ -97,3 +98,4 @@ function(create_library)
|
|||
endfunction(create_library)
|
||||
string(CONCAT library_name "lib" net ".a")
|
||||
create_library()
|
||||
|
||||
|
|
|
@ -17,7 +17,7 @@
|
|||
#ifndef MINDSPORE_LITE_LIBRARY_SOURCE_MODEL_H_
|
||||
#define MINDSPORE_LITE_LIBRARY_SOURCE_MODEL_H_
|
||||
|
||||
#include "include/model.h"
|
||||
#include "model.h"
|
||||
#include "session.h"
|
||||
#include <new>
|
||||
#include <string.h>
|
||||
|
@ -62,3 +62,4 @@ Model *Model::Import(const char *model_buf, size_t size) {
|
|||
}
|
||||
} // namespace mindspore::lite
|
||||
#endif // MINDSPORE_LITE_LIBRARY_SOURCE_MODEL_H_
|
||||
|
||||
|
|
|
@ -37,7 +37,7 @@ int CopyOutputsData(void **outputs, int num) {
|
|||
if (num != 1) {
|
||||
return RET_ERROR;
|
||||
}
|
||||
memcpy(outputs[0], g_Buffer+32, 40);
|
||||
memcpy(outputs[0], g_Buffer + 32, 40);
|
||||
return RET_OK;
|
||||
}
|
||||
|
||||
|
@ -62,11 +62,11 @@ void FreeResource() {
|
|||
}
|
||||
void Inference() {
|
||||
{
|
||||
memset((int16_t *)(g_Buffer+10144), 0, 2048);
|
||||
memset((int16_t *)(g_Buffer+12192), 0, 256);
|
||||
memset((int *)(g_Buffer+12448), 0, 6144);
|
||||
memset((int8_t *)(g_Buffer+18592), 0, 8112);
|
||||
memset((int16_t *)(g_Buffer+26704), 0, 12544);
|
||||
memset((int16_t *)(g_Buffer + 10144), 0, 2048);
|
||||
memset((int16_t *)(g_Buffer + 12192), 0, 256);
|
||||
memset((int *)(g_Buffer + 12448), 0, 6144);
|
||||
memset((int8_t *)(g_Buffer + 18592), 0, 8112);
|
||||
memset((int16_t *)(g_Buffer + 26704), 0, 12544);
|
||||
QuantArg conv_param__quant_arg_in[1] = {{0.003921568859368562698, -128}};
|
||||
QuantArg conv_param__quant_arg_w[12] = {{0.005689438898116350174, 0}, {0.006241692230105400085, 0}, {0.007301395758986473083, 0}, {0.005148916970938444138, 0}, {0.005132303573191165924, 0}, {0.004976313561201095581, 0}, {0.00564815988764166832, 0}, {0.002269793068990111351, 0}, {0.0030086529441177845, 0}, {0.005234404932707548141, 0}, {0.007580270525068044662, 0}, {0.004589735530316829681, 0}};
|
||||
QuantArg conv_param__quant_arg_out[1] = {{0.01811622083187103271, 17}};
|
||||
|
@ -78,24 +78,24 @@ int conv_param__out_act_min[1] = {-128};
|
|||
int conv_param__out_act_max[1] = {127};
|
||||
ConvQuantArg conv_param__conv_quant_arg = {(RoundingMode)(2), 2, conv_param__quant_arg_in, conv_param__quant_arg_w, conv_param__quant_arg_out, conv_param__real_multiplier, conv_param__left_shift, conv_param__right_shift, conv_param__quant_multiplier, conv_param__out_act_min, conv_param__out_act_max, 1, 12, 1, 2};
|
||||
int thread_num = MSMIN(g_thread_num, 26);
|
||||
ConvParameter conv_param_ = {{ "", false, 35, g_thread_num, 0}, conv_param__conv_quant_arg, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 28, 28, 1, 1, 26, 26, 12, thread_num, 0, 0, (PadMode)(2), (ActType)(0), 0, 0, 0};
|
||||
PackInputToC8Int8((int8_t *)(g_Input0), (int16_t *)(g_Buffer+26704), &conv_param_);
|
||||
Conv3x3Int8((int16_t *)(g_Buffer+26704), g_Weight10, g_Weight11, (int8_t *)(g_Buffer+0), (int16_t *)(g_Buffer+10144), (int16_t *)(g_Buffer+12192), (int *)(g_Buffer+12448), (int8_t *)(g_Buffer+18592), 0, &conv_param_);
|
||||
PackNC4HW4ToNHWCInt8((int8_t *)(g_Buffer+18592), (int8_t *)(g_Buffer+0), 1, 676, 12);
|
||||
ConvParameter conv_param_ = {{ "", 35, g_thread_num, 0}, conv_param__conv_quant_arg, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 28, 28, 1, 1, 26, 26, 12, thread_num, 0, 0, (PadMode)(2), (ActType)(0), 0, 0, 0};
|
||||
PackInputToC8Int8((int8_t *)(g_Input0), (int16_t *)(g_Buffer + 26704), &conv_param_);
|
||||
Conv3x3Int8((int16_t *)(g_Buffer + 26704), g_Weight10, g_Weight11, (int8_t *)(g_Buffer + 0), (int16_t *)(g_Buffer + 10144), (int16_t *)(g_Buffer + 12192), (int *)(g_Buffer + 12448), (int8_t *)(g_Buffer + 18592), 0, &conv_param_);
|
||||
PackNC4HW4ToNHWCInt8((int8_t *)(g_Buffer + 18592), (int8_t *)(g_Buffer + 0), 1, 676, 12);
|
||||
}
|
||||
{
|
||||
static QuantArg pooling_parameter_quant_in = {0.01811622083187103271, 17};
|
||||
static QuantArg pooling_parameter_quant_out = {0.01811622083187103271, 17};
|
||||
static QuantArg *pooling_parameter_quant[2] = { &pooling_parameter_quant_in, &pooling_parameter_quant_out};
|
||||
const PoolingParameter pooling_parameter = {{ "", false, 92, g_thread_num, 0}, (PoolMode)(1), (RoundMode)(2), (PadMode)(2), (ActType)(0), 0, false, 2, 2, 2, 2, 26, 26, 1, 12, 13, 13, 1, 12, 0, 0, 0, 0, 0, pooling_parameter_quant, false};
|
||||
MaxPoolingInt8((int8_t *)(g_Buffer+0), (int8_t *)(g_Buffer+8112), (PoolingParameter *)&pooling_parameter, 0);
|
||||
const PoolingParameter pooling_parameter = {{ "", 92, g_thread_num, 0}, (PoolMode)(1), (RoundMode)(2), (PadMode)(2), (ActType)(0), 0, false, 2, 2, 2, 2, 26, 26, 1, 12, 13, 13, 1, 12, 0, 0, 0, 0, 0, pooling_parameter_quant, false};
|
||||
MaxPoolingInt8((int8_t *)(g_Buffer + 0), (int8_t *)(g_Buffer + 8112), (PoolingParameter *)&pooling_parameter, 0);
|
||||
}
|
||||
{
|
||||
memset((int16_t *)(g_Buffer+10144), 0, 4096);
|
||||
memset((int16_t *)(g_Buffer+14240), 0, 256);
|
||||
memset((int *)(g_Buffer+14496), 0, 6144);
|
||||
memset((int8_t *)(g_Buffer+20640), 0, 1452);
|
||||
memset((int16_t *)(g_Buffer+22092), 0, 5408);
|
||||
memset((int16_t *)(g_Buffer + 10144), 0, 4096);
|
||||
memset((int16_t *)(g_Buffer + 14240), 0, 256);
|
||||
memset((int *)(g_Buffer + 14496), 0, 6144);
|
||||
memset((int8_t *)(g_Buffer + 20640), 0, 1452);
|
||||
memset((int16_t *)(g_Buffer + 22092), 0, 5408);
|
||||
QuantArg conv_param__quant_arg_in[1] = {{0.01811622083187103271, 17}};
|
||||
QuantArg conv_param__quant_arg_w[12] = {{0.006381968967616558075, 0}, {0.005092236679047346115, 0}, {0.004954888485372066498, 0}, {0.007594361435621976852, 0}, {0.006317862775176763535, 0}, {0.004739056341350078583, 0}, {0.004733041394501924515, 0}, {0.005125139374285936356, 0}, {0.005773660261183977127, 0}, {0.007067613303661346436, 0}, {0.00728381425142288208, 0}, {0.004714466165751218796, 0}};
|
||||
QuantArg conv_param__quant_arg_out[1] = {{0.118615470826625824, 31}};
|
||||
|
@ -107,26 +107,26 @@ int conv_param__out_act_min[1] = {-128};
|
|||
int conv_param__out_act_max[1] = {127};
|
||||
ConvQuantArg conv_param__conv_quant_arg = {(RoundingMode)(1), 2, conv_param__quant_arg_in, conv_param__quant_arg_w, conv_param__quant_arg_out, conv_param__real_multiplier, conv_param__left_shift, conv_param__right_shift, conv_param__quant_multiplier, conv_param__out_act_min, conv_param__out_act_max, 1, 12, 1, 2};
|
||||
int thread_num = MSMIN(g_thread_num, 11);
|
||||
ConvParameter conv_param_ = {{ "", false, 35, g_thread_num, 0}, conv_param__conv_quant_arg, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 13, 13, 12, 1, 11, 11, 12, thread_num, 0, 0, (PadMode)(2), (ActType)(0), 0, 0, 0};
|
||||
PackInputToC8Int8((int8_t *)(g_Buffer+8112), (int16_t *)(g_Buffer+22092), &conv_param_);
|
||||
Conv3x3Int8((int16_t *)(g_Buffer+22092), g_Weight12, g_Weight13, (int8_t *)(g_Buffer+0), (int16_t *)(g_Buffer+10144), (int16_t *)(g_Buffer+14240), (int *)(g_Buffer+14496), (int8_t *)(g_Buffer+20640), 0, &conv_param_);
|
||||
PackNC4HW4ToNHWCInt8((int8_t *)(g_Buffer+20640), (int8_t *)(g_Buffer+0), 1, 121, 12);
|
||||
ConvParameter conv_param_ = {{ "", 35, g_thread_num, 0}, conv_param__conv_quant_arg, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 13, 13, 12, 1, 11, 11, 12, thread_num, 0, 0, (PadMode)(2), (ActType)(0), 0, 0, 0};
|
||||
PackInputToC8Int8((int8_t *)(g_Buffer + 8112), (int16_t *)(g_Buffer + 22092), &conv_param_);
|
||||
Conv3x3Int8((int16_t *)(g_Buffer + 22092), g_Weight12, g_Weight13, (int8_t *)(g_Buffer + 0), (int16_t *)(g_Buffer + 10144), (int16_t *)(g_Buffer + 14240), (int *)(g_Buffer + 14496), (int8_t *)(g_Buffer + 20640), 0, &conv_param_);
|
||||
PackNC4HW4ToNHWCInt8((int8_t *)(g_Buffer + 20640), (int8_t *)(g_Buffer + 0), 1, 121, 12);
|
||||
}
|
||||
{
|
||||
static QuantArg pooling_parameter_quant_in = {0.118615470826625824, 31};
|
||||
static QuantArg pooling_parameter_quant_out = {0.118615470826625824, 31};
|
||||
static QuantArg *pooling_parameter_quant[2] = { &pooling_parameter_quant_in, &pooling_parameter_quant_out};
|
||||
const PoolingParameter pooling_parameter = {{ "", false, 92, g_thread_num, 0}, (PoolMode)(1), (RoundMode)(2), (PadMode)(2), (ActType)(0), 0, false, 2, 2, 2, 2, 11, 11, 1, 12, 5, 5, 1, 12, 0, 0, 0, 0, 0, pooling_parameter_quant, false};
|
||||
MaxPoolingInt8((int8_t *)(g_Buffer+0), (int8_t *)(g_Buffer+1456), (PoolingParameter *)&pooling_parameter, 0);
|
||||
const PoolingParameter pooling_parameter = {{ "", 92, g_thread_num, 0}, (PoolMode)(1), (RoundMode)(2), (PadMode)(2), (ActType)(0), 0, false, 2, 2, 2, 2, 11, 11, 1, 12, 5, 5, 1, 12, 0, 0, 0, 0, 0, pooling_parameter_quant, false};
|
||||
MaxPoolingInt8((int8_t *)(g_Buffer + 0), (int8_t *)(g_Buffer + 1456), (PoolingParameter *)&pooling_parameter, 0);
|
||||
}
|
||||
{
|
||||
const ReshapeQuantArg reshape_quant_arg = {{0.118615470826625824, 31}, {0.118615470826625824, 31}, -128, 127};
|
||||
Int8Reshape((int8_t *)(g_Buffer+1456), (int8_t *)(g_Buffer+0), 300, reshape_quant_arg);
|
||||
Int8Reshape((int8_t *)(g_Buffer + 1456), (int8_t *)(g_Buffer + 0), 300, reshape_quant_arg);
|
||||
}
|
||||
{
|
||||
int32_t tmp_weight_zp = 0;
|
||||
RowMajor2Row16x4MajorInt8((int8_t *)(g_Buffer+0)+0, (int8_t *)(g_Buffer+10144), 1, 300);
|
||||
CalcInputSums((int8_t *)(g_Buffer+0)+0, 1, 300, tmp_weight_zp, (int *)(g_Buffer+11360), RowMajor);
|
||||
RowMajor2Row16x4MajorInt8((int8_t *)(g_Buffer + 0)+0, (int8_t *)(g_Buffer + 10144), 1, 300);
|
||||
CalcInputSums((int8_t *)(g_Buffer + 0)+0, 1, 300, tmp_weight_zp, (int *)(g_Buffer + 11360), RowMajor);
|
||||
float filter_scale[1] = {0.007667620200663805008};
|
||||
int filter_zp[1] = {0};
|
||||
int left_shift[1] = {0};
|
||||
|
@ -137,12 +137,12 @@ int32_t *cur_left = matmul_quant_parameter.left_shift_;
|
|||
int32_t *cur_right = matmul_quant_parameter.right_shift_;
|
||||
int32_t *cur_mul = matmul_quant_parameter.quant_multiplier_ ;
|
||||
int32_t *cur_zp = matmul_quant_parameter.filter_zp_ ;
|
||||
MatmulInt8Opt((int8_t *)(g_Buffer+10144), g_Weight15+0 + 0, (int8_t *)(g_Buffer+304)+0+0, 1, 20, 304, (int *)(g_Buffer+11360), g_Weight16+0, -128, 127, 11, cur_mul, cur_left, cur_right, 20, false, cur_zp);
|
||||
MatmulInt8Opt((int8_t *)(g_Buffer + 10144), g_Weight15+0 + 0, (int8_t *)(g_Buffer + 304)+0+0, 1, 20, 304, (int *)(g_Buffer + 11360), g_Weight16+0, -128, 127, 11, cur_mul, cur_left, cur_right, 20, false, cur_zp);
|
||||
}
|
||||
{
|
||||
int32_t tmp_weight_zp = 0;
|
||||
RowMajor2Row16x4MajorInt8((int8_t *)(g_Buffer+304)+0, (int8_t *)(g_Buffer+10144), 1, 20);
|
||||
CalcInputSums((int8_t *)(g_Buffer+304)+0, 1, 20, tmp_weight_zp, (int *)(g_Buffer+10272), RowMajor);
|
||||
RowMajor2Row16x4MajorInt8((int8_t *)(g_Buffer + 304)+0, (int8_t *)(g_Buffer + 10144), 1, 20);
|
||||
CalcInputSums((int8_t *)(g_Buffer + 304)+0, 1, 20, tmp_weight_zp, (int *)(g_Buffer + 10272), RowMajor);
|
||||
float filter_scale[1] = {0.006908571347594261169};
|
||||
int filter_zp[1] = {0};
|
||||
int left_shift[1] = {0};
|
||||
|
@ -153,16 +153,16 @@ int32_t *cur_left = matmul_quant_parameter.left_shift_;
|
|||
int32_t *cur_right = matmul_quant_parameter.right_shift_;
|
||||
int32_t *cur_mul = matmul_quant_parameter.quant_multiplier_ ;
|
||||
int32_t *cur_zp = matmul_quant_parameter.filter_zp_ ;
|
||||
MatmulInt8Opt((int8_t *)(g_Buffer+10144), g_Weight18+0 + 0, (int8_t *)(g_Buffer+0)+0+0, 1, 10, 32, (int *)(g_Buffer+10272), g_Weight19+0, -128, 127, -20, cur_mul, cur_left, cur_right, 10, false, cur_zp);
|
||||
MatmulInt8Opt((int8_t *)(g_Buffer + 10144), g_Weight18+0 + 0, (int8_t *)(g_Buffer + 0)+0+0, 1, 10, 32, (int *)(g_Buffer + 10272), g_Weight19+0, -128, 127, -20, cur_mul, cur_left, cur_right, 10, false, cur_zp);
|
||||
}
|
||||
{
|
||||
const SoftmaxQuantArg quant_args = {{1.073398709297180176, 20}, {0.00390625, -128}, -128, 127, 1152553088, 27, 27};
|
||||
const SoftmaxParameter softmax_parameter = {{ "", false, 138, g_thread_num, 0}, 1, {1, 10}, 10, 2};
|
||||
memset((int *)(g_Buffer+10144), 0, 40);
|
||||
memset((int *)(g_Buffer+10184), 0, 40);
|
||||
SoftmaxInt8((int8_t *)(g_Buffer+0), (int8_t *)(g_Buffer+16), 1, (int *)(g_Buffer+10144), (int *)(g_Buffer+10184), quant_args, (SoftmaxParameter *)&softmax_parameter);
|
||||
SoftmaxQuantArg quant_params = {{1.073398709297180176, 20}, {0.00390625, -128}, -128, 127, 1152553088, 27, 27};
|
||||
const SoftmaxParameter softmax_parameter = {{ "", 138, g_thread_num, 0}, 1, {1, 10}, 10, 2};
|
||||
memset((int *)(g_Buffer + 10144), 0, 40);
|
||||
memset((int *)(g_Buffer + 10184), 0, 40);
|
||||
SoftmaxInt8((int8_t *)(g_Buffer + 0), (int8_t *)(g_Buffer + 16), 1, (int *)(g_Buffer + 10144), (int *)(g_Buffer + 10184), &quant_params, (SoftmaxParameter *)&softmax_parameter);
|
||||
}
|
||||
{
|
||||
DoDequantizeInt8ToFp32((int8_t *)(g_Buffer+16), (float *)(g_Buffer+32), 0.00390625, -128, 10);
|
||||
DoDequantizeInt8ToFp32((int8_t *)(g_Buffer + 16), (float *)(g_Buffer + 32), 0.00390625, -128, 10);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -10,13 +10,13 @@ set(OP_SRC
|
|||
pack_int8.c.o
|
||||
pooling_int8.c.o
|
||||
quant_dtype_cast_int8.c.o
|
||||
relux_int8.c.o
|
||||
reshape_int8.c.o
|
||||
softmax_int8.c.o
|
||||
weight.c.o
|
||||
net.c.o
|
||||
session.cc.o
|
||||
tensor.cc.o
|
||||
string.cc.o
|
||||
)
|
||||
file(GLOB NET_SRC
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/*.cc
|
||||
|
|
|
@ -24,7 +24,7 @@ namespace mindspore {
|
|||
namespace lite {
|
||||
int LiteSession::CompileGraph(lite::Model *model) {
|
||||
inputs_.resize(1);
|
||||
Vector<int> in_shape_0;
|
||||
Vector<int32_t> in_shape_0;
|
||||
in_shape_0.resize(4);
|
||||
in_shape_0[0] = 1;
|
||||
in_shape_0[1] = 28;
|
||||
|
@ -33,7 +33,7 @@ int LiteSession::CompileGraph(lite::Model *model) {
|
|||
inputs_[0] = new (std::nothrow) MTensor(String("graph_input-0"), kNumberTypeInt8, in_shape_0);
|
||||
MS_ERROR_IF_NULL(inputs_[0]);
|
||||
outputs_.resize(1);
|
||||
Vector<int> out_shape_0;
|
||||
Vector<int32_t> out_shape_0;
|
||||
out_shape_0.resize(2);
|
||||
out_shape_0[0] = 1;
|
||||
out_shape_0[1] = 10;
|
||||
|
@ -43,25 +43,6 @@ int LiteSession::CompileGraph(lite::Model *model) {
|
|||
return ret;
|
||||
}
|
||||
|
||||
|
||||
int LiteSession::RunGraph(const KernelCallBack &before, const KernelCallBack &after) {
|
||||
const void *inputs_data[inputs_.size()];
|
||||
for (size_t i = 0; i < inputs_.size(); ++i) {
|
||||
inputs_data[i] = inputs_[i]->MutableData();
|
||||
}
|
||||
SetInputs(inputs_data, inputs_.size());
|
||||
|
||||
Inference();
|
||||
|
||||
void *outputs_data[outputs_.size()];
|
||||
for (size_t i = 0; i < outputs_.size(); ++i) {
|
||||
outputs_data[i] = outputs_[i]->MutableData();
|
||||
}
|
||||
CopyOutputsData(outputs_data, outputs_.size());
|
||||
|
||||
return RET_OK;
|
||||
}
|
||||
|
||||
LiteSession::~LiteSession() {
|
||||
FreeResource();
|
||||
if (runtime_buffer_ != nullptr) {
|
||||
|
@ -84,6 +65,24 @@ LiteSession::~LiteSession() {
|
|||
}
|
||||
}
|
||||
|
||||
int LiteSession::RunGraph(const KernelCallBack &before, const KernelCallBack &after) {
|
||||
const void *inputs_data[inputs_.size()];
|
||||
for (size_t i = 0; i < inputs_.size(); ++i) {
|
||||
inputs_data[i] = inputs_[i]->MutableData();
|
||||
}
|
||||
SetInputs(inputs_data, inputs_.size());
|
||||
|
||||
Inference();
|
||||
|
||||
void *outputs_data[outputs_.size()];
|
||||
for (size_t i = 0; i < outputs_.size(); ++i) {
|
||||
outputs_data[i] = outputs_[i]->MutableData();
|
||||
}
|
||||
CopyOutputsData(outputs_data, outputs_.size());
|
||||
|
||||
return RET_OK;
|
||||
}
|
||||
|
||||
int LiteSession::InitRuntimeBuffer() {
|
||||
int buffer_size = GetBufferSize();
|
||||
runtime_buffer_ = malloc(buffer_size);
|
||||
|
@ -127,6 +126,7 @@ mindspore::tensor::MSTensor *LiteSession::GetOutputByTensorName(const String &te
|
|||
return nullptr;
|
||||
}
|
||||
} // namespace lite
|
||||
|
||||
session::LiteSession *session::LiteSession::CreateSession(const lite::Context *context) {
|
||||
auto *session = new (std::nothrow) lite::LiteSession();
|
||||
MS_NULLPTR_IF_NULL(session);
|
||||
|
|
|
@ -18,8 +18,8 @@
|
|||
#ifndef MINDSPORE_LITE_MICRO_LIBRARY_SOURCE_SESSION_H_
|
||||
#define MINDSPORE_LITE_MICRO_LIBRARY_SOURCE_SESSION_H_
|
||||
|
||||
#include "include/errorcode.h"
|
||||
#include "include/lite_session.h"
|
||||
#include "errorcode.h"
|
||||
#include "lite_session.h"
|
||||
|
||||
#include "tensor.h"
|
||||
|
||||
|
@ -83,3 +83,4 @@ class LiteSession : public session::LiteSession {
|
|||
} // namespace mindspore
|
||||
|
||||
#endif // MINDSPORE_LITE_MICRO_LIBRARY_SOURCE_SESSION_H_
|
||||
|
||||
|
|
|
@ -1,307 +0,0 @@
|
|||
|
||||
|
||||
/**
|
||||
* Copyright 2021 Huawei Technologies Co., Ltd
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifdef NOT_USE_STL
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <stdio.h>
|
||||
#include <float.h>
|
||||
#include <stdint.h>
|
||||
#include "include/lite_utils.h"
|
||||
|
||||
namespace mindspore {
|
||||
String::String() {
|
||||
buffer_ = reinterpret_cast<char *>(malloc(sizeof(char) * 1));
|
||||
if (buffer_ == nullptr) {
|
||||
MS_C_EXCEPTION("malloc data failed");
|
||||
}
|
||||
buffer_[0] = '\0';
|
||||
size_ = 0;
|
||||
}
|
||||
|
||||
String::String(size_t count, char ch) {
|
||||
buffer_ = reinterpret_cast<char *>(malloc(sizeof(char) * (count + 1)));
|
||||
if (buffer_ == nullptr) {
|
||||
MS_C_EXCEPTION("malloc data failed");
|
||||
}
|
||||
memset(buffer_, ch, count);
|
||||
buffer_[count] = '\0';
|
||||
size_ = count;
|
||||
}
|
||||
String::String(const char *s, size_t count) {
|
||||
if (s == nullptr) {
|
||||
buffer_ = reinterpret_cast<char *>(malloc(sizeof(char) * 1));
|
||||
if (buffer_ == nullptr) {
|
||||
MS_C_EXCEPTION("malloc data failed");
|
||||
}
|
||||
buffer_[0] = '\0';
|
||||
size_ = 0;
|
||||
return;
|
||||
}
|
||||
size_t size_s = strlen(s);
|
||||
if (size_s <= count) {
|
||||
size_ = size_s;
|
||||
} else {
|
||||
size_ = count;
|
||||
}
|
||||
buffer_ = reinterpret_cast<char *>(malloc(sizeof(char) * (size_ + 1)));
|
||||
if (buffer_ == nullptr) {
|
||||
MS_C_EXCEPTION("malloc data failed");
|
||||
}
|
||||
strncpy(buffer_, s, size_);
|
||||
buffer_[size_] = '\0';
|
||||
}
|
||||
|
||||
String::String(const char *s) {
|
||||
if (s == nullptr) {
|
||||
buffer_ = reinterpret_cast<char *>(malloc(sizeof(char) * 1));
|
||||
if (buffer_ == nullptr) {
|
||||
MS_C_EXCEPTION("malloc data failed");
|
||||
}
|
||||
buffer_[0] = '\0';
|
||||
size_ = 0;
|
||||
return;
|
||||
}
|
||||
size_ = strlen(s);
|
||||
buffer_ = reinterpret_cast<char *>(malloc(sizeof(char) * (size_ + 1)));
|
||||
if (buffer_ == nullptr) {
|
||||
MS_C_EXCEPTION("malloc data failed");
|
||||
}
|
||||
memcpy(buffer_, s, size_ + 1);
|
||||
}
|
||||
|
||||
String::String(const String &other) {
|
||||
buffer_ = reinterpret_cast<char *>(malloc(sizeof(char) * (other.size_ + 1)));
|
||||
if (buffer_ == nullptr) {
|
||||
MS_C_EXCEPTION("malloc data failed");
|
||||
}
|
||||
size_ = other.size_;
|
||||
memcpy(buffer_, other.buffer_, size_ + 1);
|
||||
}
|
||||
|
||||
String::String(const String &other, size_t pos, size_t count) {
|
||||
if (pos >= other.size_) {
|
||||
buffer_ = reinterpret_cast<char *>(malloc(sizeof(char) * 1));
|
||||
if (buffer_ == nullptr) {
|
||||
MS_C_EXCEPTION("malloc data failed");
|
||||
}
|
||||
buffer_[0] = '\0';
|
||||
size_ = 0;
|
||||
} else {
|
||||
if (count == npos) {
|
||||
count = other.size_ - pos;
|
||||
}
|
||||
if (pos + count > other.size_) {
|
||||
size_ = other.size_ - pos;
|
||||
} else {
|
||||
size_ = count;
|
||||
}
|
||||
buffer_ = reinterpret_cast<char *>(malloc(sizeof(char) * (size_ + 1)));
|
||||
if (buffer_ == nullptr) {
|
||||
MS_C_EXCEPTION("malloc data failed");
|
||||
}
|
||||
strncpy(buffer_, other.buffer_ + pos, size_);
|
||||
buffer_[size_] = '\0';
|
||||
}
|
||||
}
|
||||
|
||||
String::~String() { free(buffer_); }
|
||||
|
||||
String &String::operator=(const String &str) {
|
||||
if (this == &str) {
|
||||
return *this;
|
||||
}
|
||||
free(buffer_);
|
||||
buffer_ = reinterpret_cast<char *>(malloc(sizeof(char) * (str.size_ + 1)));
|
||||
if (buffer_ == nullptr) {
|
||||
MS_C_EXCEPTION("malloc data failed");
|
||||
}
|
||||
size_ = str.size_;
|
||||
memcpy(buffer_, str.buffer_, size_ + 1);
|
||||
return *this;
|
||||
}
|
||||
|
||||
String &String::operator=(const char *str) {
|
||||
free(buffer_);
|
||||
if (str == nullptr) {
|
||||
buffer_ = reinterpret_cast<char *>(malloc(sizeof(char) * 1));
|
||||
if (buffer_ == nullptr) {
|
||||
MS_C_EXCEPTION("malloc data failed");
|
||||
}
|
||||
buffer_[0] = '\0';
|
||||
size_ = 0;
|
||||
return *this;
|
||||
}
|
||||
size_t size_s = strlen(str);
|
||||
buffer_ = reinterpret_cast<char *>(malloc(sizeof(char) * (size_s + 1)));
|
||||
if (buffer_ == nullptr) {
|
||||
MS_C_EXCEPTION("malloc data failed");
|
||||
}
|
||||
size_ = size_s;
|
||||
memcpy(buffer_, str, size_ + 1);
|
||||
return *this;
|
||||
}
|
||||
|
||||
char &String::at(size_t pos) {
|
||||
if (pos >= size_) {
|
||||
MS_C_EXCEPTION("pos out of range");
|
||||
}
|
||||
return buffer_[pos];
|
||||
}
|
||||
const char &String::at(size_t pos) const {
|
||||
if (pos >= size_) {
|
||||
MS_C_EXCEPTION("pos out of range");
|
||||
}
|
||||
return buffer_[pos];
|
||||
}
|
||||
char &String::operator[](size_t pos) {
|
||||
if (pos >= size_) {
|
||||
MS_C_EXCEPTION("pos out of range");
|
||||
}
|
||||
return this->at(pos);
|
||||
}
|
||||
const char &String::operator[](size_t pos) const {
|
||||
if (pos >= size_) {
|
||||
MS_C_EXCEPTION("pos out of range");
|
||||
}
|
||||
return this->at(pos);
|
||||
}
|
||||
char *String::data() noexcept { return buffer_; };
|
||||
const char *String::data() const noexcept { return buffer_; }
|
||||
// C-string view of the contents; buffer_ always carries a trailing '\0'.
const char *String::c_str() const noexcept { return buffer_; }
|
||||
|
||||
// capacity
|
||||
// True when the string holds no characters.
bool String::empty() const noexcept { return size_ == 0; }
|
||||
// Number of characters, excluding the terminating '\0'.
size_t String::size() const noexcept { return size_; }
|
||||
// Synonym for size(), mirroring std::string's interface.
size_t String::length() const noexcept { return size_; }
|
||||
|
||||
// operations
|
||||
void String::clear() noexcept {
|
||||
free(buffer_);
|
||||
buffer_ = reinterpret_cast<char *>(malloc(sizeof(char) * 1));
|
||||
if (buffer_ == nullptr) {
|
||||
MS_C_EXCEPTION("malloc data failed");
|
||||
}
|
||||
buffer_[0] = '\0';
|
||||
size_ = 0;
|
||||
}
|
||||
|
||||
// NOTE(review): unlike std::string, this member operator+ mutates the
// left-hand operand and returns it — it behaves exactly like operator+=.
String &String::operator+(const String &str) {
  return (*this) += str;
}
|
||||
|
||||
// Appends `str` to this string.
// Fix: copy both pieces with memcpy at known offsets.  The previous
// strncat() rescanned the destination from its start to find the
// terminator (O(size_) extra work) on every append.  The concatenation is
// built in a fresh buffer first, which keeps `str` valid even for
// self-append (s += s) — str.buffer_ is only freed after the copy.
String &String::operator+=(const String &str) {
  const size_t total = size_ + str.size_;
  char *merged = reinterpret_cast<char *>(malloc(sizeof(char) * (total + 1)));
  if (merged == nullptr) {
    MS_C_EXCEPTION("malloc data failed");
  }
  memcpy(merged, buffer_, size_);
  memcpy(merged + size_, str.buffer_, str.size_);
  merged[total] = '\0';
  free(buffer_);
  buffer_ = merged;
  size_ = total;
  return *this;
}
|
||||
|
||||
// Appends a C string; appending nullptr is a documented no-op.
// Fix: copy both pieces with memcpy at known offsets instead of strncat(),
// which rescanned the destination from its start on every append.  The new
// buffer is filled before the old one is freed, so `str` may alias the
// current buffer.
String &String::operator+=(const char *str) {
  if (str == nullptr) {
    return *this;
  }
  size_t str_size = strlen(str);
  size_t new_size = size_ + str_size;
  char *merged = reinterpret_cast<char *>(malloc(sizeof(char) * (new_size + 1)));
  if (merged == nullptr) {
    MS_C_EXCEPTION("malloc data failed");
  }
  memcpy(merged, buffer_, size_);
  memcpy(merged + size_, str, str_size);
  merged[new_size] = '\0';
  free(buffer_);
  buffer_ = merged;
  size_ = new_size;
  return *this;
}
|
||||
|
||||
// Appends a single character, growing the buffer by exactly one byte
// (plus the terminator).
String &String::operator+=(const char ch) {
  char *grown = reinterpret_cast<char *>(malloc(sizeof(char) * (size_ + 2)));
  if (grown == nullptr) {
    MS_C_EXCEPTION("malloc data failed");
  }
  memcpy(grown, buffer_, size_);
  grown[size_] = ch;
  grown[size_ + 1] = '\0';
  free(buffer_);
  buffer_ = grown;
  size_ += 1;
  return *this;
}
|
||||
|
||||
// Appends `ch` exactly `count` times, matching std::string::append(count, ch).
// Fix: the previous implementation ignored `count` entirely and always
// appended a single character.
String &String::append(size_t count, const char ch) {
  for (size_t i = 0; i < count; ++i) {
    (*this) += ch;
  }
  return *this;
}
|
||||
// Appends another String; delegates to operator+= for the concatenation.
String &String::append(const String &str) {
  return (*this) += str;
}
|
||||
// Appends a C string; nullptr is a no-op.  operator+=(const char *) already
// guards against nullptr, so the explicit check here is kept for clarity only.
String &String::append(const char *str) {
  if (str != nullptr) {
    (*this) += str;
  }
  return *this;
}
|
||||
|
||||
int String::compare(const String &str) const { return strcmp(buffer_, str.buffer_); }
|
||||
int String::compare(const char *str) const { return strcmp(buffer_, str); }
|
||||
|
||||
String String::substr(size_t pos, size_t count) const { return String(*this, pos, count); }
|
||||
|
||||
// Non-mutating concatenation: returns lhs followed by rhs.
String operator+(const String &lhs, const char *rhs) {
  String result = lhs;
  result += rhs;
  return result;
}
|
||||
|
||||
// Non-mutating concatenation: returns lhs followed by rhs.
// Fix: the previous implementation copied rhs and then appended lhs,
// producing rhs+lhs instead of lhs+rhs.
String operator+(const char *lhs, const String &rhs) {
  String str = lhs;
  str += rhs;
  return str;
}
|
||||
|
||||
bool operator!=(const String &lhs, const String &rhs) { return lhs.compare(rhs) != 0; }
|
||||
bool operator==(const String &lhs, const String &rhs) { return lhs.compare(rhs) == 0; }
|
||||
bool operator==(const String &lhs, const char *rhs) { return lhs.compare(rhs) == 0; }
|
||||
bool operator==(const char *lhs, const String &rhs) { return rhs.compare(lhs) == 0; }
|
||||
|
||||
// Decimal rendering of a 32-bit integer.
// 16 bytes comfortably holds "-2147483648" plus the terminator.
String to_String(int32_t value) {
  char text[sizeof(int32_t) * 4];
  snprintf(text, sizeof(text), "%d", value);
  return String(text, strlen(text));
}
|
||||
|
||||
// Fixed-notation rendering of a float.
// "%f" of FLT_MAX needs roughly FLT_MAX_10_EXP digits before the point
// plus six after; the extra headroom covers sign, point and terminator.
String to_String(float value) {
  char text[FLT_MAX_10_EXP + 20];
  snprintf(text, sizeof(text), "%f", value);
  return String(text, strlen(text));
}
|
||||
} // namespace mindspore
|
||||
#endif // NOT_USE_STL
|
|
@ -60,8 +60,8 @@ MTensor::~MTensor() {
|
|||
}
|
||||
}
|
||||
|
||||
int MTensor::ElementsNum() const {
|
||||
int elements = 1;
|
||||
int64_t MTensor::ElementsNum() const {
|
||||
int64_t elements = 1;
|
||||
for (int i : shape_) {
|
||||
elements *= i;
|
||||
}
|
||||
|
@ -81,3 +81,4 @@ void *MTensor::MutableData() {
|
|||
}
|
||||
} // namespace lite
|
||||
} // namespace mindspore
|
||||
|
||||
|
|
|
@ -18,8 +18,8 @@
|
|||
#ifndef MINDSPORE_LITE_MICRO_LIBRARY_SOURCE_TENSOR_H_
|
||||
#define MINDSPORE_LITE_MICRO_LIBRARY_SOURCE_TENSOR_H_
|
||||
|
||||
#include "include/ms_tensor.h"
|
||||
#include "ir/format.h"
|
||||
#include "ms_tensor.h"
|
||||
#include "api/format.h"
|
||||
|
||||
namespace mindspore {
|
||||
namespace lite {
|
||||
|
@ -39,7 +39,7 @@ struct LiteQuantParam {
|
|||
class MTensor : public mindspore::tensor::MSTensor {
|
||||
public:
|
||||
MTensor() = default;
|
||||
MTensor(String name, TypeId type, Vector<int> shape) : tensor_name_(name), data_type_(type), shape_(shape) {}
|
||||
MTensor(String name, TypeId type, Vector<int32_t> shape) : tensor_name_(name), data_type_(type), shape_(shape) {}
|
||||
~MTensor() override;
|
||||
|
||||
void set_allocator(AllocatorPtr allocator) override {}
|
||||
|
@ -50,7 +50,7 @@ class MTensor : public mindspore::tensor::MSTensor {
|
|||
mindspore::Format format() const override { return mindspore::NHWC; }
|
||||
Vector<int> shape() const override { return shape_; }
|
||||
void set_shape(const Vector<int> &shape) override { shape_ = shape; }
|
||||
int ElementsNum() const override;
|
||||
int64_t ElementsNum() const override;
|
||||
size_t Size() const override;
|
||||
String tensor_name() const override { return tensor_name_; }
|
||||
void set_tensor_name(const String &name) override { tensor_name_ = name; }
|
||||
|
@ -59,6 +59,7 @@ class MTensor : public mindspore::tensor::MSTensor {
|
|||
void set_data(void *data) override { data_ = data; }
|
||||
Vector<LiteQuantParam> quant_params() const override { return this->quant_params_; }
|
||||
void set_quant_params(const Vector<LiteQuantParam> quant_params) override { this->quant_params_ = quant_params; }
|
||||
bool IsConst() const override {return this->data_ != nullptr;}
|
||||
|
||||
private:
|
||||
String tensor_name_;
|
||||
|
@ -71,3 +72,4 @@ class MTensor : public mindspore::tensor::MSTensor {
|
|||
} // namespace mindspore
|
||||
|
||||
#endif // MINDSPORE_LITE_MICRO_LIBRARY_SOURCE_TENSOR_H_
|
||||
|
||||
|
|
|
@ -29,6 +29,10 @@ int32_t *g_Weight16 = NULL;
|
|||
int32_t *g_Weight17 = NULL;
|
||||
int8_t *g_Weight18 = NULL;
|
||||
int32_t *g_Weight19 = NULL;
|
||||
int8_t g_Weight6[6000];
|
||||
int32_t g_Weight7[20];
|
||||
int8_t g_Weight8[200];
|
||||
int32_t g_Weight9[10];
|
||||
|
||||
int Init(void *weight_buffer, int weight_size) {
|
||||
if (weight_buffer == NULL) {
|
||||
|
@ -39,19 +43,19 @@ int Init(void *weight_buffer, int weight_size) {
|
|||
size_t size;
|
||||
size_t offset;
|
||||
};
|
||||
int8_t *g_Weight6 = (weight_buffer + 9312);
|
||||
int32_t *g_Weight7 = (weight_buffer + 15312);
|
||||
int8_t *g_Weight8 = (weight_buffer + 15392);
|
||||
int32_t *g_Weight9 = (weight_buffer + 15592);
|
||||
|
||||
struct ModelParameter model_params[] = {
|
||||
{g_Weight10, 3072, 0},
|
||||
{g_Weight11, 48, 3072},
|
||||
{g_Weight12, 6144, 3120},
|
||||
{g_Weight13, 48, 9264},
|
||||
{g_Weight6, 6000, 9312},
|
||||
{g_Weight7, 80, 15312},
|
||||
{g_Weight8, 200, 15392},
|
||||
{g_Weight9, 40, 15592},
|
||||
};
|
||||
|
||||
for(int i = 0; i < 4; ++i) {
|
||||
for(int i = 0; i < 8; ++i) {
|
||||
if (model_params[i].offset + model_params[i].size > weight_size) {
|
||||
return RET_ERROR;
|
||||
}
|
||||
|
@ -83,7 +87,7 @@ if (g_Weight17 == NULL) {
|
|||
return RET_ERROR;
|
||||
}
|
||||
memset(g_Weight17, 0, 48);
|
||||
memcpy(g_Weight17, g_Weight9, 48);
|
||||
memcpy(g_Weight17, g_Weight9, 40);
|
||||
g_Weight19 = malloc(48);
|
||||
if (g_Weight19 == NULL) {
|
||||
return RET_ERROR;
|
||||
|
|
|
@ -20,9 +20,11 @@
|
|||
#include "nnacl/int8/common_func_int8.h"
|
||||
#include "nnacl/int8/conv3x3_int8.h"
|
||||
#include "nnacl/int8/conv_int8.h"
|
||||
#include "nnacl/int8/fixed_point.h"
|
||||
#include "nnacl/int8/matmul_int8.h"
|
||||
#include "nnacl/int8/pooling_int8.h"
|
||||
#include "nnacl/int8/quant_dtype_cast_int8.h"
|
||||
#include "nnacl/int8/relux_int8.h"
|
||||
#include "nnacl/int8/reshape_int8.h"
|
||||
#include "nnacl/int8/softmax_int8.h"
|
||||
#include "wrapper/int8/matmul_int8_wrapper.h"
|
||||
|
@ -45,3 +47,7 @@ extern int32_t *g_Weight16;
|
|||
extern int32_t *g_Weight17;
|
||||
extern int8_t *g_Weight18;
|
||||
extern int32_t *g_Weight19;
|
||||
extern int8_t g_Weight6[];
|
||||
extern int32_t g_Weight7[];
|
||||
extern int8_t g_Weight8[];
|
||||
extern int32_t g_Weight9[];
|
||||
|
|
Loading…
Reference in New Issue