Fix some tiny bugs for converter in Windows environment and conv ops

This commit is contained in:
liuwenhao4 2020-08-30 01:00:54 +08:00
parent fa97e24938
commit 22e37013b9
7 changed files with 47 additions and 18 deletions

View File

@ -20,14 +20,14 @@ IF NOT EXIST "%BASEPATH%/build" (
md "build"
)
cd "%BASEPATH%/build"
cd %BASEPATH%/build
set BUILD_PATH=%CD%
IF NOT EXIST "%BUILD_PATH%/mindspore" (
md "mindspore"
)
cd "%CD%/mindspore"
cd %CD%/mindspore
IF "%1%" == "lite" (
call :gene_gtest
@ -56,7 +56,7 @@ IF "%1%" == "lite" (
echo "build fail."
goto run_fail
) ELSE (
cd "%BASEPATH%/output"
cd %BASEPATH%/output
rd /s /q _CPack_Packages
)
) ELSE (
@ -78,22 +78,22 @@ IF "%1%" == "lite" (
)
)
cd "%BASEPATH%"
cd %BASEPATH%
goto run_eof
:run_cmake
cd "%BUILD_PATH%/mindspore"
cd %BUILD_PATH%/mindspore
cmake -DBUILD_DEVICE=on -DBUILD_CONVERTER=on -DPLATFORM_ARM64=off -DSUPPORT_TRAIN=off ^
-DCMAKE_BUILD_TYPE=Release -DSUPPORT_GPU=off -DBUILD_MINDDATA=off -DOFFLINE_COMPILE=off ^
-G "CodeBlocks - MinGW Makefiles" "%BASEPATH%/mindspore/lite"
GOTO:EOF
:gene_gtest
cd "%BASEPATH%/third_party"
cd %BASEPATH%/third_party
IF EXIST googletest rd /s /q googletest
git submodule update --init --recursive googletest
cd "%BUILD_PATH%/mindspore"
cd %BUILD_PATH%/mindspore
GOTO:EOF
:gene_protobuf
@ -111,22 +111,22 @@ GOTO:EOF
:gene_flatbuffer
SET FLATC="%BASEPATH%/build/mindspore/_deps/flatbuffers-src/_build/flatc"
SET FLAT_DIR="%BASEPATH%/mindspore/lite/schema"
SET FLAT_DIR=%BASEPATH%/mindspore/lite/schema
cd %FLAT_DIR%
IF EXIST inner rd /s /q inner
md inner
%FLATC% -c -b *.fbs
%FLATC% -c -b --reflect-types --gen-mutable --reflect-names --gen-object-api -o %FLAT_DIR%/inner *.fbs
%FLATC% -c -b --reflect-types --gen-mutable --reflect-names --gen-object-api -o "%FLAT_DIR%/inner" *.fbs
SET FLAT_DIR="%BASEPATH%/mindspore/lite/tools/converter/parser/tflite"
SET FLAT_DIR=%BASEPATH%/mindspore/lite/tools/converter/parser/tflite
cd %FLAT_DIR%
%FLATC% -c -b --reflect-types --gen-mutable --reflect-names --gen-object-api -o %FLAT_DIR% *.fbs
cd "%BUILD_PATH%/mindspore"
%FLATC% -c -b --reflect-types --gen-mutable --reflect-names --gen-object-api -o "%FLAT_DIR%" *.fbs
cd %BUILD_PATH%/mindspore
GOTO:EOF
:run_fail
cd "%BASEPATH%"
cd %BASEPATH%
set errorlevel=1
:run_eof

View File

@ -421,13 +421,37 @@ End3:
smax v17.4s, v17.4s, v7.4s
smax v18.4s, v18.4s, v7.4s
smax v19.4s, v19.4s, v7.4s
smax v20.4s, v20.4s, v7.4s
smax v21.4s, v21.4s, v7.4s
smax v22.4s, v22.4s, v7.4s
smax v23.4s, v23.4s, v7.4s
smax v24.4s, v24.4s, v7.4s
smax v25.4s, v25.4s, v7.4s
smax v26.4s, v26.4s, v7.4s
smax v27.4s, v27.4s, v7.4s
smax v28.4s, v28.4s, v7.4s
smax v29.4s, v29.4s, v7.4s
smax v30.4s, v30.4s, v7.4s
smax v31.4s, v31.4s, v7.4s
// Apply the act_min bound
// Apply the act_max bound
dup v6.4s, w9
smin v16.4s, v16.4s, v6.4s
smin v17.4s, v17.4s, v6.4s
smin v18.4s, v18.4s, v6.4s
smin v19.4s, v19.4s, v6.4s
smin v20.4s, v20.4s, v6.4s
smin v21.4s, v21.4s, v6.4s
smin v22.4s, v22.4s, v6.4s
smin v23.4s, v23.4s, v6.4s
smin v24.4s, v24.4s, v6.4s
smin v25.4s, v25.4s, v6.4s
smin v26.4s, v26.4s, v6.4s
smin v27.4s, v27.4s, v6.4s
smin v28.4s, v28.4s, v6.4s
smin v29.4s, v29.4s, v6.4s
smin v30.4s, v30.4s, v6.4s
smin v31.4s, v31.4s, v6.4s
// int32 -> int16
sqxtn v0.4h, v16.4s

View File

@ -57,6 +57,6 @@ void MatMulRInt8_optimize_handler(const int8_t *a, const int8_t *b, int8_t *dst,
int32_t *right_shift, int32_t *multiplier, int32_t output_zp, int32_t mini,
int32_t maxi, bool per_channel) {
return MatmulInt8DpNeon64(a, b, dst, UP_ROUND(row, 8), UP_ROUND(col, 8), deep_4, input_sum, bias, mini, maxi,
output_zp, multiplier[0], left_shift[0], right_shift[0], row, col, col);
output_zp, multiplier[0], left_shift[0], right_shift[0], row, col, stride);
}
#endif

View File

@ -28,7 +28,11 @@ using mindspore::lite::RET_OK;
using mindspore::schema::PrimitiveType_Split;
namespace mindspore::kernel {
int SplitBaseCPUKernel::Init() { return RET_OK; }
int SplitBaseCPUKernel::Init() {
  // Normalize the split axis: a negative split_dim_ counts from the end,
  // so convert it to the equivalent non-negative axis index using the
  // rank of the first input tensor (e.g. -1 -> rank - 1).
  auto split_dim = param->split_dim_;
  // NOTE(review): shape().size() is unsigned; the addition relies on the
  // usual int -> size_t conversion being benign for small ranks — confirm.
  param->split_dim_ = split_dim >= 0 ? split_dim : in_tensors_.front()->shape().size() + split_dim;
  return RET_OK;
}
int SplitBaseCPUKernel::ReSize() {
auto in_tensor = in_tensors_.front();

View File

@ -38,7 +38,7 @@ Convolution1x1Int8CPUKernel::~Convolution1x1Int8CPUKernel() {
matmul_param_ = nullptr;
}
if (packed_weight_ != nullptr) {
delete packed_weight_;
free(packed_weight_);
packed_weight_ = nullptr;
}
FreeResizeBuf();

View File

@ -60,6 +60,7 @@ int ConvolutionDepthwiseInt8CPUKernel::InitWeightBias() {
for (int i = 0; i < weight_tensor->ElementsNum(); i++) {
packed_weight_[i] = (int16_t)(tmp_weight[i] - weight_zp);
}
free(tmp_weight);
bias_data_ = reinterpret_cast<int32_t *>(malloc(channel * sizeof(int32_t)));
if (bias_data_ == nullptr) {

View File

@ -402,7 +402,7 @@ kernel::LiteKernel *CpuConvInt8KernelCreator(const std::vector<lite::tensor::Ten
if (kernel_h == 3 && kernel_w == 3 && stride_h == 1 && stride_w == 1 && dilation_h == 1 && dilation_w == 1) {
kernel = new (std::nothrow) kernel::ConvolutionInt8CPUKernel(opParameter, inputs, outputs, ctx, primitive);
} else if (kernel_h == 1 && kernel_w == 1 && filter_quant_size == 1) {
kernel = new (std::nothrow) kernel::ConvolutionInt8CPUKernel(opParameter, inputs, outputs, ctx, primitive);
kernel = new (std::nothrow) kernel::Convolution1x1Int8CPUKernel(opParameter, inputs, outputs, ctx, primitive);
} else {
kernel = new (std::nothrow) kernel::ConvolutionInt8CPUKernel(opParameter, inputs, outputs, ctx, primitive);
}