[MSLITE][DEVELOP] fix VS bug

yangruoqi713 2022-02-18 11:10:42 +08:00
parent 288bbd832a
commit 54e6a75875
4 changed files with 7 additions and 6 deletions

View File

@@ -262,7 +262,7 @@ void InputTransform4x4Pack12(float *src_data, float *dst_data, int src_step, int
for (int l = 0; l < 4; ++l) {
float *src = src_data + l * pack_tile * block_tile;
// 12 * 4 -> 4 * 12
- float tmp_mat[pack_tile][block_tile];
+ float tmp_mat[4][12];
for (int i = 0; i < block_tile; ++i) {
for (int j = 0; j < pack_tile; ++j) {
tmp_mat[j][i] = src[i * pack_tile + j];
@@ -500,7 +500,7 @@ void InputTransform6x6Pack12(float *src_data, float *dst_data, int src_step, int
for (int l = 0; l < 6; ++l) {
float *src = src_data + l * pack_tile * block_tile;
// 12 * 4 -> 4 * 12
- float tmp_mat[pack_tile][block_tile];
+ float tmp_mat[4][12];
for (int i = 0; i < block_tile; ++i) {
for (int j = 0; j < pack_tile; ++j) {
tmp_mat[j][i] = src[i * pack_tile + j];
@@ -838,7 +838,7 @@ void InputTransform8x8Pack12(float *src_data, float *dst_data, int src_step, int
for (int l = 0; l < 8; ++l) {
float *src = src_data + l * pack_tile * block_tile;
// 12 * 4 -> 4 * 12
- float tmp_mat[pack_tile][block_tile];
+ float tmp_mat[4][12];
for (int i = 0; i < block_tile; ++i) {
for (int j = 0; j < pack_tile; ++j) {
tmp_mat[j][i] = src[i * pack_tile + j];
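
Note: the three hunks above are the same fix applied to the 4x4, 6x6, and 8x8 input transforms. Visual Studio's compiler does not support variable-length arrays, so `float tmp_mat[pack_tile][block_tile];` fails to build even though both sizes are effectively constant here; the commit spells the bounds out as `tmp_mat[4][12]`. Below is a minimal standalone sketch of the 12 * 4 -> 4 * 12 tile transpose with the fixed bounds. The values pack_tile == 4 and block_tile == 12 are inferred from the replacement line, not stated in the diff.

#include <cstdio>

// Compile-time bounds replacing the VLA; MSVC accepts only constant array sizes.
constexpr int kPackTile = 4;    // inferred value of pack_tile
constexpr int kBlockTile = 12;  // inferred value of block_tile

// Transpose one 12 * 4 source tile into a 4 * 12 destination,
// mirroring the inner loop of the hunks above.
static void TransposeTile(const float *src, float tmp_mat[kPackTile][kBlockTile]) {
  for (int i = 0; i < kBlockTile; ++i) {
    for (int j = 0; j < kPackTile; ++j) {
      tmp_mat[j][i] = src[i * kPackTile + j];
    }
  }
}

int main() {
  float src[kBlockTile * kPackTile];
  for (int k = 0; k < kBlockTile * kPackTile; ++k) src[k] = static_cast<float>(k);
  float tmp_mat[kPackTile][kBlockTile];
  TransposeTile(src, tmp_mat);
  std::printf("%g %g\n", tmp_mat[0][0], tmp_mat[3][11]);  // prints 0 and 47
  return 0;
}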

View File

@@ -16,7 +16,8 @@
#include "thread/core_affinity.h"
#include <string.h>
- #include <stdlib.h>
+ #include <cstdlib>
+ #include <string>
#include <algorithm>
#ifdef MS_COMPILE_IOS
#include <sys/types.h>
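
Note: judging by the hunk range (-16,7 +16,8) and the commit's totals (7 additions, 6 deletions), this hunk drops `#include <stdlib.h>` in favor of its C++ counterpart `<cstdlib>` and adds `<string>`. A likely reason, though the diff does not say, is that MSVC's standard library headers include less transitively than libstdc++'s, so names such as std::string that compiled implicitly on GCC/Clang need their headers included explicitly under Visual Studio.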

View File

@@ -16,7 +16,7 @@
#include "src/ops/populate/populate_register.h"
#include "src/common/log_adapter.h"
#include "src/tensor.h"
- using PrimType::PrimType_Custom;
+ using mindspore::schema::PrimitiveType_Custom;
namespace mindspore {
namespace lite {
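
Note: the removed line qualifies the enumerator with `PrimType`, and assuming PrimType is the unscoped C-style enum used elsewhere in MindSpore Lite, a using-declaration that looks inside an enum's scope is only well-formed from C++20 onward, which is presumably why it broke the Visual Studio build. The replacement imports the enumerator through the mindspore::schema namespace instead. A small self-contained sketch of the distinction, with made-up names and an arbitrary value standing in for the real ones:

namespace schema {
// Hypothetical stand-in for the generated schema enum; unscoped enumerators
// are injected into the enclosing namespace.
enum PrimitiveType { PrimitiveType_Custom = 1 };
}  // namespace schema

// using schema::PrimitiveType::PrimitiveType_Custom;  // ill-formed before C++20
using schema::PrimitiveType_Custom;  // fine: the enumerator lives in the namespace

int main() { return PrimitiveType_Custom == 1 ? 0 : 1; }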

View File

@@ -107,7 +107,7 @@ int QuantDTypeCastCPUKernel::QuantDTypeCast(int task_id) {
auto input_quant_arg = in_tensors_.front()->quant_params().front();
ret = DoDequantizeInt8ToFp32(int8_ptr_ + thread_offset, float32_ptr_ + thread_offset, input_quant_arg.scale,
input_quant_arg.zeroPoint, num_unit_thread);
- if (ret) {
+ if (ret == RET_OK) {
auto output_quant_arg = out_tensors_.front()->quant_params().front();
if (quant_arg.dstDtype == TypeId::kNumberTypeUInt8) {
ret = DoQuantizeFp32ToInt8FromUint8Source(float32_ptr_ + thread_offset, int8_out_ptr_ + thread_offset,
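
Note: MindSpore Lite's error codes follow the convention that RET_OK is 0, so the removed `if (ret)` took the branch precisely when DoDequantizeInt8ToFp32 had failed, and the follow-up quantize step ran on error. The replacement gates that step on success instead. A minimal sketch of the pattern; DoDequantize and RET_ERROR here are stand-ins for illustration, not the real kernel API:

#include <cstdio>

constexpr int RET_OK = 0;      // MindSpore Lite convention: zero means success
constexpr int RET_ERROR = -1;  // stand-in error code for this sketch

// Stand-in for DoDequantizeInt8ToFp32: reports success or failure via the code.
static int DoDequantize(bool succeed) { return succeed ? RET_OK : RET_ERROR; }

int main() {
  int ret = DoDequantize(true);
  // Old check: `if (ret)` is true only when ret != 0, i.e. on failure,
  // so the second stage ran exactly when the first one had failed.
  if (ret == RET_OK) {
    std::printf("dequantize ok, run the quantize stage\n");
  } else {
    std::printf("dequantize failed: %d\n", ret);
  }
  return 0;
}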