[MSLITE] fp32 int and uint bug

ling 2021-08-09 15:04:50 +08:00
parent e6e544dbc4
commit 8f2ff022d4
5 changed files with 10 additions and 8 deletions
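
The fp32 kernels mixed signed int with unsigned size_t arithmetic: loop indices were compared against size_t bounds, and memcpy/memset byte counts multiplied int values by sizeof(float), whose type is size_t. The diffs below make each conversion explicit (a size_t loop index in PostConvFuncComm, (int) casts around sizeof(float) and plane_size in the kernels) and, in the lite runtime, stop attaching the context allocator to isolated input tensors of delegate kernels.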

View File

@@ -21,10 +21,10 @@ void PostConvFuncComm(const float *src_ptr_, float *out_ptr, const float *bias_p
   if (size == 0) {
     return;
   }
-  for (int oc = 0; oc < output_channel; oc++) {
+  for (size_t oc = 0; oc < output_channel; oc++) {
     int oc_div = oc / size;
     int oc_mod = oc % size;
-    for (int hw = 0; hw < plane_size; hw++) {
+    for (int hw = 0; hw < (int)plane_size; hw++) {
       int src_index = oc_div * size * plane_stride + hw * size + oc_mod;
       int dst_index = hw * oc_stride + oc;
       float value = src_ptr_[src_index];
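
As context (an illustration, not part of the commit): when a signed int is compared against a size_t, the usual arithmetic conversions promote the int operand to the unsigned type, so a negative value wraps to a huge number and compilers warn under -Wsign-compare. A minimal sketch of the pitfall that the size_t oc / (int)plane_size changes above sidestep:

#include <stdio.h>
#include <stddef.h>

int main(void) {
  size_t bound = 4;
  /* -1 is converted to size_t (SIZE_MAX) before the comparison,
     so the condition is false and this loop body never runs. */
  for (int i = -1; i < bound; i++) {
    printf("unsigned compare: i = %d\n", i);
  }
  /* Casting the bound to int keeps the comparison signed,
     mirroring the (int)plane_size change above. */
  for (int i = -1; i < (int)bound; i++) {
    printf("signed compare: i = %d\n", i);
  }
  return 0;
}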

View File

@@ -52,7 +52,8 @@ int ConvDw(float *output_data, const float *input_data, const float *weight_data
   int end_kh = MSMIN(conv_param->kernel_h_, UP_DIV(conv_param->input_h_ - ih_origin, conv_param->dilation_h_));
   for (int ow = 0; ow < conv_param->output_w_; ow++) {
-    memcpy(dst_data + ow * conv_param->output_channel_, bias_data, conv_param->output_channel_ * sizeof(float));
+    memcpy(dst_data + ow * conv_param->output_channel_, bias_data,
+           conv_param->output_channel_ * (int)(sizeof(float)));
   }
   for (int kh = start_kh; kh < end_kh; kh++) {
     int ih = ih_origin + conv_param->dilation_w_ * kh;
@@ -764,10 +765,10 @@ void ConvDwFp32IndirectRow(float *output, float **input, const float *weights, c
                             int output_width, int input_stride, bool relu, bool relu6, int kernel) {
   do {
     float **in = input;
-    size_t c = channels;
+    size_t c = (size_t)channels;
     const float *w = weights;
     float *out = output;
-    memcpy(out, bias, channels * sizeof(float));
+    memcpy(out, bias, channels * (int)sizeof(float));
     for (; c >= C4NUM; c -= C4NUM) {
       for (int i = 0; i < C4NUM; i++) {
         for (int k = 0; k < kernel; k++) {
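
Again as illustration only: sizeof(float) has type size_t, so an expression like channels * sizeof(float) implicitly converts the signed factor to size_t before multiplying, which is what static checkers flag here; a negative count would wrap to an enormous byte size. A sketch of the two spellings, using a hypothetical copy_bias helper (not from the source):

#include <string.h>

void copy_bias(float *out, const float *bias, int channels) {
  /* Implicit: channels is converted to size_t by the usual
     arithmetic conversions before the multiply. */
  memcpy(out, bias, channels * sizeof(float));
}

void copy_bias_explicit(float *out, const float *bias, int channels) {
  /* Explicit, as in the diff: the multiply stays in int and the
     (assumed non-negative) result converts at the call boundary. */
  memcpy(out, bias, channels * (int)sizeof(float));
}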

View File

@@ -61,7 +61,7 @@ void DeConvPostFp32C8(const float *src, float *tmp, const float *bias, float *ds
   for (int c = 0; c < oc8; c += 8) {
     float *dst_ptr = tmp + c * output_plane;
     const float *src_ptr = src + c * in_plane_round * kernel_plane;
-    memset(dst_ptr, 0, output_plane * C8NUM * sizeof(float));
+    memset(dst_ptr, 0, output_plane * C8NUM * (int)sizeof(float));
     for (int ih = 0; ih < conv_param->input_h_; ih++) {
       for (int iw = 0; iw < conv_param->input_w_; iw++) {

View File

@@ -41,7 +41,7 @@ void PackLstmBias(float *dst, const float *src, int batch, int col, int col_alig
   for (int i = 0; i < unidirectional_batch; i++) {
     const float *src_batch = src + i * col;
     float *dst_batch = dst + i * col_align;
-    memcpy(dst_batch, src_batch, col * sizeof(float));
+    memcpy(dst_batch, src_batch, col * (int)sizeof(float));
   }
   if (is_bidirectional) {
     const float *backward_src = src + batch * col;

View File

@@ -104,7 +104,8 @@ void LiteOpActor::IsolateInputData(std::vector<std::shared_ptr<LiteOpActor>> *ac
   Tensor *new_tensor = new Tensor(new_data_type, old_tensor->shape(), old_tensor->format(), old_tensor->category());
   new_tensor->set_allocator(old_tensor->allocator());
-  if (new_tensor->allocator() == nullptr && kernel_->Context() != nullptr) {
+  if (new_tensor->allocator() == nullptr && kernel_->Context() != nullptr &&
+      kernel_->desc().arch != kernel::kDelegate) {
     new_tensor->set_allocator(kernel_->Context()->allocator);
   }
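
This last change is a behavioral guard rather than a type fix: isolated input tensors created for a delegate kernel no longer inherit the context allocator. The diff does not state the motivation; presumably delegate-backed kernels manage their input buffers themselves, so the inner context's allocator should not be attached to their tensors.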