!15794 [MSLITE] Fix bug of deconvolution fp16 initialization.

From: @wang_shaocong
Reviewed-by: @zhang_xue_tong, @zhanghaibo5
Signed-off-by: @zhang_xue_tong
commit 6f59321206 by mindspore-ci-bot, 2021-04-28 09:29:23 +08:00 (committed by Gitee)
2 changed files with 6 additions and 10 deletions

@@ -70,10 +70,6 @@ int DeConvolutionFp16CPUKernel::InitWeightBias() {
     MS_LOG(ERROR) << "DeConv fp16 only support fp16 weight";
     return RET_ERROR;
   }
-  if (bias_size != in_tensors_.at(2)->Size()) {
-    MS_LOG(ERROR) << "input bias size not match : " << bias_size << " vs " << in_tensors_.at(2)->Size();
-    return RET_ERROR;
-  }
   memcpy(bias_data_, in_tensors_.at(2)->data_c(), bias_size);
 }
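The hunk above removes the strict equality check between bias_size and the bias tensor's byte size. The commit message does not spell out the failure mode, but MSLITE routinely pads packed buffers up to a multiple of C8NUM (8) lanes, in which case a size derived from the rounded-up channel count need not equal the raw tensor size, and the check can misfire on valid models. A minimal sketch of that padding pattern, assuming hypothetical names (UpRound, PackBiasFp16) and plain malloc rather than the kernel's real allocator:

#include <cstdint>
#include <cstdlib>
#include <cstring>

using float16_t = uint16_t;  // stand-in for the ARM fp16 type in this sketch

constexpr int C8NUM = 8;
inline int UpRound(int n, int align) { return ((n + align - 1) / align) * align; }

// Hypothetical helper: pack `channel` fp16 bias values into a buffer padded
// to a multiple of C8NUM, zeroing the padding lanes so SIMD tails read 0.
float16_t *PackBiasFp16(const float16_t *src, int channel) {
  size_t packed_bytes = UpRound(channel, C8NUM) * sizeof(float16_t);
  auto *dst = static_cast<float16_t *>(malloc(packed_bytes));
  if (dst == nullptr) return nullptr;
  memset(dst, 0, packed_bytes);                    // padding lanes stay zero
  memcpy(dst, src, channel * sizeof(float16_t));   // copy only real channels
  return dst;
}

Under that reading, the byte count worth validating is the unpadded channel size, not the padded allocation size.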

@@ -50,12 +50,12 @@ class DeConvolutionFp16CPUKernel : public ConvolutionBaseCPUKernel {
   int output_plane_;
   int thread_count_;
   int thread_stride_;
-  float16_t *pack_input_;
-  float16_t *pack_weight_;
-  float16_t *pack_output_;
-  float16_t *tmp_buffer_;
-  float16_t *batch_input_;
-  float16_t *batch_output_;
+  float16_t *pack_input_ = nullptr;
+  float16_t *pack_weight_ = nullptr;
+  float16_t *pack_output_ = nullptr;
+  float16_t *tmp_buffer_ = nullptr;
+  float16_t *batch_input_ = nullptr;
+  float16_t *batch_output_ = nullptr;
 };
 }  // namespace mindspore::kernel
 #endif  // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP16_DECONVOLUTION_H_
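This second hunk is the core of the initialization fix: all six buffer pointers now carry in-class nullptr initializers. If InitWeightBias() or a later allocation fails partway, the kernel is destroyed with some buffers never assigned; calling free() on an indeterminate pointer is undefined behavior, whereas free(nullptr) is a well-defined no-op. A standalone sketch of that failure path (hypothetical KernelBuffers class, not the MSLITE kernel):

#include <cstdlib>

class KernelBuffers {
 public:
  // Runs even when Init() failed or was never called; with the nullptr
  // defaults below, these free() calls are harmless no-ops in that case.
  ~KernelBuffers() {
    free(pack_input_);
    free(tmp_buffer_);
  }

  bool Init(size_t bytes) {
    pack_input_ = static_cast<float *>(malloc(bytes));
    if (pack_input_ == nullptr) return false;  // tmp_buffer_ stays nullptr
    tmp_buffer_ = static_cast<float *>(malloc(bytes));
    return tmp_buffer_ != nullptr;
  }

 private:
  float *pack_input_ = nullptr;  // without these initializers the destructor
  float *tmp_buffer_ = nullptr;  // could free two garbage pointers
};

The same reasoning applies to each pointer in the hunk: the destructor and any cleanup path become safe to run against a partially initialized kernel.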