From 84959e8bafe6d5457d60692e4937f0f392963194 Mon Sep 17 00:00:00 2001
From: xutianchun
Date: Mon, 12 Apr 2021 09:51:08 +0800
Subject: [PATCH] fix weight conv quant

---
 mindspore/lite/tools/converter/quantizer/weight_quantizer.cc | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/mindspore/lite/tools/converter/quantizer/weight_quantizer.cc b/mindspore/lite/tools/converter/quantizer/weight_quantizer.cc
index b80d163f0a7..c3235a37a8d 100644
--- a/mindspore/lite/tools/converter/quantizer/weight_quantizer.cc
+++ b/mindspore/lite/tools/converter/quantizer/weight_quantizer.cc
@@ -100,8 +100,8 @@ STATUS WeightQuantizer::DoConvQuantize(const CNodePtr &cnode) {
   }
 
   if (tensor_info->data_type() != mindspore::kNumberTypeFloat32) {
-    MS_LOG(ERROR) << "model weight data type invalid which is " << tensor_info->data_type();
-    return RET_ERROR;
+    MS_LOG(WARNING) << cnode->fullname_with_scope() << " weight data type is not fp32 but " << tensor_info->data_type();
+    return RET_OK;
   }
   auto status = RET_ERROR;
   if (type_id_ == kNumberTypeInt8) {
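
Note (not part of the commit): the patch changes DoConvQuantize so that a conv node whose
weight tensor is not fp32 is skipped with a warning instead of aborting the whole weight
quantization with RET_ERROR. Below is a minimal, self-contained sketch of that behavior,
using plain C++ (std::cerr, a local Status enum, a stand-in WeightTensor struct) in place
of the real MS_LOG macros, RET_OK/RET_ERROR codes, and tensor_info/CNodePtr types; the
names and the surrounding quantization step are assumptions for illustration only.

#include <iostream>
#include <string>

enum Status { kOk = 0, kError = 1 };
enum DataType { kFloat32, kFloat16, kInt8 };

struct WeightTensor {
  std::string node_name;  // stand-in for cnode->fullname_with_scope()
  DataType data_type;     // stand-in for tensor_info->data_type()
};

// Before the patch: any non-fp32 weight aborted quantization with an error.
// After the patch (sketched here): non-fp32 weights are skipped with a warning,
// so the remaining fp32 weights in the model can still be quantized.
Status DoConvQuantizeSketch(const WeightTensor &weight) {
  if (weight.data_type != kFloat32) {
    std::cerr << "[WARNING] " << weight.node_name
              << " weight data type is not fp32 but " << weight.data_type << "\n";
    return kOk;  // skip this node instead of failing the whole conversion
  }
  // ... int8/fp16 quantization of the fp32 weight would happen here ...
  return kOk;
}

int main() {
  // A conv node whose weight is already fp16: it is now skipped, not fatal.
  WeightTensor fp16_weight{"Conv2D-op1", kFloat16};
  return DoConvQuantizeSketch(fp16_weight) == kOk ? 0 : 1;
}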