fix bias correction

yeyunpeng2020 2022-01-10 19:50:46 +08:00
parent 42a48bd6fe
commit 4ed99ae928
3 changed files with 7 additions and 8 deletions


@@ -116,7 +116,7 @@ int PreprocessParser::ParseCalibratePath(const std::string &str, std::map<std::s
return RET_INPUT_PARAM_INVALID;
}
auto data_path = string_split.at(1);
-for (size_t i = 2; i < string_split.size() - 1; ++i) {
+for (size_t i = 2; i < string_split.size(); ++i) {
data_path += ":" + string_split[i];
}
if (data_path.empty()) {
@@ -184,7 +184,7 @@ int PreprocessParser::CollectCalibInputs(const std::map<std::string, std::string
for (const auto &image_path : calibrate_data_path) {
DIR *root = opendir(image_path.second.c_str());
if (root == nullptr) {
MS_LOG(ERROR) << "invalid data path: " << image_path;
MS_LOG(ERROR) << "cant open dir: " << image_path.second.c_str();
return RET_PARAM_INVALID;
}
struct dirent *image_dir = readdir(root);
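The first file's changes tighten the calibration-input parsing: the loop bound moves from string_split.size() - 1 to string_split.size(), so the last colon-separated fragment of the data path is no longer dropped when the path itself contains ':' (a Windows drive path, for example), and the opendir failure now logs the directory that could not be opened. A minimal sketch of the rejoin logic, assuming an entry of the form name:path; SplitOnColon and the sample string are illustrative stand-ins, not MindSpore code:

// Illustrative stand-in for the rejoin above; SplitOnColon is hypothetical.
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

std::vector<std::string> SplitOnColon(const std::string &str) {
  std::vector<std::string> parts;
  std::stringstream ss(str);
  std::string item;
  while (std::getline(ss, item, ':')) {
    parts.push_back(item);
  }
  return parts;
}

int main() {
  // Hypothetical calibrate entry: tensor name followed by a path that itself contains ':'.
  auto string_split = SplitOnColon("input_tensor:C:\\data\\calib");
  auto data_path = string_split.at(1);
  for (size_t i = 2; i < string_split.size(); ++i) {  // '< size()' keeps the final fragment
    data_path += ":" + string_split[i];
  }
  std::cout << data_path << std::endl;  // "C:\data\calib"; the old '< size() - 1' bound yielded just "C"
  return 0;
}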


@@ -32,7 +32,7 @@
namespace mindspore::lite::quant {
namespace {
-constexpr int kHasBiasTensorSize = 3;
+constexpr int kHasBiasTensorSize = 4;
const std::set<std::string> kSupportBiasCorrectionNode = {
schema::EnumNamePrimitiveType(schema::PrimitiveType_Conv2DFusion)};
} // namespace
@@ -523,8 +523,7 @@ int BiasCorrectionStrategy::DoCNodeBiasCorrection(const FuncGraphPtr &quant_func
auto quant_param_holder = GetCNodeQuantHolder(primitive);
MS_CHECK_TRUE_MSG(quant_param_holder != nullptr, RET_NULL_PTR, "quant_param_holder is nullptr.");
auto input_quant_params = quant_param_holder->get_input_quant_params();
-if (input_quant_params.size() == kHasBiasTensorSize) {
-  // compensate the existed
+if (cnode->size() == kHasBiasTensorSize) {
auto bias = cnode->input(THIRD_INPUT + 1);
auto bias_parameter_ptr = bias->cast<ParameterPtr>();
auto bias_default_param = bias_parameter_ptr->default_param();
@@ -543,7 +542,7 @@ int BiasCorrectionStrategy::DoCNodeBiasCorrection(const FuncGraphPtr &quant_func
return RET_ERROR;
}
}
-} else if (input_quant_params.size() == kHasBiasTensorSize - 1) {
+} else if (cnode->size() == kHasBiasTensorSize - 1) {
MS_LOG(INFO) << op_name << " add bias input";
// need to add bias input
auto parameter = quant_func_graph->add_parameter();
@@ -560,8 +559,7 @@ int BiasCorrectionStrategy::DoCNodeBiasCorrection(const FuncGraphPtr &quant_func
}
}
} else {
-MS_LOG(WARNING) << op_name << " unexpected size: " << input_quant_params.size()
-                << ", and shared weight tensor does not support bias correction temporarily.";
+MS_LOG(WARNING) << op_name << " unexpected size: " << cnode->size();
}
return RET_OK;
}
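In the bias-correction change, the branch now keys off cnode->size() instead of input_quant_params.size(), with kHasBiasTensorSize raised from 3 to 4 to match: a CNode's input list carries the primitive itself at index 0, so a Conv2DFusion node with activation, weight, and bias inputs has size 4, and one without a bias has size 3 (the removed warning suggests counting quant parameters was unreliable when a weight tensor is shared). A simplified, hypothetical model of that decision; the vectors and strings below are stand-ins, not the MindSpore CNode API:

// Hypothetical sketch: model a CNode's input list as a vector whose element 0 is the primitive.
#include <iostream>
#include <string>
#include <vector>

constexpr size_t kHasBiasTensorSize = 4;  // primitive + activation + weight + bias

int main() {
  std::vector<std::string> conv_with_bias = {"Conv2DFusion", "activation", "weight", "bias"};
  std::vector<std::string> conv_without_bias = {"Conv2DFusion", "activation", "weight"};

  for (const auto &node : {conv_with_bias, conv_without_bias}) {
    if (node.size() == kHasBiasTensorSize) {
      std::cout << node[0] << ": compensate the existing bias input" << std::endl;
    } else if (node.size() == kHasBiasTensorSize - 1) {
      std::cout << node[0] << ": add a new bias input" << std::endl;
    } else {
      std::cout << node[0] << ": unexpected size " << node.size() << std::endl;
    }
  }
  return 0;
}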


@@ -427,6 +427,7 @@ void FullQuantQuantizer::InitNvGpuConfig() {
support_int8_ops_ = {prim::kPrimConv2DFusion, prim::kPrimFullConnection, prim::kPrimMatMul,
prim::kPrimConv2dTransposeFusion, prim::kPrimConv2dTransposeFusion};
per_channel_ops_ = {prim::kPrimConv2DFusion, prim::kPrimMatMul, prim::kPrimFullConnection};
+flags_.fullQuantParam.bias_correction = false;
}
void FullQuantQuantizer::InitQMinMax() {