Fix bug in quantization deploy export

This commit is contained in:
Wei Luning 2020-06-23 20:34:44 +08:00
parent 0478b7d191
commit dcd5773f64
3 changed files with 5 additions and 3 deletions

View File

@ -154,6 +154,8 @@ class ConvertToQuantNetwork:
per_channel=self.act_channel,
symmetric=self.act_symmetric,
narrow_range=self.act_range)
prefix = self._convert_op_name(prim_op.name)
if network.param_prefix:
prefix = '.'.join([network.param_prefix, self._convert_op_name(prim_op.name)])
add_quant.update_parameters_name(prefix + '.')
del network.__dict__[name]

View File

@ -125,7 +125,7 @@ def scale_zp_from_fack_quant_cell(cell, data_type):
"""
minq = cell.minq.data.asnumpy()
maxq = cell.maxq.data.asnumpy()
op = cell.fake_quant
op = cell.fake_quant_infer
scale, zp = cal_quantization_params(
minq, maxq, data_type,

View File

@ -67,7 +67,7 @@ def test_qat_lenet():
img = Tensor(np.ones((32, 1, 32, 32)).astype(np.float32))
net = LeNet5()
net = qat.convert_quant_network(
net, quant_delay=0, bn_fold=False, freeze_bn=10000, num_bits=8)
net, freeze_bn=10000, num_bits=8)
# should load the checkpoint. mock here
for param in net.get_parameters():
param.init_data()