fix typo in QuantizationAwareTraining note

yuchaojie 2020-11-23 14:33:59 +08:00
parent 63fcdb44b5
commit 1c095221e6
1 changed file with 2 additions and 2 deletions


@@ -140,7 +140,7 @@ class QuantizationAwareTraining(Quantizer):
             support QAT. Default: OptimizeOption.QAT
     Examples:
-        >>> class Net(nn.Cell):
+        >>> class LeNet5(nn.Cell):
         >>>     def __init__(self, num_class=10, channel=1):
         >>>         super(LeNet5, self).__init__()
         >>>         self.type = "fusion"
@@ -168,7 +168,7 @@ class QuantizationAwareTraining(Quantizer):
         >>>         x = self.fc3(x)
         >>>         return x
         >>>
-        >>> net = Net()
+        >>> net = LeNet5()
         >>> quantizer = QuantizationAwareTraining(bn_fold=False, per_channel=[True, False], symmetric=[True, False])
         >>> net_qat = quantizer.quantize(net)
     """