From 1c095221e6f9b28cfa6219b0f6936c4d2f541c54 Mon Sep 17 00:00:00 2001
From: yuchaojie
Date: Mon, 23 Nov 2020 14:33:59 +0800
Subject: [PATCH] fix QuantizationAwareTraining note typo error

---
 mindspore/compression/quant/qat.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/mindspore/compression/quant/qat.py b/mindspore/compression/quant/qat.py
index 0a6156a0107..c6b3aabf356 100644
--- a/mindspore/compression/quant/qat.py
+++ b/mindspore/compression/quant/qat.py
@@ -140,7 +140,7 @@ class QuantizationAwareTraining(Quantizer):
             support QAT. Default: OptimizeOption.QAT
 
     Examples:
-        >>> class Net(nn.Cell):
+        >>> class LeNet5(nn.Cell):
         >>>     def __init__(self, num_class=10, channel=1):
         >>>         super(LeNet5, self).__init__()
         >>>         self.type = "fusion"
@@ -168,7 +168,7 @@ class QuantizationAwareTraining(Quantizer):
         >>>         x = self.fc3(x)
         >>>         return x
         >>>
-        >>> net = Net()
+        >>> net = LeNet5()
         >>> quantizer = QuantizationAwareTraining(bn_fold=False, per_channel=[True, False], symmetric=[True, False])
         >>> net_qat = quantizer.quantize(net)
     """
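
For reference, the corrected docstring example corresponds to a runnable script roughly like the sketch below. The diff hunks elide the middle of the LeNet5 class, so the layer definitions here are illustrative assumptions built from standard mindspore.nn layers, and the import path for QuantizationAwareTraining is assumed from the patched file's location (mindspore/compression/quant/qat.py); only the quantizer construction and quantize() call are taken from the docstring itself.

    # Minimal sketch of the fixed example. Layer choices inside LeNet5 are
    # assumptions (the patch hunks elide the class body); the quantizer usage
    # mirrors the docstring being corrected.
    import mindspore.nn as nn
    from mindspore.compression.quant import QuantizationAwareTraining

    class LeNet5(nn.Cell):
        """Illustrative LeNet-5; sized for 1x32x32 inputs."""
        def __init__(self, num_class=10, channel=1):
            super(LeNet5, self).__init__()
            self.conv1 = nn.Conv2d(channel, 6, 5, pad_mode='valid')
            self.conv2 = nn.Conv2d(6, 16, 5, pad_mode='valid')
            self.relu = nn.ReLU()
            self.max_pool2d = nn.MaxPool2d(kernel_size=2, stride=2)
            self.flatten = nn.Flatten()
            self.fc1 = nn.Dense(16 * 5 * 5, 120)
            self.fc2 = nn.Dense(120, 84)
            self.fc3 = nn.Dense(84, num_class)

        def construct(self, x):
            # Two conv/pool stages, then the fully connected head.
            x = self.max_pool2d(self.relu(self.conv1(x)))
            x = self.max_pool2d(self.relu(self.conv2(x)))
            x = self.flatten(x)
            x = self.relu(self.fc1(x))
            x = self.relu(self.fc2(x))
            x = self.fc3(x)
            return x

    # After the rename, the class defined above is the one instantiated,
    # which is exactly the mismatch the patch removes.
    net = LeNet5()
    quantizer = QuantizationAwareTraining(bn_fold=False,
                                          per_channel=[True, False],
                                          symmetric=[True, False])
    net_qat = quantizer.quantize(net)

The underlying fix is purely a naming one: the old docstring defined "class Net" but called super(LeNet5, ...) and instantiated LeNet5 via Net(), so copying the example verbatim would raise a NameError; renaming the class to LeNet5 and instantiating LeNet5() makes the example internally consistent.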