!15322 fix momentum parameter of batchnorm in quantization

From: @chenzhuo42
Reviewed-by: @zh_qh,@zhoufeng54
Signed-off-by: @zh_qh
This commit is contained in:
mindspore-ci-bot 2021-04-19 14:17:57 +08:00 committed by Gitee
commit 24319a647b
4 changed files with 6 additions and 6 deletions

View File

@@ -316,7 +316,7 @@ class QuantizationAwareTraining(Quantizer):
dilation=conv_inner.dilation, dilation=conv_inner.dilation,
group=conv_inner.group, group=conv_inner.group,
eps=bn_inner.eps, eps=bn_inner.eps,
momentum=bn_inner.momentum, momentum=1 - bn_inner.momentum,
has_bias=conv_inner.has_bias, has_bias=conv_inner.has_bias,
bias_init=conv_inner.bias_init, bias_init=conv_inner.bias_init,
quant_config=self.quant_config, quant_config=self.quant_config,
@@ -332,7 +332,7 @@ class QuantizationAwareTraining(Quantizer):
dilation=conv_inner.dilation, dilation=conv_inner.dilation,
group=conv_inner.group, group=conv_inner.group,
eps=bn_inner.eps, eps=bn_inner.eps,
momentum=bn_inner.momentum, momentum=1 - bn_inner.momentum,
has_bias=conv_inner.has_bias, has_bias=conv_inner.has_bias,
bias_init=conv_inner.bias_init, bias_init=conv_inner.bias_init,
freeze_bn=self.freeze_bn, freeze_bn=self.freeze_bn,
@@ -358,7 +358,7 @@ class QuantizationAwareTraining(Quantizer):
dilation=conv_inner.dilation, dilation=conv_inner.dilation,
group=conv_inner.group, group=conv_inner.group,
eps=bn_inner.eps, eps=bn_inner.eps,
momentum=bn_inner.momentum, momentum=1 - bn_inner.momentum,
has_bias=conv_inner.has_bias, has_bias=conv_inner.has_bias,
bias_init=conv_inner.bias_init, bias_init=conv_inner.bias_init,
quant_config=self.quant_config, quant_config=self.quant_config,

View File

@@ -496,7 +496,7 @@ class Conv2dBnFoldQuantOneConv(Cell):
self.dilation = twice(dilation) self.dilation = twice(dilation)
self.group = group self.group = group
self.eps = eps self.eps = eps
self.momentum = momentum self.momentum = 1 - momentum
self.has_bias = has_bias self.has_bias = has_bias
self.fake = fake self.fake = fake
self.quant_config = quant_config self.quant_config = quant_config

View File

@@ -32,7 +32,7 @@ def conv_block(in_channels,
padding=padding, padding=padding,
dilation=dilation, dilation=dilation,
has_bn=True, has_bn=True,
momentum=0.1, momentum=0.9,
activation='relu') activation='relu')

View File

@@ -36,7 +36,7 @@ def _conv_bn_relu(in_channel,
padding=0, padding=0,
dilation=1, dilation=1,
alpha=0.1, alpha=0.1,
momentum=0.9, momentum=0.1,
eps=1e-5, eps=1e-5,
pad_mode="same"): pad_mode="same"):
"""Get a conv2d batchnorm and relu layer""" """Get a conv2d batchnorm and relu layer"""