From 296e756b509cbb48940e90f08ec5d051ebfcf5e8 Mon Sep 17 00:00:00 2001
From: wangshuide2020
Date: Thu, 3 Jun 2021 15:23:42 +0800
Subject: [PATCH] fix the reduction args of Loss operator, line too long and
 other warning problems.

---
 mindspore/nn/loss/loss.py             | 4 +---
 mindspore/nn/optim/lars.py            | 5 +++--
 mindspore/nn/optim/lazyadam.py        | 3 ++-
 mindspore/ops/_grad/grad_quant_ops.py | 1 +
 4 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/mindspore/nn/loss/loss.py b/mindspore/nn/loss/loss.py
index 1c70ba34c69..0d62e9ca8cd 100644
--- a/mindspore/nn/loss/loss.py
+++ b/mindspore/nn/loss/loss.py
@@ -40,11 +40,9 @@ class Loss(Cell):
     """
     def __init__(self, reduction='mean'):
         super(Loss, self).__init__()
-        if reduction is None:
-            reduction = 'none'
 
         if reduction not in ('mean', 'sum', 'none'):
-            raise ValueError(f"reduction method for {reduction.lower()} is not supported")
+            raise ValueError(f"reduction method for {reduction} is not supported")
 
         self.average = True
         self.reduce = True
diff --git a/mindspore/nn/optim/lars.py b/mindspore/nn/optim/lars.py
index f19992fcb75..0dcf543c7db 100755
--- a/mindspore/nn/optim/lars.py
+++ b/mindspore/nn/optim/lars.py
@@ -58,7 +58,8 @@ class LARS(Optimizer):
 
     .. math::
         \begin{array}{ll} \\
-            \lambda = \frac{\theta \text{ * } || \omega || }{|| g_{t} || \text{ + } \delta \text{ * } || \omega || } \\
+            \lambda = \frac{\theta \text{ * } || \omega || } \\
+                    {|| g_{t} || \text{ + } \delta \text{ * } || \omega || } \\
             \lambda =
             \begin{cases}
                 \min(\frac{\lambda}{\alpha }, 1)
@@ -70,7 +71,7 @@
         \end{array}
 
     :math:`\theta` represents `coefficient`, :math:`\omega` represents `parameters`, :math:`g` represents `gradients`,
-    :math:`t` represents updateing step, :math:`\delta` represents `weight_decay`,
+    :math:`t` represents updating step, :math:`\delta` represents `weight_decay`,
     :math:`\alpha` represents `learning_rate`, :math:`clip` represents `use_clip`.
 
     Args:
diff --git a/mindspore/nn/optim/lazyadam.py b/mindspore/nn/optim/lazyadam.py
index 8eca2300582..49141009623 100644
--- a/mindspore/nn/optim/lazyadam.py
+++ b/mindspore/nn/optim/lazyadam.py
@@ -107,7 +107,8 @@ class LazyAdam(Optimizer):
     r"""
     This optimizer will apply a lazy adam algorithm when gradient is sparse.
 
-    The original adam algorithm is proposed in `Adam: A Method for Stochastic Optimization <https://arxiv.org/abs/1412.6980>`_.
+    The original adam algorithm is proposed in
+    `Adam: A Method for Stochastic Optimization <https://arxiv.org/abs/1412.6980>`_.
 
     The updating formulas are as follows,
 
diff --git a/mindspore/ops/_grad/grad_quant_ops.py b/mindspore/ops/_grad/grad_quant_ops.py
index 8809c7ac2fe..57c2829a2e0 100644
--- a/mindspore/ops/_grad/grad_quant_ops.py
+++ b/mindspore/ops/_grad/grad_quant_ops.py
@@ -138,6 +138,7 @@ def get_bprop_BatchNormFold(self):
 
 @bprop_getters.register(P.BNTrainingReduce)
 def get_bprop_BNTrainingReduce(self):
+    """Generate bprop for BNTrainingReduce for Ascend"""
     def bprop(x, out, dout):
         return (zeros_like(x),)
 
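Below is a minimal sketch (not part of the patch) of the behaviour the loss.py hunk gives Loss.__init__; the helper name check_reduction is hypothetical and only mirrors the new validation, where a reduction of None is no longer silently coerced to 'none' but is rejected like any other unsupported value.

    def check_reduction(reduction='mean'):
        # Same check Loss.__init__ performs after this patch: only the three
        # documented modes are accepted, everything else raises immediately.
        if reduction not in ('mean', 'sum', 'none'):
            raise ValueError(f"reduction method for {reduction} is not supported")
        return reduction

    check_reduction('mean')    # accepted
    check_reduction('none')    # accepted
    try:
        check_reduction(None)  # previously coerced to 'none'; now rejected
    except ValueError as err:
        print(err)             # reduction method for None is not supported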