!37263 Move the interface is_dynamic_lr_or_weight_decay into an inner method

Merge pull request !37263 from zhangbuxue/mv_the_interface_is_dynamic_lr_or_weight_decay_into_inner
i-robot 2022-07-05 03:08:39 +00:00 committed by Gitee
commit ad86e9cf0a
6 changed files with 8 additions and 8 deletions
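
The Python hunks below all follow one pattern: the public helper Optimizer.is_dynamic_lr_or_weight_decay() becomes the inner Optimizer._is_dynamic_lr_or_weight_decay(), and every in-tree caller is updated to match. As a rough orientation only (not part of this diff), a subclass call site after the rename would look like the sketch below; MyOptimizer is a hypothetical name, and only methods that actually appear in the hunks are assumed to exist:

```python
from mindspore.nn import Optimizer  # assumed public import path

class MyOptimizer(Optimizer):  # hypothetical subclass, for illustration only
    def construct(self, gradients):
        gradients = self.scale_grad(gradients)
        lr = self.get_lr()
        # get_lr() already advances global_step when the lr or weight decay is
        # dynamic (see the optimizer.py hunk below), so only optimizers with a
        # static schedule advance it at the call site.
        if not self._is_dynamic_lr_or_weight_decay():
            self.assignadd(self.global_step, self.global_step_increase_tensor)
        success = True  # the real parameter update would go here
        return success
```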


@@ -30,8 +30,8 @@ class MIND_API BatchNormGradGrad : public BaseOperator {
  public:
   MIND_API_BASE_MEMBER(BatchNormGradGrad);
   BatchNormGradGrad() : BaseOperator(kNameBatchNormGradGrad) {
-    InitIOName({"dy", "x", "scale", "mean", "variance", "dout_dx", "dout_dscale", "dout_dbias"},
-               {"ddy", "dx", "dscale"});
+    InitIOName({"x", "dy", "scale", "mean", "variance", "dout_dx", "dout_dscale", "dout_dbias"},
+               {"dx", "ddy", "dscale"});
   }
   void Init(bool is_training = false, float epsilon = 1e-05, const std::string &format = "NCHW");
   void set_is_training(bool is_training);


@@ -181,7 +181,7 @@ class ASGD(Optimizer):
         gradients = self.gradients_centralization(gradients)
         gradients = self.scale_grad(gradients)
         lrs = self.get_lr()
-        if not self.is_dynamic_lr_or_weight_decay():
+        if not self._is_dynamic_lr_or_weight_decay():
             self.assignadd(self.global_step, self.global_step_increase_tensor)
         success = True
         params = self._parameters


@@ -260,7 +260,7 @@ class Lamb(Optimizer):
     def construct(self, gradients):
         weight_decay = self.get_weight_decay()
         lr = self.get_lr()
-        if not self.is_dynamic_lr_or_weight_decay():
+        if not self._is_dynamic_lr_or_weight_decay():
             self.assignadd(self.global_step, self.global_step_increase_tensor)
         lamb_opt = _lamb_opt
         gradients = self.flatten_gradients(gradients)


@@ -195,6 +195,6 @@ class LARS(Optimizer):
         gradients = self.hyper_map(F.partial(_lars_opt, self.lars, self.loss_scale, lr, weight_decay),
                                    gradients, params, self.decay_flags, self.lars_flag)
         success = self.opt(gradients)
-        if self.is_dynamic_lr_or_weight_decay() and not self.opt.is_dynamic_lr_or_weight_decay():
+        if self._is_dynamic_lr_or_weight_decay() and not self.opt.dynamic_lr and not self.opt.dynamic_weight_decay:
             self.assignadd(self.global_step, self.global_step_increase_tensor)
         return success
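
The LARS hunk is the one place where the rename changes the call site beyond the leading underscore: LARS keeps calling the (now inner) helper on itself, but instead of calling it on the wrapped optimizer it reads that optimizer's dynamic_lr and dynamic_weight_decay flags directly. A minimal sketch of just this guard, pulled out as a standalone function for readability (the function name and the wrapped_opt parameter are illustrative, not from the diff):

```python
def should_bump_global_step(lars, wrapped_opt):
    """Illustrative restatement of the guard in the LARS hunk above."""
    # LARS advances global_step only when it is dynamic itself and the wrapped
    # optimizer is not; a dynamic wrapped optimizer already advances the step
    # inside its own get_lr().
    return (lars._is_dynamic_lr_or_weight_decay()
            and not wrapped_opt.dynamic_lr
            and not wrapped_opt.dynamic_weight_decay)
```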


@@ -695,7 +695,7 @@ class Optimizer(Cell):
                     lr += (current_dynamic_lr,)
             else:
                 lr = self.learning_rate(self.global_step).reshape(())
-        if self.is_dynamic_lr_or_weight_decay():
+        if self._is_dynamic_lr_or_weight_decay():
             self.assignadd(self.global_step, self.global_step_increase_tensor)
         return lr
@@ -754,7 +754,7 @@
         return lr if isinstance(param, list) else lr[0]

-    def is_dynamic_lr_or_weight_decay(self):
+    def _is_dynamic_lr_or_weight_decay(self):
         """
         Determine whether the learning rate or weight decay is dynamic.
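
The hunk above shows only the renamed signature and the first line of its docstring. For orientation, a plausible body is sketched below; it is an assumption inferred from the dynamic_lr and dynamic_weight_decay flags that the LARS hunk reads directly, not something shown in this diff:

```python
def _is_dynamic_lr_or_weight_decay(self):
    """
    Determine whether the learning rate or weight decay is dynamic.
    """
    # Assumed body: the same boolean flags the LARS hunk checks on its wrapped
    # optimizer; the actual implementation in optimizer.py may differ.
    return self.dynamic_lr or self.dynamic_weight_decay
```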


@@ -196,7 +196,7 @@ class Rprop(Optimizer):
         gradients = self.gradients_centralization(gradients)
         gradients = self.scale_grad(gradients)
         lrs = self.get_lr()
-        if not self.is_dynamic_lr_or_weight_decay():
+        if not self._is_dynamic_lr_or_weight_decay():
             self.assignadd(self.global_step, self.global_step_increase_tensor)
         success = True