diff --git a/mindspore/nn/optim/adam.py b/mindspore/nn/optim/adam.py
index 7048e44055e..198cb537a42 100755
--- a/mindspore/nn/optim/adam.py
+++ b/mindspore/nn/optim/adam.py
@@ -350,15 +350,15 @@ class Adam(Optimizer):
         beta2_power = self.beta2_power * self.beta2
         self.beta2_power = beta2_power
         if self.is_group_lr:
-            success = self.map_reverse(F.partial(_adam_opt, self.opt, self.sparse_opt, self._ps_push, self._ps_pull,
-                                                 self.use_locking, self.use_nesterov, self._is_device,
-                                                 beta1_power, beta2_power, self.beta1, self.beta2, self.eps),
-                                       lr, gradients, params, moment1, moment2, self.ps_parameters, self.cache_enable)
+            success = self.map_(F.partial(_adam_opt, self.opt, self.sparse_opt, self._ps_push, self._ps_pull,
+                                          self.use_locking, self.use_nesterov, self._is_device,
+                                          beta1_power, beta2_power, self.beta1, self.beta2, self.eps),
+                                lr, gradients, params, moment1, moment2, self.ps_parameters, self.cache_enable)
         else:
-            success = self.map_reverse(F.partial(_adam_opt, self.opt, self.sparse_opt, self._ps_push, self._ps_pull,
-                                                 self.use_locking, self.use_nesterov, self._is_device,
-                                                 beta1_power, beta2_power, self.beta1, self.beta2, self.eps, lr),
-                                       gradients, params, moment1, moment2, self.ps_parameters, self.cache_enable)
+            success = self.map_(F.partial(_adam_opt, self.opt, self.sparse_opt, self._ps_push, self._ps_pull,
+                                          self.use_locking, self.use_nesterov, self._is_device,
+                                          beta1_power, beta2_power, self.beta1, self.beta2, self.eps, lr),
+                                gradients, params, moment1, moment2, self.ps_parameters, self.cache_enable)
         return success
 
     @Optimizer.target.setter
diff --git a/mindspore/nn/optim/ftrl.py b/mindspore/nn/optim/ftrl.py
index 27b5c9180a3..aa684fb1ab8 100644
--- a/mindspore/nn/optim/ftrl.py
+++ b/mindspore/nn/optim/ftrl.py
@@ -226,9 +226,9 @@ class FTRL(Optimizer):
         grads = self._grad_sparse_indices_deduplicate(grads)
         lr = self.get_lr()
 
-        success = self.map_reverse(F.partial(_ftrl_opt, self.opt, self.sparse_opt, self._ps_push, self._ps_pull,
-                                             self.l1, self.l2, self.lr_power, lr),
-                                   linear, grads, params, moments, self.ps_parameters, self.cache_enable)
+        success = self.map_(F.partial(_ftrl_opt, self.opt, self.sparse_opt, self._ps_push, self._ps_pull,
+                                      self.l1, self.l2, self.lr_power, lr),
+                            linear, grads, params, moments, self.ps_parameters, self.cache_enable)
         return success
 
     @Optimizer.target.setter
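
For context, the change applies the partially-applied per-parameter update in declaration order (map_) rather than back to front (map_reverse). A minimal plain-Python sketch of that ordering difference follows; it is illustrative only, the helpers and _fake_opt below are hypothetical stand-ins, not MindSpore's graph-mode map_/map_reverse implementation.

    # Illustrative sketch only -- NOT MindSpore's implementation. map_ here
    # applies a partially-applied update to each parameter tuple in declaration
    # order, while map_reverse walks the same tuples back to front.
    from functools import partial

    def map_(fn, *sequences):
        # Apply fn elementwise over the zipped sequences, front to back.
        return tuple(fn(*args) for args in zip(*sequences))

    def map_reverse(fn, *sequences):
        # Apply fn elementwise over the zipped sequences, back to front.
        return tuple(fn(*args) for args in zip(*(reversed(s) for s in sequences)))

    def _fake_opt(lr, grad, param):
        # Hypothetical stand-in for _adam_opt/_ftrl_opt: one SGD-style update.
        return param - lr * grad

    grads = (0.1, 0.2, 0.3)
    params = (1.0, 2.0, 3.0)
    print(map_(partial(_fake_opt, 0.5), grads, params))         # (0.95, 1.9, 2.85)
    print(map_reverse(partial(_fake_opt, 0.5), grads, params))  # (2.85, 1.9, 0.95)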