From d4e3d69f376d2368c01f51560ad0713268b411ee Mon Sep 17 00:00:00 2001
From: "wangnan39@huawei.com" <wangnan39@huawei.com>
Date: Fri, 12 Jun 2020 19:22:58 +0800
Subject: [PATCH] support loss scale for sparse situation

---
 mindspore/nn/optim/adam.py                |  5 +---
 mindspore/nn/optim/ftrl.py                | 22 ++++----------
 mindspore/nn/optim/lazyadam.py            |  3 --
 mindspore/nn/optim/optimizer.py           | 11 ++++++-
 tests/ut/python/nn/optim/test_adam.py     | 36 ++---------------------
 tests/ut/python/nn/optim/test_ftrl.py     |  4 +--
 tests/ut/python/nn/optim/test_lazyadam.py |  4 +--
 7 files changed, 23 insertions(+), 62 deletions(-)

diff --git a/mindspore/nn/optim/adam.py b/mindspore/nn/optim/adam.py
index 9586a404f93..13f4d779a34 100755
--- a/mindspore/nn/optim/adam.py
+++ b/mindspore/nn/optim/adam.py
@@ -234,8 +234,6 @@ class Adam(Optimizer):
         _check_param_value(beta1, beta2, eps, weight_decay, self.cls_name)
         validator.check_value_type("use_locking", use_locking, [bool], self.cls_name)
         validator.check_value_type("use_nesterov", use_nesterov, [bool], self.cls_name)
-        validator.check_value_type("loss_scale", loss_scale, [float], self.cls_name)
-        validator.check_number_range("loss_scale", loss_scale, 0.0, float("inf"), Rel.INC_LEFT, self.cls_name)
 
         self.beta1 = Tensor(beta1, mstype.float32)
         self.beta2 = Tensor(beta2, mstype.float32)
@@ -247,9 +245,8 @@ class Adam(Optimizer):
         self.moment2 = self.parameters.clone(prefix="moment2", init='zeros')
 
         self.hyper_map = C.HyperMap()
-        self.map_ = C.Map()
         self.opt = P.Adam(use_locking, use_nesterov)
-        self.sparse_opt = P.SparseApplyAdam()
+        self.sparse_opt = P.SparseApplyAdam(use_locking, use_nesterov)
 
     def construct(self, gradients):
         params = self.parameters
diff --git a/mindspore/nn/optim/ftrl.py b/mindspore/nn/optim/ftrl.py
index 95a39aed7e4..a48e93bea23 100644
--- a/mindspore/nn/optim/ftrl.py
+++ b/mindspore/nn/optim/ftrl.py
@@ -41,15 +41,11 @@ def _tensor_run_opt(opt, spars_opt, learning_rate, l1, l2, lr_power, linear, gra
     return success
 
 
-def _check_param(initial_accum, learning_rate, lr_power, l1, l2, use_locking, loss_scale=1.0, weight_decay=0.0,
-                 prim_name=None):
+def _check_param(initial_accum, lr_power, l1, l2, use_locking, weight_decay=0.0, prim_name=None):
     """Check param."""
     validator.check_value_type("initial_accum", initial_accum, [float], prim_name)
     validator.check_number("initial_accum", initial_accum, 0.0, Rel.GE, prim_name)
 
-    validator.check_value_type("learning_rate", learning_rate, [float], prim_name)
-    validator.check_number("learning_rate", learning_rate, 0.0, Rel.GT, prim_name)
-
     validator.check_value_type("lr_power", lr_power, [float], prim_name)
     validator.check_number("lr_power", lr_power, 0.0, Rel.LE, prim_name)
 
@@ -61,9 +57,6 @@ def _check_param(initial_accum, learning_rate, lr_power, l1, l2, use_locking, lo
 
     validator.check_value_type("use_locking", use_locking, [bool], prim_name)
 
-    validator.check_value_type("loss_scale", loss_scale, [float], prim_name)
-    validator.check_number("loss_scale", loss_scale, 1.0, Rel.GE, prim_name)
-
     validator.check_value_type("weight_decay", weight_decay, [float], prim_name)
     validator.check_number("weight_decay", weight_decay, 0.0, Rel.GE, prim_name)
 
@@ -110,21 +103,18 @@ class FTRL(Optimizer):
     """
    def __init__(self, params, initial_accum=0.1, learning_rate=0.001, lr_power=-0.5, l1=0.0, l2=0.0,
                  use_locking=False, loss_scale=1.0, weight_decay=0.0):
-        super(FTRL, self).__init__(learning_rate, params)
+        super(FTRL, self).__init__(learning_rate, params, loss_scale=loss_scale)
         if self.is_group:
             raise RuntimeError(f"The {self.cls_name} optimizer cannot support group setting.")
-        _check_param(initial_accum, learning_rate, lr_power, l1, l2, use_locking, loss_scale, weight_decay,
-                     self.cls_name)
+        _check_param(initial_accum, lr_power, l1, l2, use_locking, weight_decay, self.cls_name)
         self.moments = self.parameters.clone(prefix="moments", init=initial_accum)
         self.linear = self.parameters.clone(prefix="linear", init='zeros')
         self.l1 = l1
         self.l2 = l2
         self.lr_power = lr_power
-        self.reciprocal_scale = 1.0 / loss_scale
         self.weight_decay = weight_decay
         self.decay_tf = tuple((lambda: True)() for x in self.parameters)
         self.hyper_map = C.HyperMap()
-        self.map_ = C.Map()
         self.opt = P.ApplyFtrl(use_locking=use_locking)
         self.sparse_opt = P.SparseApplyFtrl(learning_rate, l1, l2, lr_power, use_locking=use_locking)
 
@@ -132,11 +122,11 @@ class FTRL(Optimizer):
         params = self.parameters
         moments = self.moments
         linear = self.linear
+        lr = self.learning_rate
         if self.weight_decay > 0.0:
             grads = self.hyper_map(F.partial(apply_decay, self.weight_decay), self.decay_tf, params, grads)
-        if self.reciprocal_scale != 1.0:
-            grads = self.hyper_map(F.partial(grad_scale, self.reciprocal_scale), grads)
-        lr = self.learning_rate
+
+        grads = self.scale_grad(grads)
         success = self.map_(F.partial(ftrl_opt, self.opt, self.sparse_opt, lr, self.l1, self.l2, self.lr_power),
                             linear, grads, params, moments)
         return success
diff --git a/mindspore/nn/optim/lazyadam.py b/mindspore/nn/optim/lazyadam.py
index 0dacb6630eb..6c8408d9685 100644
--- a/mindspore/nn/optim/lazyadam.py
+++ b/mindspore/nn/optim/lazyadam.py
@@ -164,8 +164,6 @@ class LazyAdam(Optimizer):
         _check_param_value(beta1, beta2, eps, weight_decay, self.cls_name)
         validator.check_value_type("use_locking", use_locking, [bool], self.cls_name)
         validator.check_value_type("use_nesterov", use_nesterov, [bool], self.cls_name)
-        validator.check_value_type("loss_scale", loss_scale, [float], self.cls_name)
-        validator.check_number_range("loss_scale", loss_scale, 1.0, float("inf"), Rel.INC_LEFT, self.cls_name)
 
         self.beta1 = Tensor(beta1, mstype.float32)
         self.beta2 = Tensor(beta2, mstype.float32)
@@ -179,7 +177,6 @@ class LazyAdam(Optimizer):
         self.moment2 = self.parameters.clone(prefix="moment2", init='zeros')
 
         self.hyper_map = C.HyperMap()
-        self.map_ = C.Map()
         self.opt = P.Adam(use_locking, use_nesterov)
         self.sparse_opt = P.SparseApplyLazyAdam(use_locking, use_nesterov)
 
diff --git a/mindspore/nn/optim/optimizer.py b/mindspore/nn/optim/optimizer.py
index a63430fb323..3f787c690a1 100755
--- a/mindspore/nn/optim/optimizer.py
+++ b/mindspore/nn/optim/optimizer.py
@@ -153,6 +153,7 @@ class Optimizer(Cell):
         self.reciprocal_scale = 1.0 / loss_scale
         self.exec_weight_decay = any(self.decay_flags)
         self.param_length = len(self.parameters)
+        self.map_ = C.Map()
 
     def decay_weight(self, gradients):
         """
@@ -195,7 +196,7 @@
 
         """
         if self.reciprocal_scale != 1.0:
-            gradients = self.hyper_map(F.partial(grad_scale, self.reciprocal_scale), gradients)
+            gradients = self.map_(F.partial(grad_scale, self.reciprocal_scale), gradients)
 
         return gradients
 
@@ -409,3 +410,11 @@ def tensor_grad_scale(scale, grad):
     if scale == 1.0:
         return grad
     return grad * scale
+
+
+@grad_scale.register("Number", "Tuple")
+def tensor_grad_scale_with_sparse(scale, grad):
+    """Get grad with scale."""
+    if scale == 1.0:
+        return grad
+    return grad[0], grad[1] * scale, grad[2]
diff --git a/tests/ut/python/nn/optim/test_adam.py b/tests/ut/python/nn/optim/test_adam.py
index 3fa240d45fc..be22c8abdcd 100644
--- a/tests/ut/python/nn/optim/test_adam.py
+++ b/tests/ut/python/nn/optim/test_adam.py
@@ -18,7 +18,6 @@ import pytest
 
 import mindspore.nn as nn
 from mindspore import Tensor, Parameter
-import mindspore.common.dtype as mstype
 from mindspore.common.api import _executor
 from mindspore.nn import TrainOneStepCell, WithLossCell
 from mindspore.nn.optim import Adam, AdamWeightDecay, AdamWeightDecayDynamicLR
@@ -100,14 +99,14 @@ def test_adam_compile():
     _executor.compile(train_network, inputs, label)
 
 
-def test_spares_adam_compile():
+def test_sparse_adam_compile():
     """ test_sparse_adam_compile """
     indices = Tensor(np.array([0, 1]).astype(np.int32))
     label = Tensor(np.zeros([2, 1, 2]).astype(np.float32))
     net = NetWithSparseGatherV2()
     net.set_train()
 
-    optimizer = Adam(net.trainable_params(), learning_rate=0.1)
+    optimizer = Adam(net.trainable_params(), learning_rate=0.1, loss_scale=1024.0)
     train_network = TrainOneStepCell(net, optimizer)
     _executor.compile(train_network, indices, label)
 
@@ -149,34 +148,3 @@ def test_adam_mindspore_with_empty_params():
     net = nn.Flatten()
     with pytest.raises(ValueError, match=r"Optimizer got an empty parameter list"):
         AdamWeightDecay(net.get_parameters())
-
-
-class TestSparseOps(nn.Cell):
-    """Define sparse operator"""
-    def __init__(self, sparse_opt):
-        super(TestSparseOps, self).__init__()
-        self.sparse_apply_adam = sparse_opt
-        self.var = Parameter(Tensor(np.ones([3, 3, 3]).astype(np.float32)), name="var")
-        self.m = Parameter(Tensor(np.ones([3, 3, 3]).astype(np.float32)), name="m")
-        self.v = Parameter(Tensor(np.ones([3, 3, 3]).astype(np.float32)), name="v")
-
-    def construct(self, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad, indices):
-        out = self.sparse_apply_adam(self.var, self.m, self.v, beta1_power, beta2_power, lr, beta1, beta2, epsilon,
-                                     grad, indices)
-        return out
-
-
-def test_sparse_adam():
-    """test sparse operator"""
-    gradient = Tensor(np.random.rand(3, 3, 3).astype(np.float32))
-    indices = Tensor([0, 1, 2], mstype.int32)
-    net = TestSparseOps(P.SparseApplyAdam())
-    _executor.compile(net, 0.9, 0.999, 0.001, 0.9, 0.999, 1e-8, gradient, indices)
-
-
-def test_sparse_lazy_adam():
-    """test sparse operator"""
-    gradient = Tensor(np.random.rand(3, 3, 3).astype(np.float32))
-    indices = Tensor([0, 1, 2], mstype.int32)
-    net = TestSparseOps(P.SparseApplyLazyAdam())
-    _executor.compile(net, 0.9, 0.999, 0.001, 0.9, 0.999, 1e-8, gradient, indices)
diff --git a/tests/ut/python/nn/optim/test_ftrl.py b/tests/ut/python/nn/optim/test_ftrl.py
index e38cc527ef7..213ce6c460b 100644
--- a/tests/ut/python/nn/optim/test_ftrl.py
+++ b/tests/ut/python/nn/optim/test_ftrl.py
@@ -57,7 +57,7 @@ def test_ftrl():
     net = Net()
     net.set_train()
     loss = nn.SoftmaxCrossEntropyWithLogits()
-    optimizer = FTRL(net.trainable_params())
+    optimizer = FTRL(net.trainable_params(), weight_decay=0.9, loss_scale=2.0)
     net_with_loss = WithLossCell(net, loss)
     train_network = TrainOneStepCell(net_with_loss, optimizer)
     _executor.compile(train_network, inputs, label)
@@ -70,6 +70,6 @@ def test_spares_ftrl_compile():
     net = NetWithSparseGatherV2()
     net.set_train()
 
-    optimizer = FTRL(net.trainable_params())
+    optimizer = FTRL(net.trainable_params(), loss_scale=2.0)
     train_network = TrainOneStepCell(net, optimizer)
     _executor.compile(train_network, indices, label)
diff --git a/tests/ut/python/nn/optim/test_lazyadam.py b/tests/ut/python/nn/optim/test_lazyadam.py
index a78a3ab7260..77b02f9ff90 100644
--- a/tests/ut/python/nn/optim/test_lazyadam.py
+++ b/tests/ut/python/nn/optim/test_lazyadam.py
@@ -60,7 +60,7 @@ def test_lazy_adam_compile():
     net = Net()
     net.set_train()
     loss = nn.SoftmaxCrossEntropyWithLogits()
-    optimizer = LazyAdam(net.trainable_params(), learning_rate=0.1, weight_decay=0.9)
+    optimizer = LazyAdam(net.trainable_params(), learning_rate=0.1, weight_decay=0.9, loss_scale=2.0)
 
     net_with_loss = WithLossCell(net, loss)
     train_network = TrainOneStepCell(net_with_loss, optimizer)
@@ -74,7 +74,7 @@ def test_spares_lazy_adam_compile():
     net = NetWithSparseGatherV2()
     net.set_train()
 
-    optimizer = LazyAdam(net.trainable_params(), learning_rate=0.1)
+    optimizer = LazyAdam(net.trainable_params(), learning_rate=0.1, loss_scale=2.0)
     train_network = TrainOneStepCell(net, optimizer)
     _executor.compile(train_network, indices, label)
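
Note on the scaling rule introduced above (illustration only, not part of the patch): with loss scale now handled in the base Optimizer, scale_grad dispatches grad_scale through C.Map over each gradient. A dense gradient Tensor is multiplied by 1.0 / loss_scale (tensor_grad_scale), while a sparse gradient passed as a tuple has only its values slot scaled, with its indices and dense shape passed through unchanged (tensor_grad_scale_with_sparse). The following plain-Python/NumPy sketch mirrors that rule; the helper names and the (indices, values, dense_shape) tuple layout are assumptions for illustration, not MindSpore APIs.

import numpy as np

def scale_dense_grad(reciprocal_scale, grad):
    # Mirrors tensor_grad_scale: every element of a dense gradient is scaled.
    if reciprocal_scale == 1.0:
        return grad
    return grad * reciprocal_scale

def scale_sparse_grad(reciprocal_scale, grad):
    # Mirrors tensor_grad_scale_with_sparse: grad is assumed to be a
    # (indices, values, dense_shape) tuple; only the values are scaled.
    if reciprocal_scale == 1.0:
        return grad
    indices, values, dense_shape = grad
    return indices, values * reciprocal_scale, dense_shape

loss_scale = 1024.0
reciprocal_scale = 1.0 / loss_scale

dense_grad = np.full((2, 3), 1024.0, dtype=np.float32)
sparse_grad = (np.array([0, 2]), np.full((2, 3), 1024.0, dtype=np.float32), (4, 3))

print(scale_dense_grad(reciprocal_scale, dense_grad))       # every entry becomes 1.0
print(scale_sparse_grad(reciprocal_scale, sparse_grad)[1])  # values become 1.0; indices and shape untouched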