diff --git a/mindspore/nn/optim/optimizer.py b/mindspore/nn/optim/optimizer.py
index e2b0cddb71..cd0ed93a10 100755
--- a/mindspore/nn/optim/optimizer.py
+++ b/mindspore/nn/optim/optimizer.py
@@ -84,7 +84,7 @@ apply_decay = C.MultitypeFuncGraph("apply_decay")
 def _tensor_apply_decay(weight_decay, if_apply, weight, gradient):
     """Get grad with weight_decay."""
     if if_apply:
-        return op_add((gradient, weight * F.scalar_to_array(weight_decay)))
+        return op_add((gradient, weight * weight_decay))
     return gradient
diff --git a/mindspore/nn/wrap/loss_scale.py b/mindspore/nn/wrap/loss_scale.py
index f7c686f535..a11c753eda 100644
--- a/mindspore/nn/wrap/loss_scale.py
+++ b/mindspore/nn/wrap/loss_scale.py
@@ -32,7 +32,7 @@ reciprocal = P.Reciprocal()
 @_grad_scale.register("Tensor", "Tensor")
 def tensor_grad_scale(scale, grad):
-    return grad * reciprocal(scale)
+    return grad * F.cast(reciprocal(scale), F.dtype(grad))


 class DynamicLossScaleUpdateCell(Cell):
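
The `loss_scale.py` hunk casts the reciprocal of the loss scale to the gradient's dtype before the multiply. This matters in mixed-precision training, where gradients are commonly float16 while the loss scale is kept as a float32 Tensor; without the cast, the dtypes of the two operands disagree. Below is a minimal sketch of the same unscale-and-cast pattern, written as an illustrative NumPy analogue under assumed names and shapes, not MindSpore code:

```python
import numpy as np

def unscale_grad(scale, grad):
    """Illustrative analogue of tensor_grad_scale: multiply the gradient by
    1/scale while keeping the gradient's original dtype."""
    inv_scale = np.reciprocal(scale).astype(grad.dtype)  # cast 1/scale to grad's dtype
    return grad * inv_scale

# Example: float16 gradients with a float32 loss scale (typical mixed precision).
grad = np.full((2, 2), 0.5, dtype=np.float16)
scale = np.float32(1024.0)
unscaled = unscale_grad(scale, grad)
assert unscaled.dtype == np.float16  # explicit cast keeps the result in float16
```

Keeping the product in the gradient's own dtype avoids an implicit promotion of every gradient tensor to float32, so downstream optimizer code continues to see the dtype it was given.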