forked from mindspore-Ecosystem/mindspore
!12 Fix dtype bug for loss_scale and weight_decay
Merge pull request !12 from seatea/dynamic-loss-scale
This commit is contained in:
commit 062b744b19
@@ -84,7 +84,7 @@ apply_decay = C.MultitypeFuncGraph("apply_decay")
 def _tensor_apply_decay(weight_decay, if_apply, weight, gradient):
     """Get grad with weight_decay."""
     if if_apply:
-        return op_add((gradient, weight * F.scalar_to_array(weight_decay)))
+        return op_add((gradient, weight * weight_decay))
     return gradient

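The weight_decay change drops the F.scalar_to_array wrapper: wrapping the Python scalar into a float32 tensor forced the decay term, and therefore the returned gradient, to float32 even when the parameter and gradient are float16, whereas multiplying by the plain scalar keeps the parameter's dtype. Below is a minimal NumPy sketch of the same promotion behavior, with hypothetical values; it illustrates the dtype issue, it is not the MindSpore code itself.

import numpy as np

weight = np.full((2, 2), 0.5, dtype=np.float16)    # float16 parameter (mixed precision)
gradient = np.ones((2, 2), dtype=np.float16)       # float16 gradient
weight_decay = 0.01                                 # Python scalar hyperparameter

# Old behaviour: the scalar is first wrapped into a float32 array
# (analogous to F.scalar_to_array), which promotes the result to float32.
decay_as_array = np.array([weight_decay], dtype=np.float32)
promoted = gradient + weight * decay_as_array
print(promoted.dtype)   # float32 -> no longer matches the float16 gradient

# New behaviour: multiply by the plain scalar, so the parameter's dtype is kept.
kept = gradient + weight * weight_decay
print(kept.dtype)       # float16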
@@ -32,7 +32,7 @@ reciprocal = P.Reciprocal()
 
 @_grad_scale.register("Tensor", "Tensor")
 def tensor_grad_scale(scale, grad):
-    return grad * reciprocal(scale)
+    return grad * F.cast(reciprocal(scale), F.dtype(grad))
 
 
 class DynamicLossScaleUpdateCell(Cell):
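The loss_scale change casts the reciprocal of the scale to the gradient's dtype before multiplying. In mixed-precision training the gradients are typically float16 while the loss-scale tensor is float32, so without the cast the unscaled gradient would be promoted to float32 or rejected by a dtype check downstream. Below is a minimal NumPy sketch of the promotion this cast avoids, with hypothetical values; it is an illustration, not the MindSpore code itself.

import numpy as np

grad = np.ones((2, 2), dtype=np.float16)            # float16 gradient (mixed precision)
loss_scale = np.array([1024.0], dtype=np.float32)   # loss scale kept as a float32 tensor

scale_reciprocal = np.reciprocal(loss_scale)        # still float32

# Without the cast, the multiply promotes the unscaled gradient to float32.
promoted = grad * scale_reciprocal
print(promoted.dtype)   # float32

# Casting the reciprocal to the gradient's dtype first (as F.cast does in the fix)
# keeps the result in float16, matching the rest of the gradient pipeline.
kept = grad * scale_reciprocal.astype(grad.dtype)
print(kept.dtype)       # float16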