commit 7f71a99993
!21217 modify loss scale manager example

Merge pull request !21217 from wangnan39/code_docs_loss_scale
@@ -1403,8 +1403,7 @@ class GraphCell(Cell):
     Examples:
         >>> import numpy as np
         >>> import mindspore.nn as nn
-        >>> from mindspore import Tensor
-        >>> from mindspore.train import export, load
+        >>> from mindspore import Tensor, export, load
         >>>
         >>> net = nn.Conv2d(1, 1, kernel_size=3, weight_init="ones")
         >>> input = Tensor(np.ones([1, 1, 3, 3]).astype(np.float32))
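Note: the truncated tail of this example is the MindIR round trip that the consolidated import serves. For reference, it continues along these lines (a sketch; the file name "net" follows the docstring):

    >>> export(net, input, file_name="net", file_format="MINDIR")
    >>> graph = load("net.mindir")
    >>> net = nn.GraphCell(graph)
    >>> output = net(input)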
@@ -88,15 +88,14 @@ class DynamicLossScaleUpdateCell(Cell):
     Examples:
         >>> import numpy as np
         >>> from mindspore import Tensor, Parameter, nn
-        >>> from mindspore.ops import operations as P
-        >>> from mindspore.nn.wrap.cell_wrapper import WithLossCell
+        >>> import mindspore.ops as ops
         >>>
         >>> class Net(nn.Cell):
         ...     def __init__(self, in_features, out_features):
         ...         super(Net, self).__init__()
         ...         self.weight = Parameter(Tensor(np.ones([in_features, out_features]).astype(np.float32)),
         ...                                 name='weight')
-        ...         self.matmul = P.MatMul()
+        ...         self.matmul = ops.MatMul()
         ...
         ...     def construct(self, x):
         ...         output = self.matmul(x, self.weight)
@@ -106,7 +105,7 @@ class DynamicLossScaleUpdateCell(Cell):
         >>> net = Net(in_features, out_features)
         >>> loss = nn.MSELoss()
         >>> optimizer = nn.Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
-        >>> net_with_loss = WithLossCell(net, loss)
+        >>> net_with_loss = nn.WithLossCell(net, loss)
         >>> manager = nn.DynamicLossScaleUpdateCell(loss_scale_value=2**12, scale_factor=2, scale_window=1000)
         >>> train_network = nn.TrainOneStepWithLossScaleCell(net_with_loss, optimizer, scale_sense=manager)
         >>> input = Tensor(np.ones([out_features, in_features]), mindspore.float32)
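For review context, DynamicLossScaleUpdateCell implements the standard dynamic loss-scale rule: divide the scale by scale_factor on overflow, multiply it by scale_factor after scale_window consecutive overflow-free steps. A simplified plain-Python sketch of that rule (illustrative only, not the in-graph MindSpore implementation):

    def update_loss_scale(scale, counter, overflow, scale_factor=2, scale_window=1000):
        # Overflow: shrink the scale (never below 1) and restart the window.
        if overflow:
            return max(scale / scale_factor, 1), 0
        # Count overflow-free steps; after a full window, try a larger scale.
        counter += 1
        if counter == scale_window:
            return scale * scale_factor, 0
        return scale, counter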
@@ -179,15 +178,14 @@ class FixedLossScaleUpdateCell(Cell):
     Examples:
         >>> import numpy as np
         >>> from mindspore import Tensor, Parameter, nn
-        >>> from mindspore.ops import operations as P
-        >>> from mindspore.nn.wrap.cell_wrapper import WithLossCell
+        >>> import mindspore.ops as ops
         >>>
         >>> class Net(nn.Cell):
         ...     def __init__(self, in_features, out_features):
         ...         super(Net, self).__init__()
         ...         self.weight = Parameter(Tensor(np.ones([in_features, out_features]).astype(np.float32)),
         ...                                 name='weight')
-        ...         self.matmul = P.MatMul()
+        ...         self.matmul = ops.MatMul()
         ...
         ...     def construct(self, x):
         ...         output = self.matmul(x, self.weight)
@@ -197,7 +195,7 @@ class FixedLossScaleUpdateCell(Cell):
         >>> net = Net(in_features, out_features)
         >>> loss = nn.MSELoss()
         >>> optimizer = nn.Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
-        >>> net_with_loss = WithLossCell(net, loss)
+        >>> net_with_loss = nn.WithLossCell(net, loss)
         >>> manager = nn.FixedLossScaleUpdateCell(loss_scale_value=2**12)
         >>> train_network = nn.TrainOneStepWithLossScaleCell(net_with_loss, optimizer, scale_sense=manager)
         >>> input = Tensor(np.ones([out_features, in_features]), mindspore.float32)
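FixedLossScaleUpdateCell keeps the scale constant at loss_scale_value; the train wrapper still scales the loss before backprop, unscales the gradients, and skips the update on overflow. A simplified sketch of that flow (compute_grads and grads_finite are placeholder names, not MindSpore APIs):

    def scaled_train_step(net_with_loss, optimizer, x, label, scale):
        loss = net_with_loss(x, label)
        # Backprop with sens=scale yields gradients of (loss * scale).
        grads = compute_grads(net_with_loss, x, label, sens=scale)
        grads = [g / scale for g in grads]   # unscale before the update
        if grads_finite(grads):              # skip the step on inf/nan
            optimizer(grads)
        return loss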
@@ -253,16 +251,15 @@ class TrainOneStepWithLossScaleCell(TrainOneStepCell):
     Examples:
         >>> import numpy as np
         >>> from mindspore import Tensor, Parameter, nn
-        >>> from mindspore.ops import operations as P
-        >>> from mindspore.nn.wrap.cell_wrapper import WithLossCell
-        >>> from mindspore.common import dtype as mstype
+        >>> import mindspore.ops as ops
+        >>> from mindspore import dtype as mstype
         >>>
         >>> class Net(nn.Cell):
         ...     def __init__(self, in_features, out_features):
         ...         super(Net, self).__init__()
         ...         self.weight = Parameter(Tensor(np.ones([in_features, out_features]).astype(np.float32)),
         ...                                 name='weight')
-        ...         self.matmul = P.MatMul()
+        ...         self.matmul = ops.MatMul()
         ...
         ...     def construct(self, x):
         ...         output = self.matmul(x, self.weight)
@@ -273,7 +270,7 @@ class TrainOneStepWithLossScaleCell(TrainOneStepCell):
         >>> net = Net(in_features, out_features)
         >>> loss = nn.MSELoss()
         >>> optimizer = nn.Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
-        >>> net_with_loss = WithLossCell(net, loss)
+        >>> net_with_loss = nn.WithLossCell(net, loss)
         >>> manager = nn.DynamicLossScaleUpdateCell(loss_scale_value=2**12, scale_factor=2, scale_window=1000)
         >>> train_network = nn.TrainOneStepWithLossScaleCell(net_with_loss, optimizer, scale_sense=manager)
         >>> input = Tensor(np.ones([out_features, in_features]), mindspore.float32)
@@ -284,7 +281,7 @@ class TrainOneStepWithLossScaleCell(TrainOneStepCell):
         >>> net = Net(in_features, out_features)
         >>> loss = nn.MSELoss()
         >>> optimizer = nn.Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
-        >>> net_with_loss = WithLossCell(net, loss)
+        >>> net_with_loss = nn.WithLossCell(net, loss)
         >>> inputs = Tensor(np.ones([size, in_features]).astype(np.float32))
         >>> label = Tensor(np.zeros([size, out_features]).astype(np.float32))
         >>> scaling_sens = Tensor(np.full((1), np.finfo(np.float32).max), dtype=mstype.float32)
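This second example passes the Tensor scaling_sens directly as scale_sense, so the scale stays fixed unless reassigned (the wrapper also exposes set_sense_scale for adjusting a Tensor scale between steps, if the installed version provides it). Its truncated continuation is, roughly (a sketch; size is assumed to be defined earlier, e.g. size = 16):

    >>> train_network = nn.TrainOneStepWithLossScaleCell(net_with_loss, optimizer, scale_sense=scaling_sens)
    >>> output = train_network(inputs, label)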
@@ -115,8 +115,7 @@ class DynamicLossScaleManager(LossScaleManager):
         scale_window (int): Maximum continuous normal steps when there is no overflow. Default: 2000.

     Examples:
-        >>> from mindspore import Model, nn
-        >>> from mindspore.train.loss_scale_manager import DynamicLossScaleManager
+        >>> from mindspore import Model, nn, DynamicLossScaleManager
         >>>
         >>> net = Net()
         >>> loss_scale_manager = DynamicLossScaleManager()
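The cut-off tail of this example passes the manager to Model. The usual pattern, sketched (Net, the loss, the optimizer, and dataset are assumed, as in the surrounding docstrings):

    >>> loss = nn.SoftmaxCrossEntropyWithLogits()
    >>> opt = nn.Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
    >>> model = Model(net, loss_fn=loss, optimizer=opt, loss_scale_manager=loss_scale_manager)
    >>> model.train(2, dataset)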
@@ -615,8 +615,7 @@ class Model:
             Default: -1.

     Examples:
-        >>> from mindspore import Model, nn
-        >>> from mindspore.train.loss_scale_manager import FixedLossScaleManager
+        >>> from mindspore import Model, nn, FixedLossScaleManager
         >>>
         >>> # For details about how to build the dataset, please refer to the tutorial
         >>> # document on the official website.
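For reference, this Model example typically continues by building the network and handing FixedLossScaleManager to the constructor (a sketch; Net and create_dataset are assumed placeholders from the tutorial):

    >>> dataset = create_dataset()
    >>> net = Net()
    >>> loss = nn.SoftmaxCrossEntropyWithLogits()
    >>> opt = nn.Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
    >>> model = Model(net, loss_fn=loss, optimizer=opt, loss_scale_manager=FixedLossScaleManager())
    >>> model.train(2, dataset)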
@@ -872,10 +871,9 @@ class Model:
         >>> # mindspore.cn.
         >>> import numpy as np
         >>> import mindspore as ms
-        >>> from mindspore import Model, context, Tensor, nn
+        >>> from mindspore import Model, context, Tensor, nn, FixedLossScaleManager
         >>> from mindspore.context import ParallelMode
         >>> from mindspore.communication import init
-        >>> from mindspore.train.loss_scale_manager import FixedLossScaleManager
         >>>
         >>> context.set_context(mode=context.GRAPH_MODE)
         >>> init()
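The truncated remainder of this distributed example normally configures the parallel context before building the Model; sketched under the same assumptions as above (net, loss, opt defined as in the other examples):

    >>> context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL, gradients_mean=True)
    >>> loss_scale_manager = FixedLossScaleManager()
    >>> model = Model(net, loss_fn=loss, optimizer=opt, loss_scale_manager=loss_scale_manager)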
@@ -331,8 +331,7 @@ def load(file_name, **kwargs):
     Examples:
         >>> import numpy as np
         >>> import mindspore.nn as nn
-        >>> from mindspore import Tensor
-        >>> from mindspore.train import export, load
+        >>> from mindspore import Tensor, export, load
         >>>
         >>> net = nn.Conv2d(1, 1, kernel_size=3, weight_init="ones")
         >>> input = Tensor(np.ones([1, 1, 3, 3]).astype(np.float32))
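As in the GraphCell hunk above, the truncated tail of this load example is the export/load round trip (a sketch of the documented workflow):

    >>> export(net, input, file_name="net", file_format="MINDIR")
    >>> graph = load("net.mindir")
    >>> net = nn.GraphCell(graph)
    >>> output = net(input)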
@@ -195,7 +195,7 @@ class ConvertModelUtils():
     Examples:
         >>> from mindspore.nn.optim import thor
         >>> from mindspore.train.model import Model
-        >>> from mindspore.train.loss_scale_manager import FixedLossScaleManager
+        >>> from mindspore import FixedLossScaleManager
         >>>
         >>> net = Net()
         >>> loss_manager = FixedLossScaleManager(128, drop_overflow_update=False)
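convert_to_thor_model rebuilds the Model so training runs with the THOR second-order optimizer while reusing the same loss-scale manager. A sketch of the usual call (net, loss, Tensor, and the hyperparameter values are assumptions for illustration, not taken from this diff):

    >>> opt = thor(net, lr=Tensor(0.01), damping=Tensor(0.03), momentum=0.9)
    >>> model = Model(net, loss_fn=loss, optimizer=opt, loss_scale_manager=loss_manager,
    ...               metrics={'acc'}, amp_level="O2", keep_batchnorm_fp32=False)
    >>> model = ConvertModelUtils.convert_to_thor_model(model=model, network=net, loss_fn=loss, optimizer=opt,
    ...                                                 loss_scale_manager=loss_manager, metrics={'acc'},
    ...                                                 amp_level="O2", keep_batchnorm_fp32=False)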