diff --git a/mindspore/nn/cell.py b/mindspore/nn/cell.py
index 46ed2ce34d5..a079b573736 100755
--- a/mindspore/nn/cell.py
+++ b/mindspore/nn/cell.py
@@ -1403,8 +1403,7 @@ class GraphCell(Cell):
     Examples:
         >>> import numpy as np
         >>> import mindspore.nn as nn
-        >>> from mindspore import Tensor
-        >>> from mindspore.train import export, load
+        >>> from mindspore import Tensor, export, load
         >>>
         >>> net = nn.Conv2d(1, 1, kernel_size=3, weight_init="ones")
         >>> input = Tensor(np.ones([1, 1, 3, 3]).astype(np.float32))
diff --git a/mindspore/nn/wrap/loss_scale.py b/mindspore/nn/wrap/loss_scale.py
index 735ef2edcec..4f0ed67bf29 100644
--- a/mindspore/nn/wrap/loss_scale.py
+++ b/mindspore/nn/wrap/loss_scale.py
@@ -88,15 +88,14 @@ class DynamicLossScaleUpdateCell(Cell):
     Examples:
         >>> import numpy as np
         >>> from mindspore import Tensor, Parameter, nn
-        >>> from mindspore.ops import operations as P
-        >>> from mindspore.nn.wrap.cell_wrapper import WithLossCell
+        >>> import mindspore.ops as ops
         >>>
         >>> class Net(nn.Cell):
         ...     def __init__(self, in_features, out_features):
         ...         super(Net, self).__init__()
         ...         self.weight = Parameter(Tensor(np.ones([in_features, out_features]).astype(np.float32)),
         ...                                 name='weight')
-        ...         self.matmul = P.MatMul()
+        ...         self.matmul = ops.MatMul()
         ...
         ...     def construct(self, x):
         ...         output = self.matmul(x, self.weight)
@@ -106,7 +105,7 @@ class DynamicLossScaleUpdateCell(Cell):
         >>> net = Net(in_features, out_features)
         >>> loss = nn.MSELoss()
         >>> optimizer = nn.Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
-        >>> net_with_loss = WithLossCell(net, loss)
+        >>> net_with_loss = nn.WithLossCell(net, loss)
         >>> manager = nn.DynamicLossScaleUpdateCell(loss_scale_value=2**12, scale_factor=2, scale_window=1000)
         >>> train_network = nn.TrainOneStepWithLossScaleCell(net_with_loss, optimizer, scale_sense=manager)
         >>> input = Tensor(np.ones([out_features, in_features]), mindspore.float32)
@@ -179,15 +178,14 @@ class FixedLossScaleUpdateCell(Cell):
     Examples:
         >>> import numpy as np
         >>> from mindspore import Tensor, Parameter, nn
-        >>> from mindspore.ops import operations as P
-        >>> from mindspore.nn.wrap.cell_wrapper import WithLossCell
+        >>> import mindspore.ops as ops
         >>>
         >>> class Net(nn.Cell):
         ...     def __init__(self, in_features, out_features):
         ...         super(Net, self).__init__()
         ...         self.weight = Parameter(Tensor(np.ones([in_features, out_features]).astype(np.float32)),
         ...                                 name='weight')
-        ...         self.matmul = P.MatMul()
+        ...         self.matmul = ops.MatMul()
         ...
         ...     def construct(self, x):
         ...         output = self.matmul(x, self.weight)
@@ -197,7 +195,7 @@ class FixedLossScaleUpdateCell(Cell):
         >>> net = Net(in_features, out_features)
         >>> loss = nn.MSELoss()
         >>> optimizer = nn.Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
-        >>> net_with_loss = WithLossCell(net, loss)
+        >>> net_with_loss = nn.WithLossCell(net, loss)
         >>> manager = nn.FixedLossScaleUpdateCell(loss_scale_value=2**12)
         >>> train_network = nn.TrainOneStepWithLossScaleCell(net_with_loss, optimizer, scale_sense=manager)
         >>> input = Tensor(np.ones([out_features, in_features]), mindspore.float32)
@@ -253,16 +251,15 @@ class TrainOneStepWithLossScaleCell(TrainOneStepCell):
     Examples:
         >>> import numpy as np
         >>> from mindspore import Tensor, Parameter, nn
-        >>> from mindspore.ops import operations as P
-        >>> from mindspore.nn.wrap.cell_wrapper import WithLossCell
-        >>> from mindspore.common import dtype as mstype
+        >>> import mindspore.ops as ops
+        >>> from mindspore import dtype as mstype
         >>>
         >>> class Net(nn.Cell):
         ...     def __init__(self, in_features, out_features):
         ...         super(Net, self).__init__()
         ...         self.weight = Parameter(Tensor(np.ones([in_features, out_features]).astype(np.float32)),
         ...                                 name='weight')
-        ...         self.matmul = P.MatMul()
+        ...         self.matmul = ops.MatMul()
         ...
         ...     def construct(self, x):
         ...         output = self.matmul(x, self.weight)
@@ -273,7 +270,7 @@ class TrainOneStepWithLossScaleCell(TrainOneStepCell):
         >>> net = Net(in_features, out_features)
         >>> loss = nn.MSELoss()
         >>> optimizer = nn.Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
-        >>> net_with_loss = WithLossCell(net, loss)
+        >>> net_with_loss = nn.WithLossCell(net, loss)
         >>> manager = nn.DynamicLossScaleUpdateCell(loss_scale_value=2**12, scale_factor=2, scale_window=1000)
         >>> train_network = nn.TrainOneStepWithLossScaleCell(net_with_loss, optimizer, scale_sense=manager)
         >>> input = Tensor(np.ones([out_features, in_features]), mindspore.float32)
@@ -284,7 +281,7 @@ class TrainOneStepWithLossScaleCell(TrainOneStepCell):
         >>> net = Net(in_features, out_features)
         >>> loss = nn.MSELoss()
         >>> optimizer = nn.Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
-        >>> net_with_loss = WithLossCell(net, loss)
+        >>> net_with_loss = nn.WithLossCell(net, loss)
         >>> inputs = Tensor(np.ones([size, in_features]).astype(np.float32))
         >>> label = Tensor(np.zeros([size, out_features]).astype(np.float32))
         >>> scaling_sens = Tensor(np.full((1), np.finfo(np.float32).max), dtype=mstype.float32)
diff --git a/mindspore/train/loss_scale_manager.py b/mindspore/train/loss_scale_manager.py
index 02a134fd590..501aebb5c1c 100644
--- a/mindspore/train/loss_scale_manager.py
+++ b/mindspore/train/loss_scale_manager.py
@@ -115,8 +115,7 @@ class DynamicLossScaleManager(LossScaleManager):
         scale_window (int): Maximum continuous normal steps when there is no overflow. Default: 2000.
 
     Examples:
-        >>> from mindspore import Model, nn
-        >>> from mindspore.train.loss_scale_manager import DynamicLossScaleManager
+        >>> from mindspore import Model, nn, DynamicLossScaleManager
         >>>
         >>> net = Net()
         >>> loss_scale_manager = DynamicLossScaleManager()
diff --git a/mindspore/train/model.py b/mindspore/train/model.py
index d87ec722425..416d8707d86 100644
--- a/mindspore/train/model.py
+++ b/mindspore/train/model.py
@@ -615,8 +615,7 @@ class Model:
                 Default: -1.
 
         Examples:
-            >>> from mindspore import Model, nn
-            >>> from mindspore.train.loss_scale_manager import FixedLossScaleManager
+            >>> from mindspore import Model, nn, FixedLossScaleManager
             >>>
             >>> # For details about how to build the dataset, please refer to the tutorial
             >>> # document on the official website.
@@ -872,10 +871,9 @@ class Model:
             >>> # mindspore.cn.
             >>> import numpy as np
             >>> import mindspore as ms
-            >>> from mindspore import Model, context, Tensor, nn
+            >>> from mindspore import Model, context, Tensor, nn, FixedLossScaleManager
             >>> from mindspore.context import ParallelMode
             >>> from mindspore.communication import init
-            >>> from mindspore.train.loss_scale_manager import FixedLossScaleManager
             >>>
             >>> context.set_context(mode=context.GRAPH_MODE)
             >>> init()
diff --git a/mindspore/train/serialization.py b/mindspore/train/serialization.py
index 1e4c96c7b1e..edfb1196cf0 100644
--- a/mindspore/train/serialization.py
+++ b/mindspore/train/serialization.py
@@ -331,8 +331,7 @@ def load(file_name, **kwargs):
     Examples:
         >>> import numpy as np
         >>> import mindspore.nn as nn
-        >>> from mindspore import Tensor
-        >>> from mindspore.train import export, load
+        >>> from mindspore import Tensor, export, load
         >>>
         >>> net = nn.Conv2d(1, 1, kernel_size=3, weight_init="ones")
         >>> input = Tensor(np.ones([1, 1, 3, 3]).astype(np.float32))
diff --git a/mindspore/train/train_thor/convert_utils.py b/mindspore/train/train_thor/convert_utils.py
index 34d6166e450..7ce34a94b95 100644
--- a/mindspore/train/train_thor/convert_utils.py
+++ b/mindspore/train/train_thor/convert_utils.py
@@ -195,7 +195,7 @@ class ConvertModelUtils():
     Examples:
         >>> from mindspore.nn.optim import thor
         >>> from mindspore.train.model import Model
-        >>> from mindspore.train.loss_scale_manager import FixedLossScaleManager
+        >>> from mindspore import FixedLossScaleManager
         >>>
         >>> net = Net()
         >>> loss_manager = FixedLossScaleManager(128, drop_overflow_update=False)
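
Note: every hunk above makes the same substitution: docstring examples stop importing from internal
submodules (mindspore.train, mindspore.train.loss_scale_manager, mindspore.nn.wrap.cell_wrapper,
mindspore.ops.operations) and use the public top-level re-exports instead. As a quick sanity check
of the new import style, here is a minimal sketch based on the GraphCell example in the first hunk,
assuming a MindSpore build that already ships these top-level re-exports:

    # Minimal sketch of the post-patch import style; assumes `export`, `load`,
    # and friends are re-exported from the top-level package, as the hunks
    # above document.
    import numpy as np
    import mindspore.nn as nn
    import mindspore.ops as ops                  # was: from mindspore.ops import operations as P
    from mindspore import Tensor, export, load   # was: from mindspore.train import export, load

    net = nn.Conv2d(1, 1, kernel_size=3, weight_init="ones")
    x = Tensor(np.ones([1, 1, 3, 3]).astype(np.float32))
    export(net, x, file_name="net", file_format="MINDIR")  # serialize the cell to MindIR
    graph = load("net.mindir")                             # reload the exported graph
    print(nn.GraphCell(graph)(x))                          # run it via GraphCell

A snippet written against the old paths still runs on patched builds (the submodule imports are not
removed by this change); the docstrings simply stop advertising them.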