diff --git a/docs/api/api_python/mindspore/mindspore.ParameterTuple.rst b/docs/api/api_python/mindspore/mindspore.ParameterTuple.rst
index 88833893041..25515143d84 100644
--- a/docs/api/api_python/mindspore/mindspore.ParameterTuple.rst
+++ b/docs/api/api_python/mindspore/mindspore.ParameterTuple.rst
@@ -1,7 +1,7 @@
 mindspore.ParameterTuple
 ========================
 
-.. py:class:: mindspore.ParameterTuple(iterable)
+.. py:class:: mindspore.ParameterTuple
 
     Inherits from tuple; used to manage multiple Parameters.
 
diff --git a/docs/api/api_python/nn/mindspore.nn.GraphCell.rst b/docs/api/api_python/nn/mindspore.nn.GraphCell.rst
index 8227d1b8f4f..a3d98d2e239 100644
--- a/docs/api/api_python/nn/mindspore.nn.GraphCell.rst
+++ b/docs/api/api_python/nn/mindspore.nn.GraphCell.rst
@@ -1,7 +1,7 @@
 mindspore.nn.GraphCell
 ======================
 
-.. py:class:: mindspore.nn.GraphCell(graph)
+.. py:class:: mindspore.nn.GraphCell(graph, params_init=None)
 
     Runs the computational graph loaded from MindIR.
 
diff --git a/docs/api/api_python/nn/mindspore.nn.HausdorffDistance.rst b/docs/api/api_python/nn/mindspore.nn.HausdorffDistance.rst
index d8c5b737b6e..b0af445c4f0 100644
--- a/docs/api/api_python/nn/mindspore.nn.HausdorffDistance.rst
+++ b/docs/api/api_python/nn/mindspore.nn.HausdorffDistance.rst
@@ -49,3 +49,6 @@ mindspore.nn.HausdorffDistance
     **Exceptions:**
 
     - **ValueError** - The number of inputs is not equal to 3.
+    - **TypeError** - The data type of `label_idx` is not int or float.
+    - **ValueError** - The value of `label_idx` is not in `y_pred` or `y`.
+    - **ValueError** - The shapes of `y_pred` and `y` are different.
diff --git a/docs/api/api_python/nn/mindspore.nn.Loss.rst b/docs/api/api_python/nn/mindspore.nn.Loss.rst
index e8e741bfdc2..b56fec1ee32 100644
--- a/docs/api/api_python/nn/mindspore.nn.Loss.rst
+++ b/docs/api/api_python/nn/mindspore.nn.Loss.rst
@@ -22,7 +22,7 @@ mindspore.nn.Loss
 
     **Exceptions:**
 
-    RuntimeError: The total number of samples is 0.
+    - **RuntimeError** - The total number of samples is 0.
 
     .. py:method:: update(*inputs)
 
diff --git a/docs/api/api_python/nn/mindspore.nn.get_metric_fn.rst b/docs/api/api_python/nn/mindspore.nn.get_metric_fn.rst
index aa542d1cff7..2a5c096c183 100644
--- a/docs/api/api_python/nn/mindspore.nn.get_metric_fn.rst
+++ b/docs/api/api_python/nn/mindspore.nn.get_metric_fn.rst
@@ -14,7 +14,3 @@ mindspore.nn.get_metric_fn
     **Returns:**
 
     A metric object, a class instance of the metric method.
-
-    **Exceptions:**
-
-    - **TypeError** - The type of the input `metric` is not None, dict or set.
diff --git a/mindspore/python/mindspore/nn/cell.py b/mindspore/python/mindspore/nn/cell.py
index d20c2cc2ebc..09b730c32f5 100755
--- a/mindspore/python/mindspore/nn/cell.py
+++ b/mindspore/python/mindspore/nn/cell.py
@@ -1144,7 +1144,8 @@ class Cell(Cell_):
             Iteration, all parameters at the cell.
 
         Examples:
-            >>> net = Net()
+            >>> from mindspore import nn
+            >>> net = nn.Dense(3, 4)
            >>> parameters = []
             >>> for item in net.get_parameters():
             ...     parameters.append(item)
@@ -1178,7 +1179,8 @@ class Cell(Cell_):
             Iteration, all the names and corresponding parameters in the cell.
 
         Examples:
-            >>> n = Net()
+            >>> from mindspore import nn
+            >>> n = nn.Dense(3, 4)
             >>> names = []
             >>> for m in n.parameters_and_names():
             ...     if m[0]:
@@ -1218,7 +1220,15 @@ class Cell(Cell_):
             Iteration, all the child cells and corresponding names in the cell.
 
        Examples:
-            >>> n = Net()
+            >>> from mindspore import nn
+            >>> class Net(nn.Cell):
+            ...     def __init__(self):
+            ...         super(Net, self).__init__()
+            ...         self.conv = nn.Conv2d(3, 64, 3)
+            ...     def construct(self, x):
+            ...         out = self.conv(x)
+            ...         return out
+            >>> n = Net()
             >>> names = []
             >>> for m in n.cells_and_names():
             ...     if m[0]:
@@ -1988,7 +1997,6 @@ class GraphCell(Cell):
         TypeError: If the `params_init` is not a dict.
         TypeError: If the key of the `params_init` is not a str.
         TypeError: If the value of the `params_init` is neither a Tensor nor a Parameter.
-        ValueError: If the initial value's dtype and shape are not consistent with the parameter would be inited.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
 
diff --git a/mindspore/python/mindspore/nn/metrics/perplexity.py b/mindspore/python/mindspore/nn/metrics/perplexity.py
index 9ed354b2b89..a17ed357de6 100644
--- a/mindspore/python/mindspore/nn/metrics/perplexity.py
+++ b/mindspore/python/mindspore/nn/metrics/perplexity.py
@@ -34,9 +34,6 @@ class Perplexity(Metric):
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
 
-    Note:
-        The method `update` must be called with the form `update(preds, labels)`.
-
     Examples:
         >>> import numpy as np
         >>> from mindspore import nn, Tensor
diff --git a/mindspore/python/mindspore/nn/optim/lamb.py b/mindspore/python/mindspore/nn/optim/lamb.py
index 5be5367708d..f26d413d871 100755
--- a/mindspore/python/mindspore/nn/optim/lamb.py
+++ b/mindspore/python/mindspore/nn/optim/lamb.py
@@ -273,6 +273,7 @@ class Lamb(Optimizer):
 
     Examples:
         >>> from mindspore import nn, Model
+        >>> from mindspore.nn import learning_rate_schedule
         >>>
         >>> net = Net()
         >>> #1) All parameters use the same learning rate and weight decay
diff --git a/mindspore/python/mindspore/nn/wrap/cell_wrapper.py b/mindspore/python/mindspore/nn/wrap/cell_wrapper.py
index efa6f8c4003..9c031f6013a 100644
--- a/mindspore/python/mindspore/nn/wrap/cell_wrapper.py
+++ b/mindspore/python/mindspore/nn/wrap/cell_wrapper.py
@@ -398,8 +398,10 @@ class GetNextSingleOp(Cell):
     Examples:
         >>> import mindspore
         >>> from mindspore import ops, nn
+        >>> from mindspore import dataset as ds
         >>>
-        >>> train_dataset = create_custom_dataset()
+        >>> data_path = "/path/to/MNIST_Data/train/"
+        >>> train_dataset = ds.MnistDataset(data_path, num_samples=10)
         >>> dataset_helper = mindspore.DatasetHelper(train_dataset, dataset_sink_mode=True)
         >>> dataset = dataset_helper.iter.dataset
         >>> dataset_types, dataset_shapes = dataset_helper.types_shapes()
@@ -409,7 +411,7 @@ class GetNextSingleOp(Cell):
         >>> relu = ops.ReLU()
         >>> result = relu(data).asnumpy()
         >>> print(result.shape)
-        (32, 1, 32, 32)
+        (28, 28, 1)
     """
 
     def __init__(self, dataset_types, dataset_shapes, queue_name):
diff --git a/mindspore/python/mindspore/ops/operations/nn_ops.py b/mindspore/python/mindspore/ops/operations/nn_ops.py
index 015debfab34..c87d0dc9d57 100644
--- a/mindspore/python/mindspore/ops/operations/nn_ops.py
+++ b/mindspore/python/mindspore/ops/operations/nn_ops.py
@@ -3471,7 +3471,9 @@ class GetNext(Primitive):
     Examples:
         >>> import mindspore
         >>> from mindspore import ops
-        >>> train_dataset = create_custom_dataset()
+        >>> from mindspore import dataset as ds
+        >>> data_path = "/path/to/MNIST_Data/train/"
+        >>> train_dataset = ds.MnistDataset(data_path, num_samples=10)
         >>> dataset_helper = mindspore.DatasetHelper(train_dataset, dataset_sink_mode=True)
         >>> dataset = dataset_helper.iter.dataset
         >>> dataset_types, dataset_shapes = dataset_helper.types_shapes()
@@ -3481,7 +3483,7 @@ class GetNext(Primitive):
         >>> relu = ops.ReLU()
         >>> result = relu(data).asnumpy()
         >>> print(result.shape)
-        (32, 1, 32, 32)
+        (28, 28, 1)
     """
 
     @prim_attr_register
diff --git a/mindspore/python/mindspore/train/callback/_callback.py b/mindspore/python/mindspore/train/callback/_callback.py
index ac7df00a0a1..0fc9aefab7f 100644
--- a/mindspore/python/mindspore/train/callback/_callback.py
+++ b/mindspore/python/mindspore/train/callback/_callback.py
@@ -85,21 +85,24 @@ class Callback:
         `Callback `_.
 
     Examples:
+        >>> import numpy as np
         >>> from mindspore import Model, nn
         >>> from mindspore.train.callback import Callback
+        >>> from mindspore import dataset as ds
         >>> class Print_info(Callback):
         ...     def step_end(self, run_context):
         ...         cb_params = run_context.original_args()
         ...         print("step_num: ", cb_params.cur_step_num)
         >>>
         >>> print_cb = Print_info()
-        >>> dataset = create_custom_dataset()
-        >>> net = Net()
+        >>> data = {"x": np.float32(np.random.rand(64, 10)), "y": np.random.randint(0, 5, (64,))}
+        >>> dataset = ds.NumpySlicesDataset(data=data).batch(32)
+        >>> net = nn.Dense(10, 5)
         >>> loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
         >>> optim = nn.Momentum(net.trainable_params(), 0.01, 0.9)
         >>> model = Model(net, loss_fn=loss, optimizer=optim)
         >>> model.train(1, dataset, callbacks=print_cb)
-        step_num: 1
+        step_num: 2
     """
 
     def __enter__(self):
diff --git a/mindspore/python/mindspore/train/callback/_lr_scheduler_callback.py b/mindspore/python/mindspore/train/callback/_lr_scheduler_callback.py
index f3ceb11cccb..9b468841c29 100644
--- a/mindspore/python/mindspore/train/callback/_lr_scheduler_callback.py
+++ b/mindspore/python/mindspore/train/callback/_lr_scheduler_callback.py
@@ -32,9 +32,11 @@ class LearningRateScheduler(Callback):
         learning_rate_function (Function): The function about how to change the learning rate during training.
 
     Examples:
+        >>> import numpy as np
         >>> from mindspore import Model
         >>> from mindspore.train.callback import LearningRateScheduler
         >>> import mindspore.nn as nn
+        >>> from mindspore import dataset as ds
         ...
         >>> def learning_rate_function(lr, cur_step_num):
         ...     if cur_step_num%1000 == 0:
@@ -43,12 +45,13 @@ class LearningRateScheduler(Callback):
         ...
         >>> lr = 0.1
         >>> momentum = 0.9
-        >>> net = Net()
-        >>> loss = nn.SoftmaxCrossEntropyWithLogits()
+        >>> net = nn.Dense(10, 5)
+        >>> loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
         >>> optim = nn.Momentum(net.trainable_params(), learning_rate=lr, momentum=momentum)
         >>> model = Model(net, loss_fn=loss, optimizer=optim)
         ...
-        >>> dataset = create_custom_dataset("custom_dataset_path")
+        >>> data = {"x": np.float32(np.random.rand(64, 10)), "y": np.random.randint(0, 5, (64,))}
+        >>> dataset = ds.NumpySlicesDataset(data=data).batch(32)
         >>> model.train(1, dataset, callbacks=[LearningRateScheduler(learning_rate_function)],
         ...             dataset_sink_mode=False)
     """
diff --git a/mindspore/python/mindspore/train/dataset_helper.py b/mindspore/python/mindspore/train/dataset_helper.py
index e99c1234069..bbf9da612c7 100644
--- a/mindspore/python/mindspore/train/dataset_helper.py
+++ b/mindspore/python/mindspore/train/dataset_helper.py
@@ -157,12 +157,15 @@ def connect_network_with_dataset(network, dataset_helper):
         ``Ascend`` ``GPU``
 
     Examples:
+        >>> import numpy as np
         >>> from mindspore import DatasetHelper
+        >>> from mindspore import nn, connect_network_with_dataset
+        >>> from mindspore import dataset as ds
         >>>
-        >>> # call create_dataset function to create a regular dataset, refer to mindspore.dataset
-        >>> train_dataset = create_custom_dataset()
+        >>> data = {"x": np.float32(np.random.rand(64, 10)), "y": np.random.randint(0, 5, (64,))}
+        >>> train_dataset = ds.NumpySlicesDataset(data=data).batch(32)
         >>> dataset_helper = DatasetHelper(train_dataset, dataset_sink_mode=True)
-        >>> net = Net()
+        >>> net = nn.Dense(10, 5)
         >>> net_with_get_next = connect_network_with_dataset(net, dataset_helper)
     """
     dataset_iter = dataset_helper.iter
@@ -236,12 +239,15 @@ class DatasetHelper:
         epoch_num (int): The number of passes of the entire dataset to be sent. Default: 1.
 
     Examples:
-        >>> from mindspore import DatasetHelper
+        >>> import numpy as np
+        >>> from mindspore import DatasetHelper, nn
+        >>> from mindspore import dataset as ds
         >>>
-        >>> train_dataset = create_custom_dataset()
+        >>> data = {"x": np.float32(np.random.rand(64, 10)), "y": np.random.randint(0, 5, (64,))}
+        >>> train_dataset = ds.NumpySlicesDataset(data=data).batch(32)
         >>> set_helper = DatasetHelper(train_dataset, dataset_sink_mode=False)
         >>>
-        >>> net = Net()
+        >>> net = nn.Dense(10, 5)
         >>> # Object of DatasetHelper is iterable
         >>> for next_element in set_helper:
         ...     # `next_element` includes data and label, using data to run the net
diff --git a/mindspore/python/mindspore/train/model.py b/mindspore/python/mindspore/train/model.py
index 5e8271da7c3..744eeaa7126 100644
--- a/mindspore/python/mindspore/train/model.py
+++ b/mindspore/python/mindspore/train/model.py
@@ -117,7 +117,7 @@ class Model:
             - "O2": Cast network to float16, keep BatchNorm run in float32, using dynamic loss scale.
            - "O3": Cast network to float16, the BatchNorm is also cast to float16, loss scale will not be used.
             - auto: Set level to recommended level in different devices. Set level to "O2" on GPU, set
-              level to "O3" on Ascend. The recommended level is chosen by the export experience, not applicable to all
+              level to "O3" on Ascend. The recommended level is chosen by expert experience and is not applicable to all
               scenarios. User should specify the level for special network. "O2" is recommended on GPU, "O3" is
               recommended on Ascend.
 
@@ -165,8 +165,9 @@ class Model:
         >>> loss = nn.SoftmaxCrossEntropyWithLogits()
         >>> optim = nn.Momentum(params=net.trainable_params(), learning_rate=0.1, momentum=0.9)
         >>> model = Model(net, loss_fn=loss, optimizer=optim, metrics=None)
-        >>> # For details about how to build the dataset, please refer to the tutorial
-        >>> # document on the official website.
+        >>> # For details about how to build the dataset, please refer to the function `create_dataset` in the
+        >>> # tutorial document on the official website:
+        >>> # https://www.mindspore.cn/tutorials/zh-CN/master/quick_start.html
         >>> dataset = create_custom_dataset()
         >>> model.train(2, dataset)
     """
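
Note: the hunks above all replace the undefined `create_custom_dataset()` and `Net()` placeholders with self-contained doctest code. For reference, the same pattern can be exercised end to end outside a docstring; the following is a minimal sketch assembled from the pieces used in this patch (it assumes a local MindSpore install with the `mindspore.dataset` package; the `PrintStep` callback name is invented for this sketch and is not part of the patch):

    import numpy as np
    from mindspore import Model, nn
    from mindspore import dataset as ds
    from mindspore.train.callback import Callback

    class PrintStep(Callback):
        """Hypothetical callback: print the step counter after each step."""
        def step_end(self, run_context):
            cb_params = run_context.original_args()
            print("step_num:", cb_params.cur_step_num)

    # 64 random samples with 10 features and 5 classes; batching by 32 gives
    # 2 steps per epoch, matching the expected output in _callback.py above.
    data = {"x": np.float32(np.random.rand(64, 10)), "y": np.random.randint(0, 5, (64,))}
    dataset = ds.NumpySlicesDataset(data=data).batch(32)

    net = nn.Dense(10, 5)  # single dense layer: 10 features -> 5 classes
    loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
    optim = nn.Momentum(net.trainable_params(), 0.01, 0.9)
    model = Model(net, loss_fn=loss, optimizer=optim)
    model.train(1, dataset, callbacks=PrintStep(), dataset_sink_mode=False)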