fix comments related to dataset

liutongtong 2022-02-25 17:26:18 +08:00
parent 1de823f1eb
commit d6854000b4
14 changed files with 55 additions and 33 deletions

View File

@@ -1,7 +1,7 @@
mindspore.ParameterTuple
========================
.. py:class:: mindspore.ParameterTuple(iterable)
.. py:class:: mindspore.ParameterTuple
Inherits from tuple and is used to manage multiple Parameters.
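Not part of this diff: a minimal sketch of building a ParameterTuple, with illustrative parameter names, to show the tuple-like behavior described above.
>>> import numpy as np
>>> from mindspore import Parameter, ParameterTuple, Tensor
>>> # group two named parameters; ParameterTuple behaves like a plain tuple
>>> w = Parameter(Tensor(np.ones((2, 3), np.float32)), name="w")
>>> b = Parameter(Tensor(np.zeros((3,), np.float32)), name="b")
>>> params = ParameterTuple((w, b))
>>> print(len(params))
2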

View File

@@ -1,7 +1,7 @@
mindspore.nn.GraphCell
======================
.. py:class:: mindspore.nn.GraphCell(graph)
.. py:class:: mindspore.nn.GraphCell(graph, params_init=None)
Runs the computation graph loaded from MindIR.
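For context, a hedged sketch of how the `graph` argument is usually produced, via export to MindIR and `mindspore.load`; the Dense layer and file name are illustrative:
>>> import numpy as np
>>> import mindspore as ms
>>> from mindspore import nn, Tensor
>>> net = nn.Dense(3, 4)
>>> inputs = Tensor(np.ones((1, 3), np.float32))
>>> # export the network to a MindIR file, then load it back as a graph
>>> ms.export(net, inputs, file_name="dense", file_format="MINDIR")
>>> graph = ms.load("dense.mindir")
>>> graph_net = nn.GraphCell(graph)
>>> output = graph_net(inputs)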

View File

@@ -49,3 +49,6 @@ mindspore.nn.HausdorffDistance
**Raises:**
- **ValueError** - The number of inputs is not equal to 3.
- **TypeError** - The data type of `label_idx` is neither int nor float.
- **ValueError** - The value of `label_idx` is not contained in `y_pred` or `y`.
- **ValueError** - The shapes of `y_pred` and `y` are different.
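These exceptions correspond to the three-argument `update(y_pred, y, label_idx)` call; a minimal usage sketch with illustrative inputs:
>>> import numpy as np
>>> from mindspore import nn, Tensor
>>> x = Tensor(np.array([[3, 0, 1], [1, 3, 0], [1, 0, 2]]))
>>> y = Tensor(np.array([[0, 2, 1], [1, 2, 1], [0, 0, 1]]))
>>> metric = nn.HausdorffDistance()
>>> metric.clear()
>>> metric.update(x, y, 0)  # exactly three inputs; label_idx must appear in x or y
>>> distance = metric.eval()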

View File

@@ -22,7 +22,7 @@ mindspore.nn.Loss
**Raises:**
RuntimeError: The total number of samples is 0.
- **RuntimeError** - The total number of samples is 0.
.. py:method:: update(*inputs)
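A hedged sketch of the `update`/`eval` cycle on this metric; the scalar loss value is illustrative:
>>> import numpy as np
>>> from mindspore import nn, Tensor
>>> loss_metric = nn.Loss()
>>> loss_metric.clear()
>>> loss_metric.update(Tensor(np.array(0.2)))  # accumulate one scalar loss value
>>> result = loss_metric.eval()  # averages accumulated losses; raises RuntimeError if none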

View File

@@ -14,7 +14,3 @@ mindspore.nn.get_metric_fn
**Returns:**
A metric object, which is a class instance of the metric method.
**Raises:**
- **TypeError** - The type of the input `metric` is not None, dict or set.
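A hedged sketch of resolving a metric by name; the 'precision' name and the eval_type keyword follow the metrics module's conventions and may differ across versions:
>>> from mindspore import nn
>>> metric = nn.get_metric_fn('precision', eval_type='classification')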

View File

@@ -1144,7 +1144,8 @@ class Cell(Cell_):
Iteration, all parameters in the cell.
Examples:
>>> net = Net()
>>> from mindspore import nn
>>> net = nn.Dense(3, 4)
>>> parameters = []
>>> for item in net.get_parameters():
... parameters.append(item)
@@ -1178,7 +1179,8 @@ class Cell(Cell_):
Iteration, all the names and corresponding parameters in the cell.
Examples:
>>> n = Net()
>>> from mindspore import nn
>>> n = nn.Dense(3, 4)
>>> names = []
>>> for m in n.parameters_and_names():
... if m[0]:
@@ -1218,7 +1220,14 @@ class Cell(Cell_):
Iteration, all the child cells and corresponding names in the cell.
Examples:
>>> n = Net()
>>> from mindspore import nn
>>> class Net(nn.Cell):
... def __init__(self):
... super(Net, self).__init__()
... self.conv = nn.Conv2d(3, 64, 3)
... def construct(self, x):
... out = self.conv(x)
... return out
>>> n = Net()
>>> names = []
>>> for m in n.cells_and_names():
... if m[0]:
@@ -1988,7 +1997,6 @@ class GraphCell(Cell):
TypeError: If the `params_init` is not a dict.
TypeError: If the key of the `params_init` is not a str.
TypeError: If the value of the `params_init` is neither a Tensor nor a Parameter.
ValueError: If the initial value's dtype and shape are not consistent with the parameter to be initialized.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
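Given the `params_init` errors documented here, a hedged sketch of passing it; the "dense.mindir" file and the parameter name "weight" are illustrative and must exist in the loaded graph:
>>> import numpy as np
>>> import mindspore as ms
>>> from mindspore import nn, Tensor
>>> graph = ms.load("dense.mindir")  # assumes a previously exported MindIR file
>>> init = {"weight": Tensor(np.ones((4, 3), np.float32))}  # keys are str, values Tensor/Parameter
>>> net = nn.GraphCell(graph, params_init=init)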

View File

@@ -34,9 +34,6 @@ class Perplexity(Metric):
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Note:
The method `update` must be called with the form `update(preds, labels)`.
Examples:
>>> import numpy as np
>>> from mindspore import nn, Tensor

View File

@@ -273,6 +273,7 @@ class Lamb(Optimizer):
Examples:
>>> from mindspore import nn, Model
>>> from mindspore.nn import learning_rate_schedule
>>>
>>> net = Net()
>>> #1) All parameters use the same learning rate and weight decay
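The example above continues past this hunk; for a self-contained picture, a hedged sketch of constructing Lamb with a stand-in network (the Dense layer and hyperparameters are illustrative):
>>> from mindspore import nn
>>> net = nn.Dense(3, 4)
>>> optim = nn.Lamb(params=net.trainable_params(), learning_rate=0.1, weight_decay=0.01)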

View File

@@ -398,8 +398,10 @@ class GetNextSingleOp(Cell):
Examples:
>>> import mindspore
>>> from mindspore import ops, nn
>>> from mindspore import dataset as ds
>>>
>>> train_dataset = create_custom_dataset()
>>> data_path = "/path/to/MNIST_Data/train/"
>>> train_dataset = ds.MnistDataset(data_path, num_samples=10)
>>> dataset_helper = mindspore.DatasetHelper(train_dataset, dataset_sink_mode=True)
>>> dataset = dataset_helper.iter.dataset
>>> dataset_types, dataset_shapes = dataset_helper.types_shapes()
@@ -409,7 +411,7 @@ class GetNextSingleOp(Cell):
>>> relu = ops.ReLU()
>>> result = relu(data).asnumpy()
>>> print(result.shape)
(32, 1, 32, 32)
(28, 28, 1)
"""
def __init__(self, dataset_types, dataset_shapes, queue_name):

View File

@@ -3471,7 +3471,9 @@ class GetNext(Primitive):
Examples:
>>> import mindspore
>>> from mindspore import ops
>>> train_dataset = create_custom_dataset()
>>> from mindspore import dataset as ds
>>> data_path = "/path/to/MNIST_Data/train/"
>>> train_dataset = ds.MnistDataset(data_path, num_samples=10)
>>> dataset_helper = mindspore.DatasetHelper(train_dataset, dataset_sink_mode=True)
>>> dataset = dataset_helper.iter.dataset
>>> dataset_types, dataset_shapes = dataset_helper.types_shapes()
@@ -3481,7 +3483,7 @@ class GetNext(Primitive):
>>> relu = ops.ReLU()
>>> result = relu(data).asnumpy()
>>> print(result.shape)
(32, 1, 32, 32)
(28, 28, 1)
"""
@prim_attr_register

View File

@@ -85,21 +85,24 @@ class Callback:
`Callback <https://www.mindspore.cn/docs/programming_guide/zh-CN/master/custom_debugging_info.html>`_.
Examples:
>>> import numpy as np
>>> from mindspore import Model, nn
>>> from mindspore.train.callback import Callback
>>> from mindspore import dataset as ds
>>> class Print_info(Callback):
... def step_end(self, run_context):
... cb_params = run_context.original_args()
... print("step_num: ", cb_params.cur_step_num)
>>>
>>> print_cb = Print_info()
>>> dataset = create_custom_dataset()
>>> net = Net()
>>> data = {"x": np.float32(np.random.rand(64, 10)), "y": np.random.randint(0, 5, (64,))}
>>> dataset = ds.NumpySlicesDataset(data=data).batch(32)
>>> net = nn.Dense(10, 5)
>>> loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
>>> optim = nn.Momentum(net.trainable_params(), 0.01, 0.9)
>>> model = Model(net, loss_fn=loss, optimizer=optim)
>>> model.train(1, dataset, callbacks=print_cb)
step_num: 1
step_num: 2
"""
def __enter__(self):

View File

@@ -32,9 +32,11 @@ class LearningRateScheduler(Callback):
learning_rate_function (Function): The function about how to change the learning rate during training.
Examples:
>>> import numpy as np
>>> from mindspore import Model
>>> from mindspore.train.callback import LearningRateScheduler
>>> import mindspore.nn as nn
>>> from mindspore import dataset as ds
...
>>> def learning_rate_function(lr, cur_step_num):
... if cur_step_num%1000 == 0:
@@ -43,12 +45,13 @@ class LearningRateScheduler(Callback):
...
>>> lr = 0.1
>>> momentum = 0.9
>>> net = Net()
>>> loss = nn.SoftmaxCrossEntropyWithLogits()
>>> net = nn.Dense(10, 5)
>>> loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
>>> optim = nn.Momentum(net.trainable_params(), learning_rate=lr, momentum=momentum)
>>> model = Model(net, loss_fn=loss, optimizer=optim)
...
>>> dataset = create_custom_dataset("custom_dataset_path")
>>> data = {"x": np.float32(np.random.rand(64, 10)), "y": np.random.randint(0, 5, (64,))}
>>> dataset = ds.NumpySlicesDataset(data=data).batch(32)
>>> model.train(1, dataset, callbacks=[LearningRateScheduler(learning_rate_function)],
... dataset_sink_mode=False)
"""

View File

@@ -157,12 +157,15 @@ def connect_network_with_dataset(network, dataset_helper):
``Ascend`` ``GPU``
Examples:
>>> import numpy as np
>>> from mindspore import DatasetHelper
>>> from mindspore import DatasetHelper, nn, connect_network_with_dataset
>>> from mindspore import dataset as ds
>>>
>>> # call create_dataset function to create a regular dataset, refer to mindspore.dataset
>>> train_dataset = create_custom_dataset()
>>> data = {"x": np.float32(np.random.rand(64, 10)), "y": np.random.randint(0, 5, (64,))}
>>> train_dataset = ds.NumpySlicesDataset(data=data).batch(32)
>>> dataset_helper = DatasetHelper(train_dataset, dataset_sink_mode=True)
>>> net = Net()
>>> net = nn.Dense(10, 5)
>>> net_with_get_next = connect_network_with_dataset(net, dataset_helper)
"""
dataset_iter = dataset_helper.iter
@@ -236,12 +239,15 @@ class DatasetHelper:
epoch_num (int): The number of passes of the entire dataset to be sent. Default: 1.
Examples:
>>> from mindspore import DatasetHelper
>>> import numpy as np
>>> from mindspore import DatasetHelper, nn
>>> from mindspore import dataset as ds
>>>
>>> train_dataset = create_custom_dataset()
>>> data = {"x": np.float32(np.random.rand(64, 10)), "y": np.random.randint(0, 5, (64,))}
>>> train_dataset = ds.NumpySlicesDataset(data=data).batch(32)
>>> set_helper = DatasetHelper(train_dataset, dataset_sink_mode=False)
>>>
>>> net = Net()
>>> net = nn.Dense(10, 5)
>>> # Object of DatasetHelper is iterable
>>> for next_element in set_helper:
... # `next_element` includes data and label, using data to run the net

View File

@@ -117,7 +117,7 @@ class Model:
- "O2": Cast network to float16, keep BatchNorm run in float32, using dynamic loss scale.
- "O3": Cast network to float16, the BatchNorm is also cast to float16, loss scale will not be used.
- auto: Set level to recommended level in different devices. Set level to "O2" on GPU, set
level to "O3" on Ascend. The recommended level is chosen by the export experience, not applicable to all
level to "O3" on Ascend. The recommended level is chosen by the expert experience, not applicable to all
scenarios. Users should specify the level for special networks.
"O2" is recommended on GPU, "O3" is recommended on Ascend.
@@ -165,8 +165,9 @@ class Model:
>>> loss = nn.SoftmaxCrossEntropyWithLogits()
>>> optim = nn.Momentum(params=net.trainable_params(), learning_rate=0.1, momentum=0.9)
>>> model = Model(net, loss_fn=loss, optimizer=optim, metrics=None)
>>> # For details about how to build the dataset, please refer to the tutorial
>>> # document on the official website.
>>> # For details about how to build the dataset, please refer to the function `create_dataset` in tutorial
>>> # document on the official website:
>>> # https://www.mindspore.cn/tutorials/zh-CN/master/quick_start.html
>>> dataset = create_custom_dataset()
>>> model.train(2, dataset)
"""