Fix minddata python doc

This commit is contained in:
luoyang 2021-06-25 18:15:01 +08:00
parent f98497ca09
commit 992da13168
8 changed files with 20 additions and 14 deletions

View File

@ -15,7 +15,7 @@
This module provides APIs to load and process various common datasets such as MNIST,
CIFAR-10, CIFAR-100, VOC, COCO, ImageNet, CelebA, CLUE, etc. It also supports datasets
in standard format, including MindRecord, TFRecord, Manifest, etc. Users can also define
their owndatasets with this module.
their own datasets with this module.
Besides, this module provides APIs to sample data while loading.

View File

@ -25,8 +25,9 @@ from mindspore import log as logger
__all__ = ['set_seed', 'get_seed', 'set_prefetch_size', 'get_prefetch_size', 'set_num_parallel_workers',
'get_num_parallel_workers', 'set_numa_enable', 'get_numa_enable', 'set_monitor_sampling_interval',
'get_monitor_sampling_interval', 'load', 'get_callback_timeout', 'set_auto_num_workers',
'get_auto_num_workers', '_init_device_info', 'set_enable_shared_mem', 'get_enable_shared_mem']
'get_monitor_sampling_interval', 'set_callback_timeout', 'get_callback_timeout',
'set_auto_num_workers', 'get_auto_num_workers', 'set_enable_shared_mem', 'get_enable_shared_mem',
'set_sending_batches', 'load', '_init_device_info']
INT32_MAX = 2147483647
UINT32_MAX = 4294967295

View File

@ -22,6 +22,7 @@ high performance and parse data precisely. It also provides the following
operations for users to preprocess data: shuffle, batch, repeat, map, and zip.
"""
from ..callback import DSCallback, WaitedDSCallback
from ..core import config
from .cache_client import DatasetCache
from .datasets import *
@ -35,4 +36,5 @@ __all__ = ["CelebADataset", "Cifar100Dataset", "Cifar10Dataset", "CLUEDataset",
"NumpySlicesDataset", "PaddedDataset", "TextFileDataset", "TFRecordDataset", "VOCDataset",
"DistributedSampler", "PKSampler", "RandomSampler", "SequentialSampler", "SubsetRandomSampler",
"WeightedRandomSampler", "SubsetSampler",
"config", "DatasetCache", "Schema", "zip"]
"DatasetCache", "DSCallback", "Schema", "WaitedDSCallback", "compare", "deserialize",
"serialize", "show", "zip"]

View File

@ -655,7 +655,7 @@ class Dataset:
option could be beneficial if the Python operation is computational heavy (default=False).
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
callbacks: (DSCallback, list[DSCallback], optional): List of Dataset callbacks to be called (Default=None).
callbacks (DSCallback, list[DSCallback], optional): List of Dataset callbacks to be called (Default=None).
Returns:
@ -2562,7 +2562,7 @@ class MapDataset(Dataset):
option could be beneficial if the Python operation is computational heavy (default=False).
cache (DatasetCache, optional): Use tensor caching service to speed up dataset processing.
(default=None, which means no cache is used).
callbacks: (DSCallback, list[DSCallback], optional): List of Dataset callbacks to be called (Default=None)
callbacks (DSCallback, list[DSCallback], optional): List of Dataset callbacks to be called (Default=None).
max_rowsize(int, optional): Maximum size of row in MB that is used for shared memory allocation to copy
data between processes. This is only used if python_multiprocessing is set to True (default 16 MB).

View File

@ -85,7 +85,7 @@ class Fill(TensorOperation):
The output tensor will have the same shape and type as the input tensor.
Args:
fill_value (Union[str, bytes, int, float, bool])) : scalar value
fill_value (Union[str, bytes, int, float, bool]): scalar value
to fill the tensor with.
Examples:
@ -432,7 +432,7 @@ class RandomApply(TensorOperation):
Args:
transforms (list): List of transformations to be applied.
prob (float, optional): The probability to apply the transformation list (default=0.5)
prob (float, optional): The probability to apply the transformation list (default=0.5).
Examples:
>>> rand_apply = c_transforms.RandomApply([c_vision.RandomCrop(512)])

View File

@ -140,13 +140,13 @@ class Compose:
@staticmethod
def reduce(operations):
"""
Wraps adjacent Python operations in a Compose to allow mixing of Python and C++ operations
Wraps adjacent Python operations in a Compose to allow mixing of Python and C++ operations.
Args:
operations (list): list of tensor operations
operations (list): list of tensor operations.
Returns:
list, the reduced list of operations
list, the reduced list of operations.
"""
if len(operations) == 1:
if str(operations).find("c_transform") >= 0 or isinstance(operations[0], TensorOperation):

View File

@ -231,7 +231,7 @@ class CutMixBatch(ImageTensorOperation):
Args:
image_batch_format (Image Batch Format): The method of padding. Can be any of
[ImageBatchFormat.NHWC, ImageBatchFormat.NCHW]
[ImageBatchFormat.NHWC, ImageBatchFormat.NCHW].
alpha (float, optional): hyperparameter of beta distribution (default = 1.0).
prob (float, optional): The probability by which CutMix is applied to each image (default = 1.0).
@ -591,7 +591,7 @@ class RandomAffine(ImageTensorOperation):
TypeError: If degrees is not a number or a list or a tuple.
If degrees is a list or tuple, its length is not 2.
TypeError: If translate is specified but is not list or a tuple of length 2 or 4.
TypeError: If scale is not a list or tuple of length 2.''
TypeError: If scale is not a list or tuple of length 2.
TypeError: If shear is not a list or tuple of length 2 or 4.
TypeError: If fill_value is not a single integer or a 3-tuple.

View File

@ -580,7 +580,10 @@ def to_type(img, output_type):
if not is_numpy(img):
raise TypeError("img should be NumPy image. Got {}.".format(type(img)))
return img.astype(output_type)
try:
return img.astype(output_type)
except:
raise RuntimeError("output_type: " + str(output_type) + " is not a valid datatype.")
def rotate(img, angle, resample, expand, center, fill_value):