!8118 update wrong comment for 'resnext50/densnet121/vgg16'
Merge pull request !8118 from caojian05/ms_master_bugfix
commit 250ea7c001
@@ -69,7 +69,7 @@ def classification_dataset(data_dir, image_size, per_batch_size, max_epoch, rank
     Args:
         data_dir (str): Path to the root directory that contains the dataset for "input_mode="folder"".
             Or path of the textfile that contains every image's path of the dataset.
-        image_size (str): Size of the input images.
+        image_size (Union(int, sequence)): Size of the input images.
         per_batch_size (int): the batch size of evey step during training.
         max_epoch (int): the number of epochs.
         rank (int): The shard ID within num_shards (default=None).
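The substantive change in this hunk is the documented type of image_size: `str` was wrong, since the examples pass a list. Below is a minimal sketch of what the corrected `Union(int, sequence)` annotation admits; the signature is assumed from the hunk header and the import path from the Examples hunk that follows, neither verified against the implementation.

# Sketch only: both calls satisfy the corrected docstring type for image_size.
# Signature and import path are taken from this diff, not from the source tree.
from src.datasets.classification import classification_dataset

data_dir = "/path/to/imagefolder_directory"
# A single int, i.e. square 224x224 inputs.
de_square = classification_dataset(data_dir, image_size=224,
                                   per_batch_size=64, max_epoch=100,
                                   rank=0, group_size=4)
# A sequence, i.e. an explicit [height, width] pair.
de_rect = classification_dataset(data_dir, image_size=[224, 244],
                                 per_batch_size=64, max_epoch=100,
                                 rank=0, group_size=4)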
@@ -90,14 +90,14 @@ def classification_dataset(data_dir, image_size, per_batch_size, max_epoch, rank
     Examples:
         >>> from src.datasets.classification import classification_dataset
         >>> # path to imagefolder directory. This directory needs to contain sub-directories which contain the images
-        >>> dataset_dir = "/path/to/imagefolder_directory"
-        >>> de_dataset = classification_dataset(train_data_dir, image_size=[224, 244],
+        >>> data_dir = "/path/to/imagefolder_directory"
+        >>> de_dataset = classification_dataset(data_dir, image_size=[224, 244],
         >>>                                     per_batch_size=64, max_epoch=100,
         >>>                                     rank=0, group_size=4)
         >>> # Path of the textfile that contains every image's path of the dataset.
-        >>> dataset_dir = "/path/to/dataset/images/train.txt"
+        >>> data_dir = "/path/to/dataset/images/train.txt"
         >>> images_dir = "/path/to/dataset/images"
-        >>> de_dataset = classification_dataset(train_data_dir, image_size=[224, 244],
+        >>> de_dataset = classification_dataset(data_dir, image_size=[224, 244],
         >>>                                     per_batch_size=64, max_epoch=100,
         >>>                                     rank=0, group_size=4,
         >>>                                     input_mode="txt", root=images_dir)
@@ -73,7 +73,7 @@ def classification_dataset(data_dir, image_size, per_batch_size, max_epoch, rank
     Args:
         data_dir (str): Path to the root directory that contains the dataset for "input_mode="folder"".
             Or path of the textfile that contains every image's path of the dataset.
-        image_size (str): Size of the input images.
+        image_size (Union(int, sequence)): Size of the input images.
         per_batch_size (int): the batch size of evey step during training.
         max_epoch (int): the number of epochs.
         rank (int): The shard ID within num_shards (default=None).
@@ -92,16 +92,16 @@ def classification_dataset(data_dir, image_size, per_batch_size, max_epoch, rank
             unique index starting from 0).
 
     Examples:
-        >>> from mindvision.common.datasets.classification import classification_dataset
+        >>> from src.dataset import classification_dataset
         >>> # path to imagefolder directory. This directory needs to contain sub-directories which contain the images
-        >>> dataset_dir = "/path/to/imagefolder_directory"
-        >>> de_dataset = classification_dataset(train_data_dir, image_size=[224, 244],
+        >>> data_dir = "/path/to/imagefolder_directory"
+        >>> de_dataset = classification_dataset(data_dir, image_size=[224, 244],
         >>>                                     per_batch_size=64, max_epoch=100,
         >>>                                     rank=0, group_size=4)
         >>> # Path of the textfile that contains every image's path of the dataset.
-        >>> dataset_dir = "/path/to/dataset/images/train.txt"
+        >>> data_dir = "/path/to/dataset/images/train.txt"
         >>> images_dir = "/path/to/dataset/images"
-        >>> de_dataset = classification_dataset(train_data_dir, image_size=[224, 244],
+        >>> de_dataset = classification_dataset(data_dir, image_size=[224, 244],
         >>>                                     per_batch_size=64, max_epoch=100,
         >>>                                     rank=0, group_size=4,
         >>>                                     input_mode="txt", root=images_dir)
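The other fix in this hunk is the import path: the old `mindvision.common.datasets.classification` module path appears stale, and the corrected example imports from the model's local `src` package. A minimal sketch of the corrected folder-mode call, assuming only what the updated Examples block shows (paths are placeholders):

# Sketch of the corrected example: import from the local src package and pass
# the directory that holds one sub-directory of images per class.
from src.dataset import classification_dataset

data_dir = "/path/to/imagefolder_directory"
de_dataset = classification_dataset(data_dir, image_size=[224, 244],
                                    per_batch_size=64, max_epoch=100,
                                    rank=0, group_size=4)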
@@ -88,7 +88,7 @@ def classification_dataset(data_dir, image_size, per_batch_size, rank=0, group_s
     Args:
         data_dir (str): Path to the root directory that contains the dataset for "input_mode="folder"".
             Or path of the textfile that contains every image's path of the dataset.
-        image_size (str): Size of the input images.
+        image_size (Union(int, sequence)): Size of the input images.
         per_batch_size (int): the batch size of evey step during training.
         rank (int): The shard ID within num_shards (default=None).
         group_size (int): Number of shards that the dataset should be divided
@@ -107,15 +107,15 @@ def classification_dataset(data_dir, image_size, per_batch_size, rank=0, group_s
             unique index starting from 0).
 
     Examples:
-        >>> from mindvision.common.datasets.classification import classification_dataset
+        >>> from src.dataset import classification_dataset
         >>> # path to imagefolder directory. This directory needs to contain sub-directories which contain the images
-        >>> dataset_dir = "/path/to/imagefolder_directory"
-        >>> de_dataset = classification_dataset(train_data_dir, image_size=[224, 244],
+        >>> data_dir = "/path/to/imagefolder_directory"
+        >>> de_dataset = classification_dataset(data_dir, image_size=[224, 244],
         >>>                                     per_batch_size=64, rank=0, group_size=4)
         >>> # Path of the textfile that contains every image's path of the dataset.
-        >>> dataset_dir = "/path/to/dataset/images/train.txt"
+        >>> data_dir = "/path/to/dataset/images/train.txt"
         >>> images_dir = "/path/to/dataset/images"
-        >>> de_dataset = classification_dataset(train_data_dir, image_size=[224, 244],
+        >>> de_dataset = classification_dataset(data_dir, image_size=[224, 244],
         >>>                                     per_batch_size=64, rank=0, group_size=4,
         >>>                                     input_mode="txt", root=images_dir)
     """
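For the vgg16-style signature in the last two hunks (no max_epoch parameter), the corrected textfile mode looks like the sketch below; `input_mode` and `root` are taken from the docstring example, and nothing here is checked against the implementation.

# Sketch of the corrected input_mode="txt" example: data_dir points at a text
# file that lists every image's path, presumably resolved against root.
from src.dataset import classification_dataset

data_dir = "/path/to/dataset/images/train.txt"
images_dir = "/path/to/dataset/images"
de_dataset = classification_dataset(data_dir, image_size=[224, 244],
                                    per_batch_size=64, rank=0, group_size=4,
                                    input_mode="txt", root=images_dir)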