From 02f630286c1046d95b369f9a7d5d517c77c04401 Mon Sep 17 00:00:00 2001
From: CaoJian
Date: Mon, 2 Nov 2020 16:29:22 +0800
Subject: [PATCH] update wrong comment

---
 .../cv/densenet121/src/datasets/classification.py | 10 +++++-----
 model_zoo/official/cv/resnext50/src/dataset.py    | 12 ++++++------
 model_zoo/official/cv/vgg16/src/dataset.py        | 12 ++++++------
 3 files changed, 17 insertions(+), 17 deletions(-)

diff --git a/model_zoo/official/cv/densenet121/src/datasets/classification.py b/model_zoo/official/cv/densenet121/src/datasets/classification.py
index 0e9f2124e54..59cbd74a0ee 100644
--- a/model_zoo/official/cv/densenet121/src/datasets/classification.py
+++ b/model_zoo/official/cv/densenet121/src/datasets/classification.py
@@ -69,7 +69,7 @@ def classification_dataset(data_dir, image_size, per_batch_size, max_epoch, rank
     Args:
         data_dir (str): Path to the root directory that contains the dataset for "input_mode="folder"".
             Or path of the textfile that contains every image's path of the dataset.
-        image_size (str): Size of the input images.
+        image_size (Union(int, sequence)): Size of the input images.
         per_batch_size (int): the batch size of evey step during training.
         max_epoch (int): the number of epochs.
         rank (int): The shard ID within num_shards (default=None).
@@ -90,14 +90,14 @@ def classification_dataset(data_dir, image_size, per_batch_size, max_epoch, rank
     Examples:
         >>> from src.datasets.classification import classification_dataset
         >>> # path to imagefolder directory. This directory needs to contain sub-directories which contain the images
-        >>> dataset_dir = "/path/to/imagefolder_directory"
-        >>> de_dataset = classification_dataset(train_data_dir, image_size=[224, 244],
+        >>> data_dir = "/path/to/imagefolder_directory"
+        >>> de_dataset = classification_dataset(data_dir, image_size=[224, 244],
         >>>                                     per_batch_size=64, max_epoch=100,
         >>>                                     rank=0, group_size=4)
         >>> # Path of the textfile that contains every image's path of the dataset.
-        >>> dataset_dir = "/path/to/dataset/images/train.txt"
+        >>> data_dir = "/path/to/dataset/images/train.txt"
         >>> images_dir = "/path/to/dataset/images"
-        >>> de_dataset = classification_dataset(train_data_dir, image_size=[224, 244],
+        >>> de_dataset = classification_dataset(data_dir, image_size=[224, 244],
         >>>                                     per_batch_size=64, max_epoch=100,
         >>>                                     rank=0, group_size=4,
         >>>                                     input_mode="txt", root=images_dir)
diff --git a/model_zoo/official/cv/resnext50/src/dataset.py b/model_zoo/official/cv/resnext50/src/dataset.py
index 0176ffa082e..474d750b424 100644
--- a/model_zoo/official/cv/resnext50/src/dataset.py
+++ b/model_zoo/official/cv/resnext50/src/dataset.py
@@ -73,7 +73,7 @@ def classification_dataset(data_dir, image_size, per_batch_size, max_epoch, rank
     Args:
         data_dir (str): Path to the root directory that contains the dataset for "input_mode="folder"".
             Or path of the textfile that contains every image's path of the dataset.
-        image_size (str): Size of the input images.
+        image_size (Union(int, sequence)): Size of the input images.
         per_batch_size (int): the batch size of evey step during training.
         max_epoch (int): the number of epochs.
         rank (int): The shard ID within num_shards (default=None).
@@ -92,16 +92,16 @@ def classification_dataset(data_dir, image_size, per_batch_size, max_epoch, rank
             unique index starting from 0).
 
     Examples:
-        >>> from mindvision.common.datasets.classification import classification_dataset
+        >>> from src.dataset import classification_dataset
         >>> # path to imagefolder directory. This directory needs to contain sub-directories which contain the images
-        >>> dataset_dir = "/path/to/imagefolder_directory"
-        >>> de_dataset = classification_dataset(train_data_dir, image_size=[224, 244],
+        >>> data_dir = "/path/to/imagefolder_directory"
+        >>> de_dataset = classification_dataset(data_dir, image_size=[224, 244],
         >>>                                     per_batch_size=64, max_epoch=100,
         >>>                                     rank=0, group_size=4)
         >>> # Path of the textfile that contains every image's path of the dataset.
-        >>> dataset_dir = "/path/to/dataset/images/train.txt"
+        >>> data_dir = "/path/to/dataset/images/train.txt"
         >>> images_dir = "/path/to/dataset/images"
-        >>> de_dataset = classification_dataset(train_data_dir, image_size=[224, 244],
+        >>> de_dataset = classification_dataset(data_dir, image_size=[224, 244],
         >>>                                     per_batch_size=64, max_epoch=100,
         >>>                                     rank=0, group_size=4,
         >>>                                     input_mode="txt", root=images_dir)
diff --git a/model_zoo/official/cv/vgg16/src/dataset.py b/model_zoo/official/cv/vgg16/src/dataset.py
index 1361eb08f2f..12fa5582f52 100644
--- a/model_zoo/official/cv/vgg16/src/dataset.py
+++ b/model_zoo/official/cv/vgg16/src/dataset.py
@@ -88,7 +88,7 @@ def classification_dataset(data_dir, image_size, per_batch_size, rank=0, group_s
     Args:
         data_dir (str): Path to the root directory that contains the dataset for "input_mode="folder"".
             Or path of the textfile that contains every image's path of the dataset.
-        image_size (str): Size of the input images.
+        image_size (Union(int, sequence)): Size of the input images.
         per_batch_size (int): the batch size of evey step during training.
         rank (int): The shard ID within num_shards (default=None).
         group_size (int): Number of shards that the dataset should be divided
@@ -107,15 +107,15 @@ def classification_dataset(data_dir, image_size, per_batch_size, rank=0, group_s
             unique index starting from 0).
 
     Examples:
-        >>> from mindvision.common.datasets.classification import classification_dataset
+        >>> from src.dataset import classification_dataset
         >>> # path to imagefolder directory. This directory needs to contain sub-directories which contain the images
-        >>> dataset_dir = "/path/to/imagefolder_directory"
-        >>> de_dataset = classification_dataset(train_data_dir, image_size=[224, 244],
+        >>> data_dir = "/path/to/imagefolder_directory"
+        >>> de_dataset = classification_dataset(data_dir, image_size=[224, 244],
         >>>                                     per_batch_size=64, rank=0, group_size=4)
         >>> # Path of the textfile that contains every image's path of the dataset.
-        >>> dataset_dir = "/path/to/dataset/images/train.txt"
+        >>> data_dir = "/path/to/dataset/images/train.txt"
         >>> images_dir = "/path/to/dataset/images"
-        >>> de_dataset = classification_dataset(train_data_dir, image_size=[224, 244],
+        >>> de_dataset = classification_dataset(data_dir, image_size=[224, 244],
         >>>                                     per_batch_size=64, rank=0, group_size=4,
         >>>                                     input_mode="txt", root=images_dir)
     """
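
Note (not part of the patch): the substantive fix here is the image_size annotation, which
was previously documented as str but in fact accepts either a single int or a (height, width)
sequence. The sketch below is a minimal illustration of what the corrected
Union(int, sequence) annotation implies; _normalize_image_size is a hypothetical helper and
does not exist in these model_zoo files.

    from typing import Sequence, Tuple, Union

    def _normalize_image_size(image_size: Union[int, Sequence[int]]) -> Tuple[int, int]:
        """Hypothetical helper, not from the patch: expand the image_size
        argument the way the corrected annotation describes -- a bare int
        is treated as a square size, a sequence as (height, width)."""
        if isinstance(image_size, int):
            return (image_size, image_size)
        height, width = image_size
        return (height, width)

    # Both call styles from the docstring examples are valid under the new annotation:
    assert _normalize_image_size(224) == (224, 224)
    assert _normalize_image_size([224, 244]) == (224, 244)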