!33811 [MD] Transform Unification Feature - Phase 2B - test_[r-z]*.py Python UT updates

Merge pull request !33811 from cathwong/ckw_xtranuni_phase2b_uts_r_to_z
This commit is contained in:
i-robot 2022-05-05 18:48:57 +00:00 committed by Gitee
commit 0828efe02e
No known key found for this signature in database
GPG Key ID: 173E9B9CA92EEF8F
51 changed files with 1026 additions and 1110 deletions

View File

@ -18,7 +18,7 @@ Testing RandomAdjustSharpness in DE
import numpy as np import numpy as np
import mindspore.dataset as ds import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as c_vision import mindspore.dataset.vision.transforms as vision
from mindspore import log as logger from mindspore import log as logger
from util import visualize_list, visualize_image, diff_mse from util import visualize_list, visualize_image, diff_mse
@ -34,7 +34,7 @@ def test_random_adjust_sharpness_pipeline(plot=False):
# Original Images # Original Images
data_set = ds.ImageFolderDataset(dataset_dir=data_dir, shuffle=False) data_set = ds.ImageFolderDataset(dataset_dir=data_dir, shuffle=False)
transforms_original = [c_vision.Decode(), c_vision.Resize(size=[224, 224])] transforms_original = [vision.Decode(), vision.Resize(size=[224, 224])]
ds_original = data_set.map(operations=transforms_original, input_columns="image") ds_original = data_set.map(operations=transforms_original, input_columns="image")
ds_original = ds_original.batch(512) ds_original = ds_original.batch(512)
@ -48,9 +48,9 @@ def test_random_adjust_sharpness_pipeline(plot=False):
# Randomly Sharpness Adjusted Images # Randomly Sharpness Adjusted Images
data_set1 = ds.ImageFolderDataset(dataset_dir=data_dir, shuffle=False) data_set1 = ds.ImageFolderDataset(dataset_dir=data_dir, shuffle=False)
transform_random_adjust_sharpness = [c_vision.Decode(), transform_random_adjust_sharpness = [vision.Decode(),
c_vision.Resize(size=[224, 224]), vision.Resize(size=[224, 224]),
c_vision.RandomAdjustSharpness(2.0, 0.6)] vision.RandomAdjustSharpness(2.0, 0.6)]
ds_random_adjust_sharpness = data_set1.map(operations=transform_random_adjust_sharpness, input_columns="image") ds_random_adjust_sharpness = data_set1.map(operations=transform_random_adjust_sharpness, input_columns="image")
ds_random_adjust_sharpness = ds_random_adjust_sharpness.batch(512) ds_random_adjust_sharpness = ds_random_adjust_sharpness.batch(512)
for idx, (image, _) in enumerate(ds_random_adjust_sharpness): for idx, (image, _) in enumerate(ds_random_adjust_sharpness):
@ -77,9 +77,9 @@ def test_random_adjust_sharpness_eager():
img = np.fromfile(image_file, dtype=np.uint8) img = np.fromfile(image_file, dtype=np.uint8)
logger.info("Image.type: {}, Image.shape: {}".format(type(img), img.shape)) logger.info("Image.type: {}, Image.shape: {}".format(type(img), img.shape))
img = c_vision.Decode()(img) img = vision.Decode()(img)
img_sharped = c_vision.RandomSharpness((2.0, 2.0))(img) img_sharped = vision.RandomSharpness((2.0, 2.0))(img)
img_random_sharped = c_vision.RandomAdjustSharpness(2.0, 1.0)(img) img_random_sharped = vision.RandomAdjustSharpness(2.0, 1.0)(img)
logger.info("Image.type: {}, Image.shape: {}".format(type(img_random_sharped), img_random_sharped.shape)) logger.info("Image.type: {}, Image.shape: {}".format(type(img_random_sharped), img_random_sharped.shape))
assert img_random_sharped.all() == img_sharped.all() assert img_random_sharped.all() == img_sharped.all()
@ -89,8 +89,8 @@ def test_random_adjust_sharpness_comp(plot=False):
""" """
Test RandomAdjustSharpness op compared with Sharpness op. Test RandomAdjustSharpness op compared with Sharpness op.
""" """
random_adjust_sharpness_op = c_vision.RandomAdjustSharpness(degree=2.0, prob=1.0) random_adjust_sharpness_op = vision.RandomAdjustSharpness(degree=2.0, prob=1.0)
sharpness_op = c_vision.RandomSharpness((2.0, 2.0)) sharpness_op = vision.RandomSharpness((2.0, 2.0))
dataset1 = ds.ImageFolderDataset(data_dir, 1, shuffle=False, decode=True) dataset1 = ds.ImageFolderDataset(data_dir, 1, shuffle=False, decode=True)
for item in dataset1.create_dict_iterator(num_epochs=1, output_numpy=True): for item in dataset1.create_dict_iterator(num_epochs=1, output_numpy=True):
@ -118,7 +118,7 @@ def test_random_adjust_sharpness_invalid_prob():
logger.info("test_random_adjust_sharpness_invalid_prob") logger.info("test_random_adjust_sharpness_invalid_prob")
dataset = ds.ImageFolderDataset(data_dir, 1, shuffle=False, decode=True) dataset = ds.ImageFolderDataset(data_dir, 1, shuffle=False, decode=True)
try: try:
random_adjust_sharpness_op = c_vision.RandomAdjustSharpness(2.0, 1.5) random_adjust_sharpness_op = vision.RandomAdjustSharpness(2.0, 1.5)
dataset = dataset.map(operations=random_adjust_sharpness_op, input_columns=['image']) dataset = dataset.map(operations=random_adjust_sharpness_op, input_columns=['image'])
except ValueError as e: except ValueError as e:
logger.info("Got an exception in DE: {}".format(str(e))) logger.info("Got an exception in DE: {}".format(str(e)))
@ -132,7 +132,7 @@ def test_random_adjust_sharpness_invalid_degree():
logger.info("test_random_adjust_sharpness_invalid_prob") logger.info("test_random_adjust_sharpness_invalid_prob")
dataset = ds.ImageFolderDataset(data_dir, 1, shuffle=False, decode=True) dataset = ds.ImageFolderDataset(data_dir, 1, shuffle=False, decode=True)
try: try:
random_adjust_sharpness_op = c_vision.RandomAdjustSharpness(-1.0, 1.5) random_adjust_sharpness_op = vision.RandomAdjustSharpness(-1.0, 1.5)
dataset = dataset.map(operations=random_adjust_sharpness_op, input_columns=['image']) dataset = dataset.map(operations=random_adjust_sharpness_op, input_columns=['image'])
except ValueError as e: except ValueError as e:
logger.info("Got an exception in DE: {}".format(str(e))) logger.info("Got an exception in DE: {}".format(str(e)))

View File

@ -17,9 +17,8 @@ Testing RandomAffine op in DE
""" """
import numpy as np import numpy as np
import mindspore.dataset as ds import mindspore.dataset as ds
import mindspore.dataset.transforms.py_transforms import mindspore.dataset.transforms.transforms
import mindspore.dataset.vision.py_transforms as py_vision import mindspore.dataset.vision.transforms as vision
import mindspore.dataset.vision.c_transforms as c_vision
from mindspore import log as logger from mindspore import log as logger
from util import visualize_list, save_and_check_md5, \ from util import visualize_list, save_and_check_md5, \
config_get_set_seed, config_get_set_num_parallel_workers config_get_set_seed, config_get_set_num_parallel_workers
@ -38,17 +37,17 @@ def test_random_affine_op(plot=False):
logger.info("test_random_affine_op") logger.info("test_random_affine_op")
# define map operations # define map operations
transforms1 = [ transforms1 = [
py_vision.Decode(), vision.Decode(True),
py_vision.RandomAffine(degrees=15, translate=(0.1, 0.1), scale=(0.9, 1.1)), vision.RandomAffine(degrees=15, translate=(0.1, 0.1), scale=(0.9, 1.1)),
py_vision.ToTensor() vision.ToTensor()
] ]
transform1 = mindspore.dataset.transforms.py_transforms.Compose(transforms1) transform1 = mindspore.dataset.transforms.transforms.Compose(transforms1)
transforms2 = [ transforms2 = [
py_vision.Decode(), vision.Decode(True),
py_vision.ToTensor() vision.ToTensor()
] ]
transform2 = mindspore.dataset.transforms.py_transforms.Compose(transforms2) transform2 = mindspore.dataset.transforms.transforms.Compose(transforms2)
# First dataset # First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
@ -76,12 +75,12 @@ def test_random_affine_op_c(plot=False):
logger.info("test_random_affine_op_c") logger.info("test_random_affine_op_c")
# define map operations # define map operations
transforms1 = [ transforms1 = [
c_vision.Decode(), vision.Decode(),
c_vision.RandomAffine(degrees=0, translate=(0.5, 0.5, 0, 0)) vision.RandomAffine(degrees=0, translate=(0.5, 0.5, 0, 0))
] ]
transforms2 = [ transforms2 = [
c_vision.Decode() vision.Decode()
] ]
# First dataset # First dataset
@ -112,12 +111,12 @@ def test_random_affine_md5():
original_num_parallel_workers = config_get_set_num_parallel_workers(1) original_num_parallel_workers = config_get_set_num_parallel_workers(1)
# define map operations # define map operations
transforms = [ transforms = [
py_vision.Decode(), vision.Decode(True),
py_vision.RandomAffine(degrees=(-5, 15), translate=(0.1, 0.3), vision.RandomAffine(degrees=(-5, 15), translate=(0.1, 0.3),
scale=(0.9, 1.1), shear=(-10, 10, -5, 5)), scale=(0.9, 1.1), shear=(-10, 10, -5, 5)),
py_vision.ToTensor() vision.ToTensor()
] ]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms) transform = mindspore.dataset.transforms.transforms.Compose(transforms)
# Generate dataset # Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
@ -141,9 +140,9 @@ def test_random_affine_c_md5():
original_num_parallel_workers = config_get_set_num_parallel_workers(1) original_num_parallel_workers = config_get_set_num_parallel_workers(1)
# define map operations # define map operations
transforms = [ transforms = [
c_vision.Decode(), vision.Decode(),
c_vision.RandomAffine(degrees=(-5, 15), translate=(-0.1, 0.1, -0.3, 0.3), vision.RandomAffine(degrees=(-5, 15), translate=(-0.1, 0.1, -0.3, 0.3),
scale=(0.9, 1.1), shear=(-10, 10, -5, 5)) scale=(0.9, 1.1), shear=(-10, 10, -5, 5))
] ]
# Generate dataset # Generate dataset
@ -168,8 +167,8 @@ def test_random_affine_default_c_md5():
original_num_parallel_workers = config_get_set_num_parallel_workers(1) original_num_parallel_workers = config_get_set_num_parallel_workers(1)
# define map operations # define map operations
transforms = [ transforms = [
c_vision.Decode(), vision.Decode(),
c_vision.RandomAffine(degrees=0) vision.RandomAffine(degrees=0)
] ]
# Generate dataset # Generate dataset
@ -192,8 +191,8 @@ def test_random_affine_py_exception_non_pil_images():
logger.info("test_random_affine_exception_negative_degrees") logger.info("test_random_affine_exception_negative_degrees")
dataset = ds.MnistDataset(MNIST_DATA_DIR, num_samples=3, num_parallel_workers=3) dataset = ds.MnistDataset(MNIST_DATA_DIR, num_samples=3, num_parallel_workers=3)
try: try:
transform = mindspore.dataset.transforms.py_transforms.Compose([py_vision.ToTensor(), transform = mindspore.dataset.transforms.transforms.Compose([vision.ToTensor(),
py_vision.RandomAffine(degrees=(15, 15))]) vision.RandomAffine(degrees=(15, 15))])
dataset = dataset.map(operations=transform, input_columns=["image"], num_parallel_workers=3) dataset = dataset.map(operations=transform, input_columns=["image"], num_parallel_workers=3)
for _ in dataset.create_dict_iterator(num_epochs=1): for _ in dataset.create_dict_iterator(num_epochs=1):
pass pass
@ -208,7 +207,7 @@ def test_random_affine_exception_negative_degrees():
""" """
logger.info("test_random_affine_exception_negative_degrees") logger.info("test_random_affine_exception_negative_degrees")
try: try:
_ = py_vision.RandomAffine(degrees=-15) _ = vision.RandomAffine(degrees=-15)
except ValueError as e: except ValueError as e:
logger.info("Got an exception in DE: {}".format(str(e))) logger.info("Got an exception in DE: {}".format(str(e)))
assert str(e) == "Input degrees is not within the required interval of [0, 16777216]." assert str(e) == "Input degrees is not within the required interval of [0, 16777216]."
@ -220,13 +219,13 @@ def test_random_affine_exception_translation_range():
""" """
logger.info("test_random_affine_exception_translation_range") logger.info("test_random_affine_exception_translation_range")
try: try:
_ = c_vision.RandomAffine(degrees=15, translate=(0.1, 1.5)) _ = vision.RandomAffine(degrees=15, translate=(0.1, 1.5))
except ValueError as e: except ValueError as e:
logger.info("Got an exception in DE: {}".format(str(e))) logger.info("Got an exception in DE: {}".format(str(e)))
assert str(e) == "Input translate at 1 is not within the required interval of [-1.0, 1.0]." assert str(e) == "Input translate at 1 is not within the required interval of [-1.0, 1.0]."
logger.info("test_random_affine_exception_translation_range") logger.info("test_random_affine_exception_translation_range")
try: try:
_ = c_vision.RandomAffine(degrees=15, translate=(-2, 1.5)) _ = vision.RandomAffine(degrees=15, translate=(-2, 1.5))
except ValueError as e: except ValueError as e:
logger.info("Got an exception in DE: {}".format(str(e))) logger.info("Got an exception in DE: {}".format(str(e)))
assert str(e) == "Input translate at 0 is not within the required interval of [-1.0, 1.0]." assert str(e) == "Input translate at 0 is not within the required interval of [-1.0, 1.0]."
@ -238,13 +237,13 @@ def test_random_affine_exception_scale_value():
""" """
logger.info("test_random_affine_exception_scale_value") logger.info("test_random_affine_exception_scale_value")
try: try:
_ = py_vision.RandomAffine(degrees=15, scale=(0.0, 0.0)) _ = vision.RandomAffine(degrees=15, scale=(0.0, 0.0))
except ValueError as e: except ValueError as e:
logger.info("Got an exception in DE: {}".format(str(e))) logger.info("Got an exception in DE: {}".format(str(e)))
assert str(e) == "Input scale[1] must be greater than 0." assert str(e) == "Input scale[1] must be greater than 0."
try: try:
_ = py_vision.RandomAffine(degrees=15, scale=(2.0, 1.1)) _ = vision.RandomAffine(degrees=15, scale=(2.0, 1.1))
except ValueError as e: except ValueError as e:
logger.info("Got an exception in DE: {}".format(str(e))) logger.info("Got an exception in DE: {}".format(str(e)))
assert str(e) == "Input scale[1] must be equal to or greater than scale[0]." assert str(e) == "Input scale[1] must be equal to or greater than scale[0]."
@ -256,26 +255,26 @@ def test_random_affine_exception_shear_value():
""" """
logger.info("test_random_affine_exception_shear_value") logger.info("test_random_affine_exception_shear_value")
try: try:
_ = py_vision.RandomAffine(degrees=15, shear=-5) _ = vision.RandomAffine(degrees=15, shear=-5)
except ValueError as e: except ValueError as e:
logger.info("Got an exception in DE: {}".format(str(e))) logger.info("Got an exception in DE: {}".format(str(e)))
assert str(e) == "Input shear must be greater than 0." assert str(e) == "Input shear must be greater than 0."
try: try:
_ = py_vision.RandomAffine(degrees=15, shear=(5, 1)) _ = vision.RandomAffine(degrees=15, shear=(5, 1))
except ValueError as e: except ValueError as e:
logger.info("Got an exception in DE: {}".format(str(e))) logger.info("Got an exception in DE: {}".format(str(e)))
assert str(e) == "Input shear[1] must be equal to or greater than shear[0]" assert str(e) == "Input shear[1] must be equal to or greater than shear[0]"
try: try:
_ = py_vision.RandomAffine(degrees=15, shear=(5, 1, 2, 8)) _ = vision.RandomAffine(degrees=15, shear=(5, 1, 2, 8))
except ValueError as e: except ValueError as e:
logger.info("Got an exception in DE: {}".format(str(e))) logger.info("Got an exception in DE: {}".format(str(e)))
assert str(e) == "Input shear[1] must be equal to or greater than shear[0] and " \ assert str(e) == "Input shear[1] must be equal to or greater than shear[0] and " \
"shear[3] must be equal to or greater than shear[2]." "shear[3] must be equal to or greater than shear[2]."
try: try:
_ = py_vision.RandomAffine(degrees=15, shear=(5, 9, 2, 1)) _ = vision.RandomAffine(degrees=15, shear=(5, 9, 2, 1))
except ValueError as e: except ValueError as e:
logger.info("Got an exception in DE: {}".format(str(e))) logger.info("Got an exception in DE: {}".format(str(e)))
assert str(e) == "Input shear[1] must be equal to or greater than shear[0] and " \ assert str(e) == "Input shear[1] must be equal to or greater than shear[0] and " \
@ -289,7 +288,7 @@ def test_random_affine_exception_degrees_size():
""" """
logger.info("test_random_affine_exception_degrees_size") logger.info("test_random_affine_exception_degrees_size")
try: try:
_ = py_vision.RandomAffine(degrees=[15]) _ = vision.RandomAffine(degrees=[15])
except TypeError as e: except TypeError as e:
logger.info("Got an exception in DE: {}".format(str(e))) logger.info("Got an exception in DE: {}".format(str(e)))
assert str(e) == "If degrees is a sequence, the length must be 2." assert str(e) == "If degrees is a sequence, the length must be 2."
@ -302,7 +301,7 @@ def test_random_affine_exception_translate_size():
""" """
logger.info("test_random_affine_exception_translate_size") logger.info("test_random_affine_exception_translate_size")
try: try:
_ = py_vision.RandomAffine(degrees=15, translate=(0.1)) _ = vision.RandomAffine(degrees=15, translate=(0.1))
except TypeError as e: except TypeError as e:
logger.info("Got an exception in DE: {}".format(str(e))) logger.info("Got an exception in DE: {}".format(str(e)))
assert str( assert str(
@ -317,7 +316,7 @@ def test_random_affine_exception_scale_size():
""" """
logger.info("test_random_affine_exception_scale_size") logger.info("test_random_affine_exception_scale_size")
try: try:
_ = py_vision.RandomAffine(degrees=15, scale=(0.5)) _ = vision.RandomAffine(degrees=15, scale=(0.5))
except TypeError as e: except TypeError as e:
logger.info("Got an exception in DE: {}".format(str(e))) logger.info("Got an exception in DE: {}".format(str(e)))
assert str(e) == "Argument scale with value 0.5 is not of type [<class 'tuple'>," \ assert str(e) == "Argument scale with value 0.5 is not of type [<class 'tuple'>," \
@ -331,7 +330,7 @@ def test_random_affine_exception_shear_size():
""" """
logger.info("test_random_affine_exception_shear_size") logger.info("test_random_affine_exception_shear_size")
try: try:
_ = py_vision.RandomAffine(degrees=15, shear=(-5, 5, 10)) _ = vision.RandomAffine(degrees=15, shear=(-5, 5, 10))
except TypeError as e: except TypeError as e:
logger.info("Got an exception in DE: {}".format(str(e))) logger.info("Got an exception in DE: {}".format(str(e)))
assert str(e) == "shear must be of length 2 or 4." assert str(e) == "shear must be of length 2 or 4."

View File

@ -17,8 +17,8 @@ Testing RandomApply op in DE
""" """
import numpy as np import numpy as np
import mindspore.dataset as ds import mindspore.dataset as ds
import mindspore.dataset.transforms.py_transforms as py_transforms import mindspore.dataset.transforms.transforms as data_trans
import mindspore.dataset.vision.py_transforms as py_vision import mindspore.dataset.vision.transforms as vision
from mindspore import log as logger from mindspore import log as logger
from util import visualize_list, config_get_set_seed, \ from util import visualize_list, config_get_set_seed, \
config_get_set_num_parallel_workers, save_and_check_md5 config_get_set_num_parallel_workers, save_and_check_md5
@ -35,19 +35,19 @@ def test_random_apply_op(plot=False):
""" """
logger.info("test_random_apply_op") logger.info("test_random_apply_op")
# define map operations # define map operations
transforms_list = [py_vision.CenterCrop(64), py_vision.RandomRotation(30)] transforms_list = [vision.CenterCrop(64), vision.RandomRotation(30)]
transforms1 = [ transforms1 = [
py_vision.Decode(), vision.Decode(True),
py_transforms.RandomApply(transforms_list, prob=0.6), data_trans.RandomApply(transforms_list, prob=0.6),
py_vision.ToTensor() vision.ToTensor()
] ]
transform1 = py_transforms.Compose(transforms1) transform1 = data_trans.Compose(transforms1)
transforms2 = [ transforms2 = [
py_vision.Decode(), vision.Decode(True),
py_vision.ToTensor() vision.ToTensor()
] ]
transform2 = py_transforms.Compose(transforms2) transform2 = data_trans.Compose(transforms2)
# First dataset # First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
@ -76,14 +76,14 @@ def test_random_apply_md5():
original_seed = config_get_set_seed(10) original_seed = config_get_set_seed(10)
original_num_parallel_workers = config_get_set_num_parallel_workers(1) original_num_parallel_workers = config_get_set_num_parallel_workers(1)
# define map operations # define map operations
transforms_list = [py_vision.CenterCrop(64), py_vision.RandomRotation(30)] transforms_list = [vision.CenterCrop(64), vision.RandomRotation(30)]
transforms = [ transforms = [
py_vision.Decode(), vision.Decode(True),
# Note: using default value "prob=0.5" # Note: using default value "prob=0.5"
py_transforms.RandomApply(transforms_list), data_trans.RandomApply(transforms_list),
py_vision.ToTensor() vision.ToTensor()
] ]
transform = py_transforms.Compose(transforms) transform = data_trans.Compose(transforms)
# Generate dataset # Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
@ -107,15 +107,15 @@ def test_random_apply_exception_random_crop_badinput():
original_seed = config_get_set_seed(200) original_seed = config_get_set_seed(200)
original_num_parallel_workers = config_get_set_num_parallel_workers(1) original_num_parallel_workers = config_get_set_num_parallel_workers(1)
# define map operations # define map operations
transforms_list = [py_vision.Resize([32, 32]), transforms_list = [vision.Resize([32, 32]),
py_vision.RandomCrop(100), # crop size > image size vision.RandomCrop(100), # crop size > image size
py_vision.RandomRotation(30)] vision.RandomRotation(30)]
transforms = [ transforms = [
py_vision.Decode(), vision.Decode(True),
py_transforms.RandomApply(transforms_list, prob=0.6), data_trans.RandomApply(transforms_list, prob=0.6),
py_vision.ToTensor() vision.ToTensor()
] ]
transform = py_transforms.Compose(transforms) transform = data_trans.Compose(transforms)
# Generate dataset # Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
data = data.map(operations=transform, input_columns=["image"]) data = data.map(operations=transform, input_columns=["image"])

View File

@ -18,7 +18,7 @@ Testing RandomAutoContrast op in DE
import numpy as np import numpy as np
import mindspore.dataset as ds import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as c_vision import mindspore.dataset.vision.transforms as vision
from mindspore import log as logger from mindspore import log as logger
from util import visualize_list, visualize_image, diff_mse from util import visualize_list, visualize_image, diff_mse
@ -34,7 +34,7 @@ def test_random_auto_contrast_pipeline(plot=False):
# Original Images # Original Images
data_set = ds.ImageFolderDataset(dataset_dir=data_dir, shuffle=False) data_set = ds.ImageFolderDataset(dataset_dir=data_dir, shuffle=False)
transforms_original = [c_vision.Decode(), c_vision.Resize(size=[224, 224])] transforms_original = [vision.Decode(), vision.Resize(size=[224, 224])]
ds_original = data_set.map(operations=transforms_original, input_columns="image") ds_original = data_set.map(operations=transforms_original, input_columns="image")
ds_original = ds_original.batch(512) ds_original = ds_original.batch(512)
@ -48,9 +48,9 @@ def test_random_auto_contrast_pipeline(plot=False):
# Randomly Automatically Contrasted Images # Randomly Automatically Contrasted Images
data_set1 = ds.ImageFolderDataset(dataset_dir=data_dir, shuffle=False) data_set1 = ds.ImageFolderDataset(dataset_dir=data_dir, shuffle=False)
transform_random_auto_contrast = [c_vision.Decode(), transform_random_auto_contrast = [vision.Decode(),
c_vision.Resize(size=[224, 224]), vision.Resize(size=[224, 224]),
c_vision.RandomAutoContrast(prob=0.6)] vision.RandomAutoContrast(prob=0.6)]
ds_random_auto_contrast = data_set1.map(operations=transform_random_auto_contrast, input_columns="image") ds_random_auto_contrast = data_set1.map(operations=transform_random_auto_contrast, input_columns="image")
ds_random_auto_contrast = ds_random_auto_contrast.batch(512) ds_random_auto_contrast = ds_random_auto_contrast.batch(512)
for idx, (image, _) in enumerate(ds_random_auto_contrast): for idx, (image, _) in enumerate(ds_random_auto_contrast):
@ -77,9 +77,9 @@ def test_random_auto_contrast_eager():
img = np.fromfile(image_file, dtype=np.uint8) img = np.fromfile(image_file, dtype=np.uint8)
logger.info("Image.type: {}, Image.shape: {}".format(type(img), img.shape)) logger.info("Image.type: {}, Image.shape: {}".format(type(img), img.shape))
img = c_vision.Decode()(img) img = vision.Decode()(img)
img_auto_contrast = c_vision.AutoContrast(1.0, None)(img) img_auto_contrast = vision.AutoContrast(1.0, None)(img)
img_random_auto_contrast = c_vision.RandomAutoContrast(1.0, None, 1.0)(img) img_random_auto_contrast = vision.RandomAutoContrast(1.0, None, 1.0)(img)
logger.info("Image.type: {}, Image.shape: {}".format(type(img_auto_contrast), img_random_auto_contrast.shape)) logger.info("Image.type: {}, Image.shape: {}".format(type(img_auto_contrast), img_random_auto_contrast.shape))
assert img_auto_contrast.all() == img_random_auto_contrast.all() assert img_auto_contrast.all() == img_random_auto_contrast.all()
@ -89,8 +89,8 @@ def test_random_auto_contrast_comp(plot=False):
""" """
Test RandomAutoContrast op compared with AutoContrast op. Test RandomAutoContrast op compared with AutoContrast op.
""" """
random_auto_contrast_op = c_vision.RandomAutoContrast(prob=1.0) random_auto_contrast_op = vision.RandomAutoContrast(prob=1.0)
auto_contrast_op = c_vision.AutoContrast() auto_contrast_op = vision.AutoContrast()
dataset1 = ds.ImageFolderDataset(data_dir, 1, shuffle=False, decode=True) dataset1 = ds.ImageFolderDataset(data_dir, 1, shuffle=False, decode=True)
for item in dataset1.create_dict_iterator(num_epochs=1, output_numpy=True): for item in dataset1.create_dict_iterator(num_epochs=1, output_numpy=True):
@ -117,7 +117,7 @@ def test_random_auto_contrast_invalid_prob():
logger.info("test_random_auto_contrast_invalid_prob") logger.info("test_random_auto_contrast_invalid_prob")
dataset = ds.ImageFolderDataset(data_dir, 1, shuffle=False, decode=True) dataset = ds.ImageFolderDataset(data_dir, 1, shuffle=False, decode=True)
try: try:
random_auto_contrast_op = c_vision.RandomAutoContrast(prob=1.5) random_auto_contrast_op = vision.RandomAutoContrast(prob=1.5)
dataset = dataset.map(operations=random_auto_contrast_op, input_columns=['image']) dataset = dataset.map(operations=random_auto_contrast_op, input_columns=['image'])
except ValueError as e: except ValueError as e:
logger.info("Got an exception in DE: {}".format(str(e))) logger.info("Got an exception in DE: {}".format(str(e)))
@ -131,20 +131,20 @@ def test_random_auto_contrast_invalid_ignore():
logger.info("test_random_auto_contrast_invalid_ignore") logger.info("test_random_auto_contrast_invalid_ignore")
try: try:
data_set = ds.ImageFolderDataset(dataset_dir=data_dir, shuffle=False) data_set = ds.ImageFolderDataset(dataset_dir=data_dir, shuffle=False)
data_set = data_set.map(operations=[c_vision.Decode(), data_set = data_set.map(operations=[vision.Decode(),
c_vision.Resize((224, 224)), vision.Resize((224, 224)),
lambda img: np.array(img[:, :, 0])], input_columns=["image"]) lambda img: np.array(img[:, :, 0])], input_columns=["image"])
# invalid ignore # invalid ignore
data_set = data_set.map(operations=c_vision.RandomAutoContrast(ignore=255.5), input_columns="image") data_set = data_set.map(operations=vision.RandomAutoContrast(ignore=255.5), input_columns="image")
except TypeError as error: except TypeError as error:
logger.info("Got an exception in DE: {}".format(str(error))) logger.info("Got an exception in DE: {}".format(str(error)))
assert "Argument ignore with value 255.5 is not of type" in str(error) assert "Argument ignore with value 255.5 is not of type" in str(error)
try: try:
data_set = ds.ImageFolderDataset(dataset_dir=data_dir, shuffle=False) data_set = ds.ImageFolderDataset(dataset_dir=data_dir, shuffle=False)
data_set = data_set.map(operations=[c_vision.Decode(), c_vision.Resize((224, 224)), data_set = data_set.map(operations=[vision.Decode(), vision.Resize((224, 224)),
lambda img: np.array(img[:, :, 0])], input_columns=["image"]) lambda img: np.array(img[:, :, 0])], input_columns=["image"])
# invalid ignore # invalid ignore
data_set = data_set.map(operations=c_vision.RandomAutoContrast(ignore=(10, 100)), input_columns="image") data_set = data_set.map(operations=vision.RandomAutoContrast(ignore=(10, 100)), input_columns="image")
except TypeError as error: except TypeError as error:
logger.info("Got an exception in DE: {}".format(str(error))) logger.info("Got an exception in DE: {}".format(str(error)))
assert "Argument ignore with value (10,100) is not of type" in str(error) assert "Argument ignore with value (10,100) is not of type" in str(error)
@ -157,21 +157,21 @@ def test_random_auto_contrast_invalid_cutoff():
logger.info("test_random_auto_contrast_invalid_cutoff") logger.info("test_random_auto_contrast_invalid_cutoff")
try: try:
data_set = ds.ImageFolderDataset(dataset_dir=data_dir, shuffle=False) data_set = ds.ImageFolderDataset(dataset_dir=data_dir, shuffle=False)
data_set = data_set.map(operations=[c_vision.Decode(), data_set = data_set.map(operations=[vision.Decode(),
c_vision.Resize((224, 224)), vision.Resize((224, 224)),
lambda img: np.array(img[:, :, 0])], input_columns=["image"]) lambda img: np.array(img[:, :, 0])], input_columns=["image"])
# invalid cutoff # invalid cutoff
data_set = data_set.map(operations=c_vision.RandomAutoContrast(cutoff=-10.0), input_columns="image") data_set = data_set.map(operations=vision.RandomAutoContrast(cutoff=-10.0), input_columns="image")
except ValueError as error: except ValueError as error:
logger.info("Got an exception in DE: {}".format(str(error))) logger.info("Got an exception in DE: {}".format(str(error)))
assert "Input cutoff is not within the required interval of [0, 50)." in str(error) assert "Input cutoff is not within the required interval of [0, 50)." in str(error)
try: try:
data_set = ds.ImageFolderDataset(dataset_dir=data_dir, shuffle=False) data_set = ds.ImageFolderDataset(dataset_dir=data_dir, shuffle=False)
data_set = data_set.map(operations=[c_vision.Decode(), data_set = data_set.map(operations=[vision.Decode(),
c_vision.Resize((224, 224)), vision.Resize((224, 224)),
lambda img: np.array(img[:, :, 0])], input_columns=["image"]) lambda img: np.array(img[:, :, 0])], input_columns=["image"])
# invalid cutoff # invalid cutoff
data_set = data_set.map(operations=c_vision.RandomAutoContrast(cutoff=120.0), input_columns="image") data_set = data_set.map(operations=vision.RandomAutoContrast(cutoff=120.0), input_columns="image")
except ValueError as error: except ValueError as error:
logger.info("Got an exception in DE: {}".format(str(error))) logger.info("Got an exception in DE: {}".format(str(error)))
assert "Input cutoff is not within the required interval of [0, 50)." in str(error) assert "Input cutoff is not within the required interval of [0, 50)." in str(error)
@ -185,11 +185,11 @@ def test_random_auto_contrast_one_channel():
""" """
logger.info("test_random_auto_contrast_one_channel") logger.info("test_random_auto_contrast_one_channel")
c_op = c_vision.RandomAutoContrast() c_op = vision.RandomAutoContrast()
try: try:
data_set = ds.ImageFolderDataset(dataset_dir=data_dir, shuffle=False) data_set = ds.ImageFolderDataset(dataset_dir=data_dir, shuffle=False)
data_set = data_set.map(operations=[c_vision.Decode(), c_vision.Resize((224, 224)), data_set = data_set.map(operations=[vision.Decode(), vision.Resize((224, 224)),
lambda img: np.array(img[:, :, 0])], input_columns=["image"]) lambda img: np.array(img[:, :, 0])], input_columns=["image"])
data_set = data_set.map(operations=c_op, input_columns="image") data_set = data_set.map(operations=c_op, input_columns="image")
@ -207,11 +207,11 @@ def test_random_auto_contrast_four_dim():
""" """
logger.info("test_random_auto_contrast_four_dim") logger.info("test_random_auto_contrast_four_dim")
c_op = c_vision.RandomAutoContrast() c_op = vision.RandomAutoContrast()
try: try:
data_set = ds.ImageFolderDataset(dataset_dir=data_dir, shuffle=False) data_set = ds.ImageFolderDataset(dataset_dir=data_dir, shuffle=False)
data_set = data_set.map(operations=[c_vision.Decode(), c_vision.Resize((224, 224)), data_set = data_set.map(operations=[vision.Decode(), vision.Resize((224, 224)),
lambda img: np.array(img[2, 200, 10, 32])], input_columns=["image"]) lambda img: np.array(img[2, 200, 10, 32])], input_columns=["image"])
data_set = data_set.map(operations=c_op, input_columns="image") data_set = data_set.map(operations=c_op, input_columns="image")
@ -229,11 +229,11 @@ def test_random_auto_contrast_invalid_input():
""" """
logger.info("test_random_invert_invalid_input") logger.info("test_random_invert_invalid_input")
c_op = c_vision.RandomAutoContrast() c_op = vision.RandomAutoContrast()
try: try:
data_set = ds.ImageFolderDataset(dataset_dir=data_dir, shuffle=False) data_set = ds.ImageFolderDataset(dataset_dir=data_dir, shuffle=False)
data_set = data_set.map(operations=[c_vision.Decode(), c_vision.Resize((224, 224)), data_set = data_set.map(operations=[vision.Decode(), vision.Resize((224, 224)),
lambda img: np.array(img[2, 32, 3], dtype=uint32)], input_columns=["image"]) lambda img: np.array(img[2, 32, 3], dtype=uint32)], input_columns=["image"])
data_set = data_set.map(operations=c_op, input_columns="image") data_set = data_set.map(operations=c_op, input_columns="image")

View File

@ -17,8 +17,8 @@ Testing RandomChoice op in DE
""" """
import numpy as np import numpy as np
import mindspore.dataset as ds import mindspore.dataset as ds
import mindspore.dataset.transforms.py_transforms as py_transforms import mindspore.dataset.transforms.transforms as data_trans
import mindspore.dataset.vision.py_transforms as py_vision import mindspore.dataset.vision.transforms as vision
from mindspore import log as logger from mindspore import log as logger
from util import visualize_list, diff_mse from util import visualize_list, diff_mse
@ -32,19 +32,19 @@ def test_random_choice_op(plot=False):
""" """
logger.info("test_random_choice_op") logger.info("test_random_choice_op")
# define map operations # define map operations
transforms_list = [py_vision.CenterCrop(64), py_vision.RandomRotation(30)] transforms_list = [vision.CenterCrop(64), vision.RandomRotation(30)]
transforms1 = [ transforms1 = [
py_vision.Decode(), vision.Decode(True),
py_transforms.RandomChoice(transforms_list), data_trans.RandomChoice(transforms_list),
py_vision.ToTensor() vision.ToTensor()
] ]
transform1 = py_transforms.Compose(transforms1) transform1 = data_trans.Compose(transforms1)
transforms2 = [ transforms2 = [
py_vision.Decode(), vision.Decode(True),
py_vision.ToTensor() vision.ToTensor()
] ]
transform2 = py_transforms.Compose(transforms2) transform2 = data_trans.Compose(transforms2)
# First dataset # First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
@ -71,20 +71,20 @@ def test_random_choice_comp(plot=False):
""" """
logger.info("test_random_choice_comp") logger.info("test_random_choice_comp")
# define map operations # define map operations
transforms_list = [py_vision.CenterCrop(64)] transforms_list = [vision.CenterCrop(64)]
transforms1 = [ transforms1 = [
py_vision.Decode(), vision.Decode(True),
py_transforms.RandomChoice(transforms_list), data_trans.RandomChoice(transforms_list),
py_vision.ToTensor() vision.ToTensor()
] ]
transform1 = py_transforms.Compose(transforms1) transform1 = data_trans.Compose(transforms1)
transforms2 = [ transforms2 = [
py_vision.Decode(), vision.Decode(True),
py_vision.CenterCrop(64), vision.CenterCrop(64),
py_vision.ToTensor() vision.ToTensor()
] ]
transform2 = py_transforms.Compose(transforms2) transform2 = data_trans.Compose(transforms2)
# First dataset # First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
@ -116,13 +116,13 @@ def test_random_choice_exception_random_crop_badinput():
logger.info("test_random_choice_exception_random_crop_badinput") logger.info("test_random_choice_exception_random_crop_badinput")
# define map operations # define map operations
# note: crop size[5000, 5000] > image size[4032, 2268] # note: crop size[5000, 5000] > image size[4032, 2268]
transforms_list = [py_vision.RandomCrop(5000)] transforms_list = [vision.RandomCrop(5000)]
transforms = [ transforms = [
py_vision.Decode(), vision.Decode(True),
py_transforms.RandomChoice(transforms_list), data_trans.RandomChoice(transforms_list),
py_vision.ToTensor() vision.ToTensor()
] ]
transform = py_transforms.Compose(transforms) transform = data_trans.Compose(transforms)
# Generate dataset # Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
data = data.map(operations=transform, input_columns=["image"]) data = data.map(operations=transform, input_columns=["image"])

View File

@ -19,9 +19,8 @@ import numpy as np
import pytest import pytest
import mindspore.dataset as ds import mindspore.dataset as ds
import mindspore.dataset.transforms.py_transforms import mindspore.dataset.transforms.transforms
import mindspore.dataset.vision.c_transforms as vision import mindspore.dataset.vision.transforms as vision
import mindspore.dataset.vision.py_transforms as F
from mindspore import log as logger from mindspore import log as logger
from util import visualize_list, diff_mse, save_and_check_md5, \ from util import visualize_list, diff_mse, save_and_check_md5, \
config_get_set_seed, config_get_set_num_parallel_workers config_get_set_seed, config_get_set_num_parallel_workers
@ -45,9 +44,9 @@ def test_random_color_py(degrees=(0.1, 1.9), plot=False):
# Original Images # Original Images
data = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False) data = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
transforms_original = mindspore.dataset.transforms.py_transforms.Compose([F.Decode(), transforms_original = mindspore.dataset.transforms.transforms.Compose([vision.Decode(True),
F.Resize((224, 224)), vision.Resize((224, 224)),
F.ToTensor()]) vision.ToTensor()])
ds_original = data.map(operations=transforms_original, input_columns="image") ds_original = data.map(operations=transforms_original, input_columns="image")
@ -64,10 +63,10 @@ def test_random_color_py(degrees=(0.1, 1.9), plot=False):
# Random Color Adjusted Images # Random Color Adjusted Images
data = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False) data = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
transforms_random_color = mindspore.dataset.transforms.py_transforms.Compose([F.Decode(), transforms_random_color = mindspore.dataset.transforms.transforms.Compose([vision.Decode(True),
F.Resize((224, 224)), vision.Resize((224, 224)),
F.RandomColor(degrees=degrees), vision.RandomColor(degrees=degrees),
F.ToTensor()]) vision.ToTensor()])
ds_random_color = data.map(operations=transforms_random_color, input_columns="image") ds_random_color = data.map(operations=transforms_random_color, input_columns="image")
@ -147,9 +146,9 @@ def test_random_color_py_md5():
# Generate dataset # Generate dataset
data = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False) data = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
transforms = mindspore.dataset.transforms.py_transforms.Compose([F.Decode(), transforms = mindspore.dataset.transforms.transforms.Compose([vision.Decode(True),
F.RandomColor((2.0, 2.5)), vision.RandomColor((2.0, 2.5)),
F.ToTensor()]) vision.ToTensor()])
data = data.map(operations=transforms, input_columns="image") data = data.map(operations=transforms, input_columns="image")
# Compare with expected md5 from images # Compare with expected md5 from images
@ -177,13 +176,13 @@ def test_compare_random_color_op(degrees=None, plot=False):
if degrees is None: if degrees is None:
c_op = vision.RandomColor() c_op = vision.RandomColor()
p_op = F.RandomColor() p_op = vision.RandomColor()
else: else:
c_op = vision.RandomColor(degrees) c_op = vision.RandomColor(degrees)
p_op = F.RandomColor(degrees) p_op = vision.RandomColor(degrees)
transforms_random_color_py = mindspore.dataset.transforms.py_transforms.Compose( transforms_random_color_py = mindspore.dataset.transforms.transforms.Compose(
[lambda img: img.astype(np.uint8), F.ToPIL(), [lambda img: img.astype(np.uint8), vision.ToPIL(),
p_op, np.array]) p_op, np.array])
data1 = data1.map(operations=[vision.Decode(), c_op], input_columns=["image"]) data1 = data1.map(operations=[vision.Decode(), c_op], input_columns=["image"])

View File

@ -19,9 +19,8 @@ import pytest
import numpy as np import numpy as np
import mindspore.dataset as ds import mindspore.dataset as ds
import mindspore.dataset.transforms.py_transforms import mindspore.dataset.transforms.transforms
import mindspore.dataset.vision.c_transforms as c_vision import mindspore.dataset.vision.transforms as vision
import mindspore.dataset.vision.py_transforms as py_vision
from mindspore import log as logger from mindspore import log as logger
from util import diff_mse, visualize_image, save_and_check_md5, \ from util import diff_mse, visualize_image, save_and_check_md5, \
config_get_set_seed, config_get_set_num_parallel_workers config_get_set_seed, config_get_set_num_parallel_workers
@ -38,19 +37,19 @@ def util_test_random_color_adjust_error(brightness=(1, 1), contrast=(1, 1), satu
""" """
transforms = [ transforms = [
py_vision.Decode(), vision.Decode(True),
py_vision.Grayscale(1), vision.Grayscale(1),
py_vision.ToTensor(), vision.ToTensor(),
(lambda image: (image.transpose(1, 2, 0) * 255).astype(np.uint8)) (lambda image: (image.transpose(1, 2, 0) * 255).astype(np.uint8))
] ]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms) transform = mindspore.dataset.transforms.transforms.Compose(transforms)
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
data1 = data1.map(operations=transform, input_columns=["image"]) data1 = data1.map(operations=transform, input_columns=["image"])
# if input is grayscale, the output dimensions should be single channel, the following should fail # if input is grayscale, the output dimensions should be single channel, the following should fail
random_adjust_op = c_vision.RandomColorAdjust(brightness=brightness, contrast=contrast, saturation=saturation, random_adjust_op = vision.RandomColorAdjust(brightness=brightness, contrast=contrast, saturation=saturation,
hue=hue) hue=hue)
with pytest.raises(RuntimeError) as info: with pytest.raises(RuntimeError) as info:
data1 = data1.map(operations=random_adjust_op, input_columns=["image"]) data1 = data1.map(operations=random_adjust_op, input_columns=["image"])
dataset_shape_1 = [] dataset_shape_1 = []
@ -70,10 +69,10 @@ def util_test_random_color_adjust_op(brightness=(1, 1), contrast=(1, 1), saturat
# First dataset # First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
decode_op = c_vision.Decode() decode_op = vision.Decode()
random_adjust_op = c_vision.RandomColorAdjust(brightness=brightness, contrast=contrast, saturation=saturation, random_adjust_op = vision.RandomColorAdjust(brightness=brightness, contrast=contrast, saturation=saturation,
hue=hue) hue=hue)
ctrans = [decode_op, ctrans = [decode_op,
random_adjust_op, random_adjust_op,
@ -83,12 +82,12 @@ def util_test_random_color_adjust_op(brightness=(1, 1), contrast=(1, 1), saturat
# Second dataset # Second dataset
transforms = [ transforms = [
py_vision.Decode(), vision.Decode(True),
py_vision.RandomColorAdjust(brightness=brightness, contrast=contrast, saturation=saturation, vision.RandomColorAdjust(brightness=brightness, contrast=contrast, saturation=saturation,
hue=hue), hue=hue),
py_vision.ToTensor() vision.ToTensor()
] ]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms) transform = mindspore.dataset.transforms.transforms.Compose(transforms)
data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
data2 = data2.map(operations=transform, input_columns=["image"]) data2 = data2.map(operations=transform, input_columns=["image"])
@ -203,18 +202,18 @@ def test_random_color_adjust_md5():
# First dataset # First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
decode_op = c_vision.Decode() decode_op = vision.Decode()
random_adjust_op = c_vision.RandomColorAdjust(0.4, 0.4, 0.4, 0.1) random_adjust_op = vision.RandomColorAdjust(0.4, 0.4, 0.4, 0.1)
data1 = data1.map(operations=decode_op, input_columns=["image"]) data1 = data1.map(operations=decode_op, input_columns=["image"])
data1 = data1.map(operations=random_adjust_op, input_columns=["image"]) data1 = data1.map(operations=random_adjust_op, input_columns=["image"])
# Second dataset # Second dataset
transforms = [ transforms = [
py_vision.Decode(), vision.Decode(True),
py_vision.RandomColorAdjust(0.4, 0.4, 0.4, 0.1), vision.RandomColorAdjust(0.4, 0.4, 0.4, 0.1),
py_vision.ToTensor() vision.ToTensor()
] ]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms) transform = mindspore.dataset.transforms.transforms.Compose(transforms)
data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
data2 = data2.map(operations=transform, input_columns=["image"]) data2 = data2.map(operations=transform, input_columns=["image"])
# Compare with expected md5 from images # Compare with expected md5 from images

View File

@ -17,10 +17,8 @@ Testing RandomCrop op in DE
""" """
import numpy as np import numpy as np
import mindspore.dataset.transforms.py_transforms import mindspore.dataset.transforms.transforms as ops
import mindspore.dataset.transforms.c_transforms as ops import mindspore.dataset.vision.transforms as vision
import mindspore.dataset.vision.c_transforms as c_vision
import mindspore.dataset.vision.py_transforms as py_vision
import mindspore.dataset.vision.utils as mode import mindspore.dataset.vision.utils as mode
import mindspore.dataset as ds import mindspore.dataset as ds
from mindspore import log as logger from mindspore import log as logger
@ -42,8 +40,8 @@ def test_random_crop_op_c(plot=False):
# First dataset # First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
random_crop_op = c_vision.RandomCrop([512, 512], [200, 200, 200, 200]) random_crop_op = vision.RandomCrop([512, 512], [200, 200, 200, 200])
decode_op = c_vision.Decode() decode_op = vision.Decode()
data1 = data1.map(operations=decode_op, input_columns=["image"]) data1 = data1.map(operations=decode_op, input_columns=["image"])
data1 = data1.map(operations=random_crop_op, input_columns=["image"]) data1 = data1.map(operations=random_crop_op, input_columns=["image"])
@ -71,20 +69,20 @@ def test_random_crop_op_py(plot=False):
# First dataset # First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
transforms1 = [ transforms1 = [
py_vision.Decode(), vision.Decode(True),
py_vision.RandomCrop([512, 512], [200, 200, 200, 200]), vision.RandomCrop([512, 512], [200, 200, 200, 200]),
py_vision.ToTensor() vision.ToTensor()
] ]
transform1 = mindspore.dataset.transforms.py_transforms.Compose(transforms1) transform1 = ops.Compose(transforms1)
data1 = data1.map(operations=transform1, input_columns=["image"]) data1 = data1.map(operations=transform1, input_columns=["image"])
# Second dataset # Second dataset
# Second dataset for comparison # Second dataset for comparison
data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
transforms2 = [ transforms2 = [
py_vision.Decode(), vision.Decode(True),
py_vision.ToTensor() vision.ToTensor()
] ]
transform2 = mindspore.dataset.transforms.py_transforms.Compose(transforms2) transform2 = ops.Compose(transforms2)
data2 = data2.map(operations=transform2, input_columns=["image"]) data2 = data2.map(operations=transform2, input_columns=["image"])
crop_images = [] crop_images = []
@ -100,7 +98,7 @@ def test_random_crop_op_py(plot=False):
def test_random_crop_01_c(): def test_random_crop_01_c():
""" """
Test RandomCrop op with c_transforms: size is a single integer, expected to pass Test RandomCrop op with C implementation: size is a single integer, expected to pass
""" """
logger.info("test_random_crop_01_c") logger.info("test_random_crop_01_c")
original_seed = config_get_set_seed(0) original_seed = config_get_set_seed(0)
@ -109,8 +107,8 @@ def test_random_crop_01_c():
# Generate dataset # Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
# Note: If size is an int, a square crop of size (size, size) is returned. # Note: If size is an int, a square crop of size (size, size) is returned.
random_crop_op = c_vision.RandomCrop(512) random_crop_op = vision.RandomCrop(512)
decode_op = c_vision.Decode() decode_op = vision.Decode()
data = data.map(operations=decode_op, input_columns=["image"]) data = data.map(operations=decode_op, input_columns=["image"])
data = data.map(operations=random_crop_op, input_columns=["image"]) data = data.map(operations=random_crop_op, input_columns=["image"])
@ -123,7 +121,7 @@ def test_random_crop_01_c():
def test_random_crop_01_py(): def test_random_crop_01_py():
""" """
Test RandomCrop op with py_transforms: size is a single integer, expected to pass Test RandomCrop op with transforms: size is a single integer, expected to pass
""" """
logger.info("test_random_crop_01_py") logger.info("test_random_crop_01_py")
original_seed = config_get_set_seed(0) original_seed = config_get_set_seed(0)
@ -133,11 +131,11 @@ def test_random_crop_01_py():
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
# Note: If size is an int, a square crop of size (size, size) is returned. # Note: If size is an int, a square crop of size (size, size) is returned.
transforms = [ transforms = [
py_vision.Decode(), vision.Decode(True),
py_vision.RandomCrop(512), vision.RandomCrop(512),
py_vision.ToTensor() vision.ToTensor()
] ]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms) transform = ops.Compose(transforms)
data = data.map(operations=transform, input_columns=["image"]) data = data.map(operations=transform, input_columns=["image"])
filename = "random_crop_01_py_result.npz" filename = "random_crop_01_py_result.npz"
@ -149,7 +147,7 @@ def test_random_crop_01_py():
def test_random_crop_02_c(): def test_random_crop_02_c():
""" """
Test RandomCrop op with c_transforms: size is a list/tuple with length 2, expected to pass Test RandomCrop op with C implementation: size is a list/tuple with length 2, expected to pass
""" """
logger.info("test_random_crop_02_c") logger.info("test_random_crop_02_c")
original_seed = config_get_set_seed(0) original_seed = config_get_set_seed(0)
@ -158,8 +156,8 @@ def test_random_crop_02_c():
# Generate dataset # Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
# Note: If size is a sequence of length 2, it should be (height, width). # Note: If size is a sequence of length 2, it should be (height, width).
random_crop_op = c_vision.RandomCrop([512, 375]) random_crop_op = vision.RandomCrop([512, 375])
decode_op = c_vision.Decode() decode_op = vision.Decode()
data = data.map(operations=decode_op, input_columns=["image"]) data = data.map(operations=decode_op, input_columns=["image"])
data = data.map(operations=random_crop_op, input_columns=["image"]) data = data.map(operations=random_crop_op, input_columns=["image"])
@ -172,7 +170,7 @@ def test_random_crop_02_c():
def test_random_crop_02_py(): def test_random_crop_02_py():
""" """
Test RandomCrop op with py_transforms: size is a list/tuple with length 2, expected to pass Test RandomCrop op with transforms: size is a list/tuple with length 2, expected to pass
""" """
logger.info("test_random_crop_02_py") logger.info("test_random_crop_02_py")
original_seed = config_get_set_seed(0) original_seed = config_get_set_seed(0)
@ -182,11 +180,11 @@ def test_random_crop_02_py():
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
# Note: If size is a sequence of length 2, it should be (height, width). # Note: If size is a sequence of length 2, it should be (height, width).
transforms = [ transforms = [
py_vision.Decode(), vision.Decode(True),
py_vision.RandomCrop([512, 375]), vision.RandomCrop([512, 375]),
py_vision.ToTensor() vision.ToTensor()
] ]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms) transform = ops.Compose(transforms)
data = data.map(operations=transform, input_columns=["image"]) data = data.map(operations=transform, input_columns=["image"])
filename = "random_crop_02_py_result.npz" filename = "random_crop_02_py_result.npz"
@ -198,7 +196,7 @@ def test_random_crop_02_py():
def test_random_crop_03_c(): def test_random_crop_03_c():
""" """
Test RandomCrop op with c_transforms: input image size == crop size, expected to pass Test RandomCrop op with C implementation: input image size == crop size, expected to pass
""" """
logger.info("test_random_crop_03_c") logger.info("test_random_crop_03_c")
original_seed = config_get_set_seed(0) original_seed = config_get_set_seed(0)
@ -207,8 +205,8 @@ def test_random_crop_03_c():
# Generate dataset # Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
# Note: The size of the image is 4032*2268 # Note: The size of the image is 4032*2268
random_crop_op = c_vision.RandomCrop([2268, 4032]) random_crop_op = vision.RandomCrop([2268, 4032])
decode_op = c_vision.Decode() decode_op = vision.Decode()
data = data.map(operations=decode_op, input_columns=["image"]) data = data.map(operations=decode_op, input_columns=["image"])
data = data.map(operations=random_crop_op, input_columns=["image"]) data = data.map(operations=random_crop_op, input_columns=["image"])
@ -221,7 +219,7 @@ def test_random_crop_03_c():
def test_random_crop_03_py(): def test_random_crop_03_py():
""" """
Test RandomCrop op with py_transforms: input image size == crop size, expected to pass Test RandomCrop op with transforms: input image size == crop size, expected to pass
""" """
logger.info("test_random_crop_03_py") logger.info("test_random_crop_03_py")
original_seed = config_get_set_seed(0) original_seed = config_get_set_seed(0)
@ -231,11 +229,11 @@ def test_random_crop_03_py():
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
# Note: The size of the image is 4032*2268 # Note: The size of the image is 4032*2268
transforms = [ transforms = [
py_vision.Decode(), vision.Decode(True),
py_vision.RandomCrop([2268, 4032]), vision.RandomCrop([2268, 4032]),
py_vision.ToTensor() vision.ToTensor()
] ]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms) transform = ops.Compose(transforms)
data = data.map(operations=transform, input_columns=["image"]) data = data.map(operations=transform, input_columns=["image"])
filename = "random_crop_03_py_result.npz" filename = "random_crop_03_py_result.npz"
@ -247,15 +245,15 @@ def test_random_crop_03_py():
def test_random_crop_04_c(): def test_random_crop_04_c():
""" """
Test RandomCrop op with c_transforms: input image size < crop size, expected to fail Test RandomCrop op with C implementation: input image size < crop size, expected to fail
""" """
logger.info("test_random_crop_04_c") logger.info("test_random_crop_04_c")
# Generate dataset # Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
# Note: The size of the image is 4032*2268 # Note: The size of the image is 4032*2268
random_crop_op = c_vision.RandomCrop([2268, 4033]) random_crop_op = vision.RandomCrop([2268, 4033])
decode_op = c_vision.Decode() decode_op = vision.Decode()
data = data.map(operations=decode_op, input_columns=["image"]) data = data.map(operations=decode_op, input_columns=["image"])
data = data.map(operations=random_crop_op, input_columns=["image"]) data = data.map(operations=random_crop_op, input_columns=["image"])
try: try:
@ -266,7 +264,7 @@ def test_random_crop_04_c():
def test_random_crop_04_py(): def test_random_crop_04_py():
""" """
Test RandomCrop op with py_transforms: Test RandomCrop op with transforms:
input image size < crop size, expected to fail input image size < crop size, expected to fail
""" """
logger.info("test_random_crop_04_py") logger.info("test_random_crop_04_py")
@ -275,11 +273,11 @@ def test_random_crop_04_py():
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
# Note: The size of the image is 4032*2268 # Note: The size of the image is 4032*2268
transforms = [ transforms = [
py_vision.Decode(), vision.Decode(True),
py_vision.RandomCrop([2268, 4033]), vision.RandomCrop([2268, 4033]),
py_vision.ToTensor() vision.ToTensor()
] ]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms) transform = ops.Compose(transforms)
data = data.map(operations=transform, input_columns=["image"]) data = data.map(operations=transform, input_columns=["image"])
try: try:
data.create_dict_iterator(num_epochs=1).__next__() data.create_dict_iterator(num_epochs=1).__next__()
@ -289,7 +287,7 @@ def test_random_crop_04_py():
def test_random_crop_05_c(): def test_random_crop_05_c():
""" """
Test RandomCrop op with c_transforms: Test RandomCrop op with C implementation:
input image size < crop size but pad_if_needed is enabled, input image size < crop size but pad_if_needed is enabled,
expected to pass expected to pass
""" """
@ -300,8 +298,8 @@ def test_random_crop_05_c():
# Generate dataset # Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
# Note: The size of the image is 4032*2268 # Note: The size of the image is 4032*2268
random_crop_op = c_vision.RandomCrop([2268, 4033], [200, 200, 200, 200], pad_if_needed=True) random_crop_op = vision.RandomCrop([2268, 4033], [200, 200, 200, 200], pad_if_needed=True)
decode_op = c_vision.Decode() decode_op = vision.Decode()
data = data.map(operations=decode_op, input_columns=["image"]) data = data.map(operations=decode_op, input_columns=["image"])
data = data.map(operations=random_crop_op, input_columns=["image"]) data = data.map(operations=random_crop_op, input_columns=["image"])
@ -314,7 +312,7 @@ def test_random_crop_05_c():
def test_random_crop_05_py(): def test_random_crop_05_py():
""" """
Test RandomCrop op with py_transforms: Test RandomCrop op with transforms:
input image size < crop size but pad_if_needed is enabled, input image size < crop size but pad_if_needed is enabled,
expected to pass expected to pass
""" """
@ -326,11 +324,11 @@ def test_random_crop_05_py():
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
# Note: The size of the image is 4032*2268 # Note: The size of the image is 4032*2268
transforms = [ transforms = [
py_vision.Decode(), vision.Decode(True),
py_vision.RandomCrop([2268, 4033], [200, 200, 200, 200], pad_if_needed=True), vision.RandomCrop([2268, 4033], [200, 200, 200, 200], pad_if_needed=True),
py_vision.ToTensor() vision.ToTensor()
] ]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms) transform = ops.Compose(transforms)
data = data.map(operations=transform, input_columns=["image"]) data = data.map(operations=transform, input_columns=["image"])
filename = "random_crop_05_py_result.npz" filename = "random_crop_05_py_result.npz"
@ -342,7 +340,7 @@ def test_random_crop_05_py():
def test_random_crop_06_c(): def test_random_crop_06_c():
""" """
Test RandomCrop op with c_transforms: Test RandomCrop op with C implementation:
invalid size, expected to raise TypeError invalid size, expected to raise TypeError
""" """
logger.info("test_random_crop_06_c") logger.info("test_random_crop_06_c")
@ -351,8 +349,8 @@ def test_random_crop_06_c():
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
try: try:
# Note: if size is neither an int nor a list of length 2, an exception will raise # Note: if size is neither an int nor a list of length 2, an exception will raise
random_crop_op = c_vision.RandomCrop([512, 512, 375]) random_crop_op = vision.RandomCrop([512, 512, 375])
decode_op = c_vision.Decode() decode_op = vision.Decode()
data = data.map(operations=decode_op, input_columns=["image"]) data = data.map(operations=decode_op, input_columns=["image"])
data = data.map(operations=random_crop_op, input_columns=["image"]) data = data.map(operations=random_crop_op, input_columns=["image"])
except TypeError as e: except TypeError as e:
@ -361,7 +359,7 @@ def test_random_crop_06_c():
def test_random_crop_06_py(): def test_random_crop_06_py():
""" """
Test RandomCrop op with py_transforms: Test RandomCrop op with transforms:
invalid size, expected to raise TypeError invalid size, expected to raise TypeError
""" """
logger.info("test_random_crop_06_py") logger.info("test_random_crop_06_py")
@ -371,11 +369,11 @@ def test_random_crop_06_py():
try: try:
# Note: if size is neither an int nor a list of length 2, an exception will raise # Note: if size is neither an int nor a list of length 2, an exception will raise
transforms = [ transforms = [
py_vision.Decode(), vision.Decode(True),
py_vision.RandomCrop([512, 512, 375]), vision.RandomCrop([512, 512, 375]),
py_vision.ToTensor() vision.ToTensor()
] ]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms) transform = ops.Compose(transforms)
data = data.map(operations=transform, input_columns=["image"]) data = data.map(operations=transform, input_columns=["image"])
except TypeError as e: except TypeError as e:
logger.info("Got an exception in DE: {}".format(str(e))) logger.info("Got an exception in DE: {}".format(str(e)))
@ -383,7 +381,7 @@ def test_random_crop_06_py():
def test_random_crop_07_c(): def test_random_crop_07_c():
""" """
Test RandomCrop op with c_transforms: Test RandomCrop op with C implementation:
padding_mode is Border.CONSTANT and fill_value is 255 (White), padding_mode is Border.CONSTANT and fill_value is 255 (White),
expected to pass expected to pass
""" """
@ -394,8 +392,8 @@ def test_random_crop_07_c():
# Generate dataset # Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
# Note: The padding_mode is default as Border.CONSTANT and set filling color to be white. # Note: The padding_mode is default as Border.CONSTANT and set filling color to be white.
random_crop_op = c_vision.RandomCrop(512, [200, 200, 200, 200], fill_value=(255, 255, 255)) random_crop_op = vision.RandomCrop(512, [200, 200, 200, 200], fill_value=(255, 255, 255))
decode_op = c_vision.Decode() decode_op = vision.Decode()
data = data.map(operations=decode_op, input_columns=["image"]) data = data.map(operations=decode_op, input_columns=["image"])
data = data.map(operations=random_crop_op, input_columns=["image"]) data = data.map(operations=random_crop_op, input_columns=["image"])
@ -408,7 +406,7 @@ def test_random_crop_07_c():
def test_random_crop_07_py(): def test_random_crop_07_py():
""" """
Test RandomCrop op with py_transforms: Test RandomCrop op with transforms:
padding_mode is Border.CONSTANT and fill_value is 255 (White), padding_mode is Border.CONSTANT and fill_value is 255 (White),
expected to pass expected to pass
""" """
@ -420,11 +418,11 @@ def test_random_crop_07_py():
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
# Note: The padding_mode is default as Border.CONSTANT and set filling color to be white. # Note: The padding_mode is default as Border.CONSTANT and set filling color to be white.
transforms = [ transforms = [
py_vision.Decode(), vision.Decode(True),
py_vision.RandomCrop(512, [200, 200, 200, 200], fill_value=(255, 255, 255)), vision.RandomCrop(512, [200, 200, 200, 200], fill_value=(255, 255, 255)),
py_vision.ToTensor() vision.ToTensor()
] ]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms) transform = ops.Compose(transforms)
data = data.map(operations=transform, input_columns=["image"]) data = data.map(operations=transform, input_columns=["image"])
filename = "random_crop_07_py_result.npz" filename = "random_crop_07_py_result.npz"
@ -436,7 +434,7 @@ def test_random_crop_07_py():
def test_random_crop_08_c(): def test_random_crop_08_c():
""" """
Test RandomCrop op with c_transforms: padding_mode is Border.EDGE, Test RandomCrop op with C implementation: padding_mode is Border.EDGE,
expected to pass expected to pass
""" """
logger.info("test_random_crop_08_c") logger.info("test_random_crop_08_c")
@ -446,8 +444,8 @@ def test_random_crop_08_c():
# Generate dataset # Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
# Note: The padding_mode is Border.EDGE. # Note: The padding_mode is Border.EDGE.
random_crop_op = c_vision.RandomCrop(512, [200, 200, 200, 200], padding_mode=mode.Border.EDGE) random_crop_op = vision.RandomCrop(512, [200, 200, 200, 200], padding_mode=mode.Border.EDGE)
decode_op = c_vision.Decode() decode_op = vision.Decode()
data = data.map(operations=decode_op, input_columns=["image"]) data = data.map(operations=decode_op, input_columns=["image"])
data = data.map(operations=random_crop_op, input_columns=["image"]) data = data.map(operations=random_crop_op, input_columns=["image"])
@ -460,7 +458,7 @@ def test_random_crop_08_c():
def test_random_crop_08_py(): def test_random_crop_08_py():
""" """
Test RandomCrop op with py_transforms: padding_mode is Border.EDGE, Test RandomCrop op with transforms: padding_mode is Border.EDGE,
expected to pass expected to pass
""" """
logger.info("test_random_crop_08_py") logger.info("test_random_crop_08_py")
@ -471,11 +469,11 @@ def test_random_crop_08_py():
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
# Note: The padding_mode is Border.EDGE. # Note: The padding_mode is Border.EDGE.
transforms = [ transforms = [
py_vision.Decode(), vision.Decode(True),
py_vision.RandomCrop(512, [200, 200, 200, 200], padding_mode=mode.Border.EDGE), vision.RandomCrop(512, [200, 200, 200, 200], padding_mode=mode.Border.EDGE),
py_vision.ToTensor() vision.ToTensor()
] ]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms) transform = ops.Compose(transforms)
data = data.map(operations=transform, input_columns=["image"]) data = data.map(operations=transform, input_columns=["image"])
filename = "random_crop_08_py_result.npz" filename = "random_crop_08_py_result.npz"
@ -487,25 +485,25 @@ def test_random_crop_08_py():
def test_random_crop_09(): def test_random_crop_09():
""" """
Test RandomCrop op: invalid type of input image (not PIL), expected to raise TypeError Test RandomCrop op: invalid type of input image format, expected to raise RuntimeError
""" """
logger.info("test_random_crop_09") logger.info("test_random_crop_09")
# Generate dataset # Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
transforms = [ transforms = [
py_vision.Decode(), vision.Decode(True),
py_vision.ToTensor(), vision.ToTensor(),
# Note: if input is not PIL image, TypeError will raise # Note: Input is wrong image format
py_vision.RandomCrop(512) vision.RandomCrop(512)
] ]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms) transform = ops.Compose(transforms)
data = data.map(operations=transform, input_columns=["image"]) data = data.map(operations=transform, input_columns=["image"])
try: try:
data.create_dict_iterator(num_epochs=1).__next__() data.create_dict_iterator(num_epochs=1).__next__()
except RuntimeError as e: except RuntimeError as e:
logger.info("Got an exception in DE: {}".format(str(e))) logger.info("Got an exception in DE: {}".format(str(e)))
assert "should be PIL image" in str(e) assert "Unexpected error. Pad: input shape is not <H,W,C> or <H, W>, got rank: 3" in str(e)
def test_random_crop_comp(plot=False): def test_random_crop_comp(plot=False):
""" """
@ -516,19 +514,19 @@ def test_random_crop_comp(plot=False):
# First dataset # First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
random_crop_op = c_vision.RandomCrop(cropped_size) random_crop_op = vision.RandomCrop(cropped_size)
decode_op = c_vision.Decode() decode_op = vision.Decode()
data1 = data1.map(operations=decode_op, input_columns=["image"]) data1 = data1.map(operations=decode_op, input_columns=["image"])
data1 = data1.map(operations=random_crop_op, input_columns=["image"]) data1 = data1.map(operations=random_crop_op, input_columns=["image"])
# Second dataset # Second dataset
data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
transforms = [ transforms = [
py_vision.Decode(), vision.Decode(True),
py_vision.RandomCrop(cropped_size), vision.RandomCrop(cropped_size),
py_vision.ToTensor() vision.ToTensor()
] ]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms) transform = ops.Compose(transforms)
data2 = data2.map(operations=transform, input_columns=["image"]) data2 = data2.map(operations=transform, input_columns=["image"])
image_c_cropped = [] image_c_cropped = []
@ -551,8 +549,8 @@ def test_random_crop_09_c():
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
data = data.map(operations=ops.Duplicate(), input_columns=["image"], data = data.map(operations=ops.Duplicate(), input_columns=["image"],
output_columns=["image", "image_copy"], column_order=["image", "image_copy"]) output_columns=["image", "image_copy"], column_order=["image", "image_copy"])
random_crop_op = c_vision.RandomCrop([512, 512], [200, 200, 200, 200]) random_crop_op = vision.RandomCrop([512, 512], [200, 200, 200, 200])
decode_op = c_vision.Decode() decode_op = vision.Decode()
data = data.map(operations=decode_op, input_columns=["image"]) data = data.map(operations=decode_op, input_columns=["image"])
data = data.map(operations=decode_op, input_columns=["image_copy"]) data = data.map(operations=decode_op, input_columns=["image_copy"])

View File

@ -18,10 +18,8 @@ Testing RandomCropAndResize op in DE
import numpy as np import numpy as np
import cv2 import cv2
import mindspore.dataset.transforms.py_transforms import mindspore.dataset.transforms.transforms as ops
import mindspore.dataset.transforms.c_transforms as ops import mindspore.dataset.vision.transforms as vision
import mindspore.dataset.vision.c_transforms as c_vision
import mindspore.dataset.vision.py_transforms as py_vision
import mindspore.dataset.vision.utils as mode import mindspore.dataset.vision.utils as mode
import mindspore.dataset as ds import mindspore.dataset as ds
from mindspore.dataset.vision.utils import Inter from mindspore.dataset.vision.utils import Inter
@ -43,12 +41,12 @@ def test_random_crop_and_resize_callable():
img = np.fromfile("../data/dataset/apple.jpg", dtype=np.uint8) img = np.fromfile("../data/dataset/apple.jpg", dtype=np.uint8)
logger.info("Image.type: {}, Image.shape: {}".format(type(img), img.shape)) logger.info("Image.type: {}, Image.shape: {}".format(type(img), img.shape))
decode_op = c_vision.Decode() decode_op = vision.Decode()
img = decode_op(img) img = decode_op(img)
assert img.shape == (2268, 4032, 3) assert img.shape == (2268, 4032, 3)
# test one tensor # test one tensor
random_crop_and_resize_op1 = c_vision.RandomResizedCrop((256, 512), (2, 2), (1, 3)) random_crop_and_resize_op1 = vision.RandomResizedCrop((256, 512), (2, 2), (1, 3))
img1 = random_crop_and_resize_op1(img) img1 = random_crop_and_resize_op1(img)
assert img1.shape == (256, 512, 3) assert img1.shape == (256, 512, 3)
@ -61,9 +59,9 @@ def test_random_crop_and_resize_op_c(plot=False):
# First dataset # First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
decode_op = c_vision.Decode() decode_op = vision.Decode()
# With these inputs we expect the code to crop the whole image # With these inputs we expect the code to crop the whole image
random_crop_and_resize_op = c_vision.RandomResizedCrop((256, 512), (2, 2), (1, 3)) random_crop_and_resize_op = vision.RandomResizedCrop((256, 512), (2, 2), (1, 3))
data1 = data1.map(operations=decode_op, input_columns=["image"]) data1 = data1.map(operations=decode_op, input_columns=["image"])
data1 = data1.map(operations=random_crop_and_resize_op, input_columns=["image"]) data1 = data1.map(operations=random_crop_and_resize_op, input_columns=["image"])
@ -98,20 +96,20 @@ def test_random_crop_and_resize_op_py(plot=False):
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
# With these inputs we expect the code to crop the whole image # With these inputs we expect the code to crop the whole image
transforms1 = [ transforms1 = [
py_vision.Decode(), vision.Decode(True),
py_vision.RandomResizedCrop((256, 512), (2, 2), (1, 3)), vision.RandomResizedCrop((256, 512), (2, 2), (1, 3)),
py_vision.ToTensor() vision.ToTensor()
] ]
transform1 = mindspore.dataset.transforms.py_transforms.Compose(transforms1) transform1 = ops.Compose(transforms1)
data1 = data1.map(operations=transform1, input_columns=["image"]) data1 = data1.map(operations=transform1, input_columns=["image"])
# Second dataset # Second dataset
# Second dataset for comparison # Second dataset for comparison
data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
transforms2 = [ transforms2 = [
py_vision.Decode(), vision.Decode(True),
py_vision.ToTensor() vision.ToTensor()
] ]
transform2 = mindspore.dataset.transforms.py_transforms.Compose(transforms2) transform2 = ops.Compose(transforms2)
data2 = data2.map(operations=transform2, input_columns=["image"]) data2 = data2.map(operations=transform2, input_columns=["image"])
num_iter = 0 num_iter = 0
crop_and_resize_images = [] crop_and_resize_images = []
@ -140,11 +138,11 @@ def test_random_crop_and_resize_op_py_ANTIALIAS():
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
# With these inputs we expect the code to crop the whole image # With these inputs we expect the code to crop the whole image
transforms1 = [ transforms1 = [
py_vision.Decode(), vision.Decode(True),
py_vision.RandomResizedCrop((256, 512), (2, 2), (1, 3), Inter.ANTIALIAS), vision.RandomResizedCrop((256, 512), (2, 2), (1, 3), Inter.ANTIALIAS),
py_vision.ToTensor() vision.ToTensor()
] ]
transform1 = mindspore.dataset.transforms.py_transforms.Compose(transforms1) transform1 = ops.Compose(transforms1)
data1 = data1.map(operations=transform1, input_columns=["image"]) data1 = data1.map(operations=transform1, input_columns=["image"])
num_iter = 0 num_iter = 0
for _ in data1.create_dict_iterator(num_epochs=1, output_numpy=True): for _ in data1.create_dict_iterator(num_epochs=1, output_numpy=True):
@ -161,19 +159,19 @@ def test_random_crop_and_resize_01():
# First dataset # First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
decode_op = c_vision.Decode() decode_op = vision.Decode()
random_crop_and_resize_op = c_vision.RandomResizedCrop((256, 512), (0.5, 0.5), (1, 1)) random_crop_and_resize_op = vision.RandomResizedCrop((256, 512), (0.5, 0.5), (1, 1))
data1 = data1.map(operations=decode_op, input_columns=["image"]) data1 = data1.map(operations=decode_op, input_columns=["image"])
data1 = data1.map(operations=random_crop_and_resize_op, input_columns=["image"]) data1 = data1.map(operations=random_crop_and_resize_op, input_columns=["image"])
# Second dataset # Second dataset
data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
transforms = [ transforms = [
py_vision.Decode(), vision.Decode(True),
py_vision.RandomResizedCrop((256, 512), (0.5, 0.5), (1, 1)), vision.RandomResizedCrop((256, 512), (0.5, 0.5), (1, 1)),
py_vision.ToTensor() vision.ToTensor()
] ]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms) transform = ops.Compose(transforms)
data2 = data2.map(operations=transform, input_columns=["image"]) data2 = data2.map(operations=transform, input_columns=["image"])
filename1 = "random_crop_and_resize_01_c_result.npz" filename1 = "random_crop_and_resize_01_c_result.npz"
@ -197,19 +195,19 @@ def test_random_crop_and_resize_02():
# First dataset # First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
decode_op = c_vision.Decode() decode_op = vision.Decode()
random_crop_and_resize_op = c_vision.RandomResizedCrop((256, 512), interpolation=mode.Inter.NEAREST) random_crop_and_resize_op = vision.RandomResizedCrop((256, 512), interpolation=mode.Inter.NEAREST)
data1 = data1.map(operations=decode_op, input_columns=["image"]) data1 = data1.map(operations=decode_op, input_columns=["image"])
data1 = data1.map(operations=random_crop_and_resize_op, input_columns=["image"]) data1 = data1.map(operations=random_crop_and_resize_op, input_columns=["image"])
# Second dataset # Second dataset
data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
transforms = [ transforms = [
py_vision.Decode(), vision.Decode(True),
py_vision.RandomResizedCrop((256, 512), interpolation=mode.Inter.NEAREST), vision.RandomResizedCrop((256, 512), interpolation=mode.Inter.NEAREST),
py_vision.ToTensor() vision.ToTensor()
] ]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms) transform = ops.Compose(transforms)
data2 = data2.map(operations=transform, input_columns=["image"]) data2 = data2.map(operations=transform, input_columns=["image"])
filename1 = "random_crop_and_resize_02_c_result.npz" filename1 = "random_crop_and_resize_02_c_result.npz"
@ -232,19 +230,19 @@ def test_random_crop_and_resize_03():
# First dataset # First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
decode_op = c_vision.Decode() decode_op = vision.Decode()
random_crop_and_resize_op = c_vision.RandomResizedCrop((256, 512), max_attempts=1) random_crop_and_resize_op = vision.RandomResizedCrop((256, 512), max_attempts=1)
data1 = data1.map(operations=decode_op, input_columns=["image"]) data1 = data1.map(operations=decode_op, input_columns=["image"])
data1 = data1.map(operations=random_crop_and_resize_op, input_columns=["image"]) data1 = data1.map(operations=random_crop_and_resize_op, input_columns=["image"])
# Second dataset # Second dataset
data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
transforms = [ transforms = [
py_vision.Decode(), vision.Decode(True),
py_vision.RandomResizedCrop((256, 512), max_attempts=1), vision.RandomResizedCrop((256, 512), max_attempts=1),
py_vision.ToTensor() vision.ToTensor()
] ]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms) transform = ops.Compose(transforms)
data2 = data2.map(operations=transform, input_columns=["image"]) data2 = data2.map(operations=transform, input_columns=["image"])
filename1 = "random_crop_and_resize_03_c_result.npz" filename1 = "random_crop_and_resize_03_c_result.npz"
@ -266,10 +264,10 @@ def test_random_crop_and_resize_04_c():
# Generate dataset # Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
decode_op = c_vision.Decode() decode_op = vision.Decode()
try: try:
# If input range of scale is not in the order of (min, max), ValueError will be raised. # If input range of scale is not in the order of (min, max), ValueError will be raised.
random_crop_and_resize_op = c_vision.RandomResizedCrop((256, 512), (1, 0.5), (0.5, 0.5)) random_crop_and_resize_op = vision.RandomResizedCrop((256, 512), (1, 0.5), (0.5, 0.5))
data = data.map(operations=decode_op, input_columns=["image"]) data = data.map(operations=decode_op, input_columns=["image"])
data = data.map(operations=random_crop_and_resize_op, input_columns=["image"]) data = data.map(operations=random_crop_and_resize_op, input_columns=["image"])
except ValueError as e: except ValueError as e:
@ -279,7 +277,7 @@ def test_random_crop_and_resize_04_c():
def test_random_crop_and_resize_04_py(): def test_random_crop_and_resize_04_py():
""" """
Test RandomCropAndResize with py_transforms: invalid range of scale (max<min), Test RandomCropAndResize with transforms: invalid range of scale (max<min),
expected to raise ValueError expected to raise ValueError
""" """
logger.info("test_random_crop_and_resize_04_py") logger.info("test_random_crop_and_resize_04_py")
@ -288,12 +286,12 @@ def test_random_crop_and_resize_04_py():
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
try: try:
transforms = [ transforms = [
py_vision.Decode(), vision.Decode(True),
# If input range of scale is not in the order of (min, max), ValueError will be raised. # If input range of scale is not in the order of (min, max), ValueError will be raised.
py_vision.RandomResizedCrop((256, 512), (1, 0.5), (0.5, 0.5)), vision.RandomResizedCrop((256, 512), (1, 0.5), (0.5, 0.5)),
py_vision.ToTensor() vision.ToTensor()
] ]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms) transform = ops.Compose(transforms)
data = data.map(operations=transform, input_columns=["image"]) data = data.map(operations=transform, input_columns=["image"])
except ValueError as e: except ValueError as e:
logger.info("Got an exception in DE: {}".format(str(e))) logger.info("Got an exception in DE: {}".format(str(e)))
@ -302,16 +300,16 @@ def test_random_crop_and_resize_04_py():
def test_random_crop_and_resize_05_c(): def test_random_crop_and_resize_05_c():
""" """
Test RandomCropAndResize with c_transforms: invalid range of ratio (max<min), Test RandomCropAndResize with C implementation: invalid range of ratio (max<min),
expected to raise ValueError expected to raise ValueError
""" """
logger.info("test_random_crop_and_resize_05_c") logger.info("test_random_crop_and_resize_05_c")
# Generate dataset # Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
decode_op = c_vision.Decode() decode_op = vision.Decode()
try: try:
random_crop_and_resize_op = c_vision.RandomResizedCrop((256, 512), (1, 1), (1, 0.5)) random_crop_and_resize_op = vision.RandomResizedCrop((256, 512), (1, 1), (1, 0.5))
# If input range of ratio is not in the order of (min, max), ValueError will be raised. # If input range of ratio is not in the order of (min, max), ValueError will be raised.
data = data.map(operations=decode_op, input_columns=["image"]) data = data.map(operations=decode_op, input_columns=["image"])
data = data.map(operations=random_crop_and_resize_op, input_columns=["image"]) data = data.map(operations=random_crop_and_resize_op, input_columns=["image"])
@ -322,7 +320,7 @@ def test_random_crop_and_resize_05_c():
def test_random_crop_and_resize_05_py(): def test_random_crop_and_resize_05_py():
""" """
Test RandomCropAndResize with py_transforms: invalid range of ratio (max<min), Test RandomCropAndResize with transforms: invalid range of ratio (max<min),
expected to raise ValueError expected to raise ValueError
""" """
logger.info("test_random_crop_and_resize_05_py") logger.info("test_random_crop_and_resize_05_py")
@ -331,12 +329,12 @@ def test_random_crop_and_resize_05_py():
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
try: try:
transforms = [ transforms = [
py_vision.Decode(), vision.Decode(True),
# If input range of ratio is not in the order of (min, max), ValueError will be raised. # If input range of ratio is not in the order of (min, max), ValueError will be raised.
py_vision.RandomResizedCrop((256, 512), (1, 1), (1, 0.5)), vision.RandomResizedCrop((256, 512), (1, 1), (1, 0.5)),
py_vision.ToTensor() vision.ToTensor()
] ]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms) transform = ops.Compose(transforms)
data = data.map(operations=transform, input_columns=["image"]) data = data.map(operations=transform, input_columns=["image"])
except ValueError as e: except ValueError as e:
logger.info("Got an exception in DE: {}".format(str(e))) logger.info("Got an exception in DE: {}".format(str(e)))
@ -351,19 +349,19 @@ def test_random_crop_and_resize_comp(plot=False):
# First dataset # First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
decode_op = c_vision.Decode() decode_op = vision.Decode()
random_crop_and_resize_op = c_vision.RandomResizedCrop(512, (1, 1), (0.5, 0.5)) random_crop_and_resize_op = vision.RandomResizedCrop(512, (1, 1), (0.5, 0.5))
data1 = data1.map(operations=decode_op, input_columns=["image"]) data1 = data1.map(operations=decode_op, input_columns=["image"])
data1 = data1.map(operations=random_crop_and_resize_op, input_columns=["image"]) data1 = data1.map(operations=random_crop_and_resize_op, input_columns=["image"])
# Second dataset # Second dataset
data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
transforms = [ transforms = [
py_vision.Decode(), vision.Decode(True),
py_vision.RandomResizedCrop(512, (1, 1), (0.5, 0.5)), vision.RandomResizedCrop(512, (1, 1), (0.5, 0.5)),
py_vision.ToTensor() vision.ToTensor()
] ]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms) transform = ops.Compose(transforms)
data2 = data2.map(operations=transform, input_columns=["image"]) data2 = data2.map(operations=transform, input_columns=["image"])
image_c_cropped = [] image_c_cropped = []
@ -382,16 +380,16 @@ def test_random_crop_and_resize_comp(plot=False):
def test_random_crop_and_resize_06(): def test_random_crop_and_resize_06():
""" """
Test RandomCropAndResize with c_transforms: invalid values for scale, Test RandomCropAndResize with C implementation: invalid values for scale,
expected to raise ValueError expected to raise ValueError
""" """
logger.info("test_random_crop_and_resize_05_c") logger.info("test_random_crop_and_resize_05_c")
# Generate dataset # Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
decode_op = c_vision.Decode() decode_op = vision.Decode()
try: try:
random_crop_and_resize_op = c_vision.RandomResizedCrop((256, 512), scale="", ratio=(1, 0.5)) random_crop_and_resize_op = vision.RandomResizedCrop((256, 512), scale="", ratio=(1, 0.5))
data = data.map(operations=decode_op, input_columns=["image"]) data = data.map(operations=decode_op, input_columns=["image"])
data.map(operations=random_crop_and_resize_op, input_columns=["image"]) data.map(operations=random_crop_and_resize_op, input_columns=["image"])
except TypeError as e: except TypeError as e:
@ -399,7 +397,7 @@ def test_random_crop_and_resize_06():
assert "Argument scale with value \"\" is not of type [<class 'tuple'>, <class 'list'>]" in str(e) assert "Argument scale with value \"\" is not of type [<class 'tuple'>, <class 'list'>]" in str(e)
try: try:
random_crop_and_resize_op = c_vision.RandomResizedCrop((256, 512), scale=(1, "2"), ratio=(1, 0.5)) random_crop_and_resize_op = vision.RandomResizedCrop((256, 512), scale=(1, "2"), ratio=(1, 0.5))
data = data.map(operations=decode_op, input_columns=["image"]) data = data.map(operations=decode_op, input_columns=["image"])
data.map(operations=random_crop_and_resize_op, input_columns=["image"]) data.map(operations=random_crop_and_resize_op, input_columns=["image"])
except TypeError as e: except TypeError as e:
@ -415,8 +413,8 @@ def test_random_crop_and_resize_07():
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
data = data.map(operations=ops.Duplicate(), input_columns=["image"], data = data.map(operations=ops.Duplicate(), input_columns=["image"],
output_columns=["image", "image_copy"], column_order=["image", "image_copy"]) output_columns=["image", "image_copy"], column_order=["image", "image_copy"])
random_crop_and_resize_op = c_vision.RandomResizedCrop((256, 512), (2, 2), (1, 3)) random_crop_and_resize_op = vision.RandomResizedCrop((256, 512), (2, 2), (1, 3))
decode_op = c_vision.Decode() decode_op = vision.Decode()
data = data.map(operations=decode_op, input_columns=["image"]) data = data.map(operations=decode_op, input_columns=["image"])
data = data.map(operations=decode_op, input_columns=["image_copy"]) data = data.map(operations=decode_op, input_columns=["image_copy"])

View File

@ -17,7 +17,7 @@ Testing RandomCropAndResizeWithBBox op in DE
""" """
import numpy as np import numpy as np
import mindspore.dataset as ds import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as c_vision import mindspore.dataset.vision.transforms as vision
from mindspore import log as logger from mindspore import log as logger
from util import visualize_with_bounding_boxes, InvalidBBoxType, check_bad_bbox, \ from util import visualize_with_bounding_boxes, InvalidBBoxType, check_bad_bbox, \
@ -45,7 +45,7 @@ def test_random_resized_crop_with_bbox_op_c(plot_vis=False):
dataVoc1 = ds.VOCDataset(DATA_DIR_VOC, task="Detection", usage="train", shuffle=False, decode=True) dataVoc1 = ds.VOCDataset(DATA_DIR_VOC, task="Detection", usage="train", shuffle=False, decode=True)
dataVoc2 = ds.VOCDataset(DATA_DIR_VOC, task="Detection", usage="train", shuffle=False, decode=True) dataVoc2 = ds.VOCDataset(DATA_DIR_VOC, task="Detection", usage="train", shuffle=False, decode=True)
test_op = c_vision.RandomResizedCropWithBBox((256, 512), (0.5, 0.5), (0.5, 0.5)) test_op = vision.RandomResizedCropWithBBox((256, 512), (0.5, 0.5), (0.5, 0.5))
# map to apply ops # map to apply ops
dataVoc2 = dataVoc2.map(operations=[test_op], input_columns=["image", "bbox"], dataVoc2 = dataVoc2.map(operations=[test_op], input_columns=["image", "bbox"],
@ -83,7 +83,7 @@ def test_random_resized_crop_with_bbox_op_coco_c(plot_vis=False):
dataCoco2 = ds.CocoDataset(DATA_DIR_COCO[0], annotation_file=DATA_DIR_COCO[1], task="Detection", dataCoco2 = ds.CocoDataset(DATA_DIR_COCO[0], annotation_file=DATA_DIR_COCO[1], task="Detection",
decode=True, shuffle=False) decode=True, shuffle=False)
test_op = c_vision.RandomResizedCropWithBBox((512, 512), (0.5, 1), (0.5, 1)) test_op = vision.RandomResizedCropWithBBox((512, 512), (0.5, 1), (0.5, 1))
dataCoco2 = dataCoco2.map(operations=[test_op], input_columns=["image", "bbox"], dataCoco2 = dataCoco2.map(operations=[test_op], input_columns=["image", "bbox"],
output_columns=["image", "bbox"], output_columns=["image", "bbox"],
@ -111,7 +111,7 @@ def test_random_resized_crop_with_bbox_op_edge_c(plot_vis=False):
dataVoc1 = ds.VOCDataset(DATA_DIR_VOC, task="Detection", usage="train", shuffle=False, decode=True) dataVoc1 = ds.VOCDataset(DATA_DIR_VOC, task="Detection", usage="train", shuffle=False, decode=True)
dataVoc2 = ds.VOCDataset(DATA_DIR_VOC, task="Detection", usage="train", shuffle=False, decode=True) dataVoc2 = ds.VOCDataset(DATA_DIR_VOC, task="Detection", usage="train", shuffle=False, decode=True)
test_op = c_vision.RandomResizedCropWithBBox((256, 512), (0.5, 0.5), (0.5, 0.5)) test_op = vision.RandomResizedCropWithBBox((256, 512), (0.5, 0.5), (0.5, 0.5))
# maps to convert data into valid edge case data # maps to convert data into valid edge case data
dataVoc1 = dataVoc1.map( dataVoc1 = dataVoc1.map(
@ -149,7 +149,7 @@ def test_random_resized_crop_with_bbox_op_invalid_c():
try: try:
# If input range of scale is not in the order of (min, max), ValueError will be raised. # If input range of scale is not in the order of (min, max), ValueError will be raised.
test_op = c_vision.RandomResizedCropWithBBox((256, 512), (1, 0.5), (0.5, 0.5)) test_op = vision.RandomResizedCropWithBBox((256, 512), (1, 0.5), (0.5, 0.5))
# map to apply ops # map to apply ops
dataVoc2 = dataVoc2.map(operations=[test_op], input_columns=["image", "bbox"], dataVoc2 = dataVoc2.map(operations=[test_op], input_columns=["image", "bbox"],
@ -174,7 +174,7 @@ def test_random_resized_crop_with_bbox_op_invalid2_c():
try: try:
# If input range of ratio is not in the order of (min, max), ValueError will be raised. # If input range of ratio is not in the order of (min, max), ValueError will be raised.
test_op = c_vision.RandomResizedCropWithBBox((256, 512), (1, 1), (1, 0.5)) test_op = vision.RandomResizedCropWithBBox((256, 512), (1, 1), (1, 0.5))
# map to apply ops # map to apply ops
dataVoc2 = dataVoc2.map(operations=[test_op], input_columns=["image", "bbox"], dataVoc2 = dataVoc2.map(operations=[test_op], input_columns=["image", "bbox"],
@ -194,7 +194,7 @@ def test_random_resized_crop_with_bbox_op_bad_c():
Test RandomCropWithBBox op with invalid bounding boxes, expected to catch multiple errors. Test RandomCropWithBBox op with invalid bounding boxes, expected to catch multiple errors.
""" """
logger.info("test_random_resized_crop_with_bbox_op_bad_c") logger.info("test_random_resized_crop_with_bbox_op_bad_c")
test_op = c_vision.RandomResizedCropWithBBox((256, 512), (0.5, 0.5), (0.5, 0.5)) test_op = vision.RandomResizedCropWithBBox((256, 512), (0.5, 0.5), (0.5, 0.5))
data_voc2 = ds.VOCDataset(DATA_DIR_VOC, task="Detection", usage="train", shuffle=False, decode=True) data_voc2 = ds.VOCDataset(DATA_DIR_VOC, task="Detection", usage="train", shuffle=False, decode=True)
check_bad_bbox(data_voc2, test_op, InvalidBBoxType.WidthOverflow, "bounding boxes is out of bounds of the image") check_bad_bbox(data_voc2, test_op, InvalidBBoxType.WidthOverflow, "bounding boxes is out of bounds of the image")

View File

@ -16,7 +16,7 @@
Testing RandomCropDecodeResize op in DE Testing RandomCropDecodeResize op in DE
""" """
import mindspore.dataset as ds import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as vision import mindspore.dataset.vision.transforms as vision
from mindspore import log as logger from mindspore import log as logger
from util import diff_mse, visualize_image, save_and_check_md5, \ from util import diff_mse, visualize_image, save_and_check_md5, \
config_get_set_seed, config_get_set_num_parallel_workers config_get_set_seed, config_get_set_num_parallel_workers

View File

@ -17,7 +17,7 @@ Testing RandomCropWithBBox op in DE
""" """
import numpy as np import numpy as np
import mindspore.dataset as ds import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as c_vision import mindspore.dataset.vision.transforms as vision
import mindspore.dataset.vision.utils as mode import mindspore.dataset.vision.utils as mode
from mindspore import log as logger from mindspore import log as logger
@ -43,7 +43,7 @@ def test_random_crop_with_bbox_op_c(plot_vis=False):
dataVoc2 = ds.VOCDataset(DATA_DIR_VOC, task="Detection", usage="train", shuffle=False, decode=True) dataVoc2 = ds.VOCDataset(DATA_DIR_VOC, task="Detection", usage="train", shuffle=False, decode=True)
# define test OP with values to match existing Op UT # define test OP with values to match existing Op UT
test_op = c_vision.RandomCropWithBBox([512, 512], [200, 200, 200, 200]) test_op = vision.RandomCropWithBBox([512, 512], [200, 200, 200, 200])
# map to apply ops # map to apply ops
dataVoc2 = dataVoc2.map(operations=[test_op], input_columns=["image", "bbox"], dataVoc2 = dataVoc2.map(operations=[test_op], input_columns=["image", "bbox"],
@ -74,7 +74,7 @@ def test_random_crop_with_bbox_op_coco_c(plot_vis=False):
dataCoco2 = ds.CocoDataset(DATA_DIR_COCO[0], annotation_file=DATA_DIR_COCO[1], task="Detection", dataCoco2 = ds.CocoDataset(DATA_DIR_COCO[0], annotation_file=DATA_DIR_COCO[1], task="Detection",
decode=True, shuffle=False) decode=True, shuffle=False)
test_op = c_vision.RandomCropWithBBox([512, 512], [200, 200, 200, 200]) test_op = vision.RandomCropWithBBox([512, 512], [200, 200, 200, 200])
dataCoco2 = dataCoco2.map(operations=[test_op], input_columns=["image", "bbox"], dataCoco2 = dataCoco2.map(operations=[test_op], input_columns=["image", "bbox"],
output_columns=["image", "bbox"], output_columns=["image", "bbox"],
@ -105,7 +105,7 @@ def test_random_crop_with_bbox_op2_c(plot_vis=False):
dataVoc2 = ds.VOCDataset(DATA_DIR_VOC, task="Detection", usage="train", shuffle=False, decode=True) dataVoc2 = ds.VOCDataset(DATA_DIR_VOC, task="Detection", usage="train", shuffle=False, decode=True)
# define test OP with values to match existing Op unit - test # define test OP with values to match existing Op unit - test
test_op = c_vision.RandomCropWithBBox(512, [200, 200, 200, 200], fill_value=(255, 255, 255)) test_op = vision.RandomCropWithBBox(512, [200, 200, 200, 200], fill_value=(255, 255, 255))
# map to apply ops # map to apply ops
dataVoc2 = dataVoc2.map(operations=[test_op], input_columns=["image", "bbox"], dataVoc2 = dataVoc2.map(operations=[test_op], input_columns=["image", "bbox"],
@ -142,7 +142,7 @@ def test_random_crop_with_bbox_op3_c(plot_vis=False):
dataVoc2 = ds.VOCDataset(DATA_DIR_VOC, task="Detection", usage="train", shuffle=False, decode=True) dataVoc2 = ds.VOCDataset(DATA_DIR_VOC, task="Detection", usage="train", shuffle=False, decode=True)
# define test OP with values to match existing Op unit - test # define test OP with values to match existing Op unit - test
test_op = c_vision.RandomCropWithBBox(512, [200, 200, 200, 200], padding_mode=mode.Border.EDGE) test_op = vision.RandomCropWithBBox(512, [200, 200, 200, 200], padding_mode=mode.Border.EDGE)
# map to apply ops # map to apply ops
dataVoc2 = dataVoc2.map(operations=[test_op], input_columns=["image", "bbox"], dataVoc2 = dataVoc2.map(operations=[test_op], input_columns=["image", "bbox"],
@ -172,7 +172,7 @@ def test_random_crop_with_bbox_op_edge_c(plot_vis=False):
dataVoc2 = ds.VOCDataset(DATA_DIR_VOC, task="Detection", usage="train", shuffle=False, decode=True) dataVoc2 = ds.VOCDataset(DATA_DIR_VOC, task="Detection", usage="train", shuffle=False, decode=True)
# define test OP with values to match existing Op unit - test # define test OP with values to match existing Op unit - test
test_op = c_vision.RandomCropWithBBox(512, [200, 200, 200, 200], padding_mode=mode.Border.EDGE) test_op = vision.RandomCropWithBBox(512, [200, 200, 200, 200], padding_mode=mode.Border.EDGE)
# maps to convert data into valid edge case data # maps to convert data into valid edge case data
dataVoc1 = dataVoc1.map( dataVoc1 = dataVoc1.map(
@ -210,7 +210,7 @@ def test_random_crop_with_bbox_op_invalid_c():
try: try:
# define test OP with values to match existing Op unit - test # define test OP with values to match existing Op unit - test
test_op = c_vision.RandomCropWithBBox([512, 512, 375]) test_op = vision.RandomCropWithBBox([512, 512, 375])
# map to apply ops # map to apply ops
dataVoc2 = dataVoc2.map(operations=[test_op], input_columns=["image", "bbox"], dataVoc2 = dataVoc2.map(operations=[test_op], input_columns=["image", "bbox"],
@ -229,7 +229,7 @@ def test_random_crop_with_bbox_op_bad_c():
Tests RandomCropWithBBox Op with invalid bounding boxes, expected to catch multiple errors. Tests RandomCropWithBBox Op with invalid bounding boxes, expected to catch multiple errors.
""" """
logger.info("test_random_crop_with_bbox_op_bad_c") logger.info("test_random_crop_with_bbox_op_bad_c")
test_op = c_vision.RandomCropWithBBox([512, 512], [200, 200, 200, 200]) test_op = vision.RandomCropWithBBox([512, 512], [200, 200, 200, 200])
data_voc2 = ds.VOCDataset(DATA_DIR_VOC, task="Detection", usage="train", shuffle=False, decode=True) data_voc2 = ds.VOCDataset(DATA_DIR_VOC, task="Detection", usage="train", shuffle=False, decode=True)
check_bad_bbox(data_voc2, test_op, InvalidBBoxType.WidthOverflow, "bounding boxes is out of bounds of the image") check_bad_bbox(data_voc2, test_op, InvalidBBoxType.WidthOverflow, "bounding boxes is out of bounds of the image")
@ -250,7 +250,7 @@ def test_random_crop_with_bbox_op_bad_padding():
dataVoc2 = ds.VOCDataset(DATA_DIR_VOC, task="Detection", usage="train", shuffle=False, decode=True) dataVoc2 = ds.VOCDataset(DATA_DIR_VOC, task="Detection", usage="train", shuffle=False, decode=True)
try: try:
test_op = c_vision.RandomCropWithBBox([512, 512], padding=-1) test_op = vision.RandomCropWithBBox([512, 512], padding=-1)
dataVoc2 = dataVoc2.map(operations=[test_op], input_columns=["image", "bbox"], dataVoc2 = dataVoc2.map(operations=[test_op], input_columns=["image", "bbox"],
output_columns=["image", "bbox"], output_columns=["image", "bbox"],
@ -263,7 +263,7 @@ def test_random_crop_with_bbox_op_bad_padding():
assert "Input padding is not within the required interval of [0, 2147483647]." in str(err) assert "Input padding is not within the required interval of [0, 2147483647]." in str(err)
try: try:
test_op = c_vision.RandomCropWithBBox([512, 512], padding=[16777216, 16777216, 16777216, 16777216]) test_op = vision.RandomCropWithBBox([512, 512], padding=[16777216, 16777216, 16777216, 16777216])
dataVoc2 = dataVoc2.map(operations=[test_op], input_columns=["image", "bbox"], dataVoc2 = dataVoc2.map(operations=[test_op], input_columns=["image", "bbox"],
output_columns=["image", "bbox"], output_columns=["image", "bbox"],

View File

@ -18,7 +18,7 @@ Testing RandomEqualize op in DE
import numpy as np import numpy as np
import mindspore.dataset as ds import mindspore.dataset as ds
from mindspore.dataset.vision.c_transforms import Decode, Resize, RandomEqualize, Equalize from mindspore.dataset.vision.transforms import Decode, Resize, RandomEqualize, Equalize
from mindspore import log as logger from mindspore import log as logger
from util import visualize_list, visualize_image, diff_mse from util import visualize_list, visualize_image, diff_mse

View File

@ -18,8 +18,8 @@ Testing RandomErasing op in DE
import numpy as np import numpy as np
import mindspore.dataset as ds import mindspore.dataset as ds
import mindspore.dataset.transforms.py_transforms import mindspore.dataset.transforms.transforms
import mindspore.dataset.vision.py_transforms as vision import mindspore.dataset.vision.transforms as vision
from mindspore import log as logger from mindspore import log as logger
from util import diff_mse, visualize_image, save_and_check_md5, \ from util import diff_mse, visualize_image, save_and_check_md5, \
config_get_set_seed, config_get_set_num_parallel_workers config_get_set_seed, config_get_set_num_parallel_workers
@ -32,28 +32,28 @@ GENERATE_GOLDEN = False
def test_random_erasing_op(plot=False): def test_random_erasing_op(plot=False):
""" """
Test RandomErasing and Cutout Test RandomErasing and CutOut
""" """
logger.info("test_random_erasing") logger.info("test_random_erasing")
# First dataset # First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
transforms_1 = [ transforms_1 = [
vision.Decode(), vision.Decode(True),
vision.ToTensor(), vision.ToTensor(),
vision.RandomErasing(value='random') vision.RandomErasing(value='random')
] ]
transform_1 = mindspore.dataset.transforms.py_transforms.Compose(transforms_1) transform_1 = mindspore.dataset.transforms.transforms.Compose(transforms_1)
data1 = data1.map(operations=transform_1, input_columns=["image"]) data1 = data1.map(operations=transform_1, input_columns=["image"])
# Second dataset # Second dataset
data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
transforms_2 = [ transforms_2 = [
vision.Decode(), vision.Decode(True),
vision.ToTensor(), vision.ToTensor(),
vision.Cutout(80) vision.CutOut(80, is_hwc=False)
] ]
transform_2 = mindspore.dataset.transforms.py_transforms.Compose(transforms_2) transform_2 = mindspore.dataset.transforms.transforms.Compose(transforms_2)
data2 = data2.map(operations=transform_2, input_columns=["image"]) data2 = data2.map(operations=transform_2, input_columns=["image"])
num_iter = 0 num_iter = 0
@ -85,11 +85,11 @@ def test_random_erasing_md5():
# Generate dataset # Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
transforms_1 = [ transforms_1 = [
vision.Decode(), vision.Decode(True),
vision.ToTensor(), vision.ToTensor(),
vision.RandomErasing(value='random') vision.RandomErasing(value='random')
] ]
transform_1 = mindspore.dataset.transforms.py_transforms.Compose(transforms_1) transform_1 = mindspore.dataset.transforms.transforms.Compose(transforms_1)
data = data.map(operations=transform_1, input_columns=["image"]) data = data.map(operations=transform_1, input_columns=["image"])
# Compare with expected md5 from images # Compare with expected md5 from images
filename = "random_erasing_01_result.npz" filename = "random_erasing_01_result.npz"

View File

@ -17,8 +17,8 @@ Testing RandomGrayscale op in DE
""" """
import numpy as np import numpy as np
import mindspore.dataset.transforms.py_transforms import mindspore.dataset.transforms.transforms
import mindspore.dataset.vision.py_transforms as py_vision import mindspore.dataset.vision.transforms as vision
import mindspore.dataset as ds import mindspore.dataset as ds
from mindspore import log as logger from mindspore import log as logger
from util import save_and_check_md5, visualize_list, \ from util import save_and_check_md5, visualize_list, \
@ -37,23 +37,25 @@ def test_random_grayscale_valid_prob(plot=False):
logger.info("test_random_grayscale_valid_prob") logger.info("test_random_grayscale_valid_prob")
# First dataset # First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=[
"image"], shuffle=False)
transforms1 = [ transforms1 = [
py_vision.Decode(), vision.Decode(True),
# Note: prob is 1 so the output should always be grayscale images # Note: prob is 1 so the output should always be grayscale images
py_vision.RandomGrayscale(1), vision.RandomGrayscale(1),
py_vision.ToTensor() vision.ToTensor()
] ]
transform1 = mindspore.dataset.transforms.py_transforms.Compose(transforms1) transform1 = mindspore.dataset.transforms.transforms.Compose(transforms1)
data1 = data1.map(operations=transform1, input_columns=["image"]) data1 = data1.map(operations=transform1, input_columns=["image"])
# Second dataset # Second dataset
data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=[
"image"], shuffle=False)
transforms2 = [ transforms2 = [
py_vision.Decode(), vision.Decode(True),
py_vision.ToTensor() vision.ToTensor()
] ]
transform2 = mindspore.dataset.transforms.py_transforms.Compose(transforms2) transform2 = mindspore.dataset.transforms.transforms.Compose(transforms2)
data2 = data2.map(operations=transform2, input_columns=["image"]) data2 = data2.map(operations=transform2, input_columns=["image"])
image_gray = [] image_gray = []
@ -77,24 +79,26 @@ def test_random_grayscale_input_grayscale_images():
original_num_parallel_workers = config_get_set_num_parallel_workers(1) original_num_parallel_workers = config_get_set_num_parallel_workers(1)
# First dataset # First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=[
"image"], shuffle=False)
transforms1 = [ transforms1 = [
py_vision.Decode(), vision.Decode(True),
py_vision.Grayscale(1), vision.Grayscale(1),
# Note: If the input images is grayscale image with 1 channel. # Note: If the input images is grayscale image with 1 channel.
py_vision.RandomGrayscale(0.5), vision.RandomGrayscale(0.5),
py_vision.ToTensor() vision.ToTensor()
] ]
transform1 = mindspore.dataset.transforms.py_transforms.Compose(transforms1) transform1 = mindspore.dataset.transforms.transforms.Compose(transforms1)
data1 = data1.map(operations=transform1, input_columns=["image"]) data1 = data1.map(operations=transform1, input_columns=["image"])
# Second dataset # Second dataset
data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=[
"image"], shuffle=False)
transforms2 = [ transforms2 = [
py_vision.Decode(), vision.Decode(True),
py_vision.ToTensor() vision.ToTensor()
] ]
transform2 = mindspore.dataset.transforms.py_transforms.Compose(transforms2) transform2 = mindspore.dataset.transforms.transforms.Compose(transforms2)
data2 = data2.map(operations=transform2, input_columns=["image"]) data2 = data2.map(operations=transform2, input_columns=["image"])
image_gray = [] image_gray = []
@ -125,13 +129,14 @@ def test_random_grayscale_md5_valid_input():
original_num_parallel_workers = config_get_set_num_parallel_workers(1) original_num_parallel_workers = config_get_set_num_parallel_workers(1)
# Generate dataset # Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=[
"image"], shuffle=False)
transforms = [ transforms = [
py_vision.Decode(), vision.Decode(True),
py_vision.RandomGrayscale(0.8), vision.RandomGrayscale(0.8),
py_vision.ToTensor() vision.ToTensor()
] ]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms) transform = mindspore.dataset.transforms.transforms.Compose(transforms)
data = data.map(operations=transform, input_columns=["image"]) data = data.map(operations=transform, input_columns=["image"])
# Check output images with md5 comparison # Check output images with md5 comparison
@ -152,13 +157,14 @@ def test_random_grayscale_md5_no_param():
original_num_parallel_workers = config_get_set_num_parallel_workers(1) original_num_parallel_workers = config_get_set_num_parallel_workers(1)
# Generate dataset # Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=[
"image"], shuffle=False)
transforms = [ transforms = [
py_vision.Decode(), vision.Decode(True),
py_vision.RandomGrayscale(), vision.RandomGrayscale(),
py_vision.ToTensor() vision.ToTensor()
] ]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms) transform = mindspore.dataset.transforms.transforms.Compose(transforms)
data = data.map(operations=transform, input_columns=["image"]) data = data.map(operations=transform, input_columns=["image"])
# Check output images with md5 comparison # Check output images with md5 comparison
@ -177,18 +183,20 @@ def test_random_grayscale_invalid_param():
logger.info("test_random_grayscale_invalid_param") logger.info("test_random_grayscale_invalid_param")
# Generate dataset # Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=[
"image"], shuffle=False)
try: try:
transforms = [ transforms = [
py_vision.Decode(), vision.Decode(True),
py_vision.RandomGrayscale(1.5), vision.RandomGrayscale(1.5),
py_vision.ToTensor() vision.ToTensor()
] ]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms) transform = mindspore.dataset.transforms.transforms.Compose(transforms)
data = data.map(operations=transform, input_columns=["image"]) data = data.map(operations=transform, input_columns=["image"])
except ValueError as e: except ValueError as e:
logger.info("Got an exception in DE: {}".format(str(e))) logger.info("Got an exception in DE: {}".format(str(e)))
assert "Input prob is not within the required interval of [0.0, 1.0]." in str(e) assert "Input prob is not within the required interval of [0.0, 1.0]." in str(
e)
if __name__ == "__main__": if __name__ == "__main__":

View File

@ -17,10 +17,8 @@ Testing the random horizontal flip op in DE
""" """
import numpy as np import numpy as np
import mindspore.dataset as ds import mindspore.dataset as ds
import mindspore.dataset.transforms.py_transforms import mindspore.dataset.transforms.transforms as ops
import mindspore.dataset.transforms.c_transforms as ops import mindspore.dataset.vision.transforms as vision
import mindspore.dataset.vision.c_transforms as c_vision
import mindspore.dataset.vision.py_transforms as py_vision
from mindspore import log as logger from mindspore import log as logger
from util import save_and_check_md5, visualize_list, visualize_image, diff_mse, \ from util import save_and_check_md5, visualize_list, visualize_image, diff_mse, \
config_get_set_seed, config_get_set_num_parallel_workers config_get_set_seed, config_get_set_num_parallel_workers
@ -49,14 +47,16 @@ def test_random_horizontal_op(plot=False):
logger.info("test_random_horizontal_op") logger.info("test_random_horizontal_op")
# First dataset # First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=[
decode_op = c_vision.Decode() "image"], shuffle=False)
random_horizontal_op = c_vision.RandomHorizontalFlip(1.0) decode_op = vision.Decode()
random_horizontal_op = vision.RandomHorizontalFlip(1.0)
data1 = data1.map(operations=decode_op, input_columns=["image"]) data1 = data1.map(operations=decode_op, input_columns=["image"])
data1 = data1.map(operations=random_horizontal_op, input_columns=["image"]) data1 = data1.map(operations=random_horizontal_op, input_columns=["image"])
# Second dataset # Second dataset
data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=[
"image"], shuffle=False)
data2 = data2.map(operations=decode_op, input_columns=["image"]) data2 = data2.map(operations=decode_op, input_columns=["image"])
num_iter = 0 num_iter = 0
@ -81,16 +81,17 @@ def test_random_horizontal_op(plot=False):
def test_random_horizontal_valid_prob_c(): def test_random_horizontal_valid_prob_c():
""" """
Test RandomHorizontalFlip op with c_transforms: valid non-default input, expect to pass Test RandomHorizontalFlip op with C implementation: valid non-default input, expect to pass
""" """
logger.info("test_random_horizontal_valid_prob_c") logger.info("test_random_horizontal_valid_prob_c")
original_seed = config_get_set_seed(0) original_seed = config_get_set_seed(0)
original_num_parallel_workers = config_get_set_num_parallel_workers(1) original_num_parallel_workers = config_get_set_num_parallel_workers(1)
# Generate dataset # Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=[
decode_op = c_vision.Decode() "image"], shuffle=False)
random_horizontal_op = c_vision.RandomHorizontalFlip(0.8) decode_op = vision.Decode()
random_horizontal_op = vision.RandomHorizontalFlip(0.8)
data = data.map(operations=decode_op, input_columns=["image"]) data = data.map(operations=decode_op, input_columns=["image"])
data = data.map(operations=random_horizontal_op, input_columns=["image"]) data = data.map(operations=random_horizontal_op, input_columns=["image"])
@ -104,20 +105,21 @@ def test_random_horizontal_valid_prob_c():
def test_random_horizontal_valid_prob_py(): def test_random_horizontal_valid_prob_py():
""" """
Test RandomHorizontalFlip op with py_transforms: valid non-default input, expect to pass Test RandomHorizontalFlip op with transforms: valid non-default input, expect to pass
""" """
logger.info("test_random_horizontal_valid_prob_py") logger.info("test_random_horizontal_valid_prob_py")
original_seed = config_get_set_seed(0) original_seed = config_get_set_seed(0)
original_num_parallel_workers = config_get_set_num_parallel_workers(1) original_num_parallel_workers = config_get_set_num_parallel_workers(1)
# Generate dataset # Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=[
"image"], shuffle=False)
transforms = [ transforms = [
py_vision.Decode(), vision.Decode(True),
py_vision.RandomHorizontalFlip(0.8), vision.RandomHorizontalFlip(0.8),
py_vision.ToTensor() vision.ToTensor()
] ]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms) transform = ops.Compose(transforms)
data = data.map(operations=transform, input_columns=["image"]) data = data.map(operations=transform, input_columns=["image"])
filename = "random_horizontal_01_py_result.npz" filename = "random_horizontal_01_py_result.npz"
@ -130,44 +132,49 @@ def test_random_horizontal_valid_prob_py():
def test_random_horizontal_invalid_prob_c(): def test_random_horizontal_invalid_prob_c():
""" """
Test RandomHorizontalFlip op in c_transforms: invalid input, expect to raise error Test RandomHorizontalFlip op in C implementation: invalid input, expect to raise error
""" """
logger.info("test_random_horizontal_invalid_prob_c") logger.info("test_random_horizontal_invalid_prob_c")
# Generate dataset # Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=[
decode_op = c_vision.Decode() "image"], shuffle=False)
decode_op = vision.Decode()
try: try:
# Note: Valid range of prob should be [0.0, 1.0] # Note: Valid range of prob should be [0.0, 1.0]
random_horizontal_op = c_vision.RandomHorizontalFlip(1.5) random_horizontal_op = vision.RandomHorizontalFlip(1.5)
data = data.map(operations=decode_op, input_columns=["image"]) data = data.map(operations=decode_op, input_columns=["image"])
data = data.map(operations=random_horizontal_op, input_columns=["image"]) data = data.map(operations=random_horizontal_op,
input_columns=["image"])
except ValueError as e: except ValueError as e:
logger.info("Got an exception in DE: {}".format(str(e))) logger.info("Got an exception in DE: {}".format(str(e)))
assert "Input prob is not within the required interval of [0.0, 1.0]." in str(e) assert "Input prob is not within the required interval of [0.0, 1.0]." in str(
e)
def test_random_horizontal_invalid_prob_py(): def test_random_horizontal_invalid_prob_py():
""" """
Test RandomHorizontalFlip op in py_transforms: invalid input, expect to raise error Test RandomHorizontalFlip op in transforms: invalid input, expect to raise error
""" """
logger.info("test_random_horizontal_invalid_prob_py") logger.info("test_random_horizontal_invalid_prob_py")
# Generate dataset # Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=[
"image"], shuffle=False)
try: try:
transforms = [ transforms = [
py_vision.Decode(), vision.Decode(True),
# Note: Valid range of prob should be [0.0, 1.0] # Note: Valid range of prob should be [0.0, 1.0]
py_vision.RandomHorizontalFlip(1.5), vision.RandomHorizontalFlip(1.5),
py_vision.ToTensor() vision.ToTensor()
] ]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms) transform = ops.Compose(transforms)
data = data.map(operations=transform, input_columns=["image"]) data = data.map(operations=transform, input_columns=["image"])
except ValueError as e: except ValueError as e:
logger.info("Got an exception in DE: {}".format(str(e))) logger.info("Got an exception in DE: {}".format(str(e)))
assert "Input prob is not within the required interval of [0.0, 1.0]." in str(e) assert "Input prob is not within the required interval of [0.0, 1.0]." in str(
e)
def test_random_horizontal_comp(plot=False): def test_random_horizontal_comp(plot=False):
@ -176,22 +183,24 @@ def test_random_horizontal_comp(plot=False):
""" """
logger.info("test_random_horizontal_comp") logger.info("test_random_horizontal_comp")
# First dataset # First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=[
decode_op = c_vision.Decode() "image"], shuffle=False)
decode_op = vision.Decode()
# Note: The image must be flipped if prob is set to be 1 # Note: The image must be flipped if prob is set to be 1
random_horizontal_op = c_vision.RandomHorizontalFlip(1) random_horizontal_op = vision.RandomHorizontalFlip(1)
data1 = data1.map(operations=decode_op, input_columns=["image"]) data1 = data1.map(operations=decode_op, input_columns=["image"])
data1 = data1.map(operations=random_horizontal_op, input_columns=["image"]) data1 = data1.map(operations=random_horizontal_op, input_columns=["image"])
# Second dataset # Second dataset
data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=[
"image"], shuffle=False)
transforms = [ transforms = [
py_vision.Decode(), vision.Decode(True),
# Note: The image must be flipped if prob is set to be 1 # Note: The image must be flipped if prob is set to be 1
py_vision.RandomHorizontalFlip(1), vision.RandomHorizontalFlip(1),
py_vision.ToTensor() vision.ToTensor()
] ]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms) transform = ops.Compose(transforms)
data2 = data2.map(operations=transform, input_columns=["image"]) data2 = data2.map(operations=transform, input_columns=["image"])
images_list_c = [] images_list_c = []
@ -209,21 +218,24 @@ def test_random_horizontal_comp(plot=False):
if plot: if plot:
visualize_list(images_list_c, images_list_py, visualize_mode=2) visualize_list(images_list_c, images_list_py, visualize_mode=2)
def test_random_horizontal_op_1(): def test_random_horizontal_op_1():
""" """
Test RandomHorizontalFlip with different fields. Test RandomHorizontalFlip with different fields.
""" """
logger.info("Test RandomHorizontalFlip with different fields.") logger.info("Test RandomHorizontalFlip with different fields.")
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=[
"image"], shuffle=False)
data = data.map(operations=ops.Duplicate(), input_columns=["image"], data = data.map(operations=ops.Duplicate(), input_columns=["image"],
output_columns=["image", "image_copy"], column_order=["image", "image_copy"]) output_columns=["image", "image_copy"], column_order=["image", "image_copy"])
random_horizontal_op = c_vision.RandomHorizontalFlip(1.0) random_horizontal_op = vision.RandomHorizontalFlip(1.0)
decode_op = c_vision.Decode() decode_op = vision.Decode()
data = data.map(operations=decode_op, input_columns=["image"]) data = data.map(operations=decode_op, input_columns=["image"])
data = data.map(operations=decode_op, input_columns=["image_copy"]) data = data.map(operations=decode_op, input_columns=["image_copy"])
data = data.map(operations=random_horizontal_op, input_columns=["image", "image_copy"]) data = data.map(operations=random_horizontal_op,
input_columns=["image", "image_copy"])
num_iter = 0 num_iter = 0
for data1 in data.create_dict_iterator(num_epochs=1, output_numpy=True): for data1 in data.create_dict_iterator(num_epochs=1, output_numpy=True):

View File

@ -18,7 +18,7 @@ Testing the random horizontal flip with bounding boxes op in DE
import numpy as np import numpy as np
import mindspore.log as logger import mindspore.log as logger
import mindspore.dataset as ds import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as c_vision import mindspore.dataset.vision.transforms as vision
from util import visualize_with_bounding_boxes, InvalidBBoxType, check_bad_bbox, \ from util import visualize_with_bounding_boxes, InvalidBBoxType, check_bad_bbox, \
config_get_set_seed, config_get_set_num_parallel_workers, save_and_check_md5 config_get_set_seed, config_get_set_num_parallel_workers, save_and_check_md5
@ -37,25 +37,27 @@ def test_random_horizontal_flip_with_bbox_op_c(plot_vis=False):
logger.info("test_random_horizontal_flip_with_bbox_op_c") logger.info("test_random_horizontal_flip_with_bbox_op_c")
# Load dataset # Load dataset
dataVoc1 = ds.VOCDataset(DATA_DIR, task="Detection", usage="train", shuffle=False, decode=True) data_voc1 = ds.VOCDataset(DATA_DIR, task="Detection",
usage="train", shuffle=False, decode=True)
dataVoc2 = ds.VOCDataset(DATA_DIR, task="Detection", usage="train", shuffle=False, decode=True) data_voc2 = ds.VOCDataset(DATA_DIR, task="Detection",
usage="train", shuffle=False, decode=True)
test_op = c_vision.RandomHorizontalFlipWithBBox(1) test_op = vision.RandomHorizontalFlipWithBBox(1)
dataVoc2 = dataVoc2.map(operations=[test_op], input_columns=["image", "bbox"], data_voc2 = data_voc2.map(operations=[test_op], input_columns=["image", "bbox"],
output_columns=["image", "bbox"], output_columns=["image", "bbox"],
column_order=["image", "bbox"]) column_order=["image", "bbox"])
unaugSamp, augSamp = [], [] unaug_samp, aug_samp = [], []
for unAug, Aug in zip(dataVoc1.create_dict_iterator(num_epochs=1, output_numpy=True), for unaug, aug in zip(data_voc1.create_dict_iterator(num_epochs=1, output_numpy=True),
dataVoc2.create_dict_iterator(num_epochs=1, output_numpy=True)): data_voc2.create_dict_iterator(num_epochs=1, output_numpy=True)):
unaugSamp.append(unAug) unaug_samp.append(unaug)
augSamp.append(Aug) aug_samp.append(aug)
if plot_vis: if plot_vis:
visualize_with_bounding_boxes(unaugSamp, augSamp) visualize_with_bounding_boxes(unaug_samp, aug_samp)
def test_random_horizontal_flip_with_bbox_op_coco_c(plot_vis=False): def test_random_horizontal_flip_with_bbox_op_coco_c(plot_vis=False):
@ -71,27 +73,27 @@ def test_random_horizontal_flip_with_bbox_op_coco_c(plot_vis=False):
dataCoco2 = ds.CocoDataset(DATA_DIR_2[0], annotation_file=DATA_DIR_2[1], task="Detection", dataCoco2 = ds.CocoDataset(DATA_DIR_2[0], annotation_file=DATA_DIR_2[1], task="Detection",
decode=True, shuffle=False) decode=True, shuffle=False)
test_op = c_vision.RandomHorizontalFlipWithBBox(1) test_op = vision.RandomHorizontalFlipWithBBox(1)
dataCoco2 = dataCoco2.map(operations=[test_op], input_columns=["image", "bbox"], dataCoco2 = dataCoco2.map(operations=[test_op], input_columns=["image", "bbox"],
output_columns=["image", "bbox"], output_columns=["image", "bbox"],
column_order=["image", "bbox"]) column_order=["image", "bbox"])
unaugSamp, augSamp = [], [] unaug_samp, aug_samp = [], []
for unAug, Aug in zip(dataCoco1.create_dict_iterator(num_epochs=1, output_numpy=True), for unaug, aug in zip(dataCoco1.create_dict_iterator(num_epochs=1, output_numpy=True),
dataCoco2.create_dict_iterator(num_epochs=1, output_numpy=True)): dataCoco2.create_dict_iterator(num_epochs=1, output_numpy=True)):
unaugSamp.append(unAug) unaug_samp.append(unaug)
augSamp.append(Aug) aug_samp.append(aug)
if plot_vis: if plot_vis:
visualize_with_bounding_boxes(unaugSamp, augSamp, "bbox") visualize_with_bounding_boxes(unaug_samp, aug_samp, "bbox")
def test_random_horizontal_flip_with_bbox_valid_rand_c(plot_vis=False): def test_random_horizontal_flip_with_bbox_valid_rand_c(plot_vis=False):
""" """
Uses a valid non-default input, expect to pass Uses a valid non-default input, expect to pass
Prints images side by side with and without Aug applied + bboxes to Prints images side by side with and without augmentation applied + bboxes to
compare and test compare and test
""" """
logger.info("test_random_horizontal_bbox_valid_rand_c") logger.info("test_random_horizontal_bbox_valid_rand_c")
@ -100,29 +102,31 @@ def test_random_horizontal_flip_with_bbox_valid_rand_c(plot_vis=False):
original_num_parallel_workers = config_get_set_num_parallel_workers(1) original_num_parallel_workers = config_get_set_num_parallel_workers(1)
# Load dataset # Load dataset
dataVoc1 = ds.VOCDataset(DATA_DIR, task="Detection", usage="train", shuffle=False, decode=True) data_voc1 = ds.VOCDataset(DATA_DIR, task="Detection",
usage="train", shuffle=False, decode=True)
dataVoc2 = ds.VOCDataset(DATA_DIR, task="Detection", usage="train", shuffle=False, decode=True) data_voc2 = ds.VOCDataset(DATA_DIR, task="Detection",
usage="train", shuffle=False, decode=True)
test_op = c_vision.RandomHorizontalFlipWithBBox(0.6) test_op = vision.RandomHorizontalFlipWithBBox(0.6)
# map to apply ops # map to apply ops
dataVoc2 = dataVoc2.map(operations=[test_op], input_columns=["image", "bbox"], data_voc2 = data_voc2.map(operations=[test_op], input_columns=["image", "bbox"],
output_columns=["image", "bbox"], output_columns=["image", "bbox"],
column_order=["image", "bbox"]) column_order=["image", "bbox"])
filename = "random_horizontal_flip_with_bbox_01_c_result.npz" filename = "random_horizontal_flip_with_bbox_01_c_result.npz"
save_and_check_md5(dataVoc2, filename, generate_golden=GENERATE_GOLDEN) save_and_check_md5(data_voc2, filename, generate_golden=GENERATE_GOLDEN)
unaugSamp, augSamp = [], [] unaug_samp, aug_samp = [], []
for unAug, Aug in zip(dataVoc1.create_dict_iterator(num_epochs=1, output_numpy=True), for unaug, aug in zip(data_voc1.create_dict_iterator(num_epochs=1, output_numpy=True),
dataVoc2.create_dict_iterator(num_epochs=1, output_numpy=True)): data_voc2.create_dict_iterator(num_epochs=1, output_numpy=True)):
unaugSamp.append(unAug) unaug_samp.append(unaug)
augSamp.append(Aug) aug_samp.append(aug)
if plot_vis: if plot_vis:
visualize_with_bounding_boxes(unaugSamp, augSamp) visualize_with_bounding_boxes(unaug_samp, aug_samp)
# Restore config setting # Restore config setting
ds.config.set_seed(original_seed) ds.config.set_seed(original_seed)
@ -132,40 +136,44 @@ def test_random_horizontal_flip_with_bbox_valid_rand_c(plot_vis=False):
def test_random_horizontal_flip_with_bbox_valid_edge_c(plot_vis=False): def test_random_horizontal_flip_with_bbox_valid_edge_c(plot_vis=False):
""" """
Test RandomHorizontalFlipWithBBox op (testing with valid edge case, box covering full image). Test RandomHorizontalFlipWithBBox op (testing with valid edge case, box covering full image).
Prints images side by side with and without Aug applied + bboxes to compare and test Prints images side by side with and without augmentation applied + bboxes to compare and test
""" """
logger.info("test_horizontal_flip_with_bbox_valid_edge_c") logger.info("test_horizontal_flip_with_bbox_valid_edge_c")
dataVoc1 = ds.VOCDataset(DATA_DIR, task="Detection", usage="train", shuffle=False, decode=True) data_voc1 = ds.VOCDataset(DATA_DIR, task="Detection",
dataVoc2 = ds.VOCDataset(DATA_DIR, task="Detection", usage="train", shuffle=False, decode=True) usage="train", shuffle=False, decode=True)
data_voc2 = ds.VOCDataset(DATA_DIR, task="Detection",
usage="train", shuffle=False, decode=True)
test_op = c_vision.RandomHorizontalFlipWithBBox(1) test_op = vision.RandomHorizontalFlipWithBBox(1)
# map to apply ops # map to apply ops
# Add column for "bbox" # Add column for "bbox"
dataVoc1 = dataVoc1.map( data_voc1 = data_voc1.map(
operations=lambda img, bbox: (img, np.array([[0, 0, img.shape[1], img.shape[0], 0, 0, 0]]).astype(np.float32)), operations=lambda img, bbox: (img, np.array(
[[0, 0, img.shape[1], img.shape[0], 0, 0, 0]]).astype(np.float32)),
input_columns=["image", "bbox"], input_columns=["image", "bbox"],
output_columns=["image", "bbox"], output_columns=["image", "bbox"],
column_order=["image", "bbox"]) column_order=["image", "bbox"])
dataVoc2 = dataVoc2.map( data_voc2 = data_voc2.map(
operations=lambda img, bbox: (img, np.array([[0, 0, img.shape[1], img.shape[0], 0, 0, 0]]).astype(np.float32)), operations=lambda img, bbox: (img, np.array(
[[0, 0, img.shape[1], img.shape[0], 0, 0, 0]]).astype(np.float32)),
input_columns=["image", "bbox"], input_columns=["image", "bbox"],
output_columns=["image", "bbox"], output_columns=["image", "bbox"],
column_order=["image", "bbox"]) column_order=["image", "bbox"])
dataVoc2 = dataVoc2.map(operations=[test_op], input_columns=["image", "bbox"], data_voc2 = data_voc2.map(operations=[test_op], input_columns=["image", "bbox"],
output_columns=["image", "bbox"], output_columns=["image", "bbox"],
column_order=["image", "bbox"]) column_order=["image", "bbox"])
unaugSamp, augSamp = [], [] unaug_samp, aug_samp = [], []
for unAug, Aug in zip(dataVoc1.create_dict_iterator(num_epochs=1, output_numpy=True), for unaug, aug in zip(data_voc1.create_dict_iterator(num_epochs=1, output_numpy=True),
dataVoc2.create_dict_iterator(num_epochs=1, output_numpy=True)): data_voc2.create_dict_iterator(num_epochs=1, output_numpy=True)):
unaugSamp.append(unAug) unaug_samp.append(unaug)
augSamp.append(Aug) aug_samp.append(aug)
if plot_vis: if plot_vis:
visualize_with_bounding_boxes(unaugSamp, augSamp) visualize_with_bounding_boxes(unaug_samp, aug_samp)
def test_random_horizontal_flip_with_bbox_invalid_prob_c(): def test_random_horizontal_flip_with_bbox_invalid_prob_c():
@ -174,18 +182,20 @@ def test_random_horizontal_flip_with_bbox_invalid_prob_c():
""" """
logger.info("test_random_horizontal_bbox_invalid_prob_c") logger.info("test_random_horizontal_bbox_invalid_prob_c")
dataVoc2 = ds.VOCDataset(DATA_DIR, task="Detection", usage="train", shuffle=False, decode=True) data_voc2 = ds.VOCDataset(DATA_DIR, task="Detection",
usage="train", shuffle=False, decode=True)
try: try:
# Note: Valid range of prob should be [0.0, 1.0] # Note: Valid range of prob should be [0.0, 1.0]
test_op = c_vision.RandomHorizontalFlipWithBBox(1.5) test_op = vision.RandomHorizontalFlipWithBBox(1.5)
# map to apply ops # map to apply ops
dataVoc2 = dataVoc2.map(operations=[test_op], input_columns=["image", "bbox"], data_voc2 = data_voc2.map(operations=[test_op], input_columns=["image", "bbox"],
output_columns=["image", "bbox"], output_columns=["image", "bbox"],
column_order=["image", "bbox"]) # Add column for "bbox" column_order=["image", "bbox"]) # Add column for "bbox"
except ValueError as error: except ValueError as error:
logger.info("Got an exception in DE: {}".format(str(error))) logger.info("Got an exception in DE: {}".format(str(error)))
assert "Input prob is not within the required interval of [0.0, 1.0]." in str(error) assert "Input prob is not within the required interval of [0.0, 1.0]." in str(
error)
def test_random_horizontal_flip_with_bbox_invalid_bounds_c(): def test_random_horizontal_flip_with_bbox_invalid_bounds_c():
@ -194,16 +204,23 @@ def test_random_horizontal_flip_with_bbox_invalid_bounds_c():
""" """
logger.info("test_random_horizontal_bbox_invalid_bounds_c") logger.info("test_random_horizontal_bbox_invalid_bounds_c")
test_op = c_vision.RandomHorizontalFlipWithBBox(1) test_op = vision.RandomHorizontalFlipWithBBox(1)
dataVoc2 = ds.VOCDataset(DATA_DIR, task="Detection", usage="train", shuffle=False, decode=True) data_voc2 = ds.VOCDataset(DATA_DIR, task="Detection",
check_bad_bbox(dataVoc2, test_op, InvalidBBoxType.WidthOverflow, "bounding boxes is out of bounds of the image") usage="train", shuffle=False, decode=True)
dataVoc2 = ds.VOCDataset(DATA_DIR, task="Detection", usage="train", shuffle=False, decode=True) check_bad_bbox(data_voc2, test_op, InvalidBBoxType.WidthOverflow,
check_bad_bbox(dataVoc2, test_op, InvalidBBoxType.HeightOverflow, "bounding boxes is out of bounds of the image") "bounding boxes is out of bounds of the image")
dataVoc2 = ds.VOCDataset(DATA_DIR, task="Detection", usage="train", shuffle=False, decode=True) data_voc2 = ds.VOCDataset(DATA_DIR, task="Detection",
check_bad_bbox(dataVoc2, test_op, InvalidBBoxType.NegativeXY, "negative value") usage="train", shuffle=False, decode=True)
dataVoc2 = ds.VOCDataset(DATA_DIR, task="Detection", usage="train", shuffle=False, decode=True) check_bad_bbox(data_voc2, test_op, InvalidBBoxType.HeightOverflow,
check_bad_bbox(dataVoc2, test_op, InvalidBBoxType.WrongShape, "4 features") "bounding boxes is out of bounds of the image")
data_voc2 = ds.VOCDataset(DATA_DIR, task="Detection",
usage="train", shuffle=False, decode=True)
check_bad_bbox(data_voc2, test_op,
InvalidBBoxType.NegativeXY, "negative value")
data_voc2 = ds.VOCDataset(DATA_DIR, task="Detection",
usage="train", shuffle=False, decode=True)
check_bad_bbox(data_voc2, test_op, InvalidBBoxType.WrongShape, "4 features")
if __name__ == "__main__": if __name__ == "__main__":

View File

@ -18,7 +18,7 @@ Testing RandomInvert in DE
import numpy as np import numpy as np
import mindspore.dataset as ds import mindspore.dataset as ds
from mindspore.dataset.vision.c_transforms import Decode, Resize, RandomInvert, Invert from mindspore.dataset.vision.transforms import Decode, Resize, RandomInvert, Invert
from mindspore import log as logger from mindspore import log as logger
from util import visualize_list, visualize_image, diff_mse from util import visualize_list, visualize_image, diff_mse

View File

@ -19,9 +19,8 @@ import numpy as np
import pytest import pytest
import mindspore.dataset as ds import mindspore.dataset as ds
import mindspore.dataset.transforms.py_transforms import mindspore.dataset.transforms.transforms
import mindspore.dataset.vision.py_transforms as F import mindspore.dataset.vision.transforms as vision
import mindspore.dataset.vision.c_transforms as C
from mindspore import log as logger from mindspore import log as logger
from util import visualize_list, diff_mse, save_and_check_md5, \ from util import visualize_list, diff_mse, save_and_check_md5, \
config_get_set_seed, config_get_set_num_parallel_workers config_get_set_seed, config_get_set_num_parallel_workers
@ -35,19 +34,21 @@ GENERATE_GOLDEN = False
def test_random_lighting_py(alpha=1, plot=False): def test_random_lighting_py(alpha=1, plot=False):
""" """
Feature: RandomLighting Feature: RandomLighting
Description: test RandomLighting python op Description: test RandomLighting Python implementation
Expectation: equal results Expectation: equal results
""" """
logger.info("Test RandomLighting python op") logger.info("Test RandomLighting Python implementation")
# Original Images # Original Images
data = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False) data = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
transforms_original = mindspore.dataset.transforms.py_transforms.Compose([F.Decode(), transforms_original = mindspore.dataset.transforms.transforms.Compose([vision.Decode(True),
F.Resize((224, 224)), vision.Resize(
F.ToTensor()]) (224, 224)),
vision.ToTensor()])
ds_original = data.map(operations=transforms_original, input_columns="image") ds_original = data.map(
operations=transforms_original, input_columns="image")
ds_original = ds_original.batch(512) ds_original = ds_original.batch(512)
@ -55,19 +56,22 @@ def test_random_lighting_py(alpha=1, plot=False):
if idx == 0: if idx == 0:
images_original = np.transpose(image, (0, 2, 3, 1)) images_original = np.transpose(image, (0, 2, 3, 1))
else: else:
images_original = np.append(images_original, np.transpose(image, (0, 2, 3, 1)), axis=0) images_original = np.append(
images_original, np.transpose(image, (0, 2, 3, 1)), axis=0)
# Random Lighting Adjusted Images # Random Lighting Adjusted Images
data = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False) data = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
alpha = alpha if alpha is not None else 0.05 alpha = alpha if alpha is not None else 0.05
py_op = F.RandomLighting(alpha) py_op = vision.RandomLighting(alpha)
transforms_random_lighting = mindspore.dataset.transforms.py_transforms.Compose([F.Decode(), transforms_random_lighting = mindspore.dataset.transforms.transforms.Compose([vision.Decode(True),
F.Resize((224, 224)), vision.Resize(
py_op, (224, 224)),
F.ToTensor()]) py_op,
ds_random_lighting = data.map(operations=transforms_random_lighting, input_columns="image") vision.ToTensor()])
ds_random_lighting = data.map(
operations=transforms_random_lighting, input_columns="image")
ds_random_lighting = ds_random_lighting.batch(512) ds_random_lighting = ds_random_lighting.batch(512)
@ -75,7 +79,8 @@ def test_random_lighting_py(alpha=1, plot=False):
if idx == 0: if idx == 0:
images_random_lighting = np.transpose(image, (0, 2, 3, 1)) images_random_lighting = np.transpose(image, (0, 2, 3, 1))
else: else:
images_random_lighting = np.append(images_random_lighting, np.transpose(image, (0, 2, 3, 1)), axis=0) images_random_lighting = np.append(
images_random_lighting, np.transpose(image, (0, 2, 3, 1)), axis=0)
num_samples = images_original.shape[0] num_samples = images_original.shape[0]
mse = np.zeros(num_samples) mse = np.zeros(num_samples)
@ -91,21 +96,21 @@ def test_random_lighting_py(alpha=1, plot=False):
def test_random_lighting_py_md5(): def test_random_lighting_py_md5():
""" """
Feature: RandomLighting Feature: RandomLighting
Description: test RandomLighting python op with md5 comparison Description: test RandomLighting Python implementation with md5 comparison
Expectation: same MD5 Expectation: same MD5
""" """
logger.info("Test RandomLighting python op with md5 comparison") logger.info("Test RandomLighting Python implementation with md5 comparison")
original_seed = config_get_set_seed(140) original_seed = config_get_set_seed(140)
original_num_parallel_workers = config_get_set_num_parallel_workers(1) original_num_parallel_workers = config_get_set_num_parallel_workers(1)
# define map operations # define map operations
transforms = [ transforms = [
F.Decode(), vision.Decode(True),
F.Resize((224, 224)), vision.Resize((224, 224)),
F.RandomLighting(1), vision.RandomLighting(1),
F.ToTensor() vision.ToTensor()
] ]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms) transform = mindspore.dataset.transforms.transforms.Compose(transforms)
# Generate dataset # Generate dataset
data = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False) data = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
@ -130,9 +135,10 @@ def test_random_lighting_c(alpha=1, plot=False):
# Original Images # Original Images
data = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False) data = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
transforms_original = [C.Decode(), C.Resize((224, 224))] transforms_original = [vision.Decode(), vision.Resize((224, 224))]
ds_original = data.map(operations=transforms_original, input_columns="image") ds_original = data.map(
operations=transforms_original, input_columns="image")
ds_original = ds_original.batch(512) ds_original = ds_original.batch(512)
@ -146,11 +152,13 @@ def test_random_lighting_c(alpha=1, plot=False):
data = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False) data = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
alpha = alpha if alpha is not None else 0.05 alpha = alpha if alpha is not None else 0.05
c_op = C.RandomLighting(alpha) c_op = vision.RandomLighting(alpha)
transforms_random_lighting = [C.Decode(), C.Resize((224, 224)), c_op] transforms_random_lighting = [
vision.Decode(), vision.Resize((224, 224)), c_op]
ds_random_lighting = data.map(operations=transforms_random_lighting, input_columns="image") ds_random_lighting = data.map(
operations=transforms_random_lighting, input_columns="image")
ds_random_lighting = ds_random_lighting.batch(512) ds_random_lighting = ds_random_lighting.batch(512)
@ -158,7 +166,8 @@ def test_random_lighting_c(alpha=1, plot=False):
if idx == 0: if idx == 0:
images_random_lighting = image images_random_lighting = image
else: else:
images_random_lighting = np.append(images_random_lighting, image, axis=0) images_random_lighting = np.append(
images_random_lighting, image, axis=0)
num_samples = images_original.shape[0] num_samples = images_original.shape[0]
mse = np.zeros(num_samples) mse = np.zeros(num_samples)
@ -181,16 +190,18 @@ def test_random_lighting_c_py(alpha=1, plot=False):
# RandomLighting Images # RandomLighting Images
data = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False) data = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
data = data.map(operations=[C.Decode(), C.Resize((200, 300))], input_columns=["image"]) data = data.map(operations=[vision.Decode(), vision.Resize(
(200, 300))], input_columns=["image"])
python_op = F.RandomLighting(alpha) python_op = vision.RandomLighting(alpha)
c_op = C.RandomLighting(alpha) c_op = vision.RandomLighting(alpha)
transforms_op = mindspore.dataset.transforms.py_transforms.Compose([lambda img: F.ToPIL()(img.astype(np.uint8)), transforms_op = mindspore.dataset.transforms.transforms.Compose([lambda img: vision.ToPIL()(img.astype(np.uint8)),
python_op, python_op,
np.array]) np.array])
ds_random_lighting_py = data.map(operations=transforms_op, input_columns="image") ds_random_lighting_py = data.map(
operations=transforms_op, input_columns="image")
ds_random_lighting_py = ds_random_lighting_py.batch(512) ds_random_lighting_py = ds_random_lighting_py.batch(512)
@ -199,12 +210,15 @@ def test_random_lighting_c_py(alpha=1, plot=False):
images_random_lighting_py = image images_random_lighting_py = image
else: else:
images_random_lighting_py = np.append(images_random_lighting_py, image, axis=0) images_random_lighting_py = np.append(
images_random_lighting_py, image, axis=0)
data = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False) data = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
data = data.map(operations=[C.Decode(), C.Resize((200, 300))], input_columns=["image"]) data = data.map(operations=[vision.Decode(), vision.Resize(
(200, 300))], input_columns=["image"])
ds_images_random_lighting_c = data.map(operations=c_op, input_columns="image") ds_images_random_lighting_c = data.map(
operations=c_op, input_columns="image")
ds_random_lighting_c = ds_images_random_lighting_c.batch(512) ds_random_lighting_c = ds_images_random_lighting_c.batch(512)
@ -212,15 +226,18 @@ def test_random_lighting_c_py(alpha=1, plot=False):
if idx == 0: if idx == 0:
images_random_lighting_c = image images_random_lighting_c = image
else: else:
images_random_lighting_c = np.append(images_random_lighting_c, image, axis=0) images_random_lighting_c = np.append(
images_random_lighting_c, image, axis=0)
num_samples = images_random_lighting_c.shape[0] num_samples = images_random_lighting_c.shape[0]
mse = np.zeros(num_samples) mse = np.zeros(num_samples)
for i in range(num_samples): for i in range(num_samples):
mse[i] = diff_mse(images_random_lighting_c[i], images_random_lighting_py[i]) mse[i] = diff_mse(images_random_lighting_c[i],
images_random_lighting_py[i])
logger.info("MSE= {}".format(str(np.mean(mse)))) logger.info("MSE= {}".format(str(np.mean(mse))))
if plot: if plot:
visualize_list(images_random_lighting_c, images_random_lighting_py, visualize_mode=2) visualize_list(images_random_lighting_c,
images_random_lighting_py, visualize_mode=2)
def test_random_lighting_invalid_params(): def test_random_lighting_invalid_params():
@ -232,14 +249,15 @@ def test_random_lighting_invalid_params():
logger.info("Test RandomLighting with invalid input parameters.") logger.info("Test RandomLighting with invalid input parameters.")
with pytest.raises(ValueError) as error_info: with pytest.raises(ValueError) as error_info:
data = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False) data = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
data = data.map(operations=[C.Decode(), C.Resize((224, 224)), data = data.map(operations=[vision.Decode(), vision.Resize((224, 224)),
C.RandomLighting(-2)], input_columns=["image"]) vision.RandomLighting(-2)], input_columns=["image"])
assert "Input alpha is not within the required interval of [0, 16777216]." in str(error_info.value) assert "Input alpha is not within the required interval of [0, 16777216]." in str(
error_info.value)
with pytest.raises(TypeError) as error_info: with pytest.raises(TypeError) as error_info:
data = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False) data = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
data = data.map(operations=[C.Decode(), C.Resize((224, 224)), data = data.map(operations=[vision.Decode(), vision.Resize((224, 224)),
C.RandomLighting('1')], input_columns=["image"]) vision.RandomLighting('1')], input_columns=["image"])
err_msg = "Argument alpha with value 1 is not of type [<class 'float'>, <class 'int'>], but got <class 'str'>." err_msg = "Argument alpha with value 1 is not of type [<class 'float'>, <class 'int'>], but got <class 'str'>."
assert err_msg in str(error_info.value) assert err_msg in str(error_info.value)

View File

@ -17,8 +17,8 @@ Testing RandomOrder op in DE
""" """
import numpy as np import numpy as np
import mindspore.dataset as ds import mindspore.dataset as ds
import mindspore.dataset.transforms.py_transforms as py_transforms import mindspore.dataset.transforms.transforms as data_trans
import mindspore.dataset.vision.py_transforms as py_vision import mindspore.dataset.vision.transforms as vision
from mindspore import log as logger from mindspore import log as logger
from util import visualize_list, config_get_set_seed, \ from util import visualize_list, config_get_set_seed, \
config_get_set_num_parallel_workers, save_and_check_md5 config_get_set_num_parallel_workers, save_and_check_md5
@ -37,19 +37,19 @@ def test_random_order_op(plot=False):
""" """
logger.info("test_random_order_op") logger.info("test_random_order_op")
# define map operations # define map operations
transforms_list = [py_vision.CenterCrop(64), py_vision.RandomRotation(30)] transforms_list = [vision.CenterCrop(64), vision.RandomRotation(30)]
transforms1 = [ transforms1 = [
py_vision.Decode(), vision.Decode(True),
py_transforms.RandomOrder(transforms_list), data_trans.RandomOrder(transforms_list),
py_vision.ToTensor() vision.ToTensor()
] ]
transform1 = py_transforms.Compose(transforms1) transform1 = data_trans.Compose(transforms1)
transforms2 = [ transforms2 = [
py_vision.Decode(), vision.Decode(True),
py_vision.ToTensor() vision.ToTensor()
] ]
transform2 = py_transforms.Compose(transforms2) transform2 = data_trans.Compose(transforms2)
# First dataset # First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
@ -80,13 +80,13 @@ def test_random_order_md5():
original_seed = config_get_set_seed(8) original_seed = config_get_set_seed(8)
original_num_parallel_workers = config_get_set_num_parallel_workers(1) original_num_parallel_workers = config_get_set_num_parallel_workers(1)
# define map operations # define map operations
transforms_list = [py_vision.RandomCrop(64), py_vision.RandomRotation(30)] transforms_list = [vision.RandomCrop(64), vision.RandomRotation(30)]
transforms = [ transforms = [
py_vision.Decode(), vision.Decode(True),
py_transforms.RandomOrder(transforms_list), data_trans.RandomOrder(transforms_list),
py_vision.ToTensor() vision.ToTensor()
] ]
transform = py_transforms.Compose(transforms) transform = data_trans.Compose(transforms)
# Generate dataset # Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)

View File

@ -17,8 +17,8 @@ Testing RandomPerspective op in DE
""" """
import numpy as np import numpy as np
import mindspore.dataset as ds import mindspore.dataset as ds
import mindspore.dataset.transforms.py_transforms import mindspore.dataset.transforms.transforms
import mindspore.dataset.vision.py_transforms as py_vision import mindspore.dataset.vision.transforms as vision
from mindspore.dataset.vision.utils import Inter from mindspore.dataset.vision.utils import Inter
from mindspore import log as logger from mindspore import log as logger
from util import visualize_list, save_and_check_md5, \ from util import visualize_list, save_and_check_md5, \
@ -37,17 +37,17 @@ def test_random_perspective_op(plot=False):
logger.info("test_random_perspective_op") logger.info("test_random_perspective_op")
# define map operations # define map operations
transforms1 = [ transforms1 = [
py_vision.Decode(), vision.Decode(True),
py_vision.RandomPerspective(), vision.RandomPerspective(),
py_vision.ToTensor() vision.ToTensor()
] ]
transform1 = mindspore.dataset.transforms.py_transforms.Compose(transforms1) transform1 = mindspore.dataset.transforms.transforms.Compose(transforms1)
transforms2 = [ transforms2 = [
py_vision.Decode(), vision.Decode(True),
py_vision.ToTensor() vision.ToTensor()
] ]
transform2 = mindspore.dataset.transforms.py_transforms.Compose(transforms2) transform2 = mindspore.dataset.transforms.transforms.Compose(transforms2)
# First dataset # First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
@ -78,13 +78,13 @@ def skip_test_random_perspective_md5():
# define map operations # define map operations
transforms = [ transforms = [
py_vision.Decode(), vision.Decode(True),
py_vision.RandomPerspective(distortion_scale=0.3, prob=0.7, vision.RandomPerspective(distortion_scale=0.3, prob=0.7,
interpolation=Inter.BILINEAR), interpolation=Inter.BILINEAR),
py_vision.Resize(1450), # resize to a smaller size to prevent round-off error vision.Resize(1450), # resize to a smaller size to prevent round-off error
py_vision.ToTensor() vision.ToTensor()
] ]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms) transform = mindspore.dataset.transforms.transforms.Compose(transforms)
# Generate dataset # Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
@ -105,7 +105,7 @@ def test_random_perspective_exception_distortion_scale_range():
""" """
logger.info("test_random_perspective_exception_distortion_scale_range") logger.info("test_random_perspective_exception_distortion_scale_range")
try: try:
_ = py_vision.RandomPerspective(distortion_scale=1.5) _ = vision.RandomPerspective(distortion_scale=1.5)
except ValueError as e: except ValueError as e:
logger.info("Got an exception in DE: {}".format(str(e))) logger.info("Got an exception in DE: {}".format(str(e)))
assert str(e) == "Input distortion_scale is not within the required interval of [0.0, 1.0]." assert str(e) == "Input distortion_scale is not within the required interval of [0.0, 1.0]."
@ -117,7 +117,7 @@ def test_random_perspective_exception_prob_range():
""" """
logger.info("test_random_perspective_exception_prob_range") logger.info("test_random_perspective_exception_prob_range")
try: try:
_ = py_vision.RandomPerspective(prob=1.2) _ = vision.RandomPerspective(prob=1.2)
except ValueError as e: except ValueError as e:
logger.info("Got an exception in DE: {}".format(str(e))) logger.info("Got an exception in DE: {}".format(str(e)))
assert str(e) == "Input prob is not within the required interval of [0.0, 1.0]." assert str(e) == "Input prob is not within the required interval of [0.0, 1.0]."

View File

@ -17,7 +17,7 @@ Testing RandomPosterize op in DE
""" """
import numpy as np import numpy as np
import mindspore.dataset as ds import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as c_vision import mindspore.dataset.vision.transforms as vision
from mindspore import log as logger from mindspore import log as logger
from util import visualize_list, save_and_check_md5, \ from util import visualize_list, save_and_check_md5, \
config_get_set_seed, config_get_set_num_parallel_workers, diff_mse config_get_set_seed, config_get_set_num_parallel_workers, diff_mse
@ -40,8 +40,8 @@ def test_random_posterize_op_c(plot=False, run_golden=False):
# define map operations # define map operations
transforms1 = [ transforms1 = [
c_vision.Decode(), vision.Decode(),
c_vision.RandomPosterize((1, 8)) vision.RandomPosterize((1, 8))
] ]
# First dataset # First dataset
@ -49,7 +49,7 @@ def test_random_posterize_op_c(plot=False, run_golden=False):
data1 = data1.map(operations=transforms1, input_columns=["image"]) data1 = data1.map(operations=transforms1, input_columns=["image"])
# Second dataset # Second dataset
data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
data2 = data2.map(operations=[c_vision.Decode()], input_columns=["image"]) data2 = data2.map(operations=[vision.Decode()], input_columns=["image"])
image_posterize = [] image_posterize = []
image_original = [] image_original = []
@ -89,8 +89,8 @@ def test_random_posterize_op_fixed_point_c(plot=False, run_golden=True):
# define map operations # define map operations
transforms1 = [ transforms1 = [
c_vision.Decode(), vision.Decode(),
c_vision.RandomPosterize(1) vision.RandomPosterize(1)
] ]
# First dataset # First dataset
@ -98,7 +98,7 @@ def test_random_posterize_op_fixed_point_c(plot=False, run_golden=True):
data1 = data1.map(operations=transforms1, input_columns=["image"]) data1 = data1.map(operations=transforms1, input_columns=["image"])
# Second dataset # Second dataset
data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
data2 = data2.map(operations=[c_vision.Decode()], input_columns=["image"]) data2 = data2.map(operations=[vision.Decode()], input_columns=["image"])
image_posterize = [] image_posterize = []
image_original = [] image_original = []
@ -131,8 +131,8 @@ def test_random_posterize_default_c_md5(plot=False, run_golden=True):
original_num_parallel_workers = config_get_set_num_parallel_workers(1) original_num_parallel_workers = config_get_set_num_parallel_workers(1)
# define map operations # define map operations
transforms1 = [ transforms1 = [
c_vision.Decode(), vision.Decode(),
c_vision.RandomPosterize() vision.RandomPosterize()
] ]
# First dataset # First dataset
@ -140,7 +140,7 @@ def test_random_posterize_default_c_md5(plot=False, run_golden=True):
data1 = data1.map(operations=transforms1, input_columns=["image"]) data1 = data1.map(operations=transforms1, input_columns=["image"])
# Second dataset # Second dataset
data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
data2 = data2.map(operations=[c_vision.Decode()], input_columns=["image"]) data2 = data2.map(operations=[vision.Decode()], input_columns=["image"])
image_posterize = [] image_posterize = []
image_original = [] image_original = []
@ -171,32 +171,32 @@ def test_random_posterize_exception_bit():
logger.info("test_random_posterize_exception_bit") logger.info("test_random_posterize_exception_bit")
# Test max > 8 # Test max > 8
try: try:
_ = c_vision.RandomPosterize((1, 9)) _ = vision.RandomPosterize((1, 9))
except ValueError as e: except ValueError as e:
logger.info("Got an exception in DE: {}".format(str(e))) logger.info("Got an exception in DE: {}".format(str(e)))
assert str(e) == "Input is not within the required interval of [1, 8]." assert str(e) == "Input is not within the required interval of [1, 8]."
# Test min < 1 # Test min < 1
try: try:
_ = c_vision.RandomPosterize((0, 7)) _ = vision.RandomPosterize((0, 7))
except ValueError as e: except ValueError as e:
logger.info("Got an exception in DE: {}".format(str(e))) logger.info("Got an exception in DE: {}".format(str(e)))
assert str(e) == "Input is not within the required interval of [1, 8]." assert str(e) == "Input is not within the required interval of [1, 8]."
# Test max < min # Test max < min
try: try:
_ = c_vision.RandomPosterize((8, 1)) _ = vision.RandomPosterize((8, 1))
except ValueError as e: except ValueError as e:
logger.info("Got an exception in DE: {}".format(str(e))) logger.info("Got an exception in DE: {}".format(str(e)))
assert str(e) == "Input is not within the required interval of [1, 8]." assert str(e) == "Input is not within the required interval of [1, 8]."
# Test wrong type (not uint8) # Test wrong type (not uint8)
try: try:
_ = c_vision.RandomPosterize(1.1) _ = vision.RandomPosterize(1.1)
except TypeError as e: except TypeError as e:
logger.info("Got an exception in DE: {}".format(str(e))) logger.info("Got an exception in DE: {}".format(str(e)))
assert str(e) == ("Argument bits with value 1.1 is not of type [<class 'list'>, <class 'tuple'>, " assert str(e) == ("Argument bits with value 1.1 is not of type [<class 'list'>, <class 'tuple'>, "
"<class 'int'>], but got <class 'float'>.") "<class 'int'>], but got <class 'float'>.")
# Test wrong number of bits # Test wrong number of bits
try: try:
_ = c_vision.RandomPosterize((1, 1, 1)) _ = vision.RandomPosterize((1, 1, 1))
except TypeError as e: except TypeError as e:
logger.info("Got an exception in DE: {}".format(str(e))) logger.info("Got an exception in DE: {}".format(str(e)))
assert str(e) == "Size of bits should be a single integer or a list/tuple (min, max) of length 2." assert str(e) == "Size of bits should be a single integer or a list/tuple (min, max) of length 2."
@ -211,10 +211,10 @@ def test_rescale_with_random_posterize():
DATA_DIR_10 = "../data/dataset/testCifar10Data" DATA_DIR_10 = "../data/dataset/testCifar10Data"
dataset = ds.Cifar10Dataset(DATA_DIR_10) dataset = ds.Cifar10Dataset(DATA_DIR_10)
rescale_op = c_vision.Rescale((1.0 / 255.0), 0.0) rescale_op = vision.Rescale((1.0 / 255.0), 0.0)
dataset = dataset.map(operations=rescale_op, input_columns=["image"]) dataset = dataset.map(operations=rescale_op, input_columns=["image"])
random_posterize_op = c_vision.RandomPosterize((4, 8)) random_posterize_op = vision.RandomPosterize((4, 8))
dataset = dataset.map(operations=random_posterize_op, input_columns=["image"], num_parallel_workers=1) dataset = dataset.map(operations=random_posterize_op, input_columns=["image"], num_parallel_workers=1)
try: try:

View File

@ -16,8 +16,8 @@
Testing RandomResize op in DE Testing RandomResize op in DE
""" """
import mindspore.dataset as ds import mindspore.dataset as ds
import mindspore.dataset.transforms.c_transforms as ops import mindspore.dataset.transforms.transforms as ops
import mindspore.dataset.vision.c_transforms as vision import mindspore.dataset.vision.transforms as vision
from mindspore import log as logger from mindspore import log as logger
from util import visualize_list, save_and_check_md5, diff_mse, \ from util import visualize_list, save_and_check_md5, diff_mse, \
config_get_set_seed, config_get_set_num_parallel_workers config_get_set_seed, config_get_set_num_parallel_workers

View File

@ -17,7 +17,7 @@ Testing the random resize with bounding boxes op in DE
""" """
import numpy as np import numpy as np
import mindspore.dataset as ds import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as c_vision import mindspore.dataset.vision.transforms as vision
from mindspore import log as logger from mindspore import log as logger
from util import visualize_with_bounding_boxes, InvalidBBoxType, check_bad_bbox, \ from util import visualize_with_bounding_boxes, InvalidBBoxType, check_bad_bbox, \
@ -43,7 +43,7 @@ def test_random_resize_with_bbox_op_voc_c(plot_vis=False):
dataVoc2 = ds.VOCDataset(DATA_DIR, task="Detection", usage="train", shuffle=False, decode=True) dataVoc2 = ds.VOCDataset(DATA_DIR, task="Detection", usage="train", shuffle=False, decode=True)
test_op = c_vision.RandomResizeWithBBox(100) test_op = vision.RandomResizeWithBBox(100)
# map to apply ops # map to apply ops
dataVoc2 = dataVoc2.map(operations=[test_op], input_columns=["image", "bbox"], dataVoc2 = dataVoc2.map(operations=[test_op], input_columns=["image", "bbox"],
@ -85,7 +85,7 @@ def test_random_resize_with_bbox_op_rand_coco_c(plot_vis=False):
dataCoco2 = ds.CocoDataset(DATA_DIR_2[0], annotation_file=DATA_DIR_2[1], task="Detection", dataCoco2 = ds.CocoDataset(DATA_DIR_2[0], annotation_file=DATA_DIR_2[1], task="Detection",
decode=True, shuffle=False) decode=True, shuffle=False)
test_op = c_vision.RandomResizeWithBBox(200) test_op = vision.RandomResizeWithBBox(200)
# map to apply ops # map to apply ops
@ -122,7 +122,7 @@ def test_random_resize_with_bbox_op_edge_c(plot_vis=False):
dataVoc2 = ds.VOCDataset(DATA_DIR, task="Detection", usage="train", shuffle=False, decode=True) dataVoc2 = ds.VOCDataset(DATA_DIR, task="Detection", usage="train", shuffle=False, decode=True)
test_op = c_vision.RandomResizeWithBBox(500) test_op = vision.RandomResizeWithBBox(500)
# maps to convert data into valid edge case data # maps to convert data into valid edge case data
dataVoc1 = dataVoc1.map( dataVoc1 = dataVoc1.map(
@ -156,7 +156,7 @@ def test_random_resize_with_bbox_op_invalid_c():
try: try:
# zero value for resize # zero value for resize
c_vision.RandomResizeWithBBox(0) vision.RandomResizeWithBBox(0)
except ValueError as err: except ValueError as err:
logger.info("Got an exception in DE: {}".format(str(err))) logger.info("Got an exception in DE: {}".format(str(err)))
@ -164,7 +164,7 @@ def test_random_resize_with_bbox_op_invalid_c():
try: try:
# one of the size values is zero # one of the size values is zero
c_vision.RandomResizeWithBBox((0, 100)) vision.RandomResizeWithBBox((0, 100))
except ValueError as err: except ValueError as err:
logger.info("Got an exception in DE: {}".format(str(err))) logger.info("Got an exception in DE: {}".format(str(err)))
@ -172,7 +172,7 @@ def test_random_resize_with_bbox_op_invalid_c():
try: try:
# negative value for resize # negative value for resize
c_vision.RandomResizeWithBBox(-10) vision.RandomResizeWithBBox(-10)
except ValueError as err: except ValueError as err:
logger.info("Got an exception in DE: {}".format(str(err))) logger.info("Got an exception in DE: {}".format(str(err)))
@ -180,7 +180,7 @@ def test_random_resize_with_bbox_op_invalid_c():
try: try:
# invalid input shape # invalid input shape
c_vision.RandomResizeWithBBox((100, 100, 100)) vision.RandomResizeWithBBox((100, 100, 100))
except TypeError as err: except TypeError as err:
logger.info("Got an exception in DE: {}".format(str(err))) logger.info("Got an exception in DE: {}".format(str(err)))
@ -192,7 +192,7 @@ def test_random_resize_with_bbox_op_bad_c():
Tests RandomResizeWithBBox Op with invalid bounding boxes, expected to catch multiple errors Tests RandomResizeWithBBox Op with invalid bounding boxes, expected to catch multiple errors
""" """
logger.info("test_random_resize_with_bbox_op_bad_c") logger.info("test_random_resize_with_bbox_op_bad_c")
test_op = c_vision.RandomResizeWithBBox((400, 300)) test_op = vision.RandomResizeWithBBox((400, 300))
data_voc2 = ds.VOCDataset(DATA_DIR, task="Detection", usage="train", shuffle=False, decode=True) data_voc2 = ds.VOCDataset(DATA_DIR, task="Detection", usage="train", shuffle=False, decode=True)
check_bad_bbox(data_voc2, test_op, InvalidBBoxType.WidthOverflow, "bounding boxes is out of bounds of the image") check_bad_bbox(data_voc2, test_op, InvalidBBoxType.WidthOverflow, "bounding boxes is out of bounds of the image")

View File

@ -19,9 +19,8 @@ import numpy as np
import cv2 import cv2
import mindspore.dataset as ds import mindspore.dataset as ds
import mindspore.dataset.transforms.py_transforms import mindspore.dataset.transforms.transforms
import mindspore.dataset.vision.c_transforms as c_vision import mindspore.dataset.vision.transforms as vision
import mindspore.dataset.vision.py_transforms as py_vision
from mindspore.dataset.vision.utils import Inter from mindspore.dataset.vision.utils import Inter
from mindspore import log as logger from mindspore import log as logger
from util import visualize_image, visualize_list, diff_mse, save_and_check_md5, \ from util import visualize_image, visualize_list, diff_mse, save_and_check_md5, \
@ -41,9 +40,9 @@ def test_random_rotation_op_c(plot=False):
# First dataset # First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, shuffle=False) data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, shuffle=False)
decode_op = c_vision.Decode() decode_op = vision.Decode()
# use [90, 90] to force rotate 90 degrees, expand is set to be True to match output size # use [90, 90] to force rotate 90 degrees, expand is set to be True to match output size
random_rotation_op = c_vision.RandomRotation((90, 90), expand=True) random_rotation_op = vision.RandomRotation((90, 90), expand=True)
data1 = data1.map(operations=decode_op, input_columns=["image"]) data1 = data1.map(operations=decode_op, input_columns=["image"])
data1 = data1.map(operations=random_rotation_op, input_columns=["image"]) data1 = data1.map(operations=random_rotation_op, input_columns=["image"])
@ -78,10 +77,10 @@ def test_random_rotation_op_c_area():
# First dataset # First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, shuffle=False) data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, shuffle=False)
decode_op = c_vision.Decode() decode_op = vision.Decode()
# Use [180, 180] to force rotate 180 degrees, expand is set to be True to match output size # Use [180, 180] to force rotate 180 degrees, expand is set to be True to match output size
# Use resample with Interpolation AREA # Use resample with Interpolation AREA
random_rotation_op = c_vision.RandomRotation((180, 180), expand=True, resample=Inter.AREA) random_rotation_op = vision.RandomRotation((180, 180), expand=True, resample=Inter.AREA)
data1 = data1.map(operations=decode_op, input_columns=["image"]) data1 = data1.map(operations=decode_op, input_columns=["image"])
data1 = data1.map(operations=random_rotation_op, input_columns=["image"]) data1 = data1.map(operations=random_rotation_op, input_columns=["image"])
@ -112,15 +111,15 @@ def test_random_rotation_op_py(plot=False):
# First dataset # First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, shuffle=False) data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, shuffle=False)
# use [90, 90] to force rotate 90 degrees, expand is set to be True to match output size # use [90, 90] to force rotate 90 degrees, expand is set to be True to match output size
transform1 = mindspore.dataset.transforms.py_transforms.Compose([py_vision.Decode(), transform1 = mindspore.dataset.transforms.transforms.Compose([vision.Decode(True),
py_vision.RandomRotation((90, 90), expand=True), vision.RandomRotation((90, 90), expand=True),
py_vision.ToTensor()]) vision.ToTensor()])
data1 = data1.map(operations=transform1, input_columns=["image"]) data1 = data1.map(operations=transform1, input_columns=["image"])
# Second dataset # Second dataset
data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
transform2 = mindspore.dataset.transforms.py_transforms.Compose([py_vision.Decode(), transform2 = mindspore.dataset.transforms.transforms.Compose([vision.Decode(True),
py_vision.ToTensor()]) vision.ToTensor()])
data2 = data2.map(operations=transform2, input_columns=["image"]) data2 = data2.map(operations=transform2, input_columns=["image"])
num_iter = 0 num_iter = 0
@ -149,11 +148,11 @@ def test_random_rotation_op_py_ANTIALIAS():
# First dataset # First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, shuffle=False) data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, shuffle=False)
# use [90, 90] to force rotate 90 degrees, expand is set to be True to match output size # use [90, 90] to force rotate 90 degrees, expand is set to be True to match output size
transform1 = mindspore.dataset.transforms.py_transforms.Compose([py_vision.Decode(), transform1 = mindspore.dataset.transforms.transforms.Compose([vision.Decode(True),
py_vision.RandomRotation((90, 90), vision.RandomRotation((90, 90),
expand=True, expand=True,
resample=Inter.ANTIALIAS), resample=Inter.ANTIALIAS),
py_vision.ToTensor()]) vision.ToTensor()])
data1 = data1.map(operations=transform1, input_columns=["image"]) data1 = data1.map(operations=transform1, input_columns=["image"])
num_iter = 0 num_iter = 0
@ -170,9 +169,9 @@ def test_random_rotation_expand():
# First dataset # First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
decode_op = c_vision.Decode() decode_op = vision.Decode()
# expand is set to be True to match output size # expand is set to be True to match output size
random_rotation_op = c_vision.RandomRotation((0, 90), expand=True) random_rotation_op = vision.RandomRotation((0, 90), expand=True)
data1 = data1.map(operations=decode_op, input_columns=["image"]) data1 = data1.map(operations=decode_op, input_columns=["image"])
data1 = data1.map(operations=random_rotation_op, input_columns=["image"]) data1 = data1.map(operations=random_rotation_op, input_columns=["image"])
@ -193,24 +192,24 @@ def test_random_rotation_md5():
# First dataset # First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
decode_op = c_vision.Decode() decode_op = vision.Decode()
resize_op = c_vision.RandomRotation((0, 90), resize_op = vision.RandomRotation((0, 90),
expand=True, expand=True,
resample=Inter.BILINEAR, resample=Inter.BILINEAR,
center=(50, 50), center=(50, 50),
fill_value=150) fill_value=150)
data1 = data1.map(operations=decode_op, input_columns=["image"]) data1 = data1.map(operations=decode_op, input_columns=["image"])
data1 = data1.map(operations=resize_op, input_columns=["image"]) data1 = data1.map(operations=resize_op, input_columns=["image"])
# Second dataset # Second dataset
data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, shuffle=False) data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, shuffle=False)
transform2 = mindspore.dataset.transforms.py_transforms.Compose([py_vision.Decode(), transform2 = mindspore.dataset.transforms.transforms.Compose([vision.Decode(True),
py_vision.RandomRotation((0, 90), vision.RandomRotation((0, 90),
expand=True, expand=True,
resample=Inter.BILINEAR, resample=Inter.BILINEAR,
center=(50, 50), center=(50, 50),
fill_value=150), fill_value=150),
py_vision.ToTensor()]) vision.ToTensor()])
data2 = data2.map(operations=transform2, input_columns=["image"]) data2 = data2.map(operations=transform2, input_columns=["image"])
# Compare with expected md5 from images # Compare with expected md5 from images
@ -232,9 +231,9 @@ def test_rotation_diff(plot=False):
# First dataset # First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
decode_op = c_vision.Decode() decode_op = vision.Decode()
rotation_op = c_vision.RandomRotation((45, 45)) rotation_op = vision.RandomRotation((45, 45))
ctrans = [decode_op, ctrans = [decode_op,
rotation_op rotation_op
] ]
@ -243,11 +242,11 @@ def test_rotation_diff(plot=False):
# Second dataset # Second dataset
transforms = [ transforms = [
py_vision.Decode(), vision.Decode(True),
py_vision.RandomRotation((45, 45)), vision.RandomRotation((45, 45)),
py_vision.ToTensor(), vision.ToTensor(),
] ]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms) transform = mindspore.dataset.transforms.transforms.Compose(transforms)
data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
data2 = data2.map(operations=transform, input_columns=["image"]) data2 = data2.map(operations=transform, input_columns=["image"])

View File

@ -14,8 +14,8 @@
# ============================================================================== # ==============================================================================
import mindspore.dataset as ds import mindspore.dataset as ds
import mindspore.dataset.transforms.c_transforms as ops import mindspore.dataset.transforms.transforms as ops
import mindspore.dataset.vision.c_transforms as visions import mindspore.dataset.vision.transforms as visions
def test_random_select_subpolicy(): def test_random_select_subpolicy():

View File

@ -17,9 +17,8 @@ Testing RandomSharpness op in DE
""" """
import numpy as np import numpy as np
import mindspore.dataset as ds import mindspore.dataset as ds
import mindspore.dataset.transforms.py_transforms import mindspore.dataset.transforms.transforms
import mindspore.dataset.vision.py_transforms as F import mindspore.dataset.vision.transforms as vision
import mindspore.dataset.vision.c_transforms as C
from mindspore import log as logger from mindspore import log as logger
from util import visualize_list, visualize_one_channel_dataset, diff_mse, save_and_check_md5, \ from util import visualize_list, visualize_one_channel_dataset, diff_mse, save_and_check_md5, \
config_get_set_seed, config_get_set_num_parallel_workers config_get_set_seed, config_get_set_num_parallel_workers
@ -32,16 +31,16 @@ GENERATE_GOLDEN = False
def test_random_sharpness_py(degrees=(0.7, 0.7), plot=False): def test_random_sharpness_py(degrees=(0.7, 0.7), plot=False):
""" """
Test RandomSharpness python op Test RandomSharpness Python implementation
""" """
logger.info("Test RandomSharpness python op") logger.info("Test RandomSharpness Python implementation")
# Original Images # Original Images
data = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False) data = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
transforms_original = mindspore.dataset.transforms.py_transforms.Compose([F.Decode(), transforms_original = mindspore.dataset.transforms.transforms.Compose([vision.Decode(True),
F.Resize((224, 224)), vision.Resize((224, 224)),
F.ToTensor()]) vision.ToTensor()])
ds_original = data.map(operations=transforms_original, input_columns="image") ds_original = data.map(operations=transforms_original, input_columns="image")
@ -58,14 +57,14 @@ def test_random_sharpness_py(degrees=(0.7, 0.7), plot=False):
# Random Sharpness Adjusted Images # Random Sharpness Adjusted Images
data = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False) data = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
py_op = F.RandomSharpness() py_op = vision.RandomSharpness()
if degrees is not None: if degrees is not None:
py_op = F.RandomSharpness(degrees) py_op = vision.RandomSharpness(degrees)
transforms_random_sharpness = mindspore.dataset.transforms.py_transforms.Compose([F.Decode(), transforms_random_sharpness = mindspore.dataset.transforms.transforms.Compose([vision.Decode(True),
F.Resize((224, 224)), vision.Resize((224, 224)),
py_op, py_op,
F.ToTensor()]) vision.ToTensor()])
ds_random_sharpness = data.map(operations=transforms_random_sharpness, input_columns="image") ds_random_sharpness = data.map(operations=transforms_random_sharpness, input_columns="image")
@ -92,19 +91,19 @@ def test_random_sharpness_py(degrees=(0.7, 0.7), plot=False):
def test_random_sharpness_py_md5(): def test_random_sharpness_py_md5():
""" """
Test RandomSharpness python op with md5 comparison Test RandomSharpness Python implementation with md5 comparison
""" """
logger.info("Test RandomSharpness python op with md5 comparison") logger.info("Test RandomSharpness Python implementation with md5 comparison")
original_seed = config_get_set_seed(5) original_seed = config_get_set_seed(5)
original_num_parallel_workers = config_get_set_num_parallel_workers(1) original_num_parallel_workers = config_get_set_num_parallel_workers(1)
# define map operations # define map operations
transforms = [ transforms = [
F.Decode(), vision.Decode(True),
F.RandomSharpness((20.0, 25.0)), vision.RandomSharpness((20.0, 25.0)),
F.ToTensor() vision.ToTensor()
] ]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms) transform = mindspore.dataset.transforms.transforms.Compose(transforms)
# Generate dataset # Generate dataset
data = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False) data = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
@ -129,8 +128,8 @@ def test_random_sharpness_c(degrees=(1.6, 1.6), plot=False):
# Original Images # Original Images
data = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False) data = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
transforms_original = [C.Decode(), transforms_original = [vision.Decode(),
C.Resize((224, 224))] vision.Resize((224, 224))]
ds_original = data.map(operations=transforms_original, input_columns="image") ds_original = data.map(operations=transforms_original, input_columns="image")
@ -147,12 +146,12 @@ def test_random_sharpness_c(degrees=(1.6, 1.6), plot=False):
# Random Sharpness Adjusted Images # Random Sharpness Adjusted Images
data = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False) data = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
c_op = C.RandomSharpness() c_op = vision.RandomSharpness()
if degrees is not None: if degrees is not None:
c_op = C.RandomSharpness(degrees) c_op = vision.RandomSharpness(degrees)
transforms_random_sharpness = [C.Decode(), transforms_random_sharpness = [vision.Decode(),
C.Resize((224, 224)), vision.Resize((224, 224)),
c_op] c_op]
ds_random_sharpness = data.map(operations=transforms_random_sharpness, input_columns="image") ds_random_sharpness = data.map(operations=transforms_random_sharpness, input_columns="image")
@ -188,8 +187,8 @@ def test_random_sharpness_c_md5():
# define map operations # define map operations
transforms = [ transforms = [
C.Decode(), vision.Decode(),
C.RandomSharpness((10.0, 15.0)) vision.RandomSharpness((10.0, 15.0))
] ]
# Generate dataset # Generate dataset
@ -213,14 +212,14 @@ def test_random_sharpness_c_py(degrees=(1.0, 1.0), plot=False):
# RandomSharpness Images # RandomSharpness Images
data = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False) data = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
data = data.map(operations=[C.Decode(), C.Resize((200, 300))], input_columns=["image"]) data = data.map(operations=[vision.Decode(), vision.Resize((200, 300))], input_columns=["image"])
python_op = F.RandomSharpness(degrees) python_op = vision.RandomSharpness(degrees)
c_op = C.RandomSharpness(degrees) c_op = vision.RandomSharpness(degrees)
transforms_op = mindspore.dataset.transforms.py_transforms.Compose([lambda img: F.ToPIL()(img.astype(np.uint8)), transforms_op = mindspore.dataset.transforms.transforms.Compose([lambda img: vision.ToPIL()(img.astype(np.uint8)),
python_op, python_op,
np.array]) np.array])
ds_random_sharpness_py = data.map(operations=transforms_op, input_columns="image") ds_random_sharpness_py = data.map(operations=transforms_op, input_columns="image")
@ -236,7 +235,7 @@ def test_random_sharpness_c_py(degrees=(1.0, 1.0), plot=False):
axis=0) axis=0)
data = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False) data = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
data = data.map(operations=[C.Decode(), C.Resize((200, 300))], input_columns=["image"]) data = data.map(operations=[vision.Decode(), vision.Resize((200, 300))], input_columns=["image"])
ds_images_random_sharpness_c = data.map(operations=c_op, input_columns="image") ds_images_random_sharpness_c = data.map(operations=c_op, input_columns="image")
@ -269,9 +268,9 @@ def test_random_sharpness_one_channel_c(degrees=(1.4, 1.4), plot=False):
""" """
logger.info("Test RandomSharpness C Op With MNIST Dataset (Grayscale images)") logger.info("Test RandomSharpness C Op With MNIST Dataset (Grayscale images)")
c_op = C.RandomSharpness() c_op = vision.RandomSharpness()
if degrees is not None: if degrees is not None:
c_op = C.RandomSharpness(degrees) c_op = vision.RandomSharpness(degrees)
# RandomSharpness Images # RandomSharpness Images
data = ds.MnistDataset(dataset_dir=MNIST_DATA_DIR, num_samples=2, shuffle=False) data = ds.MnistDataset(dataset_dir=MNIST_DATA_DIR, num_samples=2, shuffle=False)
ds_random_sharpness_c = data.map(operations=c_op, input_columns="image") ds_random_sharpness_c = data.map(operations=c_op, input_columns="image")
@ -299,24 +298,24 @@ def test_random_sharpness_invalid_params():
logger.info("Test RandomSharpness with invalid input parameters.") logger.info("Test RandomSharpness with invalid input parameters.")
try: try:
data = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False) data = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
data = data.map(operations=[C.Decode(), C.Resize((224, 224)), data = data.map(operations=[vision.Decode(), vision.Resize((224, 224)),
C.RandomSharpness(10)], input_columns=["image"]) vision.RandomSharpness(10)], input_columns=["image"])
except TypeError as error: except TypeError as error:
logger.info("Got an exception in DE: {}".format(str(error))) logger.info("Got an exception in DE: {}".format(str(error)))
assert "tuple" in str(error) assert "tuple" in str(error)
try: try:
data = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False) data = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
data = data.map(operations=[C.Decode(), C.Resize((224, 224)), data = data.map(operations=[vision.Decode(), vision.Resize((224, 224)),
C.RandomSharpness((-10, 10))], input_columns=["image"]) vision.RandomSharpness((-10, 10))], input_columns=["image"])
except ValueError as error: except ValueError as error:
logger.info("Got an exception in DE: {}".format(str(error))) logger.info("Got an exception in DE: {}".format(str(error)))
assert "interval" in str(error) assert "interval" in str(error)
try: try:
data = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False) data = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
data = data.map(operations=[C.Decode(), C.Resize((224, 224)), data = data.map(operations=[vision.Decode(), vision.Resize((224, 224)),
C.RandomSharpness((10, 5))], input_columns=["image"]) vision.RandomSharpness((10, 5))], input_columns=["image"])
except ValueError as error: except ValueError as error:
logger.info("Got an exception in DE: {}".format(str(error))) logger.info("Got an exception in DE: {}".format(str(error)))
assert "(min,max)" in str(error) assert "(min,max)" in str(error)

View File

@ -17,7 +17,7 @@ Testing RandomSolarizeOp op in DE
""" """
import pytest import pytest
import mindspore.dataset as ds import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as vision import mindspore.dataset.vision.transforms as vision
from mindspore import log as logger from mindspore import log as logger
from util import visualize_list, save_and_check_md5, config_get_set_seed, config_get_set_num_parallel_workers, \ from util import visualize_list, save_and_check_md5, config_get_set_seed, config_get_set_num_parallel_workers, \
visualize_one_channel_dataset visualize_one_channel_dataset

View File

@ -17,10 +17,8 @@ Testing the random vertical flip op in DE
""" """
import numpy as np import numpy as np
import mindspore.dataset as ds import mindspore.dataset as ds
import mindspore.dataset.transforms.py_transforms import mindspore.dataset.transforms.transforms as ops
import mindspore.dataset.transforms.c_transforms as ops import mindspore.dataset.vision.transforms as vision
import mindspore.dataset.vision.c_transforms as c_vision
import mindspore.dataset.vision.py_transforms as py_vision
from mindspore import log as logger from mindspore import log as logger
from util import save_and_check_md5, visualize_list, visualize_image, diff_mse, \ from util import save_and_check_md5, visualize_list, visualize_image, diff_mse, \
config_get_set_seed, config_get_set_num_parallel_workers config_get_set_seed, config_get_set_num_parallel_workers
@ -50,8 +48,8 @@ def test_random_vertical_op(plot=False):
# First dataset # First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
decode_op = c_vision.Decode() decode_op = vision.Decode()
random_vertical_op = c_vision.RandomVerticalFlip(1.0) random_vertical_op = vision.RandomVerticalFlip(1.0)
data1 = data1.map(operations=decode_op, input_columns=["image"]) data1 = data1.map(operations=decode_op, input_columns=["image"])
data1 = data1.map(operations=random_vertical_op, input_columns=["image"]) data1 = data1.map(operations=random_vertical_op, input_columns=["image"])
@ -81,7 +79,7 @@ def test_random_vertical_op(plot=False):
def test_random_vertical_valid_prob_c(): def test_random_vertical_valid_prob_c():
""" """
Test RandomVerticalFlip op with c_transforms: valid non-default input, expect to pass Test RandomVerticalFlip op with C implementation: valid non-default input, expect to pass
""" """
logger.info("test_random_vertical_valid_prob_c") logger.info("test_random_vertical_valid_prob_c")
original_seed = config_get_set_seed(0) original_seed = config_get_set_seed(0)
@ -89,8 +87,8 @@ def test_random_vertical_valid_prob_c():
# Generate dataset # Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
decode_op = c_vision.Decode() decode_op = vision.Decode()
random_horizontal_op = c_vision.RandomVerticalFlip(0.8) random_horizontal_op = vision.RandomVerticalFlip(0.8)
data = data.map(operations=decode_op, input_columns=["image"]) data = data.map(operations=decode_op, input_columns=["image"])
data = data.map(operations=random_horizontal_op, input_columns=["image"]) data = data.map(operations=random_horizontal_op, input_columns=["image"])
@ -104,7 +102,7 @@ def test_random_vertical_valid_prob_c():
def test_random_vertical_valid_prob_py(): def test_random_vertical_valid_prob_py():
""" """
Test RandomVerticalFlip op with py_transforms: valid non-default input, expect to pass Test RandomVerticalFlip op with Python implementation: valid non-default input, expect to pass
""" """
logger.info("test_random_vertical_valid_prob_py") logger.info("test_random_vertical_valid_prob_py")
original_seed = config_get_set_seed(0) original_seed = config_get_set_seed(0)
@ -113,11 +111,11 @@ def test_random_vertical_valid_prob_py():
# Generate dataset # Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
transforms = [ transforms = [
py_vision.Decode(), vision.Decode(True),
py_vision.RandomVerticalFlip(0.8), vision.RandomVerticalFlip(0.8),
py_vision.ToTensor() vision.ToTensor()
] ]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms) transform = ops.Compose(transforms)
data = data.map(operations=transform, input_columns=["image"]) data = data.map(operations=transform, input_columns=["image"])
filename = "random_vertical_01_py_result.npz" filename = "random_vertical_01_py_result.npz"
@ -130,16 +128,16 @@ def test_random_vertical_valid_prob_py():
def test_random_vertical_invalid_prob_c(): def test_random_vertical_invalid_prob_c():
""" """
Test RandomVerticalFlip op in c_transforms: invalid input, expect to raise error Test RandomVerticalFlip op in C implementation: invalid input, expect to raise error
""" """
logger.info("test_random_vertical_invalid_prob_c") logger.info("test_random_vertical_invalid_prob_c")
# Generate dataset # Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
decode_op = c_vision.Decode() decode_op = vision.Decode()
try: try:
# Note: Valid range of prob should be [0.0, 1.0] # Note: Valid range of prob should be [0.0, 1.0]
random_horizontal_op = c_vision.RandomVerticalFlip(1.5) random_horizontal_op = vision.RandomVerticalFlip(1.5)
data = data.map(operations=decode_op, input_columns=["image"]) data = data.map(operations=decode_op, input_columns=["image"])
data = data.map(operations=random_horizontal_op, input_columns=["image"]) data = data.map(operations=random_horizontal_op, input_columns=["image"])
except ValueError as e: except ValueError as e:
@ -149,7 +147,7 @@ def test_random_vertical_invalid_prob_c():
def test_random_vertical_invalid_prob_py(): def test_random_vertical_invalid_prob_py():
""" """
Test RandomVerticalFlip op in py_transforms: invalid input, expect to raise error Test RandomVerticalFlip op in Python implementation: invalid input, expect to raise error
""" """
logger.info("test_random_vertical_invalid_prob_py") logger.info("test_random_vertical_invalid_prob_py")
@ -157,12 +155,12 @@ def test_random_vertical_invalid_prob_py():
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
try: try:
transforms = [ transforms = [
py_vision.Decode(), vision.Decode(True),
# Note: Valid range of prob should be [0.0, 1.0] # Note: Valid range of prob should be [0.0, 1.0]
py_vision.RandomVerticalFlip(1.5), vision.RandomVerticalFlip(1.5),
py_vision.ToTensor() vision.ToTensor()
] ]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms) transform = ops.Compose(transforms)
data = data.map(operations=transform, input_columns=["image"]) data = data.map(operations=transform, input_columns=["image"])
except ValueError as e: except ValueError as e:
logger.info("Got an exception in DE: {}".format(str(e))) logger.info("Got an exception in DE: {}".format(str(e)))
@ -177,21 +175,21 @@ def test_random_vertical_comp(plot=False):
# First dataset # First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
decode_op = c_vision.Decode() decode_op = vision.Decode()
# Note: The image must be flipped if prob is set to be 1 # Note: The image must be flipped if prob is set to be 1
random_horizontal_op = c_vision.RandomVerticalFlip(1) random_horizontal_op = vision.RandomVerticalFlip(1)
data1 = data1.map(operations=decode_op, input_columns=["image"]) data1 = data1.map(operations=decode_op, input_columns=["image"])
data1 = data1.map(operations=random_horizontal_op, input_columns=["image"]) data1 = data1.map(operations=random_horizontal_op, input_columns=["image"])
# Second dataset # Second dataset
data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
transforms = [ transforms = [
py_vision.Decode(), vision.Decode(True),
# Note: The image must be flipped if prob is set to be 1 # Note: The image must be flipped if prob is set to be 1
py_vision.RandomVerticalFlip(1), vision.RandomVerticalFlip(1),
py_vision.ToTensor() vision.ToTensor()
] ]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms) transform = ops.Compose(transforms)
data2 = data2.map(operations=transform, input_columns=["image"]) data2 = data2.map(operations=transform, input_columns=["image"])
images_list_c = [] images_list_c = []
@ -218,8 +216,8 @@ def test_random_vertical_op_1():
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
data = data.map(operations=ops.Duplicate(), input_columns=["image"], data = data.map(operations=ops.Duplicate(), input_columns=["image"],
output_columns=["image", "image_copy"], column_order=["image", "image_copy"]) output_columns=["image", "image_copy"], column_order=["image", "image_copy"])
random_vertical_op = c_vision.RandomVerticalFlip(1.0) random_vertical_op = vision.RandomVerticalFlip(1.0)
decode_op = c_vision.Decode() decode_op = vision.Decode()
data = data.map(operations=decode_op, input_columns=["image"]) data = data.map(operations=decode_op, input_columns=["image"])
data = data.map(operations=decode_op, input_columns=["image_copy"]) data = data.map(operations=decode_op, input_columns=["image_copy"])

View File

@ -17,7 +17,7 @@ Testing RandomVerticalFlipWithBBox op in DE
""" """
import numpy as np import numpy as np
import mindspore.dataset as ds import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as c_vision import mindspore.dataset.vision.transforms as vision
from mindspore import log as logger from mindspore import log as logger
from util import visualize_with_bounding_boxes, InvalidBBoxType, check_bad_bbox, \ from util import visualize_with_bounding_boxes, InvalidBBoxType, check_bad_bbox, \
@ -41,7 +41,7 @@ def test_random_vertical_flip_with_bbox_op_c(plot_vis=False):
dataVoc2 = ds.VOCDataset(DATA_DIR_VOC, task="Detection", usage="train", shuffle=False, decode=True) dataVoc2 = ds.VOCDataset(DATA_DIR_VOC, task="Detection", usage="train", shuffle=False, decode=True)
test_op = c_vision.RandomVerticalFlipWithBBox(1) test_op = vision.RandomVerticalFlipWithBBox(1)
# map to apply ops # map to apply ops
dataVoc2 = dataVoc2.map(operations=[test_op], input_columns=["image", "bbox"], dataVoc2 = dataVoc2.map(operations=[test_op], input_columns=["image", "bbox"],
@ -72,13 +72,13 @@ def test_random_vertical_flip_with_bbox_op_coco_c(plot_vis=False):
dataCoco2 = ds.CocoDataset(DATA_DIR_COCO[0], annotation_file=DATA_DIR_COCO[1], task="Detection", dataCoco2 = ds.CocoDataset(DATA_DIR_COCO[0], annotation_file=DATA_DIR_COCO[1], task="Detection",
decode=True, shuffle=False) decode=True, shuffle=False)
test_op = c_vision.RandomVerticalFlipWithBBox(1) test_op = vision.RandomVerticalFlipWithBBox(1)
dataCoco2 = dataCoco2.map(operations=[test_op], input_columns=["image", "bbox"], dataCoco2 = dataCoco2.map(operations=[test_op], input_columns=["image", "bbox"],
output_columns=["image", "bbox"], output_columns=["image", "bbox"],
column_order=["image", "bbox"]) column_order=["image", "bbox"])
test_op = c_vision.RandomVerticalFlipWithBBox(1) test_op = vision.RandomVerticalFlipWithBBox(1)
unaugSamp, augSamp = [], [] unaugSamp, augSamp = [], []
@ -105,7 +105,7 @@ def test_random_vertical_flip_with_bbox_op_rand_c(plot_vis=False):
dataVoc2 = ds.VOCDataset(DATA_DIR_VOC, task="Detection", usage="train", shuffle=False, decode=True) dataVoc2 = ds.VOCDataset(DATA_DIR_VOC, task="Detection", usage="train", shuffle=False, decode=True)
test_op = c_vision.RandomVerticalFlipWithBBox(0.8) test_op = vision.RandomVerticalFlipWithBBox(0.8)
# map to apply ops # map to apply ops
dataVoc2 = dataVoc2.map(operations=[test_op], input_columns=["image", "bbox"], dataVoc2 = dataVoc2.map(operations=[test_op], input_columns=["image", "bbox"],
@ -140,7 +140,7 @@ def test_random_vertical_flip_with_bbox_op_edge_c(plot_vis=False):
dataVoc2 = ds.VOCDataset(DATA_DIR_VOC, task="Detection", usage="train", shuffle=False, decode=True) dataVoc2 = ds.VOCDataset(DATA_DIR_VOC, task="Detection", usage="train", shuffle=False, decode=True)
test_op = c_vision.RandomVerticalFlipWithBBox(1) test_op = vision.RandomVerticalFlipWithBBox(1)
# maps to convert data into valid edge case data # maps to convert data into valid edge case data
dataVoc1 = dataVoc1.map( dataVoc1 = dataVoc1.map(
@ -175,7 +175,7 @@ def test_random_vertical_flip_with_bbox_op_invalid_c():
dataVoc2 = ds.VOCDataset(DATA_DIR_VOC, task="Detection", usage="train", shuffle=False, decode=True) dataVoc2 = ds.VOCDataset(DATA_DIR_VOC, task="Detection", usage="train", shuffle=False, decode=True)
try: try:
test_op = c_vision.RandomVerticalFlipWithBBox(2) test_op = vision.RandomVerticalFlipWithBBox(2)
# map to apply ops # map to apply ops
dataVoc2 = dataVoc2.map(operations=[test_op], input_columns=["image", "bbox"], dataVoc2 = dataVoc2.map(operations=[test_op], input_columns=["image", "bbox"],
@ -195,7 +195,7 @@ def test_random_vertical_flip_with_bbox_op_bad_c():
Tests RandomVerticalFlipWithBBox Op with invalid bounding boxes, expected to catch multiple errors Tests RandomVerticalFlipWithBBox Op with invalid bounding boxes, expected to catch multiple errors
""" """
logger.info("test_random_vertical_flip_with_bbox_op_bad_c") logger.info("test_random_vertical_flip_with_bbox_op_bad_c")
test_op = c_vision.RandomVerticalFlipWithBBox(1) test_op = vision.RandomVerticalFlipWithBBox(1)
data_voc2 = ds.VOCDataset(DATA_DIR_VOC, task="Detection", usage="train", shuffle=False, decode=True) data_voc2 = ds.VOCDataset(DATA_DIR_VOC, task="Detection", usage="train", shuffle=False, decode=True)
check_bad_bbox(data_voc2, test_op, InvalidBBoxType.WidthOverflow, "bounding boxes is out of bounds of the image") check_bad_bbox(data_voc2, test_op, InvalidBBoxType.WidthOverflow, "bounding boxes is out of bounds of the image")

View File

@ -18,7 +18,7 @@ Test Repeat Op
import numpy as np import numpy as np
import pytest import pytest
import mindspore.dataset as ds import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as vision import mindspore.dataset.vision.transforms as vision
from mindspore import log as logger from mindspore import log as logger
from util import save_and_check_dict from util import save_and_check_dict

View File

@ -16,7 +16,7 @@
Testing the rescale op in DE Testing the rescale op in DE
""" """
import mindspore.dataset as ds import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as vision import mindspore.dataset.vision.transforms as vision
from mindspore import log as logger from mindspore import log as logger
from util import visualize_image, diff_mse, save_and_check_md5 from util import visualize_image, diff_mse, save_and_check_md5

View File

@ -19,7 +19,7 @@ import os
import numpy as np import numpy as np
import pytest import pytest
import mindspore.dataset as ds import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as c_vision import mindspore.dataset.vision.transforms as vision
from util_minddataset import add_and_remove_cv_file from util_minddataset import add_and_remove_cv_file
np.random.seed(0) np.random.seed(0)
@ -38,9 +38,9 @@ def create_cifar_dataset1(size):
crop_size = 64 crop_size = 64
data = ds.Cifar100Dataset(data_dir, num_samples=size, shuffle=False) data = ds.Cifar100Dataset(data_dir, num_samples=size, shuffle=False)
data = data.project(["image"]) data = data.project(["image"])
pad_op = c_vision.Pad(pad_size) pad_op = vision.Pad(pad_size)
data = data.map(operations=pad_op, input_columns=["image"]) data = data.map(operations=pad_op, input_columns=["image"])
crop_op = c_vision.CenterCrop(crop_size) crop_op = vision.CenterCrop(crop_size)
data = data.map(operations=crop_op, input_columns=["image"]) data = data.map(operations=crop_op, input_columns=["image"])
return data return data
@ -53,9 +53,9 @@ def create_cifar_dataset2(size):
data = ds.Cifar100Dataset(data_dir, num_samples=size, shuffle=False) data = ds.Cifar100Dataset(data_dir, num_samples=size, shuffle=False)
data = data.repeat(repeat_count) data = data.repeat(repeat_count)
data = data.project(["image"]) data = data.project(["image"])
pad_op = c_vision.Pad(pad_size) pad_op = vision.Pad(pad_size)
data = data.map(operations=pad_op, input_columns=["image"]) data = data.map(operations=pad_op, input_columns=["image"])
crop_op = c_vision.CenterCrop(crop_size) crop_op = vision.CenterCrop(crop_size)
data = data.map(operations=crop_op, input_columns=["image"]) data = data.map(operations=crop_op, input_columns=["image"])
return data return data

View File

@ -17,8 +17,7 @@ Testing Resize op in DE
""" """
import pytest import pytest
import mindspore.dataset as ds import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as vision import mindspore.dataset.vision.transforms as vision
import mindspore.dataset.vision.py_transforms as py_vision
from mindspore.dataset.vision.utils import Inter from mindspore.dataset.vision.utils import Inter
from mindspore import log as logger from mindspore import log as logger
from util import visualize_list, save_and_check_md5, \ from util import visualize_list, save_and_check_md5, \
@ -68,11 +67,11 @@ def test_resize_op_ANTIALIAS():
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
# define map operations # define map operations
decode_op = py_vision.Decode() decode_op = vision.Decode(True)
resize_op = py_vision.Resize(20, Inter.ANTIALIAS) resize_op = vision.Resize(20, Inter.ANTIALIAS)
# apply map operations on images # apply map operations on images
data1 = data1.map(operations=[decode_op, resize_op, py_vision.ToTensor()], input_columns=["image"]) data1 = data1.map(operations=[decode_op, resize_op, vision.ToTensor()], input_columns=["image"])
num_iter = 0 num_iter = 0
for _ in data1.create_dict_iterator(num_epochs=1, output_numpy=True): for _ in data1.create_dict_iterator(num_epochs=1, output_numpy=True):

View File

@ -19,7 +19,7 @@ import numpy as np
import pytest import pytest
import mindspore.dataset as ds import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as c_vision import mindspore.dataset.vision.transforms as vision
from mindspore import log as logger from mindspore import log as logger
from util import visualize_with_bounding_boxes, InvalidBBoxType, check_bad_bbox, \ from util import visualize_with_bounding_boxes, InvalidBBoxType, check_bad_bbox, \
save_and_check_md5 save_and_check_md5
@ -43,7 +43,7 @@ def test_resize_with_bbox_op_voc_c(plot_vis=False):
dataVoc2 = ds.VOCDataset(DATA_DIR, task="Detection", usage="train", shuffle=False, decode=True) dataVoc2 = ds.VOCDataset(DATA_DIR, task="Detection", usage="train", shuffle=False, decode=True)
test_op = c_vision.ResizeWithBBox(100) test_op = vision.ResizeWithBBox(100)
# map to apply ops # map to apply ops
dataVoc2 = dataVoc2.map(operations=[test_op], input_columns=["image", "bbox"], dataVoc2 = dataVoc2.map(operations=[test_op], input_columns=["image", "bbox"],
@ -79,7 +79,7 @@ def test_resize_with_bbox_op_coco_c(plot_vis=False):
dataCOCO2 = ds.CocoDataset(DATA_DIR_2[0], annotation_file=DATA_DIR_2[1], task="Detection", dataCOCO2 = ds.CocoDataset(DATA_DIR_2[0], annotation_file=DATA_DIR_2[1], task="Detection",
decode=True, shuffle=False) decode=True, shuffle=False)
test_op = c_vision.ResizeWithBBox(200) test_op = vision.ResizeWithBBox(200)
# map to apply ops # map to apply ops
@ -112,7 +112,7 @@ def test_resize_with_bbox_op_edge_c(plot_vis=False):
dataVoc2 = ds.VOCDataset(DATA_DIR, task="Detection", usage="train", shuffle=False, decode=True) dataVoc2 = ds.VOCDataset(DATA_DIR, task="Detection", usage="train", shuffle=False, decode=True)
test_op = c_vision.ResizeWithBBox(500) test_op = vision.ResizeWithBBox(500)
# maps to convert data into valid edge case data # maps to convert data into valid edge case data
dataVoc1 = dataVoc1.map( dataVoc1 = dataVoc1.map(
@ -146,7 +146,7 @@ def test_resize_with_bbox_op_invalid_c():
try: try:
# invalid interpolation value # invalid interpolation value
c_vision.ResizeWithBBox(400, interpolation="invalid") vision.ResizeWithBBox(400, interpolation="invalid")
except TypeError as err: except TypeError as err:
logger.info("Got an exception in DE: {}".format(str(err))) logger.info("Got an exception in DE: {}".format(str(err)))
@ -158,7 +158,7 @@ def test_resize_with_bbox_op_bad_c():
Tests ResizeWithBBox Op with invalid bounding boxes, expected to catch multiple errors Tests ResizeWithBBox Op with invalid bounding boxes, expected to catch multiple errors
""" """
logger.info("test_resize_with_bbox_op_bad_c") logger.info("test_resize_with_bbox_op_bad_c")
test_op = c_vision.ResizeWithBBox((200, 300)) test_op = vision.ResizeWithBBox((200, 300))
data_voc2 = ds.VOCDataset(DATA_DIR, task="Detection", usage="train", shuffle=False, decode=True) data_voc2 = ds.VOCDataset(DATA_DIR, task="Detection", usage="train", shuffle=False, decode=True)
check_bad_bbox(data_voc2, test_op, InvalidBBoxType.WidthOverflow, "bounding boxes is out of bounds of the image") check_bad_bbox(data_voc2, test_op, InvalidBBoxType.WidthOverflow, "bounding boxes is out of bounds of the image")
@ -179,7 +179,7 @@ def test_resize_with_bbox_op_params_outside_of_interpolation_dict():
size = (500, 500) size = (500, 500)
more_para = None more_para = None
with pytest.raises(KeyError, match="None"): with pytest.raises(KeyError, match="None"):
c_vision.ResizeWithBBox(size, more_para) vision.ResizeWithBBox(size, more_para)
if __name__ == "__main__": if __name__ == "__main__":

View File

@ -1,169 +0,0 @@
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Testing RgbToBgr op in DE
"""
import numpy as np
from numpy.testing import assert_allclose
import mindspore.dataset as ds
import mindspore.dataset.transforms.py_transforms
import mindspore.dataset.vision.c_transforms as vision
import mindspore.dataset.vision.py_transforms as py_vision
import mindspore.dataset.vision.py_transforms_util as util
DATA_DIR = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"]
SCHEMA_DIR = "../data/dataset/test_tf_file_3_images/datasetSchema.json"
def generate_numpy_random_rgb(shape):
    """Return a random float array of *shape* holding RGB pixel values.

    Every value is an exact multiple of 1/255 in [0, 1] — i.e. one of the 256
    representable 8-bit pixel levels — so the low-precision floating point
    types exercised by these tests can hold the values without rounding error.
    """
    pixel_levels = np.random.randint(0, 256, shape)
    return pixel_levels / 255.
def test_rgb_bgr_hwc_py():
    """Eager check: rgb_to_bgrs on an HWC image reverses the channel order."""
    rgb_np = generate_numpy_random_rgb((64, 3)).astype(np.float32).reshape((8, 8, 3))
    bgr_np_pred = util.rgb_to_bgrs(rgb_np, True)
    # Ground truth: restack the three channels in reversed (B, G, R) order.
    bgr_np_gt = np.stack(
        (rgb_np[:, :, 2], rgb_np[:, :, 1], rgb_np[:, :, 0]), axis=2)
    assert bgr_np_pred.shape == rgb_np.shape
    assert_allclose(bgr_np_pred.flatten(),
                    bgr_np_gt.flatten(),
                    rtol=1e-5,
                    atol=0)
def test_rgb_bgr_hwc_c():
    """Eager check: the C RgbToBgr op on an HWC image reverses channel order."""
    rgb_np = generate_numpy_random_rgb((64, 3)).astype(np.float32).reshape((8, 8, 3))
    bgr_np_pred = vision.RgbToBgr()(rgb_np)
    # Ground truth: restack the three channels in reversed (B, G, R) order.
    bgr_np_gt = np.stack(
        (rgb_np[:, :, 2], rgb_np[:, :, 1], rgb_np[:, :, 0]), axis=2)
    assert bgr_np_pred.shape == rgb_np.shape
    assert_allclose(bgr_np_pred.flatten(),
                    bgr_np_gt.flatten(),
                    rtol=1e-5,
                    atol=0)
def test_rgb_bgr_chw_py():
    """Eager check: rgb_to_bgrs on a CHW image reverses the channel axis."""
    chw_img = generate_numpy_random_rgb((64, 3)).astype(np.float32).reshape((3, 8, 8))
    predicted = util.rgb_to_bgrs(chw_img, False)
    expected = chw_img[::-1, :, :]  # flip along the channel (first) axis
    assert predicted.shape == chw_img.shape
    assert_allclose(predicted.flatten(),
                    expected.flatten(),
                    rtol=1e-5,
                    atol=0)
def test_rgb_bgr_pipeline_py():
    """Pipeline check: appending RgbToBgr after ToTensor flips the CHW channel axis."""
    # Baseline pipeline: decode + resize + ToTensor only.
    base_ops = mindspore.dataset.transforms.py_transforms.Compose(
        [py_vision.Decode(), py_vision.Resize([64, 64]), py_vision.ToTensor()])
    ds1 = ds.TFRecordDataset(DATA_DIR,
                             SCHEMA_DIR,
                             columns_list=["image"],
                             shuffle=False)
    ds1 = ds1.map(operations=base_ops, input_columns=["image"])

    # Same pipeline with an extra RgbToBgr step at the end.
    bgr_ops = mindspore.dataset.transforms.py_transforms.Compose(
        [py_vision.Decode(),
         py_vision.Resize([64, 64]),
         py_vision.ToTensor(),
         py_vision.RgbToBgr()])
    ds2 = ds.TFRecordDataset(DATA_DIR,
                             SCHEMA_DIR,
                             columns_list=["image"],
                             shuffle=False)
    ds2 = ds2.map(operations=bgr_ops, input_columns=["image"])

    num_iter = 0
    for data1, data2 in zip(ds1.create_dict_iterator(num_epochs=1),
                            ds2.create_dict_iterator(num_epochs=1)):
        num_iter += 1
        ori_img = data1["image"].asnumpy()
        cvt_img = data2["image"].asnumpy()
        # ToTensor yields CHW, so the BGR conversion reverses the first axis.
        cvt_img_gt = ori_img[::-1, :, :]
        assert_allclose(cvt_img_gt.flatten(),
                        cvt_img.flatten(),
                        rtol=1e-5,
                        atol=0)
        assert ori_img.shape == cvt_img.shape
def test_rgb_bgr_pipeline_c():
    """Pipeline check: appending the C RgbToBgr op flips the HWC channel axis."""
    # Baseline pipeline: decode + resize only.
    base_ops = mindspore.dataset.transforms.py_transforms.Compose(
        [vision.Decode(), vision.Resize([64, 64])])
    ds1 = ds.TFRecordDataset(DATA_DIR,
                             SCHEMA_DIR,
                             columns_list=["image"],
                             shuffle=False)
    ds1 = ds1.map(operations=base_ops, input_columns=["image"])

    # Same pipeline with an extra RgbToBgr step at the end.
    bgr_ops = mindspore.dataset.transforms.py_transforms.Compose(
        [vision.Decode(),
         vision.Resize([64, 64]),
         vision.RgbToBgr()])
    ds2 = ds.TFRecordDataset(DATA_DIR,
                             SCHEMA_DIR,
                             columns_list=["image"],
                             shuffle=False)
    ds2 = ds2.map(operations=bgr_ops, input_columns=["image"])

    num_iter = 0
    for data1, data2 in zip(ds1.create_dict_iterator(num_epochs=1),
                            ds2.create_dict_iterator(num_epochs=1)):
        num_iter += 1
        ori_img = data1["image"].asnumpy()
        cvt_img = data2["image"].asnumpy()
        # The C ops keep HWC layout, so the BGR conversion reverses the last axis.
        cvt_img_gt = ori_img[:, :, ::-1]
        assert_allclose(cvt_img_gt.flatten(),
                        cvt_img.flatten(),
                        rtol=1e-5,
                        atol=0)
        assert ori_img.shape == cvt_img.shape
if __name__ == "__main__":
    # Run every eager and pipeline RGB->BGR test when executed as a script.
    for test_case in (test_rgb_bgr_hwc_py,
                      test_rgb_bgr_hwc_c,
                      test_rgb_bgr_chw_py,
                      test_rgb_bgr_pipeline_py,
                      test_rgb_bgr_pipeline_c):
        test_case()

View File

@ -21,8 +21,8 @@ import numpy as np
from numpy.testing import assert_allclose from numpy.testing import assert_allclose
import mindspore.dataset as ds import mindspore.dataset as ds
import mindspore.dataset.transforms.py_transforms import mindspore.dataset.transforms.transforms
import mindspore.dataset.vision.py_transforms as vision import mindspore.dataset.vision.transforms as vision
import mindspore.dataset.vision.py_transforms_util as util import mindspore.dataset.vision.py_transforms_util as util
DATA_DIR = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"] DATA_DIR = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"]
@ -134,23 +134,23 @@ def test_rgb_hsv_batch_chw():
def test_rgb_hsv_pipeline(): def test_rgb_hsv_pipeline():
# First dataset # First dataset
transforms1 = [ transforms1 = [
vision.Decode(), vision.Decode(True),
vision.Resize([64, 64]), vision.Resize([64, 64]),
vision.ToTensor() vision.ToTensor()
] ]
transforms1 = mindspore.dataset.transforms.py_transforms.Compose(transforms1) transforms1 = mindspore.dataset.transforms.transforms.Compose(transforms1)
ds1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) ds1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
ds1 = ds1.map(operations=transforms1, input_columns=["image"]) ds1 = ds1.map(operations=transforms1, input_columns=["image"])
# Second dataset # Second dataset
transforms2 = [ transforms2 = [
vision.Decode(), vision.Decode(True),
vision.Resize([64, 64]), vision.Resize([64, 64]),
vision.ToTensor(), vision.ToTensor(),
vision.RgbToHsv(), vision.RgbToHsv(),
vision.HsvToRgb() vision.HsvToRgb()
] ]
transform2 = mindspore.dataset.transforms.py_transforms.Compose(transforms2) transform2 = mindspore.dataset.transforms.transforms.Compose(transforms2)
ds2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) ds2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
ds2 = ds2.map(operations=transform2, input_columns=["image"]) ds2 = ds2.map(operations=transform2, input_columns=["image"])

View File

@ -18,7 +18,7 @@ Testing Rotate Python API
import cv2 import cv2
import mindspore.dataset as ds import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as c_vision import mindspore.dataset.vision.transforms as vision
from mindspore import log as logger from mindspore import log as logger
from mindspore.dataset.vision.utils import Inter from mindspore.dataset.vision.utils import Inter
from util import visualize_image, diff_mse from util import visualize_image, diff_mse
@ -30,14 +30,14 @@ IMAGE_FILE = "../data/dataset/apple.jpg"
def test_rotate_pipeline_with_expanding(plot=False): def test_rotate_pipeline_with_expanding(plot=False):
""" """
Test Rotate of c_transforms with expanding Test Rotate of C implementation with expanding
""" """
logger.info("test_rotate_pipeline_with_expanding") logger.info("test_rotate_pipeline_with_expanding")
# First dataset # First dataset
dataset1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, shuffle=False) dataset1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, shuffle=False)
decode_op = c_vision.Decode() decode_op = vision.Decode()
rotate_op = c_vision.Rotate(90, expand=True) rotate_op = vision.Rotate(90, expand=True)
dataset1 = dataset1.map(operations=decode_op, input_columns=["image"]) dataset1 = dataset1.map(operations=decode_op, input_columns=["image"])
dataset1 = dataset1.map(operations=rotate_op, input_columns=["image"]) dataset1 = dataset1.map(operations=rotate_op, input_columns=["image"])
@ -63,15 +63,15 @@ def test_rotate_pipeline_with_expanding(plot=False):
def test_rotate_pipeline_without_expanding(): def test_rotate_pipeline_without_expanding():
""" """
Test Rotate of c_transforms without expanding Test Rotate of C implementation without expanding
""" """
logger.info("test_rotate_pipeline_without_expanding") logger.info("test_rotate_pipeline_without_expanding")
# Create a Dataset then decode and rotate the image # Create a Dataset then decode and rotate the image
dataset = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, shuffle=False) dataset = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, shuffle=False)
decode_op = c_vision.Decode() decode_op = vision.Decode()
resize_op = c_vision.Resize((64, 128)) resize_op = vision.Resize((64, 128))
rotate_op = c_vision.Rotate(30) rotate_op = vision.Rotate(30)
dataset = dataset.map(operations=decode_op, input_columns=["image"]) dataset = dataset.map(operations=decode_op, input_columns=["image"])
dataset = dataset.map(operations=resize_op, input_columns=["image"]) dataset = dataset.map(operations=resize_op, input_columns=["image"])
dataset = dataset.map(operations=rotate_op, input_columns=["image"]) dataset = dataset.map(operations=rotate_op, input_columns=["image"])
@ -87,8 +87,8 @@ def test_rotate_eager():
""" """
logger.info("test_rotate_eager") logger.info("test_rotate_eager")
img = cv2.imread(IMAGE_FILE) img = cv2.imread(IMAGE_FILE)
resize_img = c_vision.Resize((32, 64))(img) resize_img = vision.Resize((32, 64))(img)
rotate_img = c_vision.Rotate(-90, expand=True)(resize_img) rotate_img = vision.Rotate(-90, expand=True)(resize_img)
assert rotate_img.shape == (64, 32, 3) assert rotate_img.shape == (64, 32, 3)
@ -98,17 +98,17 @@ def test_rotate_exception():
""" """
logger.info("test_rotate_exception") logger.info("test_rotate_exception")
try: try:
_ = c_vision.Rotate("60") _ = vision.Rotate("60")
except TypeError as e: except TypeError as e:
logger.info("Got an exception in Rotate: {}".format(str(e))) logger.info("Got an exception in Rotate: {}".format(str(e)))
assert "not of type [<class 'float'>, <class 'int'>]" in str(e) assert "not of type [<class 'float'>, <class 'int'>]" in str(e)
try: try:
_ = c_vision.Rotate(30, Inter.BICUBIC, False, (0, 0, 0)) _ = vision.Rotate(30, Inter.BICUBIC, False, (0, 0, 0))
except ValueError as e: except ValueError as e:
logger.info("Got an exception in Rotate: {}".format(str(e))) logger.info("Got an exception in Rotate: {}".format(str(e)))
assert "Value center needs to be a 2-tuple." in str(e) assert "Value center needs to be a 2-tuple." in str(e)
try: try:
_ = c_vision.Rotate(-120, Inter.NEAREST, False, (-1, -1), (255, 255)) _ = vision.Rotate(-120, Inter.NEAREST, False, (-1, -1), (255, 255))
except TypeError as e: except TypeError as e:
logger.info("Got an exception in Rotate: {}".format(str(e))) logger.info("Got an exception in Rotate: {}".format(str(e)))
assert "fill_value should be a single integer or a 3-tuple." in str(e) assert "fill_value should be a single integer or a 3-tuple." in str(e)

View File

@ -15,7 +15,7 @@
import numpy as np import numpy as np
import pytest import pytest
import mindspore.dataset as ds import mindspore.dataset as ds
import mindspore.dataset.transforms.c_transforms as c_transforms import mindspore.dataset.transforms.transforms as transforms
from mindspore import log as logger from mindspore import log as logger
from util import save_and_check_md5 from util import save_and_check_md5
@ -437,7 +437,7 @@ def test_manifest_sampler_chain_batch_repeat():
# Create ManifestDataset with sampler chain # Create ManifestDataset with sampler chain
data1 = ds.ManifestDataset(manifest_file, decode=True, sampler=sampler) data1 = ds.ManifestDataset(manifest_file, decode=True, sampler=sampler)
one_hot_encode = c_transforms.OneHot(3) one_hot_encode = transforms.OneHot(3)
data1 = data1.map(operations=one_hot_encode, input_columns=["label"]) data1 = data1.map(operations=one_hot_encode, input_columns=["label"])
data1 = data1.batch(batch_size=1, drop_remainder=False) data1 = data1.batch(batch_size=1, drop_remainder=False)
data1 = data1.repeat(count=2) data1 = data1.repeat(count=2)

View File

@ -26,10 +26,8 @@ from util import config_get_set_num_parallel_workers, config_get_set_seed
import mindspore.common.dtype as mstype import mindspore.common.dtype as mstype
import mindspore.dataset as ds import mindspore.dataset as ds
import mindspore.dataset.transforms.c_transforms as c import mindspore.dataset.transforms.transforms as transforms
import mindspore.dataset.transforms.py_transforms as py import mindspore.dataset.vision.transforms as vision
import mindspore.dataset.vision.c_transforms as vision
import mindspore.dataset.vision.py_transforms as py_vision
from mindspore import log as logger from mindspore import log as logger
from mindspore.dataset.vision import Inter from mindspore.dataset.vision import Inter
@ -53,7 +51,7 @@ def test_serdes_imagefolder_dataset(remove_json_files=True):
sampler.add_child(child_sampler) sampler.add_child(child_sampler)
data1 = ds.ImageFolderDataset(data_dir, sampler=sampler) data1 = ds.ImageFolderDataset(data_dir, sampler=sampler)
data1 = data1.repeat(1) data1 = data1.repeat(1)
data1 = data1.map(operations=[vision.Decode(True)], input_columns=["image"]) data1 = data1.map(operations=[vision.Decode()], input_columns=["image"])
rescale_op = vision.Rescale(rescale, shift) rescale_op = vision.Rescale(rescale, shift)
resize_op = vision.Resize((resize_height, resize_width), Inter.LINEAR) resize_op = vision.Resize((resize_height, resize_width), Inter.LINEAR)
@ -113,7 +111,7 @@ def test_serdes_mnist_dataset(remove_json_files=True):
ds.config.set_seed(1) ds.config.set_seed(1)
data1 = ds.MnistDataset(data_dir, num_samples=100) data1 = ds.MnistDataset(data_dir, num_samples=100)
one_hot_encode = c.OneHot(10) # num_classes is input argument one_hot_encode = transforms.OneHot(10) # num_classes is input argument
data1 = data1.map(operations=one_hot_encode, input_columns="label") data1 = data1.map(operations=one_hot_encode, input_columns="label")
# batch_size is input argument # batch_size is input argument
@ -161,11 +159,11 @@ def test_serdes_cifar10_dataset(remove_json_files=True):
vision.RandomCrop((32, 32), (4, 4, 4, 4)), vision.RandomCrop((32, 32), (4, 4, 4, 4)),
vision.Resize((224, 224)), vision.Resize((224, 224)),
vision.Rescale(1.0 / 255.0, 0.0), vision.Rescale(1.0 / 255.0, 0.0),
vision.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010]), vision.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010], True),
vision.HWC2CHW() vision.HWC2CHW()
] ]
type_cast_op = c.TypeCast(mstype.int32) type_cast_op = transforms.TypeCast(mstype.int32)
data1 = data1.map(operations=type_cast_op, input_columns="label") data1 = data1.map(operations=type_cast_op, input_columns="label")
data1 = data1.map(operations=trans, input_columns="image") data1 = data1.map(operations=trans, input_columns="image")
data1 = data1.batch(3, drop_remainder=True) data1 = data1.batch(3, drop_remainder=True)
@ -383,18 +381,17 @@ def test_serdes_pyvision(remove_json_files=True):
schema_file = "../data/dataset/test_tf_file_3_images/datasetSchema.json" schema_file = "../data/dataset/test_tf_file_3_images/datasetSchema.json"
data1 = ds.TFRecordDataset(data_dir, schema_file, columns_list=["image", "label"], shuffle=False) data1 = ds.TFRecordDataset(data_dir, schema_file, columns_list=["image", "label"], shuffle=False)
transforms1 = [ transforms1 = [
py_vision.Decode(), vision.Decode(True),
py_vision.CenterCrop([32, 32]), vision.CenterCrop([32, 32]),
py_vision.ToTensor() vision.ToTensor()
] ]
transforms2 = [ transforms2 = [
py_vision.RandomColorAdjust(), vision.RandomColorAdjust(),
py_vision.FiveCrop(1), vision.FiveCrop(1),
py_vision.Grayscale(), vision.Grayscale()
py.OneHotOp(1)
] ]
data1 = data1.map(operations=py.Compose(transforms1), input_columns=["image"]) data1 = data1.map(operations=transforms.Compose(transforms1), input_columns=["image"])
data1 = data1.map(operations=py.RandomApply(transforms2), input_columns=["image"]) data1 = data1.map(operations=transforms.RandomApply(transforms2), input_columns=["image"])
util_check_serialize_deserialize_file(data1, "pyvision_dataset_pipeline", remove_json_files) util_check_serialize_deserialize_file(data1, "pyvision_dataset_pipeline", remove_json_files)
data2 = ds.TFRecordDataset(data_dir, schema_file, columns_list=["image", "label"], shuffle=False) data2 = ds.TFRecordDataset(data_dir, schema_file, columns_list=["image", "label"], shuffle=False)
data2 = data2.map(operations=(lambda x, y, z: ( data2 = data2.map(operations=(lambda x, y, z: (
@ -438,7 +435,7 @@ def skip_test_serdes_fill(remove_json_files=True):
yield (np.array([4, 5, 6, 7], dtype=np.int32),) yield (np.array([4, 5, 6, 7], dtype=np.int32),)
data = ds.GeneratorDataset(gen, column_names=["col"]) data = ds.GeneratorDataset(gen, column_names=["col"])
fill_op = c.Fill(3) fill_op = transforms.Fill(3)
data = data.map(operations=fill_op, input_columns=["col"]) data = data.map(operations=fill_op, input_columns=["col"])
expected = np.array([3, 3, 3, 3], dtype=np.int32) expected = np.array([3, 3, 3, 3], dtype=np.int32)

View File

@ -16,7 +16,7 @@ import numpy as np
import pytest import pytest
import mindspore.dataset as ds import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as vision import mindspore.dataset.vision.transforms as vision
DATA_DIR_TF2 = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"] DATA_DIR_TF2 = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"]

View File

@ -19,7 +19,7 @@ import numpy as np
import pytest import pytest
import mindspore.dataset as ds import mindspore.dataset as ds
import mindspore.dataset.transforms.c_transforms as ops import mindspore.dataset.transforms.transforms as ops
def slice_compare(array, indexing, expected_array): def slice_compare(array, indexing, expected_array):

View File

@ -19,7 +19,7 @@ import functools
import numpy as np import numpy as np
import mindspore.dataset as ds import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as c_vision import mindspore.dataset.vision.transforms as vision
import mindspore.dataset.vision.utils as mode import mindspore.dataset.vision.utils as mode
from mindspore import log as logger from mindspore import log as logger
@ -73,12 +73,12 @@ def slice_to_patches(ori_size, num_h, num_w, pad_or_drop, fill_value=0, plot=Fal
cols = ['img' + str(x) for x in range(num_h*num_w)] cols = ['img' + str(x) for x in range(num_h*num_w)]
# First dataset # First dataset
dataset1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, shuffle=False) dataset1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, shuffle=False)
decode_op = c_vision.Decode() decode_op = vision.Decode()
resize_op = c_vision.Resize(ori_size) # H, W resize_op = vision.Resize(ori_size) # H, W
slice_patches_op = c_vision.SlicePatches( slice_patches_op = vision.SlicePatches(
num_h, num_w, mode.SliceMode.PAD, fill_value) num_h, num_w, mode.SliceMode.PAD, fill_value)
if not pad_or_drop: if not pad_or_drop:
slice_patches_op = c_vision.SlicePatches( slice_patches_op = vision.SlicePatches(
num_h, num_w, mode.SliceMode.DROP) num_h, num_w, mode.SliceMode.DROP)
dataset1 = dataset1.map(operations=decode_op, input_columns=["image"]) dataset1 = dataset1.map(operations=decode_op, input_columns=["image"])
dataset1 = dataset1.map(operations=resize_op, input_columns=["image"]) dataset1 = dataset1.map(operations=resize_op, input_columns=["image"])
@ -117,39 +117,39 @@ def test_slice_patches_exception_01():
""" """
logger.info("test_Slice_Patches_exception") logger.info("test_Slice_Patches_exception")
try: try:
_ = c_vision.SlicePatches(0, 2) _ = vision.SlicePatches(0, 2)
except ValueError as e: except ValueError as e:
logger.info("Got an exception in SlicePatches: {}".format(str(e))) logger.info("Got an exception in SlicePatches: {}".format(str(e)))
assert "Input num_height is not within" in str(e) assert "Input num_height is not within" in str(e)
try: try:
_ = c_vision.SlicePatches(2, 0) _ = vision.SlicePatches(2, 0)
except ValueError as e: except ValueError as e:
logger.info("Got an exception in SlicePatches: {}".format(str(e))) logger.info("Got an exception in SlicePatches: {}".format(str(e)))
assert "Input num_width is not within" in str(e) assert "Input num_width is not within" in str(e)
try: try:
_ = c_vision.SlicePatches(2, 2, 1) _ = vision.SlicePatches(2, 2, 1)
except TypeError as e: except TypeError as e:
logger.info("Got an exception in SlicePatches: {}".format(str(e))) logger.info("Got an exception in SlicePatches: {}".format(str(e)))
assert "Argument slice_mode with value" in str(e) assert "Argument slice_mode with value" in str(e)
try: try:
_ = c_vision.SlicePatches(2, 2, mode.SliceMode.PAD, -1) _ = vision.SlicePatches(2, 2, mode.SliceMode.PAD, -1)
except ValueError as e: except ValueError as e:
logger.info("Got an exception in SlicePatches: {}".format(str(e))) logger.info("Got an exception in SlicePatches: {}".format(str(e)))
assert "Input fill_value is not within" in str(e) assert "Input fill_value is not within" in str(e)
def test_slice_patches_06(): def test_slice_patches_06():
image = np.random.randint(0, 255, (158, 126, 1)).astype(np.int32) image = np.random.randint(0, 255, (158, 126, 1)).astype(np.int32)
slice_patches_op = c_vision.SlicePatches(2, 8) slice_patches_op = vision.SlicePatches(2, 8)
patches = slice_patches_op(image) patches = slice_patches_op(image)
assert len(patches) == 16 assert len(patches) == 16
assert patches[0].shape == (79, 16, 1) assert patches[0].shape == (79, 16, 1)
def test_slice_patches_07(): def test_slice_patches_07():
image = np.random.randint(0, 255, (158, 126)).astype(np.int32) image = np.random.randint(0, 255, (158, 126)).astype(np.int32)
slice_patches_op = c_vision.SlicePatches(2, 8) slice_patches_op = vision.SlicePatches(2, 8)
patches = slice_patches_op(image) patches = slice_patches_op(image)
assert len(patches) == 16 assert len(patches) == 16
assert patches[0].shape == (79, 16) assert patches[0].shape == (79, 16)
@ -157,7 +157,7 @@ def test_slice_patches_07():
def test_slice_patches_08(): def test_slice_patches_08():
np_data = np.random.randint(0, 255, (1, 56, 82, 256)).astype(np.uint8) np_data = np.random.randint(0, 255, (1, 56, 82, 256)).astype(np.uint8)
dataset = ds.NumpySlicesDataset(np_data, column_names=["image"]) dataset = ds.NumpySlicesDataset(np_data, column_names=["image"])
slice_patches_op = c_vision.SlicePatches(2, 2) slice_patches_op = vision.SlicePatches(2, 2)
dataset = dataset.map(input_columns=["image"], output_columns=["img0", "img1", "img2", "img3"], dataset = dataset.map(input_columns=["image"], output_columns=["img0", "img1", "img2", "img3"],
column_order=["img0", "img1", "img2", "img3"], column_order=["img0", "img1", "img2", "img3"],
operations=slice_patches_op) operations=slice_patches_op)
@ -167,21 +167,21 @@ def test_slice_patches_08():
def test_slice_patches_09(): def test_slice_patches_09():
image = np.random.randint(0, 255, (56, 82, 256)).astype(np.uint8) image = np.random.randint(0, 255, (56, 82, 256)).astype(np.uint8)
slice_patches_op = c_vision.SlicePatches(4, 3, mode.SliceMode.PAD) slice_patches_op = vision.SlicePatches(4, 3, mode.SliceMode.PAD)
patches = slice_patches_op(image) patches = slice_patches_op(image)
assert len(patches) == 12 assert len(patches) == 12
assert patches[0].shape == (14, 28, 256) assert patches[0].shape == (14, 28, 256)
def skip_test_slice_patches_10(): def skip_test_slice_patches_10():
image = np.random.randint(0, 255, (7000, 7000, 255)).astype(np.uint8) image = np.random.randint(0, 255, (7000, 7000, 255)).astype(np.uint8)
slice_patches_op = c_vision.SlicePatches(10, 13, mode.SliceMode.DROP) slice_patches_op = vision.SlicePatches(10, 13, mode.SliceMode.DROP)
patches = slice_patches_op(image) patches = slice_patches_op(image)
assert patches[0].shape == (700, 538, 255) assert patches[0].shape == (700, 538, 255)
def skip_test_slice_patches_11(): def skip_test_slice_patches_11():
np_data = np.random.randint(0, 255, (1, 7000, 7000, 256)).astype(np.uint8) np_data = np.random.randint(0, 255, (1, 7000, 7000, 256)).astype(np.uint8)
dataset = ds.NumpySlicesDataset(np_data, column_names=["image"]) dataset = ds.NumpySlicesDataset(np_data, column_names=["image"])
slice_patches_op = c_vision.SlicePatches(10, 13, mode.SliceMode.DROP) slice_patches_op = vision.SlicePatches(10, 13, mode.SliceMode.DROP)
cols = ['img' + str(x) for x in range(10*13)] cols = ['img' + str(x) for x in range(10*13)]
dataset = dataset.map(input_columns=["image"], output_columns=cols, dataset = dataset.map(input_columns=["image"], output_columns=cols,
column_order=cols, operations=slice_patches_op) column_order=cols, operations=slice_patches_op)

View File

@ -16,7 +16,7 @@
Testing soft dvpp SoftDvppDecodeResizeJpeg and SoftDvppDecodeRandomCropResizeJpeg in DE Testing soft dvpp SoftDvppDecodeResizeJpeg and SoftDvppDecodeRandomCropResizeJpeg in DE
""" """
import mindspore.dataset as ds import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as vision import mindspore.dataset.vision.transforms as vision
from mindspore import log as logger from mindspore import log as logger
from util import diff_mse, visualize_image from util import diff_mse, visualize_image

View File

@ -18,8 +18,8 @@ import pytest
import numpy as np import numpy as np
import mindspore.dataset as ds import mindspore.dataset as ds
import mindspore.dataset.transforms.py_transforms import mindspore.dataset.transforms.transforms
import mindspore.dataset.vision.py_transforms as vision import mindspore.dataset.vision.transforms as vision
from mindspore import log as logger from mindspore import log as logger
from util import visualize_list, save_and_check_md5 from util import visualize_list, save_and_check_md5
@ -35,20 +35,20 @@ def util_test_ten_crop(crop_size, vertical_flip=False, plot=False):
""" """
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
transforms_1 = [ transforms_1 = [
vision.Decode(), vision.Decode(True),
vision.ToTensor(), vision.ToTensor(),
] ]
transform_1 = mindspore.dataset.transforms.py_transforms.Compose(transforms_1) transform_1 = mindspore.dataset.transforms.transforms.Compose(transforms_1)
data1 = data1.map(operations=transform_1, input_columns=["image"]) data1 = data1.map(operations=transform_1, input_columns=["image"])
# Second dataset # Second dataset
data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
transforms_2 = [ transforms_2 = [
vision.Decode(), vision.Decode(True),
vision.TenCrop(crop_size, use_vertical_flip=vertical_flip), vision.TenCrop(crop_size, use_vertical_flip=vertical_flip),
lambda *images: np.stack([vision.ToTensor()(image) for image in images]) # 4D stack of 10 images lambda *images: np.stack([vision.ToTensor()(image) for image in images]) # 4D stack of 10 images
] ]
transform_2 = mindspore.dataset.transforms.py_transforms.Compose(transforms_2) transform_2 = mindspore.dataset.transforms.transforms.Compose(transforms_2)
data2 = data2.map(operations=transform_2, input_columns=["image"]) data2 = data2.map(operations=transform_2, input_columns=["image"])
num_iter = 0 num_iter = 0
for item1, item2 in zip(data1.create_dict_iterator(num_epochs=1, output_numpy=True), for item1, item2 in zip(data1.create_dict_iterator(num_epochs=1, output_numpy=True),
@ -107,11 +107,11 @@ def test_ten_crop_md5():
data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
transforms_2 = [ transforms_2 = [
vision.Decode(), vision.Decode(True),
vision.TenCrop((200, 100), use_vertical_flip=True), vision.TenCrop((200, 100), use_vertical_flip=True),
lambda *images: np.stack([vision.ToTensor()(image) for image in images]) # 4D stack of 10 images lambda *images: np.stack([vision.ToTensor()(image) for image in images]) # 4D stack of 10 images
] ]
transform_2 = mindspore.dataset.transforms.py_transforms.Compose(transforms_2) transform_2 = mindspore.dataset.transforms.transforms.Compose(transforms_2)
data2 = data2.map(operations=transform_2, input_columns=["image"]) data2 = data2.map(operations=transform_2, input_columns=["image"])
# Compare with expected md5 from images # Compare with expected md5 from images
filename = "ten_crop_01_result.npz" filename = "ten_crop_01_result.npz"
@ -126,7 +126,7 @@ def test_ten_crop_list_size_error_msg():
with pytest.raises(TypeError) as info: with pytest.raises(TypeError) as info:
_ = [ _ = [
vision.Decode(), vision.Decode(True),
vision.TenCrop([200, 200, 200]), vision.TenCrop([200, 200, 200]),
lambda images: np.stack([vision.ToTensor()(image) for image in images]) # 4D stack of 10 images lambda images: np.stack([vision.ToTensor()(image) for image in images]) # 4D stack of 10 images
] ]
@ -142,7 +142,7 @@ def test_ten_crop_invalid_size_error_msg():
with pytest.raises(ValueError) as info: with pytest.raises(ValueError) as info:
_ = [ _ = [
vision.Decode(), vision.Decode(True),
vision.TenCrop(0), vision.TenCrop(0),
lambda images: np.stack([vision.ToTensor()(image) for image in images]) # 4D stack of 10 images lambda images: np.stack([vision.ToTensor()(image) for image in images]) # 4D stack of 10 images
] ]
@ -151,7 +151,7 @@ def test_ten_crop_invalid_size_error_msg():
with pytest.raises(ValueError) as info: with pytest.raises(ValueError) as info:
_ = [ _ = [
vision.Decode(), vision.Decode(True),
vision.TenCrop(-10), vision.TenCrop(-10),
lambda images: np.stack([vision.ToTensor()(image) for image in images]) # 4D stack of 10 images lambda images: np.stack([vision.ToTensor()(image) for image in images]) # 4D stack of 10 images
] ]
@ -167,11 +167,11 @@ def test_ten_crop_wrong_img_error_msg():
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
transforms = [ transforms = [
vision.Decode(), vision.Decode(True),
vision.TenCrop(200), vision.TenCrop(200),
vision.ToTensor() vision.ToTensor()
] ]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms) transform = mindspore.dataset.transforms.transforms.Compose(transforms)
data = data.map(operations=transform, input_columns=["image"]) data = data.map(operations=transform, input_columns=["image"])
with pytest.raises(RuntimeError) as info: with pytest.raises(RuntimeError) as info:

View File

@ -16,9 +16,8 @@
Testing ToPIL op in DE Testing ToPIL op in DE
""" """
import mindspore.dataset as ds import mindspore.dataset as ds
import mindspore.dataset.transforms.py_transforms import mindspore.dataset.transforms.transforms
import mindspore.dataset.vision.c_transforms as c_vision import mindspore.dataset.vision.transforms as vision
import mindspore.dataset.vision.py_transforms as py_vision
from mindspore import log as logger from mindspore import log as logger
from util import save_and_check_md5 from util import save_and_check_md5
@ -38,13 +37,13 @@ def test_to_pil_01():
# Generate dataset # Generate dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
transforms = [ transforms = [
py_vision.Decode(), vision.Decode(True),
# If input is already PIL image. # If input is already PIL image.
py_vision.ToPIL(), vision.ToPIL(),
py_vision.CenterCrop(375), vision.CenterCrop(375),
py_vision.ToTensor() vision.ToTensor()
] ]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms) transform = mindspore.dataset.transforms.transforms.Compose(transforms)
data1 = data1.map(operations=transform, input_columns=["image"]) data1 = data1.map(operations=transform, input_columns=["image"])
# Compare with expected md5 from images # Compare with expected md5 from images
@ -60,14 +59,14 @@ def test_to_pil_02():
# Generate dataset # Generate dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
decode_op = c_vision.Decode() decode_op = vision.Decode()
transforms = [ transforms = [
# If input type is not PIL. # If input type is not PIL.
py_vision.ToPIL(), vision.ToPIL(),
py_vision.CenterCrop(375), vision.CenterCrop(375),
py_vision.ToTensor() vision.ToTensor()
] ]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms) transform = mindspore.dataset.transforms.transforms.Compose(transforms)
data1 = data1.map(operations=decode_op, input_columns=["image"]) data1 = data1.map(operations=decode_op, input_columns=["image"])
data1 = data1.map(operations=transform, input_columns=["image"]) data1 = data1.map(operations=transform, input_columns=["image"])

View File

@ -16,10 +16,12 @@
Testing ToType op in DE Testing ToType op in DE
""" """
import numpy as np import numpy as np
import mindspore._c_dataengine as cde
import mindspore.dataset as ds import mindspore.dataset as ds
import mindspore.dataset.transforms.py_transforms import mindspore.dataset.transforms.transforms
import mindspore.dataset.vision.py_transforms as py_vision import mindspore.dataset.vision.transforms as vision
from mindspore import log as logger from mindspore import log as logger
from mindspore.dataset.core.datatypes import nptype_to_detype
from util import save_and_check_md5 from util import save_and_check_md5
GENERATE_GOLDEN = False GENERATE_GOLDEN = False
@ -37,21 +39,21 @@ def test_to_type_op():
# First dataset # First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
transforms1 = [ transforms1 = [
py_vision.Decode(), vision.Decode(True),
py_vision.ToTensor(), vision.ToTensor(),
# Note: Convert the datatype from float32 to int16 # Note: Convert the datatype from float32 to int16
py_vision.ToType(np.int16) vision.ToType(np.int16)
] ]
transform1 = mindspore.dataset.transforms.py_transforms.Compose(transforms1) transform1 = mindspore.dataset.transforms.transforms.Compose(transforms1)
data1 = data1.map(operations=transform1, input_columns=["image"]) data1 = data1.map(operations=transform1, input_columns=["image"])
# Second dataset # Second dataset
data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
transforms2 = [ transforms2 = [
py_vision.Decode(), vision.Decode(True),
py_vision.ToTensor() vision.ToTensor()
] ]
transform2 = mindspore.dataset.transforms.py_transforms.Compose(transforms2) transform2 = mindspore.dataset.transforms.transforms.Compose(transforms2)
data2 = data2.map(operations=transform2, input_columns=["image"]) data2 = data2.map(operations=transform2, input_columns=["image"])
for item1, item2 in zip(data1.create_dict_iterator(num_epochs=1, output_numpy=True), for item1, item2 in zip(data1.create_dict_iterator(num_epochs=1, output_numpy=True),
@ -76,12 +78,12 @@ def test_to_type_01():
# Generate dataset # Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
transforms = [ transforms = [
py_vision.Decode(), vision.Decode(True),
py_vision.ToTensor(), vision.ToTensor(),
# Note: Convert the datatype from float32 to int32 # Note: Convert the datatype from float32 to int32
py_vision.ToType(np.int32) vision.ToType(np.int32)
] ]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms) transform = mindspore.dataset.transforms.transforms.Compose(transforms)
data = data.map(operations=transform, input_columns=["image"]) data = data.map(operations=transform, input_columns=["image"])
# Compare with expected md5 from images # Compare with expected md5 from images
@ -98,12 +100,12 @@ def test_to_type_02():
# Generate dataset # Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
transforms = [ transforms = [
py_vision.Decode(), vision.Decode(True),
py_vision.ToTensor(), vision.ToTensor(),
# Note: Convert to type int # Note: Convert to type int
py_vision.ToType('int') vision.ToType('int')
] ]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms) transform = mindspore.dataset.transforms.transforms.Compose(transforms)
data = data.map(operations=transform, input_columns=["image"]) data = data.map(operations=transform, input_columns=["image"])
# Compare with expected md5 from images # Compare with expected md5 from images
@ -122,11 +124,11 @@ def test_to_type_03():
# Generate dataset # Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
transforms = [ transforms = [
py_vision.Decode(), vision.Decode(True),
# Note: If the object is not numpy, e.g. PIL image, TypeError will raise # Note: If the object is not numpy, e.g. PIL image, TypeError will raise
py_vision.ToType(np.int32) vision.ToType(np.int32)
] ]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms) transform = mindspore.dataset.transforms.transforms.Compose(transforms)
data = data.map(operations=transform, input_columns=["image"]) data = data.map(operations=transform, input_columns=["image"])
except Exception as e: except Exception as e:
logger.info("Got an exception in DE: {}".format(str(e))) logger.info("Got an exception in DE: {}".format(str(e)))
@ -144,12 +146,12 @@ def test_to_type_04():
# Generate dataset # Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
transforms = [ transforms = [
py_vision.Decode(), vision.Decode(True),
py_vision.ToTensor(), vision.ToTensor(),
# Note: if output_type is not explicitly given # Note: if output_type is not explicitly given
py_vision.ToType() vision.ToType()
] ]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms) transform = mindspore.dataset.transforms.transforms.Compose(transforms)
data = data.map(operations=transform, input_columns=["image"]) data = data.map(operations=transform, input_columns=["image"])
except Exception as e: except Exception as e:
logger.info("Got an exception in DE: {}".format(str(e))) logger.info("Got an exception in DE: {}".format(str(e)))
@ -167,18 +169,64 @@ def test_to_type_05():
# Generate dataset # Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
transforms = [ transforms = [
py_vision.Decode(), vision.Decode(True),
py_vision.ToTensor(), vision.ToTensor(),
# Note: if output_type is not explicitly given # Note: if output_type is not explicitly given
py_vision.ToType('invalid') vision.ToType('invalid')
] ]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms) transform = mindspore.dataset.transforms.transforms.Compose(transforms)
data = data.map(operations=transform, input_columns=["image"]) data = data.map(operations=transform, input_columns=["image"])
except Exception as e: except Exception as e:
logger.info("Got an exception in DE: {}".format(str(e))) logger.info("Got an exception in DE: {}".format(str(e)))
assert "data type" in str(e) assert "data type" in str(e)
def test_np_to_de():
"""
Feature: NumPy Type to DE Type conversion
Description: Test NumPy Type to DE Type conversion for all valid types
Expectation: Data results are correct
"""
assert nptype_to_detype(np.dtype("bool")) == cde.DataType("bool")
assert nptype_to_detype(np.dtype("int8")) == cde.DataType("int8")
assert nptype_to_detype(np.dtype("int16")) == cde.DataType("int16")
assert nptype_to_detype(np.dtype("int32")) == cde.DataType("int32")
assert nptype_to_detype(np.dtype("int64")) == cde.DataType("int64")
assert nptype_to_detype(np.dtype("int")) == cde.DataType("int64")
assert nptype_to_detype(np.dtype("uint8")) == cde.DataType("uint8")
assert nptype_to_detype(np.dtype("uint16")) == cde.DataType("uint16")
assert nptype_to_detype(np.dtype("uint32")) == cde.DataType("uint32")
assert nptype_to_detype(np.dtype("uint64")) == cde.DataType("uint64")
assert nptype_to_detype(np.dtype("float16")) == cde.DataType("float16")
assert nptype_to_detype(np.dtype("float32")) == cde.DataType("float32")
assert nptype_to_detype(np.dtype("float64")) == cde.DataType("float64")
assert nptype_to_detype(np.dtype("str")) == cde.DataType("string")
assert nptype_to_detype(bool) == cde.DataType("bool")
assert nptype_to_detype(np.int8) == cde.DataType("int8")
assert nptype_to_detype(np.int16) == cde.DataType("int16")
assert nptype_to_detype(np.int32) == cde.DataType("int32")
assert nptype_to_detype(np.int64) == cde.DataType("int64")
assert nptype_to_detype(int) == cde.DataType("int64")
assert nptype_to_detype(np.uint8) == cde.DataType("uint8")
assert nptype_to_detype(np.uint16) == cde.DataType("uint16")
assert nptype_to_detype(np.uint32) == cde.DataType("uint32")
assert nptype_to_detype(np.uint64) == cde.DataType("uint64")
assert nptype_to_detype(np.float16) == cde.DataType("float16")
assert nptype_to_detype(np.float32) == cde.DataType("float32")
assert nptype_to_detype(np.float64) == cde.DataType("float64")
assert nptype_to_detype(str) == cde.DataType("string")
if __name__ == "__main__": if __name__ == "__main__":
test_to_type_op() test_to_type_op()
test_to_type_01() test_to_type_01()
@ -186,3 +234,4 @@ if __name__ == "__main__":
test_to_type_03() test_to_type_03()
test_to_type_04() test_to_type_04()
test_to_type_05() test_to_type_05()
test_np_to_de()

View File

@ -19,10 +19,8 @@ import numpy as np
import mindspore.common.dtype as mstype import mindspore.common.dtype as mstype
import mindspore.dataset as ds import mindspore.dataset as ds
import mindspore.dataset.transforms.c_transforms as data_util import mindspore.dataset.transforms.transforms as data_trans
import mindspore.dataset.transforms.py_transforms import mindspore.dataset.vision.transforms as vision
import mindspore.dataset.vision.c_transforms as c_vision
import mindspore.dataset.vision.py_transforms as py_vision
from mindspore import log as logger from mindspore import log as logger
DATA_DIR = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"] DATA_DIR = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"]
@ -37,9 +35,9 @@ def test_type_cast():
# First dataset # First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
decode_op = c_vision.Decode() decode_op = vision.Decode()
type_cast_op = data_util.TypeCast(mstype.float32) type_cast_op = data_trans.TypeCast(mstype.float32)
ctrans = [decode_op, ctrans = [decode_op,
type_cast_op, type_cast_op,
@ -48,10 +46,10 @@ def test_type_cast():
data1 = data1.map(operations=ctrans, input_columns=["image"]) data1 = data1.map(operations=ctrans, input_columns=["image"])
# Second dataset # Second dataset
transforms = [py_vision.Decode(), transforms = [vision.Decode(True),
py_vision.ToTensor() vision.ToTensor()
] ]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms) transform = data_trans.Compose(transforms)
data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
data2 = data2.map(operations=transform, input_columns=["image"]) data2 = data2.map(operations=transform, input_columns=["image"])
@ -60,13 +58,13 @@ def test_type_cast():
data2.create_dict_iterator(num_epochs=1, output_numpy=True)): data2.create_dict_iterator(num_epochs=1, output_numpy=True)):
num_iter += 1 num_iter += 1
c_image = item1["image"] c_image = item1["image"]
py_image = (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8) image = (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
logger.info("shape of c_image: {}".format(c_image.shape)) logger.info("shape of c_image: {}".format(c_image.shape))
logger.info("shape of py_image: {}".format(py_image.shape)) logger.info("shape of image: {}".format(image.shape))
logger.info("dtype of c_image: {}".format(c_image.dtype)) logger.info("dtype of c_image: {}".format(c_image.dtype))
logger.info("dtype of py_image: {}".format(py_image.dtype)) logger.info("dtype of image: {}".format(image.dtype))
assert c_image.dtype == "float32" assert c_image.dtype == "float32"
@ -78,9 +76,9 @@ def test_type_cast_string():
# First dataset # First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
decode_op = c_vision.Decode() decode_op = vision.Decode()
type_cast_op = data_util.TypeCast(mstype.float16) type_cast_op = data_trans.TypeCast(mstype.float16)
ctrans = [decode_op, ctrans = [decode_op,
type_cast_op type_cast_op
@ -89,10 +87,10 @@ def test_type_cast_string():
data1 = data1.map(operations=ctrans, input_columns=["image"]) data1 = data1.map(operations=ctrans, input_columns=["image"])
# Second dataset # Second dataset
transforms = [py_vision.Decode(), transforms = [vision.Decode(True),
py_vision.ToTensor() vision.ToTensor()
] ]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms) transform = data_trans.Compose(transforms)
data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False) data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
data2 = data2.map(operations=transform, input_columns=["image"]) data2 = data2.map(operations=transform, input_columns=["image"])
@ -101,13 +99,13 @@ def test_type_cast_string():
data2.create_dict_iterator(num_epochs=1, output_numpy=True)): data2.create_dict_iterator(num_epochs=1, output_numpy=True)):
num_iter += 1 num_iter += 1
c_image = item1["image"] c_image = item1["image"]
py_image = (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8) image = (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
logger.info("shape of c_image: {}".format(c_image.shape)) logger.info("shape of c_image: {}".format(c_image.shape))
logger.info("shape of py_image: {}".format(py_image.shape)) logger.info("shape of image: {}".format(image.shape))
logger.info("dtype of c_image: {}".format(c_image.dtype)) logger.info("dtype of c_image: {}".format(c_image.dtype))
logger.info("dtype of py_image: {}".format(py_image.dtype)) logger.info("dtype of image: {}".format(image.dtype))
assert c_image.dtype == "float16" assert c_image.dtype == "float16"

View File

@ -19,9 +19,8 @@ import numpy as np
import pytest import pytest
import mindspore.dataset as ds import mindspore.dataset as ds
import mindspore.dataset.transforms.py_transforms import mindspore.dataset.transforms.transforms
import mindspore.dataset.vision.c_transforms as C import mindspore.dataset.vision.transforms as vision
import mindspore.dataset.vision.py_transforms as F
from mindspore import log as logger from mindspore import log as logger
from util import visualize_list, diff_mse from util import visualize_list, diff_mse
@ -36,13 +35,13 @@ def test_uniform_augment_callable(num_ops=2):
img = np.fromfile("../data/dataset/apple.jpg", dtype=np.uint8) img = np.fromfile("../data/dataset/apple.jpg", dtype=np.uint8)
logger.info("Image.type: {}, Image.shape: {}".format(type(img), img.shape)) logger.info("Image.type: {}, Image.shape: {}".format(type(img), img.shape))
decode_op = C.Decode() decode_op = vision.Decode()
img = decode_op(img) img = decode_op(img)
assert img.shape == (2268, 4032, 3) assert img.shape == (2268, 4032, 3)
transforms_ua = [C.RandomCrop(size=[400, 400], padding=[32, 32, 32, 32]), transforms_ua = [vision.RandomCrop(size=[400, 400], padding=[32, 32, 32, 32]),
C.RandomCrop(size=[400, 400], padding=[32, 32, 32, 32])] vision.RandomCrop(size=[400, 400], padding=[32, 32, 32, 32])]
uni_aug = C.UniformAugment(transforms=transforms_ua, num_ops=num_ops) uni_aug = vision.UniformAugment(transforms=transforms_ua, num_ops=num_ops)
img = uni_aug(img) img = uni_aug(img)
assert img.shape == (2268, 4032, 3) or img.shape == (400, 400, 3) assert img.shape == (2268, 4032, 3) or img.shape == (400, 400, 3)
@ -56,9 +55,9 @@ def test_uniform_augment(plot=False, num_ops=2):
# Original Images # Original Images
data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False) data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
transforms_original = mindspore.dataset.transforms.py_transforms.Compose([F.Decode(), transforms_original = mindspore.dataset.transforms.transforms.Compose([vision.Decode(True),
F.Resize((224, 224)), vision.Resize((224, 224)),
F.ToTensor()]) vision.ToTensor()])
ds_original = data_set.map(operations=transforms_original, input_columns="image") ds_original = data_set.map(operations=transforms_original, input_columns="image")
@ -75,19 +74,19 @@ def test_uniform_augment(plot=False, num_ops=2):
# UniformAugment Images # UniformAugment Images
data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False) data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
transform_list = [F.RandomRotation(45), transform_list = [vision.RandomRotation(45),
F.RandomColor(), vision.RandomColor(),
F.RandomSharpness(), vision.RandomSharpness(),
F.Invert(), vision.Invert(),
F.AutoContrast(), vision.AutoContrast(),
F.Equalize()] vision.Equalize()]
transforms_ua = \ transforms_ua = \
mindspore.dataset.transforms.py_transforms.Compose([F.Decode(), mindspore.dataset.transforms.transforms.Compose([vision.Decode(True),
F.Resize((224, 224)), vision.Resize((224, 224)),
F.UniformAugment(transforms=transform_list, vision.UniformAugment(transforms=transform_list,
num_ops=num_ops), num_ops=num_ops),
F.ToTensor()]) vision.ToTensor()])
ds_ua = data_set.map(operations=transforms_ua, input_columns="image") ds_ua = data_set.map(operations=transforms_ua, input_columns="image")
@ -120,8 +119,8 @@ def test_cpp_uniform_augment(plot=False, num_ops=2):
# Original Images # Original Images
data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False) data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
transforms_original = [C.Decode(), C.Resize(size=[224, 224]), transforms_original = [vision.Decode(), vision.Resize(size=[224, 224]),
F.ToTensor()] vision.ToTensor()]
ds_original = data_set.map(operations=transforms_original, input_columns="image") ds_original = data_set.map(operations=transforms_original, input_columns="image")
@ -137,17 +136,17 @@ def test_cpp_uniform_augment(plot=False, num_ops=2):
# UniformAugment Images # UniformAugment Images
data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False) data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
transforms_ua = [C.RandomCrop(size=[224, 224], padding=[32, 32, 32, 32]), transforms_ua = [vision.RandomCrop(size=[224, 224], padding=[32, 32, 32, 32]),
C.RandomHorizontalFlip(), vision.RandomHorizontalFlip(),
C.RandomVerticalFlip(), vision.RandomVerticalFlip(),
C.RandomColorAdjust(), vision.RandomColorAdjust(),
C.RandomRotation(degrees=45)] vision.RandomRotation(degrees=45)]
uni_aug = C.UniformAugment(transforms=transforms_ua, num_ops=num_ops) uni_aug = vision.UniformAugment(transforms=transforms_ua, num_ops=num_ops)
transforms_all = [C.Decode(), C.Resize(size=[224, 224]), transforms_all = [vision.Decode(), vision.Resize(size=[224, 224]),
uni_aug, uni_aug,
F.ToTensor()] vision.ToTensor()]
ds_ua = data_set.map(operations=transforms_all, input_columns="image", num_parallel_workers=1) ds_ua = data_set.map(operations=transforms_all, input_columns="image", num_parallel_workers=1)
@ -170,21 +169,21 @@ def test_cpp_uniform_augment(plot=False, num_ops=2):
logger.info("MSE= {}".format(str(np.mean(mse)))) logger.info("MSE= {}".format(str(np.mean(mse))))
def test_cpp_uniform_augment_exception_pyops(num_ops=2): def skip_test_cpp_uniform_augment_exception_pyops(num_ops=2):
""" """
Test UniformAugment invalid op in operations Test UniformAugment invalid op in operations
""" """
logger.info("Test CPP UniformAugment invalid OP exception") logger.info("Test CPP UniformAugment invalid OP exception")
transforms_ua = [C.RandomCrop(size=[224, 224], padding=[32, 32, 32, 32]), transforms_ua = [vision.RandomCrop(size=[224, 224], padding=[32, 32, 32, 32]),
C.RandomHorizontalFlip(), vision.RandomHorizontalFlip(),
C.RandomVerticalFlip(), vision.RandomVerticalFlip(),
C.RandomColorAdjust(), vision.RandomColorAdjust(),
C.RandomRotation(degrees=45), vision.RandomRotation(degrees=45),
F.Invert()] vision.Invert()]
with pytest.raises(TypeError) as e: with pytest.raises(TypeError) as e:
C.UniformAugment(transforms=transforms_ua, num_ops=num_ops) vision.UniformAugment(transforms=transforms_ua, num_ops=num_ops)
logger.info("Got an exception in DE: {}".format(str(e))) logger.info("Got an exception in DE: {}".format(str(e)))
assert "Type of Transforms[5] must be c_transform" in str(e.value) assert "Type of Transforms[5] must be c_transform" in str(e.value)
@ -196,14 +195,14 @@ def test_cpp_uniform_augment_exception_large_numops(num_ops=6):
""" """
logger.info("Test CPP UniformAugment invalid large num_ops exception") logger.info("Test CPP UniformAugment invalid large num_ops exception")
transforms_ua = [C.RandomCrop(size=[224, 224], padding=[32, 32, 32, 32]), transforms_ua = [vision.RandomCrop(size=[224, 224], padding=[32, 32, 32, 32]),
C.RandomHorizontalFlip(), vision.RandomHorizontalFlip(),
C.RandomVerticalFlip(), vision.RandomVerticalFlip(),
C.RandomColorAdjust(), vision.RandomColorAdjust(),
C.RandomRotation(degrees=45)] vision.RandomRotation(degrees=45)]
try: try:
_ = C.UniformAugment(transforms=transforms_ua, num_ops=num_ops) _ = vision.UniformAugment(transforms=transforms_ua, num_ops=num_ops)
except Exception as e: except Exception as e:
logger.info("Got an exception in DE: {}".format(str(e))) logger.info("Got an exception in DE: {}".format(str(e)))
@ -216,14 +215,14 @@ def test_cpp_uniform_augment_exception_nonpositive_numops(num_ops=0):
""" """
logger.info("Test CPP UniformAugment invalid non-positive num_ops exception") logger.info("Test CPP UniformAugment invalid non-positive num_ops exception")
transforms_ua = [C.RandomCrop(size=[224, 224], padding=[32, 32, 32, 32]), transforms_ua = [vision.RandomCrop(size=[224, 224], padding=[32, 32, 32, 32]),
C.RandomHorizontalFlip(), vision.RandomHorizontalFlip(),
C.RandomVerticalFlip(), vision.RandomVerticalFlip(),
C.RandomColorAdjust(), vision.RandomColorAdjust(),
C.RandomRotation(degrees=45)] vision.RandomRotation(degrees=45)]
try: try:
_ = C.UniformAugment(transforms=transforms_ua, num_ops=num_ops) _ = vision.UniformAugment(transforms=transforms_ua, num_ops=num_ops)
except Exception as e: except Exception as e:
logger.info("Got an exception in DE: {}".format(str(e))) logger.info("Got an exception in DE: {}".format(str(e)))
@ -236,14 +235,14 @@ def test_cpp_uniform_augment_exception_float_numops(num_ops=2.5):
""" """
logger.info("Test CPP UniformAugment invalid float num_ops exception") logger.info("Test CPP UniformAugment invalid float num_ops exception")
transforms_ua = [C.RandomCrop(size=[224, 224], padding=[32, 32, 32, 32]), transforms_ua = [vision.RandomCrop(size=[224, 224], padding=[32, 32, 32, 32]),
C.RandomHorizontalFlip(), vision.RandomHorizontalFlip(),
C.RandomVerticalFlip(), vision.RandomVerticalFlip(),
C.RandomColorAdjust(), vision.RandomColorAdjust(),
C.RandomRotation(degrees=45)] vision.RandomRotation(degrees=45)]
try: try:
_ = C.UniformAugment(transforms=transforms_ua, num_ops=num_ops) _ = vision.UniformAugment(transforms=transforms_ua, num_ops=num_ops)
except Exception as e: except Exception as e:
logger.info("Got an exception in DE: {}".format(str(e))) logger.info("Got an exception in DE: {}".format(str(e)))
@ -261,10 +260,10 @@ def test_cpp_uniform_augment_random_crop_badinput(num_ops=1):
transforms_ua = [ transforms_ua = [
# Note: crop size [224, 224] > image size [32, 32] # Note: crop size [224, 224] > image size [32, 32]
C.RandomCrop(size=[224, 224]), vision.RandomCrop(size=[224, 224]),
C.RandomHorizontalFlip() vision.RandomHorizontalFlip()
] ]
uni_aug = C.UniformAugment(transforms=transforms_ua, num_ops=num_ops) uni_aug = vision.UniformAugment(transforms=transforms_ua, num_ops=num_ops)
ds1 = ds1.map(operations=uni_aug, input_columns="image") ds1 = ds1.map(operations=uni_aug, input_columns="image")
# apply DatasetOps # apply DatasetOps

View File

@ -18,7 +18,7 @@ Testing unique op in DE
import numpy as np import numpy as np
import mindspore.dataset as ds import mindspore.dataset as ds
import mindspore.dataset.transforms.c_transforms as ops import mindspore.dataset.transforms.transforms as ops
def compare(array, res, idx, cnt): def compare(array, res, idx, cnt):

View File

@ -18,7 +18,7 @@ Testing VerticalFlip Python API
import cv2 import cv2
import mindspore.dataset as ds import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as c_vision import mindspore.dataset.vision.transforms as vision
from mindspore import log as logger from mindspore import log as logger
from util import visualize_image, diff_mse from util import visualize_image, diff_mse
@ -30,14 +30,14 @@ IMAGE_FILE = "../data/dataset/apple.jpg"
def test_vertical_flip_pipeline(plot=False): def test_vertical_flip_pipeline(plot=False):
""" """
Test VerticalFlip of c_transforms Test VerticalFlip of C implementation
""" """
logger.info("test_vertical_flip_pipeline") logger.info("test_vertical_flip_pipeline")
# First dataset # First dataset
dataset1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, shuffle=False) dataset1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, shuffle=False)
decode_op = c_vision.Decode() decode_op = vision.Decode()
vertical_flip_op = c_vision.VerticalFlip() vertical_flip_op = vision.VerticalFlip()
dataset1 = dataset1.map(operations=decode_op, input_columns=["image"]) dataset1 = dataset1.map(operations=decode_op, input_columns=["image"])
dataset1 = dataset1.map(operations=vertical_flip_op, input_columns=["image"]) dataset1 = dataset1.map(operations=vertical_flip_op, input_columns=["image"])
@ -68,7 +68,7 @@ def test_vertical_flip_eager():
logger.info("test_vertical_flip_eager") logger.info("test_vertical_flip_eager")
img = cv2.imread(IMAGE_FILE) img = cv2.imread(IMAGE_FILE)
img_ms = c_vision.VerticalFlip()(img) img_ms = vision.VerticalFlip()(img)
img_cv = cv2.flip(img, 0) img_cv = cv2.flip(img, 0)
mse = diff_mse(img_ms, img_cv) mse = diff_mse(img_ms, img_cv)
assert mse == 0 assert mse == 0