!33807 [MD] Transform Unification Feature - Phase 2A - test_[a-q]*.py Python UT Updates

Merge pull request !33807 from cathwong/ckw_xtranuni_phase2a_uts
This commit is contained in:
i-robot 2022-05-05 12:43:58 +00:00 committed by Gitee
commit fddb926dd5
No known key found for this signature in database
GPG Key ID: 173E9B9CA92EEF8F
90 changed files with 1080 additions and 960 deletions

Binary file not shown.

View File

@ -20,9 +20,8 @@ from numpy.testing import assert_allclose
import PIL
import mindspore.dataset as ds
import mindspore.dataset.transforms.py_transforms
import mindspore.dataset.vision.py_transforms as F
import mindspore.dataset.vision.c_transforms as C
import mindspore.dataset.transforms.transforms
import mindspore.dataset.vision.transforms as vision
from mindspore import log as logger
DATA_DIR = "../data/dataset/testImageNetData/train/"
@ -44,20 +43,20 @@ def generate_numpy_random_rgb(shape):
def test_adjust_gamma_c_eager():
"""
Feature: AdjustGamma op
Description: Test eager support for AdjustGamma C++ op
Description: Test eager support for AdjustGamma C implementation
Expectation: Receive non-None output image from op
"""
# Eager 3-channel
rgb_flat = generate_numpy_random_rgb((64, 3)).astype(np.float32)
img_in = rgb_flat.reshape((8, 8, 3))
adjustgamma_op = C.AdjustGamma(10, 1)
adjustgamma_op = vision.AdjustGamma(10, 1)
img_out = adjustgamma_op(img_in)
assert img_out is not None
img_in2 = PIL.Image.open("../data/dataset/apple.jpg").convert("RGB")
adjustgamma_op2 = C.AdjustGamma(10, 1)
adjustgamma_op2 = vision.AdjustGamma(10, 1)
img_out2 = adjustgamma_op2(img_in2)
assert img_out2 is not None
@ -65,20 +64,20 @@ def test_adjust_gamma_c_eager():
def test_adjust_gamma_py_eager():
"""
Feature: AdjustGamma op
Description: Test eager support for AdjustGamma Python op
Description: Test eager support for AdjustGamma Python implementation
Expectation: Receive non-None output image from op
"""
# Eager 3-channel
rgb_flat = generate_numpy_random_rgb((64, 3)).astype(np.uint8)
img_in = PIL.Image.fromarray(rgb_flat.reshape((8, 8, 3)))
adjustgamma_op = F.AdjustGamma(10, 1)
adjustgamma_op = vision.AdjustGamma(10, 1)
img_out = adjustgamma_op(img_in)
assert img_out is not None
img_in2 = PIL.Image.open("../data/dataset/apple.jpg").convert("RGB")
adjustgamma_op2 = F.AdjustGamma(10, 1)
adjustgamma_op2 = vision.AdjustGamma(10, 1)
img_out2 = adjustgamma_op2(img_in2)
assert img_out2 is not None
@ -88,7 +87,7 @@ def test_adjust_gamma_c_eager_gray():
rgb_flat = generate_numpy_random_rgb((64, 1)).astype(np.float32)
img_in = rgb_flat.reshape((8, 8))
adjustgamma_op = C.AdjustGamma(10, 1)
adjustgamma_op = vision.AdjustGamma(10, 1)
img_out = adjustgamma_op(img_in)
assert img_out is not None
@ -98,32 +97,34 @@ def test_adjust_gamma_py_eager_gray():
rgb_flat = generate_numpy_random_rgb((64, 1)).astype(np.uint8)
img_in = PIL.Image.fromarray(rgb_flat.reshape((8, 8)))
adjustgamma_op = F.AdjustGamma(10, 1)
adjustgamma_op = vision.AdjustGamma(10, 1)
img_out = adjustgamma_op(img_in)
assert img_out is not None
def test_adjust_gamma_invalid_gamma_param_c():
"""
Test AdjustGamma C Op with invalid ignore parameter
Test AdjustGamma C implementation with invalid ignore parameter
"""
logger.info("Test AdjustGamma C Op with invalid ignore parameter")
logger.info("Test AdjustGamma C implementation with invalid ignore parameter")
try:
data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
data_set = data_set.map(operations=[C.Decode(), C.Resize((224, 224)), lambda img: np.array(img[:, :, 0])],
input_columns=["image"])
data_set = data_set.map(
operations=[vision.Decode(), vision.Resize((224, 224)), lambda img: np.array(img[:, :, 0])],
input_columns=["image"])
# invalid gamma
data_set = data_set.map(operations=C.AdjustGamma(gamma=-10.0, gain=1.0),
data_set = data_set.map(operations=vision.AdjustGamma(gamma=-10.0, gain=1.0),
input_columns="image")
except ValueError as error:
logger.info("Got an exception in AdjustGamma: {}".format(str(error)))
assert "Input is not within the required interval of " in str(error)
try:
data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
data_set = data_set.map(operations=[C.Decode(), C.Resize((224, 224)), lambda img: np.array(img[:, :, 0])],
input_columns=["image"])
data_set = data_set.map(
operations=[vision.Decode(), vision.Resize((224, 224)), lambda img: np.array(img[:, :, 0])],
input_columns=["image"])
# invalid gamma
data_set = data_set.map(operations=C.AdjustGamma(gamma=[1, 2], gain=1.0),
data_set = data_set.map(operations=vision.AdjustGamma(gamma=[1, 2], gain=1.0),
input_columns="image")
except TypeError as error:
logger.info("Got an exception in AdjustGamma: {}".format(str(error)))
@ -132,16 +133,16 @@ def test_adjust_gamma_invalid_gamma_param_c():
def test_adjust_gamma_invalid_gamma_param_py():
"""
Test AdjustGamma python Op with invalid ignore parameter
Test AdjustGamma Python implementation with invalid ignore parameter
"""
logger.info("Test AdjustGamma python Op with invalid ignore parameter")
logger.info("Test AdjustGamma Python implementation with invalid ignore parameter")
try:
data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
trans = mindspore.dataset.transforms.py_transforms.Compose([
F.Decode(),
F.Resize((224, 224)),
F.AdjustGamma(gamma=-10.0),
F.ToTensor()
trans = mindspore.dataset.transforms.transforms.Compose([
vision.Decode(True),
vision.Resize((224, 224)),
vision.AdjustGamma(gamma=-10.0),
vision.ToTensor()
])
data_set = data_set.map(operations=[trans], input_columns=["image"])
except ValueError as error:
@ -149,11 +150,11 @@ def test_adjust_gamma_invalid_gamma_param_py():
assert "Input is not within the required interval of " in str(error)
try:
data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
trans = mindspore.dataset.transforms.py_transforms.Compose([
F.Decode(),
F.Resize((224, 224)),
F.AdjustGamma(gamma=[1, 2]),
F.ToTensor()
trans = mindspore.dataset.transforms.transforms.Compose([
vision.Decode(True),
vision.Resize((224, 224)),
vision.AdjustGamma(gamma=[1, 2]),
vision.ToTensor()
])
data_set = data_set.map(operations=[trans], input_columns=["image"])
except TypeError as error:
@ -163,15 +164,16 @@ def test_adjust_gamma_invalid_gamma_param_py():
def test_adjust_gamma_invalid_gain_param_c():
"""
Test AdjustGamma C Op with invalid gain parameter
Test AdjustGamma C implementation with invalid gain parameter
"""
logger.info("Test AdjustGamma C Op with invalid gain parameter")
logger.info("Test AdjustGamma C implementation with invalid gain parameter")
try:
data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
data_set = data_set.map(operations=[C.Decode(), C.Resize((224, 224)), lambda img: np.array(img[:, :, 0])],
input_columns=["image"])
data_set = data_set.map(
operations=[vision.Decode(), vision.Resize((224, 224)), lambda img: np.array(img[:, :, 0])],
input_columns=["image"])
# invalid gain
data_set = data_set.map(operations=C.AdjustGamma(gamma=10.0, gain=[1, 10]),
data_set = data_set.map(operations=vision.AdjustGamma(gamma=10.0, gain=[1, 10]),
input_columns="image")
except TypeError as error:
logger.info("Got an exception in AdjustGamma: {}".format(str(error)))
@ -180,16 +182,16 @@ def test_adjust_gamma_invalid_gain_param_c():
def test_adjust_gamma_invalid_gain_param_py():
"""
Test AdjustGamma python Op with invalid gain parameter
Test AdjustGamma Python implementation with invalid gain parameter
"""
logger.info("Test AdjustGamma python Op with invalid gain parameter")
logger.info("Test AdjustGamma Python implementation with invalid gain parameter")
try:
data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
trans = mindspore.dataset.transforms.py_transforms.Compose([
F.Decode(),
F.Resize((224, 224)),
F.AdjustGamma(gamma=10.0, gain=[1, 10]),
F.ToTensor()
trans = mindspore.dataset.transforms.transforms.Compose([
vision.Decode(True),
vision.Resize((224, 224)),
vision.AdjustGamma(gamma=10.0, gain=[1, 10]),
vision.ToTensor()
])
data_set = data_set.map(operations=[trans], input_columns=["image"])
except TypeError as error:
@ -199,11 +201,11 @@ def test_adjust_gamma_invalid_gain_param_py():
def test_adjust_gamma_pipeline_c():
"""
Test AdjustGamma C Op Pipeline
Test AdjustGamma C implementation Pipeline
"""
# First dataset
transforms1 = [C.Decode(), C.Resize([64, 64])]
transforms1 = mindspore.dataset.transforms.py_transforms.Compose(
transforms1 = [vision.Decode(), vision.Resize([64, 64])]
transforms1 = mindspore.dataset.transforms.transforms.Compose(
transforms1)
ds1 = ds.TFRecordDataset(DATA_DIR_2,
SCHEMA_DIR,
@ -213,11 +215,11 @@ def test_adjust_gamma_pipeline_c():
# Second dataset
transforms2 = [
C.Decode(),
C.Resize([64, 64]),
C.AdjustGamma(1.0, 1.0)
vision.Decode(),
vision.Resize([64, 64]),
vision.AdjustGamma(1.0, 1.0)
]
transform2 = mindspore.dataset.transforms.py_transforms.Compose(
transform2 = mindspore.dataset.transforms.transforms.Compose(
transforms2)
ds2 = ds.TFRecordDataset(DATA_DIR_2,
SCHEMA_DIR,
@ -240,11 +242,11 @@ def test_adjust_gamma_pipeline_c():
def test_adjust_gamma_pipeline_py():
"""
Test AdjustGamma python Op Pipeline
Test AdjustGamma Python implementation Pipeline
"""
# First dataset
transforms1 = [F.Decode(), F.Resize([64, 64]), F.ToTensor()]
transforms1 = mindspore.dataset.transforms.py_transforms.Compose(
transforms1 = [vision.Decode(True), vision.Resize([64, 64]), vision.ToTensor()]
transforms1 = mindspore.dataset.transforms.transforms.Compose(
transforms1)
ds1 = ds.TFRecordDataset(DATA_DIR_2,
SCHEMA_DIR,
@ -254,12 +256,12 @@ def test_adjust_gamma_pipeline_py():
# Second dataset
transforms2 = [
F.Decode(),
F.Resize([64, 64]),
F.AdjustGamma(1.0, 1.0),
F.ToTensor()
vision.Decode(True),
vision.Resize([64, 64]),
vision.AdjustGamma(1.0, 1.0),
vision.ToTensor()
]
transform2 = mindspore.dataset.transforms.py_transforms.Compose(
transform2 = mindspore.dataset.transforms.transforms.Compose(
transforms2)
ds2 = ds.TFRecordDataset(DATA_DIR_2,
SCHEMA_DIR,
@ -282,12 +284,11 @@ def test_adjust_gamma_pipeline_py():
def test_adjust_gamma_pipeline_py_gray():
"""
Test AdjustGamma python Op Pipeline 1-channel
Test AdjustGamma Python implementation Pipeline 1-channel
"""
# First dataset
transforms1 = [F.Decode(), F.Resize([64, 64]), F.Grayscale(), F.ToTensor()]
transforms1 = mindspore.dataset.transforms.py_transforms.Compose(
transforms1)
transforms1_list = [vision.Decode(True), vision.Resize([60, 60]), vision.Grayscale(), vision.ToTensor()]
transforms1 = mindspore.dataset.transforms.transforms.Compose(transforms1_list)
ds1 = ds.TFRecordDataset(DATA_DIR_2,
SCHEMA_DIR,
columns_list=["image"],
@ -295,15 +296,14 @@ def test_adjust_gamma_pipeline_py_gray():
ds1 = ds1.map(operations=transforms1, input_columns=["image"])
# Second dataset
transforms2 = [
F.Decode(),
F.Resize([64, 64]),
F.Grayscale(),
F.AdjustGamma(1.0, 1.0),
F.ToTensor()
transforms2_list = [
vision.Decode(True),
vision.Resize([60, 60]),
vision.Grayscale(),
vision.AdjustGamma(1.0, 1.0),
vision.ToTensor()
]
transform2 = mindspore.dataset.transforms.py_transforms.Compose(
transforms2)
transform2 = mindspore.dataset.transforms.transforms.Compose(transforms2_list)
ds2 = ds.TFRecordDataset(DATA_DIR_2,
SCHEMA_DIR,
columns_list=["image"],

0
tests/ut/python/dataset/test_angle.py Executable file → Normal file
View File

View File

@ -15,7 +15,7 @@
import numpy as np
import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as vision
import mindspore.dataset.vision.transforms as vision
from mindspore import log as logger
DATA_DIR = "../data/dataset/testPK/data"
@ -51,7 +51,7 @@ def test_apply_imagefolder_case():
data2 = ds.ImageFolderDataset(DATA_DIR, num_shards=4, shard_id=3)
decode_op = vision.Decode()
normalize_op = vision.Normalize([121.0, 115.0, 100.0], [70.0, 68.0, 71.0])
normalize_op = vision.Normalize([121.0, 115.0, 100.0], [70.0, 68.0, 71.0], True)
def dataset_fn(ds_):
ds_ = ds_.map(operations=decode_op)

View File

@ -18,7 +18,7 @@ Testing AutoAugment in DE
import numpy as np
import mindspore.dataset as ds
from mindspore.dataset.vision.c_transforms import Decode, AutoAugment, Resize
from mindspore.dataset.vision.transforms import Decode, AutoAugment, Resize
from mindspore.dataset.vision.utils import AutoAugmentPolicy, Inter
from mindspore import log as logger
from util import visualize_image, visualize_list, diff_mse

View File

@ -17,9 +17,8 @@ Testing AutoContrast op in DE
"""
import numpy as np
import mindspore.dataset as ds
import mindspore.dataset.transforms.py_transforms
import mindspore.dataset.vision.py_transforms as F
import mindspore.dataset.vision.c_transforms as C
import mindspore.dataset.transforms.transforms
import mindspore.dataset.vision.transforms as vision
from mindspore import log as logger
from util import visualize_list, visualize_one_channel_dataset, diff_mse, save_and_check_md5
@ -33,14 +32,14 @@ def test_auto_contrast_py(plot=False):
"""
Test AutoContrast
"""
logger.info("Test AutoContrast Python Op")
logger.info("Test AutoContrast Python implementation")
# Original Images
data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
transforms_original = mindspore.dataset.transforms.py_transforms.Compose([F.Decode(),
F.Resize((224, 224)),
F.ToTensor()])
transforms_original = mindspore.dataset.transforms.transforms.Compose([vision.Decode(True),
vision.Resize((224, 224)),
vision.ToTensor()])
ds_original = data_set.map(operations=transforms_original, input_columns="image")
@ -58,10 +57,10 @@ def test_auto_contrast_py(plot=False):
data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
transforms_auto_contrast = \
mindspore.dataset.transforms.py_transforms.Compose([F.Decode(),
F.Resize((224, 224)),
F.AutoContrast(cutoff=10.0, ignore=[10, 20]),
F.ToTensor()])
mindspore.dataset.transforms.transforms.Compose([vision.Decode(True),
vision.Resize((224, 224)),
vision.AutoContrast(cutoff=10.0, ignore=[10, 20]),
vision.ToTensor()])
ds_auto_contrast = data_set.map(operations=transforms_auto_contrast, input_columns="image")
@ -91,18 +90,18 @@ def test_auto_contrast_py(plot=False):
def test_auto_contrast_c(plot=False):
"""
Test AutoContrast C Op
Test AutoContrast C implementation
"""
logger.info("Test AutoContrast C Op")
logger.info("Test AutoContrast C implementation")
# AutoContrast Images
data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
data_set = data_set.map(operations=[C.Decode(), C.Resize((224, 224))], input_columns=["image"])
python_op = F.AutoContrast(cutoff=10.0, ignore=[10, 20])
c_op = C.AutoContrast(cutoff=10.0, ignore=[10, 20])
transforms_op = mindspore.dataset.transforms.py_transforms.Compose([lambda img: F.ToPIL()(img.astype(np.uint8)),
python_op,
np.array])
data_set = data_set.map(operations=[vision.Decode(), vision.Resize((224, 224))], input_columns=["image"])
python_op = vision.AutoContrast(cutoff=10.0, ignore=[10, 20])
c_op = vision.AutoContrast(cutoff=10.0, ignore=[10, 20])
transforms_op = mindspore.dataset.transforms.transforms.Compose([lambda img: vision.ToPIL()(img.astype(np.uint8)),
python_op,
np.array])
ds_auto_contrast_py = data_set.map(operations=transforms_op, input_columns="image")
@ -117,7 +116,7 @@ def test_auto_contrast_c(plot=False):
axis=0)
data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
data_set = data_set.map(operations=[C.Decode(), C.Resize((224, 224))], input_columns=["image"])
data_set = data_set.map(operations=[vision.Decode(), vision.Resize((224, 224))], input_columns=["image"])
ds_auto_contrast_c = data_set.map(operations=c_op, input_columns="image")
@ -148,19 +147,19 @@ def test_auto_contrast_c(plot=False):
def test_auto_contrast_one_channel_c(plot=False):
"""
Test AutoContrast C op with one channel
Test AutoContrast C implementation with one channel
"""
logger.info("Test AutoContrast C Op With One Channel Images")
logger.info("Test AutoContrast C implementation With One Channel Images")
# AutoContrast Images
data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
data_set = data_set.map(operations=[C.Decode(), C.Resize((224, 224))], input_columns=["image"])
python_op = F.AutoContrast()
c_op = C.AutoContrast()
# not using F.ToTensor() since it converts to floats
transforms_op = mindspore.dataset.transforms.py_transforms.Compose(
data_set = data_set.map(operations=[vision.Decode(), vision.Resize((224, 224))], input_columns=["image"])
python_op = vision.AutoContrast()
c_op = vision.AutoContrast()
# not using vision.ToTensor() since it converts to floats
transforms_op = mindspore.dataset.transforms.transforms.Compose(
[lambda img: (np.array(img)[:, :, 0]).astype(np.uint8),
F.ToPIL(),
vision.ToPIL(),
python_op,
np.array])
@ -177,7 +176,7 @@ def test_auto_contrast_one_channel_c(plot=False):
axis=0)
data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
data_set = data_set.map(operations=[C.Decode(), C.Resize((224, 224)), lambda img: np.array(img[:, :, 0])],
data_set = data_set.map(operations=[vision.Decode(), vision.Resize((224, 224)), lambda img: np.array(img[:, :, 0])],
input_columns=["image"])
ds_auto_contrast_c = data_set.map(operations=c_op, input_columns="image")
@ -205,11 +204,11 @@ def test_auto_contrast_one_channel_c(plot=False):
def test_auto_contrast_mnist_c(plot=False):
"""
Test AutoContrast C op with MNIST dataset (Grayscale images)
Test AutoContrast C implementation with MNIST dataset (Grayscale images)
"""
logger.info("Test AutoContrast C Op With MNIST Images")
logger.info("Test AutoContrast C implementation With MNIST Images")
data_set = ds.MnistDataset(dataset_dir=MNIST_DATA_DIR, num_samples=2, shuffle=False)
ds_auto_contrast_c = data_set.map(operations=C.AutoContrast(cutoff=1, ignore=(0, 255)), input_columns="image")
ds_auto_contrast_c = data_set.map(operations=vision.AutoContrast(cutoff=1, ignore=(0, 255)), input_columns="image")
ds_orig = ds.MnistDataset(dataset_dir=MNIST_DATA_DIR, num_samples=2, shuffle=False)
images = []
@ -232,25 +231,25 @@ def test_auto_contrast_mnist_c(plot=False):
def test_auto_contrast_invalid_ignore_param_c():
"""
Test AutoContrast C Op with invalid ignore parameter
Test AutoContrast C implementation with invalid ignore parameter
"""
logger.info("Test AutoContrast C Op with invalid ignore parameter")
logger.info("Test AutoContrast C implementation with invalid ignore parameter")
try:
data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
data_set = data_set.map(operations=[C.Decode(),
C.Resize((224, 224)),
data_set = data_set.map(operations=[vision.Decode(),
vision.Resize((224, 224)),
lambda img: np.array(img[:, :, 0])], input_columns=["image"])
# invalid ignore
data_set = data_set.map(operations=C.AutoContrast(ignore=255.5), input_columns="image")
data_set = data_set.map(operations=vision.AutoContrast(ignore=255.5), input_columns="image")
except TypeError as error:
logger.info("Got an exception in DE: {}".format(str(error)))
assert "Argument ignore with value 255.5 is not of type" in str(error)
try:
data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
data_set = data_set.map(operations=[C.Decode(), C.Resize((224, 224)),
data_set = data_set.map(operations=[vision.Decode(), vision.Resize((224, 224)),
lambda img: np.array(img[:, :, 0])], input_columns=["image"])
# invalid ignore
data_set = data_set.map(operations=C.AutoContrast(ignore=(10, 100)), input_columns="image")
data_set = data_set.map(operations=vision.AutoContrast(ignore=(10, 100)), input_columns="image")
except TypeError as error:
logger.info("Got an exception in DE: {}".format(str(error)))
assert "Argument ignore with value (10,100) is not of type" in str(error)
@ -258,26 +257,26 @@ def test_auto_contrast_invalid_ignore_param_c():
def test_auto_contrast_invalid_cutoff_param_c():
"""
Test AutoContrast C Op with invalid cutoff parameter
Test AutoContrast C implementation with invalid cutoff parameter
"""
logger.info("Test AutoContrast C Op with invalid cutoff parameter")
logger.info("Test AutoContrast C implementation with invalid cutoff parameter")
try:
data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
data_set = data_set.map(operations=[C.Decode(),
C.Resize((224, 224)),
data_set = data_set.map(operations=[vision.Decode(),
vision.Resize((224, 224)),
lambda img: np.array(img[:, :, 0])], input_columns=["image"])
# invalid ignore
data_set = data_set.map(operations=C.AutoContrast(cutoff=-10.0), input_columns="image")
data_set = data_set.map(operations=vision.AutoContrast(cutoff=-10.0), input_columns="image")
except ValueError as error:
logger.info("Got an exception in DE: {}".format(str(error)))
assert "Input cutoff is not within the required interval of [0, 50)." in str(error)
try:
data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
data_set = data_set.map(operations=[C.Decode(),
C.Resize((224, 224)),
data_set = data_set.map(operations=[vision.Decode(),
vision.Resize((224, 224)),
lambda img: np.array(img[:, :, 0])], input_columns=["image"])
# invalid ignore
data_set = data_set.map(operations=C.AutoContrast(cutoff=120.0), input_columns="image")
data_set = data_set.map(operations=vision.AutoContrast(cutoff=120.0), input_columns="image")
except ValueError as error:
logger.info("Got an exception in DE: {}".format(str(error)))
assert "Input cutoff is not within the required interval of [0, 50)." in str(error)
@ -285,27 +284,27 @@ def test_auto_contrast_invalid_cutoff_param_c():
def test_auto_contrast_invalid_ignore_param_py():
"""
Test AutoContrast python Op with invalid ignore parameter
Test AutoContrast Python implementation with invalid ignore parameter
"""
logger.info("Test AutoContrast python Op with invalid ignore parameter")
logger.info("Test AutoContrast Python implementation with invalid ignore parameter")
try:
data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
data_set = data_set.map(operations=[mindspore.dataset.transforms.py_transforms.Compose([F.Decode(),
F.Resize((224, 224)),
F.AutoContrast(
ignore=255.5),
F.ToTensor()])],
data_set = data_set.map(operations=[mindspore.dataset.transforms.transforms.Compose([vision.Decode(True),
vision.Resize((224, 224)),
vision.AutoContrast(
ignore=255.5),
vision.ToTensor()])],
input_columns=["image"])
except TypeError as error:
logger.info("Got an exception in DE: {}".format(str(error)))
assert "Argument ignore with value 255.5 is not of type" in str(error)
try:
data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
data_set = data_set.map(operations=[mindspore.dataset.transforms.py_transforms.Compose([F.Decode(),
F.Resize((224, 224)),
F.AutoContrast(
ignore=(10, 100)),
F.ToTensor()])],
data_set = data_set.map(operations=[mindspore.dataset.transforms.transforms.Compose([vision.Decode(True),
vision.Resize((224, 224)),
vision.AutoContrast(
ignore=(10, 100)),
vision.ToTensor()])],
input_columns=["image"])
except TypeError as error:
logger.info("Got an exception in DE: {}".format(str(error)))
@ -314,16 +313,16 @@ def test_auto_contrast_invalid_ignore_param_py():
def test_auto_contrast_invalid_cutoff_param_py():
"""
Test AutoContrast python Op with invalid cutoff parameter
Test AutoContrast Python implementation with invalid cutoff parameter
"""
logger.info("Test AutoContrast python Op with invalid cutoff parameter")
logger.info("Test AutoContrast Python implementation with invalid cutoff parameter")
try:
data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
data_set = data_set.map(operations=[mindspore.dataset.transforms.py_transforms.Compose([F.Decode(),
F.Resize((224, 224)),
F.AutoContrast(
cutoff=-10.0),
F.ToTensor()])],
data_set = data_set.map(operations=[mindspore.dataset.transforms.transforms.Compose([vision.Decode(True),
vision.Resize((224, 224)),
vision.AutoContrast(
cutoff=-10.0),
vision.ToTensor()])],
input_columns=["image"])
except ValueError as error:
logger.info("Got an exception in DE: {}".format(str(error)))
@ -331,10 +330,10 @@ def test_auto_contrast_invalid_cutoff_param_py():
try:
data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
data_set = data_set.map(
operations=[mindspore.dataset.transforms.py_transforms.Compose([F.Decode(),
F.Resize((224, 224)),
F.AutoContrast(cutoff=120.0),
F.ToTensor()])],
operations=[mindspore.dataset.transforms.transforms.Compose([vision.Decode(True),
vision.Resize((224, 224)),
vision.AutoContrast(cutoff=120.0),
vision.ToTensor()])],
input_columns=["image"])
except ValueError as error:
logger.info("Got an exception in DE: {}".format(str(error)))

View File

@ -21,8 +21,8 @@ import random
import numpy as np
import pytest
import mindspore.dataset as ds
import mindspore.dataset.transforms.c_transforms as c_transforms
import mindspore.dataset.vision.c_transforms as c_vision
import mindspore.dataset.transforms.transforms as transforms
import mindspore.dataset.vision.transforms as vision
MNIST_DATA_DIR = "../data/dataset/testMnistData"
DATA_DIR = "../data/dataset/testPK/data"
@ -123,7 +123,7 @@ class TestAutotuneSaveLoad:
ds.config.set_seed(1)
data1 = ds.MnistDataset(MNIST_DATA_DIR, num_samples=100)
one_hot_encode = c_transforms.OneHot(10) # num_classes is input argument
one_hot_encode = transforms.OneHot(10) # num_classes is input argument
data1 = data1.map(operations=one_hot_encode, input_columns="label")
data1 = data1.batch(batch_size=10, drop_remainder=True)
@ -169,8 +169,8 @@ class TestAutotuneSaveLoad:
# Dataset with offload activated.
dataset = ds.ImageFolderDataset(DATA_DIR, num_samples=8)
dataset = dataset.map(operations=[c_vision.Decode()], input_columns="image")
dataset = dataset.map(operations=[c_vision.HWC2CHW()], input_columns="image", offload=True)
dataset = dataset.map(operations=[vision.Decode()], input_columns="image")
dataset = dataset.map(operations=[vision.HWC2CHW()], input_columns="image", offload=True)
dataset = dataset.batch(8, drop_remainder=True)
for _ in dataset.create_tuple_iterator(num_epochs=1, output_numpy=True):
@ -205,7 +205,7 @@ class TestAutotuneSaveLoad:
ds.config.set_enable_autotune(True, str(tmp_path / at_final_json_filename))
data1 = ds.MnistDataset(MNIST_DATA_DIR, num_samples=100)
one_hot_encode = c_transforms.OneHot(10) # num_classes is input argument
one_hot_encode = transforms.OneHot(10) # num_classes is input argument
data1 = data1.map(operations=one_hot_encode, input_columns="label")
data1 = data1.batch(batch_size=10, drop_remainder=True)

View File

@ -19,7 +19,7 @@ Testing the bounding box augment op in DE
import numpy as np
import mindspore.log as logger
import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as c_vision
import mindspore.dataset.vision.transforms as c_vision
from util import visualize_with_bounding_boxes, InvalidBBoxType, check_bad_bbox, \
config_get_set_seed, config_get_set_num_parallel_workers, save_and_check_md5

View File

@ -15,8 +15,7 @@
import mindspore.common.dtype as mstype
import mindspore.dataset as ds
import mindspore.dataset.transforms.c_transforms as ops
import mindspore.dataset.transforms.py_transforms as py_ops
import mindspore.dataset.transforms.transforms as ops
def test_compose():
@ -39,7 +38,7 @@ def test_compose():
assert test_config([[1, 0]], [ops.Duplicate(), ops.Concatenate(), ops.Duplicate(), ops.Concatenate()]) == [
[1, 0] * 4]
# test one python transform followed by a C transform. type after oneHot is float (mixed use-case)
assert test_config([1, 0], [py_ops.OneHotOp(2), ops.TypeCast(mstype.int32)]) == [[0, 1], [1, 0]]
assert test_config([1, 0], [ops.OneHot(2), ops.TypeCast(mstype.int32)]) == [[0, 1], [1, 0]]
# test exceptions. compose, randomApply randomChoice use the same validator
assert "op_list[0] is neither a c_transform op" in test_config([1, 0], [1, ops.TypeCast(mstype.int32)])
# test empty op list

View File

@ -15,7 +15,7 @@
import mindspore.common.dtype as mstype
import mindspore.dataset as ds
import mindspore.dataset.transforms.c_transforms as ops
import mindspore.dataset.transforms.transforms as ops
def test_random_apply():

View File

@ -15,7 +15,7 @@
import mindspore.dataset as ds
import mindspore.dataset.transforms.c_transforms as ops
import mindspore.dataset.transforms.transforms as ops
def test_random_choice():

View File

@ -19,8 +19,7 @@ import os
import pytest
import numpy as np
import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as c_vision
import mindspore.dataset.vision.py_transforms as py_vision
import mindspore.dataset.vision.transforms as c_vision
from mindspore import log as logger
from util import save_and_check_md5
@ -441,7 +440,7 @@ def test_cache_map_failure7():
some_cache = ds.DatasetCache(session_id=session_id, size=0)
data = ds.GeneratorDataset(generator_1d, ["data"])
data = data.map(py_vision.not_random(lambda x: x), ["data"], cache=some_cache)
data = data.map(vision.not_random(lambda x: x), ["data"], cache=some_cache)
data = data.repeat(4)
with pytest.raises(RuntimeError) as e:

View File

@ -22,8 +22,7 @@ import pytest
import mindspore.common.dtype as mstype
import mindspore.dataset as ds
import mindspore.dataset.text as text
import mindspore.dataset.vision.c_transforms as c_vision
import mindspore.dataset.vision.py_transforms as py_vision
import mindspore.dataset.vision.transforms as c_vision
from mindspore import log as logger
DATA_DIR = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"]
@ -1690,7 +1689,7 @@ def test_cache_nomap_clue2():
some_cache = ds.DatasetCache(session_id=session_id, size=0)
ds1 = ds.CLUEDataset(CLUE_DATA_DIR, task='AFQMC', usage='train', num_samples=2)
ds1 = ds1.map(py_vision.not_random(lambda x: x), ["label"], cache=some_cache)
ds1 = ds1.map(vision.not_random(lambda x: x), ["label"], cache=some_cache)
num_epoch = 4
iter1 = ds1.create_dict_iterator(num_epochs=num_epoch, output_numpy=True)
@ -1767,7 +1766,7 @@ def test_cache_nomap_csv2():
ds1 = ds.CSVDataset(CSV_DATA_DIR, column_defaults=["1", "2", "3", "4"],
column_names=['col1', 'col2', 'col3', 'col4'], num_samples=2)
ds1 = ds1.map(py_vision.not_random(lambda x: x), ["col1"], cache=some_cache)
ds1 = ds1.map(vision.not_random(lambda x: x), ["col1"], cache=some_cache)
num_epoch = 4
iter1 = ds1.create_dict_iterator(num_epochs=num_epoch, output_numpy=True)
@ -2203,7 +2202,7 @@ def test_cache_nomap_pyfunc_lambda():
# This dataset has 12 records in it
data1 = ds.TFRecordDataset(PYFUNC_DATA_DIR, PYFUNC_SCHEMA_DIR, shuffle=False)
transforms = [py_vision.not_random(lambda x: x + x), py_vision.not_random(lambda x: x - 1)]
transforms = [vision.not_random(lambda x: x + x), vision.not_random(lambda x: x - 1)]
data1 = data1.map(operations=transforms, input_columns="col0", cache=some_cache)
num_iter = 0
@ -2244,7 +2243,7 @@ def test_cache_nomap_pyfunc_builtin():
some_cache = ds.DatasetCache(session_id=session_id, size=0)
# This dataset has 3 records in it only
ds1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"])
ds1 = ds1.map(operations=[py_vision.Decode(), py_vision.ToTensor()], input_columns=["image"], cache=some_cache)
ds1 = ds1.map(operations=[vision.Decode(), vision.ToTensor()], input_columns=["image"], cache=some_cache)
num_iter = 0
for _ in ds1.create_dict_iterator(num_epochs=1):
@ -2254,7 +2253,7 @@ def test_cache_nomap_pyfunc_builtin():
other_cache = ds.DatasetCache(session_id=session_id, size=0)
# This dataset has 3 records in it only
ds2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"])
ds2 = ds2.map(operations=[py_vision.Decode(), py_vision.RandomCrop(224), py_vision.ToTensor()],
ds2 = ds2.map(operations=[vision.Decode(), vision.RandomCrop(224), vision.ToTensor()],
input_columns=["image"], cache=other_cache)
with pytest.raises(RuntimeError) as e:
@ -2269,7 +2268,7 @@ def test_cache_nomap_pyfunc_builtin():
def test_cache_nomap_pyfunc_function():
"""
Test cache after map op with a python customized function.
Only allowed if the function is decorated with 'py_vision.not_random', otherwise an error will be raised.
Only allowed if the function is decorated with 'vision.not_random', otherwise an error will be raised.
Cache
|
@ -2278,7 +2277,7 @@ def test_cache_nomap_pyfunc_function():
TFRecord
"""
@py_vision.not_random
@vision.not_random
def not_random_func(x):
return np.ones(x.shape, dtype=x.dtype)

View File

@ -17,9 +17,8 @@ Testing CenterCrop op in DE
"""
import numpy as np
import mindspore.dataset as ds
import mindspore.dataset.transforms.py_transforms
import mindspore.dataset.vision.c_transforms as vision
import mindspore.dataset.vision.py_transforms as py_vision
import mindspore.dataset.transforms.transforms
import mindspore.dataset.vision.transforms as vision
from mindspore import log as logger
from util import diff_mse, visualize_list, save_and_check_md5
@ -91,11 +90,11 @@ def test_center_crop_comp(height=375, width=375, plot=False):
# Second dataset
data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
transforms = [
py_vision.Decode(),
py_vision.CenterCrop([height, width]),
py_vision.ToTensor()
vision.Decode(True),
vision.CenterCrop([height, width]),
vision.ToTensor()
]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms)
transform = mindspore.dataset.transforms.transforms.Compose(transforms)
data2 = data2.map(operations=transform, input_columns=["image"])
image_c_cropped = []
@ -120,13 +119,13 @@ def test_crop_grayscale(height=375, width=375):
# Note: image.transpose performs channel swap to allow py transforms to
# work with c transforms
transforms = [
py_vision.Decode(),
py_vision.Grayscale(1),
py_vision.ToTensor(),
vision.Decode(True),
vision.Grayscale(1),
vision.ToTensor(),
(lambda image: (image.transpose(1, 2, 0) * 255).astype(np.uint8))
]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms)
transform = mindspore.dataset.transforms.transforms.Compose(transforms)
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
data1 = data1.map(operations=transform, input_columns=["image"])

View File

@ -17,11 +17,8 @@ import numpy as np
import pytest
import mindspore.common.dtype as mstype
import mindspore.dataset as ds
import mindspore.dataset.transforms.c_transforms as c_transforms
import mindspore.dataset.transforms.py_transforms as py_transforms
import mindspore.dataset.vision.c_transforms as c_vision
import mindspore.dataset.vision.py_transforms as py_vision
import mindspore.dataset.transforms.transforms as transforms
import mindspore.dataset.vision.transforms as vision
from util import visualize_list, save_and_check_md5, config_get_set_seed, config_get_set_num_parallel_workers
@ -32,7 +29,7 @@ def test_compose():
"""
Test C++ and Python Compose Op
"""
ds.config.set_seed(0)
original_seed = config_get_set_seed(0)
def test_config(arr, op_list):
try:
@ -46,68 +43,71 @@ def test_compose():
return str(e)
# Test simple compose with only 1 op, this would generate a warning
assert test_config([[1, 0], [3, 4]], c_transforms.Compose([c_transforms.Fill(2)])) == [[2, 2], [2, 2]]
assert test_config([[1, 0], [3, 4]], transforms.Compose([transforms.Fill(2)])) == [[2, 2], [2, 2]]
# Test 1 column -> 2 columns -> 1 -> 2 -> 1
assert test_config([[1, 0]],
c_transforms.Compose(
[c_transforms.Duplicate(), c_transforms.Concatenate(), c_transforms.Duplicate(),
c_transforms.Concatenate()])) \
transforms.Compose(
[transforms.Duplicate(), transforms.Concatenate(), transforms.Duplicate(),
transforms.Concatenate()])) \
== [[1, 0] * 4]
# Test one Python transform followed by a C++ transform. Type after OneHot is a float (mixed use-case)
assert test_config([1, 0],
c_transforms.Compose([py_transforms.OneHotOp(2), c_transforms.TypeCast(mstype.int32)])) \
transforms.Compose([transforms.OneHot(2), transforms.TypeCast(mstype.int32)])) \
== [[0, 1], [1, 0]]
# Test exceptions.
with pytest.raises(TypeError) as error_info:
c_transforms.Compose([1, c_transforms.TypeCast(mstype.int32)])
transforms.Compose([1, transforms.TypeCast(mstype.int32)])
assert "op_list[0] is neither a c_transform op (TensorOperation) nor a callable pyfunc." in str(error_info.value)
# Test empty op list
with pytest.raises(ValueError) as error_info:
test_config([1, 0], c_transforms.Compose([]))
test_config([1, 0], transforms.Compose([]))
assert "op_list can not be empty." in str(error_info.value)
# Test Python compose op
assert test_config([1, 0], py_transforms.Compose([py_transforms.OneHotOp(2)])) == [[0, 1], [1, 0]]
assert test_config([1, 0], py_transforms.Compose([py_transforms.OneHotOp(2), (lambda x: x + x)])) == [[0, 2],
[2, 0]]
assert test_config([1, 0], transforms.Compose([transforms.OneHot(2)])) == [[0, 1], [1, 0]]
assert test_config([1, 0], transforms.Compose([transforms.OneHot(2), (lambda x: x + x)])) == [[0, 2],
[2, 0]]
# Test nested Python compose op
assert test_config([1, 0],
py_transforms.Compose([py_transforms.Compose([py_transforms.OneHotOp(2)]), (lambda x: x + x)])) \
transforms.Compose([transforms.Compose([transforms.OneHot(2)]), (lambda x: x + x)])) \
== [[0, 2], [2, 0]]
# Test passing a list of Python ops without Compose wrapper
# Test passing a list of Python implementations without Compose wrapper
assert test_config([1, 0],
[py_transforms.Compose([py_transforms.OneHotOp(2)]), (lambda x: x + x)]) \
[transforms.Compose([transforms.OneHot(2)]), (lambda x: x + x)]) \
== [[0, 2], [2, 0]]
assert test_config([1, 0], [py_transforms.OneHotOp(2), (lambda x: x + x)]) == [[0, 2], [2, 0]]
assert test_config([1, 0], [transforms.OneHot(2), (lambda x: x + x)]) == [[0, 2], [2, 0]]
# Test a non callable function
with pytest.raises(ValueError) as error_info:
py_transforms.Compose([1])
assert "transforms[0] is not callable." in str(error_info.value)
with pytest.raises(TypeError) as error_info:
transforms.Compose([1])
assert "op_list[0] is neither a c_transform op (TensorOperation) nor a callable pyfunc." in str(error_info.value)
# Test empty Python op list
# Test empty Python implementation list
with pytest.raises(ValueError) as error_info:
test_config([1, 0], py_transforms.Compose([]))
assert "transforms list is empty." in str(error_info.value)
test_config([1, 0], transforms.Compose([]))
assert "op_list can not be empty." in str(error_info.value)
# Pass in extra brackets
with pytest.raises(TypeError) as error_info:
py_transforms.Compose([(lambda x: x + x)])()
transforms.Compose([(lambda x: x + x)])()
assert "Compose was called without an image. Fix invocation (avoid it being invoked as Compose([...])())." in str(
error_info.value)
# Restore configuration
ds.config.set_seed(original_seed)
def test_lambdas():
"""
Test Multi Column Python Compose Op
"""
ds.config.set_seed(0)
original_seed = config_get_set_seed(0)
def test_config(arr, input_columns, output_cols, op_list):
data = ds.NumpySlicesDataset(arr, column_names=input_columns, shuffle=False)
@ -121,20 +121,23 @@ def test_lambdas():
arr = ([[1]], [[3]])
assert test_config(arr, ["col0", "col1"], ["a"], py_transforms.Compose([(lambda x, y: x)])) == [[1]]
assert test_config(arr, ["col0", "col1"], ["a"], py_transforms.Compose([lambda x, y: x, lambda x: x])) == [[1]]
assert test_config(arr, ["col0", "col1"], ["a"], transforms.Compose([(lambda x, y: x)])) == [[1]]
assert test_config(arr, ["col0", "col1"], ["a"], transforms.Compose([lambda x, y: x, lambda x: x])) == [[1]]
assert test_config(arr, ["col0", "col1"], ["a", "b"],
py_transforms.Compose([lambda x, y: x, lambda x: (x, x * 2)])) == \
transforms.Compose([lambda x, y: x, lambda x: (x, x * 2)])) == \
[[1], [2]]
assert test_config(arr, ["col0", "col1"], ["a", "b"],
[lambda x, y: (x, x + y), lambda x, y: (x, y * 2)]) == [[1], [8]]
# Restore configuration
ds.config.set_seed(original_seed)
def test_c_py_compose_transforms_module():
"""
Test combining Python and C++ transforms
"""
ds.config.set_seed(0)
original_seed = config_get_set_seed(0)
def test_config(arr, input_columns, output_cols, op_list):
data = ds.NumpySlicesDataset(arr, column_names=input_columns, shuffle=False)
@ -148,21 +151,24 @@ def test_c_py_compose_transforms_module():
arr = [1, 0]
assert test_config(arr, ["cols"], ["cols"],
[py_transforms.OneHotOp(2), c_transforms.Mask(c_transforms.Relational.EQ, 1)]) == \
[transforms.OneHot(2), transforms.Mask(transforms.Relational.EQ, 1)]) == \
[[False, True],
[True, False]]
assert test_config(arr, ["cols"], ["cols"],
[py_transforms.OneHotOp(2), (lambda x: x + x), c_transforms.Fill(1)]) \
[transforms.OneHot(2), (lambda x: x + x), transforms.Fill(1)]) \
== [[1, 1], [1, 1]]
assert test_config(arr, ["cols"], ["cols"],
[py_transforms.OneHotOp(2), (lambda x: x + x), c_transforms.Fill(1), (lambda x: x + x)]) \
[transforms.OneHot(2), (lambda x: x + x), transforms.Fill(1), (lambda x: x + x)]) \
== [[2, 2], [2, 2]]
assert test_config([[1, 3]], ["cols"], ["cols"],
[c_transforms.PadEnd([3], -1), (lambda x: x + x)]) \
[transforms.PadEnd([3], -1), (lambda x: x + x)]) \
== [[2, 6, -2]]
arr = ([[1]], [[3]])
assert test_config(arr, ["col0", "col1"], ["a"], [(lambda x, y: x + y), c_transforms.PadEnd([2], -1)]) == [[4, -1]]
assert test_config(arr, ["col0", "col1"], ["a"], [(lambda x, y: x + y), transforms.PadEnd([2], -1)]) == [[4, -1]]
# Restore configuration
ds.config.set_seed(original_seed)
def test_c_py_compose_vision_module(plot=False, run_golden=True):
@ -177,7 +183,7 @@ def test_c_py_compose_vision_module(plot=False, run_golden=True):
data1 = ds.ImageFolderDataset(dataset_dir=data_dir, shuffle=False)
data1 = data1.map(operations=op_list, input_columns=["image"])
data2 = ds.ImageFolderDataset(dataset_dir=data_dir, shuffle=False)
data2 = data2.map(operations=c_vision.Decode(), input_columns=["image"])
data2 = data2.map(operations=vision.Decode(), input_columns=["image"])
original_images = []
transformed_images = []
@ -193,23 +199,23 @@ def test_c_py_compose_vision_module(plot=False, run_golden=True):
if plot:
visualize_list(original_images, transformed_images)
test_config(op_list=[c_vision.Decode(),
py_vision.ToPIL(),
py_vision.Resize((224, 224)),
test_config(op_list=[vision.Decode(),
vision.ToPIL(),
vision.Resize((224, 224)),
np.array],
plot=plot, file_name="compose_c_py_1.npz")
test_config(op_list=[c_vision.Decode(),
c_vision.Resize((224, 244)),
py_vision.ToPIL(),
test_config(op_list=[vision.Decode(),
vision.Resize((224, 244)),
vision.ToPIL(),
np.array,
c_vision.Resize((24, 24))],
vision.Resize((24, 24))],
plot=plot, file_name="compose_c_py_2.npz")
test_config(op_list=[py_vision.Decode(),
py_vision.Resize((224, 224)),
test_config(op_list=[vision.Decode(True),
vision.Resize((224, 224)),
np.array,
c_vision.RandomColor()],
vision.RandomColor()],
plot=plot, file_name="compose_c_py_3.npz")
# Restore configuration
@ -217,46 +223,14 @@ def test_c_py_compose_vision_module(plot=False, run_golden=True):
ds.config.set_num_parallel_workers((original_num_parallel_workers))
def test_py_transforms_with_c_vision():
def test_vision_with_transforms():
"""
These examples will fail, as c_transform should not be used in py_transforms.Random(Apply/Choice/Order)
Feature: Data transforms and vision ops
Description: Test (Python implementation) vision operations with C++ implementation transforms operations
Expectation: Valid input succeeds. Invalid input fails.
"""
ds.config.set_seed(0)
def test_config(op_list):
data_dir = "../data/dataset/testImageNetData/train/"
data = ds.ImageFolderDataset(dataset_dir=data_dir, shuffle=False)
data = data.map(operations=op_list)
res = []
for i in data.create_dict_iterator(num_epochs=1, output_numpy=True):
for col_name in output_cols:
res.append(i[col_name].tolist())
return res
with pytest.raises(ValueError) as error_info:
test_config(py_transforms.RandomApply([c_vision.RandomResizedCrop(200)]))
assert "transforms[0] is not a py transforms." in str(error_info.value)
with pytest.raises(ValueError) as error_info:
test_config(py_transforms.RandomChoice([c_vision.RandomResizedCrop(200)]))
assert "transforms[0] is not a py transforms." in str(error_info.value)
with pytest.raises(ValueError) as error_info:
test_config(py_transforms.RandomOrder([np.array, c_vision.RandomResizedCrop(200)]))
assert "transforms[1] is not a py transforms." in str(error_info.value)
with pytest.raises(RuntimeError) as error_info:
test_config([py_transforms.OneHotOp(20, 0.1)])
assert "is smaller than the category number" in str(error_info.value)
def test_py_vision_with_c_transforms():
"""
Test combining Python vision operations with C++ transforms operations
"""
ds.config.set_seed(0)
original_seed = config_get_set_seed(0)
def test_config(op_list):
data_dir = "../data/dataset/testImageNetData/train/"
@ -269,9 +243,9 @@ def test_py_vision_with_c_transforms():
return transformed_images
# Test with Mask Op
output_arr = test_config([py_vision.Decode(),
py_vision.CenterCrop((2)), np.array,
c_transforms.Mask(c_transforms.Relational.GE, 100)])
output_arr = test_config([vision.Decode(True),
vision.CenterCrop((2)), np.array,
transforms.Mask(transforms.Relational.GE, 100)])
exp_arr = [np.array([[[True, False, False],
[True, False, False]],
@ -286,9 +260,9 @@ def test_py_vision_with_c_transforms():
np.testing.assert_array_equal(exp_a, output)
# Test with Fill Op
output_arr = test_config([py_vision.Decode(),
py_vision.CenterCrop((4)), np.array,
c_transforms.Fill(10)])
output_arr = test_config([vision.Decode(True),
vision.CenterCrop((4)), np.array,
transforms.Fill(10)])
exp_arr = [np.ones((4, 4, 3)) * 10] * 2
for exp_a, output in zip(exp_arr, output_arr):
@ -296,11 +270,14 @@ def test_py_vision_with_c_transforms():
# Test with Concatenate Op, which will raise an error since ConcatenateOp only supports rank 1 tensors.
with pytest.raises(RuntimeError) as error_info:
test_config([py_vision.Decode(),
py_vision.CenterCrop((2)), np.array,
c_transforms.Concatenate(0)])
test_config([vision.Decode(True),
vision.CenterCrop((2)), np.array,
transforms.Concatenate(0)])
assert "only 1D input supported" in str(error_info.value)
# Restore configuration
ds.config.set_seed(original_seed)
def test_compose_with_custom_function():
"""
@ -333,6 +310,5 @@ if __name__ == "__main__":
test_lambdas()
test_c_py_compose_transforms_module()
test_c_py_compose_vision_module(plot=True)
test_py_transforms_with_c_vision()
test_py_vision_with_c_transforms()
test_vision_with_transforms()
test_compose_with_custom_function()

View File

@ -16,9 +16,8 @@ import numpy as np
import mindspore.common.dtype as mstype
import mindspore.dataset as ds
import mindspore.dataset.transforms.c_transforms as C
import mindspore.dataset.transforms.py_transforms
import mindspore.dataset.vision.py_transforms as F
import mindspore.dataset.transforms.transforms as C
import mindspore.dataset.vision.transforms as F
from mindspore import log as logger
@ -326,15 +325,15 @@ def test_concat_14():
Test concat: Testing concat on two different source datasets with different dataset operations.
"""
logger.info("test_concat_14")
DATA_DIR = "../data/dataset/testPK/data"
DATA_DIR2 = "../data/dataset/testImageNetData/train/"
data_dir = "../data/dataset/testPK/data"
data_dir2 = "../data/dataset/testImageNetData/train/"
data1 = ds.ImageFolderDataset(DATA_DIR, num_samples=3)
data2 = ds.ImageFolderDataset(DATA_DIR2, num_samples=2)
data1 = ds.ImageFolderDataset(data_dir, num_samples=3)
data2 = ds.ImageFolderDataset(data_dir2, num_samples=2)
transforms1 = mindspore.dataset.transforms.py_transforms.Compose([F.Decode(),
F.Resize((224, 224)),
F.ToTensor()])
transforms1 = C.Compose([F.Decode(True),
F.Resize((224, 224)),
F.ToTensor()])
data1 = data1.map(operations=transforms1, input_columns=["image"])
data2 = data2.map(operations=transforms1, input_columns=["image"])
@ -360,11 +359,11 @@ def test_concat_15():
Test concat: create dataset with different format of dataset file, and then concat
"""
logger.info("test_concat_15")
DATA_DIR = "../data/dataset/testPK/data"
DATA_DIR2 = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"]
data_dir = "../data/dataset/testPK/data"
data_dir2 = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"]
data1 = ds.ImageFolderDataset(DATA_DIR)
data2 = ds.TFRecordDataset(DATA_DIR2, columns_list=["image"])
data1 = ds.ImageFolderDataset(data_dir)
data2 = ds.TFRecordDataset(data_dir2, columns_list=["image"])
data1 = data1.project(["image"])
data3 = data1 + data2
@ -377,11 +376,11 @@ def test_concat_16():
Test concat: test get_dataset_size on nested concats
"""
logger.info("test_concat_16")
DATA_DIR = "../data/dataset/testPK/data"
DATA_DIR2 = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"]
data_dir = "../data/dataset/testPK/data"
data_dir2 = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"]
data1 = ds.ImageFolderDataset(DATA_DIR)
data2 = ds.TFRecordDataset(DATA_DIR2, columns_list=["image"])
data1 = ds.ImageFolderDataset(data_dir)
data2 = ds.TFRecordDataset(data_dir2, columns_list=["image"])
data3 = ds.GeneratorDataset(generator, ["col1"])
data4 = ds.GeneratorDataset(generator_10, ["col1"])

View File

@ -20,7 +20,7 @@ import numpy as np
import pytest
import mindspore.dataset as ds
import mindspore.dataset.transforms.c_transforms as data_trans
import mindspore.dataset.transforms.transforms as data_trans
def test_concatenate_op_all():

View File

@ -22,9 +22,8 @@ import numpy as np
import mindspore.dataset as ds
import mindspore.dataset.engine.iterators as it
import mindspore.dataset.transforms.py_transforms
import mindspore.dataset.vision.c_transforms as c_vision
import mindspore.dataset.vision.py_transforms as py_vision
import mindspore.dataset.transforms.transforms
import mindspore.dataset.vision.transforms as vision
from mindspore import log as logger
from util import dataset_equal
@ -94,12 +93,12 @@ def test_pipeline():
num_parallel_workers_original = ds.config.get_num_parallel_workers()
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, shuffle=False)
data1 = data1.map(operations=[c_vision.Decode(True)], input_columns=["image"])
data1 = data1.map(operations=[vision.Decode()], input_columns=["image"])
ds.serialize(data1, "testpipeline.json")
data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, num_parallel_workers=num_parallel_workers_original,
shuffle=False)
data2 = data2.map(operations=[c_vision.Decode(True)], input_columns=["image"])
data2 = data2.map(operations=[vision.Decode()], input_columns=["image"])
ds.serialize(data2, "testpipeline2.json")
# check that the generated output is different
@ -137,8 +136,8 @@ def test_deterministic_run_fail():
# Assuming we get the same seed on calling constructor, if this op is re-used then result won't be
# the same in between the two datasets. For example, RandomCrop constructor takes seed (0)
# outputs a deterministic series of numbers, e,g "a" = [1, 2, 3, 4, 5, 6] <- pretend these are random
random_crop_op = c_vision.RandomCrop([512, 512], [200, 200, 200, 200])
decode_op = c_vision.Decode()
random_crop_op = vision.RandomCrop([512, 512], [200, 200, 200, 200])
decode_op = vision.Decode()
data1 = data1.map(operations=decode_op, input_columns=["image"])
data1 = data1.map(operations=random_crop_op, input_columns=["image"])
@ -177,8 +176,8 @@ def test_seed_undeterministic():
# First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
# We get the seed when constructor is called
random_crop_op = c_vision.RandomCrop([512, 512], [200, 200, 200, 200])
decode_op = c_vision.Decode()
random_crop_op = vision.RandomCrop([512, 512], [200, 200, 200, 200])
decode_op = vision.Decode()
data1 = data1.map(operations=decode_op, input_columns=["image"])
data1 = data1.map(operations=random_crop_op, input_columns=["image"])
@ -187,7 +186,7 @@ def test_seed_undeterministic():
data2 = data2.map(operations=decode_op, input_columns=["image"])
# Since seed is set up on constructor, so the two ops output deterministic sequence.
# Assume the generated random sequence "a" = [1, 2, 3, 4, 5, 6] <- pretend these are random
random_crop_op2 = c_vision.RandomCrop([512, 512], [200, 200, 200, 200])
random_crop_op2 = vision.RandomCrop([512, 512], [200, 200, 200, 200])
data2 = data2.map(operations=random_crop_op2, input_columns=["image"])
try:
dataset_equal(data1, data2, 0)
@ -217,8 +216,8 @@ def test_seed_deterministic():
# First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
# seed will be read in during constructor call
random_crop_op = c_vision.RandomCrop([512, 512], [200, 200, 200, 200])
decode_op = c_vision.Decode()
random_crop_op = vision.RandomCrop([512, 512], [200, 200, 200, 200])
decode_op = vision.Decode()
data1 = data1.map(operations=decode_op, input_columns=["image"])
data1 = data1.map(operations=random_crop_op, input_columns=["image"])
@ -226,7 +225,7 @@ def test_seed_deterministic():
data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
data2 = data2.map(operations=decode_op, input_columns=["image"])
# If seed is set up on constructor, so the two ops output deterministic sequence
random_crop_op2 = c_vision.RandomCrop([512, 512], [200, 200, 200, 200])
random_crop_op2 = vision.RandomCrop([512, 512], [200, 200, 200, 200])
data2 = data2.map(operations=random_crop_op2, input_columns=["image"])
dataset_equal(data1, data2, 0)
@ -252,8 +251,8 @@ def test_deterministic_run_distribution():
# First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
random_horizontal_flip_op = c_vision.RandomHorizontalFlip(0.1)
decode_op = c_vision.Decode()
random_horizontal_flip_op = vision.RandomHorizontalFlip(0.1)
decode_op = vision.Decode()
data1 = data1.map(operations=decode_op, input_columns=["image"])
data1 = data1.map(operations=random_horizontal_flip_op, input_columns=["image"])
@ -261,7 +260,7 @@ def test_deterministic_run_distribution():
data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
data2 = data2.map(operations=decode_op, input_columns=["image"])
# If seed is set up on constructor, so the two ops output deterministic sequence
random_horizontal_flip_op2 = c_vision.RandomHorizontalFlip(0.1)
random_horizontal_flip_op2 = vision.RandomHorizontalFlip(0.1)
data2 = data2.map(operations=random_horizontal_flip_op2, input_columns=["image"])
dataset_equal(data1, data2, 0)
@ -288,11 +287,11 @@ def test_deterministic_python_seed():
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
transforms = [
py_vision.Decode(),
py_vision.RandomCrop([512, 512], [200, 200, 200, 200]),
py_vision.ToTensor(),
vision.Decode(True),
vision.RandomCrop([512, 512], [200, 200, 200, 200]),
vision.ToTensor(),
]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms)
transform = mindspore.dataset.transforms.transforms.Compose(transforms)
data1 = data1.map(operations=transform, input_columns=["image"])
data1_output = []
# config.set_seed() calls random.seed()
@ -340,11 +339,11 @@ def test_deterministic_python_seed_multi_thread():
# First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
transforms = [
py_vision.Decode(),
py_vision.RandomCrop([512, 512], [200, 200, 200, 200]),
py_vision.ToTensor(),
vision.Decode(True),
vision.RandomCrop([512, 512], [200, 200, 200, 200]),
vision.ToTensor()
]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms)
transform = mindspore.dataset.transforms.transforms.Compose(transforms)
data1 = data1.map(operations=transform, input_columns=["image"], python_multiprocessing=True)
data1_output = []
# config.set_seed() calls random.seed()

View File

@ -18,7 +18,7 @@ Testing ConvertColor op in DE
import cv2
import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as c_vision
import mindspore.dataset.vision.transforms as c_vision
import mindspore.dataset.vision.utils as mode
from mindspore import log as logger
from util import visualize_image, diff_mse
@ -61,7 +61,7 @@ def convert_color(ms_convert, cv_convert, plot=False):
def test_convertcolor_pipeline(plot=False):
"""
Test ConvertColor of c_transforms
Test ConvertColor of transforms
"""
logger.info("test_convertcolor_pipeline")
convert_color(mode.ConvertMode.COLOR_BGR2GRAY, cv2.COLOR_BGR2GRAY, plot)

View File

@ -18,7 +18,7 @@ Testing Crop op in DE
import cv2
import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as c_vision
import mindspore.dataset.vision.transforms as c_vision
from mindspore import log as logger
from util import visualize_image, diff_mse
@ -30,7 +30,7 @@ IMAGE_FILE = "../data/dataset/apple.jpg"
def test_crop_pipeline(plot=False):
"""
Test Crop of c_transforms
Test Crop of transforms
"""
logger.info("test_crop_pipeline")

View File

@ -18,9 +18,8 @@ Testing CutOut op in DE
import numpy as np
import mindspore.dataset as ds
import mindspore.dataset.transforms.py_transforms
import mindspore.dataset.vision.c_transforms as c
import mindspore.dataset.vision.py_transforms as f
import mindspore.dataset.transforms.transforms
import mindspore.dataset.vision.transforms as vision
from mindspore import log as logger
from util import visualize_image, visualize_list, diff_mse, save_and_check_md5, \
config_get_set_seed, config_get_set_num_parallel_workers
@ -33,7 +32,7 @@ GENERATE_GOLDEN = False
def test_cut_out_op(plot=False):
"""
Test Cutout
Test CutOut
"""
logger.info("test_cut_out")
@ -41,17 +40,17 @@ def test_cut_out_op(plot=False):
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
transforms_1 = [
f.Decode(),
f.ToTensor(),
f.RandomErasing(value='random')
vision.Decode(True),
vision.ToTensor(),
vision.RandomErasing(value='random')
]
transform_1 = mindspore.dataset.transforms.py_transforms.Compose(transforms_1)
transform_1 = mindspore.dataset.transforms.transforms.Compose(transforms_1)
data1 = data1.map(operations=transform_1, input_columns=["image"])
# Second dataset
data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
decode_op = c.Decode()
cut_out_op = c.CutOut(80)
decode_op = vision.Decode()
cut_out_op = vision.CutOut(80, is_hwc=True)
transforms_2 = [
decode_op,
@ -81,7 +80,7 @@ def test_cut_out_op(plot=False):
def test_cut_out_op_multicut(plot=False):
"""
Test Cutout
Test CutOut
"""
logger.info("test_cut_out")
@ -89,16 +88,16 @@ def test_cut_out_op_multicut(plot=False):
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
transforms_1 = [
f.Decode(),
f.ToTensor(),
vision.Decode(True),
vision.ToTensor(),
]
transform_1 = mindspore.dataset.transforms.py_transforms.Compose(transforms_1)
transform_1 = mindspore.dataset.transforms.transforms.Compose(transforms_1)
data1 = data1.map(operations=transform_1, input_columns=["image"])
# Second dataset
data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
decode_op = c.Decode()
cut_out_op = c.CutOut(80, num_patches=10)
decode_op = vision.Decode()
cut_out_op = vision.CutOut(80, num_patches=10, is_hwc=True)
transforms_2 = [
decode_op,
@ -129,7 +128,7 @@ def test_cut_out_op_multicut(plot=False):
def test_cut_out_md5():
"""
Test Cutout with md5 check
Test CutOut with md5 check
"""
logger.info("test_cut_out_md5")
original_seed = config_get_set_seed(2)
@ -137,24 +136,24 @@ def test_cut_out_md5():
# First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
decode_op = c.Decode()
cut_out_op = c.CutOut(100)
decode_op = vision.Decode()
cut_out_op = vision.CutOut(100, is_hwc=True)
data1 = data1.map(operations=decode_op, input_columns=["image"])
data1 = data1.map(operations=cut_out_op, input_columns=["image"])
data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
transforms = [
f.Decode(),
f.ToTensor(),
f.Cutout(100)
vision.Decode(True),
vision.ToTensor(),
vision.CutOut(100, is_hwc=False)
]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms)
transform = mindspore.dataset.transforms.transforms.Compose(transforms)
data2 = data2.map(operations=transform, input_columns=["image"])
# Compare with expected md5 from images
filename1 = "cut_out_01_c_result.npz"
save_and_check_md5(data1, filename1, generate_golden=GENERATE_GOLDEN)
filename2 = "cut_out_01_py_result.npz"
filename2 = "cut_out_02_c_result.npz"
save_and_check_md5(data2, filename2, generate_golden=GENERATE_GOLDEN)
# Restore config
@ -162,9 +161,11 @@ def test_cut_out_md5():
ds.config.set_num_parallel_workers(original_num_parallel_workers)
def test_cut_out_comp(plot=False):
def test_cut_out_comp_hwc(plot=False):
"""
Test Cutout with c++ and python op comparison
Feature: CutOut op
Description: Test CutOut with HWC input, Decode(to_pil=True) & ToTensor versus Decode(to_pil=False) comparison
Expectation: Test succeeds. Manual confirmation of logged info. Manual visualization confirmation.
"""
logger.info("test_cut_out_comp")
@ -172,19 +173,19 @@ def test_cut_out_comp(plot=False):
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
transforms_1 = [
f.Decode(),
f.ToTensor(),
f.Cutout(200)
vision.Decode(True),
vision.ToTensor(),
vision.CutOut(250, is_hwc=False)
]
transform_1 = mindspore.dataset.transforms.py_transforms.Compose(transforms_1)
transform_1 = mindspore.dataset.transforms.transforms.Compose(transforms_1)
data1 = data1.map(operations=transform_1, input_columns=["image"])
# Second dataset
data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
transforms_2 = [
c.Decode(),
c.CutOut(200)
vision.Decode(),
vision.CutOut(250, is_hwc=True)
]
data2 = data2.map(operations=transforms_2, input_columns=["image"])
@ -209,8 +210,59 @@ def test_cut_out_comp(plot=False):
visualize_list(image_list_1, image_list_2, visualize_mode=2)
def skip_test_cut_out_comp_chw():
"""
Feature: CutOut op
Description: Test CutOut with CHW input, Decode(to_pil=True) & ToTensor versus Decode(to_pil=False) & HWC2CHW
comparison.
Expectation: Test succeeds. Manual confirmation of logged info.
"""
logger.info("test_cut_out_comp_chw")
# First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
transforms_1 = [
vision.Decode(),
vision.HWC2CHW(),
vision.CutOut(200, is_hwc=False)
]
transform_1 = mindspore.dataset.transforms.transforms.Compose(transforms_1)
data1 = data1.map(operations=transform_1, input_columns=["image"])
# Second dataset
data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
transforms_2 = [
vision.Decode(True),
vision.ToTensor(),
vision.CutOut(200, is_hwc=False)
]
data2 = data2.map(operations=transforms_2, input_columns=["image"])
num_iter = 0
image_list_1, image_list_2 = [], []
for item1, item2 in zip(data1.create_dict_iterator(num_epochs=1, output_numpy=True),
data2.create_dict_iterator(num_epochs=1, output_numpy=True)):
num_iter += 1
image_1 = item1["image"]
image_2 = item2["image"]
if image_1.shape != image_2.shape:
raise RuntimeError("image_1.shape != image_2.shape: " + str(image_1.shape) + " " + str(image_2.shape))
image_list_1.append(image_1)
image_list_2.append(image_2)
logger.info("shape of image_1: {}".format(image_1.shape))
logger.info("shape of image_2: {}".format(image_2.shape))
logger.info("dtype of image_1: {}".format(image_1.dtype))
logger.info("dtype of image_2: {}".format(image_2.dtype))
if __name__ == "__main__":
test_cut_out_op(plot=True)
test_cut_out_op_multicut(plot=True)
test_cut_out_md5()
test_cut_out_comp(plot=True)
test_cut_out_comp_hwc(plot=True)
skip_test_cut_out_comp_chw()

View File

@ -18,8 +18,8 @@ Testing the CutMixBatch op in DE
import numpy as np
import pytest
import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as vision
import mindspore.dataset.transforms.c_transforms as data_trans
import mindspore.dataset.vision.transforms as vision
import mindspore.dataset.transforms.transforms as data_trans
import mindspore.dataset.vision.utils as mode
from mindspore import log as logger
from util import save_and_check_md5, diff_mse, visualize_list, config_get_set_seed, \

View File

@ -18,7 +18,7 @@ import numpy as np
import pandas as pd
import mindspore.dataset as de
from mindspore import log as logger
import mindspore.dataset.vision.c_transforms as vision
import mindspore.dataset.vision.transforms as vision
def test_numpy_slices_list_1():
@ -69,7 +69,7 @@ def test_numpy_slices_list_append():
data1 = de.TFRecordDataset(DATA_DIR)
resize_op = vision.Resize((resize_height, resize_width))
data1 = data1.map(operations=[vision.Decode(True), resize_op], input_columns=["image"])
data1 = data1.map(operations=[vision.Decode(), resize_op], input_columns=["image"])
res = []
for data in data1.create_dict_iterator(num_epochs=1, output_numpy=True):

0
tests/ut/python/dataset/test_datasets_amazon_review.py Executable file → Normal file
View File

14
tests/ut/python/dataset/test_datasets_caltech101.py Executable file → Normal file
View File

@ -24,7 +24,7 @@ from PIL import Image
from scipy.io import loadmat
import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as c_vision
import mindspore.dataset.vision.transforms as vision
from mindspore import log as logger
DATASET_DIR = "../data/dataset/testCaltech101Data"
@ -176,21 +176,21 @@ def test_caltech101_basic():
assert num_iter == 8
# case 5: test get_dataset_size, resize and batch
all_data = ds.Caltech101Dataset(DATASET_DIR, num_samples=4)
all_data = all_data.map(operations=[c_vision.Decode(), c_vision.Resize((224, 224))], input_columns=["image"],
all_data = ds.Caltech101Dataset(DATASET_DIR, num_samples=12)
all_data = all_data.map(operations=[vision.Decode(), vision.Resize((120, 120))], input_columns=["image"],
num_parallel_workers=1)
assert all_data.get_dataset_size() == 4
assert all_data.get_batch_size() == 1
# drop_remainder is default to be False
all_data = all_data.batch(batch_size=3)
assert all_data.get_batch_size() == 3
assert all_data.get_dataset_size() == 2
all_data = all_data.batch(batch_size=4)
assert all_data.get_batch_size() == 4
assert all_data.get_dataset_size() == 1
num_iter = 0
for _ in all_data.create_dict_iterator(num_epochs=1):
num_iter += 1
assert num_iter == 2
assert num_iter == 1
# case 6: test get_class_indexing
all_data = ds.Caltech101Dataset(DATASET_DIR, num_samples=4)

12
tests/ut/python/dataset/test_datasets_caltech256.py Executable file → Normal file
View File

@ -19,7 +19,7 @@ import numpy as np
import pytest
import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as c_vision
import mindspore.dataset.vision.transforms as vision
from mindspore import log as logger
IMAGE_DATA_DIR = "../data/dataset/testPK/data"
@ -72,21 +72,21 @@ def test_caltech256_basic():
assert num_iter == 8
# case 5: test get_dataset_size, resize and batch
all_data = ds.Caltech256Dataset(IMAGE_DATA_DIR, num_samples=4)
all_data = all_data.map(operations=[c_vision.Decode(), c_vision.Resize((224, 224))], input_columns=["image"],
all_data = ds.Caltech256Dataset(IMAGE_DATA_DIR, num_samples=8)
all_data = all_data.map(operations=[vision.Decode(), vision.Resize((256, 256))], input_columns=["image"],
num_parallel_workers=1)
assert all_data.get_dataset_size() == 4
assert all_data.get_dataset_size() == 8
assert all_data.get_batch_size() == 1
# drop_remainder is default to be False
all_data = all_data.batch(batch_size=3)
assert all_data.get_batch_size() == 3
assert all_data.get_dataset_size() == 2
assert all_data.get_dataset_size() == 3
num_iter = 0
for _ in all_data.create_dict_iterator(num_epochs=1):
num_iter += 1
assert num_iter == 2
assert num_iter == 3
def test_caltech256_decode():

View File

@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as vision
import mindspore.dataset.vision.transforms as vision
from mindspore import log as logger
from mindspore.dataset.vision import Inter

View File

@ -20,7 +20,7 @@ import numpy as np
import pytest
import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as c_vision
import mindspore.dataset.vision.transforms as vision
DATASET_DIR = "../data/dataset/testCityscapesData/cityscapes"
@ -108,7 +108,7 @@ def test_cityscapes_basic_func():
# case 3: test batch with drop_remainder=False
data3 = ds.CityscapesDataset(DATASET_DIR, usage=usage, quality_mode=quality_mode, task=task, decode=True)
resize_op = c_vision.Resize((100, 100))
resize_op = vision.Resize((100, 100))
data3 = data3.map(operations=resize_op, input_columns=["image"], num_parallel_workers=1)
data3 = data3.map(operations=resize_op, input_columns=["task"], num_parallel_workers=1)
assert data3.get_dataset_size() == 5
@ -123,7 +123,7 @@ def test_cityscapes_basic_func():
# case 4: test batch with drop_remainder=True
data4 = ds.CityscapesDataset(DATASET_DIR, usage=usage, quality_mode=quality_mode, task=task, decode=True)
resize_op = c_vision.Resize((100, 100))
resize_op = vision.Resize((100, 100))
data4 = data4.map(operations=resize_op, input_columns=["image"], num_parallel_workers=1)
data4 = data4.map(operations=resize_op, input_columns=["task"], num_parallel_workers=1)
assert data4.get_dataset_size() == 5

View File

@ -15,7 +15,7 @@
import numpy as np
import mindspore.dataset as ds
import mindspore.dataset.text as text
import mindspore.dataset.vision.c_transforms as vision
import mindspore.dataset.vision.transforms as vision
DATA_DIR = "../data/dataset/testCOCO/train/"
DATA_DIR_2 = "../data/dataset/testCOCO/train"

View File

@ -17,7 +17,7 @@ import numpy as np
import pytest
import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as c_vision
import mindspore.dataset.vision.transforms as vision
DATASET_DIR = "../data/dataset/testDIV2KData/div2k"
@ -90,7 +90,7 @@ def test_div2k_basic_func():
data3 = ds.DIV2KDataset(DATASET_DIR, usage=usage, downgrade=downgrade, scale=scale, decode=True)
assert data3.get_dataset_size() == 5
assert data3.get_batch_size() == 1
resize_op = c_vision.Resize([100, 100])
resize_op = vision.Resize([100, 100])
data3 = data3.map(operations=resize_op, input_columns=["hr_image"], num_parallel_workers=1)
data3 = data3.map(operations=resize_op, input_columns=["lr_image"], num_parallel_workers=1)
data3 = data3.batch(batch_size=3) # drop_remainder is default to be False

View File

@ -23,7 +23,7 @@ import numpy as np
import pytest
import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as vision
import mindspore.dataset.vision.transforms as vision
from mindspore import log as logger
DATA_DIR = "../data/dataset/testEMnistDataset"

View File

@ -23,7 +23,7 @@ import numpy as np
import pytest
import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as vision
import mindspore.dataset.vision.transforms as vision
from mindspore import log as logger
DATA_DIR = "../data/dataset/testMnistData"

View File

@ -16,7 +16,7 @@ import numpy as np
import matplotlib.pyplot as plt
import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as c_vision
import mindspore.dataset.vision.transforms as vision
from mindspore import log as logger
FLICKR30K_DATASET_DIR = "../data/dataset/testFlickrData/flickr30k/flickr30k-images"
@ -95,7 +95,7 @@ def test_flickr30k_dataset_basic():
# case 3: test batch with drop_remainder=False
data3 = ds.FlickrDataset(FLICKR30K_DATASET_DIR, FLICKR30K_ANNOTATION_FILE_2, decode=True, shuffle=False)
resize_op = c_vision.Resize((100, 100))
resize_op = vision.Resize((100, 100))
data3 = data3.map(operations=resize_op, input_columns=["image"], num_parallel_workers=1)
assert data3.get_dataset_size() == 3
assert data3.get_batch_size() == 1
@ -109,7 +109,7 @@ def test_flickr30k_dataset_basic():
# case 4: test batch with drop_remainder=True
data4 = ds.FlickrDataset(FLICKR30K_DATASET_DIR, FLICKR30K_ANNOTATION_FILE_2, decode=True, shuffle=False)
resize_op = c_vision.Resize((100, 100))
resize_op = vision.Resize((100, 100))
data4 = data4.map(operations=resize_op, input_columns=["image"], num_parallel_workers=1)
assert data4.get_dataset_size() == 3
assert data4.get_batch_size() == 1

View File

@ -1,4 +1,4 @@
# Copyright 2021 Huawei Technologies Co., Ltd
# Copyright 2021-2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -24,7 +24,7 @@ from PIL import Image
from scipy.io import loadmat
import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as c_vision
import mindspore.dataset.vision.transforms as c_vision
from mindspore import log as logger
DATA_DIR = "../data/dataset/testFlowers102Dataset"

View File

@ -14,7 +14,7 @@
# ==============================================================================
import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as vision
import mindspore.dataset.vision.transforms as vision
IMAGENET_RAWDATA_DIR = "../data/dataset/testImageNetData2/train"
IMAGENET_TFFILE_DIR = ["../data/dataset/test_tf_file_3_images2/train-0000-of-0001.data",

View File

@ -14,7 +14,7 @@
# ==============================================================================
import pytest
import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as vision
import mindspore.dataset.vision.transforms as vision
from mindspore import log as logger
DATA_DIR = "../data/dataset/testPK/data"

View File

@ -17,7 +17,7 @@ import re
import pytest
import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as vision
import mindspore.dataset.vision.transforms as vision
from mindspore import log as logger
DATA_DIR = "../data/dataset/testKITTI"

View File

@ -23,7 +23,7 @@ import numpy as np
import pytest
import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as vision
import mindspore.dataset.vision.transforms as vision
from mindspore import log as logger
DATA_DIR = "../data/dataset/testMnistData"

View File

@ -18,7 +18,7 @@ Test LSUN dataset operators
import pytest
import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as vision
import mindspore.dataset.vision.transforms as vision
from mindspore import log as logger
DATA_DIR = "../data/dataset/testLSUN"

View File

@ -15,8 +15,8 @@
import numpy as np
import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as vision
import mindspore.dataset.transforms.c_transforms as data_trans
import mindspore.dataset.vision.transforms as vision
import mindspore.dataset.transforms.transforms as data_trans
from mindspore import log as logger
DATA_FILE = "../data/dataset/testManifestData/test.manifest"

View File

@ -20,7 +20,7 @@ import pytest
import numpy as np
import matplotlib.pyplot as plt
import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as vision
import mindspore.dataset.vision.transforms as vision
from mindspore import log as logger
DATA_DIR = "../data/dataset/testMnistData"

View File

@ -16,7 +16,7 @@
Test Omniglot dataset operators
"""
import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as vision
import mindspore.dataset.vision.transforms as vision
from mindspore import log as logger
DATA_DIR = "../data/dataset/testOmniglot"

View File

@ -1,4 +1,4 @@
# Copyright 2021 Huawei Technologies Co., Ltd
# Copyright 2021-2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -22,7 +22,7 @@ import numpy as np
import pytest
import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as vision
import mindspore.dataset.vision.transforms as vision
from mindspore import log as logger
DATA_DIR = "../data/dataset/testQMnistData"

View File

@ -20,7 +20,7 @@ import pytest
import mindspore.dataset as ds
from mindspore import log as logger
import mindspore.dataset.vision.c_transforms as c_vision
import mindspore.dataset.vision.transforms as vision
DATASET_DIR = "../data/dataset/testSBData/sbd"
@ -100,7 +100,7 @@ def test_sbd_basic02():
# case 3: test batch with drop_remainder=False
data3 = ds.SBDataset(DATASET_DIR, task='Segmentation', usage='train', shuffle=False, decode=True)
resize_op = c_vision.Resize((100, 100))
resize_op = vision.Resize((100, 100))
data3 = data3.map(operations=resize_op, input_columns=["image"], num_parallel_workers=1)
data3 = data3.map(operations=resize_op, input_columns=["task"], num_parallel_workers=1)
assert data3.get_dataset_size() == 4
@ -115,7 +115,7 @@ def test_sbd_basic02():
# case 4: test batch with drop_remainder=True
data4 = ds.SBDataset(DATASET_DIR, task='Segmentation', usage='train', shuffle=False, decode=True)
resize_op = c_vision.Resize((100, 100))
resize_op = vision.Resize((100, 100))
data4 = data4.map(operations=resize_op, input_columns=["image"], num_parallel_workers=1)
data4 = data4.map(operations=resize_op, input_columns=["task"], num_parallel_workers=1)
assert data4.get_dataset_size() == 4

View File

@ -1,4 +1,4 @@
# Copyright 2021 Huawei Technologies Co., Ltd
# Copyright 2021-2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -23,7 +23,7 @@ import pytest
from PIL import Image
import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as vision
import mindspore.dataset.vision.transforms as vision
from mindspore import log as logger
DATA_DIR = "../data/dataset/testSBUDataset"
@ -99,7 +99,7 @@ def test_sbu_case():
dataset = ds.SBUDataset(DATA_DIR, decode=False)
dataset = dataset.map(operations=[vision.Decode(rgb=True), vision.Resize((224, 224))], input_columns=["image"])
dataset = dataset.map(operations=[vision.Decode(), vision.Resize((224, 224))], input_columns=["image"])
repeat_num = 4
dataset = dataset.repeat(repeat_num)
batch_size = 2
@ -280,7 +280,7 @@ def test_sbu_decode():
sampler = ds.SequentialSampler(num_samples=50)
dataset = ds.SBUDataset(dataset_dir=DATA_DIR, decode=False, sampler=sampler)
dataset_1 = dataset.map(operations=[vision.Decode(rgb=True)], input_columns=["image"])
dataset_1 = dataset.map(operations=[vision.Decode()], input_columns=["image"])
dataset_2 = ds.SBUDataset(dataset_dir=DATA_DIR, decode=True, sampler=sampler)

View File

@ -19,7 +19,7 @@ import numpy as np
import pytest
import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as c_vision
import mindspore.dataset.vision.transforms as vision
DATA_DIR_SEMEION = "../data/dataset/testSemeionData"
@ -228,7 +228,7 @@ def test_semeion_pipeline():
"""
# Original image
dataset = ds.SemeionDataset(DATA_DIR_SEMEION, num_samples=1)
resize_op = c_vision.Resize((100, 100))
resize_op = vision.Resize((100, 100))
# Filtered image by Resize
dataset = dataset.map(operations=resize_op, input_columns=["image"], num_parallel_workers=1)
i = 0

View File

@ -22,7 +22,7 @@ import numpy as np
import pytest
import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as vision
import mindspore.dataset.vision.transforms as vision
from mindspore import log as logger
DATA_DIR = "../data/dataset/testSTL10Data"

View File

@ -1,4 +1,4 @@
# Copyright 2021 Huawei Technologies Co., Ltd
# Copyright 2021-2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -23,7 +23,7 @@ import numpy as np
import pytest
import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as vision
import mindspore.dataset.vision.transforms as vision
from mindspore import log as logger
DATA_DIR = "../data/dataset/testUSPSDataset"

View File

@ -14,7 +14,7 @@
# ==============================================================================
import mindspore.dataset as ds
import mindspore.dataset.text as text
import mindspore.dataset.vision.c_transforms as vision
import mindspore.dataset.vision.transforms as vision
DATA_DIR = "../data/dataset/testVOC2012"
IMAGE_ID = ["32", "33", "39", "42", "61", "63", "68", "121", "123", "129"]

View File

@ -16,7 +16,7 @@ import pytest
import numpy as np
import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as vision
import mindspore.dataset.vision.transforms as vision
import mindspore.log as logger
DATA_DIR = "../data/dataset/testWIDERFace/"

View File

@ -19,8 +19,7 @@ import cv2
import numpy as np
import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as vision
import mindspore.dataset.vision.py_transforms as py_vision
import mindspore.dataset.vision.transforms as vision
from mindspore import log as logger
from util import diff_mse
@ -38,7 +37,7 @@ def test_decode_op():
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
# Serialize and Load dataset requires using vision.Decode instead of vision.Decode().
data1 = data1.map(operations=[vision.Decode(True)], input_columns=["image"])
data1 = data1.map(operations=[vision.Decode()], input_columns=["image"])
# Second dataset
data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
@ -60,7 +59,7 @@ def test_decode_op_tf_file_dataset():
# Decode with rgb format set to True
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=ds.Shuffle.FILES)
data1 = data1.map(operations=vision.Decode(True), input_columns=["image"])
data1 = data1.map(operations=vision.Decode(), input_columns=["image"])
for item in data1.create_dict_iterator(num_epochs=1):
logger.info('decode == {}'.format(item['image']))
@ -103,8 +102,8 @@ def test_read_image_decode_op():
data_path = "../data/dataset/testPK/data/class1/0.jpg"
dataset1 = ds.GeneratorDataset(ImageDataset(data_path, data_type="numpy"), ["data", "label"])
dataset2 = ds.GeneratorDataset(ImageDataset(data_path, data_type="bytes"), ["data", "label"])
decode_op = py_vision.Decode()
to_tensor = py_vision.ToTensor(output_type=np.int32)
decode_op = vision.Decode(True)
to_tensor = vision.ToTensor(output_type=np.int32)
dataset1 = dataset1.map(operations=[decode_op, to_tensor], input_columns=["data"])
dataset2 = dataset2.map(operations=[decode_op, to_tensor], input_columns=["data"])

View File

@ -15,7 +15,7 @@
import time
import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as vision
import mindspore.dataset.vision.transforms as vision
from mindspore import log as logger
DATA_DIR = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"]

View File

@ -18,7 +18,7 @@ Testing Duplicate op in DE
import numpy as np
import mindspore.dataset as ds
import mindspore.dataset.transforms.c_transforms as ops
import mindspore.dataset.transforms.transforms as ops
def compare(array):

View File

@ -18,7 +18,7 @@ Eager Tests for Transform Tensor ops
import numpy as np
import mindspore.common.dtype as mstype
import mindspore.dataset.transforms.c_transforms as data_trans
import mindspore.dataset.transforms.transforms as data_trans
def test_eager_concatenate():

View File

@ -16,28 +16,27 @@ import cv2
import numpy as np
import pytest
from PIL import Image
import mindspore.dataset.vision.c_transforms as C
import mindspore.dataset.vision.py_transforms as PY
import mindspore.dataset.vision.transforms as vision
from mindspore import log as logger
def test_eager_decode_c():
"""
Feature: Decode op
Description: Test eager support for Decode C++ op
Description: Test eager support for Decode C implementation
Expectation: Output image size from op is correct
"""
img = np.fromfile("../data/dataset/apple.jpg", dtype=np.uint8)
logger.info("Image.type: {}, Image.shape: {}".format(type(img), img.shape))
img = C.Decode()(img)
img = vision.Decode()(img)
logger.info("Image.type: {}, Image.shape: {}".format(type(img), img.shape))
assert img.shape == (2268, 4032, 3)
fp = open("../data/dataset/apple.jpg", "rb")
img2 = fp.read()
img2 = C.Decode()(img2)
img2 = vision.Decode()(img2)
logger.info("Image.type: {}, Image.shape: {}".format(type(img2), img2.shape))
assert img2.shape == (2268, 4032, 3)
@ -45,20 +44,20 @@ def test_eager_decode_c():
def test_eager_decode_py():
"""
Feature: Decode op
Description: Test eager support for Decode Python op
Description: Test eager support for Decode Python implementation
Expectation: Output image size from op is correct
"""
img = np.fromfile("../data/dataset/apple.jpg", dtype=np.uint8)
logger.info("Image.type: {}, Image.shape: {}".format(type(img), img.size))
img = PY.Decode()(img)
img = vision.Decode(to_pil=True)(img)
logger.info("Image.type: {}, Image.shape: {}".format(type(img), img.size))
assert img.size == (4032, 2268)
fp = open("../data/dataset/apple.jpg", "rb")
img2 = fp.read()
img2 = PY.Decode()(img2)
img2 = vision.Decode(to_pil=True)(img2)
logger.info("Image.type: {}, Image.shape: {}".format(type(img2), img2.size))
assert img2.size == (4032, 2268)
@ -67,7 +66,7 @@ def test_eager_resize():
img = cv2.imread("../data/dataset/apple.jpg")
logger.info("Image.type: {}, Image.shape: {}".format(type(img), img.shape))
img = C.Resize(size=(32, 32))(img)
img = vision.Resize(size=(32, 32))(img)
logger.info("Image.type: {}, Image.shape: {}".format(type(img), img.shape))
assert img.shape == (32, 32, 3)
@ -79,17 +78,17 @@ def test_eager_rescale():
pixel = img[0][0][0]
rescale_factor = 0.5
img = C.Rescale(rescale=rescale_factor, shift=0)(img)
img = vision.Rescale(rescale=rescale_factor, shift=0)(img)
logger.info("Image.type: {}, Image.shape: {}".format(type(img), img.shape))
pixel_rescaled = img[0][0][0]
assert pixel * rescale_factor == pixel_rescaled
def test_eager_normalize_c():
def test_eager_normalize_hwc():
"""
Feature: Normalize op
Description: Test eager support for Normalize C++ op
Description: Test eager support for Normalize with HWC shape
Expectation: Output image info from op is correct
"""
img = Image.open("../data/dataset/apple.jpg").convert("RGB")
@ -98,144 +97,149 @@ def test_eager_normalize_c():
mean_vec = [100, 100, 100]
std_vec = [2, 2, 2]
img = C.Normalize(mean=mean_vec, std=std_vec)(img)
img = vision.Normalize(mean=mean_vec, std=std_vec, is_hwc=True)(img)
logger.info("Image.type: {}, Image.shape: {}".format(type(img), img.shape))
pixel_normalized = img[0][0][0]
assert (pixel - mean_vec[0]) / std_vec[0] == pixel_normalized
def test_eager_normalize_py():
def test_eager_normalize_chw():
"""
Feature: Normalize op
Description: Test eager support for Normalize Python op
Description: Test eager support for Normalize with CHW shape
Expectation: Output image info from op is correct
"""
img = Image.open("../data/dataset/apple.jpg").convert("RGB")
logger.info("Image.type: {}, Image.shape: {}".format(type(img), img.size))
pixel = img.getpixel((0, 0))[0]
img = PY.ToTensor()(img)
img = vision.ToTensor()(img)
mean_vec = [.100, .100, .100]
std_vec = [.2, .2, .2]
img = PY.Normalize(mean=mean_vec, std=std_vec)(img)
img = vision.Normalize(mean=mean_vec, std=std_vec, is_hwc=False)(img)
pixel_normalized = img[0][0][0]
assert (pixel / 255 - mean_vec[0]) / std_vec[0] == pytest.approx(pixel_normalized, 0.0001)
def test_eager_HWC2CHW():
def test_eager_hwc2chw():
"""
Feature: HWC2CHW op
Description: Test eager support for HWC2CHW op
Expectation: Output image size from op is correct
"""
img = cv2.imread("../data/dataset/apple.jpg")
logger.info("Image.type: {}, Image.shape: {}".format(type(img), img.shape))
channel = img.shape
img = C.HWC2CHW()(img)
img = vision.HWC2CHW()(img)
logger.info("Image.type: {}, Image.shape: {}".format(type(img), img.shape))
channel_swaped = img.shape
channel_swapped = img.shape
assert channel == (channel_swaped[1], channel_swaped[2], channel_swaped[0])
assert channel == (channel_swapped[1], channel_swapped[2], channel_swapped[0])
def test_eager_pad_c():
"""
Feature: Pad op
Description: Test eager support for Pad C++ op
Description: Test eager support for Pad C implementation
Expectation: Output image size info from op is correct
"""
img = Image.open("../data/dataset/apple.jpg").convert("RGB")
logger.info("Image.type: {}, Image.shape: {}".format(type(img), img.size))
img = cv2.imread("../data/dataset/apple.jpg")
logger.info("Image.type: {}, Image.shape: {}".format(type(img), img.shape))
img = C.Resize(size=(32, 32))(img)
logger.info("Image.type: {}, Image.shape: {}".format(type(img), img.size))
size = img.shape
img = vision.Resize(size=(32, 32))(img)
logger.info("Image.type: {}, Image.shape: {}".format(type(img), img.shape))
shape_org = img.shape
pad = 4
img = C.Pad(padding=pad)(img)
logger.info("Image.type: {}, Image.shape: {}".format(type(img), img.size))
size_padded = img.shape
img = vision.Pad(padding=pad)(img)
logger.info("Image.type: {}, Image.shape: {}".format(type(img), img.shape))
shape_padded = img.shape
assert size_padded == (size[0] + 2 * pad, size[1] + 2 * pad, size[2])
assert shape_padded == (shape_org[0] + 2 * pad, shape_org[1] + 2 * pad, shape_org[2])
def test_eager_pad_py():
"""
Feature: Pad op
Description: Test eager support for Pad Python op
Description: Test eager support for Pad Python implementation
Expectation: Output image size info from op is correct
"""
img = Image.open("../data/dataset/apple.jpg").convert("RGB")
logger.info("Image.type: {}, Image.shape: {}".format(type(img), img.size))
img = PY.Resize(size=(32, 32))(img)
img = vision.Resize(size=(32, 32))(img)
logger.info("Image.type: {}, Image.shape: {}".format(type(img), img.size))
size = img.size
pad = 4
img = PY.Pad(padding=pad)(img)
img = vision.Pad(padding=pad)(img)
logger.info("Image.type: {}, Image.shape: {}".format(type(img), img.size))
size_padded = img.size
assert size_padded == (size[0] + 2 * pad, size[1] + 2 * pad)
def test_eager_cutout_pil_c():
def test_eager_cutout_hwc_pil():
"""
Feature: CutOut op
Description: Test eager support for CutOut C++ op with PIL input
Description: Test eager support for CutOut with HWC shape and PIL input
Expectation: Output image size info from op is correct
"""
img = Image.open("../data/dataset/apple.jpg").convert("RGB")
logger.info("Image.type: {}, Image.shape: {}".format(type(img), img.size))
img = C.Resize(size=(32, 32))(img)
img = vision.Resize(size=(32, 32))(img)
logger.info("Image.type: {}, Image.shape: {}".format(type(img), img.size))
size = img.shape
size = img.size
img = C.CutOut(2, 4)(img)
logger.info("Image.type: {}, Image.shape: {}".format(type(img), img.size))
img = vision.CutOut(2, 4)(img)
logger.info("Image.type: {}, Image.shape: {}".format(type(img), img.shape))
size_cutout = img.shape
assert size_cutout == size
assert (size_cutout[0], size_cutout[1]) == size
def test_eager_cutout_pil_py():
def test_eager_cutout_chw_pil():
"""
Feature: CutOut op
Description: Test eager support for CutOut Python op with PIL input
Description: Test eager support for CutOut with CHW shape and PIL input
Expectation: Receive non-None output image from op
"""
img = Image.open("../data/dataset/apple.jpg").convert("RGB")
logger.info("Image.type: {}, Image.shape: {}".format(type(img), img.size))
img = PY.Resize(size=(32, 32))(img)
img = vision.Resize(size=(32, 32))(img)
logger.info("Image.type: {}, Image.shape: {}".format(type(img), img.size))
img = PY.ToTensor()(img)
img = vision.ToTensor()(img)
logger.info("Image.type: {}, Image.shape: {}".format(type(img), img.size))
img = PY.Cutout(2, 4)(img)
img = vision.CutOut(2, 4, is_hwc=False)(img)
logger.info("Image.type: {}, Image.shape: {}".format(type(img), img.size))
assert img is not None
def test_eager_cutout_cv_c():
def test_eager_cutout_hwc_cv():
"""
Feature: CutOut op
Description: Test eager support for CutOut C++ op with CV input
Description: Test eager support for CutOut with HWC shape and CV input
Expectation: Output image size info from op is correct
"""
img = cv2.imread("../data/dataset/apple.jpg")
logger.info("Image.type: {}, Image.shape: {}".format(type(img), img.size))
img = C.Resize(size=(32, 32))(img)
img = vision.Resize(size=(32, 32))(img)
logger.info("Image.type: {}, Image.shape: {}".format(type(img), img.size))
size = img.shape
size = img.size
img = C.CutOut(2, 4)(img)
img = vision.CutOut(2, 4)(img)
logger.info("Image.type: {}, Image.shape: {}".format(type(img), img.size))
size_cutout = img.shape
size_cutout = img.size
assert size_cutout == size
@ -243,61 +247,54 @@ def test_eager_cutout_cv_c():
def test_eager_exceptions():
try:
img = "../data/dataset/apple.jpg"
img = C.Decode()(img)
img = vision.Decode()(img)
assert False
except TypeError as e:
assert "Input should be an encoded image in 1-D NumPy format" in str(e)
try:
img = np.array(["a", "b", "c"])
img = C.Decode()(img)
img = vision.Decode()(img)
assert False
except TypeError as e:
assert "Input should be an encoded image in 1-D NumPy format" in str(e)
try:
img = cv2.imread("../data/dataset/apple.jpg")
img = C.Resize(size=(-32, 32))(img)
img = vision.Resize(size=(-32, 32))(img)
assert False
except ValueError as e:
assert "not within the required interval" in str(e)
try:
img = "../data/dataset/apple.jpg"
img = C.Pad(padding=4)(img)
assert False
except TypeError as e:
assert "Input should be NumPy or PIL image" in str(e)
def test_eager_exceptions_normalize():
"""
Feature: Normalize op
Description: Exception eager support test for Normalize Python op
Description: Exception eager support test for Normalize Python implementation
Expectation: Error input image is detected
"""
try:
img = Image.open("../data/dataset/apple.jpg").convert("RGB")
mean_vec = [.100, .100, .100]
std_vec = [.2, .2, .2]
_ = PY.Normalize(mean=mean_vec, std=std_vec)(img)
_ = vision.Normalize(mean=mean_vec, std=std_vec, is_hwc=False)(img)
assert False
except TypeError as e:
assert "img should be NumPy image" in str(e)
except RuntimeError as e:
assert "Normalize: number of channels does not match the size of mean and std vectors" in str(e)
def test_eager_exceptions_pad():
"""
Feature: Pad op
Description: Exception eager support test for Pad Python op
Description: Exception eager support test for Pad Python implementation
Expectation: Error input image is detected
"""
try:
img = "../data/dataset/apple.jpg"
_ = PY.Pad(padding=4)(img)
_ = vision.Pad(padding=4)(img)
assert False
except TypeError as e:
assert "img should be PIL image" in str(e)
except RuntimeError as e:
assert "tensor should be in shape of <H,W,C> or <H,W>" in str(e)
if __name__ == '__main__':
@ -305,14 +302,14 @@ if __name__ == '__main__':
test_eager_decode_py()
test_eager_resize()
test_eager_rescale()
test_eager_normalize_c()
test_eager_normalize_py()
test_eager_HWC2CHW()
test_eager_normalize_hwc()
test_eager_normalize_chw()
test_eager_hwc2chw()
test_eager_pad_c()
test_eager_pad_py()
test_eager_cutout_pil_c()
test_eager_cutout_pil_py()
test_eager_cutout_cv_c()
test_eager_cutout_hwc_pil()
test_eager_cutout_chw_pil()
test_eager_cutout_hwc_cv()
test_eager_exceptions()
test_eager_exceptions_normalize()
test_eager_exceptions_pad()

View File

@ -21,7 +21,7 @@ import pytest
import cv2
import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as vision
import mindspore.dataset.vision.transforms as vision
from mindspore import log as logger
DATA_DIR = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"]
@ -78,7 +78,7 @@ def test_decode_op():
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
# Serialize and Load dataset requires using vision.Decode instead of vision.Decode().
data1 = data1.map(operations=[vision.Decode(True)], input_columns=["image"])
data1 = data1.map(operations=[vision.Decode()], input_columns=["image"])
# Second dataset
data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)

View File

@ -18,9 +18,8 @@ Testing Equalize op in DE
import numpy as np
import mindspore.dataset as ds
import mindspore.dataset.transforms.py_transforms
import mindspore.dataset.vision.c_transforms as C
import mindspore.dataset.vision.py_transforms as F
import mindspore.dataset.transforms.transforms
import mindspore.dataset.vision.transforms as vision
from mindspore import log as logger
from util import visualize_list, visualize_one_channel_dataset, diff_mse, save_and_check_md5
@ -32,16 +31,16 @@ GENERATE_GOLDEN = False
def test_equalize_py(plot=False):
"""
Test Equalize py op
Test Equalize Python implementation
"""
logger.info("Test Equalize")
# Original Images
data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
transforms_original = mindspore.dataset.transforms.py_transforms.Compose([F.Decode(),
F.Resize((224, 224)),
F.ToTensor()])
transforms_original = mindspore.dataset.transforms.transforms.Compose([vision.Decode(True),
vision.Resize((224, 224)),
vision.ToTensor()])
ds_original = data_set.map(operations=transforms_original, input_columns="image")
@ -58,10 +57,10 @@ def test_equalize_py(plot=False):
# Color Equalized Images
data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
transforms_equalize = mindspore.dataset.transforms.py_transforms.Compose([F.Decode(),
F.Resize((224, 224)),
F.Equalize(),
F.ToTensor()])
transforms_equalize = mindspore.dataset.transforms.transforms.Compose([vision.Decode(True),
vision.Resize((224, 224)),
vision.Equalize(),
vision.ToTensor()])
ds_equalize = data_set.map(operations=transforms_equalize, input_columns="image")
@ -87,14 +86,14 @@ def test_equalize_py(plot=False):
def test_equalize_c(plot=False):
"""
Test Equalize Cpp op
Test Equalize C implementation
"""
logger.info("Test Equalize cpp op")
logger.info("Test Equalize C implementation")
# Original Images
data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
transforms_original = [C.Decode(), C.Resize(size=[224, 224])]
transforms_original = [vision.Decode(), vision.Resize(size=[224, 224])]
ds_original = data_set.map(operations=transforms_original, input_columns="image")
@ -111,8 +110,8 @@ def test_equalize_c(plot=False):
# Equalize Images
data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
transform_equalize = [C.Decode(), C.Resize(size=[224, 224]),
C.Equalize()]
transform_equalize = [vision.Decode(), vision.Resize(size=[224, 224]),
vision.Equalize()]
ds_equalize = data_set.map(operations=transform_equalize, input_columns="image")
@ -137,15 +136,15 @@ def test_equalize_c(plot=False):
def test_equalize_py_c(plot=False):
"""
Test Equalize Cpp op and python op
Test Equalize C implementation and Python implementation
"""
logger.info("Test Equalize cpp and python op")
logger.info("Test Equalize cpp and Python implementation")
# equalize Images in cpp
data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
data_set = data_set.map(operations=[C.Decode(), C.Resize((224, 224))], input_columns=["image"])
data_set = data_set.map(operations=[vision.Decode(), vision.Resize((224, 224))], input_columns=["image"])
ds_c_equalize = data_set.map(operations=C.Equalize(), input_columns="image")
ds_c_equalize = data_set.map(operations=vision.Equalize(), input_columns="image")
ds_c_equalize = ds_c_equalize.batch(512)
@ -159,12 +158,12 @@ def test_equalize_py_c(plot=False):
# Equalize images in python
data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
data_set = data_set.map(operations=[C.Decode(), C.Resize((224, 224))], input_columns=["image"])
data_set = data_set.map(operations=[vision.Decode(), vision.Resize((224, 224))], input_columns=["image"])
transforms_p_equalize = mindspore.dataset.transforms.py_transforms.Compose([lambda img: img.astype(np.uint8),
F.ToPIL(),
F.Equalize(),
np.array])
transforms_p_equalize = mindspore.dataset.transforms.transforms.Compose([lambda img: img.astype(np.uint8),
vision.ToPIL(),
vision.Equalize(),
np.array])
ds_p_equalize = data_set.map(operations=transforms_p_equalize, input_columns="image")
@ -190,15 +189,15 @@ def test_equalize_py_c(plot=False):
def test_equalize_one_channel():
"""
Test Equalize cpp op with one channel image
"""
logger.info("Test Equalize C Op With One Channel Images")
Test Equalize C implementation with one channel image
"""
logger.info("Test Equalize C implementation With One Channel Images")
c_op = C.Equalize()
c_op = vision.Equalize()
try:
data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
data_set = data_set.map(operations=[C.Decode(), C.Resize((224, 224)),
data_set = data_set.map(operations=[vision.Decode(), vision.Resize((224, 224)),
lambda img: np.array(img[:, :, 0])], input_columns=["image"])
data_set.map(operations=c_op, input_columns="image")
@ -210,11 +209,11 @@ def test_equalize_one_channel():
def test_equalize_mnist_c(plot=False):
"""
Test Equalize C op with MNIST dataset (Grayscale images)
Test Equalize C implementation with MNIST dataset (Grayscale images)
"""
logger.info("Test Equalize C Op With MNIST Images")
logger.info("Test Equalize C implementation With MNIST Images")
data_set = ds.MnistDataset(dataset_dir=MNIST_DATA_DIR, num_samples=2, shuffle=False)
ds_equalize_c = data_set.map(operations=C.Equalize(), input_columns="image")
ds_equalize_c = data_set.map(operations=vision.Equalize(), input_columns="image")
ds_orig = ds.MnistDataset(dataset_dir=MNIST_DATA_DIR, num_samples=2, shuffle=False)
images = []
@ -237,15 +236,15 @@ def test_equalize_mnist_c(plot=False):
def test_equalize_md5_py():
"""
Test Equalize py op with md5 check
Test Equalize Python implementation with md5 check
"""
logger.info("Test Equalize")
# First dataset
data1 = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
transforms = mindspore.dataset.transforms.py_transforms.Compose([F.Decode(),
F.Equalize(),
F.ToTensor()])
transforms = mindspore.dataset.transforms.transforms.Compose([vision.Decode(True),
vision.Equalize(),
vision.ToTensor()])
data1 = data1.map(operations=transforms, input_columns="image")
# Compare with expected md5 from images
@ -253,19 +252,19 @@ def test_equalize_md5_py():
save_and_check_md5(data1, filename, generate_golden=GENERATE_GOLDEN)
def test_equalize_md5_c():
def skip_test_equalize_md5_c():
"""
Test Equalize cpp op with md5 check
Test Equalize C implementation with md5 check
"""
logger.info("Test Equalize cpp op with md5 check")
logger.info("Test Equalize C implementation with md5 check")
# Generate dataset
data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
transforms_equalize = [C.Decode(),
C.Resize(size=[224, 224]),
C.Equalize(),
F.ToTensor()]
transforms_equalize = [vision.Decode(),
vision.Resize(size=[224, 224]),
vision.Equalize(),
vision.ToTensor()]
data = data_set.map(operations=transforms_equalize, input_columns="image")
# Compare with expected md5 from images

View File

@ -15,7 +15,7 @@
import pytest
import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as vision
import mindspore.dataset.vision.transforms as vision
from mindspore import log as logger
DATA_DIR = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"]

View File

@ -18,7 +18,7 @@ Testing fill op
import numpy as np
import pytest
import mindspore.dataset as ds
import mindspore.dataset.transforms.c_transforms as data_trans
import mindspore.dataset.transforms.transforms as data_trans
def test_fillop_basic():

View File

@ -16,7 +16,7 @@
import numpy as np
import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as cde
import mindspore.dataset.vision.transforms as cde
DATA_DIR = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"]
SCHEMA_DIR = "../data/dataset/test_tf_file_3_images/datasetSchema.json"

View File

@ -18,8 +18,8 @@ import pytest
import numpy as np
import mindspore.dataset as ds
import mindspore.dataset.transforms.py_transforms
import mindspore.dataset.vision.py_transforms as vision
import mindspore.dataset.transforms.transforms
import mindspore.dataset.vision.transforms as vision
from mindspore import log as logger
from util import visualize_list, save_and_check_md5
@ -37,20 +37,20 @@ def test_five_crop_op(plot=False):
# First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
transforms_1 = [
vision.Decode(),
vision.Decode(True),
vision.ToTensor(),
]
transform_1 = mindspore.dataset.transforms.py_transforms.Compose(transforms_1)
transform_1 = mindspore.dataset.transforms.transforms.Compose(transforms_1)
data1 = data1.map(operations=transform_1, input_columns=["image"])
# Second dataset
data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
transforms_2 = [
vision.Decode(),
vision.Decode(True),
vision.FiveCrop(200),
lambda *images: np.stack([vision.ToTensor()(image) for image in images]) # 4D stack of 5 images
]
transform_2 = mindspore.dataset.transforms.py_transforms.Compose(transforms_2)
transform_2 = mindspore.dataset.transforms.transforms.Compose(transforms_2)
data2 = data2.map(operations=transform_2, input_columns=["image"])
num_iter = 0
@ -81,11 +81,11 @@ def test_five_crop_error_msg():
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
transforms = [
vision.Decode(),
vision.Decode(True),
vision.FiveCrop(200),
vision.ToTensor()
]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms)
transform = mindspore.dataset.transforms.transforms.Compose(transforms)
data = data.map(operations=transform, input_columns=["image"])
with pytest.raises(RuntimeError) as info:
@ -106,11 +106,11 @@ def test_five_crop_md5():
# First dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
transforms = [
vision.Decode(),
vision.Decode(True),
vision.FiveCrop(100),
lambda *images: np.stack([vision.ToTensor()(image) for image in images]) # 4D stack of 5 images
]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms)
transform = mindspore.dataset.transforms.transforms.Compose(transforms)
data = data.map(operations=transform, input_columns=["image"])
# Compare with expected md5 from images
filename = "five_crop_01_result.npz"

View File

@ -18,7 +18,7 @@ Testing GaussianBlur Python API
import cv2
import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as c_vision
import mindspore.dataset.vision.transforms as vision
from mindspore import log as logger
from util import visualize_image, diff_mse
@ -30,14 +30,14 @@ IMAGE_FILE = "../data/dataset/apple.jpg"
def test_gaussian_blur_pipeline(plot=False):
"""
Test GaussianBlur of c_transforms
Test GaussianBlur of C implementation
"""
logger.info("test_gaussian_blur_pipeline")
# First dataset
dataset1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, shuffle=False)
decode_op = c_vision.Decode()
gaussian_blur_op = c_vision.GaussianBlur(3, 3)
decode_op = vision.Decode()
gaussian_blur_op = vision.GaussianBlur(3, 3)
dataset1 = dataset1.map(operations=decode_op, input_columns=["image"])
dataset1 = dataset1.map(operations=gaussian_blur_op, input_columns=["image"])
@ -68,7 +68,7 @@ def test_gaussian_blur_eager():
logger.info("test_gaussian_blur_eager")
img = cv2.imread(IMAGE_FILE)
img_ms = c_vision.GaussianBlur((3, 5), (3.5, 3.5))(img)
img_ms = vision.GaussianBlur((3, 5), (3.5, 3.5))(img)
img_cv = cv2.GaussianBlur(img, (3, 5), 3.5, 3.5)
mse = diff_mse(img_ms, img_cv)
assert mse == 0
@ -80,22 +80,22 @@ def test_gaussian_blur_exception():
"""
logger.info("test_gaussian_blur_exception")
try:
_ = c_vision.GaussianBlur([2, 2])
_ = vision.GaussianBlur([2, 2])
except ValueError as e:
logger.info("Got an exception in GaussianBlur: {}".format(str(e)))
assert "not an odd value" in str(e)
try:
_ = c_vision.GaussianBlur(3.0, [3, 3])
_ = vision.GaussianBlur(3.0, [3, 3])
except TypeError as e:
logger.info("Got an exception in GaussianBlur: {}".format(str(e)))
assert "not of type [<class 'int'>, <class 'list'>, <class 'tuple'>]" in str(e)
try:
_ = c_vision.GaussianBlur(3, -3)
_ = vision.GaussianBlur(3, -3)
except ValueError as e:
logger.info("Got an exception in GaussianBlur: {}".format(str(e)))
assert "not within the required interval" in str(e)
try:
_ = c_vision.GaussianBlur(3, [3, 3, 3])
_ = vision.GaussianBlur(3, [3, 3, 3])
except TypeError as e:
logger.info("Got an exception in GaussianBlur: {}".format(str(e)))
assert "should be a single number or a list/tuple of length 2" in str(e)

View File

@ -15,7 +15,7 @@
import numpy as np
import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as vision
import mindspore.dataset.vision.transforms as vision
CELEBA_DIR = "../data/dataset/testCelebAData"
CIFAR10_DIR = "../data/dataset/testCifar10Data"

0
tests/ut/python/dataset/test_highpass_biquad.py Executable file → Normal file
View File

View File

@ -18,7 +18,7 @@ Testing HorizontalFlip Python API
import cv2
import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as c_vision
import mindspore.dataset.vision.transforms as vision
from mindspore import log as logger
from util import visualize_image, diff_mse
@ -30,14 +30,14 @@ IMAGE_FILE = "../data/dataset/apple.jpg"
def test_horizontal_flip_pipeline(plot=False):
"""
Test HorizontalFlip of c_transforms
Test HorizontalFlip of C implementation
"""
logger.info("test_horizontal_flip_pipeline")
# First dataset
dataset1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, shuffle=False)
decode_op = c_vision.Decode()
horizontal_flip_op = c_vision.HorizontalFlip()
decode_op = vision.Decode()
horizontal_flip_op = vision.HorizontalFlip()
dataset1 = dataset1.map(operations=decode_op, input_columns=["image"])
dataset1 = dataset1.map(operations=horizontal_flip_op, input_columns=["image"])
@ -68,7 +68,7 @@ def test_horizontal_flip_eager():
logger.info("test_horizontal_flip_eager")
img = cv2.imread(IMAGE_FILE)
img_ms = c_vision.HorizontalFlip()(img)
img_ms = vision.HorizontalFlip()(img)
img_cv = cv2.flip(img, 1)
mse = diff_mse(img_ms, img_cv)
assert mse == 0

View File

@ -18,9 +18,8 @@ Testing HWC2CHW op in DE
import numpy as np
import pytest
import mindspore.dataset as ds
import mindspore.dataset.transforms.py_transforms
import mindspore.dataset.vision.c_transforms as c_vision
import mindspore.dataset.vision.py_transforms as py_vision
import mindspore.dataset.transforms.transforms
import mindspore.dataset.vision.transforms as vision
from mindspore import log as logger
from util import diff_mse, visualize_list, save_and_check_md5
@ -41,24 +40,24 @@ def test_hwc2chw_callable():
assert img.shape == (50, 50, 3)
# test one tensor
img1 = c_vision.HWC2CHW()(img)
img1 = vision.HWC2CHW()(img)
assert img1.shape == (3, 50, 50)
# test one tensor with 5 channels
img2 = np.zeros([50, 50, 5])
assert img2.shape == (50, 50, 5)
img3 = c_vision.HWC2CHW()(img2)
img3 = vision.HWC2CHW()(img2)
assert img3.shape == (5, 50, 50)
# test input multiple tensors
with pytest.raises(RuntimeError) as info:
imgs = [img, img]
_ = c_vision.HWC2CHW()(*imgs)
assert "The op is OneToOne, can only accept one tensor as input." in str(info.value)
_ = vision.HWC2CHW()(*imgs)
assert "The op is OneToOne, can only accept one tensor as input" in str(info.value)
with pytest.raises(RuntimeError) as info:
_ = c_vision.HWC2CHW()(img, img)
assert "The op is OneToOne, can only accept one tensor as input." in str(info.value)
_ = vision.HWC2CHW()(img, img)
assert "The op is OneToOne, can only accept one tensor as input" in str(info.value)
def test_hwc2chw_multi_channels():
@ -77,7 +76,7 @@ def test_hwc2chw_multi_channels():
input_data = np.array([raw_data])
dataset = ds.NumpySlicesDataset(input_data, column_names=["col1"], shuffle=False)
hwc2chw = c_vision.HWC2CHW()
hwc2chw = vision.HWC2CHW()
dataset = dataset.map(hwc2chw, input_columns=["col1"])
for item in dataset.create_tuple_iterator(output_numpy=True):
assert np.allclose(item[0], expect_output)
@ -93,8 +92,8 @@ def test_hwc2chw(plot=False):
# First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
decode_op = c_vision.Decode()
hwc2chw_op = c_vision.HWC2CHW()
decode_op = vision.Decode()
hwc2chw_op = vision.HWC2CHW()
data1 = data1.map(operations=decode_op, input_columns=["image"])
data1 = data1.map(operations=hwc2chw_op, input_columns=["image"])
@ -129,8 +128,8 @@ def test_hwc2chw_md5():
# First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
decode_op = c_vision.Decode()
hwc2chw_op = c_vision.HWC2CHW()
decode_op = vision.Decode()
hwc2chw_op = vision.HWC2CHW()
data1 = data1.map(operations=decode_op, input_columns=["image"])
data1 = data1.map(operations=hwc2chw_op, input_columns=["image"])
@ -145,21 +144,21 @@ def test_hwc2chw_comp(plot=False):
Description: Test HWC2CHW between Python and C image augmentation
Expectation: Image augmentations should be almost the same with mse < 0.001
"""
logger.info("Test HWC2CHW with c_transform and py_transform comparison")
logger.info("Test HWC2CHW with C and Python image augmentation comparison")
# First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
decode_op = c_vision.Decode()
hwc2chw_op = c_vision.HWC2CHW()
decode_op = vision.Decode()
hwc2chw_op = vision.HWC2CHW()
data1 = data1.map(operations=decode_op, input_columns=["image"])
data1 = data1.map(operations=hwc2chw_op, input_columns=["image"])
# Second dataset
data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
transforms = [
py_vision.Decode(),
py_vision.ToTensor()]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms)
vision.Decode(True),
vision.ToTensor()]
transform = mindspore.dataset.transforms.transforms.Compose(transforms)
data2 = data2.map(operations=transform, input_columns=["image"])
image_c_transposed = []
@ -169,13 +168,13 @@ def test_hwc2chw_comp(plot=False):
c_image = item1["image"]
py_image = (item2["image"] * 255).astype(np.uint8)
# Compare images between that applying c_transform and py_transform
# Compare images between that applying C implementation and Python implementation image augmentations
mse = diff_mse(py_image, c_image)
# Note: The images aren't exactly the same due to rounding error
assert mse < 0.001
if plot:
image_c_transposed.append(c_image.transpose(1, 2, 0))
image_py_transposed.append(py_image.transpose(1, 2, 0))
if plot:
visualize_list(image_c_transposed, image_py_transposed, visualize_mode=2)

View File

@ -18,9 +18,8 @@ Testing Invert op in DE
import numpy as np
import mindspore.dataset as ds
import mindspore.dataset.transforms.py_transforms
import mindspore.dataset.vision.py_transforms as F
import mindspore.dataset.vision.c_transforms as C
import mindspore.dataset.transforms.transforms
import mindspore.dataset.vision.transforms as vision
from mindspore import log as logger
from util import visualize_list, save_and_check_md5, diff_mse
@ -37,8 +36,8 @@ def test_invert_callable():
img = np.fromfile("../data/dataset/apple.jpg", dtype=np.uint8)
logger.info("Image.type: {}, Image.shape: {}".format(type(img), img.shape))
img = C.Decode()(img)
img = C.Invert()(img)
img = vision.Decode()(img)
img = vision.Invert()(img)
logger.info("Image.type: {}, Image.shape: {}".format(type(img), img.shape))
assert img.shape == (2268, 4032, 3)
@ -46,16 +45,16 @@ def test_invert_callable():
def test_invert_py(plot=False):
"""
Test Invert python op
Test Invert Python implementation
"""
logger.info("Test Invert Python op")
logger.info("Test Invert Python implementation")
# Original Images
data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
transforms_original = mindspore.dataset.transforms.py_transforms.Compose([F.Decode(),
F.Resize((224, 224)),
F.ToTensor()])
transforms_original = mindspore.dataset.transforms.transforms.Compose([vision.Decode(True),
vision.Resize((224, 224)),
vision.ToTensor()])
ds_original = data_set.map(operations=transforms_original, input_columns="image")
@ -72,10 +71,10 @@ def test_invert_py(plot=False):
# Color Inverted Images
data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
transforms_invert = mindspore.dataset.transforms.py_transforms.Compose([F.Decode(),
F.Resize((224, 224)),
F.Invert(),
F.ToTensor()])
transforms_invert = mindspore.dataset.transforms.transforms.Compose([vision.Decode(True),
vision.Resize((224, 224)),
vision.Invert(),
vision.ToTensor()])
ds_invert = data_set.map(operations=transforms_invert, input_columns="image")
@ -101,14 +100,14 @@ def test_invert_py(plot=False):
def test_invert_c(plot=False):
"""
Test Invert Cpp op
Test Invert C implementation
"""
logger.info("Test Invert cpp op")
logger.info("Test Invert C implementation")
# Original Images
data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
transforms_original = [C.Decode(), C.Resize(size=[224, 224])]
transforms_original = [vision.Decode(), vision.Resize(size=[224, 224])]
ds_original = data_set.map(operations=transforms_original, input_columns="image")
@ -125,8 +124,8 @@ def test_invert_c(plot=False):
# Invert Images
data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
transform_invert = [C.Decode(), C.Resize(size=[224, 224]),
C.Invert()]
transform_invert = [vision.Decode(), vision.Resize(size=[224, 224]),
vision.Invert()]
ds_invert = data_set.map(operations=transform_invert, input_columns="image")
@ -151,15 +150,15 @@ def test_invert_c(plot=False):
def test_invert_py_c(plot=False):
"""
Test Invert Cpp op and python op
Test Invert C implementation and Python implementation
"""
logger.info("Test Invert cpp and python op")
logger.info("Test Invert cpp and Python implementations")
# Invert Images in cpp
data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
data_set = data_set.map(operations=[C.Decode(), C.Resize((224, 224))], input_columns=["image"])
data_set = data_set.map(operations=[vision.Decode(), vision.Resize((224, 224))], input_columns=["image"])
ds_c_invert = data_set.map(operations=C.Invert(), input_columns="image")
ds_c_invert = data_set.map(operations=vision.Invert(), input_columns="image")
ds_c_invert = ds_c_invert.batch(512)
@ -173,12 +172,12 @@ def test_invert_py_c(plot=False):
# invert images in python
data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
data_set = data_set.map(operations=[C.Decode(), C.Resize((224, 224))], input_columns=["image"])
data_set = data_set.map(operations=[vision.Decode(), vision.Resize((224, 224))], input_columns=["image"])
transforms_p_invert = mindspore.dataset.transforms.py_transforms.Compose([lambda img: img.astype(np.uint8),
F.ToPIL(),
F.Invert(),
np.array])
transforms_p_invert = mindspore.dataset.transforms.transforms.Compose([lambda img: img.astype(np.uint8),
vision.ToPIL(),
vision.Invert(),
np.array])
ds_p_invert = data_set.map(operations=transforms_p_invert, input_columns="image")
@ -204,15 +203,15 @@ def test_invert_py_c(plot=False):
def test_invert_one_channel():
"""
Test Invert cpp op with one channel image
"""
logger.info("Test Invert C Op With One Channel Images")
Test Invert C implementation with one channel image
"""
logger.info("Test Invert C implementation With One Channel Images")
c_op = C.Invert()
c_op = vision.Invert()
try:
data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
data_set = data_set.map(operations=[C.Decode(), C.Resize((224, 224)),
data_set = data_set.map(operations=[vision.Decode(), vision.Resize((224, 224)),
lambda img: np.array(img[:, :, 0])], input_columns=["image"])
data_set.map(operations=c_op, input_columns="image")
@ -224,16 +223,16 @@ def test_invert_one_channel():
def test_invert_md5_py():
"""
Test Invert python op with md5 check
Test Invert Python implementation with md5 check
"""
logger.info("Test Invert python op with md5 check")
logger.info("Test Invert Python implementation with md5 check")
# Generate dataset
data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
transforms_invert = mindspore.dataset.transforms.py_transforms.Compose([F.Decode(),
F.Invert(),
F.ToTensor()])
transforms_invert = mindspore.dataset.transforms.transforms.Compose([vision.Decode(True),
vision.Invert(),
vision.ToTensor()])
data = data_set.map(operations=transforms_invert, input_columns="image")
# Compare with expected md5 from images
@ -241,19 +240,19 @@ def test_invert_md5_py():
save_and_check_md5(data, filename, generate_golden=GENERATE_GOLDEN)
def test_invert_md5_c():
def skip_test_invert_md5_c():
"""
Test Invert cpp op with md5 check
Test Invert C implementation with md5 check
"""
logger.info("Test Invert cpp op with md5 check")
logger.info("Test Invert C implementation with md5 check")
# Generate dataset
data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
transforms_invert = [C.Decode(),
C.Resize(size=[224, 224]),
C.Invert(),
F.ToTensor()]
transforms_invert = [vision.Decode(),
vision.Resize(size=[224, 224]),
vision.Invert(),
vision.ToTensor()]
data = data_set.map(operations=transforms_invert, input_columns="image")
# Compare with expected md5 from images

View File

@ -17,8 +17,8 @@ Testing LinearTransformation op in DE
"""
import numpy as np
import mindspore.dataset as ds
import mindspore.dataset.transforms.py_transforms
import mindspore.dataset.vision.py_transforms as py_vision
import mindspore.dataset.transforms.transforms
import mindspore.dataset.vision.transforms as vision
from mindspore import log as logger
from util import diff_mse, visualize_list, save_and_check_md5
@ -43,18 +43,18 @@ def test_linear_transformation_op(plot=False):
# Define operations
transforms = [
py_vision.Decode(),
py_vision.CenterCrop([height, weight]),
py_vision.ToTensor()
vision.Decode(True),
vision.CenterCrop([height, weight]),
vision.ToTensor()
]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms)
transform = mindspore.dataset.transforms.transforms.Compose(transforms)
# First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
data1 = data1.map(operations=transform, input_columns=["image"])
# Note: if transformation matrix is diagonal matrix with all 1 in diagonal,
# the output matrix in expected to be the same as the input matrix.
data1 = data1.map(operations=py_vision.LinearTransformation(transformation_matrix, mean_vector),
data1 = data1.map(operations=vision.LinearTransformation(transformation_matrix, mean_vector),
input_columns=["image"])
# Second dataset
@ -93,12 +93,12 @@ def test_linear_transformation_md5():
# Generate dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
transforms = [
py_vision.Decode(),
py_vision.CenterCrop([height, weight]),
py_vision.ToTensor(),
py_vision.LinearTransformation(transformation_matrix, mean_vector)
vision.Decode(True),
vision.CenterCrop([height, weight]),
vision.ToTensor(),
vision.LinearTransformation(transformation_matrix, mean_vector)
]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms)
transform = mindspore.dataset.transforms.transforms.Compose(transforms)
data1 = data1.map(operations=transform, input_columns=["image"])
# Compare with expected md5 from images
@ -123,12 +123,12 @@ def test_linear_transformation_exception_01():
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
try:
transforms = [
py_vision.Decode(),
py_vision.CenterCrop([height, weight]),
py_vision.ToTensor(),
py_vision.LinearTransformation(None, mean_vector)
vision.Decode(True),
vision.CenterCrop([height, weight]),
vision.ToTensor(),
vision.LinearTransformation(None, mean_vector)
]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms)
transform = mindspore.dataset.transforms.transforms.Compose(transforms)
data1 = data1.map(operations=transform, input_columns=["image"])
except TypeError as e:
logger.info("Got an exception in DE: {}".format(str(e)))
@ -152,12 +152,12 @@ def test_linear_transformation_exception_02():
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
try:
transforms = [
py_vision.Decode(),
py_vision.CenterCrop([height, weight]),
py_vision.ToTensor(),
py_vision.LinearTransformation(transformation_matrix, None)
vision.Decode(True),
vision.CenterCrop([height, weight]),
vision.ToTensor(),
vision.LinearTransformation(transformation_matrix, None)
]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms)
transform = mindspore.dataset.transforms.transforms.Compose(transforms)
data1 = data1.map(operations=transform, input_columns=["image"])
except TypeError as e:
logger.info("Got an exception in DE: {}".format(str(e)))
@ -182,12 +182,12 @@ def test_linear_transformation_exception_03():
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
try:
transforms = [
py_vision.Decode(),
py_vision.CenterCrop([height, weight]),
py_vision.ToTensor(),
py_vision.LinearTransformation(transformation_matrix, mean_vector)
vision.Decode(True),
vision.CenterCrop([height, weight]),
vision.ToTensor(),
vision.LinearTransformation(transformation_matrix, mean_vector)
]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms)
transform = mindspore.dataset.transforms.transforms.Compose(transforms)
data1 = data1.map(operations=transform, input_columns=["image"])
except ValueError as e:
logger.info("Got an exception in DE: {}".format(str(e)))
@ -212,12 +212,12 @@ def test_linear_transformation_exception_04():
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
try:
transforms = [
py_vision.Decode(),
py_vision.CenterCrop([height, weight]),
py_vision.ToTensor(),
py_vision.LinearTransformation(transformation_matrix, mean_vector)
vision.Decode(True),
vision.CenterCrop([height, weight]),
vision.ToTensor(),
vision.LinearTransformation(transformation_matrix, mean_vector)
]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms)
transform = mindspore.dataset.transforms.transforms.Compose(transforms)
data1 = data1.map(operations=transform, input_columns=["image"])
except ValueError as e:
logger.info("Got an exception in DE: {}".format(str(e)))

View File

@ -14,10 +14,8 @@
# ==============================================================================
import pytest
import mindspore.dataset as ds
from mindspore.dataset.transforms import c_transforms
from mindspore.dataset.transforms import py_transforms
import mindspore.dataset.vision.c_transforms as c_vision
import mindspore.dataset.vision.py_transforms as py_vision
from mindspore.dataset.transforms import transforms
import mindspore.dataset.vision.transforms as vision
DATA_DIR = "../data/dataset/testPK/data"
@ -25,7 +23,7 @@ DATA_DIR = "../data/dataset/testPK/data"
def test_map_c_transform_exception():
"""
Feature: test c error op def
Description: op defined like c_vision.HWC2CHW
Description: op defined like vision.HWC2CHW
Expectation: success
"""
data_set = ds.ImageFolderDataset(DATA_DIR, num_parallel_workers=1, shuffle=True)
@ -35,12 +33,12 @@ def test_map_c_transform_exception():
std = [0.229 * 255, 0.224 * 255, 0.225 * 255]
# define map operations
random_crop_decode_resize_op = c_vision.RandomCropDecodeResize(train_image_size,
scale=(0.08, 1.0),
ratio=(0.75, 1.333))
random_horizontal_flip_op = c_vision.RandomHorizontalFlip(prob=0.5)
normalize_op = c_vision.Normalize(mean=mean, std=std)
hwc2chw_op = c_vision.HWC2CHW # exception
random_crop_decode_resize_op = vision.RandomCropDecodeResize(train_image_size,
scale=(0.08, 1.0),
ratio=(0.75, 1.333))
random_horizontal_flip_op = vision.RandomHorizontalFlip(prob=0.5)
normalize_op = vision.Normalize(mean=mean, std=std)
hwc2chw_op = vision.HWC2CHW # exception
data_set = data_set.map(operations=random_crop_decode_resize_op, input_columns="image", num_parallel_workers=1)
data_set = data_set.map(operations=random_horizontal_flip_op, input_columns="image", num_parallel_workers=1)
@ -51,44 +49,44 @@ def test_map_c_transform_exception():
# compose exception
with pytest.raises(ValueError) as info:
c_transforms.Compose([
c_vision.RandomCropDecodeResize(train_image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
c_vision.RandomHorizontalFlip,
c_vision.Normalize(mean=mean, std=std),
c_vision.HWC2CHW()])
transforms.Compose([
vision.RandomCropDecodeResize(train_image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
vision.RandomHorizontalFlip,
vision.Normalize(mean=mean, std=std),
vision.HWC2CHW()])
assert " should be a " in str(info.value)
# randomapply exception
with pytest.raises(ValueError) as info:
c_transforms.RandomApply([
c_vision.RandomCropDecodeResize,
c_vision.RandomHorizontalFlip(prob=0.5),
c_vision.Normalize(mean=mean, std=std),
c_vision.HWC2CHW()])
transforms.RandomApply([
vision.RandomCropDecodeResize,
vision.RandomHorizontalFlip(prob=0.5),
vision.Normalize(mean=mean, std=std),
vision.HWC2CHW()])
assert " should be a " in str(info.value)
# randomchoice exception
with pytest.raises(ValueError) as info:
c_transforms.RandomChoice([
c_vision.RandomCropDecodeResize(train_image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
c_vision.RandomHorizontalFlip(prob=0.5),
c_vision.Normalize,
c_vision.HWC2CHW()])
transforms.RandomChoice([
vision.RandomCropDecodeResize(train_image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
vision.RandomHorizontalFlip(prob=0.5),
vision.Normalize,
vision.HWC2CHW()])
assert " should be a " in str(info.value)
def test_map_py_transform_exception():
"""
Feature: test python error op def
Description: op defined like py_vision.RandomHorizontalFlip
Description: op defined like vision.RandomHorizontalFlip
Expectation: success
"""
data_set = ds.ImageFolderDataset(DATA_DIR, num_parallel_workers=1, shuffle=True)
# define map operations
decode_op = py_vision.Decode()
random_horizontal_flip_op = py_vision.RandomHorizontalFlip # exception
to_tensor_op = py_vision.ToTensor()
decode_op = vision.Decode(to_pil=True)
random_horizontal_flip_op = vision.RandomHorizontalFlip # exception
to_tensor_op = vision.ToTensor()
trans = [decode_op, random_horizontal_flip_op, to_tensor_op]
with pytest.raises(ValueError) as info:
@ -97,26 +95,26 @@ def test_map_py_transform_exception():
# compose exception
with pytest.raises(ValueError) as info:
py_transforms.Compose([
py_vision.Decode,
py_vision.RandomHorizontalFlip(),
py_vision.ToTensor()])
transforms.Compose([
vision.Decode,
vision.RandomHorizontalFlip(),
vision.ToTensor()])
assert " should be a " in str(info.value)
# randomapply exception
with pytest.raises(ValueError) as info:
py_transforms.RandomApply([
py_vision.Decode(),
py_vision.RandomHorizontalFlip,
py_vision.ToTensor()])
transforms.RandomApply([
vision.Decode(to_pil=True),
vision.RandomHorizontalFlip,
vision.ToTensor()])
assert " should be a " in str(info.value)
# randomchoice exception
with pytest.raises(ValueError) as info:
py_transforms.RandomChoice([
py_vision.Decode(),
py_vision.RandomHorizontalFlip(),
py_vision.ToTensor])
transforms.RandomChoice([
vision.Decode(to_pil=True),
vision.RandomHorizontalFlip(),
vision.ToTensor])
assert " should be a " in str(info.value)

View File

@ -17,8 +17,8 @@ import pytest
import mindspore.dataset as ds
import mindspore.common.dtype as mstype
import mindspore.dataset.vision.c_transforms as C
import mindspore.dataset.transforms.c_transforms as C2
import mindspore.dataset.vision.transforms as C
import mindspore.dataset.transforms.transforms as C2
DATA_DIR = "../data/dataset/testPK/data"

View File

@ -20,7 +20,7 @@ import pytest
import mindspore.common.dtype as mstype
import mindspore.dataset as ds
import mindspore.dataset.transforms.c_transforms as ops
import mindspore.dataset.transforms.transforms as ops
mstype_to_np_type = {
mstype.bool_: np.bool,

View File

@ -25,7 +25,7 @@ import pytest
import numpy as np
import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as vision
import mindspore.dataset.vision.transforms as vision
from mindspore import log as logger
from mindspore.dataset.vision import Inter
from mindspore.mindrecord import FileWriter

View File

@ -18,7 +18,7 @@ Testing Autotune support in DE for MindDataset
import os
import pytest
import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as vision
import mindspore.dataset.vision.transforms as vision
from mindspore.dataset.vision import Inter
from util_minddataset import add_and_remove_cv_file

View File

@ -15,30 +15,27 @@
import numpy as np
import mindspore.dataset as ds
import mindspore.dataset.transforms.c_transforms as c
import mindspore.dataset.transforms.py_transforms as f
import mindspore.dataset.vision.c_transforms as c_vision
import mindspore.dataset.vision.py_transforms as py_vision
import mindspore.dataset.transforms.transforms as data_trans
import mindspore.dataset.vision.transforms as vision
from mindspore import log as logger
DATA_DIR = "../data/dataset/testImageNetData/train"
DATA_DIR_2 = "../data/dataset/testImageNetData2/train"
def test_one_hot_op():
def skip_test_one_hot_op():
"""
Test one hot encoding op
"""
logger.info("Test one hot encoding op")
# define map operations
# ds = de.ImageFolderDataset(DATA_DIR, schema=SCHEMA_DIR)
dataset = ds.ImageFolderDataset(DATA_DIR)
num_classes = 2
epsilon_para = 0.1
transforms = [f.OneHotOp(num_classes=num_classes, smoothing_rate=epsilon_para)]
transform_label = f.Compose(transforms)
transforms = [data_trans.OneHot(num_classes=num_classes, smoothing_rate=epsilon_para)]
transform_label = transCompose(transforms)
dataset = dataset.map(operations=transform_label, input_columns=["label"])
golden_label = np.ones(num_classes) * epsilon_para / num_classes
@ -65,9 +62,9 @@ def test_mix_up_single():
ds1 = ds.ImageFolderDataset(DATA_DIR_2)
num_classes = 10
decode_op = c_vision.Decode()
resize_op = c_vision.Resize((resize_height, resize_width), c_vision.Inter.LINEAR)
one_hot_encode = c.OneHot(num_classes) # num_classes is input argument
decode_op = vision.Decode()
resize_op = vision.Resize((resize_height, resize_width), vision.Inter.LINEAR)
one_hot_encode = data_trans.OneHot(num_classes) # num_classes is input argument
ds1 = ds1.map(operations=decode_op, input_columns=["image"])
ds1 = ds1.map(operations=resize_op, input_columns=["image"])
@ -79,7 +76,7 @@ def test_mix_up_single():
ds2 = ds1
alpha = 0.2
transforms = [py_vision.MixUp(batch_size=batch_size, alpha=alpha, is_single=True)
transforms = [vision.MixUp(batch_size=batch_size, alpha=alpha, is_single=True)
]
ds1 = ds1.map(operations=transforms, input_columns=["image", "label"])
@ -115,9 +112,9 @@ def test_mix_up_multi():
ds1 = ds.ImageFolderDataset(DATA_DIR_2)
num_classes = 3
decode_op = c_vision.Decode()
resize_op = c_vision.Resize((resize_height, resize_width), c_vision.Inter.LINEAR)
one_hot_encode = c.OneHot(num_classes) # num_classes is input argument
decode_op = vision.Decode()
resize_op = vision.Resize((resize_height, resize_width), vision.Inter.LINEAR)
one_hot_encode = data_trans.OneHot(num_classes) # num_classes is input argument
ds1 = ds1.map(operations=decode_op, input_columns=["image"])
ds1 = ds1.map(operations=resize_op, input_columns=["image"])
@ -129,7 +126,7 @@ def test_mix_up_multi():
ds2 = ds1
alpha = 0.2
transforms = [py_vision.MixUp(batch_size=batch_size, alpha=alpha, is_single=False)
transforms = [vision.MixUp(batch_size=batch_size, alpha=alpha, is_single=False)
]
ds1 = ds1.map(operations=transforms, input_columns=["image", "label"])
num_iter = 0
@ -161,6 +158,6 @@ def test_mix_up_multi():
if __name__ == "__main__":
test_one_hot_op()
skip_test_one_hot_op()
test_mix_up_single()
test_mix_up_multi()

View File

@ -18,8 +18,8 @@ Testing the MixUpBatch op in DE
import numpy as np
import pytest
import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as vision
import mindspore.dataset.transforms.c_transforms as data_trans
import mindspore.dataset.vision.transforms as vision
import mindspore.dataset.transforms.transforms as data_trans
from mindspore import log as logger
from util import save_and_check_md5, diff_mse, visualize_list, config_get_set_seed, \
config_get_set_num_parallel_workers

View File

@ -18,9 +18,8 @@ Testing Normalize op in DE
import numpy as np
from PIL import Image
import mindspore.dataset as ds
import mindspore.dataset.transforms.py_transforms
import mindspore.dataset.vision.c_transforms as c_vision
import mindspore.dataset.vision.py_transforms as py_vision
import mindspore.dataset.transforms.transforms
import mindspore.dataset.vision.transforms as vision
from mindspore import log as logger
from util import diff_mse, save_and_check_md5, visualize_image
@ -42,31 +41,29 @@ def normalize_np(image, mean, std):
return image
def util_test_normalize(mean, std, op_type):
def util_test_normalize(mean, std, add_to_pil):
"""
Utility function for testing Normalize. Input arguments are given by other tests
"""
if op_type == "cpp":
if not add_to_pil:
# define map operations
decode_op = c_vision.Decode()
normalize_op = c_vision.Normalize(mean, std)
decode_op = vision.Decode()
normalize_op = vision.Normalize(mean, std, True)
# Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
data = data.map(operations=decode_op, input_columns=["image"])
data = data.map(operations=normalize_op, input_columns=["image"])
elif op_type == "python":
else:
# define map operations
transforms = [
py_vision.Decode(),
py_vision.ToTensor(),
py_vision.Normalize(mean, std)
vision.Decode(True),
vision.ToTensor(),
vision.Normalize(mean, std, False)
]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms)
transform = mindspore.dataset.transforms.transforms.Compose(transforms)
# Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
data = data.map(operations=transform, input_columns=["image"])
else:
raise ValueError("Wrong parameter value")
return data
@ -75,28 +72,30 @@ def util_test_normalize_grayscale(num_output_channels, mean, std):
Utility function for testing Normalize. Input arguments are given by other tests
"""
transforms = [
py_vision.Decode(),
py_vision.Grayscale(num_output_channels),
py_vision.ToTensor(),
py_vision.Normalize(mean, std)
vision.Decode(True),
vision.Grayscale(num_output_channels),
vision.ToTensor(),
vision.Normalize(mean, std, False)
]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms)
transform = mindspore.dataset.transforms.transforms.Compose(transforms)
# Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
data = data.map(operations=transform, input_columns=["image"])
return data
def test_normalize_op_c(plot=False):
def test_normalize_op_hwc(plot=False):
"""
Test Normalize in cpp transformations
Feature: Normalize op
Description: Test Normalize with Decode versus NumPy comparison
Expectation: Test succeeds. MSE difference is negligible.
"""
logger.info("Test Normalize in cpp")
logger.info("Test Normalize in with hwc")
mean = [121.0, 115.0, 100.0]
std = [70.0, 68.0, 71.0]
# define map operations
decode_op = c_vision.Decode()
normalize_op = c_vision.Normalize(mean, std)
decode_op = vision.Decode()
normalize_op = vision.Normalize(mean, std, True)
# First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
@ -113,28 +112,30 @@ def test_normalize_op_c(plot=False):
image_de_normalized = item1["image"]
image_original = item2["image"]
image_np_normalized = normalize_np(image_original, mean, std)
np.testing.assert_almost_equal(image_de_normalized, image_np_normalized, 2)
mse = diff_mse(image_de_normalized, image_np_normalized)
logger.info("image_{}, mse: {}".format(num_iter + 1, mse))
assert mse < 0.01
if plot:
visualize_image(image_original, image_de_normalized, mse, image_np_normalized)
num_iter += 1
def test_normalize_op_py(plot=False):
def test_normalize_op_chw(plot=False):
"""
Test Normalize in python transformations
Feature: Normalize op
Description: Test Normalize with CHW input, Decode(to_pil=True) & ToTensor versus NumPy comparison
Expectation: Test succeeds. MSE difference is negligible.
"""
logger.info("Test Normalize in python")
logger.info("Test Normalize with chw")
mean = [0.475, 0.45, 0.392]
std = [0.275, 0.267, 0.278]
# define map operations
transforms = [
py_vision.Decode(),
py_vision.ToTensor()
vision.Decode(True),
vision.ToTensor()
]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms)
normalize_op = py_vision.Normalize(mean, std)
transform = mindspore.dataset.transforms.transforms.Compose(transforms)
normalize_op = vision.Normalize(mean, std, False)
# First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
@ -150,11 +151,11 @@ def test_normalize_op_py(plot=False):
data2.create_dict_iterator(num_epochs=1, output_numpy=True)):
image_de_normalized = (item1["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
image_np_normalized = (normalize_np(item2["image"].transpose(1, 2, 0), mean, std) * 255).astype(np.uint8)
image_original = (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
mse = diff_mse(image_de_normalized, image_np_normalized)
logger.info("image_{}, mse: {}".format(num_iter + 1, mse))
assert mse < 0.01
if plot:
image_original = (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
visualize_image(image_original, image_de_normalized, mse, image_np_normalized)
num_iter += 1
@ -169,7 +170,7 @@ def test_decode_op():
shuffle=False)
# define map operations
decode_op = c_vision.Decode()
decode_op = vision.Decode()
# apply map operations on images
data1 = data1.map(operations=decode_op, input_columns=["image"])
@ -191,8 +192,8 @@ def test_decode_normalize_op():
shuffle=False)
# define map operations
decode_op = c_vision.Decode()
normalize_op = c_vision.Normalize([121.0, 115.0, 100.0], [70.0, 68.0, 71.0])
decode_op = vision.Decode()
normalize_op = vision.Normalize([121.0, 115.0, 100.0], [70.0, 68.0, 71.0], True)
# apply map operations on images
data1 = data1.map(operations=[decode_op, normalize_op], input_columns=["image"])
@ -210,12 +211,12 @@ def test_normalize_md5_01():
expected to pass
"""
logger.info("test_normalize_md5_01")
data_c = util_test_normalize([121.0, 115.0, 100.0], [70.0, 68.0, 71.0], "cpp")
data_py = util_test_normalize([0.475, 0.45, 0.392], [0.275, 0.267, 0.278], "python")
data_c = util_test_normalize([121.0, 115.0, 100.0], [70.0, 68.0, 71.0], False)
data_py = util_test_normalize([0.475, 0.45, 0.392], [0.275, 0.267, 0.278], True)
# check results with md5 comparison
filename1 = "normalize_01_c_result.npz"
filename2 = "normalize_01_py_result.npz"
filename2 = "normalize_01_to_pil_result.npz"
save_and_check_md5(data_c, filename1, generate_golden=GENERATE_GOLDEN)
save_and_check_md5(data_py, filename2, generate_golden=GENERATE_GOLDEN)
@ -226,79 +227,84 @@ def test_normalize_md5_02():
expected to pass
"""
logger.info("test_normalize_md5_02")
data_py = util_test_normalize([0.475], [0.275], "python")
data_py = util_test_normalize([0.475], [0.275], True)
# check results with md5 comparison
filename2 = "normalize_02_py_result.npz"
filename2 = "normalize_02_to_pil_result.npz"
save_and_check_md5(data_py, filename2, generate_golden=GENERATE_GOLDEN)
def test_normalize_exception_unequal_size_c():
def test_normalize_exception_unequal_size_1():
"""
Test Normalize in c transformation: len(mean) != len(std)
expected to raise ValueError
Feature: Normalize op
Description: Test Normalize with error input: len(mean) != len(std)
Expectation: ValueError raised
"""
logger.info("test_normalize_exception_unequal_size_c")
logger.info("test_normalize_exception_unequal_size_1")
try:
_ = c_vision.Normalize([100, 250, 125], [50, 50, 75, 75])
_ = vision.Normalize([100, 250, 125], [50, 50, 75, 75])
except ValueError as e:
logger.info("Got an exception in DE: {}".format(str(e)))
assert str(e) == "Length of mean and std must be equal."
def test_normalize_exception_out_of_range_c():
def test_normalize_exception_out_of_range():
"""
Test Normalize in c transformation: mean, std out of range
expected to raise ValueError
Feature: Normalize op
Description: Test Normalize with error input: mean, std out of range
Expectation: ValueError raised
"""
logger.info("test_normalize_exception_out_of_range_c")
logger.info("test_normalize_exception_out_of_range")
try:
_ = c_vision.Normalize([256, 250, 125], [50, 75, 75])
_ = vision.Normalize([256, 250, 125], [50, 75, 75])
except ValueError as e:
logger.info("Got an exception in DE: {}".format(str(e)))
assert "not within the required interval" in str(e)
try:
_ = c_vision.Normalize([255, 250, 125], [0, 75, 75])
_ = vision.Normalize([255, 250, 125], [0, 75, 75])
except ValueError as e:
logger.info("Got an exception in DE: {}".format(str(e)))
assert "not within the required interval" in str(e)
def test_normalize_exception_unequal_size_py():
def test_normalize_exception_unequal_size_2():
"""
Test Normalize in python transformation: len(mean) != len(std)
expected to raise ValueError
Feature: Normalize op
Description: Test Normalize with error input: len(mean) != len(std)
Expectation: ValueError raised
"""
logger.info("test_normalize_exception_unequal_size_py")
logger.info("test_normalize_exception_unequal_size_2")
try:
_ = py_vision.Normalize([0.50, 0.30, 0.75], [0.18, 0.32, 0.71, 0.72])
_ = vision.Normalize([0.50, 0.30, 0.75], [0.18, 0.32, 0.71, 0.72], False)
except ValueError as e:
logger.info("Got an exception in DE: {}".format(str(e)))
assert str(e) == "Length of mean and std must be equal."
def test_normalize_exception_invalid_size_py():
def test_normalize_exception_invalid_size():
"""
Test Normalize in python transformation: len(mean)=len(std)=2
expected to raise RuntimeError
Feature: Normalize op
Description: Test Normalize with error input: len(mean)=len(std)=2
Expectation: RuntimeError raised
"""
logger.info("test_normalize_exception_invalid_size_py")
data = util_test_normalize([0.75, 0.25], [0.18, 0.32], "python")
logger.info("test_normalize_exception_invalid_size")
data = util_test_normalize([0.75, 0.25], [0.18, 0.32], False)
try:
_ = data.create_dict_iterator(num_epochs=1).__next__()
except RuntimeError as e:
logger.info("Got an exception in DE: {}".format(str(e)))
assert "Length of mean and std must both be 1 or" in str(e)
assert "Normalize: number of channels does not match the size of mean and std vectors" in str(e)
def test_normalize_exception_invalid_range_py():
def test_normalize_exception_invalid_range():
"""
Test Normalize in python transformation: value is not in range [0,1]
expected to raise ValueError
Feature: Normalize op
Description: Test Normalize with error input: value is not in range [0,1]
Expectation: ValueError raised
"""
logger.info("test_normalize_exception_invalid_range_py")
logger.info("test_normalize_exception_invalid_range")
try:
_ = py_vision.Normalize([0.75, 1.25, 0.5], [0.1, 0.18, 1.32])
_ = vision.Normalize([0.75, 1.25, 0.5], [0.1, 0.18, 1.32], False)
except ValueError as e:
logger.info("Got an exception in DE: {}".format(str(e)))
assert "Input mean_value is not within the required interval of [0.0, 1.0]." in str(e)
@ -312,7 +318,7 @@ def test_normalize_grayscale_md5_01():
logger.info("test_normalize_grayscale_md5_01")
data = util_test_normalize_grayscale(1, [0.5], [0.175])
# check results with md5 comparison
filename = "normalize_03_py_result.npz"
filename = "normalize_03_to_pil_result.npz"
save_and_check_md5(data, filename, generate_golden=GENERATE_GOLDEN)
@ -324,7 +330,7 @@ def test_normalize_grayscale_md5_02():
logger.info("test_normalize_grayscale_md5_02")
data = util_test_normalize_grayscale(3, [0.5, 0.5, 0.5], [0.175, 0.235, 0.512])
# check results with md5 comparison
filename = "normalize_04_py_result.npz"
filename = "normalize_04_to_pil_result.npz"
save_and_check_md5(data, filename, generate_golden=GENERATE_GOLDEN)
@ -346,7 +352,7 @@ def test_multiple_channels():
def util_test(item, mean, std):
data = ds.NumpySlicesDataset([item], shuffle=False)
data = data.map(c_vision.Normalize(mean, std))
data = data.map(vision.Normalize(mean, std, True))
for d in data.create_tuple_iterator(num_epochs=1, output_numpy=True):
actual = d[0]
mean = np.array(mean, dtype=item.dtype)
@ -372,48 +378,91 @@ def test_multiple_channels():
util_test(np.ones(shape=[6, 6, 129]), mean=[0.5], std=[0.1])
def test_normalize_c_eager():
def test_normalize_eager_hwc():
"""
Feature: Normalize op
Description: Test eager support for Normalize C++ op
Description: Test eager support for Normalize C implementation with HWC input
Expectation: Receive non-None output image from op
"""
img_in = Image.open("../data/dataset/apple.jpg").convert("RGB")
mean_vec = [1, 100, 255]
std_vec = [1, 20, 255]
normalize_op = c_vision.Normalize(mean=mean_vec, std=std_vec)
normalize_op = vision.Normalize(mean=mean_vec, std=std_vec)
img_out = normalize_op(img_in)
assert img_out is not None
def test_normalize_py_eager():
def test_normalize_eager_chw():
"""
Feature: Normalize op
Description: Test eager support for Normalize Python op
Description: Test eager support for Normalize C implementation with CHW input
Expectation: Receive non-None output image from op
"""
img_in = Image.open("../data/dataset/apple.jpg").convert("RGB")
img_in = py_vision.ToTensor()(img_in)
img_in = vision.ToTensor()(img_in)
mean_vec = [0.1, 0.5, 1.0]
std_vec = [0.1, 0.4, 1.0]
normalize_op = py_vision.Normalize(mean=mean_vec, std=std_vec)
normalize_op = vision.Normalize(mean=mean_vec, std=std_vec, is_hwc=False)
img_out = normalize_op(img_in)
assert img_out is not None
def test_normalize_op_comp_chw():
"""
Feature: Normalize op
Description: Test Normalize with CHW input, Decode(to_pil=True) & ToTensor versus Decode(to_pil=False) & HWC2CHW
comparison.
Expectation: Test succeeds. MSE difference is negligible.
"""
logger.info("Test Normalize with CHW input")
mean = [0.475, 0.45, 0.392]
std = [0.275, 0.267, 0.278]
# define map operations
transforms = [
vision.Decode(True),
vision.ToTensor()
]
transform = mindspore.dataset.transforms.transforms.Compose(transforms)
normalize_op = vision.Normalize(mean, std, False)
# First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
data1 = data1.map(operations=transform, input_columns=["image"])
data1 = data1.map(operations=normalize_op, input_columns=["image"])
# Second dataset
data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
data2 = data2.map(operations=vision.Decode(), input_columns=["image"])
data2 = data2.map(operations=vision.HWC2CHW(), input_columns=["image"])
data2 = data2.map(operations=vision.Normalize(mean, std, False), input_columns=["image"])
num_iter = 0
for item1, item2 in zip(data1.create_dict_iterator(num_epochs=1, output_numpy=True),
data2.create_dict_iterator(num_epochs=1, output_numpy=True)):
image_de_normalized = item1["image"]
image_np_normalized = item2["image"] / 255
mse = diff_mse(image_de_normalized, image_np_normalized)
logger.info("image_{}, mse: {}".format(num_iter + 1, mse))
assert mse < 0.01
num_iter += 1
if __name__ == "__main__":
test_decode_op()
test_decode_normalize_op()
test_normalize_op_c(plot=True)
test_normalize_op_py(plot=True)
test_normalize_op_hwc(plot=True)
test_normalize_op_chw(plot=True)
test_normalize_md5_01()
test_normalize_md5_02()
test_normalize_exception_unequal_size_c()
test_normalize_exception_unequal_size_py()
test_normalize_exception_invalid_size_py()
test_normalize_exception_invalid_range_py()
test_normalize_exception_unequal_size_1()
test_normalize_exception_out_of_range()
test_normalize_exception_unequal_size_2()
test_normalize_exception_invalid_size()
test_normalize_exception_invalid_range()
test_normalize_grayscale_md5_01()
test_normalize_grayscale_md5_02()
test_normalize_grayscale_exception()
test_normalize_c_eager()
test_normalize_py_eager()
test_multiple_channels()
test_normalize_eager_hwc()
test_normalize_eager_chw()
test_normalize_op_comp_chw()

View File

@ -17,9 +17,8 @@ Testing Normalize op in DE
"""
import numpy as np
import mindspore.dataset as ds
import mindspore.dataset.transforms.py_transforms
import mindspore.dataset.vision.c_transforms as c_vision
import mindspore.dataset.vision.py_transforms as py_vision
import mindspore.dataset.transforms.transforms
import mindspore.dataset.vision.transforms as vision
from mindspore import log as logger
from util import diff_mse, visualize_image
@ -43,16 +42,18 @@ def normalizepad_np(image, mean, std):
return output
def test_normalizepad_op_c(plot=False):
def test_normalizepad_op_hwc(plot=False):
"""
Test NormalizePad in cpp transformations
Feature: NormalizePad op
Description: Test NormalizePad with Decode versus NumPy comparison
Expectation: Test succeeds. MSE difference is negligible.
"""
logger.info("Test Normalize in cpp")
logger.info("Test NormalizePad with hwc")
mean = [121.0, 115.0, 100.0]
std = [70.0, 68.0, 71.0]
# define map operations
decode_op = c_vision.Decode()
normalizepad_op = c_vision.NormalizePad(mean, std)
decode_op = vision.Decode()
normalizepad_op = vision.NormalizePad(mean, std, is_hwc=True)
# First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
@ -77,29 +78,37 @@ def test_normalizepad_op_c(plot=False):
num_iter += 1
def test_normalizepad_op_py(plot=False):
def test_normalizepad_op_chw(plot=False):
"""
Test NormalizePad in python transformations
Feature: NormalizePad op
Description: Test NormalizePad with CHW input, Decode(to_pil=True) & ToTensor versus NumPy comparison
Expectation: Test succeeds. MSE difference is negligible.
"""
logger.info("Test Normalize in python")
logger.info("Test NormalizePad with chw")
mean = [0.475, 0.45, 0.392]
std = [0.275, 0.267, 0.278]
# define map operations
transforms = [
py_vision.Decode(),
py_vision.ToTensor()
vision.Decode(True),
vision.ToTensor()
]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms)
normalizepad_op = py_vision.NormalizePad(mean, std)
transform = mindspore.dataset.transforms.transforms.Compose(transforms)
normalizepad_op = vision.NormalizePad(mean, std, is_hwc=False)
# First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
data1 = data1.map(operations=transform, input_columns=["image"])
data1 = data1.map(operations=normalizepad_op, input_columns=["image"])
transforms2 = [
vision.Decode(True),
vision.ToTensor()
]
transform2 = mindspore.dataset.transforms.transforms.Compose(transforms2)
# Second dataset
data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
data2 = data2.map(operations=transform, input_columns=["image"])
data2 = data2.map(operations=transform2, input_columns=["image"])
num_iter = 0
for item1, item2 in zip(data1.create_dict_iterator(num_epochs=1, output_numpy=True),
@ -115,6 +124,45 @@ def test_normalizepad_op_py(plot=False):
num_iter += 1
def test_normalizepad_op_comp_chw():
"""
Feature: NormalizePad op
Description: Test NormalizePad with CHW input, Decode(to_pil=True) & ToTensor versus Decode(to_pil=False) & HWC2CHW
comparison.
Expectation: Test succeeds. MSE difference is negligible.
"""
logger.info("Test NormalizePad with CHW input")
mean = [0.475, 0.45, 0.392]
std = [0.275, 0.267, 0.278]
# define map operations
transforms = [
vision.Decode(True),
vision.ToTensor()
]
transform = mindspore.dataset.transforms.transforms.Compose(transforms)
normalizepad_op = vision.NormalizePad(mean, std, is_hwc=False)
# First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
data1 = data1.map(operations=transform, input_columns=["image"])
data1 = data1.map(operations=normalizepad_op, input_columns=["image"])
# Second dataset
data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
data2 = data2.map(operations=vision.Decode(), input_columns=["image"])
data2 = data2.map(operations=vision.HWC2CHW(), input_columns=["image"])
data2 = data2.map(operations=vision.NormalizePad(mean, std, is_hwc=False), input_columns=["image"])
num_iter = 0
for item1, item2 in zip(data1.create_dict_iterator(num_epochs=1, output_numpy=True),
data2.create_dict_iterator(num_epochs=1, output_numpy=True)):
image_de_normalized = item1["image"]
image_np_normalized = item2["image"] / 255
mse = diff_mse(image_de_normalized, image_np_normalized)
logger.info("image_{}, mse: {}".format(num_iter + 1, mse))
assert mse < 0.01
def test_decode_normalizepad_op():
"""
Test Decode op followed by NormalizePad op
@ -125,8 +173,8 @@ def test_decode_normalizepad_op():
shuffle=False)
# define map operations
decode_op = c_vision.Decode()
normalizepad_op = c_vision.NormalizePad([121.0, 115.0, 100.0], [70.0, 68.0, 71.0], "float16")
decode_op = vision.Decode()
normalizepad_op = vision.NormalizePad([121.0, 115.0, 100.0], [70.0, 68.0, 71.0], "float16")
# apply map operations on images
data1 = data1.map(operations=[decode_op, normalizepad_op], input_columns=["image"])
@ -138,64 +186,77 @@ def test_decode_normalizepad_op():
num_iter += 1
def test_normalizepad_exception_unequal_size_c():
def test_normalizepad_exception_unequal_size_1():
"""
Test NormalizePad in c transformation: len(mean) != len(std)
expected to raise ValueError
Feature: Normalize op
Description: Test Normalize with error input: len(mean) != len(std)
Expectation: ValueError raised
"""
logger.info("test_normalize_exception_unequal_size_c")
logger.info("test_normalizepad_exception_unequal_size_1")
try:
_ = c_vision.NormalizePad([100, 250, 125], [50, 50, 75, 75])
_ = vision.NormalizePad([100, 250, 125], [50, 50, 75, 75])
except ValueError as e:
logger.info("Got an exception in DE: {}".format(str(e)))
assert str(e) == "Length of mean and std must be equal."
try:
_ = c_vision.NormalizePad([100, 250, 125], [50, 50, 75], 1)
_ = vision.NormalizePad([100, 250, 125], [50, 50, 75], 1)
except TypeError as e:
logger.info("Got an exception in DE: {}".format(str(e)))
assert str(e) == "dtype should be string."
try:
_ = c_vision.NormalizePad([100, 250, 125], [50, 50, 75], "")
_ = vision.NormalizePad([100, 250, 125], [50, 50, 75], "")
except ValueError as e:
logger.info("Got an exception in DE: {}".format(str(e)))
assert str(e) == "dtype only support float32 or float16."
assert str(e) == "dtype only supports float32 or float16."
def test_normalizepad_exception_unequal_size_py():
def test_normalizepad_exception_unequal_size_2():
"""
Test NormalizePad in python transformation: len(mean) != len(std)
expected to raise ValueError
Feature: Normalize op
Description: Test Normalize with error input: len(mean) != len(std)
Expectation: ValueError raised
"""
logger.info("test_normalizepad_exception_unequal_size_py")
logger.info("test_normalizepad_exception_unequal_size_2")
try:
_ = py_vision.NormalizePad([0.50, 0.30, 0.75], [0.18, 0.32, 0.71, 0.72])
_ = vision.NormalizePad([0.50, 0.30, 0.75], [0.18, 0.32, 0.71, 0.72], is_hwc=False)
except ValueError as e:
logger.info("Got an exception in DE: {}".format(str(e)))
assert str(e) == "Length of mean and std must be equal."
try:
_ = py_vision.NormalizePad([0.50, 0.30, 0.75], [0.18, 0.32, 0.71], 1)
_ = vision.NormalizePad([0.50, 0.30, 0.75], [0.18, 0.32, 0.71], 1, is_hwc=False)
except TypeError as e:
logger.info("Got an exception in DE: {}".format(str(e)))
assert str(e) == "dtype should be string."
try:
_ = py_vision.NormalizePad([0.50, 0.30, 0.75], [0.18, 0.32, 0.71], "")
_ = vision.NormalizePad([0.50, 0.30, 0.75], [0.18, 0.32, 0.71], "", is_hwc=False)
except ValueError as e:
logger.info("Got an exception in DE: {}".format(str(e)))
assert str(e) == "dtype only support float32 or float16."
assert str(e) == "dtype only supports float32 or float16."
def test_normalizepad_exception_invalid_range_py():
def test_normalizepad_exception_invalid_range():
"""
Test NormalizePad in python transformation: value is not in range [0,1]
expected to raise ValueError
Feature: Normalize op
Description: Test Normalize with error input: value is not in range [0,1]
Expectation: ValueError raised
"""
logger.info("test_normalizepad_exception_invalid_range_py")
logger.info("test_normalizepad_exception_invalid_range")
try:
_ = py_vision.NormalizePad([0.75, 1.25, 0.5], [0.1, 0.18, 1.32])
_ = vision.NormalizePad([0.75, 1.25, 0.5], [0.1, 0.18, 1.32], is_hwc=False)
except ValueError as e:
logger.info("Got an exception in DE: {}".format(str(e)))
assert "Input mean_value is not within the required interval of [0.0, 1.0]." in str(e)
if __name__ == "__main__":
test_normalizepad_op_hwc(plot=True)
test_normalizepad_op_chw(plot=True)
test_normalizepad_op_comp_chw()
test_decode_normalizepad_op()
test_normalizepad_exception_unequal_size_1()
test_normalizepad_exception_unequal_size_2()
test_normalizepad_exception_invalid_range()

View File

@ -18,9 +18,8 @@ Testing the OneHot Op
import numpy as np
import mindspore.dataset as ds
import mindspore.dataset.transforms.c_transforms as data_trans
import mindspore.dataset.transforms.py_transforms as py_trans
import mindspore.dataset.vision.c_transforms as c_vision
import mindspore.dataset.transforms.transforms as data_trans
import mindspore.dataset.vision.transforms as vision
from mindspore import log as logger
from util import dataset_equal_with_function
@ -69,9 +68,9 @@ def test_one_hot_post_aug():
resize_height, resize_width = 224, 224
# Define map operations
decode_op = c_vision.Decode()
rescale_op = c_vision.Rescale(rescale, shift)
resize_op = c_vision.Resize((resize_height, resize_width))
decode_op = vision.Decode()
rescale_op = vision.Rescale(rescale, shift)
resize_op = vision.Resize((resize_height, resize_width))
# Apply map operations on images
data1 = data1.map(operations=decode_op, input_columns=["image"])
@ -99,6 +98,7 @@ def test_one_hot_post_aug():
assert num_iter == 1
def test_one_hot_success():
# success
class GetDatasetGenerator:
@ -117,13 +117,14 @@ def test_one_hot_success():
dataset = ds.GeneratorDataset(GetDatasetGenerator(), ["data", "label"], shuffle=False)
one_hot_encode = py_trans.OneHotOp(10)
trans = py_trans.Compose([one_hot_encode])
one_hot_encode = data_trans.OneHot(10)
trans = data_trans.Compose([one_hot_encode])
dataset = dataset.map(operations=trans, input_columns=["label"])
for index, item in enumerate(dataset.create_dict_iterator(num_epochs=1, output_numpy=True)):
assert item["label"][index] == 1.0
def test_one_hot_success2():
# success
class GetDatasetGenerator:
@ -142,13 +143,14 @@ def test_one_hot_success2():
dataset = ds.GeneratorDataset(GetDatasetGenerator(), ["data", "label"], shuffle=False)
one_hot_encode = py_trans.OneHotOp(10)
trans = py_trans.Compose([one_hot_encode])
one_hot_encode = data_trans.OneHot(10)
trans = data_trans.Compose([one_hot_encode])
dataset = dataset.map(operations=trans, input_columns=["label"])
for index, item in enumerate(dataset.create_dict_iterator(num_epochs=1, output_numpy=True)):
logger.info(item)
assert item["label"][0][index] == 1.0
assert item["label"][index] == 1.0
def test_one_hot_success3():
# success
@ -171,14 +173,15 @@ def test_one_hot_success3():
dataset = ds.GeneratorDataset(GetDatasetGenerator(), ["data", "label"], shuffle=False)
one_hot_encode = py_trans.OneHotOp(10)
trans = py_trans.Compose([one_hot_encode])
one_hot_encode = data_trans.OneHot(10)
trans = data_trans.Compose([one_hot_encode])
dataset = dataset.map(operations=trans, input_columns=["label"])
for item in dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
logger.info(item)
for i in range(10):
assert item["label"][i][0][i] == 1.0
assert item["label"][i][i] == 1.0
def test_one_hot_type_error():
# type error
@ -198,15 +201,16 @@ def test_one_hot_type_error():
dataset = ds.GeneratorDataset(GetDatasetGenerator(), ["data", "label"], shuffle=False)
one_hot_encode = py_trans.OneHotOp(10)
trans = py_trans.Compose([one_hot_encode])
one_hot_encode = data_trans.OneHot(10)
trans = data_trans.Compose([one_hot_encode])
dataset = dataset.map(operations=trans, input_columns=["label"])
try:
for index, item in enumerate(dataset.create_dict_iterator(num_epochs=1, output_numpy=True)):
assert item["label"][index] == 1.0
except RuntimeError as e:
assert "the input numpy type should be int" in str(e)
assert "OneHot only support input of int type, but got:float64" in str(e)
if __name__ == "__main__":
test_one_hot()

View File

@ -18,9 +18,8 @@ Testing Pad op in DE
import numpy as np
import mindspore.dataset as ds
import mindspore.dataset.transforms.py_transforms
import mindspore.dataset.vision.c_transforms as c_vision
import mindspore.dataset.vision.py_transforms as py_vision
import mindspore.dataset.transforms.transforms
import mindspore.dataset.vision.transforms as vision
from mindspore import log as logger
from util import diff_mse, save_and_check_md5
@ -37,9 +36,9 @@ def test_pad_op():
# First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
decode_op = c_vision.Decode()
decode_op = vision.Decode()
pad_op = c_vision.Pad((100, 100, 100, 100))
pad_op = vision.Pad((100, 100, 100, 100))
ctrans = [decode_op,
pad_op,
]
@ -48,11 +47,11 @@ def test_pad_op():
# Second dataset
transforms = [
py_vision.Decode(),
py_vision.Pad(100),
py_vision.ToTensor(),
vision.Decode(True),
vision.Pad(100),
vision.ToTensor(),
]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms)
transform = mindspore.dataset.transforms.transforms.Compose(transforms)
data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
data2 = data2.map(operations=transform, input_columns=["image"])
@ -79,9 +78,9 @@ def test_pad_op2():
logger.info("test padding parameter with size 2")
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
decode_op = c_vision.Decode()
resize_op = c_vision.Resize([90, 90])
pad_op = c_vision.Pad((100, 9,))
decode_op = vision.Decode()
resize_op = vision.Resize([90, 90])
pad_op = vision.Pad((100, 9,))
ctrans = [decode_op, resize_op, pad_op]
data1 = data1.map(operations=ctrans, input_columns=["image"])
@ -101,18 +100,18 @@ def test_pad_grayscale():
# Note: image.transpose performs channel swap to allow py transforms to
# work with c transforms
transforms = [
py_vision.Decode(),
py_vision.Grayscale(1),
py_vision.ToTensor(),
vision.Decode(True),
vision.Grayscale(1),
vision.ToTensor(),
(lambda image: (image.transpose(1, 2, 0) * 255).astype(np.uint8))
]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms)
transform = mindspore.dataset.transforms.transforms.Compose(transforms)
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
data1 = data1.map(operations=transform, input_columns=["image"])
# if input is grayscale, the output dimensions should be single channel
pad_gray = c_vision.Pad(100, fill_value=(20, 20, 20))
pad_gray = vision.Pad(100, fill_value=(20, 20, 20))
data1 = data1.map(operations=pad_gray, input_columns=["image"])
dataset_shape_1 = []
for item1 in data1.create_dict_iterator(num_epochs=1, output_numpy=True):
@ -121,7 +120,7 @@ def test_pad_grayscale():
# Dataset for comparison
data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
decode_op = c_vision.Decode()
decode_op = vision.Decode()
# we use the same padding logic
ctrans = [decode_op, pad_gray]
@ -135,7 +134,7 @@ def test_pad_grayscale():
for shape1, shape2 in zip(dataset_shape_1, dataset_shape_2):
# validate that the first two dimensions are the same
# we have a little inconsistency here because the third dimension is 1 after py_vision.Grayscale
# we have a little inconsistency here because the third dimension is 1 after vision.Grayscale
assert shape1[0:1] == shape2[0:1]
@ -147,8 +146,8 @@ def test_pad_md5():
# First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
decode_op = c_vision.Decode()
pad_op = c_vision.Pad(150)
decode_op = vision.Decode()
pad_op = vision.Pad(150)
ctrans = [decode_op,
pad_op,
]
@ -158,11 +157,11 @@ def test_pad_md5():
# Second dataset
data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
pytrans = [
py_vision.Decode(),
py_vision.Pad(150),
py_vision.ToTensor(),
vision.Decode(True),
vision.Pad(150),
vision.ToTensor(),
]
transform = mindspore.dataset.transforms.py_transforms.Compose(pytrans)
transform = mindspore.dataset.transforms.transforms.Compose(pytrans)
data2 = data2.map(operations=transform, input_columns=["image"])
# Compare with expected md5 from images
filename1 = "pad_01_c_result.npz"

View File

@ -19,7 +19,7 @@ import numpy as np
import pytest
import mindspore.dataset as ds
import mindspore.dataset.transforms.c_transforms as ops
import mindspore.dataset.transforms.transforms as ops
# Extensive testing of PadEnd is already done in batch with Pad test cases

View File

@ -7,7 +7,7 @@ from PIL import Image
import mindspore.dataset as ds
from mindspore.mindrecord import FileWriter
import mindspore.dataset.vision.c_transforms as V_C
import mindspore.dataset.vision.transforms as V_C
FILES_NUM = 4
CV_DIR_NAME = "../data/mindrecord/testImageNetData"

View File

@ -21,8 +21,8 @@ import numpy as np
import pytest
import mindspore.common.dtype as mstype
import mindspore.dataset as ds
import mindspore.dataset.transforms.c_transforms as C
import mindspore.dataset.vision.c_transforms as vision
import mindspore.dataset.transforms.transforms as C
import mindspore.dataset.vision.transforms as vision
import mindspore._c_dataengine as cde
FILES = ["../data/dataset/testTFTestAllTypes/test.data"]

View File

@ -22,7 +22,7 @@ import pytest
import mindspore.common.dtype as mstype
import mindspore.dataset as ds
import mindspore._c_dataengine as cde
import mindspore.dataset.transforms.c_transforms as C
import mindspore.dataset.transforms.transforms as C
FILES = ["../data/dataset/testTFTestAllTypes/test.data"]
DATASET_ROOT = "../data/dataset/testTFTestAllTypes/"

View File

@ -17,7 +17,7 @@ import numpy as np
from util import save_and_check_tuple
import mindspore.dataset as ds
import mindspore.dataset.transforms.c_transforms as C
import mindspore.dataset.transforms.transforms as C
from mindspore.common import dtype as mstype
DATA_DIR_TF = ["../data/dataset/testTFTestAllTypes/test.data"]

View File

@ -18,8 +18,8 @@ Test Python Multiprocessing with Python functions/ops
import numpy as np
import pytest
import mindspore.dataset as ds
import mindspore.dataset.transforms.py_transforms as py_transforms
import mindspore.dataset.vision.py_transforms as py_vision
import mindspore.dataset.transforms.transforms as transforms
import mindspore.dataset.vision.transforms as vision
from util import visualize_list
MNIST_DATA_DIR = "../data/dataset/testMnistData"
@ -65,25 +65,25 @@ def skip_test_pyfunc_multiproc_shrmem():
def create_dataset_pyop_multiproc(num_parallel_workers=None, max_rowsize=16, batch_size=32, repeat_size=1,
num_samples=None):
"""
Create dataset with Python ops list and python_multiprocessing=True for Map op
Create dataset with Python implementations list and python_multiprocessing=True for Map op
"""
# Define dataset
data1 = ds.MnistDataset(MNIST_DATA_DIR, num_samples=num_samples)
data1 = data1.map(operations=[py_vision.ToType(np.int32)], input_columns="label",
data1 = data1.map(operations=[vision.ToType(np.int32)], input_columns="label",
num_parallel_workers=num_parallel_workers,
python_multiprocessing=True, max_rowsize=max_rowsize)
# Setup transforms list which include Python ops
# Setup transforms list which include Python implementations
transforms_list = [
lambda x: x,
py_vision.HWC2CHW(),
py_vision.RandomErasing(0.9, value='random'),
py_vision.Cutout(4, 2),
vision.HWC2CHW(),
vision.RandomErasing(0.9, value='random'),
vision.CutOut(4, 2, is_hwc=False),
lambda y: y
]
compose_op = py_transforms.Compose(transforms_list)
compose_op = transforms.Compose(transforms_list)
data1 = data1.map(operations=compose_op, input_columns="image", num_parallel_workers=num_parallel_workers,
python_multiprocessing=True, max_rowsize=max_rowsize)
@ -160,7 +160,7 @@ def test_pyfunc_multiproc_max_rowsize_large():
def test_pyfunc_multiproc_basic_pipeline(plot=False):
"""
Feature: Python Multiprocessing
Description: Test Map op with python_multiprocessing=True in a basic pipeline with Py ops
Description: Test Map op with python_multiprocessing=True in a basic pipeline with Python implementations
Expectation: Images in plots from the 2 pipelines are visually fine
"""
# Reduce memory required by disabling the shared memory optimization
@ -168,19 +168,19 @@ def test_pyfunc_multiproc_basic_pipeline(plot=False):
ds.config.set_enable_shared_mem(False)
# Define map operations
transforms_list = [py_vision.CenterCrop(64), py_vision.RandomRotation(30)]
transforms_list = [vision.CenterCrop(64), vision.RandomRotation(30)]
transforms1 = [
py_vision.Decode(),
py_transforms.RandomChoice(transforms_list),
py_vision.ToTensor()
vision.Decode(True),
transforms.RandomChoice(transforms_list),
vision.ToTensor()
]
transform1 = py_transforms.Compose(transforms1)
transform1 = transforms.Compose(transforms1)
transforms2 = [
py_vision.Decode(),
py_vision.ToTensor()
vision.Decode(True),
vision.ToTensor()
]
transform2 = py_transforms.Compose(transforms2)
transform2 = transforms.Compose(transforms2)
# First dataset
data1 = ds.TFRecordDataset(TF_DATA_DIR, TF_SCHEMA_DIR, columns_list=["image"], shuffle=False)
@ -207,7 +207,7 @@ def test_pyfunc_multiproc_basic_pipeline(plot=False):
def test_pyfunc_multiproc_child_exception():
"""
Feature: Python Multiprocessing
Description: Test Map op with python_multiprocessing=True with Python op encountering exception
Description: Test Map op with python_multiprocessing=True with Python implementation encountering exception
Expectation: Exception is correctly processed
"""
# Reduce memory required by disabling the shared memory optimization
@ -216,13 +216,13 @@ def test_pyfunc_multiproc_child_exception():
# Define map operations
# Note: crop size[5000, 5000] > image size[4032, 2268]
transforms_list = [py_vision.RandomCrop(5000)]
transforms = [
py_vision.Decode(),
py_transforms.RandomChoice(transforms_list),
py_vision.ToTensor()
transforms_list = [vision.RandomCrop(5000)]
transform = [
vision.Decode(True),
transforms.RandomChoice(transforms_list),
vision.ToTensor()
]
transform = py_transforms.Compose(transforms)
transform = transforms.Compose(transform)
# Generate dataset
data = ds.TFRecordDataset(TF_DATA_DIR, TF_SCHEMA_DIR, columns_list=["image"], shuffle=False)
data = data.map(operations=transform, input_columns=["image"], python_multiprocessing=True)

View File

@ -18,10 +18,8 @@ Test Python Multiprocessing with AutoTuning
import numpy as np
import pytest
import mindspore.dataset as ds
import mindspore.dataset.transforms.c_transforms as c_transforms
import mindspore.dataset.transforms.py_transforms as py_transforms
import mindspore.dataset.vision.c_transforms as c_vision
import mindspore.dataset.vision.py_transforms as py_vision
import mindspore.dataset.transforms.transforms as transforms
import mindspore.dataset.vision.transforms as vision
from mindspore.dataset.vision import Inter
CIFAR10_DATA_DIR = "../data/dataset/testCifar10Data"
@ -37,15 +35,15 @@ def create_pyfunc_dataset(batch_size=32, repeat_size=1, num_parallel_workers=1,
# Define dataset
cifar10_ds = ds.Cifar10Dataset(CIFAR10_DATA_DIR, num_samples=num_samples)
cifar10_ds = cifar10_ds.map(operations=[py_vision.ToType(np.int32)], input_columns="label",
cifar10_ds = cifar10_ds.map(operations=[vision.ToType(np.int32)], input_columns="label",
num_parallel_workers=num_parallel_workers, python_multiprocessing=True)
# Setup transforms list which include Python ops / Pyfuncs
# Setup transforms list which include Python implementations / Pyfuncs
transforms_list = [
py_vision.ToPIL(),
py_vision.RandomGrayscale(prob=0.2),
vision.ToPIL(),
vision.RandomGrayscale(prob=0.2),
np.array] # need to convert PIL image to a NumPy array to pass it to C++ operation
compose_op = py_transforms.Compose(transforms_list)
compose_op = transforms.Compose(transforms_list)
cifar10_ds = cifar10_ds.map(operations=compose_op, input_columns="image",
num_parallel_workers=num_parallel_workers,
python_multiprocessing=True)
@ -69,7 +67,7 @@ def create_pyop_cop_dataset(batch_size=32, repeat_size=1, num_parallel_workers=1
cifar10_ds = ds.Cifar10Dataset(CIFAR10_DATA_DIR, num_samples=num_samples)
# Map#1 - with Pyfunc
cifar10_ds = cifar10_ds.map(operations=[py_vision.ToType(np.int32)], input_columns="label",
cifar10_ds = cifar10_ds.map(operations=[vision.ToType(np.int32)], input_columns="label",
num_parallel_workers=num_parallel_workers, python_multiprocessing=True)
# Map#2 - with C Ops
@ -78,19 +76,19 @@ def create_pyop_cop_dataset(batch_size=32, repeat_size=1, num_parallel_workers=1
shift = 0.0
rescale_nml = 1 / 0.3081
shift_nml = -1 * 0.1307 / 0.3081
resize_op = c_vision.Resize((resize_height, resize_width), interpolation=Inter.LINEAR)
rescale_op = c_vision.Rescale(rescale, shift)
rescale_nml_op = c_vision.Rescale(rescale_nml, shift_nml)
hwc2chw_op = c_vision.HWC2CHW()
transforms = [resize_op, rescale_op, rescale_nml_op, hwc2chw_op]
compose_op = c_transforms.Compose(transforms)
resize_op = vision.Resize((resize_height, resize_width), interpolation=Inter.LINEAR)
rescale_op = vision.Rescale(rescale, shift)
rescale_nml_op = vision.Rescale(rescale_nml, shift_nml)
hwc2chw_op = vision.HWC2CHW()
transform = [resize_op, rescale_op, rescale_nml_op, hwc2chw_op]
compose_op = transforms.Compose(transform)
cifar10_ds = cifar10_ds.map(operations=compose_op, input_columns="image",
num_parallel_workers=num_parallel_workers,
python_multiprocessing=True)
# Map#3 - with Pyfunc
transforms_list = [lambda x: x]
compose_op = py_transforms.Compose(transforms_list)
compose_op = transforms.Compose(transforms_list)
cifar10_ds = cifar10_ds.map(operations=compose_op, input_columns="image",
num_parallel_workers=num_parallel_workers,
python_multiprocessing=True)
@ -113,14 +111,14 @@ def create_mixed_map_dataset(batch_size=32, repeat_size=1, num_parallel_workers=
# Define dataset
cifar10_ds = ds.Cifar10Dataset(CIFAR10_DATA_DIR, num_samples=num_samples)
cifar10_ds = cifar10_ds.map(operations=[py_vision.ToType(np.int32)], input_columns="label",
cifar10_ds = cifar10_ds.map(operations=[vision.ToType(np.int32)], input_columns="label",
num_parallel_workers=num_parallel_workers, python_multiprocessing=True)
# Map with operations: Pyfunc + C Ops + Pyfunc
resize_op = c_vision.Resize((32, 32), interpolation=Inter.LINEAR)
rescale_op = c_vision.Rescale(1.0 / 255.0, 0.0)
rescale_nml_op = c_vision.Rescale(1 / 0.3081, -1 * 0.1307 / 0.3081)
hwc2chw_op = c_vision.HWC2CHW()
resize_op = vision.Resize((32, 32), interpolation=Inter.LINEAR)
rescale_op = vision.Rescale(1.0 / 255.0, 0.0)
rescale_nml_op = vision.Rescale(1 / 0.3081, -1 * 0.1307 / 0.3081)
hwc2chw_op = vision.HWC2CHW()
cifar10_ds = cifar10_ds.map(
operations=[lambda x: x, resize_op, rescale_op, rescale_nml_op, hwc2chw_op, lambda y: y],
input_columns="image", num_parallel_workers=num_parallel_workers, python_multiprocessing=True)
@ -140,7 +138,7 @@ def create_per_batch_map_dataset(batch_size=32, repeat_size=1, num_parallel_work
# Define dataset
cifar100_ds = ds.Cifar100Dataset(CIFAR100_DATA_DIR, num_samples=num_samples)
cifar100_ds = cifar100_ds.map(operations=[py_vision.ToType(np.int32)], input_columns="fine_label")
cifar100_ds = cifar100_ds.map(operations=[vision.ToType(np.int32)], input_columns="fine_label")
cifar100_ds = cifar100_ds.map(operations=[lambda z: z], input_columns="image")
@ -171,15 +169,15 @@ def create_mp_dataset(batch_size=32, repeat_size=1, num_parallel_workers=1, num_
# Define dataset
cifar10_ds = ds.Cifar10Dataset(CIFAR10_DATA_DIR, num_samples=num_samples)
cifar10_ds = cifar10_ds.map(operations=[py_vision.ToType(np.int32)], input_columns="label",
cifar10_ds = cifar10_ds.map(operations=[vision.ToType(np.int32)], input_columns="label",
num_parallel_workers=num_parallel_workers, python_multiprocessing=True)
# Setup transforms list which include Python ops / Pyfuncs
# Setup transforms list which include Python implementations / Pyfuncs
transforms_list = [
py_vision.ToPIL(),
py_vision.RandomGrayscale(prob=0.8),
vision.ToPIL(),
vision.RandomGrayscale(prob=0.8),
np.array] # need to convert PIL image to a NumPy array to pass it to C++ operation
compose_op = py_transforms.Compose(transforms_list)
compose_op = transforms.Compose(transforms_list)
cifar10_ds = cifar10_ds.map(operations=compose_op, input_columns="image",
num_parallel_workers=num_parallel_workers, python_multiprocessing=True)