!33263 [MD] UT Updates - more eager tests, minor updates

Merge pull request !33263 from cathwong/ckw_ut_misc_fixes
i-robot 2022-04-19 20:34:16 +00:00 committed by Gitee
commit 49f030358e
8 changed files with 425 additions and 140 deletions

View File

@ -1,4 +1,4 @@
# Copyright 2021 Huawei Technologies Co., Ltd
# Copyright 2021-2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -42,6 +42,11 @@ def generate_numpy_random_rgb(shape):
def test_adjust_gamma_c_eager():
"""
Feature: AdjustGamma op
Description: Test eager support for AdjustGamma C++ op
Expectation: Receive non-None output image from op
"""
# Eager 3-channel
rgb_flat = generate_numpy_random_rgb((64, 3)).astype(np.float32)
img_in = rgb_flat.reshape((8, 8, 3))
@ -50,8 +55,19 @@ def test_adjust_gamma_c_eager():
img_out = adjustgamma_op(img_in)
assert img_out is not None
img_in2 = PIL.Image.open("../data/dataset/apple.jpg").convert("RGB")
adjustgamma_op2 = C.AdjustGamma(10, 1)
img_out2 = adjustgamma_op2(img_in2)
assert img_out2 is not None
def test_adjust_gamma_py_eager():
"""
Feature: AdjustGamma op
Description: Test eager support for AdjustGamma Python op
Expectation: Receive non-None output image from op
"""
# Eager 3-channel
rgb_flat = generate_numpy_random_rgb((64, 3)).astype(np.uint8)
img_in = PIL.Image.fromarray(rgb_flat.reshape((8, 8, 3)))
@ -60,6 +76,12 @@ def test_adjust_gamma_py_eager():
img_out = adjustgamma_op(img_in)
assert img_out is not None
img_in2 = PIL.Image.open("../data/dataset/apple.jpg").convert("RGB")
adjustgamma_op2 = F.AdjustGamma(10, 1)
img_out2 = adjustgamma_op2(img_in2)
assert img_out2 is not None
def test_adjust_gamma_c_eager_gray():
# Eager 3-channel
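
For context, the eager pattern exercised by these new tests is simply constructing a vision op and calling it directly on an in-memory image, with no dataset pipeline involved. A minimal sketch with a synthetic image, using the same gamma/gain values as the tests above:

    import numpy as np
    import mindspore.dataset.vision.c_transforms as C

    # Small synthetic float32 RGB image in HWC layout.
    img = np.random.rand(8, 8, 3).astype(np.float32)

    # Apply the C++ AdjustGamma op eagerly, mirroring test_adjust_gamma_c_eager.
    out = C.AdjustGamma(10, 1)(img)
    print(out.shape)  # stays (8, 8, 3)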

View File

@ -1,4 +1,4 @@
# Copyright 2020 Huawei Technologies Co., Ltd
# Copyright 2020-2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -14,12 +14,19 @@
# ==============================================================================
import cv2
import numpy as np
import pytest
from PIL import Image
import mindspore.dataset.vision.c_transforms as C
import mindspore.dataset.vision.py_transforms as PY
from mindspore import log as logger
def test_eager_decode():
def test_eager_decode_c():
"""
Feature: Decode op
Description: Test eager support for Decode C++ op
Expectation: Output image size from op is correct
"""
img = np.fromfile("../data/dataset/apple.jpg", dtype=np.uint8)
logger.info("Image.type: {}, Image.shape: {}".format(type(img), img.shape))
@ -35,6 +42,27 @@ def test_eager_decode():
assert img2.shape == (2268, 4032, 3)
def test_eager_decode_py():
"""
Feature: Decode op
Description: Test eager support for Decode Python op
Expectation: Output image size from op is correct
"""
img = np.fromfile("../data/dataset/apple.jpg", dtype=np.uint8)
logger.info("Image.type: {}, Image.shape: {}".format(type(img), img.size))
img = PY.Decode()(img)
logger.info("Image.type: {}, Image.shape: {}".format(type(img), img.size))
assert img.size == (4032, 2268)
fp = open("../data/dataset/apple.jpg", "rb")
img2 = fp.read()
img2 = PY.Decode()(img2)
logger.info("Image.type: {}, Image.shape: {}".format(type(img2), img2.size))
assert img2.size == (4032, 2268)
def test_eager_resize():
img = cv2.imread("../data/dataset/apple.jpg")
logger.info("Image.type: {}, Image.shape: {}".format(type(img), img.shape))
@ -44,6 +72,7 @@ def test_eager_resize():
assert img.shape == (32, 32, 3)
def test_eager_rescale():
img = cv2.imread("../data/dataset/apple.jpg")
logger.info("Image.type: {}, Image.shape: {}".format(type(img), img.shape))
@ -54,9 +83,15 @@ def test_eager_rescale():
logger.info("Image.type: {}, Image.shape: {}".format(type(img), img.shape))
pixel_rescaled = img[0][0][0]
assert pixel*rescale_factor == pixel_rescaled
assert pixel * rescale_factor == pixel_rescaled
def test_eager_normalize():
def test_eager_normalize_c():
"""
Feature: Normalize op
Description: Test eager support for Normalize C++ op
Expectation: Output image info from op is correct
"""
img = Image.open("../data/dataset/apple.jpg").convert("RGB")
logger.info("Image.type: {}, Image.shape: {}".format(type(img), img.size))
pixel = img.getpixel((0, 0))[0]
@ -69,6 +104,27 @@ def test_eager_normalize():
assert (pixel - mean_vec[0]) / std_vec[0] == pixel_normalized
def test_eager_normalize_py():
"""
Feature: Normalize op
Description: Test eager support for Normalize Python op
Expectation: Output image info from op is correct
"""
img = Image.open("../data/dataset/apple.jpg").convert("RGB")
logger.info("Image.type: {}, Image.shape: {}".format(type(img), img.size))
pixel = img.getpixel((0, 0))[0]
img = PY.ToTensor()(img)
mean_vec = [.100, .100, .100]
std_vec = [.2, .2, .2]
img = PY.Normalize(mean=mean_vec, std=std_vec)(img)
pixel_normalized = img[0][0][0]
assert (pixel / 255 - mean_vec[0]) / std_vec[0] == pytest.approx(pixel_normalized, 0.0001)
def test_eager_HWC2CHW():
img = cv2.imread("../data/dataset/apple.jpg")
logger.info("Image.type: {}, Image.shape: {}".format(type(img), img.shape))
@ -80,7 +136,13 @@ def test_eager_HWC2CHW():
assert channel == (channel_swaped[1], channel_swaped[2], channel_swaped[0])
def test_eager_pad():
def test_eager_pad_c():
"""
Feature: Pad op
Description: Test eager support for Pad C++ op
Expectation: Output image size info from op is correct
"""
img = Image.open("../data/dataset/apple.jpg").convert("RGB")
logger.info("Image.type: {}, Image.shape: {}".format(type(img), img.size))
@ -95,6 +157,89 @@ def test_eager_pad():
assert size_padded == (size[0] + 2 * pad, size[1] + 2 * pad, size[2])
def test_eager_pad_py():
"""
Feature: Pad op
Description: Test eager support for Pad Python op
Expectation: Output image size info from op is correct
"""
img = Image.open("../data/dataset/apple.jpg").convert("RGB")
logger.info("Image.type: {}, Image.shape: {}".format(type(img), img.size))
img = PY.Resize(size=(32, 32))(img)
logger.info("Image.type: {}, Image.shape: {}".format(type(img), img.size))
size = img.size
pad = 4
img = PY.Pad(padding=pad)(img)
logger.info("Image.type: {}, Image.shape: {}".format(type(img), img.size))
size_padded = img.size
assert size_padded == (size[0] + 2 * pad, size[1] + 2 * pad)
def test_eager_cutout_pil_c():
"""
Feature: CutOut op
Description: Test eager support for CutOut C++ op with PIL input
Expectation: Output image size info from op is correct
"""
img = Image.open("../data/dataset/apple.jpg").convert("RGB")
logger.info("Image.type: {}, Image.shape: {}".format(type(img), img.size))
img = C.Resize(size=(32, 32))(img)
logger.info("Image.type: {}, Image.shape: {}".format(type(img), img.size))
size = img.shape
img = C.CutOut(2, 4)(img)
logger.info("Image.type: {}, Image.shape: {}".format(type(img), img.size))
size_cutout = img.shape
assert size_cutout == size
def test_eager_cutout_pil_py():
"""
Feature: CutOut op
Description: Test eager support for CutOut Python op with PIL input
Expectation: Receive non-None output image from op
"""
img = Image.open("../data/dataset/apple.jpg").convert("RGB")
logger.info("Image.type: {}, Image.shape: {}".format(type(img), img.size))
img = PY.Resize(size=(32, 32))(img)
logger.info("Image.type: {}, Image.shape: {}".format(type(img), img.size))
img = PY.ToTensor()(img)
logger.info("Image.type: {}, Image.shape: {}".format(type(img), img.size))
img = PY.Cutout(2, 4)(img)
logger.info("Image.type: {}, Image.shape: {}".format(type(img), img.size))
assert img is not None
def test_eager_cutout_cv_c():
"""
Feature: CutOut op
Description: Test eager support for CutOut C++ op with CV input
Expectation: Output image size info from op is correct
"""
img = cv2.imread("../data/dataset/apple.jpg")
logger.info("Image.type: {}, Image.shape: {}".format(type(img), img.size))
img = C.Resize(size=(32, 32))(img)
logger.info("Image.type: {}, Image.shape: {}".format(type(img), img.size))
size = img.shape
img = C.CutOut(2, 4)(img)
logger.info("Image.type: {}, Image.shape: {}".format(type(img), img.size))
size_cutout = img.shape
assert size_cutout == size
def test_eager_exceptions():
try:
img = "../data/dataset/apple.jpg"
@ -125,12 +270,49 @@ def test_eager_exceptions():
assert "Input should be NumPy or PIL image" in str(e)
def test_eager_exceptions_normalize():
"""
Feature: Normalize op
Description: Test eager support for Normalize Python op with invalid input
Expectation: Invalid input is detected and a TypeError is raised
"""
try:
img = Image.open("../data/dataset/apple.jpg").convert("RGB")
mean_vec = [.100, .100, .100]
std_vec = [.2, .2, .2]
_ = PY.Normalize(mean=mean_vec, std=std_vec)(img)
assert False
except TypeError as e:
assert "img should be NumPy image" in str(e)
def test_eager_exceptions_pad():
"""
Feature: Pad op
Description: Test eager support for Pad Python op with invalid input
Expectation: Invalid input is detected and a TypeError is raised
"""
try:
img = "../data/dataset/apple.jpg"
_ = PY.Pad(padding=4)(img)
assert False
except TypeError as e:
assert "img should be PIL image" in str(e)
if __name__ == '__main__':
test_eager_decode()
test_eager_decode_c()
test_eager_decode_py()
test_eager_resize()
test_eager_rescale()
test_eager_normalize()
test_eager_normalize_c()
test_eager_normalize_py()
test_eager_HWC2CHW()
test_eager_pad()
test_eager_pad_c()
test_eager_pad_py()
test_eager_cutout_pil_c()
test_eager_cutout_pil_py()
test_eager_cutout_cv_c()
test_eager_exceptions()
test_eager_exceptions_normalize()
test_eager_exceptions_pad()
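
The *_c/*_py split above reflects the two eager entry points being covered: C++ ops return NumPy arrays (checked via .shape), while Python ops stay in the PIL/tensor domain (checked via .size or the ToTensor output). A minimal sketch of the difference, assuming the same apple.jpg fixture:

    from PIL import Image
    import mindspore.dataset.vision.c_transforms as C
    import mindspore.dataset.vision.py_transforms as PY

    img = Image.open("../data/dataset/apple.jpg").convert("RGB")

    out_c = C.Pad(padding=4)(img)    # C++ op: NumPy array in HWC layout, inspect out_c.shape
    out_py = PY.Pad(padding=4)(img)  # Python op: PIL image, inspect out_py.size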

View File

@ -1,4 +1,4 @@
# Copyright 2020-2021 Huawei Technologies Co., Ltd
# Copyright 2020-2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -30,9 +30,11 @@ DATA_DIR = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"]
SCHEMA_DIR = "../data/dataset/test_tf_file_3_images/datasetSchema.json"
def test_HWC2CHW_callable():
def test_hwc2chw_callable():
"""
Test HWC2CHW is callable
Feature: HWC2CHW op
Description: Test HWC2CHW op is callable.
Expectation: Valid input succeeds. Invalid input fails.
"""
logger.info("Test HWC2CHW callable")
img = np.zeros([50, 50, 3])
@ -59,7 +61,7 @@ def test_HWC2CHW_callable():
assert "The op is OneToOne, can only accept one tensor as input." in str(info.value)
def test_HWC2CHW_multi_channels():
def test_hwc2chw_multi_channels():
"""
Feature: Test HWC2CHW feature
Description: The input is a HWC format array with 5 channels
@ -81,9 +83,11 @@ def test_HWC2CHW_multi_channels():
assert np.allclose(item[0], expect_output)
def test_HWC2CHW(plot=False):
def test_hwc2chw(plot=False):
"""
Test HWC2CHW
Feature: HWC2CHW op
Description: Test HWC2CHW op in pipeline
Expectation: Pipelines succeed with comparison mse=0
"""
logger.info("Test HWC2CHW")
@ -115,9 +119,11 @@ def test_HWC2CHW(plot=False):
visualize_list(image, image_transposed)
def test_HWC2CHW_md5():
def test_hwc2chw_md5():
"""
Test HWC2CHW(md5)
Feature: HWC2CHW op
Description: Test HWC2CHW op with md5 check.
Expectation: Pipeline results match in md5 comparison
"""
logger.info("Test HWC2CHW with md5 comparison")
@ -133,9 +139,11 @@ def test_HWC2CHW_md5():
save_and_check_md5(data1, filename, generate_golden=GENERATE_GOLDEN)
def test_HWC2CHW_comp(plot=False):
def test_hwc2chw_comp(plot=False):
"""
Test HWC2CHW between python and c image augmentation
Feature: HWC2CHW op
Description: Test HWC2CHW between Python and C image augmentation
Expectation: Image augmentations should be almost the same with mse < 0.001
"""
logger.info("Test HWC2CHW with c_transform and py_transform comparison")
@ -150,9 +158,7 @@ def test_HWC2CHW_comp(plot=False):
data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
transforms = [
py_vision.Decode(),
py_vision.ToTensor(),
py_vision.HWC2CHW()
]
py_vision.ToTensor()]
transform = mindspore.dataset.transforms.py_transforms.Compose(transforms)
data2 = data2.map(operations=transform, input_columns=["image"])
@ -161,7 +167,7 @@ def test_HWC2CHW_comp(plot=False):
for item1, item2 in zip(data1.create_dict_iterator(num_epochs=1, output_numpy=True),
data2.create_dict_iterator(num_epochs=1, output_numpy=True)):
c_image = item1["image"]
py_image = (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
py_image = (item2["image"] * 255).astype(np.uint8)
# Compare images between that applying c_transform and py_transform
mse = diff_mse(py_image, c_image)
@ -174,8 +180,8 @@ def test_HWC2CHW_comp(plot=False):
if __name__ == '__main__':
test_HWC2CHW_callable()
test_HWC2CHW_multi_channels()
test_HWC2CHW(True)
test_HWC2CHW_md5()
test_HWC2CHW_comp(True)
test_hwc2chw_callable()
test_hwc2chw_multi_channels()
test_hwc2chw(True)
test_hwc2chw_md5()
test_hwc2chw_comp(True)
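
The simplification in test_hwc2chw_comp above works because py_vision.ToTensor already produces a channel-first array, so the extra py_vision.HWC2CHW step and the later transpose canceled each other out and could both be dropped. A minimal sketch of the layout, with a synthetic image:

    import numpy as np
    import mindspore.dataset.vision.py_transforms as py_vision

    hwc = np.zeros([32, 32, 3], dtype=np.uint8)   # H, W, C input
    chw = py_vision.ToTensor()(hwc)               # rescales to [0, 1] and reorders axes
    print(chw.shape)                              # (3, 32, 32): already CHW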

View File

@ -1,4 +1,4 @@
# Copyright 2019 Huawei Technologies Co., Ltd
# Copyright 2019-2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -153,7 +153,10 @@ def test_iterator_exception():
class MyDict(dict):
def __getattr__(self, key):
return self[key]
try:
return self[key]
except KeyError:
raise AttributeError
def __setattr__(self, key, value):
self[key] = value
@ -164,9 +167,10 @@ class MyDict(dict):
def test_tree_copy():
"""
Testing copying the tree with a pyfunc that cannot be pickled
Feature: Iterators
Description: Test copying the tree with a pyfunc that cannot be pickled
Expectation: ids of the iterator's copied dataset tree differ from the original dataset tree
"""
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=COLUMNS)
data1 = data.map(operations=[MyDict()])
@ -174,7 +178,6 @@ def test_tree_copy():
assert id(data1) != id(itr.dataset)
assert id(data) != id(itr.dataset.children[0])
assert id(data1.operations[0]) == id(itr.dataset.operations[0])
itr.release()
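
A plausible reason for the try/except added to MyDict (not spelled out in the diff) is that Python's copy and pickle machinery probes objects for optional attributes such as __deepcopy__ via getattr with a default, which only suppresses AttributeError; a __getattr__ that leaks KeyError breaks those probes. A standalone sketch, independent of MindSpore:

    import copy

    class GoodDict(dict):
        def __getattr__(self, key):
            try:
                return self[key]
            except KeyError:
                raise AttributeError(key)   # what the attribute protocol expects

    class BadDict(dict):
        def __getattr__(self, key):
            return self[key]                # missing attributes leak KeyError

    copy.deepcopy(GoodDict(a=1))            # works: the __deepcopy__ probe sees AttributeError
    # copy.deepcopy(BadDict(a=1))           # would raise KeyError from the same probe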

View File

@ -1,4 +1,4 @@
# Copyright 2019 Huawei Technologies Co., Ltd
# Copyright 2019-2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -16,6 +16,7 @@
Testing Normalize op in DE
"""
import numpy as np
from PIL import Image
import mindspore.dataset as ds
import mindspore.dataset.transforms.py_transforms
import mindspore.dataset.vision.c_transforms as c_vision
@ -367,10 +368,38 @@ def test_multiple_channels():
util_test(np.ones(shape=[20, 45, 4]) * 1.3, mean=[0.5, 0.6, 0.7, 0.8], std=[0.1, 0.2, 0.3, 0.4])
util_test(np.ones(shape=[2, 2]), mean=[0.5], std=[0.1])
util_test(np.ones(shape=[2, 2, 5]), mean=[0.5], std=[0.1])
util_test(np.ones(shape=[6, 6, 129]), mean=[0.5]*129, std=[0.1]*129)
util_test(np.ones(shape=[6, 6, 129]), mean=[0.5] * 129, std=[0.1] * 129)
util_test(np.ones(shape=[6, 6, 129]), mean=[0.5], std=[0.1])
def test_normalize_c_eager():
"""
Feature: Normalize op
Description: Test eager support for Normalize C++ op
Expectation: Receive non-None output image from op
"""
img_in = Image.open("../data/dataset/apple.jpg").convert("RGB")
mean_vec = [1, 100, 255]
std_vec = [1, 20, 255]
normalize_op = c_vision.Normalize(mean=mean_vec, std=std_vec)
img_out = normalize_op(img_in)
assert img_out is not None
def test_normalize_py_eager():
"""
Feature: Normalize op
Description: Test eager support for Normalize Python op
Expectation: Receive non-None output image from op
"""
img_in = Image.open("../data/dataset/apple.jpg").convert("RGB")
img_in = py_vision.ToTensor()(img_in)
mean_vec = [0.1, 0.5, 1.0]
std_vec = [0.1, 0.4, 1.0]
normalize_op = py_vision.Normalize(mean=mean_vec, std=std_vec)
img_out = normalize_op(img_in)
assert img_out is not None
if __name__ == "__main__":
test_decode_op()
@ -386,3 +415,5 @@ if __name__ == "__main__":
test_normalize_grayscale_md5_01()
test_normalize_grayscale_md5_02()
test_normalize_grayscale_exception()
test_normalize_c_eager()
test_normalize_py_eager()
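
The eager Normalize assertions in this file (and in test_eager.py) all reduce to the element-wise formula output = (input - mean) / std, with the Python op operating on the [0, 1] float image produced by ToTensor. A small worked example with made-up values:

    # Per-channel normalization as asserted in test_eager_normalize_py (made-up pixel and stats).
    pixel = 36                      # raw uint8 value of one channel
    mean, std = 0.1, 0.2            # per-channel mean and std passed to the Python op

    normalized = (pixel / 255 - mean) / std
    print(round(normalized, 4))     # the value the Normalize output is compared against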

View File

@ -77,7 +77,6 @@ def create_dataset_pyop_multiproc(num_parallel_workers=None, max_rowsize=16, bat
# Setup transforms list which include Python ops
transforms_list = [
py_vision.ToTensor(),
lambda x: x,
py_vision.HWC2CHW(),
py_vision.RandomErasing(0.9, value='random'),

View File

@ -1,102 +1,106 @@
# Copyright 2020 Huawei Technologies Co., Ltd
# Copyright 2020-2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Testing RandomOrder op in DE
"""
import numpy as np
import mindspore.dataset as ds
import mindspore.dataset.transforms.py_transforms as py_transforms
import mindspore.dataset.vision.py_transforms as py_vision
from mindspore import log as logger
from util import visualize_list, config_get_set_seed, \
config_get_set_num_parallel_workers, save_and_check_md5
GENERATE_GOLDEN = False
DATA_DIR = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"]
SCHEMA_DIR = "../data/dataset/test_tf_file_3_images/datasetSchema.json"
def test_random_order_op(plot=False):
"""
Test RandomOrder in python transformations
Feature: RandomOrder op
Description: Test RandomOrder in python transformations
Expectation: Pipelines execute successfully
"""
logger.info("test_random_order_op")
# define map operations
transforms_list = [py_vision.CenterCrop(64), py_vision.RandomRotation(30)]
transforms1 = [
py_vision.Decode(),
py_transforms.RandomOrder(transforms_list),
py_vision.ToTensor()
]
transform1 = py_transforms.Compose(transforms1)
transforms2 = [
py_vision.Decode(),
py_vision.ToTensor()
]
transform2 = py_transforms.Compose(transforms2)
# First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
data1 = data1.map(operations=transform1, input_columns=["image"])
# Second dataset
data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
data2 = data2.map(operations=transform2, input_columns=["image"])
image_order = []
image_original = []
for item1, item2 in zip(data1.create_dict_iterator(num_epochs=1, output_numpy=True),
data2.create_dict_iterator(num_epochs=1, output_numpy=True)):
image1 = (item1["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
image2 = (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
image_order.append(image1)
image_original.append(image2)
if plot:
visualize_list(image_original, image_order)
def test_random_order_md5():
"""
Test RandomOrder op with md5 check
Feature: RandomOrder op
Description: Test RandomOrder op with md5 check
Expectation: Pipeline results match in md5 comparison
"""
logger.info("test_random_order_md5")
original_seed = config_get_set_seed(8)
original_num_parallel_workers = config_get_set_num_parallel_workers(1)
# define map operations
transforms_list = [py_vision.RandomCrop(64), py_vision.RandomRotation(30)]
transforms = [
py_vision.Decode(),
py_transforms.RandomOrder(transforms_list),
py_vision.ToTensor()
]
transform = py_transforms.Compose(transforms)
# Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
data = data.map(operations=transform, input_columns=["image"])
# check results with md5 comparison
filename = "random_order_01_result.npz"
save_and_check_md5(data, filename, generate_golden=GENERATE_GOLDEN)
# Restore configuration
ds.config.set_seed(original_seed)
ds.config.set_num_parallel_workers((original_num_parallel_workers))
if __name__ == '__main__':
test_random_order_op(plot=True)
test_random_order_md5()

View File

@ -1,4 +1,4 @@
# Copyright 2019 Huawei Technologies Co., Ltd
# Copyright 2019-2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -68,6 +68,41 @@ def test_random_rotation_op_c(plot=False):
visualize_image(original, rotation_de, mse, rotation_cv)
def test_random_rotation_op_c_area():
"""
Feature: RandomRotation op
Description: Test RandomRotation in C++ transformations op with Interpolation AREA
Expectation: Number of returned data rows is correct
"""
logger.info("test_random_rotation_op_c_area")
# First dataset
data1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, shuffle=False)
decode_op = c_vision.Decode()
# Use [180, 180] to force rotate 180 degrees, expand is set to be True to match output size
# Use resample with Interpolation AREA
random_rotation_op = c_vision.RandomRotation((180, 180), expand=True, resample=Inter.AREA)
data1 = data1.map(operations=decode_op, input_columns=["image"])
data1 = data1.map(operations=random_rotation_op, input_columns=["image"])
# Second dataset
data2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
data2 = data2.map(operations=decode_op, input_columns=["image"])
num_iter = 0
for item1, item2 in zip(data1.create_dict_iterator(num_epochs=1, output_numpy=True),
data2.create_dict_iterator(num_epochs=1, output_numpy=True)):
rotation_de = item1["image"]
original = item2["image"]
logger.info("shape before rotate: {}".format(original.shape))
rotation_cv = cv2.rotate(original, cv2.ROTATE_180)
mse = diff_mse(rotation_de, rotation_cv)
logger.info("random_rotation_op_{}, mse: {}".format(num_iter + 1, mse))
assert mse == 0
num_iter += 1
assert num_iter == 3
def test_random_rotation_op_py(plot=False):
"""
Test RandomRotation in python transformations op
@ -104,6 +139,7 @@ def test_random_rotation_op_py(plot=False):
if plot:
visualize_image(original, rotation_de, mse, rotation_cv)
def test_random_rotation_op_py_ANTIALIAS():
"""
Test RandomRotation in python transformations op
@ -125,6 +161,7 @@ def test_random_rotation_op_py_ANTIALIAS():
num_iter += 1
logger.info("use RandomRotation by Inter.ANTIALIAS process {} images.".format(num_iter))
def test_random_rotation_expand():
"""
Test RandomRotation op
@ -238,6 +275,7 @@ def test_rotation_diff(plot=False):
if __name__ == "__main__":
test_random_rotation_op_c(plot=True)
test_random_rotation_op_c_area()
test_random_rotation_op_py(plot=True)
test_random_rotation_op_py_ANTIALIAS()
test_random_rotation_expand()
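
The new AREA test checks mse == 0 against cv2.rotate; diff_mse comes from the shared test util module, but the comparison it stands for is presumably an ordinary mean squared error between two images of equal shape, as sketched below (an illustrative re-implementation, not the util's actual code):

    import numpy as np

    def image_mse(a, b):
        # Mean squared error between two equally shaped images; an exact 180-degree
        # rotation with expand=True should reproduce cv2.rotate(img, cv2.ROTATE_180)
        # pixel for pixel, which is why the test above expects 0.
        a = a.astype(np.float64)
        b = b.astype(np.float64)
        return np.mean((a - b) ** 2)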