!8714 dataset Python UT: Add Sampler Chain Tests, with get_dataset_size

From: @cathwong
Reviewed-by: @nsyca, @mikef
Signed-off-by: @nsyca
Committed by: mindspore-ci-bot, 2020-11-19 03:26:44 +08:00 (via Gitee)
Commit: f1ef84e1a6
3 changed files with 520 additions and 0 deletions
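
All of the new tests below exercise the same parent/child sampler-chain pattern: build a parent sampler, attach a child with add_child(), hand the chain to a dataset, and then check that get_dataset_size() agrees with the number of rows the iterator actually yields. As a minimal sketch of that pattern (editorial, not part of the diff), assuming an ImageFolder-style directory at DATA_DIR as in the tests:

    import mindspore.dataset as ds

    # The parent sampler draws its sample ids from whatever the child sampler yields.
    sampler = ds.RandomSampler()
    sampler.add_child(ds.SequentialSampler())

    data = ds.ImageFolderDataset(DATA_DIR, sampler=sampler)
    # get_dataset_size() should match the actual iteration count.
    assert data.get_dataset_size() == sum(1 for _ in data.create_dict_iterator(num_epochs=1))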

@@ -273,6 +273,7 @@ def test_sequential_sampler():
        result.append(item["label"])
        num_iter += 1
    assert num_iter == 44
    logger.info("Result: {}".format(result))
    assert result == golden
@@ -383,6 +384,7 @@ def test_weighted_random_sampler():
    logger.info("Number of data in data1: {}".format(num_iter))
    assert num_iter == 11


def test_weighted_random_sampler_exception():
    """
    Test error cases for WeightedRandomSampler
@@ -413,6 +415,250 @@ def test_weighted_random_sampler_exception():
        weights = [0, 0, 0, 0, 0]
        ds.WeightedRandomSampler(weights)


def test_chained_sampler_01():
    logger.info("Test Case Chained Sampler - Random and Sequential, with repeat")
    # Create chained sampler, random and sequential
    sampler = ds.RandomSampler()
    child_sampler = ds.SequentialSampler()
    sampler.add_child(child_sampler)
    # Create ImageFolderDataset with sampler
    data1 = ds.ImageFolderDataset(DATA_DIR, sampler=sampler)
    data1 = data1.repeat(count=3)
    # Verify dataset size
    data1_size = data1.get_dataset_size()
    logger.info("dataset size is: {}".format(data1_size))
    assert data1_size == 132
    # Verify number of iterations
    num_iter = 0
    for item in data1.create_dict_iterator(num_epochs=1):  # each data is a dictionary
        # in this example, each dictionary has keys "image" and "label"
        logger.info("image is {}".format(item["image"]))
        logger.info("label is {}".format(item["label"]))
        num_iter += 1
    logger.info("Number of data in data1: {}".format(num_iter))
    assert num_iter == 132


def test_chained_sampler_02():
    logger.info("Test Case Chained Sampler - Random and Sequential, with batch then repeat")
    # Create chained sampler, random and sequential
    sampler = ds.RandomSampler()
    child_sampler = ds.SequentialSampler()
    sampler.add_child(child_sampler)
    # Create ImageFolderDataset with sampler
    data1 = ds.ImageFolderDataset(DATA_DIR, sampler=sampler)
    data1 = data1.batch(batch_size=5, drop_remainder=True)
    data1 = data1.repeat(count=2)
    # Verify dataset size
    data1_size = data1.get_dataset_size()
    logger.info("dataset size is: {}".format(data1_size))
    assert data1_size == 16
    # Verify number of iterations
    num_iter = 0
    for item in data1.create_dict_iterator(num_epochs=1):  # each data is a dictionary
        # in this example, each dictionary has keys "image" and "label"
        logger.info("image is {}".format(item["image"]))
        logger.info("label is {}".format(item["label"]))
        num_iter += 1
    logger.info("Number of data in data1: {}".format(num_iter))
    assert num_iter == 16


def test_chained_sampler_03():
    logger.info("Test Case Chained Sampler - Random and Sequential, with repeat then batch")
    # Create chained sampler, random and sequential
    sampler = ds.RandomSampler()
    child_sampler = ds.SequentialSampler()
    sampler.add_child(child_sampler)
    # Create ImageFolderDataset with sampler
    data1 = ds.ImageFolderDataset(DATA_DIR, sampler=sampler)
    data1 = data1.repeat(count=2)
    data1 = data1.batch(batch_size=5, drop_remainder=False)
    # Verify dataset size
    data1_size = data1.get_dataset_size()
    logger.info("dataset size is: {}".format(data1_size))
    assert data1_size == 18
    # Verify number of iterations
    num_iter = 0
    for item in data1.create_dict_iterator(num_epochs=1):  # each data is a dictionary
        # in this example, each dictionary has keys "image" and "label"
        logger.info("image is {}".format(item["image"]))
        logger.info("label is {}".format(item["label"]))
        num_iter += 1
    logger.info("Number of data in data1: {}".format(num_iter))
    assert num_iter == 18


def test_chained_sampler_04():
    logger.info("Test Case Chained Sampler - Distributed and Random, with batch then repeat")
    # Create chained sampler, distributed and random
    sampler = ds.DistributedSampler(num_shards=4, shard_id=3)
    child_sampler = ds.RandomSampler()
    sampler.add_child(child_sampler)
    # Create ImageFolderDataset with sampler
    data1 = ds.ImageFolderDataset(DATA_DIR, sampler=sampler)
    data1 = data1.batch(batch_size=5, drop_remainder=True)
    data1 = data1.repeat(count=3)
    # Verify dataset size
    data1_size = data1.get_dataset_size()
    logger.info("dataset size is: {}".format(data1_size))
    assert data1_size == 24
    # Verify number of iterations
    num_iter = 0
    for item in data1.create_dict_iterator(num_epochs=1):  # each data is a dictionary
        # in this example, each dictionary has keys "image" and "label"
        logger.info("image is {}".format(item["image"]))
        logger.info("label is {}".format(item["label"]))
        num_iter += 1
    logger.info("Number of data in data1: {}".format(num_iter))
    # Note: Each of the 4 shards has 44/4 = 11 samples
    # Note: With batch_size=5 and drop_remainder=True, each pass yields 11 // 5 = 2 batches,
    #       so repeat(count=3) gives 2 * 3 = 6 iterations
    assert num_iter == 6


def skip_test_chained_sampler_05():
    logger.info("Test Case Chained Sampler - PKSampler and WeightedRandom")
    # Create chained sampler, PKSampler and WeightedRandom
    sampler = ds.PKSampler(num_val=3)  # Number of elements per class is 3 (and there are 4 classes)
    weights = [1.0, 0.1, 0.02, 0.3, 0.4, 0.05, 1.2, 0.13, 0.14, 0.015, 0.16, 0.5]
    child_sampler = ds.WeightedRandomSampler(weights, num_samples=12)
    sampler.add_child(child_sampler)
    # Create ImageFolderDataset with sampler
    data1 = ds.ImageFolderDataset(DATA_DIR, sampler=sampler)
    # Verify dataset size
    data1_size = data1.get_dataset_size()
    logger.info("dataset size is: {}".format(data1_size))
    assert data1_size == 12
    # Verify number of iterations
    num_iter = 0
    for item in data1.create_dict_iterator(num_epochs=1):  # each data is a dictionary
        # in this example, each dictionary has keys "image" and "label"
        logger.info("image is {}".format(item["image"]))
        logger.info("label is {}".format(item["label"]))
        num_iter += 1
    logger.info("Number of data in data1: {}".format(num_iter))
    # Note: PKSampler produces 4x3 = 12 samples
    # Note: Child WeightedRandomSampler produces 12 samples
    assert num_iter == 12


def test_chained_sampler_06():
    logger.info("Test Case Chained Sampler - WeightedRandom and PKSampler")
    # Create chained sampler, WeightedRandom and PKSampler
    weights = [1.0, 0.1, 0.02, 0.3, 0.4, 0.05, 1.2, 0.13, 0.14, 0.015, 0.16, 0.5]
    sampler = ds.WeightedRandomSampler(weights=weights, num_samples=12)
    child_sampler = ds.PKSampler(num_val=3)  # Number of elements per class is 3 (and there are 4 classes)
    sampler.add_child(child_sampler)
    # Create ImageFolderDataset with sampler
    data1 = ds.ImageFolderDataset(DATA_DIR, sampler=sampler)
    # Verify dataset size
    data1_size = data1.get_dataset_size()
    logger.info("dataset size is: {}".format(data1_size))
    assert data1_size == 12
    # Verify number of iterations
    num_iter = 0
    for item in data1.create_dict_iterator(num_epochs=1):  # each data is a dictionary
        # in this example, each dictionary has keys "image" and "label"
        logger.info("image is {}".format(item["image"]))
        logger.info("label is {}".format(item["label"]))
        num_iter += 1
    logger.info("Number of data in data1: {}".format(num_iter))
    # Note: WeightedRandomSampler produces 12 samples
    # Note: Child PKSampler produces 12 samples
    assert num_iter == 12


def test_chained_sampler_07():
    logger.info("Test Case Chained Sampler - SubsetRandom and Distributed, 2 shards")
    # Create chained sampler, subset random and distributed
    indices = [0, 1, 2, 3, 4, 5, 12, 13, 14, 15, 16, 11]
    sampler = ds.SubsetRandomSampler(indices, num_samples=12)
    child_sampler = ds.DistributedSampler(num_shards=2, shard_id=1)
    sampler.add_child(child_sampler)
    # Create ImageFolderDataset with sampler
    data1 = ds.ImageFolderDataset(DATA_DIR, sampler=sampler)
    # Verify dataset size
    data1_size = data1.get_dataset_size()
    logger.info("dataset size is: {}".format(data1_size))
    assert data1_size == 12
    # Verify number of iterations
    num_iter = 0
    for item in data1.create_dict_iterator(num_epochs=1):  # each data is a dictionary
        # in this example, each dictionary has keys "image" and "label"
        logger.info("image is {}".format(item["image"]))
        logger.info("label is {}".format(item["label"]))
        num_iter += 1
    logger.info("Number of data in data1: {}".format(num_iter))
    # Note: SubsetRandomSampler produces 12 samples
    # Note: Each of 2 shards has 6 samples
    # FIXME: Uncomment the following assert when code issue is resolved; at runtime, number of samples is 12 not 6
    # assert num_iter == 6


def skip_test_chained_sampler_08():
    logger.info("Test Case Chained Sampler - SubsetRandom and Distributed, 4 shards")
    # Create chained sampler, subset random and distributed
    indices = [0, 1, 2, 3, 4, 5, 12, 13, 14, 15, 16, 11]
    sampler = ds.SubsetRandomSampler(indices, num_samples=12)
    child_sampler = ds.DistributedSampler(num_shards=4, shard_id=1)
    sampler.add_child(child_sampler)
    # Create ImageFolderDataset with sampler
    data1 = ds.ImageFolderDataset(DATA_DIR, sampler=sampler)
    # Verify dataset size
    data1_size = data1.get_dataset_size()
    logger.info("dataset size is: {}".format(data1_size))
    assert data1_size == 3
    # Verify number of iterations
    num_iter = 0
    for item in data1.create_dict_iterator(num_epochs=1):  # each data is a dictionary
        # in this example, each dictionary has keys "image" and "label"
        logger.info("image is {}".format(item["image"]))
        logger.info("label is {}".format(item["label"]))
        num_iter += 1
    logger.info("Number of data in data1: {}".format(num_iter))
    # Note: SubsetRandomSampler returns 12 samples
    # Note: Each of 4 shards has 3 samples
    assert num_iter == 3


def test_imagefolder_rename():
    logger.info("Test Case rename")
    # define parameters
@@ -498,6 +744,30 @@ if __name__ == '__main__':
    test_weighted_random_sampler_exception()
    logger.info('test_weighted_random_sampler_exception Ended.\n')
    test_chained_sampler_01()
    logger.info('test_chained_sampler_01 Ended.\n')
    test_chained_sampler_02()
    logger.info('test_chained_sampler_02 Ended.\n')
    test_chained_sampler_03()
    logger.info('test_chained_sampler_03 Ended.\n')
    test_chained_sampler_04()
    logger.info('test_chained_sampler_04 Ended.\n')
    # test_chained_sampler_05()
    # logger.info('test_chained_sampler_05 Ended.\n')
    test_chained_sampler_06()
    logger.info('test_chained_sampler_06 Ended.\n')
    test_chained_sampler_07()
    logger.info('test_chained_sampler_07 Ended.\n')
    # test_chained_sampler_08()
    # logger.info('test_chained_sampler_08 Ended.\n')
    test_imagefolder_numshards()
    logger.info('test_imagefolder_numshards Ended.\n')
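
For reference, the size and iteration assertions in the chained-sampler tests above follow from simple arithmetic on the 44-sample ImageFolder test set (the count that test_sequential_sampler already asserts). A quick sketch of the expected values, assuming that sample count:

    import math

    total = 44
    assert total * 3 == 132                # test_chained_sampler_01: repeat(3)
    assert (total // 5) * 2 == 16          # test_chained_sampler_02: batch(5, drop_remainder=True), repeat(2)
    assert math.ceil(total * 2 / 5) == 18  # test_chained_sampler_03: repeat(2), batch(5, drop_remainder=False)
    assert ((total // 4) // 5) * 3 == 6    # test_chained_sampler_04: per-shard iterations with 4 shards

Note that test_chained_sampler_04 asserts get_dataset_size() == 24, which is (44 // 5) * 3 with no 4-way shard applied, while the iterator yields the per-shard count of 6; the size calculation does not appear to account for the DistributedSampler.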

@@ -0,0 +1,250 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import pytest

import mindspore.dataset as ds
import mindspore.dataset.transforms.c_transforms as c_transforms
from mindspore import log as logger
from util import save_and_check_md5

GENERATE_GOLDEN = False


def test_numpyslices_sampler_no_chain():
    """
    Test NumpySlicesDataset with sampler, no chain
    """
    logger.info("test_numpyslices_sampler_no_chain")
    # Create NumpySlicesDataset with sampler, no chain
    np_data = [1, 2, 3, 4]
    sampler = ds.SequentialSampler(start_index=1, num_samples=2)
    data1 = ds.NumpySlicesDataset(np_data, sampler=sampler)
    # Verify dataset size
    data1_size = data1.get_dataset_size()
    logger.info("dataset size is: {}".format(data1_size))
    assert data1_size == 2
    # Verify number of rows
    assert sum([1 for _ in data1]) == 2
    # Verify dataset contents
    res = []
    for item in data1.create_tuple_iterator(num_epochs=1, output_numpy=True):
        logger.info("item: {}".format(item))
        res.append(item)
    logger.info("dataset: {}".format(res))


def test_numpyslices_sampler_chain():
    """
    Test NumpySlicesDataset sampler chain
    """
    logger.info("test_numpyslices_sampler_chain")
    # Create NumpySlicesDataset with sampler chain
    # Use 1 statement to add child sampler
    np_data = [1, 2, 3, 4]
    sampler = ds.SequentialSampler(start_index=1, num_samples=2)
    sampler = sampler.add_child(ds.SequentialSampler(start_index=1, num_samples=2))
    data1 = ds.NumpySlicesDataset(np_data, sampler=sampler)
    # Verify dataset size
    data1_size = data1.get_dataset_size()
    logger.info("dataset size is: {}".format(data1_size))
    assert data1_size == 4
    # Verify number of rows
    assert sum([1 for _ in data1]) == 4
    # Verify dataset contents
    res = []
    for item in data1.create_tuple_iterator(num_epochs=1, output_numpy=True):
        logger.info("item: {}".format(item))
        res.append(item)
    logger.info("dataset: {}".format(res))


def test_numpyslices_sampler_chain2():
    """
    Test NumpySlicesDataset sampler chain
    """
    logger.info("test_numpyslices_sampler_chain2")
    # Create NumpySlicesDataset with sampler chain
    # Use 2 statements to add child sampler
    np_data = [1, 2, 3, 4]
    sampler = ds.SequentialSampler(start_index=1, num_samples=2)
    child_sampler = ds.SequentialSampler(start_index=1, num_samples=2)
    sampler.add_child(child_sampler)
    data1 = ds.NumpySlicesDataset(np_data, sampler=sampler)
    # Verify dataset size
    data1_size = data1.get_dataset_size()
    logger.info("dataset size is: {}".format(data1_size))
    # FIXME: Uncomment the following assert when code issue is resolved; at runtime, data1_size is 2 not 4
    # assert data1_size == 4
    # Verify number of rows
    # FIXME: Uncomment the following assert when code issue is resolved; at runtime, number of rows is 2 not 4
    # assert sum([1 for _ in data1]) == 4
    # Verify dataset contents
    # FIXME: Uncomment the following test code when runtime code issue is resolved
    # res = []
    # for item in data1.create_tuple_iterator(num_epochs=1, output_numpy=True):
    #     logger.info("item: {}".format(item))
    #     res.append(item)
    # logger.info("dataset: {}".format(res))


def test_numpyslices_sampler_chain_batch():
    """
    Test NumpySlicesDataset sampler chaining, with batch
    """
    logger.info("test_numpyslices_sampler_chain_batch")
    # Create NumpySlicesDataset with sampler chain
    np_data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    sampler = ds.SequentialSampler(start_index=1, num_samples=3)
    sampler = sampler.add_child(ds.SequentialSampler(start_index=1, num_samples=2))
    data1 = ds.NumpySlicesDataset(np_data, sampler=sampler)
    data1 = data1.batch(batch_size=3, drop_remainder=False)
    # Verify dataset size
    data1_size = data1.get_dataset_size()
    logger.info("dataset size is: {}".format(data1_size))
    assert data1_size == 4
    # Verify number of rows
    assert sum([1 for _ in data1]) == 4
    # Verify dataset contents
    res = []
    for item in data1.create_tuple_iterator(num_epochs=1, output_numpy=True):
        logger.info("item: {}".format(item))
        res.append(item)
    logger.info("dataset: {}".format(res))


def test_sampler_chain_errors():
    """
    Test error cases for sampler chains
    """
    logger.info("test_sampler_chain_errors")
    error_msg_1 = "'NoneType' object has no attribute 'add_child'"
    # Test add child sampler within child sampler
    sampler = ds.SequentialSampler(start_index=1, num_samples=2)
    sampler = sampler.add_child(ds.SequentialSampler(start_index=1, num_samples=2))
    with pytest.raises(AttributeError, match=error_msg_1):
        sampler.add_child(ds.SequentialSampler(start_index=1, num_samples=2))
    # error_msg_2 = "'NoneType' object has no attribute 'add_child'"
    # Test add second and nested child sampler
    sampler = ds.SequentialSampler(start_index=1, num_samples=2)
    child_sampler = ds.SequentialSampler(start_index=1, num_samples=2)
    sampler.add_child(child_sampler)
    child_sampler2 = ds.SequentialSampler(start_index=1, num_samples=2)
    sampler.add_child(child_sampler2)
    # FIXME - no error is raised; uncomment after code issue is resolved
    # with pytest.raises(AttributeError, match=error_msg_2):
    #     sampler.add_child(child_sampler2)
    # np_data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    # data1 = ds.NumpySlicesDataset(np_data, sampler=sampler)
    error_msg_3 = "Conflicting arguments during sampler assignments."
    # Test conflicting arguments (sampler and shuffle=False) for sampler (no chain)
    np_data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    sampler = ds.SequentialSampler(start_index=1, num_samples=3)
    with pytest.raises(ValueError, match=error_msg_3):
        ds.NumpySlicesDataset(np_data, shuffle=False, sampler=sampler)
    # error_msg_4 = "Conflicting arguments during sampler assignments."
    # Test conflicting arguments (sampler and shuffle=False) for sampler chaining
    np_data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    sampler = ds.SequentialSampler(start_index=1, num_samples=3)
    sampler = sampler.add_child(ds.SequentialSampler(start_index=1, num_samples=2))
    # FIXME - no error is raised; uncomment after code issue is resolved
    # with pytest.raises(ValueError, match=error_msg_4):
    #     ds.NumpySlicesDataset(np_data, shuffle=False, sampler=sampler)


def test_manifest_sampler_chain_repeat():
    """
    Test ManifestDataset sampler chain DistributedSampler->SequentialSampler, with repeat
    """
    logger.info("test_manifest_sampler_chain_repeat")
    manifest_file = "../data/dataset/testManifestData/test5trainimgs.json"
    # Create sampler chain DistributedSampler->SequentialSampler
    sampler = ds.DistributedSampler(num_shards=1, shard_id=0, shuffle=False, num_samples=5)
    child_sampler = ds.SequentialSampler()
    sampler.add_child(child_sampler)
    # Create ManifestDataset with sampler chain
    data1 = ds.ManifestDataset(manifest_file, sampler=sampler)
    data1 = data1.repeat(count=2)
    # Verify dataset size
    data1_size = data1.get_dataset_size()
    logger.info("dataset size is: {}".format(data1_size))
    assert data1_size == 10
    # Verify number of rows
    assert sum([1 for _ in data1]) == 10
    # Verify dataset contents
    filename = "sampler_chain_manifest_repeat_result.npz"
    save_and_check_md5(data1, filename, generate_golden=GENERATE_GOLDEN)


def test_manifest_sampler_chain_batch_repeat():
    """
    Test ManifestDataset sampler chain DistributedSampler->SequentialSampler, with batch then repeat
    """
    logger.info("test_manifest_sampler_chain_batch_repeat")
    manifest_file = "../data/dataset/testManifestData/test5trainimgs.json"
    # Create sampler chain DistributedSampler->SequentialSampler
    sampler = ds.DistributedSampler(num_shards=1, shard_id=0, shuffle=False, num_samples=5)
    child_sampler = ds.SequentialSampler()
    sampler.add_child(child_sampler)
    # Create ManifestDataset with sampler chain
    data1 = ds.ManifestDataset(manifest_file, decode=True, sampler=sampler)
    one_hot_encode = c_transforms.OneHot(3)
    data1 = data1.map(operations=one_hot_encode, input_columns=["label"])
    data1 = data1.batch(batch_size=5, drop_remainder=False)
    data1 = data1.repeat(count=2)
    # Verify dataset size
    data1_size = data1.get_dataset_size()
    logger.info("dataset size is: {}".format(data1_size))
    assert data1_size == 2
    # Verify number of rows
    # FIXME: Uncomment the following assert when code issue is resolved
    # assert sum([1 for _ in data1]) == 2


if __name__ == '__main__':
    test_numpyslices_sampler_no_chain()
    test_numpyslices_sampler_chain()
    test_numpyslices_sampler_chain2()
    test_numpyslices_sampler_chain_batch()
    test_sampler_chain_errors()
    test_manifest_sampler_chain_repeat()
    test_manifest_sampler_chain_batch_repeat()
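
One behavior worth calling out from test_sampler_chain_errors above: at this point in the code base, add_child() attaches the child to the parent in place and returns None, so the chained-assignment style (sampler = sampler.add_child(...)) rebinds the name to None; that is exactly what the expected AttributeError message in error_msg_1 confirms. A minimal sketch of the two styles, assuming that in-place behavior:

    import mindspore.dataset as ds

    # In-place style (as in test_numpyslices_sampler_chain2): the parent keeps its child.
    parent = ds.SequentialSampler(start_index=1, num_samples=2)
    parent.add_child(ds.SequentialSampler(start_index=1, num_samples=2))

    # Chained-assignment style (as in test_numpyslices_sampler_chain): add_child()
    # returns None, so the name ends up bound to None rather than to the chain.
    rebound = ds.SequentialSampler(start_index=1, num_samples=2)
    rebound = rebound.add_child(ds.SequentialSampler(start_index=1, num_samples=2))
    assert rebound is None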