!1713 [Dataset] Clean pylint.

Yang 2020-05-30 11:30:00 +08:00
parent 2aa1020d2e
commit 5522edc4b5
7 changed files with 12 additions and 15 deletions

View File

@@ -124,12 +124,12 @@ def test_case_2():
num_iter = 0
for _ in dataset1.create_dict_iterator():
num_iter += 1
assert (num_iter == 5)
assert num_iter == 5
num_iter = 0
for _ in dataset2.create_dict_iterator():
num_iter += 1
assert (num_iter == 5)
assert num_iter == 5
def test_voc_exception():
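
The change in this file removes the redundant parentheses that pylint reports as superfluous-parens (C0325) after the `assert` keyword. A minimal standalone sketch of the same pattern, with a plain `range()` standing in for the dataset iterator (the helper name is illustrative, not from the original test):

```python
def count_items(iterable):
    """Count items the same way the dataset tests count iterator rows."""
    num_iter = 0
    for _ in iterable:
        num_iter += 1
    return num_iter


if __name__ == "__main__":
    num_iter = count_items(range(5))
    # before the cleanup: assert (num_iter == 5)   <- pylint C0325 superfluous-parens
    assert num_iter == 5
```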

View File

@@ -14,9 +14,8 @@
"""
Testing FiveCrop in DE
"""
import matplotlib.pyplot as plt
import numpy as np
import pytest
import numpy as np
import mindspore.dataset as ds
import mindspore.dataset.transforms.vision.py_transforms as vision
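
This hunk appears to drop the `matplotlib` import (presumably unused, pylint W0611 unused-import) and shuffle the remaining third-party imports. A hedged sketch of an import block that keeps pylint's import checks quiet; the `json` module and the toy test are illustrative, not part of the original file:

```python
# Standard library first, then third-party, each group kept together
# (C0411 wrong-import-order / C0412 ungrouped-imports), and nothing
# imported that is not used (W0611 unused-import).
import json

import numpy as np
import pytest


def test_json_round_trip():
    """Toy test that uses every import above, so nothing is unused."""
    data = np.arange(3).tolist()
    assert json.loads(json.dumps(data)) == data


if __name__ == "__main__":
    pytest.main([__file__, "-q"])
```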

View File

@@ -272,7 +272,7 @@ def test_random_crop_and_resize_05_py():
expected to raise ValueError
"""
logger.info("test_random_crop_and_resize_05_py")
# Generate dataset
data = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"], shuffle=False)
try:

View File

@@ -215,7 +215,7 @@ def test_random_horizontal_comp(plot=False):
image_py = (item2["image"].transpose(1, 2, 0) * 255).astype(np.uint8)
images_list_c.append(image_c)
images_list_py.append(image_py)
# Check if the output images are the same
mse = diff_mse(image_c, image_py)
assert mse < 0.001

View File

@@ -170,6 +170,7 @@ def test_subset_sampler():
map_ = {(172876, 0): 0, (54214, 0): 1, (54214, 1): 2, (173673, 0): 3, (64631, 1): 4}
def test_config(num_samples, start_index, subset_size):
_ = num_samples
sampler = ds.SubsetSampler(start_index, subset_size)
d = ds.ManifestDataset(manifest_file, sampler=sampler)
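
The added `_ = num_samples` line is a common way to quiet pylint's unused-argument warning (W0613) when a helper must keep a parameter in its signature. A self-contained sketch of the idiom; `select_subset` and the `range(100)` stand-in for the ManifestDataset rows are illustrative:

```python
def select_subset(num_samples, start_index, subset_size):
    """Keep the full signature but explicitly discard the unused argument."""
    _ = num_samples                       # quiets pylint W0613 unused-argument
    data = list(range(100))               # stand-in for the dataset rows
    return data[start_index:start_index + subset_size]


if __name__ == "__main__":
    assert select_subset(num_samples=None, start_index=2, subset_size=3) == [2, 3, 4]
```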

View File

@@ -204,7 +204,6 @@ def test_sync_exception_03():
Test sync: with wrong batch size
"""
logger.info("test_sync_exception_03")
batch_size = 6
dataset = ds.GeneratorDataset(gen, column_names=["input"])
@@ -223,7 +222,6 @@ def test_sync_exception_04():
Test sync: with negative batch size in update
"""
logger.info("test_sync_exception_04")
batch_size = 6
dataset = ds.GeneratorDataset(gen, column_names=["input"])
@@ -233,7 +231,7 @@ def test_sync_exception_04():
dataset = dataset.map(input_columns=["input"], operations=[aug.preprocess])
count = 0
try:
for item in dataset.create_dict_iterator():
for _ in dataset.create_dict_iterator():
count += 1
data = {"loss": count}
# dataset.disable_sync()
@@ -246,7 +244,6 @@ def test_sync_exception_05():
Test sync: with wrong batch size in update
"""
logger.info("test_sync_exception_05")
batch_size = 6
dataset = ds.GeneratorDataset(gen, column_names=["input"])
count = 0
@@ -255,7 +252,7 @@ def test_sync_exception_05():
dataset = dataset.sync_wait(condition_name="every batch", callback=aug.update)
dataset = dataset.map(input_columns=["input"], operations=[aug.preprocess])
try:
for item in dataset.create_dict_iterator():
for _ in dataset.create_dict_iterator():
dataset.disable_sync()
count += 1
data = {"loss": count}
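
Two cleanups recur in this file: the unused local `batch_size = 6` is deleted and the loop variable that is never read becomes `_`, both answers to pylint's unused-variable warning (W0612). A minimal sketch with a plain iterable in place of the GeneratorDataset iterator (the helper name is illustrative):

```python
def count_rows(iterator):
    """Count rows without binding each one to a name that is never read."""
    count = 0
    for _ in iterator:        # `_` replaces the unused `item` (pylint W0612)
        count += 1
    return count


if __name__ == "__main__":
    assert count_rows({"input": i} for i in range(4)) == 4
```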

View File

@@ -17,9 +17,9 @@ Testing TenCrop in DE
import pytest
import numpy as np
from util import visualize, save_and_check_md5
import mindspore.dataset as ds
import mindspore.dataset.transforms.vision.py_transforms as vision
from util import visualize, save_and_check_md5
from mindspore import log as logger
GENERATE_GOLDEN = False
@@ -123,7 +123,7 @@ def test_ten_crop_list_size_error_msg():
logger.info("test_ten_crop_list_size_error_msg")
with pytest.raises(TypeError) as info:
transforms = [
_ = [
vision.Decode(),
vision.TenCrop([200, 200, 200]),
lambda images: np.stack([vision.ToTensor()(image) for image in images]) # 4D stack of 10 images
@@ -139,7 +139,7 @@ def test_ten_crop_invalid_size_error_msg():
logger.info("test_ten_crop_invalid_size_error_msg")
with pytest.raises(ValueError) as info:
transforms = [
_ = [
vision.Decode(),
vision.TenCrop(0),
lambda images: np.stack([vision.ToTensor()(image) for image in images]) # 4D stack of 10 images
@@ -148,7 +148,7 @@ def test_ten_crop_invalid_size_error_msg():
assert error_msg == str(info.value)
with pytest.raises(ValueError) as info:
transforms = [
_ = [
vision.Decode(),
vision.TenCrop(-10),
lambda images: np.stack([vision.ToTensor()(image) for image in images]) # 4D stack of 10 images
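
Here the list built inside `pytest.raises` exists only so that the `TenCrop` constructor raises, so the result is assigned to `_` instead of a never-used `transforms` name (again pylint's unused-variable, W0612). A hedged sketch of the pattern; `FakeCrop` is a stand-in for the real operator and its error message is invented:

```python
import pytest


class FakeCrop:
    """Toy transform that validates its size argument, loosely like TenCrop."""
    def __init__(self, size):
        if isinstance(size, int) and size <= 0:
            raise ValueError("size must be positive")


def test_fake_crop_invalid_size():
    with pytest.raises(ValueError) as info:
        _ = [FakeCrop(-10)]           # built only so the constructor raises
    assert "must be positive" in str(info.value)


if __name__ == "__main__":
    test_fake_crop_invalid_size()
```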