# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
|
|
Data operations, will be used in train.py and eval.py
|
|
"""
|
|
import os

import mindspore.common.dtype as mstype
import mindspore.dataset as ds
import mindspore.dataset.transforms.c_transforms as C
import mindspore.dataset.vision.c_transforms as vision


def create_dataset_cifar10(data_home, repeat_num=1, training=True, cifar_cfg=None):
    """Create a CIFAR-10 dataset for train or eval."""
    data_dir = os.path.join(data_home, "cifar-10-batches-bin")
    if not training:
        data_dir = os.path.join(data_home, "cifar-10-verify-bin")

    if training:
        rank_size, rank_id = _get_rank_info()
        data_set = ds.Cifar10Dataset(data_dir, num_shards=rank_size, shard_id=rank_id, shuffle=True)
    else:
        data_set = ds.Cifar10Dataset(data_dir, shuffle=False)

    resize_height = cifar_cfg.image_height
    resize_width = cifar_cfg.image_width

    # define map operations
    random_crop_op = vision.RandomCrop((32, 32), (4, 4, 4, 4))  # padding_mode default CONSTANT
    random_horizontal_op = vision.RandomHorizontalFlip()
    resize_op = vision.Resize((resize_height, resize_width))  # interpolation default BILINEAR
    rescale_op = vision.Rescale(1.0 / 255.0, 0.0)
    normalize_op = vision.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
    changeswap_op = vision.HWC2CHW()
    type_cast_op = C.TypeCast(mstype.int32)

    c_trans = []
    if training:
        c_trans = [random_crop_op, random_horizontal_op]
    c_trans += [resize_op, rescale_op, normalize_op, changeswap_op]

    # apply map operations on images
    data_set = data_set.map(operations=type_cast_op, input_columns="label")
    data_set = data_set.map(operations=c_trans, input_columns="image")

    # apply batch operations
    data_set = data_set.batch(batch_size=cifar_cfg.batch_size, drop_remainder=True)

    # apply repeat operations
    data_set = data_set.repeat(repeat_num)

    return data_set
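
# Example usage (a minimal sketch, assuming `cfg` is a config object exposing
# `image_height`, `image_width` and `batch_size`, such as the cifar_cfg used by
# train.py; the dataset path below is a placeholder):
#
#     dataset = create_dataset_cifar10("/path/to/cifar10", repeat_num=1,
#                                      training=True, cifar_cfg=cfg)
#     for batch in dataset.create_dict_iterator():
#         images, labels = batch["image"], batch["label"]
#         break
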
def create_dataset_imagenet(dataset_path, repeat_num=1, training=True,
                            num_parallel_workers=None, shuffle=None, imagenet_cfg=None):
    """
    Create a train or eval ImageNet2012 dataset for resnet50.

    Args:
        dataset_path(string): the path of dataset.
        repeat_num(int): the repeat times of dataset. Default: 1
        training(bool): whether dataset is used for train or eval. Default: True
        num_parallel_workers(int): number of workers to read the data. Default: None
        shuffle(bool): whether to shuffle the dataset. Default: None
        imagenet_cfg: config object with image_height, image_width and batch_size.

    Returns:
        dataset
    """
    if training:
        device_num, rank_id = _get_rank_info()
        if device_num == 1:
            data_set = ds.ImageFolderDataset(dataset_path, num_parallel_workers=num_parallel_workers, shuffle=shuffle)
        else:
            data_set = ds.ImageFolderDataset(dataset_path, num_parallel_workers=num_parallel_workers, shuffle=shuffle,
                                             num_shards=device_num, shard_id=rank_id)
    else:
        data_set = ds.ImageFolderDataset(dataset_path, num_parallel_workers=num_parallel_workers, shuffle=shuffle)

    assert imagenet_cfg.image_height == imagenet_cfg.image_width, "image_height not equal image_width"
    image_size = imagenet_cfg.image_height
    mean = [0.485 * 255, 0.456 * 255, 0.406 * 255]
    std = [0.229 * 255, 0.224 * 255, 0.225 * 255]

    # define map operations
    if training:
        transform_img = [
            vision.RandomCropDecodeResize(image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
            vision.RandomHorizontalFlip(prob=0.5),
            vision.RandomColorAdjust(0.4, 0.4, 0.4, 0.1),
            vision.Normalize(mean=mean, std=std),
            vision.HWC2CHW()
        ]
    else:
        transform_img = [
            vision.Decode(),
            vision.Resize(256),
            vision.CenterCrop(image_size),
            vision.Normalize(mean=mean, std=std),
            vision.HWC2CHW()
        ]

    transform_label = [C.TypeCast(mstype.int32)]

    data_set = data_set.map(input_columns="image", num_parallel_workers=12, operations=transform_img)
    data_set = data_set.map(input_columns="label", num_parallel_workers=4, operations=transform_label)

    # apply batch operations
    data_set = data_set.batch(imagenet_cfg.batch_size, drop_remainder=True)

    # apply dataset repeat operation
    data_set = data_set.repeat(repeat_num)

    return data_set
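
# Example usage (a minimal sketch, assuming `cfg` exposes `image_height`,
# `image_width` and `batch_size`, and that the path points at an ImageNet-style
# folder with one sub-directory per class):
#
#     dataset = create_dataset_imagenet("/path/to/imagenet/train", repeat_num=1,
#                                       training=True, num_parallel_workers=8,
#                                       shuffle=True, imagenet_cfg=cfg)
#     print(dataset.get_dataset_size())  # number of batches per epoch
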
def _get_rank_info():
    """
    get rank size and rank id
    """
    rank_size = int(os.environ.get("RANK_SIZE", 1))

    if rank_size > 1:
        from mindspore.communication.management import get_rank, get_group_size
        rank_size = get_group_size()
        rank_id = get_rank()
    else:
        rank_size = rank_id = None

    return rank_size, rank_id
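
# How these helpers fit together (a minimal sketch, assuming a distributed launch
# that sets RANK_SIZE, e.g. on Ascend or GPU; `cfg` is a placeholder config object):
#
#     import mindspore.context as context
#     from mindspore.communication.management import init
#
#     context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
#     if int(os.environ.get("RANK_SIZE", 1)) > 1:
#         init()  # must run before _get_rank_info() so get_rank()/get_group_size() work
#     dataset = create_dataset_cifar10("/path/to/cifar10", training=True, cifar_cfg=cfg)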