change unsupport to unsupported

parent 4149274b9e
commit bb125cb309
@@ -384,7 +384,7 @@ std::vector<std::pair<KernelGraphPtr, std::vector<AnfNodePtr>>> AscendControlPar
       ret.emplace_back(target_graph, args);
     }
   } else {
-    MS_LOG(EXCEPTION) << "Unsupport call node: " << cnode->DebugString(5);
+    MS_LOG(EXCEPTION) << "Unsupported call node: " << cnode->DebugString(5);
   }
   return ret;
 }

@@ -59,7 +59,7 @@ MPI_Op GetMpiOp(const std::string &op_type) {
     return MPI_PROD;
   }

-  RAISE_EXCEPTION_WITH_PARAM("unsupport op_type: ", op_type);
+  RAISE_EXCEPTION_WITH_PARAM("Unsupported op_type: ", op_type);
   return MPI_SUM;
 }

@@ -159,7 +159,7 @@ void convertDataItem2Scalar(const char *str_data_ptr, const string &tensor_type,
   } else if (type_id == TypeId::kNumberTypeFloat64) {
     PrintScalarToString<double>(str_data_ptr, tensor_type, buf);
   } else {
-    MS_LOG(EXCEPTION) << "Cannot print scalar because of unsupport data type: " << tensor_type << ".";
+    MS_LOG(EXCEPTION) << "Cannot print scalar because of unsupported data type: " << tensor_type << ".";
   }
 }

@@ -49,7 +49,7 @@ int Cast::InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<li
     return 1;
   }
   if (kSupportDataType.find(input->data_type()) == kSupportDataType.end()) {
-    MS_LOG(ERROR) << "Unsupport input data type " << input->data_type();
+    MS_LOG(ERROR) << "Unsupported input data type " << input->data_type();
     return 1;
   }
   if (GetDstT() != kNumberTypeFloat && GetDstT() != kNumberTypeFloat32) {

@@ -76,7 +76,7 @@ int CastFp16CPUKernel::DoCast(int thread_id) {
                       reinterpret_cast<float *>(output_data) + offset, data_num);
       break;
     default:
-      MS_LOG(ERROR) << "Unsupport input data type " << input->data_type();
+      MS_LOG(ERROR) << "Unsupported input data type " << input->data_type();
       return RET_ERROR;
   }
   return RET_OK;

@@ -139,7 +139,7 @@ def _shape_check(shape_a, shape_b, shape_bias, src_dtype, trans_a, trans_b):
         if [i for i in shape_bias[-2:]] != [m_shape, n_shape]:
             raise RuntimeError("non broadcast bias shape must be same as output shape")
     else:
-        raise RuntimeError("unsupport input shape now for batch bias case")
+        raise RuntimeError("Unsupported input shape now for batch bias case")


 def _get_bias(shape_bias):

@@ -136,7 +136,7 @@ src_dtype: str
         if [i for i in shape_bias[-2:]] != [m_shape, n_shape]:
             raise RuntimeError("non broadcast bias shape must be same as output shape")
     else:
-        raise RuntimeError("unsupport input shape now for batch bias case")
+        raise RuntimeError("Unsupported input shape now for batch bias case")


 def _get_bias(shape_bias):

@@ -141,7 +141,7 @@ def _shape_check(shape_a, shape_b, shape_bias, src_dtype, trans_a, trans_b):
         if [i for i in shape_bias[-2:]] != [m_shape, n_shape]:
             raise RuntimeError("non broadcast bias shape must be same as output shape")
     else:
-        raise RuntimeError("unsupport input shape now for batch bias case")
+        raise RuntimeError("unsupported input shape now for batch bias case")


 def _get_bias(shape_bias):

@@ -427,7 +427,7 @@ class Profiler:
             logger.error("Fail to get DEVICE_ID, use 0 instead.")

         if device_target and device_target not in ["Davinci", "Ascend", "GPU"]:
-            msg = "Profiling: unsupport backend: %s" % device_target
+            msg = "Profiling: unsupported backend: %s" % device_target
             raise RuntimeError(msg)

         self._dev_id = dev_id

@@ -131,7 +131,7 @@ class Model:
     def _check_kwargs(self, kwargs):
         for arg in kwargs:
             if arg not in ['loss_scale_manager', 'keep_batchnorm_fp32']:
-                raise ValueError(f"Unsupport arg '{arg}'")
+                raise ValueError(f"Unsupported arg '{arg}'")

     def _build_train_network(self):
         """Build train network"""

@@ -88,7 +88,7 @@ if __name__ == '__main__':
         context.set_auto_parallel_context(device_num=device_num, parallel_mode=ParallelMode.DATA_PARALLEL,
                                           mirror_mean=True)
     else:
-        raise ValueError("Unsupport platform.")
+        raise ValueError("Unsupported platform.")

     dataset = create_dataset(cfg.data_path, 1)
     batch_num = dataset.get_dataset_size()

@@ -467,7 +467,7 @@ def data_to_mindrecord_byte_image(dataset="coco", is_training=True, prefix="mask
     if dataset == "coco":
         image_files, image_anno_dict, masks, masks_shape = create_coco_label(is_training)
     else:
-        print("Error unsupport other dataset")
+        print("Error unsupported other dataset")
         return

     maskrcnn_json = {

@@ -30,31 +30,31 @@ from src.mobilenetV2 import mobilenet_v2
 parser = argparse.ArgumentParser(description='Image classification')
 parser.add_argument('--checkpoint_path', type=str, default=None, help='Checkpoint file path')
 parser.add_argument('--dataset_path', type=str, default=None, help='Dataset path')
-parser.add_argument('--platform', type=str, default=None, help='run platform')
+parser.add_argument('--device_target', type=str, default=None, help='run device_target')
 args_opt = parser.parse_args()


 if __name__ == '__main__':
-    config_platform = None
+    config = None
     net = None
-    if args_opt.platform == "Ascend":
-        config_platform = config_ascend
+    if args_opt.device_target == "Ascend":
+        config = config_ascend
         device_id = int(os.getenv('DEVICE_ID'))
         context.set_context(mode=context.GRAPH_MODE, device_target="Ascend",
                             device_id=device_id, save_graphs=False)
-        net = mobilenet_v2(num_classes=config_platform.num_classes, platform="Ascend")
-    elif args_opt.platform == "GPU":
-        config_platform = config_gpu
+        net = mobilenet_v2(num_classes=config.num_classes, device_target="Ascend")
+    elif args_opt.device_target == "GPU":
+        config = config_gpu
         context.set_context(mode=context.GRAPH_MODE,
                             device_target="GPU", save_graphs=False)
-        net = mobilenet_v2(num_classes=config_platform.num_classes, platform="GPU")
+        net = mobilenet_v2(num_classes=config.num_classes, device_target="GPU")
     else:
-        raise ValueError("Unsupport platform.")
+        raise ValueError("Unsupported device_target.")

     loss = nn.SoftmaxCrossEntropyWithLogits(
         is_grad=False, sparse=True, reduction='mean')

-    if args_opt.platform == "Ascend":
+    if args_opt.device_target == "Ascend":
         net.to_float(mstype.float16)
         for _, cell in net.cells_and_names():
             if isinstance(cell, nn.Dense):

@@ -62,9 +62,9 @@ if __name__ == '__main__':

     dataset = create_dataset(dataset_path=args_opt.dataset_path,
                              do_train=False,
-                             config=config_platform,
-                             platform=args_opt.platform,
-                             batch_size=config_platform.batch_size)
+                             config=config,
+                             device_target=args_opt.device_target,
+                             batch_size=config.batch_size)
     step_size = dataset.get_dataset_size()

     if args_opt.checkpoint_path:

@@ -15,8 +15,8 @@
 # ============================================================================
 if [ $# != 3 ]
 then
-    echo "Ascend: sh run_infer.sh [PLATFORM] [DATASET_PATH] [CHECKPOINT_PATH] \
-    GPU: sh run_infer.sh [PLATFORM] [DATASET_PATH] [CHECKPOINT_PATH]"
+    echo "Ascend: sh run_infer.sh [DEVICE_TARGET] [DATASET_PATH] [CHECKPOINT_PATH] \
+    GPU: sh run_infer.sh [DEVICE_TARGET] [DATASET_PATH] [CHECKPOINT_PATH]"
     exit 1
 fi

@@ -49,7 +49,7 @@ cd ../eval || exit

 # launch
 python ${BASEPATH}/../eval.py \
-    --platform=$1 \
+    --device_target=$1 \
     --dataset_path=$2 \
     --checkpoint_path=$3 \
     &> ../infer.log & # dataset val folder path

@@ -43,7 +43,7 @@ run_ascend()
         --training_script=${BASEPATH}/../train.py \
         --dataset_path=$5 \
         --pre_trained=$6 \
-        --platform=$1 &> ../train.log & # dataset train folder
+        --device_target=$1 &> ../train.log & # dataset train folder
 }

 run_gpu()

@@ -73,7 +73,7 @@ run_gpu()
     mpirun -n $2 --allow-run-as-root \
     python ${BASEPATH}/../train.py \
         --dataset_path=$4 \
-        --platform=$1 \
+        --device_target=$1 \
         &> ../train.log & # dataset train folder
 }

@@ -91,6 +91,6 @@ if [ $1 = "Ascend" ] ; then
 elif [ $1 = "GPU" ] ; then
     run_gpu "$@"
 else
-    echo "Unsupported platform."
+    echo "Unsupported device_target."
 fi;

@@ -21,7 +21,7 @@ import mindspore.dataset.engine as de
 import mindspore.dataset.transforms.vision.c_transforms as C
 import mindspore.dataset.transforms.c_transforms as C2

-def create_dataset(dataset_path, do_train, config, platform, repeat_num=1, batch_size=32):
+def create_dataset(dataset_path, do_train, config, device_target, repeat_num=1, batch_size=32):
     """
     create a train or eval dataset

@@ -34,7 +34,7 @@ def create_dataset(dataset_path, do_train, config, platform, repeat_num=1, batch
     Returns:
         dataset
     """
-    if platform == "Ascend":
+    if device_target == "Ascend":
         rank_size = int(os.getenv("RANK_SIZE"))
         rank_id = int(os.getenv("RANK_ID"))
         if rank_size == 1:

@@ -42,7 +42,7 @@ def create_dataset(dataset_path, do_train, config, platform, repeat_num=1, batch
         else:
             ds = de.ImageFolderDatasetV2(dataset_path, num_parallel_workers=8, shuffle=True,
                                          num_shards=rank_size, shard_id=rank_id)
-    elif platform == "GPU":
+    elif device_target == "GPU":
         if do_train:
             from mindspore.communication.management import get_rank, get_group_size
             ds = de.ImageFolderDatasetV2(dataset_path, num_parallel_workers=8, shuffle=True,

@@ -50,7 +50,7 @@ def create_dataset(dataset_path, do_train, config, platform, repeat_num=1, batch
         else:
             ds = de.ImageFolderDatasetV2(dataset_path, num_parallel_workers=8, shuffle=True)
     else:
-        raise ValueError("Unsupport platform.")
+        raise ValueError("Unsupported device_target.")

     resize_height = config.image_height
     resize_width = config.image_width

@@ -119,15 +119,15 @@ class ConvBNReLU(nn.Cell):
         >>> ConvBNReLU(16, 256, kernel_size=1, stride=1, groups=1)
     """

-    def __init__(self, platform, in_planes, out_planes, kernel_size=3, stride=1, groups=1):
+    def __init__(self, device_target, in_planes, out_planes, kernel_size=3, stride=1, groups=1):
         super(ConvBNReLU, self).__init__()
         padding = (kernel_size - 1) // 2
         if groups == 1:
             conv = nn.Conv2d(in_planes, out_planes, kernel_size, stride, pad_mode='pad', padding=padding)
         else:
-            if platform == "Ascend":
+            if device_target == "Ascend":
                 conv = DepthwiseConv(in_planes, kernel_size, stride, pad_mode='pad', pad=padding)
-            elif platform == "GPU":
+            elif device_target == "GPU":
                 conv = nn.Conv2d(in_planes, out_planes, kernel_size, stride,
                                  group=in_planes, pad_mode='pad', padding=padding)

@@ -156,7 +156,7 @@ class InvertedResidual(nn.Cell):
         >>> ResidualBlock(3, 256, 1, 1)
     """

-    def __init__(self, platform, inp, oup, stride, expand_ratio):
+    def __init__(self, device_target, inp, oup, stride, expand_ratio):
         super(InvertedResidual, self).__init__()
         assert stride in [1, 2]

@@ -165,10 +165,10 @@ class InvertedResidual(nn.Cell):

         layers = []
         if expand_ratio != 1:
-            layers.append(ConvBNReLU(platform, inp, hidden_dim, kernel_size=1))
+            layers.append(ConvBNReLU(device_target, inp, hidden_dim, kernel_size=1))
         layers.extend([
             # dw
-            ConvBNReLU(platform, hidden_dim, hidden_dim,
+            ConvBNReLU(device_target, hidden_dim, hidden_dim,
                        stride=stride, groups=hidden_dim),
             # pw-linear
             nn.Conv2d(hidden_dim, oup, kernel_size=1,

@@ -204,7 +204,7 @@ class MobileNetV2(nn.Cell):
         >>> MobileNetV2(num_classes=1000)
     """

-    def __init__(self, platform, num_classes=1000, width_mult=1.,
+    def __init__(self, device_target, num_classes=1000, width_mult=1.,
                  has_dropout=False, inverted_residual_setting=None, round_nearest=8):
         super(MobileNetV2, self).__init__()
         block = InvertedResidual

@@ -227,16 +227,16 @@ class MobileNetV2(nn.Cell):
         # building first layer
         input_channel = _make_divisible(input_channel * width_mult, round_nearest)
         self.out_channels = _make_divisible(last_channel * max(1.0, width_mult), round_nearest)
-        features = [ConvBNReLU(platform, 3, input_channel, stride=2)]
+        features = [ConvBNReLU(device_target, 3, input_channel, stride=2)]
         # building inverted residual blocks
         for t, c, n, s in self.cfgs:
             output_channel = _make_divisible(c * width_mult, round_nearest)
             for i in range(n):
                 stride = s if i == 0 else 1
-                features.append(block(platform, input_channel, output_channel, stride, expand_ratio=t))
+                features.append(block(device_target, input_channel, output_channel, stride, expand_ratio=t))
                 input_channel = output_channel
         # building last several layers
-        features.append(ConvBNReLU(platform, input_channel, self.out_channels, kernel_size=1))
+        features.append(ConvBNReLU(device_target, input_channel, self.out_channels, kernel_size=1))
         # make it nn.CellList
         self.features = nn.SequentialCell(features)
         # mobilenet head

@@ -49,10 +49,10 @@ de.config.set_seed(1)
 parser = argparse.ArgumentParser(description='Image classification')
 parser.add_argument('--dataset_path', type=str, default=None, help='Dataset path')
 parser.add_argument('--pre_trained', type=str, default=None, help='Pretrained checkpoint path')
-parser.add_argument('--platform', type=str, default=None, help='run platform')
+parser.add_argument('--device_target', type=str, default=None, help='run device_target')
 args_opt = parser.parse_args()

-if args_opt.platform == "Ascend":
+if args_opt.device_target == "Ascend":
     device_id = int(os.getenv('DEVICE_ID'))
     rank_id = int(os.getenv('RANK_ID'))
     rank_size = int(os.getenv('RANK_SIZE'))

|
@ -61,7 +61,7 @@ if args_opt.platform == "Ascend":
|
||||||
context.set_context(mode=context.GRAPH_MODE,
|
context.set_context(mode=context.GRAPH_MODE,
|
||||||
device_target="Ascend",
|
device_target="Ascend",
|
||||||
device_id=device_id, save_graphs=False)
|
device_id=device_id, save_graphs=False)
|
||||||
elif args_opt.platform == "GPU":
|
elif args_opt.device_targe == "GPU":
|
||||||
context.set_context(mode=context.GRAPH_MODE,
|
context.set_context(mode=context.GRAPH_MODE,
|
||||||
device_target="GPU",
|
device_target="GPU",
|
||||||
save_graphs=False)
|
save_graphs=False)
|
||||||
|
@@ -161,13 +161,13 @@ class Monitor(Callback):


 if __name__ == '__main__':
-    if args_opt.platform == "GPU":
+    if args_opt.device_target == "GPU":
         # train on gpu
         print("train args: ", args_opt)
         print("cfg: ", config_gpu)

         # define network
-        net = mobilenet_v2(num_classes=config_gpu.num_classes, platform="GPU")
+        net = mobilenet_v2(num_classes=config_gpu.num_classes, device_target="GPU")
         # define loss
         if config_gpu.label_smooth > 0:
             loss = CrossEntropyWithLabelSmooth(smooth_factor=config_gpu.label_smooth,

@@ -179,7 +179,7 @@ if __name__ == '__main__':
         dataset = create_dataset(dataset_path=args_opt.dataset_path,
                                  do_train=True,
                                  config=config_gpu,
-                                 platform=args_opt.platform,
+                                 device_target=args_opt.device_target,
                                  repeat_num=1,
                                  batch_size=config_gpu.batch_size)
         step_size = dataset.get_dataset_size()

@@ -216,7 +216,7 @@ if __name__ == '__main__':
         # begin train
         model.train(epoch_size, dataset, callbacks=cb)
         print("============== End Training ==============")
-    elif args_opt.platform == "Ascend":
+    elif args_opt.device_target == "Ascend":
         # train on ascend
         print("train args: ", args_opt, "\ncfg: ", config_ascend,
               "\nparallel args: rank_id {}, device_id {}, rank_size {}".format(rank_id, device_id, rank_size))

@@ -228,7 +228,7 @@ if __name__ == '__main__':
             init()

         epoch_size = config_ascend.epoch_size
-        net = mobilenet_v2(num_classes=config_ascend.num_classes, platform="Ascend")
+        net = mobilenet_v2(num_classes=config_ascend.num_classes, device_target="Ascend")
         net.to_float(mstype.float16)
         for _, cell in net.cells_and_names():
             if isinstance(cell, nn.Dense):

@@ -242,7 +242,7 @@ if __name__ == '__main__':
         dataset = create_dataset(dataset_path=args_opt.dataset_path,
                                  do_train=True,
                                  config=config_ascend,
-                                 platform=args_opt.platform,
+                                 device_target=args_opt.device_target,
                                  repeat_num=1,
                                  batch_size=config_ascend.batch_size)
         step_size = dataset.get_dataset_size()

@@ -276,4 +276,4 @@ if __name__ == '__main__':
             cb += [ckpt_cb]
         model.train(epoch_size, dataset, callbacks=cb)
     else:
-        raise ValueError("Unsupport platform.")
+        raise ValueError("Unsupported device_target.")

@@ -61,7 +61,7 @@ def create_dataset(dataset_path, do_train, config, device_target, repeat_num=1,
         else:
             ds = de.ImageFolderDatasetV2(dataset_path, num_parallel_workers=8, shuffle=True)
     else:
-        raise ValueError("Unsupport device_target.")
+        raise ValueError("Unsupported device_target.")

     resize_height = config.image_height

@@ -207,3 +207,5 @@ if __name__ == '__main__':
         train_on_ascend()
     elif args_opt.device_target == "GPU":
         train_on_gpu()
+    else:
+        raise ValueError("Unsupported device target.")

@@ -30,29 +30,29 @@ from src.mobilenetV3 import mobilenet_v3_large
 parser = argparse.ArgumentParser(description='Image classification')
 parser.add_argument('--checkpoint_path', type=str, default=None, help='Checkpoint file path')
 parser.add_argument('--dataset_path', type=str, default=None, help='Dataset path')
-parser.add_argument('--platform', type=str, default=None, help='run platform')
+parser.add_argument('--device_target', type=str, default=None, help='run device_target')
 args_opt = parser.parse_args()


 if __name__ == '__main__':
-    config_platform = None
-    if args_opt.platform == "Ascend":
-        config_platform = config_ascend
+    config = None
+    if args_opt.device_target == "Ascend":
+        config = config_ascend
         device_id = int(os.getenv('DEVICE_ID'))
         context.set_context(mode=context.GRAPH_MODE, device_target="Ascend",
                             device_id=device_id, save_graphs=False)
-    elif args_opt.platform == "GPU":
-        config_platform = config_gpu
+    elif args_opt.device_target == "GPU":
+        config = config_gpu
         context.set_context(mode=context.GRAPH_MODE,
                             device_target="GPU", save_graphs=False)
     else:
-        raise ValueError("Unsupport platform.")
+        raise ValueError("Unsupported device_target.")

     loss = nn.SoftmaxCrossEntropyWithLogits(
         is_grad=False, sparse=True, reduction='mean')
-    net = mobilenet_v3_large(num_classes=config_platform.num_classes)
+    net = mobilenet_v3_large(num_classes=config.num_classes)

-    if args_opt.platform == "Ascend":
+    if args_opt.device_target == "Ascend":
         net.to_float(mstype.float16)
         for _, cell in net.cells_and_names():
             if isinstance(cell, nn.Dense):

@@ -60,9 +60,9 @@ if __name__ == '__main__':

     dataset = create_dataset(dataset_path=args_opt.dataset_path,
                              do_train=False,
-                             config=config_platform,
-                             platform=args_opt.platform,
-                             batch_size=config_platform.batch_size)
+                             config=config,
+                             device_target=args_opt.device_target,
+                             batch_size=config.batch_size)
     step_size = dataset.get_dataset_size()

     if args_opt.checkpoint_path:

@@ -15,8 +15,7 @@
 # ============================================================================
 if [ $# != 3 ]
 then
-    echo "Ascend: sh run_infer.sh [PLATFORM] [DATASET_PATH] [CHECKPOINT_PATH] \
-    GPU: sh run_infer.sh [PLATFORM] [DATASET_PATH] [CHECKPOINT_PATH]"
+    echo "GPU: sh run_infer.sh [DEVICE_TARGET] [DATASET_PATH] [CHECKPOINT_PATH]"
     exit 1
 fi

@@ -49,7 +48,7 @@ cd ../eval || exit

 # launch
 python ${BASEPATH}/../eval.py \
-    --platform=$1 \
+    --device_target=$1 \
     --dataset_path=$2 \
     --checkpoint_path=$3 \
     &> ../infer.log & # dataset val folder path

@@ -13,36 +13,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ============================================================================
-run_ascend()
-{
-    if [ $2 -lt 1 ] && [ $2 -gt 8 ]
-    then
-        echo "error: DEVICE_NUM=$2 is not in (1-8)"
-        exit 1
-    fi
-
-    if [ ! -d $5 ]
-    then
-        echo "error: DATASET_PATH=$5 is not a directory"
-        exit 1
-    fi
-
-    BASEPATH=$(cd "`dirname $0`" || exit; pwd)
-    export PYTHONPATH=${BASEPATH}:$PYTHONPATH
-    if [ -d "../train" ];
-    then
-        rm -rf ../train
-    fi
-    mkdir ../train
-    cd ../train || exit
-    python ${BASEPATH}/../src/launch.py \
-        --nproc_per_node=$2 \
-        --visible_devices=$4 \
-        --server_id=$3 \
-        --training_script=${BASEPATH}/../train.py \
-        --dataset_path=$5 \
-        --platform=$1 &> ../train.log & # dataset train folder
-}
-
 run_gpu()
 {

@@ -71,24 +41,21 @@ run_gpu()
     mpirun -n $2 --allow-run-as-root \
     python ${BASEPATH}/../train.py \
         --dataset_path=$4 \
-        --platform=$1 \
+        --device_target=$1 \
         &> ../train.log & # dataset train folder
 }

 if [ $# -gt 5 ] || [ $# -lt 4 ]
 then
     echo "Usage:\n \
-          Ascend: sh run_train.sh Ascend [DEVICE_NUM] [SERVER_IP(x.x.x.x)] [VISIABLE_DEVICES(0,1,2,3,4,5,6,7)] [DATASET_PATH]\n \
           GPU: sh run_train.sh GPU [DEVICE_NUM] [VISIABLE_DEVICES(0,1,2,3,4,5,6,7)] [DATASET_PATH]\n \
           "
     exit 1
 fi

-if [ $1 = "Ascend" ] ; then
-    run_ascend "$@"
-elif [ $1 = "GPU" ] ; then
+if [ $1 = "GPU" ] ; then
     run_gpu "$@"
 else
-    echo "not support platform"
+    echo "Unsupported device_target"
 fi;

@@ -17,24 +17,6 @@ network config setting, will be used in train.py and eval.py
 """
 from easydict import EasyDict as ed

-config_ascend = ed({
-    "num_classes": 1000,
-    "image_height": 224,
-    "image_width": 224,
-    "batch_size": 256,
-    "epoch_size": 200,
-    "warmup_epochs": 4,
-    "lr": 0.4,
-    "momentum": 0.9,
-    "weight_decay": 4e-5,
-    "label_smooth": 0.1,
-    "loss_scale": 1024,
-    "save_checkpoint": True,
-    "save_checkpoint_epochs": 1,
-    "keep_checkpoint_max": 200,
-    "save_checkpoint_path": "./checkpoint",
-})
-
 config_gpu = ed({
     "num_classes": 1000,
     "image_height": 224,

@@ -15,14 +15,13 @@
 """
 create train or eval dataset.
 """
-import os
 import mindspore.common.dtype as mstype
 import mindspore.dataset.engine as de
 import mindspore.dataset.transforms.vision.c_transforms as C
 import mindspore.dataset.transforms.c_transforms as C2


-def create_dataset(dataset_path, do_train, config, platform, repeat_num=1, batch_size=32):
+def create_dataset(dataset_path, do_train, config, device_target, repeat_num=1, batch_size=32):
     """
     create a train or eval dataset

@@ -35,15 +34,7 @@ def create_dataset(dataset_path, do_train, config, platform, repeat_num=1, batch
     Returns:
         dataset
     """
-    if platform == "Ascend":
-        rank_size = int(os.getenv("RANK_SIZE"))
-        rank_id = int(os.getenv("RANK_ID"))
-        if rank_size == 1:
-            ds = de.ImageFolderDatasetV2(dataset_path, num_parallel_workers=8, shuffle=True)
-        else:
-            ds = de.ImageFolderDatasetV2(dataset_path, num_parallel_workers=8, shuffle=True,
-                                         num_shards=rank_size, shard_id=rank_id)
-    elif platform == "GPU":
+    if device_target == "GPU":
         if do_train:
             from mindspore.communication.management import get_rank, get_group_size
             ds = de.ImageFolderDatasetV2(dataset_path, num_parallel_workers=8, shuffle=True,

@@ -51,7 +42,7 @@ def create_dataset(dataset_path, do_train, config, platform, repeat_num=1, batch
         else:
             ds = de.ImageFolderDatasetV2(dataset_path, num_parallel_workers=8, shuffle=True)
     else:
-        raise ValueError("Unsupport platform.")
+        raise ValueError("Unsupported device_target.")

     resize_height = config.image_height
     resize_width = config.image_width

@@ -22,7 +22,6 @@ import numpy as np
 from mindspore import context
 from mindspore import Tensor
 from mindspore import nn
-from mindspore.parallel._auto_parallel_context import auto_parallel_context
 from mindspore.nn.optim.momentum import Momentum
 from mindspore.nn.loss import SoftmaxCrossEntropyWithLogits
 from mindspore.nn.loss.loss import _Loss

@@ -38,7 +37,7 @@ from mindspore.communication.management import init, get_group_size, get_rank

 from src.dataset import create_dataset
 from src.lr_generator import get_lr
-from src.config import config_gpu, config_ascend
+from src.config import config_gpu
 from src.mobilenetV3 import mobilenet_v3_large

 random.seed(1)

@@ -48,10 +47,10 @@ de.config.set_seed(1)
 parser = argparse.ArgumentParser(description='Image classification')
 parser.add_argument('--dataset_path', type=str, default=None, help='Dataset path')
 parser.add_argument('--pre_trained', type=str, default=None, help='Pretrained checkpoint path')
-parser.add_argument('--platform', type=str, default=None, help='run platform')
+parser.add_argument('--device_target', type=str, default=None, help='run device_target')
 args_opt = parser.parse_args()

-if args_opt.platform == "Ascend":
+if args_opt.device_target == "Ascend":
     device_id = int(os.getenv('DEVICE_ID'))
     rank_id = int(os.getenv('RANK_ID'))
     rank_size = int(os.getenv('RANK_SIZE'))

@@ -61,7 +60,7 @@ if args_opt.platform == "Ascend":
                         device_target="Ascend",
                         device_id=device_id,
                         save_graphs=False)
-elif args_opt.platform == "GPU":
+elif args_opt.device_target == "GPU":
     context.set_context(mode=context.GRAPH_MODE,
                         device_target="GPU",
                         save_graphs=False)

@@ -70,7 +69,7 @@ elif args_opt.platform == "GPU":
                                       parallel_mode=ParallelMode.DATA_PARALLEL,
                                       mirror_mean=True)
 else:
-    raise ValueError("Unsupport platform.")
+    raise ValueError("Unsupported device_target.")


 class CrossEntropyWithLabelSmooth(_Loss):

|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
if args_opt.platform == "GPU":
|
if args_opt.device_target == "GPU":
|
||||||
# train on gpu
|
# train on gpu
|
||||||
print("train args: ", args_opt)
|
print("train args: ", args_opt)
|
||||||
print("cfg: ", config_gpu)
|
print("cfg: ", config_gpu)
|
||||||
|
@ -180,7 +179,7 @@ if __name__ == '__main__':
|
||||||
dataset = create_dataset(dataset_path=args_opt.dataset_path,
|
dataset = create_dataset(dataset_path=args_opt.dataset_path,
|
||||||
do_train=True,
|
do_train=True,
|
||||||
config=config_gpu,
|
config=config_gpu,
|
||||||
platform=args_opt.platform,
|
device_target=args_opt.device_target,
|
||||||
repeat_num=1,
|
repeat_num=1,
|
||||||
batch_size=config_gpu.batch_size)
|
batch_size=config_gpu.batch_size)
|
||||||
step_size = dataset.get_dataset_size()
|
step_size = dataset.get_dataset_size()
|
||||||
|
@ -213,64 +212,3 @@ if __name__ == '__main__':
|
||||||
cb += [ckpt_cb]
|
cb += [ckpt_cb]
|
||||||
# begine train
|
# begine train
|
||||||
model.train(epoch_size, dataset, callbacks=cb)
|
model.train(epoch_size, dataset, callbacks=cb)
|
||||||
elif args_opt.platform == "Ascend":
|
|
||||||
# train on ascend
|
|
||||||
print("train args: ", args_opt, "\ncfg: ", config_ascend,
|
|
||||||
"\nparallel args: rank_id {}, device_id {}, rank_size {}".format(rank_id, device_id, rank_size))
|
|
||||||
|
|
||||||
if run_distribute:
|
|
||||||
context.set_auto_parallel_context(device_num=rank_size, parallel_mode=ParallelMode.DATA_PARALLEL,
|
|
||||||
parameter_broadcast=True, mirror_mean=True)
|
|
||||||
auto_parallel_context().set_all_reduce_fusion_split_indices([140])
|
|
||||||
init()
|
|
||||||
|
|
||||||
epoch_size = config_ascend.epoch_size
|
|
||||||
net = mobilenet_v3_large(num_classes=config_ascend.num_classes)
|
|
||||||
net.to_float(mstype.float16)
|
|
||||||
for _, cell in net.cells_and_names():
|
|
||||||
if isinstance(cell, nn.Dense):
|
|
||||||
cell.to_float(mstype.float32)
|
|
||||||
if config_ascend.label_smooth > 0:
|
|
||||||
loss = CrossEntropyWithLabelSmooth(
|
|
||||||
smooth_factor=config_ascend.label_smooth, num_classes=config.num_classes)
|
|
||||||
else:
|
|
||||||
loss = SoftmaxCrossEntropyWithLogits(
|
|
||||||
is_grad=False, sparse=True, reduction='mean')
|
|
||||||
dataset = create_dataset(dataset_path=args_opt.dataset_path,
|
|
||||||
do_train=True,
|
|
||||||
config=config_ascend,
|
|
||||||
platform=args_opt.platform,
|
|
||||||
repeat_num=1,
|
|
||||||
batch_size=config_ascend.batch_size)
|
|
||||||
step_size = dataset.get_dataset_size()
|
|
||||||
if args_opt.pre_trained:
|
|
||||||
param_dict = load_checkpoint(args_opt.pre_trained)
|
|
||||||
load_param_into_net(net, param_dict)
|
|
||||||
|
|
||||||
loss_scale = FixedLossScaleManager(
|
|
||||||
config_ascend.loss_scale, drop_overflow_update=False)
|
|
||||||
lr = Tensor(get_lr(global_step=0,
|
|
||||||
lr_init=0,
|
|
||||||
lr_end=0,
|
|
||||||
lr_max=config_ascend.lr,
|
|
||||||
warmup_epochs=config_ascend.warmup_epochs,
|
|
||||||
total_epochs=epoch_size,
|
|
||||||
steps_per_epoch=step_size))
|
|
||||||
opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), lr, config_ascend.momentum,
|
|
||||||
config_ascend.weight_decay, config_ascend.loss_scale)
|
|
||||||
|
|
||||||
model = Model(net, loss_fn=loss, optimizer=opt,
|
|
||||||
loss_scale_manager=loss_scale)
|
|
||||||
|
|
||||||
cb = None
|
|
||||||
if rank_id == 0:
|
|
||||||
cb = [Monitor(lr_init=lr.asnumpy())]
|
|
||||||
if config_ascend.save_checkpoint:
|
|
||||||
config_ck = CheckpointConfig(save_checkpoint_steps=config_ascend.save_checkpoint_epochs * step_size,
|
|
||||||
keep_checkpoint_max=config_ascend.keep_checkpoint_max)
|
|
||||||
ckpt_cb = ModelCheckpoint(
|
|
||||||
prefix="mobilenetV3", directory=config_ascend.save_checkpoint_path, config=config_ck)
|
|
||||||
cb += [ckpt_cb]
|
|
||||||
model.train(epoch_size, dataset, callbacks=cb)
|
|
||||||
else:
|
|
||||||
raise Exception
|
|
||||||
|
|
|
@@ -176,7 +176,7 @@ class Model:
     def _check_kwargs(self, kwargs):
         for arg in kwargs:
             if arg not in ['loss_scale_manager', 'keep_batchnorm_fp32']:
-                raise ValueError(f"Unsupport arg '{arg}'")
+                raise ValueError(f"Unsupported arg '{arg}'")

     def _build_train_network(self):
         """Build train network"""

@@ -1085,7 +1085,7 @@ Status DvppJsonConfigParser::InitWithJsonConfigImp(const std::string &json_confi
       return FAILED;
     }
   } else {
-    MSI_LOG_ERROR << "Unsupport op name " << op_name << ", expect resize, crop or crop_and_paste";
+    MSI_LOG_ERROR << "Unsupported op name " << op_name << ", expect resize, crop or crop_and_paste";
     return FAILED;
   }
   return SUCCESS;

@@ -169,7 +169,7 @@ class Model:
     def _check_kwargs(self, kwargs):
         for arg in kwargs:
             if arg not in ['loss_scale_manager', 'keep_batchnorm_fp32']:
-                raise ValueError(f"Unsupport arg '{arg}'")
+                raise ValueError(f"Unsupported arg '{arg}'")

     def _build_train_network(self):
         """Build train network"""