!23027 rectify improper import of ParallelMode

Merge pull request !23027 from gengdongjie/fix_issues
i-robot 2021-09-08 09:42:47 +00:00 committed by Gitee
commit e81a37facf
21 changed files with 47 additions and 54 deletions
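
All 21 files get the same one-line fix: the ParallelMode enum is defined in mindspore.context, not in mindspore.train.model, so each 'from mindspore.train.model import ParallelMode' (or the combined 'import Model, ParallelMode') is split so that Model keeps coming from mindspore.train.model while ParallelMode is imported from mindspore.context. A minimal sketch of the corrected pattern as these training scripts use it (the device_num value here is illustrative):

    from mindspore import context
    from mindspore.context import ParallelMode      # correct home of the ParallelMode enum
    from mindspore.train.model import Model         # Model stays in mindspore.train.model
    from mindspore.communication.management import init

    init()  # bring up the distributed communication backend
    # every script in this commit configures data parallelism this way
    context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL,
                                      gradients_mean=True,
                                      device_num=8)  # illustrative device count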


@@ -21,7 +21,7 @@ import matplotlib
 import numpy as np
 import cv2
-from mindspore.train.model import ParallelMode
+from mindspore.context import ParallelMode
 from mindspore.communication.management import init
 from mindspore import context
 from src.deep.feature_extractor import Extractor


@@ -24,7 +24,8 @@ import mindspore.nn as nn
 from mindspore import Tensor, context
 from mindspore.communication.management import init
 from mindspore.train.callback import CheckpointConfig, ModelCheckpoint, LossMonitor, TimeMonitor
-from mindspore.train.model import Model, ParallelMode
+from mindspore.train.model import Model
+from mindspore.context import ParallelMode
 from mindspore.common import set_seed
 from original_model import Net
 set_seed(1234)


@@ -28,8 +28,9 @@ from mindspore.nn.loss import SoftmaxCrossEntropyWithLogits
 from mindspore.nn.metrics import Accuracy
 from mindspore.nn.optim.adam import Adam
 from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor, TimeMonitor
-from mindspore.train.model import Model, ParallelMode
+from mindspore.train.model import Model
 from mindspore.train.serialization import load_checkpoint, load_param_into_net
+from mindspore.context import ParallelMode
 from src.cnn_direction_model import CNNDirectionModel
 from src.dataset import create_dataset_train
 from src.model_utils.config import config


@@ -22,10 +22,11 @@ import mindspore
 from mindspore import Tensor, context
 from mindspore.communication.management import get_group_size, get_rank, init
 from mindspore.nn import SGD, RMSProp
+from mindspore.context import ParallelMode
 from mindspore.train.callback import (CheckpointConfig, LossMonitor,
                                       ModelCheckpoint, TimeMonitor)
 from mindspore.train.loss_scale_manager import FixedLossScaleManager
-from mindspore.train.model import Model, ParallelMode
+from mindspore.train.model import Model
 from mindspore.train.serialization import load_checkpoint, load_param_into_net
 from src.config import basic_config, dataset_config
 from src.dataset import create_dataset


@@ -34,8 +34,8 @@ from mindspore.nn import RMSProp
 from mindspore.nn.loss import SoftmaxCrossEntropyWithLogits
 from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, TimeMonitor, LossMonitor
 from mindspore.train.loss_scale_manager import FixedLossScaleManager
-from mindspore.train.model import ParallelMode
 from mindspore.train.serialization import load_checkpoint, load_param_into_net
+from mindspore.context import ParallelMode
 os.environ['PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION'] = 'python'


@@ -19,7 +19,8 @@ from mindspore import context
 from mindspore import Tensor
 from mindspore.common import set_seed
 from mindspore.nn.optim.momentum import Momentum
-from mindspore.train.model import Model, ParallelMode
+from mindspore.context import ParallelMode
+from mindspore.train.model import Model
 from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, TimeMonitor, LossMonitor
 from mindspore.train.serialization import load_checkpoint, load_param_into_net
 from mindspore.communication.management import init, get_rank, get_group_size


@@ -25,7 +25,7 @@ from mindspore.common import set_seed
 from mindspore.train.model import Model
 from mindspore.train.callback import TimeMonitor, LossMonitor, CheckpointConfig, ModelCheckpoint
 from mindspore.communication.management import init, get_rank, get_group_size
-from mindspore.train.model import ParallelMode
+from mindspore.context import ParallelMode
 from src.config import srcnn_cfg as config
 from src.dataset import create_train_dataset


@@ -19,7 +19,8 @@ import time
 from mindspore import context
 from mindspore import Tensor
 from mindspore.nn.optim.momentum import Momentum
-from mindspore.train.model import Model, ParallelMode
+from mindspore.context import ParallelMode
+from mindspore.train.model import Model
 from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, TimeMonitor, LossMonitor
 from mindspore.train.serialization import load_checkpoint, load_param_into_net
 from mindspore.communication.management import init


@@ -22,7 +22,7 @@ from mindspore.train import Model
 from mindspore.common import set_seed
 from mindspore import context, Tensor
 import mindspore.common.dtype as mstype
-from mindspore.train.model import ParallelMode
+from mindspore.context import ParallelMode
 from mindspore.communication.management import init, get_rank, get_group_size
 from mindspore.train.serialization import load_checkpoint, load_param_into_net
 from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor, TimeMonitor


@@ -22,7 +22,8 @@ import numpy as np
 import mindspore.nn as nn
 from mindspore import context, Tensor
 import mindspore.ops as ops
-from mindspore.train.model import Model, ParallelMode
+from mindspore.context import ParallelMode
+from mindspore.train.model import Model
 from mindspore import dtype as mstype
 from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor, TimeMonitor
 from mindspore.communication.management import init


@@ -20,7 +20,8 @@ import argparse
 from mindspore import context
 from mindspore import Tensor
 from mindspore.nn import SGD, RMSProp
-from mindspore.train.model import Model, ParallelMode
+from mindspore.context import ParallelMode
+from mindspore.train.model import Model
 from mindspore.train.callback import ModelCheckpoint, CheckpointConfig
 from mindspore.train.serialization import load_checkpoint, load_param_into_net
 from mindspore.communication.management import init


@@ -24,7 +24,8 @@ import numpy as np
 from mindspore import Tensor
 from mindspore import context
 from mindspore import dataset as de
-from mindspore.train.model import Model, ParallelMode
+from mindspore.context import ParallelMode
+from mindspore.train.model import Model
 from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor, TimeMonitor
 from mindspore.train.loss_scale_manager import FixedLossScaleManager
 from mindspore.train.serialization import load_checkpoint, load_param_into_net


@@ -30,7 +30,8 @@ from mindspore.nn.metrics import Accuracy
 from mindspore.communication.management import init
 import mindspore.common.initializer as weight_init
 from mindspore.nn.optim.momentum import Momentum
-from mindspore.train.model import Model, ParallelMode
+from mindspore.context import ParallelMode
+from mindspore.train.model import Model
 from mindspore.train.loss_scale_manager import FixedLossScaleManager
 from mindspore.train.serialization import load_checkpoint, load_param_into_net
 from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor, TimeMonitor


@@ -21,7 +21,8 @@ import argparse
 import ast
 from mindspore import context
 from mindspore import Tensor
-from mindspore.train.model import Model, ParallelMode
+from mindspore.context import ParallelMode
+from mindspore.train.model import Model
 from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor, TimeMonitor
 from mindspore.train.loss_scale_manager import FixedLossScaleManager
 from mindspore.train.serialization import load_checkpoint, load_param_into_net


@@ -20,7 +20,8 @@ import os
 import mindspore.nn as nn
 from mindspore import context
-from mindspore.train.model import Model, ParallelMode
+from mindspore.context import ParallelMode
+from mindspore.train.model import Model
 from mindspore.train.serialization import load_checkpoint, load_param_into_net
 from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor, TimeMonitor, Callback
 from mindspore.nn.metrics import Accuracy


@@ -20,8 +20,8 @@ import numpy as np
 from mindspore.communication import init, get_rank
 from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, TimeMonitor, LossMonitor
-from mindspore.train.model import ParallelMode
 from mindspore.train.loss_scale_manager import FixedLossScaleManager
+from mindspore.context import ParallelMode
 from mindspore import Model
 from mindspore.nn.loss import SoftmaxCrossEntropyWithLogits
 from mindspore.nn import RMSProp


@@ -20,7 +20,8 @@ import argparse
 from mindspore import context
 from mindspore import Tensor
 from mindspore.nn import SGD, RMSProp
-from mindspore.train.model import Model, ParallelMode
+from mindspore.train.model import Model
+from mindspore.context import ParallelMode
 from mindspore.train.callback import ModelCheckpoint, CheckpointConfig
 from mindspore.train.serialization import load_checkpoint, load_param_into_net
 from mindspore.communication.management import init


@@ -29,7 +29,7 @@ from mindspore import context
 from mindspore import Tensor
 from mindspore.train.serialization import load_checkpoint, load_param_into_net
 from mindspore.communication.management import init
-from mindspore.train.model import ParallelMode
+from mindspore.context import ParallelMode
 from src.model import models
 from src.config import stgcn_chebconv_45min_cfg, stgcn_chebconv_30min_cfg, stgcn_chebconv_15min_cfg, stgcn_gcnconv_45min_cfg, stgcn_gcnconv_30min_cfg, stgcn_gcnconv_15min_cfg


@@ -28,7 +28,8 @@ import mindspore.nn as nn
 from mindspore import context
 from mindspore import Tensor
 from mindspore.communication.management import init
-from mindspore.train.model import Model, ParallelMode
+from mindspore.train.model import Model
+from mindspore.context import ParallelMode
 from mindspore.train.callback import CheckpointConfig, LossMonitor, ModelCheckpoint, TimeMonitor
 from mindspore.common import set_seed


@@ -19,7 +19,8 @@ import argparse
 import copy
 from mindspore.communication.management import init, get_rank, get_group_size
-from mindspore.train.model import ParallelMode, Model
+from mindspore.context import ParallelMode
+from mindspore.train.model import Model
 from mindspore.train.callback import TimeMonitor
 from mindspore.train.serialization import load_checkpoint, load_param_into_net
 from mindspore.train.loss_scale_manager import FixedLossScaleManager
@@ -101,18 +102,18 @@ parser.add_argument('--GPU', action='store_true', default=False,
                     help='Use GPU for training (default: False)')
-def main():
-    """Main entrance for training"""
+if __name__ == '__main__':
     args = parser.parse_args()
     print(sys.argv)
     devid, args.rank_id, args.rank_size = 0, 0, 1
     context.set_context(mode=context.GRAPH_MODE)
+    if args.GPU:
+        context.set_context(device_target='GPU')
     if args.distributed:
         if args.GPU:
             init("nccl")
-            context.set_context(device_target='GPU')
         else:
             init()
             devid = int(os.getenv('DEVICE_ID'))
@@ -125,15 +126,11 @@ def main():
         context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL,
                                           gradients_mean=True,
                                           device_num=args.rank_size)
-    else:
-        if args.GPU:
-            context.set_context(device_target='GPU')
     is_master = not args.distributed or (args.rank_id == 0)
     # parse model argument
-    assert args.model.startswith(
-        "tinynet"), "Only Tinynet models are supported."
+    assert args.model.startswith("tinynet"), "Only Tinynet models are supported."
     _, sub_name = args.model.split("_")
     net = tinynet(sub_model=sub_name,
                   num_classes=args.num_classes,
@@ -166,11 +163,9 @@ def main():
                   input_size=input_size)
     batches_per_epoch = train_dataset.get_dataset_size()
-    loss = LabelSmoothingCrossEntropy(
-        smooth_factor=args.smoothing, num_classes=args.num_classes)
+    loss = LabelSmoothingCrossEntropy(smooth_factor=args.smoothing, num_classes=args.num_classes)
     time_cb = TimeMonitor(data_size=batches_per_epoch)
-    loss_scale_manager = FixedLossScaleManager(
-        args.loss_scale, drop_overflow_update=False)
+    loss_scale_manager = FixedLossScaleManager(args.loss_scale, drop_overflow_update=False)
     lr_array = get_lr(base_lr=args.lr,
                       total_epochs=args.epochs,
@@ -181,26 +176,18 @@ def main():
                       warmup_lr_init=args.warmup_lr,
                       global_epoch=0)
     lr = Tensor(lr_array)
-    loss_cb = LossMonitor(lr_array,
-                          args.epochs,
-                          per_print_times=args.per_print_times,
-                          start_epoch=0)
+    loss_cb = LossMonitor(lr_array, args.epochs, per_print_times=args.per_print_times, start_epoch=0)
     param_group = add_weight_decay(net, weight_decay=args.weight_decay)
+    if is_master:
+        print(f'Using {args.opt} optimizer')
     if args.opt == 'sgd':
-        if is_master:
-            print('Using SGD optimizer')
-        optimizer = SGD(param_group,
-                        learning_rate=lr,
+        optimizer = SGD(param_group, learning_rate=lr,
                         momentum=args.momentum,
                         weight_decay=args.weight_decay,
                         loss_scale=args.loss_scale)
     elif args.opt == 'rmsprop':
-        if is_master:
-            print('Using rmsprop optimizer')
         optimizer = RMSProp(param_group,
                             learning_rate=lr,
                             decay=0.9,
@@ -239,12 +226,5 @@ def main():
     callbacks = [loss_cb, ema_cb, time_cb] if is_master else []
     if is_master:
-        print("Training on " + args.model
-              + " with " + str(args.num_classes) + " classes")
-    model.train(args.epochs, train_dataset, callbacks=callbacks,
-                dataset_sink_mode=args.dataset_sink)
-if __name__ == '__main__':
-    main()
+        print("Training on " + args.model + " with " + str(args.num_classes) + " classes")
+    model.train(args.epochs, train_dataset, callbacks=callbacks, dataset_sink_mode=args.dataset_sink)


@@ -18,7 +18,7 @@ import datetime
 import numpy as np
 from mindspore import context
 from mindspore import Tensor, Model
-from mindspore.train.model import ParallelMode
+from mindspore.context import ParallelMode
 from mindspore.nn.optim import Momentum
 from mindspore.common import dtype as mstype
 from mindspore.train.serialization import load_checkpoint