From 07117e4dd46890fb374a5be5e9df2c03d583c178 Mon Sep 17 00:00:00 2001
From: yao_yf
Date: Thu, 27 Aug 2020 17:15:34 +0800
Subject: [PATCH] Move ParallelMode to context

---
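A minimal usage sketch of the relocated class, assuming a MindSpore build that
already includes this patch and that context.set_auto_parallel_context() accepts
the parallel_mode keyword (the same attribute name queried elsewhere in this diff):

    from mindspore import context
    from mindspore.context import ParallelMode  # new home; was mindspore.train.parallel_utils

    # The mode constants are plain strings; ParallelMode.MODE_LIST collects all of them.
    context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL)
    assert context.get_auto_parallel_context("parallel_mode") == ParallelMode.DATA_PARALLEL

The old import paths are removed below, so downstream code has to switch to
mindspore.context.ParallelMode.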
 mindspore/context.py | 25 ++++++++++-
 mindspore/nn/layer/embedding.py | 8 ++--
 mindspore/nn/optim/optimizer.py | 2 +-
 mindspore/nn/wrap/cell_wrapper.py | 2 +-
 mindspore/nn/wrap/grad_reducer.py | 3 +-
 mindspore/nn/wrap/loss_scale.py | 2 +-
 mindspore/train/__init__.py | 3 +-
 mindspore/train/amp.py | 2 +-
 mindspore/train/model.py | 2 +-
 mindspore/train/parallel_utils.py | 41 ------------------
 model_zoo/official/cv/deeplabv3/train.py | 3 +-
 model_zoo/official/cv/faster_rcnn/train.py | 3 +-
 model_zoo/official/cv/googlenet/train.py | 3 +-
 model_zoo/official/cv/inceptionv3/train.py | 2 +-
 model_zoo/official/cv/maskrcnn/train.py | 3 +-
 model_zoo/official/cv/mobilenetv2/train.py | 3 +-
 .../official/cv/mobilenetv2_quant/train.py | 3 +-
 model_zoo/official/cv/mobilenetv3/train.py | 3 +-
 model_zoo/official/cv/resnet/train.py | 3 +-
 model_zoo/official/cv/resnet50_quant/train.py | 3 +-
 .../cv/resnet_thor/src/grad_reducer_thor.py | 3 +-
 .../official/cv/resnet_thor/src/model_thor.py | 2 +-
 model_zoo/official/cv/resnet_thor/train.py | 2 +-
 model_zoo/official/cv/resnext50/train.py | 2 +-
 model_zoo/official/cv/ssd/src/ssd.py | 3 +-
 model_zoo/official/cv/ssd/train.py | 3 +-
 model_zoo/official/cv/vgg16/train.py | 3 +-
 .../cv/warpctc/src/warpctc_for_train.py | 2 +-
 model_zoo/official/cv/warpctc/train.py | 3 +-
 .../official/cv/yolov3_darknet53/eval.py | 2 +-
 .../official/cv/yolov3_darknet53/src/yolo.py | 3 +-
 .../official/cv/yolov3_darknet53/train.py | 2 +-
 .../cv/yolov3_darknet53_quant/eval.py | 2 +-
 .../cv/yolov3_darknet53_quant/src/yolo.py | 3 +-
 .../cv/yolov3_darknet53_quant/train.py | 2 +-
 .../official/cv/yolov3_resnet18/src/yolov3.py | 3 +-
 .../official/cv/yolov3_resnet18/train.py | 3 +-
 model_zoo/official/nlp/bert/run_pretrain.py | 2 +-
 .../nlp/bert/src/bert_for_finetune.py | 2 +-
 .../nlp/bert/src/bert_for_pre_training.py | 2 +-
 .../official/nlp/bert_thor/run_pretrain.py | 2 +-
 .../bert_thor/src/bert_for_pre_training.py | 2 +-
 .../nlp/bert_thor/src/grad_reducer_thor.py | 3 +-
 .../official/nlp/bert_thor/src/model_thor.py | 2 +-
 .../src/transformer/transformer_for_train.py | 2 +-
 model_zoo/official/nlp/mass/train.py | 3 +-
 .../nlp/tinybert/run_general_distill.py | 2 +-
 .../nlp/tinybert/src/tinybert_for_gd_td.py | 2 +-
 .../transformer/src/transformer_for_train.py | 2 +-
 model_zoo/official/nlp/transformer/train.py | 2 +-
 model_zoo/official/recommend/deepfm/train.py | 3 +-
 .../recommend/wide_and_deep/src/callbacks.py | 2 +-
 .../wide_and_deep/src/wide_and_deep.py | 2 +-
 .../train_and_eval_auto_parallel.py | 2 +-
 .../train_and_eval_distribute.py | 2 +-
 .../train_and_eval_parameter_server.py | 2 +-
 .../src/wide_and_deep.py | 2 +-
 .../train_and_eval_distribute.py | 2 +-
 .../st/auto_parallel/resnet50_expand_loss.py | 3 +-
 tests/st/mem_reuse/resnet_cifar_memreuse.py | 3 +-
 tests/st/mem_reuse/resnet_cifar_normal.py | 3 +-
 .../train_and_test_multinpu_ci.py | 2 +-
 .../python_file_for_ci/wide_and_deep.py | 2 +-
 ...rain_and_test_multinpu_ci_data_parallel.py | 2 +-
 tests/st/model_zoo_tests/yolov3/src/yolov3.py | 3 +-
 .../models/bert/src/bert_for_pre_training.py | 2 +-
 tests/st/networks/models/bert/src/utils.py | 2 +-
 .../resnet50/src_thor/dataset_helper.py | 2 +-
 .../resnet50/src_thor/grad_reducer_thor.py | 3 +-
 .../models/resnet50/src_thor/model_thor.py | 2 +-
 .../models/resnet50/test_resnet50_imagenet.py | 3 +-
 tests/st/tbe_networks/resnet_cifar.py | 3 +-
 tests/st/tbe_networks/test_resnet_cifar_8p.py | 3 +-
 .../communication/test_data_parallel_dense.py | 2 +-
 .../communication/test_data_parallel_lenet.py | 3 +-
 .../test_data_parallel_resnet.py | 3 +-
 tests/ut/python/model/test_mix_precision.py | 2 +-
 .../python/parallel/test_allreduce_fusion.py | 3 +-
 tests/ut/python/parallel/test_alltoall.py | 3 +-
 .../parallel/test_auto_parallel_onehot.py | 3 +-
 .../parallel/test_auto_parallel_resnet.py | 3 +-
 .../parallel/test_batchnorm_batch_parallel.py | 3 +-
 .../ut/python/parallel/test_bn_prelu_cell.py | 2 +-
 .../python/parallel/test_dataset_interface.py | 3 +-
 tests/ut/python/parallel/test_full_batch.py | 3 +-
 .../parallel/test_gather_v2_primitive.py | 3 +-
 tests/ut/python/parallel/test_loss_scale.py | 3 +-
 tests/ut/python/parallel/test_one_dev.py | 3 +-
 tests/ut/python/parallel/test_one_hot_net.py | 3 +-
 .../parallel/test_operator_model_parallel.py | 3 +-
 tests/ut/python/parallel/test_optimizer.py | 2 +-
 tests/ut/python/parallel/test_prelu_cell.py | 3 +-
 tests/ut/python/parallel/test_reshape.py | 3 +-
 tests/ut/python/parallel/test_transpose.py | 3 +-
 tests/ut/python/train/test_amp.py | 3 +-
 95 files changed, 168 insertions(+), 139 deletions(-)
 delete mode 100644 mindspore/train/parallel_utils.py

diff --git a/mindspore/context.py b/mindspore/context.py
index 985270d1fa..361c48c09b 100644
--- a/mindspore/context.py
+++ b/mindspore/context.py
@@ -28,7 +28,7 @@ from mindspore.parallel._auto_parallel_context import _set_auto_parallel_context
     _reset_auto_parallel_context
 
 __all__ = ['GRAPH_MODE', 'PYNATIVE_MODE', 'set_context', 'get_context', 'set_auto_parallel_context',
-           'get_auto_parallel_context', 'reset_auto_parallel_context']
+           'get_auto_parallel_context', 'reset_auto_parallel_context', 'ParallelMode']
 
 GRAPH_MODE = 0
 PYNATIVE_MODE = 1
@@ -647,3 +647,26 @@ def get_context(attr_key):
         raise ValueError(
             "Get context keyword %s is not recognized!" % attr_key)
     return getattr(_context(), attr_key)
+
+class ParallelMode:
+    """
+    Parallel mode options.
+
+    There are five kinds of parallel modes, "STAND_ALONE", "DATA_PARALLEL",
+    "HYBRID_PARALLEL", "SEMI_AUTO_PARALLEL" and "AUTO_PARALLEL". Default: "STAND_ALONE".
+
+    - STAND_ALONE: Only one processor working.
+    - DATA_PARALLEL: Distributing the data across different processors.
+    - HYBRID_PARALLEL: Achieving data parallelism and model parallelism manually.
+    - SEMI_AUTO_PARALLEL: Achieving data parallelism and model parallelism by setting parallel strategies.
+    - AUTO_PARALLEL: Achieving parallelism automatically.
+
+    MODE_LIST: The list for all supported parallel modes.
+ """ + + STAND_ALONE = "stand_alone" + DATA_PARALLEL = "data_parallel" + HYBRID_PARALLEL = "hybrid_parallel" + SEMI_AUTO_PARALLEL = "semi_auto_parallel" + AUTO_PARALLEL = "auto_parallel" + MODE_LIST = [STAND_ALONE, DATA_PARALLEL, HYBRID_PARALLEL, SEMI_AUTO_PARALLEL, AUTO_PARALLEL] diff --git a/mindspore/nn/layer/embedding.py b/mindspore/nn/layer/embedding.py index f5be4d9a25..b721df626f 100755 --- a/mindspore/nn/layer/embedding.py +++ b/mindspore/nn/layer/embedding.py @@ -20,7 +20,7 @@ from mindspore.common.parameter import Parameter from mindspore.common.initializer import initializer from mindspore._checkparam import Validator from mindspore.communication.management import get_group_size -from mindspore.train.parallel_utils import ParallelMode +from mindspore.context import ParallelMode from mindspore.parallel._utils import _get_parallel_mode from ..cell import Cell from ..._checkparam import Validator as validator, Rel @@ -129,9 +129,9 @@ class EmbeddingLookup(Cell): embedding_size (int): The size of each embedding vector. param_init (str): The initialize way of embedding table. Default: 'normal'. target (str): Specify the target where the op is executed. The value should in - ['DEVICE', 'CPU']. Default: 'CPU'. - slice_mode (str): The slicing way in semi auto parallel/auto parallel. The value should get through - nn.EmbeddingLookUpSplitMode. Default: 'batch_slice'. + ['DEVICE', 'CPU']. Default: 'CPU'. + slice_mode (str): The slicing way in semi_auto_parallel/auto_parallel. The value should get through + nn.EmbeddingLookUpSplitMode. Default: nn.EmbeddingLookUpSplitMode.BATCH_SLICE. manual_shapes (tuple): The accompaniment array in field slice mode. Inputs: diff --git a/mindspore/nn/optim/optimizer.py b/mindspore/nn/optim/optimizer.py index 7cbc9fa097..c60498df21 100755 --- a/mindspore/nn/optim/optimizer.py +++ b/mindspore/nn/optim/optimizer.py @@ -29,7 +29,7 @@ from mindspore._checkparam import Validator as validator from mindspore._checkparam import Rel from mindspore import log as logger from mindspore.parallel._utils import _get_global_rank, _get_device_num, _get_parallel_mode -from mindspore.train.parallel_utils import ParallelMode +from mindspore.context import ParallelMode from mindspore import context from mindspore.nn.learning_rate_schedule import LearningRateSchedule diff --git a/mindspore/nn/wrap/cell_wrapper.py b/mindspore/nn/wrap/cell_wrapper.py index c32b7eadd9..d24c166caf 100644 --- a/mindspore/nn/wrap/cell_wrapper.py +++ b/mindspore/nn/wrap/cell_wrapper.py @@ -15,7 +15,7 @@ """Cell_wrapper.""" from mindspore.parallel._utils import (_get_device_num, _get_mirror_mean, _get_parallel_mode) -from mindspore.train.parallel_utils import ParallelMode +from mindspore.context import ParallelMode from ...common import dtype as mstype from ...common.parameter import Parameter, ParameterTuple from ...ops import composite as C diff --git a/mindspore/nn/wrap/grad_reducer.py b/mindspore/nn/wrap/grad_reducer.py index 8e8a9ef756..345854a858 100644 --- a/mindspore/nn/wrap/grad_reducer.py +++ b/mindspore/nn/wrap/grad_reducer.py @@ -251,8 +251,9 @@ class DistributedGradReducer(Cell): >>> from mindspore.ops import operations as P >>> from mindspore.ops import functional as F >>> from mindspore import context + >>> from mindspore.context import ParallelMode >>> from mindspore import nn - >>> from mindspore import ParallelMode, ParameterTuple + >>> from mindspore import ParameterTuple >>> >>> device_id = int(os.environ["DEVICE_ID"]) >>> context.set_context(mode=context.GRAPH_MODE, 
device_target="Ascend", save_graphs=True, diff --git a/mindspore/nn/wrap/loss_scale.py b/mindspore/nn/wrap/loss_scale.py index 88a0f26a34..999873ea6e 100644 --- a/mindspore/nn/wrap/loss_scale.py +++ b/mindspore/nn/wrap/loss_scale.py @@ -15,7 +15,7 @@ """Loss scale cell for loss scale training.""" import mindspore.context as context from mindspore.nn.wrap.grad_reducer import DistributedGradReducer -from mindspore.train.parallel_utils import ParallelMode +from mindspore.context import ParallelMode from mindspore.parallel._utils import _get_device_num, _get_parallel_mode, _get_mirror_mean from ..cell import Cell from ...common import Tensor, RowTensor diff --git a/mindspore/train/__init__.py b/mindspore/train/__init__.py index 1895743e20..ea5910be07 100644 --- a/mindspore/train/__init__.py +++ b/mindspore/train/__init__.py @@ -18,8 +18,7 @@ High-Level training interfaces. Helper functions in train piplines. """ from .model import Model -from .parallel_utils import ParallelMode from .dataset_helper import DatasetHelper from . import amp -__all__ = ["Model", "ParallelMode", "DatasetHelper", "amp"] +__all__ = ["Model", "DatasetHelper", "amp"] diff --git a/mindspore/train/amp.py b/mindspore/train/amp.py index e2da1618bf..edbb49894f 100644 --- a/mindspore/train/amp.py +++ b/mindspore/train/amp.py @@ -23,7 +23,7 @@ from ..nn.wrap.cell_wrapper import _VirtualDatasetCell from ..ops import functional as F from ..parallel._utils import _get_parallel_mode from .loss_scale_manager import DynamicLossScaleManager, LossScaleManager -from .parallel_utils import ParallelMode +from ..context import ParallelMode from .. import context __all__ = ["build_train_network"] diff --git a/mindspore/train/model.py b/mindspore/train/model.py index 324b09b453..453e1f64d8 100755 --- a/mindspore/train/model.py +++ b/mindspore/train/model.py @@ -30,7 +30,7 @@ from ..parallel._utils import _get_parallel_mode, _get_device_num, _get_global_r from ..nn.metrics import Loss from .. import nn from ..nn.wrap.cell_wrapper import _VirtualDatasetCell -from .parallel_utils import ParallelMode +from ..context import ParallelMode from ..parallel._utils import _need_to_full, _to_full_tensor from ..common import dtype as mstype from .dataset_helper import DatasetHelper diff --git a/mindspore/train/parallel_utils.py b/mindspore/train/parallel_utils.py deleted file mode 100644 index 4f460a5d98..0000000000 --- a/mindspore/train/parallel_utils.py +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright 2020 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ -"""Parallel utils""" - -__all__ = ["ParallelMode"] - - -class ParallelMode: - """ - Parallel mode options. - - There are five kinds of parallel modes, "STAND_ALONE", "DATA_PARALLEL", - "HYBRID_PARALLEL", "SEMI_AUTO_PARALLEL" and "AUTO_PARALLEL". Default: "STAND_ALONE". - - - STAND_ALONE: Only one processor working. - - DATA_PARALLEL: Distributing the data across different processors. 
- - HYBRID_PARALLEL: Achieving data parallelism and model parallelism manually. - - SEMI_AUTO_PARALLEL: Achieving data parallelism and model parallelism by setting parallel strategies. - - AUTO_PARALLEL: Achieving parallelism automatically. - - MODE_LIST: The list for all supported parallel modes. - """ - - STAND_ALONE = "stand_alone" - DATA_PARALLEL = "data_parallel" - HYBRID_PARALLEL = "hybrid_parallel" - SEMI_AUTO_PARALLEL = "semi_auto_parallel" - AUTO_PARALLEL = "auto_parallel" - MODE_LIST = [STAND_ALONE, DATA_PARALLEL, HYBRID_PARALLEL, SEMI_AUTO_PARALLEL, AUTO_PARALLEL] diff --git a/model_zoo/official/cv/deeplabv3/train.py b/model_zoo/official/cv/deeplabv3/train.py index 56ef5b02bb..0269d5f540 100644 --- a/model_zoo/official/cv/deeplabv3/train.py +++ b/model_zoo/official/cv/deeplabv3/train.py @@ -17,7 +17,8 @@ import argparse from mindspore import context from mindspore.communication.management import init from mindspore.nn.optim.momentum import Momentum -from mindspore import Model, ParallelMode +from mindspore import Model +from mindspore.context import ParallelMode from mindspore.train.serialization import load_checkpoint, load_param_into_net from mindspore.train.callback import Callback, CheckpointConfig, ModelCheckpoint, TimeMonitor from src.md_dataset import create_dataset diff --git a/model_zoo/official/cv/faster_rcnn/train.py b/model_zoo/official/cv/faster_rcnn/train.py index d48466f621..52995c8bda 100644 --- a/model_zoo/official/cv/faster_rcnn/train.py +++ b/model_zoo/official/cv/faster_rcnn/train.py @@ -25,7 +25,8 @@ import mindspore.common.dtype as mstype from mindspore import context, Tensor from mindspore.communication.management import init from mindspore.train.callback import CheckpointConfig, ModelCheckpoint, TimeMonitor -from mindspore.train import Model, ParallelMode +from mindspore.train import Model +from mindspore.context import ParallelMode from mindspore.train.serialization import load_checkpoint, load_param_into_net from mindspore.nn import SGD import mindspore.dataset.engine as de diff --git a/model_zoo/official/cv/googlenet/train.py b/model_zoo/official/cv/googlenet/train.py index 5181f9c484..442a4262e5 100644 --- a/model_zoo/official/cv/googlenet/train.py +++ b/model_zoo/official/cv/googlenet/train.py @@ -28,7 +28,8 @@ from mindspore import context from mindspore.communication.management import init, get_rank from mindspore.nn.optim.momentum import Momentum from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor, TimeMonitor -from mindspore.train.model import Model, ParallelMode +from mindspore.train.model import Model +from mindspore.context import ParallelMode from mindspore.train.serialization import load_checkpoint, load_param_into_net from src.config import cifar_cfg as cfg diff --git a/model_zoo/official/cv/inceptionv3/train.py b/model_zoo/official/cv/inceptionv3/train.py index f2d2256eef..db7a0d0854 100644 --- a/model_zoo/official/cv/inceptionv3/train.py +++ b/model_zoo/official/cv/inceptionv3/train.py @@ -21,7 +21,7 @@ import numpy as np import mindspore.nn as nn from mindspore import Tensor from mindspore import context -from mindspore import ParallelMode +from mindspore.context import ParallelMode from mindspore.communication.management import init, get_rank, get_group_size from mindspore.nn.optim.rmsprop import RMSProp from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor, TimeMonitor diff --git a/model_zoo/official/cv/maskrcnn/train.py b/model_zoo/official/cv/maskrcnn/train.py index 
8df2fab00e..06c5e87b4c 100644 --- a/model_zoo/official/cv/maskrcnn/train.py +++ b/model_zoo/official/cv/maskrcnn/train.py @@ -24,7 +24,8 @@ import mindspore.common.dtype as mstype from mindspore import context, Tensor from mindspore.communication.management import init from mindspore.train.callback import CheckpointConfig, ModelCheckpoint, TimeMonitor -from mindspore.train import Model, ParallelMode +from mindspore.train import Model +from mindspore.context import ParallelMode from mindspore.train.serialization import load_checkpoint, load_param_into_net from mindspore.nn import SGD import mindspore.dataset.engine as de diff --git a/model_zoo/official/cv/mobilenetv2/train.py b/model_zoo/official/cv/mobilenetv2/train.py index 4fb800d6dd..02935921e3 100644 --- a/model_zoo/official/cv/mobilenetv2/train.py +++ b/model_zoo/official/cv/mobilenetv2/train.py @@ -30,7 +30,8 @@ from mindspore.nn.loss.loss import _Loss from mindspore.ops import operations as P from mindspore.ops import functional as F from mindspore.common import dtype as mstype -from mindspore.train.model import Model, ParallelMode +from mindspore.train.model import Model +from mindspore.context import ParallelMode from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, Callback from mindspore.train.loss_scale_manager import FixedLossScaleManager from mindspore.train.serialization import load_checkpoint, load_param_into_net diff --git a/model_zoo/official/cv/mobilenetv2_quant/train.py b/model_zoo/official/cv/mobilenetv2_quant/train.py index ebe60996cf..0674625382 100644 --- a/model_zoo/official/cv/mobilenetv2_quant/train.py +++ b/model_zoo/official/cv/mobilenetv2_quant/train.py @@ -22,7 +22,8 @@ import numpy as np from mindspore import context from mindspore import Tensor from mindspore import nn -from mindspore.train.model import Model, ParallelMode +from mindspore.train.model import Model +from mindspore.context import ParallelMode from mindspore.train.loss_scale_manager import FixedLossScaleManager from mindspore.train.callback import ModelCheckpoint, CheckpointConfig from mindspore.train.serialization import load_checkpoint diff --git a/model_zoo/official/cv/mobilenetv3/train.py b/model_zoo/official/cv/mobilenetv3/train.py index 60f3723244..5bd8c974c3 100644 --- a/model_zoo/official/cv/mobilenetv3/train.py +++ b/model_zoo/official/cv/mobilenetv3/train.py @@ -28,7 +28,8 @@ from mindspore.nn.loss.loss import _Loss from mindspore.ops import operations as P from mindspore.ops import functional as F from mindspore.common import dtype as mstype -from mindspore.train.model import Model, ParallelMode +from mindspore.train.model import Model +from mindspore.context import ParallelMode from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, Callback from mindspore.train.loss_scale_manager import FixedLossScaleManager from mindspore.train.serialization import load_checkpoint, load_param_into_net diff --git a/model_zoo/official/cv/resnet/train.py b/model_zoo/official/cv/resnet/train.py index 0a891b9163..addb865573 100755 --- a/model_zoo/official/cv/resnet/train.py +++ b/model_zoo/official/cv/resnet/train.py @@ -22,7 +22,8 @@ from mindspore import Tensor from mindspore import dataset as de from mindspore.parallel._auto_parallel_context import auto_parallel_context from mindspore.nn.optim.momentum import Momentum -from mindspore.train.model import Model, ParallelMode +from mindspore.train.model import Model +from mindspore.context import ParallelMode from mindspore.train.callback import ModelCheckpoint, 
CheckpointConfig, LossMonitor, TimeMonitor from mindspore.nn.loss import SoftmaxCrossEntropyWithLogits from mindspore.train.loss_scale_manager import FixedLossScaleManager diff --git a/model_zoo/official/cv/resnet50_quant/train.py b/model_zoo/official/cv/resnet50_quant/train.py index 2e13ec37ff..553597f4c0 100755 --- a/model_zoo/official/cv/resnet50_quant/train.py +++ b/model_zoo/official/cv/resnet50_quant/train.py @@ -21,7 +21,8 @@ from mindspore import context from mindspore import Tensor from mindspore.parallel._auto_parallel_context import auto_parallel_context from mindspore.nn.optim.momentum import Momentum -from mindspore.train.model import Model, ParallelMode +from mindspore.train.model import Model +from mindspore.context import ParallelMode from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor, TimeMonitor from mindspore.train.loss_scale_manager import FixedLossScaleManager from mindspore.train.serialization import load_checkpoint diff --git a/model_zoo/official/cv/resnet_thor/src/grad_reducer_thor.py b/model_zoo/official/cv/resnet_thor/src/grad_reducer_thor.py index 86ee3fcc8f..35cbaa1460 100644 --- a/model_zoo/official/cv/resnet_thor/src/grad_reducer_thor.py +++ b/model_zoo/official/cv/resnet_thor/src/grad_reducer_thor.py @@ -102,7 +102,8 @@ class DistributedGradReducerThor(Cell): >>> from mindspore.ops import functional as F >>> from mindspore import context >>> from mindspore import nn - >>> from mindspore import ParallelMode, ParameterTuple + >>> from mindspore import ParameterTuple + >>> from mindspore.context import ParallelMode >>> >>> device_id = int(os.environ["DEVICE_ID"]) >>> context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", save_graphs=True, diff --git a/model_zoo/official/cv/resnet_thor/src/model_thor.py b/model_zoo/official/cv/resnet_thor/src/model_thor.py index 4433dd38ac..1b86acf51f 100644 --- a/model_zoo/official/cv/resnet_thor/src/model_thor.py +++ b/model_zoo/official/cv/resnet_thor/src/model_thor.py @@ -18,7 +18,7 @@ import math from mindspore.train.callback import RunContext from mindspore import context from mindspore import nn -from mindspore.train.parallel_utils import ParallelMode +from mindspore.context import ParallelMode from mindspore.train.model import Model from mindspore.parallel._utils import _need_to_full, _to_full_tensor from mindspore.common.dtype import pytype_to_dtype diff --git a/model_zoo/official/cv/resnet_thor/train.py b/model_zoo/official/cv/resnet_thor/train.py index d7c667dffa..48dd86d7a0 100644 --- a/model_zoo/official/cv/resnet_thor/train.py +++ b/model_zoo/official/cv/resnet_thor/train.py @@ -22,7 +22,7 @@ from mindspore import context from mindspore import Tensor from mindspore import dataset as de from mindspore.parallel._auto_parallel_context import auto_parallel_context -from mindspore.train.model import ParallelMode +from mindspore.context import ParallelMode from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, TimeMonitor, LossMonitor from mindspore.train.loss_scale_manager import FixedLossScaleManager from mindspore.communication.management import init, get_rank, get_group_size diff --git a/model_zoo/official/cv/resnext50/train.py b/model_zoo/official/cv/resnext50/train.py index d2cb72d5d2..c328939e30 100644 --- a/model_zoo/official/cv/resnext50/train.py +++ b/model_zoo/official/cv/resnext50/train.py @@ -20,7 +20,7 @@ import datetime import mindspore.nn as nn from mindspore import Tensor, context -from mindspore import ParallelMode +from mindspore.context 
import ParallelMode from mindspore.nn.optim import Momentum from mindspore.communication.management import init, get_rank, get_group_size from mindspore.train.callback import ModelCheckpoint diff --git a/model_zoo/official/cv/ssd/src/ssd.py b/model_zoo/official/cv/ssd/src/ssd.py index fca8a1948d..8fbca66c5c 100644 --- a/model_zoo/official/cv/ssd/src/ssd.py +++ b/model_zoo/official/cv/ssd/src/ssd.py @@ -19,6 +19,7 @@ import mindspore.common.dtype as mstype import mindspore as ms import mindspore.nn as nn from mindspore import Parameter, context, Tensor +from mindspore.context import ParallelMode from mindspore.parallel._auto_parallel_context import auto_parallel_context from mindspore.communication.management import get_group_size from mindspore.ops import operations as P @@ -388,7 +389,7 @@ class TrainingWrapper(nn.Cell): self.reducer_flag = False self.grad_reducer = None self.parallel_mode = context.get_auto_parallel_context("parallel_mode") - if self.parallel_mode in [ms.ParallelMode.DATA_PARALLEL, ms.ParallelMode.HYBRID_PARALLEL]: + if self.parallel_mode in [ParallelMode.DATA_PARALLEL, ParallelMode.HYBRID_PARALLEL]: self.reducer_flag = True if self.reducer_flag: mean = context.get_auto_parallel_context("mirror_mean") diff --git a/model_zoo/official/cv/ssd/train.py b/model_zoo/official/cv/ssd/train.py index 73fd3391a1..896d459f8a 100644 --- a/model_zoo/official/cv/ssd/train.py +++ b/model_zoo/official/cv/ssd/train.py @@ -21,7 +21,8 @@ import mindspore.nn as nn from mindspore import context, Tensor from mindspore.communication.management import init from mindspore.train.callback import CheckpointConfig, ModelCheckpoint, LossMonitor, TimeMonitor -from mindspore.train import Model, ParallelMode +from mindspore.train import Model +from mindspore.context import ParallelMode from mindspore.train.serialization import load_checkpoint, load_param_into_net from src.ssd import SSD300, SSDWithLossCell, TrainingWrapper, ssd_mobilenet_v2 from src.config import config diff --git a/model_zoo/official/cv/vgg16/train.py b/model_zoo/official/cv/vgg16/train.py index ae2f934e1e..d911f3388c 100644 --- a/model_zoo/official/cv/vgg16/train.py +++ b/model_zoo/official/cv/vgg16/train.py @@ -29,7 +29,8 @@ from mindspore import context from mindspore.communication.management import init, get_rank, get_group_size from mindspore.nn.optim.momentum import Momentum from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor, TimeMonitor -from mindspore.train.model import Model, ParallelMode +from mindspore.train.model import Model +from mindspore.context import ParallelMode from mindspore.train.serialization import load_param_into_net, load_checkpoint from mindspore.train.loss_scale_manager import FixedLossScaleManager from src.dataset import vgg_create_dataset diff --git a/model_zoo/official/cv/warpctc/src/warpctc_for_train.py b/model_zoo/official/cv/warpctc/src/warpctc_for_train.py index 8391ffe676..1640342fad 100755 --- a/model_zoo/official/cv/warpctc/src/warpctc_for_train.py +++ b/model_zoo/official/cv/warpctc/src/warpctc_for_train.py @@ -16,7 +16,7 @@ import numpy as np from mindspore.parallel._utils import (_get_device_num, _get_mirror_mean, _get_parallel_mode) -from mindspore.train.parallel_utils import ParallelMode +from mindspore.context import ParallelMode from mindspore.common import dtype as mstype from mindspore.ops import composite as C from mindspore.ops import functional as F diff --git a/model_zoo/official/cv/warpctc/train.py b/model_zoo/official/cv/warpctc/train.py index 
380308653f..6344368ace 100755 --- a/model_zoo/official/cv/warpctc/train.py +++ b/model_zoo/official/cv/warpctc/train.py @@ -21,7 +21,8 @@ import numpy as np import mindspore.nn as nn from mindspore import context from mindspore import dataset as de -from mindspore.train.model import Model, ParallelMode +from mindspore.train.model import Model +from mindspore.context import ParallelMode from mindspore.nn.wrap import WithLossCell from mindspore.train.callback import TimeMonitor, LossMonitor, CheckpointConfig, ModelCheckpoint from mindspore.communication.management import init, get_group_size, get_rank diff --git a/model_zoo/official/cv/yolov3_darknet53/eval.py b/model_zoo/official/cv/yolov3_darknet53/eval.py index f04ed2447c..1688594c7c 100644 --- a/model_zoo/official/cv/yolov3_darknet53/eval.py +++ b/model_zoo/official/cv/yolov3_darknet53/eval.py @@ -25,7 +25,7 @@ from pycocotools.coco import COCO from pycocotools.cocoeval import COCOeval from mindspore import Tensor -from mindspore.train import ParallelMode +from mindspore.context import ParallelMode from mindspore import context from mindspore.train.serialization import load_checkpoint, load_param_into_net import mindspore as ms diff --git a/model_zoo/official/cv/yolov3_darknet53/src/yolo.py b/model_zoo/official/cv/yolov3_darknet53/src/yolo.py index eefa1e8bfa..eae0d9f028 100644 --- a/model_zoo/official/cv/yolov3_darknet53/src/yolo.py +++ b/model_zoo/official/cv/yolov3_darknet53/src/yolo.py @@ -17,6 +17,7 @@ import mindspore as ms import mindspore.nn as nn from mindspore.common.tensor import Tensor from mindspore import context +from mindspore.context import ParallelMode from mindspore.parallel._auto_parallel_context import auto_parallel_context from mindspore.communication.management import get_group_size from mindspore.ops import operations as P @@ -417,7 +418,7 @@ class TrainingWrapper(nn.Cell): self.reducer_flag = False self.grad_reducer = None self.parallel_mode = context.get_auto_parallel_context("parallel_mode") - if self.parallel_mode in [ms.ParallelMode.DATA_PARALLEL, ms.ParallelMode.HYBRID_PARALLEL]: + if self.parallel_mode in [ParallelMode.DATA_PARALLEL, ParallelMode.HYBRID_PARALLEL]: self.reducer_flag = True if self.reducer_flag: mean = context.get_auto_parallel_context("mirror_mean") diff --git a/model_zoo/official/cv/yolov3_darknet53/train.py b/model_zoo/official/cv/yolov3_darknet53/train.py index 92ac9f5353..54015d7229 100644 --- a/model_zoo/official/cv/yolov3_darknet53/train.py +++ b/model_zoo/official/cv/yolov3_darknet53/train.py @@ -18,7 +18,7 @@ import time import argparse import datetime -from mindspore import ParallelMode +from mindspore.context import ParallelMode from mindspore.nn.optim.momentum import Momentum from mindspore import Tensor import mindspore.nn as nn diff --git a/model_zoo/official/cv/yolov3_darknet53_quant/eval.py b/model_zoo/official/cv/yolov3_darknet53_quant/eval.py index 24260f6ee9..99f50fe370 100644 --- a/model_zoo/official/cv/yolov3_darknet53_quant/eval.py +++ b/model_zoo/official/cv/yolov3_darknet53_quant/eval.py @@ -25,7 +25,7 @@ from pycocotools.coco import COCO from pycocotools.cocoeval import COCOeval from mindspore import Tensor -from mindspore.train import ParallelMode +from mindspore.context import ParallelMode from mindspore import context from mindspore.train.serialization import load_checkpoint, load_param_into_net import mindspore as ms diff --git a/model_zoo/official/cv/yolov3_darknet53_quant/src/yolo.py b/model_zoo/official/cv/yolov3_darknet53_quant/src/yolo.py index 
e794218636..81b00303df 100644 --- a/model_zoo/official/cv/yolov3_darknet53_quant/src/yolo.py +++ b/model_zoo/official/cv/yolov3_darknet53_quant/src/yolo.py @@ -17,6 +17,7 @@ import mindspore as ms import mindspore.nn as nn from mindspore.common.tensor import Tensor from mindspore import context +from mindspore.context import ParallelMode from mindspore.parallel._auto_parallel_context import auto_parallel_context from mindspore.communication.management import get_group_size from mindspore.ops import operations as P @@ -417,7 +418,7 @@ class TrainingWrapper(nn.Cell): self.reducer_flag = False self.grad_reducer = None self.parallel_mode = context.get_auto_parallel_context("parallel_mode") - if self.parallel_mode in [ms.ParallelMode.DATA_PARALLEL, ms.ParallelMode.HYBRID_PARALLEL]: + if self.parallel_mode in [ParallelMode.DATA_PARALLEL, ParallelMode.HYBRID_PARALLEL]: self.reducer_flag = True if self.reducer_flag: mean = context.get_auto_parallel_context("mirror_mean") diff --git a/model_zoo/official/cv/yolov3_darknet53_quant/train.py b/model_zoo/official/cv/yolov3_darknet53_quant/train.py index 75d1eb0902..047c43f84c 100644 --- a/model_zoo/official/cv/yolov3_darknet53_quant/train.py +++ b/model_zoo/official/cv/yolov3_darknet53_quant/train.py @@ -19,7 +19,7 @@ import time import argparse import datetime -from mindspore import ParallelMode +from mindspore.context import ParallelMode from mindspore.nn.optim.momentum import Momentum from mindspore import Tensor from mindspore import context diff --git a/model_zoo/official/cv/yolov3_resnet18/src/yolov3.py b/model_zoo/official/cv/yolov3_resnet18/src/yolov3.py index c33ed1a0d3..2fe4c8f07c 100644 --- a/model_zoo/official/cv/yolov3_resnet18/src/yolov3.py +++ b/model_zoo/official/cv/yolov3_resnet18/src/yolov3.py @@ -19,6 +19,7 @@ import numpy as np import mindspore as ms import mindspore.nn as nn from mindspore import context, Tensor +from mindspore.context import ParallelMode from mindspore.parallel._auto_parallel_context import auto_parallel_context from mindspore.communication.management import get_group_size from mindspore.common.initializer import TruncatedNormal @@ -652,7 +653,7 @@ class TrainingWrapper(nn.Cell): self.reducer_flag = False self.grad_reducer = None self.parallel_mode = context.get_auto_parallel_context("parallel_mode") - if self.parallel_mode in [ms.ParallelMode.DATA_PARALLEL, ms.ParallelMode.HYBRID_PARALLEL]: + if self.parallel_mode in [ParallelMode.DATA_PARALLEL, ParallelMode.HYBRID_PARALLEL]: self.reducer_flag = True if self.reducer_flag: mean = context.get_auto_parallel_context("mirror_mean") diff --git a/model_zoo/official/cv/yolov3_resnet18/train.py b/model_zoo/official/cv/yolov3_resnet18/train.py index e0d309be9c..6aadb95327 100644 --- a/model_zoo/official/cv/yolov3_resnet18/train.py +++ b/model_zoo/official/cv/yolov3_resnet18/train.py @@ -29,7 +29,8 @@ import mindspore.nn as nn from mindspore import context, Tensor from mindspore.communication.management import init from mindspore.train.callback import CheckpointConfig, ModelCheckpoint, LossMonitor, TimeMonitor -from mindspore.train import Model, ParallelMode +from mindspore.train import Model +from mindspore.context import ParallelMode from mindspore.train.serialization import load_checkpoint, load_param_into_net from mindspore.common.initializer import initializer diff --git a/model_zoo/official/nlp/bert/run_pretrain.py b/model_zoo/official/nlp/bert/run_pretrain.py index 73b1021003..d644a1166c 100644 --- a/model_zoo/official/nlp/bert/run_pretrain.py +++ 
b/model_zoo/official/nlp/bert/run_pretrain.py @@ -24,7 +24,7 @@ import mindspore.communication.management as D import mindspore.common.dtype as mstype from mindspore import context from mindspore.train.model import Model -from mindspore.train.parallel_utils import ParallelMode +from mindspore.context import ParallelMode from mindspore.nn.wrap.loss_scale import DynamicLossScaleUpdateCell from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, TimeMonitor from mindspore.train.serialization import load_checkpoint, load_param_into_net diff --git a/model_zoo/official/nlp/bert/src/bert_for_finetune.py b/model_zoo/official/nlp/bert/src/bert_for_finetune.py index 1b147982f0..886cc15398 100644 --- a/model_zoo/official/nlp/bert/src/bert_for_finetune.py +++ b/model_zoo/official/nlp/bert/src/bert_for_finetune.py @@ -25,7 +25,7 @@ from mindspore.common.tensor import Tensor from mindspore.common.parameter import Parameter from mindspore.common import dtype as mstype from mindspore.nn.wrap.grad_reducer import DistributedGradReducer -from mindspore.train.parallel_utils import ParallelMode +from mindspore.context import ParallelMode from mindspore.communication.management import get_group_size from mindspore import context from .bert_for_pre_training import clip_grad diff --git a/model_zoo/official/nlp/bert/src/bert_for_pre_training.py b/model_zoo/official/nlp/bert/src/bert_for_pre_training.py index 84f442c22c..b57f93143a 100644 --- a/model_zoo/official/nlp/bert/src/bert_for_pre_training.py +++ b/model_zoo/official/nlp/bert/src/bert_for_pre_training.py @@ -24,7 +24,7 @@ from mindspore.common.tensor import Tensor from mindspore.common.parameter import Parameter from mindspore.common import dtype as mstype from mindspore.nn.wrap.grad_reducer import DistributedGradReducer -from mindspore.train.parallel_utils import ParallelMode +from mindspore.context import ParallelMode from mindspore.communication.management import get_group_size from mindspore import context from mindspore.ops import _selected_ops diff --git a/model_zoo/official/nlp/bert_thor/run_pretrain.py b/model_zoo/official/nlp/bert_thor/run_pretrain.py index 5c5e1c282f..222412e44e 100644 --- a/model_zoo/official/nlp/bert_thor/run_pretrain.py +++ b/model_zoo/official/nlp/bert_thor/run_pretrain.py @@ -35,7 +35,7 @@ from mindspore import log as logger from mindspore.nn.optim import Lamb, Momentum, AdamWeightDecay from mindspore.nn.wrap.loss_scale import DynamicLossScaleUpdateCell from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, TimeMonitor -from mindspore.train.parallel_utils import ParallelMode +from mindspore.context import ParallelMode from mindspore.train.serialization import load_checkpoint, load_param_into_net _current_dir = os.path.dirname(os.path.realpath(__file__)) diff --git a/model_zoo/official/nlp/bert_thor/src/bert_for_pre_training.py b/model_zoo/official/nlp/bert_thor/src/bert_for_pre_training.py index 807d5a5d31..81271a6992 100644 --- a/model_zoo/official/nlp/bert_thor/src/bert_for_pre_training.py +++ b/model_zoo/official/nlp/bert_thor/src/bert_for_pre_training.py @@ -27,7 +27,7 @@ from mindspore.ops import _selected_ops from mindspore.ops import composite as C from mindspore.ops import functional as F from mindspore.ops import operations as P -from mindspore.train.parallel_utils import ParallelMode +from mindspore.context import ParallelMode from .bert_model import BertModel from .config import cfg from .lr_generator import get_bert_damping diff --git 
a/model_zoo/official/nlp/bert_thor/src/grad_reducer_thor.py b/model_zoo/official/nlp/bert_thor/src/grad_reducer_thor.py index 47b86a4e65..cd0cc34819 100644 --- a/model_zoo/official/nlp/bert_thor/src/grad_reducer_thor.py +++ b/model_zoo/official/nlp/bert_thor/src/grad_reducer_thor.py @@ -102,7 +102,8 @@ class DistributedGradReducerThor(Cell): >>> from mindspore.ops import functional as F >>> from mindspore import context >>> from mindspore import nn - >>> from mindspore import ParallelMode, ParameterTuple + >>> from mindspore import ParameterTuple + >>> from mindspore.context import ParallelMode >>> >>> device_id = int(os.environ["DEVICE_ID"]) >>> context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", save_graphs=True, diff --git a/model_zoo/official/nlp/bert_thor/src/model_thor.py b/model_zoo/official/nlp/bert_thor/src/model_thor.py index 2ccb8b6ec6..01697f65a7 100644 --- a/model_zoo/official/nlp/bert_thor/src/model_thor.py +++ b/model_zoo/official/nlp/bert_thor/src/model_thor.py @@ -36,7 +36,7 @@ from mindspore.parallel._utils import _need_to_full from mindspore.train import amp from mindspore.parallel._utils import _to_full_tensor from mindspore.train.callback import _InternalCallbackParam, RunContext, _CallbackManager -from mindspore.train.parallel_utils import ParallelMode +from mindspore.context import ParallelMode from .dataset_helper import DatasetHelper diff --git a/model_zoo/official/nlp/mass/src/transformer/transformer_for_train.py b/model_zoo/official/nlp/mass/src/transformer/transformer_for_train.py index 3cb1b3739a..fd5974e162 100644 --- a/model_zoo/official/nlp/mass/src/transformer/transformer_for_train.py +++ b/model_zoo/official/nlp/mass/src/transformer/transformer_for_train.py @@ -22,7 +22,7 @@ from mindspore.common.tensor import Tensor from mindspore.common.parameter import Parameter from mindspore.common import dtype as mstype from mindspore.nn.wrap.grad_reducer import DistributedGradReducer -from mindspore.train.parallel_utils import ParallelMode +from mindspore.context import ParallelMode from mindspore.parallel._utils import _get_device_num, _get_parallel_mode, _get_mirror_mean from .transformer import Transformer diff --git a/model_zoo/official/nlp/mass/train.py b/model_zoo/official/nlp/mass/train.py index 14ab3c1b82..0562d75b3d 100644 --- a/model_zoo/official/nlp/mass/train.py +++ b/model_zoo/official/nlp/mass/train.py @@ -26,7 +26,8 @@ from mindspore.nn.optim import Adam, Lamb from mindspore.train.model import Model from mindspore.train.loss_scale_manager import DynamicLossScaleManager, FixedLossScaleManager from mindspore.train.callback import CheckpointConfig, ModelCheckpoint -from mindspore import context, ParallelMode, Parameter +from mindspore import context, Parameter +from mindspore.context import ParallelMode from mindspore.communication import management as MultiAscend from mindspore.train.serialization import load_checkpoint diff --git a/model_zoo/official/nlp/tinybert/run_general_distill.py b/model_zoo/official/nlp/tinybert/run_general_distill.py index 8fdc86b8bc..3bc9dc3e85 100644 --- a/model_zoo/official/nlp/tinybert/run_general_distill.py +++ b/model_zoo/official/nlp/tinybert/run_general_distill.py @@ -24,7 +24,7 @@ import mindspore.common.dtype as mstype from mindspore import context from mindspore.train.model import Model from mindspore.train.callback import TimeMonitor -from mindspore.train.parallel_utils import ParallelMode +from mindspore.context import ParallelMode from mindspore.nn.optim import AdamWeightDecay from 
mindspore.nn.wrap.loss_scale import DynamicLossScaleUpdateCell from mindspore import log as logger diff --git a/model_zoo/official/nlp/tinybert/src/tinybert_for_gd_td.py b/model_zoo/official/nlp/tinybert/src/tinybert_for_gd_td.py index f003ec26e7..4e595ec4e6 100644 --- a/model_zoo/official/nlp/tinybert/src/tinybert_for_gd_td.py +++ b/model_zoo/official/nlp/tinybert/src/tinybert_for_gd_td.py @@ -26,7 +26,7 @@ from mindspore.common import dtype as mstype from mindspore.common.parameter import Parameter from mindspore.communication.management import get_group_size from mindspore.nn.wrap.grad_reducer import DistributedGradReducer -from mindspore.train.parallel_utils import ParallelMode +from mindspore.context import ParallelMode from mindspore.train.serialization import load_checkpoint, load_param_into_net from .tinybert_model import BertModel, TinyBertModel, BertModelCLS diff --git a/model_zoo/official/nlp/transformer/src/transformer_for_train.py b/model_zoo/official/nlp/transformer/src/transformer_for_train.py index 32d5ad7e20..f26396d1f1 100644 --- a/model_zoo/official/nlp/transformer/src/transformer_for_train.py +++ b/model_zoo/official/nlp/transformer/src/transformer_for_train.py @@ -22,7 +22,7 @@ from mindspore.common.tensor import Tensor from mindspore.common.parameter import Parameter, ParameterTuple from mindspore.common import dtype as mstype from mindspore.nn.wrap.grad_reducer import DistributedGradReducer -from mindspore.train.parallel_utils import ParallelMode +from mindspore.context import ParallelMode from mindspore.parallel._utils import _get_device_num, _get_parallel_mode, _get_mirror_mean from mindspore.communication.management import get_group_size from mindspore import context diff --git a/model_zoo/official/nlp/transformer/train.py b/model_zoo/official/nlp/transformer/train.py index 8b7dc43456..2ccc01a6d1 100644 --- a/model_zoo/official/nlp/transformer/train.py +++ b/model_zoo/official/nlp/transformer/train.py @@ -29,7 +29,7 @@ from mindspore.train.callback import Callback, TimeMonitor from mindspore.train.serialization import load_checkpoint, load_param_into_net import mindspore.dataset.engine as de import mindspore.communication.management as D -from mindspore.train.parallel_utils import ParallelMode +from mindspore.context import ParallelMode from mindspore import context from src.transformer_for_train import TransformerTrainOneStepCell, TransformerNetworkWithLoss, \ diff --git a/model_zoo/official/recommend/deepfm/train.py b/model_zoo/official/recommend/deepfm/train.py index 95810c3a7e..3da4bc29ea 100644 --- a/model_zoo/official/recommend/deepfm/train.py +++ b/model_zoo/official/recommend/deepfm/train.py @@ -19,7 +19,8 @@ import argparse import random import numpy as np -from mindspore import context, ParallelMode +from mindspore import context +from mindspore.context import ParallelMode from mindspore.communication.management import init, get_rank, get_group_size from mindspore.train.model import Model from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, TimeMonitor diff --git a/model_zoo/official/recommend/wide_and_deep/src/callbacks.py b/model_zoo/official/recommend/wide_and_deep/src/callbacks.py index 40dba54578..c48ac8a699 100644 --- a/model_zoo/official/recommend/wide_and_deep/src/callbacks.py +++ b/model_zoo/official/recommend/wide_and_deep/src/callbacks.py @@ -17,7 +17,7 @@ callbacks import time from mindspore.train.callback import Callback from mindspore import context -from mindspore.train import ParallelMode +from mindspore.context import 
ParallelMode from mindspore.communication.management import get_rank def add_write(file_path, out_str): diff --git a/model_zoo/official/recommend/wide_and_deep/src/wide_and_deep.py b/model_zoo/official/recommend/wide_and_deep/src/wide_and_deep.py index e873c15ad0..6a03d6feff 100644 --- a/model_zoo/official/recommend/wide_and_deep/src/wide_and_deep.py +++ b/model_zoo/official/recommend/wide_and_deep/src/wide_and_deep.py @@ -23,7 +23,7 @@ from mindspore.ops import operations as P from mindspore.nn import Dropout from mindspore.nn.optim import Adam, FTRL, LazyAdam from mindspore.common.initializer import Uniform, initializer -from mindspore.train.parallel_utils import ParallelMode +from mindspore.context import ParallelMode from mindspore.nn.wrap.grad_reducer import DistributedGradReducer from mindspore.communication.management import get_group_size diff --git a/model_zoo/official/recommend/wide_and_deep/train_and_eval_auto_parallel.py b/model_zoo/official/recommend/wide_and_deep/train_and_eval_auto_parallel.py index 5440873d2c..5fd6e1afad 100644 --- a/model_zoo/official/recommend/wide_and_deep/train_and_eval_auto_parallel.py +++ b/model_zoo/official/recommend/wide_and_deep/train_and_eval_auto_parallel.py @@ -20,7 +20,7 @@ import sys import mindspore.dataset.engine as de from mindspore import Model, context from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, TimeMonitor -from mindspore.train import ParallelMode +from mindspore.context import ParallelMode from mindspore.communication.management import get_rank, get_group_size, init from mindspore.parallel import set_multi_subgraphs from mindspore.nn.wrap.cell_wrapper import VirtualDatasetCellTriple diff --git a/model_zoo/official/recommend/wide_and_deep/train_and_eval_distribute.py b/model_zoo/official/recommend/wide_and_deep/train_and_eval_distribute.py index 7e99aa72bb..9bd312a621 100644 --- a/model_zoo/official/recommend/wide_and_deep/train_and_eval_distribute.py +++ b/model_zoo/official/recommend/wide_and_deep/train_and_eval_distribute.py @@ -20,7 +20,7 @@ import sys import numpy as np from mindspore import Model, context from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, TimeMonitor -from mindspore.train import ParallelMode +from mindspore.context import ParallelMode from mindspore.communication.management import get_rank, get_group_size, init from src.wide_and_deep import PredictWithSigmoid, TrainStepWrap, NetWithLossClass, WideDeepModel diff --git a/model_zoo/official/recommend/wide_and_deep/train_and_eval_parameter_server.py b/model_zoo/official/recommend/wide_and_deep/train_and_eval_parameter_server.py index 5f93e202da..f52e673b74 100644 --- a/model_zoo/official/recommend/wide_and_deep/train_and_eval_parameter_server.py +++ b/model_zoo/official/recommend/wide_and_deep/train_and_eval_parameter_server.py @@ -20,7 +20,7 @@ import sys import numpy as np from mindspore import Model, context from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, TimeMonitor -from mindspore.train import ParallelMode +from mindspore.context import ParallelMode from mindspore.communication.management import get_rank, get_group_size, init from src.wide_and_deep import PredictWithSigmoid, TrainStepWrap, NetWithLossClass, WideDeepModel diff --git a/model_zoo/official/recommend/wide_and_deep_multitable/src/wide_and_deep.py b/model_zoo/official/recommend/wide_and_deep_multitable/src/wide_and_deep.py index c36a7ea66f..7d9b566a88 100644 --- 
a/model_zoo/official/recommend/wide_and_deep_multitable/src/wide_and_deep.py +++ b/model_zoo/official/recommend/wide_and_deep_multitable/src/wide_and_deep.py @@ -24,7 +24,7 @@ from mindspore.ops import operations as P from mindspore.nn import Dropout, Flatten from mindspore.nn.optim import Adam, FTRL from mindspore.common.initializer import Uniform, initializer -from mindspore.train.parallel_utils import ParallelMode +from mindspore.context import ParallelMode from mindspore.nn.wrap.grad_reducer import DistributedGradReducer diff --git a/model_zoo/official/recommend/wide_and_deep_multitable/train_and_eval_distribute.py b/model_zoo/official/recommend/wide_and_deep_multitable/train_and_eval_distribute.py index 756a227fa3..99fe089fe6 100644 --- a/model_zoo/official/recommend/wide_and_deep_multitable/train_and_eval_distribute.py +++ b/model_zoo/official/recommend/wide_and_deep_multitable/train_and_eval_distribute.py @@ -20,7 +20,7 @@ import numpy as np from mindspore import Model, context from mindspore.train.callback import ModelCheckpoint, CheckpointConfig from mindspore.train.callback import TimeMonitor -from mindspore.train import ParallelMode +from mindspore.context import ParallelMode from mindspore.communication.management import get_rank, get_group_size, init from src.wide_and_deep import PredictWithSigmoid, TrainStepWrap, NetWithLossClass, WideDeepModel diff --git a/tests/st/auto_parallel/resnet50_expand_loss.py b/tests/st/auto_parallel/resnet50_expand_loss.py index f46bb587fb..fc6d00ff61 100644 --- a/tests/st/auto_parallel/resnet50_expand_loss.py +++ b/tests/st/auto_parallel/resnet50_expand_loss.py @@ -28,7 +28,8 @@ from mindspore.nn.optim.momentum import Momentum from mindspore.ops import operations as P from mindspore.parallel import set_algo_parameters from mindspore.train.callback import Callback -from mindspore.train.model import Model, ParallelMode +from mindspore.train.model import Model +from mindspore.context import ParallelMode context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") context.set_context(device_id=int(os.getenv('DEVICE_ID'))) diff --git a/tests/st/mem_reuse/resnet_cifar_memreuse.py b/tests/st/mem_reuse/resnet_cifar_memreuse.py index 3a63dbbbc2..fc4cf42b51 100644 --- a/tests/st/mem_reuse/resnet_cifar_memreuse.py +++ b/tests/st/mem_reuse/resnet_cifar_memreuse.py @@ -30,7 +30,8 @@ from mindspore.nn.optim.momentum import Momentum from mindspore.ops import functional as F from mindspore.ops import operations as P from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor -from mindspore.train.model import Model, ParallelMode +from mindspore.train.model import Model +from mindspore.context import ParallelMode random.seed(1) np.random.seed(1) diff --git a/tests/st/mem_reuse/resnet_cifar_normal.py b/tests/st/mem_reuse/resnet_cifar_normal.py index d96d016e66..83241220cd 100644 --- a/tests/st/mem_reuse/resnet_cifar_normal.py +++ b/tests/st/mem_reuse/resnet_cifar_normal.py @@ -30,7 +30,8 @@ from mindspore.nn.optim.momentum import Momentum from mindspore.ops import functional as F from mindspore.ops import operations as P from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor -from mindspore.train.model import Model, ParallelMode +from mindspore.train.model import Model +from mindspore.context import ParallelMode random.seed(1) np.random.seed(1) diff --git a/tests/st/model_zoo_tests/wide_and_deep/python_file_for_ci/train_and_test_multinpu_ci.py 
b/tests/st/model_zoo_tests/wide_and_deep/python_file_for_ci/train_and_test_multinpu_ci.py index 073c23423e..22cb8bb40e 100644 --- a/tests/st/model_zoo_tests/wide_and_deep/python_file_for_ci/train_and_test_multinpu_ci.py +++ b/tests/st/model_zoo_tests/wide_and_deep/python_file_for_ci/train_and_test_multinpu_ci.py @@ -19,7 +19,7 @@ import os import sys from mindspore import Model, context from mindspore.train.callback import TimeMonitor -from mindspore.train import ParallelMode +from mindspore.context import ParallelMode from mindspore.communication.management import get_rank, get_group_size, init from mindspore.parallel import set_multi_subgraphs from mindspore.nn.wrap.cell_wrapper import VirtualDatasetCellTriple diff --git a/tests/st/model_zoo_tests/wide_and_deep/python_file_for_ci/wide_and_deep.py b/tests/st/model_zoo_tests/wide_and_deep/python_file_for_ci/wide_and_deep.py index c31b3b5b0e..6944b46b2d 100644 --- a/tests/st/model_zoo_tests/wide_and_deep/python_file_for_ci/wide_and_deep.py +++ b/tests/st/model_zoo_tests/wide_and_deep/python_file_for_ci/wide_and_deep.py @@ -25,7 +25,7 @@ from mindspore.nn.optim import Adam, FTRL from mindspore.common.initializer import Uniform, initializer # from mindspore.train.callback import ModelCheckpoint, CheckpointConfig from mindspore.parallel._utils import _get_device_num, _get_parallel_mode, _get_mirror_mean -from mindspore.train.parallel_utils import ParallelMode +from mindspore.context import ParallelMode from mindspore.nn.wrap.grad_reducer import DistributedGradReducer from mindspore.communication.management import get_group_size import numpy as np diff --git a/tests/st/model_zoo_tests/wide_and_deep/train_and_test_multinpu_ci_data_parallel.py b/tests/st/model_zoo_tests/wide_and_deep/train_and_test_multinpu_ci_data_parallel.py index 0f909b0236..eaae9dfff3 100644 --- a/tests/st/model_zoo_tests/wide_and_deep/train_and_test_multinpu_ci_data_parallel.py +++ b/tests/st/model_zoo_tests/wide_and_deep/train_and_test_multinpu_ci_data_parallel.py @@ -20,7 +20,7 @@ import sys import numpy as np from mindspore import Model, context from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, TimeMonitor -from mindspore.train import ParallelMode +from mindspore.context import ParallelMode from mindspore.communication.management import get_rank, get_group_size, init from src.wide_and_deep import PredictWithSigmoid, TrainStepWrap, NetWithLossClass, WideDeepModel diff --git a/tests/st/model_zoo_tests/yolov3/src/yolov3.py b/tests/st/model_zoo_tests/yolov3/src/yolov3.py index c33ed1a0d3..2fe4c8f07c 100644 --- a/tests/st/model_zoo_tests/yolov3/src/yolov3.py +++ b/tests/st/model_zoo_tests/yolov3/src/yolov3.py @@ -19,6 +19,7 @@ import numpy as np import mindspore as ms import mindspore.nn as nn from mindspore import context, Tensor +from mindspore.context import ParallelMode from mindspore.parallel._auto_parallel_context import auto_parallel_context from mindspore.communication.management import get_group_size from mindspore.common.initializer import TruncatedNormal @@ -652,7 +653,7 @@ class TrainingWrapper(nn.Cell): self.reducer_flag = False self.grad_reducer = None self.parallel_mode = context.get_auto_parallel_context("parallel_mode") - if self.parallel_mode in [ms.ParallelMode.DATA_PARALLEL, ms.ParallelMode.HYBRID_PARALLEL]: + if self.parallel_mode in [ParallelMode.DATA_PARALLEL, ParallelMode.HYBRID_PARALLEL]: self.reducer_flag = True if self.reducer_flag: mean = context.get_auto_parallel_context("mirror_mean") diff --git 
a/tests/st/networks/models/bert/src/bert_for_pre_training.py b/tests/st/networks/models/bert/src/bert_for_pre_training.py
index 2577cf617a..0a0675a805 100644
--- a/tests/st/networks/models/bert/src/bert_for_pre_training.py
+++ b/tests/st/networks/models/bert/src/bert_for_pre_training.py
@@ -24,7 +24,7 @@ from mindspore.common.tensor import Tensor
 from mindspore.common.parameter import Parameter, ParameterTuple
 from mindspore.common import dtype as mstype
 from mindspore.nn.wrap.grad_reducer import DistributedGradReducer
-from mindspore.train.parallel_utils import ParallelMode
+from mindspore.context import ParallelMode
 from mindspore.communication.management import get_group_size
 from mindspore import context
 from .bert_model import BertModel
diff --git a/tests/st/networks/models/bert/src/utils.py b/tests/st/networks/models/bert/src/utils.py
index 2b19d3d291..bcea50dc3b 100644
--- a/tests/st/networks/models/bert/src/utils.py
+++ b/tests/st/networks/models/bert/src/utils.py
@@ -26,7 +26,7 @@ from mindspore.common.tensor import Tensor
 from mindspore.common.parameter import Parameter, ParameterTuple
 from mindspore.common import dtype as mstype
 from mindspore.nn.wrap.grad_reducer import DistributedGradReducer
-from mindspore.train.parallel_utils import ParallelMode
+from mindspore.context import ParallelMode
 from mindspore.communication.management import get_group_size
 from mindspore import context
 from mindspore.model_zoo.Bert_NEZHA.bert_model import BertModel
diff --git a/tests/st/networks/models/resnet50/src_thor/dataset_helper.py b/tests/st/networks/models/resnet50/src_thor/dataset_helper.py
index 4e900fd38a..448589b6f7 100644
--- a/tests/st/networks/models/resnet50/src_thor/dataset_helper.py
+++ b/tests/st/networks/models/resnet50/src_thor/dataset_helper.py
@@ -16,7 +16,7 @@ from mindspore._checkparam import check_bool
 from mindspore.parallel._utils import _get_device_num, _get_parallel_mode, _to_full_shapes
 from mindspore.train._utils import _exec_datagraph, _get_types_and_shapes
-from mindspore.train.parallel_utils import ParallelMode
+from mindspore.context import ParallelMode

 def _send_data(dataset):
     """Engine dataset to write data to tdt queue."""
diff --git a/tests/st/networks/models/resnet50/src_thor/grad_reducer_thor.py b/tests/st/networks/models/resnet50/src_thor/grad_reducer_thor.py
index e84c941249..97d2cb2270 100644
--- a/tests/st/networks/models/resnet50/src_thor/grad_reducer_thor.py
+++ b/tests/st/networks/models/resnet50/src_thor/grad_reducer_thor.py
@@ -103,7 +103,8 @@ class DistributedGradReducerThor(Cell):
         >>> from mindspore.ops import functional as F
         >>> from mindspore import context
         >>> from mindspore import nn
-        >>> from mindspore import ParallelMode, ParameterTuple
+        >>> from mindspore import ParameterTuple
+        >>> from mindspore.context import ParallelMode
         >>>
         >>> device_id = int(os.environ["DEVICE_ID"])
         >>> context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", save_graphs=True,
diff --git a/tests/st/networks/models/resnet50/src_thor/model_thor.py b/tests/st/networks/models/resnet50/src_thor/model_thor.py
index 2399c4a01b..8144b1cd80 100644
--- a/tests/st/networks/models/resnet50/src_thor/model_thor.py
+++ b/tests/st/networks/models/resnet50/src_thor/model_thor.py
@@ -30,7 +30,7 @@ from mindspore.parallel._utils import _get_parallel_mode, _get_device_num, _get_
     _get_parameter_broadcast, _device_number_check, _parameter_broadcast_check
 from mindspore.train import amp
 from mindspore.train.callback import _InternalCallbackParam, RunContext, _CallbackManager
-from mindspore.train.parallel_utils import ParallelMode
+from mindspore.context import ParallelMode
 from .dataset_helper import DatasetHelper
diff --git a/tests/st/networks/models/resnet50/test_resnet50_imagenet.py b/tests/st/networks/models/resnet50/test_resnet50_imagenet.py
index 220b986208..42b209f25e 100644
--- a/tests/st/networks/models/resnet50/test_resnet50_imagenet.py
+++ b/tests/st/networks/models/resnet50/test_resnet50_imagenet.py
@@ -24,7 +24,8 @@ import numpy as np
 from mindspore import context, Tensor
 from mindspore.communication.management import init
 from mindspore.parallel._auto_parallel_context import auto_parallel_context
-from mindspore.train.model import Model, ParallelMode
+from mindspore.train.model import Model
+from mindspore.context import ParallelMode
 from mindspore.train.callback import Callback
 from mindspore.train.loss_scale_manager import FixedLossScaleManager
 import mindspore.nn as nn
diff --git a/tests/st/tbe_networks/resnet_cifar.py b/tests/st/tbe_networks/resnet_cifar.py
index c6b1ee0a78..c40b4809be 100644
--- a/tests/st/tbe_networks/resnet_cifar.py
+++ b/tests/st/tbe_networks/resnet_cifar.py
@@ -32,7 +32,8 @@ from mindspore.communication.management import init
 from mindspore.nn.optim.momentum import Momentum
 from mindspore.ops import operations as P
 from mindspore.parallel._auto_parallel_context import auto_parallel_context
-from mindspore.train.model import Model, ParallelMode
+from mindspore.train.model import Model
+from mindspore.context import ParallelMode

 random.seed(1)
 np.random.seed(1)
diff --git a/tests/st/tbe_networks/test_resnet_cifar_8p.py b/tests/st/tbe_networks/test_resnet_cifar_8p.py
index 56d6a91d64..7eefdcc7a9 100644
--- a/tests/st/tbe_networks/test_resnet_cifar_8p.py
+++ b/tests/st/tbe_networks/test_resnet_cifar_8p.py
@@ -32,7 +32,8 @@ from mindspore.nn.optim.momentum import Momentum
 from mindspore.ops import operations as P
 from mindspore.parallel._auto_parallel_context import auto_parallel_context
 from mindspore.train.callback import Callback
-from mindspore.train.model import Model, ParallelMode
+from mindspore.train.model import Model
+from mindspore.context import ParallelMode

 random.seed(1)
 np.random.seed(1)
diff --git a/tests/ut/python/communication/test_data_parallel_dense.py b/tests/ut/python/communication/test_data_parallel_dense.py
index ef042e3f39..d2fdf2d158 100644
--- a/tests/ut/python/communication/test_data_parallel_dense.py
+++ b/tests/ut/python/communication/test_data_parallel_dense.py
@@ -25,7 +25,7 @@ from mindspore.common.api import _executor
 from mindspore.nn import Momentum
 from mindspore.nn import TrainOneStepCell, WithLossCell
 from mindspore.ops import operations as P
-from mindspore.train.parallel_utils import ParallelMode
+from mindspore.context import ParallelMode


 class DenseMMNet(nn.Cell):
diff --git a/tests/ut/python/communication/test_data_parallel_lenet.py b/tests/ut/python/communication/test_data_parallel_lenet.py
index 7a5062b941..0897023a69 100755
--- a/tests/ut/python/communication/test_data_parallel_lenet.py
+++ b/tests/ut/python/communication/test_data_parallel_lenet.py
@@ -21,7 +21,8 @@ import numpy as np
 import mindspore.context as context
 import mindspore.nn as nn
-from mindspore import Tensor, Model, ParallelMode
+from mindspore import Tensor, Model
+from mindspore.context import ParallelMode
 from mindspore.nn.optim import Momentum
 from mindspore.ops import operations as P
diff --git a/tests/ut/python/communication/test_data_parallel_resnet.py b/tests/ut/python/communication/test_data_parallel_resnet.py
index fa798c0616..ad60c3c957 100644
--- a/tests/ut/python/communication/test_data_parallel_resnet.py
+++ b/tests/ut/python/communication/test_data_parallel_resnet.py
@@ -19,7 +19,8 @@ import numpy as np
 import mindspore.context as context
 import mindspore.nn as nn
-from mindspore import Tensor, Model, ParallelMode
+from mindspore import Tensor, Model
+from mindspore.context import ParallelMode
 from mindspore.nn.optim import Momentum
 from mindspore.ops.operations import TensorAdd
 from ....dataset_mock import MindData
diff --git a/tests/ut/python/model/test_mix_precision.py b/tests/ut/python/model/test_mix_precision.py
index 4570bd243a..9faf7341b6 100644
--- a/tests/ut/python/model/test_mix_precision.py
+++ b/tests/ut/python/model/test_mix_precision.py
@@ -26,7 +26,7 @@ from mindspore.nn import TrainOneStepCell, WithLossCell
 from mindspore.ops import composite as C
 from mindspore.ops import operations as P
 from mindspore.ops import functional as F
-from mindspore.train.parallel_utils import ParallelMode
+from mindspore.context import ParallelMode
 from tests.ops_common import convert
 from ....train_step_wrap import train_step_with_loss_warp
diff --git a/tests/ut/python/parallel/test_allreduce_fusion.py b/tests/ut/python/parallel/test_allreduce_fusion.py
index c93df7ffb1..d1d9c74e5d 100644
--- a/tests/ut/python/parallel/test_allreduce_fusion.py
+++ b/tests/ut/python/parallel/test_allreduce_fusion.py
@@ -22,7 +22,8 @@ from mindspore.nn.loss import SoftmaxCrossEntropyWithLogits
 from mindspore.nn.optim.momentum import Momentum
 from mindspore.parallel import _cost_model_context as cost_model_context
 from mindspore.parallel._auto_parallel_context import auto_parallel_context
-from mindspore.train import Model, ParallelMode
+from mindspore.train import Model
+from mindspore.context import ParallelMode
 from tests.dataset_mock import MindData
diff --git a/tests/ut/python/parallel/test_alltoall.py b/tests/ut/python/parallel/test_alltoall.py
index 96ff843504..53a77a0bc6 100644
--- a/tests/ut/python/parallel/test_alltoall.py
+++ b/tests/ut/python/parallel/test_alltoall.py
@@ -24,7 +24,8 @@ from mindspore.nn.loss import SoftmaxCrossEntropyWithLogits
 from mindspore.nn.optim.momentum import Momentum
 from mindspore.ops import operations as P
 from mindspore.parallel._utils import _reset_op_id
-from mindspore.train import Model, ParallelMode
+from mindspore.train import Model
+from mindspore.context import ParallelMode
 from tests.dataset_mock import MindData
diff --git a/tests/ut/python/parallel/test_auto_parallel_onehot.py b/tests/ut/python/parallel/test_auto_parallel_onehot.py
index 302de23a50..32ee1c99ed 100644
--- a/tests/ut/python/parallel/test_auto_parallel_onehot.py
+++ b/tests/ut/python/parallel/test_auto_parallel_onehot.py
@@ -23,7 +23,8 @@ from mindspore.common.parameter import Parameter
 from mindspore.nn.optim.momentum import Momentum
 from mindspore.ops import composite as C
 from mindspore.ops import operations as P
-from mindspore.train import Model, ParallelMode
+from mindspore.train import Model
+from mindspore.context import ParallelMode
 from tests.dataset_mock import MindData
 from tests.ut.python.ops.test_math_ops import VirtualLoss
diff --git a/tests/ut/python/parallel/test_auto_parallel_resnet.py b/tests/ut/python/parallel/test_auto_parallel_resnet.py
index 5a117bbb1a..9d4d2f2755 100644
--- a/tests/ut/python/parallel/test_auto_parallel_resnet.py
+++ b/tests/ut/python/parallel/test_auto_parallel_resnet.py
@@ -29,7 +29,8 @@ from mindspore.ops import operations as P
 from mindspore.parallel import _cost_model_context as cost_model_context
 from mindspore.parallel import set_algo_parameters
 from mindspore.parallel._utils import _reset_op_id as resset_op_id
-from mindspore.train.model import Model, ParallelMode
+from mindspore.train.model import Model
+from mindspore.context import ParallelMode

 context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
 context.set_context(device_id=0)
diff --git a/tests/ut/python/parallel/test_batchnorm_batch_parallel.py b/tests/ut/python/parallel/test_batchnorm_batch_parallel.py
index 21d5003b4c..236cc263df 100644
--- a/tests/ut/python/parallel/test_batchnorm_batch_parallel.py
+++ b/tests/ut/python/parallel/test_batchnorm_batch_parallel.py
@@ -26,7 +26,8 @@ from mindspore.nn.layer.pooling import MaxPool2d
 from mindspore.nn.loss import SoftmaxCrossEntropyWithLogits
 from mindspore.nn.optim.momentum import Momentum
 from mindspore.ops import operations as P
-from mindspore.train import Model, ParallelMode
+from mindspore.train import Model
+from mindspore.context import ParallelMode
 from tests.dataset_mock import MindData

 dev_num = 8
diff --git a/tests/ut/python/parallel/test_bn_prelu_cell.py b/tests/ut/python/parallel/test_bn_prelu_cell.py
index 07f5d3906b..0694c331b1 100644
--- a/tests/ut/python/parallel/test_bn_prelu_cell.py
+++ b/tests/ut/python/parallel/test_bn_prelu_cell.py
@@ -27,7 +27,7 @@ from mindspore.nn.optim.momentum import Momentum
 from mindspore.ops import functional as F
 from mindspore.ops import operations as P
 from mindspore.train.model import Model
-from mindspore.train.parallel_utils import ParallelMode
+from mindspore.context import ParallelMode
 from tests.dataset_mock import MindData
diff --git a/tests/ut/python/parallel/test_dataset_interface.py b/tests/ut/python/parallel/test_dataset_interface.py
index 0e70b2513c..02153cadf1 100644
--- a/tests/ut/python/parallel/test_dataset_interface.py
+++ b/tests/ut/python/parallel/test_dataset_interface.py
@@ -22,7 +22,8 @@ from mindspore.common.parameter import Parameter, ParameterTuple
 from mindspore.nn.loss import SoftmaxCrossEntropyWithLogits
 from mindspore.nn.optim.momentum import Momentum
 from mindspore.ops import composite as C, functional as F, operations as P
-from mindspore.train import Model, ParallelMode
+from mindspore.train import Model
+from mindspore.context import ParallelMode
 from mindspore.train.loss_scale_manager import DynamicLossScaleManager
 from tests.dataset_mock import MindData
diff --git a/tests/ut/python/parallel/test_full_batch.py b/tests/ut/python/parallel/test_full_batch.py
index 70a68a5b00..ddb0b057e2 100644
--- a/tests/ut/python/parallel/test_full_batch.py
+++ b/tests/ut/python/parallel/test_full_batch.py
@@ -23,7 +23,8 @@ from mindspore.nn.loss import SoftmaxCrossEntropyWithLogits
 from mindspore.nn.optim.momentum import Momentum
 from mindspore.ops import operations as P
 from mindspore.parallel._utils import _reset_op_id
-from mindspore.train import Model, ParallelMode
+from mindspore.train import Model
+from mindspore.context import ParallelMode
 from tests.dataset_mock import MindData

 class Dataset(MindData):
diff --git a/tests/ut/python/parallel/test_gather_v2_primitive.py b/tests/ut/python/parallel/test_gather_v2_primitive.py
index 4c0534aad0..8ad626e184 100644
--- a/tests/ut/python/parallel/test_gather_v2_primitive.py
+++ b/tests/ut/python/parallel/test_gather_v2_primitive.py
@@ -27,7 +27,8 @@ from mindspore.nn.optim import Momentum
 from mindspore.ops import composite as C
 from mindspore.ops import functional as F
 from mindspore.ops import operations as P
-from mindspore.train import Model, ParallelMode
+from mindspore.train import Model
+from mindspore.context import ParallelMode

 context.set_context(mode=context.GRAPH_MODE)
 device_number = 32
diff --git a/tests/ut/python/parallel/test_loss_scale.py b/tests/ut/python/parallel/test_loss_scale.py
index 3469400d2a..88997160f8 100644
--- a/tests/ut/python/parallel/test_loss_scale.py
+++ b/tests/ut/python/parallel/test_loss_scale.py
@@ -25,7 +25,8 @@ from mindspore.ops import functional as F
 from mindspore.nn.optim.momentum import Momentum
 from mindspore.nn.wrap.loss_scale import DynamicLossScaleUpdateCell
 import mindspore.nn as nn
-from mindspore.train import Model, ParallelMode
+from mindspore.train import Model
+from mindspore.context import ParallelMode
 from tests.dataset_mock import MindData
diff --git a/tests/ut/python/parallel/test_one_dev.py b/tests/ut/python/parallel/test_one_dev.py
index 056f4a15c7..ec0990761d 100644
--- a/tests/ut/python/parallel/test_one_dev.py
+++ b/tests/ut/python/parallel/test_one_dev.py
@@ -25,7 +25,8 @@ from mindspore.nn.loss import SoftmaxCrossEntropyWithLogits
 from mindspore.nn.optim.momentum import Momentum
 from mindspore.ops import operations as P
 from mindspore.parallel._utils import _reset_op_id
-from mindspore.train import Model, ParallelMode
+from mindspore.train import Model
+from mindspore.context import ParallelMode
 from tests.dataset_mock import MindData

 context.set_context(mode=context.GRAPH_MODE)
diff --git a/tests/ut/python/parallel/test_one_hot_net.py b/tests/ut/python/parallel/test_one_hot_net.py
index 33c8fcbc82..32f0d2a85c 100644
--- a/tests/ut/python/parallel/test_one_hot_net.py
+++ b/tests/ut/python/parallel/test_one_hot_net.py
@@ -25,7 +25,8 @@ from mindspore.nn.optim.momentum import Momentum
 from mindspore.ops import composite as C
 from mindspore.ops import functional as F
 from mindspore.ops import operations as P
-from mindspore.train import Model, ParallelMode
+from mindspore.train import Model
+from mindspore.context import ParallelMode
 from tests.dataset_mock import MindData
 from tests.ut.python.ops.test_math_ops import VirtualLoss
diff --git a/tests/ut/python/parallel/test_operator_model_parallel.py b/tests/ut/python/parallel/test_operator_model_parallel.py
index 788521c525..96e3c86f51 100644
--- a/tests/ut/python/parallel/test_operator_model_parallel.py
+++ b/tests/ut/python/parallel/test_operator_model_parallel.py
@@ -29,7 +29,8 @@ from mindspore.nn.loss import SoftmaxCrossEntropyWithLogits
 from mindspore.nn.optim.momentum import Momentum
 from mindspore.ops import operations as P
 from mindspore.ops.operations import TensorAdd
-from mindspore.train import Model, ParallelMode
+from mindspore.train import Model
+from mindspore.context import ParallelMode
 from tests.dataset_mock import MindData

 dev_num = 8
diff --git a/tests/ut/python/parallel/test_optimizer.py b/tests/ut/python/parallel/test_optimizer.py
index e848f4ed9b..0d6de9526a 100644
--- a/tests/ut/python/parallel/test_optimizer.py
+++ b/tests/ut/python/parallel/test_optimizer.py
@@ -23,7 +23,7 @@ from mindspore.nn import Dense
 from mindspore.nn import Momentum
 from mindspore.nn import TrainOneStepCell, WithLossCell
 from mindspore.ops import operations as P
-from mindspore.train.parallel_utils import ParallelMode
+from mindspore.context import ParallelMode


 class Net(nn.Cell):
diff --git a/tests/ut/python/parallel/test_prelu_cell.py b/tests/ut/python/parallel/test_prelu_cell.py
index dca467ef8d..074e585f53 100644
--- a/tests/ut/python/parallel/test_prelu_cell.py
+++ b/tests/ut/python/parallel/test_prelu_cell.py
@@ -24,7 +24,8 @@ from mindspore.nn.loss import SoftmaxCrossEntropyWithLogits
 from mindspore.nn.optim.momentum import Momentum
 from mindspore.ops import functional as F
 from mindspore.ops import operations as P
-from mindspore.train import Model, ParallelMode
+from mindspore.train import Model
+from mindspore.context import ParallelMode
 from tests.dataset_mock import MindData

 context.set_context(mode=context.GRAPH_MODE)
diff --git a/tests/ut/python/parallel/test_reshape.py b/tests/ut/python/parallel/test_reshape.py
index 9cfb376e1b..d2e08230be 100644
--- a/tests/ut/python/parallel/test_reshape.py
+++ b/tests/ut/python/parallel/test_reshape.py
@@ -28,7 +28,8 @@ from mindspore.ops import functional as F
 from mindspore.ops import operations as P
 from mindspore.ops.operations.comm_ops import _VirtualDataset
 from mindspore.parallel import set_algo_parameters
-from mindspore.train import Model, ParallelMode
+from mindspore.train import Model
+from mindspore.context import ParallelMode
 from tests.dataset_mock import MindData
 from tests.ut.python.ops.test_math_ops import VirtualLoss
diff --git a/tests/ut/python/parallel/test_transpose.py b/tests/ut/python/parallel/test_transpose.py
index b0b917bf19..2669a57433 100644
--- a/tests/ut/python/parallel/test_transpose.py
+++ b/tests/ut/python/parallel/test_transpose.py
@@ -21,7 +21,8 @@ from mindspore.common.parameter import Parameter
 from mindspore.nn.loss import SoftmaxCrossEntropyWithLogits
 from mindspore.nn.optim.momentum import Momentum
 from mindspore.ops import operations as P
-from mindspore.train import Model, ParallelMode
+from mindspore.train import Model
+from mindspore.context import ParallelMode
 from tests.dataset_mock import MindData
diff --git a/tests/ut/python/train/test_amp.py b/tests/ut/python/train/test_amp.py
index 6406a4b80d..0cba6bfa08 100644
--- a/tests/ut/python/train/test_amp.py
+++ b/tests/ut/python/train/test_amp.py
@@ -20,7 +20,8 @@ import mindspore.context as context
 from mindspore import Tensor
 from mindspore import amp
 from mindspore import nn
-from mindspore.train import Model, ParallelMode
+from mindspore.train import Model
+from mindspore.context import ParallelMode
 from mindspore.common import dtype as mstype
 from ....dataset_mock import MindData
 from mindspore.parallel._auto_parallel_context import auto_parallel_context
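
For reviewers, a minimal usage sketch of the relocated import; the parallel mode and device_num below are illustrative values, not part of this patch:

    # ParallelMode is now exposed by mindspore.context instead of the
    # removed mindspore.train.parallel_utils module.
    from mindspore import context
    from mindspore.context import ParallelMode

    # Example: configure a data-parallel run (device_num=8 is an assumed value).
    context.set_context(mode=context.GRAPH_MODE)
    context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL,
                                      device_num=8)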