!33239 Adjust the import specification of initializer, context and train

Merge pull request !33239 from 冯一航/adjust_import_spec_replenish
This commit is contained in:
i-robot 2022-04-27 00:47:22 +00:00 committed by Gitee
commit 2b13573044
35 changed files with 214 additions and 170 deletions
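
In short, symbols that previously had to be imported from `mindspore.context`, `mindspore.common.initializer`, `mindspore.train.callback`, `mindspore.train.summary`, and `mindspore.train.train_thor` are now re-exported from the top-level `mindspore` package, and the docstring examples are updated accordingly. A minimal sketch of the migration, based on the docstring changes below (the shape and dtype are illustrative):

```python
import mindspore

# Old style: pull helpers from submodules
#   import mindspore.context as context
#   from mindspore.common.initializer import initializer, TruncatedNormal
#   context.set_context(mode=context.GRAPH_MODE)

# New style: import the same symbols from the top-level package
from mindspore import set_context, GRAPH_MODE, TruncatedNormal
from mindspore.common.initializer import initializer

set_context(mode=GRAPH_MODE)
tensor = initializer(TruncatedNormal(), [1, 2, 3], mindspore.float32)
```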

View File

@@ -26,6 +26,9 @@ from .parameter import Parameter, ParameterTuple
from .seed import set_seed, get_seed
from .tensor import Tensor, RowTensor, SparseTensor, COOTensor, CSRTensor
from .variable import Variable
+from .initializer import Initializer, TruncatedNormal, Normal, \
+Uniform, HeUniform, HeNormal, XavierUniform, One, Zero, Constant, Identity, \
+Sparse, Dirac, Orthogonal, VarianceScaling
# symbols from dtype
__all__ = [
@@ -50,7 +53,14 @@ __all__ = [
"complex64", "complex128",
# __method__ from dtype
"dtype_to_nptype", "issubclass_", "dtype_to_pytype",
-"pytype_to_dtype", "get_py_obj_dtype"
+"pytype_to_dtype", "get_py_obj_dtype", 'Initializer',
+'TruncatedNormal', 'Normal',
+'Uniform', 'HeUniform',
+'HeNormal', 'XavierUniform',
+'One', 'Zero',
+'Constant', 'Identity',
+'Sparse', 'Dirac',
+'Orthogonal', 'VarianceScaling'
]
__all__.extend([

View File

@@ -62,10 +62,10 @@ def set_dump(target, enabled=True):
>>> import numpy as np
>>>
>>> import mindspore.nn as nn
->>> import mindspore.context as context
+>>> from mindspore import set_context, GRAPH_MODE
>>> from mindspore import Tensor, set_dump
>>>
->>> context.set_context(device_target="Ascend", mode=context.GRAPH_MODE)
+>>> set_context(device_target="Ascend", mode=GRAPH_MODE)
>>>
>>> class MyNet(nn.Cell):
... def __init__(self):

View File

@@ -63,9 +63,9 @@ class HookHandle:
>>> import mindspore
>>> import mindspore.nn as nn
>>> from mindspore import Tensor
->>> from mindspore import context
+>>> from mindspore import set_context, PYNATIVE_MODE
>>> from mindspore.ops import GradOperation
->>> context.set_context(mode=context.PYNATIVE_MODE)
+>>> set_context(mode=PYNATIVE_MODE)
>>> def forward_pre_hook_fn(cell_id, inputs):
... print("forward inputs: ", inputs)
...

View File

@@ -94,7 +94,8 @@ class Zero(Initializer):
Examples:
>>> import mindspore
->>> from mindspore.common.initializer import initializer, Zero
+>>> from mindspore import Zero
+>>> from mindspore.common.initializer import initializer
>>> tensor1 = initializer(Zero(), [1, 2, 3], mindspore.float32)
>>> tensor2 = initializer('zeros', [1, 2, 3], mindspore.float32)
"""
@@ -109,7 +110,8 @@ class One(Initializer):
Examples:
>>> import mindspore
->>> from mindspore.common.initializer import initializer, One
+>>> from mindspore import One
+>>> from mindspore.common.initializer import initializer
>>> tensor1 = initializer(One(), [1, 2, 3], mindspore.float32)
>>> tensor2 = initializer('ones', [1, 2, 3], mindspore.float32)
"""
@@ -247,7 +249,8 @@ class XavierUniform(Initializer):
Examples:
>>> import mindspore
->>> from mindspore.common.initializer import initializer, XavierUniform
+>>> from mindspore import XavierUniform
+>>> from mindspore.common.initializer import initializer
>>> tensor1 = initializer(XavierUniform(), [1, 2, 3], mindspore.float32)
>>> tensor2 = initializer('xavier_uniform', [1, 2, 3], mindspore.float32)
"""
@@ -291,7 +294,8 @@ class HeUniform(Initializer):
Examples:
>>> import mindspore
->>> from mindspore.common.initializer import initializer, HeUniform
+>>> from mindspore import HeUniform
+>>> from mindspore.common.initializer import initializer
>>> tensor1 = initializer(HeUniform(), [1, 2, 3], mindspore.float32)
>>> tensor2 = initializer('he_uniform', [1, 2, 3], mindspore.float32)
"""
@@ -337,7 +341,8 @@ class HeNormal(Initializer):
Examples:
>>> import mindspore
->>> from mindspore.common.initializer import initializer, HeNormal
+>>> from mindspore import HeNormal
+>>> from mindspore.common.initializer import initializer
>>> tensor1 = initializer(HeNormal(), [1, 2, 3], mindspore.float32)
>>> tensor2 = initializer('he_normal', [1, 2, 3], mindspore.float32)
"""
@@ -388,7 +393,8 @@ class Identity(Initializer):
Examples:
>>> import mindspore
->>> from mindspore.common.initializer import initializer, Identity
+>>> from mindspore import Identity
+>>> from mindspore.common.initializer import initializer
>>> tensor1 = initializer(Identity(), [2, 3], mindspore.float32)
>>> tensor2 = initializer('identity', [2, 3], mindspore.float32)
"""
@@ -415,7 +421,8 @@ class Sparse(Initializer):
Examples:
>>> import mindspore
->>> from mindspore.common.initializer import initializer, Sparse
+>>> from mindspore import Sparse
+>>> from mindspore.common.initializer import initializer
>>> tensor1 = initializer(Sparse(sparsity=0.1, sigma=0.01), [5, 8], mindspore.float32)
"""
def __init__(self, sparsity, sigma=0.01):
@@ -452,7 +459,8 @@ class Dirac(Initializer):
Examples:
>>> import mindspore
->>> from mindspore.common.initializer import initializer, Dirac
+>>> from mindspore import Dirac
+>>> from mindspore.common.initializer import initializer
>>> tensor1 = initializer(Dirac(groups=2), [6, 4, 3, 3], mindspore.float32)
>>> tensor2 = initializer("dirac", [6, 4, 3, 3], mindspore.float32)
"""
@@ -503,7 +511,8 @@ class Orthogonal(Initializer):
Examples:
>>> import mindspore
->>> from mindspore.common.initializer import initializer, Orthogonal
+>>> from mindspore import Orthogonal
+>>> from mindspore.common.initializer import initializer
>>> tensor1 = initializer(Orthogonal(gain=2.), [2, 3, 4], mindspore.float32)
>>> tensor2 = initializer('orthogonal', [2, 3, 4], mindspore.float32)
"""
@@ -558,7 +567,8 @@ class VarianceScaling(Initializer):
Examples:
>>> import mindspore
->>> from mindspore.common.initializer import initializer, VarianceScaling
+>>> from mindspore import VarianceScaling
+>>> from mindspore.common.initializer import initializer
>>> tensor1 = initializer(VarianceScaling(scale=1.0, mode='fan_out',
... distribution='untruncated_normal'), [2, 3], mindspore.float32)
>>> tensor2 = initializer('varianceScaling', [2, 3], mindspore.float32)
@@ -615,7 +625,8 @@ class Uniform(Initializer):
Examples:
>>> import mindspore
->>> from mindspore.common.initializer import initializer, Uniform
+>>> from mindspore import Uniform
+>>> from mindspore.common.initializer import initializer
>>> tensor1 = initializer(Uniform(), [1, 2, 3], mindspore.float32)
>>> tensor2 = initializer('uniform', [1, 2, 3], mindspore.float32)
"""
@@ -643,7 +654,8 @@ class Normal(Initializer):
Examples:
>>> import mindspore
->>> from mindspore.common.initializer import initializer, Normal
+>>> from mindspore import Normal
+>>> from mindspore.common.initializer import initializer
>>> tensor1 = initializer(Normal(), [1, 2, 3], mindspore.float32)
>>> tensor2 = initializer('normal', [1, 2, 3], mindspore.float32)
"""
@@ -671,7 +683,8 @@ class TruncatedNormal(Initializer):
Examples:
>>> import mindspore
->>> from mindspore.common.initializer import initializer, TruncatedNormal
+>>> from mindspore import TruncatedNormal
+>>> from mindspore.common.initializer import initializer
>>> tensor1 = initializer(TruncatedNormal(), [1, 2, 3], mindspore.float32)
>>> tensor2 = initializer('truncatedNormal', [1, 2, 3], mindspore.float32)
"""
@@ -715,7 +728,8 @@ def initializer(init, shape=None, dtype=mstype.float32):
>>> import numpy as np
>>> import mindspore
>>> from mindspore import Tensor
->>> from mindspore.common.initializer import initializer, One
+>>> from mindspore import One
+>>> from mindspore.common.initializer import initializer
>>> data = Tensor(np.zeros([1, 2, 3]), mindspore.float32)
>>> tensor1 = initializer(data, [1, 2, 3], mindspore.float32)
>>> tensor2 = initializer('ones', [1, 2, 3], mindspore.float32)

View File

@@ -63,7 +63,7 @@ class Tensor(Tensor_):
>>> import numpy as np
>>> import mindspore as ms
>>> from mindspore import Tensor
->>> from mindspore.common.initializer import One
+>>> from mindspore import One
>>> # initialize a tensor with numpy.ndarray
>>> t1 = Tensor(np.zeros([1, 2, 3]), ms.float32)
>>> print(t1)
@@ -1555,8 +1555,9 @@ class Tensor(Tensor_):
Examples:
>>> import mindspore as ms
->>> import mindspore.common.initializer as init
->>> x = init.initializer(init.Constant(1), [2, 2], ms.float32)
+>>> from mindspore import Constant
+>>> from mindspore.common.initializer import initializer
+>>> x = initializer(Constant(1), [2, 2], ms.float32)
>>> out = x.init_data()
>>> print(out)
[[1. 1.]
@@ -1630,8 +1631,9 @@ class Tensor(Tensor_):
Examples:
>>> import mindspore as ms
->>> import mindspore.common.initializer as init
->>> x = init.initializer(init.Constant(1), [2, 2], ms.float32)
+>>> from mindspore import Constant
+>>> from mindspore.common.initializer import initializer
+>>> x = initializer(Constant(1), [2, 2], ms.float32)
>>> out = x.to_tensor()
>>> print(out)
[[1. 1.]

View File

@@ -544,26 +544,26 @@ def set_auto_parallel_context(**kwargs):
ValueError: If input key is not attribute in auto parallel context.
Examples:
->>> from mindspore import context
->>> context.set_auto_parallel_context(device_num=8)
->>> context.set_auto_parallel_context(global_rank=0)
->>> context.set_auto_parallel_context(gradients_mean=True)
->>> context.set_auto_parallel_context(gradient_fp32_sync=False)
->>> context.set_auto_parallel_context(parallel_mode="auto_parallel")
->>> context.set_auto_parallel_context(search_mode="dynamic_programming")
->>> context.set_auto_parallel_context(auto_parallel_search_mode="dynamic_programming")
->>> context.set_auto_parallel_context(parameter_broadcast=False)
->>> context.set_auto_parallel_context(strategy_ckpt_load_file="./strategy_stage1.ckpt")
->>> context.set_auto_parallel_context(strategy_ckpt_save_file="./strategy_stage1.ckpt")
->>> context.set_auto_parallel_context(dataset_strategy=((1, 8), (1, 8)))
->>> context.set_auto_parallel_context(enable_parallel_optimizer=False)
->>> context.set_auto_parallel_context(enable_alltoall=False)
->>> context.set_auto_parallel_context(all_reduce_fusion_config=[8, 160])
->>> context.set_auto_parallel_context(pipeline_stages=2)
+>>> from mindspore import set_auto_parallel_context
+>>> set_auto_parallel_context(device_num=8)
+>>> set_auto_parallel_context(global_rank=0)
+>>> set_auto_parallel_context(gradients_mean=True)
+>>> set_auto_parallel_context(gradient_fp32_sync=False)
+>>> set_auto_parallel_context(parallel_mode="auto_parallel")
+>>> set_auto_parallel_context(search_mode="dynamic_programming")
+>>> set_auto_parallel_context(auto_parallel_search_mode="dynamic_programming")
+>>> set_auto_parallel_context(parameter_broadcast=False)
+>>> set_auto_parallel_context(strategy_ckpt_load_file="./strategy_stage1.ckpt")
+>>> set_auto_parallel_context(strategy_ckpt_save_file="./strategy_stage1.ckpt")
+>>> set_auto_parallel_context(dataset_strategy=((1, 8), (1, 8)))
+>>> set_auto_parallel_context(enable_parallel_optimizer=False)
+>>> set_auto_parallel_context(enable_alltoall=False)
+>>> set_auto_parallel_context(all_reduce_fusion_config=[8, 160])
+>>> set_auto_parallel_context(pipeline_stages=2)
>>> parallel_config = {"gradient_accumulation_shard": True, "parallel_optimizer_threshold": 24}
->>> context.set_auto_parallel_context(parallel_optimizer_config=parallel_config, enable_parallel_optimizer=True)
+>>> set_auto_parallel_context(parallel_optimizer_config=parallel_config, enable_parallel_optimizer=True)
>>> config = {"allreduce": {"mode": "size", "config": 32}, "allgather": {"mode": "size", "config": 32}}
->>> context.set_auto_parallel_context(comm_fusion=config)
+>>> set_auto_parallel_context(comm_fusion=config)
"""
_set_auto_parallel_context(**kwargs)
@@ -582,9 +582,9 @@ def get_auto_parallel_context(attr_key):
ValueError: If input key is not attribute in auto parallel context.
Examples:
->>> from mindspore import context
->>> parallel_mode = context.get_auto_parallel_context("parallel_mode")
->>> dataset_strategy = context.get_auto_parallel_context("dataset_strategy")
+>>> from mindspore import get_auto_parallel_context
+>>> parallel_mode = get_auto_parallel_context("parallel_mode")
+>>> dataset_strategy = get_auto_parallel_context("dataset_strategy")
"""
return _get_auto_parallel_context(attr_key)
@@ -864,32 +864,32 @@ def set_context(**kwargs):
ValueError: If input key is not an attribute in context.
Examples:
->>> from mindspore import context
->>> context.set_context(mode=context.PYNATIVE_MODE)
->>> context.set_context(precompile_only=True)
->>> context.set_context(device_target="Ascend")
->>> context.set_context(device_id=0)
->>> context.set_context(save_graphs=True, save_graphs_path="./model.ms")
->>> context.set_context(enable_reduce_precision=True)
->>> context.set_context(enable_dump=True, save_dump_path=".")
->>> context.set_context(enable_graph_kernel=True)
->>> context.set_context(graph_kernel_flags="--opt_level=2 --dump_as_text")
->>> context.set_context(reserve_class_name_in_scope=True)
->>> context.set_context(variable_memory_max_size="6GB")
->>> context.set_context(enable_profiling=True,
+>>> from mindspore import set_context, PYNATIVE_MODE
+>>> set_context(mode=PYNATIVE_MODE)
+>>> set_context(precompile_only=True)
+>>> set_context(device_target="Ascend")
+>>> set_context(device_id=0)
+>>> set_context(save_graphs=True, save_graphs_path="./model.ms")
+>>> set_context(enable_reduce_precision=True)
+>>> set_context(enable_dump=True, save_dump_path=".")
+>>> set_context(enable_graph_kernel=True)
+>>> set_context(graph_kernel_flags="--opt_level=2 --dump_as_text")
+>>> set_context(reserve_class_name_in_scope=True)
+>>> set_context(variable_memory_max_size="6GB")
+>>> set_context(enable_profiling=True,
... profiling_options='{"output":"/home/data/output","training_trace":"on"}')
->>> context.set_context(check_bprop=True)
->>> context.set_context(max_device_memory="3.5GB")
->>> context.set_context(mempool_block_size="1GB")
->>> context.set_context(print_file_path="print.pb")
->>> context.set_context(enable_sparse=True)
->>> context.set_context(max_call_depth=80)
->>> context.set_context(env_config_path="./env_config.json")
->>> context.set_context(auto_tune_mode="GA,RL")
->>> context.set_context(grad_for_scalar=True)
->>> context.set_context(enable_compile_cache=True, compile_cache_path="./cache.ms")
->>> context.set_context(pynative_synchronize=True)
->>> context.set_context(runtime_num_threads=10)
+>>> set_context(check_bprop=True)
+>>> set_context(max_device_memory="3.5GB")
+>>> set_context(mempool_block_size="1GB")
+>>> set_context(print_file_path="print.pb")
+>>> set_context(enable_sparse=True)
+>>> set_context(max_call_depth=80)
+>>> set_context(env_config_path="./env_config.json")
+>>> set_context(auto_tune_mode="GA,RL")
+>>> set_context(grad_for_scalar=True)
+>>> set_context(enable_compile_cache=True, compile_cache_path="./cache.ms")
+>>> set_context(pynative_synchronize=True)
+>>> set_context(runtime_num_threads=10)
"""
ctx = _context()
# set device target first
@@ -936,9 +936,9 @@ def get_context(attr_key):
Raises:
ValueError: If input key is not an attribute in context.
Examples:
->>> from mindspore import context
->>> context.get_context("device_target")
->>> context.get_context("device_id")
+>>> from mindspore import get_context
+>>> get_context("device_target")
+>>> get_context("device_id")
"""
ctx = _context()
device = ctx.get_param(ms_ctx_param.device_target)
@@ -1027,8 +1027,8 @@ def set_ps_context(**kwargs):
ValueError: If input key is not the attribute in parameter server training mode context.
Examples:
->>> from mindspore import context
->>> context.set_ps_context(enable_ps=True, enable_ssl=True, client_password='123456', server_password='123456')
+>>> from mindspore import set_ps_context
+>>> set_ps_context(enable_ps=True, enable_ssl=True, client_password='123456', server_password='123456')
"""
_set_ps_context(**kwargs)
@@ -1049,8 +1049,8 @@ def get_ps_context(attr_key):
ValueError: If input key is not attribute in auto parallel context.
Examples:
->>> from mindspore import context
->>> context.get_ps_context("enable_ps")
+>>> from mindspore import get_ps_context
+>>> get_ps_context("enable_ps")
"""
return _get_ps_context(attr_key)
@@ -1144,7 +1144,8 @@ def set_fl_context(**kwargs):
ValueError: If input key is not the attribute in federated learning mode context.
Examples:
->>> context.set_fl_context(enable_fl=True, server_mode='FEDERATED_LEARNING')
+>>> from mindspore import set_fl_context
+>>> set_fl_context(enable_fl=True, server_mode='FEDERATED_LEARNING')
"""
_set_ps_context(**kwargs)
@@ -1164,6 +1165,7 @@ def get_fl_context(attr_key):
ValueError: If input key is not attribute in federated learning mode context.
Examples:
->>> context.get_fl_context("server_mode")
+>>> from mindspore import get_fl_context
+>>> get_fl_context("server_mode")
"""
return _get_ps_context(attr_key)

View File

@@ -154,12 +154,12 @@ class WaitedDSCallback(Callback, DSCallback):
Examples:
>>> import mindspore.nn as nn
>>> from mindspore.dataset import WaitedDSCallback
->>> from mindspore import context
+>>> from mindspore import set_context, GRAPH_MODE
>>> from mindspore import Model
->>> from mindspore.train.callback import Callback
+>>> from mindspore import Callback
>>> import mindspore.dataset as ds
>>>
->>> context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
+>>> set_context(mode=GRAPH_MODE, device_target="CPU")
>>>
>>> # custom callback class for data synchronization in data pipeline
>>> class MyWaitedCallback(WaitedDSCallback):

View File

@@ -279,7 +279,7 @@ def sync_wait_for_dataset(rank_id, rank_size, current_epoch):
>>> # Create a synchronization callback
>>>
>>> from mindspore.dataset import sync_wait_for_dataset
->>> from mindspore.train.callback import Callback
+>>> from mindspore import Callback
>>>
>>> class SyncForDataset(Callback):
... def __init__(self):

View File

@@ -890,9 +890,9 @@ class Cell(Cell_):
Examples:
>>> import numpy as np
>>> import mindspore
->>> from mindspore import nn, Tensor, context
+>>> from mindspore import nn, Tensor, set_context, GRAPH_MODE
>>>
->>> context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
+>>> set_context(mode=GRAPH_MODE, device_target="Ascend")
>>> class reluNet(nn.Cell):
... def __init__(self):
... super(reluNet, self).__init__()
@@ -1709,9 +1709,9 @@ class Cell(Cell_):
>>> import mindspore
>>> import mindspore.nn as nn
>>> from mindspore import Tensor
->>> from mindspore import context
+>>> from mindspore import set_context, PYNATIVE_MODE
>>> from mindspore.ops import GradOperation
->>> context.set_context(mode=context.PYNATIVE_MODE)
+>>> set_context(mode=PYNATIVE_MODE)
>>> def forward_pre_hook_fn(cell_id, inputs):
... print("forward inputs: ", inputs)
...
@@ -1810,9 +1810,9 @@ class Cell(Cell_):
>>> import mindspore
>>> import mindspore.nn as nn
>>> from mindspore import Tensor
->>> from mindspore import context
+>>> from mindspore import set_context, PYNATIVE_MODE
>>> from mindspore.ops import GradOperation
->>> context.set_context(mode=context.PYNATIVE_MODE)
+>>> set_context(mode=PYNATIVE_MODE)
>>> def forward_hook_fn(cell_id, inputs, output):
... print("forward inputs: ", inputs)
... print("forward output: ", output)
@@ -1916,9 +1916,9 @@ class Cell(Cell_):
>>> import mindspore
>>> import mindspore.nn as nn
>>> from mindspore import Tensor
->>> from mindspore import context
+>>> from mindspore import set_context, PYNATIVE_MODE
>>> from mindspore.ops import GradOperation
->>> context.set_context(mode=context.PYNATIVE_MODE)
+>>> set_context(mode=PYNATIVE_MODE)
>>> def backward_hook_fn(cell_id, grad_input, grad_output):
... print("backward input: ", grad_input)
... print("backward output: ", grad_output)

View File

@@ -712,16 +712,16 @@ class SyncBatchNorm(_BatchNorm):
>>> # Variables, Calling the Collective Communication Library, Running the Script.
>>> import numpy as np
>>> from mindspore.communication import init
->>> from mindspore import context
+>>> from mindspore import set_context, GRAPH_MODE, reset_auto_parallel_context, set_auto_parallel_context
>>> from mindspore import ParallelMode
>>> from mindspore import Tensor
>>> from mindspore import nn
>>> from mindspore import dtype as mstype
>>>
->>> context.set_context(mode=context.GRAPH_MODE)
+>>> set_context(mode=GRAPH_MODE)
>>> init()
->>> context.reset_auto_parallel_context()
->>> context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL)
+>>> reset_auto_parallel_context()
+>>> set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL)
>>> sync_bn_op = nn.SyncBatchNorm(num_features=3, process_groups=[[0, 1], [2, 3]])
>>> x = Tensor(np.ones([1, 3, 2, 2]), mstype.float32)
>>> output = sync_bn_op(x)

View File

@@ -337,8 +337,8 @@ def thor(net, learning_rate, damping, momentum, weight_decay=0.0, loss_scale=1.0
>>> from mindspore.nn import thor
>>> from mindspore import Model
>>> from mindspore import FixedLossScaleManager
->>> from mindspore.train.callback import LossMonitor
->>> from mindspore.train.train_thor import ConvertModelUtils
+>>> from mindspore import LossMonitor
+>>> from mindspore import ConvertModelUtils
>>> from mindspore import nn
>>> from mindspore import Tensor
>>>

View File

@@ -255,7 +255,7 @@ For DNN researchers who are unfamiliar with Bayesian models, MDP provides high-l
1. Define a Deep Neural Network. The LeNet is used in this example.
```python
-from mindspore.common.initializer import TruncatedNormal
+from mindspore import TruncatedNormal
import mindspore.nn as nn
import mindspore.ops.operations as P

View File

@@ -44,8 +44,8 @@ class SparseToDense(Cell):
>>> import mindspore as ms
>>> from mindspore import Tensor, COOTensor
>>> import mindspore.nn as nn
->>> import mindspore.context as context
->>> context.set_context(mode=context.PYNATIVE_MODE)
+>>> from mindspore import set_context, PYNATIVE_MODE
+>>> set_context(mode=PYNATIVE_MODE)
>>> indices = Tensor([[0, 1], [1, 2]])
>>> values = Tensor([1, 2], dtype=ms.int32)
>>> dense_shape = (3, 4)

View File

@@ -312,15 +312,15 @@ class DistributedGradReducer(Cell):
>>> import numpy as np
>>> from mindspore.communication import init
>>> from mindspore import ops
->>> from mindspore import context
+>>> from mindspore import set_context, reset_auto_parallel_context, set_auto_parallel_context, GRAPH_MODE
>>> from mindspore import ParallelMode
>>> from mindspore import Parameter, Tensor
>>> from mindspore import nn
>>>
->>> context.set_context(mode=context.GRAPH_MODE)
+>>> set_context(mode=GRAPH_MODE)
>>> init()
->>> context.reset_auto_parallel_context()
->>> context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL)
+>>> reset_auto_parallel_context()
+>>> set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL)
>>>
>>> class TrainingWrapper(nn.Cell):
... def __init__(self, network, optimizer, sens=1.0):

View File

@@ -973,8 +973,8 @@ def unique(x, return_inverse=False):
Examples:
>>> import mindspore.numpy as np
->>> from mindspore import context
->>> context.set_context(mode=context.GRAPH_MODE)
+>>> from mindspore import set_context, GRAPH_MODE
+>>> set_context(mode=GRAPH_MODE)
>>> input_x = np.asarray([1, 2, 2, 2, 3, 4, 5]).astype('int32')
>>> output_x = np.unique(input_x)
>>> print(output_x)

View File

@@ -192,10 +192,10 @@ def grad(fn, grad_position=0, sens_param=False):
Examples:
>>> import numpy as np
>>> import mindspore.nn as nn
->>> import mindspore.context as context
+>>> from mindspore import set_context, GRAPH_MODE
>>> from mindspore import Tensor
>>> from mindspore.ops.functional import grad
->>> context.set_context(mode=context.GRAPH_MODE)
+>>> set_context(mode=GRAPH_MODE)
>>> class Net(nn.Cell):
... def construct(self, x, y, z):
... return x*y*z
@@ -282,11 +282,11 @@ def jet(fn, primals, series):
Examples:
>>> import numpy as np
>>> import mindspore.nn as nn
->>> import mindspore.context as context
+>>> from mindspore import set_context, GRAPH_MODE
>>> import mindspore.ops as P
>>> from mindspore import Tensor
>>> from mindspore.ops.functional import jet
->>> context.set_context(mode=context.GRAPH_MODE)
+>>> set_context(mode=GRAPH_MODE)
>>> class Net(nn.Cell):
... def __init__(self):
... super().__init__()
@@ -358,11 +358,11 @@ def derivative(fn, primals, order):
Examples:
>>> import numpy as np
>>> import mindspore.nn as nn
->>> import mindspore.context as context
+>>> from mindspore import set_context, GRAPH_MODE
>>> import mindspore.ops as P
>>> from mindspore import Tensor
>>> from mindspore.ops.functional import derivative
->>> context.set_context(mode=context.GRAPH_MODE)
+>>> set_context(mode=GRAPH_MODE)
>>> class Net(nn.Cell):
... def __init__(self):
... super().__init__()

View File

@@ -662,6 +662,10 @@ class GpuConvertToDynamicShape(PrimitiveWithCheck):
Examples:
>>> # make a model, since dynamic shape operators must be in GRAPH_MODE
>>> import mindspore.nn as nn
+>>> from mindspore import set_context, GRAPH_MODE
+>>> from mindspore.ops.operations import _inner_ops as inner
+>>> from mindspore.ops import operations as P
>>> class TestDynamicShapeReshapeNet(nn.Cell):
>>> def __init__(self):
>>> super(TestDynamicShapeReshapeNet, self).__init__()
@@ -673,7 +677,7 @@ class GpuConvertToDynamicShape(PrimitiveWithCheck):
>>> dynamic_shape_input = self.convert_to_dynamic_shape(input)
>>> reshaped_input = self.reshape(input, new_shape)
>>>
->>> context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
+>>> set_context(mode=GRAPH_MODE, device_target="GPU")
>>> input = Tensor(np.array([0, 1, 2, 3]))
>>> new_shape = (2, 2)
>>> net = TestDynamicShapeReshapeNet()
@@ -706,6 +710,10 @@ class ErrorOnDynamicShapeInput(PrimitiveWithInfer):
Examples:
>>> # make a model, since dynamic shape operators must be in GRAPH_MODE
>>> import mindspore.nn as nn
+>>> from mindspore.ops.operations import _inner_ops as inner
+>>> from mindspore.ops import operations as P
+>>> from mindspore import set_context, GRAPH_MODE
>>> class AssertDynamicShapeNet(nn.Cell):
>>> def __init__(self):
>>> super(AssertDynamicShapeNet, self).__init__()
@@ -716,7 +724,7 @@ class ErrorOnDynamicShapeInput(PrimitiveWithInfer):
>>> dynamic_shape_input = self.convert_to_dynamic_shape(input)
>>> self.error_on_dynamic_shape_input(dynamic_shape_input)
>>>
->>> context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
+>>> set_context(mode=GRAPH_MODE, device_target="GPU")
>>> input = Tensor(np.array([0]))
>>> net = AssertDynamicShapeNet()
>>> output = net(input)
@@ -1774,10 +1782,10 @@ class CellBackwardHook(PrimitiveWithInfer):
Examples:
>>> import mindspore
>>> from mindspore import Tensor
->>> from mindspore import context
+>>> from mindspore import set_context, PYNATIVE_MODE
>>> from mindspore.ops import GradOperation
>>> from mindspore.ops.operations import _inner_ops as inner
->>> context.set_context(mode=context.PYNATIVE_MODE)
+>>> set_context(mode=PYNATIVE_MODE)
>>> def hook_fn(grad):
... print(grad)
...

View File

@@ -6168,7 +6168,6 @@ class EditDistance(PrimitiveWithInfer):
Examples:
>>> import numpy as np
->>> from mindspore import context
>>> from mindspore import Tensor
>>> import mindspore.nn as nn
>>> import mindspore.ops as ops

View File

@@ -209,9 +209,9 @@ class AllGather(PrimitiveWithInfer):
>>> import mindspore.ops as ops
>>> import mindspore.nn as nn
>>> from mindspore.communication import init
->>> from mindspore import Tensor, context
+>>> from mindspore import Tensor, set_context, GRAPH_MODE
>>>
->>> context.set_context(mode=context.GRAPH_MODE)
+>>> set_context(mode=GRAPH_MODE)
>>> init()
>>> class Net(nn.Cell):
... def __init__(self):
@@ -405,14 +405,14 @@ class ReduceScatter(PrimitiveWithInfer):
Examples:
>>> # This example should be run with two devices. Refer to the tutorial > Distributed Training on mindspore.cn
->>> from mindspore import Tensor, context
+>>> from mindspore import Tensor, set_context, GRAPH_MODE
>>> from mindspore.communication import init
>>> from mindspore.ops import ReduceOp
>>> import mindspore.nn as nn
>>> import mindspore.ops as ops
>>> import numpy as np
>>>
->>> context.set_context(mode=context.GRAPH_MODE)
+>>> set_context(mode=GRAPH_MODE)
>>> init()
>>> class Net(nn.Cell):
... def __init__(self):
@@ -549,13 +549,13 @@ class Broadcast(PrimitiveWithInfer):
>>> # on mindspore.cn and focus on the contents of these three parts: Configuring Distributed Environment
>>> # Variables, Calling the Collective Communication Library, Running The Script.
>>> from mindspore import Tensor
->>> from mindspore import context
+>>> from mindspore import set_context, GRAPH_MODE
>>> from mindspore.communication import init
>>> import mindspore.nn as nn
>>> import mindspore.ops as ops
>>> import numpy as np
>>>
->>> context.set_context(mode=context.GRAPH_MODE)
+>>> set_context(mode=GRAPH_MODE)
>>> init()
>>> class Net(nn.Cell):
... def __init__(self):
@@ -682,7 +682,7 @@ class NeighborExchange(Primitive):
>>> import os
>>> import mindspore as ms
>>> from mindspore import Tensor
->>> from mindspore import context
+>>> from mindspore import set_context, GRAPH_MODE
>>> from mindspore.communication import init
>>> import mindspore.nn as nn
>>> import mindspore.ops as ops
@@ -698,7 +698,7 @@ class NeighborExchange(Primitive):
... def construct(self, x):
... out = self.neighborexchange((x,))
...
->>> context.set_context(mode=context.GRAPH_MODE, device_target='Ascend')
+>>> set_context(mode=GRAPH_MODE, device_target='Ascend')
>>> init()
>>> net = Net()
>>> input_x = Tensor(np.ones([3, 3]), dtype = ms.float32)
@@ -759,7 +759,7 @@ class AlltoAll(PrimitiveWithInfer):
>>> import os
>>> import mindspore as ms
>>> from mindspore import Tensor
->>> from mindspore import context
+>>> from mindspore import set_context, GRAPH_MODE
>>> from mindspore.communication import init
>>> import mindspore.nn as nn
>>> import mindspore.ops as ops
@@ -773,7 +773,7 @@ class AlltoAll(PrimitiveWithInfer):
... out = self.alltoall(x)
... return out
...
->>> context.set_context(mode=context.GRAPH_MODE, device_target='Ascend')
+>>> set_context(mode=GRAPH_MODE, device_target='Ascend')
>>> init()
>>> net = Net()
>>> rank_id = int(os.getenv("RANK_ID"))
@@ -853,7 +853,7 @@ class NeighborExchangeV2(Primitive):
>>> import os
>>> import mindspore as ms
>>> from mindspore import Tensor
->>> from mindspore import context
+>>> from mindspore import set_context, GRAPH_MODE
>>> from mindspore.communication import init
>>> import mindspore.nn as nn
>>> import mindspore.ops as ops
@@ -871,7 +871,7 @@ class NeighborExchangeV2(Primitive):
... out = self.neighborexchangev2(x)
... return out
...
->>> context.set_context(mode=context.GRAPH_MODE, device_target='Ascend')
+>>> set_context(mode=GRAPH_MODE, device_target='Ascend')
>>> init()
>>> input_x = Tensor(np.ones([1, 1, 2, 2]), dtype = ms.float32)
>>> net = Net()

View File

@@ -358,9 +358,9 @@ class HookBackward(PrimitiveWithInfer):
>>> import mindspore
>>> from mindspore import ops
>>> from mindspore import Tensor
->>> from mindspore import context
+>>> from mindspore import set_context, PYNATIVE_MODE
>>> from mindspore.ops import GradOperation
->>> context.set_context(mode=context.PYNATIVE_MODE)
+>>> set_context(mode=PYNATIVE_MODE)
>>> def hook_fn(grad):
... print(grad)
...

View File

@@ -455,7 +455,7 @@ class FusedCastAdamWeightDecay(PrimitiveWithInfer):
Examples:
>>> import numpy as np
->>> import mindspore.context as context
+>>> from mindspore import set_context, GRAPH_MODE
>>> import mindspore.nn as nn
>>> import mindspore.ops as ops
>>> from mindspore import Tensor, Parameter
@@ -470,7 +470,7 @@ class FusedCastAdamWeightDecay(PrimitiveWithInfer):
... def construct(self, lr, beta1, beta2, epsilon, decay, grad):
... out = self.opt(self.var, self.m, self.v, lr, beta1, beta2, epsilon, decay, grad)
... return out
->>> context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
+>>> set_context(mode=GRAPH_MODE, device_target="CPU")
>>> net = Net()
>>> gradient = Tensor(np.ones([2, 2]), mstype.float16)
>>> output = net(0.001, 0.9, 0.999, 1e-8, 0.0, gradient)
@@ -584,7 +584,7 @@ class FusedAdaFactor(PrimitiveWithInfer):
Examples:
>>> import numpy as np
->>> import mindspore.context as context
+>>> from mindspore import set_context, GRAPH_MODE
>>> import mindspore.nn as nn
>>> import mindspore.ops as ops
>>> from mindspore import Tensor, Parameter
@@ -604,7 +604,7 @@ class FusedAdaFactor(PrimitiveWithInfer):
... out = self.opt(epsilon, clip_threshold, beta1, beta2, weight_decay, lr, grad, self.param,
... self.exp_avg, self.exp_avg_sq_row, self.exp_avg_sq_col, self.exp_avg_sq)
... return out
->>> context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
+>>> set_context(mode=GRAPH_MODE, device_target="CPU")
>>> net = Net()
>>> gradient = Tensor(np.ones(param_shape), mstype.float32)
>>> net((1e-30, 1e-3), 1.0, 0.9, 0.8, 1e-2, 0.03, gradient)

View File

@@ -197,7 +197,8 @@ def _set_ps_context(**kwargs):
ValueError: If input key is not the attribute in parameter server training mode context.
Examples:
->>> context.set_ps_context(enable_ps=True, enable_ssl=True, client_password='123456', server_password='123456')
+>>> from mindspore import set_ps_context
+>>> set_ps_context(enable_ps=True, enable_ssl=True, client_password='123456', server_password='123456')
"""
kwargs = _check_conflict_value(kwargs)
for key, value in kwargs.items():

View File

@@ -78,8 +78,8 @@ class Profiler:
Examples:
>>> import numpy as np
->>> from mindspore import nn, context
->>> from mindspore import Model
+>>> from mindspore import nn
+>>> from mindspore import Model, set_context, GRAPH_MODE
>>> import mindspore.dataset as ds
>>> from mindspore import Profiler
>>>
@@ -104,7 +104,7 @@ class Profiler:
>>>
>>> if __name__ == '__main__':
... # If the device_target is GPU, set the device_target to "GPU"
-... context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
+... set_context(mode=GRAPH_MODE, device_target="Ascend")
...
... # Init Profiler
... # Note that the Profiler should be initialized before model.train

View File

@@ -25,8 +25,16 @@ from .loss_scale_manager import LossScaleManager, FixedLossScaleManager, Dynamic
from .serialization import save_checkpoint, load_checkpoint, load_param_into_net, export, load, parse_print,\
build_searched_strategy, merge_sliced_parameter, load_distributed_checkpoint, async_ckpt_thread_status,\
restore_group_info_list
from .callback import Callback, LossMonitor, TimeMonitor, ModelCheckpoint, SummaryCollector, CheckpointConfig, \
RunContext, LearningRateScheduler, SummaryLandscape, FederatedLearningManager, History, LambdaCallback
from .summary import SummaryRecord
+from .train_thor import ConvertNetUtils, ConvertModelUtils
+__all__ = ["Model", "DatasetHelper", "amp", "connect_network_with_dataset", "build_train_network", "LossScaleManager",
+"FixedLossScaleManager", "DynamicLossScaleManager", "save_checkpoint", "load_checkpoint",
+"load_param_into_net", "export", "load", "parse_print", "build_searched_strategy", "merge_sliced_parameter",
+"load_distributed_checkpoint", "async_ckpt_thread_status", "restore_group_info_list"]
+__all__.extend(callback.__all__)
+__all__.extend(summary.__all__)
+__all__.extend(train_thor.__all__)
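
The hunk above is the mechanism behind the shortened imports elsewhere in this PR: the package `__init__.py` re-exports the public classes and then extends its `__all__` with each submodule's `__all__`. A minimal self-contained sketch of the same pattern, with hypothetical `mypkg` and `callback` names:

```python
# mypkg/callback.py (hypothetical submodule)
__all__ = ["Callback", "LossMonitor"]

class Callback:
    """Base class for training callbacks."""

class LossMonitor(Callback):
    """Logs the loss at each step."""

# mypkg/__init__.py (hypothetical package root)
# Re-export the submodule's names so that
# `from mypkg import LossMonitor` works alongside
# `from mypkg.callback import LossMonitor`.
from . import callback
from .callback import Callback, LossMonitor

__all__ = []
__all__.extend(callback.__all__)
```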

View File

@@ -87,7 +87,7 @@ class Callback:
Examples:
>>> import numpy as np
>>> from mindspore import Model, nn
->>> from mindspore.train.callback import Callback
+>>> from mindspore import Callback
>>> from mindspore import dataset as ds
>>> class Print_info(Callback):
... def step_end(self, run_context):

View File

@@ -104,8 +104,8 @@ class CheckpointConfig:
Examples:
>>> from mindspore import Model, nn
->>> from mindspore.train.callback import ModelCheckpoint, CheckpointConfig
->>> from mindspore.common.initializer import Normal
+>>> from mindspore import ModelCheckpoint, CheckpointConfig
+>>> from mindspore import Normal
>>>
>>> class LeNet5(nn.Cell):
... def __init__(self, num_class=10, num_channel=1):

View File

@@ -34,7 +34,7 @@ class History(Callback):
Examples:
>>> import numpy as np
>>> import mindspore.dataset as ds
->>> from mindspore.train.callback import History
+>>> from mindspore import History
>>> from mindspore import Model, nn
>>> data = {"x": np.float32(np.random.rand(64, 10)), "y": np.random.randint(0, 5, (64,))}
>>> train_dataset = ds.NumpySlicesDataset(data=data).batch(32)

View File

@@ -39,7 +39,7 @@ class LambdaCallback(Callback):
Examples:
>>> import numpy as np
>>> import mindspore.dataset as ds
->>> from mindspore.train.callback import LambdaCallback
+>>> from mindspore import LambdaCallback
>>> from mindspore import Model, nn
>>> data = {"x": np.float32(np.random.rand(64, 10)), "y": np.random.randint(0, 5, (64,))}
>>> train_dataset = ds.NumpySlicesDataset(data=data).batch(32)

View File

@@ -178,14 +178,14 @@ class SummaryLandscape:
Examples:
>>> import mindspore.nn as nn
->>> from mindspore import context
->>> from mindspore.train.callback import SummaryCollector, SummaryLandscape
+>>> from mindspore import set_context, GRAPH_MODE
+>>> from mindspore import SummaryCollector, SummaryLandscape
>>> from mindspore import Model
>>> from mindspore.nn import Loss, Accuracy
>>>
>>> if __name__ == '__main__':
... # If the device_target is Ascend, set the device_target to "Ascend"
-... context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
+... set_context(mode=GRAPH_MODE, device_target="GPU")
... mnist_dataset_dir = '/path/to/mnist_dataset_directory'
... # The detail of create_dataset method shown in model_zoo.official.cv.lenet.src.dataset.py
... ds_train = create_dataset(mnist_dataset_dir, 32)

View File

@@ -34,7 +34,7 @@ class LearningRateScheduler(Callback):
Examples:
>>> import numpy as np
>>> from mindspore import Model
->>> from mindspore.train.callback import LearningRateScheduler
+>>> from mindspore import LearningRateScheduler
>>> import mindspore.nn as nn
>>> from mindspore import dataset as ds
...

View File

@@ -175,14 +175,14 @@ class SummaryCollector(Callback):
Examples:
>>> import mindspore.nn as nn
->>> from mindspore import context
->>> from mindspore.train.callback import SummaryCollector
+>>> from mindspore import set_context, GRAPH_MODE
+>>> from mindspore import SummaryCollector
>>> from mindspore import Model
>>> from mindspore.nn import Accuracy
>>>
>>> if __name__ == '__main__':
... # If the device_target is GPU, set the device_target to "GPU"
-... context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
+... set_context(mode=GRAPH_MODE, device_target="Ascend")
... mnist_dataset_dir = '/path/to/mnist_dataset_directory'
... # The detail of create_dataset method shown in model_zoo.official.cv.lenet.src.dataset.py
... ds_train = create_dataset(mnist_dataset_dir, 32)

View File

@@ -1182,13 +1182,13 @@ class Model:
>>> # mindspore.cn.
>>> import numpy as np
>>> import mindspore as ms
->>> from mindspore import Model, context, Tensor, nn, FixedLossScaleManager
->>> from mindspore import ParallelMode
+>>> from mindspore import Model, set_context, Tensor, nn, FixedLossScaleManager, GRAPH_MODE
+>>> from mindspore import ParallelMode, set_auto_parallel_context
>>> from mindspore.communication import init
>>>
->>> context.set_context(mode=context.GRAPH_MODE)
+>>> set_context(mode=GRAPH_MODE)
>>> init()
->>> context.set_auto_parallel_context(parallel_mode=ParallelMode.SEMI_AUTO_PARALLEL)
+>>> set_auto_parallel_context(parallel_mode=ParallelMode.SEMI_AUTO_PARALLEL)
>>>
>>> # For details about how to build the dataset, please refer to the tutorial
>>> # document on the official website.
@@ -1238,13 +1238,13 @@ class Model:
>>> # mindspore.cn.
>>> import numpy as np
>>> import mindspore as ms
->>> from mindspore import Model, context, Tensor
->>> from mindspore import ParallelMode
+>>> from mindspore import Model, set_context, Tensor, GRAPH_MODE
+>>> from mindspore import ParallelMode, set_auto_parallel_context
>>> from mindspore.communication import init
>>>
->>> context.set_context(mode=context.GRAPH_MODE)
+>>> set_context(mode=GRAPH_MODE)
>>> init()
->>> context.set_auto_parallel_context(full_batch=True, parallel_mode=ParallelMode.SEMI_AUTO_PARALLEL)
+>>> set_auto_parallel_context(full_batch=True, parallel_mode=ParallelMode.SEMI_AUTO_PARALLEL)
>>> input_data = Tensor(np.random.randint(0, 255, [1, 1, 32, 32]), ms.float32)
>>> model = Model(Net())
>>> predict_map = model.infer_predict_layout(input_data)

View File

@@ -1190,8 +1190,8 @@ def parse_print(print_file_name):
>>> import numpy as np
>>> import mindspore.ops as ops
>>> from mindspore import nn
->>> from mindspore import Tensor, context
->>> context.set_context(mode=context.GRAPH_MODE, print_file_path='log.data')
+>>> from mindspore import Tensor, set_context, GRAPH_MODE
+>>> set_context(mode=GRAPH_MODE, print_file_path='log.data')
>>> class PrintInputTensor(nn.Cell):
... def __init__(self):
... super().__init__()

View File

@@ -136,7 +136,7 @@ class SummaryRecord:
ValueError: The Summary is not supported; please recompile the source without `-s on`.
Examples:
->>> from mindspore.train.summary import SummaryRecord
+>>> from mindspore import SummaryRecord
>>> if __name__ == '__main__':
... # use in with statement to auto close
... with SummaryRecord(log_dir="./summary_dir") as summary_record:
@@ -209,7 +209,7 @@ class SummaryRecord:
ValueError: `mode` is not one of the optional values.
Examples:
->>> from mindspore.train.summary import SummaryRecord
+>>> from mindspore import SummaryRecord
>>> if __name__ == '__main__':
... with SummaryRecord(log_dir="./summary_dir", file_prefix="xx_", file_suffix="_yy") as summary_record:
... summary_record.set_mode('eval')
@@ -270,7 +270,7 @@ class SummaryRecord:
Examples:
>>> from mindspore import Tensor
->>> from mindspore.train.summary import SummaryRecord
+>>> from mindspore import SummaryRecord
>>> if __name__ == '__main__':
... with SummaryRecord(log_dir="./summary_dir", file_prefix="xx_", file_suffix="_yy") as summary_record:
... summary_record.add_value('scalar', 'loss', Tensor(0.1))
@@ -325,7 +325,7 @@ class SummaryRecord:
<https://www.mindspore.cn/docs/en/master/api_python/nn/mindspore.nn.Cell.html#mindspore-nn-cell>`_.
Examples:
->>> from mindspore.train.summary import SummaryRecord
+>>> from mindspore import SummaryRecord
>>> if __name__ == '__main__':
... with SummaryRecord(log_dir="./summary_dir", file_prefix="xx_", file_suffix="_yy") as summary_record:
... result = summary_record.record(step=2)
@@ -438,7 +438,7 @@ class SummaryRecord:
str, the full path of the log file.
Examples:
->>> from mindspore.train.summary import SummaryRecord
+>>> from mindspore import SummaryRecord
>>> if __name__ == '__main__':
... with SummaryRecord(log_dir="./summary_dir", file_prefix="xx_", file_suffix="_yy") as summary_record:
... log_dir = summary_record.log_dir
@@ -452,7 +452,7 @@ class SummaryRecord:
Call it to make sure that all pending events have been written to disk.
Examples:
->>> from mindspore.train.summary import SummaryRecord
+>>> from mindspore import SummaryRecord
>>> if __name__ == '__main__':
... with SummaryRecord(log_dir="./summary_dir", file_prefix="xx_", file_suffix="_yy") as summary_record:
... summary_record.flush()
@@ -468,7 +468,7 @@ class SummaryRecord:
Flush the buffer, write files to disk, and close the summary records. Please use the `with` statement to close them automatically.
Examples:
->>> from mindspore.train.summary import SummaryRecord
+>>> from mindspore import SummaryRecord
>>> if __name__ == '__main__':
... try:
... summary_record = SummaryRecord(log_dir="./summary_dir")

View File

@@ -217,8 +217,8 @@ class ConvertModelUtils:
>>> from mindspore.nn import thor
>>> from mindspore import Model
>>> from mindspore import FixedLossScaleManager
->>> from mindspore.train.callback import LossMonitor
->>> from mindspore.train.train_thor import ConvertModelUtils
+>>> from mindspore import LossMonitor
+>>> from mindspore import ConvertModelUtils
>>>
>>> net = Net()
>>> dataset = create_dataset()