!33239 Adjust the import specification of initializer, context and train
Merge pull request !33239 from 冯一航/adjust_import_spec_replenish
Commit: 2b13573044
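The whole diff applies one pattern: symbols that previously had to be imported from submodules (`mindspore.common.initializer`, `mindspore.context`, `mindspore.train.callback`, `mindspore.train.train_thor`) become top-level exports of the `mindspore` package, and docstring examples switch from the `context` module to the functional `set_context`/`get_context` API. A minimal before/after sketch of the usage this commit documents follows; the tensor shape and the CPU target are illustrative, not taken from the diff:

```python
import mindspore

# Old style (before this commit): configuration through the context module,
# initializer classes imported from mindspore.common.initializer.
#   from mindspore import context
#   from mindspore.common.initializer import initializer, Normal
#   context.set_context(mode=context.GRAPH_MODE, device_target="CPU")

# New style (after this commit): the same symbols come straight from the
# top-level package.
from mindspore import set_context, get_context, GRAPH_MODE, Normal
from mindspore.common.initializer import initializer  # initializer() itself stays here

set_context(mode=GRAPH_MODE, device_target="CPU")
print(get_context("device_target"))

# Initializer classes such as Normal are now top-level exports.
x = initializer(Normal(), [1, 2, 3], mindspore.float32)
print(x.init_data())
```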
@@ -26,6 +26,9 @@ from .parameter import Parameter, ParameterTuple
 from .seed import set_seed, get_seed
 from .tensor import Tensor, RowTensor, SparseTensor, COOTensor, CSRTensor
 from .variable import Variable
+from .initializer import Initializer, TruncatedNormal, Normal, \
+Uniform, HeUniform, HeNormal, XavierUniform, One, Zero, Constant, Identity, \
+Sparse, Dirac, Orthogonal, VarianceScaling
 
 # symbols from dtype
 __all__ = [
@@ -50,7 +53,14 @@ __all__ = [
 "complex64", "complex128",
 # __method__ from dtype
 "dtype_to_nptype", "issubclass_", "dtype_to_pytype",
-"pytype_to_dtype", "get_py_obj_dtype"
+"pytype_to_dtype", "get_py_obj_dtype", 'Initializer',
+'TruncatedNormal', 'Normal',
+'Uniform', 'HeUniform',
+'HeNormal', 'XavierUniform',
+'One', 'Zero',
+'Constant', 'Identity',
+'Sparse', 'Dirac',
+'Orthogonal', 'VarianceScaling'
 ]
 
 __all__.extend([
@@ -62,10 +62,10 @@ def set_dump(target, enabled=True):
 >>> import numpy as np
 >>>
 >>> import mindspore.nn as nn
->>> import mindspore.context as context
+>>> from mindspore import set_context, GRAPH_MODE
 >>> from mindspore import Tensor, set_dump
 >>>
->>> context.set_context(device_target="Ascend", mode=context.GRAPH_MODE)
+>>> set_context(device_target="Ascend", mode=GRAPH_MODE)
 >>>
 >>> class MyNet(nn.Cell):
 ... def __init__(self):
@@ -63,9 +63,9 @@ class HookHandle:
 >>> import mindspore
 >>> import mindspore.nn as nn
 >>> from mindspore import Tensor
->>> from mindspore import context
+>>> from mindspore import set_context, PYNATIVE_MODE
 >>> from mindspore.ops import GradOperation
->>> context.set_context(mode=context.PYNATIVE_MODE)
+>>> set_context(mode=PYNATIVE_MODE)
 >>> def forward_pre_hook_fn(cell_id, inputs):
 ... print("forward inputs: ", inputs)
 ...
@@ -94,7 +94,8 @@ class Zero(Initializer):
 
 Examples:
 >>> import mindspore
->>> from mindspore.common.initializer import initializer, Zero
+>>> from mindspore import Zero
+>>> from mindspore.common.initializer import initializer
 >>> tensor1 = initializer(Zero(), [1, 2, 3], mindspore.float32)
 >>> tensor2 = initializer('zeros', [1, 2, 3], mindspore.float32)
 """
@@ -109,7 +110,8 @@ class One(Initializer):
 
 Examples:
 >>> import mindspore
->>> from mindspore.common.initializer import initializer, One
+>>> from mindspore import One
+>>> from mindspore.common.initializer import initializer
 >>> tensor1 = initializer(One(), [1, 2, 3], mindspore.float32)
 >>> tensor2 = initializer('ones', [1, 2, 3], mindspore.float32)
 """
@@ -247,7 +249,8 @@ class XavierUniform(Initializer):
 
 Examples:
 >>> import mindspore
->>> from mindspore.common.initializer import initializer, XavierUniform
+>>> from mindspore import XavierUniform
+>>> from mindspore.common.initializer import initializer
 >>> tensor1 = initializer(XavierUniform(), [1, 2, 3], mindspore.float32)
 >>> tensor2 = initializer('xavier_uniform', [1, 2, 3], mindspore.float32)
 """
@@ -291,7 +294,8 @@ class HeUniform(Initializer):
 
 Examples:
 >>> import mindspore
->>> from mindspore.common.initializer import initializer, HeUniform
+>>> from mindspore import HeUniform
+>>> from mindspore.common.initializer import initializer
 >>> tensor1 = initializer(HeUniform(), [1, 2, 3], mindspore.float32)
 >>> tensor2 = initializer('he_uniform', [1, 2, 3], mindspore.float32)
 """
@@ -337,7 +341,8 @@ class HeNormal(Initializer):
 
 Examples:
 >>> import mindspore
->>> from mindspore.common.initializer import initializer, HeNormal
+>>> from mindspore import HeNormal
+>>> from mindspore.common.initializer import initializer
 >>> tensor1 = initializer(HeNormal(), [1, 2, 3], mindspore.float32)
 >>> tensor2 = initializer('he_normal', [1, 2, 3], mindspore.float32)
 """
@@ -388,7 +393,8 @@ class Identity(Initializer):
 
 Examples:
 >>> import mindspore
->>> from mindspore.common.initializer import initializer, Identity
+>>> from mindspore import Identity
+>>> from mindspore.common.initializer import initializer
 >>> tensor1 = initializer(Identity(), [2, 3], mindspore.float32)
 >>> tensor2 = initializer('identity', [2, 3], mindspore.float32)
 """
@@ -415,7 +421,8 @@ class Sparse(Initializer):
 
 Examples:
 >>> import mindspore
->>> from mindspore.common.initializer import initializer, Sparse
+>>> from mindspore import Sparse
+>>> from mindspore.common.initializer import initializer
 >>> tensor1 = initializer(Sparse(sparsity=0.1, sigma=0.01), [5, 8], mindspore.float32)
 """
 def __init__(self, sparsity, sigma=0.01):
@@ -452,7 +459,8 @@ class Dirac(Initializer):
 
 Examples:
 >>> import mindspore
->>> from mindspore.common.initializer import initializer, Dirac
+>>> from mindspore import Dirac
+>>> from mindspore.common.initializer import initializer
 >>> tensor1 = initializer(Dirac(groups=2), [6, 4, 3, 3], mindspore.float32)
 >>> tensor2 = initializer("dirac", [6, 4, 3, 3], mindspore.float32)
 """
@@ -503,7 +511,8 @@ class Orthogonal(Initializer):
 
 Examples:
 >>> import mindspore
->>> from mindspore.common.initializer import initializer, Orthogonal
+>>> from mindspore import Orthogonal
+>>> from mindspore.common.initializer import initializer
 >>> tensor1 = initializer(Orthogonal(gain=2.), [2, 3, 4], mindspore.float32)
 >>> tensor2 = initializer('orthogonal', [2, 3, 4], mindspore.float32)
 """
@@ -558,7 +567,8 @@ class VarianceScaling(Initializer):
 
 Examples:
 >>> import mindspore
->>> from mindspore.common.initializer import initializer, VarianceScaling
+>>> from mindspore import VarianceScaling
+>>> from mindspore.common.initializer import initializer
 >>> tensor1 = initializer(VarianceScaling(scale=1.0, mode='fan_out',
 ... distribution='untruncated_normal'), [2, 3], mindspore.float32)
 >>> tensor2 = initializer('varianceScaling', [2, 3], mindspore.float32)
@@ -615,7 +625,8 @@ class Uniform(Initializer):
 
 Examples:
 >>> import mindspore
->>> from mindspore.common.initializer import initializer, Uniform
+>>> from mindspore import Uniform
+>>> from mindspore.common.initializer import initializer
 >>> tensor1 = initializer(Uniform(), [1, 2, 3], mindspore.float32)
 >>> tensor2 = initializer('uniform', [1, 2, 3], mindspore.float32)
 """
@@ -643,7 +654,8 @@ class Normal(Initializer):
 
 Examples:
 >>> import mindspore
->>> from mindspore.common.initializer import initializer, Normal
+>>> from mindspore import Normal
+>>> from mindspore.common.initializer import initializer
 >>> tensor1 = initializer(Normal(), [1, 2, 3], mindspore.float32)
 >>> tensor2 = initializer('normal', [1, 2, 3], mindspore.float32)
 """
@@ -671,7 +683,8 @@ class TruncatedNormal(Initializer):
 
 Examples:
 >>> import mindspore
->>> from mindspore.common.initializer import initializer, TruncatedNormal
+>>> from mindspore import TruncatedNormal
+>>> from mindspore.common.initializer import initializer
 >>> tensor1 = initializer(TruncatedNormal(), [1, 2, 3], mindspore.float32)
 >>> tensor2 = initializer('truncatedNormal', [1, 2, 3], mindspore.float32)
 """
@@ -715,7 +728,8 @@ def initializer(init, shape=None, dtype=mstype.float32):
 >>> import numpy as np
 >>> import mindspore
 >>> from mindspore import Tensor
->>> from mindspore.common.initializer import initializer, One
+>>> from mindspore import One
+>>> from mindspore.common.initializer import initializer
 >>> data = Tensor(np.zeros([1, 2, 3]), mindspore.float32)
 >>> tensor1 = initializer(data, [1, 2, 3], mindspore.float32)
 >>> tensor2 = initializer('ones', [1, 2, 3], mindspore.float32)
@@ -63,7 +63,7 @@ class Tensor(Tensor_):
 >>> import numpy as np
 >>> import mindspore as ms
 >>> from mindspore import Tensor
->>> from mindspore.common.initializer import One
+>>> from mindspore import One
 >>> # initialize a tensor with numpy.ndarray
 >>> t1 = Tensor(np.zeros([1, 2, 3]), ms.float32)
 >>> print(t1)
@@ -1555,8 +1555,9 @@ class Tensor(Tensor_):
 
 Examples:
 >>> import mindspore as ms
->>> import mindspore.common.initializer as init
->>> x = init.initializer(init.Constant(1), [2, 2], ms.float32)
+>>> from mindspore import Constant
+>>> from mindspore.common.initializer import initializer
+>>> x = initializer(Constant(1), [2, 2], ms.float32)
 >>> out = x.init_data()
 >>> print(out)
 [[1. 1.]
@@ -1630,8 +1631,9 @@ class Tensor(Tensor_):
 
 Examples:
 >>> import mindspore as ms
->>> import mindspore.common.initializer as init
->>> x = init.initializer(init.Constant(1), [2, 2], ms.float32)
+>>> from mindspore import Constant
+>>> from mindspore.common.initializer import initializer
+>>> x = initializer(Constant(1), [2, 2], ms.float32)
 >>> out = x.to_tensor()
 >>> print(out)
 [[1. 1.]
@@ -544,26 +544,26 @@ def set_auto_parallel_context(**kwargs):
 ValueError: If input key is not attribute in auto parallel context.
 
 Examples:
->>> from mindspore import context
+>>> from mindspore import set_auto_parallel_context
->>> context.set_auto_parallel_context(device_num=8)
+>>> set_auto_parallel_context(device_num=8)
->>> context.set_auto_parallel_context(global_rank=0)
+>>> set_auto_parallel_context(global_rank=0)
->>> context.set_auto_parallel_context(gradients_mean=True)
+>>> set_auto_parallel_context(gradients_mean=True)
->>> context.set_auto_parallel_context(gradient_fp32_sync=False)
+>>> set_auto_parallel_context(gradient_fp32_sync=False)
->>> context.set_auto_parallel_context(parallel_mode="auto_parallel")
+>>> set_auto_parallel_context(parallel_mode="auto_parallel")
->>> context.set_auto_parallel_context(search_mode="dynamic_programming")
+>>> set_auto_parallel_context(search_mode="dynamic_programming")
->>> context.set_auto_parallel_context(auto_parallel_search_mode="dynamic_programming")
+>>> set_auto_parallel_context(auto_parallel_search_mode="dynamic_programming")
->>> context.set_auto_parallel_context(parameter_broadcast=False)
+>>> set_auto_parallel_context(parameter_broadcast=False)
->>> context.set_auto_parallel_context(strategy_ckpt_load_file="./strategy_stage1.ckpt")
+>>> set_auto_parallel_context(strategy_ckpt_load_file="./strategy_stage1.ckpt")
->>> context.set_auto_parallel_context(strategy_ckpt_save_file="./strategy_stage1.ckpt")
+>>> set_auto_parallel_context(strategy_ckpt_save_file="./strategy_stage1.ckpt")
->>> context.set_auto_parallel_context(dataset_strategy=((1, 8), (1, 8)))
+>>> set_auto_parallel_context(dataset_strategy=((1, 8), (1, 8)))
->>> context.set_auto_parallel_context(enable_parallel_optimizer=False)
+>>> set_auto_parallel_context(enable_parallel_optimizer=False)
->>> context.set_auto_parallel_context(enable_alltoall=False)
+>>> set_auto_parallel_context(enable_alltoall=False)
->>> context.set_auto_parallel_context(all_reduce_fusion_config=[8, 160])
+>>> set_auto_parallel_context(all_reduce_fusion_config=[8, 160])
->>> context.set_auto_parallel_context(pipeline_stages=2)
+>>> set_auto_parallel_context(pipeline_stages=2)
 >>> parallel_config = {"gradient_accumulation_shard": True, "parallel_optimizer_threshold": 24}
->>> context.set_auto_parallel_context(parallel_optimizer_config=parallel_config, enable_parallel_optimizer=True)
+>>> set_auto_parallel_context(parallel_optimizer_config=parallel_config, enable_parallel_optimizer=True)
 >>> config = {"allreduce": {"mode": "size", "config": 32}, "allgather": {"mode": "size", "config": 32}}
->>> context.set_auto_parallel_context(comm_fusion=config)
+>>> set_auto_parallel_context(comm_fusion=config)
 """
 _set_auto_parallel_context(**kwargs)
 
@@ -582,9 +582,9 @@ def get_auto_parallel_context(attr_key):
 ValueError: If input key is not attribute in auto parallel context.
 
 Examples:
->>> from mindspore import context
+>>> from mindspore import get_auto_parallel_context
->>> parallel_mode = context.get_auto_parallel_context("parallel_mode")
+>>> parallel_mode = get_auto_parallel_context("parallel_mode")
->>> dataset_strategy = context.get_auto_parallel_context("dataset_strategy")
+>>> dataset_strategy = get_auto_parallel_context("dataset_strategy")
 """
 return _get_auto_parallel_context(attr_key)
 
@@ -864,32 +864,32 @@ def set_context(**kwargs):
 ValueError: If input key is not an attribute in context.
 
 Examples:
->>> from mindspore import context
+>>> from mindspore import set_context, PYNATIVE_MODE
->>> context.set_context(mode=context.PYNATIVE_MODE)
+>>> set_context(mode=PYNATIVE_MODE)
->>> context.set_context(precompile_only=True)
+>>> set_context(precompile_only=True)
->>> context.set_context(device_target="Ascend")
+>>> set_context(device_target="Ascend")
->>> context.set_context(device_id=0)
+>>> set_context(device_id=0)
->>> context.set_context(save_graphs=True, save_graphs_path="./model.ms")
+>>> set_context(save_graphs=True, save_graphs_path="./model.ms")
->>> context.set_context(enable_reduce_precision=True)
+>>> set_context(enable_reduce_precision=True)
->>> context.set_context(enable_dump=True, save_dump_path=".")
+>>> set_context(enable_dump=True, save_dump_path=".")
->>> context.set_context(enable_graph_kernel=True)
+>>> set_context(enable_graph_kernel=True)
->>> context.set_context(graph_kernel_flags="--opt_level=2 --dump_as_text")
+>>> set_context(graph_kernel_flags="--opt_level=2 --dump_as_text")
->>> context.set_context(reserve_class_name_in_scope=True)
+>>> set_context(reserve_class_name_in_scope=True)
->>> context.set_context(variable_memory_max_size="6GB")
+>>> set_context(variable_memory_max_size="6GB")
->>> context.set_context(enable_profiling=True,
+>>> set_context(enable_profiling=True,
 ... profiling_options='{"output":"/home/data/output","training_trace":"on"}')
->>> context.set_context(check_bprop=True)
+>>> set_context(check_bprop=True)
->>> context.set_context(max_device_memory="3.5GB")
+>>> set_context(max_device_memory="3.5GB")
->>> context.set_context(mempool_block_size="1GB")
+>>> set_context(mempool_block_size="1GB")
->>> context.set_context(print_file_path="print.pb")
+>>> set_context(print_file_path="print.pb")
->>> context.set_context(enable_sparse=True)
+>>> set_context(enable_sparse=True)
->>> context.set_context(max_call_depth=80)
+>>> set_context(max_call_depth=80)
->>> context.set_context(env_config_path="./env_config.json")
+>>> set_context(env_config_path="./env_config.json")
->>> context.set_context(auto_tune_mode="GA,RL")
+>>> set_context(auto_tune_mode="GA,RL")
->>> context.set_context(grad_for_scalar=True)
+>>> set_context(grad_for_scalar=True)
->>> context.set_context(enable_compile_cache=True, compile_cache_path="./cache.ms")
+>>> set_context(enable_compile_cache=True, compile_cache_path="./cache.ms")
->>> context.set_context(pynative_synchronize=True)
+>>> set_context(pynative_synchronize=True)
->>> context.set_context(runtime_num_threads=10)
+>>> set_context(runtime_num_threads=10)
 """
 ctx = _context()
 # set device target first
@@ -936,9 +936,9 @@ def get_context(attr_key):
 Raises:
 ValueError: If input key is not an attribute in context.
 Examples:
->>> from mindspore import context
+>>> from mindspore import get_context
->>> context.get_context("device_target")
+>>> get_context("device_target")
->>> context.get_context("device_id")
+>>> get_context("device_id")
 """
 ctx = _context()
 device = ctx.get_param(ms_ctx_param.device_target)
@@ -1027,8 +1027,8 @@ def set_ps_context(**kwargs):
 ValueError: If input key is not the attribute in parameter server training mode context.
 
 Examples:
->>> from mindspore import context
+>>> from mindspore import set_ps_context
->>> context.set_ps_context(enable_ps=True, enable_ssl=True, client_password='123456', server_password='123456')
+>>> set_ps_context(enable_ps=True, enable_ssl=True, client_password='123456', server_password='123456')
 """
 _set_ps_context(**kwargs)
 
@@ -1049,8 +1049,8 @@ def get_ps_context(attr_key):
 ValueError: If input key is not attribute in auto parallel context.
 
 Examples:
->>> from mindspore import context
+>>> from mindspore import get_ps_context
->>> context.get_ps_context("enable_ps")
+>>> get_ps_context("enable_ps")
 """
 return _get_ps_context(attr_key)
 
@@ -1144,7 +1144,8 @@ def set_fl_context(**kwargs):
 ValueError: If input key is not the attribute in federated learning mode context.
 
 Examples:
->>> context.set_fl_context(enable_fl=True, server_mode='FEDERATED_LEARNING')
+>>> from mindspore import set_fl_context
+>>> set_fl_context(enable_fl=True, server_mode='FEDERATED_LEARNING')
 """
 _set_ps_context(**kwargs)
 
@@ -1164,6 +1165,7 @@ def get_fl_context(attr_key):
 ValueError: If input key is not attribute in federated learning mode context.
 
 Examples:
->>> context.get_fl_context("server_mode")
+>>> from mindspore import get_fl_context
+>>> get_fl_context("server_mode")
 """
 return _get_ps_context(attr_key)
@@ -154,12 +154,12 @@ class WaitedDSCallback(Callback, DSCallback):
 Examples:
 >>> import mindspore.nn as nn
 >>> from mindspore.dataset import WaitedDSCallback
->>> from mindspore import context
+>>> from mindspore import set_context, GRAPH_MODE
 >>> from mindspore import Model
->>> from mindspore.train.callback import Callback
+>>> from mindspore import Callback
 >>> import mindspore.dataset as ds
 >>>
->>> context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
+>>> set_context(mode=GRAPH_MODE, device_target="CPU")
 >>>
 >>> # custom callback class for data synchronization in data pipeline
 >>> class MyWaitedCallback(WaitedDSCallback):
@@ -279,7 +279,7 @@ def sync_wait_for_dataset(rank_id, rank_size, current_epoch):
 >>> # Create a synchronization callback
 >>>
 >>> from mindspore.dataset import sync_wait_for_dataset
->>> from mindspore.train.callback import Callback
+>>> from mindspore import Callback
 >>>
 >>> class SyncForDataset(Callback):
 ... def __init__(self):
@@ -890,9 +890,9 @@ class Cell(Cell_):
 Examples:
 >>> import numpy as np
 >>> import mindspore
->>> from mindspore import nn, Tensor, context
+>>> from mindspore import nn, Tensor, set_context, GRAPH_MODE
 >>>
->>> context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
+>>> set_context(mode=GRAPH_MODE, device_target="Ascend")
 >>> class reluNet(nn.Cell):
 ... def __init__(self):
 ... super(reluNet, self).__init__()
@@ -1709,9 +1709,9 @@ class Cell(Cell_):
 >>> import mindspore
 >>> import mindspore.nn as nn
 >>> from mindspore import Tensor
->>> from mindspore import context
+>>> from mindspore import set_context, PYNATIVE_MODE
 >>> from mindspore.ops import GradOperation
->>> context.set_context(mode=context.PYNATIVE_MODE)
+>>> set_context(mode=PYNATIVE_MODE)
 >>> def forward_pre_hook_fn(cell_id, inputs):
 ... print("forward inputs: ", inputs)
 ...
@@ -1810,9 +1810,9 @@ class Cell(Cell_):
 >>> import mindspore
 >>> import mindspore.nn as nn
 >>> from mindspore import Tensor
->>> from mindspore import context
+>>> from mindspore import set_context, PYNATIVE_MODE
 >>> from mindspore.ops import GradOperation
->>> context.set_context(mode=context.PYNATIVE_MODE)
+>>> set_context(mode=PYNATIVE_MODE)
 >>> def forward_hook_fn(cell_id, inputs, output):
 ... print("forward inputs: ", inputs)
 ... print("forward output: ", output)
@@ -1916,9 +1916,9 @@ class Cell(Cell_):
 >>> import mindspore
 >>> import mindspore.nn as nn
 >>> from mindspore import Tensor
->>> from mindspore import context
+>>> from mindspore import set_context, PYNATIVE_MODE
 >>> from mindspore.ops import GradOperation
->>> context.set_context(mode=context.PYNATIVE_MODE)
+>>> set_context(mode=PYNATIVE_MODE)
 >>> def backward_hook_fn(cell_id, grad_input, grad_output):
 ... print("backward input: ", grad_input)
 ... print("backward output: ", grad_output)
@@ -712,16 +712,16 @@ class SyncBatchNorm(_BatchNorm):
 >>> # Variables, Calling the Collective Communication Library, Running the Script.
 >>> import numpy as np
 >>> from mindspore.communication import init
->>> from mindspore import context
+>>> from mindspore import set_context, GRAPH_MODE, reset_auto_parallel_context, set_auto_parallel_context
 >>> from mindspore import ParallelMode
 >>> from mindspore import Tensor
 >>> from mindspore import nn
 >>> from mindspore import dtype as mstype
 >>>
->>> context.set_context(mode=context.GRAPH_MODE)
+>>> set_context(mode=GRAPH_MODE)
 >>> init()
->>> context.reset_auto_parallel_context()
+>>> reset_auto_parallel_context()
->>> context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL)
+>>> set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL)
 >>> sync_bn_op = nn.SyncBatchNorm(num_features=3, process_groups=[[0, 1], [2, 3]])
 >>> x = Tensor(np.ones([1, 3, 2, 2]), mstype.float32)
 >>> output = sync_bn_op(x)
@@ -337,8 +337,8 @@ def thor(net, learning_rate, damping, momentum, weight_decay=0.0, loss_scale=1.0
 >>> from mindspore.nn import thor
 >>> from mindspore import Model
 >>> from mindspore import FixedLossScaleManager
->>> from mindspore.train.callback import LossMonitor
+>>> from mindspore import LossMonitor
->>> from mindspore.train.train_thor import ConvertModelUtils
+>>> from mindspore import ConvertModelUtils
 >>> from mindspore import nn
 >>> from mindspore import Tensor
 >>>
@@ -255,7 +255,7 @@ For DNN researchers who are unfamiliar with Bayesian models, MDP provides high-l
 1. Define a Deep Neural Network. The LeNet is used in this example.
 
 ```python
-from mindspore.common.initializer import TruncatedNormal
+from mindspore import TruncatedNormal
 import mindspore.nn as nn
 import mindspore.ops.operations as P
 
@@ -44,8 +44,8 @@ class SparseToDense(Cell):
 >>> import mindspore as ms
 >>> from mindspore import Tensor, COOTensor
 >>> import mindspore.nn as nn
->>> import mindspore.context as context
+>>> from mindspore import set_context, PYNATIVE_MODE
->>> context.set_context(mode=context.PYNATIVE_MODE)
+>>> set_context(mode=PYNATIVE_MODE)
 >>> indices = Tensor([[0, 1], [1, 2]])
 >>> values = Tensor([1, 2], dtype=ms.int32)
 >>> dense_shape = (3, 4)
@@ -312,15 +312,15 @@ class DistributedGradReducer(Cell):
 >>> import numpy as np
 >>> from mindspore.communication import init
 >>> from mindspore import ops
->>> from mindspore import context
+>>> from mindspore import set_context, reset_auto_parallel_context, set_auto_parallel_context, GRAPH_MODE
 >>> from mindspore import ParallelMode
 >>> from mindspore import Parameter, Tensor
 >>> from mindspore import nn
 >>>
->>> context.set_context(mode=context.GRAPH_MODE)
+>>> set_context(mode=GRAPH_MODE)
 >>> init()
->>> context.reset_auto_parallel_context()
+>>> reset_auto_parallel_context()
->>> context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL)
+>>> set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL)
 >>>
 >>> class TrainingWrapper(nn.Cell):
 ... def __init__(self, network, optimizer, sens=1.0):
@@ -973,8 +973,8 @@ def unique(x, return_inverse=False):
 
 Examples:
 >>> import mindspore.numpy as np
->>> from mindspore import context
+>>> from mindspore import set_context, GRAPH_MODE
->>> context.set_context(mode=context.GRAPH_MODE)
+>>> set_context(mode=GRAPH_MODE)
 >>> input_x = np.asarray([1, 2, 2, 2, 3, 4, 5]).astype('int32')
 >>> output_x = np.unique(input_x)
 >>> print(output_x)
@@ -192,10 +192,10 @@ def grad(fn, grad_position=0, sens_param=False):
 Examples:
 >>> import numpy as np
 >>> import mindspore.nn as nn
->>> import mindspore.context as context
+>>> from mindspore import set_context, GRAPH_MODE
 >>> from mindspore import Tensor
 >>> from mindspore.ops.functional import grad
->>> context.set_context(mode=context.GRAPH_MODE)
+>>> set_context(mode=GRAPH_MODE)
 >>> class Net(nn.Cell):
 ... def construct(self, x, y, z):
 ... return x*y*z
@@ -282,11 +282,11 @@ def jet(fn, primals, series):
 Examples:
 >>> import numpy as np
 >>> import mindspore.nn as nn
->>> import mindspore.context as context
+>>> from mindspore import set_context, GRAPH_MODE
 >>> import mindspore.ops as P
 >>> from mindspore import Tensor
 >>> from mindspore.ops.functional import jet
->>> context.set_context(mode=context.GRAPH_MODE)
+>>> set_context(mode=GRAPH_MODE)
 >>> class Net(nn.Cell):
 ... def __init__(self):
 ... super().__init__()
@@ -358,11 +358,11 @@ def derivative(fn, primals, order):
 Examples:
 >>> import numpy as np
 >>> import mindspore.nn as nn
->>> import mindspore.context as context
+>>> from mindspore import set_context, GRAPH_MODE
 >>> import mindspore.ops as P
 >>> from mindspore import Tensor
 >>> from mindspore.ops.functional import derivative
->>> context.set_context(mode=context.GRAPH_MODE)
+>>> set_context(mode=GRAPH_MODE)
 >>> class Net(nn.Cell):
 ... def __init__(self):
 ... super().__init__()
@@ -662,6 +662,10 @@ class GpuConvertToDynamicShape(PrimitiveWithCheck):
 
 Examples:
 >>> # make a model, since dynamic shape operators must be in GRAPH_MODE
+>>> import mindspore.nn as nn
+>>> from mindspore import set_context, GRAPH_MODE
+>>> from mindspore.ops.operations import _inner_ops as inner
+>>> from mindspore.ops import operations as P
 >>> class TestDynamicShapeReshapeNet(nn.Cell):
 >>> def __init__(self):
 >>> super(TestDynamicShapeReshapeNet, self).__init__()
@@ -673,7 +677,7 @@ class GpuConvertToDynamicShape(PrimitiveWithCheck):
 >>> dynamic_shape_input = self.convert_to_dynamic_shape(input)
 >>> reshaped_input = self.reshape(input, new_shape)
 >>>
->>> context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
+>>> set_context(mode=GRAPH_MODE, device_target="GPU")
 >>> input = Tensor(np.array([0, 1, 2, 3])
 >>> new_shape = (2, 2)
 >>> net = TestDynamicShapeReshapeNet()
@@ -706,6 +710,10 @@ class ErrorOnDynamicShapeInput(PrimitiveWithInfer):
 
 Examples:
 >>> # make a model, since dynamic shape operators must be in GRAPH_MODE
+>>> import mindspore.nn as nn
+>>> from mindspore.ops.operations import _inner_ops as inner
+>>> from mindspore.ops import operations as P
+>>> from mindspore import set_context, GRAPH_MODE
 >>> class AssertDynamicShapeNet(nn.Cell):
 >>> def __init__(self):
 >>> super(AssertDynamicShapeNet, self).__init__()
@@ -716,7 +724,7 @@ class ErrorOnDynamicShapeInput(PrimitiveWithInfer):
 >>> dynamic_shape_input = self.convert_to_dynamic_shape(input)
 >>> self.error_on_dynamic_shape_input(dynamic_shape_input)
 >>>
->>> context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
+>>> set_context(mode=GRAPH_MODE, device_target="GPU")
 >>> input = Tensor(np.array([0])
 >>> net = TestDynamicShapeReshapeNet()
 >>> output = net(input, new_shape)
@@ -1774,10 +1782,10 @@ class CellBackwardHook(PrimitiveWithInfer):
 Examples:
 >>> import mindspore
 >>> from mindspore import Tensor
->>> from mindspore import context
+>>> from mindspore import set_context, PYNATIVE_MODE
 >>> from mindspore.ops import GradOperation
 >>> from mindspore.ops.operations import _inner_ops as inner
->>> context.set_context(mode=context.PYNATIVE_MODE)
+>>> set_context(mode=PYNATIVE_MODE)
 >>> def hook_fn(grad):
 ... print(grad)
 ...
@@ -6168,7 +6168,6 @@ class EditDistance(PrimitiveWithInfer):
 
 Examples:
 >>> import numpy as np
->>> from mindspore import context
 >>> from mindspore import Tensor
 >>> import mindspore.nn as nn
 >>> import mindspore.ops as ops
@@ -209,9 +209,9 @@ class AllGather(PrimitiveWithInfer):
 >>> import mindspore.ops as ops
 >>> import mindspore.nn as nn
 >>> from mindspore.communication import init
->>> from mindspore import Tensor, context
+>>> from mindspore import Tensor, set_context, GRAPH_MODE
 >>>
->>> context.set_context(mode=context.GRAPH_MODE)
+>>> set_context(mode=GRAPH_MODE)
 >>> init()
 >>> class Net(nn.Cell):
 ... def __init__(self):
@@ -405,14 +405,14 @@ class ReduceScatter(PrimitiveWithInfer):
 
 Examples:
 >>> # This example should be run with two devices. Refer to the tutorial > Distributed Training on mindspore.cn
->>> from mindspore import Tensor, context
+>>> from mindspore import Tensor, set_context, GRAPH_MODE
 >>> from mindspore.communication import init
 >>> from mindspore.ops import ReduceOp
 >>> import mindspore.nn as nn
 >>> import mindspore.ops as ops
 >>> import numpy as np
 >>>
->>> context.set_context(mode=context.GRAPH_MODE)
+>>> set_context(mode=GRAPH_MODE)
 >>> init()
 >>> class Net(nn.Cell):
 ... def __init__(self):
@@ -549,13 +549,13 @@ class Broadcast(PrimitiveWithInfer):
 >>> # on mindspore.cn and focus on the contents of these three parts: Configuring Distributed Environment
 >>> # Variables, Calling the Collective Communication Library, Running The Script.
 >>> from mindspore import Tensor
->>> from mindspore import context
+>>> from mindspore import set_context, GRAPH_MODE
 >>> from mindspore.communication import init
 >>> import mindspore.nn as nn
 >>> import mindspore.ops as ops
 >>> import numpy as np
 >>>
->>> context.set_context(mode=context.GRAPH_MODE)
+>>> set_context(mode=GRAPH_MODE)
 >>> init()
 >>> class Net(nn.Cell):
 ... def __init__(self):
@@ -682,7 +682,7 @@ class NeighborExchange(Primitive):
 >>> import os
 >>> import mindspore as ms
 >>> from mindspore import Tensor
->>> from mindspore import context
+>>> from mindspore import set_context, GRAPH_MODE
 >>> from mindspore.communication import init
 >>> import mindspore.nn as nn
 >>> import mindspore.ops as ops
@@ -698,7 +698,7 @@ class NeighborExchange(Primitive):
 ... def construct(self, x):
 ... out = self.neighborexchange((x,))
 ...
->>> context.set_context(mode=context.GRAPH_MODE, device_target='Ascend')
+>>> set_context(mode=GRAPH_MODE, device_target='Ascend')
 >>> init()
 >>> net = Net()
 >>> input_x = Tensor(np.ones([3, 3]), dtype = ms.float32)
@@ -759,7 +759,7 @@ class AlltoAll(PrimitiveWithInfer):
 >>> import os
 >>> import mindspore as ms
 >>> from mindspore import Tensor
->>> from mindspore import context
+>>> from mindspore import set_context, GRAPH_MODE
 >>> from mindspore.communication import init
 >>> import mindspore.nn as nn
 >>> import mindspore.ops as ops
@@ -773,7 +773,7 @@ class AlltoAll(PrimitiveWithInfer):
 ... out = self.alltoall(x)
 ... return out
 ...
->>> context.set_context(mode=context.GRAPH_MODE, device_target='Ascend')
+>>> set_context(mode=GRAPH_MODE, device_target='Ascend')
 >>> init()
 >>> net = Net()
 >>> rank_id = int(os.getenv("RANK_ID"))
@@ -853,7 +853,7 @@ class NeighborExchangeV2(Primitive):
 >>> import os
 >>> import mindspore as ms
 >>> from mindspore import Tensor
->>> from mindspore import context
+>>> from mindspore import set_context, GRAPH_MODE
 >>> from mindspore.communication import init
 >>> import mindspore.nn as nn
 >>> import mindspore.ops as ops
@@ -871,7 +871,7 @@ class NeighborExchangeV2(Primitive):
 ... out = self.neighborexchangev2(x)
 ... return out
 ...
->>> context.set_context(mode=context.GRAPH_MODE, device_target='Ascend')
+>>> set_context(mode=GRAPH_MODE, device_target='Ascend')
 >>> init()
 >>> input_x = Tensor(np.ones([1, 1, 2, 2]), dtype = ms.float32)
 >>> net = Net()
@@ -358,9 +358,9 @@ class HookBackward(PrimitiveWithInfer):
 >>> import mindspore
 >>> from mindspore import ops
 >>> from mindspore import Tensor
->>> from mindspore import context
+>>> from mindspore import set_context, PYNATIVE_MODE
 >>> from mindspore.ops import GradOperation
->>> context.set_context(mode=context.PYNATIVE_MODE)
+>>> set_context(mode=PYNATIVE_MODE)
 >>> def hook_fn(grad):
 ... print(grad)
 ...
@@ -455,7 +455,7 @@ class FusedCastAdamWeightDecay(PrimitiveWithInfer):
 
 Examples:
 >>> import numpy as np
->>> import mindspore.context as context
+>>> from mindspore import set_context, GRAPH_MODE
 >>> import mindspore.nn as nn
 >>> import mindspore.ops as ops
 >>> from mindspore import Tensor, Parameter
@@ -470,7 +470,7 @@ class FusedCastAdamWeightDecay(PrimitiveWithInfer):
 ... def construct(self, lr, beta1, beta2, epsilon, decay, grad):
 ... out = self.opt(self.var, self.m, self.v, lr, beta1, beta2, epsilon, decay, grad)
 ... return out
->>> context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
+>>> set_context(mode=GRAPH_MODE, device_target="CPU")
 >>> net = Net()
 >>> gradient = Tensor(np.ones([2, 2]), mstype.float16)
 >>> output = net(0.001, 0.9, 0.999, 1e-8, 0.0, gradient)
@@ -584,7 +584,7 @@ class FusedAdaFactor(PrimitiveWithInfer):
 
 Examples:
 >>> import numpy as np
->>> import mindspore.context as context
+>>> from mindspore import set_context, GRAPH_MODE
 >>> import mindspore.nn as nn
 >>> import mindspore.ops as ops
 >>> from mindspore import Tensor, Parameter
@@ -604,7 +604,7 @@ class FusedAdaFactor(PrimitiveWithInfer):
 ... out = self.opt(epsilon, clip_threshold, beta1, beta2, weight_decay, lr, grad, self.param,
 ... self.exp_avg, self.exp_avg_sq_row, self.exp_avg_sq_col, self.exp_avg_sq)
 ... return out
->>> context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
+>>> set_context(mode=GRAPH_MODE, device_target="CPU")
 >>> net = Net()
 >>> gradient = Tensor(np.ones(param_shape), mstype.float32)
 >>> net((1e-30, 1e-3), 1.0, 0.9, 0.8, 1e-2, 0.03, gradient)
@@ -197,7 +197,8 @@ def _set_ps_context(**kwargs):
 ValueError: If input key is not the attribute in parameter server training mode context.
 
 Examples:
->>> context.set_ps_context(enable_ps=True, enable_ssl=True, client_password='123456', server_password='123456')
+>>> from mindspore import set_ps_context
+>>> set_ps_context(enable_ps=True, enable_ssl=True, client_password='123456', server_password='123456')
 """
 kwargs = _check_conflict_value(kwargs)
 for key, value in kwargs.items():
@@ -78,8 +78,8 @@ class Profiler:
 
 Examples:
 >>> import numpy as np
->>> from mindspore import nn, context
+>>> from mindspore import nn
->>> from mindspore import Model
+>>> from mindspore import Model, set_context, GRAPH_MODE
 >>> import mindspore.dataset as ds
 >>> from mindspore import Profiler
 >>>
@@ -104,7 +104,7 @@ class Profiler:
 >>>
 >>> if __name__ == '__main__':
 ... # If the device_target is GPU, set the device_target to "GPU"
-... context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
+... set_context(mode=GRAPH_MODE, device_target="Ascend")
 ...
 ... # Init Profiler
 ... # Note that the Profiler should be initialized before model.train
@@ -25,8 +25,16 @@ from .loss_scale_manager import LossScaleManager, FixedLossScaleManager, Dynamic
 from .serialization import save_checkpoint, load_checkpoint, load_param_into_net, export, load, parse_print,\
 build_searched_strategy, merge_sliced_parameter, load_distributed_checkpoint, async_ckpt_thread_status,\
 restore_group_info_list
+from .callback import Callback, LossMonitor, TimeMonitor, ModelCheckpoint, SummaryCollector, CheckpointConfig, \
+RunContext, LearningRateScheduler, SummaryLandscape, FederatedLearningManager, History, LambdaCallback
+from .summary import SummaryRecord
+from .train_thor import ConvertNetUtils, ConvertModelUtils
+
 
 __all__ = ["Model", "DatasetHelper", "amp", "connect_network_with_dataset", "build_train_network", "LossScaleManager",
 "FixedLossScaleManager", "DynamicLossScaleManager", "save_checkpoint", "load_checkpoint",
 "load_param_into_net", "export", "load", "parse_print", "build_searched_strategy", "merge_sliced_parameter",
 "load_distributed_checkpoint", "async_ckpt_thread_status", "restore_group_info_list"]
+__all__.extend(callback.__all__)
+__all__.extend(summary.__all__)
+__all__.extend(train_thor.__all__)
@ -87,7 +87,7 @@ class Callback:
|
||||||
Examples:
|
Examples:
|
||||||
>>> import numpy as np
|
>>> import numpy as np
|
||||||
>>> from mindspore import Model, nn
|
>>> from mindspore import Model, nn
|
||||||
>>> from mindspore.train.callback import Callback
|
>>> from mindspore import Callback
|
||||||
>>> from mindspore import dataset as ds
|
>>> from mindspore import dataset as ds
|
||||||
>>> class Print_info(Callback):
|
>>> class Print_info(Callback):
|
||||||
... def step_end(self, run_context):
|
... def step_end(self, run_context):
|
||||||
|
|
|
@@ -104,8 +104,8 @@ class CheckpointConfig:

     Examples:
         >>> from mindspore import Model, nn
-        >>> from mindspore.train.callback import ModelCheckpoint, CheckpointConfig
+        >>> from mindspore import ModelCheckpoint, CheckpointConfig
-        >>> from mindspore.common.initializer import Normal
+        >>> from mindspore import Normal
         >>>
         >>> class LeNet5(nn.Cell):
         ...     def __init__(self, num_class=10, num_channel=1):
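A short sketch of wiring the two classes together under the new imports; the step interval, prefix, and directory are illustrative values:

    from mindspore import ModelCheckpoint, CheckpointConfig

    # Save every 32 steps and keep at most 5 checkpoint files on disk.
    config = CheckpointConfig(save_checkpoint_steps=32, keep_checkpoint_max=5)
    ckpt_cb = ModelCheckpoint(prefix="lenet", directory="./checkpoints", config=config)
    # model.train(1, train_dataset, callbacks=[ckpt_cb])  # model/dataset as in the example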
@@ -34,7 +34,7 @@ class History(Callback):
     Examples:
         >>> import numpy as np
         >>> import mindspore.dataset as ds
-        >>> from mindspore.train.callback import History
+        >>> from mindspore import History
         >>> from mindspore import Model, nn
         >>> data = {"x": np.float32(np.random.rand(64, 10)), "y": np.random.randint(0, 5, (64,))}
         >>> train_dataset = ds.NumpySlicesDataset(data=data).batch(32)
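Continuing that example, History accumulates per-epoch records that can be inspected after training; a sketch, assuming the model and dataset from the docstring above:

    from mindspore import History

    history_cb = History()
    # model.train(2, train_dataset, callbacks=[history_cb])
    # history_cb.epoch    -> list of epoch indices seen during training
    # history_cb.history  -> dict of the metrics recorded per epoch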
@@ -39,7 +39,7 @@ class LambdaCallback(Callback):
     Examples:
         >>> import numpy as np
         >>> import mindspore.dataset as ds
-        >>> from mindspore.train.callback import LambdaCallback
+        >>> from mindspore import LambdaCallback
         >>> from mindspore import Model, nn
         >>> data = {"x": np.float32(np.random.rand(64, 10)), "y": np.random.randint(0, 5, (64,))}
         >>> train_dataset = ds.NumpySlicesDataset(data=data).batch(32)
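LambdaCallback is the lightweight alternative to subclassing Callback: plain functions are passed for just the hooks you need. A sketch, assuming this version's `epoch_end` keyword:

    from mindspore import LambdaCallback

    # The hook receives the same RunContext a full Callback subclass would get.
    lambda_cb = LambdaCallback(
        epoch_end=lambda run_context: print("outputs:", run_context.original_args().net_outputs))
    # model.train(2, train_dataset, callbacks=[lambda_cb])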
@@ -178,14 +178,14 @@ class SummaryLandscape:

     Examples:
         >>> import mindspore.nn as nn
-        >>> from mindspore import context
+        >>> from mindspore import set_context, GRAPH_MODE
-        >>> from mindspore.train.callback import SummaryCollector, SummaryLandscape
+        >>> from mindspore import SummaryCollector, SummaryLandscape
         >>> from mindspore import Model
         >>> from mindspore.nn import Loss, Accuracy
         >>>
         >>> if __name__ == '__main__':
         ...     # If the device_target is Ascend, set the device_target to "Ascend"
-        ...     context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
+        ...     set_context(mode=GRAPH_MODE, device_target="GPU")
         ...     mnist_dataset_dir = '/path/to/mnist_dataset_directory'
         ...     # The details of the create_dataset method are shown in model_zoo.official.cv.lenet.src.dataset.py
         ...     ds_train = create_dataset(mnist_dataset_dir, 32)
@@ -34,7 +34,7 @@ class LearningRateScheduler(Callback):
     Examples:
         >>> import numpy as np
         >>> from mindspore import Model
-        >>> from mindspore.train.callback import LearningRateScheduler
+        >>> from mindspore import LearningRateScheduler
         >>> import mindspore.nn as nn
         >>> from mindspore import dataset as ds
         ...
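The scheduler calls a user function with the current learning rate and step number, then applies whatever the function returns. A sketch of the usual shape (the decay factor and interval are illustrative):

    from mindspore import LearningRateScheduler

    def learning_rate_function(lr, cur_step_num):
        # Halve the learning rate every 1000 steps.
        if cur_step_num % 1000 == 0:
            lr = lr * 0.5
        return lr

    # model.train(1, dataset, callbacks=[LearningRateScheduler(learning_rate_function)])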
@@ -175,14 +175,14 @@ class SummaryCollector(Callback):

     Examples:
         >>> import mindspore.nn as nn
-        >>> from mindspore import context
+        >>> from mindspore import set_context, GRAPH_MODE
-        >>> from mindspore.train.callback import SummaryCollector
+        >>> from mindspore import SummaryCollector
         >>> from mindspore import Model
         >>> from mindspore.nn import Accuracy
         >>>
         >>> if __name__ == '__main__':
         ...     # If the device_target is GPU, set the device_target to "GPU"
-        ...     context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
+        ...     set_context(mode=GRAPH_MODE, device_target="Ascend")
         ...     mnist_dataset_dir = '/path/to/mnist_dataset_directory'
         ...     # The details of the create_dataset method are shown in model_zoo.official.cv.lenet.src.dataset.py
         ...     ds_train = create_dataset(mnist_dataset_dir, 32)
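At its simplest, SummaryCollector only needs a log directory; a minimal sketch with the new import (directory and collection frequency are placeholder values):

    from mindspore import SummaryCollector

    # Collect training data into ./summary_dir every step, for viewing in MindInsight.
    summary_cb = SummaryCollector(summary_dir="./summary_dir", collect_freq=1)
    # model.train(1, ds_train, callbacks=[summary_cb])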
@@ -1182,13 +1182,13 @@ class Model:
         >>> # mindspore.cn.
         >>> import numpy as np
         >>> import mindspore as ms
-        >>> from mindspore import Model, context, Tensor, nn, FixedLossScaleManager
+        >>> from mindspore import Model, set_context, Tensor, nn, FixedLossScaleManager, GRAPH_MODE
-        >>> from mindspore import ParallelMode
+        >>> from mindspore import ParallelMode, set_auto_parallel_context
         >>> from mindspore.communication import init
         >>>
-        >>> context.set_context(mode=context.GRAPH_MODE)
+        >>> set_context(mode=GRAPH_MODE)
         >>> init()
-        >>> context.set_auto_parallel_context(parallel_mode=ParallelMode.SEMI_AUTO_PARALLEL)
+        >>> set_auto_parallel_context(parallel_mode=ParallelMode.SEMI_AUTO_PARALLEL)
         >>>
         >>> # For details about how to build the dataset, please refer to the tutorial
         >>> # document on the official website.
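After this change the distributed setup reduces to three top-level calls. A sketch; note that init() only succeeds under a proper distributed launch (mpirun or a rank table), so this is not standalone-runnable:

    from mindspore import set_context, set_auto_parallel_context, ParallelMode, GRAPH_MODE
    from mindspore.communication import init

    set_context(mode=GRAPH_MODE)   # graph mode, as in the docstring above
    init()                         # initialize the communication backend
    set_auto_parallel_context(parallel_mode=ParallelMode.SEMI_AUTO_PARALLEL)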
@@ -1238,13 +1238,13 @@ class Model:
         >>> # mindspore.cn.
         >>> import numpy as np
         >>> import mindspore as ms
-        >>> from mindspore import Model, context, Tensor
+        >>> from mindspore import Model, set_context, Tensor, GRAPH_MODE
-        >>> from mindspore import ParallelMode
+        >>> from mindspore import ParallelMode, set_auto_parallel_context
         >>> from mindspore.communication import init
         >>>
-        >>> context.set_context(mode=context.GRAPH_MODE)
+        >>> set_context(mode=GRAPH_MODE)
         >>> init()
-        >>> context.set_auto_parallel_context(full_batch=True, parallel_mode=ParallelMode.SEMI_AUTO_PARALLEL)
+        >>> set_auto_parallel_context(full_batch=True, parallel_mode=ParallelMode.SEMI_AUTO_PARALLEL)
         >>> input_data = Tensor(np.random.randint(0, 255, [1, 1, 32, 32]), ms.float32)
         >>> model = Model(Net())
         >>> predict_map = model.infer_predict_layout(input_data)
@@ -1190,8 +1190,8 @@ def parse_print(print_file_name):
         >>> import numpy as np
         >>> import mindspore.ops as ops
         >>> from mindspore import nn
-        >>> from mindspore import Tensor, context
+        >>> from mindspore import Tensor, set_context, GRAPH_MODE
-        >>> context.set_context(mode=context.GRAPH_MODE, print_file_path='log.data')
+        >>> set_context(mode=GRAPH_MODE, print_file_path='log.data')
         >>> class PrintInputTensor(nn.Cell):
         ...     def __init__(self):
         ...         super().__init__()
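parse_print is the read side of print_file_path: after a run whose Print output was redirected into 'log.data', the captured tensors can be recovered offline. A sketch, assuming such a file already exists:

    from mindspore import parse_print

    # Returns the tensors that ops.Print wrote into the file.
    printed_tensors = parse_print('log.data')
    for t in printed_tensors:
        print(t)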
@@ -136,7 +136,7 @@ class SummaryRecord:
         ValueError: The Summary is not supported, please recompile the source without `-s on`.

     Examples:
-        >>> from mindspore.train.summary import SummaryRecord
+        >>> from mindspore import SummaryRecord
         >>> if __name__ == '__main__':
         ...     # use in with statement to auto close
         ...     with SummaryRecord(log_dir="./summary_dir") as summary_record:
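The with-statement form shown here is the recommended one, since it flushes and closes the record automatically. A compact sketch under the new import:

    from mindspore import SummaryRecord

    with SummaryRecord(log_dir="./summary_dir") as summary_record:
        pass  # add values and call summary_record.record(step) inside the block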
@@ -209,7 +209,7 @@ class SummaryRecord:
         ValueError: `mode` is not one of the optional values.

     Examples:
-        >>> from mindspore.train.summary import SummaryRecord
+        >>> from mindspore import SummaryRecord
         >>> if __name__ == '__main__':
         ...     with SummaryRecord(log_dir="./summary_dir", file_prefix="xx_", file_suffix="_yy") as summary_record:
         ...         summary_record.set_mode('eval')
@@ -270,7 +270,7 @@ class SummaryRecord:

     Examples:
         >>> from mindspore import Tensor
-        >>> from mindspore.train.summary import SummaryRecord
+        >>> from mindspore import SummaryRecord
         >>> if __name__ == '__main__':
         ...     with SummaryRecord(log_dir="./summary_dir", file_prefix="xx_", file_suffix="_yy") as summary_record:
         ...         summary_record.add_value('scalar', 'loss', Tensor(0.1))
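add_value only stages data; nothing reaches disk until record(step) is called. Combining the two hunks into one sketch:

    from mindspore import Tensor, SummaryRecord

    with SummaryRecord(log_dir="./summary_dir") as summary_record:
        summary_record.add_value('scalar', 'loss', Tensor(0.1))  # stage a scalar
        summary_record.record(step=1)                            # write it for step 1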
@@ -325,7 +325,7 @@ class SummaryRecord:
         <https://www.mindspore.cn/docs/en/master/api_python/nn/mindspore.nn.Cell.html#mindspore-nn-cell>`_.

     Examples:
-        >>> from mindspore.train.summary import SummaryRecord
+        >>> from mindspore import SummaryRecord
         >>> if __name__ == '__main__':
         ...     with SummaryRecord(log_dir="./summary_dir", file_prefix="xx_", file_suffix="_yy") as summary_record:
         ...         result = summary_record.record(step=2)
@@ -438,7 +438,7 @@ class SummaryRecord:
         str, the full path of the log file.

     Examples:
-        >>> from mindspore.train.summary import SummaryRecord
+        >>> from mindspore import SummaryRecord
         >>> if __name__ == '__main__':
         ...     with SummaryRecord(log_dir="./summary_dir", file_prefix="xx_", file_suffix="_yy") as summary_record:
         ...         log_dir = summary_record.log_dir
@@ -452,7 +452,7 @@ class SummaryRecord:
         Call it to make sure that all pending events have been written to disk.

     Examples:
-        >>> from mindspore.train.summary import SummaryRecord
+        >>> from mindspore import SummaryRecord
         >>> if __name__ == '__main__':
         ...     with SummaryRecord(log_dir="./summary_dir", file_prefix="xx_", file_suffix="_yy") as summary_record:
         ...         summary_record.flush()
@@ -468,7 +468,7 @@ class SummaryRecord:
         Flush the buffer, write pending files to disk, and close the summary record. Please use the `with` statement to auto-close.

     Examples:
-        >>> from mindspore.train.summary import SummaryRecord
+        >>> from mindspore import SummaryRecord
         >>> if __name__ == '__main__':
         ...     try:
         ...         summary_record = SummaryRecord(log_dir="./summary_dir")
@@ -217,8 +217,8 @@ class ConvertModelUtils:
         >>> from mindspore.nn import thor
         >>> from mindspore import Model
         >>> from mindspore import FixedLossScaleManager
-        >>> from mindspore.train.callback import LossMonitor
+        >>> from mindspore import LossMonitor
-        >>> from mindspore.train.train_thor import ConvertModelUtils
+        >>> from mindspore import ConvertModelUtils
         >>>
         >>> net = Net()
         >>> dataset = create_dataset()
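With the re-export in place, the whole THOR flow imports from the package root. A heavily hedged sketch: the call shape follows the class docstring, Net()/create_dataset() are the docstring's placeholders, and the thor optimizer arguments are assumptions, so everything below the imports is left commented:

    import mindspore.nn as nn
    from mindspore import Model, FixedLossScaleManager, LossMonitor, ConvertModelUtils
    from mindspore.nn import thor

    # net = Net()                                   # placeholder network
    # dataset = create_dataset()                    # placeholder dataset helper
    # loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
    # opt = thor(net, learning_rate=0.01, damping=0.03, momentum=0.9)  # assumed args
    # loss_scale = FixedLossScaleManager(128, drop_overflow_update=False)
    # model = Model(net, loss_fn=loss, optimizer=opt, metrics={'acc'},
    #               loss_scale_manager=loss_scale, amp_level="O2")
    # model = ConvertModelUtils.convert_to_thor_model(model, network=net, loss_fn=loss,
    #                                                 optimizer=opt, metrics={'acc'},
    #                                                 loss_scale_manager=loss_scale, amp_level="O2")
    # model.train(1, dataset, callbacks=[LossMonitor()])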