forked from mindspore-Ecosystem/mindspore

adjust_import_mode

parent 2534345459
commit 9cecb77581
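The hunks below all apply the same change: docstring examples that imported individual symbols from the package root (`from mindspore import set_context, GRAPH_MODE`, `Model`, ...) are switched to the module-alias style `import mindspore as ms` with `ms.`-prefixed calls. A minimal sketch of the before/after pattern follows; the network, loss, and optimizer lines are only illustrative, not taken from one specific file in this commit.

```python
import mindspore as ms
import mindspore.nn as nn

# Old docstring style (removed in this commit):
#   from mindspore import set_context, GRAPH_MODE, Model
#   set_context(mode=GRAPH_MODE)
#   model = Model(net, loss_fn=loss, optimizer=optim)

# New docstring style (introduced by this commit):
ms.set_context(mode=ms.GRAPH_MODE)

net = nn.Dense(10, 5)                                  # illustrative network
loss = nn.SoftmaxCrossEntropyWithLogits()              # illustrative loss
optim = nn.Momentum(net.trainable_params(), 0.01, 0.9) # illustrative optimizer
model = ms.Model(net, loss_fn=loss, optimizer=optim)
```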
@@ -2389,11 +2389,11 @@ lenet_quant.mindir
```python
>>> from mindspore import export
>>> import mindspore as ms
>>>
>>> network = LeNetQuant()
>>> inputs = Tensor(np.ones([1, 1, 32, 32]), mindspore.float32)
>>> export(network, inputs, file_name="lenet_quant", file_format='MINDIR', quant_mode='AUTO')
>>> ms.export(network, inputs, file_name="lenet_quant", file_format='MINDIR', quant_mode='AUTO')
lenet_quant.mindir
```
@@ -44,7 +44,8 @@ class GroupLossScaleManager(Cell):
``Ascend``

Examples:
>>> from mindspore import boost, Model, nn
>>> import mindspore as ms
>>> from mindspore import boost, nn
>>>
>>> class Net(nn.Cell):
... def __init__(self, enhanced_amp, num_class=10, num_channel=1):
@@ -85,7 +86,7 @@ class GroupLossScaleManager(Cell):
>>> optim = nn.Momentum(params=net.trainable_params(), learning_rate=0.1, momentum=0.9)
>>> boost_config_dict = {"boost": {"mode": "manual", "less_bn": False, "grad_freeze": False, "adasum": False, \
>>> "grad_accumulation": False, "dim_reduce": False, "loss_scale_group": True}}
>>> model = Model(net, loss_fn=loss, optimizer=optim, metrics=None, loss_scale_manager=loss_scale_manager, \
>>> model = ms.Model(net, loss_fn=loss, optimizer=optim, metrics=None, loss_scale_manager=loss_scale_manager, \
>>> boost_level="O1", boost_config_dict=boost_config_dict)
>>> # For details about how to build the dataset, please refer to the function `create_dataset` in tutorial
>>> # document on the official website:
@@ -60,12 +60,11 @@ def set_dump(target, enabled=True):
>>> # running this example to actually get the dump data.
>>> # See the document of this API for details.
>>> import numpy as np
>>>
>>> import mindspore as ms
>>> import mindspore.nn as nn
>>> from mindspore import set_context, GRAPH_MODE
>>> from mindspore import Tensor, set_dump
>>>
>>> set_context(device_target="Ascend", mode=GRAPH_MODE)
>>> ms.set_context(device_target="Ascend", mode=ms.GRAPH_MODE)
>>>
>>> class MyNet(nn.Cell):
... def __init__(self):
@@ -60,12 +60,11 @@ class HookHandle:
Examples:
>>> import numpy as np
>>> import mindspore
>>> import mindspore as ms
>>> import mindspore.nn as nn
>>> from mindspore import Tensor
>>> from mindspore import set_context, PYNATIVE_MODE
>>> from mindspore.ops import GradOperation
>>> set_context(mode=PYNATIVE_MODE)
>>> ms.set_context(mode=ms.PYNATIVE_MODE)
>>> def forward_pre_hook_fn(cell_id, inputs):
... print("forward inputs: ", inputs)
...
@@ -223,10 +223,10 @@ def get_local_rank(group=GlobalComm.WORLD_COMM_GROUP):
ValueError: If backend is invalid.
RuntimeError: If HCCL is not available or MindSpore is GPU version.
Examples:
>>> from mindspore import set_context, set_auto_parallel_context
>>> import mindspore as ms
>>> from mindspore.communication.management import init, get_rank, get_local_rank
>>> set_context(device_target="Ascend")
>>> set_auto_parallel_context(device_num=16) # 2 server, each server with 8 NPU.
>>> ms.set_context(device_target="Ascend")
>>> ms.set_auto_parallel_context(device_num=16) # 2 server, each server with 8 NPU.
>>> init()
>>> world_rank = get_rank()
>>> local_rank = get_local_rank()
@@ -260,10 +260,10 @@ def get_group_size(group=GlobalComm.WORLD_COMM_GROUP):
RuntimeError: If HCCL/NCCL is not available.

Examples:
>>> from mindspore import set_context, set_auto_parallel_context
>>> import mindspore as ms
>>> from mindspore.communication.management import init, get_group_size
>>> set_context(device_target="Ascend")
>>> set_auto_parallel_context(device_num=8)
>>> ms.set_context(device_target="Ascend")
>>> ms.set_auto_parallel_context(device_num=8)
>>> init()
>>> group_size = get_group_size()
>>> print("group_size is: ", group_size)
@@ -296,10 +296,10 @@ def get_local_rank_size(group=GlobalComm.WORLD_COMM_GROUP):
ValueError: If backend is invalid.
RuntimeError: If HCCL is not available or MindSpore is GPU version.
Examples:
>>> from mindspore import set_context, set_auto_parallel_context
>>> import mindspore as ms
>>> from mindspore.communication.management import init, get_local_rank_size
>>> set_context(device_target="Ascend")
>>> set_auto_parallel_context(device_num=16) # 2 server, each server with 8 NPU.
>>> ms.set_context(device_target="Ascend")
>>> ms.set_auto_parallel_context(device_num=16) # 2 server, each server with 8 NPU.
>>> init()
>>> local_rank_size = get_local_rank_size()
>>> print("local_rank_size is: ", local_rank_size)
@@ -361,7 +361,7 @@ def load_nonquant_param_into_quant_net(quant_model, params_dict, quant_new_param
nor in `quant_new_params`.

Examples:
>>> from mindspore import load_checkpoint
>>> import mindspore as ms
>>> from mindspore.compression.quant.quant_utils import load_nonquant_param_into_quant_net
>>> class LeNet5(nn.Cell):
... def __init__(self, num_class=10, channel=1):
@@ -393,7 +393,7 @@ def load_nonquant_param_into_quant_net(quant_model, params_dict, quant_new_param
...
>>> net = LeNet5()
>>> ckpt_file_name = "./checkpoint/LeNet5_noquant-1_32.ckpt"
>>> param_dict = load_checkpoint(ckpt_file_name)
>>> param_dict = ms.load_checkpoint(ckpt_file_name)
>>> load_nonquant_param_into_quant_net(net, param_dict)
"""
if quant_new_params is not None and not isinstance(quant_new_params, list):
@@ -544,26 +544,26 @@ def set_auto_parallel_context(**kwargs):
ValueError: If input key is not attribute in auto parallel context.

Examples:
>>> from mindspore import set_auto_parallel_context
>>> set_auto_parallel_context(device_num=8)
>>> set_auto_parallel_context(global_rank=0)
>>> set_auto_parallel_context(gradients_mean=True)
>>> set_auto_parallel_context(gradient_fp32_sync=False)
>>> set_auto_parallel_context(parallel_mode="auto_parallel")
>>> set_auto_parallel_context(search_mode="dynamic_programming")
>>> set_auto_parallel_context(auto_parallel_search_mode="dynamic_programming")
>>> set_auto_parallel_context(parameter_broadcast=False)
>>> set_auto_parallel_context(strategy_ckpt_load_file="./strategy_stage1.ckpt")
>>> set_auto_parallel_context(strategy_ckpt_save_file="./strategy_stage1.ckpt")
>>> set_auto_parallel_context(dataset_strategy=((1, 8), (1, 8)))
>>> set_auto_parallel_context(enable_parallel_optimizer=False)
>>> set_auto_parallel_context(enable_alltoall=False)
>>> set_auto_parallel_context(all_reduce_fusion_config=[8, 160])
>>> set_auto_parallel_context(pipeline_stages=2)
>>> import mindspore as ms
>>> ms.set_auto_parallel_context(device_num=8)
>>> ms.set_auto_parallel_context(global_rank=0)
>>> ms.set_auto_parallel_context(gradients_mean=True)
>>> ms.set_auto_parallel_context(gradient_fp32_sync=False)
>>> ms.set_auto_parallel_context(parallel_mode="auto_parallel")
>>> ms.set_auto_parallel_context(search_mode="dynamic_programming")
>>> ms.set_auto_parallel_context(auto_parallel_search_mode="dynamic_programming")
>>> ms.set_auto_parallel_context(parameter_broadcast=False)
>>> ms.set_auto_parallel_context(strategy_ckpt_load_file="./strategy_stage1.ckpt")
>>> ms.set_auto_parallel_context(strategy_ckpt_save_file="./strategy_stage1.ckpt")
>>> ms.set_auto_parallel_context(dataset_strategy=((1, 8), (1, 8)))
>>> ms.set_auto_parallel_context(enable_parallel_optimizer=False)
>>> ms.set_auto_parallel_context(enable_alltoall=False)
>>> ms.set_auto_parallel_context(all_reduce_fusion_config=[8, 160])
>>> ms.set_auto_parallel_context(pipeline_stages=2)
>>> parallel_config = {"gradient_accumulation_shard": True, "parallel_optimizer_threshold": 24}
>>> set_auto_parallel_context(parallel_optimizer_config=parallel_config, enable_parallel_optimizer=True)
>>> ms.set_auto_parallel_context(parallel_optimizer_config=parallel_config, enable_parallel_optimizer=True)
>>> config = {"allreduce": {"mode": "size", "config": 32}, "allgather": {"mode": "size", "config": 32}}
>>> set_auto_parallel_context(comm_fusion=config)
>>> ms.set_auto_parallel_context(comm_fusion=config)
"""
_set_auto_parallel_context(**kwargs)
@@ -582,9 +582,9 @@ def get_auto_parallel_context(attr_key):
ValueError: If input key is not attribute in auto parallel context.

Examples:
>>> from mindspore import get_auto_parallel_context
>>> parallel_mode = get_auto_parallel_context("parallel_mode")
>>> dataset_strategy = get_auto_parallel_context("dataset_strategy")
>>> import mindspore as ms
>>> parallel_mode = ms.get_auto_parallel_context("parallel_mode")
>>> dataset_strategy = ms.get_auto_parallel_context("dataset_strategy")
"""
return _get_auto_parallel_context(attr_key)
@@ -847,28 +847,28 @@ def set_context(**kwargs):
ValueError: If input key is not an attribute in context.

Examples:
>>> from mindspore import set_context, PYNATIVE_MODE
>>> set_context(mode=PYNATIVE_MODE)
>>> set_context(precompile_only=True)
>>> set_context(device_target="Ascend")
>>> set_context(device_id=0)
>>> set_context(save_graphs=True, save_graphs_path="./model.ms")
>>> set_context(enable_reduce_precision=True)
>>> set_context(enable_graph_kernel=True)
>>> set_context(graph_kernel_flags="--opt_level=2 --dump_as_text")
>>> set_context(reserve_class_name_in_scope=True)
>>> set_context(variable_memory_max_size="6GB")
>>> set_context(check_bprop=True)
>>> set_context(max_device_memory="3.5GB")
>>> set_context(mempool_block_size="1GB")
>>> set_context(print_file_path="print.pb")
>>> set_context(max_call_depth=80)
>>> set_context(env_config_path="./env_config.json")
>>> set_context(auto_tune_mode="GA,RL")
>>> set_context(grad_for_scalar=True)
>>> set_context(enable_compile_cache=True, compile_cache_path="./cache.ms")
>>> set_context(pynative_synchronize=True)
>>> set_context(runtime_num_threads=10)
>>> import mindspore as ms
>>> ms.set_context(mode=ms.PYNATIVE_MODE)
>>> ms.set_context(precompile_only=True)
>>> ms.set_context(device_target="Ascend")
>>> ms.set_context(device_id=0)
>>> ms.set_context(save_graphs=True, save_graphs_path="./model.ms")
>>> ms.set_context(enable_reduce_precision=True)
>>> ms.set_context(enable_graph_kernel=True)
>>> ms.set_context(graph_kernel_flags="--opt_level=2 --dump_as_text")
>>> ms.set_context(reserve_class_name_in_scope=True)
>>> ms.set_context(variable_memory_max_size="6GB")
>>> ms.set_context(check_bprop=True)
>>> ms.set_context(max_device_memory="3.5GB")
>>> ms.set_context(mempool_block_size="1GB")
>>> ms.set_context(print_file_path="print.pb")
>>> ms.set_context(max_call_depth=80)
>>> ms.set_context(env_config_path="./env_config.json")
>>> ms.set_context(auto_tune_mode="GA,RL")
>>> ms.set_context(grad_for_scalar=True)
>>> ms.set_context(enable_compile_cache=True, compile_cache_path="./cache.ms")
>>> ms.set_context(pynative_synchronize=True)
>>> ms.set_context(runtime_num_threads=10)
"""
ctx = _context()
# set device target first
@@ -918,9 +918,9 @@ def get_context(attr_key):
Raises:
ValueError: If input key is not an attribute in context.
Examples:
>>> from mindspore import get_context
>>> get_context("device_target")
>>> get_context("device_id")
>>> import mindspore as ms
>>> ms.get_context("device_target")
>>> ms.get_context("device_id")
"""
ctx = _context()
device = ctx.get_param(ms_ctx_param.device_target)
@@ -1009,8 +1009,8 @@ def set_ps_context(**kwargs):
ValueError: If input key is not the attribute in parameter server training mode context.

Examples:
>>> from mindspore import set_ps_context
>>> set_ps_context(enable_ps=True, enable_ssl=True, client_password='123456', server_password='123456')
>>> import mindspore as ms
>>> ms.set_ps_context(enable_ps=True, enable_ssl=True, client_password='123456', server_password='123456')
"""
_set_ps_context(**kwargs)
@@ -1031,8 +1031,8 @@ def get_ps_context(attr_key):
ValueError: If input key is not attribute in auto parallel context.

Examples:
>>> from mindspore import get_ps_context
>>> get_ps_context("enable_ps")
>>> import mindspore as ms
>>> ms.get_ps_context("enable_ps")
"""
return _get_ps_context(attr_key)
@@ -1233,8 +1233,8 @@ def set_fl_context(**kwargs):
ValueError: If input key is not the attribute in federated learning mode context.

Examples:
>>> from mindspore import set_fl_context
>>> set_fl_context(enable_fl=True, server_mode='FEDERATED_LEARNING')
>>> import mindspore as ms
>>> ms.set_fl_context(enable_fl=True, server_mode='FEDERATED_LEARNING')
"""
_set_ps_context(**kwargs)
@@ -1254,7 +1254,7 @@ def get_fl_context(attr_key):
ValueError: If input key is not attribute in federated learning mode context.

Examples:
>>> from mindspore import get_fl_context
>>> get_fl_context("server_mode")
>>> import mindspore as ms
>>> ms.get_fl_context("server_mode")
"""
return _get_ps_context(attr_key)
@@ -153,13 +153,11 @@ class WaitedDSCallback(Callback, DSCallback):
Examples:
>>> import mindspore.nn as nn
>>> import mindspore as ms
>>> from mindspore.dataset import WaitedDSCallback
>>> from mindspore import set_context, GRAPH_MODE
>>> from mindspore import Model
>>> from mindspore import Callback
>>> import mindspore.dataset as ds
>>>
>>> set_context(mode=GRAPH_MODE, device_target="CPU")
>>> ms.set_context(mode=ms.GRAPH_MODE, device_target="CPU")
>>>
>>> # custom callback class for data synchronization in data pipeline
>>> class MyWaitedCallback(WaitedDSCallback):
@@ -213,7 +211,7 @@ class WaitedDSCallback(Callback, DSCallback):
>>> data = data.map(operations=(lambda x: x), callbacks=my_cb1)
>>>
>>> net = Net()
>>> model = Model(net)
>>> model = ms.Model(net)
>>>
>>> # add the data and network callback objects to the model training callback list
>>> model.train(2, data, dataset_sink_mode=False, callbacks=[my_cb2, my_cb1])
@@ -277,11 +277,10 @@ def sync_wait_for_dataset(rank_id, rank_size, current_epoch):
Examples:
>>> # Create a synchronization callback
>>>
>>> import mindspore as ms
>>> from mindspore.dataset import sync_wait_for_dataset
>>> from mindspore import Callback
>>>
>>> class SyncForDataset(Callback):
>>> class SyncForDataset(ms.Callback):
... def __init__(self):
... super(SyncForDataset, self).__init__()
... def epoch_begin(self, run_context):
@@ -874,10 +874,10 @@ class Cell(Cell_):
Examples:
>>> import numpy as np
>>> import mindspore
>>> from mindspore import nn, Tensor, set_context, GRAPH_MODE
>>> import mindspore as ms
>>> from mindspore import nn, Tensor
>>>
>>> set_context(mode=GRAPH_MODE, device_target="Ascend")
>>> ms.set_context(mode=ms.GRAPH_MODE, device_target="Ascend")
>>> class reluNet(nn.Cell):
... def __init__(self):
... super(reluNet, self).__init__()
@@ -886,9 +886,9 @@ class Cell(Cell_):
... return self.relu(x)
>>>
>>> net = reluNet()
>>> input_dyn = Tensor(shape=[3, None], dtype=mindspore.float32)
>>> input_dyn = Tensor(shape=[3, None], dtype=ms.float32)
>>> net.set_inputs(input_dyn)
>>> input1 = Tensor(np.random.random([3, 10]), dtype=mindspore.float32)
>>> input1 = Tensor(np.random.random([3, 10]), dtype=ms.float32)
>>> output = net(input1)

NOTE:
@@ -1703,12 +1703,11 @@ class Cell(Cell_):
Examples:
>>> import numpy as np
>>> import mindspore
>>> import mindspore as ms
>>> import mindspore.nn as nn
>>> from mindspore import Tensor
>>> from mindspore import set_context, PYNATIVE_MODE
>>> from mindspore.ops import GradOperation
>>> set_context(mode=PYNATIVE_MODE)
>>> ms.set_context(mode=ms.PYNATIVE_MODE)
>>> def forward_pre_hook_fn(cell_id, inputs):
... print("forward inputs: ", inputs)
...
@@ -1804,12 +1803,11 @@ class Cell(Cell_):
Examples:
>>> import numpy as np
>>> import mindspore
>>> import mindspore as ms
>>> import mindspore.nn as nn
>>> from mindspore import Tensor
>>> from mindspore import set_context, PYNATIVE_MODE
>>> from mindspore.ops import GradOperation
>>> set_context(mode=PYNATIVE_MODE)
>>> ms.set_context(mode=ms.PYNATIVE_MODE)
>>> def forward_hook_fn(cell_id, inputs, output):
... print("forward inputs: ", inputs)
... print("forward output: ", output)
@@ -1910,12 +1908,11 @@ class Cell(Cell_):
Examples:
>>> import numpy as np
>>> import mindspore
>>> import mindspore as ms
>>> import mindspore.nn as nn
>>> from mindspore import Tensor
>>> from mindspore import set_context, PYNATIVE_MODE
>>> from mindspore.ops import GradOperation
>>> set_context(mode=PYNATIVE_MODE)
>>> ms.set_context(mode=ms.PYNATIVE_MODE)
>>> def backward_hook_fn(cell_id, grad_input, grad_output):
... print("backward input: ", grad_input)
... print("backward output: ", grad_output)
@@ -2207,13 +2204,14 @@ class GraphCell(Cell):
Examples:
>>> import numpy as np
>>> import mindspore as ms
>>> import mindspore.nn as nn
>>> from mindspore import Tensor, export, load
>>> from mindspore import Tensor
>>>
>>> net = nn.Conv2d(1, 1, kernel_size=3, weight_init="ones")
>>> input = Tensor(np.ones([1, 1, 3, 3]).astype(np.float32))
>>> export(net, input, file_name="net", file_format="MINDIR")
>>> graph = load("net.mindir")
>>> ms.export(net, input, file_name="net", file_format="MINDIR")
>>> graph = ms.load("net.mindir")
>>> net = nn.GraphCell(graph)
>>> output = net(input)
>>> print(output)
@@ -715,17 +715,16 @@ class SyncBatchNorm(_BatchNorm):
>>> # on mindspore.cn and focus on the contents of these three parts: Configuring Distributed Environment
>>> # Variables, Calling the Collective Communication Library, Running the Script.
>>> import numpy as np
>>> import mindspore as ms
>>> from mindspore.communication import init
>>> from mindspore import set_context, GRAPH_MODE, reset_auto_parallel_context, set_auto_parallel_context
>>> from mindspore import ParallelMode
>>> from mindspore import Tensor
>>> from mindspore import nn
>>> from mindspore import dtype as mstype
>>>
>>> set_context(mode=GRAPH_MODE)
>>> ms.set_context(mode=ms.GRAPH_MODE)
>>> init()
>>> reset_auto_parallel_context()
>>> set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL)
>>> ms.reset_auto_parallel_context()
>>> ms.set_auto_parallel_context(parallel_mode=ms.ParallelMode.DATA_PARALLEL)
>>> sync_bn_op = nn.SyncBatchNorm(num_features=3, process_groups=[[0, 1], [2, 3]])
>>> x = Tensor(np.ones([1, 3, 2, 2]), mstype.float32)
>>> output = sync_bn_op(x)
@@ -153,7 +153,8 @@ class Adagrad(Optimizer):
``Ascend`` ``CPU`` ``GPU``

Examples:
>>> from mindspore import nn, Model
>>> import mindspore as ms
>>> from mindspore import nn
>>>
>>> net = Net()
>>> #1) All parameters use the same learning rate and weight decay
@@ -173,7 +174,7 @@ class Adagrad(Optimizer):
>>> # The final parameters order in which the optimizer will be followed is the value of 'order_params'.
>>>
>>> loss = nn.SoftmaxCrossEntropyWithLogits()
>>> model = Model(net, loss_fn=loss, optimizer=optim)
>>> model = ms.Model(net, loss_fn=loss, optimizer=optim)
"""

@opt_init_args_register
@@ -253,7 +253,8 @@ class AdaFactor(Optimizer):
``Ascend``

Examples:
>>> from mindspore import nn, Model
>>> import mindspore as ms
>>> from mindspore import nn
>>>
>>> net = Net()
>>> #1) Parameters use the default learning rate with None and weight decay with 0.
@@ -264,7 +265,7 @@ class AdaFactor(Optimizer):
>>> group_params = [{'params': [all_params[0]]}, {'params': [all_params[1]]}]
>>> optim = nn.AdaFactor(group_params, learning_rate=0.1, weight_decay=0.0, relative_step=False)
>>> loss = nn.SoftmaxCrossEntropyWithLogits()
>>> model = Model(net, loss_fn=loss, optimizer=optim)
>>> model = ms.Model(net, loss_fn=loss, optimizer=optim)
"""
_support_parallel_optimizer = True
@@ -424,7 +424,8 @@ class Adam(Optimizer):
``Ascend`` ``GPU`` ``CPU``

Examples:
>>> from mindspore import nn, Model
>>> import mindspore as ms
>>> from mindspore import nn
>>>
>>> net = Net()
>>> #1) All parameters use the same learning rate and weight decay
@@ -444,7 +445,7 @@ class Adam(Optimizer):
>>> # The final parameters order in which the optimizer will be followed is the value of 'order_params'.
>>>
>>> loss = nn.SoftmaxCrossEntropyWithLogits()
>>> model = Model(net, loss_fn=loss, optimizer=optim)
>>> model = ms.Model(net, loss_fn=loss, optimizer=optim)
"""

@opt_init_args_register
@@ -660,7 +661,8 @@ class AdamWeightDecay(Optimizer):
``Ascend`` ``GPU`` ``CPU``

Examples:
>>> from mindspore import nn, Model
>>> import mindspore as ms
>>> from mindspore import nn
>>>
>>> net = Net()
>>> #1) All parameters use the same learning rate and weight decay
@@ -678,7 +680,7 @@ class AdamWeightDecay(Optimizer):
>>> # The final parameters order in which the optimizer will be followed is the value of 'order_params'.
>>>
>>> loss = nn.SoftmaxCrossEntropyWithLogits()
>>> model = Model(net, loss_fn=loss, optimizer=optim)
>>> model = ms.Model(net, loss_fn=loss, optimizer=optim)
"""
_support_parallel_optimizer = True
@@ -831,7 +833,8 @@ class AdamOffload(Optimizer):
``Ascend`` ``GPU`` ``CPU``

Examples:
>>> from mindspore import nn, Model
>>> import mindspore as ms
>>> from mindspore import nn
>>>
>>> net = Net()
>>> #1) All parameters use the same learning rate and weight decay
@@ -849,7 +852,7 @@ class AdamOffload(Optimizer):
>>> # The final parameters order in which the optimizer will be followed is the value of 'order_params'.
>>>
>>> loss = nn.SoftmaxCrossEntropyWithLogits()
>>> model = Model(net, loss_fn=loss, optimizer=optim)
>>> model = ms.Model(net, loss_fn=loss, optimizer=optim)
"""

def __init__(self, params, learning_rate=1e-3, beta1=0.9, beta2=0.999, eps=1e-8, use_locking=False,
@@ -155,7 +155,8 @@ class AdaMax(Optimizer):
``Ascend``

Examples:
>>> from mindspore import nn, Model
>>> import mindspore as ms
>>> from mindspore import nn
>>>
>>> net = Net()
>>> #1) All parameters use the same learning rate and weight decay
@@ -175,7 +176,7 @@ class AdaMax(Optimizer):
>>> # The final parameters order in which the optimizer will be followed is the value of 'order_params'.
>>>
>>> loss = nn.SoftmaxCrossEntropyWithLogits()
>>> model = Model(net, loss_fn=loss, optimizer=optim)
>>> model = ms.Model(net, loss_fn=loss, optimizer=optim)
"""
@opt_init_args_register
def __init__(self, params, learning_rate=0.001, beta1=0.9, beta2=0.999, eps=1e-08,
@@ -121,7 +121,8 @@ class ASGD(Optimizer):
``Ascend`` ``GPU`` ``CPU``

Examples:
>>> from mindspore import nn, Model
>>> import mindspore as ms
>>> from mindspore import nn
>>>
>>> net = Net()
>>> #1) All parameters use the same learning rate and weight decay
@@ -141,7 +142,7 @@ class ASGD(Optimizer):
>>> # The final parameters order in which the optimizer will be followed is the value of 'order_params'.
>>>
>>> loss = nn.SoftmaxCrossEntropyWithLogits()
>>> model = Model(net, loss_fn=loss, optimizer=optim)
>>> model = ms.Model(net, loss_fn=loss, optimizer=optim)
"""

@opt_init_args_register
@@ -221,7 +221,8 @@ class FTRL(Optimizer):
``Ascend`` ``GPU``

Examples:
>>> from mindspore import nn, Model
>>> import mindspore as ms
>>> from mindspore import nn
>>>
>>> net = Net()
>>> #1) All parameters use the same learning rate and weight decay
@@ -241,7 +242,7 @@ class FTRL(Optimizer):
>>> # The final parameters order in which the optimizer will be followed is the value of 'order_params'.
>>>
>>> loss = nn.SoftmaxCrossEntropyWithLogits()
>>> model = Model(net, loss_fn=loss, optimizer=optim)
>>> model = ms.Model(net, loss_fn=loss, optimizer=optim)
"""

@opt_init_args_register
@@ -213,7 +213,8 @@ class Lamb(Optimizer):
``Ascend`` ``GPU``

Examples:
>>> from mindspore import nn, Model
>>> import mindspore as ms
>>> from mindspore import nn
>>> from mindspore.nn import learning_rate_schedule
>>>
>>> net = Net()
@@ -236,7 +237,7 @@ class Lamb(Optimizer):
>>> # The final parameters order in which the optimizer will be followed is the value of 'order_params'.
>>>
>>> loss = nn.SoftmaxCrossEntropyWithLogits()
>>> model = Model(net, loss_fn=loss, optimizer=optim)
>>> model = ms.Model(net, loss_fn=loss, optimizer=optim)
"""
_support_parallel_optimizer = True
@@ -102,13 +102,14 @@ class LARS(Optimizer):
``Ascend``

Examples:
>>> from mindspore import nn, Model
>>> import mindspore as ms
>>> from mindspore import nn
>>>
>>> net = Net()
>>> loss = nn.SoftmaxCrossEntropyWithLogits()
>>> opt = nn.Momentum(net.trainable_params(), 0.1, 0.9)
>>> opt_lars = nn.LARS(opt, epsilon=1e-08, coefficient=0.02)
>>> model = Model(net, loss_fn=loss, optimizer=opt_lars, metrics=None)
>>> model = ms.Model(net, loss_fn=loss, optimizer=opt_lars, metrics=None)
"""

@opt_init_args_register
@@ -304,7 +304,8 @@ class LazyAdam(Optimizer):
``Ascend`` ``GPU`` ``CPU``

Examples:
>>> from mindspore import nn, Model
>>> import mindspore as ms
>>> from mindspore import nn
>>>
>>> net = Net()
>>> #1) All parameters use the same learning rate and weight decay
@@ -324,7 +325,7 @@ class LazyAdam(Optimizer):
>>> # The final parameters order in which the optimizer will be followed is the value of 'order_params'.
>>>
>>> loss = nn.SoftmaxCrossEntropyWithLogits()
>>> model = Model(net, loss_fn=loss, optimizer=optim)
>>> model = ms.Model(net, loss_fn=loss, optimizer=optim)
"""

@opt_init_args_register
@@ -165,7 +165,8 @@ class Momentum(Optimizer):
``Ascend`` ``GPU`` ``CPU``

Examples:
>>> from mindspore import nn, Model
>>> import mindspore as ms
>>> from mindspore import nn
>>>
>>> net = Net()
>>> #1) All parameters use the same learning rate and weight decay
@@ -185,7 +186,7 @@ class Momentum(Optimizer):
>>> # The final parameters order in which the optimizer will be followed is the value of 'order_params'.
>>>
>>> loss = nn.SoftmaxCrossEntropyWithLogits()
>>> model = Model(net, loss_fn=loss, optimizer=optim, metrics=None)
>>> model = ms.Model(net, loss_fn=loss, optimizer=optim, metrics=None)
"""
@opt_init_args_register
def __init__(self, params, learning_rate, momentum, weight_decay=0.0, loss_scale=1.0, use_nesterov=False):
@@ -160,7 +160,8 @@ class ProximalAdagrad(Optimizer):
``Ascend``

Examples:
>>> from mindspore import nn, Model
>>> import mindspore as ms
>>> from mindspore import nn
>>>
>>> net = Net()
>>> #1) All parameters use the same learning rate and weight decay
@@ -180,7 +181,7 @@ class ProximalAdagrad(Optimizer):
>>> # The final parameters order in which the optimizer will be followed is the value of 'order_params'.
>>>
>>> loss = nn.SoftmaxCrossEntropyWithLogits()
>>> model = Model(net, loss_fn=loss, optimizer=optim)
>>> model = ms.Model(net, loss_fn=loss, optimizer=optim)
"""

@opt_init_args_register
@@ -172,7 +172,8 @@ class RMSProp(Optimizer):
``Ascend`` ``GPU`` ``CPU``

Examples:
>>> from mindspore import nn, Model
>>> import mindspore as ms
>>> from mindspore import nn
>>>
>>> net = Net()
>>> #1) All parameters use the same learning rate and weight decay
@@ -192,7 +193,7 @@ class RMSProp(Optimizer):
>>> # The final parameters order in which the optimizer will be followed is the value of 'order_params'.
>>>
>>> loss = nn.SoftmaxCrossEntropyWithLogits()
>>> model = Model(net, loss_fn=loss, optimizer=optim)
>>> model = ms.Model(net, loss_fn=loss, optimizer=optim)
"""

@opt_init_args_register
@@ -124,7 +124,8 @@ class Rprop(Optimizer):
``Ascend`` ``GPU`` ``CPU``

Examples:
>>> from mindspore import nn, Model
>>> import mindspore as ms
>>> from mindspore import nn
>>>
>>> net = Net()
>>> #1) All parameters use the same learning rate and weight decay
@@ -144,7 +145,7 @@ class Rprop(Optimizer):
>>> # The final parameters order in which the optimizer will be followed is the value of 'order_params'.
>>>
>>> loss = nn.SoftmaxCrossEntropyWithLogits()
>>> model = Model(net, loss_fn=loss, optimizer=optim)
>>> model = ms.Model(net, loss_fn=loss, optimizer=optim)
"""

@opt_init_args_register
@@ -117,7 +117,8 @@ class SGD(Optimizer):
``Ascend`` ``GPU`` ``CPU``

Examples:
>>> from mindspore import nn, Model
>>> import mindspore as ms
>>> from mindspore import nn
>>>
>>> net = Net()
>>> #1) All parameters use the same learning rate and weight decay
@@ -137,7 +138,7 @@ class SGD(Optimizer):
>>> # The final parameters order in which the optimizer will be followed is the value of 'order_params'.
>>>
>>> loss = nn.SoftmaxCrossEntropyWithLogits()
>>> model = Model(net, loss_fn=loss, optimizer=optim)
>>> model = ms.Model(net, loss_fn=loss, optimizer=optim)
"""

@opt_init_args_register
@@ -334,11 +334,8 @@ def thor(net, learning_rate, damping, momentum, weight_decay=0.0, loss_scale=1.0
``Ascend`` ``GPU``

Examples:
>>> import mindspore as ms
>>> from mindspore.nn import thor
>>> from mindspore import Model
>>> from mindspore import FixedLossScaleManager
>>> from mindspore import LossMonitor
>>> from mindspore import ConvertModelUtils
>>> from mindspore import nn
>>> from mindspore import Tensor
>>>
@@ -347,13 +344,13 @@ def thor(net, learning_rate, damping, momentum, weight_decay=0.0, loss_scale=1.0
>>> temp = Tensor([4e-4, 1e-4, 1e-5, 1e-5], mstype.float32)
>>> optim = thor(net, learning_rate=temp, damping=temp, momentum=0.9, loss_scale=128, frequency=4)
>>> loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
>>> loss_scale = FixedLossScaleManager(128, drop_overflow_update=False)
>>> model = Model(net, loss_fn=loss, optimizer=optim, loss_scale_manager=loss_scale, metrics={'acc'},
>>> loss_scale = ms.FixedLossScaleManager(128, drop_overflow_update=False)
>>> model = ms.Model(net, loss_fn=loss, optimizer=optim, loss_scale_manager=loss_scale, metrics={'acc'},
... amp_level="O2", keep_batchnorm_fp32=False)
>>> model = ConvertModelUtils.convert_to_thor_model(model=model, network=net, loss_fn=loss, optimizer=optim,
>>> model = ms.ConvertModelUtils.convert_to_thor_model(model=model, network=net, loss_fn=loss, optimizer=optim,
... loss_scale_manager=loss_scale, metrics={'acc'},
... amp_level="O2", keep_batchnorm_fp32=False)
>>> loss_cb = LossMonitor()
>>> loss_cb = ms.LossMonitor()
>>> model.train(1, dataset, callbacks=loss_cb, sink_size=4, dataset_sink_mode=True)

"""
@@ -44,8 +44,7 @@ class SparseToDense(Cell):
>>> import mindspore as ms
>>> from mindspore import Tensor, COOTensor
>>> import mindspore.nn as nn
>>> from mindspore import set_context, PYNATIVE_MODE
>>> set_context(mode=PYNATIVE_MODE)
>>> ms.set_context(mode=ms.PYNATIVE_MODE)
>>> indices = Tensor([[0, 1], [1, 2]])
>>> values = Tensor([1, 2], dtype=ms.int32)
>>> dense_shape = (3, 4)
@@ -313,17 +313,16 @@ class DistributedGradReducer(Cell):
>>> # on mindspore.cn and focus on the contents of these three parts: Configuring Distributed Environment
>>> # Variables, Calling the Collective Communication Library, Running The Script.
>>> import numpy as np
>>> import mindspore as ms
>>> from mindspore.communication import init
>>> from mindspore import ops
>>> from mindspore import set_context, reset_auto_parallel_context, set_auto_parallel_context, GRAPH_MODE
>>> from mindspore import ParallelMode
>>> from mindspore import Parameter, Tensor
>>> from mindspore import nn
>>>
>>> set_context(mode=GRAPH_MODE)
>>> ms.set_context(mode=ms.GRAPH_MODE)
>>> init()
>>> reset_auto_parallel_context()
>>> set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL)
>>> ms.reset_auto_parallel_context()
>>> ms.set_auto_parallel_context(parallel_mode=ms.ParallelMode.DATA_PARALLEL)
>>>
>>> class TrainingWrapper(nn.Cell):
... def __init__(self, network, optimizer, sens=1.0):
@@ -338,7 +337,7 @@ class DistributedGradReducer(Cell):
... self.grad_reducer = None
... self.parallel_mode = context.get_auto_parallel_context("parallel_mode")
... self.depend = ops.Depend()
... if self.parallel_mode in [ParallelMode.DATA_PARALLEL, ParallelMode.HYBRID_PARALLEL]:
... if self.parallel_mode in [ms.ParallelMode.DATA_PARALLEL, ms.ParallelMode.HYBRID_PARALLEL]:
... self.reducer_flag = True
... if self.reducer_flag:
... mean = context.get_auto_parallel_context("gradients_mean")
@@ -973,8 +973,8 @@ def unique(x, return_inverse=False):
Examples:
>>> import mindspore.numpy as np
>>> from mindspore import set_context, GRAPH_MODE
>>> set_context(mode=GRAPH_MODE)
>>> import mindspore as ms
>>> ms.set_context(mode=ms.GRAPH_MODE)
>>> input_x = np.asarray([1, 2, 2, 2, 3, 4, 5]).astype('int32')
>>> output_x = np.unique(input_x)
>>> print(output_x)
@@ -216,11 +216,11 @@ def grad(fn, grad_position=0, sens_param=False):
Examples:
>>> import numpy as np
>>> import mindspore as ms
>>> import mindspore.nn as nn
>>> from mindspore import set_context, GRAPH_MODE
>>> from mindspore import Tensor
>>> from mindspore.ops.functional import grad
>>> set_context(mode=GRAPH_MODE)
>>> ms.set_context(mode=ms.GRAPH_MODE)
>>> class Net(nn.Cell):
... def construct(self, x, y, z):
... return x*y*z
@@ -312,11 +312,11 @@ def jet(fn, primals, series):
Examples:
>>> import numpy as np
>>> import mindspore.nn as nn
>>> from mindspore import set_context, GRAPH_MODE
>>> import mindspore as ms
>>> import mindspore.ops as P
>>> from mindspore import Tensor
>>> from mindspore.ops.functional import jet
>>> set_context(mode=GRAPH_MODE)
>>> ms.set_context(mode=ms.GRAPH_MODE)
>>> class Net(nn.Cell):
... def __init__(self):
... super().__init__()
@@ -409,12 +409,12 @@ def derivative(fn, primals, order):
Examples:
>>> import numpy as np
>>> import mindspore as ms
>>> import mindspore.nn as nn
>>> from mindspore import set_context, GRAPH_MODE
>>> import mindspore.ops as P
>>> from mindspore import Tensor
>>> from mindspore.ops.functional import derivative
>>> set_context(mode=GRAPH_MODE)
>>> ms.set_context(mode=ms.GRAPH_MODE)
>>> class Net(nn.Cell):
... def __init__(self):
... super().__init__()
@@ -718,8 +718,8 @@ class GpuConvertToDynamicShape(PrimitiveWithCheck):
Examples:
>>> # make a model, since dynamic shape operators must be in GRAPH_MODE
>>> import mindspore as ms
>>> import mindspore.nn as nn
>>> from mindspore import set_context, GRAPH_MODE
>>> from mindspore.ops.operations import _inner_ops as inner
>>> from mindspore.ops import operations as P
>>> class TestDynamicShapeReshapeNet(nn.Cell):
@@ -733,7 +733,7 @@ class GpuConvertToDynamicShape(PrimitiveWithCheck):
>>> dynamic_shape_input = self.convert_to_dynamic_shape(input)
>>> reshaped_input = self.reshape(input, new_shape)
>>>
>>> set_context(mode=GRAPH_MODE, device_target="GPU")
>>> ms.set_context(mode=ms.GRAPH_MODE, device_target="GPU")
>>> input = Tensor(np.array([0, 1, 2, 3])
>>> new_shape = (2, 2)
>>> net = TestDynamicShapeReshapeNet()
@@ -766,10 +766,10 @@ class ErrorOnDynamicShapeInput(PrimitiveWithInfer):
Examples:
>>> # make a model, since dynamic shape operators must be in GRAPH_MODE
>>> import mindspore as ms
>>> import mindspore.nn as nn
>>> from mindspore.ops.operations import _inner_ops as inner
>>> from mindspore.ops import operations as P
>>> from mindspore import set_context, GRAPH_MODE
>>> class AssertDynamicShapeNet(nn.Cell):
>>> def __init__(self):
>>> super(AssertDynamicShapeNet, self).__init__()
@@ -780,7 +780,7 @@ class ErrorOnDynamicShapeInput(PrimitiveWithInfer):
>>> dynamic_shape_input = self.convert_to_dynamic_shape(input)
>>> self.error_on_dynamic_shape_input(dynamic_shape_input)
>>>
>>> set_context(mode=GRAPH_MODE, device_target="GPU")
>>> ms.set_context(mode=ms.GRAPH_MODE, device_target="GPU")
>>> input = Tensor(np.array([0])
>>> net = TestDynamicShapeReshapeNet()
>>> output = net(input, new_shape)
@@ -1779,12 +1779,11 @@ class CellBackwardHook(PrimitiveWithInfer):
``Ascend`` ``GPU`` ``CPU``

Examples:
>>> import mindspore
>>> import mindspore as ms
>>> from mindspore import Tensor
>>> from mindspore import set_context, PYNATIVE_MODE
>>> from mindspore.ops import GradOperation
>>> from mindspore.ops.operations import _inner_ops as inner
>>> set_context(mode=PYNATIVE_MODE)
>>> ms.set_context(mode=ms.PYNATIVE_MODE)
>>> def hook_fn(grad):
... print(grad)
...
@@ -1982,12 +1981,12 @@ class KMeansCentroids(PrimitiveWithInfer):
Examples:
>>> import numpy as np
>>> import mindspore as ms
>>> import mindspore.common.dtype as mstype
>>> import mindspore.nn as nn
>>> from mindspore import Tensor
>>> from mindspore.ops import operations as P
>>> from mindspore import set_context, GRAPH_MODE
>>> set_context(mode=GRAPH_MODE, device_target="Ascend")
>>> ms.set_context(mode=ms.GRAPH_MODE, device_target="Ascend")

>>> class Net(nn.Cell):
>>> def __init__(self):
@@ -213,12 +213,13 @@ class AllGather(PrimitiveWithInfer):
Examples:
>>> # This example should be run with two devices. Refer to the tutorial > Distributed Training on mindspore.cn
>>> import numpy as np
>>> import mindspore as ms
>>> import mindspore.ops as ops
>>> import mindspore.nn as nn
>>> from mindspore.communication import init
>>> from mindspore import Tensor, set_context, GRAPH_MODE
>>> from mindspore import Tensor
>>>
>>> set_context(mode=GRAPH_MODE)
>>> ms.set_context(mode=ms.GRAPH_MODE)
>>> init()
>>> class Net(nn.Cell):
... def __init__(self):
@@ -412,14 +413,15 @@ class ReduceScatter(PrimitiveWithInfer):
Examples:
>>> # This example should be run with two devices. Refer to the tutorial > Distributed Training on mindspore.cn
>>> from mindspore import Tensor, set_context, GRAPH_MODE
>>> import mindspore as ms
>>> from mindspore import Tensor
>>> from mindspore.communication import init
>>> from mindspore.ops import ReduceOp
>>> import mindspore.nn as nn
>>> import mindspore.ops as ops
>>> import numpy as np
>>>
>>> set_context(mode=GRAPH_MODE)
>>> ms.set_context(mode=ms.GRAPH_MODE)
>>> init()
>>> class Net(nn.Cell):
... def __init__(self):
@@ -555,14 +557,14 @@ class Broadcast(PrimitiveWithInfer):
>>> # Please refer to the Programming Guide > Distributed Training -> Distributed Parallel Usage Example
>>> # on mindspore.cn and focus on the contents of these three parts: Configuring Distributed Environment
>>> # Variables, Calling the Collective Communication Library, Running The Script.
>>> import mindspore as ms
>>> from mindspore import Tensor
>>> from mindspore import set_context, GRAPH_MODE
>>> from mindspore.communication import init
>>> import mindspore.nn as nn
>>> import mindspore.ops as ops
>>> import numpy as np
>>>
>>> set_context(mode=GRAPH_MODE)
>>> ms.set_context(mode=ms.GRAPH_MODE)
>>> init()
>>> class Net(nn.Cell):
... def __init__(self):
@@ -689,7 +691,6 @@ class NeighborExchange(Primitive):
>>> import os
>>> import mindspore as ms
>>> from mindspore import Tensor
>>> from mindspore import set_context, GRAPH_MODE
>>> from mindspore.communication import init
>>> import mindspore.nn as nn
>>> import mindspore.ops as ops
@@ -705,7 +706,7 @@ class NeighborExchange(Primitive):
... def construct(self, x):
... out = self.neighborexchange((x,))
...
>>> set_context(mode=GRAPH_MODE, device_target='Ascend')
>>> ms.set_context(mode=ms.GRAPH_MODE, device_target='Ascend')
>>> init()
>>> net = Net()
>>> input_x = Tensor(np.ones([3, 3]), dtype = ms.float32)
@@ -766,7 +767,6 @@ class AlltoAll(PrimitiveWithInfer):
>>> import os
>>> import mindspore as ms
>>> from mindspore import Tensor
>>> from mindspore import set_context, GRAPH_MODE
>>> from mindspore.communication import init
>>> import mindspore.nn as nn
>>> import mindspore.ops as ops
@@ -780,7 +780,7 @@ class AlltoAll(PrimitiveWithInfer):
... out = self.alltoall(x)
... return out
...
>>> set_context(mode=GRAPH_MODE, device_target='Ascend')
>>> ms.set_context(mode=ms.GRAPH_MODE, device_target='Ascend')
>>> init()
>>> net = Net()
>>> rank_id = int(os.getenv("RANK_ID"))
@@ -860,7 +860,6 @@ class NeighborExchangeV2(Primitive):
>>> import os
>>> import mindspore as ms
>>> from mindspore import Tensor
>>> from mindspore import set_context, GRAPH_MODE
>>> from mindspore.communication import init
>>> import mindspore.nn as nn
>>> import mindspore.ops as ops
@@ -878,7 +877,7 @@ class NeighborExchangeV2(Primitive):
... out = self.neighborexchangev2(x)
... return out
...
>>> set_context(mode=GRAPH_MODE, device_target='Ascend')
>>> ms.set_context(mode=ms.GRAPH_MODE, device_target='Ascend')
>>> init()
>>> input_x = Tensor(np.ones([1, 1, 2, 2]), dtype = ms.float32)
>>> net = Net()
@@ -355,12 +355,11 @@ class HookBackward(PrimitiveWithInfer):
``Ascend`` ``GPU`` ``CPU``

Examples:
>>> import mindspore
>>> import mindspore as ms
>>> from mindspore import ops
>>> from mindspore import Tensor
>>> from mindspore import set_context, PYNATIVE_MODE
>>> from mindspore.ops import GradOperation
>>> set_context(mode=PYNATIVE_MODE)
>>> ms.set_context(mode=ms.PYNATIVE_MODE)
>>> def hook_fn(grad):
... print(grad)
...
@@ -375,7 +374,7 @@ class HookBackward(PrimitiveWithInfer):
>>> def backward(x, y):
... return grad_all(hook_test)(x, y)
...
>>> output = backward(Tensor(1, mindspore.float32), Tensor(2, mindspore.float32))
>>> output = backward(Tensor(1, ms.float32), Tensor(2, ms.float32))
(Tensor(shape=[], dtype=Float32, value= 2),)
>>> print(output)
(Tensor(shape=[], dtype=Float32, value= 4), Tensor(shape=[], dtype=Float32, value= 4))
@@ -455,7 +455,7 @@ class FusedCastAdamWeightDecay(PrimitiveWithInfer):
Examples:
>>> import numpy as np
>>> from mindspore import set_context, GRAPH_MODE
>>> import mindspore as ms
>>> import mindspore.nn as nn
>>> import mindspore.ops as ops
>>> from mindspore import Tensor, Parameter
@@ -470,7 +470,7 @@ class FusedCastAdamWeightDecay(PrimitiveWithInfer):
... def construct(self, lr, beta1, beta2, epsilon, decay, grad):
... out = self.opt(self.var, self.m, self.v, lr, beta1, beta2, epsilon, decay, grad)
... return out
>>> set_context(mode=GRAPH_MODE, device_target="CPU")
>>> ms.set_context(mode=ms.GRAPH_MODE, device_target="CPU")
>>> net = Net()
>>> gradient = Tensor(np.ones([2, 2]), mstype.float16)
>>> output = net(0.001, 0.9, 0.999, 1e-8, 0.0, gradient)
@@ -584,7 +584,7 @@ class FusedAdaFactor(PrimitiveWithInfer):
Examples:
>>> import numpy as np
>>> from mindspore import set_context, GRAPH_MODE
>>> import mindspore as ms
>>> import mindspore.nn as nn
>>> import mindspore.ops as ops
>>> from mindspore import Tensor, Parameter
@@ -604,7 +604,7 @@ class FusedAdaFactor(PrimitiveWithInfer):
... out = self.opt(epsilon, clip_threshold, beta1, beta2, weight_decay, lr, grad, self.param,
... self.exp_avg, self.exp_avg_sq_row, self.exp_avg_sq_col, self.exp_avg_sq)
... return out
>>> set_context(mode=GRAPH_MODE, device_target="CPU")
>>> ms.set_context(mode=ms.GRAPH_MODE, device_target="CPU")
>>> net = Net()
>>> gradient = Tensor(np.ones(param_shape), mstype.float32)
>>> net((1e-30, 1e-3), 1.0, 0.9, 0.8, 1e-2, 0.03, gradient)
@@ -203,8 +203,8 @@ def _set_ps_context(**kwargs):
ValueError: If input key is not the attribute in parameter server training mode context.

Examples:
>>> from mindspore import set_ps_context
>>> set_ps_context(enable_ps=True, enable_ssl=True, client_password='123456', server_password='123456')
>>> import mindspore as ms
>>> ms.set_ps_context(enable_ps=True, enable_ssl=True, client_password='123456', server_password='123456')
"""
kwargs = _check_conflict_value(kwargs)
for key, value in kwargs.items():
@@ -83,8 +83,8 @@ class Profiler:
Examples:
>>> import numpy as np
>>> import mindspore as ms
>>> from mindspore import nn
>>> from mindspore import Model, set_context, GRAPH_MODE
>>> import mindspore.dataset as ds
>>> from mindspore import Profiler
>>>
@@ -104,12 +104,12 @@ class Profiler:
... optimizer = nn.Momentum(net.trainable_params(), 1, 0.9)
... loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
... data = ds.GeneratorDataset(generator, ["data", "label"])
... model = Model(net, loss, optimizer)
... model = ms.Model(net, loss, optimizer)
... model.train(1, data)
>>>
>>> if __name__ == '__main__':
... # If the device_target is GPU, set the device_target to "GPU"
... set_context(mode=GRAPH_MODE, device_target="Ascend")
... ms.set_context(mode=ms.GRAPH_MODE, device_target="Ascend")
...
... # Init Profiler
... # Note that the Profiler should be initialized before model.train
@@ -94,10 +94,10 @@ class Callback:
Examples:
>>> import numpy as np
>>> from mindspore import Model, nn
>>> from mindspore import Callback
>>> import mindspore as ms
>>> from mindspore import nn
>>> from mindspore import dataset as ds
>>> class Print_info(Callback):
>>> class Print_info(ms.Callback):
... def step_end(self, run_context):
... cb_params = run_context.original_args()
... print("step_num: ", cb_params.cur_step_num)
@@ -108,7 +108,7 @@ class Callback:
>>> net = nn.Dense(10, 5)
>>> loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
>>> optim = nn.Momentum(net.trainable_params(), 0.01, 0.9)
>>> model = Model(net, loss_fn=loss, optimizer=optim)
>>> model = ms.Model(net, loss_fn=loss, optimizer=optim)
>>> model.train(1, dataset, callbacks=print_cb)
step_num: 2
"""
@@ -104,8 +104,8 @@ class CheckpointConfig:
ValueError: If input parameter is not the correct type.

Examples:
>>> from mindspore import Model, nn
>>> from mindspore import ModelCheckpoint, CheckpointConfig
>>> import mindspore as ms
>>> from mindspore import nn
>>> from mindspore.common.initializer import Normal
>>>
>>> class LeNet5(nn.Cell):
@@ -132,11 +132,11 @@ class CheckpointConfig:
>>> net = LeNet5()
>>> loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
>>> optim = nn.Momentum(net.trainable_params(), 0.01, 0.9)
>>> model = Model(net, loss_fn=loss, optimizer=optim)
>>> model = ms.Model(net, loss_fn=loss, optimizer=optim)
>>> data_path = './MNIST_Data'
>>> dataset = create_dataset(data_path)
>>> config = CheckpointConfig(saved_network=net)
>>> ckpoint_cb = ModelCheckpoint(prefix='LeNet5', directory='./checkpoint', config=config)
>>> config = ms.CheckpointConfig(saved_network=net)
>>> ckpoint_cb = ms.ModelCheckpoint(prefix='LeNet5', directory='./checkpoint', config=config)
>>> model.train(10, dataset, callbacks=ckpoint_cb)
"""
@@ -33,16 +33,16 @@ class History(Callback):
Examples:
>>> import numpy as np
>>> import mindspore as ms
>>> import mindspore.dataset as ds
>>> from mindspore import History
>>> from mindspore import Model, nn
>>> from mindspore import nn
>>> data = {"x": np.float32(np.random.rand(64, 10)), "y": np.random.randint(0, 5, (64,))}
>>> train_dataset = ds.NumpySlicesDataset(data=data).batch(32)
>>> net = nn.Dense(10, 5)
>>> crit = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
>>> opt = nn.Momentum(net.trainable_params(), 0.01, 0.9)
>>> history_cb = History()
>>> model = Model(network=net, optimizer=opt, loss_fn=crit, metrics={"recall"})
>>> history_cb = ms.History()
>>> model = ms.Model(network=net, optimizer=opt, loss_fn=crit, metrics={"recall"})
>>> model.train(2, train_dataset, callbacks=[history_cb])
>>> print(history_cb.epoch)
>>> print(history_cb.history)
@@ -44,17 +44,17 @@ class LambdaCallback(Callback):
Examples:
>>> import numpy as np
>>> import mindspore as ms
>>> import mindspore.dataset as ds
>>> from mindspore import LambdaCallback
>>> from mindspore import Model, nn
>>> from mindspore import nn
>>> data = {"x": np.float32(np.random.rand(64, 10)), "y": np.random.randint(0, 5, (64,))}
>>> train_dataset = ds.NumpySlicesDataset(data=data).batch(32)
>>> net = nn.Dense(10, 5)
>>> crit = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
>>> opt = nn.Momentum(net.trainable_params(), 0.01, 0.9)
>>> lambda_callback = LambdaCallback(on_train_epoch_end=
>>> lambda_callback = ms.LambdaCallback(on_train_epoch_end=
... lambda run_context: print("loss: ", run_context.original_args().net_outputs))
>>> model = Model(network=net, optimizer=opt, loss_fn=crit, metrics={"recall"})
>>> model = ms.Model(network=net, optimizer=opt, loss_fn=crit, metrics={"recall"})
>>> model.train(2, train_dataset, callbacks=[lambda_callback])
loss: 1.6127687
loss: 1.6106578
@ -177,15 +177,13 @@ class SummaryLandscape:
|
|||
metadata and other data required to create landscape.
|
||||
|
||||
Examples:
|
||||
>>> import mindspore as ms
|
||||
>>> import mindspore.nn as nn
|
||||
>>> from mindspore import set_context, GRAPH_MODE
|
||||
>>> from mindspore import SummaryCollector, SummaryLandscape
|
||||
>>> from mindspore import Model
|
||||
>>> from mindspore.nn import Loss, Accuracy
|
||||
>>>
|
||||
>>> if __name__ == '__main__':
|
||||
... # If the device_target is Ascend, set the device_target to "Ascend"
|
||||
... set_context(mode=GRAPH_MODE, device_target="GPU")
|
||||
... ms.set_context(mode=ms.GRAPH_MODE, device_target="GPU")
|
||||
... mnist_dataset_dir = '/path/to/mnist_dataset_directory'
|
||||
... # The detail of create_dataset method shown in model_zoo.official.cv.lenet.src.dataset.py
|
||||
... ds_train = create_dataset(mnist_dataset_dir, 32)
|
||||
|
@@ -193,10 +191,10 @@ class SummaryLandscape:
... network = LeNet5(10)
... net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
... net_opt = nn.Momentum(network.trainable_params(), 0.01, 0.9)
... model = Model(network, net_loss, net_opt, metrics={"Accuracy": Accuracy()})
... model = ms.Model(network, net_loss, net_opt, metrics={"Accuracy": Accuracy()})
... # Simple usage for collect landscape information:
... interval_1 = [1, 2, 3, 4, 5]
... summary_collector = SummaryCollector(summary_dir='./summary/lenet_interval_1',
... summary_collector = ms.SummaryCollector(summary_dir='./summary/lenet_interval_1',
... collect_specified_data={'collect_landscape':{"landscape_size": 4,
... "unit": "step",
... "create_landscape":{"train":True,
@@ -216,7 +214,7 @@ class SummaryLandscape:
... ds_eval = create_dataset(mnist_dataset_dir, 32)
... return model, network, ds_eval, metrics
...
... summary_landscape = SummaryLandscape('./summary/lenet_interval_1')
... summary_landscape = ms.SummaryLandscape('./summary/lenet_interval_1')
... # parameters of collect_landscape can be modified or unchanged
... summary_landscape.gen_landscapes_with_multi_process(callback_fn,
... collect_landscape={"landscape_size": 4,
@@ -38,12 +38,13 @@ class LossMonitor(Callback):
ValueError: If per_print_times is not an integer or less than zero.

Examples:
>>> from mindspore import Model, nn
>>> import mindspore as ms
>>> from mindspore import nn
>>>
>>> net = LeNet5()
>>> loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
>>> optim = nn.Momentum(net.trainable_params(), 0.01, 0.9)
>>> model = Model(net, loss_fn=loss, optimizer=optim)
>>> model = ms.Model(net, loss_fn=loss, optimizer=optim)
>>> data_path = './MNIST_Data'
>>> dataset = create_dataset(data_path)
>>> loss_monitor = LossMonitor()
@@ -33,7 +33,8 @@ class LearningRateScheduler(Callback):
Examples:
>>> import numpy as np
>>> from mindspore import Model
>>> import mindspore as ms
>>> from mindspore import nn
>>> from mindspore import LearningRateScheduler
>>> import mindspore.nn as nn
>>> from mindspore import dataset as ds

@@ -48,7 +49,7 @@ class LearningRateScheduler(Callback):
>>> net = nn.Dense(10, 5)
>>> loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
>>> optim = nn.Momentum(net.trainable_params(), learning_rate=lr, momentum=momentum)
>>> model = Model(net, loss_fn=loss, optimizer=optim)
>>> model = ms.Model(net, loss_fn=loss, optimizer=optim)
...
>>> data = {"x": np.float32(np.random.rand(64, 10)), "y": np.random.randint(0, 5, (64,))}
>>> dataset = ds.NumpySlicesDataset(data=data).batch(32)
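The LearningRateScheduler hunks are cut off before the callback itself is constructed. A hedged sketch of how the new import style fits around it; the `learning_rate_function` body below is purely illustrative and not taken from the original docstring:

```python
import numpy as np
import mindspore as ms
from mindspore import nn
from mindspore import dataset as ds

# Illustrative schedule: the callback passes the current lr and global step number
# to this function and applies whatever it returns.
def learning_rate_function(lr, cur_step_num):
    if cur_step_num % 10 == 0:
        lr = lr * 0.9
    return lr

lr = 0.1
momentum = 0.9
net = nn.Dense(10, 5)
loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
optim = nn.Momentum(net.trainable_params(), learning_rate=lr, momentum=momentum)
model = ms.Model(net, loss_fn=loss, optimizer=optim)

data = {"x": np.float32(np.random.rand(64, 10)), "y": np.random.randint(0, 5, (64,))}
dataset = ds.NumpySlicesDataset(data=data).batch(32)
model.train(1, dataset, callbacks=[ms.LearningRateScheduler(learning_rate_function)],
            dataset_sink_mode=False)
```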
@@ -174,15 +174,13 @@ class SummaryCollector(Callback):
ValueError: The Summary is not supported, please without `-s on` and recompile source.

Examples:
>>> import mindspore as ms
>>> import mindspore.nn as nn
>>> from mindspore import set_context, GRAPH_MODE
>>> from mindspore import SummaryCollector
>>> from mindspore import Model
>>> from mindspore.nn import Accuracy
>>>
>>> if __name__ == '__main__':
... # If the device_target is GPU, set the device_target to "GPU"
... set_context(mode=GRAPH_MODE, device_target="Ascend")
... ms.set_context(mode=ms.GRAPH_MODE, device_target="Ascend")
... mnist_dataset_dir = '/path/to/mnist_dataset_directory'
... # The detail of create_dataset method shown in model_zoo.official.cv.lenet.src.dataset.py
... ds_train = create_dataset(mnist_dataset_dir, 32)
@@ -190,15 +188,15 @@ class SummaryCollector(Callback):
... network = LeNet5(10)
... net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
... net_opt = nn.Momentum(network.trainable_params(), 0.01, 0.9)
... model = Model(network, net_loss, net_opt, metrics={"Accuracy": Accuracy()}, amp_level="O2")
... model = ms.Model(network, net_loss, net_opt, metrics={"Accuracy": Accuracy()}, amp_level="O2")
...
... # Simple usage:
... summary_collector = SummaryCollector(summary_dir='./summary_dir')
... summary_collector = ms.SummaryCollector(summary_dir='./summary_dir')
... model.train(1, ds_train, callbacks=[summary_collector], dataset_sink_mode=False)
...
... # Do not collect metric and collect the first layer parameter, others are collected by default
... specified={'collect_metric': False, 'histogram_regular': '^conv1.*'}
... summary_collector = SummaryCollector(summary_dir='./summary_dir', collect_specified_data=specified)
... summary_collector = ms.SummaryCollector(summary_dir='./summary_dir', collect_specified_data=specified)
... model.train(1, ds_train, callbacks=[summary_collector], dataset_sink_mode=False)
"""
@@ -33,12 +33,13 @@ class TimeMonitor(Callback):
ValueError: If data_size is not positive int.

Examples:
>>> from mindspore import Model, nn
>>> import mindspore as ms
>>> from mindspore import nn
>>>
>>> net = LeNet5()
>>> loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
>>> optim = nn.Momentum(net.trainable_params(), 0.01, 0.9)
>>> model = Model(net, loss_fn=loss, optimizer=optim)
>>> model = ms.Model(net, loss_fn=loss, optimizer=optim)
>>> data_path = './MNIST_Data'
>>> dataset = create_dataset(data_path)
>>> time_monitor = TimeMonitor()
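The LossMonitor and TimeMonitor examples rely on `LeNet5()` and `create_dataset()` helpers that are not shown here. A self-contained sketch of the same flow under the new spelling, with a toy network and in-memory dataset standing in for those helpers:

```python
import numpy as np
import mindspore as ms
from mindspore import nn
from mindspore import dataset as ds

# Toy stand-ins for the LeNet5() and create_dataset() helpers referenced above.
data = {"x": np.float32(np.random.rand(64, 10)), "y": np.random.randint(0, 5, (64,))}
dataset = ds.NumpySlicesDataset(data=data).batch(32)
net = nn.Dense(10, 5)
loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
optim = nn.Momentum(net.trainable_params(), 0.01, 0.9)
model = ms.Model(net, loss_fn=loss, optimizer=optim)

# LossMonitor prints the loss and TimeMonitor the per-epoch time; both via the `ms.` alias.
model.train(2, dataset, callbacks=[ms.LossMonitor(), ms.TimeMonitor()], dataset_sink_mode=False)
```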
@@ -159,15 +159,15 @@ def connect_network_with_dataset(network, dataset_helper):
Examples:
>>> import numpy as np
>>> from mindspore import DatasetHelper
>>> from mindspore import DatasetHelper, nn, connect_network_with_dataset
>>> import mindspore as ms
>>> from mindspore import nn
>>> from mindspore import dataset as ds
>>>
>>> data = {"x": np.float32(np.random.rand(64, 10)), "y": np.random.randint(0, 5, (64,))}
>>> train_dataset = ds.NumpySlicesDataset(data=data).batch(32)
>>> dataset_helper = DatasetHelper(train_dataset, dataset_sink_mode=True)
>>> dataset_helper = ms.DatasetHelper(train_dataset, dataset_sink_mode=True)
>>> net = nn.Dense(10, 5)
>>> net_with_get_next = connect_network_with_dataset(net, dataset_helper)
>>> net_with_get_next = ms.connect_network_with_dataset(net, dataset_helper)
"""
dataset_iter = dataset_helper.iter
dataset = dataset_iter.dataset
@@ -241,12 +241,13 @@ class DatasetHelper:
Examples:
>>> import numpy as np
>>> from mindspore import DatasetHelper, nn
>>> import mindspore as ms
>>> from mindspore import nn
>>> from mindspore import dataset as ds
>>>
>>> data = {"x": np.float32(np.random.rand(64, 10)), "y": np.random.randint(0, 5, (64,))}
>>> train_dataset = ds.NumpySlicesDataset(data=data).batch(32)
>>> set_helper = DatasetHelper(train_dataset, dataset_sink_mode=False)
>>> set_helper = ms.DatasetHelper(train_dataset, dataset_sink_mode=False)
>>>
>>> net = nn.Dense(10, 5)
>>> # Object of DatasetHelper is iterable
@@ -297,10 +298,10 @@ class DatasetHelper:
Get the types and shapes from dataset on the current configuration.

Examples:
>>> from mindspore import DatasetHelper
>>> import mindspore as ms
>>>
>>> train_dataset = create_custom_dataset()
>>> dataset_helper = DatasetHelper(train_dataset, dataset_sink_mode=True)
>>> dataset_helper = ms.DatasetHelper(train_dataset, dataset_sink_mode=True)
>>>
>>> types, shapes = dataset_helper.types_shapes()
"""
@@ -311,10 +312,10 @@ class DatasetHelper:
Get sink_size for each iteration.

Examples:
>>> from mindspore import DatasetHelper
>>> import mindspore as ms
>>>
>>> train_dataset = create_custom_dataset()
>>> dataset_helper = DatasetHelper(train_dataset, dataset_sink_mode=True, sink_size=-1)
>>> dataset_helper = ms.DatasetHelper(train_dataset, dataset_sink_mode=True, sink_size=-1)
>>>
>>> # if sink_size==-1, then will return the full size of source dataset.
>>> sink_size = dataset_helper.sink_size()
@@ -349,12 +350,12 @@ class DatasetHelper:
Return the minimum and maximum data length of dynamic source dataset.

Examples:
>>> from mindspore import DatasetHelper
>>> import mindspore as ms
>>>
>>> train_dataset = create_custom_dataset()
>>> # config dynamic shape
>>> dataset.set_dynamic_columns(columns={"data1": [16, None, 83], "data2": [None]})
>>> dataset_helper = DatasetHelper(train_dataset, dataset_sink_mode=True)
>>> dataset_helper = ms.DatasetHelper(train_dataset, dataset_sink_mode=True)
>>>
>>> min_shapes, max_shapes = dataset_helper.dynamic_min_max_shapes()
"""
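The four DatasetHelper hunks all make the same substitution. A small consolidated sketch of the new spelling, run in non-sinking mode so it works on any backend; the toy dataset mirrors the docstrings above:

```python
import numpy as np
import mindspore as ms
from mindspore import dataset as ds

# Toy in-memory dataset, as in the DatasetHelper docstrings above.
data = {"x": np.float32(np.random.rand(64, 10)), "y": np.random.randint(0, 5, (64,))}
train_dataset = ds.NumpySlicesDataset(data=data).batch(32)

# Without data sinking the helper simply wraps the dataset iterator.
dataset_helper = ms.DatasetHelper(train_dataset, dataset_sink_mode=False)

# A DatasetHelper object is iterable: each element is one batch of columns.
for inputs in dataset_helper:
    print(len(inputs), inputs[0].shape)

# The accessors from the hunks above are called on the same object.
types, shapes = dataset_helper.types_shapes()
print(types, shapes)
```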
@@ -56,19 +56,20 @@ class FixedLossScaleManager(LossScaleManager):
not executed when overflow occurs. Default: True.

Examples:
>>> from mindspore import Model, nn, FixedLossScaleManager
>>> import mindspore as ms
>>> from mindspore import nn
>>>
>>> net = Net()
>>> #1) Drop the parameter update if there is an overflow
>>> loss_scale_manager = FixedLossScaleManager()
>>> loss_scale_manager = ms.FixedLossScaleManager()
>>> optim = nn.Momentum(params=net.trainable_params(), learning_rate=0.1, momentum=0.9)
>>> model = Model(net, loss_scale_manager=loss_scale_manager, optimizer=optim)
>>> model = ms.Model(net, loss_scale_manager=loss_scale_manager, optimizer=optim)
>>>
>>> #2) Execute parameter update even if overflow occurs
>>> loss_scale = 1024.0
>>> loss_scale_manager = FixedLossScaleManager(loss_scale, False)
>>> loss_scale_manager = ms.FixedLossScaleManager(loss_scale, False)
>>> optim = nn.Momentum(params=net.trainable_params(), learning_rate=0.1, momentum=0.9, loss_scale=loss_scale)
>>> model = Model(net, loss_scale_manager=loss_scale_manager, optimizer=optim)
>>> model = ms.Model(net, loss_scale_manager=loss_scale_manager, optimizer=optim)
"""
def __init__(self, loss_scale=128.0, drop_overflow_update=True):
if loss_scale < 1:
@@ -130,12 +131,13 @@ class DynamicLossScaleManager(LossScaleManager):
scale_window (int): Maximum continuous normal steps when there is no overflow. Default: 2000.

Examples:
>>> from mindspore import Model, nn, DynamicLossScaleManager
>>> import mindspore as ms
>>> from mindspore import nn
>>>
>>> net = Net()
>>> loss_scale_manager = DynamicLossScaleManager()
>>> loss_scale_manager = ms.DynamicLossScaleManager()
>>> optim = nn.Momentum(params=net.trainable_params(), learning_rate=0.1, momentum=0.9)
>>> model = Model(net, loss_scale_manager=loss_scale_manager, optimizer=optim)
>>> model = ms.Model(net, loss_scale_manager=loss_scale_manager, optimizer=optim)
"""
def __init__(self,
init_loss_scale=2 ** 24,
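The two loss-scale hunks differ only in which manager is built. A short sketch of the new spelling, using `nn.Dense` in place of the undefined `Net()` from the docstrings; the `DynamicLossScaleManager` keyword values shown are its documented defaults:

```python
import mindspore as ms
from mindspore import nn

net = nn.Dense(10, 5)
optim = nn.Momentum(params=net.trainable_params(), learning_rate=0.1, momentum=0.9)
loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')

# Fixed scaling: one constant factor; drop_overflow_update=False keeps updating on overflow.
fixed_manager = ms.FixedLossScaleManager(loss_scale=1024.0, drop_overflow_update=False)

# Dynamic scaling: the factor grows after `scale_window` clean steps and shrinks on overflow.
dynamic_manager = ms.DynamicLossScaleManager(init_loss_scale=2 ** 24, scale_factor=2, scale_window=2000)

# Either manager is handed to ms.Model exactly as in the docstrings above.
model = ms.Model(net, loss_fn=loss, optimizer=optim, loss_scale_manager=dynamic_manager)
```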
@@ -147,7 +147,8 @@ class Model:
If you want to config boost mode by yourself, you can set boost_config_dict as `boost.py`.

Examples:
>>> from mindspore import Model, nn
>>> import mindspore as ms
>>> from mindspore import nn
>>>
>>> class Net(nn.Cell):
... def __init__(self, num_class=10, num_channel=1):

@@ -173,7 +174,7 @@ class Model:
>>> net = Net()
>>> loss = nn.SoftmaxCrossEntropyWithLogits()
>>> optim = nn.Momentum(params=net.trainable_params(), learning_rate=0.1, momentum=0.9)
>>> model = Model(net, loss_fn=loss, optimizer=optim, metrics=None)
>>> model = ms.Model(net, loss_fn=loss, optimizer=optim, metrics=None)
>>> # For details about how to build the dataset, please refer to the function `create_dataset` in tutorial
>>> # document on the official website:
>>> # https://www.mindspore.cn/tutorials/zh-CN/master/beginner/quick_start.html
@@ -935,16 +936,18 @@ class Model:
Default: 0.

Examples:
>>> from mindspore import Model, nn, FixedLossScaleManager
>>> import mindspore as ms
>>> from mindspore import nn
>>>
>>> # For details about how to build the dataset, please refer to the tutorial
>>> # document on the official website.
>>> dataset = create_custom_dataset()
>>> net = Net()
>>> loss = nn.SoftmaxCrossEntropyWithLogits()
>>> loss_scale_manager = FixedLossScaleManager()
>>> loss_scale_manager = ms.FixedLossScaleManager()
>>> optim = nn.Momentum(params=net.trainable_params(), learning_rate=0.1, momentum=0.9)
>>> model = Model(net, loss_fn=loss, optimizer=optim, metrics=None, loss_scale_manager=loss_scale_manager)
>>> model = ms.Model(net, loss_fn=loss, optimizer=optim, metrics=None,
...                  loss_scale_manager=loss_scale_manager)
>>> model.train(2, dataset)
"""
Validator.check_bool(dataset_sink_mode)
@@ -1064,7 +1067,8 @@ class Model:
Default: 0.

Examples:
>>> from mindspore import Model, nn, FixedLossScaleManager
>>> import mindspore as ms
>>> from mindspore import nn
>>>
>>> # For details about how to build the dataset, please refer to the tutorial
>>> # document on the official website.

@@ -1073,7 +1077,7 @@ class Model:
>>> net = Net()
>>> loss = nn.SoftmaxCrossEntropyWithLogits()
>>> optim = nn.Momentum(params=net.trainable_params(), learning_rate=0.1, momentum=0.9)
>>> model = Model(net, loss_fn=loss, optimizer=optim, metrics="accuracy")
>>> model = ms.Model(net, loss_fn=loss, optimizer=optim, metrics="accuracy")
>>> model.fit(2, train_dataset, valid_dataset)
"""
@@ -1156,16 +1160,18 @@ class Model:
the combine like graph phase.

Examples:
>>> from mindspore import Model, nn, FixedLossScaleManager
>>> import mindspore as ms
>>> from mindspore import nn
>>>
>>> # For details about how to build the dataset, please refer to the tutorial
>>> # document on the official website.
>>> dataset = create_custom_dataset()
>>> net = Net()
>>> loss = nn.SoftmaxCrossEntropyWithLogits()
>>> loss_scale_manager = FixedLossScaleManager()
>>> loss_scale_manager = ms.FixedLossScaleManager()
>>> optim = nn.Momentum(params=net.trainable_params(), learning_rate=0.1, momentum=0.9)
>>> model = Model(net, loss_fn=loss, optimizer=optim, metrics=None, loss_scale_manager=loss_scale_manager)
>>> model = ms.Model(net, loss_fn=loss, optimizer=optim, metrics=None,
...                  loss_scale_manager=loss_scale_manager)
>>> model.build(dataset, epoch=2)
>>> model.train(2, dataset)
"""
@@ -1305,14 +1311,15 @@ class Model:
the model in the test mode.

Examples:
>>> from mindspore import Model, nn
>>> import mindspore as ms
>>> from mindspore import nn
>>>
>>> # For details about how to build the dataset, please refer to the tutorial
>>> # document on the official website.
>>> dataset = create_custom_dataset()
>>> net = Net()
>>> loss = nn.SoftmaxCrossEntropyWithLogits()
>>> model = Model(net, loss_fn=loss, optimizer=None, metrics={'acc'})
>>> model = ms.Model(net, loss_fn=loss, optimizer=None, metrics={'acc'})
>>> acc = model.eval(dataset, dataset_sink_mode=False)
"""
dataset_sink_mode = Validator.check_bool(dataset_sink_mode)
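The Model.train and Model.eval examples above reference `Net()` and `create_custom_dataset()` without defining them. A self-contained train-then-eval sketch under the new `ms.Model` spelling, with a toy network and dataset standing in:

```python
import numpy as np
import mindspore as ms
from mindspore import nn
from mindspore import dataset as ds

# Toy dataset and network standing in for create_custom_dataset() and Net().
data = {"x": np.float32(np.random.rand(64, 10)), "y": np.random.randint(0, 5, (64,))}
train_dataset = ds.NumpySlicesDataset(data=data).batch(32)
eval_dataset = ds.NumpySlicesDataset(data=data).batch(32)

net = nn.Dense(10, 5)
loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
optim = nn.Momentum(net.trainable_params(), 0.01, 0.9)

# Everything is reached through the `ms.` alias after this commit.
model = ms.Model(net, loss_fn=loss, optimizer=optim, metrics={'acc'})
model.train(2, train_dataset, dataset_sink_mode=False)
acc = model.eval(eval_dataset, dataset_sink_mode=False)
print(acc)
```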
@@ -1368,10 +1375,10 @@ class Model:
Examples:
>>> import numpy as np
>>> import mindspore as ms
>>> from mindspore import Model, Tensor
>>> from mindspore import Tensor
>>>
>>> input_data = Tensor(np.random.randint(0, 255, [1, 1, 32, 32]), ms.float32)
>>> model = Model(Net())
>>> model = ms.Model(Net())
>>> result = model.predict(input_data)
"""
self._predict_network.set_train(False)
@@ -1452,22 +1459,22 @@ class Model:
>>> # mindspore.cn.
>>> import numpy as np
>>> import mindspore as ms
>>> from mindspore import Model, set_context, Tensor, nn, FixedLossScaleManager, GRAPH_MODE
>>> from mindspore import ParallelMode, set_auto_parallel_context
>>> from mindspore import Tensor, nn
>>> from mindspore.communication import init
>>>
>>> set_context(mode=GRAPH_MODE)
>>> ms.set_context(mode=ms.GRAPH_MODE)
>>> init()
>>> set_auto_parallel_context(parallel_mode=ParallelMode.SEMI_AUTO_PARALLEL)
>>> ms.set_auto_parallel_context(parallel_mode=ms.ParallelMode.SEMI_AUTO_PARALLEL)
>>>
>>> # For details about how to build the dataset, please refer to the tutorial
>>> # document on the official website.
>>> dataset = create_custom_dataset()
>>> net = Net()
>>> loss = nn.SoftmaxCrossEntropyWithLogits()
>>> loss_scale_manager = FixedLossScaleManager()
>>> loss_scale_manager = ms.FixedLossScaleManager()
>>> optim = nn.Momentum(params=net.trainable_params(), learning_rate=0.1, momentum=0.9)
>>> model = Model(net, loss_fn=loss, optimizer=optim, metrics=None, loss_scale_manager=loss_scale_manager)
>>> model = ms.Model(net, loss_fn=loss, optimizer=optim, metrics=None,
...                  loss_scale_manager=loss_scale_manager)
>>> layout_dict = model.infer_train_layout(dataset)
"""
self._infer_train_check(train_dataset, dataset_sink_mode, sink_size)
@@ -1508,15 +1515,14 @@ class Model:
>>> # mindspore.cn.
>>> import numpy as np
>>> import mindspore as ms
>>> from mindspore import Model, set_context, Tensor, GRAPH_MODE
>>> from mindspore import ParallelMode, set_auto_parallel_context
>>> from mindspore import Tensor
>>> from mindspore.communication import init
>>>
>>> set_context(mode=GRAPH_MODE)
>>> ms.set_context(mode=ms.GRAPH_MODE)
>>> init()
>>> set_auto_parallel_context(full_batch=True, parallel_mode=ParallelMode.SEMI_AUTO_PARALLEL)
>>> ms.set_auto_parallel_context(full_batch=True, parallel_mode=ms.ParallelMode.SEMI_AUTO_PARALLEL)
>>> input_data = Tensor(np.random.randint(0, 255, [1, 1, 32, 32]), ms.float32)
>>> model = Model(Net())
>>> model = ms.Model(Net())
>>> predict_map = model.infer_predict_layout(input_data)
"""
if context.get_context("mode") != context.GRAPH_MODE:
@@ -285,10 +285,10 @@ def save_checkpoint(save_obj, ckpt_file_name, integrated_save=True,
and `async_save` are not bool type. If the parameter ckpt_file_name is not string type.

Examples:
>>> from mindspore import save_checkpoint
>>> import mindspore as ms
>>>
>>> net = Net()
>>> save_checkpoint(net, "lenet.ckpt")
>>> ms.save_checkpoint(net, "lenet.ckpt")
"""
ckpt_file_name = _check_save_obj_and_ckpt_file_name(save_obj, ckpt_file_name)
integrated_save = Validator.check_bool(integrated_save)
@@ -398,13 +398,14 @@ def load(file_name, **kwargs):
Examples:
>>> import numpy as np
>>> import mindspore as ms
>>> import mindspore.nn as nn
>>> from mindspore import Tensor, export, load
>>> from mindspore import Tensor
>>>
>>> net = nn.Conv2d(1, 1, kernel_size=3, weight_init="ones")
>>> input_tensor = Tensor(np.ones([1, 1, 3, 3]).astype(np.float32))
>>> export(net, input_tensor, file_name="net", file_format="MINDIR")
>>> graph = load("net.mindir")
>>> ms.export(net, input_tensor, file_name="net", file_format="MINDIR")
>>> graph = ms.load("net.mindir")
>>> net = nn.GraphCell(graph)
>>> output = net(input_tensor)
>>> print(output)
@@ -479,10 +480,10 @@ def load_checkpoint(ckpt_file_name, net=None, strict_load=False, filter_prefix=N
TypeError: The type of `specify_prefix` or `filter_prefix` is incorrect.

Examples:
>>> from mindspore import load_checkpoint
>>> import mindspore as ms
>>>
>>> ckpt_file_name = "./checkpoint/LeNet5-1_32.ckpt"
>>> param_dict = load_checkpoint(ckpt_file_name, filter_prefix="conv1", specify_prefix="conv", )
>>> param_dict = ms.load_checkpoint(ckpt_file_name, filter_prefix="conv1", specify_prefix="conv", )
>>> print(param_dict["conv2.weight"])
Parameter (name=conv2.weight, shape=(16, 6, 5, 5), dtype=Float32, requires_grad=True)
"""
@@ -644,12 +645,12 @@ def load_param_into_net(net, parameter_dict, strict_load=False):
TypeError: Argument is not a Cell, or parameter_dict is not a Parameter dictionary.

Examples:
>>> from mindspore import load_checkpoint, load_param_into_net
>>> import mindspore as ms
>>>
>>> net = Net()
>>> ckpt_file_name = "./checkpoint/LeNet5-1_32.ckpt"
>>> param_dict = load_checkpoint(ckpt_file_name, filter_prefix="conv1")
>>> param_not_load = load_param_into_net(net, param_dict)
>>> param_dict = ms.load_checkpoint(ckpt_file_name, filter_prefix="conv1")
>>> param_not_load = ms.load_param_into_net(net, param_dict)
>>> print(param_not_load)
['conv1.weight']
"""
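The checkpoint hunks above cover saving, loading, and pushing parameters back into a network as separate snippets. A minimal round-trip sketch of the three `ms.` entry points together; `dense.ckpt` is just a throwaway file name:

```python
import mindspore as ms
from mindspore import nn

net = nn.Dense(10, 5)

# Save, reload, and push the parameters back into a fresh network,
# all through the `ms.` entry points touched by this commit.
ms.save_checkpoint(net, "dense.ckpt")
param_dict = ms.load_checkpoint("dense.ckpt")

new_net = nn.Dense(10, 5)
param_not_load = ms.load_param_into_net(new_net, param_dict)
print(param_not_load)  # names that could not be matched; empty when everything loads
```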
@@ -861,12 +862,13 @@ def export(net, *inputs, file_name, file_format='AIR', **kwargs):
Option: 'AES-GCM' | 'AES-CBC'. Default: 'AES-GCM'.

Examples:
>>> import mindspore as ms
>>> import numpy as np
>>> from mindspore import export, Tensor
>>> from mindspore import Tensor
>>>
>>> net = LeNet()
>>> input_tensor = Tensor(np.ones([1, 1, 32, 32]).astype(np.float32))
>>> export(net, input_tensor, file_name='lenet', file_format='MINDIR')
>>> ms.export(net, input_tensor, file_name='lenet', file_format='MINDIR')
"""
logger.info("exporting model file:%s format:%s.", file_name, file_format)
if check_input_dataset(*inputs, dataset_type=mindspore.dataset.Dataset):
@@ -1231,10 +1233,11 @@ def parse_print(print_file_name):
Examples:
>>> import numpy as np
>>> import mindspore as ms
>>> import mindspore.ops as ops
>>> from mindspore import nn
>>> from mindspore import Tensor, set_context, GRAPH_MODE
>>> set_context(mode=GRAPH_MODE, print_file_path='log.data')
>>> from mindspore import Tensor
>>> ms.set_context(mode=ms.GRAPH_MODE, print_file_path='log.data')
>>> class PrintInputTensor(nn.Cell):
... def __init__(self):
... super().__init__()
@@ -1508,7 +1511,8 @@ def merge_sliced_parameter(sliced_parameters, strategy=None):
Examples:
>>> import numpy as np
>>> from mindspore import Tensor, merge_sliced_parameter, Parameter
>>> import mindspore as ms
>>> from mindspore import Tensor, Parameter
>>>
>>> sliced_parameters = [
... Parameter(Tensor(np.array([0.00023915, 0.00013939, -0.00098059])),

@@ -1519,7 +1523,7 @@ def merge_sliced_parameter(sliced_parameters, strategy=None):
... "network.embedding_table"),
... Parameter(Tensor(np.array([0.00084451, 0.00089960, -0.00010431])),
... "network.embedding_table")]
>>> merged_parameter = merge_sliced_parameter(sliced_parameters)
>>> merged_parameter = ms.merge_sliced_parameter(sliced_parameters)
>>> print(merged_parameter)
Parameter (name=network.embedding_table, shape=(12,), dtype=Float64, requires_grad=True)
"""
@@ -136,10 +136,10 @@ class SummaryRecord:
ValueError: The Summary is not supported, please without `-s on` and recompile source.

Examples:
>>> from mindspore import SummaryRecord
>>> import mindspore as ms
>>> if __name__ == '__main__':
... # use in with statement to auto close
... with SummaryRecord(log_dir="./summary_dir") as summary_record:
... with ms.SummaryRecord(log_dir="./summary_dir") as summary_record:
... pass
...
... # use in try .. finally .. to ensure closing
@@ -209,9 +209,10 @@ class SummaryRecord:
ValueError: `mode` is not in the optional value.

Examples:
>>> from mindspore import SummaryRecord
>>> import mindspore as ms
>>> if __name__ == '__main__':
... with SummaryRecord(log_dir="./summary_dir", file_prefix="xx_", file_suffix="_yy") as summary_record:
... with ms.SummaryRecord(log_dir="./summary_dir", file_prefix="xx_", file_suffix="_yy") as summary_record:
... summary_record.set_mode('eval')
"""
mode_spec = 'train', 'eval'
@@ -269,10 +270,11 @@ class SummaryRecord:
is 'scalar', 'image', 'tensor' or 'histogram'.

Examples:
>>> import mindspore as ms
>>> from mindspore import Tensor
>>> from mindspore import SummaryRecord
>>> if __name__ == '__main__':
... with SummaryRecord(log_dir="./summary_dir", file_prefix="xx_", file_suffix="_yy") as summary_record:
... with ms.SummaryRecord(log_dir="./summary_dir", file_prefix="xx_", file_suffix="_yy") as summary_record:
... summary_record.add_value('scalar', 'loss', Tensor(0.1))
"""
if plugin in ('tensor', 'scalar', 'image', 'histogram'):
@@ -325,9 +327,10 @@ class SummaryRecord:
<https://www.mindspore.cn/docs/en/master/api_python/nn/mindspore.nn.Cell.html#mindspore-nn-cell>`_ 。

Examples:
>>> from mindspore import SummaryRecord
>>> import mindspore as ms
>>> if __name__ == '__main__':
... with SummaryRecord(log_dir="./summary_dir", file_prefix="xx_", file_suffix="_yy") as summary_record:
... with ms.SummaryRecord(log_dir="./summary_dir", file_prefix="xx_", file_suffix="_yy") as summary_record:
... result = summary_record.record(step=2)
... print(result)
...
@@ -438,9 +441,10 @@ class SummaryRecord:
str, the full path of log file.

Examples:
>>> from mindspore import SummaryRecord
>>> import mindspore as ms
>>> if __name__ == '__main__':
... with SummaryRecord(log_dir="./summary_dir", file_prefix="xx_", file_suffix="_yy") as summary_record:
... with ms.SummaryRecord(log_dir="./summary_dir", file_prefix="xx_", file_suffix="_yy") as summary_record:
... log_dir = summary_record.log_dir
"""
return self.file_info['file_path']
@@ -452,9 +456,10 @@ class SummaryRecord:
Call it to make sure that all pending events have been written to disk.

Examples:
>>> from mindspore import SummaryRecord
>>> import mindspore as ms
>>> if __name__ == '__main__':
... with SummaryRecord(log_dir="./summary_dir", file_prefix="xx_", file_suffix="_yy") as summary_record:
... with ms.SummaryRecord(log_dir="./summary_dir", file_prefix="xx_", file_suffix="_yy") as summary_record:
... summary_record.flush()
"""
if self._status.get('closed'):
@@ -468,10 +473,10 @@ class SummaryRecord:
Flush the buffer and write files to disk and close summary records. Please use the statement to autoclose.

Examples:
>>> from mindspore import SummaryRecord
>>> import mindspore as ms
>>> if __name__ == '__main__':
... try:
... summary_record = SummaryRecord(log_dir="./summary_dir")
... summary_record = ms.SummaryRecord(log_dir="./summary_dir")
... finally:
... summary_record.close()
"""
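The SummaryRecord hunks show the context-manager and try/finally forms in isolation. A consolidated sketch of both under the new spelling; note the ValueError caveat above for builds where Summary support is not compiled in:

```python
import mindspore as ms
from mindspore import Tensor

if __name__ == '__main__':
    # Context-manager form: flushing and closing are handled automatically.
    with ms.SummaryRecord(log_dir="./summary_dir", file_prefix="xx_", file_suffix="_yy") as summary_record:
        summary_record.add_value('scalar', 'loss', Tensor(0.1))
        summary_record.record(step=1)
        print(summary_record.log_dir)

    # try/finally form, matching the close() example above.
    summary_record = ms.SummaryRecord(log_dir="./summary_dir")
    try:
        summary_record.add_value('scalar', 'loss', Tensor(0.2))
        summary_record.record(step=2)
        summary_record.flush()
    finally:
        summary_record.close()
```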
@@ -212,26 +212,23 @@ class ConvertModelUtils:
``Ascend`` ``GPU``

Examples:
>>> import mindspore as ms
>>> from mindspore import nn
>>> from mindspore import Tensor
>>> from mindspore.nn import thor
>>> from mindspore import Model
>>> from mindspore import FixedLossScaleManager
>>> from mindspore import LossMonitor
>>> from mindspore import ConvertModelUtils
>>>
>>> net = Net()
>>> dataset = create_dataset()
>>> temp = Tensor([4e-4, 1e-4, 1e-5, 1e-5], mstype.float32)
>>> opt = thor(net, learning_rate=temp, damping=temp, momentum=0.9, loss_scale=128, frequency=4)
>>> loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
>>> loss_scale = FixedLossScaleManager(128, drop_overflow_update=False)
>>> model = Model(net, loss_fn=loss, optimizer=opt, loss_scale_manager=loss_scale, metrics={'acc'},
>>> loss_scale = ms.FixedLossScaleManager(128, drop_overflow_update=False)
>>> model = ms.Model(net, loss_fn=loss, optimizer=opt, loss_scale_manager=loss_scale, metrics={'acc'},
... amp_level="O2", keep_batchnorm_fp32=False)
>>> model = ConvertModelUtils.convert_to_thor_model(model=model, network=net, loss_fn=loss, optimizer=opt,
... loss_scale_manager=loss_scale, metrics={'acc'},
... amp_level="O2", keep_batchnorm_fp32=False)
>>> loss_cb = LossMonitor()
>>> loss_cb = ms.LossMonitor()
>>> model.train(1, dataset, callbacks=loss_cb, sink_size=4, dataset_sink_mode=True)
"""