From 9e867247352180a3bbecc7edac949b90dc56b425 Mon Sep 17 00:00:00 2001 From: lvyufeng Date: Thu, 15 Sep 2022 23:44:53 +0800 Subject: [PATCH] move Model and Callbacks from mindspore to mindspore.train --- docs/api/api_python/mindspore.rst | 26 -------------- docs/api/api_python/mindspore.train.rst | 28 +++++++++++++++ .../mindspore.train.Callback.rst} | 6 ++-- .../mindspore.train.CheckpointConfig.rst} | 6 ++-- .../mindspore.train.EarlyStopping.rst} | 6 ++-- .../mindspore.train.History.rst} | 6 ++-- .../mindspore.train.LambdaCallback.rst} | 6 ++-- ...mindspore.train.LearningRateScheduler.rst} | 6 ++-- .../mindspore.train.LossMonitor.rst} | 6 ++-- .../mindspore.train.Model.rst} | 6 ++-- .../mindspore.train.ModelCheckpoint.rst} | 6 ++-- .../mindspore.train.ReduceLROnPlateau.rst} | 6 ++-- .../mindspore.train.RunContext.rst} | 6 ++-- .../mindspore.train.TimeMonitor.rst} | 6 ++-- docs/api/api_python_en/mindspore.rst | 30 ---------------- docs/api/api_python_en/mindspore.train.rst | 32 +++++++++++++++++ .../mindspore/train/callback/_callback.py | 6 ++-- .../mindspore/train/callback/_checkpoint.py | 8 ++--- .../mindspore/train/callback/_early_stop.py | 4 +-- .../mindspore/train/callback/_history.py | 6 ++-- .../train/callback/_lambda_callback.py | 6 ++-- .../mindspore/train/callback/_landscape.py | 7 ++-- .../mindspore/train/callback/_loss_monitor.py | 4 +-- .../train/callback/_lr_scheduler_callback.py | 6 ++-- .../train/callback/_reduce_lr_on_plateau.py | 4 +-- .../train/callback/_summary_collector.py | 7 ++-- .../mindspore/train/callback/_time_monitor.py | 4 +-- mindspore/python/mindspore/train/model.py | 36 ++++++++++--------- 28 files changed, 147 insertions(+), 139 deletions(-) create mode 100644 docs/api/api_python/mindspore.train.rst rename docs/api/api_python/{mindspore/mindspore.Callback.rst => train/mindspore.train.Callback.rst} (98%) rename docs/api/api_python/{mindspore/mindspore.CheckpointConfig.rst => train/mindspore.train.CheckpointConfig.rst} (92%) rename 
docs/api/api_python/{mindspore/mindspore.EarlyStopping.rst => train/mindspore.train.EarlyStopping.rst} (92%) rename docs/api/api_python/{mindspore/mindspore.History.rst => train/mindspore.train.History.rst} (92%) rename docs/api/api_python/{mindspore/mindspore.LambdaCallback.rst => train/mindspore.train.LambdaCallback.rst} (77%) rename docs/api/api_python/{mindspore/mindspore.LearningRateScheduler.rst => train/mindspore.train.LearningRateScheduler.rst} (69%) rename docs/api/api_python/{mindspore/mindspore.LossMonitor.rst => train/mindspore.train.LossMonitor.rst} (89%) rename docs/api/api_python/{mindspore/mindspore.Model.rst => train/mindspore.train.Model.rst} (98%) rename docs/api/api_python/{mindspore/mindspore.ModelCheckpoint.rst => train/mindspore.train.ModelCheckpoint.rst} (90%) rename docs/api/api_python/{mindspore/mindspore.ReduceLROnPlateau.rst => train/mindspore.train.ReduceLROnPlateau.rst} (92%) rename docs/api/api_python/{mindspore/mindspore.RunContext.rst => train/mindspore.train.RunContext.rst} (98%) rename docs/api/api_python/{mindspore/mindspore.TimeMonitor.rst => train/mindspore.train.TimeMonitor.rst} (87%) create mode 100644 docs/api/api_python_en/mindspore.train.rst diff --git a/docs/api/api_python/mindspore.rst b/docs/api/api_python/mindspore.rst index 3a9b78b20d2..fda9a398b6a 100644 --- a/docs/api/api_python/mindspore.rst +++ b/docs/api/api_python/mindspore.rst @@ -63,32 +63,6 @@ mindspore mindspore.get_algo_parameters mindspore.reset_algo_parameters -模型 ------ - -.. mscnautosummary:: - :toctree: mindspore - - mindspore.Model - -回调函数 ---------- - -.. 
mscnautosummary:: - :toctree: mindspore - - mindspore.Callback - mindspore.CheckpointConfig - mindspore.EarlyStopping - mindspore.History - mindspore.LambdaCallback - mindspore.LearningRateScheduler - mindspore.LossMonitor - mindspore.ModelCheckpoint - mindspore.ReduceLROnPlateau - mindspore.RunContext - mindspore.TimeMonitor - 数据处理工具 ------------------- diff --git a/docs/api/api_python/mindspore.train.rst b/docs/api/api_python/mindspore.train.rst new file mode 100644 index 00000000000..281cdb54406 --- /dev/null +++ b/docs/api/api_python/mindspore.train.rst @@ -0,0 +1,28 @@ +mindspore.train +=============== + +模型 +----- + +.. mscnautosummary:: + :toctree: mindspore + + mindspore.train.Model + +回调函数 +--------- + +.. mscnautosummary:: + :toctree: mindspore + + mindspore.train.Callback + mindspore.train.CheckpointConfig + mindspore.train.EarlyStopping + mindspore.train.History + mindspore.train.LambdaCallback + mindspore.train.LearningRateScheduler + mindspore.train.LossMonitor + mindspore.train.ModelCheckpoint + mindspore.train.ReduceLROnPlateau + mindspore.train.RunContext + mindspore.train.TimeMonitor diff --git a/docs/api/api_python/mindspore/mindspore.Callback.rst b/docs/api/api_python/train/mindspore.train.Callback.rst similarity index 98% rename from docs/api/api_python/mindspore/mindspore.Callback.rst rename to docs/api/api_python/train/mindspore.train.Callback.rst index 802f4858bf1..419c1539a7c 100644 --- a/docs/api/api_python/mindspore/mindspore.Callback.rst +++ b/docs/api/api_python/train/mindspore.train.Callback.rst @@ -1,7 +1,7 @@ -mindspore.Callback -=================== +mindspore.train.Callback +======================== -.. py:class:: mindspore.Callback +.. 
py:class:: mindspore.train.Callback 用于构建Callback函数的基类。Callback函数是一个上下文管理器,在运行模型时被调用。 可以使用此机制进行一些自定义操作。 diff --git a/docs/api/api_python/mindspore/mindspore.CheckpointConfig.rst b/docs/api/api_python/train/mindspore.train.CheckpointConfig.rst similarity index 92% rename from docs/api/api_python/mindspore/mindspore.CheckpointConfig.rst rename to docs/api/api_python/train/mindspore.train.CheckpointConfig.rst index 1ea49e881bf..99135058b77 100644 --- a/docs/api/api_python/mindspore/mindspore.CheckpointConfig.rst +++ b/docs/api/api_python/train/mindspore.train.CheckpointConfig.rst @@ -1,7 +1,7 @@ -mindspore.CheckpointConfig -=========================== +mindspore.train.CheckpointConfig +================================ -.. py:class:: mindspore.CheckpointConfig(save_checkpoint_steps=1, save_checkpoint_seconds=0, keep_checkpoint_max=5, keep_checkpoint_per_n_minutes=0, integrated_save=True, async_save=False, saved_network=None, append_info=None, enc_key=None, enc_mode='AES-GCM', exception_save=False) +.. py:class:: mindspore.train.CheckpointConfig(save_checkpoint_steps=1, save_checkpoint_seconds=0, keep_checkpoint_max=5, keep_checkpoint_per_n_minutes=0, integrated_save=True, async_save=False, saved_network=None, append_info=None, enc_key=None, enc_mode='AES-GCM', exception_save=False) 保存checkpoint时的配置策略。 diff --git a/docs/api/api_python/mindspore/mindspore.EarlyStopping.rst b/docs/api/api_python/train/mindspore.train.EarlyStopping.rst similarity index 92% rename from docs/api/api_python/mindspore/mindspore.EarlyStopping.rst rename to docs/api/api_python/train/mindspore.train.EarlyStopping.rst index 7c75610219e..3a0d68f5530 100644 --- a/docs/api/api_python/mindspore/mindspore.EarlyStopping.rst +++ b/docs/api/api_python/train/mindspore.train.EarlyStopping.rst @@ -1,7 +1,7 @@ -mindspore.EarlyStopping -================================ +mindspore.train.EarlyStopping +============================= -.. 
py:class:: mindspore.EarlyStopping(monitor='eval_loss', min_delta=0, patience=0, verbose=False, mode='auto', baseline=None, restore_best_weights=False) +.. py:class:: mindspore.train.EarlyStopping(monitor='eval_loss', min_delta=0, patience=0, verbose=False, mode='auto', baseline=None, restore_best_weights=False) 当监控的指标停止改进时停止训练。 diff --git a/docs/api/api_python/mindspore/mindspore.History.rst b/docs/api/api_python/train/mindspore.train.History.rst similarity index 92% rename from docs/api/api_python/mindspore/mindspore.History.rst rename to docs/api/api_python/train/mindspore.train.History.rst index 6815dcd78d6..c293adfbe86 100644 --- a/docs/api/api_python/mindspore/mindspore.History.rst +++ b/docs/api/api_python/train/mindspore.train.History.rst @@ -1,7 +1,7 @@ -mindspore.History -=========================== +mindspore.train.History +======================= -.. py:class:: mindspore.History +.. py:class:: mindspore.train.History 将网络输出和评估指标的相关信息记录到 `History` 对象中。 diff --git a/docs/api/api_python/mindspore/mindspore.LambdaCallback.rst b/docs/api/api_python/train/mindspore.train.LambdaCallback.rst similarity index 77% rename from docs/api/api_python/mindspore/mindspore.LambdaCallback.rst rename to docs/api/api_python/train/mindspore.train.LambdaCallback.rst index fc1f484b558..cc6891f54b0 100644 --- a/docs/api/api_python/mindspore/mindspore.LambdaCallback.rst +++ b/docs/api/api_python/train/mindspore.train.LambdaCallback.rst @@ -1,7 +1,7 @@ -mindspore.LambdaCallback -=========================== +mindspore.train.LambdaCallback +============================== -.. py:class:: mindspore.LambdaCallback(on_train_epoch_begin=None, on_train_epoch_end=None, on_train_step_begin=None, on_train_step_end=None, on_train_begin=None, on_train_end=None, on_eval_epoch_begin=None, on_eval_epoch_end=None, on_eval_step_begin=None, on_eval_step_end=None, on_eval_begin=None, on_eval_end=None) +.. 
py:class:: mindspore.train.LambdaCallback(on_train_epoch_begin=None, on_train_epoch_end=None, on_train_step_begin=None, on_train_step_end=None, on_train_begin=None, on_train_end=None, on_eval_epoch_begin=None, on_eval_epoch_end=None, on_eval_step_begin=None, on_eval_step_end=None, on_eval_begin=None, on_eval_end=None) 用于自定义简单的callback。 diff --git a/docs/api/api_python/mindspore/mindspore.LearningRateScheduler.rst b/docs/api/api_python/train/mindspore.train.LearningRateScheduler.rst similarity index 69% rename from docs/api/api_python/mindspore/mindspore.LearningRateScheduler.rst rename to docs/api/api_python/train/mindspore.train.LearningRateScheduler.rst index edce63697e8..86d9bfff414 100644 --- a/docs/api/api_python/mindspore/mindspore.LearningRateScheduler.rst +++ b/docs/api/api_python/train/mindspore.train.LearningRateScheduler.rst @@ -1,7 +1,7 @@ -mindspore.LearningRateScheduler -================================ +mindspore.train.LearningRateScheduler +===================================== -.. py:class:: mindspore.LearningRateScheduler(learning_rate_function) +.. py:class:: mindspore.train.LearningRateScheduler(learning_rate_function) 用于在训练期间更改学习率。 diff --git a/docs/api/api_python/mindspore/mindspore.LossMonitor.rst b/docs/api/api_python/train/mindspore.train.LossMonitor.rst similarity index 89% rename from docs/api/api_python/mindspore/mindspore.LossMonitor.rst rename to docs/api/api_python/train/mindspore.train.LossMonitor.rst index 044b9688c28..3959d8201f9 100644 --- a/docs/api/api_python/mindspore/mindspore.LossMonitor.rst +++ b/docs/api/api_python/train/mindspore.train.LossMonitor.rst @@ -1,7 +1,7 @@ -mindspore.LossMonitor -================================ +mindspore.train.LossMonitor +=========================== -.. py:class:: mindspore.LossMonitor(per_print_times=1) +.. 
py:class:: mindspore.train.LossMonitor(per_print_times=1) 训练场景下,监控训练的loss;边训练边推理场景下,监控训练的loss和推理的metrics。 diff --git a/docs/api/api_python/mindspore/mindspore.Model.rst b/docs/api/api_python/train/mindspore.train.Model.rst similarity index 98% rename from docs/api/api_python/mindspore/mindspore.Model.rst rename to docs/api/api_python/train/mindspore.train.Model.rst index 2fe154b3887..291c7604494 100644 --- a/docs/api/api_python/mindspore/mindspore.Model.rst +++ b/docs/api/api_python/train/mindspore.train.Model.rst @@ -1,7 +1,7 @@ -mindspore.Model -================ +mindspore.train.Model +====================== -.. py:class:: mindspore.Model(network, loss_fn=None, optimizer=None, metrics=None, eval_network=None, eval_indexes=None, amp_level="O0", boost_level="O0", **kwargs) +.. py:class:: mindspore.train.Model(network, loss_fn=None, optimizer=None, metrics=None, eval_network=None, eval_indexes=None, amp_level="O0", boost_level="O0", **kwargs) 模型训练或推理的高阶接口。 `Model` 会根据用户传入的参数封装可训练或推理的实例。 diff --git a/docs/api/api_python/mindspore/mindspore.ModelCheckpoint.rst b/docs/api/api_python/train/mindspore.train.ModelCheckpoint.rst similarity index 90% rename from docs/api/api_python/mindspore/mindspore.ModelCheckpoint.rst rename to docs/api/api_python/train/mindspore.train.ModelCheckpoint.rst index ec1cd5d039c..7ea8c7e5cd3 100644 --- a/docs/api/api_python/mindspore/mindspore.ModelCheckpoint.rst +++ b/docs/api/api_python/train/mindspore.train.ModelCheckpoint.rst @@ -1,7 +1,7 @@ -mindspore.ModelCheckpoint -================================ +mindspore.train.ModelCheckpoint +=============================== -.. py:class:: mindspore.ModelCheckpoint(prefix='CKP', directory=None, config=None) +.. 
py:class:: mindspore.train.ModelCheckpoint(prefix='CKP', directory=None, config=None) checkpoint的回调函数。 diff --git a/docs/api/api_python/mindspore/mindspore.ReduceLROnPlateau.rst b/docs/api/api_python/train/mindspore.train.ReduceLROnPlateau.rst similarity index 92% rename from docs/api/api_python/mindspore/mindspore.ReduceLROnPlateau.rst rename to docs/api/api_python/train/mindspore.train.ReduceLROnPlateau.rst index 096a0a798e2..4705fca75b2 100644 --- a/docs/api/api_python/mindspore/mindspore.ReduceLROnPlateau.rst +++ b/docs/api/api_python/train/mindspore.train.ReduceLROnPlateau.rst @@ -1,7 +1,7 @@ -mindspore.ReduceLROnPlateau -================================ +mindspore.train.ReduceLROnPlateau +================================= -.. py:class:: mindspore.ReduceLROnPlateau(monitor='eval_loss', factor=0.1, patience=10, verbose=False, mode='auto', min_delta=1e-4, cooldown=0, min_lr=0) +.. py:class:: mindspore.train.ReduceLROnPlateau(monitor='eval_loss', factor=0.1, patience=10, verbose=False, mode='auto', min_delta=1e-4, cooldown=0, min_lr=0) 当 `monitor` 停止改进时降低学习率。 diff --git a/docs/api/api_python/mindspore/mindspore.RunContext.rst b/docs/api/api_python/train/mindspore.train.RunContext.rst similarity index 98% rename from docs/api/api_python/mindspore/mindspore.RunContext.rst rename to docs/api/api_python/train/mindspore.train.RunContext.rst index b1f09b5e6b1..9aae694ae40 100644 --- a/docs/api/api_python/mindspore/mindspore.RunContext.rst +++ b/docs/api/api_python/train/mindspore.train.RunContext.rst @@ -1,7 +1,7 @@ -mindspore.RunContext -================================ +mindspore.train.RunContext +========================== -.. py:class:: mindspore.RunContext(original_args) +.. 
py:class:: mindspore.train.RunContext(original_args) 保存和管理模型的相关信息。 diff --git a/docs/api/api_python/mindspore/mindspore.TimeMonitor.rst b/docs/api/api_python/train/mindspore.train.TimeMonitor.rst similarity index 87% rename from docs/api/api_python/mindspore/mindspore.TimeMonitor.rst rename to docs/api/api_python/train/mindspore.train.TimeMonitor.rst index c1848cb896f..422ceb0433c 100644 --- a/docs/api/api_python/mindspore/mindspore.TimeMonitor.rst +++ b/docs/api/api_python/train/mindspore.train.TimeMonitor.rst @@ -1,7 +1,7 @@ -mindspore.TimeMonitor -================================ +mindspore.train.TimeMonitor +=========================== -.. py:class:: mindspore.TimeMonitor(data_size=None) +.. py:class:: mindspore.train.TimeMonitor(data_size=None) 监控训练或推理的时间。 diff --git a/docs/api/api_python_en/mindspore.rst b/docs/api/api_python_en/mindspore.rst index c56da19cd40..d2464b7aa0c 100644 --- a/docs/api/api_python_en/mindspore.rst +++ b/docs/api/api_python_en/mindspore.rst @@ -174,36 +174,6 @@ Context mindspore.get_algo_parameters mindspore.reset_algo_parameters -Model ------ - -.. autosummary:: - :toctree: mindspore - :nosignatures: - :template: classtemplate.rst - - mindspore.Model - -Callback --------- - -.. autosummary:: - :toctree: mindspore - :nosignatures: - :template: classtemplate.rst - - mindspore.Callback - mindspore.CheckpointConfig - mindspore.EarlyStopping - mindspore.History - mindspore.LambdaCallback - mindspore.LearningRateScheduler - mindspore.LossMonitor - mindspore.ModelCheckpoint - mindspore.ReduceLROnPlateau - mindspore.RunContext - mindspore.TimeMonitor - Dataset Helper --------------- diff --git a/docs/api/api_python_en/mindspore.train.rst b/docs/api/api_python_en/mindspore.train.rst new file mode 100644 index 00000000000..388e6cbb377 --- /dev/null +++ b/docs/api/api_python_en/mindspore.train.rst @@ -0,0 +1,32 @@ +mindspore.train +=============== + +Model +----- + +.. 
autosummary:: + :toctree: mindspore + :nosignatures: + :template: classtemplate.rst + + mindspore.train.Model + +Callback +-------- + +.. autosummary:: + :toctree: mindspore + :nosignatures: + :template: classtemplate.rst + + mindspore.train.Callback + mindspore.train.CheckpointConfig + mindspore.train.EarlyStopping + mindspore.train.History + mindspore.train.LambdaCallback + mindspore.train.LearningRateScheduler + mindspore.train.LossMonitor + mindspore.train.ModelCheckpoint + mindspore.train.ReduceLROnPlateau + mindspore.train.RunContext + mindspore.train.TimeMonitor diff --git a/mindspore/python/mindspore/train/callback/_callback.py b/mindspore/python/mindspore/train/callback/_callback.py index 0a83d512204..e5b82bb6d1d 100644 --- a/mindspore/python/mindspore/train/callback/_callback.py +++ b/mindspore/python/mindspore/train/callback/_callback.py @@ -97,10 +97,10 @@ class Callback: Examples: >>> import numpy as np - >>> import mindspore as ms >>> from mindspore import nn >>> from mindspore import dataset as ds - >>> class Print_info(ms.Callback): + >>> from mindspore.train import Model, Callback + >>> class Print_info(Callback): ... def step_end(self, run_context): ... cb_params = run_context.original_args() ... print("step_num: ", cb_params.cur_step_num) @@ -111,7 +111,7 @@ >>> net = nn.Dense(10, 5) >>> loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean') >>> optim = nn.Momentum(net.trainable_params(), 0.01, 0.9) - >>> model = ms.Model(net, loss_fn=loss, optimizer=optim) + >>> model = Model(net, loss_fn=loss, optimizer=optim) >>> model.train(1, dataset, callbacks=print_cb) step_num: 2 """ diff --git a/mindspore/python/mindspore/train/callback/_checkpoint.py b/mindspore/python/mindspore/train/callback/_checkpoint.py index e886fb31a9a..bfdb8740e7c 100644 --- a/mindspore/python/mindspore/train/callback/_checkpoint.py +++ b/mindspore/python/mindspore/train/callback/_checkpoint.py @@ -105,9 +105,9 @@ class CheckpointConfig: ValueError: If input parameter is not the correct type.
Examples: - >>> import mindspore as ms >>> from mindspore import nn >>> from mindspore.common.initializer import Normal + >>> from mindspore.train import Model, CheckpointConfig, ModelCheckpoint >>> >>> class LeNet5(nn.Cell): ... def __init__(self, num_class=10, num_channel=1): @@ -133,11 +133,11 @@ class CheckpointConfig: >>> net = LeNet5() >>> loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean') >>> optim = nn.Momentum(net.trainable_params(), 0.01, 0.9) - >>> model = ms.Model(net, loss_fn=loss, optimizer=optim) + >>> model = Model(net, loss_fn=loss, optimizer=optim) >>> data_path = './MNIST_Data' >>> dataset = create_dataset(data_path) - >>> config = ms.CheckpointConfig(saved_network=net) - >>> ckpoint_cb = ms.ModelCheckpoint(prefix='LeNet5', directory='./checkpoint', config=config) + >>> config = CheckpointConfig(saved_network=net) + >>> ckpoint_cb = ModelCheckpoint(prefix='LeNet5', directory='./checkpoint', config=config) >>> model.train(10, dataset, callbacks=ckpoint_cb) """ diff --git a/mindspore/python/mindspore/train/callback/_early_stop.py b/mindspore/python/mindspore/train/callback/_early_stop.py index 294ed4864c2..503e6221a2d 100644 --- a/mindspore/python/mindspore/train/callback/_early_stop.py +++ b/mindspore/python/mindspore/train/callback/_early_stop.py @@ -82,8 +82,8 @@ class EarlyStopping(Callback): ValueError: The monitor value is not a scalar. 
Examples: - >>> from mindspore.train.callback import EarlyStopping - >>> from mindspore import Model, nn + >>> from mindspore import nn + >>> from mindspore.train import Model, EarlyStopping >>> net = LeNet5() >>> loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean') >>> optim = nn.Momentum(net.trainable_params(), 0.01, 0.9) diff --git a/mindspore/python/mindspore/train/callback/_history.py b/mindspore/python/mindspore/train/callback/_history.py index 1a3c77969b1..3bac4190293 100644 --- a/mindspore/python/mindspore/train/callback/_history.py +++ b/mindspore/python/mindspore/train/callback/_history.py @@ -35,16 +35,16 @@ class History(Callback): Examples: >>> import numpy as np - >>> import mindspore as ms >>> import mindspore.dataset as ds >>> from mindspore import nn + >>> from mindspore.train import Model, History >>> data = {"x": np.float32(np.random.rand(64, 10)), "y": np.random.randint(0, 5, (64,))} >>> train_dataset = ds.NumpySlicesDataset(data=data).batch(32) >>> net = nn.Dense(10, 5) >>> crit = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean') >>> opt = nn.Momentum(net.trainable_params(), 0.01, 0.9) - >>> history_cb = ms.History() - >>> model = ms.Model(network=net, optimizer=opt, loss_fn=crit, metrics={"recall"}) + >>> history_cb = History() + >>> model = Model(network=net, optimizer=opt, loss_fn=crit, metrics={"recall"}) >>> model.train(2, train_dataset, callbacks=[history_cb]) >>> print(history_cb.epoch) >>> print(history_cb.history) diff --git a/mindspore/python/mindspore/train/callback/_lambda_callback.py b/mindspore/python/mindspore/train/callback/_lambda_callback.py index 975fa788c14..a8ccf79fce6 100644 --- a/mindspore/python/mindspore/train/callback/_lambda_callback.py +++ b/mindspore/python/mindspore/train/callback/_lambda_callback.py @@ -45,17 +45,17 @@ class LambdaCallback(Callback): Examples: >>> import numpy as np - >>> import mindspore as ms >>> import mindspore.dataset as ds >>> from mindspore import nn + >>> 
from mindspore.train import Model, LambdaCallback >>> data = {"x": np.float32(np.random.rand(64, 10)), "y": np.random.randint(0, 5, (64,))} >>> train_dataset = ds.NumpySlicesDataset(data=data).batch(32) >>> net = nn.Dense(10, 5) >>> crit = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean') >>> opt = nn.Momentum(net.trainable_params(), 0.01, 0.9) - >>> lambda_callback = ms.LambdaCallback(on_train_epoch_end= + >>> lambda_callback = LambdaCallback(on_train_epoch_end= ... lambda run_context: print("loss: ", run_context.original_args().net_outputs)) - >>> model = ms.Model(network=net, optimizer=opt, loss_fn=crit, metrics={"recall"}) + >>> model = Model(network=net, optimizer=opt, loss_fn=crit, metrics={"recall"}) >>> model.train(2, train_dataset, callbacks=[lambda_callback]) loss: 1.6127687 loss: 1.6106578 diff --git a/mindspore/python/mindspore/train/callback/_landscape.py b/mindspore/python/mindspore/train/callback/_landscape.py index e5cb5bf03b9..22887a71663 100644 --- a/mindspore/python/mindspore/train/callback/_landscape.py +++ b/mindspore/python/mindspore/train/callback/_landscape.py @@ -181,6 +181,7 @@ class SummaryLandscape: >>> import mindspore as ms >>> import mindspore.nn as nn >>> from mindspore.nn import Loss, Accuracy + >>> from mindspore.train import Model, SummaryCollector, SummaryLandscape >>> >>> if __name__ == '__main__': ... # If the device_target is Ascend, set the device_target to "Ascend" @@ -192,10 +193,10 @@ class SummaryLandscape: ... network = LeNet5(10) ... net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean") ... net_opt = nn.Momentum(network.trainable_params(), 0.01, 0.9) - ... model = ms.Model(network, net_loss, net_opt, metrics={"Accuracy": Accuracy()}) + ... model = Model(network, net_loss, net_opt, metrics={"Accuracy": Accuracy()}) ... # Simple usage for collect landscape information: ... interval_1 = [1, 2, 3, 4, 5] - ... 
summary_collector = ms.SummaryCollector(summary_dir='./summary/lenet_interval_1', + ... summary_collector = SummaryCollector(summary_dir='./summary/lenet_interval_1', ... collect_specified_data={'collect_landscape':{"landscape_size": 4, ... "unit": "step", ... "create_landscape":{"train":True, @@ -215,7 +216,7 @@ class SummaryLandscape: ... ds_eval = create_dataset(mnist_dataset_dir, 32) ... return model, network, ds_eval, metrics ... - ... summary_landscape = ms.SummaryLandscape('./summary/lenet_interval_1') + ... summary_landscape = SummaryLandscape('./summary/lenet_interval_1') ... # parameters of collect_landscape can be modified or unchanged ... summary_landscape.gen_landscapes_with_multi_process(callback_fn, ... collect_landscape={"landscape_size": 4, diff --git a/mindspore/python/mindspore/train/callback/_loss_monitor.py b/mindspore/python/mindspore/train/callback/_loss_monitor.py index 788654a7876..059faec2c4d 100644 --- a/mindspore/python/mindspore/train/callback/_loss_monitor.py +++ b/mindspore/python/mindspore/train/callback/_loss_monitor.py @@ -38,13 +38,13 @@ class LossMonitor(Callback): ValueError: If per_print_times is not an integer or less than zero. 
Examples: - >>> import mindspore as ms >>> from mindspore import nn + >>> from mindspore.train import Model, LossMonitor >>> >>> net = LeNet5() >>> loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean') >>> optim = nn.Momentum(net.trainable_params(), 0.01, 0.9) - >>> model = ms.Model(net, loss_fn=loss, optimizer=optim) + >>> model = Model(net, loss_fn=loss, optimizer=optim) >>> data_path = './MNIST_Data' >>> dataset = create_dataset(data_path) >>> loss_monitor = LossMonitor() diff --git a/mindspore/python/mindspore/train/callback/_lr_scheduler_callback.py b/mindspore/python/mindspore/train/callback/_lr_scheduler_callback.py index ed165bc18f4..3cac3c5128c 100644 --- a/mindspore/python/mindspore/train/callback/_lr_scheduler_callback.py +++ b/mindspore/python/mindspore/train/callback/_lr_scheduler_callback.py @@ -34,10 +34,8 @@ class LearningRateScheduler(Callback): Examples: >>> import numpy as np - >>> import mindspore as ms >>> from mindspore import nn - >>> from mindspore import LearningRateScheduler - >>> import mindspore.nn as nn + >>> from mindspore.train import Model, LearningRateScheduler >>> from mindspore import dataset as ds ... >>> def learning_rate_function(lr, cur_step_num): @@ -50,7 +48,7 @@ class LearningRateScheduler(Callback): >>> net = nn.Dense(10, 5) >>> loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean') >>> optim = nn.Momentum(net.trainable_params(), learning_rate=lr, momentum=momentum) - >>> model = ms.Model(net, loss_fn=loss, optimizer=optim) + >>> model = Model(net, loss_fn=loss, optimizer=optim) ... 
>>> data = {"x": np.float32(np.random.rand(64, 10)), "y": np.random.randint(0, 5, (64,))} >>> dataset = ds.NumpySlicesDataset(data=data).batch(32) diff --git a/mindspore/python/mindspore/train/callback/_reduce_lr_on_plateau.py b/mindspore/python/mindspore/train/callback/_reduce_lr_on_plateau.py index 00610630c5b..62336ec7a0f 100644 --- a/mindspore/python/mindspore/train/callback/_reduce_lr_on_plateau.py +++ b/mindspore/python/mindspore/train/callback/_reduce_lr_on_plateau.py @@ -81,8 +81,8 @@ class ReduceLROnPlateau(Callback): ValueError: The learning rate is not a Parameter. Examples: - >>> from mindspore.train.callback import ReduceLROnPlateau - >>> from mindspore import Model, nn + >>> from mindspore import nn + >>> from mindspore.train import Model, ReduceLROnPlateau >>> net = LeNet5() >>> loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean') >>> optim = nn.Momentum(net.trainable_params(), 0.01, 0.9) diff --git a/mindspore/python/mindspore/train/callback/_summary_collector.py b/mindspore/python/mindspore/train/callback/_summary_collector.py index 105ceaeb1c5..85496ed2558 100644 --- a/mindspore/python/mindspore/train/callback/_summary_collector.py +++ b/mindspore/python/mindspore/train/callback/_summary_collector.py @@ -177,6 +177,7 @@ class SummaryCollector(Callback): Examples: >>> import mindspore as ms >>> import mindspore.nn as nn + >>> from mindspore.train import Model, SummaryCollector >>> from mindspore.nn import Accuracy >>> >>> if __name__ == '__main__': @@ -189,15 +190,15 @@ class SummaryCollector(Callback): ... network = LeNet5(10) ... net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean") ... net_opt = nn.Momentum(network.trainable_params(), 0.01, 0.9) - ... model = ms.Model(network, net_loss, net_opt, metrics={"Accuracy": Accuracy()}, amp_level="O2") + ... model = Model(network, net_loss, net_opt, metrics={"Accuracy": Accuracy()}, amp_level="O2") ... ... # Simple usage: - ... 
summary_collector = ms.SummaryCollector(summary_dir='./summary_dir') + ... summary_collector = SummaryCollector(summary_dir='./summary_dir') ... model.train(1, ds_train, callbacks=[summary_collector], dataset_sink_mode=False) ... ... # Do not collect metric and collect the first layer parameter, others are collected by default ... specified={'collect_metric': False, 'histogram_regular': '^conv1.*'} - ... summary_collector = ms.SummaryCollector(summary_dir='./summary_dir', collect_specified_data=specified) + ... summary_collector = SummaryCollector(summary_dir='./summary_dir', collect_specified_data=specified) ... model.train(1, ds_train, callbacks=[summary_collector], dataset_sink_mode=False) """ diff --git a/mindspore/python/mindspore/train/callback/_time_monitor.py b/mindspore/python/mindspore/train/callback/_time_monitor.py index 720cc7f94ea..55a0816d1ec 100644 --- a/mindspore/python/mindspore/train/callback/_time_monitor.py +++ b/mindspore/python/mindspore/train/callback/_time_monitor.py @@ -34,13 +34,13 @@ class TimeMonitor(Callback): ValueError: If data_size is not positive int. Examples: - >>> import mindspore as ms >>> from mindspore import nn + >>> from mindspore.train import Model, TimeMonitor >>> >>> net = LeNet5() >>> loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean') >>> optim = nn.Momentum(net.trainable_params(), 0.01, 0.9) - >>> model = ms.Model(net, loss_fn=loss, optimizer=optim) + >>> model = Model(net, loss_fn=loss, optimizer=optim) >>> data_path = './MNIST_Data' >>> dataset = create_dataset(data_path) >>> time_monitor = TimeMonitor() diff --git a/mindspore/python/mindspore/train/model.py b/mindspore/python/mindspore/train/model.py index e0bee88e649..9553efd6097 100644 --- a/mindspore/python/mindspore/train/model.py +++ b/mindspore/python/mindspore/train/model.py @@ -162,8 +162,8 @@ class Model: the Graph mode + Ascend platform, and for better acceleration, refer to the documentation to configure boost_config_dict. 
Examples: - >>> import mindspore as ms >>> from mindspore import nn + >>> from mindspore.train import Model >>> >>> class Net(nn.Cell): ... def __init__(self, num_class=10, num_channel=1): @@ -189,7 +189,7 @@ class Model: >>> net = Net() >>> loss = nn.SoftmaxCrossEntropyWithLogits() >>> optim = nn.Momentum(params=net.trainable_params(), learning_rate=0.1, momentum=0.9) - >>> model = ms.Model(net, loss_fn=loss, optimizer=optim, metrics=None) + >>> model = Model(net, loss_fn=loss, optimizer=optim, metrics=None) >>> # For details about how to build the dataset, please refer to the variable `dataset_train` in tutorial >>> # document on the official website: >>> # https://www.mindspore.cn/tutorials/zh-CN/master/beginner/quick_start.html @@ -989,8 +989,8 @@ class Model: Default: 0. Examples: - >>> import mindspore as ms >>> from mindspore import nn + >>> from mindspore.train import Model >>> >>> # For details about how to build the dataset, please refer to the tutorial >>> # document on the official website. @@ -999,7 +999,7 @@ class Model: >>> loss = nn.SoftmaxCrossEntropyWithLogits() >>> loss_scale_manager = ms.FixedLossScaleManager() >>> optim = nn.Momentum(params=net.trainable_params(), learning_rate=0.1, momentum=0.9) - >>> model = ms.Model(net, loss_fn=loss, optimizer=optim, metrics=None, + >>> model = Model(net, loss_fn=loss, optimizer=optim, metrics=None, ... loss_scale_manager=loss_scale_manager) >>> model.train(2, dataset) """ @@ -1126,8 +1126,8 @@ class Model: Default: 0. Examples: - >>> import mindspore as ms >>> from mindspore import nn + >>> from mindspore.train import Model >>> >>> # For details about how to build the dataset, please refer to the tutorial >>> # document on the official website. 
@@ -1136,7 +1136,7 @@ class Model: >>> net = Net() >>> loss = nn.SoftmaxCrossEntropyWithLogits() >>> optim = nn.Momentum(params=net.trainable_params(), learning_rate=0.1, momentum=0.9) - >>> model = ms.Model(net, loss_fn=loss, optimizer=optim, metrics={"accuracy"}) + >>> model = Model(net, loss_fn=loss, optimizer=optim, metrics={"accuracy"}) >>> model.fit(2, train_dataset, valid_dataset) """ @@ -1211,17 +1211,18 @@ class Model: epoch (int): Control the training epochs. Default: 1. Examples: - >>> import mindspore as ms >>> from mindspore import nn + >>> from mindspore.train import Model + >>> from mindspore.amp import FixedLossScaleManager >>> >>> # For details about how to build the dataset, please refer to the tutorial >>> # document on the official website. >>> dataset = create_custom_dataset() >>> net = Net() >>> loss = nn.SoftmaxCrossEntropyWithLogits() - >>> loss_scale_manager = ms.FixedLossScaleManager() + >>> loss_scale_manager = FixedLossScaleManager() >>> optim = nn.Momentum(params=net.trainable_params(), learning_rate=0.1, momentum=0.9) - >>> model = ms.Model(net, loss_fn=loss, optimizer=optim, metrics=None, + >>> model = Model(net, loss_fn=loss, optimizer=optim, metrics=None, ... loss_scale_manager=loss_scale_manager) >>> model.build(dataset, epoch=2) >>> model.train(2, dataset) @@ -1380,15 +1381,15 @@ class Model: the model in the test mode. Examples: - >>> import mindspore as ms >>> from mindspore import nn + >>> from mindspore.train import Model >>> >>> # For details about how to build the dataset, please refer to the tutorial >>> # document on the official website. 
>>> dataset = create_custom_dataset() >>> net = Net() >>> loss = nn.SoftmaxCrossEntropyWithLogits() - >>> model = ms.Model(net, loss_fn=loss, optimizer=None, metrics={'acc'}) + >>> model = Model(net, loss_fn=loss, optimizer=None, metrics={'acc'}) >>> acc = model.eval(dataset, dataset_sink_mode=False) """ dataset_sink_mode = Validator.check_bool(dataset_sink_mode) @@ -1451,11 +1452,12 @@ class Model: Examples: >>> import numpy as np - >>> import mindspore as ms + >>> import mindspore >>> from mindspore import Tensor + >>> from mindspore.train import Model >>> - >>> input_data = Tensor(np.random.randint(0, 255, [1, 1, 32, 32]), ms.float32) - >>> model = ms.Model(Net()) + >>> input_data = Tensor(np.random.randint(0, 255, [1, 1, 32, 32]), mindspore.float32) + >>> model = Model(Net()) >>> result = model.predict(input_data) """ self._check_network_mode(self._predict_network, False) @@ -1537,6 +1539,7 @@ class Model: >>> import numpy as np >>> import mindspore as ms >>> from mindspore import Tensor, nn + >>> from mindspore.train import Model >>> from mindspore.communication import init >>> >>> ms.set_context(mode=ms.GRAPH_MODE) @@ -1550,7 +1553,7 @@ class Model: >>> loss = nn.SoftmaxCrossEntropyWithLogits() >>> loss_scale_manager = ms.FixedLossScaleManager() >>> optim = nn.Momentum(params=net.trainable_params(), learning_rate=0.1, momentum=0.9) - >>> model = ms.Model(net, loss_fn=loss, optimizer=optim, metrics=None, + >>> model = Model(net, loss_fn=loss, optimizer=optim, metrics=None, ... 
loss_scale_manager=loss_scale_manager) >>> layout_dict = model.infer_train_layout(dataset) """ @@ -1595,13 +1598,14 @@ class Model: >>> import numpy as np >>> import mindspore as ms >>> from mindspore import Tensor + >>> from mindspore.train import Model >>> from mindspore.communication import init >>> >>> ms.set_context(mode=ms.GRAPH_MODE) >>> init() >>> ms.set_auto_parallel_context(full_batch=True, parallel_mode=ms.ParallelMode.SEMI_AUTO_PARALLEL) >>> input_data = Tensor(np.random.randint(0, 255, [1, 1, 32, 32]), ms.float32) - >>> model = ms.Model(Net()) + >>> model = Model(Net()) >>> predict_map = model.infer_predict_layout(input_data) """ if context.get_context("mode") != context.GRAPH_MODE: