From 67e811ff6f1a365a77964272a1ad88d54468baa0 Mon Sep 17 00:00:00 2001
From: chenfei
Date: Thu, 1 Jul 2021 09:38:22 +0800
Subject: [PATCH] rm space after :

---
 mindspore/train/amp.py   | 2 +-
 mindspore/train/model.py | 2 --
 2 files changed, 1 insertion(+), 3 deletions(-)

diff --git a/mindspore/train/amp.py b/mindspore/train/amp.py
index 8896a8c3481..46dd79339d2 100644
--- a/mindspore/train/amp.py
+++ b/mindspore/train/amp.py
@@ -132,7 +132,7 @@ def build_train_network(network, optimizer, loss_fn=None, level='O0', **kwargs):
             O2 is recommended on GPU, O3 is recommended on Ascend.Property of `keep_batchnorm_fp32` ,
             `cast_model_type` and `loss_scale_manager` determined by `level` setting may be overwritten by settings in
             `kwargs` .
-        cast_model_type (:class: `mindspore.dtype` ): Supports `mstype.float16` or `mstype.float32` . If set, the
+        cast_model_type (:class:`mindspore.dtype`): Supports `mstype.float16` or `mstype.float32` . If set, the
             network will be casted to `cast_model_type` ( `mstype.float16` or `mstype.float32` ), but not to be
             casted to the type determined by `level` setting.
         keep_batchnorm_fp32 (bool): Keep Batchnorm run in `float32` when the network is set to cast to `float16` . If
diff --git a/mindspore/train/model.py b/mindspore/train/model.py
index 989422802eb..9c59ccc4dea 100644
--- a/mindspore/train/model.py
+++ b/mindspore/train/model.py
@@ -76,8 +76,6 @@ class Model:
             elements, including the positions of loss value, predicted value and label. The loss value would be
             passed to the `Loss` metric, the predicted value and label would be passed to other metric. Default:
             None.
-
-        Args:
         amp_level (str): Option for argument `level` in `mindspore.amp.build_train_network` , level for mixed
             precision training. Supports ["O0", "O2", "O3", "auto"]. Default: "O0".
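
Note for reviewers: the two docstrings touched above describe MindSpore's mixed
precision entry points. A minimal usage sketch of the documented `amp_level`
argument follows; `MyNet` and `train_ds` are hypothetical placeholders, not part
of this patch:

    import mindspore.nn as nn
    from mindspore import Model

    net = MyNet()  # hypothetical user-defined nn.Cell
    loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
    opt = nn.Momentum(net.trainable_params(), learning_rate=0.01, momentum=0.9)

    # `amp_level` is forwarded to `level` in mindspore.amp.build_train_network:
    # "O0" leaves the network in float32, while "O2"/"O3" cast most of it to
    # float16 (per the amp.py docstring, the per-level defaults for
    # cast_model_type / keep_batchnorm_fp32 / loss_scale_manager can be
    # overridden via kwargs).
    model = Model(net, loss_fn=loss, optimizer=opt, metrics={"acc"}, amp_level="O2")
    model.train(1, train_ds)  # hypothetical dataset object

When calling build_train_network directly, the same override applies, e.g.
level="O2" combined with an explicit cast_model_type, as the amended docstring
describes.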