From 9a38d4b42501198586e9a8add389dbcd5dfa7528 Mon Sep 17 00:00:00 2001
From: liangzelang
Date: Fri, 19 Jun 2020 17:03:08 +0800
Subject: [PATCH] fix BatchNorm config failure at Amp O3 level and an
 unexpected indent

---
 mindspore/train/amp.py   | 3 ++-
 mindspore/train/model.py | 3 ++-
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/mindspore/train/amp.py b/mindspore/train/amp.py
index 2c4cf69bf64..a47b16d0e02 100644
--- a/mindspore/train/amp.py
+++ b/mindspore/train/amp.py
@@ -127,7 +127,8 @@ def build_train_network(network, optimizer, loss_fn=None, level='O0', **kwargs):
             - O2: Cast network to float16, keep batchnorm and `loss_fn` (if set) run in float32,
               using dynamic loss scale.
             - O3: Cast network to float16, with additional property 'keep_batchnorm_fp32=False'.
-            O2 is recommended on GPU, O3 is recommemded on Ascend.
+
+            O2 is recommended on GPU, O3 is recommended on Ascend.
 
         cast_model_type (:class:`mindspore.dtype`): Supports `mstype.float16` or `mstype.float32`.
             If set to `mstype.float16`, use `float16` mode to train. If set, overwrite the level setting.
diff --git a/mindspore/train/model.py b/mindspore/train/model.py
index 68042d8d0ae..6570c24edc1 100755
--- a/mindspore/train/model.py
+++ b/mindspore/train/model.py
@@ -59,6 +59,7 @@ class Model:
             - O0: Do not change.
             - O2: Cast network to float16, keep batchnorm run in float32, using dynamic loss scale.
             - O3: Cast network to float16, with additional property 'keep_batchnorm_fp32=False'.
+            O2 is recommended on GPU, O3 is recommended on Ascend.
 
         loss_scale_manager (Union[None, LossScaleManager]): If None, not scale the loss, or else
@@ -113,7 +114,7 @@ class Model:
         self._build_predict_network()
 
     def _process_amp_args(self, kwargs):
-        if self._amp_level == "O0":
+        if self._amp_level in ["O0", "O3"]:
             self._keep_bn_fp32 = False
         if 'keep_batchnorm_fp32' in kwargs:
             self._keep_bn_fp32 = kwargs['keep_batchnorm_fp32']
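
A minimal usage sketch of the behaviour this patch addresses (not part of the patch; it assumes the MindSpore APIs of this era such as mindspore.train.Model, nn.Dense, nn.BatchNorm1d, nn.Momentum and nn.SoftmaxCrossEntropyWithLogits, and the tiny network is a made-up placeholder, not code from this repository):

    # Sketch only: TinyNet and all hyperparameters below are illustrative.
    import mindspore.nn as nn
    from mindspore.train import Model


    class TinyNet(nn.Cell):
        """Toy network with a BatchNorm layer, so keep_batchnorm_fp32 actually matters."""
        def __init__(self):
            super(TinyNet, self).__init__()
            self.dense = nn.Dense(16, 10)
            self.bn = nn.BatchNorm1d(10)

        def construct(self, x):
            return self.bn(self.dense(x))


    net = TinyNet()
    loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
    opt = nn.Momentum(net.trainable_params(), learning_rate=0.01, momentum=0.9)

    # Before this patch, _process_amp_args only handled "O0", so at O3 the documented
    # default 'keep_batchnorm_fp32=False' was never applied. With the fix, O3 casts
    # BatchNorm to float16 by default:
    model_o3 = Model(net, loss_fn=loss, optimizer=opt, amp_level="O3")

    # An explicit keyword argument still overrides the level default:
    model_o3_bn_fp32 = Model(net, loss_fn=loss, optimizer=opt, amp_level="O3",
                             keep_batchnorm_fp32=True)

With amp_level "O0" or "O3" the keep_batchnorm_fp32 flag now falls back to False unless the caller overrides it, matching the docstring note added by this patch.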