forked from OSSInnovation/mindspore
add mixed precision O3 level
parent 5845c4ad0d
commit 2931ef8b24
@@ -65,7 +65,11 @@ _config_level = {
     "O2": {
         "keep_batchnorm_fp32": True,
         "cast_model_type": mstype.float16,
-        "loss_scale_manager": DynamicLossScaleManager()}}
+        "loss_scale_manager": DynamicLossScaleManager()},
+    "O3": {
+        "keep_batchnorm_fp32": False,
+        "cast_model_type": mstype.float16,
+        "loss_scale_manager": None}}
 
 
 def _check_kwargs(key_words):
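For orientation, the full _config_level table after this hunk plausibly reads as the sketch below. Only the O2 and O3 entries appear in the diff; the O0 entry is an assumption inferred from the documented behaviour "O0: Do not change".

    _config_level = {
        "O0": {                      # assumed entry, not shown in this diff
            "keep_batchnorm_fp32": True,
            "cast_model_type": mstype.float32,
            "loss_scale_manager": None},
        "O2": {
            "keep_batchnorm_fp32": True,
            "cast_model_type": mstype.float16,
            "loss_scale_manager": DynamicLossScaleManager()},
        "O3": {
            "keep_batchnorm_fp32": False,
            "cast_model_type": mstype.float16,
            "loss_scale_manager": None}}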
@@ -117,11 +121,13 @@ def build_train_network(network, optimizer, loss_fn=None, level='O0', **kwargs):
         loss_fn (Union[None, Cell]): Definition of the loss_fn. If None, the `network` should have the loss inside.
             Default: None.
         optimizer (Optimizer): Optimizer to update the Parameter.
-        level (str): Supports [O0, O2]. Default: "O0".
+        level (str): Supports [O0, O2, O3]. Default: "O0".
 
             - O0: Do not change.
             - O2: Cast network to float16, keep batchnorm and `loss_fn` (if set) run in float32,
               using dynamic loss scale.
+            - O3: Cast network to float16, with additional property 'keep_batchnorm_fp32=False'.
+            O2 is recommended on GPU, O3 is recommended on Ascend.
 
         cast_model_type (:class:`mindspore.dtype`): Supports `mstype.float16` or `mstype.float32`.
             If set to `mstype.float16`, use `float16` mode to train. If set, overwrite the level setting.
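A minimal usage sketch for the new level follows. The network, loss, and optimizer are illustrative placeholders (nn.Dense, nn.SoftmaxCrossEntropyWithLogits, nn.Momentum); only level="O3" comes from this commit, everything else is an assumption about a typical call.

    import mindspore.nn as nn
    from mindspore import amp

    net = nn.Dense(16, 10)                                # toy network, illustrative only
    loss_fn = nn.SoftmaxCrossEntropyWithLogits()          # assumed standard loss cell
    opt = nn.Momentum(net.trainable_params(), 0.01, 0.9)  # assumed standard optimizer

    # O3: cast the whole network (batchnorm included) to float16, no loss scaling.
    train_net = amp.build_train_network(net, opt, loss_fn, level="O3")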
@@ -131,7 +137,7 @@ def build_train_network(network, optimizer, loss_fn=None, level='O0', **kwargs):
     """
     validator.check_value_type('network', network, nn.Cell, None)
     validator.check_value_type('optimizer', optimizer, nn.Optimizer, None)
-    validator.check('level', level, "", ['O0', 'O2'], Rel.IN, None)
+    validator.check('level', level, "", ['O0', 'O2', 'O3'], Rel.IN, None)
     _check_kwargs(kwargs)
     config = dict(_config_level[level], **kwargs)
     config = edict(config)
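The line config = dict(_config_level[level], **kwargs) is what lets explicit keyword arguments override the per-level defaults. A standalone illustration of that merge semantics (plain Python, hypothetical values, not MindSpore-specific):

    # dict(defaults, **overrides): keys in the keyword arguments win over the defaults.
    defaults = {"keep_batchnorm_fp32": False, "cast_model_type": "float16", "loss_scale_manager": None}
    overrides = {"keep_batchnorm_fp32": True}       # e.g. a user-supplied kwarg to build_train_network
    config = dict(defaults, **overrides)
    print(config["keep_batchnorm_fp32"])            # True: the kwarg overrides the O3 default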
@@ -54,10 +54,12 @@ class Model:
             value would be passed to `Loss` metric, predict value and label would be passed to other
             metric. Default: None.
         amp_level (str): Option for argument `level` in `mindspore.amp.build_train_network`, level for mixed
-            precision training. Supports [O0, O2]. Default: "O0".
+            precision training. Supports [O0, O2, O3]. Default: "O0".
 
             - O0: Do not change.
             - O2: Cast network to float16, keep batchnorm run in float32, using dynamic loss scale.
+            - O3: Cast network to float16, with additional property 'keep_batchnorm_fp32=False'.
+            O2 is recommended on GPU, O3 is recommended on Ascend.
 
         loss_scale_manager (Union[None, LossScaleManager]): If None, do not scale the loss, or else
             scale the loss by LossScaleManager. If it is set, it overwrites the level setting. It's a keyword argument.
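Correspondingly, the higher-level Model entry point forwards amp_level to build_train_network. A hedged sketch of the call, with the same illustrative placeholders as above (only amp_level="O3" comes from this commit):

    from mindspore.train.model import Model
    import mindspore.nn as nn

    net = nn.Dense(16, 10)                                # toy network, illustrative only
    loss = nn.SoftmaxCrossEntropyWithLogits()
    opt = nn.Momentum(net.trainable_params(), 0.01, 0.9)

    # amp_level="O3" is passed through as level="O3" to mindspore.amp.build_train_network.
    model = Model(net, loss_fn=loss, optimizer=opt, metrics={"acc"}, amp_level="O3")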