!24609 Modify white list for master

Merge pull request !24609 from liuyang/master_white_list
This commit is contained in:
i-robot 2021-10-11 04:00:28 +00:00 committed by Gitee
commit b2f2a203d3
9 changed files with 74 additions and 47 deletions

View File

@ -173,6 +173,9 @@ def pytype_to_dtype(obj):
Returns:
Type of MindSpore type.
Raises:
NotImplementedError: If the python type cannot be converted to MindSpore type.
"""
if isinstance(obj, np.dtype):
@ -181,7 +184,7 @@ def pytype_to_dtype(obj):
return obj
if isinstance(obj, type) and obj in _simple_types:
return _simple_types[obj]
raise NotImplementedError(f"Unsupported convert python type {obj} to MindSpore type.")
raise NotImplementedError(f"The python type {obj} cannot be converted to MindSpore type.")
def get_py_obj_dtype(obj):

View File

@ -138,7 +138,8 @@ def _calculate_fan_in_and_fan_out(shape):
"""
dimensions = len(shape)
if dimensions < 2:
raise ValueError("Fan in and fan out can not be computed for tensor with fewer than 2 dimensions")
raise ValueError("'fan_in' and 'fan_out' can not be computed for tensor with fewer than"
" 2 dimensions, but got dimensions {}.".format(dimensions))
if dimensions == 2: # Linear
fan_in = shape[1]
fan_out = shape[0]
@ -488,6 +489,10 @@ def initializer(init, shape=None, dtype=mstype.float32):
Returns:
Union[Tensor], return is Tensor object.
Raises:
TypeError: The type of the argument 'init' is not correct.
ValueError: Some values are not correct.
Examples:
>>> import mindspore
@ -497,15 +502,15 @@ def initializer(init, shape=None, dtype=mstype.float32):
>>> tensor3 = initializer(0, [1, 2, 3], mindspore.float32)
"""
if not isinstance(init, (Tensor, numbers.Number, str, Initializer)):
raise TypeError("Unsupported init type '{}', init should be 'Tensor', 'number', 'str' "
"or 'initializer' type".format(type(init)))
raise TypeError("The type of the 'init' argument should be 'Tensor', 'number', 'str' "
"or 'initializer', but got {}.".format(type(init)))
if isinstance(init, Tensor):
init_shape = init.shape
shape = shape if isinstance(shape, (tuple, list)) else [shape]
if shape is not None and init_shape != tuple(shape):
raise ValueError("The shape of init should be same as variable shape, but got the shape of init {} and "
"the variable shape {}.".format(list(init.shape), shape))
raise ValueError("The shape of the 'init' argument should be same as the argument 'shape', but got the "
"'init' shape {} and the 'shape' {}.".format(list(init.shape), shape))
return init
if isinstance(shape, list):
@ -515,7 +520,8 @@ def initializer(init, shape=None, dtype=mstype.float32):
for value in shape if shape is not None else ():
if not isinstance(value, int) or value <= 0:
raise ValueError(f"Shape is invalid, the value of shape must be positive integer, but got shape:{shape}")
raise ValueError(f"The argument 'shape' is invalid, the value of 'shape' must be positive integer, "
                 f"but got {shape}.")
if isinstance(init, str):
init = _INITIALIZER_ALIAS[init.lower()]()

View File

@ -44,16 +44,24 @@ def _is_in_parallel_mode():
def init_to_value(init):
"""Get value of initializer."""
"""
Get value of initializer.
Returns:
Value of the initializer.
Raises:
ValueError: The value of the argument 'init' is not correct.
"""
if isinstance(init, str):
if init == 'zeros':
return 0.0
if init == 'ones':
return 1.0
raise ValueError("The 'init' argument should be one of values in 'zeros', 'ones'.")
raise ValueError("The argument 'init' should be one of values in ['zeros', 'ones'].")
if isinstance(init, numbers.Number):
return float(init)
raise ValueError("The 'init' argument should be number or string.")
raise ValueError("The argument 'init' should be number or string, but got {}.".format(type(init)))
class Parameter(Tensor_):
@ -161,8 +169,8 @@ class Parameter(Tensor_):
elif isinstance(default_input, (np.ndarray, list)):
Tensor_.__init__(self, default_input)
else:
raise TypeError(f"The 'default_input' argument must be [`Tensor`, `int`, `float`, `numpy.ndarray`, `list`]."
f"But got type {type(default_input)}.")
raise TypeError(f"The type of the argument 'default_input' must be in ['Tensor', 'int', 'float',"
f" 'numpy.ndarray', 'list']. But got type {type(default_input)}.")
def __deepcopy__(self, memodict):
new_obj = Parameter(self)
@ -222,10 +230,10 @@ class Parameter(Tensor_):
initialized on server. Default: False.
"""
if not(_is_role_worker() or _is_role_pserver() or _is_role_sched()):
raise RuntimeError("Must complete following two steps before calling set_param_ps: \
1. set_ps_context(enable_ps=True) \
2. export MS_ROLE environment variable \
Please refer to the official website for detailed usage.")
raise RuntimeError("Must complete following two steps before calling set_param_ps: \n"
"1. context.set_ps_context(enable_ps=True) \n"
"2. export MS_ROLE environment variable \n"
"Please refer to the official website for detailed usage.")
if init_in_server and (not self.name.endswith("embedding_table")):
raise RuntimeError("Can not initialize parameter '{}' in server, only parameters of "
"sparse operator support initialization in server.".format(self.name))
@ -284,7 +292,8 @@ class Parameter(Tensor_):
raise ValueError("The length of the '{}' name should be less than {}.".
format(name_, PARAMETER_NAME_PREFIX_MAX_LEN))
else:
raise ValueError("The type of the parameter's name should be `str` or `None`.")
raise ValueError("The type of the Parameter's name should be 'string' or 'None', "
"but got {}.".format(type(name_)))
if _is_role_worker() and self.cache_enable:
if len(self.shape) != 2:
@ -579,9 +588,9 @@ class Parameter(Tensor_):
init_data_args = ()
if layout is not None:
if not isinstance(layout, tuple):
raise TypeError("The layout should be tuple, but got layout is {}.".format(layout))
raise TypeError("The argument 'layout' should be tuple, but got {}.".format(type(layout)))
if len(layout) < 6:
raise ValueError("The length of layout must be larger than 5, but got layout is {}.".format(layout))
raise ValueError("The length of 'layout' must be larger than 5, but got {}.".format(len(layout)))
slice_index = int(_get_slice_index(layout[0], layout[1]))
init_data_args += (slice_index, layout[2], layout[5])

View File

@ -143,7 +143,7 @@ def set_seed(seed):
>>> c2 = ops.uniform((1, 4), minval, maxval, seed=2) # still get C1
"""
if not isinstance(seed, int):
raise TypeError("The argument 'seed' must be type of int.")
raise TypeError("The argument 'seed' must be type of int, but got {}.".format(type(seed)))
Validator.check_non_negative_int(seed, "seed", "global_seed")
import mindspore.dataset as de
np.random.seed(seed)

View File

@ -251,7 +251,8 @@ class RunContext:
"""
def __init__(self, original_args):
if not isinstance(original_args, dict):
raise TypeError("The argument 'original_args' of RunContext should be dict type.")
raise TypeError("The argument 'original_args' of RunContext should be dict type, "
"but got {}.".format(type(original_args)))
self._original_args = original_args
self._stop_requested = False

View File

@ -156,7 +156,8 @@ class CheckpointConfig:
keep_checkpoint_per_n_minutes = Validator.check_non_negative_int(keep_checkpoint_per_n_minutes)
if saved_network is not None and not isinstance(saved_network, nn.Cell):
raise TypeError(f"The type of saved_network must be None or Cell, but got {str(type(saved_network))}.")
raise TypeError(f"For 'CheckpointConfig', the type of 'saved_network' must be None or Cell, "
f"but got {str(type(saved_network))}.")
if not save_checkpoint_steps and not save_checkpoint_seconds and \
not keep_checkpoint_max and not keep_checkpoint_per_n_minutes:
@ -249,7 +250,7 @@ class CheckpointConfig:
if append_info is None or append_info == []:
return None
if not isinstance(append_info, list):
raise TypeError(f"The type of 'append_info' must list, but got {str(type(append_info))}.")
raise TypeError(f"The type of 'append_info' must be list, but got {str(type(append_info))}.")
handle_append_info = {}
if "epoch_num" in append_info:
handle_append_info["epoch_num"] = 0
@ -258,18 +259,21 @@ class CheckpointConfig:
dict_num = 0
for element in append_info:
if not isinstance(element, str) and not isinstance(element, dict):
raise TypeError(f"The type of append_info element must be str or dict, but got {str(type(element))}.")
raise TypeError(f"The type of 'append_info' element must be str or dict, "
f"but got {str(type(element))}.")
if isinstance(element, str) and element not in _info_list:
raise TypeError(f"The type of append_info element must be in {_info_list}, but got {element}.")
raise ValueError(f"The value of element in the argument 'append_info' must be in {_info_list}, "
f"but got {element}.")
if isinstance(element, dict):
dict_num += 1
if dict_num > 1:
raise TypeError(f"The element of append_info must has only one dict.")
raise TypeError(f"The element of 'append_info' must have only one dict.")
for key, value in element.items():
if isinstance(key, str) and isinstance(value, (int, float, bool)):
handle_append_info[key] = value
else:
raise TypeError(f"The type of dict in append_info must be key: str, value: int or float.")
raise TypeError(f"The type of dict in 'append_info' must be key: string, value: int or float, "
                f"but got key: {type(key)}, value: {type(value)}.")
return handle_append_info
@ -304,9 +308,8 @@ class ModelCheckpoint(Callback):
self._last_triggered_step = 0
if not isinstance(prefix, str) or prefix.find('/') >= 0:
raise ValueError("'Prefix' {} for checkpoint file name is invalid, 'prefix' must be "
"str and does not contain '/', please check and correct it and then "
"continue".format(prefix))
raise ValueError("The argument 'prefix' for checkpoint file name is invalid, 'prefix' must be "
"string and does not contain '/', but got {}.".format(prefix))
self._prefix = prefix
if directory is not None:
@ -318,7 +321,8 @@ class ModelCheckpoint(Callback):
self._config = CheckpointConfig()
else:
if not isinstance(config, CheckpointConfig):
raise TypeError("The argument 'config' should be CheckpointConfig type.")
raise TypeError("The argument 'config' should be 'CheckpointConfig' type, "
"but got {}.".format(type(config)))
self._config = config
# get existing checkpoint files

View File

@ -39,7 +39,7 @@ class LossMonitor(Callback):
def __init__(self, per_print_times=1):
super(LossMonitor, self).__init__()
if not isinstance(per_print_times, int) or per_print_times < 0:
raise ValueError("'Per_print_times' must be int and >= 0, "
raise ValueError("The argument 'per_print_times' must be int and >= 0, "
"but got {}".format(per_print_times))
self._per_print_times = per_print_times

View File

@ -123,6 +123,8 @@ def connect_network_with_dataset(network, dataset_helper):
Cell, a new network wrapped with 'GetNext' in the case of running the task on Ascend in graph mode, otherwise
it is the input network.
Raises:
RuntimeError: If the API was not called in dataset sink mode.
Supported Platforms:
``Ascend`` ``GPU``
@ -338,8 +340,8 @@ class _DatasetIter:
if hasattr(dataset, '__loop_size__'):
loop_size = dataset.__loop_size__
if loop_size <= dataset.get_dataset_size() and dataset.get_dataset_size() % loop_size != 0:
raise ValueError(f"Dataset size {dataset.get_dataset_size()} and sink_size {loop_size} "
f"are not matched, sink_size should be divisible by dataset size.")
raise ValueError(f"Dataset size {dataset.get_dataset_size()} and 'sink_size' {loop_size} "
f"are not matched, dataset size should be divisible by 'sink_size'.")
sink_count = math.ceil(dataset.get_dataset_size() / loop_size)
return sink_count

View File

@ -158,15 +158,17 @@ class Model:
self._build_predict_network()
def _check_for_graph_cell(self, kwargs):
"""Check for graph cell"""
if not isinstance(self._network, nn.GraphCell):
return
if self._amp_level != "O0":
logger.warning("amp_level will not work when network is a GraphCell.")
if self._loss_fn is not None or self._optimizer is not None:
raise ValueError("Currently loss_fn and optimizer should be None when network is a GraphCell. ")
raise ValueError("For 'Model', 'loss_fn' and 'optimizer' should be None when network is a GraphCell, "
"but got 'loss_fn': {}, 'optimizer': {}.".format(self._loss_fn, self._optimizer))
if kwargs:
raise ValueError("Currently kwargs should be empty when network is a GraphCell. ")
raise ValueError("For 'Model', the '**kwargs' argument should be empty when network is a GraphCell.")
def _process_amp_args(self, kwargs):
if self._amp_level in ["O0", "O3"]:
@ -180,8 +182,8 @@ class Model:
def _check_amp_level_arg(self, optimizer, amp_level):
if optimizer is None and amp_level != "O0":
raise ValueError(
"Auto mixed precision will not work because optimizer arg is None.Please set amp_level='O0' "
"to disable auto mixed precision or set optimizer arg not None to use auto mixed precision.")
"Auto mixed precision will not work because 'optimizer' is None. Please set amp_level='O0' "
"to disable auto mixed precision, or set 'optimizer' to a non-None value to use auto mixed precision.")
def _check_kwargs(self, kwargs):
for arg in kwargs:
@ -215,7 +217,7 @@ class Model:
"""Build train network"""
network = self._network
if self._loss_scale_manager is not None and self._optimizer is None:
raise ValueError("Optimizer can not be None when set loss_scale_manager.")
raise ValueError("The argument 'optimizer' can not be None when set 'loss_scale_manager'.")
if self._optimizer:
if self._loss_scale_manager_set:
@ -254,8 +256,8 @@ class Model:
if eval_network is not None:
if eval_indexes is not None and not (isinstance(eval_indexes, list) and len(eval_indexes) == 3):
raise ValueError("Eval_indexes must be a list or None. If eval_indexes is a list, length of it \
must be three. But got {}".format(eval_indexes))
raise ValueError("The argument 'eval_indexes' must be a list or None. If 'eval_indexes' is a list, "
"length of it must be three. But got {}".format(len(eval_indexes)))
self._eval_network = eval_network
self._eval_indexes = eval_indexes
@ -299,11 +301,10 @@ class Model:
if isinstance(outputs, Tensor):
outputs = (outputs,)
if not isinstance(outputs, tuple):
raise ValueError(f"The argument `outputs` should be tuple, but got {type(outputs)}.")
raise ValueError(f"The argument 'outputs' should be tuple, but got {type(outputs)}.")
if self._eval_indexes is not None and len(outputs) < 3:
raise ValueError("The length of `outputs` must be greater than or equal to 3, \
but got {}".format(len(outputs)))
raise ValueError("The length of 'outputs' must be >= 3, but got {}".format(len(outputs)))
for metric in self._metric_fns.values():
if self._eval_indexes is None:
@ -399,7 +400,7 @@ class Model:
raise RuntimeError('Pre-init process only supports GRAPH MODE and Ascend target currently.')
if not train_dataset and not valid_dataset:
raise ValueError("'Train_dataset' and 'valid_dataset' can not both be None or empty.")
raise ValueError("The argument 'train_dataset' and 'valid_dataset' can not both be None or empty.")
_device_number_check(self._parallel_mode, self._device_number)
@ -707,7 +708,7 @@ class Model:
if sink_size == -1:
sink_size = dataset_size
if sink_size < -1 or sink_size == 0:
raise ValueError("The 'sink_size' must be -1 or positive, but got sink_size {}.".format(sink_size))
raise ValueError("The argument 'sink_size' must be -1 or positive, but got {}.".format(sink_size))
_device_number_check(self._parallel_mode, self._device_number)
@ -942,7 +943,8 @@ class Model:
raise RuntimeError("Pre-compile process that generate parameter layout for the train network "
"only supports GRAPH MODE and Ascend target currently.")
if _get_parallel_mode() not in (ParallelMode.SEMI_AUTO_PARALLEL, ParallelMode.AUTO_PARALLEL):
raise RuntimeError('Infer train layout only supports semi auto parallel and auto parallel mode.')
raise RuntimeError("'infer_train_layout' only supports 'semi_auto_parallel' and 'auto_parallel' "
"mode, but got {}.".format(_get_parallel_mode()))
dataset_sink_mode = Validator.check_bool(dataset_sink_mode)
if not dataset_sink_mode:
raise ValueError("Only dataset sink mode is supported for now.")