!34064 unify format under dir python/mindspore/nn & ops

Merge pull request !34064 from 李林杰/format_py
i-robot 2022-05-10 01:34:52 +00:00 committed by Gitee
commit 5b00105ce2
No known key found for this signature in database
GPG Key ID: 173E9B9CA92EEF8F
72 changed files with 331 additions and 331 deletions

View File

@@ -662,7 +662,7 @@ class Cell(Cell_):
raise TypeError(f"For 'Cell', the {name} should not be Parameter.")
del self.__dict__[name]
if cells and name in cells:
-raise TypeError(f"For 'Cell', the {name} should be Cell, but got Parameter.")
+raise TypeError(f"For 'Cell', the {name} must be Cell, but got Parameter.")
self.insert_param_to_cell(name, value)
def _set_attr_for_parameter_tuple(self, name, value):
@@ -723,7 +723,7 @@ class Cell(Cell_):
if name in self.__dict__:
del self.__dict__[name]
if params and name in params:
-raise TypeError(f"For 'Cell', the {name} should be Parameter, but got Cell.")
+raise TypeError(f"For 'Cell', the {name} must be Parameter, but got Cell.")
if self._auto_prefix:
value.update_parameters_name(name + '.')
cells[name] = value
@@ -734,7 +734,7 @@ class Cell(Cell_):
if isinstance(value, Tensor) and self._params[name] is not None:
self._params[name].set_data(value)
elif value is not None:
-raise TypeError(f"For 'Cell', the type of {name} should be Parameter or ParameterTuple, "
+raise TypeError(f"For 'Cell', the type of {name} must be Parameter or ParameterTuple, "
f"but got {type(value).__name__}.")
else:
self.insert_param_to_cell(name, None)
@@ -763,7 +763,7 @@ class Cell(Cell_):
self._set_attr_for_params(name, value)
elif cells and name in cells:
if value is not None:
-raise TypeError(f"For 'Cell', the type of {name} should be cell, but got {type(value).__name__}.")
+raise TypeError(f"For 'Cell', the type of {name} must be cell, but got {type(value).__name__}.")
self._cells[name] = None
elif isinstance(value, Tensor):
self._set_attr_for_tensor(name, value)
@@ -825,7 +825,7 @@ class Cell(Cell_):
new_tensor = _load_tensor_by_layout(tensor, layout)
params[key].set_data(new_tensor, True)
else:
-raise TypeError("For 'load_parameter_slice', the argument 'params' should be OrderedDict type, "
+raise TypeError("For 'load_parameter_slice', the argument 'params' must be OrderedDict type, "
"but got {}.".format(type(params)))
def _load_inputs(self, *inputs):
@@ -1031,7 +1031,7 @@ class Cell(Cell_):
raise KeyError("For 'insert_param_to_cell', the {} parameter already exists in the network. Cannot "
"insert another parameter with the same name.".format(param_name))
if not isinstance(param, Parameter) and param is not None:
-raise TypeError(f"For 'insert_param_to_cell', the argument 'param' should be 'Parameter' if not None, "
+raise TypeError(f"For 'insert_param_to_cell', the argument 'param' must be 'Parameter' if not None, "
f"but got {type(param)}.")
if isinstance(param, Parameter) and param.name == PARAMETER_NAME_DEFAULT:
param.name = param_name
@@ -1079,7 +1079,7 @@ class Cell(Cell_):
raise KeyError("For 'insert_child_to_cell', the {} child cell already exists in the network. Cannot "
"insert another child cell with the same name.".format(child_name))
if not isinstance(child_cell, Cell) and child_cell is not None:
-raise TypeError(f"For 'insert_child_to_cell', the argument 'child_cell' should be 'Cell' if not None, "
+raise TypeError(f"For 'insert_child_to_cell', the argument 'child_cell' must be 'Cell' if not None, "
f"but got type {type(child_cell)}.")
self._cells[child_name] = child_cell
@@ -1553,7 +1553,7 @@ class Cell(Cell_):
>>> net.to_float(mstype.float16)
"""
if dst_type not in (mstype.float16, mstype.float32):
-raise ValueError("For 'to_float', the argument 'dst_type' should be float32 or float16, "
+raise ValueError("For 'to_float', the argument 'dst_type' must be float32 or float16, "
"but got {}.".format(dst_type))
if dst_type == mstype.float16:
self._set_mixed_precision_type_recursive(MixedPrecisionType.FP16)
@@ -1584,7 +1584,7 @@ class Cell(Cell_):
ValueError: If boost_type is not in the algorithm library.
"""
if boost_type not in ("less_bn",):
-raise ValueError("For 'set_boost', the argument 'boost_type' should be 'less_bn', "
+raise ValueError("For 'set_boost', the argument 'boost_type' must be 'less_bn', "
"but got {}.".format(boost_type))
flags = {"less_bn": boost_type == "less_bn"}
self.add_flags_recursive(**flags)
@@ -1744,7 +1744,7 @@ class Cell(Cell_):
return HookHandle()
if not isinstance(hook_fn, (FunctionType, MethodType)):
-raise TypeError(f"When using 'register_forward_pre_hook(hook_fn)', the type of 'hook_fn' should be python "
+raise TypeError(f"When using 'register_forward_pre_hook(hook_fn)', the type of 'hook_fn' must be python "
f"function, but got {type(hook_fn)}.")
if hook_fn.__code__.co_name == "staging_specialize":
raise TypeError(f"Decorating hook function {hook_fn.__name__} with '@ms_function' is not supported.")
@@ -1847,7 +1847,7 @@ class Cell(Cell_):
return HookHandle()
if not isinstance(hook_fn, (FunctionType, MethodType)):
-raise TypeError(f"When using 'register_forward_hook(hook_fn)', the type of 'hook_fn' should be python "
+raise TypeError(f"When using 'register_forward_hook(hook_fn)', the type of 'hook_fn' must be python "
f"function, but got {type(hook_fn)}.")
if hook_fn.__code__.co_name == "staging_specialize":
raise TypeError(f"Decorating hook function {hook_fn.__name__} with '@ms_function' is not supported.")
@@ -1951,7 +1951,7 @@ class Cell(Cell_):
return HookHandle()
if not isinstance(hook_fn, (FunctionType, MethodType)):
-raise TypeError(f"When using 'register_backward_hook(hook_fn)', the type of 'hook_fn' should be python "
+raise TypeError(f"When using 'register_backward_hook(hook_fn)', the type of 'hook_fn' must be python "
f"function, but got {type(hook_fn)}.")
if self._cell_backward_hook is None:
self._enable_backward_hook = True
@@ -2163,14 +2163,14 @@ class Cell(Cell_):
len_dynamic_shape_inputs = len(self._dynamic_shape_inputs)
if len_dynamic_shape_inputs != len_inputs:
raise ValueError(
-f"For 'set_inputs', the Length of Tensor should be {len_inputs}, but got {len_dynamic_shape_inputs}."
+f"For 'set_inputs', the Length of Tensor must be {len_inputs}, but got {len_dynamic_shape_inputs}."
)
for tensor_index in range(len_dynamic_shape_inputs):
i_dynamic_shape_inputs = self._dynamic_shape_inputs[tensor_index]
i_inputs = inputs[tensor_index]
if i_dynamic_shape_inputs.dtype is not i_inputs.dtype:
raise TypeError(
-f"For 'set_inputs', the DataType of Tensor should be {i_inputs.dtype}, but got "
+f"For 'set_inputs', the DataType of Tensor must be {i_inputs.dtype}, but got "
f"{i_dynamic_shape_inputs.dtype}."
)
set_inputs_shape = list(i_dynamic_shape_inputs.shape)
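For reference, a minimal usage sketch of the `to_float` check touched above (the `nn.Dense` cell is only an illustration): `dst_type` is restricted to `mstype.float16` and `mstype.float32`.

```python
import mindspore.nn as nn
from mindspore import dtype as mstype

net = nn.Dense(3, 4)          # any Cell works here; Dense is illustrative
net.to_float(mstype.float16)  # accepted: dst_type is float16 or float32

try:
    net.to_float(mstype.int32)  # rejected with the ValueError quoted above
except ValueError as e:
    print(e)
```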

View File

@@ -309,7 +309,7 @@ def cosine_decay_lr(min_lr, max_lr, total_step, step_per_epoch, decay_epoch):
validator.check_positive_int(step_per_epoch, 'step_per_epoch')
validator.check_positive_int(decay_epoch, 'decay_epoch')
if min_lr >= max_lr:
-raise ValueError("For 'cosine_decay_lr', the 'max_lr' should be greater than the 'min_lr', "
+raise ValueError("For 'cosine_decay_lr', the 'max_lr' must be greater than the 'min_lr', "
"but got 'max_lr' value: {}, 'min_lr' value: {}.".format(max_lr, min_lr))
delta = 0.5 * (max_lr - min_lr)
lr = []
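For reference, a minimal usage sketch of `cosine_decay_lr` (argument values are illustrative); `max_lr` has to be strictly greater than `min_lr`, otherwise the check above raises the quoted ValueError.

```python
import mindspore.nn as nn

# max_lr > min_lr satisfies the check above; swapping the two values would raise.
lr = nn.cosine_decay_lr(min_lr=0.01, max_lr=0.1, total_step=6,
                        step_per_epoch=2, decay_epoch=2)
print(lr)  # one decayed learning rate per step
```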

View File

@@ -676,20 +676,20 @@ class PReLU(Cell):
w = Tensor(tmp, dtype=mstype.float32)
elif isinstance(w, list):
if len(w) != channel:
-raise ValueError(f"For '{self.cls_name}', the length of 'w' should be equal to the 'channel' when "
+raise ValueError(f"For '{self.cls_name}', the length of 'w' must be equal to the 'channel' when "
f"the 'w' is a list, but got the length of 'w': {len(w)}, the 'channel': {channel}.")
for i in w:
if not isinstance(i, (float, np.float32)):
-raise ValueError(f"For '{self.cls_name}', all elements in 'w' should be "
+raise ValueError(f"For '{self.cls_name}', all elements in 'w' must be "
f"float when the 'w' is a list, but got {i}.")
w = Tensor(w, dtype=mstype.float32)
elif isinstance(w, Tensor):
if w.dtype not in (mstype.float16, mstype.float32):
-raise ValueError(f"For '{self.cls_name}', the dtype of 'w' should be float16 or "
+raise ValueError(f"For '{self.cls_name}', the dtype of 'w' must be float16 or "
f"float32 when the 'w' is a tensor, but got {w.dtype}.")
if len(w.shape) != 1 or w.shape[0] != channel:
-raise ValueError(f"For '{self.cls_name}', the dimension of 'w' should be 1, and the elements number "
+raise ValueError(f"For '{self.cls_name}', the dimension of 'w' must be 1, and the elements number "
f"should be equal to the 'channel' when the 'w' is a tensor, "
f"but got 'w' shape {w.shape}, the 'channel' {channel}.")
else:
@@ -986,5 +986,5 @@ def get_activation(name, prim_name=None):
return None
if name not in _activation:
-raise KeyError(f"{msg_prefix} 'name' should be in {list(_activation.keys())}, but got {name}.")
+raise KeyError(f"{msg_prefix} 'name' must be in {list(_activation.keys())}, but got {name}.")
return _activation[name]()

View File

@@ -83,7 +83,7 @@ class L1Regularizer(Cell):
super(L1Regularizer, self).__init__()
Validator.check_value_type("scale", scale, [int, float], self.cls_name)
if scale <= 0:
-raise ValueError(f"For '{self.cls_name}', the 'scale' should be greater than 0, but got {scale}.")
+raise ValueError(f"For '{self.cls_name}', the 'scale' must be greater than 0, but got {scale}.")
if math.isinf(scale) or math.isnan(scale):
raise ValueError(f"For '{self.cls_name}', the 'scale' can not be INF or NAN, but got {scale}.")
self.abs = P.Abs()
@@ -152,7 +152,7 @@ class Dropout(Cell):
super(Dropout, self).__init__()
Validator.check_value_type('keep_prob', keep_prob, [float], self.cls_name)
if keep_prob <= 0 or keep_prob > 1:
-raise ValueError(f"For '{self.cls_name}', the 'keep_prob' should be a number in range (0, 1], "
+raise ValueError(f"For '{self.cls_name}', the 'keep_prob' must be a number in range (0, 1], "
f"but got {keep_prob}.")
Validator.check_subclass("dtype", dtype, mstype.number_type, self.cls_name)
self.keep_prob = keep_prob
@@ -294,9 +294,9 @@ class Dense(Cell):
if isinstance(weight_init, Tensor):
if weight_init.ndim != 2 or weight_init.shape[0] != out_channels or \
weight_init.shape[1] != in_channels:
-raise ValueError(f"For '{self.cls_name}', weight init shape error. The ndim of 'weight_init' should "
-f"be equal to 2, and the first dim should be equal to 'out_channels', and the "
-f"second dim should be equal to 'in_channels'. But got 'weight_init': {weight_init}, "
+raise ValueError(f"For '{self.cls_name}', weight init shape error. The ndim of 'weight_init' must "
+f"be equal to 2, and the first dim must be equal to 'out_channels', and the "
+f"second dim must be equal to 'in_channels'. But got 'weight_init': {weight_init}, "
f"'out_channels': {out_channels}, 'in_channels': {in_channels}.")
self.weight = Parameter(initializer(weight_init, [out_channels, in_channels]), name="weight")
@@ -305,7 +305,7 @@ class Dense(Cell):
if isinstance(bias_init, Tensor):
if bias_init.ndim != 1 or bias_init.shape[0] != out_channels:
raise ValueError(f"For '{self.cls_name}', bias init shape error. The ndim of 'bias_init' should "
-f"be equal to 1, and the first dim should be equal to 'out_channels'. But got "
+f"be equal to 1, and the first dim must be equal to 'out_channels'. But got "
f"'bias_init': {bias_init}, 'out_channels': {out_channels}.")
self.bias = Parameter(initializer(bias_init, [out_channels]), name="bias")
self.bias_add = P.BiasAdd()
@@ -831,7 +831,7 @@ def bilinear(shape, size, scale, align_corners, prim_name=None):
"""Check input and calculate shape"""
msg_prefix = f"For '{prim_name}', the" if prim_name else "The"
if not isinstance(align_corners, bool):
-raise TypeError(f"{msg_prefix} type of 'align_corners' should be boolean, "
+raise TypeError(f"{msg_prefix} type of 'align_corners' must be boolean, "
f"but got {type(align_corners).__name__}.")
if size is None and scale is None:
raise ValueError(f"{msg_prefix} 'size' and 'scale' both none.")
@@ -984,10 +984,10 @@ class Unfold(Cell):
def _check_tuple_or_list(arg_name, arg_val, prim_name):
Validator.check_value_type(f"{arg_name}s", ksizes, [tuple, list], self.cls_name)
if len(arg_val) != 4 or arg_val[0] != 1 or arg_val[3] != 1:
-raise ValueError(f"For '{prim_name}' the format of '{arg_name}s' should be [1, {arg_name}_row, "
+raise ValueError(f"For '{prim_name}' the format of '{arg_name}s' must be [1, {arg_name}_row, "
f"{arg_name}_col, 1], but got {arg_val}.")
if not isinstance(arg_val[1], int) or not isinstance(arg_val[2], int) or arg_val[1] < 1 or arg_val[2] < 1:
-raise ValueError(f"For '{prim_name}' the {arg_name}_row and {arg_name}_col in '{arg_name}s' should be "
+raise ValueError(f"For '{prim_name}' the {arg_name}_row and {arg_name}_col in '{arg_name}s' must be "
f"an positive integer number, but got {arg_name}_row is {arg_val[1]}, "
f"{arg_name}_col is {arg_val[2]}")

View File

@@ -24,9 +24,9 @@ def _valid_index(cell_num, index, op_name=None):
"""Internal function, used to detect the value and type of index."""
msg_prefix = f"For '{op_name}', the" if op_name else "The"
if not isinstance(index, int):
-raise TypeError(f"{msg_prefix} type of 'index' should be int, but got {type(index).__name__}.")
+raise TypeError(f"{msg_prefix} type of 'index' must be int, but got {type(index).__name__}.")
if not -cell_num <= index < cell_num:
-raise IndexError(f"{msg_prefix} value of 'index' should be a number in range [{-cell_num}, {cell_num}), "
+raise IndexError(f"{msg_prefix} value of 'index' must be a number in range [{-cell_num}, {cell_num}), "
f"but got {index}.")
return index % cell_num
@@ -36,7 +36,7 @@ def _valid_cell(cell, op_name=None):
if issubclass(cell.__class__, Cell):
return True
msg_prefix = f"For '{op_name}'," if op_name else ""
-raise TypeError(f'{msg_prefix} each cell should be subclass of Cell, but got {type(cell).__name__}.')
+raise TypeError(f'{msg_prefix} each cell must be subclass of Cell, but got {type(cell).__name__}.')
def _get_prefix_and_index(cells):
@@ -215,7 +215,7 @@ class SequentialCell(Cell):
del self._cells[key]
del self._is_dynamic_name[index]
else:
-raise TypeError(f"For '{cls_name}', the type of index should be int type or slice type, "
+raise TypeError(f"For '{cls_name}', the type of index must be int type or slice type, "
f"but got {type(index).__name__}")
prefix, key_index = _get_prefix_and_index(self._cells)
temp_dict = OrderedDict()
@@ -316,13 +316,13 @@ class CellList(_CellListBase, Cell):
if isinstance(index, int):
index = _valid_index(len(self), index, cls_name)
return self._cells[str(index)]
-raise TypeError(f"For '{cls_name}', the type of 'index' should be int or slice, "
+raise TypeError(f"For '{cls_name}', the type of 'index' must be int or slice, "
f"but got {type(index).__name__}.")
def __setitem__(self, index, cell):
cls_name = self.__class__.__name__
if not isinstance(index, int) and _valid_cell(cell, cls_name):
-raise TypeError(f"For '{cls_name}', the type of 'index' should be int, "
+raise TypeError(f"For '{cls_name}', the type of 'index' must be int, "
f"but got {type(index).__name__}.")
index = _valid_index(len(self), index, cls_name)
if self._auto_prefix:
@@ -340,7 +340,7 @@ class CellList(_CellListBase, Cell):
for key in keys:
del self._cells[key]
else:
-raise TypeError(f"For '{cls_name}', the type of 'index' should be int or slice, "
+raise TypeError(f"For '{cls_name}', the type of 'index' must be int or slice, "
f"but got {type(index).__name__}.")
# adjust orderedDict
prefix, key_index = _get_prefix_and_index(self._cells)
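For reference, a minimal sketch of the `CellList` indexing rules touched above (the contained cells are illustrative): `__getitem__` accepts an int or a slice, anything else raises the quoted TypeError.

```python
import mindspore.nn as nn

cells = nn.CellList([nn.ReLU(), nn.Dense(3, 4)])
first = cells[0]    # int index: accepted
sub = cells[0:2]    # slice index: accepted

try:
    cells["0"]      # string index: rejected with the TypeError above
except TypeError as e:
    print(e)
```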

View File

@@ -310,7 +310,7 @@ class Conv2d(_Conv):
@constexpr
def _check_input_3d(input_shape, op_name):
if len(input_shape) != 3:
-raise ValueError(f"For '{op_name}', the dimension of input should be 3d, but got {len(input_shape)}.")
+raise ValueError(f"For '{op_name}', the dimension of input must be 3d, but got {len(input_shape)}.")
class Conv1d(_Conv):
@@ -506,7 +506,7 @@ class Conv1d(_Conv):
@constexpr
def _check_input_5dims(input_shape, op_name):
if len(input_shape) != 5:
-raise ValueError(f"For '{op_name}', the dimension of input should be 5d, but got {len(input_shape)}.")
+raise ValueError(f"For '{op_name}', the dimension of input must be 5d, but got {len(input_shape)}.")
class Conv3d(_Conv):

View File

@@ -39,7 +39,7 @@ __all__ = ['Embedding', 'EmbeddingLookup', 'MultiFieldEmbeddingLookup']
@constexpr
def _check_input_2d(input_shape, param_name, func_name):
if len(input_shape) != 2:
-raise ValueError(f"For '{func_name}', the dimension of '{param_name}' should be 2d, but got {len(input_shape)}")
+raise ValueError(f"For '{func_name}', the dimension of '{param_name}' must be 2d, but got {len(input_shape)}")
return True
@@ -331,8 +331,8 @@ class EmbeddingLookup(Cell):
full_batch = _get_full_batch()
if rank_size > 1 and not (full_batch and slice_mode == "table_row_slice"):
raise ValueError(f"For '{self.cls_name}', the cache of parameter server parallel should only be "
-f"used in \"full_batch\" and the value of \"full_batch\" should be True. "
-f"Meanwhile, the value of 'slice_mode' should be \"table_row_slice\"."
+f"used in \"full_batch\" and the value of \"full_batch\" must be True. "
+f"Meanwhile, the value of 'slice_mode' must be \"table_row_slice\"."
f"But got full_batch: {full_batch} and 'slice_mode': \"{slice_mode}\".")
self.vocab_cache_size = self.vocab_cache_size * rank_size
_set_rank_id(rank_id)
@@ -576,7 +576,7 @@ class MultiFieldEmbeddingLookup(EmbeddingLookup):
self.inf_add.shard(((1, 1, get_group_size()), (1, 1, 1)))
else:
if is_auto_parallel:
-raise ValueError("For '{}', the 'slice_mode' should be in ['table_row_slice', 'batch_slice' and \
+raise ValueError("For '{}', the 'slice_mode' must be in ['table_row_slice', 'batch_slice' and \
'table_column_slice'], but got {}".format(self.cls_name, str(slice_mode)))
# Min value for fp32

View File

@@ -117,7 +117,7 @@ def _get_dtype_max(dtype):
@constexpr
def _check_input_4d(input_shape, param_name, func_name):
if len(input_shape) != 4:
-raise ValueError(f"For '{func_name}', the dimension of '{param_name}' should be 4d, "
+raise ValueError(f"For '{func_name}', the dimension of '{param_name}' must be 4d, "
f"but got {len(input_shape)}.")
return True
@@ -476,7 +476,7 @@ class PSNR(Cell):
@constexpr
def _raise_dims_rank_error(input_shape, param_name, func_name):
"""raise error if input is not 3d or 4d"""
-raise ValueError(f"{func_name} {param_name} should be 3d or 4d, but got shape {input_shape}")
+raise ValueError(f"{func_name} {param_name} must be 3d or 4d, but got shape {input_shape}")
@constexpr

View File

@@ -754,7 +754,7 @@ def get_broadcast_matmul_shape(x_shape, y_shape, prim_name=None):
"""get broadcast_matmul shape"""
msg_prefix = f"For '{prim_name}', the" if prim_name else "The"
if (len(x_shape) < 2) or (len(y_shape) < 2):
-raise ValueError(f"{msg_prefix} length of 'x_shape' and 'y_shape' should be equal to or greater than 2, "
+raise ValueError(f"{msg_prefix} length of 'x_shape' and 'y_shape' must be equal to or greater than 2, "
f"but got the length of 'x_shape': {len(x_shape)} and the length of 'y_shape': "
f"{len(y_shape)}.")
x_shape_batch = x_shape[:-2]
@@ -773,8 +773,8 @@ def get_broadcast_matmul_shape(x_shape, y_shape, prim_name=None):
elif x_shape[i] == y_shape[i]:
broadcast_shape_back.append(x_shape[i])
else:
-raise ValueError(f"{msg_prefix} 'x_shape[{i}]' should be equal to 1, or the 'y_shape[{i}]' should be equal "
-f"to 1, or the 'x_shape[{i}]' should be equal to 'y_shape[{i}]', but got "
+raise ValueError(f"{msg_prefix} 'x_shape[{i}]' must be equal to 1, or the 'y_shape[{i}]' must be equal "
+f"to 1, or the 'x_shape[{i}]' must be equal to 'y_shape[{i}]', but got "
f"'x_shape[{i}]': {x_shape[i]}, 'y_shape[{i}]': {y_shape[i]}.")
broadcast_shape_front = y_shape[0: y_len - length] if length == x_len else x_shape[0: x_len - length]
@@ -798,7 +798,7 @@ def check_col_row_equal(x1_shape, x2_shape, transpose_x1, transpose_x2, prim_nam
x1_col = x1_last[not transpose_x1] # x1_col = x1_last[1] if (not transpose_a) else x1_last[0]
x2_row = x2_last[transpose_x2] # x2_row = x2_last[0] if (not transpose_b) else x2_last[1]
if x1_col != x2_row:
-raise ValueError(f"{msg_prefix} column of matrix dimensions of 'x1' should be equal to "
+raise ValueError(f"{msg_prefix} column of matrix dimensions of 'x1' must be equal to "
f"the row of matrix dimensions of 'x2', but got 'x1_col' {x1_col} and 'x2_row' {x2_row}.")

View File

@@ -65,7 +65,7 @@ class _BatchNorm(Cell):
raise ValueError(f"For '{self.cls_name}', the 'num_features' must be at least 1, but got {num_features}.")
if momentum < 0 or momentum > 1:
-raise ValueError(f"For '{self.cls_name}', the 'momentum' should be a number in range [0, 1], "
+raise ValueError(f"For '{self.cls_name}', the 'momentum' must be a number in range [0, 1], "
f"but got {momentum}.")
self.input_dims = input_dims
self.format = validator.check_string(data_format, ['NCHW', 'NHWC'], 'format', self.cls_name)
@@ -74,7 +74,7 @@ class _BatchNorm(Cell):
f"target {context.get_context('device_target')}.")
self.use_batch_statistics = use_batch_statistics
if self.use_batch_statistics is not None and not isinstance(self.use_batch_statistics, bool):
-raise ValueError(f"For '{self.cls_name}', the 'use_batch_statistics' should be a boolean value or None,"
+raise ValueError(f"For '{self.cls_name}', the 'use_batch_statistics' must be a boolean value or None,"
f" but got {use_batch_statistics}.")
self.num_features = num_features
self.eps = eps
@@ -169,7 +169,7 @@ class _BatchNorm(Cell):
f"local rank size, but got 'device_num_each_group': {group_size}, "
f"local rank size: {get_group_size()}.")
if len(world_rank) % group_size != 0:
-raise ValueError(f"For '{self.cls_name}', the dimension of device_list should be divisible by "
+raise ValueError(f"For '{self.cls_name}', the dimension of device_list must be divisible by "
f"'device_num_each_group', but got the length of device_list: {len(world_rank)}, "
f"'device_num_each_group': {group_size}.")
world_rank_list = zip(*(iter(world_rank),) * group_size)
@@ -181,7 +181,7 @@ class _BatchNorm(Cell):
for rid in itertools.chain(*process_groups):
validator.check_int_range(rid, 0, rank_size, Rel.INC_LEFT, "rank id in process_groups", self.cls_name)
if rid in seen:
-raise ValueError(f"For '{self.cls_name}', rank id in 'process_groups' should not be duplicated, "
+raise ValueError(f"For '{self.cls_name}', rank id in 'process_groups' must not be duplicated, "
f"but got {process_groups}.")
seen.add(rid)
@@ -243,7 +243,7 @@ class _BatchNorm(Cell):
def _channel_check(channel, num_channel, prim_name=None):
msg_prefix = f"For '{prim_name}', the" if prim_name else "The"
if channel != num_channel:
-raise ValueError(f"{msg_prefix} channel(the second dim of the input 'x') should be equal to num_channels, "
+raise ValueError(f"{msg_prefix} channel(the second dim of the input 'x') must be equal to num_channels, "
f"but got channel: {channel}, num_channels: {num_channel}.")
@@ -828,7 +828,7 @@ class LayerNorm(Cell):
"""Initialize LayerNorm."""
super(LayerNorm, self).__init__()
if not isinstance(normalized_shape, (tuple, list)):
-raise TypeError(f"For '{self.cls_name}', the type of 'normalized_shape' should be tuple[int] or list[int], "
+raise TypeError(f"For '{self.cls_name}', the type of 'normalized_shape' must be tuple[int] or list[int], "
f"but got {normalized_shape} and the type is {type(normalized_shape)}.")
self.normalized_shape = normalized_shape
self.begin_norm_axis = begin_norm_axis
@@ -941,7 +941,7 @@ class InstanceNorm2d(Cell):
raise ValueError(f"For '{self.cls_name}', the 'num_features' must be at least 1, but got {num_features}.")
if momentum < 0 or momentum > 1:
-raise ValueError(f"For '{self.cls_name}', the 'momentum' should be a number in range [0, 1], "
+raise ValueError(f"For '{self.cls_name}', the 'momentum' must be a number in range [0, 1], "
f"but got {momentum}.")
self.num_features = num_features
self.eps = eps
@@ -976,10 +976,10 @@ class InstanceNorm2d(Cell):
for key, _ in args_dict.items():
val = args_dict[key]
if not isinstance(val, (Tensor, numbers.Number, str, Initializer)):
-raise TypeError(f"For '{self.cls_name}', the type of '{key}' should be in "
+raise TypeError(f"For '{self.cls_name}', the type of '{key}' must be in "
f"[Tensor, numbers.Number, str, Initializer], but got type {type(val).__name__}.")
if isinstance(val, Tensor) and val.dtype != mstype.float32:
-raise TypeError(f"For '{self.cls_name}', the type of '{key}' should be float32, "
+raise TypeError(f"For '{self.cls_name}', the type of '{key}' must be float32, "
f"but got {val.dtype}.")
@@ -1045,7 +1045,7 @@ class GroupNorm(Cell):
self.num_groups = validator.check_positive_int(num_groups, "num_groups", self.cls_name)
self.num_channels = validator.check_positive_int(num_channels, "num_channels", self.cls_name)
if num_channels % num_groups != 0:
-raise ValueError(f"For '{self.cls_name}', the 'num_channels' should be divided by 'num_groups', "
+raise ValueError(f"For '{self.cls_name}', the 'num_channels' must be divided by 'num_groups', "
f"but got 'num_channels': {num_channels}, 'num_groups': {num_groups}.")
self.eps = validator.check_value_type('eps', eps, (float,), type(self).__name__)
self.affine = validator.check_bool(affine, arg_name="affine", prim_name=self.cls_name)
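For reference, a minimal sketch of the `GroupNorm` divisibility check above (channel counts are illustrative): `num_channels` has to be divisible by `num_groups`.

```python
import mindspore.nn as nn

gn = nn.GroupNorm(num_groups=4, num_channels=16)    # 16 % 4 == 0: accepted

try:
    nn.GroupNorm(num_groups=3, num_channels=16)     # 16 % 3 != 0: raises the ValueError above
except ValueError as e:
    print(e)
```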

View File

@@ -38,7 +38,7 @@ class _PoolNd(Cell):
def _check_int_or_tuple(arg_name, arg_value):
validator.check_value_type(arg_name, arg_value, [int, tuple], self.cls_name)
-error_msg = f"For '{self.cls_name}', the '{arg_name}' should be an positive int number or " \
+error_msg = f"For '{self.cls_name}', the '{arg_name}' must be an positive int number or " \
f"a tuple of two positive int numbers, but got {arg_value}"
if isinstance(arg_value, int):
if arg_value <= 0:
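As a quick illustration of the `_PoolNd` argument check above (a sketch with arbitrary sizes): `kernel_size` and `stride` have to be positive ints or tuples of two positive ints.

```python
import mindspore.nn as nn

pool = nn.MaxPool2d(kernel_size=3, stride=2)    # positive int: accepted

try:
    nn.MaxPool2d(kernel_size=-3, stride=2)      # non-positive: raises ValueError with the message above
except ValueError as e:
    print(e)
```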

View File

@@ -408,7 +408,7 @@ class FakeQuantWithMinMaxObserver(UniformQuantObserver):
min_array = self._get_init_array(self.min_init)
max_array = self._get_init_array(self.max_init)
if not np.greater(max_array, min_array).all():
-raise ValueError(f"For '{self.cls_name}', the 'max_init' should be greater than 'min_init', "
+raise ValueError(f"For '{self.cls_name}', the 'max_init' must be greater than 'min_init', "
f"but got 'max_init': {max_init}, 'min_init': {min_init}.")
if self.mode == "DEFAULT":
self._default_init(min_array, max_array)
@@ -440,7 +440,7 @@ class FakeQuantWithMinMaxObserver(UniformQuantObserver):
min_array = self._get_init_array(self.min_init)
max_array = self._get_init_array(self.max_init)
if not np.greater(max_array, min_array).all():
-raise ValueError(f"For '{self.cls_name}', the 'max_init' should be greater than 'min_init', "
+raise ValueError(f"For '{self.cls_name}', the 'max_init' must be greater than 'min_init', "
f"but got 'max_init': {max_init}, 'min_init': {min_init}.")
self.minq.set_data(Tensor(min_array))
@@ -525,11 +525,11 @@ class FakeQuantWithMinMaxObserver(UniformQuantObserver):
Convert the initial value to array.
"""
if isinstance(init_date, list) and self.per_channel and len(init_date) != self.num_channels:
-raise ValueError(f"For '{self.cls_name}', the length of 'min_init/max_init' list should be equal to "
+raise ValueError(f"For '{self.cls_name}', the length of 'min_init/max_init' list must be equal to "
f"'num_channels' for perchannel quant scenario, but got 'min_init/max_init': {init_date} "
f"and num_channels: {self.num_channels}.")
if isinstance(init_date, list) and not self.per_channel and len(init_date) != 1:
-raise ValueError(f"For '{self.cls_name}', the length of the 'min_init/max_init' list should be 1 for "
+raise ValueError(f"For '{self.cls_name}', the length of the 'min_init/max_init' list must be 1 for "
f"perlayer quant scenario, but got {len(init_date)}.")
if isinstance(init_date, list):
@@ -702,7 +702,7 @@ class Conv2dBnFoldQuantOneConv(Cell):
for dilation_elem in self.dilation:
Validator.check_positive_int(dilation_elem, 'dilation item', self.cls_name)
if pad_mode not in ('valid', 'same', 'pad'):
-raise ValueError(f"For '{self.cls_name}', the 'pad_mode' should be one of values "
+raise ValueError(f"For '{self.cls_name}', the 'pad_mode' must be one of values "
f"in ('valid', 'same', 'pad'), but got {pad_mode}.")
self.pad_mode = pad_mode
if isinstance(padding, int):
@@ -942,7 +942,7 @@ class Conv2dBnFoldQuant(Cell):
for dilation_elem in self.dilation:
Validator.check_positive_int(dilation_elem, 'dilation item', self.cls_name)
if pad_mode not in ('valid', 'same', 'pad'):
-raise ValueError(f"For '{self.cls_name}', the 'pad_mode' should be one of values in "
+raise ValueError(f"For '{self.cls_name}', the 'pad_mode' must be one of values in "
f"('valid', 'same', 'pad'), but got {pad_mode}.")
self.pad_mode = pad_mode
if isinstance(padding, int):
@@ -1154,7 +1154,7 @@ class Conv2dBnWithoutFoldQuant(Cell):
for dilation_elem in self.dilation:
Validator.check_positive_int(dilation_elem, 'dilation item', self.cls_name)
if pad_mode not in ('valid', 'same', 'pad'):
-raise ValueError(f"For '{self.cls_name}', the 'pad_mode' should be one of values in "
+raise ValueError(f"For '{self.cls_name}', the 'pad_mode' must be one of values in "
f"('valid', 'same', 'pad'), but got {pad_mode}.")
self.pad_mode = pad_mode
if isinstance(padding, int):
@@ -1298,7 +1298,7 @@ class Conv2dQuant(Cell):
for dilation_elem in self.dilation:
Validator.check_positive_int(dilation_elem, 'dilation item', self.cls_name)
if pad_mode not in ('valid', 'same', 'pad'):
-raise ValueError(f"For '{self.cls_name}', the 'pad_mode' should be one of values "
+raise ValueError(f"For '{self.cls_name}', the 'pad_mode' must be one of values "
f"in ('valid', 'same', 'pad'), but got {pad_mode}.")
self.pad_mode = pad_mode
if isinstance(padding, int):
@@ -1429,8 +1429,8 @@ class DenseQuant(Cell):
if weight_init.ndim != 2 or weight_init.shape[0] != out_channels or \
weight_init.shape[1] != in_channels:
raise ValueError(f"For '{self.cls_name}', weight init shape error. The ndim of 'weight_init' should "
-f"be equal to 2, and the first dim should be equal to 'out_channels', and the "
-f"second dim should be equal to 'in_channels'. But got 'weight_init': {weight_init}, "
+f"be equal to 2, and the first dim must be equal to 'out_channels', and the "
+f"second dim must be equal to 'in_channels'. But got 'weight_init': {weight_init}, "
f"'out_channels': {out_channels}, 'in_channels': {in_channels}.")
self.weight = Parameter(initializer(
@@ -1440,7 +1440,7 @@ class DenseQuant(Cell):
if isinstance(bias_init, Tensor):
if bias_init.ndim != 1 or bias_init.shape[0] != out_channels:
raise ValueError(f"For '{self.cls_name}', bias init shape error. The ndim of 'bias_init' should "
-f"be equal to 1, and the first dim should be equal to 'out_channels'. But got "
+f"be equal to 1, and the first dim must be equal to 'out_channels'. But got "
f"'bias_init': {bias_init}, 'out_channels': {out_channels}.")
self.bias = Parameter(initializer(

View File

@@ -37,7 +37,7 @@ def _check_input_dtype(input_dtype, param_name, allow_dtypes, cls_name):
def _check_is_tensor(param_name, input_data, cls_name):
"""Internal function, used to check whether the input data is Tensor."""
if input_data is not None and not isinstance(P.typeof(input_data), mstype.tensor_type):
-raise TypeError(f"For '{cls_name}', the '{param_name}' should be '{mstype.tensor_type}', "
+raise TypeError(f"For '{cls_name}', the '{param_name}' must be '{mstype.tensor_type}', "
f"but got '{P.typeof(input_data)}'")
@@ -45,7 +45,7 @@ def _check_is_tensor(param_name, input_data, cls_name):
def _check_is_tuple(param_name, input_data, cls_name):
"""Internal function, used to check whether the input data is Tensor."""
if input_data is not None and not isinstance(P.typeof(input_data), mstype.Tuple):
-raise TypeError(f"For '{cls_name}', the '{param_name}' should be '{mstype.Tuple}', "
+raise TypeError(f"For '{cls_name}', the '{param_name}' must be '{mstype.Tuple}', "
f"but got '{P.typeof(input_data)}'")
@@ -53,14 +53,14 @@ def _check_is_tuple(param_name, input_data, cls_name):
def _check_tuple_length(param_name, input_data, length, cls_name):
"""Internal function, used to check whether the input data is Tensor."""
if input_data is not None and len(input_data) != length:
-raise TypeError(f"For '{cls_name}', the length of '{param_name}' should be '{length}', "
+raise TypeError(f"For '{cls_name}', the length of '{param_name}' must be '{length}', "
f"but got '{len(input_data)}'")
@constexpr
def _check_batch_size_equal(batch_size_x, batch_size_hx, cls_name):
if batch_size_x != batch_size_hx:
-raise ValueError(f"For '{cls_name}' batch size of x and hx should be equal, but got {batch_size_x} of x "
+raise ValueError(f"For '{cls_name}' batch size of x and hx must be equal, but got {batch_size_x} of x "
f"and {batch_size_hx} of hx.")

View File

@@ -61,7 +61,7 @@ def _check_input_dtype_same_and_valid(args_name, args_value, valid_values, cls_n
def _check_is_tensor(param_name, input_data, cls_name):
"""Internal function, used to check whether the input data is Tensor."""
if input_data is not None and not isinstance(P.typeof(input_data), mstype.tensor_type):
-raise TypeError(f"For '{cls_name}', the '{param_name}' should be '{mstype.tensor_type}', "
+raise TypeError(f"For '{cls_name}', the '{param_name}' must be '{mstype.tensor_type}', "
f"but got '{P.typeof(input_data)}'")
@@ -69,7 +69,7 @@ def _check_is_tensor(param_name, input_data, cls_name):
def _check_is_tuple(param_name, input_data, cls_name):
"""Internal function, used to check whether the input data is Tensor."""
if input_data is not None and not isinstance(P.typeof(input_data), mstype.Tuple):
-raise TypeError(f"For '{cls_name}', the '{param_name}' should be '{mstype.Tuple}', "
+raise TypeError(f"For '{cls_name}', the '{param_name}' must be '{mstype.Tuple}', "
f"but got '{P.typeof(input_data)}'")
@@ -77,14 +77,14 @@ def _check_is_tuple(param_name, input_data, cls_name):
def _check_tuple_length(param_name, input_data, length, cls_name):
"""Internal function, used to check whether the input data is Tensor."""
if input_data is not None and len(input_data) != length:
-raise TypeError(f"For '{cls_name}', the length of '{param_name}' should be '{length}', "
+raise TypeError(f"For '{cls_name}', the length of '{param_name}' must be '{length}', "
f"but got '{len(input_data)}'")
@constexpr
def _check_seq_length_size(batch_size_x, seq_length_size, cls_name):
if batch_size_x != seq_length_size:
-raise ValueError(f"For '{cls_name}' batch size of x and seq_length should be equal, "
+raise ValueError(f"For '{cls_name}' batch size of x and seq_length must be equal, "
f"but got {batch_size_x} of x and {seq_length_size} of seq_length.")
@@ -378,7 +378,7 @@ class _RNNBase(Cell):
validator.check_value_type("bidirectional", bidirectional, [bool], self.cls_name)
if not 0 <= dropout < 1:
-raise ValueError(f"For '{self.cls_name}', the 'dropout' should be a number in range [0, 1) "
+raise ValueError(f"For '{self.cls_name}', the 'dropout' must be a number in range [0, 1) "
f"representing the probability of an element being zeroed, but got {dropout}.")
if dropout > 0 and num_layers == 1:
@@ -404,7 +404,7 @@ class _RNNBase(Cell):
gate_size = hidden_size
self.rnn = _DynamicRNNRelu()
else:
-raise ValueError(f"For '{self.cls_name}', the 'mode' should be in ['RNN_RELU', 'RNN_TANH', 'LSTM', 'GRU'], "
+raise ValueError(f"For '{self.cls_name}', the 'mode' must be in ['RNN_RELU', 'RNN_TANH', 'LSTM', 'GRU'], "
f"but got {mode}.")
if context.get_context("device_target") == "CPU":
@@ -644,7 +644,7 @@ class RNN(_RNNBase):
elif kwargs['nonlinearity'] == 'relu':
mode = 'RNN_RELU'
else:
-raise ValueError(f"For '{self.cls_name}', the 'nonlinearity' should be in ['tanh', 'relu'], "
+raise ValueError(f"For '{self.cls_name}', the 'nonlinearity' must be in ['tanh', 'relu'], "
f"but got {kwargs['nonlinearity']}.")
del kwargs['nonlinearity']
else:
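For reference, a minimal sketch of the `_RNNBase` dropout check above (layer sizes are illustrative): `dropout` has to be a number in [0, 1).

```python
import mindspore.nn as nn

rnn = nn.GRU(input_size=16, hidden_size=32, num_layers=2, dropout=0.5)   # in [0, 1): accepted

try:
    nn.GRU(input_size=16, hidden_size=32, num_layers=2, dropout=1.0)     # raises the ValueError above
except ValueError as e:
    print(e)
```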

View File

@ -102,8 +102,8 @@ class DenseThor(Cell):
if weight_init.dim() != 2 or weight_init.shape[0] != out_channels or \ if weight_init.dim() != 2 or weight_init.shape[0] != out_channels or \
weight_init.shape[1] != in_channels: weight_init.shape[1] != in_channels:
raise ValueError(f"For '{self.cls_name}', weight init shape error. The dim of 'weight_init' should " raise ValueError(f"For '{self.cls_name}', weight init shape error. The dim of 'weight_init' should "
f"be equal to 2, and the first dim should be equal to 'out_channels', and the " f"be equal to 2, and the first dim must be equal to 'out_channels', and the "
f"second dim should be equal to 'in_channels'. But got 'weight_init': {weight_init}, " f"second dim must be equal to 'in_channels'. But got 'weight_init': {weight_init}, "
f"'out_channels': {out_channels}, 'in_channels': {in_channels}.") f"'out_channels': {out_channels}, 'in_channels': {in_channels}.")
self.weight = Parameter(initializer(weight_init, [out_channels, in_channels]), name="weight") self.weight = Parameter(initializer(weight_init, [out_channels, in_channels]), name="weight")
self.bias = None self.bias = None
@ -111,7 +111,7 @@ class DenseThor(Cell):
if isinstance(bias_init, Tensor): if isinstance(bias_init, Tensor):
if bias_init.dim() != 1 or bias_init.shape[0] != out_channels: if bias_init.dim() != 1 or bias_init.shape[0] != out_channels:
raise ValueError(f"For '{self.cls_name}', bias init shape error. The dim of 'bias_init' should " raise ValueError(f"For '{self.cls_name}', bias init shape error. The dim of 'bias_init' should "
f"be equal to 1, and the first dim should be equal to 'out_channels'. But got " f"be equal to 1, and the first dim must be equal to 'out_channels'. But got "
f"'bias_init': {bias_init}, 'out_channels': {out_channels}.") f"'bias_init': {bias_init}, 'out_channels': {out_channels}.")
self.bias = Parameter(initializer(bias_init, [out_channels]), name="bias") self.bias = Parameter(initializer(bias_init, [out_channels]), name="bias")
self.bias_add = P.BiasAdd() self.bias_add = P.BiasAdd()
@ -254,21 +254,21 @@ class _ConvThor(Cell):
if (not isinstance(kernel_size[0], int)) or (not isinstance(kernel_size[1], int)) or \ if (not isinstance(kernel_size[0], int)) or (not isinstance(kernel_size[1], int)) or \
isinstance(kernel_size[0], bool) or isinstance(kernel_size[1], bool) or \ isinstance(kernel_size[0], bool) or isinstance(kernel_size[1], bool) or \
kernel_size[0] < 1 or kernel_size[1] < 1: kernel_size[0] < 1 or kernel_size[1] < 1:
raise ValueError(f"For '{self.cls_name}', all elements in 'kernel_size' should be int or tuple and " raise ValueError(f"For '{self.cls_name}', all elements in 'kernel_size' must be int or tuple and "
f"equal to or greater than 1, but got 'kernel_size': {kernel_size}.") f"equal to or greater than 1, but got 'kernel_size': {kernel_size}.")
def __validate_stride(self, stride): def __validate_stride(self, stride):
"""validate stride.""" """validate stride."""
if (not isinstance(stride[0], int)) or (not isinstance(stride[1], int)) or \ if (not isinstance(stride[0], int)) or (not isinstance(stride[1], int)) or \
isinstance(stride[0], bool) or isinstance(stride[1], bool) or stride[0] < 1 or stride[1] < 1: isinstance(stride[0], bool) or isinstance(stride[1], bool) or stride[0] < 1 or stride[1] < 1:
raise ValueError(f"For '{self.cls_name}', all elements in 'stride' should be int or tuple and " raise ValueError(f"For '{self.cls_name}', all elements in 'stride' must be int or tuple and "
f"equal to or greater than 1, but got 'stride': {stride}.") f"equal to or greater than 1, but got 'stride': {stride}.")
def __validate_dilation(self, dilation): def __validate_dilation(self, dilation):
"""validate dilation.""" """validate dilation."""
if (not isinstance(dilation[0], int)) or (not isinstance(dilation[1], int)) or \ if (not isinstance(dilation[0], int)) or (not isinstance(dilation[1], int)) or \
isinstance(dilation[0], bool) or isinstance(dilation[1], bool) or dilation[0] < 1 or dilation[1] < 1: isinstance(dilation[0], bool) or isinstance(dilation[1], bool) or dilation[0] < 1 or dilation[1] < 1:
raise ValueError(f"For '{self.cls_name}', all elements in 'dilation' should be int or tuple and " raise ValueError(f"For '{self.cls_name}', all elements in 'dilation' must be int or tuple and "
f"equal to or greater than 1, but got 'dilation': {dilation}.") f"equal to or greater than 1, but got 'dilation': {dilation}.")
@ -737,7 +737,7 @@ class EmbeddingLookupThor(Cell):
self.forward_unique = False self.forward_unique = False
self.dtype = mstype.float16 self.dtype = mstype.float16
if target not in ('CPU', 'DEVICE'): if target not in ('CPU', 'DEVICE'):
raise ValueError(f"For '{self.cls_name}', the 'target' should be one of values in ('CPU', 'DEVICE'), " raise ValueError(f"For '{self.cls_name}', the 'target' must be one of values in ('CPU', 'DEVICE'), "
f"but got {target}.") f"but got {target}.")
if not sparse and target == 'CPU': if not sparse and target == 'CPU':
raise ValueError(f"For '{self.cls_name}', embedding_lookup must be sparse when 'target' is CPU, but got " raise ValueError(f"For '{self.cls_name}', embedding_lookup must be sparse when 'target' is CPU, but got "
@ -805,12 +805,12 @@ class EmbeddingLookupThor(Cell):
self.embeddinglookup.shard(((1, 1), indices_strategy)) self.embeddinglookup.shard(((1, 1), indices_strategy))
else: else:
if is_auto_parallel: if is_auto_parallel:
raise ValueError(f"For '{self.cls_name}', the 'slice_mode' should be one of values in " raise ValueError(f"For '{self.cls_name}', the 'slice_mode' must be one of values in "
f"['field_slice', 'table_row_slice', 'table_column_slice', 'batch_slice'], " f"['field_slice', 'table_row_slice', 'table_column_slice', 'batch_slice'], "
f"but got 'slice_mode': {slice_mode}") f"but got 'slice_mode': {slice_mode}")
if self.cache_enable and not enable_ps: if self.cache_enable and not enable_ps:
if parallel_mode != ParallelMode.STAND_ALONE: if parallel_mode != ParallelMode.STAND_ALONE:
raise ValueError(f"For '{self.cls_name}', the 'parallel_mode' should be equal to " raise ValueError(f"For '{self.cls_name}', the 'parallel_mode' must be equal to "
f"'ParallelMode.STAND_ALONE', but got {parallel_mode}.") f"'ParallelMode.STAND_ALONE', but got {parallel_mode}.")
self._set_cache_enable() self._set_cache_enable()
self.embedding_table.unique = self.forward_unique self.embedding_table.unique = self.forward_unique
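For reference, the 'target'/'sparse' contract validated above matches the one exposed by the standard nn.EmbeddingLookup layer; a minimal sketch under that assumption (values are illustrative):

import numpy as np
import mindspore
from mindspore import Tensor, nn

# 'target' must be 'CPU' or 'DEVICE', and sparse=True is required when target is 'CPU'
embedding = nn.EmbeddingLookup(vocab_size=4, embedding_size=2, target='CPU', sparse=True)
input_indices = Tensor(np.array([[1, 0], [3, 2]]), mindspore.int32)
output = embedding(input_indices)   # shape (2, 2, 2)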
@ -28,8 +28,8 @@ def _check_reshape_pos(reshape_pos, inputs_shape, outputs_shape, prim_name=None)
msg_prefix = f"For '{prim_name}', the" if prim_name else "The" msg_prefix = f"For '{prim_name}', the" if prim_name else "The"
if reshape_pos >= len(outputs_shape) or inputs_shape[reshape_pos] != outputs_shape[reshape_pos]: if reshape_pos >= len(outputs_shape) or inputs_shape[reshape_pos] != outputs_shape[reshape_pos]:
raise ValueError(f"{msg_prefix} 'reshape_with_axis' is invalid in the input and output. " raise ValueError(f"{msg_prefix} 'reshape_with_axis' is invalid in the input and output. "
f"The 'reshape_pos' should be less than the length of 'outputs_shape', and the " f"The 'reshape_pos' must be less than the length of 'outputs_shape', and the "
f"'inputs_shape[reshape_pos]' should be equal to 'outputs_shape[reshape_pos]', but got " f"'inputs_shape[reshape_pos]' must be equal to 'outputs_shape[reshape_pos]', but got "
f"'reshape_pos': {reshape_pos}, 'inputs_shape': {inputs_shape}, 'outputs_shape': " f"'reshape_pos': {reshape_pos}, 'inputs_shape': {inputs_shape}, 'outputs_shape': "
f"{outputs_shape}. You may try pass parameters without 'reshape_with_axis'.") f"{outputs_shape}. You may try pass parameters without 'reshape_with_axis'.")
@ -38,7 +38,7 @@ def _check_reshape_pos(reshape_pos, inputs_shape, outputs_shape, prim_name=None)
def _check_expand_dims_axis(time_axis, ndim, prim_name=None): def _check_expand_dims_axis(time_axis, ndim, prim_name=None):
msg_prefix = f"For '{prim_name}', the" if prim_name else "The" msg_prefix = f"For '{prim_name}', the" if prim_name else "The"
if time_axis > ndim: if time_axis > ndim:
raise ValueError(f"{msg_prefix} value of 'time_axis' should be in range of [{-ndim - 1}, {ndim}], " raise ValueError(f"{msg_prefix} value of 'time_axis' must be in range of [{-ndim - 1}, {ndim}], "
f"but got {time_axis}.") f"but got {time_axis}.")
@ -53,14 +53,14 @@ def _generate_perm(axis_a, axis_b, length):
def _check_data(flag, prim_name=None): def _check_data(flag, prim_name=None):
msg_prefix = f"For '{prim_name}', the" if prim_name else "The" msg_prefix = f"For '{prim_name}', the" if prim_name else "The"
if not flag: if not flag:
raise TypeError(f"{msg_prefix} inputs and outputs should be a Tensor.") raise TypeError(f"{msg_prefix} inputs and outputs must be a Tensor.")
@constexpr @constexpr
def _check_inputs_dim(shape, prim_name=None): def _check_inputs_dim(shape, prim_name=None):
msg_prefix = f"For '{prim_name}', the" if prim_name else "The" msg_prefix = f"For '{prim_name}', the" if prim_name else "The"
if len(shape) < 3: if len(shape) < 3:
raise ValueError(f"{msg_prefix} inputs shape should be at least 3D, but got {len(shape)}.") raise ValueError(f"{msg_prefix} inputs shape must be at least 3D, but got {len(shape)}.")
class TimeDistributed(Cell): class TimeDistributed(Cell):
@ -104,7 +104,7 @@ class TimeDistributed(Cell):
def __init__(self, layer, time_axis, reshape_with_axis=None): def __init__(self, layer, time_axis, reshape_with_axis=None):
"""Initialize TimeDistributed.""" """Initialize TimeDistributed."""
if not isinstance(layer, (Cell, Primitive)): if not isinstance(layer, (Cell, Primitive)):
raise TypeError(f"For '{self.cls_name}', the 'layer' should be Cell or Primitive instance, " raise TypeError(f"For '{self.cls_name}', the 'layer' must be Cell or Primitive instance, "
f"but got type: {type(layer).__name__}.") f"but got type: {type(layer).__name__}.")
super(TimeDistributed, self).__init__() super(TimeDistributed, self).__init__()
Validator.check_is_int(time_axis, "time_axis", self.cls_name) Validator.check_is_int(time_axis, "time_axis", self.cls_name)
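The check above mirrors the public nn.TimeDistributed interface: the wrapped 'layer' must be a Cell or Primitive and 'time_axis' an int. A minimal usage sketch (shapes chosen only for illustration):

import numpy as np
import mindspore
from mindspore import Tensor, nn

x = Tensor(np.random.random([32, 10, 3]), mindspore.float32)
dense = nn.Dense(3, 6)                                            # a Cell, so the type check passes
net = nn.TimeDistributed(dense, time_axis=1, reshape_with_axis=0)
output = net(x)                                                   # dense applied per step along axis 1 -> (32, 10, 6)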
@ -318,7 +318,7 @@ class CosineDecayLR(LearningRateSchedule):
validator.check_is_float(max_lr, 'max_lr', self.cls_name) validator.check_is_float(max_lr, 'max_lr', self.cls_name)
validator.check_positive_int(decay_steps, "decay_steps", self.cls_name) validator.check_positive_int(decay_steps, "decay_steps", self.cls_name)
if min_lr >= max_lr: if min_lr >= max_lr:
raise ValueError("For 'CosineDecayLR', the 'max_lr' should be greater than the 'min_lr', " raise ValueError("For 'CosineDecayLR', the 'max_lr' must be greater than the 'min_lr', "
"but got 'max_lr' value: {}, 'min_lr' value: {}.".format(max_lr, min_lr)) "but got 'max_lr' value: {}, 'min_lr' value: {}.".format(max_lr, min_lr))
self.min_lr = min_lr self.min_lr = min_lr
self.max_lr = max_lr self.max_lr = max_lr
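Given the constraint that 'max_lr' must exceed 'min_lr', a minimal sketch of how the schedule is typically evaluated (values are illustrative):

import mindspore
from mindspore import Tensor, nn

cosine_decay_lr = nn.CosineDecayLR(min_lr=0.01, max_lr=0.1, decay_steps=4)   # max_lr > min_lr, decay_steps > 0
global_step = Tensor(2, mindspore.int32)
print(cosine_decay_lr(global_step))   # 0.01 + 0.5 * (0.1 - 0.01) * (1 + cos(pi * 2 / 4)) = 0.055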
@ -52,7 +52,7 @@ class LossBase(Cell):
super(LossBase, self).__init__() super(LossBase, self).__init__()
if reduction not in ('mean', 'sum', 'none'): if reduction not in ('mean', 'sum', 'none'):
raise ValueError(f"For '{self.cls_name}', the 'reduction' should be in ['mean', 'sum', 'none'], " raise ValueError(f"For '{self.cls_name}', the 'reduction' must be in ['mean', 'sum', 'none'], "
f"but got {reduction}.") f"but got {reduction}.")
self.average = True self.average = True
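The 'reduction' check above applies to every loss derived from LossBase; a minimal custom-loss sketch showing how the validated value is consumed through get_loss (the MAE loss here is only illustrative):

import numpy as np
from mindspore import Tensor, nn, ops

class MAELoss(nn.LossBase):
    def __init__(self, reduction='mean'):          # must be 'mean', 'sum' or 'none'
        super(MAELoss, self).__init__(reduction)
        self.abs = ops.Abs()

    def construct(self, logits, labels):
        x = self.abs(logits - labels)
        return self.get_loss(x)                    # applies the configured reduction

loss_fn = MAELoss(reduction='sum')
out = loss_fn(Tensor(np.array([1.0, 2.0, 3.0], np.float32)),
              Tensor(np.array([1.0, 2.0, 2.0], np.float32)))   # sum of |diff| = 1.0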
@ -175,7 +175,7 @@ class _Loss(LossBase):
def _check_is_tensor(param_name, input_data, cls_name): def _check_is_tensor(param_name, input_data, cls_name):
"""Internal function, used to check whether the input data is Tensor.""" """Internal function, used to check whether the input data is Tensor."""
if input_data is not None and not isinstance(F.typeof(input_data), mstype.tensor_type): if input_data is not None and not isinstance(F.typeof(input_data), mstype.tensor_type):
raise TypeError(f"For '{cls_name}', the '{param_name}' should be '{mstype.tensor_type}', " raise TypeError(f"For '{cls_name}', the '{param_name}' must be '{mstype.tensor_type}', "
f"but got '{F.typeof(input_data)}'") f"but got '{F.typeof(input_data)}'")
@ -720,9 +720,9 @@ def _check_ndim_multi(logits_dim, label_dim, prim_name=None):
"""Internal function, used to check whether the dimension of logits and label meets the requirements.""" """Internal function, used to check whether the dimension of logits and label meets the requirements."""
msg_prefix = f'For \'{prim_name}\', the' if prim_name else "The" msg_prefix = f'For \'{prim_name}\', the' if prim_name else "The"
if logits_dim < 2: if logits_dim < 2:
raise ValueError(f"{msg_prefix} 'logits' dimension should be greater than 1, but got {logits_dim}.") raise ValueError(f"{msg_prefix} 'logits' dimension must be greater than 1, but got {logits_dim}.")
if label_dim < 2: if label_dim < 2:
raise ValueError(f"{msg_prefix} 'labels' dimension should be greater than 1, but got {label_dim}.") raise ValueError(f"{msg_prefix} 'labels' dimension must be greater than 1, but got {label_dim}.")
@constexpr @constexpr
@ -730,7 +730,7 @@ def _check_weights(weight_shape, label_shape, prim_name=None):
"""Internal function, used to check whether the reduced shape meets the requirements.""" """Internal function, used to check whether the reduced shape meets the requirements."""
msg_prefix = f'For \'{prim_name}\', the' if prim_name else "The" msg_prefix = f'For \'{prim_name}\', the' if prim_name else "The"
if weight_shape != label_shape: if weight_shape != label_shape:
raise ValueError(f"{msg_prefix} weight_shape[0] should be equal to label_shape[1], " raise ValueError(f"{msg_prefix} weight_shape[0] must be equal to label_shape[1], "
f"but got weight_shape[0]: {weight_shape} and label_shape[1]: {label_shape}.") f"but got weight_shape[0]: {weight_shape} and label_shape[1]: {label_shape}.")
@ -785,7 +785,7 @@ class MultiClassDiceLoss(LossBase):
self.binarydiceloss = DiceLoss(smooth=1e-5) self.binarydiceloss = DiceLoss(smooth=1e-5)
self.weights = weights if weights is None else validator.check_value_type("weights", weights, [Tensor]) self.weights = weights if weights is None else validator.check_value_type("weights", weights, [Tensor])
if isinstance(self.weights, Tensor) and self.weights.ndim != 2: if isinstance(self.weights, Tensor) and self.weights.ndim != 2:
raise ValueError(f"For '{self.cls_name}', the dimension of 'weights' should be 2, " raise ValueError(f"For '{self.cls_name}', the dimension of 'weights' must be 2, "
f"but got {self.weights.ndim}.") f"but got {self.weights.ndim}.")
self.ignore_indiex = ignore_indiex if ignore_indiex is None else \ self.ignore_indiex = ignore_indiex if ignore_indiex is None else \
validator.check_value_type("ignore_indiex", ignore_indiex, [int]) validator.check_value_type("ignore_indiex", ignore_indiex, [int])
@ -1335,10 +1335,10 @@ def _check_ndim(logits_nidm, labels_ndim, prime_name=None):
'''Internal function, used to check whether the dimension of logits and labels meets the requirements.''' '''Internal function, used to check whether the dimension of logits and labels meets the requirements.'''
msg_prefix = f'For \'{prime_name}\', the' if prime_name else "The" msg_prefix = f'For \'{prime_name}\', the' if prime_name else "The"
if logits_nidm < 2 or logits_nidm > 4: if logits_nidm < 2 or logits_nidm > 4:
raise ValueError(f"{msg_prefix} dimensions of 'logits' should be in [2, 4], but got " raise ValueError(f"{msg_prefix} dimensions of 'logits' must be in [2, 4], but got "
f"dimension of 'logits' {logits_nidm}.") f"dimension of 'logits' {logits_nidm}.")
if labels_ndim < 2 or labels_ndim > 4: if labels_ndim < 2 or labels_ndim > 4:
raise ValueError(f"{msg_prefix} dimensions of 'labels' should be in [2, 4], but got " raise ValueError(f"{msg_prefix} dimensions of 'labels' must be in [2, 4], but got "
f"dimension of 'labels' {labels_ndim}.") f"dimension of 'labels' {labels_ndim}.")
if logits_nidm != labels_ndim: if logits_nidm != labels_ndim:
raise ValueError(f"{msg_prefix} dimensions of 'logits' and 'labels' must be equal, but got " raise ValueError(f"{msg_prefix} dimensions of 'logits' and 'labels' must be equal, but got "
@ -1420,10 +1420,10 @@ class FocalLoss(LossBase):
self.gamma = validator.check_value_type("gamma", gamma, [float]) self.gamma = validator.check_value_type("gamma", gamma, [float])
if weight is not None and not isinstance(weight, Tensor): if weight is not None and not isinstance(weight, Tensor):
raise TypeError(f"For '{self.cls_name}', the type of 'weight' should be a Tensor, " raise TypeError(f"For '{self.cls_name}', the type of 'weight' must be a Tensor, "
f"but got {type(weight).__name__}.") f"but got {type(weight).__name__}.")
if isinstance(weight, Tensor) and weight.ndim != 1: if isinstance(weight, Tensor) and weight.ndim != 1:
raise ValueError(f"For '{self.cls_name}', the dimension of 'weight' should be 1, but got {weight.ndim}.") raise ValueError(f"For '{self.cls_name}', the dimension of 'weight' must be 1, but got {weight.ndim}.")
self.weight = weight self.weight = weight
self.expand_dims = P.ExpandDims() self.expand_dims = P.ExpandDims()
self.gather_d = P.GatherD() self.gather_d = P.GatherD()
@ -161,5 +161,5 @@ def get_metrics(metrics):
out_metrics[name] = get_metric_fn(name) out_metrics[name] = get_metric_fn(name)
return out_metrics return out_metrics
raise TypeError("For 'get_metrics', the argument 'metrics' should be None, dict or set, " raise TypeError("For 'get_metrics', the argument 'metrics' must be None, dict or set, "
"but got {}".format(metrics)) "but got {}".format(metrics))
@ -70,7 +70,7 @@ def auc(x, y, reorder=False):
if np.all(dx <= 0): if np.all(dx <= 0):
direction = -1 direction = -1
else: else:
raise ValueError("For 'auc', if the argument is False, the argument 'x' array should be increasing " raise ValueError("For 'auc', if the argument is False, the argument 'x' array must be increasing "
"or decreasing, but got 'x': {}".format(x)) "or decreasing, but got 'x': {}".format(x))
area = direction * np.trapz(y, x) area = direction * np.trapz(y, x)
@ -59,7 +59,7 @@ class ConfusionMatrix(Metric):
self.num_classes = validator.check_value_type("num_classes", num_classes, [int]) self.num_classes = validator.check_value_type("num_classes", num_classes, [int])
if normalize not in ["target", "prediction", "all", "no_norm"]: if normalize not in ["target", "prediction", "all", "no_norm"]:
raise ValueError("For 'ConfusionMatrix', the argument 'normalize' should be in " raise ValueError("For 'ConfusionMatrix', the argument 'normalize' must be in "
"['all', 'prediction', 'label', 'no_norm'(None)], but got {}.".format(normalize)) "['all', 'prediction', 'label', 'no_norm'(None)], but got {}.".format(normalize))
self.normalize = normalize self.normalize = normalize
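A minimal sketch of the metric whose 'normalize' argument is validated above (inputs are illustrative; 'no_norm' keeps raw counts):

import numpy as np
from mindspore import Tensor, nn

metric = nn.ConfusionMatrix(num_classes=2, normalize='no_norm', threshold=0.5)
metric.clear()
metric.update(Tensor(np.array([1, 0, 1, 0])), Tensor(np.array([1, 0, 0, 1])))
print(metric.eval())   # 2x2 matrix of raw counts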
@ -305,11 +305,11 @@ class _ConfusionMatrix:
ValueError: when `y_pred` has less than two dimensions. ValueError: when `y_pred` has less than two dimensions.
""" """
if not np.all(y.astype(np.uint8) == y): if not np.all(y.astype(np.uint8) == y):
raise ValueError("For 'ConfusionMatrix.update', the true value (input[1]) should be a binarized ndarray.") raise ValueError("For 'ConfusionMatrix.update', the true value (input[1]) must be a binarized ndarray.")
dims = y_pred.ndim dims = y_pred.ndim
if dims < 2: if dims < 2:
raise ValueError(f"For 'ConfusionMatrix.update', the predicted value (input[0]) should have at least 2 " raise ValueError(f"For 'ConfusionMatrix.update', the predicted value (input[0]) must have at least 2 "
f"dimensions, but got {dims}.") f"dimensions, but got {dims}.")
if dims == 2 or (dims == 3 and y_pred.shape[-1] == 1): if dims == 2 or (dims == 3 and y_pred.shape[-1] == 1):
@ -587,7 +587,7 @@ def _compute_confusion_matrix_metric(metric_name, confusion_matrix):
if input_dim == 1: if input_dim == 1:
confusion_matrix = np.expand_dims(confusion_matrix, 0) confusion_matrix = np.expand_dims(confusion_matrix, 0)
if confusion_matrix.shape[-1] != 4: if confusion_matrix.shape[-1] != 4:
raise ValueError(f"For 'ConfusionMatrix', the size of the last dimension of confusion_matrix should be 4, " raise ValueError(f"For 'ConfusionMatrix', the size of the last dimension of confusion_matrix must be 4, "
f"but got {confusion_matrix.shape[-1]}.") f"but got {confusion_matrix.shape[-1]}.")
tp = confusion_matrix[..., 0] tp = confusion_matrix[..., 0]
@ -133,7 +133,7 @@ class HausdorffDistance(Metric):
result = tuple(tup) result = tuple(tup)
if result is None: if result is None:
raise ValueError(f"The sequence length should be {dim}, but got {len(tup)}.") raise ValueError(f"The sequence length must be {dim}, but got {len(tup)}.")
return result return result
@ -192,7 +192,7 @@ class HausdorffDistance(Metric):
if 0 <= self.percentile <= 100: if 0 <= self.percentile <= 100:
return np.percentile(surface_distance, self.percentile) return np.percentile(surface_distance, self.percentile)
raise ValueError(f"For 'HausdorffDistance', the value of the argument 'percentile' should be [0, 100], " raise ValueError(f"For 'HausdorffDistance', the value of the argument 'percentile' must be [0, 100], "
f"but got {self.percentile}.") f"but got {self.percentile}.")
def _get_surface_distance(self, y_pred_edges, y_edges): def _get_surface_distance(self, y_pred_edges, y_edges):
@ -281,12 +281,12 @@ class HausdorffDistance(Metric):
f"but got {type(label_idx)}.") f"but got {type(label_idx)}.")
if label_idx not in y_pred and label_idx not in y: if label_idx not in y_pred and label_idx not in y:
raise ValueError("For 'HausdorffDistance.update', the label index (input[2]) should be in predicted " raise ValueError("For 'HausdorffDistance.update', the label index (input[2]) must be in predicted "
"value (input[0]) or true value (input[1]), but {} is not.".format(label_idx)) "value (input[0]) or true value (input[1]), but {} is not.".format(label_idx))
if y_pred.size == 0 or y_pred.shape != y.shape: if y_pred.size == 0 or y_pred.shape != y.shape:
raise ValueError(f"For 'HausdorffDistance.update', the size of predicted value (input[0]) and true value " raise ValueError(f"For 'HausdorffDistance.update', the size of predicted value (input[0]) and true value "
f"(input[1]) should be greater than 0, in addition to that, predicted value and true " f"(input[1]) must be greater than 0, in addition to that, predicted value and true "
f"value should have the same shape, but got predicted value size: {y_pred.size}, shape: " f"value should have the same shape, but got predicted value size: {y_pred.size}, shape: "
f"{y_pred.shape}, true value size: {y.size}, shape: {y.shape}.") f"{y_pred.shape}, true value size: {y.size}, shape: {y.shape}.")
@ -135,13 +135,13 @@ class MeanSurfaceDistance(Metric):
f"but got {type(label_idx)}.") f"but got {type(label_idx)}.")
if label_idx not in y_pred and label_idx not in y: if label_idx not in y_pred and label_idx not in y:
raise ValueError("For 'MeanSurfaceDistance.update', the label index (input[2]) should be in predicted " raise ValueError("For 'MeanSurfaceDistance.update', the label index (input[2]) must be in predicted "
"value (input[0]) or true value (input[1]), but {} is not.".format(label_idx)) "value (input[0]) or true value (input[1]), but {} is not.".format(label_idx))
if y_pred.size == 0 or y_pred.shape != y.shape: if y_pred.size == 0 or y_pred.shape != y.shape:
raise ValueError(f"For 'MeanSurfaceDistance.update', the size of predicted value (input[0]) and true " raise ValueError(f"For 'MeanSurfaceDistance.update', the size of predicted value (input[0]) and true "
f"value (input[1]) should be greater than 0, in addition to that, predicted value and " f"value (input[1]) must be greater than 0, in addition to that, predicted value and "
f"true value should have the same shape, but got predicted value size: {y_pred.size}, " f"true value must have the same shape, but got predicted value size: {y_pred.size}, "
f"shape: {y_pred.shape}, true value size: {y.size}, shape: {y.shape}.") f"shape: {y_pred.shape}, true value size: {y.size}, shape: {y.shape}.")
if y_pred.dtype != bool: if y_pred.dtype != bool:
@ -147,7 +147,7 @@ class Metric(metaclass=ABCMeta):
0.3333333333333333 0.3333333333333333
""" """
if not isinstance(indexes, list) or not all(isinstance(i, int) for i in indexes): if not isinstance(indexes, list) or not all(isinstance(i, int) for i in indexes):
raise ValueError("For 'set_indexes', the argument 'indexes' should be a list and all its elements should " raise ValueError("For 'set_indexes', the argument 'indexes' must be a list and all its elements must "
"be int, please check whether it is correct.") "be int, please check whether it is correct.")
self._indexes = indexes self._indexes = indexes
return self return self
@ -115,13 +115,13 @@ class OcclusionSensitivity(Metric):
if y_pred.shape[0] > 1: if y_pred.shape[0] > 1:
raise RuntimeError(f"For 'OcclusionSensitivity.update', the shape at index 0 of the predicted value " raise RuntimeError(f"For 'OcclusionSensitivity.update', the shape at index 0 of the predicted value "
f"(input[1]) should be 1, but got {y_pred.shape[0]}.") f"(input[1]) must be 1, but got {y_pred.shape[0]}.")
if isinstance(label, int): if isinstance(label, int):
label = np.array([[label]], dtype=int) label = np.array([[label]], dtype=int)
# If the label is a tensor, make sure there's only 1 element # If the label is a tensor, make sure there's only 1 element
elif np.prod(label.shape) != y_pred.shape[0]: elif np.prod(label.shape) != y_pred.shape[0]:
raise RuntimeError(f"For 'OcclusionSensitivity.update', the number of the label (input[2]) should be " raise RuntimeError(f"For 'OcclusionSensitivity.update', the number of the label (input[2]) must be "
f"same as the batches, but got the label number {np.prod(label.shape)}, " f"same as the batches, but got the label number {np.prod(label.shape)}, "
f"and batches {y_pred.shape[0]}.") f"and batches {y_pred.shape[0]}.")
@ -202,8 +202,8 @@ def _check_input_bounding_box(b_box, im_shape):
b_box_min = b_box_max = None b_box_min = b_box_max = None
else: else:
if len(b_box) != 2 * len(im_shape): if len(b_box) != 2 * len(im_shape):
raise ValueError(f"For 'OcclusionSensitivity', the bounding box should contain upper and lower for " raise ValueError(f"For 'OcclusionSensitivity', the bounding box must contain upper and lower for "
f"all dimensions (except batch number), and the length of 'b_box' should be twice " f"all dimensions (except batch number), and the length of 'b_box' must be twice "
f"as long as predicted value's (except batch number), but got 'b_box' length " f"as long as predicted value's (except batch number), but got 'b_box' length "
f"{len(b_box)}, predicted value length (except batch number) {len(im_shape)}.") f"{len(b_box)}, predicted value length (except batch number) {len(im_shape)}.")
@ -212,10 +212,10 @@ def _check_input_bounding_box(b_box, im_shape):
b_box_min[b_box_min < 0] = 0 b_box_min[b_box_min < 0] = 0
b_box_max[b_box_max < 0] = im_shape[b_box_max < 0] - 1 b_box_max[b_box_max < 0] = im_shape[b_box_max < 0] - 1
if np.any(b_box_max >= im_shape): if np.any(b_box_max >= im_shape):
raise ValueError("For 'OcclusionSensitivity', maximum bounding box should be smaller than image size " raise ValueError("For 'OcclusionSensitivity', maximum bounding box must be smaller than image size "
"for all values.") "for all values.")
if np.any(b_box_min > b_box_max): if np.any(b_box_min > b_box_max):
raise ValueError("For 'OcclusionSensitivity', minimum bounding box should be smaller than maximum " raise ValueError("For 'OcclusionSensitivity', minimum bounding box must be smaller than maximum "
"bounding box for all values.") "bounding box for all values.")
return b_box_min, b_box_max return b_box_min, b_box_max
@ -188,7 +188,7 @@ class ROC(Metric):
def _precision_recall_curve_update(y_pred, y, class_num, pos_label): def _precision_recall_curve_update(y_pred, y, class_num, pos_label):
"""update curve""" """update curve"""
if not (len(y_pred.shape) == len(y.shape) or len(y_pred.shape) == len(y.shape) + 1): if not (len(y_pred.shape) == len(y.shape) or len(y_pred.shape) == len(y.shape) + 1):
raise ValueError(f"For 'ROC', predicted value (input[0]) and true value (input[1]) should have same " raise ValueError(f"For 'ROC', predicted value (input[0]) and true value (input[1]) must have same "
f"dimensions, or the dimension of predicted value equal the dimension of true value add " f"dimensions, or the dimension of predicted value equal the dimension of true value add "
f"1, but got predicted value ndim: {len(y_pred.shape)}, true value ndim: {len(y.shape)}.") f"1, but got predicted value ndim: {len(y_pred.shape)}, true value ndim: {len(y.shape)}.")
@ -196,7 +196,7 @@ def _precision_recall_curve_update(y_pred, y, class_num, pos_label):
if len(y_pred.shape) == len(y.shape): if len(y_pred.shape) == len(y.shape):
if class_num is not None and class_num != 1: if class_num is not None and class_num != 1:
raise ValueError(f"For 'ROC', when predicted value (input[0]) and true value (input[1]) have the same " raise ValueError(f"For 'ROC', when predicted value (input[0]) and true value (input[1]) have the same "
f"shape, the 'class_num' should be 1, but got {class_num}.") f"shape, the 'class_num' must be 1, but got {class_num}.")
class_num = 1 class_num = 1
if pos_label is None: if pos_label is None:
pos_label = 1 pos_label = 1
@ -207,10 +207,10 @@ def _precision_recall_curve_update(y_pred, y, class_num, pos_label):
elif len(y_pred.shape) == len(y.shape) + 1: elif len(y_pred.shape) == len(y.shape) + 1:
if pos_label is not None: if pos_label is not None:
raise ValueError(f"For 'ROC', when the dimension of predicted value (input[0]) equals the dimension " raise ValueError(f"For 'ROC', when the dimension of predicted value (input[0]) equals the dimension "
f"of true value (input[1]) add 1, the 'pos_label' should be None, " f"of true value (input[1]) add 1, the 'pos_label' must be None, "
f"but got {pos_label}.") f"but got {pos_label}.")
if class_num != y_pred.shape[1]: if class_num != y_pred.shape[1]:
raise ValueError("For 'ROC', the 'class_num' should equal the number of classes from predicted value " raise ValueError("For 'ROC', the 'class_num' must equal the number of classes from predicted value "
"(input[0]), but got 'class_num' {}, the number of classes from predicted value {}." "(input[0]), but got 'class_num' {}, the number of classes from predicted value {}."
.format(class_num, y_pred.shape[1])) .format(class_num, y_pred.shape[1]))
y_pred = y_pred.transpose(0, 1).reshape(class_num, -1).transpose(0, 1) y_pred = y_pred.transpose(0, 1).reshape(class_num, -1).transpose(0, 1)
@ -396,7 +396,7 @@ def _parallel_check():
raise RuntimeError("Currently, the pipeline parallel is not supported with applying adasum.") raise RuntimeError("Currently, the pipeline parallel is not supported with applying adasum.")
stage_device_num = _get_stage_device_num() stage_device_num = _get_stage_device_num()
if stage_device_num < 16 or (stage_device_num & (stage_device_num - 1) != 0): if stage_device_num < 16 or (stage_device_num & (stage_device_num - 1) != 0):
raise RuntimeError("The device_num should be at least 16 and should be the power of 2 when applying adasum.") raise RuntimeError("The device_num must be at least 16 and must be the power of 2 when applying adasum.")
class AdaSumByGradWrapCell(Cell): class AdaSumByGradWrapCell(Cell):
@ -172,7 +172,7 @@ class Momentum(Optimizer):
super(Momentum, self).__init__(learning_rate, params, weight_decay, loss_scale) super(Momentum, self).__init__(learning_rate, params, weight_decay, loss_scale)
Validator.check_value_type("momentum", momentum, [float], self.cls_name) Validator.check_value_type("momentum", momentum, [float], self.cls_name)
if isinstance(momentum, float) and momentum < 0.0: if isinstance(momentum, float) and momentum < 0.0:
raise ValueError("For 'Momentum', the argument 'momentum' should be at least 0.0, " raise ValueError("For 'Momentum', the argument 'momentum' must be at least 0.0, "
"but got {}".format(momentum)) "but got {}".format(momentum))
self.momentum = Parameter(Tensor(momentum, mstype.float32), name="momentum") self.momentum = Parameter(Tensor(momentum, mstype.float32), name="momentum")
self.params = self._parameters self.params = self._parameters
@ -320,7 +320,7 @@ class Optimizer(Cell):
@staticmethod @staticmethod
def _preprocess_grad_centralization(grad_centralization): def _preprocess_grad_centralization(grad_centralization):
if not isinstance(grad_centralization, bool): if not isinstance(grad_centralization, bool):
raise TypeError("For 'Optimizer', the 'gradients_centralization' should be bool type, " raise TypeError("For 'Optimizer', the 'gradients_centralization' must be bool type, "
"but got {}.".format(type(grad_centralization))) "but got {}.".format(type(grad_centralization)))
return grad_centralization return grad_centralization
@ -461,7 +461,7 @@ class Optimizer(Cell):
self.dynamic_weight_decay = True self.dynamic_weight_decay = True
weight_decay = _WrappedWeightDecay(weight_decay, self.loss_scale) weight_decay = _WrappedWeightDecay(weight_decay, self.loss_scale)
else: else:
raise TypeError("For 'Optimizer', the argument 'Weight_decay' should be int, " raise TypeError("For 'Optimizer', the argument 'Weight_decay' must be int, "
"float or Cell.but got {}".format(type(weight_decay))) "float or Cell.but got {}".format(type(weight_decay)))
return weight_decay return weight_decay
@ -489,7 +489,7 @@ class Optimizer(Cell):
return learning_rate return learning_rate
if isinstance(learning_rate, LearningRateSchedule): if isinstance(learning_rate, LearningRateSchedule):
return learning_rate return learning_rate
raise TypeError("For 'Optimizer', the argument 'learning_rate' should be int, float, Tensor, Iterable or " raise TypeError("For 'Optimizer', the argument 'learning_rate' must be int, float, Tensor, Iterable or "
"LearningRateSchedule, but got {}.".format(type(learning_rate))) "LearningRateSchedule, but got {}.".format(type(learning_rate)))
def _build_single_lr(self, learning_rate, name): def _build_single_lr(self, learning_rate, name):
@ -514,7 +514,7 @@ class Optimizer(Cell):
for group_param in parameters: for group_param in parameters:
invalid_key = list(filter(lambda x: x not in parse_keys, group_param.keys())) invalid_key = list(filter(lambda x: x not in parse_keys, group_param.keys()))
if invalid_key: if invalid_key:
raise KeyError(f"For 'Optimizer', the key in group params should be one of in {parse_keys}, " raise KeyError(f"For 'Optimizer', the key in group params must be one of in {parse_keys}, "
f"but got {invalid_key}.") f"but got {invalid_key}.")
if 'order_params' in group_param.keys(): if 'order_params' in group_param.keys():
@ -546,7 +546,7 @@ class Optimizer(Cell):
raise ValueError(f"For 'Optimizer', the order params dict in group parameters should only include " raise ValueError(f"For 'Optimizer', the order params dict in group parameters should only include "
f"the 'order_params' key, but got {group_param.keys()}.") f"the 'order_params' key, but got {group_param.keys()}.")
if not isinstance(group_param['order_params'], Iterable): if not isinstance(group_param['order_params'], Iterable):
raise TypeError("For 'Optimizer', the value of 'order_params' in group parameters should be " raise TypeError("For 'Optimizer', the value of 'order_params' in group parameters must be "
"Iterable type, but got {}.".format(type(group_param['order_params']))) "Iterable type, but got {}.".format(type(group_param['order_params'])))
self.is_group_params_ordered = True self.is_group_params_ordered = True
continue continue
@ -560,7 +560,7 @@ class Optimizer(Cell):
if tensor_lr_length == 0: if tensor_lr_length == 0:
tensor_lr_length = group_lr_length tensor_lr_length = group_lr_length
elif group_lr_length != tensor_lr_length: elif group_lr_length != tensor_lr_length:
raise ValueError("For 'Optimizer', the Tensor type dynamic learning rate in group should be " raise ValueError("For 'Optimizer', the Tensor type dynamic learning rate in group must be "
"the same size as the argument 'learning_rate'.") "the same size as the argument 'learning_rate'.")
def _init_group_params(self, parameters, learning_rate, weight_decay, grad_centralization): def _init_group_params(self, parameters, learning_rate, weight_decay, grad_centralization):
@ -623,7 +623,7 @@ class Optimizer(Cell):
params_length = len(self.group_params) params_length = len(self.group_params)
if len(ordered_parameters) != len(self.group_params): if len(ordered_parameters) != len(self.group_params):
raise ValueError(f"For 'Optimizer'," raise ValueError(f"For 'Optimizer',"
f"the length of order parameters should be the same as the length of group parameters, " f"the length of order parameters must be the same as the length of group parameters, "
f"but got order parameters' length {len(ordered_parameters)}, " f"but got order parameters' length {len(ordered_parameters)}, "
f"group parameters' length {len(self.group_params)}.") f"group parameters' length {len(self.group_params)}.")
@ -926,7 +926,7 @@ class _IteratorLearningRate(LearningRateSchedule):
raise ValueError(f"For 'Optimizer', the dimension of the argument 'learning_rate' should " raise ValueError(f"For 'Optimizer', the dimension of the argument 'learning_rate' should "
f"be 1, but got {learning_rate.ndim}.") f"be 1, but got {learning_rate.ndim}.")
else: else:
raise TypeError("For 'Optimizer', the argument 'learning_rate' should be Tensor, " raise TypeError("For 'Optimizer', the argument 'learning_rate' must be Tensor, "
"but got {}.".format(type(learning_rate))) "but got {}.".format(type(learning_rate)))
self.learning_rate = Parameter(learning_rate, name) self.learning_rate = Parameter(learning_rate, name)
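The group-parameter checks above (valid keys, an iterable 'order_params', matching lengths) correspond to the documented grouping pattern; a minimal sketch with Momentum, where the tiny Dense network is only illustrative:

import mindspore.nn as nn

net = nn.Dense(3, 4)
weight_params = [p for p in net.trainable_params() if 'weight' in p.name]
bias_params = [p for p in net.trainable_params() if 'bias' in p.name]
group_params = [{'params': weight_params, 'lr': 0.01},           # keys must come from the accepted parse keys
                {'params': bias_params, 'weight_decay': 0.0},
                {'order_params': net.trainable_params()}]         # must be iterable and cover every parameter
opt = nn.Momentum(group_params, learning_rate=0.1, momentum=0.9)  # momentum must be a float >= 0.0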
@ -152,14 +152,14 @@ class Rprop(Optimizer):
super(Rprop, self).__init__(learning_rate, params, weight_decay) super(Rprop, self).__init__(learning_rate, params, weight_decay)
if not isinstance(etas, tuple): if not isinstance(etas, tuple):
raise TypeError("For Rprop, etas should be a tuple, but got {}.".format(type(etas))) raise TypeError("For Rprop, etas must be a tuple, but got {}.".format(type(etas)))
if len(etas) != 2: if len(etas) != 2:
raise ValueError("For Rprop, etas should be a tuple with the size of 2, but got {}.".format(len(etas))) raise ValueError("For Rprop, etas must be a tuple with the size of 2, but got {}.".format(len(etas)))
if not isinstance(step_sizes, tuple): if not isinstance(step_sizes, tuple):
raise TypeError("For Rprop, step_sizes should be a tuple, but got {}.".format(type(etas))) raise TypeError("For Rprop, step_sizes must be a tuple, but got {}.".format(type(etas)))
if len(step_sizes) != 2: if len(step_sizes) != 2:
raise ValueError("For Rprop, step_sizes should be a tuple with the size of 2, " raise ValueError("For Rprop, step_sizes must be a tuple with the size of 2, "
"but got {}.".format(len(step_sizes))) "but got {}.".format(len(step_sizes)))
if step_sizes[0] > step_sizes[1]: if step_sizes[0] > step_sizes[1]:
@ -169,7 +169,7 @@ class Rprop(Optimizer):
validator.check_float_range(etas[0], 0.0, 1.0, Rel.INC_NEITHER, "etaminus", self.cls_name) validator.check_float_range(etas[0], 0.0, 1.0, Rel.INC_NEITHER, "etaminus", self.cls_name)
validator.check_value_type("etaplus", etas[1], [float], self.cls_name) validator.check_value_type("etaplus", etas[1], [float], self.cls_name)
if etas[1] <= 1.0: if etas[1] <= 1.0:
raise ValueError("For Rprop, etaplus should be greater than 1.0, but got etaplus {}.".format(etas[1])) raise ValueError("For Rprop, etaplus must be greater than 1.0, but got etaplus {}.".format(etas[1]))
validator.check_value_type("min_step_sizes", step_sizes[0], [float], self.cls_name) validator.check_value_type("min_step_sizes", step_sizes[0], [float], self.cls_name)
validator.check_value_type("max_step_sizes", step_sizes[1], [float], self.cls_name) validator.check_value_type("max_step_sizes", step_sizes[1], [float], self.cls_name)
@ -149,21 +149,21 @@ class SGD(Optimizer):
if isinstance(momentum, int): if isinstance(momentum, int):
momentum = float(momentum) momentum = float(momentum)
if not isinstance(momentum, float): if not isinstance(momentum, float):
raise TypeError("For 'SGD', the argument 'momentum' should be float type, " raise TypeError("For 'SGD', the argument 'momentum' must be float type, "
"but got {}.".format(type(momentum))) "but got {}.".format(type(momentum)))
if isinstance(momentum, float) and momentum < 0.0: if isinstance(momentum, float) and momentum < 0.0:
raise ValueError("For 'SGD', the argument 'momentum' should be at least 0.0, " raise ValueError("For 'SGD', the argument 'momentum' must be at least 0.0, "
"but got {}".format(momentum)) "but got {}".format(momentum))
if isinstance(dampening, int): if isinstance(dampening, int):
dampening = float(dampening) dampening = float(dampening)
if not isinstance(dampening, float): if not isinstance(dampening, float):
raise TypeError("For 'SGD', the argument 'dampening' should be float type, " raise TypeError("For 'SGD', the argument 'dampening' must be float type, "
"but got {}.".format(type(dampening))) "but got {}.".format(type(dampening)))
if dampening < 0.0: if dampening < 0.0:
raise ValueError("For 'SGD', the argument 'dampening' should be at least 0.0, " raise ValueError("For 'SGD', the argument 'dampening' must be at least 0.0, "
"but got 'dampening' {}".format(dampening)) "but got 'dampening' {}".format(dampening))
self.dampening = dampening self.dampening = dampening
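A minimal sketch of arguments that pass the SGD checks above (momentum and dampening are floats no less than 0.0); the Dense network is only illustrative:

import mindspore.nn as nn

net = nn.Dense(3, 4)
opt = nn.SGD(net.trainable_params(), learning_rate=0.1,
             momentum=0.9, dampening=0.0, weight_decay=0.0, nesterov=False)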
@ -108,11 +108,11 @@ def _check_param(momentum, frequency, lr, cls_name):
"""Check param.""" """Check param."""
Validator.check_value_type("momentum", momentum, [float], cls_name) Validator.check_value_type("momentum", momentum, [float], cls_name)
if isinstance(momentum, float) and momentum < 0.0: if isinstance(momentum, float) and momentum < 0.0:
raise ValueError("For 'thor', the argument 'momentum' should be at least 0.0, " raise ValueError("For 'thor', the argument 'momentum' must be at least 0.0, "
"but got 'momentum' {}.".format(momentum)) "but got 'momentum' {}.".format(momentum))
Validator.check_value_type("frequency", frequency, [int], cls_name) Validator.check_value_type("frequency", frequency, [int], cls_name)
if isinstance(frequency, int) and frequency < 2: if isinstance(frequency, int) and frequency < 2:
raise ValueError("For 'thor', the argument 'frequency' should be at least 2, " raise ValueError("For 'thor', the argument 'frequency' must be at least 2, "
"but got 'frequency' {}.".format(frequency)) "but got 'frequency' {}.".format(frequency))
Validator.check_value_type("learning rate", lr, [Tensor], cls_name) Validator.check_value_type("learning rate", lr, [Tensor], cls_name)
@ -25,7 +25,7 @@ def check_prior(prior_fn, arg_name):
prior = prior_fn() prior = prior_fn()
for prior_name, prior_dist in prior.name_cells().items(): for prior_name, prior_dist in prior.name_cells().items():
if prior_name != 'normal': if prior_name != 'normal':
raise TypeError(f"The type of distribution of `{arg_name}` should be `normal`") raise TypeError(f"The type of distribution of `{arg_name}` must be `normal`")
if not (isinstance(getattr(prior_dist, '_mean_value'), Tensor) and if not (isinstance(getattr(prior_dist, '_mean_value'), Tensor) and
isinstance(getattr(prior_dist, '_sd_value'), Tensor)): isinstance(getattr(prior_dist, '_sd_value'), Tensor)):
raise TypeError(f"The input form of `{arg_name}` is incorrect") raise TypeError(f"The input form of `{arg_name}` is incorrect")
@ -37,10 +37,10 @@ def check_posterior(posterior_fn, shape, param_name, arg_name):
try: try:
posterior = posterior_fn(shape=shape, name=param_name) posterior = posterior_fn(shape=shape, name=param_name)
except TypeError: except TypeError:
raise TypeError(f'The type of `{arg_name}` should be `NormalPosterior`') raise TypeError(f'The type of `{arg_name}` must be `NormalPosterior`')
finally: finally:
pass pass
for posterior_name, _ in posterior.name_cells().items(): for posterior_name, _ in posterior.name_cells().items():
if posterior_name != 'normal': if posterior_name != 'normal':
raise TypeError(f"The type of distribution of `{arg_name}` should be `normal`") raise TypeError(f"The type of distribution of `{arg_name}` must be `normal`")
return posterior return posterior
@ -66,14 +66,14 @@ class WithBNNLossCell(Cell):
def __init__(self, backbone, loss_fn, dnn_factor=1, bnn_factor=1): def __init__(self, backbone, loss_fn, dnn_factor=1, bnn_factor=1):
super(WithBNNLossCell, self).__init__(auto_prefix=False) super(WithBNNLossCell, self).__init__(auto_prefix=False)
if isinstance(dnn_factor, bool) or not isinstance(dnn_factor, (int, float)): if isinstance(dnn_factor, bool) or not isinstance(dnn_factor, (int, float)):
raise TypeError('The type of `dnn_factor` should be `int` or `float`') raise TypeError('The type of `dnn_factor` must be `int` or `float`')
if dnn_factor < 0: if dnn_factor < 0:
raise ValueError('The value of `dnn_factor` should be >= 0') raise ValueError('The value of `dnn_factor` should be >= 0')
if isinstance(bnn_factor, bool) or not isinstance(bnn_factor, (int, float)): if isinstance(bnn_factor, bool) or not isinstance(bnn_factor, (int, float)):
raise TypeError('The type of `bnn_factor` should be `int` or `float`') raise TypeError('The type of `bnn_factor` must be `int` or `float`')
if bnn_factor < 0: if bnn_factor < 0:
raise ValueError('The value of `bnn_factor` should >= 0') raise ValueError('The value of `bnn_factor` must be >= 0')
self._backbone = backbone self._backbone = backbone
self._loss_fn = loss_fn self._loss_fn = loss_fn
@ -79,24 +79,24 @@ class NormalPosterior(Cell):
untransformed_scale_std=0.1): untransformed_scale_std=0.1):
super(NormalPosterior, self).__init__() super(NormalPosterior, self).__init__()
if not isinstance(name, str): if not isinstance(name, str):
raise TypeError('The type of `name` should be `str`') raise TypeError('The type of `name` must be `str`')
if not isinstance(shape, (tuple, list)): if not isinstance(shape, (tuple, list)):
raise TypeError('The type of `shape` should be `tuple` or `list`') raise TypeError('The type of `shape` must be `tuple` or `list`')
if isinstance(loc_mean, bool) or not isinstance(loc_mean, (int, float)): if isinstance(loc_mean, bool) or not isinstance(loc_mean, (int, float)):
raise TypeError('The type of `loc_mean` should be `int` or `float`') raise TypeError('The type of `loc_mean` must be `int` or `float`')
if isinstance(untransformed_scale_mean, bool) or not isinstance(untransformed_scale_mean, (int, float)): if isinstance(untransformed_scale_mean, bool) or not isinstance(untransformed_scale_mean, (int, float)):
raise TypeError('The type of `untransformed_scale_mean` should be `int` or `float`') raise TypeError('The type of `untransformed_scale_mean` must be `int` or `float`')
if isinstance(loc_std, bool) or not (isinstance(loc_std, (int, float)) and loc_std >= 0): if isinstance(loc_std, bool) or not (isinstance(loc_std, (int, float)) and loc_std >= 0):
raise TypeError('The type of `loc_std` should be `int` or `float` and its value should > 0') raise TypeError('The type of `loc_std` must be `int` or `float` and its value must be > 0')
if isinstance(untransformed_scale_std, bool) or not (isinstance(untransformed_scale_std, (int, float)) and if isinstance(untransformed_scale_std, bool) or not (isinstance(untransformed_scale_std, (int, float)) and
untransformed_scale_std >= 0): untransformed_scale_std >= 0):
raise TypeError('The type of `untransformed_scale_std` should be `int` or `float` and ' raise TypeError('The type of `untransformed_scale_std` must be `int` or `float` and '
'its value should > 0') 'its value must be > 0')
self.mean = Parameter( self.mean = Parameter(
Tensor(np.random.normal(loc_mean, loc_std, shape), dtype=dtype), name=name + '_mean') Tensor(np.random.normal(loc_mean, loc_std, shape), dtype=dtype), name=name + '_mean')
@ -89,7 +89,7 @@ def check_greater_equal_zero(value, name):
value = value.data value = value.data
comp = np.less(value.asnumpy(), np.zeros(value.shape)) comp = np.less(value.asnumpy(), np.zeros(value.shape))
if comp.any(): if comp.any():
raise ValueError(f'{name} should be greater than ot equal to zero.') raise ValueError(f'{name} must be greater than or equal to zero.')
def check_greater_zero(value, name): def check_greater_zero(value, name):
@ -112,7 +112,7 @@ def check_greater_zero(value, name):
value = value.data value = value.data
comp = np.less(np.zeros(value.shape), value.asnumpy()) comp = np.less(np.zeros(value.shape), value.asnumpy())
if not comp.all(): if not comp.all():
raise ValueError(f'{name} should be greater than zero.') raise ValueError(f'{name} must be greater than zero.')
def check_greater(a, b, name_a, name_b): def check_greater(a, b, name_a, name_b):
@ -134,7 +134,7 @@ def check_greater(a, b, name_a, name_b):
return return
comp = np.less(a.asnumpy(), b.asnumpy()) comp = np.less(a.asnumpy(), b.asnumpy())
if not comp.all(): if not comp.all():
raise ValueError(f'{name_a} should be less than {name_b}') raise ValueError(f'{name_a} must be less than {name_b}')
def check_prob(p): def check_prob(p):
@ -155,10 +155,10 @@ def check_prob(p):
p = p.data p = p.data
comp = np.less(np.zeros(p.shape), p.asnumpy()) comp = np.less(np.zeros(p.shape), p.asnumpy())
if not comp.all(): if not comp.all():
raise ValueError('Probabilities should be greater than zero') raise ValueError('Probabilities must be greater than zero')
comp = np.greater(np.ones(p.shape), p.asnumpy()) comp = np.greater(np.ones(p.shape), p.asnumpy())
if not comp.all(): if not comp.all():
raise ValueError('Probabilities should be less than one') raise ValueError('Probabilities must be less than one')
def check_sum_equal_one(probs): def check_sum_equal_one(probs):
@ -232,8 +232,8 @@ def probs_to_logits(probs, is_binary=False):
@constexpr @constexpr
def raise_none_error(name): def raise_none_error(name):
raise TypeError(f"the type {name} should be subclass of Tensor." raise TypeError(f"the type {name} must be subclass of Tensor."
f" It should not be None since it is not specified during initialization.") f" It can not be None since it is not specified during initialization.")
@constexpr @constexpr
@ -250,7 +250,7 @@ def raise_broadcast_error(shape_a, shape_b):
@constexpr @constexpr
def raise_not_impl_error(name): def raise_not_impl_error(name):
raise ValueError( raise ValueError(
f"{name} function should be implemented for non-linear transformation") f"{name} function must be implemented for non-linear transformation")
@constexpr @constexpr
@ -262,7 +262,7 @@ def raise_not_implemented_util(func_name, obj, *args, **kwargs):
@constexpr @constexpr
def raise_type_error(name, cur_type, required_type): def raise_type_error(name, cur_type, required_type):
raise TypeError( raise TypeError(
f"For {name} , the type should be or be subclass of {required_type}, but got {cur_type}") f"For {name} , the type must be or be subclass of {required_type}, but got {cur_type}")
@constexpr @constexpr
@ -275,7 +275,7 @@ def raise_not_defined(func_name, obj, *args, **kwargs):
def check_distribution_name(name, expected_name): def check_distribution_name(name, expected_name):
if name is None: if name is None:
raise ValueError( raise ValueError(
f"Input dist should be a constant which is not None.") f"Input dist must be a constant which is not None.")
if name != expected_name: if name != expected_name:
raise ValueError( raise ValueError(
f"Expected dist input is {expected_name}, but got {name}.") f"Expected dist input is {expected_name}, but got {name}.")
@ -293,7 +293,7 @@ class CheckTuple(PrimitiveWithInfer):
def __infer__(self, x, name): def __infer__(self, x, name):
if not isinstance(x['dtype'], tuple): if not isinstance(x['dtype'], tuple):
raise TypeError( raise TypeError(
f"For {name['value']}, Input type should b a tuple.") f"For {name['value']}, Input type must b a tuple.")
out = {'shape': None, out = {'shape': None,
'dtype': None, 'dtype': None,
@ -306,7 +306,7 @@ class CheckTuple(PrimitiveWithInfer):
return x return x
if context.get_context("mode") == 0: if context.get_context("mode") == 0:
return x["value"] return x["value"]
raise TypeError(f"For {name}, input type should be a tuple.") raise TypeError(f"For {name}, input type must be a tuple.")
class CheckTensor(PrimitiveWithInfer): class CheckTensor(PrimitiveWithInfer):
@ -334,7 +334,7 @@ class CheckTensor(PrimitiveWithInfer):
if x is None or isinstance(x, Tensor): if x is None or isinstance(x, Tensor):
return x return x
raise TypeError( raise TypeError(
f"For {name}, input type should be a Tensor or Parameter.") f"For {name}, input type must be a Tensor or Parameter.")
def set_param_type(args, hint_type): def set_param_type(args, hint_type):
@ -59,11 +59,11 @@ class ConditionalVAE(Cell):
self.encoder = encoder self.encoder = encoder
self.decoder = decoder self.decoder = decoder
if (not isinstance(encoder, Cell)) or (not isinstance(decoder, Cell)): if (not isinstance(encoder, Cell)) or (not isinstance(decoder, Cell)):
raise TypeError('The encoder and decoder should be Cell type.') raise TypeError('The encoder and decoder must be Cell type.')
self.hidden_size = Validator.check_positive_int(hidden_size) self.hidden_size = Validator.check_positive_int(hidden_size)
self.latent_size = Validator.check_positive_int(latent_size) self.latent_size = Validator.check_positive_int(latent_size)
if hidden_size < latent_size: if hidden_size < latent_size:
raise ValueError('The latent_size should be less than or equal to the hidden_size.') raise ValueError('The latent_size must be less than or equal to the hidden_size.')
self.num_classes = Validator.check_positive_int(num_classes) self.num_classes = Validator.check_positive_int(num_classes)
self.normal = C.normal self.normal = C.normal
self.exp = P.Exp() self.exp = P.Exp()
@ -110,7 +110,7 @@ class ConditionalVAE(Cell):
""" """
generate_nums = Validator.check_positive_int(generate_nums) generate_nums = Validator.check_positive_int(generate_nums)
if not isinstance(shape, tuple) or len(shape) != 4 or (shape[0] != -1 and shape[0] != generate_nums): if not isinstance(shape, tuple) or len(shape) != 4 or (shape[0] != -1 and shape[0] != generate_nums):
raise ValueError('The shape should be (generate_nums, C, H, W) or (-1, C, H, W).') raise ValueError('The shape must be (generate_nums, C, H, W) or (-1, C, H, W).')
sample_z = self.normal((generate_nums, self.latent_size), self.to_tensor(0.0), self.to_tensor(1.0), seed=0) sample_z = self.normal((generate_nums, self.latent_size), self.to_tensor(0.0), self.to_tensor(1.0), seed=0)
sample_y = self.one_hot(sample_y) sample_y = self.one_hot(sample_y)
sample_c = self.concat((sample_z, sample_y)) sample_c = self.concat((sample_z, sample_y))
@ -54,11 +54,11 @@ class VAE(Cell):
self.encoder = encoder self.encoder = encoder
self.decoder = decoder self.decoder = decoder
if (not isinstance(encoder, Cell)) or (not isinstance(decoder, Cell)): if (not isinstance(encoder, Cell)) or (not isinstance(decoder, Cell)):
raise TypeError('The encoder and decoder should be Cell type.') raise TypeError('The encoder and decoder must be Cell type.')
self.hidden_size = Validator.check_positive_int(hidden_size) self.hidden_size = Validator.check_positive_int(hidden_size)
self.latent_size = Validator.check_positive_int(latent_size) self.latent_size = Validator.check_positive_int(latent_size)
if hidden_size < latent_size: if hidden_size < latent_size:
raise ValueError('The latent_size should be less than or equal to the hidden_size.') raise ValueError('The latent_size must be less than or equal to the hidden_size.')
self.normal = C.normal self.normal = C.normal
self.exp = P.Exp() self.exp = P.Exp()
self.reshape = P.Reshape() self.reshape = P.Reshape()
@ -99,7 +99,7 @@ class VAE(Cell):
""" """
generate_nums = Validator.check_positive_int(generate_nums) generate_nums = Validator.check_positive_int(generate_nums)
if not isinstance(shape, tuple) or len(shape) != 4 or (shape[0] != -1 and shape[0] != generate_nums): if not isinstance(shape, tuple) or len(shape) != 4 or (shape[0] != -1 and shape[0] != generate_nums):
raise ValueError('The shape should be (generate_nums, C, H, W) or (-1, C, H, W).') raise ValueError('The shape must be (generate_nums, C, H, W) or (-1, C, H, W).')
sample_z = self.normal((generate_nums, self.latent_size), self.to_tensor(0.0), self.to_tensor(1.0), seed=0) sample_z = self.normal((generate_nums, self.latent_size), self.to_tensor(0.0), self.to_tensor(1.0), seed=0)
sample = self._decode(sample_z) sample = self._decode(sample_z)
sample = self.reshape(sample, shape) sample = self.reshape(sample, shape)
@ -43,10 +43,10 @@ class SVI:
self.net_with_loss = net_with_loss self.net_with_loss = net_with_loss
self.loss_fn = getattr(net_with_loss, '_loss_fn') self.loss_fn = getattr(net_with_loss, '_loss_fn')
if not isinstance(self.loss_fn, ELBO): if not isinstance(self.loss_fn, ELBO):
raise TypeError('The loss function for variational inference should be ELBO.') raise TypeError('The loss function for variational inference must be ELBO.')
self.optimizer = optimizer self.optimizer = optimizer
if not isinstance(optimizer, Cell): if not isinstance(optimizer, Cell):
raise TypeError('The optimizer should be Cell type.') raise TypeError('The optimizer must be Cell type.')
self._loss = 0.0 self._loss = 0.0
def run(self, train_dataset, epochs=10): def run(self, train_dataset, epochs=10):
@ -73,7 +73,7 @@ class VAEAnomalyDetection:
float, the predicted outlier score of the sample. float, the predicted outlier score of the sample.
""" """
if not isinstance(sample_x, Tensor): if not isinstance(sample_x, Tensor):
raise TypeError("The sample_x should be Tensor type.") raise TypeError("The sample_x must be Tensor type.")
reconstructed_sample = self.vae.reconstruct_sample(sample_x) reconstructed_sample = self.vae.reconstruct_sample(sample_x)
return self._calculate_euclidean_distance(sample_x.asnumpy(), reconstructed_sample.asnumpy()) return self._calculate_euclidean_distance(sample_x.asnumpy(), reconstructed_sample.asnumpy())

self.sum = P.ReduceSum() self.sum = P.ReduceSum()
self.pow = P.Pow() self.pow = P.Pow()
if not isinstance(model, Cell): if not isinstance(model, Cell):
raise TypeError('The model should be Cell type.') raise TypeError('The model must be Cell type.')
if task_type not in ('regression', 'classification'): if task_type not in ('regression', 'classification'):
raise ValueError( raise ValueError(
'The task should be regression or classification.') 'The task should be regression or classification.')
@ -67,14 +67,14 @@ class TransformToBNN:
def __init__(self, trainable_dnn, dnn_factor=1, bnn_factor=1): def __init__(self, trainable_dnn, dnn_factor=1, bnn_factor=1):
if isinstance(dnn_factor, bool) or not isinstance(dnn_factor, (int, float)): if isinstance(dnn_factor, bool) or not isinstance(dnn_factor, (int, float)):
raise TypeError('The type of `dnn_factor` should be `int` or `float`') raise TypeError('The type of `dnn_factor` must be `int` or `float`')
if dnn_factor < 0: if dnn_factor < 0:
raise ValueError('The value of `dnn_factor` should >= 0') raise ValueError('The value of `dnn_factor` must be >= 0')
if isinstance(bnn_factor, bool) or not isinstance(bnn_factor, (int, float)): if isinstance(bnn_factor, bool) or not isinstance(bnn_factor, (int, float)):
raise TypeError('The type of `bnn_factor` should be `int` or `float`') raise TypeError('The type of `bnn_factor` must be `int` or `float`')
if bnn_factor < 0: if bnn_factor < 0:
raise ValueError('The value of `bnn_factor` should >= 0') raise ValueError('The value of `bnn_factor` must be >= 0')
net_with_loss = trainable_dnn.network net_with_loss = trainable_dnn.network
self.optimizer = trainable_dnn.optimizer self.optimizer = trainable_dnn.optimizer
@ -46,7 +46,7 @@ class BayesianNet(nn.Cell):
""" Normal distribution wrapper """ """ Normal distribution wrapper """
if not isinstance(name, str): if not isinstance(name, str):
raise TypeError("The type of `name` should be string") raise TypeError("The type of `name` must be string")
if observation is None: if observation is None:
if reparameterize: if reparameterize:
@ -72,7 +72,7 @@ class BayesianNet(nn.Cell):
""" Bernoulli distribution wrapper """ """ Bernoulli distribution wrapper """
if not isinstance(name, str): if not isinstance(name, str):
raise TypeError("The type of `name` should be string") raise TypeError("The type of `name` must be string")
if observation is None: if observation is None:
sample = self.bernoulli_dist('sample', shape, probs) sample = self.bernoulli_dist('sample', shape, probs)
@ -109,7 +109,7 @@ class _LayerInputCheck:
            if len(input_shape) == item:
                matched = True
        if not matched:
-            raise ValueError(f"{func_name} {param_name} shape length should be one of {target_len} dimension, "
+            raise ValueError(f"{func_name} {param_name} shape length must be one of {target_len} dimension, "
                             f"but got shape {input_shape}")
        return True
@ -136,7 +136,7 @@ class _LayerInputCheck:
                break
        if not matched:
-            raise ValueError(f"{func_name} {param_name} shape should be one of {target_shape},"
+            raise ValueError(f"{func_name} {param_name} shape must be one of {target_shape},"
                             f"but got {input_shape}")
        return True
@ -144,7 +144,7 @@ class _LayerInputCheck:
    def check_shape_value_on_axis(input_shape, dim, param_name, cls_name, target_value):
        """ Check whether the input_shape[dim] is equal to target value"""
        if input_shape[dim] != target_value:
-            raise ValueError(f"{cls_name} {param_name} at {dim} shape should be {target_value},"
+            raise ValueError(f"{cls_name} {param_name} at {dim} shape must be {target_value},"
                             f"but got {input_shape[dim]}")
        return True
@ -155,13 +155,13 @@ def _check_past_none_input_none(use_past, param_name, func_name, default_value,
    """ If the past is True, check whether the inputs is None"""
    if not use_past:
        if is_tensor:
-            raise TypeError(f"{func_name} {param_name} should be {default_value}, if use_pat is False, but found "
+            raise TypeError(f"{func_name} {param_name} must be {default_value}, if use_pat is False, but found "
                            f"a tensor")
        if not is_default:
-            raise TypeError(f"{func_name} {param_name} should be {default_value}, if use_pat is False.")
+            raise TypeError(f"{func_name} {param_name} must be {default_value}, if use_pat is False.")
    else:
        if not is_tensor:
-            raise TypeError(f"{func_name} {param_name} should be tensor, if use_pat is True")
+            raise TypeError(f"{func_name} {param_name} must be tensor, if use_pat is True")
    return True


@ -61,13 +61,13 @@ class MoEConfig:
        Validator.check_positive_float(aux_loss_factor, "aux_loss_factor")
        Validator.check_positive_int(num_experts_chosen, "num_experts_chosen")
        if capacity_factor < 1.0:
-            raise ValueError(f"'capacity_factor' should be equal to or greater than 1.0, "
+            raise ValueError(f"'capacity_factor' must be equal to or greater than 1.0, "
                             f"but got {capacity_factor}.")
        if aux_loss_factor >= 1.0:
-            raise ValueError(f"'aux_loss_factor' should be less than 1.0, "
+            raise ValueError(f"'aux_loss_factor' must be less than 1.0, "
                             f"but got {aux_loss_factor}.")
        if num_experts_chosen > expert_num:
-            raise ValueError(f"'num_experts_chosen' should not be larger than 'expert_num', "
+            raise ValueError(f"'num_experts_chosen' must not be larger than 'expert_num', "
                             f"but got {num_experts_chosen}.")
        self.expert_num = expert_num
        self.capacity_factor = capacity_factor
@ -83,7 +83,7 @@ def _check_moe_config(moe_config=None, parallel_config=None):
    check if MoE with right configuration.
    """
    if not isinstance(moe_config, MoEConfig):
-        raise TypeError(f"'moe_config' should be an instance of MoEConfig, but got {type(moe_config).__name__}.")
+        raise TypeError(f"'moe_config' must be an instance of MoEConfig, but got {type(moe_config).__name__}.")
    use_moe = (moe_config.expert_num > 1)
    if use_moe is False:
        return
@ -95,10 +95,10 @@ def _check_moe_config(moe_config=None, parallel_config=None):
        device_num = D.get_group_size()
        if device_num % parallel_config.expert_parallel != 0:
-            raise ValueError(f"device_num: {device_num} should be a multiple of expert_parallel: "
+            raise ValueError(f"device_num: {device_num} must be a multiple of expert_parallel: "
                             f"{parallel_config.expert_parallel}.")
        if parallel_config.data_parallel % parallel_config.expert_parallel != 0:
-            raise ValueError(f"data parallel: {parallel_config.data_parallel} should be a multiple of "
+            raise ValueError(f"data parallel: {parallel_config.data_parallel} must be a multiple of "
                             f"expert_parallel: {parallel_config.expert_parallel} when using MoE.")
    if parallel_config.data_parallel * parallel_config.model_parallel > device_num:
        raise ValueError(f"The product of the data parallel: {parallel_config.data_parallel} and "


@ -241,7 +241,7 @@ class TransformerOpParallelConfig(_Config):
    @recompute.setter
    def recompute(self, value):
        if not isinstance(value, TransformerRecomputeConfig) and not isinstance(value, bool):
-            raise TypeError(f"recompute should be a TransformerRecomputeConfig/bool, but got {type(value).__name__}.")
+            raise TypeError(f"recompute must be a TransformerRecomputeConfig/bool, but got {type(value).__name__}.")
        if isinstance(value, bool):
            logger.warning(f"TransformerRecomputeConfig is recommended as the recompute configuration type.")
        self._recompute = value
@ -858,7 +858,7 @@ class MultiHeadAttention(Cell):
            raise ValueError("For 'MultiHeadAttention', the class variable 'attention_dropout_rate' must be "
                             "in range [0, 1.0), but got the value : {}.".format(attention_dropout_rate))
        if hidden_size % num_heads != 0:
-            raise ValueError("For 'MultiHeadAttention', the class variable 'hidden_size' should be a multiple "
+            raise ValueError("For 'MultiHeadAttention', the class variable 'hidden_size' must be a multiple "
                             "of 'num_heads', but got the hidden_size is {} and the num_heads is {}."
                             .format(hidden_size, num_heads))
        if num_heads % parallel_config.model_parallel != 0:
@ -952,7 +952,7 @@ class MultiHeadAttention(Cell):
            raise ValueError("For 'MultiHeadAttention', the class variable 'attention_dropout_rate' must be "
                             "in range [0, 1.0), but got the value : {}.".format(attention_dropout_rate))
        if hidden_size % num_heads != 0:
-            raise ValueError("For 'MultiHeadAttention', the class variable 'hidden_size' should be a multiple "
+            raise ValueError("For 'MultiHeadAttention', the class variable 'hidden_size' must be a multiple "
                             "of 'num_heads', but got the hidden_size is {} and the num_heads is {}."
                             .format(hidden_size, num_heads))
        if num_heads % parallel_config.model_parallel != 0:


@ -256,17 +256,17 @@ class ForwardValueAndGrad(Cell):
        super(ForwardValueAndGrad, self).__init__(auto_prefix=False)
        if not isinstance(network, (Cell, FunctionType, MethodType)):
            raise TypeError(f"For 'ForwardValueAndGrad', "
-                            f"the argument 'network' should be cell, function type or method type, "
+                            f"the argument 'network' must be cell, function type or method type, "
                            f"but got '{type(network)}'")
        if not isinstance(get_all, bool):
            raise TypeError(f"For 'ForwardValueAndGrad', "
-                            f"the type of 'get_all' should be bool, but got '{type(get_all)}'")
+                            f"the type of 'get_all' must be bool, but got '{type(get_all)}'")
        if not isinstance(get_by_list, bool):
            raise TypeError(f"For 'ForwardValueAndGrad', "
-                            f"the type of 'get_by_list' should be bool, but got '{type(get_by_list)}'")
+                            f"the type of 'get_by_list' must be bool, but got '{type(get_by_list)}'")
        if get_by_list and not isinstance(weights, ParameterTuple):
            raise TypeError(f"For 'ForwardValueAndGrad', "
-                            f"when 'get_by_list' is set to True, the argument 'weights' should be "
+                            f"when 'get_by_list' is set to True, the argument 'weights' must be "
                            f"ParameterTuple type, but got '{type(weights)}'")
        self.network = network
        if isinstance(network, Cell):
@ -456,7 +456,7 @@ class _VirtualDatasetCell(Cell):
def _check_shape_value_on_axis_divided_by_target_value(input_shape, dim, param_name, cls_name, target_value):
    if input_shape[dim] % target_value != 0:
        raise ValueError(f"For MicroBatchInterleaved initialization, "
-                         f"{cls_name} {param_name} at {dim} shape should be divided by {target_value},"
+                         f"{cls_name} {param_name} at {dim} shape must be divided by {target_value},"
                         f"but got {input_shape[dim]}")
    return True
@ -512,10 +512,10 @@ class MicroBatchInterleaved(Cell):
    def __init__(self, network, interleave_num=2):
        super(MicroBatchInterleaved, self).__init__(auto_prefix=False)
        if not isinstance(interleave_num, int):
-            raise TypeError("For 'MicroBatchInterleaved', the argument 'interleave_num' should be integer, "
+            raise TypeError("For 'MicroBatchInterleaved', the argument 'interleave_num' must be integer, "
                            "but got the type : {}.".format(type(interleave_num)))
        if interleave_num <= 0:
-            raise ValueError("For 'MicroBatchInterleaved', the argument 'interleave_num' should be greater than 0, "
+            raise ValueError("For 'MicroBatchInterleaved', the argument 'interleave_num' must be greater than 0, "
                             "but got {}.".format(interleave_num))
        self.network = network
        self.interleave_num = interleave_num
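A short usage sketch of how the ForwardValueAndGrad argument checks above surface; the tiny network and shapes are illustrative only and not taken from this change:

    import numpy as np
    import mindspore.nn as nn
    from mindspore import Tensor, ParameterTuple

    class TinyNet(nn.Cell):
        def __init__(self):
            super().__init__()
            self.dense = nn.Dense(3, 1)

        def construct(self, x):
            return self.dense(x).sum()

    net = TinyNet()
    weights = ParameterTuple(net.trainable_params())
    # Valid: bool flags and a ParameterTuple, so none of the TypeErrors above fire.
    forward_and_grad = nn.ForwardValueAndGrad(net, weights=weights, get_all=True, get_by_list=True)
    value, grads = forward_and_grad(Tensor(np.ones((2, 3), np.float32)))
    # nn.ForwardValueAndGrad(net, get_all=1) would raise: the type of 'get_all' must be bool.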


@ -497,7 +497,7 @@ class _TrainPipelineWithLossScaleCell(TrainOneStepCell):
        self.clear_before_grad = P.NPUClearFloatStatus()
        self.reduce_sum = P.ReduceSum(keep_dims=False)
        if self.parallel_mode not in [ParallelMode.SEMI_AUTO_PARALLEL, ParallelMode.AUTO_PARALLEL]:
-            raise ValueError(f"ParallelMode should be one of "
+            raise ValueError(f"ParallelMode must be one of "
                             f"[ParallelMode.SEMI_AUTO_PARALLEL, ParallelMode.AUTO_PARALLEL], but found "
                             f"{self.parallel_mode}.")
        self.allreduce = P.AllReduce()


@ -55,14 +55,14 @@ def _check_mn_shape(m_shape, n_shape, km_shape, kn_shape):
    if m_shape != 1:
        if n_shape == 1 and km_shape % (cce.BLOCK_IN * cce.BLOCK_IN) != 0:
-            raise RuntimeError("input shape K1 should be multiple of %d"
+            raise RuntimeError("input shape K1 must be multiple of %d"
                               % (cce.BLOCK_IN * cce.BLOCK_IN))
        if km_shape % cce.BLOCK_REDUCE != 0:
            raise RuntimeError(
                "input shape K1 should be multiple of %d" % cce.BLOCK_IN)
    else:
        if km_shape % (cce.BLOCK_IN * cce.BLOCK_IN) != 0:
-            raise RuntimeError("input shape K1 should be multiple of %d"
+            raise RuntimeError("input shape K1 must be multiple of %d"
                               % (cce.BLOCK_IN * cce.BLOCK_IN))
@ -117,12 +117,12 @@ def _shape_check(shape_a, shape_b, shape_bias, src_dtype, trans_a, trans_b):
    if shape_len != 2:
        raise RuntimeError(
-            "length of shape must be 2, more than 2 dimensions should use batch_matmul now!")
+            "length of shape must be 2, more than 2 dimensions must use batch_matmul now!")
    m_shape, km_shape, n_shape, kn_shape = _get_km_kn_shape(shape_a, shape_b, trans_a, trans_b)
    if n_shape % cce.BLOCK_IN != 0 and n_shape != 1:
-        raise RuntimeError("input shape N should be 1 or multiple of %d" % cce.BLOCK_IN)
+        raise RuntimeError("input shape N must be 1 or multiple of %d" % cce.BLOCK_IN)
    _check_mn_shape(m_shape, n_shape, km_shape, kn_shape)
    _check_bias(shape_bias, shape_a, shape_b, m_shape, n_shape)


@ -52,7 +52,7 @@ def _get_bias_broadcast_shape(x_shape, bias_shape, bias_dim, data_format):
        # In the 'NHWC' data format ('BN**C' actually), the last dimension is channel axis.
        x_channel = x_shape[-1]
        if x_channel != bias_channel:
-            raise ValueError("For 'BiadAdd, bias_channel should be equal to x_channel, "
+            raise ValueError("For 'BiadAdd, bias_channel must be equal to x_channel, "
                             "but got date format: {}, got bias_channel: {}, "
                             "x_channel: {}.".format(data_format, bias_channel, x_channel))
        if bias_dim is None:
@ -63,7 +63,7 @@ def _get_bias_broadcast_shape(x_shape, bias_shape, bias_dim, data_format):
        # In the 'NCHW' or 'NCDHW' data format ('BNC**' actually), the third dimension is channel axis.
        x_channel = x_shape[2]
        if x_channel != bias_channel:
-            raise ValueError("For 'BiadAdd, bias_channel should be equal to x_channel, but got date format: "
+            raise ValueError("For 'BiadAdd, bias_channel must be equal to x_channel, but got date format: "
                             "{}, got bias_channel: {}, x_channel: {}.".format(data_format, bias_channel, x_channel))
        bias_broadcast_shape = (bias_batch, 1, bias_channel)
        if x_rank == x_min_rank:


@ -122,11 +122,11 @@ tensor_operator_registry.register('repeat_elements', repeat_elements)
def _check_sequence_mask_input_len(input_shape, prim_name=None):
    msg_prefix = f"For '{prim_name}', the" if prim_name else "The"
    if not input_shape:
-        raise ValueError(f"{msg_prefix} input_shape should be greater than 0, but got {input_shape}.")
+        raise ValueError(f"{msg_prefix} input_shape must be greater than 0, but got {input_shape}.")
    # broadcast only supports 7d shape
    shape_size = len(input_shape)
    if shape_size >= 7:
-        raise ValueError(f"{msg_prefix} dimension of input_shape should be less than 7, but got {shape_size}d.")
+        raise ValueError(f"{msg_prefix} dimension of input_shape must be less than 7, but got {shape_size}d.")
def sequence_mask(lengths, maxlen=None):
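For context, a brief sketch of the public sequence_mask entry point these checks guard; the export path is assumed, not shown in this diff:

    import numpy as np
    import mindspore.ops as ops
    from mindspore import Tensor

    lengths = Tensor(np.array([1, 3, 2], np.int32))
    mask = ops.sequence_mask(lengths, maxlen=4)  # bool mask of shape (3, 4)
    # A lengths tensor with 7 or more dimensions would trip the
    # "dimension of input_shape must be less than 7" check above.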


@ -30,7 +30,7 @@ from mindspore.ops.primitive import constexpr
def _check_output_shape(input_shape, out_shape, prim_name=None):
    msg_prefix = f"For '{prim_name}', the" if prim_name else "The"
    if input_shape != out_shape:
-        raise ValueError(f"{msg_prefix} input 'x' shape should be equal to the output shape, but got "
+        raise ValueError(f"{msg_prefix} input 'x' shape must be equal to the output shape, but got "
                         f"input 'x' shape {input_shape}, output shape {out_shape}.")


@ -133,7 +133,7 @@ def _check_axes(axes, prim_name=None):
    if not isinstance(axes, int):
        axes = list(axes)  # to avoid immutability issues
        if len(axes) != 2:
-            raise ValueError(f"{msg_prefix} dimension of 'axes' should be 2, but got 'axes': {axes}.")
+            raise ValueError(f"{msg_prefix} dimension of 'axes' must be 2, but got 'axes': {axes}.")
        axes = _int_to_tuple_conv(axes)  # convert before length checks
        if len(axes[0]) != len(axes[1]):
            raise ValueError(f"{msg_prefix} first and second dim of 'axes' have to be the same size/length, "
@ -193,7 +193,7 @@ def _validate_axes(x1_shape, x2_shape, axes, prim_name=None):
        axes_len = len(x_axes)
        shape_dim_len = len(shapes[ix_input])
        if axes_len > shape_dim_len:
-            raise ValueError(f"{msg_prefix} length of element {x_axes} in 'axes' should be less than or equal to "
+            raise ValueError(f"{msg_prefix} length of element {x_axes} in 'axes' must be less than or equal to "
                             f"{shape_dim_len}, but got {axes_len}.")
    # axis values range check
@ -203,7 +203,7 @@ def _validate_axes(x1_shape, x2_shape, axes, prim_name=None):
        min_val = -1 * len(comp_shape)
        for _, x_value in enumerate(x_axes):
            if not min_val <= x_value <= max_val:
-                raise ValueError(f"{msg_prefix} value in 'axes' should be in range: [{min_val}, {max_val}], "
+                raise ValueError(f"{msg_prefix} value in 'axes' must be in range: [{min_val}, {max_val}], "
                                 f"but got {x_value}.")
    # check axis value with input shape - both ways for axis valid
@ -705,7 +705,7 @@ def _check_matmul_shapes(shape1, shape2, prim_name=None):
        raise ValueError(f"{msg_prefix} dimension of input operands must be at least 1, but got "
                         f"the length of shape1: {ndim1}, the length of shape2: {ndim2}.")
    if ndim2 >= 2 and shape1[-1] != shape2[-2]:
-        raise ValueError(f"{msg_prefix} shape1[-1] should be equal to shape2[-2] when the length of shape2 "
+        raise ValueError(f"{msg_prefix} shape1[-1] must be equal to shape2[-2] when the length of shape2 "
                         f"is greater than or equal to 2, but got shape1[-1]: {shape1[-1]}, "
                         f"shape2[-2]: {shape2[-2]}.")
    shape_out = deque()
@ -845,9 +845,9 @@ def _create_cummin_perm(axis, x_shape):
    """Insure axis is in [-len(x_shape),len(s_shape)-1]"""
    len_axis = len(x_shape)
    if not isinstance(axis, int):
-        raise TypeError(f"The date type of 'axis' should be Int, but got {axis}.")
+        raise TypeError(f"The date type of 'axis' must be Int, but got {axis}.")
    if axis < -len_axis or axis > len_axis:
-        raise ValueError(f"The value of axis should be in [{-len_axis}, {len_axis}], but got {axis}.")
+        raise ValueError(f"The value of axis must be in [{-len_axis}, {len_axis}], but got {axis}.")
    prem = [i for i in range(len_axis)]
    if axis < 0:
        axis = axis + len_axis
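A minimal illustration of the shape rule checked by _check_matmul_shapes above, using the public matmul wrapper (assumed to route through this check):

    import numpy as np
    import mindspore.ops as ops
    from mindspore import Tensor

    a = Tensor(np.ones((2, 3), np.float32))
    b = Tensor(np.ones((3, 4), np.float32))
    print(ops.matmul(a, b).shape)  # (2, 4): shape1[-1] matches shape2[-2]
    # ops.matmul(a, Tensor(np.ones((5, 4), np.float32))) would raise the ValueError above.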


@ -1108,7 +1108,7 @@ def reduce_(a, reduce_fn, cmp_fn=None, axis=None, keepdims=False, initial=None,
    if initial is not None:
        if ((isinstance(initial, Tensor) and F.rank(initial) > 0) or
                not isinstance(initial, (int, float, bool, Tensor))):
-            const_utils.raise_type_error('initial should be scalar')
+            const_utils.raise_type_error('initial must be scalar')
    if F.shape_mul(shape) == 0:
        const_utils.raise_value_error('zero-size tensors are not supported.')


@ -533,10 +533,10 @@ def _judge_order_continuous(order_sequence):
def scalar_in_sequence(x, y):
    """Determine whether the scalar in the sequence."""
    if x is None:
-        raise ValueError("Judge scalar in tuple or list require scalar and sequence should be constant, "
+        raise ValueError("Judge scalar in tuple or list require scalar and sequence must be constant, "
                         "but the scalar is not.")
    if y is None:
-        raise ValueError("Judge scalar in tuple or list require scalar and sequence should be constant, "
+        raise ValueError("Judge scalar in tuple or list require scalar and sequence must be constant, "
                         "but the sequence is not.")
    return x in y


@ -152,7 +152,7 @@ def _convert_grad_position_type(grad_position):
    if isinstance(grad_position, tuple):
        for gp in grad_position:
            if not isinstance(gp, int):
-                raise TypeError(f"For 'F.grad', the element in 'grad_position' should be int, "
+                raise TypeError(f"For 'F.grad', the element in 'grad_position' must be int, "
                                f"but got {type(gp).__name__}")
            if gp < 0:
                raise ValueError("The element in grad_position must be >= 0.")
@ -161,7 +161,7 @@ def _convert_grad_position_type(grad_position):
            raise ValueError("grad_position must be >= 0.")
        grad_position = (grad_position,)
    else:
-        raise TypeError(f"For 'F.grad', the 'grad_position' should be int or tuple, "
+        raise TypeError(f"For 'F.grad', the 'grad_position' must be int or tuple, "
                        f"but got {type(grad_position).__name__}")
    return grad_position
@ -220,7 +220,7 @@ def _trans_jet_inputs(primals_item, series_item):
    """Trans inputs of jet"""
    value_type = [mstype.int32, mstype.int64, mstype.float32, mstype.float64]
    if not dtype(primals_item) in value_type or dtype(primals_item) != dtype(series_item):
-        raise TypeError(f"For `F.jet`, the elements' types of primals and series should be the same and belong to "
+        raise TypeError(f"For `F.jet`, the elements' types of primals and series must be the same and belong to "
                        f"`mstype.int32, mstype.int64, mstype.float32, mstype.float64`, but got"
                        f" {dtype(primals_item).__name__} and {dtype(series_item).__name__}.")
    if dtype(primals_item) in [mstype.int32, mstype.int64]:
@ -232,15 +232,15 @@ def _trans_jet_inputs(primals_item, series_item):
def _check_jet_inputs(primals, series):
    """Check inputs of jet"""
    if not isinstance(primals, type(series)) or not isinstance(primals, (Tensor, tuple)):
-        raise TypeError(f"For 'F.jet', the 'primals' and `series` should be both Tensor or tuple, "
+        raise TypeError(f"For 'F.jet', the 'primals' and `series` must be both Tensor or tuple, "
                        f"but got {type(primals).__name__} and {type(series).__name__}.")
    if isinstance(primals, Tensor):
        if primals.shape != series.shape[1:]:
-            raise ValueError("The shape of each element should be the same as the primals.")
+            raise ValueError("The shape of each element must be the same as the primals.")
        return _trans_jet_inputs(primals, series)
    if isinstance(primals, tuple):
        if len(primals) != len(series):
-            raise ValueError("The lengths of primals and series should be the same.")
+            raise ValueError("The lengths of primals and series must be the same.")
        check_primals = []
        check_series = []
        for i, j in zip(primals, series):
@ -323,7 +323,7 @@ def _trans_derivative_inputs(primals_item):
    """Trans inputs of derivative"""
    value_type = [mstype.int32, mstype.int64, mstype.float32, mstype.float64]
    if not dtype(primals_item) in value_type:
-        raise TypeError(f"For `F.derivative`, the elements of primals should belong to "
+        raise TypeError(f"For `F.derivative`, the elements of primals must belong to "
                        f"`mstype.int32, mstype.int64, mstype.float32, mstype.float64`, but got"
                        f" {dtype(primals_item).__name__}.")
    if dtype(primals_item) in [mstype.int32, mstype.int64]:
@ -661,7 +661,7 @@ def narrow(inputs, axis, start, length):
@constexpr
def _raise_type_error():
-    raise TypeError("The inputs type should be a Tensor, tuple or list of Tensors.")
+    raise TypeError("The inputs type must be a Tensor, tuple or list of Tensors.")
@constexpr
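A hedged sketch of the grad_position behaviour validated above; it assumes the functional interface referenced as F.grad is reachable as mindspore.ops.grad in this version:

    import numpy as np
    import mindspore.ops as ops  # ops.grad export path is assumed
    from mindspore import Tensor

    def fn(x, y):
        return x * x + y

    x = Tensor(np.array(2.0, np.float32))
    y = Tensor(np.array(3.0, np.float32))
    dx = ops.grad(fn, grad_position=0)(x, y)          # derivative w.r.t. x only
    dx_dy = ops.grad(fn, grad_position=(0, 1))(x, y)  # tuple of both derivatives
    # ops.grad(fn, grad_position=1.5) would raise: 'grad_position' must be int or tuple.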


@ -136,7 +136,7 @@ class SyncBatchNormGrad(PrimitiveWithInfer):
    def __init__(self, epsilon=1e-5, group="group0", device_num=2):
        validator.check_float_range(epsilon, 0, 1, Rel.INC_RIGHT, 'epsilon', self.name)
        if not isinstance(group, str):
-            raise TypeError("The group attr of SyncBatchNormGrad should be str.")
+            raise TypeError("The group attr of SyncBatchNormGrad must be str.")
        validator.check_int(device_num, 2, Rel.GE, "device_num", self.name)
    def infer_shape(self, y_backprop_shape, x_shape, scale_shape, save_mean_shape, save_variance_shape):
@ -313,7 +313,7 @@ class Conv3DBackpropFilter(PrimitiveWithInfer):
        validator.check_value_type('pad_mode', pad_mode, [str], self.name)
        self.pad_mode = validator.check_string(pad_mode.lower(), ['valid', 'same', 'pad'], 'pad_mode', self.name)
        if self.pad_mode != 'pad' and self.pad_list != (0, 0, 0, 0, 0, 0):
-            raise ValueError(f"For '{self.name}', when pad is not 0, pad_mode should be set as 'pad'.")
+            raise ValueError(f"For '{self.name}', when pad is not 0, pad_mode must be set as 'pad'.")
        if self.pad_mode == 'pad':
            for item in pad:
                validator.check_non_negative_int(item, 'pad item', self.name)
@ -755,7 +755,7 @@ class _PoolGrad(PrimitiveWithInfer):
        def _grad_check_int_or_tuple(arg_name, arg_val, is_argmax):
            validator.check_value_type(arg_name, arg_val, (int, tuple), self.name)
-            error_msg = ValueError(f"For '{self.name}' the '{arg_name}' should be an positive int number "
+            error_msg = ValueError(f"For '{self.name}' the '{arg_name}' must be an positive int number "
                                   f"or a tuple of two or four positive int numbers, but got {arg_val}")
            if isinstance(arg_val, int):
                ret = (1, arg_val, arg_val, 1) if is_argmax else (1, 1, arg_val, arg_val)
@ -963,10 +963,10 @@ class MaxPool3DGrad(PrimitiveWithInfer):
        if len(self.pad_list) == 3:
            self.pad_list = (pad_list[0], pad_list[0], pad_list[1], pad_list[1], pad_list[2], pad_list[3])
        if len(self.pad_list) != 3 and len(self.pad_list) != 6:
-            raise ValueError(f"For `maxpool3d` attr 'pad_list' should be an positive int number or a tuple of "
+            raise ValueError(f"For `maxpool3d` attr 'pad_list' must be an positive int number or a tuple of "
                             f"three or six positive int numbers, but got `{len(self.pad_list)}` numbers.")
        if self.pad_mode != 'CALCULATED' and self.pad_list != (0, 0, 0, 0, 0, 0):
-            raise ValueError(f"For '{self.name}', when pad_list is not 0, pad_mode should be set as 'pad'.")
+            raise ValueError(f"For '{self.name}', when pad_list is not 0, pad_mode must be set as 'pad'.")
        if self.pad_mode == 'CALCULATED':
            for item in self.pad_list:
                validator.check_non_negative_int(item, 'pad_list item', self.name)
@ -1036,7 +1036,7 @@ class MaxPoolGradWithArgmax(_PoolGrad):
    def infer_shape(self, x_shape, grad_shape, argmax_shape):
        if not grad_shape:
-            raise TypeError("The dout of MaxPoolGradWithArgmax should be a Tensor.")
+            raise TypeError("The dout of MaxPoolGradWithArgmax must be a Tensor.")
        return x_shape
    def infer_dtype(self, x_dtype, grad_dtype, argmax_dtype):
@ -1082,7 +1082,7 @@ class MaxPoolGradGradWithArgmax(_PoolGrad):
    def infer_shape(self, x_shape, grad_shape, argmax_shape):
        if not grad_shape:
-            raise TypeError("The dout of MaxPoolGradGradWithArgmax should be a Tensor.")
+            raise TypeError("The dout of MaxPoolGradGradWithArgmax must be a Tensor.")
        return x_shape
    def infer_dtype(self, x_dtype, grad_dtype, argmax_dtype):
@ -1703,7 +1703,7 @@ class ResizeBilinearGrad(PrimitiveWithInfer):
        self.half_pixel_centers = validator.check_value_type("half_pixel_centers",
                                                             half_pixel_centers, [bool], self.name)
        if half_pixel_centers and align_corners:
-            raise ValueError(f"If half_pixel_centers is True, align_corners should be False, but got {align_corners}")
+            raise ValueError(f"If half_pixel_centers is True, align_corners must be False, but got {align_corners}")
        target = context.get_context("device_target")
        if half_pixel_centers and target.lower() != "ascend":
            raise ValueError(f"Currently `half_pixel_centers`=True only support in Ascend device_target, "
@ -2334,7 +2334,7 @@ class ParallelResizeBilinearGrad(PrimitiveWithInfer):
        validator.check_tensor_dtype_valid("grad_dtype", grad_dtype, [mstype.float16, mstype.float32], self.name)
        validator.check_tensor_dtype_valid("x_dtype", x_dtype, [mstype.float16, mstype.float32], self.name)
        if size_val is None:
-            raise ValueError("size should be const input")
+            raise ValueError("size must be const input")
        output_shape = [grad_shape[0], grad_shape[1], x_shape[2], x_shape[3]]
        return {'shape': output_shape,


@ -108,10 +108,10 @@ class ExtractImagePatches(PrimitiveWithInfer):
        def _check_tuple_or_list(arg_name, arg_val, prim_name):
            validator.check_value_type(f"{arg_name}s", arg_val, [tuple, list], self.name)
            if len(arg_val) != 4 or arg_val[0] != 1 or arg_val[1] != 1:
-                raise ValueError(f"For \'{prim_name}\' the format of {arg_name}s should be [1, {arg_name}_row, "
+                raise ValueError(f"For \'{prim_name}\' the format of {arg_name}s must be [1, {arg_name}_row, "
                                 f"{arg_name}_col, 1], but got {arg_val}.")
            if not isinstance(arg_val[2], int) or not isinstance(arg_val[3], int) or arg_val[2] < 1 or arg_val[3] < 1:
-                raise ValueError(f"For '{prim_name}' the {arg_name}_row and {arg_name}_col in {arg_name}s should be "
+                raise ValueError(f"For '{prim_name}' the {arg_name}_row and {arg_name}_col in {arg_name}s must be "
                                 f"an positive integer number, but got {arg_name}_row is {arg_val[2]}, "
                                 f"{arg_name}_col is {arg_val[3]}")
@ -126,7 +126,7 @@ class ExtractImagePatches(PrimitiveWithInfer):
    def infer_shape(self, input_x):
        """infer shape"""
        if len(input_x) != 4:
-            raise ValueError("The `input_x` should be a 4-D tensor, "
+            raise ValueError("The `input_x` must be a 4-D tensor, "
                             f"but got a {len(input_x)}-D tensor whose shape is {input_x}")
        in_batch, in_depth, in_row, in_col = input_x
@ -205,10 +205,10 @@ class Range(PrimitiveWithInfer):
        if self.delta == 0.0:
            raise ValueError("The input of `delta` can not be equal to zero.")
        if self.delta > 0.0 and self.start > self.limit:
-            raise ValueError(f"Limit should be greater than start when delta:{self.delta} is more than zero, "
+            raise ValueError(f"Limit must be greater than start when delta:{self.delta} is more than zero, "
                             f"but got start:{self.start}, limit:{self.limit}")
        if self.delta < 0.0 and self.start < self.limit:
-            raise ValueError(f"Start should be greater than limit when delta:{self.delta} is less than zero, "
+            raise ValueError(f"Start must be greater than limit when delta:{self.delta} is less than zero, "
                             f"but got start:{self.start}, limit:{self.limit}")
    def infer_shape(self, x_shape):
@ -1775,7 +1775,7 @@ class ParallelResizeBilinear(PrimitiveWithInfer):
        x_dtype = x['dtype']
        validator.check_tensor_dtype_valid("x_dtype", x_dtype, [mstype.float16, mstype.float32], self.name)
        if size_val is None:
-            raise ValueError("size should be const input")
+            raise ValueError("size must be const input")
        output_shape = [x_shape[0], x_shape[1], self.split_size[0], self.split_size[1]]
        return {'shape': output_shape,
@ -1884,7 +1884,7 @@ class CellBackwardHook(PrimitiveWithInfer):
            TypeError: If the `hook_fn` is not a function of python.
        """
        if not isinstance(hook_fn, (FunctionType, MethodType)):
-            raise TypeError(f"When using 'register_backward_hook(hook_fn)', the type of 'hook_fn' should be python "
+            raise TypeError(f"When using 'register_backward_hook(hook_fn)', the type of 'hook_fn' must be python "
                            f"function, but got {type(hook_fn)}.")
        key = self.add_backward_hook_fn(hook_fn)
        return key


@ -544,12 +544,12 @@ def ms_hybrid(fn=None, reg_info=None, compile_attrs=None):
        compile_attrs = {}
    if not isinstance(compile_attrs, dict):
-        raise TypeError("The input 'compile_attrs' of @ms_hybrid should be a dict, "
+        raise TypeError("The input 'compile_attrs' of @ms_hybrid must be a dict, "
                        "but get a {}".format(type(compile_attrs)))
    for key in compile_attrs.keys():
        if not isinstance(key, str):
-            raise TypeError("The key of 'compile_attrs' of @ms_hybrid should be a str, "
+            raise TypeError("The key of 'compile_attrs' of @ms_hybrid must be a str, "
                            "but get a {}".format(type(key)))
    if reg_info is not None and not isinstance(reg_info, (str, dict, tuple)):


@ -164,7 +164,7 @@ class MinMaxUpdatePerChannel(PrimitiveWithInfer):
    def infer_shape(self, x_shape, min_shape, max_shape):
        if self.is_ascend and len(x_shape) not in self.ascend_support_x_rank:
-            raise ValueError(f"For '{self.name}' x rank should be in '{self.ascend_support_x_rank}'")
+            raise ValueError(f"For '{self.name}' x rank must be in '{self.ascend_support_x_rank}'")
        if not self.is_ascend:
            validator.check_int(len(x_shape), 1, Rel.GE, "x rank", self.name)
        validator.check("min shape", min_shape, "max shape",
@ -382,7 +382,7 @@ class FakeLearnedScaleQuantPerChannel(PrimitiveWithInfer):
    def infer_shape(self, input_x_shape, alpha_shape, quant_max_shape):
        if self.is_ascend and len(input_x_shape) not in self.ascend_support_x_rank:
-            raise ValueError(f"For '{self.name}' x rank should be in '{self.ascend_support_x_rank}'")
+            raise ValueError(f"For '{self.name}' x rank must be in '{self.ascend_support_x_rank}'")
        if not self.is_ascend:
            validator.check_int(len(input_x_shape), 1, Rel.GE, "input_x rank", self.name)
        if len(input_x_shape) == 1:
@ -943,7 +943,7 @@ class FakeQuantPerChannel(PrimitiveWithInfer):
    def infer_shape(self, x_shape, min_shape, max_shape):
        if self.is_ascend and len(x_shape) not in self.ascend_support_x_rank:
-            raise ValueError(f"For '{self.name}' x rank should be in '{self.ascend_support_x_rank}'")
+            raise ValueError(f"For '{self.name}' x rank must be in '{self.ascend_support_x_rank}'")
        if not self.is_ascend:
            validator.check_int(len(x_shape), 1, Rel.GE, "x rank", self.name)
        if len(x_shape) == 1:


@ -188,15 +188,15 @@ class DiscountedReturn(PrimitiveWithInfer):
    def infer_shape(self, reward_shape, done_shape, last_state_value_shape):
        if len(reward_shape) != len(done_shape):
-            raise ValueError(f'{self.name} len(reward) and len(done) should be same, ',
+            raise ValueError(f'{self.name} len(reward) and len(done) must be same, ',
                             f'but got {len(reward_shape)} and {len(done_shape)}.')
        if reward_shape[0] != done_shape[0]:
-            raise ValueError(f'{self.name} timestep of reward and done should be same, ',
+            raise ValueError(f'{self.name} timestep of reward and done must be same, ',
                             f'but got {reward_shape[0]} and {done_shape[0]}.')
        if reward_shape[1:] != last_state_value_shape:
-            raise ValueError(f'{self.name} state value shape should be match, ',
+            raise ValueError(f'{self.name} state value shape must be match, ',
                             f'but got {reward_shape[1:]} and {last_state_value_shape}.')
        return reward_shape
@ -536,7 +536,7 @@ class BatchAssign(PrimitiveWithInfer):
        validator.check_equal_int(len(dst_shape), len(source_shape), "inputs elements", self.name)
        for i, shp in enumerate(dst_shape):
            if shp != source_shape[i]:
-                raise ValueError(f'{self.name} element should be same, ',
+                raise ValueError(f'{self.name} element must be same, ',
                                 f'but got {shp} and {dst_shape[i]}.')
        return []
@ -640,7 +640,7 @@ class TensorsQueuePut(PrimitiveWithInfer):
        validator.check_equal_int(len(elements_shape), self.elements_num, "inputs elements", self.name)
        for i, shape in enumerate(elements_shape):
            if tuple(shape) != self.shapes[i]:
-                raise ValueError(f'{self.name} init shape and ipnut shape should be same, ',
+                raise ValueError(f'{self.name} init shape and ipnut shape must be the same, ',
                                 f'but got {self.shapes[i]} and input {shape} in position {i}.')
        return ()


@ -1118,7 +1118,7 @@ class Split(PrimitiveWithCheck):
            # only validate when shape fully known
            output_valid_check = x_shape[self.axis] % self.output_num
            if output_valid_check != 0:
-                raise ValueError(f"For '{self.name}', the specified axis of 'input_x' should be divided exactly by "
+                raise ValueError(f"For '{self.name}', the specified axis of 'input_x' must be divided exactly by "
                                 f"'output_num', but got the shape of 'input_x' in 'axis' {self.axis} is "
                                 f"{x_shape[self.axis]}, 'output_num': {self.output_num}.")
        size_splits = [x_shape[self.axis] // self.output_num] * self.output_num
@ -2092,7 +2092,7 @@ class Tile(PrimitiveWithInfer):
    def check_elim(self, base_tensor, multiplier):
        if not isinstance(base_tensor, Tensor):
-            raise TypeError(f"For '{self.name}', the type of 'input_x' should be Tensor, "
+            raise TypeError(f"For '{self.name}', the type of 'input_x' must be Tensor, "
                            f"but got {type(base_tensor).__name__}.")
        if all(v == 1 for v in multiplier) and len(base_tensor.shape) >= len(multiplier):
            ret = Identity()(base_tensor)
@ -3710,7 +3710,7 @@ class StridedSlice(PrimitiveWithInfer):
            if j < len(shrink_axis_pos) and shrink_axis_pos[j] == '1':
                if (not -x_shape[i] <= begin < x_shape[i]) or stride < 0:
                    raise IndexError(f"For '{self.name}', the 'strides[{i}]' cannot be negative number and "
-                                     f"'begin[{i}]' should be in [-{x_shape[i]}, {x_shape[i]}) "
+                                     f"'begin[{i}]' must be in [-{x_shape[i]}, {x_shape[i]}) "
                                     f"when 'shrink_axis_mask' is greater than 0, "
                                     f"but got 'shrink_axis_mask': {self.shrink_axis_mask}, "
                                     f"'strides[{i}]': {stride}, 'begin[{i}]': {begin}.")
@ -3745,8 +3745,8 @@ class StridedSlice(PrimitiveWithInfer):
                continue
            if j < len(shrink_axis_pos) and shrink_axis_pos[j] == '1':
                if (not -x_shape[i] <= begin < x_shape[i]) or stride < 0:
-                    raise IndexError(f"For '{self.name}', the 'strides[{i}]' cannot be negative number and "
-                                     f"'begin[{i}]' should be in [-{x_shape[i]}, {x_shape[i]}) "
+                    raise IndexError(f"For '{self.name}', the 'strides[{i}]' can not be negative number and "
+                                     f"'begin[{i}]' must be in [-{x_shape[i]}, {x_shape[i]}) "
                                     f"when 'shrink_axis_mask' is greater than 0, "
                                     f"but got 'shrink_axis_mask': {self.shrink_axis_mask}, "
                                     f"'strides[{i}]': {stride}, 'begin[{i}]': {begin}.")
@ -5543,7 +5543,7 @@ class SpaceToBatch(PrimitiveWithInfer):
            padded = out_shape[i + 2] + self.paddings[i][0] + self.paddings[i][1]
            if padded % self.block_size != 0:
                msg_ndim = "2nd" if i + 2 == 2 else "3rd"
-                raise ValueError(f"For '{self.name}', the shape of the output tensor should be "
+                raise ValueError(f"For '{self.name}', the shape of the output tensor must be "
                                 f"divisible by 'block_size', but got the {msg_ndim} dimension of output: {padded} and "
                                 f"'block_size': {self.block_size}. Please check the official homepage "
                                 f"for more information about the output tensor.")
@ -5748,7 +5748,7 @@ class SpaceToBatchND(PrimitiveWithInfer):
            padded = out_shape[i + offset] + self.paddings[i][0] + \
                     self.paddings[i][1]
            if padded % self.block_shape[i] != 0:
-                raise ValueError(f"For '{self.name}', the padded should be divisible by 'block_shape', "
+                raise ValueError(f"For '{self.name}', the padded must be divisible by 'block_shape', "
                                 f"where padded = input_x_shape[i + 2] + paddings[i][0] + paddings[i][1], "
                                 f"but got input_x_shape[{i + offset}]: {out_shape[i + offset]}, "
                                 f"paddings[{i}][0]: {self.paddings[i][0]} and paddings[{i}][1]: {self.paddings[i][1]}."
@ -6663,7 +6663,7 @@ class SearchSorted(PrimitiveWithInfer):
    def infer_shape(self, sequence_shape, values_shape):
        if len(sequence_shape) != 1 and sequence_shape[:-1] != values_shape[:-1]:
-            raise ValueError(f"For '{self.name}', the 'sequence' should be 1 dimensional or "
+            raise ValueError(f"For '{self.name}', the 'sequence' must be 1 dimensional or "
                             f"all dimensions except the last dimension of 'sequence' "
                             f"must be the same as all dimensions except the last dimension of 'values'. "
                             f"but got shape of 'sequence': {sequence_shape} "


@ -163,9 +163,9 @@ class AllReduce(PrimitiveWithInfer):
    def __init__(self, op=ReduceOp.SUM, group=GlobalComm.WORLD_COMM_GROUP):
        """Initialize AllReduce."""
        if not isinstance(op, type(ReduceOp.SUM)):
-            raise TypeError(f"For '{self.name}', the 'op' should be str, but got {type(op).__name__}.")
+            raise TypeError(f"For '{self.name}', the 'op' must be str, but got {type(op).__name__}.")
        if not isinstance(_get_group(group), str):
-            raise TypeError(f"For '{self.name}', the 'group' should be str, "
+            raise TypeError(f"For '{self.name}', the 'group' must be str, "
                            f"but got {type(_get_group(group)).__name__}.")
        check_hcom_group_valid(group, prim_name=self.name)
        self.op = op
@ -453,9 +453,9 @@ class ReduceScatter(PrimitiveWithInfer):
    def infer_shape(self, x_shape):
        if self.rank_size == 0:
-            raise ValueError(f"For '{self.name}', the 'rank_size' cannot be zero, but got {self.rank_size}.")
+            raise ValueError(f"For '{self.name}', the 'rank_size' can not be zero, but got {self.rank_size}.")
        if x_shape[0] % self.rank_size != 0:
-            raise ValueError(f"For '{self.name}', the first dimension of 'x_shape' should be divided by 'rank_size', "
+            raise ValueError(f"For '{self.name}', the first dimension of 'x_shape' must be divided by 'rank_size', "
                             f"but got 'x_shape[0]': {x_shape[0]}, 'rank_size': {self.rank_size}.")
        x_shape[0] = int(x_shape[0] / self.rank_size)
        return x_shape
@ -509,7 +509,7 @@ class _HostReduceScatter(PrimitiveWithInfer):
    def infer_shape(self, x_shape):
        if x_shape[0] % self.group_size != 0:
-            raise ValueError(f"For '{self.name}', the first dimension of 'x_shape' should be divided by 'group_size', "
+            raise ValueError(f"For '{self.name}', the first dimension of 'x_shape' must be divided by 'group_size', "
                             f"but got 'x_shape[0]': {x_shape[0]}, 'rank_size': {self.group_size}.")
        x_shape[0] = int(x_shape[0] / self.group_size)
        return x_shape
@ -595,7 +595,7 @@ class Broadcast(PrimitiveWithInfer):
    def infer_dtype(self, x_dtype):
        if not isinstance(x_dtype, tuple):
-            raise TypeError(f"For '{self.name}', the 'input_x' should be a tuple, but got {type(x_dtype).__name__}!")
+            raise TypeError(f"For '{self.name}', the 'input_x' must be a tuple, but got {type(x_dtype).__name__}!")
        for _ele in x_dtype:
            check_collective_target_dtype('x', _ele, self.name)
        return x_dtype


@ -398,9 +398,9 @@ class Custom(ops.PrimitiveWithInfer):
    def _check_julia_func(self):
        """Check the validity of julia func"""
        if not isinstance(self.func, str):
-            raise TypeError("{}, 'func' should be of type str, but got {}".format(self.log_prefix, type(self.func)))
+            raise TypeError("{}, 'func' must be of type str, but got {}".format(self.log_prefix, type(self.func)))
        if self.func.count(':') != 2:
-            raise ValueError("{}, the format of 'func' should be file:module:func".format(self.log_prefix))
+            raise ValueError("{}, the format of 'func' must be file:module:func".format(self.log_prefix))
        source_file, module, func = self.func.split(':')
        with open(source_file, 'r') as f:
            jl = f.read()
@ -414,17 +414,17 @@ class Custom(ops.PrimitiveWithInfer):
    def _check_func(self):
        """Check the validity of func_type and type of func"""
        if self.func_type not in self.supported_func_type:
-            raise ValueError("{}, 'func_type' should be one of {}, but got {}"
+            raise ValueError("{}, 'func_type' must be one of {}, but got {}"
                             .format(self.log_prefix, self.supported_func_type, self.func_type))
        if self.func_type == "aot":
            if not isinstance(self.func, str):
-                raise TypeError("{}, 'func' should be of type str, but got {}".format(
+                raise TypeError("{}, 'func' must be of type str, but got {}".format(
                    self.log_prefix, type(self.func)))
        elif self.func_type == "julia":
            self._check_julia_func()
        elif self.func_type == "hybrid":
            if not hasattr(self.func, "ms_hybrid_flag"):
-                raise TypeError("{}, 'func' should a function decorated by ms_hybrid".format(self.log_prefix))
+                raise TypeError("{}, 'func' must a function decorated by ms_hybrid".format(self.log_prefix))
            self._is_ms_hybrid = True
            self._func_compile_attrs = getattr(self.func, "compile_attrs", {})
        elif self.func_type == "akg":
@ -440,7 +440,7 @@ class Custom(ops.PrimitiveWithInfer):
                                .format(self.log_prefix))
        else:
            if not callable(self.func):
-                raise TypeError("{}, 'func' should be of type function, but got {}"
+                raise TypeError("{}, 'func' must be of type function, but got {}"
                                .format(self.log_prefix, type(self.func)))
    def _update_func_info(self):
@ -480,7 +480,7 @@ class Custom(ops.PrimitiveWithInfer):
            # uniq func name
            self.uniq_name = self.name + "_" + self.func_name
        else:
-            raise TypeError("For '{}', 'func' should be of type function or str, but got {}"
+            raise TypeError("For '{}', 'func' must be of type function or str, but got {}"
                            .format(self.name, type(self.func)))
    def _register_info(self, info):
@ -569,7 +569,7 @@ class Custom(ops.PrimitiveWithInfer):
    def _reformat_reg_info(self, reg_info, target):
        """Reformat registration information."""
        if not isinstance(reg_info, dict):
-            raise TypeError("{}, the registration information should be of type dict, but got {} with type {}. Use "
+            raise TypeError("{}, the registration information must be of type dict, but got {} with type {}. Use "
                            "'CustomRegOp' to generate the registration information, then pass it to 'reg_info' or "
                            "use 'custom_info_register' to bind it to 'func' if 'func' is a function."
                            .format(self.log_prefix, reg_info, type(reg_info)))
@ -615,7 +615,7 @@ class Custom(ops.PrimitiveWithInfer):
        func_type_to_target = {"tbe": "Ascend", "pyfunc": "CPU"}
        target = func_type_to_target.get(self.func_type)
        if target not in self.supported_targets:
-            raise ValueError("{}, target set in registration information should be one of {}, but got {}"
+            raise ValueError("{}, target set in registration information must be one of {}, but got {}"
                             .format(self.log_prefix, self.supported_targets, target))
        return target
@ -836,11 +836,11 @@ class Custom(ops.PrimitiveWithInfer):
        # after all automatic infer information fulfillment, throw error if infer_shape/infer_dtype is still None
        if not isinstance(infer_shape, (tuple, list)):
-            raise TypeError("{}, 'out_shape' should be one of [tuple, list, function], but got {}"
+            raise TypeError("{}, 'out_shape' must be one of [tuple, list, function], but got {}"
                            .format(self.log_prefix, type(infer_shape)))
        if not isinstance(infer_dtype, (typing.Type, tuple, list)):
-            raise TypeError("{}, 'out_dtype' should be one of [mindspore.dtype, tuple, list, function], but got {}"
+            raise TypeError("{}, 'out_dtype' must be one of [mindspore.dtype, tuple, list, function], but got {}"
                            .format(self.log_prefix, type(infer_dtype)))
        out = {

View File

@ -38,7 +38,7 @@ def _check_summary_param(name, value, class_name):
n_value = name['value'] n_value = name['value']
validator.check_value_type('name', n_type, [type(mstype.string)], class_name) validator.check_value_type('name', n_type, [type(mstype.string)], class_name)
if not n_value: if not n_value:
raise ValueError(f"For '{class_name}', the name should be valid string, but got '{n_value}'.") raise ValueError(f"For '{class_name}', the name must be valid string, but got '{n_value}'.")
v_type = value['dtype'] v_type = value['dtype']
validator.check_value_type('value', v_type, [type(mstype.tensor)], class_name) validator.check_value_type('value', v_type, [type(mstype.tensor)], class_name)
@ -144,7 +144,7 @@ class ImageSummary(PrimitiveWithInfer):
v_shape = value['shape'] v_shape = value['shape']
image_dim = 4 image_dim = 4
if len(v_shape) != image_dim: if len(v_shape) != image_dim:
raise ValueError(f"For '{self.name}', the dimension of 'value' should be {image_dim}," raise ValueError(f"For '{self.name}', the dimension of 'value' must be {image_dim},"
f" but got {len(v_shape)}.") f" but got {len(v_shape)}.")
return SUMMARY_RETURN_VALUE return SUMMARY_RETURN_VALUE
@ -243,7 +243,7 @@ class HistogramSummary(PrimitiveWithInfer):
v_shape = value['shape'] v_shape = value['shape']
# In the summary, the histogram value should be a tensor whose shape is not []. # In the summary, the histogram value should be a tensor whose shape is not [].
if not v_shape: if not v_shape:
raise ValueError(f"For '{self.name}', the type of 'value' should be tensor, " raise ValueError(f"For '{self.name}', the type of 'value' must be tensor, "
f"its shape should not be [], but got {v_shape}.") f"its shape should not be [], but got {v_shape}.")
return SUMMARY_RETURN_VALUE return SUMMARY_RETURN_VALUE
@ -385,7 +385,7 @@ class HookBackward(PrimitiveWithInfer):
"""Initialize HookBackward.""" """Initialize HookBackward."""
super(HookBackward, self).__init__(self.__class__.__name__) super(HookBackward, self).__init__(self.__class__.__name__)
if not isinstance(hook_fn, (FunctionType, MethodType)): if not isinstance(hook_fn, (FunctionType, MethodType)):
raise TypeError(f"For '{self.name}', the type of 'hook_fn' should be python function, " raise TypeError(f"For '{self.name}', the type of 'hook_fn' must be python function, "
f"but got {type(hook_fn)}.") f"but got {type(hook_fn)}.")
if cell_id != "": if cell_id != "":
logger.warning(f"The args 'cell_id' of HookBackward will be removed in a future version. If the value of " logger.warning(f"The args 'cell_id' of HookBackward will be removed in a future version. If the value of "

View File

@ -1441,7 +1441,7 @@ class BatchMatMul(MatMul):
def check_shape_size(self, x, y): def check_shape_size(self, x, y):
if len(x) < 3 or len(y) < 2: if len(x) < 3 or len(y) < 2:
raise ValueError(f"For '{self.name}', input 'x' should be greater than or equal to 3, input 'y' should " raise ValueError(f"For '{self.name}', input 'x' must be greater than or equal to 3, input 'y' should "
f"be greater than or equal to 2, but got 'x' size: {len(x)}, 'y' size: {len(y)}.") f"be greater than or equal to 2, but got 'x' size: {len(x)}, 'y' size: {len(y)}.")
@ -1566,7 +1566,7 @@ class AddN(Primitive):
return False, None return False, None
if isinstance(inputs[0], Tensor): if isinstance(inputs[0], Tensor):
return True, inputs[0] return True, inputs[0]
raise TypeError(f"For '{self.name}', the type of 'inputs[0]' should be a tensor, but " raise TypeError(f"For '{self.name}', the type of 'inputs[0]' must be a tensor, but "
f"got {type(inputs[0]).__name__}, " f"got {type(inputs[0]).__name__}, "
f"or the length of 'inputs' should not be equal to 1, but got ({len(inputs)}).") f"or the length of 'inputs' should not be equal to 1, but got ({len(inputs)}).")
@ -1624,7 +1624,7 @@ class AccumulateNV2(Primitive):
return False, None return False, None
if isinstance(inputs[0], Tensor): if isinstance(inputs[0], Tensor):
return True, inputs[0] return True, inputs[0]
raise TypeError(f"For '{self.name}', the type of 'inputs[0]' should be a tensor, " raise TypeError(f"For '{self.name}', the type of 'inputs[0]' must be a tensor, "
f"but got {type(inputs[0]).__name__}, " f"but got {type(inputs[0]).__name__}, "
f"or the length of 'inputs' should not be equal to 1, but got ({len(inputs)}).") f"or the length of 'inputs' should not be equal to 1, but got ({len(inputs)}).")

View File

@ -35,7 +35,7 @@ def _check_positive_int_or_tuple(arg_name, arg_value, prim_name, allow_four=Fals
""" """
def _raise_message(): def _raise_message():
raise ValueError(f"For '{prim_name}' attr '{arg_name}' should be an positive int number or a tuple of two " raise ValueError(f"For '{prim_name}' attr '{arg_name}' must be an positive int number or a tuple of two "
f"{'or four ' if allow_four else ''}positive int numbers, but got {arg_value}") f"{'or four ' if allow_four else ''}positive int numbers, but got {arg_value}")
def _get_return_value(): def _get_return_value():
@ -66,7 +66,7 @@ def _check_shape(arg_name, arg_value, prim_name):
""" """
def _raise_message(): def _raise_message():
raise ValueError(f"For '{prim_name}' attr '{arg_name}' dims elements should be positive int numbers, " raise ValueError(f"For '{prim_name}' attr '{arg_name}' dims elements must be positive int numbers, "
f"but got {arg_value}") f"but got {arg_value}")
validator.check_value_type(arg_name, arg_value, (list, tuple), prim_name) validator.check_value_type(arg_name, arg_value, (list, tuple), prim_name)
@ -270,7 +270,7 @@ class AdaptiveAvgPool2D(PrimitiveWithInfer):
def infer_shape(self, x_shape): def infer_shape(self, x_shape):
if len(x_shape) <= len(self.output_size): if len(x_shape) <= len(self.output_size):
raise ValueError("input_x {} dimension should be larger than output_size {} " raise ValueError("input_x {} dimension must be larger than output_size {} "
"dimension".format(x_shape, self.output_size)) "dimension".format(x_shape, self.output_size))
validator.check_int(len(x_shape), 5, Rel.LT, 'input_x_dimensions', self.name) validator.check_int(len(x_shape), 5, Rel.LT, 'input_x_dimensions', self.name)
for input_x_dimension in x_shape: for input_x_dimension in x_shape:
@ -1365,13 +1365,13 @@ class DepthwiseConv2dNative(PrimitiveWithInfer):
self.kernel_size = _check_positive_int_or_tuple('kernel_size', kernel_size, self.name) self.kernel_size = _check_positive_int_or_tuple('kernel_size', kernel_size, self.name)
self.stride = _check_positive_int_or_tuple('stride', stride, self.name) self.stride = _check_positive_int_or_tuple('stride', stride, self.name)
if self.stride[0] != self.stride[1]: if self.stride[0] != self.stride[1]:
raise ValueError("The height and width of 'stride' should be equal," raise ValueError("The height and width of 'stride' must be equal,"
f"but got height:{self.stride[0]}, width:{self.stride[1]}") f"but got height:{self.stride[0]}, width:{self.stride[1]}")
self.add_prim_attr('stride', (1, 1, self.stride[0], self.stride[1])) self.add_prim_attr('stride', (1, 1, self.stride[0], self.stride[1]))
self.dilation = _check_positive_int_or_tuple('dilation', dilation, self.name) self.dilation = _check_positive_int_or_tuple('dilation', dilation, self.name)
if self.dilation[0] != self.dilation[1]: if self.dilation[0] != self.dilation[1]:
raise ValueError("The height and width of 'dilation' should be equal," raise ValueError("The height and width of 'dilation' must be equal,"
f"but got height:{self.dilation[0]}, width:{self.dilation[1]}") f"but got height:{self.dilation[0]}, width:{self.dilation[1]}")
self.add_prim_attr('dilation', (1, 1, self.dilation[0], self.dilation[1])) self.add_prim_attr('dilation', (1, 1, self.dilation[0], self.dilation[1]))
validator.check_value_type('pad', pad, (int, tuple), self.name) validator.check_value_type('pad', pad, (int, tuple), self.name)
@ -1405,7 +1405,7 @@ class DepthwiseConv2dNative(PrimitiveWithInfer):
_, _, stride_h, stride_w = self.stride _, _, stride_h, stride_w = self.stride
_, _, dilation_h, dilation_w = self.dilation _, _, dilation_h, dilation_w = self.dilation
if kernel_size_n != 1: if kernel_size_n != 1:
raise ValueError(f"For '{self.name}', the batch of 'weight' should be 1, but got {kernel_size_n}") raise ValueError(f"For '{self.name}', the batch of 'weight' must be 1, but got {kernel_size_n}")
if self.pad_mode == "valid": if self.pad_mode == "valid":
h_out = math.ceil((x_shape[2] - dilation_h * (kernel_size_h - 1)) / stride_h) h_out = math.ceil((x_shape[2] - dilation_h * (kernel_size_h - 1)) / stride_h)
w_out = math.ceil((x_shape[3] - dilation_w * (kernel_size_w - 1)) / stride_w) w_out = math.ceil((x_shape[3] - dilation_w * (kernel_size_w - 1)) / stride_w)
@ -1775,7 +1775,7 @@ class MaxPool3D(PrimitiveWithInfer):
if len(self.pad_list) == 3: if len(self.pad_list) == 3:
self.pad_list = (pad_list[0], pad_list[0], pad_list[1], pad_list[1], pad_list[2], pad_list[2]) self.pad_list = (pad_list[0], pad_list[0], pad_list[1], pad_list[1], pad_list[2], pad_list[2])
if len(self.pad_list) != 3 and len(self.pad_list) != 6: if len(self.pad_list) != 3 and len(self.pad_list) != 6:
raise ValueError(f"For '{self.name}', attr 'pad_list' should be an positive int number or a tuple of " raise ValueError(f"For '{self.name}', attr 'pad_list' must be an positive int number or a tuple of "
f"three or six positive int numbers, but got {len(self.pad_list)} numbers.") f"three or six positive int numbers, but got {len(self.pad_list)} numbers.")
if self.pad_mode != 'CALCULATED' and self.pad_list != (0, 0, 0, 0, 0, 0): if self.pad_mode != 'CALCULATED' and self.pad_list != (0, 0, 0, 0, 0, 0):
raise ValueError(f"For '{self.name}', the 'pad_list' must be zero or (0, 0, 0, 0, 0, 0) when 'pad_mode' " raise ValueError(f"For '{self.name}', the 'pad_list' must be zero or (0, 0, 0, 0, 0, 0) when 'pad_mode' "
@ -3238,7 +3238,7 @@ class ResizeBilinear(PrimitiveWithInfer):
self.half_pixel_centers = validator.check_value_type("half_pixel_centers", self.half_pixel_centers = validator.check_value_type("half_pixel_centers",
half_pixel_centers, [bool], self.name) half_pixel_centers, [bool], self.name)
if half_pixel_centers and align_corners: if half_pixel_centers and align_corners:
raise ValueError(f"If half_pixel_centers is True, align_corners should be False, but got {align_corners}") raise ValueError(f"If half_pixel_centers is True, align_corners must be False, but got {align_corners}")
target = context.get_context("device_target") target = context.get_context("device_target")
if half_pixel_centers and target.lower() != "ascend": if half_pixel_centers and target.lower() != "ascend":
raise ValueError(f"Currently `half_pixel_centers`=True only support in Ascend device_target, " raise ValueError(f"Currently `half_pixel_centers`=True only support in Ascend device_target, "
@ -3566,10 +3566,10 @@ class PReLU(PrimitiveWithInfer):
weight_dim = len(weight_shape) weight_dim = len(weight_shape)
if weight_dim != 1: if weight_dim != 1:
raise ValueError(f"For '{self.name}', the dimension of 'weight' should be 1, while got {weight_dim}.") raise ValueError(f"For '{self.name}', the dimension of 'weight' must be 1, while got {weight_dim}.")
if weight_shape[0] != 1 and weight_shape[0] != channel_num: if weight_shape[0] != 1 and weight_shape[0] != channel_num:
raise ValueError(f"For '{self.name}', the first dimension of 'weight' should be (1,) or " raise ValueError(f"For '{self.name}', the first dimension of 'weight' must be (1,) or "
f"it should be equal to number of channels: {channel_num}, but got {weight_shape}") f"it must be equal to number of channels: {channel_num}, but got {weight_shape}")
return input_x_shape return input_x_shape
def infer_dtype(self, input_x_dtype, weight_dtype): def infer_dtype(self, input_x_dtype, weight_dtype):
@ -3990,7 +3990,7 @@ class MirrorPad(PrimitiveWithInfer):
validator.check_subclass("paddings", paddings['dtype'], mstype.tensor, self.name) validator.check_subclass("paddings", paddings['dtype'], mstype.tensor, self.name)
x_shape = list(input_x['shape']) x_shape = list(input_x['shape'])
if paddings['value'] is None: if paddings['value'] is None:
raise ValueError(f"For '{self.name}', paddings should be a Tensor with type of int64, " raise ValueError(f"For '{self.name}', paddings must be a Tensor with type of int64, "
f"but got {paddings['value']}.") f"but got {paddings['value']}.")
paddings_value = paddings['value'].asnumpy() paddings_value = paddings['value'].asnumpy()
paddings_size = paddings_value.size paddings_size = paddings_value.size
@ -4615,7 +4615,7 @@ class FusedSparseAdam(PrimitiveWithInfer):
validator.check_int(len(indices_shape), 1, Rel.EQ, "indices rank", self.name) validator.check_int(len(indices_shape), 1, Rel.EQ, "indices rank", self.name)
validator.check('grad_shape[0]', grad_shape[0], 'indices_shape[0]', indices_shape[0], Rel.EQ, self.name) validator.check('grad_shape[0]', grad_shape[0], 'indices_shape[0]', indices_shape[0], Rel.EQ, self.name)
if len(var_shape) > 1 and grad_shape != indices_shape + var_shape[1:]: if len(var_shape) > 1 and grad_shape != indices_shape + var_shape[1:]:
raise ValueError(f"For '{self.name}', the shape of updates should be [] or " raise ValueError(f"For '{self.name}', the shape of updates must be [] or "
f"grad_shape = indices_shape + var_shape[1:], but got var_shape: {var_shape}, " f"grad_shape = indices_shape + var_shape[1:], but got var_shape: {var_shape}, "
f"indices_shape: {indices_shape}, grad_shape: {grad_shape}.") f"indices_shape: {indices_shape}, grad_shape: {grad_shape}.")
return var_shape, [1], [1] return var_shape, [1], [1]
@ -4766,7 +4766,7 @@ class FusedSparseLazyAdam(PrimitiveWithInfer):
validator.check_int(len(indices_shape), 1, Rel.EQ, "indices rank", self.name) validator.check_int(len(indices_shape), 1, Rel.EQ, "indices rank", self.name)
validator.check('grad_shape[0]', grad_shape[0], 'indices_shape[0]', indices_shape[0], Rel.EQ, self.name) validator.check('grad_shape[0]', grad_shape[0], 'indices_shape[0]', indices_shape[0], Rel.EQ, self.name)
if len(var_shape) > 1 and grad_shape != indices_shape + var_shape[1:]: if len(var_shape) > 1 and grad_shape != indices_shape + var_shape[1:]:
raise ValueError(f"For '{self.name}', the shape of updates should be [] or " raise ValueError(f"For '{self.name}', the shape of updates must be [] or "
f"grad_shape = indices_shape + var_shape[1:], but got var_shape: {var_shape}, " f"grad_shape = indices_shape + var_shape[1:], but got var_shape: {var_shape}, "
f"indices_shape: {indices_shape}, grad_shape: {grad_shape}.") f"indices_shape: {indices_shape}, grad_shape: {grad_shape}.")
return var_shape, [1], [1] return var_shape, [1], [1]
@ -7104,14 +7104,14 @@ class DynamicRNN(PrimitiveWithInfer):
validator.check_int(len(h_shape), 3, Rel.EQ, "h_shape", self.name) validator.check_int(len(h_shape), 3, Rel.EQ, "h_shape", self.name)
validator.check_int(len(c_shape), 3, Rel.EQ, "c_shape", self.name) validator.check_int(len(c_shape), 3, Rel.EQ, "c_shape", self.name)
if seq_shape is not None: if seq_shape is not None:
raise ValueError(f"For '{self.name}', the 'seq_length' should be None.") raise ValueError(f"For '{self.name}', the 'seq_length' must be None.")
num_step, batch_size, input_size = x_shape num_step, batch_size, input_size = x_shape
hidden_size = w_shape[-1] // 4 hidden_size = w_shape[-1] // 4
validator.check("b_shape[-1]", b_shape[-1], "w_shape[-1]", w_shape[-1], Rel.EQ, self.name) validator.check("b_shape[-1]", b_shape[-1], "w_shape[-1]", w_shape[-1], Rel.EQ, self.name)
if w_shape[-1] % 4 != 0: if w_shape[-1] % 4 != 0:
raise ValueError(f"For '{self.name}', the last dimension of 'w' should be a multiple of 4, " raise ValueError(f"For '{self.name}', the last dimension of 'w' must be a multiple of 4, "
f"but got {w_shape[-1]}.") f"but got {w_shape[-1]}.")
validator.check("w_shape[0]", w_shape[0], "input_size + hidden_size", validator.check("w_shape[0]", w_shape[0], "input_size + hidden_size",
input_size + hidden_size, Rel.EQ, self.name) input_size + hidden_size, Rel.EQ, self.name)
@ -7273,7 +7273,7 @@ class DynamicGRUV2(PrimitiveWithInfer):
num_step, batch_size, input_size = x_shape num_step, batch_size, input_size = x_shape
hidden_size = winput_shape[-1] // 3 hidden_size = winput_shape[-1] // 3
if winput_shape[-1] % 3 != 0: if winput_shape[-1] % 3 != 0:
raise ValueError(f"For '{self.name}', the last dimension of 'w' should be a multiple of 3, " raise ValueError(f"For '{self.name}', the last dimension of 'w' must be a multiple of 3, "
f"but got {winput_shape[-1]}.") f"but got {winput_shape[-1]}.")
self.placeholder_index = [3, 4, 5] self.placeholder_index = [3, 4, 5]
@ -7287,7 +7287,7 @@ class DynamicGRUV2(PrimitiveWithInfer):
"3 * hidden_shape", [3 * hidden_size], Rel.EQ, self.name) "3 * hidden_shape", [3 * hidden_size], Rel.EQ, self.name)
self.placeholder_index.remove(4) self.placeholder_index.remove(4)
if seq_shape is not None: if seq_shape is not None:
raise ValueError(f"For '{self.name}', the dimension of 'seq_length' should be None, " raise ValueError(f"For '{self.name}', the dimension of 'seq_length' must be None, "
f"but got {seq_shape}.") f"but got {seq_shape}.")
validator.check_int(len(h_shape), 2, Rel.EQ, "init_h shape rank", self.name) validator.check_int(len(h_shape), 2, Rel.EQ, "init_h shape rank", self.name)
@ -7533,7 +7533,7 @@ class AvgPool3D(Primitive):
if isinstance(pad, int): if isinstance(pad, int):
pad = (pad,) * 6 pad = (pad,) * 6
if len(pad) != 6: if len(pad) != 6:
raise ValueError(f"For '{self.name}', attr 'pad' should be an positive int number or a tuple of " raise ValueError(f"For '{self.name}', attr 'pad' must be an positive int number or a tuple of "
f"six positive int numbers, but got {self.pad}.") f"six positive int numbers, but got {self.pad}.")
self.pad_list = pad self.pad_list = pad
self.add_prim_attr('pad_list', self.pad_list) self.add_prim_attr('pad_list', self.pad_list)
@ -7678,7 +7678,7 @@ class Conv3D(PrimitiveWithInfer):
if isinstance(pad, int): if isinstance(pad, int):
pad = (pad,) * 6 pad = (pad,) * 6
if len(pad) != 6: if len(pad) != 6:
raise ValueError(f"For '{self.name}', attr 'pad' should be an positive int number or a tuple of " raise ValueError(f"For '{self.name}', attr 'pad' must be an positive int number or a tuple of "
f"six positive int numbers, but got {self.pad}.") f"six positive int numbers, but got {self.pad}.")
validator.check_value_type('pad_mode', pad_mode, [str], self.name) validator.check_value_type('pad_mode', pad_mode, [str], self.name)
self.pad_mode = validator.check_string(pad_mode.lower(), ['valid', 'same', 'pad'], 'pad_mode', self.name) self.pad_mode = validator.check_string(pad_mode.lower(), ['valid', 'same', 'pad'], 'pad_mode', self.name)
@ -8279,7 +8279,7 @@ class Conv3DTranspose(PrimitiveWithInfer):
if isinstance(pad, int): if isinstance(pad, int):
pad = (pad,) * 6 pad = (pad,) * 6
if len(pad) != 6: if len(pad) != 6:
raise ValueError(f"For '{self.name}', attr 'pad' should be an positive int number or a tuple of " raise ValueError(f"For '{self.name}', attr 'pad' must be an positive int number or a tuple of "
f"six positive int numbers, but got {self.pad}.") f"six positive int numbers, but got {self.pad}.")
self.pad_list = pad self.pad_list = pad
validator.check_value_type('pad_mode', pad_mode, [str], self.name) validator.check_value_type('pad_mode', pad_mode, [str], self.name)
@ -9160,7 +9160,7 @@ class FractionalMaxPool3DWithFixedKsize(Primitive):
if isinstance(self.ksize, float): if isinstance(self.ksize, float):
self.ksize = (ksize, ksize, ksize) self.ksize = (ksize, ksize, ksize)
if len(self.ksize) != 3: if len(self.ksize) != 3:
raise ValueError(f"For '{self.name}', attr 'ksize' should be an positive float number or a tuple of " raise ValueError(f"For '{self.name}', attr 'ksize' must be an positive float number or a tuple of "
f"three float numbers, but got {len(self.ksize)} numbers.") f"three float numbers, but got {len(self.ksize)} numbers.")
for item in self.ksize: for item in self.ksize:
validator.check_positive_float(item, 'ksize item', self.name) validator.check_positive_float(item, 'ksize item', self.name)

View File

@ -494,7 +494,7 @@ class CheckBprop(PrimitiveWithInfer):
validator.check_value_type('grads', xshapes, (tuple,), tips) validator.check_value_type('grads', xshapes, (tuple,), tips)
validator.check_value_type('params', yshapes, (tuple,), tips) validator.check_value_type('params', yshapes, (tuple,), tips)
if not len(xshapes) == len(yshapes): if not len(xshapes) == len(yshapes):
raise ValueError(f"For {tips} the number of return values(gradients) should be equal to " raise ValueError(f"For {tips} the number of return values(gradients) must be equal to "
f"the number of input arguments except 'out' and 'dout', " f"the number of input arguments except 'out' and 'dout', "
f"which is:{len(yshapes)} but got {len(xshapes)}.") f"which is:{len(yshapes)} but got {len(xshapes)}.")
checking_range = len(yshapes) checking_range = len(yshapes)
@ -514,7 +514,7 @@ class CheckBprop(PrimitiveWithInfer):
validator.check_value_type('grads', xdtypes, (tuple,), tips) validator.check_value_type('grads', xdtypes, (tuple,), tips)
validator.check_value_type('params', ydtypes, (tuple,), tips) validator.check_value_type('params', ydtypes, (tuple,), tips)
if not len(xdtypes) == len(ydtypes): if not len(xdtypes) == len(ydtypes):
raise ValueError(f"For {tips}, the number of return values(gradients) should be equal to " raise ValueError(f"For {tips}, the number of return values(gradients) must be equal to "
f"the number of input arguments except 'out' and 'dout', " f"the number of input arguments except 'out' and 'dout', "
f"which is:{len(ydtypes)} but got {len(xdtypes)}.") f"which is:{len(ydtypes)} but got {len(xdtypes)}.")
checking_range = len(ydtypes) checking_range = len(ydtypes)

View File

@ -635,7 +635,7 @@ class RandomCategorical(PrimitiveWithInfer):
Validator.check_positive_int(num_samples_v, "num_samples", self.name) Validator.check_positive_int(num_samples_v, "num_samples", self.name)
x_shape = list(logits['shape']) x_shape = list(logits['shape'])
if len(x_shape) != 2: if len(x_shape) != 2:
raise ValueError(f"For '{self.name}', the shape of 'logits' should be 2-dimension, " raise ValueError(f"For '{self.name}', the shape of 'logits' must be 2-dimension, "
f"but got {len(x_shape)}.") f"but got {len(x_shape)}.")
ndim = len(x_shape) - 1 ndim = len(x_shape) - 1
x_shape[ndim] = num_samples_v x_shape[ndim] = num_samples_v

View File

@ -80,7 +80,7 @@ class SparseToDense(PrimitiveWithInfer):
raise ValueError(f"For '{self.name}', all elements in 'sparse_shape' must be " raise ValueError(f"For '{self.name}', all elements in 'sparse_shape' must be "
f"positive int number, but got 'sparse_shape': {sparse_shape_v}.") f"positive int number, but got 'sparse_shape': {sparse_shape_v}.")
if len(sparse_shape_v) != indices_shape[1]: if len(sparse_shape_v) != indices_shape[1]:
raise ValueError(f"For '{self.name}', the length of 'sparse_shape' should be equal to the second dimension " raise ValueError(f"For '{self.name}', the length of 'sparse_shape' must be equal to the second dimension "
f"length of 'indices', but got the second dimension length of 'indices': " f"length of 'indices', but got the second dimension length of 'indices': "
f"{indices_shape[1]}, length of 'sparse_shape': {len(sparse_shape_v)}.") f"{indices_shape[1]}, length of 'sparse_shape': {len(sparse_shape_v)}.")
out = {'shape': sparse_shape['value'], out = {'shape': sparse_shape['value'],
@ -172,7 +172,7 @@ class SparseTensorDenseMatmul(PrimitiveWithInfer):
f"positive int number, but got 'sparse_shape': {a_shape}.") f"positive int number, but got 'sparse_shape': {a_shape}.")
if len(a_shape) != 2 or len(b_shape) != 2: if len(a_shape) != 2 or len(b_shape) != 2:
raise ValueError(f"For '{self.name}', both the length of 'sparse_shape' and the tensor " raise ValueError(f"For '{self.name}', both the length of 'sparse_shape' and the tensor "
f"rank of 'dense' should be equal to 2, but got the length of " f"rank of 'dense' must be equal to 2, but got the length of "
f"'sparse_shape': {len(a_shape)}, " f"'sparse_shape': {len(a_shape)}, "
f"the tensor rank of 'dense': {len(b_shape)}.") f"the tensor rank of 'dense': {len(b_shape)}.")
if a_shape[1] != b_shape[0]: if a_shape[1] != b_shape[0]:

View File

@ -1204,7 +1204,7 @@ def test_tensor_slice_reduce_out_of_bounds_neg():
net = NetWork() net = NetWork()
with pytest.raises(IndexError) as ex: with pytest.raises(IndexError) as ex:
net(input_tensor) net(input_tensor)
assert "'begin[0]' should be in [-6, 6) when 'shrink_axis_mask' is greater than 0, " \ assert "'begin[0]' must be in [-6, 6) when 'shrink_axis_mask' is greater than 0, " \
"but got 'shrink_axis_mask': 7, 'strides[0]': 1, 'begin[0]': -7." in str(ex.value) "but got 'shrink_axis_mask': 7, 'strides[0]': 1, 'begin[0]': -7." in str(ex.value)
@ -1227,7 +1227,7 @@ def test_tensor_slice_reduce_out_of_bounds_positive():
net = NetWork() net = NetWork()
with pytest.raises(IndexError) as ex: with pytest.raises(IndexError) as ex:
net(input_tensor) net(input_tensor)
assert "'begin[0]' should be in [-6, 6) when 'shrink_axis_mask' is greater than 0, " \ assert "'begin[0]' must be in [-6, 6) when 'shrink_axis_mask' is greater than 0, " \
"but got 'shrink_axis_mask': 7, 'strides[0]': 1, 'begin[0]': 6." in str(ex.value) "but got 'shrink_axis_mask': 7, 'strides[0]': 1, 'begin[0]': 6." in str(ex.value)

View File

@ -49,7 +49,7 @@ def test_classification_accuracy_indexes_awareness():
@pytest.mark.parametrize('indexes', [0, [0., 2.], [0., 1], ['1', '0']]) @pytest.mark.parametrize('indexes', [0, [0., 2.], [0., 1], ['1', '0']])
def test_set_indexes(indexes): def test_set_indexes(indexes):
pat_str = "For 'set_indexes', the argument 'indexes' should be a list and all its elements should " \ pat_str = "For 'set_indexes', the argument 'indexes' must be a list and all its elements must " \
"be int, please check whether it is correct." "be int, please check whether it is correct."
with pytest.raises(ValueError, match=pat_str): with pytest.raises(ValueError, match=pat_str):
_ = Accuracy('classification').set_indexes(indexes) _ = Accuracy('classification').set_indexes(indexes)