modify error

liuyang_655 2021-11-23 06:18:10 -05:00
parent 511441a27e
commit 12092c882f
22 changed files with 201 additions and 112 deletions

View File

@ -46,7 +46,8 @@ def _make_directory(path):
"""Make directory."""
real_path = None
if path is None or not isinstance(path, str) or path.strip() == "":
raise ValueError(f"Input path `{path}` is invalid type")
raise ValueError(f"For 'context.set_context', the 'save_graphs_path' or the 'print_file_path' is invalid "
f"type, it should be Non-empty string, but got '{path}'.")
# convert the relative paths
path = os.path.realpath(path)
@ -62,8 +63,8 @@ def _make_directory(path):
os.makedirs(path)
real_path = path
except PermissionError as e:
logger.critical(f"No write permission on the directory `{path}`, error = {e}")
raise ValueError(f"No write permission on the directory `{path}`.")
logger.critical(f"No write permission on the directory '{path}'', error = {e}")
raise ValueError(e.__str__() + f"\nNo write permission on the directory '{path}'.")
return real_path
@ -95,8 +96,8 @@ class _ThreadLocalInfo(threading.local):
def reserve_class_name_in_scope(self, reserve_class_name_in_scope):
"""Set whether to save the network class name in the scope."""
if not isinstance(reserve_class_name_in_scope, bool):
raise ValueError(
"Set reserve_class_name_in_scope value must be bool!")
raise ValueError("For '_ThreadLocalInfo', the type of the property 'reserve_class_name_in_scope' must "
"be bool, but got {}.".format(type(reserve_class_name_in_scope)))
self._reserve_class_name_in_scope = reserve_class_name_in_scope
@ -195,13 +196,15 @@ class _Context:
self.set_backend_policy("ge")
self._context_switches.push(False, None)
else:
raise ValueError(f'The execution mode {mode} is invalid!')
raise ValueError(f"For 'context.set_context', the argument 'mode' should be context.GRAPH_MODE (0) "
f"or context.PYNATIVE_MODE (1), but got {mode}.")
self.set_param(ms_ctx_param.mode, mode)
def set_backend_policy(self, policy):
success = self._context_handle.set_backend_policy(policy)
if not success:
raise RuntimeError("Backend policy must be one of ge, vm, ms.")
raise RuntimeError("Backend policy must be one of values in ['ge', 'vm', 'ms']. "
"But got {}.".format(policy))
def set_save_graphs_path(self, save_graphs_path):
self.set_param(ms_ctx_param.save_graphs_path, _make_directory(save_graphs_path))
@ -209,7 +212,8 @@ class _Context:
def set_device_target(self, target):
valid_targets = ["CPU", "GPU", "Ascend", "Davinci"]
if not target in valid_targets:
raise ValueError(f"Target device name {target} is invalid! It must be one of {valid_targets}")
raise ValueError(f"For 'context.set_context', the argument 'device_target' must be one of "
f"{valid_targets}, but got {target}.")
if target == "Davinci":
target = "Ascend"
self.set_param(ms_ctx_param.device_target, target)
@ -221,29 +225,37 @@ class _Context:
if tune_mode in candidate:
self.set_param(ms_ctx_param.tune_mode, tune_mode)
else:
raise ValueError(f"Tune mode must be in ['NO_TUNE', 'RL', 'GA', 'RL,GA', 'GA,RL'], but got {tune_mode}")
raise ValueError(f"For 'context.set_context', the argument 'auto_tune_mode' must be in "
f"['NO_TUNE', 'RL', 'GA', 'RL,GA', 'GA,RL'], but got {tune_mode}.")
def set_device_id(self, device_id):
if device_id < 0 or device_id > 4095:
raise ValueError(f"Device id must be in [0, 4095], but got {device_id}")
raise ValueError(f"For 'context.set_context', the argument 'device_id' must be in range [0, 4095], "
f"but got {device_id}.")
self.set_param(ms_ctx_param.device_id, device_id)
def set_max_call_depth(self, max_call_depth):
if max_call_depth <= 0:
raise ValueError(f"Max call depth must be greater than 0, but got {max_call_depth}")
raise ValueError(f"For 'context.set_context', the argument 'max_call_depth' must be greater than 0, "
f"but got {max_call_depth}.")
self.set_param(ms_ctx_param.max_call_depth, max_call_depth)
def set_profiling_options(self, option):
if not isinstance(option, str):
raise TypeError("The parameter option must be str.")
raise TypeError("For 'context.set_context', the argument 'profiling_option' must be string, "
"but got {}.".format(type(option)))
self.set_param(ms_ctx_param.profiling_options, option)
def set_variable_memory_max_size(self, variable_memory_max_size):
"""set values of variable_memory_max_size and graph_memory_max_size"""
if not Validator.check_str_by_regular(variable_memory_max_size, _re_pattern):
raise ValueError("Context param variable_memory_max_size should be in correct format! Such as \"5GB\"")
raise ValueError("For 'context.set_context', the argument 'variable_memory_max_size' should be in correct"
" format! It must be a string ending with 'GB', in addition to that, it must contain "
"only numbers or decimal points, such as \"5GB\" or \"3.5GB\", but got {}."
.format(variable_memory_max_size))
if int(variable_memory_max_size[:-2]) > _DEVICE_APP_MEMORY_SIZE:
raise ValueError("Context param variable_memory_max_size should be not greater than 31GB.")
raise ValueError("For 'context.set_context', the argument 'variable_memory_max_size' should not be "
"greater than 31GB, but got {}.".format(variable_memory_max_size))
variable_memory_max_size_ = variable_memory_max_size[:-2] + " * 1024 * 1024 * 1024"
graph_memory_max_size = _DEVICE_APP_MEMORY_SIZE - int(variable_memory_max_size[:-2])
graph_memory_max_size_ = str(graph_memory_max_size) + " * 1024 * 1024 * 1024"
@ -252,17 +264,21 @@ class _Context:
def set_max_device_memory(self, max_device_memory):
if not Validator.check_str_by_regular(max_device_memory, _re_pattern):
raise ValueError("Context param max_device_memory should be in correct format! Such as \"3.5GB\"")
raise ValueError("For 'context.set_context', the argument 'max_device_memory' should be in correct "
" format! It must be a string ending with 'GB', in addition to that, it must contain "
"only numbers or decimal points, such as \"5GB\" or \"3.5GB\", but got {}."
.format(max_device_memory))
max_device_memory_value = float(max_device_memory[:-2])
if max_device_memory_value == 0:
raise ValueError("Context param max_device_memory should be in correct format! Such as \"3.5GB\"")
raise ValueError("For 'context.set_context', the argument 'max_device_memory' should not be \"0GB\".")
self.set_param(ms_ctx_param.max_device_memory, max_device_memory_value)
def set_print_file_path(self, file_path):
"""Add timestamp suffix to file name. Sets print file path."""
print_file_path = os.path.realpath(file_path)
if os.path.isdir(print_file_path):
raise IOError("Print_file_path should be file path, but got {}.".format(file_path))
raise IOError("For 'context.set_context', the argument 'print_file_path' should be file path, "
"but got directory {}.".format(file_path))
if os.path.exists(print_file_path):
_path, _file_name = os.path.split(print_file_path)
@ -280,13 +296,15 @@ class _Context:
"with '-D on' and recompile source.")
env_config_path = os.path.realpath(env_config_path)
if not os.path.isfile(env_config_path):
raise ValueError("The %r set by 'env_config_path' should be an existing json file." % env_config_path)
raise ValueError("For 'context.set_context', the 'env_config_path' file %r is not exists, "
"please check whether 'env_config_path' is correct." % env_config_path)
try:
with open(env_config_path, 'r') as f:
json.load(f)
except (TypeError, ValueError) as exo:
raise ValueError("The %r set by 'env_config_path' should be a json file. "
"Detail: %s." % (env_config_path, str(exo)))
raise ValueError(str(exo) + "\nFor 'context.set_context', opening or loading the 'env_config_path' file {} "
"failed, please check whether 'env_config_path' is a correct json file, or whether you "
"have permission to read it.".format(env_config_path))
self.set_param(ms_ctx_param.env_config_path, env_config_path)
setters = {
@ -813,7 +831,8 @@ def set_context(**kwargs):
if key in ms_ctx_param.__members__ and key[0] != '_':
ctx.set_param(ms_ctx_param.__members__[key], value)
continue
raise ValueError("Set context keyword %s is not recognized!" % key)
raise ValueError(f"For 'context.set_context', the keyword argument {key} is not recognized! For detailed "
f"usage of 'set_context', please refer to the Mindspore official website.")
def get_context(attr_key):
@ -841,7 +860,8 @@ def get_context(attr_key):
# enum variables beginning with '_' are for internal use
if attr_key in ms_ctx_param.__members__ and attr_key[0] != '_':
return ctx.get_param(ms_ctx_param.__members__[attr_key])
raise ValueError("Get context keyword %s is not recognized!" % attr_key)
raise ValueError(f"For 'context.get_context', the argument {attr_key} is not recognized! For detailed "
f"usage of 'get_context', please refer to the Mindspore official website.")
def _get_mode():
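
A hedged usage sketch of how the reworded checks above surface to a caller of context.set_context; it assumes a MindSpore build that contains these messages, and the printed text follows the strings in this diff.

# Hedged sketch: triggering two of the reworded checks. Assumes MindSpore is installed.
from mindspore import context

try:
    context.set_context(mode=3)            # only GRAPH_MODE (0) or PYNATIVE_MODE (1) are valid
except ValueError as err:
    print(err)                             # "For 'context.set_context', the argument 'mode' should be ..."

try:
    context.set_context(device_id=-1)      # 'device_id' must be in range [0, 4095]
except ValueError as err:
    print(err)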

View File

@ -187,7 +187,7 @@ class Cell(Cell_):
value (bool): Specifies whether to enable bprop debug. Default: False.
"""
if not isinstance(value, bool):
raise TypeError("The 'bprop debug' value must be a bool type.")
raise TypeError(f"For 'Cell', the property 'bprop_debug' must be bool type, but got type {type(value)}.")
self._bprop_debug = value
def update_cell_prefix(self):
@ -215,7 +215,8 @@ class Cell(Cell_):
@cell_init_args.setter
def cell_init_args(self, value):
if not isinstance(value, str):
raise TypeError("The 'cell_init_args' must be a string type.")
raise TypeError(f"For 'Cell', the property 'cell_init_args' must be bool type, "
f"but got type {type(value)}.")
self._cell_init_args = value
@property
@ -225,7 +226,7 @@ class Cell(Cell_):
@phase.setter
def phase(self, value):
if not isinstance(value, str):
raise TypeError("The 'phase' must be a string type.")
raise TypeError(f"For 'Cell', the property 'phase' must be string type, but got type {type(value)}.")
self._phase = value
@property
@ -243,7 +244,8 @@ class Cell(Cell_):
@parameter_layout_dict.setter
def parameter_layout_dict(self, value):
if not isinstance(value, dict):
raise TypeError("The 'parameter_layout_dict' must be a dict type.")
raise TypeError("The type of parameter 'value' must be a dict type, "
"but got the type : {}.".format(type(value)))
self._parameter_layout_dict = value
@property
@ -253,7 +255,8 @@ class Cell(Cell_):
@parallel_parameter_name_list.setter
def parallel_parameter_name_list(self, value):
if not isinstance(value, list):
raise TypeError("The 'parallel_parameter_name_list' must be a list type.")
raise TypeError("The type of parameter 'parallel_parameter_name_list' must be a list type, "
"but got the type : {}.".format(type(value)))
self._parallel_parameter_name_list = value
@property
@ -262,13 +265,13 @@ class Cell(Cell_):
@pipeline_stage.setter
def pipeline_stage(self, value):
if isinstance(value, bool):
raise TypeError("'pipeline_stage' must be an int type, but got bool.")
if not isinstance(value, int):
raise TypeError("'pipeline_stage' must be an int type, but got {}".format(value))
if not isinstance(value, int) or isinstance(value, bool):
raise TypeError("The parameter 'pipeline_stage' must be an int type, "
"but got the type : {}.".format(type(value)))
if value < 0:
raise TypeError("'pipeline_stage' can not be less than 0 but got {}".format(value))
raise TypeError("The parameter 'pipeline_stage' can not be less than 0, "
"but got the value : {}".format(value))
self._pipeline_stage = value
for item in self.trainable_params():
item.add_pipeline_stage(value)
@ -280,7 +283,8 @@ class Cell(Cell_):
@parallel_parameter_merge_net_dict.setter
def parallel_parameter_merge_net_dict(self, value):
if not isinstance(value, dict):
raise TypeError("The 'parallel_parameter_merge_net_dict' must be a dict type.")
raise TypeError("The parameter 'parallel_parameter_merge_net_dict' must be a dict type, "
"but got the type : {}".format(type(value)))
self._parallel_parameter_merge_net_dict = value
def get_func_graph_proto(self):

View File

@ -117,6 +117,7 @@ class Accuracy(EvaluationBase):
RuntimeError: If the sample size is 0.
"""
if self._total_num == 0:
raise RuntimeError("The 'Accuracy' can not be calculated, because the number of samples is 0, please "
"check whether your inputs(predicted value, true value) are correct.")
raise RuntimeError("The 'Accuracy' can not be calculated, because the number of samples is 0, "
"please check whether your inputs(predicted value, true value) are empty, "
"or has called update method before calling eval method.")
return self._correct_num / self._total_num
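
A hedged usage sketch of the contract the new message describes: eval() raises until update() has seen samples. It assumes MindSpore 1.x, where Accuracy is exported from mindspore.nn.

import numpy as np
from mindspore.nn import Accuracy

metric = Accuracy('classification')
try:
    metric.eval()                                        # no samples yet -> RuntimeError above
except RuntimeError as err:
    print(err)

metric.update(np.array([[0.2, 0.8], [0.9, 0.1]]), np.array([1, 0]))
print(metric.eval())                                     # 1.0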

View File

@ -259,7 +259,8 @@ class ConfusionMatrixMetric(Metric):
if self.calculation_method is True:
if self._class_num == 0:
raise RuntimeError("The 'ConfusionMatrixMetric' can not be calculated, because the number of samples "
"is 0, please check whether your inputs(predicted value, true value) are correct.")
"is 0, please check whether your inputs(predicted value, true value) are empty, or "
"has called update method before calling eval method.")
return self._total_num / self._class_num
@ -589,7 +590,8 @@ def _compute_confusion_matrix_metric(metric_name, confusion_matrix):
if input_dim == 1:
confusion_matrix = np.expand_dims(confusion_matrix, 0)
if confusion_matrix.shape[-1] != 4:
raise ValueError("The size of the last dimension of confusion_matrix should be 4.")
raise ValueError(f"The size of the last dimension of confusion_matrix should be 4, "
f"but got {confusion_matrix.shape[-1]}.")
tp = confusion_matrix[..., 0]
fp = confusion_matrix[..., 1]

View File

@ -106,6 +106,7 @@ class Dice(Metric):
"""
if self._samples_num == 0:
raise RuntimeError("The 'Dice coefficient' can not be calculated, because the number of samples is 0, "
"please check whether your inputs(predicted value, true value) are correct.")
"please check whether your inputs(predicted value, true value) are empty, or has "
"called update method before calling eval method.")
return self._dice_coeff_sum / float(self._samples_num)

View File

@ -87,7 +87,8 @@ class MAE(Metric):
"""
if self._samples_num == 0:
raise RuntimeError("The 'MAE' can not be calculated, because the number of samples is 0, "
"please check whether your inputs(predicted value, true value) are correct.")
"please check whether your inputs(predicted value, true value) are empty, "
"or has called update method before calling eval method.")
return self._abs_error_sum / self._samples_num
@ -157,5 +158,6 @@ class MSE(Metric):
"""
if self._samples_num == 0:
raise RuntimeError("The 'MSE' can not be calculated, because the number of samples is 0, "
"please check whether your inputs(predicted value, true value) are correct.")
"please check whether your inputs(predicted value, true value) are empty, "
"or has called update method before calling eval method.")
return self._squared_error_sum / self._samples_num

View File

@ -117,7 +117,8 @@ class Fbeta(Metric):
validator.check_value_type("average", average, [bool], self.__class__.__name__)
if self._class_num == 0:
raise RuntimeError("The 'Fbeta' can not be calculated, because the number of samples is 0, "
"please check whether your inputs(predicted value, true value) are correct.")
"please check whether your inputs(predicted value, true value) are empty, "
"or has called update method before calling eval method.")
fbeta = (1.0 + self.beta ** 2) * self._true_positives / \
(self.beta ** 2 * self._actual_positives + self._positives + self.eps)

View File

@ -44,8 +44,8 @@ class _ROISpatialData(metaclass=ABCMeta):
self.roi_end = np.maximum(self.roi_start + roi_size, self.roi_start)
else:
if roi_start is None or roi_end is None:
raise ValueError("Please provide the center coordinates, size or start coordinates and end coordinates"
" of ROI.")
raise ValueError("For '_ROISpatialData', When either 'roi_center' or 'roi_size' is None, "
"neither 'roi_start' nor 'roi_end' can be None.")
self.roi_start = np.maximum(np.asarray(roi_start, dtype=np.int16), 0)
self.roi_end = np.maximum(np.asarray(roi_end, dtype=np.int16), self.roi_start)
@ -200,7 +200,8 @@ class HausdorffDistance(Metric):
if 0 <= self.percentile <= 100:
return np.percentile(surface_distance, self.percentile)
raise ValueError(f"The percentile value should be between 0 and 100, but got {self.percentile}.")
raise ValueError(f"For 'HausdorffDistance', the value of the argument 'percentile' should be [0, 100], "
f"but got {self.percentile}.")
def _get_surface_distance(self, y_pred_edges, y_edges):
"""
@ -276,20 +277,26 @@ class HausdorffDistance(Metric):
self._is_update = True
if len(inputs) != 3:
raise ValueError('The HausdorffDistance needs 3 inputs (y_pred, y, label), but got {}'.format(len(inputs)))
raise ValueError("For 'HausdorffDistance.update', it needs 3 inputs (predicted value, true value, "
"label index), but got {}.".format(len(inputs)))
y_pred = self._convert_data(inputs[0])
y = self._convert_data(inputs[1])
label_idx = inputs[2]
if not isinstance(label_idx, (int, float)):
raise TypeError("The data type of label_idx must be int or float, but got {}.".format(type(label_idx)))
raise ValueError(f"For 'HausdorffDistance.update', the label index (input[2]) must be int or float, "
f"but got {type(label_idx)}.")
if label_idx not in y_pred and label_idx not in y:
raise ValueError("The label_idx should be in y_pred or y, but {} is not.".format(label_idx))
raise ValueError("For 'HausdorffDistance.update', the label index (input[2]) should be in predicted "
"value (input[0]) or true value (input[1]), but {} is not.".format(label_idx))
if y_pred.size == 0 or y_pred.shape != y.shape:
raise ValueError("Labelfields should have the same shape, but got {}, {}".format(y_pred.shape, y.shape))
raise ValueError(f"For 'HausdorffDistance.update', the size of predicted value (input[0]) and true value "
f"(input[1]) should be greater than 0, in addition to that, predicted value and true "
f"value should have the same shape, but got predicted value size: {y_pred.size}, shape: "
f"{y_pred.shape}, true value size: {y.size}, shape: {y.shape}.")
y_pred = (y_pred == label_idx) if y_pred.dtype is not bool else y_pred
y = (y == label_idx) if y.dtype is not bool else y
@ -307,7 +314,7 @@ class HausdorffDistance(Metric):
RuntimeError: If the update method is not called first, an error will be reported.
"""
if self._is_update is False:
raise RuntimeError('Call the update method before calling eval.')
raise RuntimeError('Please call the update method before calling the eval method.')
hd = self._calculate_percent_hausdorff_distance(self.y_pred_edges, self.y_edges)
if self.directed:
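
A hedged sketch of the three-input contract checked above: update() takes (predicted value, true value, label index), and the label index must occur in both arrays. It assumes MindSpore 1.x, where HausdorffDistance is exported from mindspore.nn.

import numpy as np
from mindspore.nn import HausdorffDistance

x = np.array([[3, 0, 1], [1, 3, 0], [1, 0, 2]])
y = np.array([[0, 2, 1], [1, 2, 1], [0, 0, 1]])

metric = HausdorffDistance()
metric.clear()
metric.update(x, y, 0)      # exactly three inputs; label index 0 appears in both x and y
print(metric.eval())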

View File

@ -57,7 +57,7 @@ class Loss(Metric):
ValueError: If the dimension of loss is not 1 or 0.
"""
if len(inputs) != 1:
raise ValueError('The length of inputs must be 1, but got {}'.format(len(inputs)))
raise ValueError("For 'Loss.update', it needs 1 input (loss), but got {}".format(len(inputs)))
loss = self._convert_data(inputs[0])
@ -65,7 +65,8 @@ class Loss(Metric):
loss = loss.reshape(1)
if loss.ndim != 1:
raise ValueError("The dimension of loss must be 1, but got {}".format(loss.ndim))
raise ValueError("For 'Loss.update', the dimension of your input (loss) must be 1, "
"but got {}.".format(loss.ndim))
loss = loss.mean(-1)
self._sum_loss += loss
@ -82,5 +83,6 @@ class Loss(Metric):
RuntimeError: If the total number is 0.
"""
if self._total_num == 0:
raise RuntimeError('The total number can not be 0.')
raise RuntimeError("The 'Loss' can not be calculated, because the number of samples is 0, please "
"check whether has called update method before calling eval method.")
return self._sum_loss / self._total_num
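
A hedged usage sketch of the one-input contract reworded above: each update() call takes a single loss value and eval() averages over all updates. It assumes MindSpore 1.x, where the Loss metric is exported from mindspore.nn.

import numpy as np
from mindspore.nn import Loss

loss_metric = Loss()
loss_metric.clear()
loss_metric.update(np.array(0.2))      # exactly one input: the loss
loss_metric.update(np.array(0.4))
print(loss_metric.eval())              # 0.3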

View File

@ -126,19 +126,25 @@ class MeanSurfaceDistance(Metric):
ValueError: If y_pred and y have different shapes.
"""
if len(inputs) != 3:
raise ValueError('MeanSurfaceDistance need 3 inputs (y_pred, y, label), but got {}.'.format(len(inputs)))
raise ValueError("For 'MeanSurfaceDistance.update', it needs 3 inputs (predicted value, true value, "
"label index), but got {}".format(len(inputs)))
y_pred = self._convert_data(inputs[0])
y = self._convert_data(inputs[1])
label_idx = inputs[2]
if not isinstance(label_idx, (int, float)):
raise TypeError("The data type of label_idx must be int or float, but got {}.".format(type(label_idx)))
raise ValueError(f"For 'MeanSurfaceDistance.update', the label index (input[2]) must be int or float, "
f"but got {type(label_idx)}.")
if label_idx not in y_pred and label_idx not in y:
raise ValueError("The label_idx should be in y_pred or y, but {} is not.".format(label_idx))
raise ValueError("For 'MeanSurfaceDistance.update', the label index (input[2]) should be in predicted "
"value (input[0]) or true value (input[1]), but {} is not.".format(label_idx))
if y_pred.size == 0 or y_pred.shape != y.shape:
raise ValueError("y_pred and y should have same shape, but got {}, {}.".format(y_pred.shape, y.shape))
raise ValueError(f"For 'MeanSurfaceDistance.update', the size of predicted value (input[0]) and true "
f"value (input[1]) should be greater than 0, in addition to that, predicted value and "
f"true value should have the same shape, but got predicted value size: {y_pred.size}, "
f"shape: {y_pred.shape}, true value size: {y.size}, shape: {y.shape}.")
if y_pred.dtype != bool:
y_pred = y_pred == label_idx
@ -160,7 +166,7 @@ class MeanSurfaceDistance(Metric):
RuntimeError: If the update method is not called first, an error will be reported.
"""
if self._is_update is False:
raise RuntimeError('Call the update method before calling eval.')
raise RuntimeError('Please call the update method before calling the eval method.')
mean_surface_distance = self._get_surface_distance(self._y_pred_edges, self._y_edges)

View File

@ -94,7 +94,7 @@ class Metric(metaclass=ABCMeta):
elif isinstance(data, np.ndarray):
pass
else:
raise TypeError('The input data type must be a tensor, list or numpy.ndarray')
raise TypeError(f'The input data type must be tensor, list or numpy.ndarray, but got {type(data)}.')
return data
def _check_onehot_data(self, data):
@ -181,7 +181,8 @@ class Metric(metaclass=ABCMeta):
0.3333333333333333
"""
if not isinstance(indexes, list) or not all(isinstance(i, int) for i in indexes):
raise ValueError("The indexes should be a list and all its elements should be int")
raise ValueError("For 'set_indexes', the argument 'indexes' should be a list and all its elements should "
"be int, please check whether it is correct.")
self._indexes = indexes
return self
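
A hedged sketch of the set_indexes contract enforced above: 'indexes' must be a list of ints selecting which positional inputs update() consumes. It mirrors the docstring example referenced above and assumes MindSpore 1.x (mindspore.nn.Accuracy).

import numpy as np
from mindspore.nn import Accuracy

x = np.array([[0.2, 0.5], [0.3, 0.1], [0.9, 0.6]])
y = np.array([1, 0, 1])
y2 = np.array([0, 0, 1])

metric = Accuracy('classification').set_indexes([0, 2])   # consume inputs 0 and 2
metric.clear()
metric.update(x, y, y2)
print(metric.eval())                                       # 0.3333333333333333

try:
    Accuracy('classification').set_indexes([0, '2'])       # not all ints -> ValueError above
except ValueError as err:
    print(err)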
@ -249,7 +250,7 @@ class EvaluationBase(Metric):
def __init__(self, eval_type):
super(EvaluationBase, self).__init__()
if eval_type not in _eval_types:
raise TypeError('Type must be in {}, but got {}'.format(_eval_types, eval_type))
raise TypeError("The argument 'eval_type' must be in {}, but got {}".format(_eval_types, eval_type))
self._type = eval_type
def _check_shape(self, y_pred, y):
@ -262,18 +263,23 @@ class EvaluationBase(Metric):
"""
if self._type == 'classification':
if y_pred.ndim != y.ndim + 1:
raise ValueError('Classification case, dims of y_pred equal dims of y add 1, '
'but got y_pred: {} dims and y: {} dims'.format(y_pred.ndim, y.ndim))
raise ValueError("In classification case, the dimension of y_pred (predicted value) should equal to "
"the dimension of y (true value) add 1, but got y_pred dimension: {} and y "
"dimension: {}.".format(y_pred.ndim, y.ndim))
if y.shape != (y_pred.shape[0],) + y_pred.shape[2:]:
raise ValueError('Classification case, y_pred shape and y shape can not match. '
'got y_pred shape is {} and y shape is {}'.format(y_pred.shape, y.shape))
raise ValueError("In classification case, y_pred (predicted value) shape and y (true value) shape "
"can not match, y shape should be equal to y_pred shape that the value at index 1 "
"is deleted. Such as y_pred shape (1, 2, 3), then y shape should be (1, 3). "
"But got y_pred shape {} and y shape {}".format(y_pred.shape, y.shape))
else:
if y_pred.ndim != y.ndim:
raise ValueError('{} case, dims of y_pred must be equal to dims of y, but got y_pred: {} '
'dims and y: {} dims.'.format(self._type, y_pred.ndim, y.ndim))
raise ValueError("In {} case, the dimension of y_pred (predicted value) should equal to the dimension"
" of y (true value), but got y_pred dimension: {} and y dimension: {}."
.format(self._type, y_pred.ndim, y.ndim))
if y_pred.shape != y.shape:
raise ValueError('{} case, y_pred shape must be equal to y shape, but got y_pred: {} and y: {}'.
format(self._type, y_pred.shape, y.shape))
raise ValueError("In {} case, the shape of y_pred (predicted value) should equal to the shape of y "
"(true value), but got y_pred shape: {} and y shape: {}."
.format(self._type, y_pred.shape, y.shape))
def _check_value(self, y_pred, y):
"""
@ -284,7 +290,8 @@ class EvaluationBase(Metric):
y (Tensor): Target array.
"""
if self._type != 'classification' and not (np.equal(y_pred ** 2, y_pred).all() and np.equal(y ** 2, y).all()):
raise ValueError('For multilabel case, input value must be 1 or 0.')
raise ValueError("In multilabel case, all elements in y_pred (predicted value) and y (true value) should "
"be 0 or 1.Please check whether your inputs y_pred and y are correct.")
def clear(self):
"""

View File

@ -139,8 +139,8 @@ class OcclusionSensitivity(Metric):
RuntimeError: If the number of labels is different from the number of batches.
"""
if len(inputs) != 3:
raise ValueError('The occlusion_sensitivity needs 3 inputs (model, y_pred, y), '
'but got {}'.format(len(inputs)))
raise ValueError("For 'OcclusionSensitivity.update', it needs 3 inputs (classification model, "
"predicted value, label), but got {}.".format(len(inputs)))
model = inputs[0]
y_pred = self._convert_data(inputs[1])
@ -148,7 +148,8 @@ class OcclusionSensitivity(Metric):
model = validator.check_value_type("model", model, [nn.Cell])
if y_pred.shape[0] > 1:
raise RuntimeError("Expected batch size of 1.")
raise RuntimeError(f"For 'OcclusionSensitivity.update', the shape at index 0 of the predicted value "
f"(input[1]) should be 1, but got {y_pred.shape[0]}.")
if isinstance(label, int):
label = np.array([[label]], dtype=int)
@ -204,7 +205,7 @@ class OcclusionSensitivity(Metric):
"""
if not self._is_update:
raise RuntimeError('Call the update method before calling eval.')
raise RuntimeError('Please call the update method before calling the eval method.')
sensitivity = self._baseline - np.squeeze(self._sensitivity_im)

View File

@ -81,14 +81,16 @@ class Perplexity(Metric):
RuntimeError: If label shape is not equal to pred shape.
"""
if len(inputs) != 2:
raise ValueError('The perplexity needs 2 inputs (preds, labels), but got {}.'.format(len(inputs)))
raise ValueError("For 'Perplexity.update', it needs 2 inputs (predicted value, label), but got {}."
.format(len(inputs)))
preds = [self._convert_data(inputs[0])]
labels = [self._convert_data(inputs[1])]
if len(preds) != len(labels):
raise RuntimeError('The preds and labels should have the same length, but the length of preds is{}, '
'the length of labels is {}.'.format(len(preds), len(labels)))
raise RuntimeError("For 'Perplexity.update', predicted value (input[0]) and label (input[1]) should have "
"the same length, but got predicted value length {}, label length {}."
.format(len(preds), len(labels)))
loss = 0.
num = 0
@ -121,6 +123,7 @@ class Perplexity(Metric):
RuntimeError: If the sample size is 0.
"""
if self._num_inst == 0:
raise RuntimeError('The perplexity can not be calculated, because the number of samples is 0.')
raise RuntimeError("The 'Perplexity' can not be calculated, because the number of samples is 0, please "
"check whether has called update method before calling eval method.")
return math.exp(self._sum_metric / self._num_inst)

View File

@ -90,7 +90,8 @@ class Precision(EvaluationBase):
ValueError: If the number of inputs is not 2.
"""
if len(inputs) != 2:
raise ValueError('The precision needs 2 inputs (y_pred, y), but got {}'.format(len(inputs)))
raise ValueError("For 'Precision.update', it needs 2 inputs (predicted value, true value), "
"but got {}.".format(len(inputs)))
y_pred = self._convert_data(inputs[0])
y = self._convert_data(inputs[1])
if self._type == 'classification' and y_pred.ndim == y.ndim and self._check_onehot_data(y):
@ -101,8 +102,9 @@ class Precision(EvaluationBase):
if self._class_num == 0:
self._class_num = y_pred.shape[1]
elif y_pred.shape[1] != self._class_num:
raise ValueError('Class number not match, last input data contain {} classes, but current data contain {} '
'classes'.format(self._class_num, y_pred.shape[1]))
raise ValueError("Class number not match, last input predicted data contain {} classes, but current "
"predicted data contain {} classes, please check your predicted value(inputs[0])"
.format(self._class_num, y_pred.shape[1]))
class_num = self._class_num
if self._type == "classification":
@ -140,7 +142,9 @@ class Precision(EvaluationBase):
numpy.float64, the computed result.
"""
if self._class_num == 0:
raise RuntimeError('The input number of samples can not be 0.')
raise RuntimeError("The 'Precision' can not be calculated, because the number of samples is 0, "
"please check whether your inputs (predicted value, true value) are empty, or "
"has called update method before calling eval method.")
validator.check_value_type("average", average, [bool], self.__class__.__name__)
result = self._true_positives / (self._positives + self.eps)
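
A hedged usage sketch of the two-input contract and the non-empty-sample check reworded above; it assumes MindSpore 1.x, where Precision is exported from mindspore.nn.

import numpy as np
from mindspore.nn import Precision

metric = Precision('classification')
metric.clear()
metric.update(np.array([[0.2, 0.5], [0.7, 0.1]]), np.array([1, 0]))   # exactly two inputs
print(metric.eval(average=True))                                      # 1.0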

View File

@ -91,7 +91,8 @@ class Recall(EvaluationBase):
ValueError: If the number of inputs is not 2.
"""
if len(inputs) != 2:
raise ValueError('The recall needs 2 inputs (y_pred, y), but got {}'.format(len(inputs)))
raise ValueError("For 'Recall.update', it needs 2 inputs (predicted value, true value), "
"but got {}.".format(len(inputs)))
y_pred = self._convert_data(inputs[0])
y = self._convert_data(inputs[1])
if self._type == 'classification' and y_pred.ndim == y.ndim and self._check_onehot_data(y):
@ -102,8 +103,9 @@ class Recall(EvaluationBase):
if self._class_num == 0:
self._class_num = y_pred.shape[1]
elif y_pred.shape[1] != self._class_num:
raise ValueError('The class number does not match, the last input data contains {} classes, '
'but the current data contains {} classes'.format(self._class_num, y_pred.shape[1]))
raise ValueError("Class number not match, last input predicted data contain {} classes, but current "
"predicted data contain {} classes, please check your predicted value(inputs[0])."
.format(self._class_num, y_pred.shape[1]))
class_num = self._class_num
if self._type == "classification":
@ -140,7 +142,9 @@ class Recall(EvaluationBase):
numpy.float64, the computed result.
"""
if self._class_num == 0:
raise RuntimeError('The input number of samples can not be 0.')
raise RuntimeError("The 'Recall' can not be calculated, because the number of samples is 0, please check "
"whether your inputs (predicted value, true value) are empty, or has called update "
"method before calling eval method.")
validator.check_value_type("average", average, [bool], self.__class__.__name__)
result = self._true_positives / (self._actual_positives + self.eps)

View File

@ -206,7 +206,7 @@ class ROC(Metric):
"""
if self._is_update is False:
raise RuntimeError('Call the update method before calling eval.')
raise RuntimeError('Please call the update method before calling the eval method.')
y_pred = np.squeeze(np.vstack(self.y_pred))
y = np.squeeze(np.vstack(self.y))

View File

@ -238,10 +238,12 @@ class FTRL(Optimizer):
optimizer operation.
"""
if not isinstance(value, str):
raise TypeError("The value must be str type, but got value type is {}".format(type(value)))
raise TypeError("For 'FTRL', the property 'target' must be string type, "
"but got type {}.".format(type(value)))
if value not in ('CPU', 'Ascend', 'GPU'):
raise ValueError("The value must be 'CPU', 'Ascend' or 'GPU', but got value {}".format(value))
raise ValueError("For 'FTRL', the property 'target' must be 'CPU', 'Ascend' or 'GPU', "
"but got {}".format(value))
if value == 'CPU':
self.sparse_opt = P.FusedSparseFtrl(self.lr, self.l1, self.l2, self.lr_power, self.use_locking)

View File

@ -132,7 +132,8 @@ class Optimizer(Cell):
super(Optimizer, self).__init__(auto_prefix=False)
parameters = self._parameters_base_check(parameters, "parameters")
if not all(isinstance(x, Parameter) for x in parameters) and not all(isinstance(x, dict) for x in parameters):
raise TypeError("All elements of the optimizer parameters must be of type `Parameter` or `dict`.")
raise TypeError("For 'Optimizer', all elements of the argument 'parameters' must be "
"'Parameter' or 'dict'.")
if isinstance(loss_scale, int):
loss_scale = float(loss_scale)
@ -213,7 +214,8 @@ class Optimizer(Cell):
self.use_parallel = True
elif _get_parallel_mode() == ParallelMode.DATA_PARALLEL \
and context.get_context("device_target") != "Ascend":
raise RuntimeError("Parallel optimizer only supports Ascend in data parallel mode.")
raise RuntimeError("Parallel optimizer only supports 'Ascend' in data parallel mode, "
"but got {}.".format(context.get_context("device_target")))
elif _get_parallel_mode() in (ParallelMode.STAND_ALONE, ParallelMode.HYBRID_PARALLEL):
raise RuntimeError("Parallel optimizer is not supported in {}.".format(_get_parallel_mode()))
else:
@ -222,7 +224,8 @@ class Optimizer(Cell):
self.use_parallel = False
if self.use_parallel:
if self.cls_name not in ["Lamb", "AdamWeightDecay", "AdaFactor"]:
raise RuntimeError("Parallel optimizer does not support optimizer {}".format(self.cls_name))
raise RuntimeError("Parallel optimizer only support optimizer 'Lamb', 'AdamWeightDecay' or "
"'AdaFactor', but got {}.".format(self.cls_name))
self.dev_num = _get_device_num()
if self.dev_num > self.param_length:
raise RuntimeError("Parallel optimizer can not be applied when the number of parameters {} is"
@ -249,7 +252,8 @@ class Optimizer(Cell):
def unique(self, value):
"""Set the `unique` attribute."""
if not isinstance(value, bool):
raise TypeError("The value type must be bool, but got value type is {}".format(type(value)))
raise TypeError("For 'Optimizer', the property 'unique' must be bool, "
"but got {}".format(type(value)))
self._unique = value
@property
@ -274,19 +278,23 @@ class Optimizer(Cell):
optimizer operation.
"""
if not isinstance(value, str):
raise TypeError("The value must be str type, but got value type is {}".format(type(value)))
raise TypeError("For 'Optimizer', the property 'target' must be string, but got {}".format(type(value)))
if value not in ('CPU', 'Ascend', 'GPU'):
raise ValueError("The value must be 'CPU', 'Ascend' or 'GPU', but got value {}".format(value))
raise ValueError("For 'Optimizer', the property 'target' must be one of ['CPU', 'Ascend' ,'GPU'], "
"but got {}".format(value))
if self._target == "CPU" and value in ('Ascend', 'GPU'):
raise ValueError("In the CPU environment, target cannot be set to 'GPU' or 'Ascend'.")
raise ValueError("For 'Optimizer', the property 'target' cannot be set to 'GPU' or 'Ascend' "
"in the 'CPU' environment.")
if self._target == "Ascend" and value == 'GPU':
raise ValueError("In the Ascend environment, target cannot be set to 'GPU'.")
raise ValueError("For 'Optimizer', the property 'target' cannot be set to 'GPU' "
"in the 'Ascend' environment.")
if self._target == "GPU" and value == 'Ascend':
raise ValueError("In the GPU environment, target cannot be set to 'Ascend'.")
raise ValueError("For 'Optimizer', the property 'target' cannot be set to 'Ascend' "
"in the 'GPU' environment.")
self._is_device = (value != 'CPU')
self._target = value
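
The 'target' setter above uses a two-stage check; the following standalone sketch (with a hypothetical _check_target helper) shows the pattern: reject wrong types with TypeError first, then reject unsupported values with ValueError, naming the owner in both messages.

_VALID_TARGETS = ('CPU', 'Ascend', 'GPU')

def _check_target(owner, value):
    if not isinstance(value, str):
        raise TypeError(f"For '{owner}', the property 'target' must be string, "
                        f"but got {type(value)}.")
    if value not in _VALID_TARGETS:
        raise ValueError(f"For '{owner}', the property 'target' must be one of "
                         f"{list(_VALID_TARGETS)}, but got {value}.")
    return value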
@ -515,8 +523,9 @@ class Optimizer(Cell):
for param in group_param['params']:
validator.check_value_type("parameter", param, [Parameter], self.cls_name)
if param.name in params_store:
raise RuntimeError(f"The {param.name} parameter already exists in parameter groups, "
f"duplicate parameters are not supported.")
raise RuntimeError(f"The {param.name} parameter already exists, it does not support "
f"repeated setting. Please check whether the optimizer parameter "
f"has been set multiple times.")
params_store.append(param.name)
self.group_lr.append(lr)
@ -788,7 +797,8 @@ class _ConvertToCell(LearningRateSchedule):
def __init__(self, learning_rate):
super(_ConvertToCell, self).__init__()
if not isinstance(learning_rate, Parameter):
raise TypeError('Learning rate must be Parameter.')
raise TypeError("For '_ConvertToCell', the argument 'learning_rate' must be Parameter, "
"but got {}.".format(type(learning_rate)))
self.learning_rate = learning_rate
def construct(self, global_step):
@ -801,10 +811,11 @@ class _IteratorLearningRate(LearningRateSchedule):
super(_IteratorLearningRate, self).__init__()
if isinstance(learning_rate, Tensor):
if learning_rate.ndim != 1:
raise ValueError("The dim of `Tensor` type dynamic learning rate should be 1, "
f"but got {learning_rate.ndim}.")
raise ValueError(f"For '_IteratorLearningRate', the dimension of the argument 'learning_rate' should "
f"be 1, but got {learning_rate.ndim}.")
else:
raise TypeError("Learning rate should be Tensor.")
raise TypeError("For '_IteratorLearningRate', the argument 'learning_rate' should be Tensor, "
"but got {}.".format(type(learning_rate)))
self.learning_rate = Parameter(learning_rate, name)
self.gather = P.Gather()

View File

@ -207,10 +207,12 @@ class ProximalAdagrad(Optimizer):
optimizer operation.
"""
if not isinstance(value, str):
raise TypeError("The value must be str type, but got value type is {}".format(type(value)))
raise TypeError("For 'ProximalAdagrad', the property 'target' must be string type, "
"but got {}".format(type(value)))
if value not in ('CPU', 'Ascend', 'GPU'):
raise ValueError("The value must be 'CPU', 'Ascend' or 'GPU', but got value {}".format(value))
raise ValueError("For 'ProximalAdagrad', the property 'target' must be 'CPU', 'Ascend' or 'GPU', "
"but got {}.".format(value))
if value == 'CPU':
self.sparse_opt = P.FusedSparseProximalAdagrad(self.use_locking).add_prim_attr("primitive_target", "CPU")

View File

@ -107,10 +107,12 @@ def _check_param(momentum, frequency, lr, cls_name):
"""Check param."""
Validator.check_value_type("momentum", momentum, [float], cls_name)
if isinstance(momentum, float) and momentum < 0.0:
raise ValueError("momentum should be at least 0.0, but got momentum {}".format(momentum))
raise ValueError("For 'thor', the argument 'momentum' should be at least 0.0, "
"but got 'momentum' {}.".format(momentum))
Validator.check_value_type("frequency", frequency, [int], cls_name)
if isinstance(frequency, int) and frequency < 2:
raise ValueError("frequency should be at least 2, but got frequency {}".format(frequency))
raise ValueError("For 'thor', the argument 'frequency' should be at least 2, "
"but got 'frequency' {}.".format(frequency))
Validator.check_value_type("learning rate", lr, [Tensor], cls_name)

View File

@ -1428,9 +1428,11 @@ def load_distributed_checkpoint(network, checkpoint_filenames, predict_strategy=
for dim in train_strategy[list(train_strategy.keys())[0]][0]:
train_dev_count *= dim
if train_dev_count != ckpt_file_len:
raise ValueError(f"For 'Load_distributed_checkpoint', the length of 'checkpoint_filenames' should be "
f"equal to the device count of training process. But the length of 'checkpoint_filenames'"
f" is {ckpt_file_len} and the device count is {train_dev_count}.")
raise ValueError(f"For 'load_distributed_checkpoint', the argument 'predict_strategy' is dict, "
f"the key of it must be string, and the value of it must be list or tuple that "
f"the first four elements are dev_matrix (list[int]), tensor_map (list[int]), "
f"param_split_shape (list[int]) and field_size (int, which value is 0)."
f"Please check whether 'predict_strategy' is correct.")
rank_list = _infer_rank_list(train_strategy, predict_strategy)
param_total_dict = defaultdict(dict)
@ -1564,7 +1566,10 @@ def _check_checkpoint_file(checkpoint_filenames):
for index, filename in enumerate(checkpoint_filenames):
if not isinstance(filename, str) or not os.path.exists(filename) \
or filename[-5:] != ".ckpt" or os.path.getsize(filename) == 0:
raise ValueError(f"Please make sure that the {filename} at index {index} is a valid checkpoint file.")
raise ValueError(f"For 'load_distributed_checkpoint', please check 'checkpoint_filenames', and "
f"make sure the {filename} at index {index} is a valid checkpoint file, it must "
f"be a string ending with '.ckpt', and the checkpoint file it represents must "
f"be exist and not empty.")
def _convert_to_list(strategy):
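
A standalone sketch (hypothetical _check_ckpt_files helper, mirroring _check_checkpoint_file above) of the rule the new message spells out: every entry must be an existing, non-empty file ending with '.ckpt'.

import os

def _check_ckpt_files(checkpoint_filenames):
    for index, filename in enumerate(checkpoint_filenames):
        if not isinstance(filename, str) or not os.path.exists(filename) \
                or not filename.endswith(".ckpt") or os.path.getsize(filename) == 0:
            raise ValueError(f"For 'load_distributed_checkpoint', please check 'checkpoint_filenames': "
                             f"the entry {filename} at index {index} must be an existing, non-empty "
                             f"checkpoint file ending with '.ckpt'.")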

View File

@ -49,7 +49,9 @@ def test_classification_accuracy_indexes_awareness():
@pytest.mark.parametrize('indexes', [0, [0., 2.], [0., 1], ['1', '0']])
def test_set_indexes(indexes):
with pytest.raises(ValueError, match="indexes should be a list and all its elements should be int"):
pat_str = "For 'set_indexes', the argument 'indexes' should be a list and all its elements should " \
"be int, please check whether it is correct."
with pytest.raises(ValueError, match=pat_str):
_ = Accuracy('classification').set_indexes(indexes)
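
A hedged tip for tests that match the new messages: pytest.raises(match=...) treats the pattern as a regular expression, so messages containing parentheses, such as "inputs (predicted value, true value)", are safest wrapped in re.escape(). The test below is illustrative only and not part of this diff.

import re
import pytest
from mindspore.nn import Accuracy

def test_accuracy_eval_without_update():
    msg = "inputs (predicted value, true value) are empty"
    with pytest.raises(RuntimeError, match=re.escape(msg)):
        Accuracy('classification').eval()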