forked from mindspore-Ecosystem/mindspore
optimize the comment and log descriptions
modified: cell.py
modified: loss/loss.py
modified: metrics/hausdorff_distance.py
modified: metrics/mean_surface_distance.py
modified: metrics/root_mean_square_surface_distance.py
modified: mindspore/nn/dynamic_lr.py
modified: mindspore/nn/learning_rate_schedule.py
modified: .jenkins/check/config/whitelizard.txt
parent ec981124d0
commit db08e766b6
@@ -17,6 +17,7 @@ mindspore/model_zoo/official/recommend/wide_and_deep/src/wide_and_deep.py:__init__
 mindspore/model_zoo/official/recommend/wide_and_deep_multitable/src/wide_and_deep.py:__init__
 mindspore/mindspore/ccsrc/pipeline/jit/resource.cc:mindspore::pipeline::GetMethodMap
 mindspore/mindspore/ops/operations/array_ops.py:_compute_slicing_shape
+mindspore/mindspore/context.py:set_auto_parallel_context
 mindspore/mindspore/common/tensor.py:__init__
 mindspore/mindspore/common/parameter.py:set_data
 mindspore/mindspore/ccsrc/pybind_api/ir/tensor_py.cc:mindspore::tensor::GetDataType

@@ -157,7 +157,7 @@ def check_number(arg_value, value, rel, arg_type=int, arg_name=None, prim_name=None):
 
     if isinstance(arg_value, arg_type):
         if math.isinf(arg_value) or math.isnan(arg_value) or np.isinf(arg_value) or np.isnan(arg_value):
-            raise ValueError(f'{arg_name} {prim_name} must be legal value, but got `{arg_value}`.')
+            raise ValueError(f'{arg_name} {prim_name} must be a legal value, but got `{arg_value}`.')
     else:
         raise TypeError(f'{arg_name} {prim_name} must be {arg_type.__name__}, but got `{type(arg_value).__name__}`')
 
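For reference, a minimal runnable sketch of the validation pattern this hunk touches (the real check_number also compares arg_value against value using the relation rel, which is omitted here; the helper name is illustrative):

```python
import math

import numpy as np


def check_number_sketch(arg_value, arg_type=int, arg_name='input value', prim_name=''):
    """Reject non-finite values and wrong types, as in the hunk above (sketch only)."""
    if isinstance(arg_value, arg_type):
        if math.isinf(arg_value) or math.isnan(arg_value) or np.isinf(arg_value) or np.isnan(arg_value):
            raise ValueError(f'{arg_name} {prim_name} must be a legal value, but got `{arg_value}`.')
    else:
        raise TypeError(f'{arg_name} {prim_name} must be {arg_type.__name__}, '
                        f'but got `{type(arg_value).__name__}`')
    return arg_value


check_number_sketch(3.0, arg_type=float)             # returns 3.0
# check_number_sketch(float('nan'), arg_type=float)  # ValueError: must be a legal value
```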
@@ -184,7 +184,7 @@ def check_is_number(arg_value, arg_type, arg_name=None, prim_name=None):
     arg_name = f"\'{arg_name}\'" if arg_name else 'input value'
     if isinstance(arg_value, arg_type) and not isinstance(arg_value, bool):
         if math.isinf(arg_value) or math.isnan(arg_value) or np.isinf(arg_value) or np.isnan(arg_value):
-            raise ValueError(f'{prim_name} {arg_name} must be legal float, but got `{arg_value}`.')
+            raise ValueError(f'{prim_name} {arg_name} must be a legal float, but got `{arg_value}`.')
         return arg_value
     raise TypeError(f'{prim_name} type of {arg_name} must be {arg_type.__name__}, but got `{type(arg_value).__name__}`')
 
@@ -665,7 +665,7 @@ class Validator:
 
         # if multiple arguments provided, it must be `ndim` number of ints
         if len(axes) != ndim:
-            raise ValueError("The number of axes must equal to the dimension of tensor.")
+            raise ValueError("The number of axes must be equal to the dimension of tensor.")
         return axes
 
     @staticmethod
@@ -705,11 +705,11 @@ class Validator:
         if isinstance(axes, (tuple, list)):
             for axis in axes:
                 if not isinstance(axis, int):
-                    raise TypeError(f"axis argument should be integer, but got {type(axis)}.")
+                    raise TypeError(f"The axis argument should be integer, but got {type(axis)}.")
                 Validator.check_axis_in_range(axis, ndim)
             axes = tuple(map(lambda x: x % ndim, axes))
             return axes
-        raise TypeError(f"axes should be integer, list or tuple for check, but got {type(axes)}.")
+        raise TypeError(f"The axes should be integer, list or tuple for check, but got {type(axes)}.")
 
     @staticmethod
     def prepare_shape_for_squeeze(shape, axes):
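The `x % ndim` mapping in the hunk above is what turns negative axes into their positive equivalents; a quick standalone illustration (hypothetical values):

```python
ndim = 4
axes = (-1, 0, -3)
# Python's % always returns a non-negative result for a positive modulus,
# so -1 % 4 == 3 and -3 % 4 == 1: negative axes count from the end.
print(tuple(ax % ndim for ax in axes))  # (3, 0, 1)
```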
@@ -730,33 +730,33 @@ class Validator:
         # Convert to set
         if isinstance(axes, int):
             if axes >= ndim or axes < -ndim:
-                raise ValueError(f"axis {axes} is out of bounds for tensor of dimension {ndim}")
+                raise ValueError(f"The axis {axes} is out of bounds for tensor of dimension {ndim}")
             axes = {axes}
 
         elif isinstance(axes, (list, tuple)):
             for axis in axes:
                 if axis >= ndim or axis < -ndim:
-                    raise ValueError(f"axis {axis} is out of bounds for tensor of dimension {ndim}")
+                    raise ValueError(f"The axis {axis} is out of bounds for tensor of dimension {ndim}")
             axes = set(axes)
 
         else:
-            raise TypeError(f"only int, tuple and list are allowed for axes, but got {type(axes)}")
+            raise TypeError(f"Only int, tuple and list are allowed for axes, but got {type(axes)}")
 
         for idx, s in enumerate(shape):
             if s != 1 or (idx not in axes) and (idx - ndim not in axes):
                 new_shape.append(s)
             # if an axis is selected with shape entry greater than one, an error is raised.
             if s != 1 and ((idx in axes) or (idx - ndim in axes)):
-                raise ValueError(f"axis {axes} has shape entry {s} > 1, cannot be squeezed.")
+                raise ValueError(f"The axis {axes} has shape entry {s} > 1, cannot be squeezed.")
         return tuple(new_shape)
 
     @staticmethod
     def check_axis_in_range(axis, ndim):
         """Checks axes are within the bounds of ndim"""
         if not isinstance(axis, int):
-            raise TypeError(f'axes should be integers, not {type(axis)}')
+            raise TypeError(f'The axes should be integers, not {type(axis)}')
         if not -ndim <= axis < ndim:
-            raise ValueError(f'axis {axis} is out of bounds for array of dimension {ndim}')
+            raise ValueError(f'The axis {axis} is out of bounds for array of dimension {ndim}')
         return axis % ndim
 
     @staticmethod
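A condensed, standalone sketch of the squeeze-shape logic in the hunk above (same semantics under the stated assumptions; not the MindSpore implementation itself):

```python
def prepare_shape_for_squeeze_sketch(shape, axes):
    """Drop size-1 dimensions named in `axes`; reject axes whose entry is > 1."""
    ndim = len(shape)
    if isinstance(axes, int):
        axes = {axes}
    axes = {ax % ndim for ax in axes}  # canonicalize negative axes
    new_shape = []
    for idx, s in enumerate(shape):
        if idx in axes:
            if s != 1:
                raise ValueError(f"The axis {idx} has shape entry {s} > 1, cannot be squeezed.")
        else:
            new_shape.append(s)
    return tuple(new_shape)


print(prepare_shape_for_squeeze_sketch((1, 3, 1, 4), 0))        # (3, 1, 4)
print(prepare_shape_for_squeeze_sketch((1, 3, 1, 4), (0, -2)))  # (3, 4)
```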
@@ -809,7 +809,7 @@ class Validator:
         for items in zip_longest(*reversed_shapes, fillvalue=1):
             max_size = 0 if 0 in items else max(items)
             if any(item not in (1, max_size) for item in items):
-                raise ValueError(f'operands could not be broadcast together with shapes {*shapes,}')
+                raise ValueError(f'The operands could not be broadcast together with shapes {*shapes,}')
             shape_out.appendleft(max_size)
         return tuple(shape_out)
 
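This is the standard NumPy broadcasting rule: align shapes from the trailing dimension, and every aligned pair of sizes must be equal or contain a 1. A self-contained sketch of the check above:

```python
from collections import deque
from itertools import zip_longest


def infer_broadcast_shape(*shapes):
    """Compute the broadcast result shape, NumPy-style (sketch of the hunk above)."""
    shape_out = deque()
    reversed_shapes = map(reversed, shapes)
    for items in zip_longest(*reversed_shapes, fillvalue=1):
        max_size = 0 if 0 in items else max(items)
        if any(item not in (1, max_size) for item in items):
            raise ValueError(f'The operands could not be broadcast together with shapes {*shapes,}')
        shape_out.appendleft(max_size)
    return tuple(shape_out)


print(infer_broadcast_shape((8, 1, 6), (7, 1)))  # (8, 7, 6)
```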
@@ -835,7 +835,7 @@ class Validator:
             type_str += "tuple, "
         if type_list:
             type_str += "list, "
-        raise TypeError(f"Axis should be {type_str}but got {type(axis)}.")
+        raise TypeError(f"The axis should be {type_str}but got {type(axis)}.")
 
     @staticmethod
     def check_and_canonicalize_axes(axes, ndim):
@@ -846,7 +846,7 @@ class Validator:
             if not isinstance(ax, int):
                 raise TypeError((f"Each axis should be integer, but got {type(ax)} in {axes}."))
             if not -ndim <= ax < ndim:
-                raise ValueError(f'axis {ax} is out of bounds for array of dimension {ndim}')
+                raise ValueError(f'The axis {ax} is out of bounds for array of dimension {ndim}')
             ax = ax if ax >= 0 else ax + ndim
             new_axes += (ax,)
         if any(new_axes.count(el) > 1 for el in new_axes):
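A hedged sketch of the whole canonicalization step, including the duplicate check that the hunk cuts off after its last context line (the duplicate-error wording below is assumed, not taken from the source):

```python
def canonicalize_axes_sketch(axes, ndim):
    """Map each axis into [0, ndim) and reject duplicates (illustrative only)."""
    axes = axes if isinstance(axes, tuple) else (axes,)
    new_axes = ()
    for ax in axes:
        if not isinstance(ax, int):
            raise TypeError(f"Each axis should be integer, but got {type(ax)} in {axes}.")
        if not -ndim <= ax < ndim:
            raise ValueError(f'The axis {ax} is out of bounds for array of dimension {ndim}')
        new_axes += (ax if ax >= 0 else ax + ndim,)
    if any(new_axes.count(el) > 1 for el in new_axes):
        raise ValueError('Duplicate axes are not allowed.')  # assumed message wording
    return new_axes


print(canonicalize_axes_sketch((0, -1), 3))  # (0, 2)
```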
@@ -956,7 +956,7 @@ def args_type_check(*type_args, **type_kwargs):
            for name, value in argument_dict.items():
                if name in bound_types:
                    if value is not None and not isinstance(value, bound_types[name]):
-                        raise TypeError('Argument {} must be {}'.format(name, bound_types[name]))
+                        raise TypeError('The argument {} must be {}'.format(name, bound_types[name]))
            return func(*args, **kwargs)
 
        return wrapper
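For readers unfamiliar with this pattern, here is a self-contained sketch of such a type-checking decorator; the surrounding scaffolding (signature binding, wrapper) is reconstructed under assumption and the example names are illustrative, not MindSpore internals:

```python
import inspect
from functools import wraps


def args_type_check_sketch(*type_args, **type_kwargs):
    """Decorator factory: bind expected types to parameter names and check each call."""
    def type_check(func):
        sig = inspect.signature(func)
        bound_types = sig.bind_partial(*type_args, **type_kwargs).arguments

        @wraps(func)
        def wrapper(*args, **kwargs):
            argument_dict = sig.bind(*args, **kwargs).arguments
            for name, value in argument_dict.items():
                if name in bound_types:
                    if value is not None and not isinstance(value, bound_types[name]):
                        raise TypeError('The argument {} must be {}'.format(name, bound_types[name]))
            return func(*args, **kwargs)
        return wrapper
    return type_check


@args_type_check_sketch(device_num=int, gateway=str)
def set_server(device_num, gateway=None):
    return device_num, gateway


set_server(8, gateway="10.0.0.1")  # ok
# set_server("8")                  # TypeError: The argument device_num must be <class 'int'>
```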
@@ -141,7 +141,7 @@ class _Context:
 
     Note:
         Creating a context by instantiating a Context object is not recommended;
-        should use context() to get the context since Context is singleton.
+        use context() to get the context since Context is a singleton.
     """
     _instance = None
     _instance_lock = threading.Lock()
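The singleton note refers to the usual lock-guarded construction pattern hinted at by `_instance` and `_instance_lock` above; a minimal sketch (not the actual _Context body):

```python
import threading


class SingletonContext:
    """Double-checked locking singleton, as suggested by the class attributes above."""
    _instance = None
    _instance_lock = threading.Lock()

    def __new__(cls):
        if cls._instance is None:
            with cls._instance_lock:
                if cls._instance is None:  # re-check after acquiring the lock
                    cls._instance = super().__new__(cls)
        return cls._instance


assert SingletonContext() is SingletonContext()
```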
@@ -244,11 +244,11 @@ class Cell(Cell_):
     @pipeline_stage.setter
     def pipeline_stage(self, value):
         if isinstance(value, bool):
-            raise TypeError("'pipeline_stage' must be int type, but got bool.")
+            raise TypeError("'pipeline_stage' must be an int type, but got bool.")
         if not isinstance(value, int):
-            raise TypeError("'pipeline_stage' must be int type.")
+            raise TypeError("'pipeline_stage' must be an int type.")
         if value < 0:
-            raise TypeError("'pipeline_stage' can not less than 0.")
+            raise TypeError("'pipeline_stage' cannot be less than 0.")
         self._pipeline_stage = value
         for item in self.trainable_params():
             item.add_pipeline_stage(value)
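The bool check has to come before the int check because bool is a subclass of int in Python; a quick demonstration of the pitfall the setter guards against:

```python
# isinstance(True, int) is True, so checking int first would silently accept booleans.
print(isinstance(True, int))   # True
print(isinstance(True, bool))  # True


def set_pipeline_stage_sketch(value):
    """Order of checks mirrors the setter above (illustrative helper)."""
    if isinstance(value, bool):
        raise TypeError("'pipeline_stage' must be an int type, but got bool.")
    if not isinstance(value, int):
        raise TypeError("'pipeline_stage' must be an int type.")
    if value < 0:
        raise TypeError("'pipeline_stage' cannot be less than 0.")
    return value
```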
@@ -710,7 +710,6 @@ def _check_ndim_multi(logits_dim, label_dim, prim_name=None):
     if label_dim < 2:
         raise ValueError(f"{msg_prefix} Label dimension should be greater than 1, but got {label_dim}.")
-
 
 @constexpr
 def _check_weights(weight_shape, label_shape, prim_name=None):
     """Internal function, used to check whether the reduced shape meets the requirements."""
@@ -1293,7 +1292,6 @@ def _check_ndim(logits_nidm, labels_ndim, prime_name=None):
     raise ValueError(f"{msg_prefix} dimensions of 'logits' and 'labels' must be equal, but got"
                      f" dimension of 'logits' {logits_nidm} and dimension of 'labels' {labels_ndim}.")
-
 
 @constexpr
 def _check_channel_and_shape(logits, labels, prime_name=None):
     '''Internal function, used to check whether the channels or shape of logits and labels meets the requirements.'''
@@ -96,7 +96,7 @@ def _update_param(param, new_param, strict_load):
         if param.data.shape != new_param.data.shape:
             if not _special_process_par(param, new_param):
                 logger.error("Failed to combine the net and the parameters for param %s.", param.name)
-                msg = ("Net parameters {} shape({}) different from parameter_dict's({})"
+                msg = ("Net parameter {} shape({}) is different from parameter_dict's({})"
                        .format(param.name, param.data.shape, new_param.data.shape))
                 raise RuntimeError(msg)
 
@@ -107,7 +107,7 @@ def _update_param(param, new_param, strict_load):
                 return
 
             logger.error("Failed to combine the net and the parameters for param %s.", param.name)
-            msg = ("Net parameters {} type({}) different from parameter_dict's({})"
+            msg = ("Net parameter {} type({}) is different from parameter_dict's({})"
                    .format(param.name, param.data.dtype, new_param.data.dtype))
             raise RuntimeError(msg)
 
@@ -124,7 +124,7 @@ def _update_param(param, new_param, strict_load):
 
     elif isinstance(new_param.data, Tensor) and not isinstance(param.data, Tensor):
         logger.error("Failed to combine the net and the parameters for param %s.", param.name)
-        msg = ("Net parameters {} type({}) different from parameter_dict's({})"
+        msg = ("Net parameter {} type({}) is different from parameter_dict's({})"
               .format(param.name, type(param.data), type(new_param.data)))
         raise RuntimeError(msg)
 
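The three branches above enforce one rule: a checkpoint entry must match the net parameter in both shape and dtype. A condensed sketch, using numpy arrays as stand-ins for Tensors (illustrative helper, not the MindSpore function):

```python
import numpy as np


def check_param_compatible(name, net_value, ckpt_value):
    """Condensed version of the checks above (illustrative only)."""
    if net_value.shape != ckpt_value.shape:
        raise RuntimeError("Net parameter {} shape({}) is different from parameter_dict's({})"
                           .format(name, net_value.shape, ckpt_value.shape))
    if net_value.dtype != ckpt_value.dtype:
        raise RuntimeError("Net parameter {} type({}) is different from parameter_dict's({})"
                           .format(name, net_value.dtype, ckpt_value.dtype))


check_param_compatible("fc.weight", np.zeros((2, 3)), np.ones((2, 3)))  # ok
```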
@@ -572,11 +572,11 @@ def load_param_into_net(net, parameter_dict, strict_load=False):
 
 
 def _load_dismatch_prefix_params(net, parameter_dict, param_not_load, strict_load):
-    """When some net parameter did not load, try to continue load."""
+    """When some net parameters did not load, try to continue loading."""
     prefix_name = ""
     longest_name = param_not_load[0]
     while prefix_name != longest_name and param_not_load:
-        logger.debug("Count: {} parameters has not been loaded, try to load continue.".format(len(param_not_load)))
+        logger.debug("Count: {} parameters have not been loaded, try to continue loading.".format(len(param_not_load)))
         prefix_name = longest_name
         for net_param_name in param_not_load:
             for dict_name in parameter_dict:
@@ -628,7 +628,7 @@ def _get_merged_param_data(net, param_name, param_data, integrated_save):
     """
     layout = net.parameter_layout_dict[param_name]
     if len(layout) < 6:
-        logger.info("layout dict does not contain the key %s", param_name)
+        logger.info("The layout dict does not contain the key %s", param_name)
         return param_data
 
     dev_mat = layout[0]
@@ -645,7 +645,7 @@ def _get_merged_param_data(net, param_name, param_data, integrated_save):
     if param_name in net.parallel_parameter_merge_net_dict:
         allgather_net = net.parallel_parameter_merge_net_dict[param_name]
     else:
-        logger.info("need to create allgather net for %s", param_name)
+        logger.info("Need to create allgather net for %s", param_name)
         if integrated_save:
             if context.get_auto_parallel_context("pipeline_stages") > 1:
                 raise RuntimeError("Pipeline parallel doesn't support integrated save of checkpoints now.")
@@ -739,7 +739,7 @@ def export(net, *inputs, file_name, file_format='AIR', **kwargs):
         net = _quant_export(net, *inputs, file_format=file_format, **kwargs)
     if 'enc_key' in kwargs.keys():
         if file_format != 'MINDIR':
-            raise ValueError(f"enc_key can be passed in only when file_format=='MINDIR', but got {file_format}")
+            raise ValueError(f"The enc_key can be passed in only when file_format=='MINDIR', but got {file_format}")
 
         enc_key = Validator.check_isinstance('enc_key', kwargs['enc_key'], bytes)
         enc_mode = 'AES-GCM'
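Based only on the signature and checks visible above, a call that passes an encryption key might look like the following hedged sketch; the network, input shape, and key value are placeholders, and enc_key must be bytes and is only accepted with file_format='MINDIR':

```python
import numpy as np
import mindspore.nn as nn
from mindspore import Tensor, export

net = nn.Dense(3, 2)                               # any Cell serves as an example here
dummy_input = Tensor(np.ones((1, 3), np.float32))
export(net, dummy_input, file_name='model', file_format='MINDIR',
       enc_key=b'0123456789abcdef')                # example 16-byte key; enc_mode defaults to 'AES-GCM'
```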
@@ -908,8 +908,8 @@ def _save_mindir_together(net_dict, model, file_name, is_encrypt, **kwargs):
             param_data = net_dict[param_name].data.asnumpy().tobytes()
             param_proto.raw_data = param_data
         else:
-            logger.error("The parameter %s in the graph are not in the network.", param_name)
-            raise ValueError("The parameter in the graph must in the network.")
+            logger.error("The parameter %s in the graph is not in the network.", param_name)
+            raise ValueError("The parameter in the graph must be in the network.")
     if not file_name.endswith('.mindir'):
         file_name += ".mindir"
     current_path = os.path.abspath(file_name)
@@ -968,7 +968,7 @@ def _quant_export(network, *inputs, file_format, **kwargs):
 
     quant_mode = kwargs['quant_mode']
     if quant_mode not in quant_mode_formats:
-        raise KeyError(f'Quant_mode input is wrong, Please choose the right mode of the quant_mode.')
+        raise KeyError(f'The quant_mode input is wrong, please choose a valid quant_mode.')
     if quant_mode == 'NONQUANT':
         return network
     quant_net = copy.deepcopy(network)
@@ -1049,7 +1049,7 @@ def parse_print(print_file_name):
         pb_content = f.read()
         print_list.ParseFromString(pb_content)
     except BaseException as e:
-        logger.error("Failed to read the print file %s, please check the correct of the file.", print_file_name)
+        logger.error("Failed to read the print file %s, please check the correctness of the file.", print_file_name)
         raise ValueError(e.__str__())
 
     tensor_list = []