fix code check and some docs

fix format

fix commit number
Zichun Ye 2022-08-09 23:50:40 +08:00
parent ace944e14b
commit 2def22dd5d
7 changed files with 83 additions and 77 deletions

View File

@@ -17,10 +17,6 @@ mindspore.ops.space_to_batch_nd
w'_i = (w_i + paddings[i][0] + paddings[i][1])//block\_shape[i]
\end{array}
- .. note::
-     Ascend only supports 4-D tensor input.
Args:
- **input_x** (Tensor) - The input tensor; on the Ascend platform it must be 4-D.
- **block_size** (list[int], tuple[int], int) - The block shape describing how many blocks each spatial dimension is split into. If `block_size` is a list or tuple, its length `M` equals the number of spatial dimensions. If `block_size` is an integer, every spatial dimension is split into `block_size` blocks. On the Ascend backend, `M` must be 2.
@@ -34,6 +30,6 @@ mindspore.ops.space_to_batch_nd
- **TypeError** - If `paddings` is neither a list nor a tuple.
- **ValueError** - If `block_size` is not one-dimensional when it is a list or tuple.
- **ValueError** - If the length of `block_size` is not 2 on the Ascend platform.
- - **ValueError** - If the shape of `paddings` is not (2, M), where M is the length of `block_size`.
+ - **ValueError** - If the shape of `paddings` is not (M, 2), where M is the length of `block_size`.
- **ValueError** - If an element of `block_size` is not an integer larger than 1.
- **ValueError** - If an element of `paddings` is not a non-negative integer.

View File

@@ -153,8 +153,7 @@ class Categorical(Distribution):
self.less = P.Less()
# when the graph kernel mode is enabled,
# use Log directly as akg will handle the corner cases
- self.log = P.Log() if context.get_context(
-     "enable_graph_kernel") else log_generic
+ self.log = P.Log() if context.get_context("enable_graph_kernel") else log_generic
self.log_softmax = P.LogSoftmax()
self.logicor = P.LogicalOr()
self.logicand = P.LogicalAnd()
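The comment above captures the rationale: with graph kernel fusion enabled, AKG covers Log's corner cases, so the raw P.Log() primitive is safe to use directly. For context, a minimal sketch of flipping that flag (an era-appropriate MindSpore build is assumed; `log_generic` is an internal fallback and appears here only by name):

from mindspore import context

# Enable graph kernel fusion; distribution classes such as Categorical then
# bind self.log to the raw P.Log() primitive instead of log_generic.
context.set_context(mode=context.GRAPH_MODE, enable_graph_kernel=True)

print(context.get_context("enable_graph_kernel"))  # True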

View File

@@ -162,8 +162,7 @@ class Distribution(Cell):
self.default_parameters = []
self.parameter_names = []
# cast value to a tensor if it is not None
- value_t = None if value is None else cast_to_tensor(
-     value, self.parameter_type)
+ value_t = None if value is None else cast_to_tensor(value, self.parameter_type)
self.default_parameters.append(value_t)
self.parameter_names.append(name)
return value_t
@@ -184,8 +183,7 @@
if arg is not None:
self.checktensor(arg, name)
else:
- arg = default if default is not None else raise_none_error(
-     name)
+ arg = default if default is not None else raise_none_error(name)
# broadcast if the number of args > 1
if broadcast_shape is None:

View File

@@ -170,8 +170,7 @@ class Normal(Distribution):
self.expm1 = P.Expm1()
# when the graph kernel mode is enabled,
# use Log directly as akg will handle the corner cases
- self.log = P.Log() if context.get_context(
-     "enable_graph_kernel") else log_generic
+ self.log = P.Log() if context.get_context("enable_graph_kernel") else log_generic
self.erf = P.Erf()
self.squeeze = P.Squeeze(0)
self.cast = P.Cast()

View File

@@ -2729,7 +2729,7 @@ def space_to_batch_nd(input_x, block_size, paddings):
ValueError: If `block_size` is not one dimensional when `block_size` is a list or tuple.
ValueError: If the length of `block_size` is not 2 on Ascend.
ValueError: If the element of `block_size` is not an integer larger than 1.
- ValueError: If shape of `paddings` is not (2, M), where M is the length of `block_size`.
+ ValueError: If shape of `paddings` is not (M, 2), where M is the length of `block_size`.
ValueError: If the element of `paddings` is not an integer larger than 0.
TypeError: If `block_size` is not one of list, tuple, int.
TypeError: If `paddings` is neither list nor tuple.
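A short usage sketch tying these constraints together (illustrative shapes; per the docs above, the input must be 4-D on Ascend and `paddings` must have shape (M, 2)):

import numpy as np
import mindspore as ms
from mindspore import Tensor, ops

# 4-D input: (batch, channel, height, width) = (1, 1, 2, 2)
input_x = Tensor(np.arange(4).reshape(1, 1, 2, 2), ms.float32)

# M = 2 spatial dimensions; paddings has shape (M, 2)
block_size = [2, 2]
paddings = [[0, 0], [0, 0]]

# Batch is multiplied by prod(block_size) while each spatial dimension is
# divided by its block size, so the output shape is (4, 1, 1, 1).
out = ops.space_to_batch_nd(input_x, block_size, paddings)
print(out.shape)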

View File

@@ -269,7 +269,7 @@ class VariableUsage(ast.NodeVisitor):
else:
decl, loop, usage = self.status.get(node.id, (None, None, None))
usage.add(type(node.ctx))
- if not loop in self.scope_level:
+ if loop not in self.scope_level:
raise ValueError(
"In the function {} written in the Hybrid DSL, there is "
"a variable used out of the scope it is defined: {}".format(self.func_name, node.id))
@@ -412,7 +412,7 @@
"In the function {} written in the Hybrid DSL, getattr is only supported for a tensor object, "
"not for the object with type: {}".format(self.func_name, type(node.value)))
- if not node.value.id in self.output_tensor + self.temp_tensor + list(self.args_index.keys()):
+ if node.value.id not in self.output_tensor + self.temp_tensor + list(self.args_index.keys()):
raise ValueError(
"In the function {} written in the Hybrid DSL, getattr is only supported for a tensor variable "
"after its declaration, not for: {}".format(self.func_name, node.value.id))
@@ -442,16 +442,17 @@
"should be the name of a tensor, but get a {}.".format(self.func_name, type(i)))
symbols = list(i.id for i in node.value.elts)
for sy in symbols:
- if not sy in list(self.args_index.keys()) + self.output_tensor:
+ if sy not in list(self.args_index.keys()) + self.output_tensor:
raise TypeError("In the function {} written in the Hybrid DSL, the element in the return value "
"should be either an input tensor or a tensor allocated by output_tensor, "
"but get name: {}".format(self.func_name, sy))
for sy in self.output_tensor:
- if not sy in symbols:
+ if sy not in symbols:
raise TypeError("In the function {} written in the Hybrid DSL, the tensor is allocated as an output "
"tensor but not in the return value: {}".format(self.func_name, sy))
self.inplace_assign_output = list([idx, self.args_index.get(val, -1)]
-                                  for idx, val in enumerate(symbols) if val in self.args_index)
+                                  for idx, val in enumerate(symbols)
+                                  if val in self.args_index)
def determine_variable_usage(root, func_name):
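These checks enforce the Hybrid DSL contract: a variable may not be used outside the scope it is defined in, every returned value must be an input tensor or one allocated via `output_tensor`, and every allocated output tensor must appear in the return value. A minimal kernel that satisfies all three (the decorator was exported as `ms_hybrid` around this release and renamed `ms_kernel` later; treat the exact import as an assumption):

import numpy as np
from mindspore import Tensor, ops
from mindspore.ops import ms_hybrid  # later releases: ms_kernel

@ms_hybrid
def add_kernel(a, b):
    # output_tensor is a DSL builtin resolved inside the kernel body;
    # the result is allocated by it and returned, so the checks above pass.
    out = output_tensor(a.shape, a.dtype)
    for i0 in range(a.shape[0]):
        out[i0] = a[i0] + b[i0]
    return out

# out_shape/out_dtype may be omitted: __infer__ auto-infers them for hybrid kernels.
op = ops.Custom(add_kernel, func_type="hybrid")
x = Tensor(np.ones((4,)).astype(np.float32))
print(op(x, x))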

View File

@@ -122,7 +122,7 @@ def _compile_aot(file):
proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
- (out, _) = proc.communicate()
+ (out, _) = proc.communicate(timeout=30)
if proc.returncode != 0:
msg = "Compilation error in compiling {}:\n".format(file)
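Worth noting: `communicate(timeout=30)` raises `subprocess.TimeoutExpired` instead of returning when the compiler hangs, so a caller that wants the child process reaped has to catch it. A general-purpose sketch of that pattern (not part of this diff):

import subprocess

def run_with_timeout(cmd, timeout=30):
    # Run a command, killing it if it exceeds the timeout.
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    try:
        out, _ = proc.communicate(timeout=timeout)
    except subprocess.TimeoutExpired:
        proc.kill()                  # terminate the stuck compiler...
        out, _ = proc.communicate()  # ...and collect whatever it printed
        raise
    return proc.returncode, out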
@@ -146,6 +146,17 @@ class Custom(ops.PrimitiveWithInfer):
.. warning::
This is an experimental prototype that is subject to change.
+ .. note::
+     The supported platforms are determined by the input `func_type`:
+
+     - "hybrid": supports ["Ascend", "GPU", "CPU"].
+     - "akg": supports ["Ascend", "GPU", "CPU"].
+     - "tbe": supports ["Ascend"].
+     - "aot": supports ["GPU", "CPU"].
+     - "pyfunc": supports ["CPU"].
+     - "julia": supports ["CPU"].
+     - "aicpu": supports ["Ascend"].
Args:
func (Union[function, str]):
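To make the platform table above concrete: a "pyfunc" op wraps a plain Python function and runs on CPU. A minimal sketch with static output shape and dtype (names and shapes are illustrative):

import numpy as np
import mindspore as ms
from mindspore import Tensor, ops

def square(x):
    # Plain NumPy body, executed by the pyfunc runtime on CPU.
    return x * x

op = ops.Custom(square, out_shape=(4,), out_dtype=ms.float32, func_type="pyfunc")
x = Tensor(np.arange(4).astype(np.float32))
print(op(x))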
@@ -496,6 +507,60 @@ class Custom(ops.PrimitiveWithInfer):
self.add_prim_attr("func_type", self.func_type)
self._update_attr()
+ def __infer__(self, *args):
+     if callable(self.out_shape):
+         infer_shape = self.out_shape(*(x["shape"] for x in args))
+     else:
+         infer_shape = self.out_shape
+
+     if callable(self.out_dtype):
+         infer_dtype = self.out_dtype(*(x["dtype"] for x in args))
+     else:
+         infer_dtype = self.out_dtype
+
+     infer_value = None
+
+     # deal with the case of ms script:
+     # enable the auto infer function if any infer information is missing
+     if self._is_ms_kernel and (infer_dtype is None or infer_shape is None):
+         logger.warning("{}, 'out_shape' or 'out_dtype' is None, infer the output shape and output dtype "
+                        "automatically. There might be some Python RuntimeWarning but it wouldn't influence the "
+                        "result.".format(self.log_prefix))
+         auto_infer_result = self._auto_infer(*args)
+         # use the automatically inferred shape/dtype where the user-supplied values are None
+         infer_shape = auto_infer_result[0] if infer_shape is None else infer_shape
+         infer_dtype = auto_infer_result[1] if infer_dtype is None else infer_dtype
+         infer_value = auto_infer_result[2]
+
+     # deal with the case where the custom op is of type pyfunc with empty output
+     if self.func_type == "pyfunc":
+         if infer_shape == ():
+             logger.warning("{}, 'out_shape' is an empty tuple. Add a placeholder instead. "
+                            "Not recommended to use it as it could be any uninitialized data.".format(self.log_prefix))
+             infer_shape = (1,)
+         if infer_dtype == ():
+             logger.warning("{}, 'out_dtype' is an empty tuple. Add a placeholder instead. "
+                            "Not recommended to use it as it could be any uninitialized data.".format(self.log_prefix))
+             infer_dtype = mstype.int32
+
+     # after automatic inference has filled in what it can, validate infer_shape/infer_dtype
+     if not isinstance(infer_shape, (tuple, list)):
+         raise TypeError("{}, 'out_shape' must be one of [tuple, list, function], but got {}"
+                         .format(self.log_prefix, type(infer_shape)))
+     if not isinstance(infer_dtype, (typing.Type, tuple, list)):
+         raise TypeError("{}, 'out_dtype' must be one of [mindspore.dtype, tuple, list, function], but got {}"
+                         .format(self.log_prefix, type(infer_dtype)))
+
+     out = {
+         "shape": infer_shape,
+         "dtype": infer_dtype,
+         "value": infer_value,
+     }
+     return out
+
+ def get_bprop(self):
+     return self.bprop
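The `callable(self.out_shape)` branch above is what lets users pass infer functions instead of static values; `__infer__` calls them with each input's shape and dtype. A sketch of that style (hypothetical element-wise `double`; pyfunc so it runs on CPU):

import numpy as np
from mindspore import Tensor, ops

def double(x):
    return 2 * x

# out_shape/out_dtype given as functions of the input shape/dtype, invoked by __infer__.
op = ops.Custom(double,
                out_shape=lambda x_shape: x_shape,
                out_dtype=lambda x_dtype: x_dtype,
                func_type="pyfunc")
x = Tensor(np.ones((2, 3)).astype(np.float32))
print(op(x).shape)  # (2, 3)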
@@ -560,7 +625,7 @@ class Custom(ops.PrimitiveWithInfer):
"""Update information of func"""
if callable(self.func):
# For func_type other than hybrid, get the original function if func is decorated
- if "__wrapped__" in self.func.__dict__ and not self.func_type in ["hybrid", "pyfunc"]:
+ if "__wrapped__" in self.func.__dict__ and self.func_type not in ["hybrid", "pyfunc"]:
self.func = self.func.__dict__["__wrapped__"]
# func name
self.func_name = self.func.__name__
@@ -590,7 +655,9 @@ class Custom(ops.PrimitiveWithInfer):
inplace_assign_output = determine_variable_usage(root, self.func_name)
if inplace_assign_output:
self.add_prim_attr("inplace_assign_output",
-                   " ".join((str(j) for i in inplace_assign_output for j in i)))
+                   " ".join((str(j)
+                             for i in inplace_assign_output
+                             for j in i)))
self.add_prim_attr('func_source_str', self.func_source_str)
# unique func name
@@ -630,7 +697,7 @@ class Custom(ops.PrimitiveWithInfer):
new_dtype_format.append(i + (DataType.I32_Default,))
reg_info["dtype_format"] = new_dtype_format
- for i, item in enumerate(reg_info.get("outputs", [])):
+ for _, item in enumerate(reg_info.get("outputs", [])):
output_name_list = []
if isinstance(item, dict) and item.get("name"):
output_name_list.append(item.get("name"))
@@ -936,57 +1003,3 @@ class Custom(ops.PrimitiveWithInfer):
infer_value = Tensor(fake_output) if enable_infer_value else None
return infer_shape, infer_dtype, infer_value
- def __infer__(self, *args):
-     if callable(self.out_shape):
-         infer_shape = self.out_shape(*(x["shape"] for x in args))
-     else:
-         infer_shape = self.out_shape
-
-     if callable(self.out_dtype):
-         infer_dtype = self.out_dtype(*(x["dtype"] for x in args))
-     else:
-         infer_dtype = self.out_dtype
-
-     infer_value = None
-
-     # deal with the case of ms script:
-     # enable the auto infer function if any infer information is missing
-     if self._is_ms_kernel and (infer_dtype is None or infer_shape is None):
-         logger.warning("{}, 'out_shape' or 'out_dtype' is None, infer the output shape and output dtype "
-                        "automatically. There might be some Python RuntimeWarning but it wouldn't influence the "
-                        "result.".format(self.log_prefix))
-         auto_infer_result = self._auto_infer(*args)
-         # use the automatically inferred shape/dtype where the user-supplied values are None
-         infer_shape = auto_infer_result[0] if infer_shape is None else infer_shape
-         infer_dtype = auto_infer_result[1] if infer_dtype is None else infer_dtype
-         infer_value = auto_infer_result[2]
-
-     # deal with the case where the custom op is of type pyfunc with empty output
-     if self.func_type == "pyfunc":
-         if infer_shape == ():
-             logger.warning("{}, 'out_shape' is an empty tuple. Add a placeholder instead. "
-                            "Not recommended to use it as it could be any uninitialized data.".format(self.log_prefix))
-             infer_shape = (1,)
-         if infer_dtype == ():
-             logger.warning("{}, 'out_dtype' is an empty tuple. Add a placeholder instead. "
-                            "Not recommended to use it as it could be any uninitialized data.".format(self.log_prefix))
-             infer_dtype = mstype.int32
-
-     # after automatic inference has filled in what it can, validate infer_shape/infer_dtype
-     if not isinstance(infer_shape, (tuple, list)):
-         raise TypeError("{}, 'out_shape' must be one of [tuple, list, function], but got {}"
-                         .format(self.log_prefix, type(infer_shape)))
-     if not isinstance(infer_dtype, (typing.Type, tuple, list)):
-         raise TypeError("{}, 'out_dtype' must be one of [mindspore.dtype, tuple, list, function], but got {}"
-                         .format(self.log_prefix, type(infer_dtype)))
-
-     out = {
-         "shape": infer_shape,
-         "dtype": infer_dtype,
-         "value": infer_value,
-     }
-     return out