fix api bugs

yuzhenhua 2023-01-17 19:05:35 +08:00
parent 32899f3333
commit 757667d90e
4 changed files with 21 additions and 18 deletions

View File

@@ -7,11 +7,14 @@ mindspore.ops.full
Args:
- **size** (Union(tuple[int], list[int])) - The specified shape of the output Tensor.
- **fill_value** (number.Number) - Value used to fill the output Tensor.
- **dtype** (mindspore.dtype) - The specified data type of the output Tensor. Only `bool_ <https://www.mindspore.cn/docs/zh-CN/master/api_python/mindspore/mindspore.dtype.html#mindspore.dtype>`_ and `number <https://www.mindspore.cn/docs/zh-CN/master/api_python/mindspore/mindspore.dtype.html#mindspore.dtype>`_ are supported.
- **fill_value** (number.Number) - Value used to fill the output Tensor. Complex numbers are not supported for now.
Keyword Args:
- **dtype** (mindspore.dtype) - The specified data type of the output Tensor. Only `bool_` and `number` are supported; for details, see :class:`mindspore.dtype`. Default: None.
Returns:
    Tensor.
Raises:
- **TypeError** - `size` is not a tuple.
- **TypeError** - `size` contains an element less than 0.
- **ValueError** - `size` contains an element less than 0.
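As a quick illustration of the updated signature (not part of this commit): `dtype` is now keyword-only, and a negative entry in `size` is documented to raise ValueError rather than TypeError. A minimal sketch:

    import mindspore
    from mindspore import ops

    # Shape comes from `size`, the value from `fill_value`; dtype defaults to None.
    ones = ops.full((2, 3), 1)
    # After this change, dtype can only be passed as a keyword argument.
    halves = ops.full((4,), 0.5, dtype=mindspore.float32)
    # ops.full((-1, 2), 0) is now documented to raise ValueError instead of TypeError.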

View File

@@ -7,7 +7,7 @@ mindspore.ops.full_like
Args:
- **x** (Tensor) - The shape of `x` determines the shape of the output Tensor.
- **fill_value** (number.Number) - Value used to fill the output Tensor.
- **fill_value** (number.Number) - Value used to fill the output Tensor. Complex numbers are not supported for now.
Keyword Args:
- **dtype** (mindspore.dtype, 可选) - The specified data type of the output Tensor. Only `bool_` and `number` are supported; for details, see :class:`mindspore.dtype`. Default: None.
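A matching usage sketch for `full_like` (illustrative only, not from the diff); the output shape is taken from `x` and `dtype` stays keyword-only:

    import numpy as np
    import mindspore
    from mindspore import ops, Tensor

    x = Tensor(np.arange(6).reshape(2, 3), mindspore.float32)
    # Same shape as `x`, filled with 7; dtype is optional and keyword-only (Default: None).
    y = ops.full_like(x, 7, dtype=mindspore.int32)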

View File

@@ -373,7 +373,7 @@ def hamming_window(window_length, periodic=True, alpha=0.54, beta=0.46, *, dtype
    if window_length <= 1:
        return Tensor(np.ones(window_length))
    if dtype is not None and dtype not in mstype.float_type:
        raise TypeError(f"For array function 'hamming_window', 'dtype' must be floating pont dtypes, but got {dtype}.")
        raise TypeError(f"For array function 'hamming_window', 'dtype' must be floating point dtypes, but got {dtype}.")
    if periodic:
        window_length += 1
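For reference, the defaults in this signature correspond to the standard Hamming window w[n] = alpha - beta*cos(2*pi*n/(N-1)). A NumPy sketch of that definition, assuming the usual symmetric/periodic convention (an illustration, not the MindSpore implementation):

    import numpy as np

    def hamming_window_sketch(window_length, periodic=True, alpha=0.54, beta=0.46):
        # Lengths 0 and 1 degenerate to all-ones, mirroring the early return above.
        if window_length <= 1:
            return np.ones(window_length)
        # The periodic variant is a symmetric window of length N + 1 with the last point dropped.
        n_points = window_length + 1 if periodic else window_length
        n = np.arange(n_points)
        w = alpha - beta * np.cos(2 * np.pi * n / (n_points - 1))
        return w[:window_length]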
@@ -638,7 +638,7 @@ def one_hot(indices, depth, on_value, off_value, axis=-1):
    return onehot(indices, depth, on_value, off_value)
def fill(type, shape, value):
def fill(type, shape, value): # pylint: disable=redefined-outer-name
    """
    Create a Tensor of the specified shape and fill it with the specified value.
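A one-line usage sketch of `fill` under the positional signature shown above (illustrative, not part of the commit; assumes the function is exposed as `ops.fill`):

    import mindspore
    from mindspore import ops

    # A 2x3 float32 tensor filled with 0.5.
    out = ops.fill(mindspore.float32, (2, 3), 0.5)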
@@ -724,17 +724,18 @@ def full(size, fill_value, *, dtype=None): # pylint: disable=redefined-outer-nam
Args:
size (Union(tuple[int], list[int])): The specified shape of output tensor.
fill_value (number.Number): Value to fill the returned tensor.
dtype (mindspore.dtype): The specified type of output tensor. The data type only supports
`bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_ and
`number <https://www.mindspore.cn/docs/en/master/api_python/mindspore.html#mindspore.dtype>`_ .
fill_value (number.Number): Value to fill the returned tensor. Complex numbers are not supported for now.
Keyword Args:
dtype (mindspore.dtype): The specified type of output tensor. `bool_` and `number` are supported, for details,
please refer to :class:`mindspore.dtype` . Default: None.
Returns:
Tensor.
Raises:
TypeError: If `size` is not a tuple or list.
TypeError: The element in `size` is less than 0.
ValueError: The element in `size` is less than 0.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
@@ -768,11 +769,11 @@ def full_like(x, fill_value, *, dtype=None):
Args:
x (Tensor): The shape of `x` will determine shape of the output Tensor.
fill_value (number.Number): Value to fill the returned Tensor.
fill_value (number.Number): Value to fill the returned Tensor. Complex numbers are not supported for now.
Keyword Args:
dtype (mindspore.dtype, optional): The specified type of output tensor. The data type only supports
`bool_` and `number` , for details, please refer to :class:`mindspore.dtype` . Default: None.
dtype (mindspore.dtype, optional): The specified type of output tensor. `bool_` and `number` are supported,
for details, please refer to :class:`mindspore.dtype` . Default: None.
Returns:
Tensor.
@@ -860,8 +861,9 @@ def chunk(x, chunks, axis=0):
    size1 = _tuple_setitem(arr_shape, axis, length1)
    start2 = _tuple_setitem(start1, axis, length1)
    size2 = _tuple_setitem(arr_shape, axis, length2)
    res = P.Split(axis, true_chunks)(tensor_slice(x, start1, size1)) + \
          P.Split(axis, 1)(tensor_slice(x, start2, size2))
    res = P.Split(axis, true_chunks)(tensor_slice(x, start1, size1))
    if length2:
        res += P.Split(axis, 1)(tensor_slice(x, start2, size2))
    return res
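A plain-Python sketch of the size arithmetic this fix guards against, assuming the usual chunk semantics (full chunks of size ceil(dim/chunks) plus an optional remainder); the helper and variable meanings below are a hypothetical reconstruction, not the MindSpore code. When the axis length divides evenly, `length2` is 0 and the second split would act on an empty slice, which the new `if length2:` check avoids:

    def chunk_sizes_sketch(dim_size, chunks):
        # length1: size of each full chunk; true_chunks: how many full chunks fit;
        # length2: size of the trailing remainder slice (possibly 0).
        length1 = -(-dim_size // chunks)
        true_chunks = dim_size // length1
        length2 = dim_size - true_chunks * length1
        return length1, true_chunks, length2

    # chunk_sizes_sketch(6, 3) -> (2, 3, 0): no remainder, so the second split is skipped.
    # chunk_sizes_sketch(7, 3) -> (3, 2, 1): two full chunks of 3 plus a remainder slice of 1.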

View File

@@ -3599,8 +3599,6 @@ def cosine_embedding_loss(input1, input2, target, margin=0.0, reduction="mean"):
    if margin_f > 1.0 or margin_f < -1.0:
        raise ValueError(f"For ops.cosine_embedding_loss, the value of 'margin' should be in [-1, 1],"
                         f"but got {margin_f}.")
    # if target > 0, 1-cosine(input1, input2)
    # else, max(0, cosine(input1, input2)-margin)
    prod_sum = _get_cache_prim(P.ReduceSum)()(input1 * input2, (1,))
    square1 = _get_cache_prim(P.ReduceSum)()(ops.square(input1), (1,))
    square2 = _get_cache_prim(P.ReduceSum)()(ops.square(input2), (1,))
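The two deleted comment lines describe the loss the remaining code computes; a NumPy sketch of that formula using the same three reductions (an illustration of the math, not the MindSpore kernel, and without the final `reduction` step):

    import numpy as np

    def cosine_embedding_loss_sketch(input1, input2, target, margin=0.0):
        prod_sum = np.sum(input1 * input2, axis=1)
        square1 = np.sum(np.square(input1), axis=1)
        square2 = np.sum(np.square(input2), axis=1)
        cos_sim = prod_sum / np.sqrt(square1 * square2)
        # target > 0: penalize dissimilarity; otherwise penalize similarity above the margin.
        return np.where(target > 0, 1.0 - cos_sim, np.maximum(0.0, cos_sim - margin))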