Update api docs
parent 1eeeba149e
commit c5693d534e
@@ -3,13 +3,13 @@ mindspore.Tensor.new_ones

 .. py:method:: mindspore.Tensor.new_ones(size, *, dtype=None)

-    Returns a Tensor of size `size` filled with ones. By default, the returned Tensor has the same dtype as `self`.
+    Returns a Tensor of size `size` filled with ones.

     Args:
         - **size** (Union[int, tuple, list]) - Defines the shape of the output.

     Keyword Args:
-        - **dtype** (mindspore.dtype, optional) - The dtype of the output. Default: None, uses the same dtype as `self`.
+        - **dtype** (mindspore.dtype, optional) - The dtype of the output. Default: None, the returned Tensor uses the same dtype as `self`.

     Returns:
         Tensor, whose shape and dtype are defined by the inputs, filled with ones.
@@ -3,13 +3,13 @@ mindspore.Tensor.new_zeros

 .. py:method:: mindspore.Tensor.new_zeros(size, *, dtype=None)

-    Returns a Tensor of size `size` filled with zeros. By default, the returned Tensor has the same dtype as `self`.
+    Returns a Tensor of size `size` filled with zeros.

     Args:
         - **size** (Union[int, tuple, list]) - Defines the shape of the output.

     Keyword Args:
-        - **dtype** (mindspore.dtype, optional) - The dtype of the output. Default: None, uses the same dtype as `self`.
+        - **dtype** (mindspore.dtype, optional) - The dtype of the output. Default: None, the returned Tensor uses the same dtype as `self`.

     Returns:
         Tensor, whose shape and dtype are defined by the inputs, filled with zeros.
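A minimal usage sketch for the two methods documented above, assuming the signatures shown in these hunks; the sample shapes and dtypes are arbitrary.

import numpy as np
import mindspore
from mindspore import Tensor

x = Tensor(np.arange(6).reshape(2, 3), mindspore.float32)

ones = x.new_ones((2, 3))                      # same dtype as x by default
zeros = x.new_zeros(4, dtype=mindspore.int64)  # dtype overridden via the keyword argument

print(ones.shape, ones.dtype)    # (2, 3) Float32
print(zeros.shape, zeros.dtype)  # (4,) Int64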
@@ -3,7 +3,7 @@

 .. py:function:: mindspore.ops.addbmm(x, batch1, batch2, *, beta=1, alpha=1)

-    Applies batch matrix multiplication to `batch1` and `batch2`, followed by a reduced add. The matrix `x` is added to the final result.
+    Applies batch matrix multiplication to `batch1` and `batch2`, followed by a reduced add; `x` is added to the final result.
     `alpha` and `beta` are the multipliers for the matrix product of `batch1` and `batch2` and for `x` respectively. If `beta` is 0, then `x` will be ignored.

     .. math::
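A usage sketch for `mindspore.ops.addbmm`, assuming the usual addbmm semantics described above (output = beta * x + alpha * sum over the batch of batch1[i] @ batch2[i]); the shapes and values are arbitrary.

import numpy as np
import mindspore
from mindspore import Tensor, ops

x = Tensor(np.ones((3, 5)), mindspore.float32)
batch1 = Tensor(np.random.rand(4, 3, 2), mindspore.float32)
batch2 = Tensor(np.random.rand(4, 2, 5), mindspore.float32)

# Batch matmul yields four (3, 5) matrices; the reduced add sums them,
# then the sum is scaled by alpha and added to beta * x.
out = ops.addbmm(x, batch1, batch2, beta=1, alpha=2)
print(out.shape)  # (3, 5)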
@@ -3,7 +3,7 @@ mindspore.ops.hinge_embedding_loss

 .. py:function:: mindspore.ops.hinge_embedding_loss(inputs, targets, margin=1.0, reduction="mean")

-    Hinge Embedding loss function. Computes the output element-wise and measures the loss between the input x and the labels y (containing 1 or -1). It is usually used to measure the similarity between two inputs.
+    Hinge Embedding loss function, which measures the loss between the input `inputs` and the labels `targets` (containing 1 or -1).

     The loss function for the :math:`n`-th sample in the mini-batch is:
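A usage sketch for the functional interface documented above; the sample values are arbitrary, and `targets` must contain only 1 and -1.

import numpy as np
import mindspore
from mindspore import Tensor, ops

inputs = Tensor(np.array([[0.3, 0.7], [0.5, 0.5]]), mindspore.float32)
targets = Tensor(np.array([[-1.0, 1.0], [1.0, -1.0]]), mindspore.float32)

# For target 1 the per-element loss is the input itself;
# for target -1 it is max(0, margin - input).
loss = ops.hinge_embedding_loss(inputs, targets, margin=1.0, reduction="mean")
print(loss)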
@@ -3,10 +3,14 @@ mindspore.ops.inner

 .. py:function:: mindspore.ops.inner(x, other)

-    Computes the dot product of two 1D Tensors. For higher dimensions, the result is the sum of the element-wise products along the last dimension.
+    Computes the dot product of two 1D Tensors.
+
+    For 1D Tensors (without complex conjugation), returns the dot product of the two vectors.
+
+    For higher dimensions, returns a sum product over the last axis.

     .. note::
-        If either `x` or `other` is a scalar, this is equivalent to :code:`mindspore.ops.mul(x, other)`.
+        If either `x` or `other` is a scalar, :func:`mindspore.ops.inner` is equivalent to :func:`mindspore.ops.mul`.

     Args:
         - **x** (Tensor) - The first input.
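A usage sketch for `mindspore.ops.inner`, following the 1D, higher-dimensional, and scalar cases described above; the values are arbitrary.

import numpy as np
import mindspore
from mindspore import Tensor, ops

a = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
b = Tensor(np.array([4.0, 5.0, 6.0]), mindspore.float32)
print(ops.inner(a, b))        # 1*4 + 2*5 + 3*6 = 32

# Higher dimensions: sum product over the last axis.
m = Tensor(np.ones((2, 3)), mindspore.float32)
print(ops.inner(m, b).shape)  # (2,)

# A scalar operand behaves like element-wise multiplication (see the note above).
print(ops.inner(Tensor(2.0, mindspore.float32), b))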
@@ -3,7 +3,7 @@ mindspore.ops.randint_like

 .. py:function:: mindspore.ops.randint_like(x, low, high, *, dtype=None, seed=None)

-    Returns a Tensor whose elements are random integers in the interval [ `low` , `high` ).
+    Returns a Tensor whose elements are random integers in the interval [ `low` , `high` ), with the shape and dtype determined by `x`.

     Args:
         - **x** (Tensor) - The input Tensor, used to determine the shape and the default dtype of the output Tensor.
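A usage sketch for `mindspore.ops.randint_like` based on the signature above; the sample tensor and bounds are arbitrary.

import numpy as np
import mindspore
from mindspore import Tensor, ops

x = Tensor(np.zeros((2, 3)), mindspore.int32)

out = ops.randint_like(x, 0, 10)                            # shape (2, 3); values drawn from [0, 10)
out64 = ops.randint_like(x, 0, 10, dtype=mindspore.int64)   # dtype overridden via the keyword argument
print(out.shape, out64.dtype)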
@@ -4126,14 +4126,14 @@ class Tensor(Tensor_):

     def new_zeros(self, size, *, dtype=None):
         r"""
-        Return a tensor of `size` filled with zeros. By default, the returned tensor has the same dtype as `self`.
+        Return a tensor of `size` filled with zeros.

         Args:
             size (Union[int, tuple, list]): An int, list or tuple of integers defining the output shape.

         Keyword Args:
-            dtype (mindspore.dtype, optional): The desired dtype of the output tensor. If None, same dtype as `self`.
-                Default: None.
+            dtype (mindspore.dtype, optional): The desired dtype of the output tensor. If None, the returned tensor has
+                the same dtype as `self`. Default: None.

         Returns:
             Tensor, the shape and dtype is defined above and filled with zeros.
@@ -4160,14 +4160,14 @@ class Tensor(Tensor_):

     def new_ones(self, size, *, dtype=None):
         r"""
-        Return a tensor of `size` filled with ones. By default, the returned tensor has the same dtype as `self`.
+        Return a tensor of `size` filled with ones.

         Args:
             size (Union[int, tuple, list]): An int, list or tuple of integers defining the output shape.

         Keyword Args:
-            dtype (mindspore.dtype, optional): The desired dtype of the output tensor. Default: if None, same dtype as
-                `self`.
+            dtype (mindspore.dtype, optional): The desired dtype of the output tensor. If None, the returned
+                tensor has the same dtype as `self`. Default: None.

         Returns:
             Tensor, the shape and dtype is defined above and filled with ones.
@@ -2469,9 +2469,7 @@ class GaussianNLLLoss(LossBase):

 class HingeEmbeddingLoss(LossBase):
     r"""
-    Hinge Embedding Loss. Compute the output according to the input elements. Measures the loss given an input tensor x
-    and a labels tensor y (containing 1 or -1).
-    This is usually used for measuring the similarity between two inputs.
+    Measures Hinge Embedding Loss given an input Tensor `logits` and a label Tensor `labels` (containing 1 or -1).

     The loss function for :math:`n`-th sample in the mini-batch is
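A sketch of the class form, assuming `mindspore.nn.HingeEmbeddingLoss` takes `margin` and `reduction` in its constructor like the functional interface; the sample values are arbitrary.

import numpy as np
import mindspore
import mindspore.nn as nn
from mindspore import Tensor

loss_fn = nn.HingeEmbeddingLoss(margin=1.0, reduction="mean")

logits = Tensor(np.array([[0.3, 0.7], [0.5, 0.5]]), mindspore.float32)
labels = Tensor(np.array([[-1.0, 1.0], [1.0, -1.0]]), mindspore.float32)

print(loss_fn(logits, labels))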
@@ -387,15 +387,15 @@ def hamming_window(window_length, periodic=True, alpha=0.54, beta=0.46, *, dtype

 def where(condition, x, y):
     r"""
-    Returns a tensor whose elements are selected from either `x` or `y` depending on `condition`.
+    Selects elements from `x` or `y` based on `condition` and returns a tensor.

     .. math::
         output_i = \begin{cases} x_i,\quad &if\ condition_i \\ y_i,\quad &otherwise \end{cases}

     Args:
-        condition (Union[Bool Tensor, bool, scalar]): If True, yield `x` otherwise yield `y`.
-        x (Union[Tensor, Scalar]): Value (if `x` is a scalar) or values selected at indices where condition is True.
-        y (Union[Tensor, Scalar]): Value (if `y` is a scalar) or values selected at indices where condition is False.
+        condition (Union[Bool Tensor, bool, scalar]): If True, yield `x`, otherwise yield `y`.
+        x (Union[Tensor, Scalar]): When `condition` is True, values to select from.
+        y (Union[Tensor, Scalar]): When `condition` is False, values to select from.

     Returns:
         Tensor, elements are selected from `x` and `y`.
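A usage sketch for `ops.where` matching the formula above; the values are arbitrary.

import numpy as np
import mindspore
from mindspore import Tensor, ops

a = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
b = Tensor(np.array([10.0, 20.0, 30.0]), mindspore.float32)
cond = Tensor(np.array([True, False, True]))

# Picks a[i] where cond[i] is True and b[i] otherwise -> [1. 20. 3.]
print(ops.where(cond, a, b))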
@@ -1285,7 +1285,7 @@ def log(x):

 def logdet(x):
     r"""
-    Calculates log determinant of a square matrix or batches of square matrices.
+    Calculates log determinant of one or a batch of square matrices.

     Args:
         x (Tensor): Input Tensor of any dimension.
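A usage sketch for `ops.logdet`; the matrices are arbitrary but chosen to have a positive determinant.

import numpy as np
import mindspore
from mindspore import Tensor, ops

# det = 2 * 3 = 6, so logdet is log(6), roughly 1.79.
a = Tensor(np.array([[2.0, 0.0], [0.0, 3.0]]), mindspore.float32)
print(ops.logdet(a))

# A batch of square matrices yields one log determinant per matrix.
batch = Tensor(np.stack([np.eye(2), 2.0 * np.eye(2)]), mindspore.float32)
print(ops.logdet(batch).shape)  # (2,)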
@@ -5003,8 +5003,7 @@ def mv(mat, vec):

 def addbmm(x, batch1, batch2, *, beta=1, alpha=1):
     r"""
-    Applies batch matrix multiplication to `batch1` and `batch2`, with a reduced add step. The matrix `x` is add to
-    final result.
+    Applies batch matrix multiplication to `batch1` and `batch2`, with a reduced add step, and adds `x` to the result.

     The optional values `alpha` and `beta` are the matrix-matrix product between `batch1` and `batch2` and the scale
     factor for the added tensor `x` respectively. If `beta` is 0, then `x` will be ignored.
@@ -5163,7 +5162,7 @@ def addmv(x, mat, vec, beta=1, alpha=1):

 def adjoint(x):
     r"""
-    Returns a view of the tensor conjugated and with the last two dimensions transposed.
+    Returns the conjugate with the last two dimensions transposed.

     Args:
         x (Tensor): Input tensor.
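A usage sketch for `ops.adjoint`, assuming complex64 inputs are supported on the target backend; the values are arbitrary.

import numpy as np
import mindspore
from mindspore import Tensor, ops

a = Tensor(np.array([[1 + 1j, 2 - 2j], [3 + 3j, 4 - 4j]], dtype=np.complex64))

# Conjugate of a with its last two dimensions transposed:
# out[i][j] equals conj(a[j][i]).
out = ops.adjoint(a)
print(out.shape)  # (2, 2)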
@@ -7933,11 +7932,15 @@ def matmul(x1, x2):

 def inner(x, other):
     r"""
-    Computes the dot product of 1D tensors. For higher dimensions, the result will be the summation of the elemental
-    wise production along their last dimension.
+    Returns the inner product of two tensors.
+
+    For 1-D tensors (without complex conjugation), returns the ordinary inner product of vectors.
+
+    For higher dimensions, returns a sum product over the last axis.

     Note:
-        If either `x` or `other` is a Tensor scalar, the result is equivalent to mindspore.mul(x, other).
+        If `x` or `other` is a Tensor scalar, :func:`mindspore.ops.inner` will be the same as
+        :func:`mindspore.ops.mul` .

     Args:
         x (Tensor): First input.
@@ -4065,9 +4065,7 @@ def gaussian_nll_loss(x, target, var, full=False, eps=1e-6, reduction='mean'):

 def hinge_embedding_loss(inputs, targets, margin=1.0, reduction='mean'):
     r"""
-    Hinge Embedding Loss. Compute the output according to the input elements. Measures the loss given an input tensor x
-    and a labels tensor y (containing 1 or -1).
-    This is usually used for measuring the similarity between two inputs.
+    Measures Hinge Embedding Loss given an input Tensor `inputs` and a label Tensor `targets` (containing 1 or -1).

     The loss function for :math:`n`-th sample in the mini-batch is
@@ -847,8 +847,8 @@ def _generate_shapes(shape):
 @_function_forbid_reuse
 def rand(*size, dtype=None, seed=None):
     r"""
-    Returns a new Tensor with given shape and dtype, filled with random numbers from the uniform distribution on the
-    interval :math:`[0, 1)`.
+    Returns a new tensor filled with numbers from the uniform distribution over the interval :math:`[0, 1)`,
+    based on the given shape and dtype.

     Args:
         size (Union[int, tuple(int), list(int)]): Shape of the new tensor, e.g. :math:`(2, 3)` or :math:`2`.
@@ -890,8 +890,8 @@ def rand(*size, dtype=None, seed=None):
 @_function_forbid_reuse
 def rand_like(x, seed=None, *, dtype=None):
     r"""
-    Returns a new Tensor with the shape and dtype as `x`, filled with random numbers from the uniform distribution on
-    the interval :math:`[0, 1)`.
+    Returns a new tensor filled with numbers from the uniform distribution over the interval :math:`[0, 1)`,
+    based on the shape and dtype of `x`.

     Args:
         x (Tensor): Input Tensor to specify the output shape and its default dtype.
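A usage sketch for `ops.rand` and `ops.rand_like` as documented above; the shapes are arbitrary and the drawn values differ per run.

import numpy as np
import mindspore
from mindspore import Tensor, ops

u = ops.rand(2, 3)        # uniform samples in [0, 1)
print(u.shape, u.dtype)

x = Tensor(np.zeros((4, 4)), mindspore.float32)
v = ops.rand_like(x)      # shape and dtype follow x
print(v.shape, v.dtype)   # (4, 4) Float32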
@@ -961,9 +961,9 @@ def randn(*size, dtype=None, seed=None):

     Examples:
         >>> import mindspore.ops as ops
-        >>> print(ops.randn((2,3)))
-        [[ 0.30639967 -0.42438635 -0.20454668]
-         [-0.4287376   1.3054721   0.64747655]]
+        >>> print(ops.randn((2, 2)))
+        [[ 0.30639967 -0.42438635]
+         [-0.4287376   1.3054721 ]]
     """
     if dtype is None:
         dtype = mstype.float32
@@ -1025,7 +1025,7 @@ def randn_like(x, seed=None, *, dtype=None):
 @_function_forbid_reuse
 def randint(low, high, size, seed=None, *, dtype=None):
     r"""
-    Return a Tensor whose elements are random integers from low (inclusive) to high (exclusive).
+    Returns a Tensor whose elements are random integers in the range of [ `low` , `high` ).

     Args:
         low (int): Start value of interval.
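A usage sketch for `ops.randint`; the bounds, shape, and explicit dtype are arbitrary choices.

import mindspore
from mindspore import ops

# A 2x3 tensor of integers drawn uniformly from [0, 10).
out = ops.randint(0, 10, (2, 3), dtype=mindspore.int32)
print(out.shape, out.dtype)  # (2, 3) Int32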
@@ -1077,8 +1077,8 @@ def randint(low, high, size, seed=None, *, dtype=None):
 @_function_forbid_reuse
 def randint_like(x, low, high, seed=None, *, dtype=None):
     r"""
-    Returns a tensor with the same shape as Tensor `x` filled with random integers generated uniformly between
-    low (inclusive) and high (exclusive).
+    Returns a tensor with the same shape as Tensor `x` whose elements are random integers in the range
+    of [ `low` , `high` ).

     Args:
         x (Tensor): Input Tensor to specify the output shape and its default dtype.