!48885 fixed_huber_loss_master
Merge pull request !48885 from yide12/ops_threshold_leaky_relu_master
This commit is contained in:
commit
d00f378a5a
|
@ -3,10 +3,10 @@ mindspore.ops.einsum
|
||||||
|
|
||||||
.. py:function:: mindspore.ops.einsum(equation, *operands)
|
.. py:function:: mindspore.ops.einsum(equation, *operands)
|
||||||
|
|
||||||
基于爱因斯坦求和约定(Einsum)符号,指定维度对输入Tensor元素的乘积求和。你可以使用这个运算符来执行对角线、减法、转置、矩阵乘法、乘法、内积运算等等。
|
基于爱因斯坦求和约定(Einsum)符号,沿着指定维度对输入Tensor元素的乘积求和。你可以使用这个运算符来执行对角线、减法、转置、矩阵乘法、乘法、内积运算等等。
|
||||||
|
|
||||||
.. note::
|
.. note::
|
||||||
现在支持子列表模式。例如,ops.einsum(op1, sublist1, op2, sublist2, ..., sublist_out)。在子列表模式中,`equation` 由子列表推导得到,Python的省略号和介于[0, 52)的整数list组成子列表。每个操作数后面都有一个子列表,并且最后有一个表示输出的子列表。
|
现在支持子列表模式。例如,ops.einsum(op1, sublist1, op2, sublist2, ..., sublist_out)。在子列表模式中, `equation` 由子列表推导得到,Python的省略号和介于[0, 52)的整数list组成子列表。每个操作数后面都有一个子列表,并且最后有一个表示输出的子列表。
|
||||||
|
|
||||||
参数:
|
参数:
|
||||||
- **equation** (str) - 基于爱因斯坦求和约定的符号,表示想要执行的操作。符号只能包含字母、逗号、省略号和箭头。字母表示输入Tensor维数,逗号表示单独的Tensor,省略号表示忽略的Tensor维数,箭头的左边表示输入Tensor,右边表示期望输出的维度。
|
- **equation** (str) - 基于爱因斯坦求和约定的符号,表示想要执行的操作。符号只能包含字母、逗号、省略号和箭头。字母表示输入Tensor维数,逗号表示单独的Tensor,省略号表示忽略的Tensor维数,箭头的左边表示输入Tensor,右边表示期望输出的维度。
|
||||||
|
|
|
@ -31,9 +31,9 @@ mindspore.ops.huber_loss
|
||||||
|
|
||||||
参数:
|
参数:
|
||||||
- **x** (Tensor) - 输入预测值,任意维度的Tensor。
|
- **x** (Tensor) - 输入预测值,任意维度的Tensor。
|
||||||
- **target** (Tensor) - 目标值,与 `x` 的shape和dtype相同。
|
- **target** (Tensor) - 目标值,通常情况下与 `x` 的shape和dtype相同。但是当 `target` 和 `x` 的shape不同时,需要保证它们之间可以互相广播。
|
||||||
- **reduction** (str) - 应用于loss的reduction类型。取值为"mean","sum"或"none"。默认值:"mean"。
|
- **reduction** (str) - 应用于loss的reduction类型。取值为"mean","sum"或"none"。默认值:"mean"。
|
||||||
- **delta** (Union[int, float]) - 两种损失之间变化的阈值。该值必须为正。默认值:1.0。
|
- **delta** (Union[int, float]) - 两种损失之间变化的阈值。该值必须大于零。默认值:1.0。
|
||||||
|
|
||||||
返回:
|
返回:
|
||||||
Tensor,和 `x` 具有相同的dtype和shape。
|
Tensor,和 `x` 具有相同的dtype和shape。
|
||||||
|
@ -43,4 +43,4 @@ mindspore.ops.huber_loss
|
||||||
- **TypeError** - `delta` 不是float或int。
|
- **TypeError** - `delta` 不是float或int。
|
||||||
- **ValueError** - `delta` 的值小于或等于0。
|
- **ValueError** - `delta` 的值小于或等于0。
|
||||||
- **ValueError** - `reduction` 不为"mean"、"sum"或"none"。
|
- **ValueError** - `reduction` 不为"mean"、"sum"或"none"。
|
||||||
- **ValueError** - `x` 和 `target` 有不同的shape。
|
- **ValueError** - `x` 和 `target` 有不同的shape,且不能互相广播。
|
||||||
|
|
|
@ -9172,8 +9172,8 @@ def _einsum_convert_num_to_char(num):
|
||||||
|
|
||||||
def einsum(equation, *operands):
|
def einsum(equation, *operands):
|
||||||
r"""
|
r"""
|
||||||
Sums the product of the elements of the input Tensor along
|
According to the Einstein summation convention (Einsum),
|
||||||
dimensions specified notation based on the Einstein summation convention(Einsum).
|
the product of the input tensor elements is summed along the specified dimension.
|
||||||
You can use this operator to perform diagonal, reducesum, transpose, matmul, mul, inner product operations, etc.
|
You can use this operator to perform diagonal, reducesum, transpose, matmul, mul, inner product operations, etc.
|
||||||
|
|
||||||
Note::
|
Note::
|
||||||
|
|
|
@ -4464,7 +4464,7 @@ def hardtanh(x, min_val=-1.0, max_val=1.0):
|
||||||
def huber_loss(x, target, reduction='mean', delta=1.0):
|
def huber_loss(x, target, reduction='mean', delta=1.0):
|
||||||
r"""
|
r"""
|
||||||
huber_loss calculates the error between the predicted value and the target value.
|
huber_loss calculates the error between the predicted value and the target value.
|
||||||
It has the advantages of both l1_loss and mse_loss.
|
It combines the best of both L1 loss and MSE loss.
|
||||||
|
|
||||||
Assuming that the :math:`x` and :math:`y` are 1-D Tensor, length :math:`N`, the reduction parameter is set to "none"
|
Assuming that the :math:`x` and :math:`y` are 1-D Tensor, length :math:`N`, the reduction parameter is set to "none"
|
||||||
then calculate the loss of :math:`x` and :math:`y` without dimensionality reduction. The formula is as follows:
|
then calculate the loss of :math:`x` and :math:`y` without dimensionality reduction. The formula is as follows:
|
||||||
|
@ -4493,11 +4493,13 @@ def huber_loss(x, target, reduction='mean', delta=1.0):
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
x (Tensor): Predicted value, Tensor of any dimension.
|
x (Tensor): Predicted value, Tensor of any dimension.
|
||||||
target (Tensor): Target value, same dtype and shape as the `x`.
|
target (Tensor): Target value, which has the same dtype and shape as `x` in common cases.
|
||||||
reduction (str): Type of reduction to be applied to loss. The optional values are "mean", "sum", and "none".
|
However, when the shape of `target` is different from the shape of `x`,
|
||||||
|
they should be able to broadcast to each other.
|
||||||
|
reduction (str): Type of reduction to be applied to loss. The optional values are "mean", "sum" and "none".
|
||||||
Default: "mean".
|
Default: "mean".
|
||||||
delta (Union[int, float]): The threshold to change between two types of loss.
|
delta (Union[int, float]): The threshold to change between two types of loss.
|
||||||
The value must be positive. Default: 1.0.
|
The value must be greater than zero. Default: 1.0.
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
Tensor, with the same dtype and shape as `x`.
|
Tensor, with the same dtype and shape as `x`.
|
||||||
|
@ -4507,7 +4509,7 @@ def huber_loss(x, target, reduction='mean', delta=1.0):
|
||||||
TypeError: If dtype of `delta` is neither float nor int.
|
TypeError: If dtype of `delta` is neither float nor int.
|
||||||
ValueError: If `delta` is less than or equal to 0.
|
ValueError: If `delta` is less than or equal to 0.
|
||||||
ValueError: If `reduction` is not one of "none", "mean", "sum".
|
ValueError: If `reduction` is not one of "none", "mean", "sum".
|
||||||
ValueError: If `x` and `target` have different shapes.
|
ValueError: If `x` and `target` have different shapes and cannot be broadcast to each other.
|
||||||
|
|
||||||
Supported Platforms:
|
Supported Platforms:
|
||||||
``Ascend`` ``GPU`` ``CPU``
|
``Ascend`` ``GPU`` ``CPU``
|
||||||
|
@ -4523,8 +4525,6 @@ def huber_loss(x, target, reduction='mean', delta=1.0):
|
||||||
_check_is_tensor('target', target, "huber_loss")
|
_check_is_tensor('target', target, "huber_loss")
|
||||||
_check_value_type("delta", delta, [int, float], "huber_loss")
|
_check_value_type("delta", delta, [int, float], "huber_loss")
|
||||||
_check_number_gt_value("delta", delta, 0.0, "huber_loss")
|
_check_number_gt_value("delta", delta, 0.0, "huber_loss")
|
||||||
if x.shape != target.shape:
|
|
||||||
raise ValueError(f"For huber_loss, x and target must be the same shape, but got {x.shape} and {target.shape}")
|
|
||||||
sub = _get_cache_prim(P.Sub)()
|
sub = _get_cache_prim(P.Sub)()
|
||||||
multi = _get_cache_prim(P.Mul)()
|
multi = _get_cache_prim(P.Mul)()
|
||||||
z = sub(x, target)
|
z = sub(x, target)
|
||||||
|
|
Loading…
Reference in New Issue