!47050 func_l1_loss_master

Merge pull request !47050 from yide12/func_l1_loss_master
This commit is contained in:
i-robot 2022-12-23 07:08:17 +00:00 committed by Gitee
commit 68b3f78879
6 changed files with 171 additions and 0 deletions

View File

@@ -66,6 +66,7 @@ mindspore.ops
mindspore.ops.cross_entropy
mindspore.ops.gaussian_nll_loss
mindspore.ops.hinge_embedding_loss
mindspore.ops.l1_loss
mindspore.ops.mse_loss
mindspore.ops.nll_loss
mindspore.ops.smooth_l1_loss

View File

@@ -0,0 +1,36 @@
mindspore.ops.l1_loss
=====================
.. py:function:: mindspore.ops.l1_loss(x, target, reduction='mean')
l1_loss is used to calculate the mean absolute error between the predicted value and the target value.
Assume that :math:`x` and :math:`y` are 1-D Tensors of length :math:`N`. When `reduction` is set to "none", the loss between :math:`x` and :math:`y` is calculated without dimensionality reduction.
The formula is as follows:
.. math::
\ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad \text{with } l_n = \left| x_n - y_n \right|,
where :math:`N` is the batch size.
If `reduction` is "mean" or "sum", then:
.. math::
\ell(x, y) =
\begin{cases}
\operatorname{mean}(L), & \text{if reduction} = \text{'mean';}\\
\operatorname{sum}(L), & \text{if reduction} = \text{'sum'.}
\end{cases}
Parameters:
- **x** (Tensor) - Predicted value, a Tensor of any dimension.
- **target** (Tensor) - Target value, with the same shape as `x`.
- **reduction** (str, optional) - Type of reduction applied to the loss. Valid values are "mean", "sum" and "none". Default: "mean".
Returns:
Tensor, the result of l1_loss.
Raises:
- **ValueError** - If `reduction` is not "mean", "sum" or "none".
- **ValueError** - If `x` and `target` have different shapes.
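For a concrete feel of the three `reduction` modes described above, here is a minimal usage sketch (editorial, not part of the patch; it assumes a MindSpore build that already includes this commit and reuses the values from the English docstring example below):

import mindspore as ms
import mindspore.ops as ops

x = ms.Tensor([[1, 2, 3], [4, 5, 6]], ms.float32)
target = ms.Tensor([[6, 5, 4], [3, 2, 1]], ms.float32)

# reduction="none" keeps the element-wise |x - target|: [[5, 3, 1], [1, 3, 5]]
print(ops.l1_loss(x, target, reduction="none"))
# reduction="mean" averages all elements: (5 + 3 + 1 + 1 + 3 + 5) / 6 = 3.0
print(ops.l1_loss(x, target, reduction="mean"))
# reduction="sum" adds them up: 18.0
print(ops.l1_loss(x, target, reduction="sum"))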

View File

@@ -67,6 +67,7 @@ Loss Functions
mindspore.ops.cross_entropy
mindspore.ops.gaussian_nll_loss
mindspore.ops.hinge_embedding_loss
mindspore.ops.l1_loss
mindspore.ops.mse_loss
mindspore.ops.nll_loss
mindspore.ops.smooth_l1_loss

View File

@@ -421,6 +421,7 @@ from .nn_func import (
mirror_pad,
nll_loss,
smooth_l1_loss,
l1_loss,
cross_entropy,
grid_sample,
ctc_greedy_decoder,

View File

@@ -2943,6 +2943,66 @@ def _nll_loss(inputs, target, target_dim=-1, weight=None, ignore_index=None, red
return loss
def l1_loss(x, target, reduction='mean'):
r"""
l1_loss is used to calculate the mean absolute error between the predicted value `x` and the target value.
Assume that :math:`x` and :math:`y` are 1-D Tensors of length :math:`N`. When `reduction` is set to "none",
the loss between :math:`x` and :math:`y` is calculated without dimensionality reduction.
The formula is as follows:
.. math::
\ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad \text{with } l_n = \left| x_n - y_n \right|,
where :math:`N` is the batch size.
If `reduction` is "mean" or "sum", then:
.. math::
\ell(x, y) =
\begin{cases}
\operatorname{mean}(L), & \text{if reduction} = \text{'mean';}\\
\operatorname{sum}(L), & \text{if reduction} = \text{'sum'.}
\end{cases}
Args:
x (Tensor): Predicted value, Tensor of any dimension.
target (Tensor): Target value, with the same shape as `x`.
reduction (str, optional): Type of reduction to be applied to loss. The optional values are "mean", "sum" and
"none". Default: "mean".
Returns:
Tensor, the result of l1_loss.
Raises:
ValueError: If `reduction` is not one of "none", "mean" or "sum".
ValueError: If `x` and `target` have different shapes.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore as ms
>>> import mindspore.ops as ops
>>> x = ms.Tensor([[1, 2, 3], [4, 5, 6]], ms.float32)
>>> target = ms.Tensor([[6, 5, 4], [3, 2, 1]], ms.float32)
>>> output = ops.l1_loss(x, target, reduction="mean")
>>> print(output)
3.0
"""
_check_is_tensor('x', x, "l1_loss")
_check_is_tensor('target', target, "l1_loss")
if reduction not in ('mean', 'sum', 'none'):
raise ValueError(f"For l1_loss, the 'reduction' must be in ['mean', 'sum', 'none'], but got {reduction}.")
if x.shape != target.shape:
raise ValueError(f"For l1_loss, x and target must be the same shape, but got {x.shape} and {target.shape}")
# element-wise absolute difference |x - target|
loss = _get_cache_prim(P.Abs)()(x - target)
# for "mean"/"sum", reduce over all axes; "none" keeps the element-wise loss
if reduction == "mean":
loss = _get_cache_prim(P.ReduceMean)()(loss, _get_axis(loss))
if reduction == "sum":
loss = _get_cache_prim(P.ReduceSum)()(loss, _get_axis(loss))
return loss
def smooth_l1_loss(logits, labels, beta=1.0, reduction='none'):
r"""
Computes smooth L1 loss, a robust L1 loss.
@@ -5101,6 +5161,7 @@ __all__ = [
'cross_entropy',
'grid_sample',
'smooth_l1_loss',
'l1_loss',
'nll_loss',
'ctc_loss',
'ctc_greedy_decoder',
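As an editorial cross-check of the reduction logic in `l1_loss` above (not part of the patch), a minimal NumPy reference reproduces the 3.0 from the docstring example:

import numpy as np

def l1_loss_ref(x, target, reduction="mean"):
    # mirrors ops.l1_loss: element-wise |x - target|, then optional mean/sum
    loss = np.abs(x - target)
    if reduction == "mean":
        return loss.mean()
    if reduction == "sum":
        return loss.sum()
    return loss

x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32)
target = np.array([[6, 5, 4], [3, 2, 1]], dtype=np.float32)
print(l1_loss_ref(x, target))  # 3.0, matching the docstring example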

View File

@@ -0,0 +1,71 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore as ms
import mindspore.nn as nn
import mindspore.ops as ops
class Net(nn.Cell):
def construct(self, x, target):
output0 = ops.l1_loss(x, target, reduction="none")
output1 = ops.l1_loss(x, target, reduction="mean")
output2 = ops.l1_loss(x, target, reduction="sum")
return output0, output1, output2
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [ms.GRAPH_MODE, ms.PYNATIVE_MODE])
def test_l1_loss(mode):
"""
Feature: Test l1_loss
Description: Test the functionality of l1_loss
Expectation: Success
"""
ms.set_context(mode=mode)
net = Net()
x = ms.Tensor([[[1.17273476, -0.05052809, 0.61813106, 0.16455488, -1.35581311],
[1.32487223, 0.13208311, -1.31230669, -0.50771298, 1.32278446],
[-0.04625993, 1.18794348, -1.21238798, 0.01314028, -1.20131357]],
[[-1.4510571, -1.03311918, -1.00915919, 0.6134792, 0.56710962],
[-1.39683892, -0.0932166, -1.06056463, 0.20178101, 0.47950521],
[-1.39548584, -1.70302071, -0.48198836, -0.77789908, 0.87970894]]], ms.float32)
target = ms.Tensor([[[-1.30292448, -0.35515205, 1.48585374, 0.22724189, 0.60810377],
[-1.14444725, 1.90415392, 0.45537515, -1.20027348, 1.81567979],
[0.30801377, -0.79452551, 1.80005659, 0.98829231, 2.07602126]],
[[0.05371826, 0.20575326, 1.3496286, 1.55930587, -0.50407597],
[-1.97812696, -1.38987021, -1.95899861, -1.05986999, 0.02349943],
[0.25305345, 0.42477621, 1.74664105, -0.50482991, -0.24119833]]], ms.float32)
out0, out1, out2 = net(x, target)
expect_out0 = [[[2.47565937e+00, 3.04623961e-01, 8.67722750e-01, 6.26870096e-02, 1.96391690e+00],
[2.46931934e+00, 1.77207088e+00, 1.76768184e+00, 6.92560554e-01, 4.92895365e-01],
[3.54273707e-01, 1.98246896e+00, 3.01244450e+00, 9.75152075e-01, 3.27733469e+00]],
[[1.50477529e+00, 1.23887253e+00, 2.35878778e+00, 9.45826709e-01, 1.07118559e+00],
[5.81288099e-01, 1.29665351e+00, 8.98433924e-01, 1.26165104e+00, 4.56005782e-01],
[1.64853930e+00, 2.12779689e+00, 2.22862935e+00, 2.73069203e-01, 1.12090731e+00]]]
expect_out1 = [1.3827745]
expect_out2 = [41.483234]
assert np.allclose(out0.asnumpy(), expect_out0)
assert np.allclose(out1.asnumpy(), expect_out1)
assert np.allclose(out2.asnumpy(), expect_out2)
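The expected arrays in the test above are simply the element-wise absolute differences of `x` and `target`, with `expect_out1` and `expect_out2` their mean and sum; a short NumPy sketch (editorial, not part of the patch) shows how such reference values can be regenerated:

import numpy as np

def make_expectations(x_np, target_np):
    # reference values for reduction="none", "mean" and "sum" as used in test_l1_loss
    diff = np.abs(x_np - target_np)
    return diff, diff.mean(), diff.sum()

# Called with the x/target arrays from test_l1_loss, this yields
# expect_out0, expect_out1 (~1.3827745) and expect_out2 (~41.483234).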