forked from mindspore-Ecosystem/mindspore
add rmse loss
This commit is contained in:
parent 095d7fb877
commit 112b8488c5
@@ -21,9 +21,11 @@ It shows how well the model works on a dataset and the optimization target which
 from .loss import L1Loss, MSELoss, SmoothL1Loss, FocalLoss,\
     SoftmaxCrossEntropyWithLogits, BCELoss, CosineEmbeddingLoss, \
-    SampledSoftmaxLoss, DiceLoss, BCEWithLogitsLoss, MultiClassDiceLoss
+    SampledSoftmaxLoss, DiceLoss, BCEWithLogitsLoss, MultiClassDiceLoss,\
+    RMSELoss, MAELoss
 
 __all__ = ['L1Loss', 'MSELoss', 'SmoothL1Loss', 'FocalLoss',
            'SoftmaxCrossEntropyWithLogits', 'BCELoss', 'BCEWithLogitsLoss',
-           'CosineEmbeddingLoss', 'SampledSoftmaxLoss', 'DiceLoss', 'MultiClassDiceLoss']
+           'CosineEmbeddingLoss', 'SampledSoftmaxLoss', 'DiceLoss', 'MultiClassDiceLoss',
+           'RMSELoss', 'MAELoss']
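With the import and __all__ entries above, the two new losses become part of the public mindspore.nn surface. A minimal usage sketch of what this change enables (assuming a build that includes this commit; values follow the docstring examples below):

    import numpy as np
    import mindspore
    from mindspore import Tensor
    from mindspore.nn import RMSELoss, MAELoss  # now re-exported via __all__

    logits = Tensor(np.array([1, 2, 3]), mindspore.float32)
    label = Tensor(np.array([1, 2, 2]), mindspore.float32)

    print(RMSELoss()(logits, label))  # sqrt(mean((x - y)^2)) ~= 0.57735026
    print(MAELoss()(logits, label))   # mean(|x - y|)        ~= 0.33333334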
@@ -173,6 +173,98 @@ class MSELoss(_Loss):
         return self.get_loss(x)
 
 
+class RMSELoss(_Loss):
+    r"""
+    RMSELoss creates a criterion to measure the root mean square error between :math:`x` and :math:`y`
+    element-wise, where :math:`x` is the input and :math:`y` is the target.
+
+    For simplicity, let :math:`x` and :math:`y` be 1-dimensional Tensors with length :math:`N`,
+    the loss of :math:`x` and :math:`y` is given as:
+
+    .. math::
+        loss = \sqrt{\frac{1}{N}\sum_{n=1}^{N}{(x_n-y_n)^2}}
+
+    Inputs:
+        - **logits** (Tensor) - Tensor of shape :math:`(x_1, x_2, ..., x_R)`.
+        - **label** (Tensor) - Tensor of shape :math:`(y_1, y_2, ..., y_S)`.
+
+    Outputs:
+        Tensor, a scalar float tensor holding the root mean square error.
+
+    Raises:
+        ValueError: If `logits` and `label` have different shapes.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU``
+
+    Examples:
+        >>> loss = nn.RMSELoss()
+        >>> input_data = Tensor(np.array([1, 2, 3]), mindspore.float32)
+        >>> target_data = Tensor(np.array([1, 2, 2]), mindspore.float32)
+        >>> output = loss(input_data, target_data)
+        >>> print(output)
+        0.57735026
+    """
+    def __init__(self):
+        super(RMSELoss, self).__init__()
+        self.MSELoss = MSELoss()
+
+    def construct(self, logits, label):
+        _check_shape(logits.shape, label.shape)
+        rmse_loss = F.sqrt(self.MSELoss(logits, label))
+
+        return rmse_loss
+
+
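The docstring example is easy to verify by hand. A quick NumPy cross-check (illustrative only, not part of the commit):

    import numpy as np

    x = np.array([1, 2, 3], dtype=np.float32)
    y = np.array([1, 2, 2], dtype=np.float32)
    # RMSE = sqrt(mean((x - y)^2)) = sqrt((0 + 0 + 1) / 3) ~= 0.57735026
    print(np.sqrt(np.mean((x - y) ** 2)))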
+class MAELoss(_Loss):
+    r"""
+    MAELoss creates a criterion to measure the mean absolute error between :math:`x` and :math:`y`
+    element-wise, where :math:`x` is the input and :math:`y` is the target.
+
+    For simplicity, let :math:`x` and :math:`y` be 1-dimensional Tensors with length :math:`N`,
+    the loss (with the default reduction 'mean') of :math:`x` and :math:`y` is given as:
+
+    .. math::
+        \text{MAE} = \frac{1}{N}\sum_{n=1}^{N}\left| x_n - y_n \right|
+
+    Args:
+        reduction (str): Type of reduction to be applied to loss. The optional values are "mean", "sum", and "none".
+            Default: "mean".
+
+    Inputs:
+        - **logits** (Tensor) - Tensor of shape :math:`(x_1, x_2, ..., x_R)`.
+        - **label** (Tensor) - Tensor of shape :math:`(y_1, y_2, ..., y_S)`.
+
+    Outputs:
+        Tensor, a float tensor; a scalar if `reduction` is "mean" or "sum", otherwise the element-wise loss.
+
+    Raises:
+        ValueError: If `reduction` is not one of 'none', 'mean', 'sum'.
+        ValueError: If `logits` and `label` have different shapes.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU``
+
+    Examples:
+        >>> loss = nn.MAELoss()
+        >>> input_data = Tensor(np.array([1, 2, 3]), mindspore.float32)
+        >>> target_data = Tensor(np.array([1, 2, 2]), mindspore.float32)
+        >>> output = loss(input_data, target_data)
+        >>> print(output)
+        0.33333334
+    """
+
+    def construct(self, logits, label):
+        _check_shape(logits.shape, label.shape)
+        x = F.absolute(logits - label)
+        return self.get_loss(x)
+
+
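Because MAELoss keeps the inherited reduction argument, the three modes reduce the element-wise |x - y| differently. An illustrative NumPy cross-check of the docstring example (not part of the commit):

    import numpy as np

    x = np.array([1, 2, 3], dtype=np.float32)
    y = np.array([1, 2, 2], dtype=np.float32)
    err = np.abs(x - y)   # unreduced loss, reduction='none' -> [0. 0. 1.]
    print(err.mean())     # reduction='mean' -> 0.33333334 (docstring example)
    print(err.sum())      # reduction='sum'  -> 1.0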
 class SmoothL1Loss(_Loss):
     r"""
     A loss class for learning region proposals.
@@ -204,3 +204,17 @@ def test_multi_class_dice_loss_init_activation2():
     y_pred = Tensor(np.array([[0.2, 0.5], [0.3, 0.1], [0.9, 0.6]]), mstype.float32)
     y = Tensor(np.array([[1, 0], [0, 1]]), mstype.float32)
     loss(y_pred, y)
+
+
+def test_rmse_loss():
+    loss = nn.RMSELoss()
+    input_data = Tensor(np.array([[1, 2, 3], [2, 3, 2]]).astype(np.float32))
+    target_data = Tensor(np.array([[0, 0, 5], [1, 2, 3]]).astype(np.float32))
+    loss(input_data, target_data)
+
+
+def test_mae_loss():
+    loss = nn.MAELoss()
+    input_data = Tensor(np.array([[1, 2, 3], [2, 3, 2]]).astype(np.float32))
+    target_data = Tensor(np.array([[0, 0, 5], [1, 2, 3]]).astype(np.float32))
+    loss(input_data, target_data)
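The two new tests only smoke-test the forward pass. A hedged sketch of a value-checking variant (hypothetical test name; shapes mirror the tests above):

    def test_rmse_loss_value():
        loss = nn.RMSELoss()
        input_data = Tensor(np.array([[1, 2, 3], [2, 3, 2]]).astype(np.float32))
        target_data = Tensor(np.array([[0, 0, 5], [1, 2, 3]]).astype(np.float32))
        output = loss(input_data, target_data)
        # squared errors: [1, 4, 4, 1, 1, 1]; mean = 2; sqrt(2) ~= 1.4142135
        np.testing.assert_allclose(output.asnumpy(), np.sqrt(2.0), rtol=1e-6)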