From d0f452828706d632590a0930c19663800a6a34b2 Mon Sep 17 00:00:00 2001
From: zhujiaxing
Date: Sat, 25 Feb 2023 14:10:31 +0800
Subject: [PATCH] modify docs of nn function multi_margin_loss.

---
 .../mindspore.ops.func_multi_margin_loss.rst  | 21 +++++++---------
 ...dspore.ops.func_multilabel_margin_loss.rst |  4 +--
 .../python/mindspore/ops/function/nn_func.py  | 25 ++++++++-----------
 3 files changed, 22 insertions(+), 28 deletions(-)

diff --git a/docs/api/api_python/ops/mindspore.ops.func_multi_margin_loss.rst b/docs/api/api_python/ops/mindspore.ops.func_multi_margin_loss.rst
index 1c8c6a60dbd..d023a8fdf82 100644
--- a/docs/api/api_python/ops/mindspore.ops.func_multi_margin_loss.rst
+++ b/docs/api/api_python/ops/mindspore.ops.func_multi_margin_loss.rst
@@ -3,23 +3,20 @@ mindspore.ops.multi_margin_loss
 
 .. py:function:: mindspore.ops.multi_margin_loss(inputs, target, p=1, margin=1, weight=None, reduction='mean')
 
-    用于优化多类分类问题的铰链损失。
+    用于优化多类分类问题的合页损失。
 
-    创建一个标准,用于优化输入 :math:`x` (一个2D小批量Tensor)
-    和输出 :math:`y` (一个目标类索引的1DTensor :math:`0 \leq y \leq \text{x.size}(1)-1`)
-    之间的多类分类铰链损失(基于边距的损失):
-    对于每个小批量样本,1D输入的损失 :math:`x` 和标量输出 :math:`y` 是:
+    优化输入和输出之间的多类分类合页损失(基于边距的损失)。
+
+    对于每个小批量样本,1D输入 :math:`x` 和标量输出 :math:`y` 的损失为:
 
     .. math::
-        \text{loss}(x, y) = \frac{\sum_i \max(0, w[y] * (\text{margin} - x[y] + x[i]))^p}{\text{x.size}(0)}
+        \text{loss}(x, y) = \frac{\sum_i \max(0, \text{margin} - x[y] + x[i])^p}{\text{x.size}(0)}
 
-    其中 :math:`x \in \left\{0, \; \cdots , \; \text{x.size}(0) - 1\right\}`
-    并且 :math:`i \neq y`。
-    或者,通过向构造函数传递一个1D输入 `weight` 的Tensor来对类赋予不同的权重。
+    其中 :math:`i \in \{0, \cdots, \text{x.size}(0) - 1\}` 并且 :math:`i \ne y`。
 
     参数:
-        - **inputs** (Tensor) - 输入,shape为 :math:`(N, C)`。数据类型只支持float32、float16或float64。
-        - **target** (Tensor) - 真实标签,shape为 :math:`(N,)`。数据类型只支持int64。值应为非负值,且小于C。
+        - **inputs** (Tensor) - 输入,shape为 :math:`(N, C)`。数据类型只支持float32、float16或float64。即上述公式中的 :math:`x` 。
+        - **target** (Tensor) - 真实标签,shape为 :math:`(N,)`。数据类型只支持int64。值应为非负值,且小于C。即上述公式中的 :math:`y` 。
         - **p** (int, 可选) - 对偶距离的范数度。必须为1或2。默认值:1。
         - **margin** (int, 可选) - 改变对偶距离的参数。默认值:1。
         - **weight** (Tensor, 可选) - 每个类别的缩放权重,shape为 :math:`(C,)`。数据类型只支持float32、float16或float64。默认值:None。
@@ -30,7 +27,7 @@ mindspore.ops.multi_margin_loss
           - 'sum':输出的总和。
 
     返回:
-        - **outputs** - (Tensor),当 `reduction` 为"none"时,shape为 :math:`(N,)`。否则,为标量。类型与 `inputs` 相同。
+        - **outputs** - 当 `reduction` 为"none"时,类型为Tensor,shape和 `target` 相同。否则,为标量。
 
     异常:
         - **TypeError** - `p` 或者 `target` 数据类型不是int。
diff --git a/docs/api/api_python/ops/mindspore.ops.func_multilabel_margin_loss.rst b/docs/api/api_python/ops/mindspore.ops.func_multilabel_margin_loss.rst
index a6f8493a467..4062c78b507 100644
--- a/docs/api/api_python/ops/mindspore.ops.func_multilabel_margin_loss.rst
+++ b/docs/api/api_python/ops/mindspore.ops.func_multilabel_margin_loss.rst
@@ -3,10 +3,10 @@ mindspore.ops.multilabel_margin_loss
 
 .. py:function:: mindspore.ops.multilabel_margin_loss(inputs, target, reduction='mean')
 
-    用于优化多标签分类问题的铰链损失。
+    用于优化多标签分类问题的合页损失。
 
     创建一个标准,用于优化输入 :math:`x` (一个2D小批量Tensor)
-    和输出 :math:`y` (一个目标类别索引的2DTensor)之间的多标签分类铰链损失(基于边距的损失):
+    和输出 :math:`y` (一个目标类别索引的2DTensor)之间的多标签分类合页损失(基于边距的损失):
     对于每个小批量样本:
 
     .. math::
diff --git a/mindspore/python/mindspore/ops/function/nn_func.py b/mindspore/python/mindspore/ops/function/nn_func.py
index d3adfc3d46f..0ebeae1e6d2 100644
--- a/mindspore/python/mindspore/ops/function/nn_func.py
+++ b/mindspore/python/mindspore/ops/function/nn_func.py
@@ -5141,25 +5141,22 @@ def multi_margin_loss(inputs, target, p=1, margin=1, weight=None, reduction='mea
     r"""
     Hinge loss for optimizing a multi-class classification.
 
-    Creates a criterion that optimizes a multi-class classification hinge
-    loss (margin-based loss) between input :math:`x` (a 2D mini-batch `Tensor`) and
-    output :math:`y` (which is a 1D tensor of target class indices,
-    :math:`0 \leq y \leq \text{x.size}(1)-1`):
-    For each mini-batch sample, the loss in terms of the 1D input :math:`x` and scalar
-    output :math:`y` is:
+    Optimizes a multi-class classification hinge
+    loss (margin-based loss) between the input and
+    output.
+
+    For each mini-batch sample, the loss in terms of the 1D input :math:`x` and scalar output :math:`y` is:
 
     .. math::
-        \text{loss}(x, y) = \frac{\sum_i \max(0, w[y] * (\text{margin} - x[y] + x[i]))^p}{\text{x.size}(0)}
+        \text{loss}(x, y) = \frac{\sum_i \max(0, \text{margin} - x[y] + x[i])^p}{\text{x.size}(0)}
 
-    where :math:`x \in \left\{0, \; \cdots , \; \text{x.size}(0) - 1\right\}`
-    and :math:`i \neq y`.
-    Optionally, you can give non-equal weighting on the classes by passing
-    a 1D input `weight` tensor into the constructor.
+    where :math:`i \in \{0, \cdots, \text{x.size}(0) - 1\}` and :math:`i \ne y`.
 
     Args:
         inputs (Tensor): Input , with shape :math:`(N, C)`. Data type only support float32, float16 or float64.
+            It is :math:`x` in the above formula.
         target (Tensor): Ground truth labels, with shape :math:`(N,)`. Data type only support int64. The
-            value of target should be non-negative, less than C.
+            value of target should be non-negative and less than C. It is :math:`y` in the above formula.
         p (int, optional): The norm degree for pairwise distance. Should be 1 or 2. Default: 1.
         margin (int, optional): A parameter to change pairwise distance. Default: 1.
         weight (Tensor, optional): The rescaling weight to each class with shape :math:`(C,)`. Data type only
@@ -5172,8 +5169,8 @@ def multi_margin_loss(inputs, target, p=1, margin=1, weight=None, reduction='mea
             - 'sum': the output will be summed.
 
     Returns:
-        Tensor, When `reduction` is 'none', the shape is :math:`(N,)`.
-        Otherwise, it is a scalar. Has the same data type with `inputs`.
+        Tensor. If `reduction` is 'none', the returned Tensor has the same shape as `target`.
+        Otherwise, it is a scalar.
 
     Raises:
         TypeError: If dtype of `p` or `target` is not int.
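
For reference, a minimal usage sketch of the interface documented above (not part of the patch): the signature, dtypes, shapes, and reduction values follow the docstring, while the tensor values below are illustrative only.

    import numpy as np
    import mindspore as ms
    from mindspore import ops

    # N = 2 samples over C = 3 classes: float32 inputs of shape (N, C) and
    # int64 targets of shape (N,) with values in [0, C - 1], as documented above.
    inputs = ms.Tensor(np.array([[0.3, 0.7, 0.2],
                                 [0.5, 0.1, 0.9]]), ms.float32)
    target = ms.Tensor(np.array([1, 2]), ms.int64)

    # Mean-reduced hinge loss: per sample, sum_i max(0, margin - x[y] + x[i])^p / x.size(0) with i != y,
    # as in the formula above.
    loss = ops.multi_margin_loss(inputs, target, p=1, margin=1, reduction='mean')

    # With reduction='none', the per-sample losses are returned with shape (N,).
    per_sample = ops.multi_margin_loss(inputs, target, reduction='none')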