forked from mindspore-Ecosystem/mindspore
!13669 modify example
From: @lijiaqi0612 Reviewed-by: @kingxian,@kisnwang Signed-off-by: @kingxian
commit 958ec1c85b
@@ -436,7 +436,7 @@ class DiceLoss(_Loss):
>>> y = Tensor(np.array([[0, 1], [1, 0], [0, 1]]), mstype.float32)
>>> output = loss(y_pred, y)
>>> print(output)
[0.38596618]
0.38596618
"""
def __init__(self, smooth=1e-5):
super(DiceLoss, self).__init__()
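For reference, the printed value can be reproduced with a small NumPy sketch of the Dice loss (one minus the smoothed Dice coefficient). The `y_pred` below is an assumption, since this hunk only shows `y`; it is chosen to match the documented output.

    import numpy as np

    # hypothetical y_pred (not shown in this hunk); y and smooth follow the docstring
    y_pred = np.array([[0.2, 0.5], [0.3, 0.1], [0.9, 0.6]], dtype=np.float32)
    y = np.array([[0, 1], [1, 0], [0, 1]], dtype=np.float32)
    smooth = 1e-5

    intersection = np.sum(y_pred * y)
    union = np.sum(y_pred * y_pred) + np.sum(y * y)
    print(1.0 - (2.0 * intersection + smooth) / (union + smooth))  # ~0.38596618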
@@ -1031,7 +1031,11 @@ class FocalLoss(_Loss):
r"""
The loss function proposed by Kaiming team in their paper ``Focal Loss for Dense Object Detection`` improves the
effect of image object detection. It is a loss function to solve the imbalance of categories and the difference of
classification difficulty.
classification difficulty. If you want to learn more, please refer to the paper.
`https://arxiv.org/pdf/1708.02002.pdf`. The function is shown as follows:

.. math::
FL(p_t) = -(1-p_t)^\gamma log(p_t)

Args:
gamma (float): Gamma is used to adjust the steepness of weight curve in focal loss. Default: 2.0.
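As a quick illustration of the formula above, here is a minimal NumPy sketch of the element-wise focal term, assuming `p_t` is the probability the model assigns to the true class:

    import numpy as np

    def focal_term(p_t, gamma=2.0):
        # FL(p_t) = -(1 - p_t)^gamma * log(p_t); clip to avoid log(0)
        p_t = np.clip(p_t, 1e-7, 1.0)
        return -((1.0 - p_t) ** gamma) * np.log(p_t)

    # well-classified examples (high p_t) are down-weighted; hard ones dominate
    print(focal_term(np.array([0.9, 0.6, 0.1])))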
@@ -1053,14 +1057,14 @@ class FocalLoss(_Loss):
Tensor, it's a tensor with the same shape and type as input `predict`.

Raises:
TypeError: If the data type of ``gamma`` is not float..
TypeError: If ``weight`` is not a Parameter.
TypeError: If the data type of ``gamma`` is not float.
TypeError: If ``weight`` is not a Tensor.
ValueError: If ``target`` dim different from ``predict``.
ValueError: If ``target`` channel is not 1 and ``target`` shape is different from ``predict``.
ValueError: If ``reduction`` is not one of 'none', 'mean', 'sum'.

Supported Platforms:
``Ascend`` ``GPU``
``Ascend``

Example:
>>> predict = Tensor([[0.8, 1.4], [0.5, 0.9], [1.2, 0.9]], mstype.float32)
@@ -32,14 +32,18 @@ def auc(x, y, reorder=False):
Returns:
area (float): Compute result.

Supported Platforms:
``Ascend`` ``GPU`` ``CPU``

Examples:
>>> y_pred = np.array([[3, 0, 1], [1, 3, 0], [1, 0, 2]])
>>> y = np.array([[0, 2, 1], [1, 2, 1], [0, 0, 1]])
>>> metric = ROC(pos_label=2)
>>> metric = nn.ROC(pos_label=2)
>>> metric.clear()
>>> metric.update(y_pred, y)
>>> fpr, tpr, thre = metric.eval()
>>> output = auc(fpr, tpr)
>>> print(output)
0.5357142857142857
"""
if not isinstance(x, np.ndarray) or not isinstance(y, np.ndarray):
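The returned area is the integral of the (fpr, tpr) curve. A minimal sketch of that computation with NumPy's trapezoidal rule, assuming `fpr` and `tpr` are the 1-D arrays produced by `metric.eval()` (the `auc` function in the file may handle unsorted input differently):

    import numpy as np

    def auc_trapezoid(fpr, tpr):
        # integrate tpr over increasing fpr
        fpr, tpr = np.asarray(fpr), np.asarray(tpr)
        order = np.argsort(fpr)
        return float(np.trapz(tpr[order], fpr[order]))

    # a toy curve, for illustration only
    print(auc_trapezoid([0.0, 0.5, 1.0], [0.0, 0.75, 1.0]))  # 0.625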
@@ -27,6 +27,9 @@ class BleuScore(Metric):
n_gram (int): The n_gram value ranged from 1 to 4. Default: 4
smooth (bool): Whether or not to apply smoothing. Default: False

Supported Platforms:
``Ascend`` ``GPU`` ``CPU``

Example:
>>> candidate_corpus = [['i', 'have', 'a', 'pen', 'on', 'my', 'desk']]
>>> reference_corpus = [[['i', 'have', 'a', 'pen', 'in', 'my', 'desk'],
@@ -35,6 +38,7 @@ class BleuScore(Metric):
>>> metric.clear()
>>> metric.update(candidate_corpus, reference_corpus)
>>> bleu_score = metric.eval()
>>> print(output)
0.5946035575013605
"""
def __init__(self, n_gram=4, smooth=False):
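The printed score follows the usual BLEU definition: clipped n-gram precisions, their geometric mean, and a brevity penalty. A compact sketch that reproduces it is below; the second reference sentence is an assumption, because the example's `reference_corpus` is truncated in the hunk above.

    from collections import Counter
    from math import exp, log

    def ngrams(tokens, n):
        return Counter(tuple(tokens[i:i + n]) for i in range(len(tokens) - n + 1))

    def bleu(candidate, references, n_gram=4):
        precisions = []
        for n in range(1, n_gram + 1):
            cand = ngrams(candidate, n)
            best = Counter()
            for ref in references:
                for gram, cnt in ngrams(ref, n).items():
                    best[gram] = max(best[gram], cnt)
            overlap = sum(min(cnt, best[gram]) for gram, cnt in cand.items())
            precisions.append(overlap / max(sum(cand.values()), 1))
        ref_len = min(len(r) for r in references)  # both references have length 7 here
        bp = 1.0 if len(candidate) >= ref_len else exp(1 - ref_len / len(candidate))
        return bp * exp(sum(log(p) for p in precisions) / n_gram)

    candidate = ['i', 'have', 'a', 'pen', 'on', 'my', 'desk']
    references = [['i', 'have', 'a', 'pen', 'in', 'my', 'desk'],       # from the hunk
                  ['there', 'is', 'a', 'pen', 'on', 'the', 'desk']]    # assumed
    print(bleu(candidate, references))  # 0.5946035575013605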
@@ -39,10 +39,13 @@ class ConfusionMatrix(Metric):

threshold (float): A threshold, which is used to compare with the input tensor. Default: 0.5.

Supported Platforms:
``Ascend`` ``GPU`` ``CPU``

Examples:
>>> x = Tensor(np.array([1, 0, 1, 0]))
>>> y = Tensor(np.array([1, 0, 0, 1]))
>>> metric = nn.ConfusionMatrix(num_classes=2, normalize=NO_NORM, threshold=0.5)
>>> metric = nn.ConfusionMatrix(num_classes=2, normalize='no_norm', threshold=0.5)
>>> metric.clear()
>>> metric.update(x, y)
>>> output = metric.eval()
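What the example computes can be sketched directly in NumPy: compare the predictions against the 0.5 threshold (a no-op for 0/1 input) and tally a num_classes x num_classes table. The (truth, prediction) row/column convention is an assumption; the result is symmetric for this data either way.

    import numpy as np

    x = np.array([1, 0, 1, 0])        # predictions
    y = np.array([1, 0, 0, 1])        # ground truth
    pred = (x >= 0.5).astype(int)     # threshold=0.5

    num_classes = 2
    cm = np.zeros((num_classes, num_classes), dtype=int)
    for truth, p in zip(y, pred):
        cm[truth, p] += 1
    print(cm)
    # [[1 1]
    #  [1 1]]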
@@ -165,9 +168,12 @@ class ConfusionMatrixMetric(Metric):
calculation_method is True. Default: "mean". Choose from:
["none", "mean", "sum", "mean_batch", "sum_batch", "mean_channel", "sum_channel"].

Supported Platforms:
``Ascend`` ``GPU`` ``CPU``

Examples:
>>> metric = ConfusionMatrixMetric(skip_channel=True, metric_name="tpr",
>>> calculation_method=False, decrease="mean")
... calculation_method=False, decrease="mean")
>>> metric.clear()
>>> x = Tensor(np.array([[[0], [1]], [[1], [0]]]))
>>> y = Tensor(np.array([[[0], [1]], [[0], [1]]]))
@@ -29,17 +29,21 @@ class CosineSimilarity(Metric):

Return:
A square matrix (input1, input1) with the similarity scores between all elements.
If sum or mean are used, then returns (b, 1) with the reduced value for each row
If sum or mean are used, then returns (b, 1) with the reduced value for each row.

Supported Platforms:
``Ascend`` ``GPU`` ``CPU``

Example:
>>> test_data = np.random.randn(4, 8)
>>> test_data = np.array([[1, 3, 4, 7], [2, 4, 2, 5], [3, 1, 5, 8]])
>>> metric = CosineSimilarity()
>>> metric.clear()
>>> metric.update(test_data)
>>> square_matrix = metric.eval()
[[0. -0.14682831 0.19102288 -0.36204537]
...
]
>>> print(square_matrix)
[[0. 0.94025615 0.95162452]
[0.94025615 0. 0.86146098]
[0.95162452 0.86146098 0.]]
"""
def __init__(self, similarity='cosine', reduction='none', zero_diagonal=True):
super().__init__()
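The new printed matrix can be reconstructed in a few lines of NumPy: L2-normalize each row, take pairwise dot products, and zero the diagonal (matching `zero_diagonal=True`):

    import numpy as np

    test_data = np.array([[1, 3, 4, 7], [2, 4, 2, 5], [3, 1, 5, 8]], dtype=float)
    normed = test_data / np.linalg.norm(test_data, axis=1, keepdims=True)
    square_matrix = normed @ normed.T
    np.fill_diagonal(square_matrix, 0.0)     # zero_diagonal=True
    print(np.round(square_matrix, 8))
    # [[0.         0.94025615 0.95162452]
    #  [0.94025615 0.         0.86146098]
    #  [0.95162452 0.86146098 0.        ]]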
@@ -32,6 +32,9 @@ class Dice(Metric):
smooth (float): A term added to the denominator to improve numerical stability. Should be greater than 0.
Default: 1e-5.

Supported Platforms:
``Ascend`` ``GPU`` ``CPU``

Examples:
>>> x = Tensor(np.array([[0.2, 0.5], [0.3, 0.1], [0.9, 0.6]]))
>>> y = Tensor(np.array([[0, 1], [1, 0], [0, 1]]))
@@ -86,10 +86,13 @@ class HausdorffDistance(Metric):
here the bounding box is achieved by (y_pred | y) which represents the union set of two images.
Default: True.

Supported Platforms:
``Ascend`` ``GPU``

Examples:
>>> x = Tensor(np.array([[3, 0, 1], [1, 3, 0], [1, 0, 2]]))
>>> y = Tensor(np.array([[0, 2, 1], [1, 2, 1], [0, 0, 1]]))
>>> metric = nn.HausdorffDistance
>>> metric = nn.HausdorffDistance()
>>> metric.clear()
>>> metric.update(x, y, 0)
>>> mean_average_distance = metric.eval()
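For intuition, a generic sketch of a symmetric Hausdorff distance over the label-0 pixels of the two images (the example passes label 0 to `metric.update`). It uses SciPy's `directed_hausdorff` and ignores the percentile and cropping options of the metric above, so the result is only illustrative.

    import numpy as np
    from scipy.spatial.distance import directed_hausdorff

    x = np.array([[3, 0, 1], [1, 3, 0], [1, 0, 2]])
    y = np.array([[0, 2, 1], [1, 2, 1], [0, 0, 1]])
    label = 0

    pts_x = np.argwhere(x == label)   # coordinates of pixels equal to the label
    pts_y = np.argwhere(y == label)
    hd = max(directed_hausdorff(pts_x, pts_y)[0],
             directed_hausdorff(pts_y, pts_x)[0])
    print(hd)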
@@ -32,6 +32,9 @@ class MeanSurfaceDistance(Metric):
if sets ``symmetric = True``, the average symmetric surface distance between these two inputs
will be returned. Defaults: False.

Supported Platforms:
``Ascend`` ``GPU`` ``CPU``

Examples:
>>> x = Tensor(np.array([[3, 0, 1], [1, 3, 0], [1, 0, 2]]))
>>> y = Tensor(np.array([[0, 2, 1], [1, 2, 1], [0, 0, 1]]))
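A rough sketch of the idea behind the average (symmetric) surface distance, treating every labelled pixel as a surface point for simplicity; real implementations, including the metric above, extract the object boundary first, so the numbers are illustrative only and the label value is an assumption.

    import numpy as np
    from scipy.spatial.distance import cdist

    x = np.array([[3, 0, 1], [1, 3, 0], [1, 0, 2]])
    y = np.array([[0, 2, 1], [1, 2, 1], [0, 0, 1]])
    label = 1                                    # assumed label of interest

    pts_x = np.argwhere(x == label).astype(float)
    pts_y = np.argwhere(y == label).astype(float)
    d = cdist(pts_x, pts_y)                      # pairwise Euclidean distances
    asd_xy = d.min(axis=1).mean()                # x -> y
    asd_yx = d.min(axis=0).mean()                # y -> x
    print(asd_xy, (asd_xy + asd_yx) / 2.0)       # one-sided vs. symmetric=True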
@@ -13,7 +13,6 @@
# limitations under the License.
# ============================================================================
"""OcclusionSensitivity."""
from collections.abc import Sequence
import numpy as np
from mindspore import nn
from mindspore.common.tensor import Tensor
@@ -47,16 +46,19 @@ class OcclusionSensitivity(Metric):
as the input image. If a bounding box is used, the output image will be cropped to this size.
Default: None.

Supported Platforms:
``Ascend`` ``GPU``

Example:
>>> class DenseNet(nn.Cell):
>>> def init(self):
>>> super(DenseNet, self).init()
>>> w = np.array([[0.1, 0.8, 0.1, 0.1],[1, 1, 1, 1]]).astype(np.float32)
>>> b = np.array([0.3, 0.6]).astype(np.float32)
>>> self.dense = nn.Dense(4, 2, weight_init=Tensor(w), bias_init=Tensor(b))
>>>
>>> def construct(self, x):
>>> return self.dense(x)
... def init(self):
... super(DenseNet, self).init()
... w = np.array([[0.1, 0.8, 0.1, 0.1],[1, 1, 1, 1]]).astype(np.float32)
... b = np.array([0.3, 0.6]).astype(np.float32)
... self.dense = nn.Dense(4, 2, weight_init=Tensor(w), bias_init=Tensor(b))
...
... def construct(self, x):
... return self.dense(x)
>>>
>>> model = DenseNet()
>>> test_data = np.array([[0.1, 0.2, 0.3, 0.4]]).astype(np.float32)
@@ -65,6 +67,7 @@ class OcclusionSensitivity(Metric):
>>> metric.clear()
>>> metric.update(model, test_data, label)
>>> score = metric.eval()
>>> print(score)
[0.29999995 0.6 1 0.9]
"""
def __init__(self, pad_val=0.0, margin=2, n_batch=128, b_box=None):
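The procedure behind the metric can be sketched in plain NumPy: occlude one input element at a time with `pad_val` and record how much the score of the chosen class drops. The linear model and `label` below only loosely mirror the DenseNet example (the label value is not shown in the hunk), so the numbers are not expected to match the documented output.

    import numpy as np

    w = np.array([[0.1, 0.8, 0.1, 0.1], [1, 1, 1, 1]], dtype=np.float32)  # (out, in)
    b = np.array([0.3, 0.6], dtype=np.float32)
    model = lambda inp: inp @ w.T + b

    test_data = np.array([[0.1, 0.2, 0.3, 0.4]], dtype=np.float32)
    label, pad_val = 1, 0.0                     # assumed label index

    baseline = model(test_data)[0, label]
    sensitivity = np.empty(test_data.shape[1])
    for i in range(test_data.shape[1]):
        occluded = test_data.copy()
        occluded[0, i] = pad_val                # occlude one feature
        sensitivity[i] = baseline - model(occluded)[0, label]
    print(sensitivity)                          # score drop per occluded feature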
@@ -32,6 +32,9 @@ class Perplexity(Metric):
ignore_label (int): Index of an invalid label to be ignored when counting. If set to `None`, it will include all
entries. Default: -1.

Supported Platforms:
``Ascend`` ``GPU`` ``CPU``

Examples:
>>> x = Tensor(np.array([[0.2, 0.5], [0.3, 0.1], [0.9, 0.6]]))
>>> y = Tensor(np.array([1, 0, 1]))
@@ -39,6 +42,7 @@ class Perplexity(Metric):
>>> metric.clear()
>>> metric.update(x, y)
>>> perplexity = metric.eval()
>>> print(perplexity)
2.231443166940565
"""
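The printed value is simply the exponential of the average negative log-probability assigned to each target class; indexing the target column of `x` reproduces it, assuming the rows of `x` are taken as per-class probabilities exactly as given:

    import numpy as np

    x = np.array([[0.2, 0.5], [0.3, 0.1], [0.9, 0.6]])
    y = np.array([1, 0, 1])

    target_probs = x[np.arange(len(y)), y]          # [0.5, 0.3, 0.6]
    print(np.exp(-np.mean(np.log(target_probs))))   # ~2.231443166940565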
@@ -30,6 +30,9 @@ class ROC(Metric):
to 1. For multiclass problems, this argument should not be set, as it is iteratively changed in the
range [0,num_classes-1]. Default: None.

Supported Platforms:
``Ascend`` ``GPU`` ``CPU``

Examples:
>>> # 1) binary classification example
>>> x = Tensor(np.array([3, 1, 4, 2]))
@@ -38,8 +41,11 @@ class ROC(Metric):
>>> metric.clear()
>>> metric.update(x, y)
>>> fpr, tpr, thresholds = metric.eval()
>>> print(fpr)
[0., 0., 0.33333333, 0.6666667, 1.]
>>> print(tpr)
[0., 1, 1., 1., 1.]
>>> print(thresholds)
[5, 4, 3, 2, 1]
>>>
>>> # 2) multiclass classification example
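The binary curve above can be reproduced by sweeping a decision threshold over the scores: the threshold list is the unique scores in descending order with max+1 prepended, and tpr/fpr are the fractions of positives/negatives scoring at or above each threshold. The label vector below is an assumption (`y` is defined outside this hunk), chosen so that the sample with score 4 is the only positive.

    import numpy as np

    scores = np.array([3, 1, 4, 2], dtype=float)
    positive = np.array([False, False, True, False])   # assumed labels

    thresholds = np.concatenate(([scores.max() + 1], np.unique(scores)[::-1]))
    tpr = np.array([(scores[positive] >= t).mean() for t in thresholds])
    fpr = np.array([(scores[~positive] >= t).mean() for t in thresholds])
    print(fpr)          # [0. 0. 0.33333333 0.66666667 1.]
    print(tpr)          # [0. 1. 1. 1. 1.]
    print(thresholds)   # [5. 4. 3. 2. 1.]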
@@ -50,9 +56,12 @@ class ROC(Metric):
>>> metric.clear()
>>> metric.update(x, y)
>>> fpr, tpr, thresholds = metric.eval()
>>> print(fpr)
[array([0., 0., 0.33333333, 0.66666667, 1.]), array([0., 0.33333333, 0.33333333, 1.]),
array([0., 0.33333333, 1.]), array([0., 0., 1.])]
>>> print(tpr)
[array([0., 1., 1., 1., 1.]), array([0., 0., 1., 1.]), array([0., 1., 1.]), array([0., 1., 1.])]
>>> print(thresholds)
[array([1.28, 0.28, 0.2, 0.1, 0.05]), array([1.55, 0.55, 0.2, 0.05]), array([1.15, 0.15, 0.05]),
array([1.75, 0.75, 0.05])]
"""
@@ -33,6 +33,9 @@ class RootMeanSquareDistance(Metric):
if sets ``symmetric = True``, the average symmetric surface distance between these two inputs
will be returned. Defaults: False.

Supported Platforms:
``Ascend`` ``GPU`` ``CPU``

Examples:
>>> x = Tensor(np.array([[3, 0, 1], [1, 3, 0], [1, 0, 2]]))
>>> y = Tensor(np.array([[0, 2, 1], [1, 2, 1], [0, 0, 1]]))