!9370 Fixing some tiny faults in notes of classes' examples

From: @zhangz0911gm
Reviewed-by: @liangchenghui, @c_34
Signed-off-by: @liangchenghui
mindspore-ci-bot 2020-12-02 18:44:09 +08:00 committed by Gitee
commit cffe2c94fe
14 changed files with 32 additions and 30 deletions


@@ -366,7 +366,8 @@ class ReLU(GraphKernel):
 >>> relu = ReLU()
 >>> result = relu(input_x)
 >>> print(result)
-[[0, 4.0, 0.0], [2.0, 0.0, 9.0]]
+[[0. 4. 0.]
+ [2. 0. 9.]]
 """
 def __init__(self):
     super(ReLU, self).__init__()
@@ -685,7 +686,7 @@ class LogSoftmax(GraphKernel):
 >>> log_softmax = LogSoftmax()
 >>> result = log_softmax(input_x)
 >>> print(result)
-[-4.4519143, -3.4519143, -2.4519143, -1.4519144, -0.4519144]
+[-4.4519143 -3.4519143 -2.4519143 -1.4519144 -0.4519144]
 """
 def __init__(self, axis=-1):
@@ -743,7 +744,7 @@ class Tanh(GraphKernel):
 >>> tanh = Tanh()
 >>> result = tanh(input_x)
 >>> print(result)
-[0.7615941, 0.9640276, 0.9950548, 0.9993293, 0.99990916]
+[0.7615941 0.9640276 0.9950548 0.9993293 0.99990916]
 """
 def __init__(self):
     super(Tanh, self).__init__()
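
Note: the three GraphKernel fixes above all replace hand-written, comma-separated
lists with what MindSpore actually prints, which follows numpy's formatting:
elements are space-separated and whole floats end in a bare dot. A minimal sketch
of that formatting in plain numpy (not MindSpore itself):

    import numpy as np

    # numpy omits commas and renders 4.0 as "4."
    print(np.array([[0.0, 4.0, 0.0], [2.0, 0.0, 9.0]]))
    # [[0. 4. 0.]
    #  [2. 0. 9.]]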


@@ -264,7 +264,7 @@ class LeakyReLU(Cell):
 >>> output = leaky_relu(input_x)
 >>> print(output)
 [[-0.2 4. -1.6]
- [ 2 -1. 9. ]]
+ [ 2. -1. 9. ]]
 """
 def __init__(self, alpha=0.2):


@@ -748,8 +748,8 @@ class Triu(Cell):
 >>> triu = nn.Triu()
 >>> result = triu(x)
 >>> print(result)
-[[1 2]
- [0 4]]
+[[1 0]
+ [3 4]]
 """
 def __init__(self):
     super(Triu, self).__init__()
@@ -796,8 +796,8 @@ class MatrixDiag(Cell):
 >>> matrix_diag = nn.MatrixDiag()
 >>> output = matrix_diag(x)
 >>> print(output)
-[[1. 0.]
- [0. -1.]]
+[[ 1. 0.]
+ [ 0. -1.]]
 """
 def __init__(self):
     super(MatrixDiag, self).__init__()
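
Note: the MatrixDiag change is purely about sign alignment. Once a column holds
a negative value, numpy left-pads the positive entries with one space so the
columns line up, which is where the leading spaces in the corrected output come
from:

    import numpy as np

    # the negative entry forces a sign-alignment space before "1." and "0."
    print(np.array([[1.0, 0.0], [0.0, -1.0]]))
    # [[ 1.  0.]
    #  [ 0. -1.]]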


@@ -398,7 +398,7 @@ class PSNR(Cell):
 >>> img2 = Tensor(np.random.random((1,3,16,16)))
 >>> output = net(img1, img2)
 >>> print(output)
-[7.7229595]
+[7.915369]
 """
 def __init__(self, max_val=1.0):
     super(PSNR, self).__init__()
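
Note: the PSNR example compares two np.random.random images, so the documented
number is only valid if it matches what the current implementation and seed
actually produce; the fix replaces a stale value with the real output. For
reference, a minimal numpy sketch of the standard definition, assuming the usual
10 * log10(max_val^2 / MSE) form:

    import numpy as np

    def psnr(img1, img2, max_val=1.0):
        # peak signal-to-noise ratio: 10 * log10(max_val**2 / MSE)
        mse = np.mean((img1 - img2) ** 2)
        return 10 * np.log10(max_val ** 2 / mse)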


@@ -182,7 +182,7 @@ class MaxPool1d(_PoolNd):
 >>> x = Tensor(np.random.randint(0, 10, [1, 2, 4]), mindspore.float32)
 >>> output = max_pool(x)
 >>> result = output.shape
->>> printI(result)
+>>> print(result)
 (1, 2, 2)
 """


@@ -148,7 +148,7 @@ class NaturalExpDecayLR(LearningRateSchedule):
 >>> natural_exp_decay_lr = NaturalExpDecayLR(learning_rate, decay_rate, decay_steps, True)
 >>> result = natural_exp_decay_lr(global_step)
 >>> print(result)
-0.016529894
+0.1
 """
 def __init__(self, learning_rate, decay_rate, decay_steps, is_stair=False):
     super(NaturalExpDecayLR, self).__init__()
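
Note: with is_stair=True the schedule is learning_rate * exp(-decay_rate *
floor(current_step / decay_steps)), so for the docstring's values (assumed here
to be learning_rate=0.1, decay_rate=0.9, decay_steps=4, global_step=2) the
exponent is floor(2/4) = 0 and the result is exactly 0.1. The stale 0.016529894
equals 0.1 * exp(-0.9 * 2), i.e. a computation without the staircase floor. A
worked check under those assumed values:

    import math

    learning_rate, decay_rate, decay_steps, step = 0.1, 0.9, 4, 2  # assumed example values
    p = math.floor(step / decay_steps)  # is_stair=True -> p = 0
    print(learning_rate * math.exp(-decay_rate * p))  # 0.1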


@@ -599,7 +599,7 @@ class CosineEmbeddingLoss(_Loss):
 >>> cosine_embedding_loss = nn.CosineEmbeddingLoss()
 >>> output = cosine_embedding_loss(x1, x2, y)
 >>> print(output)
-[0.0003426075]
+0.0003426075
 """
 def __init__(self, margin=0.0, reduction="mean"):
     super(CosineEmbeddingLoss, self).__init__(reduction)
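
Note: with the default reduction="mean" the loss is reduced to a single scalar,
so printing it shows a bare number rather than a one-element array. A rough
numpy analogue of the two renderings:

    import numpy as np

    per_pair = np.array([0.0003426075])  # unreduced, prints as [0.0003426075]
    print(np.mean(per_pair))             # reduced to 0-d: 0.0003426075, no brackets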


@@ -42,7 +42,7 @@ class Accuracy(EvaluationBase):
 >>> metric.update(x, y)
 >>> accuracy = metric.eval()
 >>> print(accuracy)
-0.66666666
+0.6666666666666666
 """
 def __init__(self, eval_type='classification'):
     super(Accuracy, self).__init__(eval_type)
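
Note: the accuracy here is presumably 2 correct out of 3 samples, and printing a
Python/numpy float shows its full repr, so the honest expected output is the
full double-precision rendering of 2/3 rather than a hand-truncated 0.66666666:

    print(2 / 3)  # 0.6666666666666666 -- repr of the float64 nearest to 2/3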


@@ -51,8 +51,8 @@ def normal(shape, mean, stddev, seed=None):
 >>> stddev = Tensor(1.0, mstype.float32)
 >>> output = C.normal(shape, mean, stddev, seed=5)
 >>> print(output)
-[[1.0996436 0.44371283 0.11127508 -0.48055804]
- [0.31989878 -1.0644426 1.5076542 1.2290289 ]]
+[[ 1.0996436 0.44371283 0.11127508 -0.48055804]
+ [ 0.31989878 -1.0644426 1.5076542 1.2290289 ]]
 """
 mean_dtype = F.dtype(mean)
 stddev_dtype = F.dtype(stddev)


@@ -63,7 +63,7 @@ class ScalarSummary(PrimitiveWithInfer):
 ...         self.summary(name, x)
 ...         x = self.add(x, y)
 ...         return x
-...
+...
 """
 @prim_attr_register


@@ -613,7 +613,7 @@ class ReduceProd(_Reduce):
 >>> input_x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
 >>> op = ops.ReduceProd(keep_dims=True)
 >>> output = op(input_x, 1)
->>> reuslt = output.shape
+>>> result = output.shape
 >>> print(result)
 (3, 1, 5, 6)
 """
@@ -2513,8 +2513,9 @@ class Equal(_LogicBinaryOp):
 Examples:
 >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.float32)
 >>> equal = ops.Equal()
->>> equal(input_x, 2.0)
-[False, True, False]
+>>> output = equal(input_x, 2.0)
+>>> print(output)
+Tensor(shape=[3], dtype=Bool, value= [False, True, False])
 >>>
 >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32)
 >>> input_y = Tensor(np.array([1, 2, 4]), mindspore.int32)
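
Note: the Equal fix shows the example convention applied throughout this commit:
bind the operator's result to a name, then print it explicitly, so the expected
output line is exactly what print() emits. A generic doctest-style sketch of the
pattern:

    >>> result = 1 + 1   # bind the value first...
    >>> print(result)    # ...then print it, so the line below is what gets checked
    2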


@@ -6124,7 +6124,7 @@ class CTCGreedyDecoder(PrimitiveWithInfer):
 containing sequence log-probability, has the same type as `inputs`.
 Examples:
->>>class CTCGreedyDecoderNet(nn.Cell):
+>>> class CTCGreedyDecoderNet(nn.Cell):
 ...     def __init__(self):
 ...         super(CTCGreedyDecoderNet, self).__init__()
 ...         self.ctc_greedy_decoder = P.CTCGreedyDecoder()
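
Note: doctest prompts are four characters wide: the primary prompt is ">>> "
(with a trailing space) and every continuation line of the same statement uses
"... ", which is exactly what the missing space broke here. A minimal sketch:

    >>> class Net:                 # primary prompt ">>> "
    ...     def __init__(self):    # continuation prompt "... "
    ...         self.name = "net"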


@@ -318,9 +318,9 @@ class IOU(PrimitiveWithInfer):
 >>> gt_boxes = Tensor(np.random.randint(1.0, 5.0, [3, 4]), mindspore.float16)
 >>> output = iou(anchor_boxes, gt_boxes)
 >>> print(output)
-[[65000. 65500. -0.]
- [65000. 65500. -0.]
- [ 0. 0. 0.]]
+[[65500. 65500. 65500.]
+ [ -0. -0. -0.]
+ [ -0. -0. -0.]]
 """
@@ -524,7 +524,7 @@ class ConfusionMatrix(PrimitiveWithInfer):
 >>> predictions = Tensor([1, 2, 1, 3], mindspore.int32)
 >>> output = confusion_matrix(labels, predictions)
 >>> print(output)
-[[0 1 0 0
+[[0 1 0 0]
  [0 1 1 0]
  [0 0 0 0]
  [0 0 0 1]]


@@ -420,7 +420,7 @@ class RandomChoiceWithMask(PrimitiveWithInfer):
 >>> print(result)
 (256, 2)
 >>> result = output_mask.shape
->>> print(reuslt)
+>>> print(result)
 (256,)
 """
@@ -474,16 +474,16 @@ class RandomCategorical(PrimitiveWithInfer):
 >>> net = Net(8)
 >>> output = net(Tensor(x))
 >>> print(output)
-[[0 2 1 3 4 2 0 2]
+[[0 2 0 3 4 2 0 2]
  [0 2 1 3 4 2 0 2]
- [0 2 0 3 4 2 0 2]
+ [0 2 1 3 4 2 0 2]
  [0 2 1 3 4 2 0 2]
  [0 2 1 3 4 2 0 2]
- [0 2 0 3 4 2 0 2]
- [0 2 0 3 4 2 0 2]
- [0 2 1 3 4 3 0 3]
+ [0 2 1 3 4 2 0 2]
+ [0 2 1 3 4 2 0 2]
+ [0 2 1 3 4 2 0 2]
- [0 2 0 3 4 2 0 2]]
+ [0 2 1 3 4 2 0 2]]
 """
 @prim_attr_register