!9413 Fixing errors in classes' notes

From: @zhangz0911gm
Reviewed-by: @liangchenghui, @c_34
Signed-off-by: @liangchenghui, @c_34
mindspore-ci-bot 2020-12-07 10:36:31 +08:00 committed by Gitee
commit 29622602e0
11 changed files with 18 additions and 18 deletions

@@ -159,7 +159,7 @@ class ELU(Cell):
>>> elu = nn.ELU()
>>> result = elu(input_x)
>>> print(result)
-[-0.63212055 -0.86466473 0 2 1]
+[-0.63212055 -0.86466473 0. 2. 1.]
"""
def __init__(self, alpha=1.0):
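The corrected ELU output can be cross-checked with plain NumPy. The `input_x` definition sits above this hunk and is not shown; the sketch below assumes it is `[-1, -2, 0, 2, 1]`, which reproduces the printed values under ELU(x) = x for x >= 0 and alpha * (exp(x) - 1) otherwise, with alpha = 1.0:

```python
import numpy as np

# Assumed input; the actual `input_x` line is above the hunk and not shown here.
x = np.array([-1.0, -2.0, 0.0, 2.0, 1.0], dtype=np.float32)
alpha = 1.0

# ELU: identity for non-negative inputs, alpha * (exp(x) - 1) for negative inputs.
print(np.where(x >= 0, x, alpha * np.expm1(x)))
# values agree with the corrected line: [-0.63212055 -0.86466473 0. 2. 1.]
```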
@@ -389,8 +389,8 @@ class FastGelu(Cell):
>>> fast_gelu = nn.FastGelu()
>>> output = fast_gelu(input_x)
>>> print(output)
-[[-1.5420423e-01 3.9955849e+00 -9.7664278e-06]
-[ 1.9356585e+00 -1.0070159e-03 8.9999981e+00]]
+[[-1.5420423e-01 3.9955850e+00 -9.7664279e-06]
+[ 1.9356586e+00 -1.0070159e-03 8.9999981e+00]]
"""
def __init__(self):
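The FastGelu numbers can be checked the same way. The input matrix is defined above the hunk; assuming it is `[[-1, 4, -8], [2, -5, 9]]`, the common fast-GELU approximation x * sigmoid(1.702 * x) reproduces the corrected values to within float32 rounding:

```python
import numpy as np

def sigmoid(t):
    return 1.0 / (1.0 + np.exp(-t))

# Assumed input; the actual `input_x` definition is above the hunk.
x = np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]], dtype=np.float32)

# Fast-GELU approximation; matches the corrected output up to the last printed digit.
print(x * sigmoid(1.702 * x))
```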

@@ -712,7 +712,7 @@ class LBeta(Cell):
>>> input_x = Tensor(np.array([2.0, 4.0, 6.0, 8.0]).astype(np.float32))
>>> input_y = Tensor(np.array([2.0, 3.0, 14.0, 15.0]).astype(np.float32))
>>> lbeta = nn.LBeta()
->>> output = lbeta(input_a, input_x)
+>>> output = lbeta(input_y, input_x)
>>> print (output)
[-1.7917596 -4.094345 -12.000229 -14.754799]
"""

@@ -59,7 +59,7 @@ class GumbelCDF(Bijector):
>>> y = Tensor([0.1, 0.2, 0.3], dtype=mindspore.float32)
>>> ans1 = gumbel_cdf.forward(x)
>>> print(ans1)
-[0.36787945 0.54523915 0.6922006 ]
+[0.36787945 0.5452392 0.69220066]
>>> ans2 = gumbel_cdf.inverse(y)
>>> print(ans2)
[-0.66806495 0.04822993 0.62874645]
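The bijector's loc/scale and the `x` tensor come from lines above this hunk; the printed numbers are consistent with loc = 1.0, scale = 2.0 and x = [1, 2, 3] under the Gumbel CDF F(x) = exp(-exp(-(x - loc)/scale)) and its inverse loc - scale * log(-log(y)):

```python
import numpy as np

loc, scale = 1.0, 2.0                             # assumed; the constructor is not in the hunk
x = np.array([1.0, 2.0, 3.0], dtype=np.float32)   # assumed forward input
y = np.array([0.1, 0.2, 0.3], dtype=np.float32)

print(np.exp(-np.exp(-(x - loc) / scale)))   # ~[0.36787945 0.5452392  0.69220066]
print(loc - scale * np.log(-np.log(y)))      # ~[-0.66806495 0.04822993 0.62874645]
```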

@@ -64,7 +64,7 @@ class ScalarAffine(Bijector):
0.0
>>> ans4 = scalaraffine.inverse_log_jacobian(value)
>>> print(ans4)
--0.0
+0.0
...
"""

@@ -69,7 +69,7 @@ class Bernoulli(Distribution):
>>> # Evaluate `prob` with respect to distribution b.
>>> ans = b1.prob(value, probs_b)
>>> print(ans)
-[0.2 0.5 0.4]
+[0.2 0.7 0.4]
>>> # `probs` must be passed in during function calls.
>>> ans = b2.prob(value, probs_a)
>>> print(ans)
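`value` and the probability tensors are defined above this hunk; assuming value = [1, 0, 1] and probs_b = [0.2, 0.3, 0.4] (both hypothetical here), the Bernoulli pmf p^v * (1 - p)^(1 - v) gives exactly the corrected line:

```python
import numpy as np

value = np.array([1.0, 0.0, 1.0])     # assumed; defined above the hunk
probs_b = np.array([0.2, 0.3, 0.4])   # assumed; defined above the hunk

# Bernoulli pmf: p for v = 1, 1 - p for v = 0.
print(probs_b ** value * (1.0 - probs_b) ** (1.0 - value))   # [0.2 0.7 0.4]
```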

@@ -70,7 +70,7 @@ class Cauchy(Distribution):
>>> # by replacing 'prob' by the name of the function
>>> ans = cauchy1.prob(value)
>>> print(ans)
-[0.06366198 0.07489645 0.07957747]
+[0.06366198 0.07489645 0.07957748]
>>> # Evaluate with respect to distribution b.
>>> ans = cauchy1.prob(value, loc_b, scale_b)
>>> print(ans)
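The distribution parameters and `value` sit above this hunk; the printed numbers are consistent with loc = 3.0, scale = 4.0 and value = [1, 2, 3] under the Cauchy pdf 1 / (pi * scale * (1 + ((x - loc)/scale)^2)):

```python
import numpy as np

loc, scale = 3.0, 4.0           # assumed; constructor not shown in the hunk
x = np.array([1.0, 2.0, 3.0])   # assumed value tensor

print(1.0 / (np.pi * scale * (1.0 + ((x - loc) / scale) ** 2)))
# matches the corrected line to within float32 rounding of the last digit
```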

@@ -71,7 +71,7 @@ class Exponential(Distribution):
>>> # Evaluate with respect to distribution b.
>>> ans = e1.prob(value, rate_b)
>>> print(ans)
-[0.16374613 0.18393973 0.12047768]
+[0.16374615 0.18393973 0.12047768]
>>> # `rate` must be passed in during function calls.
>>> ans = e2.prob(value, rate_a)
>>> print(ans)
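`value` and `rate_b` are defined above this hunk; assuming value = [1, 2, 3] and rate_b = [0.2, 0.5, 0.4] (hypothetical here), the exponential pdf rate * exp(-rate * x) reproduces the corrected line:

```python
import numpy as np

x = np.array([1.0, 2.0, 3.0])         # assumed value tensor
rate_b = np.array([0.2, 0.5, 0.4])    # assumed rates

print(rate_b * np.exp(-rate_b * x))   # ~[0.16374615 0.18393972 0.12047768]
```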

@@ -64,7 +64,7 @@ class Gumbel(TransformedDistribution):
>>> value = Tensor([1.0, 2.0, 3.0], dtype=mindspore.float32)
>>> ans = gumbel.prob(value)
>>> print(ans)
-[0.07926048 0.08889321 0.09196986]
+[0.07926048 0.08889319 0.09196986]
>>> # Functions `mean`, `mode`, sd`, `var`, and `entropy` do not take in any argument.
>>> ans = gumbel.mean()
>>> print(ans)
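`value` is visible in this hunk; the loc/scale come from the constructor above it and are assumed here to be 3.0 and 4.0, which reproduces the corrected pdf line under p(x) = exp(-(z + exp(-z))) / scale with z = (x - loc) / scale:

```python
import numpy as np

loc, scale = 3.0, 4.0                        # assumed; constructor not shown in the hunk
x = np.array([1.0, 2.0, 3.0], dtype=np.float32)

z = (x - loc) / scale
print(np.exp(-(z + np.exp(-z))) / scale)     # ~[0.07926048 0.08889319 0.09196986]
```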

@@ -71,7 +71,7 @@ class Logistic(Distribution):
>>> # by replacing 'prob' by the name of the function
>>> ans = l1.prob(value)
>>> print(ans)
-[0.05875093 0.06153353 0.0625 ]
+[0.05875093 0.06153352 0.0625 ]
>>> # Evaluate with respect to distribution b.
>>> ans = l1.prob(value, loc_b, scale_b)
>>> print(ans)
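As above, loc/scale and `value` come from lines outside the hunk; assuming loc = 3.0, scale = 4.0 and value = [1, 2, 3], the logistic pdf exp(-z) / (scale * (1 + exp(-z))^2) with z = (x - loc) / scale reproduces the corrected line:

```python
import numpy as np

loc, scale = 3.0, 4.0           # assumed; constructor not shown in the hunk
x = np.array([1.0, 2.0, 3.0])   # assumed value tensor

z = (x - loc) / scale
print(np.exp(-z) / (scale * (1.0 + np.exp(-z)) ** 2))   # ~[0.05875093 0.06153352 0.0625]
```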

@@ -71,10 +71,10 @@ def repeat_elements(x, rep, axis=0):
>>> x = Tensor(np.array([[0, 1, 2], [3, 4, 5]]), mindspore.int32)
>>> output = C.repeat_elements(x, rep = 2, axis = 0)
>>> print(output)
-[[0, 1, 2],
-[0, 1, 2],
-[3, 4, 5],
-[3, 4, 5]],
+[[0 1 2]
+[0 1 2]
+[3 4 5]
+[3 4 5]]
"""
const_utils.check_valid_type(F.dtype(x), mstype.number_type, 'input x')
rep = _check_positive_int(rep, "rep", "repeat_elements")
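The corrected output is the standard NumPy rendering of the repeated array; for the arguments shown in the hunk, `np.repeat` produces the same result:

```python
import numpy as np

x = np.array([[0, 1, 2], [3, 4, 5]], dtype=np.int32)
print(np.repeat(x, repeats=2, axis=0))
# [[0 1 2]
#  [0 1 2]
#  [3 4 5]
#  [3 4 5]]
```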

@@ -186,9 +186,9 @@ def tensor_dot(x1, x2, axes):
>>> input_x2 = Tensor(np.ones(shape=[3, 1, 2]), mindspore.float32)
>>> output = C.tensor_dot(input_x1, input_x2, ((0,1),(1,2)))
>>> print(output)
-[[2,2,2],
-[2,2,2],
-[2,2,2]]
+[[2. 2. 2]
+[2. 2. 2]
+[2. 2. 2]]
"""
shape_op = P.Shape()
reshape_op = P.Reshape()
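`input_x1` is defined above this hunk and is assumed here to be np.ones([1, 2, 3]); with that, `np.tensordot` over the same axes reproduces the corrected 3x3 result of twos:

```python
import numpy as np

x1 = np.ones((1, 2, 3), dtype=np.float32)   # assumed; the input_x1 line is not in the hunk
x2 = np.ones((3, 1, 2), dtype=np.float32)

# Contract axes (0, 1) of x1 with axes (1, 2) of x2: each entry sums 1 * 2 ones -> 2.
print(np.tensordot(x1, x2, axes=((0, 1), (1, 2))))
```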