diff --git a/mindspore/nn/layer/activation.py b/mindspore/nn/layer/activation.py
index 81b5b8ac591..02c920160e1 100644
--- a/mindspore/nn/layer/activation.py
+++ b/mindspore/nn/layer/activation.py
@@ -159,7 +159,7 @@ class ELU(Cell):
         >>> elu = nn.ELU()
         >>> result = elu(input_x)
         >>> print(result)
-        [-0.63212055 -0.86466473 0 2 1]
+        [-0.63212055 -0.86466473  0.  2.  1.]
     """

     def __init__(self, alpha=1.0):
@@ -389,8 +389,8 @@ class FastGelu(Cell):
         >>> fast_gelu = nn.FastGelu()
         >>> output = fast_gelu(input_x)
         >>> print(output)
-        [[-1.5420423e-01  3.9955849e+00 -9.7664278e-06]
-         [ 1.9356585e+00 -1.0070159e-03  8.9999981e+00]]
+        [[-1.5420423e-01  3.9955850e+00 -9.7664279e-06]
+         [ 1.9356586e+00 -1.0070159e-03  8.9999981e+00]]
     """

     def __init__(self):
diff --git a/mindspore/nn/layer/math.py b/mindspore/nn/layer/math.py
index 506d8390b58..9cbed5cc194 100644
--- a/mindspore/nn/layer/math.py
+++ b/mindspore/nn/layer/math.py
@@ -712,7 +712,7 @@ class LBeta(Cell):
         >>> input_x = Tensor(np.array([2.0, 4.0, 6.0, 8.0]).astype(np.float32))
         >>> input_y = Tensor(np.array([2.0, 3.0, 14.0, 15.0]).astype(np.float32))
         >>> lbeta = nn.LBeta()
-        >>> output = lbeta(input_a, input_x)
+        >>> output = lbeta(input_y, input_x)
         >>> print (output)
         [-1.7917596 -4.094345 -12.000229 -14.754799]
     """
diff --git a/mindspore/nn/probability/bijector/gumbel_cdf.py b/mindspore/nn/probability/bijector/gumbel_cdf.py
index 18a4733a292..ce2373f9f5a 100644
--- a/mindspore/nn/probability/bijector/gumbel_cdf.py
+++ b/mindspore/nn/probability/bijector/gumbel_cdf.py
@@ -59,7 +59,7 @@ class GumbelCDF(Bijector):
         >>> y = Tensor([0.1, 0.2, 0.3], dtype=mindspore.float32)
         >>> ans1 = gumbel_cdf.forward(x)
         >>> print(ans1)
-        [0.36787945 0.54523915 0.6922006 ]
+        [0.36787945 0.5452392  0.69220066]
         >>> ans2 = gumbel_cdf.inverse(y)
         >>> print(ans2)
         [-0.66806495  0.04822993  0.62874645]
diff --git a/mindspore/nn/probability/bijector/scalar_affine.py b/mindspore/nn/probability/bijector/scalar_affine.py
index a54b3cd7bbb..b8c3feba6af 100644
--- a/mindspore/nn/probability/bijector/scalar_affine.py
+++ b/mindspore/nn/probability/bijector/scalar_affine.py
@@ -64,7 +64,7 @@ class ScalarAffine(Bijector):
         0.0
         >>> ans4 = scalaraffine.inverse_log_jacobian(value)
         >>> print(ans4)
-        -0.0
+        0.0
         ...
     """

diff --git a/mindspore/nn/probability/distribution/bernoulli.py b/mindspore/nn/probability/distribution/bernoulli.py
index efadce70583..d579dc23566 100644
--- a/mindspore/nn/probability/distribution/bernoulli.py
+++ b/mindspore/nn/probability/distribution/bernoulli.py
@@ -69,7 +69,7 @@ class Bernoulli(Distribution):
         >>> # Evaluate `prob` with respect to distribution b.
         >>> ans = b1.prob(value, probs_b)
         >>> print(ans)
-        [0.2 0.5 0.4]
+        [0.2 0.7 0.4]
         >>> # `probs` must be passed in during function calls.
         >>> ans = b2.prob(value, probs_a)
         >>> print(ans)
diff --git a/mindspore/nn/probability/distribution/cauchy.py b/mindspore/nn/probability/distribution/cauchy.py
index cc5536a8dff..41ed2eaa0b0 100644
--- a/mindspore/nn/probability/distribution/cauchy.py
+++ b/mindspore/nn/probability/distribution/cauchy.py
@@ -70,7 +70,7 @@ class Cauchy(Distribution):
         >>> # by replacing 'prob' by the name of the function
         >>> ans = cauchy1.prob(value)
         >>> print(ans)
-        [0.06366198 0.07489645 0.07957747]
+        [0.06366198 0.07489645 0.07957748]
         >>> # Evaluate with respect to distribution b.
         >>> ans = cauchy1.prob(value, loc_b, scale_b)
         >>> print(ans)
diff --git a/mindspore/nn/probability/distribution/exponential.py b/mindspore/nn/probability/distribution/exponential.py
index b72afbdfbcf..b71be7ff405 100644
--- a/mindspore/nn/probability/distribution/exponential.py
+++ b/mindspore/nn/probability/distribution/exponential.py
@@ -71,7 +71,7 @@ class Exponential(Distribution):
         >>> # Evaluate with respect to distribution b.
         >>> ans = e1.prob(value, rate_b)
         >>> print(ans)
-        [0.16374613 0.18393973 0.12047768]
+        [0.16374615 0.18393973 0.12047768]
         >>> # `rate` must be passed in during function calls.
         >>> ans = e2.prob(value, rate_a)
         >>> print(ans)
diff --git a/mindspore/nn/probability/distribution/gumbel.py b/mindspore/nn/probability/distribution/gumbel.py
index c23c6186a2b..83406c75699 100644
--- a/mindspore/nn/probability/distribution/gumbel.py
+++ b/mindspore/nn/probability/distribution/gumbel.py
@@ -64,7 +64,7 @@ class Gumbel(TransformedDistribution):
         >>> value = Tensor([1.0, 2.0, 3.0], dtype=mindspore.float32)
         >>> ans = gumbel.prob(value)
         >>> print(ans)
-        [0.07926048 0.08889321 0.09196986]
+        [0.07926048 0.08889319 0.09196986]
         >>> # Functions `mean`, `mode`, `sd`, `var`, and `entropy` do not take in any argument.
         >>> ans = gumbel.mean()
         >>> print(ans)
diff --git a/mindspore/nn/probability/distribution/logistic.py b/mindspore/nn/probability/distribution/logistic.py
index 647a15b8944..2330cbe6be0 100644
--- a/mindspore/nn/probability/distribution/logistic.py
+++ b/mindspore/nn/probability/distribution/logistic.py
@@ -71,7 +71,7 @@ class Logistic(Distribution):
         >>> # by replacing 'prob' by the name of the function
         >>> ans = l1.prob(value)
         >>> print(ans)
-        [0.05875093 0.06153353 0.0625    ]
+        [0.05875093 0.06153352 0.0625    ]
         >>> # Evaluate with respect to distribution b.
         >>> ans = l1.prob(value, loc_b, scale_b)
         >>> print(ans)
diff --git a/mindspore/ops/composite/array_ops.py b/mindspore/ops/composite/array_ops.py
index 20421e15d97..bff967de20e 100644
--- a/mindspore/ops/composite/array_ops.py
+++ b/mindspore/ops/composite/array_ops.py
@@ -71,10 +71,10 @@ def repeat_elements(x, rep, axis=0):
         >>> x = Tensor(np.array([[0, 1, 2], [3, 4, 5]]), mindspore.int32)
         >>> output = C.repeat_elements(x, rep = 2, axis = 0)
         >>> print(output)
-        [[0, 1, 2],
-         [0, 1, 2],
-         [3, 4, 5],
-         [3, 4, 5]],
+        [[0 1 2]
+         [0 1 2]
+         [3 4 5]
+         [3 4 5]]
     """
     const_utils.check_valid_type(F.dtype(x), mstype.number_type, 'input x')
     rep = _check_positive_int(rep, "rep", "repeat_elements")
diff --git a/mindspore/ops/composite/math_ops.py b/mindspore/ops/composite/math_ops.py
index 03174802448..ea6fe0e8944 100644
--- a/mindspore/ops/composite/math_ops.py
+++ b/mindspore/ops/composite/math_ops.py
@@ -186,9 +186,9 @@ def tensor_dot(x1, x2, axes):
         >>> input_x2 = Tensor(np.ones(shape=[3, 1, 2]), mindspore.float32)
         >>> output = C.tensor_dot(input_x1, input_x2, ((0,1),(1,2)))
         >>> print(output)
-        [[2,2,2],
-         [2,2,2],
-         [2,2,2]]
+        [[2. 2. 2.]
+         [2. 2. 2.]
+         [2. 2. 2.]]
     """
     shape_op = P.Shape()
     reshape_op = P.Reshape()
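As a quick cross-check on the corrected `tensor_dot` example, the same contraction can be reproduced with NumPy alone. This is a minimal sketch, assuming `np.tensordot` follows the axes semantics the `C.tensor_dot` docstring describes; the MindSpore call itself is not run here:

>>> import numpy as np
>>> # Same shapes and axes as the docstring example above.
>>> x1 = np.ones((1, 2, 3), dtype=np.float32)
>>> x2 = np.ones((3, 1, 2), dtype=np.float32)
>>> # Contracting axes (0, 1) of x1 with axes (1, 2) of x2 leaves one
>>> # size-3 axis from each input, so every entry of the 3x3 result
>>> # sums 1 * 2 = 2 ones.
>>> print(np.tensordot(x1, x2, axes=((0, 1), (1, 2))))
[[2. 2. 2.]
 [2. 2. 2.]
 [2. 2. 2.]]

Note that the float32 entries print as `2.` with a trailing dot, which is why the corrected docstring output reads `[[2. 2. 2.] ...]` rather than the comma-separated integers shown before.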