From 36901f77a846d8ed71924227b3b40ae1d0d25d2c Mon Sep 17 00:00:00 2001
From: tacyi139
Date: Thu, 30 Dec 2021 21:52:29 +0800
Subject: [PATCH] correct code examples etc.

---
 .../api/api_python/nn/mindspore.nn.HSwish.rst |  2 +-
 .../python/mindspore/nn/layer/activation.py   |  2 +-
 .../python/mindspore/nn/sparse/sparse.py      |  6 +++---
 .../python/mindspore/ops/op_info_register.py  |  2 +-
 .../mindspore/ops/operations/math_ops.py      | 20 +++++++++----------
 .../mindspore/ops/operations/sparse_ops.py    | 12 +++++------
 6 files changed, 22 insertions(+), 22 deletions(-)

diff --git a/docs/api/api_python/nn/mindspore.nn.HSwish.rst b/docs/api/api_python/nn/mindspore.nn.HSwish.rst
index 96d69aea69a..9c10fbb075c 100644
--- a/docs/api/api_python/nn/mindspore.nn.HSwish.rst
+++ b/docs/api/api_python/nn/mindspore.nn.HSwish.rst
@@ -36,5 +36,5 @@ mindspore.nn.HSwish
     >>> hswish = nn.HSwish()
     >>> result = hswish(x)
     >>> print(result)
-    [-0.3333 -0.3333 0 1.666 0.6665]
+    [-0.3333 -0.3333 0. 1.667 0.6665]
\ No newline at end of file
diff --git a/mindspore/python/mindspore/nn/layer/activation.py b/mindspore/python/mindspore/nn/layer/activation.py
index 7b394ba38db..7c04a5f8953 100644
--- a/mindspore/python/mindspore/nn/layer/activation.py
+++ b/mindspore/python/mindspore/nn/layer/activation.py
@@ -735,7 +735,7 @@ class HSwish(Cell):
         >>> hswish = nn.HSwish()
         >>> result = hswish(x)
         >>> print(result)
-        [-0.3333 -0.3333 0 1.666 0.6665]
+        [-0.3333 -0.3333 0. 1.667 0.6665]
     """

     def __init__(self):
diff --git a/mindspore/python/mindspore/nn/sparse/sparse.py b/mindspore/python/mindspore/nn/sparse/sparse.py
index bb3f55ce64e..793e384c138 100644
--- a/mindspore/python/mindspore/nn/sparse/sparse.py
+++ b/mindspore/python/mindspore/nn/sparse/sparse.py
@@ -114,9 +114,9 @@ class SparseTensorDenseMatmul(Cell):
         >>> sparse_dense_matmul = nn.SparseTensorDenseMatmul()
         >>> out = sparse_dense_matmul(indices, values, sparse_shape, dense)
         >>> print(out)
-        [[2 2]
-         [6 6]
-         [0 0]]
+        [[2. 2.]
+         [6. 6.]
+         [0. 0.]]
     """

     def __init__(self, adjoint_st=False, adjoint_dt=False):
diff --git a/mindspore/python/mindspore/ops/op_info_register.py b/mindspore/python/mindspore/ops/op_info_register.py
index de8cbbea267..c15b57119c5 100644
--- a/mindspore/python/mindspore/ops/op_info_register.py
+++ b/mindspore/python/mindspore/ops/op_info_register.py
@@ -676,7 +676,7 @@ class CustomRegOp(RegOp):
     Class used for generating the registration information for the `func` parameter of :class:`mindspore.ops.Custom`.

     Args:
-        op_name (str): kernel name. No need to set this value as `Custom` operator will generate a unique name
+        op_name (str): kernel name. No need to set this value, as the `Custom` operator will generate a unique name
            automatically. Default: "Custom".

     Examples:
diff --git a/mindspore/python/mindspore/ops/operations/math_ops.py b/mindspore/python/mindspore/ops/operations/math_ops.py
index 3023a10225a..2e5efe9d41b 100644
--- a/mindspore/python/mindspore/ops/operations/math_ops.py
+++ b/mindspore/python/mindspore/ops/operations/math_ops.py
@@ -748,8 +748,8 @@ class ReduceSum(_Reduce):

 class ReduceAll(_Reduce):
     """
-    Reduces a dimension of a tensor by the "logicalAND" of all elements in the dimension, by Default. And also can
-    reduces a dimension of `x` along the axis. Determine whether the dimensions of the output and input are the same by
+    Reduces a dimension of a tensor by the "logicalAND" of all elements in the dimension, by default. And also can
+    reduce a dimension of `x` along the axis.
Determine whether the dimensions of the output and input are the same by controlling `keep_dims`. Args: @@ -807,8 +807,8 @@ class ReduceAll(_Reduce): class ReduceAny(_Reduce): """ - Reduces a dimension of a tensor by the "logical OR" of all elements in the dimension, by Default. And also can - reduces a dimension of `x` along the axis. Determine whether the dimensions of the output and input are the same by + Reduces a dimension of a tensor by the "logical OR" of all elements in the dimension, by default. And also can + reduce a dimension of `x` along the axis. Determine whether the dimensions of the output and input are the same by controlling `keep_dims`. Args: @@ -866,8 +866,8 @@ class ReduceAny(_Reduce): class ReduceMax(_Reduce): """ - Reduces a dimension of a tensor by the maximum value in this dimension, by Default. And also can - reduces a dimension of `x` along the axis. Determine whether the dimensions of the output and input are the same by + Reduces a dimension of a tensor by the maximum value in this dimension, by default. And also can + reduce a dimension of `x` along the axis. Determine whether the dimensions of the output and input are the same by controlling `keep_dims`. Args: @@ -953,8 +953,8 @@ class ReduceMax(_Reduce): class ReduceMin(_Reduce): """ - Reduces a dimension of a tensor by the minimum value in the dimension, by Default. And also can - reduces a dimension of `x` along the axis. Determine whether the dimensions of the output and input are the same by + Reduces a dimension of a tensor by the minimum value in the dimension, by default. And also can + reduce a dimension of `x` along the axis. Determine whether the dimensions of the output and input are the same by controlling `keep_dims`. Args: @@ -1031,8 +1031,8 @@ class ReduceMin(_Reduce): class ReduceProd(_Reduce): """ - Reduces a dimension of a tensor by multiplying all elements in the dimension, by Default. And also can - reduces a dimension of `x` along the axis. Determine whether the dimensions of the output and input are the same by + Reduces a dimension of a tensor by multiplying all elements in the dimension, by default. And also can + reduce a dimension of `x` along the axis. Determine whether the dimensions of the output and input are the same by controlling `keep_dims`. Args: diff --git a/mindspore/python/mindspore/ops/operations/sparse_ops.py b/mindspore/python/mindspore/ops/operations/sparse_ops.py index 756402ab07d..f32a201419a 100644 --- a/mindspore/python/mindspore/ops/operations/sparse_ops.py +++ b/mindspore/python/mindspore/ops/operations/sparse_ops.py @@ -52,9 +52,9 @@ class SparseToDense(PrimitiveWithInfer): >>> sparse_to_dense = ops.SparseToDense() >>> out = sparse_to_dense(indices, values, sparse_shape) >>> print(out) - [[0 1 0 0] - [0 0 2 0] - [0 0 0 0]] + [[0. 1. 0. 0.] + [0. 0. 2. 0.] + [0. 0. 0. 0.]] """ @prim_attr_register @@ -133,9 +133,9 @@ class SparseTensorDenseMatmul(PrimitiveWithInfer): >>> sparse_dense_matmul = ops.SparseTensorDenseMatmul() >>> out = sparse_dense_matmul(indices, values, sparse_shape, dense) >>> print(out) - [[2 2] - [6 6] - [0 0]] + [[2. 2.] + [6. 6.] + [0. 0.]] """ @prim_attr_register
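For reference, the corrected example outputs in this patch can be reproduced with a small NumPy sketch. The input tensors sit outside the diff context, so the values used below are assumptions that mirror the usual docstring inputs (x = [-1, -2, 0, 2, 1] as float16 for HSwish; indices [[0, 1], [1, 2]], float32 values [1, 2], a (3, 4) sparse shape, and a 4x2 dense matrix for the sparse examples). This is a plain-NumPy sanity check, not MindSpore code.

# Minimal NumPy sketch; the inputs are assumed (see note above), not taken from the diff.
import numpy as np

# HSwish: x * relu6(x + 3) / 6, evaluated in float16 as in the docstring example.
x = np.array([-1, -2, 0, 2, 1], dtype=np.float16)
print(x * np.clip(x + 3, 0, 6) / 6)  # [-0.3333 -0.3333  0.  1.667  0.6665]

# The sparse examples use float32 values, so the outputs print as floats
# ("2." rather than "2"), which is what the corrected docstrings show.
indices = np.array([[0, 1], [1, 2]])
values = np.array([1, 2], dtype=np.float32)
sparse = np.zeros((3, 4), dtype=np.float32)           # sparse_shape = (3, 4)
sparse[indices[:, 0], indices[:, 1]] = values
print(sparse)                                         # SparseToDense result
dense = np.array([[1, 1], [2, 2], [3, 3], [4, 4]], dtype=np.float32)
print(sparse @ dense)                                 # [[2. 2.] [6. 6.] [0. 0.]]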