!45890 fix examples issues
Merge pull request !45890 from luojianing/code_docs_master
commit dc3660426c
@@ -364,8 +364,8 @@ def dot(x1, x2):
    ``Ascend`` ``GPU`` ``CPU``

Examples:
    >>> from mindspore import Tensor, ops
    >>> import mindspore
    >>> from mindspore import Tensor, ops
    >>> input_x1 = Tensor(np.ones(shape=[2, 3]), mindspore.float32)
    >>> input_x2 = Tensor(np.ones(shape=[1, 3, 2]), mindspore.float32)
    >>> output = ops.dot(input_x1, input_x2)

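For reference, a self-contained sketch of the fixed dot example; the numpy import it relies on sits outside this hunk, so it is added here.

    import numpy as np
    import mindspore
    from mindspore import Tensor, ops

    # dot contracts the last axis of x1 with the second-to-last axis of x2,
    # so (2, 3) x (1, 3, 2) should give a (2, 1, 2) result filled with 3.0 here.
    input_x1 = Tensor(np.ones(shape=[2, 3]), mindspore.float32)
    input_x2 = Tensor(np.ones(shape=[1, 3, 2]), mindspore.float32)
    output = ops.dot(input_x1, input_x2)
    print(output.shape)  # expected: (2, 1, 2)
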
@@ -198,7 +198,7 @@ def crop_and_resize(image, boxes, box_indices, crop_size, method="bilinear", ext
    >>> boxes = np.random.uniform(size=[NUM_BOXES, 4]).astype(np.float32)
    >>> box_indices = np.random.uniform(size=[NUM_BOXES], low=0, high=BATCH_SIZE).astype(np.int32)
    >>> crop_size = (24, 24)
-    >>> output = F.crop_and_resize(Tensor(image), Tensor(boxes), Tensor(box_indices), crop_size)
+    >>> output = ops.crop_and_resize(Tensor(image), Tensor(boxes), Tensor(box_indices), crop_size)
    >>> print(output.shape)
    (5, 24, 24, 3)
    """

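A runnable sketch of the corrected snippet; BATCH_SIZE, NUM_BOXES and the image are defined outside this hunk, so the values below (BATCH_SIZE=1, a 256x256x3 image) are assumptions, with NUM_BOXES=5 and 3 channels implied by the printed shape.

    import numpy as np
    from mindspore import Tensor, ops

    BATCH_SIZE = 1                                       # assumed; not shown in the hunk
    NUM_BOXES = 5                                        # implied by the printed shape (5, 24, 24, 3)
    IMAGE_HEIGHT, IMAGE_WIDTH, CHANNELS = 256, 256, 3    # assumed image size

    image = np.random.normal(size=[BATCH_SIZE, IMAGE_HEIGHT, IMAGE_WIDTH, CHANNELS]).astype(np.float32)
    boxes = np.random.uniform(size=[NUM_BOXES, 4]).astype(np.float32)
    box_indices = np.random.uniform(size=[NUM_BOXES], low=0, high=BATCH_SIZE).astype(np.int32)
    crop_size = (24, 24)
    output = ops.crop_and_resize(Tensor(image), Tensor(boxes), Tensor(box_indices), crop_size)
    print(output.shape)  # expected: (5, 24, 24, 3)
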
@@ -3141,7 +3141,6 @@ def approximate_equal(x, y, tolerance=1e-5):
    ``Ascend`` ``GPU`` ``CPU``

Examples:
    >>> import mindspore.ops as ops
    >>> tol = 1.5
    >>> x = Tensor(np.array([1, 2, 3]), mstype.float32)
    >>> y = Tensor(np.array([2, 4, 6]), mstype.float32)

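A self-contained version of the approximate_equal example, with the Tensor and dtype imports the window does not show; the expected result follows from comparing |x - y| against the tolerance elementwise.

    import numpy as np
    import mindspore.common.dtype as mstype
    from mindspore import Tensor, ops

    tol = 1.5
    x = Tensor(np.array([1, 2, 3]), mstype.float32)
    y = Tensor(np.array([2, 4, 6]), mstype.float32)
    output = ops.approximate_equal(x, y, tol)
    print(output)  # expected: [ True False False], since only |1 - 2| < 1.5
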
@@ -3567,7 +3567,7 @@ def hinge_embedding_loss(inputs, targets, margin=1.0, reduction='mean'):
Supported Platforms:
    ``Ascend`` ``GPU`` ``CPU``

-Examplse:
+Examples:
    >>> import numpy as np
    >>> import mindspore.common.dtype as mstype
    >>> import mindspore.ops as ops

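A hedged sketch of a complete call: the input values below are made up (the docstring's own tensors sit outside the hunk), and the expected value assumes the usual hinge-embedding definition, x for target 1 and max(0, margin - x) for target -1, averaged under reduction='mean'.

    import numpy as np
    import mindspore.common.dtype as mstype
    from mindspore import Tensor, ops

    inputs = Tensor(np.array([[0.3, 0.7], [0.5, 0.5]]), mstype.float32)   # made-up values
    targets = Tensor(np.array([[-1, 1], [1, -1]]), mstype.float32)        # labels must be 1 or -1
    loss = ops.hinge_embedding_loss(inputs, targets, margin=1.0, reduction='mean')
    print(loss)  # expected: 0.6, i.e. mean(0.7, 0.7, 0.5, 0.5)
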
@@ -45,7 +45,6 @@ def csr_cos(x: CSRTensor) -> CSRTensor:
    ``Ascend`` ``GPU`` ``CPU``

Examples:
    >>> from mindspore import ops, Tensor
    >>> indptr = Tensor([0, 1, 2, 2], dtype=mstype.int32)
    >>> indices = Tensor([3, 0], dtype=mstype.int32)
    >>> values = Tensor([-1, 2], dtype=mstype.float32)

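The sparse hunks only show the component tensors; a sketch of the full CSR construction and call is below, with mstype imported and a (3, 4) shape assumed from the indptr and indices shown.

    import mindspore.common.dtype as mstype
    from mindspore import Tensor, CSRTensor, ops

    indptr = Tensor([0, 1, 2, 2], dtype=mstype.int32)
    indices = Tensor([3, 0], dtype=mstype.int32)
    values = Tensor([-1, 2], dtype=mstype.float32)
    shape = (3, 4)   # assumed: 3 rows from indptr, column index 3 needs width >= 4
    x = CSRTensor(indptr, indices, values, shape)
    output = ops.csr_cos(x)
    print(output.values)  # expected roughly [ 0.5403 -0.4161], i.e. cos(-1) and cos(2)
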
@@ -298,7 +297,6 @@ def coo_inv(x: COOTensor) -> COOTensor:
    ``Ascend`` ``GPU`` ``CPU``

Examples:
    >>> from mindspore import ops, Tensor
    >>> indices = Tensor([[0, 1], [1, 2]], dtype=mstype.int64)
    >>> values = Tensor([-1, 2], dtype=mstype.float32)
    >>> shape = (3, 4)

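Likewise for the COO example; coo_inv is an elementwise reciprocal over the stored values, so the expected output follows directly.

    import mindspore.common.dtype as mstype
    from mindspore import Tensor, COOTensor, ops

    indices = Tensor([[0, 1], [1, 2]], dtype=mstype.int64)
    values = Tensor([-1, 2], dtype=mstype.float32)
    shape = (3, 4)
    x = COOTensor(indices, values, shape)
    output = ops.coo_inv(x)
    print(output.values)  # expected: [-1.   0.5], the reciprocals of -1 and 2
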
@@ -341,7 +339,6 @@ def csr_relu(x: CSRTensor) -> CSRTensor:
    ``Ascend`` ``GPU`` ``CPU``

Examples:
    >>> from mindspore import ops, Tensor
    >>> indptr = Tensor([0, 1, 2, 2], dtype=mstype.int32)
    >>> indices = Tensor([3, 0], dtype=mstype.int32)
    >>> values = Tensor([-1, 2], dtype=mstype.float32)

@@ -458,7 +455,6 @@ def coo_expm1(x: COOTensor) -> COOTensor:
    ``Ascend`` ``GPU`` ``CPU``

Examples:
    >>> from mindspore import ops, Tensor
    >>> indices = Tensor([[0, 1], [1, 2]], dtype=mstype.int64)
    >>> values = Tensor([-1, 2], dtype=mstype.float32)
    >>> shape = (3, 4)

@@ -1732,7 +1728,6 @@ def coo_tanh(x: COOTensor) -> COOTensor:
    ``Ascend`` ``GPU`` ``CPU``

Examples:
    >>> from mindspore import ops, Tensor
    >>> indices = Tensor([[0, 1], [1, 2]], dtype=mstype.int64)
    >>> values = Tensor([-1, 2], dtype=mstype.float32)
    >>> shape = (3, 4)

@@ -1109,10 +1109,9 @@ class Padding(Primitive):
    ``Ascend`` ``GPU`` ``CPU``

Examples:
-    >>> from mindspore.ops.operations.array_ops import Padding
    >>> x = Tensor(np.array([[8], [10]]), mindspore.float32)
    >>> pad_dim_size = 4
-    >>> output = Padding(pad_dim_size)(x)
+    >>> output = ops.Padding(pad_dim_size)(x)
    >>> print(output)
    [[ 8. 0. 0. 0.]
    [10. 0. 0. 0.]]

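For completeness, the corrected Padding example with its imports; the printed matrix is the one already shown in the hunk.

    import numpy as np
    import mindspore
    from mindspore import Tensor, ops

    # Padding extends the last dimension from 1 to pad_dim_size by appending zeros.
    x = Tensor(np.array([[8], [10]]), mindspore.float32)
    pad_dim_size = 4
    output = ops.Padding(pad_dim_size)(x)
    print(output)
    # [[ 8.  0.  0.  0.]
    #  [10.  0.  0.  0.]]
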
@@ -1285,7 +1284,6 @@ class MatrixDiagV3(Primitive):
    ``Ascend`` ``GPU`` ``CPU``

Examples:
-    >>> from mindspore.ops.operations.array_ops import MatrixDiagV3
    >>> x = Tensor(np.array([[8, 9, 0],
    ... [1, 2, 3],
    ... [0, 4, 5]]), mindspore.float32)

@@ -1293,7 +1291,7 @@ class MatrixDiagV3(Primitive):
    >>> num_rows = Tensor(np.array(3), mindspore.int32)
    >>> num_cols = Tensor(np.array(3), mindspore.int32)
    >>> padding_value = Tensor(np.array(11), mindspore.float32)
-    >>> matrix_diag_v3 = MatrixDiagV3(align='LEFT_RIGHT')
+    >>> matrix_diag_v3 = ops.MatrixDiagV3(align='LEFT_RIGHT')
    >>> output = matrix_diag_v3(x, k, num_rows, num_cols, padding_value)
    >>> print(output)
    [[ 1. 8. 11.]

@@ -1326,7 +1324,7 @@ class MatrixDiagPartV3(Primitive):
    ... [9, 8, 7, 6]]), mindspore.float32)
    >>> k =Tensor(np.array([1, 3]), mindspore.int32)
    >>> padding_value = Tensor(np.array(9), mindspore.float32)
-    >>> matrix_diag_part_v3 = ops.operations.array_ops.MatrixDiagPartV3(align='RIGHT_LEFT')
+    >>> matrix_diag_part_v3 = ops.MatrixDiagPartV3(align='RIGHT_LEFT')
    >>> output = matrix_diag_part_v3(x, k, padding_value)
    >>> print(output)
    [[9. 9. 4.]

@@ -3564,13 +3562,12 @@ class DiagPart(PrimitiveWithCheck):
Supported Platforms:
    ``Ascend`` ``GPU`` ``CPU``

-Examples
+Examples:
    >>> input_x = Tensor([[1, 0, 0, 0],
    ... [0, 2, 0, 0],
    ... [0, 0, 3, 0],
    ... [0, 0, 0, 4]])
-    >>> import mindspore.ops as P
-    >>> diag_part = P.DiagPart()
+    >>> diag_part = ops.DiagPart()
    >>> output = diag_part(input_x)
    >>> print(output)
    [1 2 3 4]

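The fixed DiagPart example as a standalone script; the output is the diagonal already printed in the hunk.

    from mindspore import Tensor, ops

    input_x = Tensor([[1, 0, 0, 0],
                      [0, 2, 0, 0],
                      [0, 0, 3, 0],
                      [0, 0, 0, 4]])
    diag_part = ops.DiagPart()
    output = diag_part(input_x)
    print(output)  # expected: [1 2 3 4]
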
@@ -4461,7 +4458,6 @@ class ScatterMul(_ScatterOpDynamic):
    ``Ascend`` ``GPU`` ``CPU``

Examples:
    >>> import mindspore.ops as ops
    >>> input_x = Parameter(Tensor(np.array([[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]]), mstype.float32), name="x")
    >>> indices = Tensor(np.array([0, 1]), mstype.int32)
    >>> updates = Tensor(np.array([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]]), mstype.float32)

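A sketch of the ScatterMul call this hunk sets up; the operator instantiation and the result are outside the window, so the lines below assume ops.ScatterMul and the usual in-place multiply at the given indices.

    import numpy as np
    import mindspore.common.dtype as mstype
    from mindspore import Tensor, Parameter, ops

    input_x = Parameter(Tensor(np.array([[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]]), mstype.float32), name="x")
    indices = Tensor(np.array([0, 1]), mstype.int32)
    updates = Tensor(np.array([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]]), mstype.float32)
    scatter_mul = ops.ScatterMul()
    output = scatter_mul(input_x, indices, updates)
    print(output)
    # expected:
    # [[2. 2. 2.]
    #  [4. 4. 4.]]
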
@@ -4771,11 +4767,10 @@ class ScatterNdMul(_ScatterNdOp):
    ``GPU`` ``CPU``

Examples:
-    >>> from mindspore.ops.operations.array_ops import ScatterNdMul
    >>> input_x = Parameter(Tensor(np.array([1, 2, 3, 4, 5, 6, 7, 8]), mindspore.float32), name="x")
    >>> indices = Tensor(np.array([[2], [4], [1], [7]]), mindspore.int32)
    >>> updates = Tensor(np.array([6, 7, 8, 9]), mindspore.float32)
-    >>> scatter_nd_mul = ScatterNdMul()
+    >>> scatter_nd_mul = ops.ScatterNdMul()
    >>> output = scatter_nd_mul(input_x, indices, updates)
    >>> print(output)
    [ 1. 16. 18. 4. 35. 6. 7. 72.]

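The 1-D ScatterNdMul case assembled with its imports; the expected row matches the output printed in the hunk (each indexed element is multiplied by its update).

    import numpy as np
    import mindspore
    from mindspore import Tensor, Parameter, ops

    input_x = Parameter(Tensor(np.array([1, 2, 3, 4, 5, 6, 7, 8]), mindspore.float32), name="x")
    indices = Tensor(np.array([[2], [4], [1], [7]]), mindspore.int32)
    updates = Tensor(np.array([6, 7, 8, 9]), mindspore.float32)
    scatter_nd_mul = ops.ScatterNdMul()
    output = scatter_nd_mul(input_x, indices, updates)
    print(output)  # expected: [ 1. 16. 18.  4. 35.  6.  7. 72.]
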
@@ -4783,7 +4778,7 @@ class ScatterNdMul(_ScatterNdOp):
    >>> indices = Tensor(np.array([[0], [2]]), mindspore.int32)
    >>> updates = Tensor(np.array([[[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],
    ... [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]]]), mindspore.int32)
-    >>> scatter_nd_mul = ScatterNdMul()
+    >>> scatter_nd_mul = ops.ScatterNdMul()
    >>> output = scatter_nd_mul(input_x, indices, updates)
    >>> print(output)
    [[[1 1 1 1]

@@ -4867,11 +4862,10 @@ class ScatterNdMax(_ScatterNdOp):
    ``Ascend`` ``GPU`` ``CPU``

Examples:
-    >>> from mindspore.ops.operations.array_ops import ScatterNdMax
    >>> input_x = Parameter(Tensor(np.array([1, 2, 3, 4, 5, 6, 7, 8]), mindspore.float32), name="x")
    >>> indices = Tensor(np.array([[2], [4], [1], [7]]), mindspore.int32)
    >>> updates = Tensor(np.array([6, 7, 8, 9]), mindspore.float32)
-    >>> scatter_nd_max = ScatterNdMax()
+    >>> scatter_nd_max = ops.ScatterNdMax()
    >>> output = scatter_nd_max(input_x, indices, updates)
    >>> print(output)
    [ 1. 8. 6. 4. 7. 6. 7. 9.]

@@ -4879,7 +4873,7 @@ class ScatterNdMax(_ScatterNdOp):
    >>> indices = Tensor(np.array([[0], [2]]), mindspore.int32)
    >>> updates = Tensor(np.array([[[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],
    ... [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]]]), mindspore.int32)
-    >>> scatter_nd_max = ScatterNdMax()
+    >>> scatter_nd_max = ops.ScatterNdMax()
    >>> output = scatter_nd_max(input_x, indices, updates)
    >>> print(output)
    [[[1 1 1 1]

@@ -78,9 +78,9 @@ class Svd(Primitive):
Examples:
    >>> import numpy as np
    >>> from mindspore import Tensor, set_context
-    >>> from mindspore.ops.operations import linalg_ops as linalg
+    >>> from mindspore import ops
    >>> set_context(device_target="CPU")
-    >>> svd = linalg.Svd(full_matrices=True, compute_uv=True)
+    >>> svd = ops.Svd(full_matrices=True, compute_uv=True)
    >>> a = Tensor(np.array([[1, 2], [-4, -5], [2, 1]]).astype(np.float32))
    >>> s, u, v = svd(a)
    >>> print(s)

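A standalone version of the corrected Svd example; the values in the comment are a hand computation of the singular values of this matrix (roughly 7.07 and 1.04), not copied from the docstring, so treat them as approximate.

    import numpy as np
    from mindspore import Tensor, set_context, ops

    set_context(device_target="CPU")
    svd = ops.Svd(full_matrices=True, compute_uv=True)
    a = Tensor(np.array([[1, 2], [-4, -5], [2, 1]]).astype(np.float32))
    s, u, v = svd(a)
    print(s)  # expected: approximately [7.065 1.040]
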
@@ -6146,9 +6146,10 @@ class IsClose(Primitive):
    ``Ascend`` ``GPU`` ``CPU``

Examples:
    >>> import mindspore
    >>> import numpy as np
    >>> from mindspore import Tensor
-    >>> from mindspore.ops.operations.math_ops import IsClose
+    >>> from mindspore.ops import IsClose
    >>> input = Tensor(np.array([1.3, 2.1, 3.2, 4.1, 5.1]), mindspore.float16)
    >>> other = Tensor(np.array([1.3, 3.3, 2.3, 3.1, 5.1]), mindspore.float16)
    >>> isclose = IsClose()

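The IsClose example completed; with the default tolerances the expected mask simply marks the positions where the two float16 tensors carry the same value.

    import mindspore
    import numpy as np
    from mindspore import Tensor
    from mindspore.ops import IsClose

    input = Tensor(np.array([1.3, 2.1, 3.2, 4.1, 5.1]), mindspore.float16)
    other = Tensor(np.array([1.3, 3.3, 2.3, 3.1, 5.1]), mindspore.float16)
    isclose = IsClose()
    output = isclose(input, other)
    print(output)  # expected: [ True False False False  True]
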
@@ -6468,7 +6469,7 @@ class Digamma(Primitive):

Examples:
    >>> x = Tensor(np.array([1.5, 0.5, 9]).astype(np.float16))
-    >>> digamma = P.Digamma()
+    >>> digamma = ops.Digamma()
    >>> output = digamma(x)
    >>> print(output)
    [ 0.0365 -1.964 2.14 ]

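The corrected Digamma call with imports added; the printed values are the ones already shown in the hunk (digamma at 1.5, 0.5 and 9, in float16 precision).

    import numpy as np
    from mindspore import Tensor, ops

    x = Tensor(np.array([1.5, 0.5, 9]).astype(np.float16))
    digamma = ops.Digamma()
    output = digamma(x)
    print(output)  # expected: [ 0.0365 -1.964   2.14  ]
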
@@ -1430,7 +1430,7 @@ class DataFormatVecPermute(Primitive):
    >>> class Net(nn.Cell):
    ...     def __init__(self, src_format="NHWC", dst_format="NCHW"):
    ...         super().__init__()
-    ...         self.op = P.nn_ops.DataFormatVecPermute(src_format, dst_format)
+    ...         self.op = ops.DataFormatVecPermute(src_format, dst_format)
    ...     def construct(self, x):
    ...         return self.op(x)
    ...

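The Net wrapper in the hunk never receives its input here; a hedged sketch of a direct call follows, assuming the documented behaviour of permuting a length-4 layout vector from NHWC order to NCHW order, with a made-up input vector.

    import numpy as np
    import mindspore
    from mindspore import Tensor, ops

    # [N, H, W, C] -> [N, C, H, W]: element order 0, 3, 1, 2
    op = ops.DataFormatVecPermute(src_format="NHWC", dst_format="NCHW")
    x = Tensor(np.array([1, 2, 3, 4]), mindspore.int32)   # made-up layout vector
    output = op(x)
    print(output)  # expected: [1 4 2 3]
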
@@ -3097,9 +3097,9 @@ class L2Loss(Primitive):
Supported Platforms:
    ``Ascend`` ``GPU`` ``CPU``

-Examples
+Examples:
    >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.float16)
-    >>> l2_loss = L2Loss()
+    >>> l2_loss = ops.L2Loss()
    >>> output = l2_loss(input_x)
    >>> print(output)
    7.0

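The L2Loss example completed; 7.0 is sum(x**2) / 2 for [1, 2, 3], matching the value printed in the hunk.

    import numpy as np
    import mindspore
    from mindspore import Tensor, ops

    input_x = Tensor(np.array([1, 2, 3]), mindspore.float16)
    l2_loss = ops.L2Loss()
    output = l2_loss(input_x)
    print(output)  # expected: 7.0, i.e. (1 + 4 + 9) / 2
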
@@ -6976,8 +6976,7 @@ class Dropout2D(PrimitiveWithInfer):
    ``Ascend`` ``GPU`` ``CPU``

Examples:
-    >>> from mindspore.ops.operations.nn_ops import Dropout2D
-    >>> dropout = Dropout2D(keep_prob=0.5)
+    >>> dropout = ops.Dropout2D(keep_prob=0.5)
    >>> x = Tensor(np.ones([2, 1, 2, 3]), mindspore.float32)
    >>> output, mask = dropout(x)
    >>> print(output.shape)

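The Dropout2D example completed; which channels are zeroed is random, so only the shape is checked, as in the docstring.

    import numpy as np
    import mindspore
    from mindspore import Tensor, ops

    dropout = ops.Dropout2D(keep_prob=0.5)
    x = Tensor(np.ones([2, 1, 2, 3]), mindspore.float32)
    output, mask = dropout(x)
    print(output.shape)  # expected: (2, 1, 2, 3)
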