!46144 fix examples and docs issues

Merge pull request !46144 from luojianing/code_docs_r2.0.0-alpha

Commit: 18ab31a4f4
@@ -806,11 +806,12 @@ class ParameterTuple(tuple):
         init (Union[Tensor, str, numbers.Number]): Clone the shape and dtype of Parameters in ParameterTuple and
             set data according to `init`. Default: 'same'.
-            If `init` is a `Tensor` , set the new Parameter data to the input Tensor.
-            If `init` is `numbers.Number` , set the new Parameter data to the input number.
-            If `init` is a `str`, data will be seted according to the initialization method of the same name in
-            the `Initializer`.
-            If `init` is 'same', the new Parameter has the same value with the original Parameter.
+
+            - If `init` is a `Tensor` , set the new Parameter data to the input Tensor.
+            - If `init` is `numbers.Number` , set the new Parameter data to the input number.
+            - If `init` is a `str`, data will be set according to the initialization method of the same name in
+              the `Initializer`.
+            - If `init` is 'same', the new Parameter has the same value with the original Parameter.

     Returns:
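The `init` semantics above are easiest to see in a complete snippet. A minimal sketch, assuming the standard `ParameterTuple.clone(prefix, init='same')` signature; the parameter names and values are illustrative, not from the patch:

    import numpy as np
    import mindspore
    from mindspore import Parameter, ParameterTuple, Tensor

    # Build a ParameterTuple holding one trainable weight.
    w = Parameter(Tensor(np.ones((2, 3)), mindspore.float32), name="w")
    params = ParameterTuple([w])

    # init='same' keeps the original values; init='zeros' re-initializes every
    # cloned Parameter via the Initializer of the same name.
    same_copy = params.clone(prefix="same_copy", init="same")
    zero_copy = params.clone(prefix="zero_copy", init="zeros")
    print(zero_copy[0].asnumpy())  # all zeros, shape (2, 3)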
@@ -74,11 +74,11 @@ def repeat_interleave(x, repeats, dim=None):
         ``Ascend`` ``GPU`` ``CPU``

     Examples:
-        >>> from mindspore import Tensor, ops
         >>> import mindspore
+        >>> from mindspore import Tensor, ops
         >>> import numpy as np
         >>> x = Tensor(np.array([[0, 1, 2], [3, 4, 5]]), mindspore.int32)
-        >>> output = x.repeat_interleave(repeats=2, dims=0)
+        >>> output = ops.repeat_interleave(repeats=2, dims=0)
         >>> print(output)
         [[0 1 2]
          [0 1 2]
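Note that the added line above still omits the input tensor; going by the signature in the hunk header (`def repeat_interleave(x, repeats, dim=None)`), a complete call looks like this sketch:

    import numpy as np
    import mindspore
    from mindspore import Tensor, ops

    x = Tensor(np.array([[0, 1, 2], [3, 4, 5]]), mindspore.int32)
    # Repeat each row twice along dim 0, matching the printed result above.
    output = ops.repeat_interleave(x, repeats=2, dim=0)
    print(output)
    # [[0 1 2]
    #  [0 1 2]
    #  [3 4 5]
    #  [3 4 5]]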
@@ -210,8 +210,6 @@ class GradOperation(GradOperation_):
         ``Ascend`` ``GPU`` ``CPU``
-
     Examples:
-        >>> from mindspore import ParameterTuple
         >>> import mindspore.ops as ops
         >>> class Net(nn.Cell):
         ...     def __init__(self):
         ...         super(Net, self).__init__()
@@ -364,8 +364,8 @@ def dot(x1, x2):
         ``Ascend`` ``GPU`` ``CPU``

     Examples:
-        >>> from mindspore import Tensor, ops
         >>> import mindspore
+        >>> from mindspore import Tensor, ops
         >>> input_x1 = Tensor(np.ones(shape=[2, 3]), mindspore.float32)
         >>> input_x2 = Tensor(np.ones(shape=[1, 3, 2]), mindspore.float32)
         >>> output = ops.dot(input_x1, input_x2)
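For reference, the example tensors above assembled into a runnable sketch; the shape comment reflects how `dot` contracts the last axis of `x1` with the second-to-last axis of `x2`:

    import numpy as np
    import mindspore
    from mindspore import Tensor, ops

    input_x1 = Tensor(np.ones(shape=[2, 3]), mindspore.float32)
    input_x2 = Tensor(np.ones(shape=[1, 3, 2]), mindspore.float32)
    # (2, 3) . (1, 3, 2) -> (2, 1, 2); every entry is 3.0 for all-ones inputs.
    output = ops.dot(input_x1, input_x2)
    print(output.shape)  # (2, 1, 2)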
@@ -52,8 +52,9 @@ def normal(shape, mean, stddev, seed=None):
         ``Ascend`` ``GPU`` ``CPU``

     Examples:
-        >>> from mindspore import Tensor, ops
         >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, ops
         >>> shape = (3, 1, 2)
         >>> mean = Tensor(np.array([[3, 4], [5, 6]]), mindspore.float32)
         >>> stddev = Tensor(1.0, mindspore.float32)
@@ -227,8 +228,9 @@ def gamma(shape, alpha, beta, seed=None):
         ``Ascend``

     Examples:
-        >>> from mindspore import Tensor, ops
         >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, ops
         >>> # case 1: alpha_shape is (2, 2)
         >>> shape = (3, 1, 2)
         >>> alpha = Tensor(np.array([[3, 4], [5, 6]]), mindspore.float32)
@@ -362,8 +364,9 @@ def multinomial(inputs, num_sample, replacement=True, seed=None):
         ``GPU``

     Examples:
-        >>> from mindspore import Tensor, ops
         >>> import mindspore
+        >>> from mindspore import Tensor, ops
+        >>> from mindspore import dtype as mstype
         >>> # case 1: The output is random, and the length of the output is the same as num_sample.
         >>> x = Tensor([0, 9, 4, 0], mindspore.float32)
         >>> output = ops.multinomial(x, 2)
@@ -189,7 +189,7 @@ def arange(start=0, end=None, step=1, *, dtype=None):

     Examples:
         >>> import mindspore as ms
-        >>> import mindspore.ops as ops
+        >>> from mindspore import Tensor, ops
         >>> output = ops.arange(1, 6)
         >>> print(output)
         >>> print(output.dtype)
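A runnable sketch of the call above; the printed values follow from `arange` excluding its endpoint, like Python's `range`, while the exact dtype is inferred from the integer inputs:

    from mindspore import ops

    output = ops.arange(1, 6)
    print(output)        # [1 2 3 4 5]
    print(output.dtype)  # an integer dtype inferred from the inputs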
@@ -3084,7 +3084,6 @@ def tensor_scatter_max(input_x, indices, updates):
     Returns:
         Tensor, has the same shape and type as `input_x`.

-        Tensor, has the same shape and type as `input_x`.
     Raises:
         TypeError: If dtype of `indices` is neither int32 nor int64.
         ValueError: If length of shape of `input_x` is less than the last dimension of shape of `indices`.

@@ -3101,12 +3100,11 @@ def tensor_scatter_max(input_x, indices, updates):
         >>> # 2, And input_x[0, 0] = -0.1
         >>> # 3, So input_x[indices] = [-0.1, -0.1]
         >>> # 4, Satisfy the above formula: input_x[indices].shape=(2) == updates.shape=(2)
-        >>> op = ops.TensorScatterMax()
+        >>> output = ops.tensor_scatter_max(input_x, indices, updates)
         >>> # 5, Perform the max operation for the first time:
         >>> #      first_input_x = Max(input_x[0][0], updates[0]) = [[1.0, 0.3, 3.6], [0.4, 0.5, -3.2]]
         >>> # 6, Perform the max operation for the second time:
         >>> #      second_input_x = Max(input_x[0][0], updates[1]) = [[2.2, 0.3, 3.6], [0.4, 0.5, -3.2]]
-        >>> output = op(input_x, indices, updates)
         >>> print(output)
         [[ 2.2  0.3  3.6]
          [ 0.4  0.5 -3.2]]
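The inputs are not shown in the hunk, but they can be reconstructed from the numbered comments (input_x[0, 0] = -0.1, updates[0] = 1.0, updates[1] = 2.2). A sketch under those assumptions:

    import numpy as np
    import mindspore
    from mindspore import Tensor, ops

    # Inputs reconstructed from the step-by-step comments above.
    input_x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
    indices = Tensor(np.array([[0, 0], [0, 0]]), mindspore.int32)
    updates = Tensor(np.array([1.0, 2.2]), mindspore.float32)
    # Both index rows point at input_x[0, 0]; the final value is max(-0.1, 1.0, 2.2).
    output = ops.tensor_scatter_max(input_x, indices, updates)
    print(output)
    # [[ 2.2  0.3  3.6]
    #  [ 0.4  0.5 -3.2]]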
@@ -3216,7 +3214,6 @@ def tensor_scatter_elements(input_x, indices, updates, axis=0, reduction="none")
         ``Ascend`` ``GPU`` ``CPU``
-
     Examples:
         >>> from mindspore import ops
         >>> input_x = Parameter(Tensor(np.array([[1, 2, 3, 4, 5]]), mindspore.float32), name="x")
         >>> indices = Tensor(np.array([[2, 4]]), mindspore.int32)
         >>> updates = Tensor(np.array([[8, 8]]), mindspore.float32)
@@ -4004,6 +4001,7 @@ def index_fill(x, dim, index, value):

     Examples:
+        >>> import mindspore
         >>> import numpy as np
         >>> import mindspore.ops as ops
         >>> from mindspore import Tensor
         >>> x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]).astype(np.float32))
@@ -198,7 +198,7 @@ def crop_and_resize(image, boxes, box_indices, crop_size, method="bilinear", ext
         >>> boxes = np.random.uniform(size=[NUM_BOXES, 4]).astype(np.float32)
         >>> box_indices = np.random.uniform(size=[NUM_BOXES], low=0, high=BATCH_SIZE).astype(np.int32)
         >>> crop_size = (24, 24)
-        >>> output = F.crop_and_resize(Tensor(image), Tensor(boxes), Tensor(box_indices), crop_size)
+        >>> output = ops.crop_and_resize(Tensor(image), Tensor(boxes), Tensor(box_indices), crop_size)
         >>> print(output.shape)
         (5, 24, 24, 3)
     """
@@ -116,7 +116,6 @@ def pinv(x, *, atol=None, rtol=None, hermitian=False):
         ``CPU``
-
     Examples:
         >>> import mindspore.ops as ops
         >>> x = Tensor([[2., 1.], [1., 2.]], mindspore.float32)
         >>> output = ops.pinv(x)
         >>> print(output)
@@ -727,7 +727,7 @@ def subtract(x, other, *, alpha=1):
     Examples:
         >>> x = Tensor(np.array([4, 5, 6]), mindspore.float32)
         >>> y = Tensor(np.array([1, 2, 3]), mindspore.float32)
-        >>> z = subtract(x, y, alpha=1)
+        >>> z = ops.subtract(x, y, alpha=1)
         >>> print(z)
         [3. 3. 3.]
     """
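The corrected call above as a self-contained sketch; `subtract` computes `x - alpha * y` elementwise, so `alpha=1` is plain subtraction:

    import numpy as np
    import mindspore
    from mindspore import Tensor, ops

    x = Tensor(np.array([4, 5, 6]), mindspore.float32)
    y = Tensor(np.array([1, 2, 3]), mindspore.float32)
    z = ops.subtract(x, y, alpha=1)
    print(z)  # [3. 3. 3.]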
@@ -3141,7 +3141,6 @@ def approximate_equal(x, y, tolerance=1e-5):
         ``Ascend`` ``GPU`` ``CPU``
-
     Examples:
         >>> import mindspore.ops as ops
         >>> tol = 1.5
         >>> x = Tensor(np.array([1, 2, 3]), mstype.float32)
         >>> y = Tensor(np.array([2, 4, 6]), mstype.float32)
@@ -3608,6 +3607,7 @@ def heaviside(x, values):
+
     Supported Platforms:
         ``GPU`` ``CPU``

     Examples:
         >>> x = Tensor(np.array([-1.5, 0., 2.]))
         >>> values = Tensor(np.array([0.5]))
@@ -4105,7 +4105,7 @@ def addr(x, vec1, vec2, beta=1, alpha=1):
         >>> x = Tensor(np.array([[2., 2.], [3., 2.], [3., 4.]], np.float32))
         >>> vec1 = Tensor(np.array([2., 3., 2.], np.float32))
         >>> vec2 = Tensor(np.array([3, 4], np.float32))
-        >>> output = addr(x, vec1, vec2)
+        >>> output = ops.addr(x, vec1, vec2)
         >>> print(output)
         [[ 8. 10.]
          [12. 14.]
@@ -4474,8 +4474,7 @@ def deg2rad(x):

     Examples:
         >>> x = Tensor(np.array([[90.0, -90.0], [180.0, -180.0], [270.0, -270.0]]).astype(np.float32))
-        >>> op = nn.Deg2Rad()
-        >>> output = op(x)
+        >>> output = ops.deg2Rad(x)
         >>> print(output)
         [[ 1.5707964 -1.5707964]
          [ 3.1415927 -3.1415927]
@@ -4515,7 +4514,7 @@ def rad2deg(x):
         >>> from mindspore import Tensor
         >>> import mindspore.ops as ops
         >>> x = Tensor([[6.283, -3.142],[1.570, -6.283],[3.142, -1.570]], mindspore.float32)
-        >>> output = rad2deg(x)
+        >>> output = ops.rad2deg(x)
         >>> print(output)
         [[ 359.98935 -180.02333]
          [  89.95438 -359.98935]
@@ -5825,8 +5824,6 @@ def matmul(x1, x2):
         ``Ascend`` ``GPU`` ``CPU``
-
     Examples:
-        >>> from mindspore import Tensor, ops
         >>> import mindspore
         >>> # case 1 : Reasonable application of broadcast mechanism
         >>> x1 = Tensor(np.arange(2*3*4).reshape(2, 3, 4), mindspore.float32)
         >>> x2 = Tensor(np.arange(4*5).reshape(4, 5), mindspore.float32)
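The broadcast case the comment announces, completed into a sketch using the same tensors:

    import numpy as np
    import mindspore
    from mindspore import Tensor, ops

    # Batched (2, 3, 4) against an unbatched (4, 5): the second operand is
    # broadcast across the leading batch dimension, giving a (2, 3, 5) result.
    x1 = Tensor(np.arange(2 * 3 * 4).reshape(2, 3, 4), mindspore.float32)
    x2 = Tensor(np.arange(4 * 5).reshape(4, 5), mindspore.float32)
    output = ops.matmul(x1, x2)
    print(output.shape)  # (2, 3, 5)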
@@ -1086,9 +1086,8 @@ def dropout(x, p=0.5, seed0=0, seed1=0):
         ``Ascend`` ``GPU`` ``CPU``

     Examples:
-        >>> from mindspore.ops import dropout
         >>> x = Tensor(((20, 16), (50, 50)), mindspore.float32)
-        >>> output, mask = dropout(x, p=0.5)
+        >>> output, mask = ops.dropout(x, p=0.5)
         >>> print(output.shape)
         (2, 2)
     """
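A self-contained version of the example above, assuming the two-value return shown in this hunk (output plus mask) for this release:

    import mindspore
    from mindspore import Tensor, ops

    x = Tensor(((20, 16), (50, 50)), mindspore.float32)
    # p is the drop probability; surviving entries are rescaled, and mask
    # records which entries were kept.
    output, mask = ops.dropout(x, p=0.5)
    print(output.shape)  # (2, 2)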
@@ -1176,7 +1175,7 @@ def dropout1d(x, p=0.5, training=True):

     Examples:
         >>> input_x = Tensor(np.random.randn(4, 3), mindspore.float32)
-        >>> output = dropout1d(input_x, 0.5)
+        >>> output = ops.dropout1d(input_x, 0.5)
         >>> print(output.shape)
         (4, 3)
     """
@@ -1239,7 +1238,7 @@ def dropout2d(x, p=0.5):

     Examples:
         >>> input_x = Tensor(np.ones([2, 1, 2, 3]), mindspore.float32)
-        >>> output, mask = dropout2d(input_x, 0.5)
+        >>> output, mask = ops.dropout2d(input_x, 0.5)
         >>> print(output.shape)
         (2, 1, 2, 3)
     """
@@ -1285,7 +1284,7 @@ def dropout3d(x, p=0.5):

     Examples:
         >>> input_x = Tensor(np.ones([2, 1, 2, 1, 2]), mindspore.float32)
-        >>> output, mask = dropout3d(input_x, 0.5)
+        >>> output, mask = ops.dropout3d(input_x, 0.5)
         >>> print(output.shape)
         (2, 1, 2, 1, 2)
     """
@@ -2972,7 +2971,6 @@ def smooth_l1_loss(logits, labels, beta=1.0, reduction='none'):
         ``Ascend`` ``GPU`` ``CPU``
-
     Examples:
         >>> from mindspore import ops
         >>> logits = Tensor(np.array([1, 2, 3]), mindspore.float32)
         >>> labels = Tensor(np.array([1, 2, 2]), mindspore.float32)
         >>> output = ops.smooth_l1_loss(logits, labels)
@@ -3567,7 +3565,7 @@ def hinge_embedding_loss(inputs, targets, margin=1.0, reduction='mean'):
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``

-    Examplse:
+    Examples:
         >>> import numpy as np
         >>> import mindspore.common.dtype as mstype
         >>> import mindspore.ops as ops
@@ -4321,7 +4319,7 @@ def conv3d(inputs, weight, pad_mode="valid", padding=0, stride=1, dilation=1, gr
     Examples:
         >>> x = Tensor(np.ones([16, 3, 10, 32, 32]), mindspore.float16)
         >>> weight = Tensor(np.ones([32, 3, 4, 3, 3]), mindspore.float16)
-        >>> output = conv3d(x, weight)
+        >>> output = ops.conv3d(x, weight)
         >>> print(output.shape)
         (16, 32, 7, 30, 30)
     """
@@ -58,7 +58,8 @@ def random_gamma(shape, alpha, seed=0, seed2=0):

     Examples:
         >>> import numpy as np
-        >>> from mindspore import ops
+        >>> import mindspore
+        >>> from mindspore import Tensor, ops
         >>> shape = Tensor(np.array([7, 5]), mindspore.int32)
         >>> alpha = Tensor(np.array([0.5, 1.5]), mindspore.float32)
         >>> output = ops.random_gamma(shape, alpha, seed=5)
@@ -424,8 +425,9 @@ def random_poisson(shape, rate, seed=None, dtype=mstype.float32):
         ``CPU``

     Examples:
-        >>> from mindspore import Tensor, ops
         >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, ops
         >>> # case 1: 1-D shape, 2-D rate, float64 output
         >>> shape = Tensor(np.array([2, 2]), mindspore.int64)
         >>> rate = Tensor(np.array([[5.0, 10.0], [5.0, 1.0]]), mindspore.float32)
@@ -510,6 +512,7 @@ def log_uniform_candidate_sampler(true_classes, num_true, num_sampled, unique, r
         ``Ascend`` ``CPU``

     Examples:
+        >>> import numpy as np
         >>> from mindspore import Tensor, ops
         >>> output1, output2, output3 = ops.log_uniform_candidate_sampler(
         ...     Tensor(np.array([[1, 7], [0, 4], [3, 3]])), 2, 5, True, 5)
@@ -45,7 +45,6 @@ def csr_cos(x: CSRTensor) -> CSRTensor:
         ``Ascend`` ``GPU`` ``CPU``
-
     Examples:
         >>> from mindspore import ops, Tensor
         >>> indptr = Tensor([0, 1, 2, 2], dtype=mstype.int32)
         >>> indices = Tensor([3, 0], dtype=mstype.int32)
         >>> values = Tensor([-1, 2], dtype=mstype.float32)

@@ -85,7 +84,6 @@ def coo_cos(x: COOTensor) -> COOTensor:
         ``Ascend`` ``GPU`` ``CPU``
-
     Examples:
         >>> from mindspore import ops, Tensor
         >>> indices = Tensor([[0, 1], [1, 2]], dtype=mstype.int64)
         >>> values = Tensor([-1, 2], dtype=mstype.float32)
         >>> shape = (3, 4)

@@ -120,7 +118,6 @@ def csr_tan(x: CSRTensor) -> CSRTensor:
         ``Ascend`` ``CPU`` ``GPU``
-
     Examples:
         >>> from mindspore import ops, Tensor
         >>> indptr = Tensor([0, 1, 2, 2], dtype=mstype.int32)
         >>> indices = Tensor([3, 0], dtype=mstype.int32)
         >>> values = Tensor([-1, 2], dtype=mstype.float32)

@@ -156,7 +153,6 @@ def coo_tan(x: COOTensor) -> COOTensor:
         ``Ascend`` ``CPU`` ``GPU``
-
     Examples:
         >>> from mindspore import ops, Tensor
         >>> indices = Tensor([[0, 1], [1, 2]], dtype=mstype.int64)
         >>> values = Tensor([-1, 2], dtype=mstype.float32)
         >>> shape = (3, 4)

@@ -191,7 +187,6 @@ def csr_exp(x: CSRTensor) -> CSRTensor:
         ``Ascend`` ``GPU`` ``CPU``
-
     Examples:
         >>> from mindspore import ops, Tensor
         >>> indptr = Tensor([0, 1, 2, 2], dtype=mstype.int32)
         >>> indices = Tensor([3, 0], dtype=mstype.int32)
         >>> values = Tensor([-1, 2], dtype=mstype.float32)

@@ -227,7 +222,6 @@ def coo_exp(x: COOTensor) -> COOTensor:
         ``Ascend`` ``GPU`` ``CPU``
-
     Examples:
         >>> from mindspore import ops, Tensor
         >>> indices = Tensor([[0, 1], [1, 2]], dtype=mstype.int64)
         >>> values = Tensor([-1, 2], dtype=mstype.float32)
         >>> shape = (3, 4)

@@ -262,7 +256,6 @@ def csr_inv(x: CSRTensor) -> CSRTensor:
         ``Ascend`` ``GPU`` ``CPU``
-
     Examples:
         >>> from mindspore import ops, Tensor
         >>> indptr = Tensor([0, 1, 2, 2], dtype=mstype.int32)
         >>> indices = Tensor([3, 0], dtype=mstype.int32)
         >>> values = Tensor([-1, 2], dtype=mstype.float32)

@@ -298,7 +291,6 @@ def coo_inv(x: COOTensor) -> COOTensor:
         ``Ascend`` ``GPU`` ``CPU``
-
     Examples:
         >>> from mindspore import ops, Tensor
         >>> indices = Tensor([[0, 1], [1, 2]], dtype=mstype.int64)
         >>> values = Tensor([-1, 2], dtype=mstype.float32)
         >>> shape = (3, 4)

@@ -341,7 +333,6 @@ def csr_relu(x: CSRTensor) -> CSRTensor:
         ``Ascend`` ``GPU`` ``CPU``
-
     Examples:
         >>> from mindspore import ops, Tensor
         >>> indptr = Tensor([0, 1, 2, 2], dtype=mstype.int32)
         >>> indices = Tensor([3, 0], dtype=mstype.int32)
         >>> values = Tensor([-1, 2], dtype=mstype.float32)

@@ -385,7 +376,6 @@ def coo_relu(x: COOTensor) -> COOTensor:
         ``Ascend`` ``GPU`` ``CPU``
-
     Examples:
         >>> from mindspore import ops, Tensor
         >>> indices = Tensor([[0, 1], [1, 2]], dtype=mstype.int64)
         >>> values = Tensor([-1, 2], dtype=mstype.float32)
         >>> shape = (3, 4)

@@ -421,7 +411,6 @@ def csr_expm1(x: CSRTensor) -> CSRTensor:
         ``Ascend`` ``GPU`` ``CPU``
-
     Examples:
         >>> from mindspore import ops, Tensor
         >>> indptr = Tensor([0, 1, 2, 2], dtype=mstype.int32)
         >>> indices = Tensor([3, 0], dtype=mstype.int32)
         >>> values = Tensor([-1, 2], dtype=mstype.float32)

@@ -458,7 +447,6 @@ def coo_expm1(x: COOTensor) -> COOTensor:
         ``Ascend`` ``GPU`` ``CPU``
-
     Examples:
         >>> from mindspore import ops, Tensor
         >>> indices = Tensor([[0, 1], [1, 2]], dtype=mstype.int64)
         >>> values = Tensor([-1, 2], dtype=mstype.float32)
         >>> shape = (3, 4)

@@ -496,7 +484,6 @@ def csr_isfinite(x: CSRTensor) -> CSRTensor:
         ``Ascend`` ``GPU`` ``CPU``
-
     Examples:
         >>> from mindspore import ops, Tensor
         >>> indptr = Tensor([0, 1, 2, 2], dtype=mstype.int32)
         >>> indices = Tensor([3, 0], dtype=mstype.int32)
         >>> values = Tensor([-1, 2], dtype=mstype.float32)

@@ -535,7 +522,6 @@ def coo_isfinite(x: COOTensor) -> COOTensor:
         ``Ascend`` ``GPU`` ``CPU``
-
     Examples:
         >>> from mindspore import ops, Tensor
         >>> indices = Tensor([[0, 1], [1, 2]], dtype=mstype.int64)
         >>> values = Tensor([-1, 2], dtype=mstype.float32)
         >>> shape = (3, 4)

@@ -571,7 +557,6 @@ def csr_asin(x: CSRTensor) -> CSRTensor:
         ``Ascend`` ``GPU`` ``CPU``
-
     Examples:
         >>> from mindspore import ops, Tensor
         >>> indptr = Tensor([0, 1, 2, 2], dtype=mstype.int32)
         >>> indices = Tensor([3, 0], dtype=mstype.int32)
         >>> values = Tensor([-1, 2], dtype=mstype.float32)

@@ -608,7 +593,6 @@ def coo_asin(x: COOTensor) -> COOTensor:
         ``Ascend`` ``GPU`` ``CPU``
-
     Examples:
         >>> from mindspore import ops, Tensor
         >>> indices = Tensor([[0, 1], [1, 2]], dtype=mstype.int64)
         >>> values = Tensor([-1, 2], dtype=mstype.float32)
         >>> shape = (3, 4)

@@ -643,7 +627,6 @@ def csr_sqrt(x: CSRTensor) -> CSRTensor:
         ``Ascend`` ``GPU`` ``CPU``
-
     Examples:
         >>> from mindspore import ops, Tensor
         >>> indptr = Tensor([0, 1, 2, 2], dtype=mstype.int32)
         >>> indices = Tensor([3, 0], dtype=mstype.int32)
         >>> values = Tensor([-1, 2], dtype=mstype.float32)

@@ -679,7 +662,6 @@ def coo_sqrt(x: COOTensor) -> COOTensor:
         ``Ascend`` ``GPU`` ``CPU``
-
     Examples:
         >>> from mindspore import ops, Tensor
         >>> indices = Tensor([[0, 1], [1, 2]], dtype=mstype.int64)
         >>> values = Tensor([-1, 2], dtype=mstype.float32)
         >>> shape = (3, 4)

@@ -719,7 +701,6 @@ def csr_log(x: CSRTensor) -> CSRTensor:
         ``Ascend`` ``GPU`` ``CPU``
-
     Examples:
         >>> from mindspore import ops, Tensor
         >>> indptr = Tensor([0, 1, 2, 2], dtype=mstype.int32)
         >>> indices = Tensor([3, 0], dtype=mstype.int32)
         >>> values = Tensor([-1, 2], dtype=mstype.float32)

@@ -760,7 +741,6 @@ def coo_log(x: COOTensor) -> COOTensor:
         ``Ascend`` ``GPU`` ``CPU``
-
     Examples:
         >>> from mindspore import ops, Tensor
         >>> indices = Tensor([[0, 1], [1, 2]], dtype=mstype.int64)
         >>> values = Tensor([-1, 2], dtype=mstype.float32)
         >>> shape = (3, 4)

@@ -800,7 +780,6 @@ def csr_isnan(x: CSRTensor) -> CSRTensor:
         ``Ascend`` ``GPU`` ``CPU``
-
     Examples:
         >>> from mindspore import ops, Tensor
         >>> indptr = Tensor([0, 1, 2, 2], dtype=mstype.int32)
         >>> indices = Tensor([3, 0], dtype=mstype.int32)
         >>> values = Tensor([-1, 2], dtype=mstype.float32)

@@ -841,7 +820,6 @@ def coo_isnan(x: COOTensor) -> COOTensor:
         ``Ascend`` ``GPU`` ``CPU``
-
     Examples:
         >>> from mindspore import ops, Tensor
         >>> indices = Tensor([[0, 1], [1, 2]], dtype=mstype.int64)
         >>> values = Tensor([-1, 2], dtype=mstype.float32)
         >>> shape = (3, 4)

@@ -878,7 +856,6 @@ def csr_acos(x: CSRTensor) -> CSRTensor:
         ``Ascend`` ``GPU`` ``CPU``
-
     Examples:
         >>> from mindspore import ops, Tensor
         >>> indptr = Tensor([0, 1, 2, 2], dtype=mstype.int32)
         >>> indices = Tensor([3, 0], dtype=mstype.int32)
         >>> values = Tensor([-1, 2], dtype=mstype.float32)

@@ -916,7 +893,6 @@ def coo_acos(x: COOTensor) -> COOTensor:
         ``Ascend`` ``GPU`` ``CPU``
-
     Examples:
         >>> from mindspore import ops, Tensor
         >>> indices = Tensor([[0, 1], [1, 2]], dtype=mstype.int64)
         >>> values = Tensor([-1, 2], dtype=mstype.float32)
         >>> shape = (3, 4)

@@ -953,7 +929,6 @@ def csr_floor(x: CSRTensor) -> CSRTensor:
         ``Ascend`` ``GPU`` ``CPU``
-
     Examples:
         >>> from mindspore import ops, Tensor
         >>> indptr = Tensor([0, 1, 2, 2], dtype=mstype.int32)
         >>> indices = Tensor([3, 0], dtype=mstype.int32)
         >>> values = Tensor([-1, 2], dtype=mstype.float32)

@@ -991,7 +966,6 @@ def coo_floor(x: COOTensor) -> COOTensor:
         ``Ascend`` ``GPU`` ``CPU``
-
     Examples:
         >>> from mindspore import ops, Tensor
         >>> indices = Tensor([[0, 1], [1, 2]], dtype=mstype.int64)
         >>> values = Tensor([-1, 2], dtype=mstype.float32)
         >>> shape = (3, 4)

@@ -1027,7 +1001,6 @@ def csr_atan(x: CSRTensor) -> CSRTensor:
         ``Ascend`` ``GPU`` ``CPU``
-
     Examples:
         >>> from mindspore import ops, Tensor
         >>> indptr = Tensor([0, 1, 2, 2], dtype=mstype.int32)
         >>> indices = Tensor([3, 0], dtype=mstype.int32)
         >>> values = Tensor([-1, 2], dtype=mstype.float32)

@@ -1064,7 +1037,6 @@ def coo_atan(x: COOTensor) -> COOTensor:
         ``Ascend`` ``GPU`` ``CPU``
-
     Examples:
         >>> from mindspore import ops, Tensor
         >>> indices = Tensor([[0, 1], [1, 2]], dtype=mstype.int64)
         >>> values = Tensor([-1, 2], dtype=mstype.float32)
         >>> shape = (3, 4)

@@ -1099,7 +1071,6 @@ def csr_square(x: CSRTensor) -> CSRTensor:
         ``Ascend`` ``GPU`` ``CPU``
-
     Examples:
         >>> from mindspore import ops, Tensor
         >>> indptr = Tensor([0, 1, 2, 2], dtype=mstype.int32)
         >>> indices = Tensor([3, 0], dtype=mstype.int32)
         >>> values = Tensor([-1, 2], dtype=mstype.float32)

@@ -1135,7 +1106,6 @@ def coo_square(x: COOTensor) -> COOTensor:
         ``Ascend`` ``GPU`` ``CPU``
-
     Examples:
         >>> from mindspore import ops, Tensor
         >>> indices = Tensor([[0, 1], [1, 2]], dtype=mstype.int64)
         >>> values = Tensor([-1, 2], dtype=mstype.float32)
         >>> shape = (3, 4)

@@ -1173,7 +1143,6 @@ def csr_relu6(x: CSRTensor) -> CSRTensor:
         ``Ascend`` ``GPU`` ``CPU``
-
     Examples:
         >>> from mindspore import ops, Tensor
         >>> indptr = Tensor([0, 1, 2, 2], dtype=mstype.int32)
         >>> indices = Tensor([3, 0], dtype=mstype.int32)
         >>> values = Tensor([-1, 2], dtype=mstype.float32)

@@ -1212,7 +1181,6 @@ def coo_relu6(x: COOTensor) -> COOTensor:
         ``Ascend`` ``GPU`` ``CPU``
-
     Examples:
         >>> from mindspore import ops, Tensor
         >>> indices = Tensor([[0, 1], [1, 2]], dtype=mstype.int64)
         >>> values = Tensor([-1, 2], dtype=mstype.float32)
         >>> shape = (3, 4)

@@ -1247,7 +1215,6 @@ def csr_sinh(x: CSRTensor) -> CSRTensor:
         ``Ascend`` ``GPU`` ``CPU``
-
     Examples:
         >>> from mindspore import ops, Tensor
         >>> indptr = Tensor([0, 1, 2, 2], dtype=mstype.int32)
         >>> indices = Tensor([3, 0], dtype=mstype.int32)
         >>> values = Tensor([-1, 2], dtype=mstype.float32)

@@ -1283,7 +1250,6 @@ def coo_sinh(x: COOTensor) -> COOTensor:
         ``Ascend`` ``GPU`` ``CPU``
-
     Examples:
         >>> from mindspore import ops, Tensor
         >>> indices = Tensor([[0, 1], [1, 2]], dtype=mstype.int64)
         >>> values = Tensor([-1, 2], dtype=mstype.float32)
         >>> shape = (3, 4)

@@ -1319,7 +1285,6 @@ def csr_ceil(x: CSRTensor) -> CSRTensor:
         ``Ascend`` ``GPU`` ``CPU``
-
     Examples:
         >>> from mindspore import ops, Tensor
         >>> indptr = Tensor([0, 1, 2, 2], dtype=mstype.int32)
         >>> indices = Tensor([3, 0], dtype=mstype.int32)
         >>> values = Tensor([-1, 2], dtype=mstype.float32)

@@ -1356,7 +1321,6 @@ def coo_ceil(x: COOTensor) -> COOTensor:
         ``Ascend`` ``GPU`` ``CPU``
-
     Examples:
         >>> from mindspore import ops, Tensor
         >>> indices = Tensor([[0, 1], [1, 2]], dtype=mstype.int64)
         >>> values = Tensor([-1, 2], dtype=mstype.float32)
         >>> shape = (3, 4)

@@ -1394,7 +1358,6 @@ def csr_cosh(x: CSRTensor) -> CSRTensor:
         ``Ascend`` ``GPU`` ``CPU``
-
     Examples:
         >>> from mindspore import ops, Tensor
         >>> indptr = Tensor([0, 1, 2, 2], dtype=mstype.int32)
         >>> indices = Tensor([3, 0], dtype=mstype.int32)
         >>> values = Tensor([-1, 2], dtype=mstype.float32)

@@ -1433,7 +1396,6 @@ def coo_cosh(x: COOTensor) -> COOTensor:
         ``Ascend`` ``GPU`` ``CPU``
-
     Examples:
         >>> from mindspore import ops, Tensor
         >>> indices = Tensor([[0, 1], [1, 2]], dtype=mstype.int64)
         >>> values = Tensor([-1, 2], dtype=mstype.float32)
         >>> shape = (3, 4)

@@ -1470,7 +1432,6 @@ def csr_softsign(x: CSRTensor) -> CSRTensor:
         ``Ascend`` ``GPU`` ``CPU``
-
     Examples:
         >>> from mindspore import ops, Tensor
         >>> indptr = Tensor([0, 1, 2, 2], dtype=mstype.int32)
         >>> indices = Tensor([3, 0], dtype=mstype.int32)
         >>> values = Tensor([-1, 2], dtype=mstype.float32)

@@ -1508,7 +1469,6 @@ def coo_softsign(x: COOTensor) -> COOTensor:
         ``Ascend`` ``GPU`` ``CPU``
-
     Examples:
         >>> from mindspore import ops, Tensor
         >>> indices = Tensor([[0, 1], [1, 2]], dtype=mstype.int64)
         >>> values = Tensor([-1, 2], dtype=mstype.float32)
         >>> shape = (3, 4)

@@ -1544,7 +1504,6 @@ def csr_log1p(x: CSRTensor) -> CSRTensor:
         ``Ascend`` ``GPU`` ``CPU``
-
     Examples:
         >>> from mindspore import ops, Tensor
         >>> indptr = Tensor([0, 1, 2, 2], dtype=mstype.int32)
         >>> indices = Tensor([3, 0], dtype=mstype.int32)
         >>> values = Tensor([-1, 2], dtype=mstype.float32)

@@ -1580,7 +1539,6 @@ def coo_log1p(x: COOTensor) -> COOTensor:
         ``Ascend`` ``GPU`` ``CPU``
-
     Examples:
         >>> from mindspore import ops, Tensor
         >>> indices = Tensor([[0, 1], [1, 2]], dtype=mstype.int64)
         >>> values = Tensor([-1, 2], dtype=mstype.float32)
         >>> shape = (3, 4)

@@ -1615,7 +1573,6 @@ def csr_round(x: CSRTensor) -> CSRTensor:
         ``Ascend`` ``GPU`` ``CPU``
-
     Examples:
         >>> from mindspore import ops, Tensor
         >>> indptr = Tensor([0, 1, 2, 2], dtype=mstype.int32)
         >>> indices = Tensor([3, 0], dtype=mstype.int32)
         >>> values = Tensor([-1, 2], dtype=mstype.float32)

@@ -1651,7 +1608,6 @@ def coo_round(x: COOTensor) -> COOTensor:
         ``Ascend`` ``GPU`` ``CPU``
-
     Examples:
         >>> from mindspore import ops, Tensor
         >>> indices = Tensor([[0, 1], [1, 2]], dtype=mstype.int64)
         >>> values = Tensor([-1, 2], dtype=mstype.float32)
         >>> shape = (3, 4)

@@ -1691,7 +1647,6 @@ def csr_tanh(x: CSRTensor) -> CSRTensor:
         ``Ascend`` ``GPU`` ``CPU``
-
     Examples:
         >>> from mindspore import ops, Tensor
         >>> indptr = Tensor([0, 1, 2, 2], dtype=mstype.int32)
         >>> indices = Tensor([3, 0], dtype=mstype.int32)
         >>> values = Tensor([-1, 2], dtype=mstype.float32)

@@ -1732,7 +1687,6 @@ def coo_tanh(x: COOTensor) -> COOTensor:
         ``Ascend`` ``GPU`` ``CPU``
-
     Examples:
         >>> from mindspore import ops, Tensor
         >>> indices = Tensor([[0, 1], [1, 2]], dtype=mstype.int64)
         >>> values = Tensor([-1, 2], dtype=mstype.float32)
         >>> shape = (3, 4)

@@ -1767,7 +1721,6 @@ def csr_asinh(x: CSRTensor) -> CSRTensor:
         ``Ascend`` ``GPU`` ``CPU``
-
     Examples:
         >>> from mindspore import ops, Tensor
         >>> indptr = Tensor([0, 1, 2, 2], dtype=mstype.int32)
         >>> indices = Tensor([3, 0], dtype=mstype.int32)
         >>> values = Tensor([-1, 2], dtype=mstype.float32)

@@ -1803,7 +1756,6 @@ def coo_asinh(x: COOTensor) -> COOTensor:
         ``Ascend`` ``GPU`` ``CPU``
-
     Examples:
         >>> from mindspore import ops, Tensor
         >>> indices = Tensor([[0, 1], [1, 2]], dtype=mstype.int64)
         >>> values = Tensor([-1, 2], dtype=mstype.float32)
         >>> shape = (3, 4)

@@ -1838,7 +1790,6 @@ def csr_neg(x: CSRTensor) -> CSRTensor:
         ``Ascend`` ``GPU`` ``CPU``
-
     Examples:
         >>> from mindspore import ops, Tensor
         >>> indptr = Tensor([0, 1, 2, 2], dtype=mstype.int32)
         >>> indices = Tensor([3, 0], dtype=mstype.int32)
         >>> values = Tensor([-1, 2], dtype=mstype.float32)

@@ -1874,7 +1825,6 @@ def coo_neg(x: COOTensor) -> COOTensor:
         ``Ascend`` ``GPU`` ``CPU``
-
     Examples:
         >>> from mindspore import ops, Tensor
         >>> indices = Tensor([[0, 1], [1, 2]], dtype=mstype.int64)
         >>> values = Tensor([-1, 2], dtype=mstype.float32)
         >>> shape = (3, 4)

@@ -1913,7 +1863,6 @@ def csr_acosh(x: CSRTensor) -> CSRTensor:
         ``Ascend`` ``GPU`` ``CPU``
-
     Examples:
         >>> from mindspore import ops, Tensor
         >>> indptr = Tensor([0, 1, 2, 2], dtype=mstype.int32)
         >>> indices = Tensor([3, 0], dtype=mstype.int32)
         >>> values = Tensor([-1, 2], dtype=mstype.float32)

@@ -1953,7 +1902,6 @@ def coo_acosh(x: COOTensor) -> COOTensor:
         ``Ascend`` ``GPU`` ``CPU``
-
     Examples:
         >>> from mindspore import ops, Tensor
         >>> indices = Tensor([[0, 1], [1, 2]], dtype=mstype.int64)
         >>> values = Tensor([-1, 2], dtype=mstype.float32)
         >>> shape = (3, 4)

@@ -1993,7 +1941,6 @@ def csr_isinf(x: CSRTensor) -> CSRTensor:
         ``CPU`` ``GPU``
-
     Examples:
         >>> from mindspore import ops, Tensor
         >>> indptr = Tensor([0, 1, 2, 2], dtype=mstype.int32)
         >>> indices = Tensor([3, 0], dtype=mstype.int32)
         >>> values = Tensor([-1, 2], dtype=mstype.float32)

@@ -2034,7 +1981,6 @@ def coo_isinf(x: COOTensor) -> COOTensor:
         ``CPU`` ``GPU``
-
     Examples:
         >>> from mindspore import ops, Tensor
         >>> indices = Tensor([[0, 1], [1, 2]], dtype=mstype.int64)
         >>> values = Tensor([-1, 2], dtype=mstype.float32)
         >>> shape = (3, 4)

@@ -2074,7 +2020,6 @@ def csr_atanh(x: CSRTensor) -> CSRTensor:
         ``Ascend`` ``GPU`` ``CPU``
-
     Examples:
         >>> from mindspore import ops, Tensor
         >>> indptr = Tensor([0, 1, 2, 2], dtype=mstype.int32)
         >>> indices = Tensor([3, 0], dtype=mstype.int32)
         >>> values = Tensor([-1, 2], dtype=mstype.float32)

@@ -2115,7 +2060,6 @@ def coo_atanh(x: COOTensor) -> COOTensor:
         ``Ascend`` ``GPU`` ``CPU``
-
     Examples:
         >>> from mindspore import ops, Tensor
         >>> indices = Tensor([[0, 1], [1, 2]], dtype=mstype.int64)
         >>> values = Tensor([-1, 2], dtype=mstype.float32)
         >>> shape = (3, 4)

@@ -2155,7 +2099,6 @@ def csr_sigmoid(x: CSRTensor) -> CSRTensor:
         ``Ascend`` ``GPU`` ``CPU``
-
     Examples:
         >>> from mindspore import ops, Tensor
         >>> indptr = Tensor([0, 1, 2, 2], dtype=mstype.int32)
         >>> indices = Tensor([3, 0], dtype=mstype.int32)
         >>> values = Tensor([-1, 2], dtype=mstype.float32)

@@ -2196,7 +2139,6 @@ def coo_sigmoid(x: COOTensor) -> COOTensor:
         ``Ascend`` ``GPU`` ``CPU``
-
     Examples:
         >>> from mindspore import ops, Tensor
         >>> indices = Tensor([[0, 1], [1, 2]], dtype=mstype.int64)
         >>> values = Tensor([-1, 2], dtype=mstype.float32)
         >>> shape = (3, 4)

@@ -2231,7 +2173,6 @@ def csr_abs(x: CSRTensor) -> CSRTensor:
         ``Ascend`` ``GPU`` ``CPU``
-
     Examples:
         >>> from mindspore import ops, Tensor
         >>> indptr = Tensor([0, 1, 2, 2], dtype=mstype.int32)
         >>> indices = Tensor([3, 0], dtype=mstype.int32)
         >>> values = Tensor([-1, 2], dtype=mstype.float32)

@@ -2267,7 +2208,6 @@ def coo_abs(x: COOTensor) -> COOTensor:
         ``Ascend`` ``GPU`` ``CPU``
-
     Examples:
         >>> from mindspore import ops, Tensor
         >>> indices = Tensor([[0, 1], [1, 2]], dtype=mstype.int64)
         >>> values = Tensor([-1, 2], dtype=mstype.float32)
         >>> shape = (3, 4)

@@ -2304,7 +2244,6 @@ def csr_sin(x: CSRTensor) -> CSRTensor:
         ``Ascend`` ``GPU`` ``CPU``
-
     Examples:
         >>> from mindspore import ops, Tensor
         >>> indptr = Tensor([0, 1, 2, 2], dtype=mstype.int32)
         >>> indices = Tensor([3, 0], dtype=mstype.int32)
         >>> values = Tensor([-1, 2], dtype=mstype.float32)

@@ -2342,7 +2281,6 @@ def coo_sin(x: COOTensor) -> COOTensor:
         ``Ascend`` ``GPU`` ``CPU``
-
     Examples:
         >>> from mindspore import ops, Tensor
         >>> indices = Tensor([[0, 1], [1, 2]], dtype=mstype.int64)
         >>> values = Tensor([-1, 2], dtype=mstype.float32)
         >>> shape = (3, 4)
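All of the sparse hunks above share one template; note they reference `mstype` without importing it. A minimal sketch completing the CSR variant, assuming these helpers are exported as `ops.csr_cos` etc. in this release:

    import mindspore.ops as ops
    from mindspore import Tensor, CSRTensor
    from mindspore import dtype as mstype  # used but not imported in the hunks

    indptr = Tensor([0, 1, 2, 2], dtype=mstype.int32)
    indices = Tensor([3, 0], dtype=mstype.int32)
    values = Tensor([-1, 2], dtype=mstype.float32)
    shape = (3, 4)
    x = CSRTensor(indptr, indices, values, shape)
    # Elementwise ops such as csr_cos act only on the stored nonzero values.
    output = ops.csr_cos(x)
    print(output.values)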
@@ -444,7 +444,7 @@ class AiCPURegOp(CpuRegOp):
         op_name (str): kernel name.

     Examples:
-        >>> from mindspore.ops.op_info_register import op_info_register, AiCPURegOp, DataType
+        >>> from mindspore.ops import AiCPURegOp, DataType
         >>> stack_op_info = AiCPURegOp("Stack") \
         ...     .fusion_type("OPAQUE") \
         ...     .attr("axis", "int") \
@@ -479,7 +479,7 @@ class TBERegOp(RegOp):
         op_name (str): kernel name.

     Examples:
-        >>> from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType
+        >>> from mindspore.ops import TBERegOp, DataType
         >>> abs_op_info = TBERegOp("Abs") \
         ...     .fusion_type("ELEMWISE") \
         ...     .async_flag(False) \
@@ -205,7 +205,7 @@ class Expand(Primitive):
     Examples:
         >>> x = Tensor(np.array([[1], [2], [3]]), mindspore.float32)
         >>> shape = Tensor(np.array([3,4]), mindspore.int32)
-        >>> expand = Expand()
+        >>> expand = ops.Expand()
         >>> y = expand(x, shape)
         >>> print(y)
         [[1. 1. 1. 1.]
@@ -478,7 +478,7 @@ class Im2Col(Primitive):

     Examples:
         >>> x = Tensor(input_data=np.random.rand(4, 4, 32, 32), dtype=mstype.float64)
-        >>> im2col = P.Im2Col(ksizes=3, strides=1, dilations=1)
+        >>> im2col = ops.Im2Col(ksizes=3, strides=1, dilations=1)
         >>> y = im2col(x)
         >>> print(y.shape)
         (4, 36, 30, 30)
@@ -591,12 +591,11 @@ class Col2Im(Primitive):

     Examples:
         >>> import numpy as np
-        >>> from mindspore import Tensor
+        >>> from mindspore import Tensor, ops
         >>> from mindspore import dtype as mstype
-        >>> from mindspore.ops.operations.array_ops import Col2Im
         >>> x = Tensor(input_data=np.random.rand(16, 16, 4, 25), dtype=mstype.float32)
         >>> output_size = Tensor(input_data=[8, 8], dtype=mstype.int32)
-        >>> col2im = Col2Im(kernel_size=[2, 2], dilation=[2, 2], padding=[2, 2], stride=[2, 2])
+        >>> col2im = ops.Col2Im(kernel_size=[2, 2], dilation=[2, 2], padding=[2, 2], stride=[2, 2])
         >>> y = col2im(x, output_size)
         >>> print(y.shape)
         (16, 16, 8, 8)
@@ -861,10 +860,9 @@ class ConjugateTranspose(Primitive):
         ``Ascend`` ``CPU``

     Examples:
-        >>> from mindspore.ops.operations.array_ops import ConjugateTranspose
         >>> x = Tensor(np.array([[1 + 1j,2 + 2j], [3 + 3j, 4 + 4j]]), mindspore.complex64)
         >>> perm = (1, 0)
-        >>> conjugate_transpose = ConjugateTranspose()
+        >>> conjugate_transpose = ops.ConjugateTranspose()
         >>> output = conjugate_transpose(x, perm)
         >>> print(output)
         [[1.-1.j 3.-3.j]
@@ -951,7 +949,7 @@ class UniqueConsecutive(Primitive):
         >>> import numpy as np
         >>> from mindspore import Tensor
         >>> from mindspore import dtype as mstype
-        >>> from mindspore.ops.operations.array_ops import UniqueConsecutive
+        >>> from mindspore.ops import UniqueConsecutive
         >>> x = Tensor(np.array([1, 1, 2, 2, 3, 1, 1, 2]), mstype.int32)
         >>> unique_consecutive = UniqueConsecutive(True, True, None)
         >>> output, idx, counts = unique_consecutive(x)
@@ -1109,10 +1107,9 @@ class Padding(Primitive):
         ``Ascend`` ``GPU`` ``CPU``

     Examples:
-        >>> from mindspore.ops.operations.array_ops import Padding
         >>> x = Tensor(np.array([[8], [10]]), mindspore.float32)
         >>> pad_dim_size = 4
-        >>> output = Padding(pad_dim_size)(x)
+        >>> output = ops.Padding(pad_dim_size)(x)
         >>> print(output)
         [[ 8.  0.  0.  0.]
          [10.  0.  0.  0.]]
@@ -1285,7 +1282,6 @@ class MatrixDiagV3(Primitive):
         ``Ascend`` ``GPU`` ``CPU``

     Examples:
-        >>> from mindspore.ops.operations.array_ops import MatrixDiagV3
         >>> x = Tensor(np.array([[8, 9, 0],
         ...                      [1, 2, 3],
         ...                      [0, 4, 5]]), mindspore.float32)

@@ -1293,7 +1289,7 @@ class MatrixDiagV3(Primitive):
         >>> num_rows = Tensor(np.array(3), mindspore.int32)
         >>> num_cols = Tensor(np.array(3), mindspore.int32)
         >>> padding_value = Tensor(np.array(11), mindspore.float32)
-        >>> matrix_diag_v3 = MatrixDiagV3(align='LEFT_RIGHT')
+        >>> matrix_diag_v3 = ops.MatrixDiagV3(align='LEFT_RIGHT')
         >>> output = matrix_diag_v3(x, k, num_rows, num_cols, padding_value)
         >>> print(output)
         [[ 1.  8. 11.]
@@ -1326,7 +1322,7 @@ class MatrixDiagPartV3(Primitive):
         ...                      [9, 8, 7, 6]]), mindspore.float32)
         >>> k =Tensor(np.array([1, 3]), mindspore.int32)
         >>> padding_value = Tensor(np.array(9), mindspore.float32)
-        >>> matrix_diag_part_v3 = ops.operations.array_ops.MatrixDiagPartV3(align='RIGHT_LEFT')
+        >>> matrix_diag_part_v3 = ops.MatrixDiagPartV3(align='RIGHT_LEFT')
         >>> output = matrix_diag_part_v3(x, k, padding_value)
         >>> print(output)
         [[9. 9. 4.]
@@ -1434,8 +1430,7 @@ class MatrixBandPart(Primitive):
         ``GPU`` ``CPU``

     Examples:
-        >>> from mindspore.ops.operations.array_ops import MatrixBandPart
-        >>> matrix_band_part = MatrixBandPart()
+        >>> matrix_band_part = ops.MatrixBandPart()
         >>> x = np.ones([2, 4, 4]).astype(np.float32)
         >>> output = matrix_band_part(Tensor(x), 2, 1)
         >>> print(output)
@@ -3552,13 +3547,12 @@ class DiagPart(PrimitiveWithCheck):
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``

-    Examples
+    Examples:
         >>> input_x = Tensor([[1, 0, 0, 0],
         ...                   [0, 2, 0, 0],
         ...                   [0, 0, 3, 0],
         ...                   [0, 0, 0, 4]])
-        >>> import mindspore.ops as P
-        >>> diag_part = P.DiagPart()
+        >>> diag_part = ops.DiagPart()
         >>> output = diag_part(input_x)
         >>> print(output)
         [1 2 3 4]
@@ -4368,7 +4362,7 @@ class Triu(Primitive):
         ...             [ 5,  6,  7,  8],
         ...             [10, 11, 12, 13],
         ...             [14, 15, 16, 17]]))
-        >>> triu = P.Triu()
+        >>> triu = ops.Triu()
         >>> result = triu(x)
         >>> print(result)
         [[ 1  2  3  4]

@@ -4379,7 +4373,7 @@ class Triu(Primitive):
         ...             [ 5,  6,  7,  8],
         ...             [10, 11, 12, 13],
         ...             [14, 15, 16, 17]]))
-        >>> triu = P.Triu(diagonal=1)
+        >>> triu = ops.Triu(diagonal=1)
         >>> result = triu(x)
         >>> print(result)
         [[ 0  2  3  4]

@@ -4390,7 +4384,7 @@ class Triu(Primitive):
         ...             [ 5,  6,  7,  8],
         ...             [10, 11, 12, 13],
         ...             [14, 15, 16, 17]]))
-        >>> triu = P.Triu(diagonal=-1)
+        >>> triu = ops.Triu(diagonal=-1)
         >>> result = triu(x)
         >>> print(result)
         [[ 1  2  3  4]
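The three Triu cases above, condensed into one sketch with the same input matrix:

    import numpy as np
    import mindspore.ops as ops
    from mindspore import Tensor

    x = Tensor(np.array([[ 1,  2,  3,  4],
                         [ 5,  6,  7,  8],
                         [10, 11, 12, 13],
                         [14, 15, 16, 17]]))
    # diagonal=0 keeps the main diagonal and above; positive values shift the
    # kept band up, negative values shift it down.
    print(ops.Triu()(x))
    print(ops.Triu(diagonal=1)(x))
    print(ops.Triu(diagonal=-1)(x))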
@@ -4557,7 +4551,6 @@ class ScatterDiv(_ScatterOpDynamic):
         ``Ascend`` ``GPU`` ``CPU``
-
     Examples:
         >>> import mindspore.ops as ops
         >>> input_x = Parameter(Tensor(np.array([[6.0, 6.0, 6.0], [2.0, 2.0, 2.0]]), mstype.float32), name="x")
         >>> indices = Tensor(np.array([0, 1]), mstype.int32)
         >>> updates = Tensor(np.array([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]]), mstype.float32)
@@ -4636,12 +4629,11 @@ class ScatterNdAdd(Primitive):
         ``Ascend`` ``GPU`` ``CPU``

     Examples:
-        >>> from mindspore.ops.operations.array_ops import ScatterNdAdd
         >>> input_x = Parameter(Tensor(np.array([1, 2, 3, 4, 5, 6, 7, 8]), mindspore.float32), name="x")
         >>> indices = Tensor(np.array([[2], [4], [1], [7]]), mindspore.int32)
         >>> updates = Tensor(np.array([6, 7, 8, 9]), mindspore.float32)
         >>> use_locking = False
-        >>> scatter_nd_add = ScatterNdAdd(use_locking)
+        >>> scatter_nd_add = ops.ScatterNdAdd(use_locking)
         >>> output = scatter_nd_add(input_x, indices, updates)
         >>> print(output)
         [ 1. 10.  9.  4. 12.  6.  7. 17.]

@@ -4650,7 +4642,7 @@ class ScatterNdAdd(Primitive):
         >>> updates = Tensor(np.array([[[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],
         ...                            [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]]]), mindspore.int32)
         >>> use_locking = False
-        >>> scatter_nd_add = ScatterNdAdd(use_locking)
+        >>> scatter_nd_add = ops.ScatterNdAdd(use_locking)
         >>> output = scatter_nd_add(input_x, indices, updates)
         >>> print(output)
         [[[1 1 1 1]
@@ -4697,12 +4689,11 @@ class ScatterNdSub(Primitive):
         ``Ascend`` ``GPU`` ``CPU``

     Examples:
-        >>> from mindspore.ops.operations.array_ops import ScatterNdSub
         >>> input_x = Parameter(Tensor(np.array([1, 2, 3, 4, 5, 6, 7, 8]), mindspore.float32), name="x")
         >>> indices = Tensor(np.array([[2], [4], [1], [7]]), mindspore.int32)
         >>> updates = Tensor(np.array([6, 7, 8, 9]), mindspore.float32)
         >>> use_locking = False
-        >>> scatter_nd_sub = ScatterNdSub(use_locking)
+        >>> scatter_nd_sub = ops.ScatterNdSub(use_locking)
         >>> output = scatter_nd_sub(input_x, indices, updates)
         >>> print(output)
         [ 1. -6. -3.  4. -2.  6.  7. -1.]

@@ -4711,7 +4702,7 @@ class ScatterNdSub(Primitive):
         >>> updates = Tensor(np.array([[[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],
         ...                            [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]]]), mindspore.int32)
         >>> use_locking = False
-        >>> scatter_nd_sub = ScatterNdSub(use_locking)
+        >>> scatter_nd_sub = ops.ScatterNdSub(use_locking)
         >>> output = scatter_nd_sub(input_x, indices, updates)
         >>> print(output)
         [[[-1 -1 -1 -1]
@@ -4759,11 +4750,10 @@ class ScatterNdMul(_ScatterNdOp):
         ``GPU`` ``CPU``

     Examples:
-        >>> from mindspore.ops.operations.array_ops import ScatterNdMul
         >>> input_x = Parameter(Tensor(np.array([1, 2, 3, 4, 5, 6, 7, 8]), mindspore.float32), name="x")
         >>> indices = Tensor(np.array([[2], [4], [1], [7]]), mindspore.int32)
         >>> updates = Tensor(np.array([6, 7, 8, 9]), mindspore.float32)
-        >>> scatter_nd_mul = ScatterNdMul()
+        >>> scatter_nd_mul = ops.ScatterNdMul()
         >>> output = scatter_nd_mul(input_x, indices, updates)
         >>> print(output)
         [ 1. 16. 18.  4. 35.  6.  7. 72.]

@@ -4771,7 +4761,7 @@ class ScatterNdMul(_ScatterNdOp):
         >>> indices = Tensor(np.array([[0], [2]]), mindspore.int32)
         >>> updates = Tensor(np.array([[[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],
         ...                            [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]]]), mindspore.int32)
-        >>> scatter_nd_mul = ScatterNdMul()
+        >>> scatter_nd_mul = ops.ScatterNdMul()
         >>> output = scatter_nd_mul(input_x, indices, updates)
         >>> print(output)
         [[[1 1 1 1]
@@ -4855,11 +4845,10 @@ class ScatterNdMax(_ScatterNdOp):
         ``Ascend`` ``GPU`` ``CPU``

     Examples:
-        >>> from mindspore.ops.operations.array_ops import ScatterNdMax
         >>> input_x = Parameter(Tensor(np.array([1, 2, 3, 4, 5, 6, 7, 8]), mindspore.float32), name="x")
         >>> indices = Tensor(np.array([[2], [4], [1], [7]]), mindspore.int32)
         >>> updates = Tensor(np.array([6, 7, 8, 9]), mindspore.float32)
-        >>> scatter_nd_max = ScatterNdMax()
+        >>> scatter_nd_max = ops.ScatterNdMax()
         >>> output = scatter_nd_max(input_x, indices, updates)
         >>> print(output)
         [ 1. 8. 6. 4. 7. 6. 7. 9.]

@@ -4867,7 +4856,7 @@ class ScatterNdMax(_ScatterNdOp):
         >>> indices = Tensor(np.array([[0], [2]]), mindspore.int32)
         >>> updates = Tensor(np.array([[[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],
         ...                            [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]]]), mindspore.int32)
-        >>> scatter_nd_max = ScatterNdMax()
+        >>> scatter_nd_max = ops.ScatterNdMax()
         >>> output = scatter_nd_max(input_x, indices, updates)
         >>> print(output)
         [[[1 1 1 1]
@@ -6717,7 +6706,7 @@ class ExtractVolumePatches(Primitive):
         >>> strides = (1, 1, 1, 1, 1)
         >>> padding = "VALID"
         >>> input_x = P.Reshape()(Tensor(np.arange(1, 28), mstype.float16), (1, 1, 3, 3, 3))
-        >>> output_y = P.ExtractVolumePatches(kernel_size, strides, padding)(input_x)
+        >>> output_y = ops.ExtractVolumePatches(kernel_size, strides, padding)(input_x)
         >>> print(output_y.shape)
         (1, 8, 2, 2, 2)
     """
@@ -7187,7 +7176,7 @@ class Tril(Primitive):
         ...             [ 5,  6,  7,  8],
         ...             [10, 11, 12, 13],
         ...             [14, 15, 16, 17]]))
-        >>> tril = P.Tril()
+        >>> tril = ops.Tril()
         >>> result = tril(x)
         >>> print(result)
         [[ 1  0  0  0]

@@ -7198,7 +7187,7 @@ class Tril(Primitive):
         ...             [ 5,  6,  7,  8],
         ...             [10, 11, 12, 13],
         ...             [14, 15, 16, 17]]))
-        >>> tril = P.Tril(diagonal=1)
+        >>> tril = ops.Tril(diagonal=1)
         >>> result = tril(x)
         >>> print(result)
         [[ 1  2  0  0]

@@ -7209,7 +7198,7 @@ class Tril(Primitive):
         ...             [ 5,  6,  7,  8],
         ...             [10, 11, 12, 13],
         ...             [14, 15, 16, 17]]))
-        >>> tril = P.Tril(diagonal=-1)
+        >>> tril = ops.Tril(diagonal=-1)
         >>> result = tril(x)
         >>> print(result)
         [[ 0  0  0  0]
@@ -7254,9 +7243,7 @@ class IndexFill(Primitive):
         ``Ascend`` ``GPU`` ``CPU``
-
     Examples:
         >>> import mindspore
-        >>> from mindspore.ops.operations.array_ops import IndexFill
-        >>> index_fill = IndexFill()
+        >>> index_fill = ops.IndexFill()
         >>> x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]).astype(np.float32))
         >>> index = Tensor([0, 2], mindspore.int32)
         >>> value = Tensor(-2.0, mindspore.float32)
@@ -7486,7 +7473,7 @@ class FillDiagonal(Primitive):

     Inputs:
         - **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
-        The data type must be float32, int32 or int64.
+          The data type must be float32, int32 or int64.

     Outputs:
         - **y** (Tensor) - Tensor, has the same shape and data type as the input `x`.

@@ -7502,7 +7489,7 @@ class FillDiagonal(Primitive):
     Examples:
         >>> x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]).astype(np.float32))
         >>> fill_value = 9.9
-        >>> fill_diagonal = FillDiagonal(fill_value)
+        >>> fill_diagonal = ops.FillDiagonal(fill_value)
         >>> y = fill_diagonal(x)
         >>> print(y)
         [[9.9 2.  3. ]

@@ -7510,7 +7497,7 @@ class FillDiagonal(Primitive):
         [7.  8.  9.9]]
         >>> x = Tensor(np.array([[0, 0, 0], [1, 1, 1], [2, 2, 2], [3, 3, 3], [4, 4, 4], [5, 5, 5]]).astype(np.int32))
         >>> fill_value = 9.0
-        >>> fill_diagonal = FillDiagonal(fill_value)
+        >>> fill_diagonal = ops.FillDiagonal(fill_value)
         >>> y = fill_diagonal(x)
         >>> print(y)
         [[9 0 0]
@@ -7589,13 +7576,13 @@ class HammingWindow(Primitive):
     Examples:
         >>> # case 1: periodic=True.
         >>> length = Tensor(np.array([6]).astype(np.int32))
-        >>> hamming_window = HammingWindow(periodic=True)
+        >>> hamming_window = ops.HammingWindow(periodic=True)
         >>> y = hamming_window(length)
         >>> print(y)
         [0.08000001 0.31       0.77000004 1.         0.77000004 0.31      ]
         >>> # case 2: periodic=False.
         >>> length = Tensor(np.array([7]).astype(np.int32))
-        >>> hamming_window = HammingWindow(periodic=False)
+        >>> hamming_window = ops.HammingWindow(periodic=False)
         >>> y = hamming_window(length)
         >>> print(y)
         [0.08000001 0.31       0.77000004 1.         0.77000004 0.31       0.08000001]
@@ -7629,7 +7616,7 @@ class AffineGrid(Primitive):
         ``Ascend`` ``GPU`` ``CPU``

     Examples:
-        >>> affinegrid = AffineGrid(align_corners=False)
+        >>> affinegrid = ops.AffineGrid(align_corners=False)
         >>> theta = Tensor([[[0.8, 0.5, 0],[-0.5, 0.8, 0]]], mindspore.float32)
         >>> out_size = (1, 3, 2, 3)
         >>> output = affinegrid(theta, out_size)
@@ -7831,7 +7818,7 @@ class Bincount(Primitive):
         >>> array = Tensor(np.array([1, 2, 2, 3, 3, 3, 4, 4, 4, 4]), mindspore.int32)
         >>> size = Tensor(5, mindspore.int32)
         >>> weights = Tensor(np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1]), mindspore.float32)
-        >>> bincount = P.Bincount()
+        >>> bincount = ops.Bincount()
         >>> bins = bincount(array, size, weights)
         >>> print(bins)
         [0. 1. 2. 3. 4.]
@@ -63,9 +63,8 @@ class ReduceOp:
         This example should be run with multiple devices.

         >>> from mindspore.communication import init
-        >>> from mindspore import Tensor, ops
+        >>> from mindspore import Tensor, ops, nn
         >>> from mindspore.ops import ReduceOp
-        >>> import mindspore.nn as nn
         >>>
         >>> init()
         >>> class Net(nn.Cell):
@@ -318,7 +318,7 @@ class Custom(ops.PrimitiveWithInfer):
         >>> import mindspore.ops as ops
         >>> import numpy as np
         >>> from mindspore.ops import CustomRegOp, custom_info_register, DataType, kernel
-        >>> from mindspore.common import dtype as mstype
+        >>> from mindspore import dtype as mstype
         >>> from mindspore.nn import Cell
         >>> input_x = Tensor(np.ones([16, 16]).astype(np.float32))
         >>> input_y = Tensor(np.ones([16, 16]).astype(np.float32))
@@ -56,7 +56,7 @@ class AdjustSaturation(Primitive):
         ...               [[7.0, 8.0, 9.0],
         ...                [10.0, 11.0, 12.0]]])
         >>> scale = Tensor(float(0.5))
-        >>> adjustsaturation = AdjustSaturation()
+        >>> adjustsaturation = ops.AdjustSaturation()
         >>> output = adjustsaturation(x, scale)
         >>> print(output)
         [[[ 2.         2.4999998  3.       ]
@@ -151,7 +151,7 @@ class AdjustHue(Primitive):
         >>> class AdjustHue(nn.Cell):
         ...     def __init__(self):
         ...         super(AdjustHue, self).__init__()
-        ...         self.adjustHue = P.AdjustHue()
+        ...         self.adjustHue = ops.AdjustHue()
         ...     def construct(self, image, delta):
         ...         return self.adjustHue(image, delta)
         ...
@@ -511,7 +511,7 @@ class HSVToRGB(Primitive):

     Examples:
         >>> image = np.array([0.5, 0.5, 0.5]).astype(np.float32).reshape([1, 1, 1, 3])
-        >>> hsv_to_rgb = P.HSVToRGB()
+        >>> hsv_to_rgb = ops.HSVToRGB()
         >>> output = hsv_to_rgb(Tensor(image))
         >>> print(output)
         [[[[0.25 0.5  0.5 ]]]]
@@ -721,7 +721,7 @@ class ResizeBilinearV2(Primitive):

     Examples:
         >>> x = Tensor([[[[1, 2, 3, 4, 5], [1, 2, 3, 4, 5]]]], mindspore.float32)
-        >>> output = ResizeBilinearV2(x, (5, 5))
+        >>> output = ops.ResizeBilinearV2(x, (5, 5))
         >>> print(output)
         [[[[1. 2. 3. 4. 5.]
            [1. 2. 3. 4. 5.]
@@ -790,7 +790,7 @@ class ResizeBicubic(Primitive):
         ...         super(NetResizeBicubic, self).__init__()
         ...         align_corners = False
         ...         half_pixel_centers = False
-        ...         self.resize = P.ResizeBicubic(align_corners, half_pixel_centers)
+        ...         self.resize = ops.ResizeBicubic(align_corners, half_pixel_centers)
         ...
         ...     def construct(self, images, size):
         ...         return self.resize(images, size)
@@ -1048,7 +1048,7 @@ class ScaleAndTranslate(Primitive):
         ``Ascend`` ``GPU`` ``CPU``

     Examples:
-        >>> op = ScaleAndTranslate()
+        >>> op = ops.ScaleAndTranslate()
         >>> image = Tensor(np.array([[[[9.0], [5.0], [2.0], [1.0]],
         ...                           [[6.0], [1.0], [9.0], [7.0]]]]), mindspore.float32)
         >>> size = Tensor(np.array([2, 2]).astype(np.int32))
@@ -1113,7 +1113,7 @@ class CombinedNonMaxSuppression(Primitive):
     Raises:
         TypeError: If the dtype of `boxes`, `scores` , `iou_threshold` , `score threshold` are not float32.
        TypeError: If the dtype of `max_output_size_per_class` and `max_total_size` are not int32.
-        ValueError: If `boxes`is not 4D.
+        ValueError: If `boxes` is not 4D.
         ValueError: If `max_output_size_per_class`, `max_total_size`, `iou_threshold` and `score threshold` are not 0D.
         ValueError: If shape[0] of `boxes` is not same with shape[0] of `scores`.
         ValueError: If `scores` is not 3D.

@@ -1137,7 +1137,7 @@ class CombinedNonMaxSuppression(Primitive):
         >>> max_total_size = Tensor(1, mstype.int32)
         >>> iou_threshold = Tensor(0, mstype.float32)
         >>> score_threshold = Tensor(0, mstype.float32)
-        >>> net = P.CombinedNonMaxSuppression()
+        >>> net = ops.CombinedNonMaxSuppression()
         >>> out = net(boxes, scores, max_output_size_per_class, max_total_size, iou_threshold, score_threshold)
         >>> print(out)
         (Tensor(shape=[1, 1, 4], dtype=Float32, value= [[[1.00000000e+00, 1.00000000e+00, 1.00000000e+00,
@@ -49,9 +49,8 @@ class Geqrf(Primitive):
         ``Ascend`` ``GPU`` ``CPU``

     Examples:
-        >>> from mindspore.ops.operations import linalg_ops as linalg
         >>> input_x = Tensor(np.array([[-2.0, -1.0], [1.0, 2.0]]).astype(np.float32))
-        >>> geqrf = linalg.Geqrf()
+        >>> geqrf = ops.Geqrf()
         >>> y, tau = geqrf(input_x)
         >>> print(y)
         [[ 2.236068   1.7888544]
@@ -97,10 +96,9 @@ class Svd(Primitive):

     Examples:
         >>> import numpy as np
-        >>> from mindspore import Tensor, set_context
-        >>> from mindspore.ops.operations import linalg_ops as linalg
+        >>> from mindspore import Tensor, ops, set_context
         >>> set_context(device_target="CPU")
-        >>> svd = linalg.Svd(full_matrices=True, compute_uv=True)
+        >>> svd = ops.Svd(full_matrices=True, compute_uv=True)
         >>> a = Tensor(np.array([[1, 2], [-4, -5], [2, 1]]).astype(np.float32))
         >>> s, u, v = svd(a)
         >>> print(s)
@@ -1375,7 +1375,8 @@ class Cdist(Primitive):

     Examples:
         >>> import numpy as np
-        >>> import mindspore.ops as ops
+        >>> import mindspore
+        >>> from mindspore import Tensor, ops
         >>> input_x = Tensor(np.array([[[1.0, 1.0], [2.0, 2.0]]]).astype(np.float32))
         >>> input_y = Tensor(np.array([[[3.0, 3.0], [3.0, 3.0]]]).astype(np.float32))
         >>> op = ops.Cdist(p=2.0)
@@ -1407,9 +1408,8 @@ class LpNorm(Primitive):
         ``Ascend`` ``GPU`` ``CPU``

     Examples:
-        >>> from mindspore.ops.operations.math_ops import LpNorm
         >>> input_x = Tensor(np.array([[[1.0, 2.0], [3.0, 4.0]], [[5.0, 6.0], [7.0, 8.0]]]).astype(np.float32))
-        >>> op = LpNorm(axis=[0, 1], p=2, keep_dims=False)
+        >>> op = ops.LpNorm(axis=[0, 1], p=2, keep_dims=False)
         >>> output = op(input_x)
         >>> print(output)
         [ 9.165152 10.954452]
@@ -1636,7 +1636,7 @@ class Betainc(Primitive):
         >>> a = Tensor(np.array([1, 1, 1]), mindspore.float32)
         >>> b = Tensor(np.array([1, 1, 1]), mindspore.float32)
         >>> x = Tensor(np.array([1, 1, 1]), mindspore.float32)
-        >>> betainc = P.Betainc()
+        >>> betainc = ops.Betainc()
         >>> print(betainc(a, b, x))
         [1. 1. 1.]
     """
@@ -3170,10 +3170,9 @@ class DivNoNan(Primitive):
``Ascend`` ``GPU`` ``CPU``

Examples:
>>> from mindspore.ops.operations.math_ops import DivNoNan
>>> x1 = Tensor(np.array([-1.0, 0., 1.0, 5.0, 6.0]), mindspore.float32)
>>> x2 = Tensor(np.array([0., 0., 0., 2.0, 3.0]), mindspore.float32)
>>> div_no_nan = DivNoNan()
>>> div_no_nan = ops.DivNoNan()
>>> output = div_no_nan(x1, x2)
>>> print(output)
[0. 0. 0. 2.5 2. ]
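The point of this example is visible in the first three positions: wherever the divisor is 0, DivNoNan returns 0 instead of inf or nan. As one runnable script:

import numpy as np
import mindspore
from mindspore import Tensor, ops

x1 = Tensor(np.array([-1.0, 0., 1.0, 5.0, 6.0]), mindspore.float32)
x2 = Tensor(np.array([0., 0., 0., 2.0, 3.0]), mindspore.float32)
div_no_nan = ops.DivNoNan()
print(div_no_nan(x1, x2))  # [0. 0. 0. 2.5 2.]
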
@@ -4007,10 +4006,9 @@ class Lerp(Primitive):
``Ascend`` ``GPU`` ``CPU``

Examples:
>>> from mindspore.ops.operations.math_ops import Lerp
>>> start = Tensor(np.array([1., 2., 3., 4.]), mindspore.float32)
>>> end = Tensor(np.array([10., 10., 10., 10.]), mindspore.float32)
>>> lerp = Lerp()
>>> lerp = ops.Lerp()
>>> output = lerp(start, end, 0.5)
>>> print(output)
[5.5 6. 6.5 7. ]
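Lerp computes start + weight * (end - start); with weight 0.5 the printed values are simply the midpoints. Runnable form of the corrected example:

import numpy as np
import mindspore
from mindspore import Tensor, ops

start = Tensor(np.array([1., 2., 3., 4.]), mindspore.float32)
end = Tensor(np.array([10., 10., 10., 10.]), mindspore.float32)
lerp = ops.Lerp()
print(lerp(start, end, 0.5))  # [5.5 6. 6.5 7.], the midpoint of each pair
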
@@ -5354,8 +5352,7 @@ class Inv(Primitive):
``Ascend`` ``GPU`` ``CPU``

Examples:
>>> from mindspore.ops.operations.math_ops import Inv
>>> inv = Inv()
>>> inv = ops.Inv()
>>> x = Tensor(np.array([0.25, 0.4, 0.31, 0.52]), mindspore.float32)
>>> output = inv(x)
>>> print(output)

@@ -5377,8 +5374,7 @@ class Invert(Primitive):
``Ascend`` ``GPU`` ``CPU``

Examples:
>>> from mindspore.ops.operations.math_ops import Invert
>>> invert = Invert()
>>> invert = ops.Invert()
>>> x = Tensor(np.array([25, 4, 13, 9]), mindspore.int16)
>>> output = invert(x)
>>> print(output)
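Both truncated doctests combined into one script; Inv is the element-wise reciprocal and Invert is the element-wise bitwise NOT, so the expected outputs in the comments follow directly from those definitions:

import numpy as np
import mindspore
from mindspore import Tensor, ops

inv = ops.Inv()
print(inv(Tensor(np.array([0.25, 0.4, 0.31, 0.52]), mindspore.float32)))
# [4. 2.5 3.2258065 1.923077], i.e. 1/x per element
invert = ops.Invert()
print(invert(Tensor(np.array([25, 4, 13, 9]), mindspore.int16)))
# [-26 -5 -14 -10], i.e. ~x in two's complement
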
@@ -5571,9 +5567,8 @@ class MatrixDeterminant(Primitive):
``GPU`` ``CPU``

Examples:
>>> from mindspore.ops.operations.math_ops import MatrixDeterminant
>>> input_x = Tensor(np.array([[[-4.5, -1.5], [7.0, 6.0]], [[2.5, 0.5], [3.0, 9.0]]]), mindspore.float32)
>>> op = MatrixDeterminant()
>>> op = ops.MatrixDeterminant()
>>> output = op(input_x)
>>> print(output)
[-16.5 21. ]

@@ -5596,9 +5591,8 @@ class LogMatrixDeterminant(Primitive):
``GPU`` ``CPU``

Examples:
>>> from mindspore.ops.operations.math_ops import LogMatrixDeterminant
>>> input_x = Tensor(np.array([[[-4.5, -1.5], [7.0, 6.0]], [[2.5, 0.5], [3.0, 9.0]]]), mindspore.float32)
>>> op = LogMatrixDeterminant()
>>> op = ops.LogMatrixDeterminant()
>>> sign, output = op(input_x)
>>> print(sign)
[-1. 1.]
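The printed values check out by hand (ad - bc gives -27 + 10.5 = -16.5 and 22.5 - 1.5 = 21), and LogMatrixDeterminant factors each determinant into its sign and the log of its absolute value:

import numpy as np
import mindspore
from mindspore import Tensor, ops

input_x = Tensor(np.array([[[-4.5, -1.5], [7.0, 6.0]], [[2.5, 0.5], [3.0, 9.0]]]), mindspore.float32)
print(ops.MatrixDeterminant()(input_x))      # [-16.5  21. ]
sign, logdet = ops.LogMatrixDeterminant()(input_x)
print(sign, logdet)                          # [-1. 1.] and [log(16.5) log(21.)] ~ [2.8034 3.0445]
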
@@ -5635,7 +5629,7 @@ class MatrixLogarithm(Primitive):

Examples:
>>> x = Tensor([[1 + 2j, 2 + 1j], [4 + 1j, 5 + 2j]])
>>> matrix_logarithm = MatrixLogarithm()
>>> matrix_logarithm = ops.MatrixLogarithm()
>>> y = matrix_logarithm(x)
>>> print(y)
[[0.69155775+1.71618359j 0.64665196-0.34928196j]
@@ -6146,9 +6140,10 @@ class IsClose(Primitive):
``Ascend`` ``GPU`` ``CPU``

Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor
>>> from mindspore.ops.operations.math_ops import IsClose
>>> from mindspore.ops import IsClose
>>> input = Tensor(np.array([1.3, 2.1, 3.2, 4.1, 5.1]), mindspore.float16)
>>> other = Tensor(np.array([1.3, 3.3, 2.3, 3.1, 5.1]), mindspore.float16)
>>> isclose = IsClose()
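The hunk cuts off before the call; completed as a script (assuming the primitive takes the two tensors positionally, as the functional `ops.isclose` does):

import numpy as np
import mindspore
from mindspore import Tensor
from mindspore.ops import IsClose

input = Tensor(np.array([1.3, 2.1, 3.2, 4.1, 5.1]), mindspore.float16)
other = Tensor(np.array([1.3, 3.3, 2.3, 3.1, 5.1]), mindspore.float16)
isclose = IsClose()
print(isclose(input, other))  # [ True False False False  True] with the default tolerances
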
@@ -6486,7 +6481,7 @@ class Digamma(Primitive):

Examples:
>>> x = Tensor(np.array([1.5, 0.5, 9]).astype(np.float16))
>>> digamma = P.Digamma()
>>> digamma = ops.Digamma()
>>> output = digamma(x)
>>> print(output)
[ 0.0365 -1.964 2.14 ]

@@ -6634,6 +6629,7 @@ class RaggedRange(Primitive):
- if type of the input `starts`, input `limits` and input `deltas`
are float32 or float64, shape of the output `rt_dense_values` is equal to
sum(ceil(abs((limits[i] - starts[i]) / deltas[i]))).

Raises:
TypeError: If any input is not Tensor.
TypeError: If the type of `starts` is not one of the following dtype: int32, int64, float32, float64.
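The `rt_dense_values` length formula in the RaggedRange hunk can be checked with plain numpy (the inputs here are hypothetical):

import numpy as np

starts = np.array([0.0, 5.0])
limits = np.array([10.0, 6.0])
deltas = np.array([3.0, 1.0])
# The two ranges are [0, 3, 6, 9] and [5], so rt_dense_values holds 4 + 1 = 5 elements.
length = int(np.sum(np.ceil(np.abs((limits - starts) / deltas))))
print(length)  # 5
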
@@ -6824,7 +6820,7 @@ class Zeta(Primitive):
Examples:
>>> x = Tensor(np.array([10.]), mindspore.float32)
>>> q = Tensor(np.array([1.]), mindspore.float32)
>>> zeta = P.Zeta()
>>> zeta = ops.Zeta()
>>> z = zeta(x, q)
>>> print(z)
[1.0009946]
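The printed value is easy to sanity-check: with q = 1 the Hurwitz zeta reduces to the Riemann zeta, and zeta(10) has the closed form pi^10 / 93555.

import math

print(math.pi ** 10 / 93555)  # 1.0009945751278182, matching the doctest output [1.0009946]
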
@@ -6930,7 +6926,7 @@ class Renorm(Primitive):

Examples:
>>> x = Tensor(np.array([[1, 1, 1], [2, 2, 2], [3, 3, 3]]), mindspore.float32)
>>> y = Renorm(p=1, dim=0, maxnorm=5.)(x)
>>> y = ops.Renorm(p=1, dim=0, maxnorm=5.)(x)
>>> print(y)
[[1. 1. 1. ]
[1.6666666 1.6666666 1.6666666 ]
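What Renorm does here: the L1 norms of the three dim-0 slices are 3, 6 and 9, and every slice whose norm exceeds maxnorm = 5 is rescaled by 5/norm, which is why the last two rows both land on 1.6666666. The same arithmetic in numpy:

import numpy as np

x = np.array([[1, 1, 1], [2, 2, 2], [3, 3, 3]], dtype=np.float32)
norms = np.abs(x).sum(axis=1, keepdims=True)     # [[3.], [6.], [9.]]
scale = np.where(norms > 5.0, 5.0 / norms, 1.0)  # only slices over the max norm shrink
print(x * scale)                                 # matches the Renorm doctest output
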
@@ -6999,7 +6995,7 @@ class STFT(Primitive):

Examples:
>>> import mindspore as ms
>>> from mindspore.ops.operations.math_ops import STFT
>>> from mindspore.ops import STFT
>>> import numpy as np
>>> x = ms.Tensor(np.random.rand(2,7192), ms.float32)
>>> window = ms.Tensor(np.random.rand(64), ms.float32)

@@ -155,8 +155,9 @@ class AdaptiveAvgPool3D(Primitive):

Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import nn, Tensor
>>> from mindspore.ops.operations.nn_ops import AdaptiveAvgPool3D
>>> from mindspore.ops import AdaptiveAvgPool3D
>>> class AdaptiveAvgPool3DNet(nn.Cell):
... def __init__(self, output_size):
... super(AdaptiveAvgPool3DNet, self).__init__()
@@ -543,9 +544,8 @@ class Softsign(Primitive):
``Ascend`` ``GPU`` ``CPU``

Examples:
>>> from mindspore.ops.operations.nn_ops import Softsign
>>> input_x = Tensor(np.array([0, -1, 2, 30, -30]), mindspore.float32)
>>> softsign = Softsign()
>>> softsign = ops.Softsign()
>>> output = softsign(input_x)
>>> print(output)
[ 0. -0.5 0.6666667 0.9677419 -0.9677419]

@@ -696,9 +696,8 @@ class SeLU(Primitive):
``Ascend`` ``GPU`` ``CPU``

Examples:
>>> from mindspore.ops.operations.nn_ops import SeLU
>>> input_x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
>>> selu = SeLU()
>>> selu = ops.SeLU()
>>> output = selu(input_x)
>>> print(output)
[[-1.1113307 4.202804 -1.7575096]
@@ -1430,7 +1429,7 @@ class DataFormatVecPermute(Primitive):
>>> class Net(nn.Cell):
... def __init__(self, src_format="NHWC", dst_format="NCHW"):
... super().__init__()
... self.op = P.nn_ops.DataFormatVecPermute(src_format, dst_format)
... self.op = ops.DataFormatVecPermute(src_format, dst_format)
... def construct(self, x):
... return self.op(x)
...
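Exercising the Net above (a sketch: a 4-element shape vector in NHWC order is permuted into NCHW order, so index 3, the channel count, moves to position 1):

import numpy as np
from mindspore import Tensor, nn, ops

class Net(nn.Cell):
    def __init__(self, src_format="NHWC", dst_format="NCHW"):
        super().__init__()
        self.op = ops.DataFormatVecPermute(src_format, dst_format)
    def construct(self, x):
        return self.op(x)

x = Tensor(np.array([1, 2, 3, 4]).astype(np.int32))  # [N, H, W, C]
print(Net()(x))                                      # [1 4 2 3], i.e. [N, C, H, W]
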
@@ -2120,7 +2119,7 @@ class MaxUnpool2D(Primitive):
Examples:
>>> x = Tensor(np.array([[[[0, 1], [8, 9]]]]).astype(np.float32))
>>> argmax = Tensor(np.array([[[[0, 1], [2, 3]]]]).astype(np.int64))
>>> maxunpool2d = P.MaxUnpool2D(ksize=1, strides=1, pads=0)
>>> maxunpool2d = ops.MaxUnpool2D(ksize=1, strides=1, pads=0)
>>> output = maxunpool2d(x, argmax)
>>> print(output.asnumpy())
[[[[0. 1.]

@@ -3097,7 +3096,7 @@ class L2Loss(Primitive):
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``

Examples
Examples:
>>> input_x = Tensor(np.array([1, 2, 3]), mindspore.float16)
>>> l2_loss = L2Loss()
>>> output = l2_loss(input_x)
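L2Loss computes sum(x ** 2) / 2, so the example's expected result is (1 + 4 + 9) / 2 = 7. As a script (assuming `L2Loss` is also reachable as `ops.L2Loss`, consistent with the other hunks in this PR):

import numpy as np
import mindspore
from mindspore import Tensor, ops

input_x = Tensor(np.array([1, 2, 3]), mindspore.float16)
l2_loss = ops.L2Loss()
print(l2_loss(input_x))  # 7.0
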
@@ -3770,12 +3769,12 @@ class UpsampleTrilinear3D(Primitive):
``Ascend`` ``CPU`` ``GPU``

Examples:
>>> ops = P.UpsampleTrilinear3D(output_size=[4, 64, 48])
>>> ops = ops.UpsampleTrilinear3D(output_size=[4, 64, 48])
>>> out = ops(Tensor(input_data=np.random.randn(2, 3, 4, 512, 256)))
>>> print(out.shape)
(2, 3, 4, 64, 48)
...
>>> ops = P.UpsampleTrilinear3D(output_size=[2, 4, 4])
>>> ops = ops.UpsampleTrilinear3D(output_size=[2, 4, 4])
>>> in_x = Tensor(np.arange(1, 5, dtype=np.float32).reshape((1, 1, 1, 2, 2)))
>>> out = ops(in_x)
>>> print(out)
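One caveat with the corrected lines here: `ops = ops.UpsampleTrilinear3D(...)` rebinds the name `ops` and shadows the module, so a second use of `ops.` in the same session would fail. A sketch that keeps the module name intact:

import numpy as np
from mindspore import Tensor, ops

upsample = ops.UpsampleTrilinear3D(output_size=[2, 4, 4])  # avoid rebinding `ops`
in_x = Tensor(np.arange(1, 5, dtype=np.float32).reshape((1, 1, 1, 2, 2)))
out = upsample(in_x)
print(out.shape)  # (1, 1, 2, 4, 4)
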
@@ -6976,8 +6975,7 @@ class Dropout2D(PrimitiveWithInfer):
``Ascend`` ``GPU`` ``CPU``

Examples:
>>> from mindspore.ops.operations.nn_ops import Dropout2D
>>> dropout = Dropout2D(keep_prob=0.5)
>>> dropout = ops.Dropout2D(keep_prob=0.5)
>>> x = Tensor(np.ones([2, 1, 2, 3]), mindspore.float32)
>>> output, mask = dropout(x)
>>> print(output.shape)
@@ -8490,7 +8488,7 @@ class ApplyAdagradDA(Primitive):
>>> class ApplyAdagradDANet(nn.Cell):
... def __init__(self, use_locking=False):
... super(ApplyAdagradDANet, self).__init__()
... self.apply_adagrad_d_a = P.ApplyAdagradDA(use_locking)
... self.apply_adagrad_d_a = ops.ApplyAdagradDA(use_locking)
... self.var = Parameter(Tensor(np.array([[0.6, 0.4], [0.1, 0.5]]).astype(np.float32)), name="var")
... self.gradient_accumulator = Parameter(Tensor(np.array([[0.1, 0.3],
... [0.1, 0.5]]).astype(np.float32)),

@@ -9042,7 +9040,7 @@ class GridSampler3D(Primitive):
``Ascend`` ``GPU`` ``CPU``

Examples:
>>> gridsampler = GridSampler3D(interpolation_mode='bilinear', padding_mode='zeros', align_corners=True)
>>> gridsampler = ops.GridSampler3D(interpolation_mode='bilinear', padding_mode='zeros', align_corners=True)
>>> input_x = Tensor(np.arange(32).reshape((2, 2, 2, 2, 2)).astype(np.float32))
>>> grid = Tensor(np.arange(-0.2, 1, 0.1).reshape((2, 2, 1, 1, 3)).astype(np.float32))
>>> output = gridsampler(input_x, grid)
@@ -9363,7 +9361,7 @@ class NthElement(Primitive):
Examples:
>>> input = Tensor(np.array([[1,2,3],[4,5,6]]) , mstype.int8)
>>> n = 1
>>> net = P.NthElement()
>>> net = ops.NthElement()
>>> out = net(input, n)
>>> print(out)
[2 5]

@@ -9418,8 +9416,7 @@ class PSROIPooling(Primitive):
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor
>>> from mindspore.ops.operations import nn_ops
>>> from mindspore import Tensor, ops
>>> features = np.random.randn(4, 3 * 7 * 7, 80, 48)
>>> features = Tensor.from_numpy(features).astype(mindspore.float32)
>>> rois = Tensor.from_numpy(

@@ -9443,7 +9440,7 @@ class PSROIPooling(Primitive):
... [387.4919],
... [778.7322],
... [562.7321]]])).astype(mindspore.float32)
>>> psROIPooling = nn_ops.PSROIPooling(spatial_scale=1.0/16, output_dim=3,
>>> psROIPooling = ops.PSROIPooling(spatial_scale=1.0/16, output_dim=3,
... group_size=7)
>>> out = psROIPooling(features, rois)
>>> print(out.shape)
@@ -9645,7 +9642,7 @@ class GridSampler2D(Primitive):
``Ascend`` ``CPU`` ``GPU``

Examples:
>>> gridsampler = GridSampler2D(interpolation_mode='bilinear', padding_mode='zeros', align_corners=True)
>>> gridsampler = ops.GridSampler2D(interpolation_mode='bilinear', padding_mode='zeros', align_corners=True)
>>> input_x = Tensor(np.arange(16).reshape((2, 2, 2, 2)).astype(np.float32))
>>> grid = Tensor(np.arange(-9, 9, 0.5).reshape((2, 3, 3, 2)).astype(np.float32))
>>> output = gridsampler(input_x, grid)
@@ -10249,6 +10246,7 @@ class FractionalMaxPoolWithFixedKsize(Primitive):
- **y** (Tensor) - Has the same type as the `input_x`.
Has the shape :math:`(N, C, output\_shape_{H}, output\_shape_{W})`.
- **argmax** (Tensor) - A tensor whose data type must be int64. Has the same shape as the `y`.

Raises:
TypeError: If data type of `input_x` is not one of the following: float16, float32, float64, int32, int64.
TypeError: If data type of `random_samples` is not one of the following: float16, float32, float64.

@@ -10266,10 +10264,6 @@ class FractionalMaxPoolWithFixedKsize(Primitive):

Examples:
>>> # the ksize is an int number and the output_shape is a tuple.
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor
>>> from mindspore.ops.operations import nn_ops
>>> ksize = 2
>>> output_shape = (2,2)
>>> data_format = "NCHW"

@@ -10279,7 +10273,7 @@ class FractionalMaxPoolWithFixedKsize(Primitive):
... 0.4980, 0.9673, 0.9879, 0.6988, 0.9022,
... 0.9304, 0.1558, 0.0153, 0.1559, 0.9852]).reshape([1, 1, 5, 5]), mstype.float32)
>>> random_samples = Tensor(np.array([[[0.8, 0.8]]]), mstype.float32)
>>> net = nn_ops.FractionalMaxPoolWithFixedKsize(ksize, output_shape, data_format)
>>> net = ops.FractionalMaxPoolWithFixedKsize(ksize, output_shape, data_format)
>>> y, argmax = net(input_x, random_samples)
>>> print(y)
[[[[0.9545 0.8764]
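The 5x5 input tensor is only partially visible in the hunk above, so this sketch substitutes an arange input of the same shape; everything else mirrors the corrected doctest, from which the argument order and the (1, 1, 2) `random_samples` shape are taken:

import numpy as np
from mindspore import Tensor, ops
from mindspore import dtype as mstype

ksize = 2
output_shape = (2, 2)
data_format = "NCHW"
input_x = Tensor(np.arange(25, dtype=np.float32).reshape([1, 1, 5, 5]), mstype.float32)
random_samples = Tensor(np.array([[[0.8, 0.8]]]), mstype.float32)
net = ops.FractionalMaxPoolWithFixedKsize(ksize, output_shape, data_format)
y, argmax = net(input_x, random_samples)
print(y.shape, argmax.shape)  # (1, 1, 2, 2) (1, 1, 2, 2)
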
@@ -284,7 +284,7 @@ class LogNormalReverse(Primitive):

Inputs:
- **input** (Tensor) - The tensor to be generated with log-normal distribution.
Must be one of the following types: float16, float32, float64.
Must be one of the following types: float16, float32, float64.

Outputs:
Tensor. A Tensor with the same type and shape of input.

@@ -296,7 +296,6 @@ class LogNormalReverse(Primitive):
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``

Examples:
>>> x = Tensor(np.random.randn(3,4),mstype.float64)
>>> mean = 2.0
@@ -37,7 +37,7 @@ class Primitive(Primitive_):
name (str): Name for the current Primitive.

Examples:
>>> from mindspore.ops.primitive import prim_attr_register, Primitive
>>> from mindspore.ops import prim_attr_register, Primitive
>>> add = Primitive('add')
>>>
>>> # or work with prim_attr_register:

@@ -249,6 +249,7 @@ class Primitive(Primitive_):

Args:
instance_name (str): Instance name of primitive operator set by user.

Examples:
>>> import mindspore.ops as ops
>>> a = ops.Add()

@@ -283,9 +284,10 @@ class Primitive(Primitive_):
the second element is calculated result.

Examples:
>>> from mindspore.ops.primitive import prim_attr_register, Primitive
>>> from mindspore import Tensor
>>> import numpy as np
>>> import mindspore
>>> from mindspore import Tensor
>>> from mindspore.ops import prim_attr_register, Primitive
>>> class AddN(Primitive):
... @prim_attr_register
... def __init__(self):

@@ -369,6 +371,7 @@ class Primitive(Primitive_):

Args:
mode (bool): Specifies whether the primitive is recomputed. Default: True.

Examples:
>>> import numpy as np
>>> import mindspore as ms

@@ -473,7 +476,7 @@ class PrimitiveWithCheck(Primitive):
``Ascend`` ``GPU`` ``CPU``

Examples:
>>> from mindspore.ops.primitive import prim_attr_register, PrimitiveWithCheck
>>> from mindspore.ops import prim_attr_register, PrimitiveWithCheck
>>> # init a Primitive class with check
>>> class Flatten(PrimitiveWithCheck):
... @prim_attr_register

@@ -555,7 +558,7 @@ class PrimitiveWithInfer(Primitive):
``Ascend`` ``GPU`` ``CPU``

Examples:
>>> from mindspore.ops.primitive import prim_attr_register, PrimitiveWithCheck
>>> from mindspore.ops import prim_attr_register, PrimitiveWithInfer
>>> # init a Primitive class with infer
>>> class Add(PrimitiveWithInfer):
... @prim_attr_register

@@ -689,7 +692,7 @@ def prim_attr_register(fn):
function, original function.

Examples:
>>> from mindspore.ops.primitive import prim_attr_register, PrimitiveWithCheck
>>> from mindspore.ops import prim_attr_register, PrimitiveWithCheck
>>> class MatMul(PrimitiveWithCheck):
... @prim_attr_register
... def __init__(self, transpose_a=False, transpose_b=False):
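The hunks in this file all converge on the same import path, `from mindspore.ops import prim_attr_register, Primitive` and friends. A minimal end-to-end sketch of that pattern (the printed name assumes the decorator registers the class name, as the Primitive docs describe):

from mindspore.ops import prim_attr_register, Primitive

class MyAdd(Primitive):
    @prim_attr_register
    def __init__(self):
        """init MyAdd: the decorator registers __init__ arguments as primitive attributes"""

my_add = MyAdd()
print(my_add.name)  # MyAdd
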
@@ -72,8 +72,7 @@ def get_vm_impl_fn(prim):
function, vm function

Examples:
>>> from mindspore.ops import vm_impl_registry
>>> from mindspore.ops.vm_impl_registry import get_vm_impl_fn
>>> from mindspore.ops import vm_impl_registry, get_vm_impl_fn
...
>>> @vm_impl_registry.register("Type")
... def vm_impl_dtype(self):