forked from mindspore-Ecosystem/mindspore
!8771 [#I24U3E][#I24U50][#I24TZT][#I24U7V] BUG-Fixed: [CT][MS][Document] the example in doc has no print
From: @david-he91 Reviewed-by: @liangchenghui Signed-off-by: @liangchenghui
commit c12e3876cc
@@ -517,10 +517,9 @@ class LogSigmoid(Cell):
 Examples:
 >>> net = nn.LogSigmoid()
 >>> input_x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
- >>> logsigmoid = net(input_x)
- >>> print(logsigmoid)
- [-3.1326166e-01, -1.2692806e-01, -4.8587345e-02]
-
+ >>> output = net(input_x)
+ >>> print(output)
+ [-0.31326166 -0.12692806 -0.04858734]
 """

 def __init__(self):
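For reference, the corrected example runs as a standalone script once the imports the docstring leaves implicit are in scope (a minimal sketch; the import lines are assumptions, not part of this diff):

import numpy as np
import mindspore
from mindspore import Tensor, nn

# Corrected LogSigmoid example: compute log(sigmoid(x)) elementwise and print it.
net = nn.LogSigmoid()
input_x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
output = net(input_x)
print(output)
# Expected, per the new docstring: [-0.31326166 -0.12692806 -0.04858734]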
@@ -78,10 +78,10 @@ class Dropout(Cell):
 >>> net.set_train()
 >>> output = net(x)
 >>> print(output)
- [[[0., 1.25, 0.],
-   [1.25, 1.25, 1.25]],
-  [[1.25, 1.25, 1.25],
-   [1.25, 1.25, 1.25]]]
+ [[[0. 1.25 0. ]
+   [1.25 1.25 1.25]]
+  [[1.25 1.25 1.25]
+   [1.25 1.25 1.25]]]
 """

 def __init__(self, keep_prob=0.5, dtype=mstype.float32):
@@ -320,8 +320,8 @@ class ClipByNorm(Cell):
 >>> net = nn.ClipByNorm()
 >>> input = Tensor(np.random.randint(0, 10, [4, 16]), mindspore.float32)
 >>> clip_norm = Tensor(np.array([100]).astype(np.float32))
- >>> result = net(input, clip_norm).shape
- >>> print(result)
+ >>> output = net(input, clip_norm)
+ >>> print(output.shape)
 (4, 16)

 """
@@ -392,7 +392,7 @@ class Norm(Cell):
 >>> input = Tensor(np.random.randint(0, 10, [2, 4]), mindspore.float32)
 >>> output = net(input)
 >>> print(output)
- [2.236068 9.848858 4. 5.656854]
+ [7.81025 6.708204 0. 8.602325]
 """

 def __init__(self, axis=(), keep_dims=False):
@@ -514,7 +514,12 @@ class Pad(Cell):
 ...         return self.pad(x)
 >>> x = np.random.random(size=(2, 3)).astype(np.float32)
 >>> pad = Net()
- >>> ms_output = pad(Tensor(x))
+ >>> output = pad(Tensor(x))
+ >>> print(output)
+ [[0. 0. 0. 0. 0. 0. ]
+  [0. 0. 0.82691735 0.36147234 0.70918983 0. ]
+  [0. 0. 0.7842975 0.44726616 0.4353459 0. ]
+  [0. 0. 0. 0. 0. 0. ]]
 """

 def __init__(self, paddings, mode="CONSTANT"):
@@ -574,9 +579,8 @@ class Unfold(Cell):
 >>> net = Unfold(ksizes=[1, 2, 2, 1], strides=[1, 2, 2, 1], rates=[1, 2, 2, 1])
 >>> image = Tensor(np.ones([2, 3, 6, 6]), dtype=mstype.float16)
 >>> output = net(image)
- >>> print(output)
- [[[[1, 1] [1, 1]] [[1, 1], [1, 1]] [[1, 1] [1, 1]], [[1, 1] [1, 1]], [[1, 1] [1, 1]],
-  [[1, 1], [1, 1]]]]
+ >>> print(output.shape)
+ (2, 12, 2, 2)
 """

 def __init__(self, ksizes, strides, rates, padding="valid"):
@@ -627,8 +631,8 @@ class MatrixDiag(Cell):
 Examples:
 >>> x = Tensor(np.array([1, -1]), mstype.float32)
 >>> matrix_diag = nn.MatrixDiag()
- >>> result = matrix_diag(x)
- >>> print(result)
+ >>> output = matrix_diag(x)
+ >>> print(output)
 [[1. 0.]
  [0. -1.]]
 """
@@ -659,9 +663,11 @@ class MatrixDiagPart(Cell):
 Examples:
 >>> x = Tensor([[[-1, 0], [0, 1]], [[-1, 0], [0, 1]], [[-1, 0], [0, 1]]], mindspore.float32)
 >>> matrix_diag_part = nn.MatrixDiagPart()
- >>> result = matrix_diag_part(x)
- >>> print(result)
- [[-1., 1.], [-1., 1.], [-1., 1.]]
+ >>> output = matrix_diag_part(x)
+ >>> print(output)
+ [[-1. 1.]
+  [-1. 1.]
+  [-1. 1.]]
 """
 def __init__(self):
     super(MatrixDiagPart, self).__init__()
@@ -692,9 +698,14 @@ class MatrixSetDiag(Cell):
 >>> x = Tensor([[[-1, 0], [0, 1]], [[-1, 0], [0, 1]], [[-1, 0], [0, 1]]], mindspore.float32)
 >>> diagonal = Tensor([[-1., 2.], [-1., 1.], [-1., 1.]], mindspore.float32)
 >>> matrix_set_diag = nn.MatrixSetDiag()
- >>> result = matrix_set_diag(x, diagonal)
- >>> print(result)
- [[[-1, 0], [0, 2]], [[-1, 0], [0, 1]], [[-1, 0], [0, 1]]]
+ >>> output = matrix_set_diag(x, diagonal)
+ >>> print(output)
+ [[[-1. 0.]
+   [ 0. 2.]]
+  [[-1. 0.]
+   [ 0. 1.]]
+  [[-1. 0.]
+   [ 0. 1.]]]
 """
 def __init__(self):
     super(MatrixSetDiag, self).__init__()
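The ClipByNorm hunk above switches the example from printing a precomputed shape to printing output.shape directly. A minimal runnable sketch of that pattern (imports assumed; the input is random, so only the shape is stable):

import numpy as np
import mindspore
from mindspore import Tensor, nn

# Clip a random matrix to a maximum L2 norm of 100; the shape is unchanged.
net = nn.ClipByNorm()
x = Tensor(np.random.randint(0, 10, [4, 16]), mindspore.float32)
clip_norm = Tensor(np.array([100]).astype(np.float32))
output = net(x, clip_norm)
print(output.shape)
# (4, 16)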
@@ -85,7 +85,6 @@ class SequentialCell(Cell):
 >>> bn = nn.BatchNorm2d(2)
 >>> relu = nn.ReLU()
 >>> seq = nn.SequentialCell([conv, bn, relu])
- >>>
 >>> x = Tensor(np.random.random((1, 3, 4, 4)), dtype=mindspore.float32)
 >>> output = seq(x)
 >>> print(output)
@@ -158,10 +157,10 @@ class SequentialCell(Cell):
 >>> x = Tensor(np.ones([1, 3, 4, 4]), dtype=mindspore.float32)
 >>> output = seq(x)
 >>> print(output)
- [[[[0.12445523 0.12445523]
-    [0.12445523 0.12445523]]
-   [[0. 0. ]
-    [0. 0. ]]]]
+ [[[[0.08789019 0.08789019]
+    [0.08789019 0.08789019]]
+   [[0.07690391 0.07690391]
+    [0.07690391 0.07690391]]]]
 """
 if _valid_cell(cell):
     self._cells[str(len(self))] = cell
@@ -195,9 +194,11 @@ class CellList(_CellListBase, Cell):
 >>> x = Tensor(np.random.random((1, 3, 4, 4)), dtype=mindspore.float32)
 >>> # not same as nn.SequentialCell, `cell_ls(x)` is not correct
 >>> cell_ls
- CellList< (0): Conv2d<input_channels=100, ..., bias_init=None>
+ CellList<
+   (0): Conv2d<input_channels=100, ..., bias_init=None>
   (1): BatchNorm2d<num_features=20, ..., moving_variance=Parameter (name=variance)>
- (2): ReLU<> >
+   (2): ReLU<>
+   >
 """
 def __init__(self, *args):
     _CellListBase.__init__(self)
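The SequentialCell examples depend on a `conv` cell defined earlier in the docstring; a self-contained sketch of the same pipeline (the Conv2d arguments here are illustrative assumptions, not the docstring's):

import numpy as np
import mindspore
from mindspore import Tensor, nn

# Build conv -> batchnorm -> relu as one cell and run a random input through it.
conv = nn.Conv2d(3, 2, 3, pad_mode='valid')
bn = nn.BatchNorm2d(2)
relu = nn.ReLU()
seq = nn.SequentialCell([conv, bn, relu])
x = Tensor(np.random.random((1, 3, 4, 4)), dtype=mindspore.float32)
output = seq(x)
print(output.shape)  # values are random; the shape is deterministic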
@@ -52,13 +52,14 @@ class ImageGradients(Cell):

 Examples:
 >>> net = nn.ImageGradients()
- >>> image = Tensor(np.array([[[[1,2],[3,4]]]]), dtype=mstype.int32)
+ >>> image = Tensor(np.array([[[[1,2],[3,4]]]]), dtype=mindspore.int32)
 >>> output = net(image)
 >>> print(output)
- [[[[2,2]
-    [0,0]]]]
- [[[[1,0]
-    [1,0]]]]
+ (Tensor(shape=[1, 1, 2, 2], dtype=Int32, value=
+ [[[[2, 2],
+    [0, 0]]]]), Tensor(shape=[1, 1, 2, 2], dtype=Int32, value=
+ [[[[1, 0],
+    [1, 0]]]]))
 """
 def __init__(self):
     super(ImageGradients, self).__init__()
@@ -214,8 +215,8 @@ class SSIM(Cell):
 >>> net = nn.SSIM()
 >>> img1 = Tensor(np.random.random((1,3,16,16)), mindspore.float32)
 >>> img2 = Tensor(np.random.random((1,3,16,16)), mindspore.float32)
- >>> ssim = net(img1, img2)
- >>> print(ssim)
+ >>> output = net(img1, img2)
+ >>> print(output)
 [0.12174469]
 """
 def __init__(self, max_val=1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03):
@@ -292,9 +293,9 @@ class MSSSIM(Cell):
 >>> net = nn.MSSSIM(power_factors=(0.033, 0.033, 0.033))
 >>> img1 = Tensor(np.random.random((1,3,128,128)))
 >>> img2 = Tensor(np.random.random((1,3,128,128)))
- >>> result = net(img1, img2)
- >>> print(result)
- [0.20930639]
+ >>> output = net(img1, img2)
+ >>> print(output)
+ [0.22965115]
 """
 def __init__(self, max_val=1.0, power_factors=(0.0448, 0.2856, 0.3001, 0.2363, 0.1333), filter_size=11,
              filter_sigma=1.5, k1=0.01, k2=0.03):
@@ -382,9 +383,9 @@ class PSNR(Cell):
 >>> net = nn.PSNR()
 >>> img1 = Tensor(np.random.random((1,3,16,16)))
 >>> img2 = Tensor(np.random.random((1,3,16,16)))
- >>> psnr = net(img1, img2)
- >>> print(psnr)
- [7.8297315]
+ >>> output = net(img1, img2)
+ >>> print(output)
+ [7.7229595]
 """
 def __init__(self, max_val=1.0):
     super(PSNR, self).__init__()
@@ -452,8 +453,7 @@ class CentralCrop(Cell):
 >>> net = nn.CentralCrop(central_fraction=0.5)
 >>> image = Tensor(np.random.random((4, 3, 4, 4)), mindspore.float32)
 >>> output = net(image)
- >>> result = output.shape
- >>> print(result)
+ >>> print(output.shape)
 (4, 3, 2, 2)
 """

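As the CentralCrop hunk above shows, examples built on np.random inputs can only promise a stable shape, which is why the new text prints output.shape. A minimal sketch (imports assumed):

import numpy as np
import mindspore
from mindspore import Tensor, nn

# Crop the central 50% of each spatial dimension: 4x4 images become 2x2.
net = nn.CentralCrop(central_fraction=0.5)
image = Tensor(np.random.random((4, 3, 4, 4)), mindspore.float32)
output = net(image)
print(output.shape)
# (4, 3, 2, 2)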
@@ -64,8 +64,7 @@ class ReduceLogSumExp(Cell):
 >>> input_x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
 >>> op = nn.ReduceLogSumExp(1, keep_dims=True)
 >>> output = op(input_x)
- >>> result = output.shape
- >>> print(reuslt)
+ >>> print(output.shape)
 (3, 1, 5, 6)
 """

@@ -101,9 +100,9 @@ class Range(Cell):

 Examples:
 >>> net = nn.Range(1, 8, 2)
- >>> out = net()
- >>> print(out)
- [1, 3, 5, 7]
+ >>> output = net()
+ >>> print(output)
+ [1 3 5 7]
 """

 def __init__(self, start, limit=None, delta=1):
@@ -157,7 +156,7 @@ class LinSpace(Cell):
 >>> linspace = nn.LinSpace(1, 10, 5)
 >>> output = linspace()
 >>> print(output)
- [1, 3.25, 5.5, 7.75, 10]
+ [ 1. 3.25 5.5 7.75 10. ]
 """

 def __init__(self, start, stop, num):
@@ -230,6 +229,7 @@ class LGamma(Cell):
 >>> input_x = Tensor(np.array([2, 3, 4]).astype(np.float32))
 >>> op = nn.LGamma()
 >>> output = op(input_x)
+ >>> print(output)
 [3.5762787e-07 6.9314754e-01 1.7917603e+00]
 """

@@ -830,9 +830,13 @@ class Moments(Cell):
 Examples:
 >>> net = nn.Moments(axis=3, keep_dims=True)
 >>> input_x = Tensor(np.array([[[[1, 2, 3, 4], [3, 4, 5, 6]]]]), mindspore.float32)
- >>> mean, var = net(input_x)
- mean: [[[[2.5], [4.5]]]]
- var: [[[[1.25], [1.25]]]]
+ >>> output = net(input_x)
+ >>> print(output)
+ (Tensor(shape=[1, 1, 2, 1], dtype=Float32, value=
+ [[[[ 2.50000000e+00],
+    [ 4.50000000e+00]]]]), Tensor(shape=[1, 1, 2, 1], dtype=Float32, value=
+ [[[[ 1.25000000e+00],
+    [ 1.25000000e+00]]]]))
 """

 def __init__(self, axis=None, keep_dims=None):
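The Moments hunk replaces the informal "mean:/var:" annotations with what print actually emits: a tuple of Tensors. A deterministic sketch (imports assumed):

import numpy as np
import mindspore
from mindspore import Tensor, nn

# Mean and variance along the last axis, keeping the reduced dimension.
net = nn.Moments(axis=3, keep_dims=True)
input_x = Tensor(np.array([[[[1, 2, 3, 4], [3, 4, 5, 6]]]]), mindspore.float32)
mean, var = net(input_x)  # the tuple can also be unpacked directly
print(mean)  # mean of [1,2,3,4] is 2.5 and of [3,4,5,6] is 4.5
print(var)   # variance of both rows is 1.25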
@@ -285,12 +285,11 @@ class BatchNorm1d(_BatchNorm):

 Examples:
 >>> net = nn.BatchNorm1d(num_features=4)
- >>> input = Tensor(np.random.randint(0, 255, [3, 4]), mindspore.float32)
- >>> result = net(input)
- >>> print(result)
- [[ 57.99971 50.99974 220.99889 222.99889 ]
-  [106.99947 193.99902 77.99961 101.99949 ]
-  [ 85.99957 188.99905 46.99976 226.99887 ]]
+ >>> input = Tensor(np.random.randint(0, 255, [2, 4]), mindspore.float32)
+ >>> output = net(input)
+ >>> print(output)
+ [[210.99895 136.99931 89.99955 240.9988 ]
+  [ 87.99956 157.9992 89.99955 42.999786]]
 """

 def __init__(self,
@@ -371,23 +370,15 @@ class BatchNorm2d(_BatchNorm):

 Examples:
 >>> net = nn.BatchNorm2d(num_features=3)
- >>> input = Tensor(np.random.randint(0, 255, [1, 3, 4, 4]), mindspore.float32)
- >>> result = net(input)
- >>> print(result)
- [[[[148.99925 148.99925 178.9991 77.99961 ]
-    [ 41.99979 97.99951 157.9992 94.99953 ]
-    [ 87.99956 158.9992 50.99974 179.9991 ]
-    [146.99927 27.99986 119.9994 253.99873 ]]
-
-   [[178.9991 187.99905 190.99904 88.99956 ]
-    [213.99893 158.9992 13.99993 200.999 ]
-    [224.99887 56.99971 246.99876 239.9988 ]
-    [ 97.99951 34.99983 28.99986 57.99971 ]]
-
-   [[ 14.99993 31.99984 136.99931 207.99896 ]
-    [180.9991 28.99986 23.99988 71.99964 ]
-    [112.99944 36.99981 213.99893 71.99964 ]
-    [ 8.99996 162.99919 157.9992 41.99979 ]]]]
+ >>> input = Tensor(np.random.randint(0, 255, [1, 3, 2, 2]), mindspore.float32)
+ >>> output = net(input)
+ >>> print(output)
+ [[[[128.99936 53.99973]
+    [191.99904 183.99908]]
+   [[146.99927 182.99908]
+    [184.99907 120.9994 ]]
+   [[ 33.99983 234.99883]
+    [188.99905 11.99994]]]]
 """

 def __init__(self,
@@ -618,7 +609,7 @@ class GroupNorm(Cell):
 [[[[0. 0. 0. 0.]
    [0. 0. 0. 0.]
    [0. 0. 0. 0.]
-   [0. 0. 0. 0.]],
+   [0. 0. 0. 0.]]
   [[0. 0. 0. 0.]
    [0. 0. 0. 0.]
    [0. 0. 0. 0.]
@@ -107,19 +107,7 @@ class MaxPool2d(_PoolNd):
 Examples:
 >>> pool = nn.MaxPool2d(kernel_size=3, stride=1)
 >>> x = Tensor(np.random.randint(0, 10, [1, 2, 4, 4]), mindspore.float32)
- >>> print(x)
- [[[[1. 5. 5. 1.]
-    [0. 3. 4. 8.]
-    [4. 2. 7. 6.]
-    [4. 9. 0. 1.]]
-   [[3. 6. 2. 6.]
-    [4. 4. 7. 8.]
-    [0. 0. 4. 0.]
-    [1. 8. 7. 0.]]]]
 >>> output = pool(x)
- >>> reuslt = output.shape
- >>> print(result)
- (1, 2, 2, 2)
 >>> print(output)
 [[[[7. 8.]
    [9. 9.]]
@@ -272,19 +260,7 @@ class AvgPool2d(_PoolNd):
 Examples:
 >>> pool = nn.AvgPool2d(kernel_size=3, stride=1)
 >>> x = Tensor(np.random.randint(0, 10, [1, 2, 4, 4]), mindspore.float32)
- >>> print(x)
- [[[[5. 5. 9. 9.]
-    [8. 4. 3. 0.]
-    [2. 7. 1. 2.]
-    [1. 8. 3. 3.]]
-   [[6. 8. 2. 4.]
-    [3. 0. 2. 1.]
-    [0. 8. 9. 7.]
-    [2. 1. 4. 9.]]]]
 >>> output = pool(x)
- >>> result = output.shape
- >>> print(result)
- (1, 2, 2, 2)
 >>> print(output)
 [[[[4.888889 4.4444447]
    [4.111111 3.4444444]]
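Both pooling hunks drop the long random-input printout, since those values cannot be reproduced. With a fixed input the example becomes checkable end to end (a sketch; the input values are assumptions chosen for determinism):

import numpy as np
import mindspore
from mindspore import Tensor, nn

# 3x3 max pooling with stride 1 over a fixed 4x4 single-channel input.
pool = nn.MaxPool2d(kernel_size=3, stride=1)
x = Tensor(np.array([[[[1, 5, 5, 1],
                       [0, 3, 4, 8],
                       [4, 2, 7, 6],
                       [4, 9, 0, 1]]]]), mindspore.float32)
output = pool(x)
print(output.shape)  # (1, 1, 2, 2)
print(output)        # each entry is the max of one 3x3 window: 7, 8, 9, 9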
@@ -234,9 +234,10 @@ class FakeQuantWithMinMaxObserver(UniformQuantObserver):
 Examples:
 >>> fake_quant = nn.FakeQuantWithMinMaxObserver()
 >>> input = Tensor(np.array([[1, 2, 1], [-2, 0, -1]]), mindspore.float32)
- >>> result = fake_quant(input)
- >>> print(result)
- [[0.9882355, 1.9764705, 0.9882355], [-1.9764705, 0. , -0.9882355]]
+ >>> output = fake_quant(input)
+ >>> print(output)
+ [[ 0.9882355 1.9764705 0.9882355]
+  [-1.9764705 0. -0.9882355]]
 """

 def __init__(self,
@@ -589,11 +590,10 @@ class Conv2dBnFoldQuant(Cell):
 Examples:
 >>> qconfig = compression.quant.create_quant_config()
 >>> conv2d_bnfold = nn.Conv2dBnFoldQuant(1, 6, kernel_size=(2, 2), stride=(1, 1), pad_mode="valid",
- >>> quant_config=qconfig)
+ ... quant_config=qconfig)
 >>> input = Tensor(np.random.randint(-2, 2, (2, 1, 3, 3)), mindspore.float32)
- >>> result = conv2d_bnfold(input)
- >>> output = result.shape
- >>> print(output)
+ >>> output = conv2d_bnfold(input)
+ >>> print(output.shape)
 (2, 6, 2, 2)
 """

@@ -775,11 +775,10 @@ class Conv2dBnWithoutFoldQuant(Cell):
 Examples:
 >>> qconfig = compression.quant.create_quant_config()
 >>> conv2d_no_bnfold = nn.Conv2dBnWithoutFoldQuant(1, 6, kernel_size=(2, 2), stride=(1, 1), pad_mode="valid",
- >>> quant_config=qconfig)
+ ... quant_config=qconfig)
 >>> input = Tensor(np.random.randint(-2, 2, (2, 1, 3, 3)), mstype.float32)
- >>> result = conv2d_no_bnfold(input)
- >>> output = result.shape
- >>> print(output)
+ >>> output = conv2d_no_bnfold(input)
+ >>> print(output.shape)
 (2, 6, 2, 2)
 """

@@ -897,11 +896,10 @@ class Conv2dQuant(Cell):
 Examples:
 >>> qconfig = compression.quant.create_quant_config()
 >>> conv2d_quant = nn.Conv2dQuant(1, 6, kernel_size= (2, 2), stride=(1, 1), pad_mode="valid",
- >>> quant_config=qconfig)
+ ... quant_config=qconfig)
 >>> input = Tensor(np.random.randint(-2, 2, (2, 1, 3, 3)), mindspore.float32)
- >>> result = conv2d_quant(input)
- >>> output = result.shape
- >>> print(output)
+ >>> output = conv2d_quant(input)
+ >>> print(output.shape)
 (2, 6, 2, 2)
 """

@@ -1106,9 +1104,10 @@ class ActQuant(_QuantActivation):
 >>> qconfig = compression.quant.create_quant_config()
 >>> act_quant = nn.ActQuant(nn.ReLU(), quant_config=qconfig)
 >>> input = Tensor(np.array([[1, 2, -1], [-2, 0, -1]]), mindspore.float32)
- >>> result = act_quant(input)
- >>> print(result)
- [[0.9882355, 1.9764705, 0.], [0., 0., 0.]]
+ >>> output = act_quant(input)
+ >>> print(output)
+ [[0.9882355 1.9764705 0. ]
+  [0. 0. 0. ]]
 """

 def __init__(self,
@@ -1168,9 +1167,10 @@ class TensorAddQuant(Cell):
 >>> add_quant = nn.TensorAddQuant(quant_config=qconfig)
 >>> input_x1 = Tensor(np.array([[1, 2, 1], [-2, 0, -1]]), mindspore.float32)
 >>> input_x2 = Tensor(np.ones((2, 3)), mindspore.float32)
- >>> result = add_quant(input_x1, input_x2)
- >>> print(result)
- [[1.9764705, 3.011765, 1.9764705], [-0.9882355, 0.9882355, 0.]]
+ >>> output = add_quant(input_x1, input_x2)
+ >>> print(output)
+ [[ 1.9764705 3.011765 1.9764705]
+  [-0.9882355 0.9882355 0. ]]
 """

 def __init__(self,
@@ -1215,9 +1215,10 @@ class MulQuant(Cell):
 >>> mul_quant = nn.MulQuant(quant_config=qconfig)
 >>> input_x1 = Tensor(np.array([[1, 2, 1], [-2, 0, -1]]), mindspore.float32)
 >>> input_x2 = Tensor(np.ones((2, 3)) * 2, mindspore.float32)
- >>> result = mul_quant(input_x1, input_x2)
- >>> print(result)
- [[1.9764705, 4.0000005, 1.9764705], [-4., 0., -1.9764705]]
+ >>> output = mul_quant(input_x1, input_x2)
+ >>> print(output)
+ [[ 1.9764705 4.0000005 1.9764705]
+  [-4. 0. -1.9764705]]
 """

 def __init__(self,
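Several quant hunks fix the same doctest detail: a wrapped call must continue with "... ", not a second ">>> ", or doctest parses the continuation as a new statement and the example fails before it runs. A small self-contained check of that rule (standard library only; the dict example is an illustrative assumption):

import doctest

# The wrapped call below is one statement, so its second line uses the
# "... " continuation prompt; with ">>> " doctest would report a SyntaxError.
example = '''
>>> args = dict(kernel_size=(2, 2),
...             stride=(1, 1))
>>> sorted(args)
['kernel_size', 'stride']
'''
test = doctest.DocTestParser().get_doctest(example, {}, 'continuation', None, 0)
runner = doctest.DocTestRunner(verbose=False)
runner.run(test)
print(runner.summarize())  # 0 failures when the continuation prompt is correct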
@@ -95,7 +95,8 @@ class L1Loss(_Loss):
 >>> loss = nn.L1Loss()
 >>> input_data = Tensor(np.array([1, 2, 3]), mindspore.float32)
 >>> target_data = Tensor(np.array([1, 2, 2]), mindspore.float32)
- >>> loss(input_data, target_data)
+ >>> output = loss(input_data, target_data)
+ >>> print(output)
 0.33333334
 """
 def __init__(self, reduction='mean'):
@@ -183,7 +184,9 @@ class SmoothL1Loss(_Loss):
 >>> loss = nn.SmoothL1Loss()
 >>> input_data = Tensor(np.array([1, 2, 3]), mindspore.float32)
 >>> target_data = Tensor(np.array([1, 2, 2]), mindspore.float32)
- >>> loss(input_data, target_data)
+ >>> output = loss(input_data, target_data)
+ >>> print(output)
+ [0. 0. 0.5]
 """
 def __init__(self, beta=1.0):
     super(SmoothL1Loss, self).__init__()
@@ -236,7 +239,9 @@ class SoftmaxCrossEntropyWithLogits(_Loss):
 >>> logits = Tensor(np.random.randint(0, 9, [1, 10]), mindspore.float32)
 >>> labels_np = np.ones([1,]).astype(np.int32)
 >>> labels = Tensor(labels_np)
- >>> loss(logits, labels)
+ >>> output = loss(logits, labels)
+ >>> print(output)
+ [5.6924148]
 """
 def __init__(self,
              sparse=False,
@@ -299,7 +304,7 @@ class SampledSoftmaxLoss(_Loss):
 >>> labels = Tensor([0, 1, 2])
 >>> inputs = Tensor(np.random.randint(0, 9, [3, 10]), mindspore.float32)
 >>> output = loss(weights, biases, labels, inputs)
- >>> print(output) # output is ranndom
+ >>> print(output)
 [ 4.0181947 46.050743 7.0009117]
 """

@@ -557,7 +562,7 @@ class CosineEmbeddingLoss(_Loss):
 >>> cosine_embedding_loss = nn.CosineEmbeddingLoss()
 >>> output = cosine_embedding_loss(x1, x2, y)
 >>> print(output)
- [0.0003426671]
+ [0.0003426075]
 """
 def __init__(self, margin=0.0, reduction="mean"):
     super(CosineEmbeddingLoss, self).__init__(reduction)
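The loss hunks all add the missing print so the expected value is checkable. The L1Loss case is fully deterministic (a minimal sketch; imports assumed):

import numpy as np
import mindspore
from mindspore import Tensor, nn

# Mean absolute error: |1-1| + |2-2| + |3-2| averaged over 3 elements = 1/3.
loss = nn.L1Loss()
input_data = Tensor(np.array([1, 2, 3]), mindspore.float32)
target_data = Tensor(np.array([1, 2, 2]), mindspore.float32)
output = loss(input_data, target_data)
print(output)
# 0.33333334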
@@ -39,7 +39,9 @@ class TopKCategoricalAccuracy(Metric):
 >>> topk = nn.TopKCategoricalAccuracy(3)
 >>> topk.clear()
 >>> topk.update(x, y)
- >>> result = topk.eval()
+ >>> output = topk.eval()
+ >>> print(output)
+ 0.6666666666666666
 """
 def __init__(self, k):
     super(TopKCategoricalAccuracy, self).__init__()
@@ -103,7 +105,9 @@ class Top1CategoricalAccuracy(TopKCategoricalAccuracy):
 >>> topk = nn.Top1CategoricalAccuracy()
 >>> topk.clear()
 >>> topk.update(x, y)
- >>> result = topk.eval()
+ >>> output = topk.eval()
+ >>> print(output)
+ 0.0
 """
 def __init__(self):
     super(Top1CategoricalAccuracy, self).__init__(1)
@@ -121,7 +125,9 @@ class Top5CategoricalAccuracy(TopKCategoricalAccuracy):
 >>> topk = nn.Top5CategoricalAccuracy()
 >>> topk.clear()
 >>> topk.update(x, y)
- >>> result = topk.eval()
+ >>> output = topk.eval()
+ >>> print(output)
+ 1.0
 """
 def __init__(self):
     super(Top5CategoricalAccuracy, self).__init__(5)
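The metric hunks reference x and y without defining them; a self-contained sketch with illustrative values (the data here is an assumption, so the printed accuracy differs from the docstring's):

import numpy as np
from mindspore import Tensor, nn

# Three samples, four classes; top-3 accuracy counts a sample as correct
# when its label is among the three highest-scoring classes.
x = Tensor(np.array([[0.2, 0.5, 0.3, 0.0],
                     [0.3, 0.1, 0.45, 0.15],
                     [0.6, 0.1, 0.25, 0.05]]).astype(np.float32))
y = Tensor(np.array([2, 0, 1]))
topk = nn.TopKCategoricalAccuracy(3)
topk.clear()
topk.update(x, y)
print(topk.eval())  # should print 1.0: every label is within the top 3 here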
@@ -45,6 +45,7 @@ class Exp(PowerTransform):
 ... ans2 = self.s1.inverse(value)
 ... ans3 = self.s1.forward_log_jacobian(value)
 ... ans4 = self.s1.inverse_log_jacobian(value)
+ ...
 """

 def __init__(self,
@@ -53,6 +53,7 @@ class GumbelCDF(Bijector):
 ... ans2 = self.gum.inverse(value)
 ... ans3 = self.gum.forward_log_jacobian(value)
 ... ans4 = self.gum.inverse_log_jacobian(value)
+ ...
 """

 def __init__(self,
@@ -57,6 +57,7 @@ class PowerTransform(Bijector):
 ... ans2 = self.s1.inverse(value)
 ... ans3 = self.s1.forward_log_jacobian(value)
 ... ans4 = self.s1.inverse_log_jacobian(value)
+ ...
 """

 def __init__(self,
@@ -53,6 +53,7 @@ class ScalarAffine(Bijector):
 ... ans2 = self.s1.inverse(value)
 ... ans3 = self.s1.forward_log_jacobian(value)
 ... ans4 = self.s1.inverse_log_jacobian(value)
+ ...
 """

 def __init__(self,
@@ -50,62 +50,63 @@ class Bernoulli(Distribution):
 >>>
 >>> # To use the Bernoulli distribution in a network.
 >>> class net(Cell):
- >>> def __init__(self):
- >>> super(net, self).__init__():
- >>> self.b1 = msd.Bernoulli(0.5, dtype=mstype.int32)
- >>> self.b2 = msd.Bernoulli(dtype=mstype.int32)
- >>>
- >>> # All the following calls in construct are valid.
- >>> def construct(self, value, probs_b, probs_a):
- >>>
- >>> # Private interfaces of probability functions corresponding to public interfaces, including
- >>> # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, are the same as follows.
- >>> # Args:
- >>> # value (Tensor): the value to be evaluated.
- >>> # probs1 (Tensor): the probability of success. Default: self.probs.
- >>>
- >>> # Examples of `prob`.
- >>> # Similar calls can be made to other probability functions
- >>> # by replacing `prob` by the name of the function.
- >>> ans = self.b1.prob(value)
- >>> # Evaluate `prob` with respect to distribution b.
- >>> ans = self.b1.prob(value, probs_b)
- >>> # `probs` must be passed in during function calls.
- >>> ans = self.b2.prob(value, probs_a)
- >>>
- >>>
- >>> # Functions `mean`, `sd`, `var`, and `entropy` have the same arguments.
- >>> # Args:
- >>> # probs1 (Tensor): the probability of success. Default: self.probs.
- >>>
- >>> # Examples of `mean`. `sd`, `var`, and `entropy` are similar.
- >>> ans = self.b1.mean() # return 0.5
- >>> ans = self.b1.mean(probs_b) # return probs_b
- >>> # `probs` must be passed in during function calls.
- >>> ans = self.b2.mean(probs_a)
- >>>
- >>>
- >>> # Interfaces of `kl_loss` and `cross_entropy` are the same as follows:
- >>> # Args:
- >>> # dist (str): the name of the distribution. Only 'Bernoulli' is supported.
- >>> # probs1_b (Tensor): the probability of success of distribution b.
- >>> # probs1_a (Tensor): the probability of success of distribution a. Default: self.probs.
- >>>
- >>> # Examples of kl_loss. `cross_entropy` is similar.
- >>> ans = self.b1.kl_loss('Bernoulli', probs_b)
- >>> ans = self.b1.kl_loss('Bernoulli', probs_b, probs_a)
- >>> # An additional `probs_a` must be passed in.
- >>> ans = self.b2.kl_loss('Bernoulli', probs_b, probs_a)
- >>>
- >>>
- >>> # Examples of `sample`.
- >>> # Args:
- >>> # shape (tuple): the shape of the sample. Default: ().
- >>> # probs1 (Tensor): the probability of success. Default: self.probs.
- >>> ans = self.b1.sample()
- >>> ans = self.b1.sample((2,3))
- >>> ans = self.b1.sample((2,3), probs_b)
- >>> ans = self.b2.sample((2,3), probs_a)
+ ... def __init__(self):
+ ... super(net, self).__init__():
+ ... self.b1 = msd.Bernoulli(0.5, dtype=mstype.int32)
+ ... self.b2 = msd.Bernoulli(dtype=mstype.int32)
+ ...
+ ... # All the following calls in construct are valid.
+ ... def construct(self, value, probs_b, probs_a):
+ ...
+ ... # Private interfaces of probability functions corresponding to public interfaces, including
+ ... # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, are the same as follows.
+ ... # Args:
+ ... # value (Tensor): the value to be evaluated.
+ ... # probs1 (Tensor): the probability of success. Default: self.probs.
+ ...
+ ... # Examples of `prob`.
+ ... # Similar calls can be made to other probability functions
+ ... # by replacing `prob` by the name of the function.
+ ... ans = self.b1.prob(value)
+ ... # Evaluate `prob` with respect to distribution b.
+ ... ans = self.b1.prob(value, probs_b)
+ ... # `probs` must be passed in during function calls.
+ ... ans = self.b2.prob(value, probs_a)
+ ...
+ ...
+ ... # Functions `mean`, `sd`, `var`, and `entropy` have the same arguments.
+ ... # Args:
+ ... # probs1 (Tensor): the probability of success. Default: self.probs.
+ ...
+ ... # Examples of `mean`. `sd`, `var`, and `entropy` are similar.
+ ... ans = self.b1.mean() # return 0.5
+ ... ans = self.b1.mean(probs_b) # return probs_b
+ ... # `probs` must be passed in during function calls.
+ ... ans = self.b2.mean(probs_a)
+ ...
+ ...
+ ... # Interfaces of `kl_loss` and `cross_entropy` are the same as follows:
+ ... # Args:
+ ... # dist (str): the name of the distribution. Only 'Bernoulli' is supported.
+ ... # probs1_b (Tensor): the probability of success of distribution b.
+ ... # probs1_a (Tensor): the probability of success of distribution a. Default: self.probs.
+ ...
+ ... # Examples of kl_loss. `cross_entropy` is similar.
+ ... ans = self.b1.kl_loss('Bernoulli', probs_b)
+ ... ans = self.b1.kl_loss('Bernoulli', probs_b, probs_a)
+ ... # An additional `probs_a` must be passed in.
+ ... ans = self.b2.kl_loss('Bernoulli', probs_b, probs_a)
+ ...
+ ...
+ ... # Examples of `sample`.
+ ... # Args:
+ ... # shape (tuple): the shape of the sample. Default: ().
+ ... # probs1 (Tensor): the probability of success. Default: self.probs.
+ ... ans = self.b1.sample()
+ ... ans = self.b1.sample((2,3))
+ ... ans = self.b1.sample((2,3), probs_b)
+ ... ans = self.b2.sample((2,3), probs_a)
+ ...
 """

 def __init__(self,
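Outside construct, the same distribution methods can be called eagerly, which makes the prompt-prefix distinction easy to see in a REPL. A minimal sketch (assuming the imports these docstrings use throughout):

import mindspore.nn.probability.distribution as msd
from mindspore import Tensor
from mindspore import dtype as mstype

# A Bernoulli(p=0.5) over int32 outcomes; mean() and prob() need no extra
# arguments because probs was fixed at construction.
b = msd.Bernoulli(0.5, dtype=mstype.int32)
print(b.mean())                          # 0.5
print(b.prob(Tensor(1, mstype.int32)))   # 0.5
print(b.sample((2, 3)).shape)            # (2, 3)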
@@ -46,59 +46,60 @@ class Categorical(Distribution):
 >>>
 >>> # To use a Categorical distribution in a network
 >>> class net(Cell):
- >>> def __init__(self, probs):
- >>> super(net, self).__init__():
- >>> self.ca = msd.Categorical(probs=[0.2, 0.8], dtype=mstype.int32)
- >>> self.ca1 = msd.Categorical(dtype=mstype.int32)
- >>>
- >>> # All the following calls in construct are valid
- >>> def construct(self, value):
- >>>
- >>> # Private interfaces of probability functions corresponding to public interfaces, including
- >>> # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, are the same as follows.
- >>> # Args:
- >>> # value (Tensor): the value to be evaluated.
- >>> # probs (Tensor): event probabilities. Default: self.probs.
- >>>
- >>> # Examples of `prob`.
- >>> # Similar calls can be made to other probability functions
- >>> # by replacing `prob` by the name of the function.
- >>> ans = self.ca.prob(value)
- >>> # Evaluate `prob` with respect to distribution b.
- >>> ans = self.ca.prob(value, probs_b)
- >>> # `probs` must be passed in during function calls.
- >>> ans = self.ca1.prob(value, probs_a)
- >>>
- >>> # Functions `mean`, `sd`, `var`, and `entropy` have the same arguments.
- >>> # Args:
- >>> # probs (Tensor): event probabilities. Default: self.probs.
- >>>
- >>> # Examples of `mean`. `sd`, `var`, and `entropy` are similar.
- >>> ans = self.ca.mean() # return 0.8
- >>> ans = self.ca.mean(probs_b)
- >>> # `probs` must be passed in during function calls.
- >>> ans = self.ca1.mean(probs_a)
- >>>
- >>> # Interfaces of `kl_loss` and `cross_entropy` are the same as follows:
- >>> # Args:
- >>> # dist (str): the name of the distribution. Only 'Categorical' is supported.
- >>> # probs_b (Tensor): event probabilities of distribution b.
- >>> # probs (Tensor): event probabilities of distribution a. Default: self.probs.
- >>>
- >>> # Examples of kl_loss. `cross_entropy` is similar.
- >>> ans = self.ca.kl_loss('Categorical', probs_b)
- >>> ans = self.ca.kl_loss('Categorical', probs_b, probs_a)
- >>> # An additional `probs` must be passed in.
- >>> ans = self.ca1.kl_loss('Categorical', probs_b, probs_a)
- >>>
- >>> # Examples of `sample`.
- >>> # Args:
- >>> # shape (tuple): the shape of the sample. Default: ().
- >>> # probs (Tensor): event probabilities. Default: self.probs.
- >>> ans = self.ca.sample()
- >>> ans = self.ca.sample((2,3))
- >>> ans = self.ca.sample((2,3), probs_b)
- >>> ans = self.ca1.sample((2,3), probs_a)
+ ... def __init__(self, probs):
+ ... super(net, self).__init__():
+ ... self.ca = msd.Categorical(probs=[0.2, 0.8], dtype=mstype.int32)
+ ... self.ca1 = msd.Categorical(dtype=mstype.int32)
+ ...
+ ... # All the following calls in construct are valid
+ ... def construct(self, value):
+ ...
+ ... # Private interfaces of probability functions corresponding to public interfaces, including
+ ... # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, are the same as follows.
+ ... # Args:
+ ... # value (Tensor): the value to be evaluated.
+ ... # probs (Tensor): event probabilities. Default: self.probs.
+ ...
+ ... # Examples of `prob`.
+ ... # Similar calls can be made to other probability functions
+ ... # by replacing `prob` by the name of the function.
+ ... ans = self.ca.prob(value)
+ ... # Evaluate `prob` with respect to distribution b.
+ ... ans = self.ca.prob(value, probs_b)
+ ... # `probs` must be passed in during function calls.
+ ... ans = self.ca1.prob(value, probs_a)
+ ...
+ ... # Functions `mean`, `sd`, `var`, and `entropy` have the same arguments.
+ ... # Args:
+ ... # probs (Tensor): event probabilities. Default: self.probs.
+ ...
+ ... # Examples of `mean`. `sd`, `var`, and `entropy` are similar.
+ ... ans = self.ca.mean() # return 0.8
+ ... ans = self.ca.mean(probs_b)
+ ... # `probs` must be passed in during function calls.
+ ... ans = self.ca1.mean(probs_a)
+ ...
+ ... # Interfaces of `kl_loss` and `cross_entropy` are the same as follows:
+ ... # Args:
+ ... # dist (str): the name of the distribution. Only 'Categorical' is supported.
+ ... # probs_b (Tensor): event probabilities of distribution b.
+ ... # probs (Tensor): event probabilities of distribution a. Default: self.probs.
+ ...
+ ... # Examples of kl_loss. `cross_entropy` is similar.
+ ... ans = self.ca.kl_loss('Categorical', probs_b)
+ ... ans = self.ca.kl_loss('Categorical', probs_b, probs_a)
+ ... # An additional `probs` must be passed in.
+ ... ans = self.ca1.kl_loss('Categorical', probs_b, probs_a)
+ ...
+ ... # Examples of `sample`.
+ ... # Args:
+ ... # shape (tuple): the shape of the sample. Default: ().
+ ... # probs (Tensor): event probabilities. Default: self.probs.
+ ... ans = self.ca.sample()
+ ... ans = self.ca.sample((2,3))
+ ... ans = self.ca.sample((2,3), probs_b)
+ ... ans = self.ca1.sample((2,3), probs_a)
+ ...
 """

 def __init__(self,
@@ -52,62 +52,63 @@ class Exponential(Distribution):
 >>>
 >>> # To use an Exponential distribution in a network.
 >>> class net(Cell):
- >>> def __init__(self):
- >>> super(net, self).__init__():
- >>> self.e1 = msd.Exponential(0.5, dtype=mstype.float32)
- >>> self.e2 = msd.Exponential(dtype=mstype.float32)
- >>>
- >>> # All the following calls in construct are valid.
- >>> def construct(self, value, rate_b, rate_a):
- >>>
- >>> # Private interfaces of probability functions corresponding to public interfaces, including
- >>> # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, are the same as follows.
- >>> # Args:
- >>> # value (Tensor): the value to be evaluated.
- >>> # rate (Tensor): the rate of the distribution. Default: self.rate.
- >>>
- >>> # Examples of `prob`.
- >>> # Similar calls can be made to other probability functions
- >>> # by replacing `prob` by the name of the function.
- >>> ans = self.e1.prob(value)
- >>> # Evaluate with respect to distribution b.
- >>> ans = self.e1.prob(value, rate_b)
- >>> # `rate` must be passed in during function calls.
- >>> ans = self.e2.prob(value, rate_a)
- >>>
- >>>
- >>> # Functions `mean`, `sd`, 'var', and 'entropy' have the same arguments as follows.
- >>> # Args:
- >>> # rate (Tensor): the rate of the distribution. Default: self.rate.
- >>>
- >>> # Examples of `mean`. `sd`, `var`, and `entropy` are similar.
- >>> ans = self.e1.mean() # return 2
- >>> ans = self.e1.mean(rate_b) # return 1 / rate_b
- >>> # `rate` must be passed in during function calls.
- >>> ans = self.e2.mean(rate_a)
- >>>
- >>>
- >>> # Interfaces of `kl_loss` and `cross_entropy` are the same.
- >>> # Args:
- >>> # dist (str): The name of the distribution. Only 'Exponential' is supported.
- >>> # rate_b (Tensor): the rate of distribution b.
- >>> # rate_a (Tensor): the rate of distribution a. Default: self.rate.
- >>>
- >>> # Examples of `kl_loss`. `cross_entropy` is similar.
- >>> ans = self.e1.kl_loss('Exponential', rate_b)
- >>> ans = self.e1.kl_loss('Exponential', rate_b, rate_a)
- >>> # An additional `rate` must be passed in.
- >>> ans = self.e2.kl_loss('Exponential', rate_b, rate_a)
- >>>
- >>>
- >>> # Examples of `sample`.
- >>> # Args:
- >>> # shape (tuple): the shape of the sample. Default: ()
- >>> # probs1 (Tensor): the rate of the distribution. Default: self.rate.
- >>> ans = self.e1.sample()
- >>> ans = self.e1.sample((2,3))
- >>> ans = self.e1.sample((2,3), rate_b)
- >>> ans = self.e2.sample((2,3), rate_a)
+ ... def __init__(self):
+ ... super(net, self).__init__():
+ ... self.e1 = msd.Exponential(0.5, dtype=mstype.float32)
+ ... self.e2 = msd.Exponential(dtype=mstype.float32)
+ ...
+ ... # All the following calls in construct are valid.
+ ... def construct(self, value, rate_b, rate_a):
+ ...
+ ... # Private interfaces of probability functions corresponding to public interfaces, including
+ ... # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, are the same as follows.
+ ... # Args:
+ ... # value (Tensor): the value to be evaluated.
+ ... # rate (Tensor): the rate of the distribution. Default: self.rate.
+ ...
+ ... # Examples of `prob`.
+ ... # Similar calls can be made to other probability functions
+ ... # by replacing `prob` by the name of the function.
+ ... ans = self.e1.prob(value)
+ ... # Evaluate with respect to distribution b.
+ ... ans = self.e1.prob(value, rate_b)
+ ... # `rate` must be passed in during function calls.
+ ... ans = self.e2.prob(value, rate_a)
+ ...
+ ...
+ ... # Functions `mean`, `sd`, 'var', and 'entropy' have the same arguments as follows.
+ ... # Args:
+ ... # rate (Tensor): the rate of the distribution. Default: self.rate.
+ ...
+ ... # Examples of `mean`. `sd`, `var`, and `entropy` are similar.
+ ... ans = self.e1.mean() # return 2
+ ... ans = self.e1.mean(rate_b) # return 1 / rate_b
+ ... # `rate` must be passed in during function calls.
+ ... ans = self.e2.mean(rate_a)
+ ...
+ ...
+ ... # Interfaces of `kl_loss` and `cross_entropy` are the same.
+ ... # Args:
+ ... # dist (str): The name of the distribution. Only 'Exponential' is supported.
+ ... # rate_b (Tensor): the rate of distribution b.
+ ... # rate_a (Tensor): the rate of distribution a. Default: self.rate.
+ ...
+ ... # Examples of `kl_loss`. `cross_entropy` is similar.
+ ... ans = self.e1.kl_loss('Exponential', rate_b)
+ ... ans = self.e1.kl_loss('Exponential', rate_b, rate_a)
+ ... # An additional `rate` must be passed in.
+ ... ans = self.e2.kl_loss('Exponential', rate_b, rate_a)
+ ...
+ ...
+ ... # Examples of `sample`.
+ ... # Args:
+ ... # shape (tuple): the shape of the sample. Default: ()
+ ... # probs1 (Tensor): the rate of the distribution. Default: self.rate.
+ ... ans = self.e1.sample()
+ ... ans = self.e1.sample((2,3))
+ ... ans = self.e1.sample((2,3), rate_b)
+ ... ans = self.e2.sample((2,3), rate_a)
+ ...
 """

 def __init__(self,
@@ -53,62 +53,63 @@ class Geometric(Distribution):
 >>>
 >>> # To use a Geometric distribution in a network.
 >>> class net(Cell):
- >>> def __init__(self):
- >>> super(net, self).__init__():
- >>> self.g1 = msd.Geometric(0.5, dtype=mstype.int32)
- >>> self.g2 = msd.Geometric(dtype=mstype.int32)
- >>>
- >>> # The following calls are valid in construct.
- >>> def construct(self, value, probs_b, probs_a):
- >>>
- >>> # Private interfaces of probability functions corresponding to public interfaces, including
- >>> # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, have the same arguments as follows.
- >>> # Args:
- >>> # value (Tensor): the value to be evaluated.
- >>> # probs1 (Tensor): the probability of success of a Bernoulli trail. Default: self.probs.
- >>>
- >>> # Examples of `prob`.
- >>> # Similar calls can be made to other probability functions
- >>> # by replacing `prob` by the name of the function.
- >>> ans = self.g1.prob(value)
- >>> # Evaluate with respect to distribution b.
- >>> ans = self.g1.prob(value, probs_b)
- >>> # `probs` must be passed in during function calls.
- >>> ans = self.g2.prob(value, probs_a)
- >>>
- >>>
- >>> # Functions `mean`, `sd`, `var`, and `entropy` have the same arguments.
- >>> # Args:
- >>> # probs1 (Tensor): the probability of success of a Bernoulli trail. Default: self.probs.
- >>>
- >>> # Examples of `mean`. `sd`, `var`, and `entropy` are similar.
- >>> ans = self.g1.mean() # return 1.0
- >>> ans = self.g1.mean(probs_b)
- >>> # Probs must be passed in during function calls
- >>> ans = self.g2.mean(probs_a)
- >>>
- >>>
- >>> # Interfaces of 'kl_loss' and 'cross_entropy' are the same.
- >>> # Args:
- >>> # dist (str): the name of the distribution. Only 'Geometric' is supported.
- >>> # probs1_b (Tensor): the probability of success of a Bernoulli trail of distribution b.
- >>> # probs1_a (Tensor): the probability of success of a Bernoulli trail of distribution a. Default: self.probs.
- >>>
- >>> # Examples of `kl_loss`. `cross_entropy` is similar.
- >>> ans = self.g1.kl_loss('Geometric', probs_b)
- >>> ans = self.g1.kl_loss('Geometric', probs_b, probs_a)
- >>> # An additional `probs` must be passed in.
- >>> ans = self.g2.kl_loss('Geometric', probs_b, probs_a)
- >>>
- >>>
- >>> # Examples of `sample`.
- >>> # Args:
- >>> # shape (tuple): the shape of the sample. Default: ()
- >>> # probs1 (Tensor): the probability of success of a Bernoulli trail. Default: self.probs.
- >>> ans = self.g1.sample()
- >>> ans = self.g1.sample((2,3))
- >>> ans = self.g1.sample((2,3), probs_b)
- >>> ans = self.g2.sample((2,3), probs_a)
+ ... def __init__(self):
+ ... super(net, self).__init__():
+ ... self.g1 = msd.Geometric(0.5, dtype=mstype.int32)
+ ... self.g2 = msd.Geometric(dtype=mstype.int32)
+ ...
+ ... # The following calls are valid in construct.
+ ... def construct(self, value, probs_b, probs_a):
+ ...
+ ... # Private interfaces of probability functions corresponding to public interfaces, including
+ ... # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, have the same arguments as follows.
+ ... # Args:
+ ... # value (Tensor): the value to be evaluated.
+ ... # probs1 (Tensor): the probability of success of a Bernoulli trail. Default: self.probs.
+ ...
+ ... # Examples of `prob`.
+ ... # Similar calls can be made to other probability functions
+ ... # by replacing `prob` by the name of the function.
+ ... ans = self.g1.prob(value)
+ ... # Evaluate with respect to distribution b.
+ ... ans = self.g1.prob(value, probs_b)
+ ... # `probs` must be passed in during function calls.
+ ... ans = self.g2.prob(value, probs_a)
+ ...
+ ...
+ ... # Functions `mean`, `sd`, `var`, and `entropy` have the same arguments.
+ ... # Args:
+ ... # probs1 (Tensor): the probability of success of a Bernoulli trail. Default: self.probs.
+ ...
+ ... # Examples of `mean`. `sd`, `var`, and `entropy` are similar.
+ ... ans = self.g1.mean() # return 1.0
+ ... ans = self.g1.mean(probs_b)
+ ... # Probs must be passed in during function calls
+ ... ans = self.g2.mean(probs_a)
+ ...
+ ...
+ ... # Interfaces of 'kl_loss' and 'cross_entropy' are the same.
+ ... # Args:
+ ... # dist (str): the name of the distribution. Only 'Geometric' is supported.
+ ... # probs1_b (Tensor): the probability of success of a Bernoulli trail of distribution b.
+ ... # probs1_a (Tensor): the probability of success of a Bernoulli trail of distribution a. Default: self.probs.
+ ...
+ ... # Examples of `kl_loss`. `cross_entropy` is similar.
+ ... ans = self.g1.kl_loss('Geometric', probs_b)
+ ... ans = self.g1.kl_loss('Geometric', probs_b, probs_a)
+ ... # An additional `probs` must be passed in.
+ ... ans = self.g2.kl_loss('Geometric', probs_b, probs_a)
+ ...
+ ...
+ ... # Examples of `sample`.
+ ... # Args:
+ ... # shape (tuple): the shape of the sample. Default: ()
+ ... # probs1 (Tensor): the probability of success of a Bernoulli trail. Default: self.probs.
+ ... ans = self.g1.sample()
+ ... ans = self.g1.sample((2,3))
+ ... ans = self.g1.sample((2,3), probs_b)
+ ... ans = self.g2.sample((2,3), probs_a)
+ ...
 """

 def __init__(self,
@@ -50,47 +50,48 @@ class Gumbel(TransformedDistribution):
 >>>
 >>> # To use a Gumbel distribution in a network.
 >>> class net(Cell):
- >>> def __init__(self):
- >>> super(net, self).__init__():
- >>> self.g1 = msd.Gumbel(0.0, 1.0, dtype=mstype.float32)
- >>>
- >>> # The following calls are valid in construct.
- >>> def construct(self, value, loc_b, scale_b):
- >>>
- >>> # Private interfaces of probability functions corresponding to public interfaces, including
- >>> # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, have the same
- >>> # arguments as follows.
- >>> # Args:
- >>> # value (Tensor): the value to be evaluated.
- >>>
- >>> # Examples of `prob`.
- >>> # Similar calls can be made to other probability functions
- >>> # by replacing 'prob' by the name of the function.
- >>> ans = self.g1.prob(value)
- >>>
- >>> # Functions `mean`, `mode`, sd`, `var`, and `entropy` do not take in any argument.
- >>> ans = self.g1.mean()
- >>> ans = self.g1.mode()
- >>> ans = self.g1.sd()
- >>> ans = self.g1.entropy()
- >>> ans = self.g1.var()
- >>>
- >>> # Interfaces of 'kl_loss' and 'cross_entropy' are the same:
- >>> # Args:
- >>> # dist (str): the type of the distributions. Only "Gumbel" is supported.
- >>> # loc_b (Tensor): the loc of distribution b.
- >>> # scale_b (Tensor): the scale distribution b.
- >>>
- >>> # Examples of `kl_loss`. `cross_entropy` is similar.
- >>> ans = self.g1.kl_loss('Gumbel', loc_b, scale_b)
- >>> ans = self.g1.cross_entropy('Gumbel', loc_b, scale_b)
- >>>
- >>> # Examples of `sample`.
- >>> # Args:
- >>> # shape (tuple): the shape of the sample. Default: ()
- >>>
- >>> ans = self.g1.sample()
- >>> ans = self.g1.sample((2,3))
+ ... def __init__(self):
+ ... super(net, self).__init__():
+ ... self.g1 = msd.Gumbel(0.0, 1.0, dtype=mstype.float32)
+ ...
+ ... # The following calls are valid in construct.
+ ... def construct(self, value, loc_b, scale_b):
+ ...
+ ... # Private interfaces of probability functions corresponding to public interfaces, including
+ ... # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, have the same
+ ... # arguments as follows.
+ ... # Args:
+ ... # value (Tensor): the value to be evaluated.
+ ...
+ ... # Examples of `prob`.
+ ... # Similar calls can be made to other probability functions
+ ... # by replacing 'prob' by the name of the function.
+ ... ans = self.g1.prob(value)
+ ...
+ ... # Functions `mean`, `mode`, sd`, `var`, and `entropy` do not take in any argument.
+ ... ans = self.g1.mean()
+ ... ans = self.g1.mode()
+ ... ans = self.g1.sd()
+ ... ans = self.g1.entropy()
+ ... ans = self.g1.var()
+ ...
+ ... # Interfaces of 'kl_loss' and 'cross_entropy' are the same:
+ ... # Args:
+ ... # dist (str): the type of the distributions. Only "Gumbel" is supported.
+ ... # loc_b (Tensor): the loc of distribution b.
+ ... # scale_b (Tensor): the scale distribution b.
+ ...
+ ... # Examples of `kl_loss`. `cross_entropy` is similar.
+ ... ans = self.g1.kl_loss('Gumbel', loc_b, scale_b)
+ ... ans = self.g1.cross_entropy('Gumbel', loc_b, scale_b)
+ ...
+ ... # Examples of `sample`.
+ ... # Args:
+ ... # shape (tuple): the shape of the sample. Default: ()
+ ...
+ ... ans = self.g1.sample()
+ ... ans = self.g1.sample((2,3))
+ ...
 """

 def __init__(self,
@@ -53,75 +53,76 @@ class LogNormal(msd.TransformedDistribution):
 >>>
 >>> # To use a LogNormal distribution in a network.
 >>> class net(Cell):
- >>> def __init__(self):
- >>> super(net, self).__init__():
- >>> self.n1 = msd.LogNormal(0.0, 1.0, dtype=mstype.float32)
- >>> self.n2 = msd.LogNormal(dtype=mstype.float32)
- >>>
- >>> # The following calls are valid in construct.
- >>> def construct(self, value, loc_b, scale_b, loc_a, scale_a):
- >>>
- >>> # Private interfaces of probability functions corresponding to public interfaces, including
- >>> # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, have the same
- >>> # arguments as follows.
- >>> # Args:
- >>> # value (Tensor): the value to be evaluated.
- >>> # loc (Tensor): the loc of distribution. Default: None. If `loc` is passed in as None,
- >>> # the mean of the underlying Normal distribution will be used.
- >>> # scale (Tensor): the scale of distribution. Default: None. If `scale` is passed in as None,
- >>> # the standard deviation of the underlying Normal distribution will be used.
- >>>
- >>> # Examples of `prob`.
- >>> # Similar calls can be made to other probability functions
- >>> # by replacing 'prob' by the name of the function.
- >>> ans = self.n1.prob(value)
- >>> # Evaluate with respect to distribution b.
- >>> ans = self.n1.prob(value, loc_b, scale_b)
- >>> # `loc` and `scale` must be passed in during function calls since they were not passed in construct.
- >>> ans = self.n2.prob(value, loc_a, scale_a)
- >>>
- >>>
- >>> # Functions `mean`, `sd`, `var`, and `entropy` have the same arguments.
- >>> # Args:
- >>> # loc (Tensor): the loc of distribution. Default: None. If `loc` is passed in as None,
- >>> # the mean of the underlying Normal distribution will be used.
- >>> # scale (Tensor): the scale of distribution. Default: None. If `scale` is passed in as None,
- >>> # the standard deviation of the underlying Normal distribution will be used.
- >>>
- >>> # Example of `mean`. `sd`, `var`, and `entropy` are similar.
- >>> ans = self.n1.mean() # return 0.0
- >>> ans = self.n1.mean(loc_b, scale_b) # return mean_b
- >>> # `loc` and `scale` must be passed in during function calls since they were not passed in construct.
- >>> ans = self.n2.mean(loc_a, scale_a)
- >>>
- >>>
- >>> # Interfaces of 'kl_loss' and 'cross_entropy' are the same:
- >>> # Args:
- >>> # dist (str): the type of the distributions. Only "Normal" is supported.
- >>> # loc_b (Tensor): the loc of distribution b.
- >>> # scale_b (Tensor): the scale distribution b.
- >>> # loc_a (Tensor): the loc of distribution a. Default: None. If `loc` is passed in as None,
- >>> # the mean of the underlying Normal distribution will be used.
- >>> # scale_a (Tensor): the scale distribution a. Default: None. If `scale` is passed in as None,
- >>> # the standard deviation of the underlying Normal distribution will be used.
- >>>
- >>> # Examples of `kl_loss`. `cross_entropy` is similar.
- >>> ans = self.n1.kl_loss('Normal', loc_b, scale_b)
- >>> ans = self.n1.kl_loss('Normal', loc_b, scale_b, loc_a, scale_a)
- >>> # Additional `loc` and `scale` must be passed in since they were not passed in construct.
- >>> ans = self.n2.kl_loss('Normal', loc_b, scale_b, loc_a, scale_a)
- >>>
- >>> # Examples of `sample`.
- >>> # Args:
- >>> # shape (tuple): the shape of the sample. Default: ()
- >>> # loc (Tensor): the loc of the distribution. Default: None. If `loc` is passed in as None,
- >>> # the mean of the underlying Normal distribution will be used.
- >>> # scale (Tensor): the scale of the distribution. Default: None. If `scale` is passed in as None,
- >>> # the standard deviation of the underlying Normal distribution will be used.
- >>> ans = self.n1.sample()
- >>> ans = self.n1.sample((2,3))
- >>> ans = self.n1.sample((2,3), loc_b, scale_b)
- >>> ans = self.n2.sample((2,3), loc_a, scale_a)
+ ... def __init__(self):
+ ... super(net, self).__init__():
+ ... self.n1 = msd.LogNormal(0.0, 1.0, dtype=mstype.float32)
+ ... self.n2 = msd.LogNormal(dtype=mstype.float32)
+ ...
+ ... # The following calls are valid in construct.
+ ... def construct(self, value, loc_b, scale_b, loc_a, scale_a):
+ ...
+ ... # Private interfaces of probability functions corresponding to public interfaces, including
+ ... # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, have the same
+ ... # arguments as follows.
+ ... # Args:
+ ... # value (Tensor): the value to be evaluated.
+ ... # loc (Tensor): the loc of distribution. Default: None. If `loc` is passed in as None,
+ ... # the mean of the underlying Normal distribution will be used.
+ ... # scale (Tensor): the scale of distribution. Default: None. If `scale` is passed in as None,
+ ... # the standard deviation of the underlying Normal distribution will be used.
+ ...
+ ... # Examples of `prob`.
+ ... # Similar calls can be made to other probability functions
+ ... # by replacing 'prob' by the name of the function.
+ ... ans = self.n1.prob(value)
+ ... # Evaluate with respect to distribution b.
+ ... ans = self.n1.prob(value, loc_b, scale_b)
+ ... # `loc` and `scale` must be passed in during function calls since they were not passed in construct.
+ ... ans = self.n2.prob(value, loc_a, scale_a)
+ ...
+ ...
+ ... # Functions `mean`, `sd`, `var`, and `entropy` have the same arguments.
+ ... # Args:
+ ... # loc (Tensor): the loc of distribution. Default: None. If `loc` is passed in as None,
+ ... # the mean of the underlying Normal distribution will be used.
+ ... # scale (Tensor): the scale of distribution. Default: None. If `scale` is passed in as None,
+ ... # the standard deviation of the underlying Normal distribution will be used.
+ ...
+ ... # Example of `mean`. `sd`, `var`, and `entropy` are similar.
+ ... ans = self.n1.mean() # return 0.0
+ ... ans = self.n1.mean(loc_b, scale_b) # return mean_b
+ ... # `loc` and `scale` must be passed in during function calls since they were not passed in construct.
+ ... ans = self.n2.mean(loc_a, scale_a)
+ ...
+ ...
+ ... # Interfaces of 'kl_loss' and 'cross_entropy' are the same:
+ ... # Args:
+ ... # dist (str): the type of the distributions. Only "Normal" is supported.
+ ... # loc_b (Tensor): the loc of distribution b.
+ ... # scale_b (Tensor): the scale distribution b.
+ ... # loc_a (Tensor): the loc of distribution a. Default: None. If `loc` is passed in as None,
+ ... # the mean of the underlying Normal distribution will be used.
+ ... # scale_a (Tensor): the scale distribution a. Default: None. If `scale` is passed in as None,
+ ... # the standard deviation of the underlying Normal distribution will be used.
+ ...
+ ... # Examples of `kl_loss`. `cross_entropy` is similar.
+ ... ans = self.n1.kl_loss('Normal', loc_b, scale_b)
+ ... ans = self.n1.kl_loss('Normal', loc_b, scale_b, loc_a, scale_a)
+ ... # Additional `loc` and `scale` must be passed in since they were not passed in construct.
+ ... ans = self.n2.kl_loss('Normal', loc_b, scale_b, loc_a, scale_a)
+ ...
+ ... # Examples of `sample`.
+ ... # Args:
+ ... # shape (tuple): the shape of the sample. Default: ()
+ ... # loc (Tensor): the loc of the distribution. Default: None. If `loc` is passed in as None,
+ ... # the mean of the underlying Normal distribution will be used.
+ ... # scale (Tensor): the scale of the distribution. Default: None. If `scale` is passed in as None,
+ ... # the standard deviation of the underlying Normal distribution will be used.
+ ... ans = self.n1.sample()
+ ... ans = self.n1.sample((2,3))
+ ... ans = self.n1.sample((2,3), loc_b, scale_b)
+ ... ans = self.n2.sample((2,3), loc_a, scale_a)
+ ...
 """

 def __init__(self,
@ -53,50 +53,51 @@ class Logistic(Distribution):
|
|||
>>>
|
||||
>>> # To use a Normal distribution in a network.
|
||||
>>> class net(Cell):
|
||||
>>> def __init__(self):
|
||||
>>> super(net, self).__init__():
|
||||
>>> self.l1 = msd.Logistic(0.0, 1.0, dtype=mstype.float32)
|
||||
>>> self.l2 = msd.Logistic(dtype=mstype.float32)
|
||||
>>>
|
||||
>>> # The following calls are valid in construct.
|
||||
>>> def construct(self, value, loc_b, scale_b, loc_a, scale_a):
|
||||
>>>
|
||||
>>> # Private interfaces of probability functions corresponding to public interfaces, including
|
||||
>>> # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, have the same arguments as follows.
|
||||
>>> # Args:
|
||||
>>> # value (Tensor): the value to be evaluated.
|
||||
>>> # loc (Tensor): the location of the distribution. Default: self.loc.
|
||||
>>> # scale (Tensor): the scale of the distribution. Default: self.scale.
|
||||
>>>
|
||||
>>> # Examples of `prob`.
|
||||
>>> # Similar calls can be made to other probability functions
|
||||
>>> # by replacing 'prob' by the name of the function
|
||||
>>> ans = self.l1.prob(value)
|
||||
>>> # Evaluate with respect to distribution b.
|
||||
>>> ans = self.l1.prob(value, loc_b, scale_b)
|
||||
>>> # `loc` and `scale` must be passed in during function calls
|
||||
>>> ans = self.l2.prob(value, loc_a, scale_a)
|
||||
>>>
|
||||
>>> # Functions `mean`, `mode`, `sd`, `var`, and `entropy` have the same arguments.
|
||||
>>> # Args:
|
||||
>>> # loc (Tensor): the location of the distribution. Default: self.loc.
|
||||
>>> # scale (Tensor): the scale of the distribution. Default: self.scale.
|
||||
>>>
|
||||
>>> # Example of `mean`. `mode`, `sd`, `var`, and `entropy` are similar.
|
||||
>>> ans = self.l1.mean() # return 0.0
|
||||
>>> ans = self.l1.mean(loc_b, scale_b) # return loc_b
|
||||
>>> # `loc` and `scale` must be passed in during function calls.
|
||||
>>> ans = self.l2.mean(loc_a, scale_a)
|
||||
>>>
|
||||
>>> # Examples of `sample`.
|
||||
>>> # Args:
|
||||
>>> # shape (tuple): the shape of the sample. Default: ()
|
||||
>>> # loc (Tensor): the location of the distribution. Default: self.loc.
|
||||
>>> # scale (Tensor): the scale of the distribution. Default: self.scale.
|
||||
>>> ans = self.l1.sample()
|
||||
>>> ans = self.l1.sample((2,3))
|
||||
>>> ans = self.l1.sample((2,3), loc_b, scale_b)
|
||||
>>> ans = self.l2.sample((2,3), loc_a, scale_a)
|
||||
... def __init__(self):
|
||||
... super(net, self).__init__()
|
||||
... self.l1 = msd.Logistic(0.0, 1.0, dtype=mstype.float32)
|
||||
... self.l2 = msd.Logistic(dtype=mstype.float32)
|
||||
...
|
||||
... # The following calls are valid in construct.
|
||||
... def construct(self, value, loc_b, scale_b, loc_a, scale_a):
|
||||
...
|
||||
... # Private interfaces of probability functions corresponding to public interfaces, including
|
||||
... # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, have the same arguments as follows.
|
||||
... # Args:
|
||||
... # value (Tensor): the value to be evaluated.
|
||||
... # loc (Tensor): the location of the distribution. Default: self.loc.
|
||||
... # scale (Tensor): the scale of the distribution. Default: self.scale.
|
||||
...
|
||||
... # Examples of `prob`.
|
||||
... # Similar calls can be made to other probability functions
|
||||
... # by replacing 'prob' by the name of the function
|
||||
... ans = self.l1.prob(value)
|
||||
... # Evaluate with respect to distribution b.
|
||||
... ans = self.l1.prob(value, loc_b, scale_b)
|
||||
... # `loc` and `scale` must be passed in during function calls
|
||||
... ans = self.l2.prob(value, loc_a, scale_a)
|
||||
...
|
||||
... # Functions `mean`, `mode`, `sd`, `var`, and `entropy` have the same arguments.
|
||||
... # Args:
|
||||
... # loc (Tensor): the location of the distribution. Default: self.loc.
|
||||
... # scale (Tensor): the scale of the distribution. Default: self.scale.
|
||||
...
|
||||
... # Example of `mean`. `mode`, `sd`, `var`, and `entropy` are similar.
|
||||
... ans = self.l1.mean() # return 0.0
|
||||
... ans = self.l1.mean(loc_b, scale_b) # return loc_b
|
||||
... # `loc` and `scale` must be passed in during function calls.
|
||||
... ans = self.l2.mean(loc_a, scale_a)
|
||||
...
|
||||
... # Examples of `sample`.
|
||||
... # Args:
|
||||
... # shape (tuple): the shape of the sample. Default: ()
|
||||
... # loc (Tensor): the location of the distribution. Default: self.loc.
|
||||
... # scale (Tensor): the scale of the distribution. Default: self.scale.
|
||||
... ans = self.l1.sample()
|
||||
... ans = self.l1.sample((2,3))
|
||||
... ans = self.l1.sample((2,3), loc_b, scale_b)
|
||||
... ans = self.l2.sample((2,3), loc_a, scale_a)
|
||||
...
|
||||
"""
|
||||
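A quick NumPy sketch of what the Logistic `prob` and `cdf` calls above compute, assuming the standard logistic density (helper names are illustrative, not part of the API):

    import numpy as np

    def logistic_cdf(value, loc=0.0, scale=1.0):
        # CDF of Logistic(loc, scale): a shifted, scaled sigmoid.
        return 1.0 / (1.0 + np.exp(-(value - loc) / scale))

    def logistic_pdf(value, loc=0.0, scale=1.0):
        # PDF is the derivative of the CDF above.
        z = np.exp(-(value - loc) / scale)
        return z / (scale * (1.0 + z) ** 2)

    print(logistic_cdf(0.3), logistic_pdf(0.3))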
|
||||
def __init__(self,
|
||||
|
|
|
@ -53,66 +53,67 @@ class Normal(Distribution):
|
|||
>>>
|
||||
>>> # To use a Normal distribution in a network.
|
||||
>>> class net(Cell):
|
||||
>>> def __init__(self):
|
||||
>>> super(net, self).__init__():
|
||||
>>> self.n1 = msd.Nomral(0.0, 1.0, dtype=mstype.float32)
|
||||
>>> self.n2 = msd.Normal(dtype=mstype.float32)
|
||||
>>>
|
||||
>>> # The following calls are valid in construct.
|
||||
>>> def construct(self, value, mean_b, sd_b, mean_a, sd_a):
|
||||
>>>
|
||||
>>> # Private interfaces of probability functions corresponding to public interfaces, including
|
||||
>>> # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, have the same arguments as follows.
|
||||
>>> # Args:
|
||||
>>> # value (Tensor): the value to be evaluated.
|
||||
>>> # mean (Tensor): the mean of distribution. Default: self._mean_value.
|
||||
>>> # sd (Tensor): the standard deviation of distribution. Default: self._sd_value.
|
||||
>>>
|
||||
>>> # Examples of `prob`.
|
||||
>>> # Similar calls can be made to other probability functions
|
||||
>>> # by replacing 'prob' by the name of the function
|
||||
>>> ans = self.n1.prob(value)
|
||||
>>> # Evaluate with respect to distribution b.
|
||||
>>> ans = self.n1.prob(value, mean_b, sd_b)
|
||||
>>> # `mean` and `sd` must be passed in during function calls
|
||||
>>> ans = self.n2.prob(value, mean_a, sd_a)
|
||||
>>>
|
||||
>>>
|
||||
>>> # Functions `mean`, `sd`, `var`, and `entropy` have the same arguments.
|
||||
>>> # Args:
|
||||
>>> # mean (Tensor): the mean of distribution. Default: self._mean_value.
|
||||
>>> # sd (Tensor): the standard deviation of distribution. Default: self._sd_value.
|
||||
>>>
|
||||
>>> # Example of `mean`. `sd`, `var`, and `entropy` are similar.
|
||||
>>> ans = self.n1.mean() # return 0.0
|
||||
>>> ans = self.n1.mean(mean_b, sd_b) # return mean_b
|
||||
>>> # `mean` and `sd` must be passed in during function calls.
|
||||
>>> ans = self.n2.mean(mean_a, sd_a)
|
||||
>>>
|
||||
>>>
|
||||
>>> # Interfaces of 'kl_loss' and 'cross_entropy' are the same:
|
||||
>>> # Args:
|
||||
>>> # dist (str): the type of the distributions. Only "Normal" is supported.
|
||||
>>> # mean_b (Tensor): the mean of distribution b.
|
||||
>>> # sd_b (Tensor): the standard deviation distribution b.
|
||||
>>> # mean_a (Tensor): the mean of distribution a. Default: self._mean_value.
|
||||
>>> # sd_a (Tensor): the standard deviation distribution a. Default: self._sd_value.
|
||||
>>>
|
||||
>>> # Examples of `kl_loss`. `cross_entropy` is similar.
|
||||
>>> ans = self.n1.kl_loss('Normal', mean_b, sd_b)
|
||||
>>> ans = self.n1.kl_loss('Normal', mean_b, sd_b, mean_a, sd_a)
|
||||
>>> # Additional `mean` and `sd` must be passed in.
|
||||
>>> ans = self.n2.kl_loss('Normal', mean_b, sd_b, mean_a, sd_a)
|
||||
>>>
|
||||
>>> # Examples of `sample`.
|
||||
>>> # Args:
|
||||
>>> # shape (tuple): the shape of the sample. Default: ()
|
||||
>>> # mean (Tensor): the mean of the distribution. Default: self._mean_value.
|
||||
>>> # sd (Tensor): the standard deviation of the distribution. Default: self._sd_value.
|
||||
>>> ans = self.n1.sample()
|
||||
>>> ans = self.n1.sample((2,3))
|
||||
>>> ans = self.n1.sample((2,3), mean_b, sd_b)
|
||||
>>> ans = self.n2.sample((2,3), mean_a, sd_a)
|
||||
... def __init__(self):
|
||||
... super(net, self).__init__()
|
||||
... self.n1 = msd.Normal(0.0, 1.0, dtype=mstype.float32)
|
||||
... self.n2 = msd.Normal(dtype=mstype.float32)
|
||||
...
|
||||
... # The following calls are valid in construct.
|
||||
... def construct(self, value, mean_b, sd_b, mean_a, sd_a):
|
||||
...
|
||||
... # Private interfaces of probability functions corresponding to public interfaces, including
|
||||
... # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, have the same arguments as follows.
|
||||
... # Args:
|
||||
... # value (Tensor): the value to be evaluated.
|
||||
... # mean (Tensor): the mean of distribution. Default: self._mean_value.
|
||||
... # sd (Tensor): the standard deviation of distribution. Default: self._sd_value.
|
||||
...
|
||||
... # Examples of `prob`.
|
||||
... # Similar calls can be made to other probability functions
|
||||
... # by replacing 'prob' by the name of the function
|
||||
... ans = self.n1.prob(value)
|
||||
... # Evaluate with respect to distribution b.
|
||||
... ans = self.n1.prob(value, mean_b, sd_b)
|
||||
... # `mean` and `sd` must be passed in during function calls
|
||||
... ans = self.n2.prob(value, mean_a, sd_a)
|
||||
...
|
||||
...
|
||||
... # Functions `mean`, `sd`, `var`, and `entropy` have the same arguments.
|
||||
... # Args:
|
||||
... # mean (Tensor): the mean of distribution. Default: self._mean_value.
|
||||
... # sd (Tensor): the standard deviation of distribution. Default: self._sd_value.
|
||||
...
|
||||
... # Example of `mean`. `sd`, `var`, and `entropy` are similar.
|
||||
... ans = self.n1.mean() # return 0.0
|
||||
... ans = self.n1.mean(mean_b, sd_b) # return mean_b
|
||||
... # `mean` and `sd` must be passed in during function calls.
|
||||
... ans = self.n2.mean(mean_a, sd_a)
|
||||
...
|
||||
...
|
||||
... # Interfaces of 'kl_loss' and 'cross_entropy' are the same:
|
||||
... # Args:
|
||||
... # dist (str): the type of the distributions. Only "Normal" is supported.
|
||||
... # mean_b (Tensor): the mean of distribution b.
|
||||
... # sd_b (Tensor): the standard deviation of distribution b.
|
||||
... # mean_a (Tensor): the mean of distribution a. Default: self._mean_value.
|
||||
... # sd_a (Tensor): the standard deviation of distribution a. Default: self._sd_value.
|
||||
...
|
||||
... # Examples of `kl_loss`. `cross_entropy` is similar.
|
||||
... ans = self.n1.kl_loss('Normal', mean_b, sd_b)
|
||||
... ans = self.n1.kl_loss('Normal', mean_b, sd_b, mean_a, sd_a)
|
||||
... # Additional `mean` and `sd` must be passed in.
|
||||
... ans = self.n2.kl_loss('Normal', mean_b, sd_b, mean_a, sd_a)
|
||||
...
|
||||
... # Examples of `sample`.
|
||||
... # Args:
|
||||
... # shape (tuple): the shape of the sample. Default: ()
|
||||
... # mean (Tensor): the mean of the distribution. Default: self._mean_value.
|
||||
... # sd (Tensor): the standard deviation of the distribution. Default: self._sd_value.
|
||||
... ans = self.n1.sample()
|
||||
... ans = self.n1.sample((2,3))
|
||||
... ans = self.n1.sample((2,3), mean_b, sd_b)
|
||||
... ans = self.n2.sample((2,3), mean_a, sd_a)
|
||||
...
|
||||
"""
|
||||
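The `kl_loss('Normal', ...)` calls above have a closed form. A small NumPy sketch of the standard Normal-to-Normal KL divergence, under the assumption that this is the quantity computed (the helper name is illustrative):

    import numpy as np

    def normal_kl(mean_a, sd_a, mean_b, sd_b):
        # KL(N(mean_a, sd_a) || N(mean_b, sd_b)), elementwise over broadcastable arrays.
        return (np.log(sd_b / sd_a)
                + (sd_a ** 2 + (mean_a - mean_b) ** 2) / (2.0 * sd_b ** 2)
                - 0.5)

    print(normal_kl(0.0, 1.0, 1.0, 2.0))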
|
||||
def __init__(self,
|
||||
|
|
|
@ -54,19 +54,20 @@ class TransformedDistribution(Distribution):
|
|||
>>> import mindspore.nn.probability.distribution as msd
|
||||
>>> import mindspore.nn.probability.bijector as msb
|
||||
>>> ln = msd.TransformedDistribution(msb.Exp(),
|
||||
>>> msd.Normal(0.0, 1.0, dtype=mstype.float32))
|
||||
>>>
|
||||
... msd.Normal(0.0, 1.0, dtype=mstype.float32))
|
||||
...
|
||||
>>> # To use a transformed distribution in a network.
|
||||
>>> class net(Cell):
|
||||
>>> def __init__(self):
|
||||
>>> super(net, self).__init__():
|
||||
>>> self.ln = msd.TransformedDistribution(msb.Exp(),
|
||||
>>> msd.Normal(0.0, 1.0, dtype=mstype.float32))
|
||||
>>>
|
||||
>>> def construct(self, value):
|
||||
>>> # Similar calls can be made to other functions
|
||||
>>> # by replacing 'sample' by the name of the function.
|
||||
>>> ans = self.ln.sample(shape=(2, 3))
|
||||
... def __init__(self):
|
||||
... super(net, self).__init__()
|
||||
... self.ln = msd.TransformedDistribution(msb.Exp(),
|
||||
... msd.Normal(0.0, 1.0, dtype=mstype.float32))
|
||||
...
|
||||
... def construct(self, value):
|
||||
... # Similar calls can be made to other functions
|
||||
... # by replacing 'sample' by the name of the function.
|
||||
... ans = self.ln.sample(shape=(2, 3))
|
||||
...
|
||||
"""
|
||||
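Composing `msb.Exp()` with `msd.Normal(0.0, 1.0)` as above yields a log-normal law, so sampling reduces to exponentiating Normal draws. A minimal NumPy sketch of that equivalent sampling path (seed and shapes are illustrative):

    import numpy as np

    rng = np.random.default_rng(0)
    # TransformedDistribution(Exp(), Normal(0, 1)) samples as exp of Normal draws.
    normal_samples = rng.normal(loc=0.0, scale=1.0, size=(2, 3))
    lognormal_samples = np.exp(normal_samples)
    print(lognormal_samples)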
|
||||
def __init__(self,
|
||||
|
|
|
@ -52,66 +52,67 @@ class Uniform(Distribution):
|
|||
>>>
|
||||
>>> # To use a Uniform distribution in a network.
|
||||
>>> class net(Cell):
|
||||
>>> def __init__(self)
|
||||
>>> super(net, self).__init__():
|
||||
>>> self.u1 = msd.Uniform(0.0, 1.0, dtype=mstype.float32)
|
||||
>>> self.u2 = msd.Uniform(dtype=mstype.float32)
|
||||
>>>
|
||||
>>> # All the following calls in construct are valid.
|
||||
>>> def construct(self, value, low_b, high_b, low_a, high_a):
|
||||
>>>
|
||||
>>> # Private interfaces of probability functions corresponding to public interfaces, including
|
||||
>>> # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, have the same arguments.
|
||||
>>> # Args:
|
||||
>>> # value (Tensor): the value to be evaluated.
|
||||
>>> # low (Tensor): the lower bound of distribution. Default: self.low.
|
||||
>>> # high (Tensor): the higher bound of distribution. Default: self.high.
|
||||
>>>
|
||||
>>> # Examples of `prob`.
|
||||
>>> # Similar calls can be made to other probability functions
|
||||
>>> # by replacing 'prob' by the name of the function.
|
||||
>>> ans = self.u1.prob(value)
|
||||
>>> # Evaluate with respect to distribution b.
|
||||
>>> ans = self.u1.prob(value, low_b, high_b)
|
||||
>>> # `high` and `low` must be passed in during function calls.
|
||||
>>> ans = self.u2.prob(value, low_a, high_a)
|
||||
>>>
|
||||
>>>
|
||||
>>> # Functions `mean`, `sd`, `var`, and `entropy` have the same arguments.
|
||||
>>> # Args:
|
||||
>>> # low (Tensor): the lower bound of distribution. Default: self.low.
|
||||
>>> # high (Tensor): the higher bound of distribution. Default: self.high.
|
||||
>>>
|
||||
>>> # Examples of `mean`. `sd`, `var`, and `entropy` are similar.
|
||||
>>> ans = self.u1.mean() # return 0.5
|
||||
>>> ans = self.u1.mean(low_b, high_b) # return (low_b + high_b) / 2
|
||||
>>> # `high` and `low` must be passed in during function calls.
|
||||
>>> ans = self.u2.mean(low_a, high_a)
|
||||
>>>
|
||||
>>> # Interfaces of 'kl_loss' and 'cross_entropy' are the same.
|
||||
>>> # Args:
|
||||
>>> # dist (str): the type of the distributions. Should be "Uniform" in this case.
|
||||
>>> # low_b (Tensor): the lower bound of distribution b.
|
||||
>>> # high_b (Tensor): the upper bound of distribution b.
|
||||
>>> # low_a (Tensor): the lower bound of distribution a. Default: self.low.
|
||||
>>> # high_a (Tensor): the upper bound of distribution a. Default: self.high.
|
||||
>>>
|
||||
>>> # Examples of `kl_loss`. `cross_entropy` is similar.
|
||||
>>> ans = self.u1.kl_loss('Uniform', low_b, high_b)
|
||||
>>> ans = self.u1.kl_loss('Uniform', low_b, high_b, low_a, high_a)
|
||||
>>> # Additional `high` and `low` must be passed in.
|
||||
>>> ans = self.u2.kl_loss('Uniform', low_b, high_b, low_a, high_a)
|
||||
>>>
|
||||
>>>
|
||||
>>> # Examples of `sample`.
|
||||
>>> # Args:
|
||||
>>> # shape (tuple): the shape of the sample. Default: ()
|
||||
>>> # low (Tensor): the lower bound of the distribution. Default: self.low.
|
||||
>>> # high (Tensor): the upper bound of the distribution. Default: self.high.
|
||||
>>> ans = self.u1.sample()
|
||||
>>> ans = self.u1.sample((2,3))
|
||||
>>> ans = self.u1.sample((2,3), low_b, high_b)
|
||||
>>> ans = self.u2.sample((2,3), low_a, high_a)
|
||||
... def __init__(self):
|
||||
... super(net, self).__init__()
|
||||
... self.u1 = msd.Uniform(0.0, 1.0, dtype=mstype.float32)
|
||||
... self.u2 = msd.Uniform(dtype=mstype.float32)
|
||||
...
|
||||
... # All the following calls in construct are valid.
|
||||
... def construct(self, value, low_b, high_b, low_a, high_a):
|
||||
...
|
||||
... # Private interfaces of probability functions corresponding to public interfaces, including
|
||||
... # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, have the same arguments.
|
||||
... # Args:
|
||||
... # value (Tensor): the value to be evaluated.
|
||||
... # low (Tensor): the lower bound of distribution. Default: self.low.
|
||||
... # high (Tensor): the upper bound of distribution. Default: self.high.
|
||||
...
|
||||
... # Examples of `prob`.
|
||||
... # Similar calls can be made to other probability functions
|
||||
... # by replacing 'prob' by the name of the function.
|
||||
... ans = self.u1.prob(value)
|
||||
... # Evaluate with respect to distribution b.
|
||||
... ans = self.u1.prob(value, low_b, high_b)
|
||||
... # `high` and `low` must be passed in during function calls.
|
||||
... ans = self.u2.prob(value, low_a, high_a)
|
||||
...
|
||||
...
|
||||
... # Functions `mean`, `sd`, `var`, and `entropy` have the same arguments.
|
||||
... # Args:
|
||||
... # low (Tensor): the lower bound of distribution. Default: self.low.
|
||||
... # high (Tensor): the upper bound of distribution. Default: self.high.
|
||||
...
|
||||
... # Examples of `mean`. `sd`, `var`, and `entropy` are similar.
|
||||
... ans = self.u1.mean() # return 0.5
|
||||
... ans = self.u1.mean(low_b, high_b) # return (low_b + high_b) / 2
|
||||
... # `high` and `low` must be passed in during function calls.
|
||||
... ans = self.u2.mean(low_a, high_a)
|
||||
...
|
||||
... # Interfaces of 'kl_loss' and 'cross_entropy' are the same.
|
||||
... # Args:
|
||||
... # dist (str): the type of the distributions. Should be "Uniform" in this case.
|
||||
... # low_b (Tensor): the lower bound of distribution b.
|
||||
... # high_b (Tensor): the upper bound of distribution b.
|
||||
... # low_a (Tensor): the lower bound of distribution a. Default: self.low.
|
||||
... # high_a (Tensor): the upper bound of distribution a. Default: self.high.
|
||||
...
|
||||
... # Examples of `kl_loss`. `cross_entropy` is similar.
|
||||
... ans = self.u1.kl_loss('Uniform', low_b, high_b)
|
||||
... ans = self.u1.kl_loss('Uniform', low_b, high_b, low_a, high_a)
|
||||
... # Additional `high` and `low` must be passed in.
|
||||
... ans = self.u2.kl_loss('Uniform', low_b, high_b, low_a, high_a)
|
||||
...
|
||||
...
|
||||
... # Examples of `sample`.
|
||||
... # Args:
|
||||
... # shape (tuple): the shape of the sample. Default: ()
|
||||
... # low (Tensor): the lower bound of the distribution. Default: self.low.
|
||||
... # high (Tensor): the upper bound of the distribution. Default: self.high.
|
||||
... ans = self.u1.sample()
|
||||
... ans = self.u1.sample((2,3))
|
||||
... ans = self.u1.sample((2,3), low_b, high_b)
|
||||
... ans = self.u2.sample((2,3), low_a, high_a)
|
||||
...
|
||||
"""
|
||||
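For reference, the values quoted in the `mean` comments above follow from the standard Uniform(low, high) moment formulas; a small sketch (helper names are illustrative):

    import numpy as np

    def uniform_mean(low, high):
        return (low + high) / 2.0          # matches the `mean` comments above

    def uniform_var(low, high):
        return (high - low) ** 2 / 12.0    # variance of Uniform(low, high)

    print(uniform_mean(0.0, 1.0), uniform_var(0.0, 1.0))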
|
||||
def __init__(self,
|
||||
|
|
|
@ -31,14 +31,14 @@ class SparseToDense(Cell):
|
|||
|
||||
Examples:
|
||||
>>> class SparseToDenseCell(nn.Cell):
|
||||
>>> def __init__(self, dense_shape):
|
||||
>>> super(SparseToDenseCell, self).__init__()
|
||||
>>> self.dense_shape = dense_shape
|
||||
>>> self.sparse_to_dense = nn.SparseToDense()
|
||||
>>> def construct(self, indices, values):
|
||||
>>> sparse = SparseTensor(indices, values, self.dense_shape)
|
||||
>>> return self.sparse_to_dense(sparse)
|
||||
>>>
|
||||
... def __init__(self, dense_shape):
|
||||
... super(SparseToDenseCell, self).__init__()
|
||||
... self.dense_shape = dense_shape
|
||||
... self.sparse_to_dense = nn.SparseToDense()
|
||||
... def construct(self, indices, values):
|
||||
... sparse = SparseTensor(indices, values, self.dense_shape)
|
||||
... return self.sparse_to_dense(sparse)
|
||||
...
|
||||
>>> indices = Tensor([[0, 1], [1, 2]])
|
||||
>>> values = Tensor([1, 2], dtype=ms.float32)
|
||||
>>> dense_shape = (3, 4)
|
||||
|
|
|
@ -1417,13 +1417,15 @@ class IFMR(PrimitiveWithInfer):
|
|||
|
||||
Examples:
|
||||
>>> data = Tensor(np.random.rand(1, 3, 6, 4).astype(np.float32))
|
||||
>>> data_min = Tensor([0.1], mstype.float32)
|
||||
>>> data_max = Tensor([0.5], mstype.float32)
|
||||
>>> data_min = Tensor([0.1], mindspore.float32)
|
||||
>>> data_max = Tensor([0.5], mindspore.float32)
|
||||
>>> cumsum = Tensor(np.random.rand(4).astype(np.int32))
|
||||
>>> ifmr = Q.IFMR(min_percentile=0.2, max_percentile=0.9, search_range=(1.0, 2.0),
|
||||
>>> search_step=1.0, with_offset=False)
|
||||
... search_step=1.0, with_offset=False)
|
||||
>>> output = ifmr(data, data_min, data_max, cumsum)
|
||||
([7.87401572e-03], [0.00000000e+00])
|
||||
>>> print(output)
|
||||
(Tensor(shape=[1], dtype=Float32, value= [7.87401572e-03]),
|
||||
Tensor(shape=[1], dtype=Float32, value= [0.00000000e+00]))
|
||||
"""
|
||||
|
||||
@prim_attr_register
|
||||
|
|
|
@ -148,8 +148,8 @@ class ExpandDims(PrimitiveWithInfer):
|
|||
>>> expand_dims = P.ExpandDims()
|
||||
>>> output = expand_dims(input_tensor, 0)
|
||||
>>> print(output)
|
||||
[[[2.0, 2.0],
|
||||
[2.0, 2.0]]]
|
||||
[[[2. 2.]
|
||||
[2. 2.]]]
|
||||
"""
|
||||
|
||||
@prim_attr_register
|
||||
|
@ -230,8 +230,8 @@ class SameTypeShape(PrimitiveWithInfer):
|
|||
Examples:
|
||||
>>> input_x = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)
|
||||
>>> input_y = Tensor(np.array([[2, 2], [2, 2]]), mindspore.float32)
|
||||
>>> out = P.SameTypeShape()(input_x, input_y)
|
||||
>>> print(out)
|
||||
>>> output = P.SameTypeShape()(input_x, input_y)
|
||||
>>> print(output)
|
||||
[[2. 2.]
|
||||
[2. 2.]]
|
||||
"""
|
||||
|
@ -342,8 +342,8 @@ class IsSubClass(PrimitiveWithInfer):
|
|||
bool, the check result.
|
||||
|
||||
Examples:
|
||||
>>> result = P.IsSubClass()(mindspore.int32, mindspore.intc)
|
||||
>>> print(result)
|
||||
>>> output = P.IsSubClass()(mindspore.int32, mindspore.intc)
|
||||
>>> print(output)
|
||||
True
|
||||
"""
|
||||
|
||||
|
@ -379,9 +379,9 @@ class IsInstance(PrimitiveWithInfer):
|
|||
|
||||
Examples:
|
||||
>>> a = 1
|
||||
>>> result = P.IsInstance()(a, mindspore.int64)
|
||||
>>> print(result)
|
||||
True
|
||||
>>> output = P.IsInstance()(a, mindspore.int32)
|
||||
>>> print(output)
|
||||
False
|
||||
"""
|
||||
|
||||
@prim_attr_register
|
||||
|
@ -668,8 +668,9 @@ class Unique(Primitive):
|
|||
|
||||
Examples:
|
||||
>>> x = Tensor(np.array([1, 2, 5, 2]), mindspore.int32)
|
||||
>>> out = P.Unique()(x)
|
||||
(Tensor([1, 2, 5], mindspore.int32), Tensor([0, 1, 2, 1], mindspore.int32))
|
||||
>>> output = P.Unique()(x)
|
||||
>>> print(output)
|
||||
(Tensor(shape=[3], dtype=Int32, value= [1, 2, 5]), Tensor(shape=[4], dtype=Int32, value= [0, 1, 2, 1]))
|
||||
"""
|
||||
|
||||
@prim_attr_register
|
||||
|
@ -696,11 +697,11 @@ class GatherV2(PrimitiveWithCheck):
|
|||
>>> input_params = Tensor(np.array([[1, 2, 7, 42], [3, 4, 54, 22], [2, 2, 55, 3]]), mindspore.float32)
|
||||
>>> input_indices = Tensor(np.array([1, 2]), mindspore.int32)
|
||||
>>> axis = 1
|
||||
>>> out = P.GatherV2()(input_params, input_indices, axis)
|
||||
>>> print(out)
|
||||
[[2.0, 7.0],
|
||||
[4.0, 54.0],
|
||||
[2.0, 55.0]]
|
||||
>>> output = P.GatherV2()(input_params, input_indices, axis)
|
||||
>>> print(output)
|
||||
[[ 2. 7.]
|
||||
[ 4. 54.]
|
||||
[ 2. 55.]]
|
||||
"""
|
||||
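The corrected GatherV2 example above is equivalent to NumPy's take along an axis; a sketch reproducing the same output:

    import numpy as np

    params = np.array([[1, 2, 7, 42], [3, 4, 54, 22], [2, 2, 55, 3]], dtype=np.float32)
    indices = np.array([1, 2])
    # Selecting columns 1 and 2 along axis 1 mirrors the GatherV2 example above.
    print(np.take(params, indices, axis=1))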
|
||||
@prim_attr_register
|
||||
|
@ -770,9 +771,10 @@ class Padding(PrimitiveWithInfer):
|
|||
Examples:
|
||||
>>> x = Tensor(np.array([[8], [10]]), mindspore.float32)
|
||||
>>> pad_dim_size = 4
|
||||
>>> out = P.Padding(pad_dim_size)(x)
|
||||
>>> print(out)
|
||||
[[8, 0, 0, 0], [10, 0, 0, 0]]
|
||||
>>> output = P.Padding(pad_dim_size)(x)
|
||||
>>> print(output)
|
||||
[[ 8. 0. 0. 0.]
|
||||
[10. 0. 0. 0.]]
|
||||
"""
|
||||
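Padding's behavior above maps onto np.pad over the last axis; a sketch reproducing the example with the inputs shown:

    import numpy as np

    x = np.array([[8], [10]], dtype=np.float32)
    pad_dim_size = 4
    # Append zeros along the last axis until it has pad_dim_size elements.
    print(np.pad(x, ((0, 0), (0, pad_dim_size - x.shape[-1]))))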
|
||||
@prim_attr_register
|
||||
|
@ -811,9 +813,10 @@ class UniqueWithPad(PrimitiveWithInfer):
|
|||
Examples:
|
||||
>>> x = Tensor(np.array([1, 1, 5, 5, 4, 4, 3, 3, 2, 2,]), mindspore.int32)
|
||||
>>> pad_num = 8
|
||||
>>> out = P.UniqueWithPad()(x, pad_num)
|
||||
>>> print(out)
|
||||
([1, 5, 4, 3, 2, 8, 8, 8, 8, 8], [0, 0, 1, 1, 2, 2, 3, 3, 4, 4])
|
||||
>>> output = P.UniqueWithPad()(x, pad_num)
|
||||
>>> print(output)
|
||||
(Tensor(shape=[10], dtype=Int32, value= [1, 5, 4, 3, 2, 8, 8, 8, 8, 8]),
|
||||
Tensor(shape=[10], dtype=Int32, value= [0, 0, 1, 1, 2, 2, 3, 3, 4, 4]))
|
||||
"""
|
||||
|
||||
@prim_attr_register
|
||||
|
@ -854,13 +857,14 @@ class Split(PrimitiveWithInfer):
|
|||
|
||||
Examples:
|
||||
>>> split = P.Split(1, 2)
|
||||
>>> x = Tensor(np.array([[1, 1, 1, 1], [2, 2, 2, 2]]))
|
||||
>>> x = Tensor(np.array([[1, 1, 1, 1], [2, 2, 2, 2]]), mindspore.int32)
|
||||
>>> output = split(x)
|
||||
>>> print(output)
|
||||
([[1, 1],
|
||||
[2, 2]],
|
||||
(Tensor(shape=[2, 2], dtype=Int32, value=
|
||||
[[1, 1],
|
||||
[2, 2]])
|
||||
[2, 2]]), Tensor(shape=[2, 2], dtype=Int32, value=
|
||||
[[1, 1],
|
||||
[2, 2]]))
|
||||
"""
|
||||
|
||||
@prim_attr_register
|
||||
|
@ -1025,8 +1029,8 @@ class Fill(PrimitiveWithInfer):
|
|||
>>> fill = P.Fill()
|
||||
>>> output = fill(mindspore.float32, (2, 2), 1)
|
||||
>>> print(output)
|
||||
[[1.0, 1.0],
|
||||
[1.0, 1.0]]
|
||||
[[1. 1.]
|
||||
[1. 1.]]
|
||||
"""
|
||||
|
||||
@prim_attr_register
|
||||
|
@ -1156,8 +1160,8 @@ class OnesLike(PrimitiveWithInfer):
|
|||
>>> x = Tensor(np.array([[0, 1], [2, 1]]).astype(np.int32))
|
||||
>>> output = oneslike(x)
|
||||
>>> print(output)
|
||||
[[1, 1],
|
||||
[1, 1]]
|
||||
[[1 1]
|
||||
[1 1]]
|
||||
"""
|
||||
|
||||
@prim_attr_register
|
||||
|
@ -1189,8 +1193,8 @@ class ZerosLike(PrimitiveWithCheck):
|
|||
>>> x = Tensor(np.array([[0, 1], [2, 1]]).astype(np.float32))
|
||||
>>> output = zeroslike(x)
|
||||
>>> print(output)
|
||||
[[0.0, 0.0],
|
||||
[0.0, 0.0]]
|
||||
[[0. 0.]
|
||||
[0. 0.]]
|
||||
"""
|
||||
|
||||
@prim_attr_register
|
||||
|
@ -1338,7 +1342,8 @@ class InvertPermutation(PrimitiveWithInfer):
|
|||
>>> invert = P.InvertPermutation()
|
||||
>>> input_data = (3, 4, 0, 2, 1)
|
||||
>>> output = invert(input_data)
|
||||
>>> output == (2, 4, 3, 0, 1)
|
||||
>>> print(output)
|
||||
(2, 4, 3, 0, 1)
|
||||
"""
|
||||
|
||||
@prim_attr_register
|
||||
|
@ -1400,8 +1405,8 @@ class Argmax(PrimitiveWithInfer):
|
|||
|
||||
Examples:
|
||||
>>> input_x = Tensor(np.array([2.0, 3.1, 1.2]), mindspore.float32)
|
||||
>>> index = P.Argmax(output_type=mindspore.int32)(input_x)
|
||||
>>> print(index)
|
||||
>>> output = P.Argmax(output_type=mindspore.int32)(input_x)
|
||||
>>> print(output)
|
||||
1
|
||||
"""
|
||||
|
||||
|
@ -1559,9 +1564,9 @@ class ArgMinWithValue(PrimitiveWithInfer):
|
|||
|
||||
Examples:
|
||||
>>> input_x = Tensor(np.random.rand(5), mindspore.float32)
|
||||
>>> index, output = P.ArgMinWithValue()(input_x)
|
||||
>>> print((index, output))
|
||||
0 0.0496291
|
||||
>>> output = P.ArgMinWithValue()(input_x)
|
||||
>>> print(output)
|
||||
(Tensor(shape=[], dtype=Int32, value= 2), Tensor(shape=[], dtype=Float32, value= 0.0595638))
|
||||
"""
|
||||
|
||||
@prim_attr_register
|
||||
|
@ -1616,8 +1621,8 @@ class Tile(PrimitiveWithInfer):
|
|||
>>> tile = P.Tile()
|
||||
>>> input_x = Tensor(np.array([[1, 2], [3, 4]]), mindspore.float32)
|
||||
>>> multiples = (2, 3)
|
||||
>>> result = tile(input_x, multiples)
|
||||
>>> print(result)
|
||||
>>> output = tile(input_x, multiples)
|
||||
>>> print(output)
|
||||
[[1. 2. 1. 2. 1. 2.]
|
||||
[3. 4. 3. 4. 3. 4.]
|
||||
[1. 2. 1. 2. 1. 2.]
|
||||
|
@ -1693,7 +1698,7 @@ class UnsortedSegmentSum(PrimitiveWithInfer):
|
|||
>>> num_segments = 4
|
||||
>>> output = P.UnsortedSegmentSum()(input_x, segment_ids, num_segments)
|
||||
>>> print(output)
|
||||
[3, 3, 4, 0]
|
||||
[3. 3. 4. 0.]
|
||||
"""
|
||||
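A NumPy sketch of the segment-sum semantics; the inputs below are assumptions chosen to reproduce the [3. 3. 4. 0.] output shown above, since the hunk does not include them:

    import numpy as np

    def unsorted_segment_sum(x, segment_ids, num_segments):
        # Accumulate x[i] into out[segment_ids[i]]; untouched segments stay zero.
        out = np.zeros(num_segments, dtype=x.dtype)
        np.add.at(out, segment_ids, x)
        return out

    # Assumed inputs consistent with the [3. 3. 4. 0.] output above.
    x = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
    segment_ids = np.array([0, 0, 1, 2])
    print(unsorted_segment_sum(x, segment_ids, num_segments=4))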
|
||||
@prim_attr_register
|
||||
|
@ -1767,8 +1772,10 @@ class UnsortedSegmentMin(PrimitiveWithInfer):
|
|||
>>> segment_ids = Tensor(np.array([0, 1, 1]).astype(np.int32))
|
||||
>>> num_segments = 2
|
||||
>>> unsorted_segment_min = P.UnsortedSegmentMin()
|
||||
>>> unsorted_segment_min(input_x, segment_ids, num_segments)
|
||||
[[1., 2., 3.], [4., 2., 1.]]
|
||||
>>> output = unsorted_segment_min(input_x, segment_ids, num_segments)
|
||||
>>> print(output)
|
||||
[[1. 2. 3.]
|
||||
[4. 2. 1.]]
|
||||
"""
|
||||
|
||||
@prim_attr_register
|
||||
|
@ -1821,8 +1828,10 @@ class UnsortedSegmentMax(PrimitiveWithInfer):
|
|||
>>> segment_ids = Tensor(np.array([0, 1, 1]).astype(np.int32))
|
||||
>>> num_segments = 2
|
||||
>>> unsorted_segment_max = P.UnsortedSegmentMax()
|
||||
>>> unsorted_segment_max(input_x, segment_ids, num_segments)
|
||||
[[1., 2., 3.], [4., 5., 6.]]
|
||||
>>> output = unsorted_segment_max(input_x, segment_ids, num_segments)
|
||||
>>> print(output)
|
||||
[[1. 2. 3.]
|
||||
[4. 5. 6.]]
|
||||
"""
|
||||
|
||||
@prim_attr_register
|
||||
|
@ -1872,8 +1881,10 @@ class UnsortedSegmentProd(PrimitiveWithInfer):
|
|||
>>> segment_ids = Tensor(np.array([0, 1, 0]).astype(np.int32))
|
||||
>>> num_segments = 2
|
||||
>>> unsorted_segment_prod = P.UnsortedSegmentProd()
|
||||
>>> unsorted_segment_prod(input_x, segment_ids, num_segments)
|
||||
[[4., 4., 3.], [4., 5., 6.]]
|
||||
>>> output = unsorted_segment_prod(input_x, segment_ids, num_segments)
|
||||
>>> print(output)
|
||||
[[4. 4. 3.]
|
||||
[4. 5. 6.]]
|
||||
"""
|
||||
|
||||
@prim_attr_register
|
||||
|
@ -1935,10 +1946,10 @@ class Concat(PrimitiveWithInfer):
|
|||
>>> op = P.Concat()
|
||||
>>> output = op((data1, data2))
|
||||
>>> print(output)
|
||||
[[0, 1],
|
||||
[2, 1],
|
||||
[0, 1],
|
||||
[2, 1]]
|
||||
[[0 1]
|
||||
[2 1]
|
||||
[0 1]
|
||||
[2 1]]
|
||||
"""
|
||||
|
||||
@prim_attr_register
|
||||
|
@ -1983,7 +1994,8 @@ class ParallelConcat(PrimitiveWithInfer):
|
|||
>>> op = P.ParallelConcat()
|
||||
>>> output = op((data1, data2))
|
||||
>>> print(output)
|
||||
[[0, 1], [2, 1]]
|
||||
[[0 1]
|
||||
[2 1]]
|
||||
"""
|
||||
|
||||
@prim_attr_register
|
||||
|
@ -2066,7 +2078,8 @@ class Pack(PrimitiveWithInfer):
|
|||
>>> pack = P.Pack()
|
||||
>>> output = pack([data1, data2])
|
||||
>>> print(output)
|
||||
[[0, 1], [2, 3]]
|
||||
[[0. 1.]
|
||||
[2. 3.]]
|
||||
"""
|
||||
|
||||
@prim_attr_register
|
||||
|
@ -2116,7 +2129,8 @@ class Unpack(PrimitiveWithInfer):
|
|||
>>> input_x = Tensor(np.array([[1, 1, 1, 1], [2, 2, 2, 2]]))
|
||||
>>> output = unpack(input_x)
|
||||
>>> print(output)
|
||||
([1, 1, 1, 1], [2, 2, 2, 2])
|
||||
(Tensor(shape=[4], dtype=Int32, value= [1, 1, 1, 1]),
|
||||
Tensor(shape=[4], dtype=Int32, value= [2, 2, 2, 2]))
|
||||
"""
|
||||
|
||||
@prim_attr_register
|
||||
|
@ -2169,8 +2183,9 @@ class Slice(PrimitiveWithInfer):
|
|||
>>> data = Tensor(np.array([[[1, 1, 1], [2, 2, 2]],
|
||||
... [[3, 3, 3], [4, 4, 4]],
|
||||
... [[5, 5, 5], [6, 6, 6]]]).astype(np.int32))
|
||||
>>> type = P.Slice()(data, (1, 0, 0), (1, 1, 3))
|
||||
>>> print(type)
|
||||
>>> slice = P.Slice()
|
||||
>>> output = slice(data, (1, 0, 0), (1, 1, 3))
|
||||
>>> print(output)
|
||||
[[[3 3 3]]]
|
||||
"""
|
||||
|
||||
|
@ -2223,7 +2238,8 @@ class ReverseV2(PrimitiveWithInfer):
|
|||
>>> op = P.ReverseV2(axis=[1])
|
||||
>>> output = op(input_x)
|
||||
>>> print(output)
|
||||
[[4, 3, 2, 1], [8, 7, 6, 5]]
|
||||
[[4 3 2 1]
|
||||
[8 7 6 5]]
|
||||
"""
|
||||
|
||||
@prim_attr_register
|
||||
|
@ -2261,7 +2277,7 @@ class Rint(PrimitiveWithInfer):
|
|||
>>> op = P.Rint()
|
||||
>>> output = op(input_x)
|
||||
>>> print(output)
|
||||
[-2., 0., 2., 2.]
|
||||
[-2. 0. 2. 2.]
|
||||
"""
|
||||
|
||||
@prim_attr_register
|
||||
|
@ -2321,7 +2337,8 @@ class Select(PrimitiveWithInfer):
|
|||
>>> input_cond = Tensor([True, False])
|
||||
>>> input_x = Tensor([2,3], mindspore.float32)
|
||||
>>> input_y = Tensor([1,2], mindspore.float32)
|
||||
>>> select(input_cond, input_x, input_y)
|
||||
>>> output = select(input_cond, input_x, input_y)
|
||||
>>> print(output)
|
||||
[2. 2.]
|
||||
"""
|
||||
|
||||
|
@ -2454,10 +2471,8 @@ class StridedSlice(PrimitiveWithInfer):
|
|||
... [[5, 5, 5], [6, 6, 6]]], mindspore.float32)
|
||||
>>> slice = P.StridedSlice()
|
||||
>>> output = slice(input_x, (1, 0, 0), (2, 1, 3), (1, 1, 1))
|
||||
>>> output.shape
|
||||
(1, 1, 3)
|
||||
>>> output
|
||||
[[[3, 3, 3]]]
|
||||
>>> print(output)
|
||||
[[[3. 3. 3.]]]
|
||||
"""
|
||||
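The StridedSlice example above is ordinary begin:end:stride slicing; a NumPy sketch with the same operands:

    import numpy as np

    x = np.array([[[1, 1, 1], [2, 2, 2]],
                  [[3, 3, 3], [4, 4, 4]],
                  [[5, 5, 5], [6, 6, 6]]], dtype=np.float32)
    # begin (1, 0, 0), end (2, 1, 3), strides (1, 1, 1) is plain slicing:
    print(x[1:2:1, 0:1:1, 0:3:1])   # [[[3. 3. 3.]]]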
|
||||
@prim_attr_register
|
||||
|
@ -2648,13 +2663,13 @@ class DiagPart(PrimitiveWithInfer):
|
|||
|
||||
Examples
|
||||
>>> input_x = Tensor([[1, 0, 0, 0],
|
||||
>>> [0, 2, 0, 0],
|
||||
>>> [0, 0, 3, 0],
|
||||
>>> [0, 0, 0, 4]])
|
||||
... [0, 2, 0, 0],
|
||||
... [0, 0, 3, 0],
|
||||
... [0, 0, 0, 4]])
|
||||
>>> diag_part = P.DiagPart()
|
||||
>>> output = diag_part(input_x)
|
||||
>>> print(output)
|
||||
[1, 2, 3, 4]
|
||||
[1 2 3 4]
|
||||
"""
|
||||
|
||||
@prim_attr_register
|
||||
|
@ -2702,10 +2717,10 @@ class Eye(PrimitiveWithInfer):
|
|||
|
||||
Examples:
|
||||
>>> eye = P.Eye()
|
||||
>>> out_tensor = eye(2, 2, mindspore.int32)
|
||||
>>> print(out_tensor)
|
||||
[[1, 0],
|
||||
[0, 1]]
|
||||
>>> output = eye(2, 2, mindspore.int32)
|
||||
>>> print(output)
|
||||
[[1 0]
|
||||
[0 1]]
|
||||
"""
|
||||
|
||||
@prim_attr_register
|
||||
|
@ -2836,7 +2851,7 @@ class GatherNd(PrimitiveWithInfer):
|
|||
>>> op = P.GatherNd()
|
||||
>>> output = op(input_x, indices)
|
||||
>>> print(output)
|
||||
[-0.1, 0.5]
|
||||
[-0.1 0.5]
|
||||
"""
|
||||
|
||||
@prim_attr_register
|
||||
|
@ -2873,8 +2888,9 @@ class TensorScatterUpdate(PrimitiveWithInfer):
|
|||
>>> update = Tensor(np.array([1.0, 2.2]), mindspore.float32)
|
||||
>>> op = P.TensorScatterUpdate()
|
||||
>>> output = op(input_x, indices, update)
|
||||
[[1.0, 0.3, 3.6],
|
||||
[0.4, 2.2, -3.2]]
|
||||
>>> print(output)
|
||||
[[ 1. 0.3 3.6]
|
||||
[ 0.4 2.2 -3.2]]
|
||||
"""
|
||||
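A NumPy sketch of the out-of-place scatter that TensorScatterUpdate performs; `x` and `indices` here are assumptions consistent with the output shown above, as the hunk omits them:

    import numpy as np

    x = np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]], dtype=np.float32)
    indices = np.array([[0, 0], [1, 1]])
    updates = np.array([1.0, 2.2], dtype=np.float32)

    # Out-of-place scatter: copy the input, then write each update at its index.
    out = x.copy()
    out[tuple(indices.T)] = updates
    print(out)   # [[ 1.   0.3  3.6]  [ 0.4  2.2 -3.2]]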
|
||||
@prim_attr_register
|
||||
|
@ -2928,8 +2944,8 @@ class ScatterUpdate(_ScatterOp_Dynamic):
|
|||
>>> op = P.ScatterUpdate()
|
||||
>>> output = op(input_x, indices, updates)
|
||||
>>> print(output)
|
||||
[[2.0, 1.2, 1.0],
|
||||
[3.0, 1.2, 1.0]]
|
||||
[[2. 1.2 1. ]
|
||||
[3. 1.2 1. ]]
|
||||
"""
|
||||
|
||||
@prim_attr_register
|
||||
|
@ -3017,7 +3033,8 @@ class ScatterMax(_ScatterOp):
|
|||
>>> scatter_max = P.ScatterMax()
|
||||
>>> output = scatter_max(input_x, indices, update)
|
||||
>>> print(output)
|
||||
[[88.0, 88.0, 88.0], [88.0, 88.0, 88.0]]
|
||||
[[88. 88. 88.]
|
||||
[88. 88. 88.]]
|
||||
"""
|
||||
|
||||
@prim_attr_register
|
||||
|
@ -3058,7 +3075,8 @@ class ScatterMin(_ScatterOp):
|
|||
>>> scatter_min = P.ScatterMin()
|
||||
>>> output = scatter_min(input_x, indices, update)
|
||||
>>> print(output)
|
||||
[[0.0, 1.0, 1.0], [0.0, 0.0, 0.0]]
|
||||
[[0. 1. 1.]
|
||||
[0. 0. 0.]]
|
||||
"""
|
||||
|
||||
|
||||
|
@ -3093,7 +3111,8 @@ class ScatterAdd(_ScatterOp_Dynamic):
|
|||
>>> scatter_add = P.ScatterAdd()
|
||||
>>> output = scatter_add(input_x, indices, updates)
|
||||
>>> print(output)
|
||||
[[1.0, 1.0, 1.0], [3.0, 3.0, 3.0]]
|
||||
[[1. 1. 1.]
|
||||
[3. 3. 3.]]
|
||||
"""
|
||||
|
||||
@prim_attr_register
|
||||
|
@ -3170,7 +3189,8 @@ class ScatterMul(_ScatterOp):
|
|||
>>> scatter_mul = P.ScatterMul()
|
||||
>>> output = scatter_mul(input_x, indices, updates)
|
||||
>>> print(output)
|
||||
[[2.0, 2.0, 2.0], [4.0, 4.0, 4.0]]
|
||||
[[2. 2. 2.]
|
||||
[4. 4. 4.]]
|
||||
"""
|
||||
|
||||
|
||||
|
@ -3205,7 +3225,8 @@ class ScatterDiv(_ScatterOp):
|
|||
>>> scatter_div = P.ScatterDiv()
|
||||
>>> output = scatter_div(input_x, indices, updates)
|
||||
>>> print(output)
|
||||
[[3.0, 3.0, 3.0], [1.0, 1.0, 1.0]]
|
||||
[[3. 3. 3.]
|
||||
[1. 1. 1.]]
|
||||
"""
|
||||
|
||||
|
||||
|
@ -3240,7 +3261,7 @@ class ScatterNdAdd(_ScatterNdOp):
|
|||
>>> scatter_nd_add = P.ScatterNdAdd()
|
||||
>>> output = scatter_nd_add(input_x, indices, updates)
|
||||
>>> print(output)
|
||||
[1, 10, 9, 4, 12, 6, 7, 17]
|
||||
[ 1. 10. 9. 4. 12. 6. 7. 17.]
|
||||
"""
|
||||
|
||||
|
||||
|
@ -3275,7 +3296,7 @@ class ScatterNdSub(_ScatterNdOp):
|
|||
>>> scatter_nd_sub = P.ScatterNdSub()
|
||||
>>> output = scatter_nd_sub(input_x, indices, updates)
|
||||
>>> print(output)
|
||||
[1, -6, -3, 4, -2, 6, 7, -1]
|
||||
[ 1. -6. -3. 4. -2. 6. 7. -1.]
|
||||
"""
|
||||
|
||||
|
||||
|
@ -3307,7 +3328,7 @@ class ScatterNonAliasingAdd(_ScatterNdOp):
|
|||
>>> scatter_non_aliasing_add = P.ScatterNonAliasingAdd()
|
||||
>>> output = scatter_non_aliasing_add(input_x, indices, updates)
|
||||
>>> print(output)
|
||||
[1, 10, 9, 4, 12, 6, 7, 17]
|
||||
[ 1. 10. 9. 4. 12. 6. 7. 17.]
|
||||
"""
|
||||
|
||||
@prim_attr_register
|
||||
|
@ -3347,9 +3368,10 @@ class SpaceToDepth(PrimitiveWithInfer):
|
|||
Examples:
|
||||
>>> x = Tensor(np.random.rand(1,3,2,2), mindspore.float32)
|
||||
>>> block_size = 2
|
||||
>>> op = P.SpaceToDepth(block_size)
|
||||
>>> output = op(x)
|
||||
>>> output.asnumpy().shape == (1,12,1,1)
|
||||
>>> space_to_depth = P.SpaceToDepth(block_size)
|
||||
>>> output = space_to_depth(x)
|
||||
>>> print(output.shape)
|
||||
(1, 12, 1, 1)
|
||||
"""
|
||||
|
||||
@prim_attr_register
|
||||
|
@ -3404,8 +3426,8 @@ class DepthToSpace(PrimitiveWithInfer):
|
|||
Examples:
|
||||
>>> x = Tensor(np.random.rand(1,12,1,1), mindspore.float32)
|
||||
>>> block_size = 2
|
||||
>>> op = P.DepthToSpace(block_size)
|
||||
>>> output = op(x)
|
||||
>>> depth_to_space = P.DepthToSpace(block_size)
|
||||
>>> output = depth_to_space(x)
|
||||
>>> print(output.shape)
|
||||
(1, 3, 2, 2)
|
||||
"""
|
||||
|
@ -3472,9 +3494,12 @@ class SpaceToBatch(PrimitiveWithInfer):
|
|||
>>> paddings = [[0, 0], [0, 0]]
|
||||
>>> space_to_batch = P.SpaceToBatch(block_size, paddings)
|
||||
>>> input_x = Tensor(np.array([[[[1, 2], [3, 4]]]]), mindspore.float32)
|
||||
>>> space_to_batch(input_x)
|
||||
[[[[1.]]], [[[2.]]], [[[3.]]], [[[4.]]]]
|
||||
|
||||
>>> output = space_to_batch(input_x)
|
||||
>>> print(output)
|
||||
[[[[1.]]]
|
||||
[[[2.]]]
|
||||
[[[3.]]]
|
||||
[[[4.]]]]
|
||||
"""
|
||||
|
||||
@prim_attr_register
|
||||
|
@ -3541,11 +3566,12 @@ class BatchToSpace(PrimitiveWithInfer):
|
|||
Examples:
|
||||
>>> block_size = 2
|
||||
>>> crops = [[0, 0], [0, 0]]
|
||||
>>> op = P.BatchToSpace(block_size, crops)
|
||||
>>> batch_to_space = P.BatchToSpace(block_size, crops)
|
||||
>>> input_x = Tensor(np.array([[[[1]]], [[[2]]], [[[3]]], [[[4]]]]), mindspore.float32)
|
||||
>>> output = op(input_x)
|
||||
>>> output = batch_to_space(input_x)
|
||||
>>> print(output)
|
||||
[[[[1., 2.], [3., 4.]]]]
|
||||
[[[[1. 2.]
|
||||
[3. 4.]]]]
|
||||
|
||||
"""
|
||||
|
||||
|
@ -3620,9 +3646,12 @@ class SpaceToBatchND(PrimitiveWithInfer):
|
|||
>>> paddings = [[0, 0], [0, 0]]
|
||||
>>> space_to_batch_nd = P.SpaceToBatchND(block_shape, paddings)
|
||||
>>> input_x = Tensor(np.array([[[[1, 2], [3, 4]]]]), mindspore.float32)
|
||||
>>> space_to_batch_nd(input_x)
|
||||
[[[[1.]]], [[[2.]]], [[[3.]]], [[[4.]]]]
|
||||
|
||||
>>> output = space_to_batch_nd(input_x)
|
||||
>>> print(output)
|
||||
[[[[1.]]]
|
||||
[[[2.]]]
|
||||
[[[3.]]]
|
||||
[[[4.]]]]
|
||||
"""
|
||||
|
||||
@prim_attr_register
|
||||
|
@ -3715,7 +3744,8 @@ class BatchToSpaceND(PrimitiveWithInfer):
|
|||
>>> input_x = Tensor(np.array([[[[1]]], [[[2]]], [[[3]]], [[[4]]]]), mindspore.float32)
|
||||
>>> output = batch_to_space_nd(input_x)
|
||||
>>> print(output)
|
||||
[[[[1., 2.], [3., 4.]]]]
|
||||
[[[[1. 2.]
|
||||
[3. 4.]]]]
|
||||
|
||||
"""
|
||||
|
||||
|
@ -3791,8 +3821,10 @@ class BroadcastTo(PrimitiveWithInfer):
|
|||
>>> shape = (2, 3)
|
||||
>>> input_x = Tensor(np.array([1, 2, 3]).astype(np.float32))
|
||||
>>> broadcast_to = P.BroadcastTo(shape)
|
||||
>>> broadcast_to(input_x)
|
||||
[[1.0, 2.0, 3.0], [1.0, 2.0, 3.0]]
|
||||
>>> output = broadcast_to(input_x)
|
||||
>>> print(output)
|
||||
[[1. 2. 3.]
|
||||
[1. 2. 3.]]
|
||||
"""
|
||||
|
||||
@prim_attr_register
|
||||
|
@ -3939,11 +3971,11 @@ class InplaceUpdate(PrimitiveWithInfer):
|
|||
>>> x = Tensor(np.array([[1, 2], [3, 4], [5, 6]]), mindspore.float32)
|
||||
>>> v = Tensor(np.array([[0.5, 1.0], [1.0, 1.5]]), mindspore.float32)
|
||||
>>> inplace_update = P.InplaceUpdate(indices)
|
||||
>>> result = inplace_update(x, v)
|
||||
>>> print(result)
|
||||
[[0.5, 1.0],
|
||||
[1.0, 1.5],
|
||||
[5.0, 6.0]]
|
||||
>>> output = inplace_update(x, v)
|
||||
>>> print(output)
|
||||
[[0.5 1. ]
|
||||
[1. 1.5]
|
||||
[5. 6. ]]
|
||||
"""
|
||||
|
||||
@prim_attr_register
|
||||
|
@ -3997,9 +4029,9 @@ class ReverseSequence(PrimitiveWithInfer):
|
|||
>>> reverse_sequence = P.ReverseSequence(seq_dim=1)
|
||||
>>> output = reverse_sequence(x, seq_lengths)
|
||||
>>> print(output)
|
||||
[[1 2 3]
|
||||
[5 4 6]
|
||||
[9 8 7]]
|
||||
[[1. 2. 3.]
|
||||
[5. 4. 6.]
|
||||
[9. 8. 7.]]
|
||||
"""
|
||||
|
||||
@prim_attr_register
|
||||
|
@ -4057,16 +4089,16 @@ class EditDistance(PrimitiveWithInfer):
|
|||
>>> import mindspore.ops.operations as P
|
||||
>>> context.set_context(mode=context.GRAPH_MODE)
|
||||
>>> class EditDistance(nn.Cell):
|
||||
>>> def __init__(self, hypothesis_shape, truth_shape, normalize=True):
|
||||
>>> super(EditDistance, self).__init__()
|
||||
>>> self.edit_distance = P.EditDistance(normalize)
|
||||
>>> self.hypothesis_shape = hypothesis_shape
|
||||
>>> self.truth_shape = truth_shape
|
||||
>>>
|
||||
>>> def construct(self, hypothesis_indices, hypothesis_values, truth_indices, truth_values):
|
||||
>>> return self.edit_distance(hypothesis_indices, hypothesis_values, self.hypothesis_shape,
|
||||
>>> truth_indices, truth_values, self.truth_shape)
|
||||
>>>
|
||||
... def __init__(self, hypothesis_shape, truth_shape, normalize=True):
|
||||
... super(EditDistance, self).__init__()
|
||||
... self.edit_distance = P.EditDistance(normalize)
|
||||
... self.hypothesis_shape = hypothesis_shape
|
||||
... self.truth_shape = truth_shape
|
||||
...
|
||||
... def construct(self, hypothesis_indices, hypothesis_values, truth_indices, truth_values):
|
||||
... return self.edit_distance(hypothesis_indices, hypothesis_values, self.hypothesis_shape,
|
||||
... truth_indices, truth_values, self.truth_shape)
|
||||
...
|
||||
>>> hypothesis_indices = Tensor(np.array([[0, 0, 0], [1, 0, 1], [1, 1, 1]]).astype(np.int64))
|
||||
>>> hypothesis_values = Tensor(np.array([1, 2, 3]).astype(np.float32))
|
||||
>>> hypothesis_shape = Tensor(np.array([1, 1, 2]).astype(np.int64))
|
||||
|
@ -4074,9 +4106,10 @@ class EditDistance(PrimitiveWithInfer):
|
|||
>>> truth_values = Tensor(np.array([1, 3, 2, 1]).astype(np.float32))
|
||||
>>> truth_shape = Tensor(np.array([2, 2, 2]).astype(np.int64))
|
||||
>>> edit_distance = EditDistance(hypothesis_shape, truth_shape)
|
||||
>>> out = edit_distance(hypothesis_indices, hypothesis_values, truth_indices, truth_values)
|
||||
>>> print(out)
|
||||
>>> [[1.0, 1.0], [1.0, 1.0]]
|
||||
>>> output = edit_distance(hypothesis_indices, hypothesis_values, truth_indices, truth_values)
|
||||
>>> print(output)
|
||||
[[1. 1.]
|
||||
[1. 1.]]
|
||||
"""
|
||||
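The values in the EditDistance example are normalized Levenshtein distances. A self-contained sketch of that dynamic program, assuming normalization divides by the truth length (as the normalize=True default suggests):

    def edit_distance(hypothesis, truth, normalize=True):
        # Classic Levenshtein dynamic program over two sequences.
        m, n = len(hypothesis), len(truth)
        dp = [[0] * (n + 1) for _ in range(m + 1)]
        for i in range(m + 1):
            dp[i][0] = i
        for j in range(n + 1):
            dp[0][j] = j
        for i in range(1, m + 1):
            for j in range(1, n + 1):
                cost = 0 if hypothesis[i - 1] == truth[j - 1] else 1
                dp[i][j] = min(dp[i - 1][j] + 1,         # deletion
                               dp[i][j - 1] + 1,         # insertion
                               dp[i - 1][j - 1] + cost)  # substitution
        dist = dp[m][n]
        return dist / n if normalize and n else float(dist)

    print(edit_distance([1], [1, 3]))   # 0.5: one insertion, normalized by len(truth)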
|
||||
@prim_attr_register
|
||||
|
@ -4166,9 +4199,15 @@ class Sort(PrimitiveWithInfer):
|
|||
Examples:
|
||||
>>> x = Tensor(np.array([[8, 2, 1], [5, 9, 3], [4, 6, 7]]), mindspore.float16)
|
||||
>>> sort = P.Sort()
|
||||
>>> sort(x)
|
||||
([[1.0, 2.0, 8.0], [3.0, 5.0, 9.0], [4.0, 6.0 ,7.0]],
|
||||
[[2, 1, 0], [2, 0, 1], [0, 1, 2]])
|
||||
>>> output = sort(x)
|
||||
>>> print(output)
|
||||
(Tensor(shape=[3, 3], dtype=Float16, value=
|
||||
[[ 1.0000e+00, 2.0000e+00, 8.0000e+00],
|
||||
[ 3.0000e+00, 5.0000e+00, 9.0000e+00],
|
||||
[ 4.0000e+00, 6.0000e+00, 7.0000e+00]]), Tensor(shape=[3, 3], dtype=Int32, value=
|
||||
[[2, 1, 0],
|
||||
[2, 0, 1],
|
||||
[0, 1, 2]]))
|
||||
"""
|
||||
|
||||
@prim_attr_register
|
||||
|
@ -4208,9 +4247,12 @@ class EmbeddingLookup(PrimitiveWithInfer):
|
|||
>>> input_params = Tensor(np.array([[8, 9], [10, 11], [12, 13], [14, 15]]), mindspore.float32)
|
||||
>>> input_indices = Tensor(np.array([[5, 2], [8, 5]]), mindspore.int32)
|
||||
>>> offset = 4
|
||||
>>> out = P.EmbeddingLookup()(input_params, input_indices, offset)
|
||||
>>> print(out)
|
||||
[[[10, 11], [0 ,0]], [[0, 0], [10, 11]]]
|
||||
>>> output = P.EmbeddingLookup()(input_params, input_indices, offset)
|
||||
>>> print(output)
|
||||
[[[10. 11.]
|
||||
[ 0. 0.]]
|
||||
[[ 0. 0.]
|
||||
[10. 11.]]]
|
||||
"""
|
||||
|
||||
@prim_attr_register
|
||||
|
@ -4259,9 +4301,10 @@ class GatherD(PrimitiveWithInfer):
|
|||
>>> x = Tensor(np.array([[1, 2], [3, 4]]), mindspore.int32)
|
||||
>>> index = Tensor(np.array([[0, 0], [1, 0]]), mindspore.int32)
|
||||
>>> dim = 1
|
||||
>>> out = P.GatherD()(x, dim, index)
|
||||
>>> print(out)
|
||||
[[1, 1], [4, 3]]
|
||||
>>> output = P.GatherD()(x, dim, index)
|
||||
>>> print(output)
|
||||
[[1 1]
|
||||
[4 3]]
|
||||
"""
|
||||
|
||||
@prim_attr_register
|
||||
|
@ -4304,9 +4347,9 @@ class Identity(PrimitiveWithInfer):
|
|||
|
||||
Examples:
|
||||
>>> x = Tensor(np.array([1, 2, 3, 4]), mindspore.int64)
|
||||
>>> y = P.Identity()(x)
|
||||
>>> print(y)
|
||||
[1, 2, 3, 4]
|
||||
>>> output = P.Identity()(x)
|
||||
>>> print(output)
|
||||
[1 2 3 4]
|
||||
"""
|
||||
|
||||
@prim_attr_register
|
||||
|
@ -4341,10 +4384,10 @@ class RepeatElements(PrimitiveWithInfer):
|
|||
>>> repeat_elements = P.RepeatElements(rep = 2, axis = 0)
|
||||
>>> output = repeat_elements(x)
|
||||
>>> print(output)
|
||||
[[0, 1, 2],
|
||||
[0, 1, 2],
|
||||
[3, 4, 5],
|
||||
[3, 4, 5]],
|
||||
[[0 1 2]
|
||||
[0 1 2]
|
||||
[3 4 5]
|
||||
[3 4 5]]
|
||||
"""
|
||||
|
||||
@prim_attr_register
|
||||
|
|
|
@ -76,16 +76,19 @@ class AllReduce(PrimitiveWithInfer):
|
|||
>>>
|
||||
>>> init()
|
||||
>>> class Net(nn.Cell):
|
||||
>>> def __init__(self):
|
||||
>>> super(Net, self).__init__()
|
||||
>>> self.allreduce_sum = P.AllReduce(ReduceOp.SUM, group="nccl_world_group")
|
||||
>>>
|
||||
>>> def construct(self, x):
|
||||
>>> return self.allreduce_sum(x)
|
||||
>>>
|
||||
... def __init__(self):
|
||||
... super(Net, self).__init__()
|
||||
... self.allreduce_sum = P.AllReduce(ReduceOp.SUM, group="nccl_world_group")
|
||||
...
|
||||
... def construct(self, x):
|
||||
... return self.allreduce_sum(x)
|
||||
...
|
||||
>>> input_ = Tensor(np.ones([2, 8]).astype(np.float32))
|
||||
>>> net = Net()
|
||||
>>> output = net(input_)
|
||||
>>> print(output)
|
||||
[[4. 5. 6. 0. 0. 0. 0. 0.]
|
||||
[0. 0. 0. 0. 0. 0. 0. 0.]]
|
||||
"""
|
||||
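AllReduce with ReduceOp.SUM leaves every rank holding the elementwise sum of all ranks' inputs. A local NumPy simulation of that semantics (the per-rank tensors here are made up for illustration):

    import numpy as np

    # Simulate AllReduce(SUM) over three ranks: every rank ends up with the
    # elementwise sum of all ranks' tensors.
    rank_tensors = [np.ones((2, 8), dtype=np.float32) * r for r in (1, 2, 3)]
    reduced = np.sum(np.stack(rank_tensors), axis=0)
    print(reduced)   # every entry is 6.0; each rank would hold this same result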
|
||||
@prim_attr_register
|
||||
|
@ -249,17 +252,18 @@ class AllGather(PrimitiveWithInfer):
|
|||
>>> from mindspore import Tensor
|
||||
>>>
|
||||
>>> init()
|
||||
>>> class Net(nn.Cell):
|
||||
>>> def __init__(self):
|
||||
>>> super(Net, self).__init__()
|
||||
>>> self.allgather = P.AllGather(group="nccl_world_group")
|
||||
>>>
|
||||
>>> def construct(self, x):
|
||||
>>> return self.allgather(x)
|
||||
>>>
|
||||
... class Net(nn.Cell):
|
||||
... def __init__(self):
|
||||
... super(Net, self).__init__()
|
||||
... self.allgather = P.AllGather(group="nccl_world_group")
|
||||
...
|
||||
... def construct(self, x):
|
||||
... return self.allgather(x)
|
||||
...
|
||||
>>> input_ = Tensor(np.ones([2, 8]).astype(np.float32))
|
||||
>>> net = Net()
|
||||
>>> output = net(input_)
|
||||
>>> print(output)
|
||||
"""
|
||||
|
||||
@prim_attr_register
|
||||
|
@ -364,16 +368,17 @@ class ReduceScatter(PrimitiveWithInfer):
|
|||
>>>
|
||||
>>> init()
|
||||
>>> class Net(nn.Cell):
|
||||
>>> def __init__(self):
|
||||
>>> super(Net, self).__init__()
|
||||
>>> self.reducescatter = P.ReduceScatter(ReduceOp.SUM)
|
||||
>>>
|
||||
>>> def construct(self, x):
|
||||
>>> return self.reducescatter(x)
|
||||
>>>
|
||||
... def __init__(self):
|
||||
... super(Net, self).__init__()
|
||||
... self.reducescatter = P.ReduceScatter(ReduceOp.SUM)
|
||||
...
|
||||
... def construct(self, x):
|
||||
... return self.reducescatter(x)
|
||||
...
|
||||
>>> input_ = Tensor(np.ones([8, 8]).astype(np.float32))
|
||||
>>> net = Net()
|
||||
>>> output = net(input_)
|
||||
>>> print(output)
|
||||
"""
|
||||
|
||||
@prim_attr_register
|
||||
|
@ -480,16 +485,20 @@ class Broadcast(PrimitiveWithInfer):
|
|||
>>>
|
||||
>>> init()
|
||||
>>> class Net(nn.Cell):
|
||||
>>> def __init__(self):
|
||||
>>> super(Net, self).__init__()
|
||||
>>> self.broadcast = P.Broadcast(1)
|
||||
>>>
|
||||
>>> def construct(self, x):
|
||||
>>> return self.broadcast((x,))
|
||||
>>>
|
||||
>>> input_ = Tensor(np.ones([2, 8]).astype(np.float32))
|
||||
... def __init__(self):
|
||||
... super(Net, self).__init__()
|
||||
... self.broadcast = P.Broadcast(1)
|
||||
...
|
||||
... def construct(self, x):
|
||||
... return self.broadcast((x,))
|
||||
...
|
||||
>>> input_ = Tensor(np.ones([2, 4]).astype(np.int32))
|
||||
>>> net = Net()
|
||||
>>> output = net(input_)
|
||||
>>> print(output)
|
||||
(Tensor(shape=[2, 4], dtype=Int32, value=
|
||||
[[1, 1, 1, 1],
|
||||
[1, 1, 1, 1]]),)
|
||||
"""
|
||||
|
||||
@prim_attr_register
|
||||
|
|
|
@ -51,16 +51,17 @@ class ControlDepend(Primitive):
|
|||
|
||||
Examples:
|
||||
>>> class Net(nn.Cell):
|
||||
>>> def __init__(self):
|
||||
>>> super(Net, self).__init__()
|
||||
>>> self.control_depend = P.ControlDepend()
|
||||
>>> self.softmax = P.Softmax()
|
||||
>>>
|
||||
>>> def construct(self, x, y):
|
||||
>>> mul = x * y
|
||||
>>> softmax = self.softmax(x)
|
||||
>>> ret = self.control_depend(mul, softmax)
|
||||
>>> return ret
|
||||
... def __init__(self):
|
||||
... super(Net, self).__init__()
|
||||
... self.control_depend = P.ControlDepend()
|
||||
... self.softmax = P.Softmax()
|
||||
...
|
||||
... def construct(self, x, y):
|
||||
... mul = x * y
|
||||
... softmax = self.softmax(x)
|
||||
... ret = self.control_depend(mul, softmax)
|
||||
... return ret
|
||||
...
|
||||
>>> x = Tensor(np.ones([4, 5]), dtype=mindspore.float32)
|
||||
>>> y = Tensor(np.ones([4, 5]), dtype=mindspore.float32)
|
||||
>>> net = Net()
|
||||
|
@ -70,8 +71,6 @@ class ControlDepend(Primitive):
|
|||
[1. 1. 1. 1. 1.]
|
||||
[1. 1. 1. 1. 1.]
|
||||
[1. 1. 1. 1. 1.]]
|
||||
>>> print(output.dtype)
|
||||
Float32
|
||||
"""
|
||||
|
||||
@prim_attr_register
|
||||
|
@ -100,29 +99,30 @@ class GeSwitch(PrimitiveWithInfer):
|
|||
|
||||
Examples:
|
||||
>>> class Net(nn.Cell):
|
||||
>>> def __init__(self):
|
||||
>>> super(Net, self).__init__()
|
||||
>>> self.square = P.Square()
|
||||
>>> self.add = P.TensorAdd()
|
||||
>>> self.value = Tensor(np.full((1), 3), mindspore.float32)
|
||||
>>> self.switch = P.GeSwitch()
|
||||
>>> self.merge = P.Merge()
|
||||
>>> self.less = P.Less()
|
||||
>>>
|
||||
>>> def construct(self, x, y):
|
||||
>>> cond = self.less(x, y)
|
||||
>>> st1, sf1 = self.switch(x, cond)
|
||||
>>> st2, sf2 = self.switch(y, cond)
|
||||
>>> add_ret = self.add(st1, st2)
|
||||
>>> st3, sf3 = self.switch(self.value, cond)
|
||||
>>> sq_ret = self.square(sf3)
|
||||
>>> ret = self.merge((add_ret, sq_ret))
|
||||
>>> return ret[0]
|
||||
>>>
|
||||
... def __init__(self):
|
||||
... super(Net, self).__init__()
|
||||
... self.square = P.Square()
|
||||
... self.add = P.TensorAdd()
|
||||
... self.value = Tensor(np.full((1), 3), mindspore.float32)
|
||||
... self.switch = P.GeSwitch()
|
||||
... self.merge = P.Merge()
|
||||
... self.less = P.Less()
|
||||
...
|
||||
... def construct(self, x, y):
|
||||
... cond = self.less(x, y)
|
||||
... st1, sf1 = self.switch(x, cond)
|
||||
... st2, sf2 = self.switch(y, cond)
|
||||
... add_ret = self.add(st1, st2)
|
||||
... st3, sf3 = self.switch(self.value, cond)
|
||||
... sq_ret = self.square(sf3)
|
||||
... ret = self.merge((add_ret, sq_ret))
|
||||
... return ret[0]
|
||||
...
|
||||
>>> x = Tensor(10.0, dtype=mindspore.float32)
|
||||
>>> y = Tensor(5.0, dtype=mindspore.float32)
|
||||
>>> net = Net()
|
||||
>>> output = net(x, y)
|
||||
>>> print(output)
|
||||
"""
|
||||
|
||||
@prim_attr_register
|
||||
|
|
|
@ -50,16 +50,17 @@ class ScalarSummary(PrimitiveWithInfer):
|
|||
|
||||
Examples:
|
||||
>>> class SummaryDemo(nn.Cell):
|
||||
>>> def __init__(self,):
|
||||
>>> super(SummaryDemo, self).__init__()
|
||||
>>> self.summary = P.ScalarSummary()
|
||||
>>> self.add = P.TensorAdd()
|
||||
>>>
|
||||
>>> def construct(self, x, y):
|
||||
>>> name = "x"
|
||||
>>> self.summary(name, x)
|
||||
>>> x = self.add(x, y)
|
||||
>>> return x
|
||||
... def __init__(self,):
|
||||
... super(SummaryDemo, self).__init__()
|
||||
... self.summary = P.ScalarSummary()
|
||||
... self.add = P.TensorAdd()
|
||||
...
|
||||
... def construct(self, x, y):
|
||||
... name = "x"
|
||||
... self.summary(name, x)
|
||||
... x = self.add(x, y)
|
||||
... return x
|
||||
...
|
||||
"""
|
||||
|
||||
@prim_attr_register
|
||||
|
@ -88,14 +89,15 @@ class ImageSummary(PrimitiveWithInfer):
|
|||
|
||||
Examples:
|
||||
>>> class Net(nn.Cell):
|
||||
>>> def __init__(self):
|
||||
>>> super(Net, self).__init__()
|
||||
>>> self.summary = P.ImageSummary()
|
||||
>>>
|
||||
>>> def construct(self, x):
|
||||
>>> name = "image"
|
||||
>>> out = self.summary(name, x)
|
||||
>>> return out
|
||||
... def __init__(self):
|
||||
... super(Net, self).__init__()
|
||||
... self.summary = P.ImageSummary()
|
||||
...
|
||||
... def construct(self, x):
|
||||
... name = "image"
|
||||
... out = self.summary(name, x)
|
||||
... return out
|
||||
...
|
||||
"""
|
||||
|
||||
@prim_attr_register
|
||||
|
@ -125,16 +127,17 @@ class TensorSummary(PrimitiveWithInfer):
|
|||
|
||||
Examples:
|
||||
>>> class SummaryDemo(nn.Cell):
|
||||
>>> def __init__(self,):
|
||||
>>> super(SummaryDemo, self).__init__()
|
||||
>>> self.summary = P.TensorSummary()
|
||||
>>> self.add = P.TensorAdd()
|
||||
>>>
|
||||
>>> def construct(self, x, y):
|
||||
>>> x = self.add(x, y)
|
||||
>>> name = "x"
|
||||
>>> self.summary(name, x)
|
||||
>>> return x
|
||||
... def __init__(self,):
|
||||
... super(SummaryDemo, self).__init__()
|
||||
... self.summary = P.TensorSummary()
|
||||
... self.add = P.TensorAdd()
|
||||
...
|
||||
... def construct(self, x, y):
|
||||
... x = self.add(x, y)
|
||||
... name = "x"
|
||||
... self.summary(name, x)
|
||||
... return x
|
||||
...
|
||||
"""
|
||||
|
||||
@prim_attr_register
|
||||
|
@ -163,16 +166,17 @@ class HistogramSummary(PrimitiveWithInfer):
|
|||
|
||||
Examples:
|
||||
>>> class SummaryDemo(nn.Cell):
|
||||
>>> def __init__(self,):
|
||||
>>> super(SummaryDemo, self).__init__()
|
||||
>>> self.summary = P.HistogramSummary()
|
||||
>>> self.add = P.TensorAdd()
|
||||
>>>
|
||||
>>> def construct(self, x, y):
|
||||
>>> x = self.add(x, y)
|
||||
>>> name = "x"
|
||||
>>> self.summary(name, x)
|
||||
>>> return x
|
||||
... def __init__(self,):
|
||||
... super(SummaryDemo, self).__init__()
|
||||
... self.summary = P.HistogramSummary()
|
||||
... self.add = P.TensorAdd()
|
||||
...
|
||||
... def construct(self, x, y):
|
||||
... x = self.add(x, y)
|
||||
... name = "x"
|
||||
... self.summary(name, x)
|
||||
... return x
|
||||
...
|
||||
"""
|
||||
|
||||
@prim_attr_register
|
||||
|
@ -206,33 +210,34 @@ class InsertGradientOf(PrimitiveWithInfer):
|
|||
|
||||
Examples:
|
||||
>>> def clip_gradient(dx):
|
||||
>>> ret = dx
|
||||
>>> if ret > 1.0:
|
||||
>>> ret = 1.0
|
||||
>>>
|
||||
>>> if ret < 0.2:
|
||||
>>> ret = 0.2
|
||||
>>>
|
||||
>>> return ret
|
||||
>>>
|
||||
... ret = dx
|
||||
... if ret > 1.0:
|
||||
... ret = 1.0
|
||||
...
|
||||
... if ret < 0.2:
|
||||
... ret = 0.2
|
||||
...
|
||||
... return ret
|
||||
...
|
||||
>>> clip = P.InsertGradientOf(clip_gradient)
|
||||
>>> grad_all = C.GradOperation(get_all=True)
|
||||
>>> def InsertGradientOfClipDemo():
|
||||
>>> def clip_test(x, y):
|
||||
>>> x = clip(x)
|
||||
>>> y = clip(y)
|
||||
>>> c = x * y
|
||||
>>> return c
|
||||
>>>
|
||||
>>> @ms_function
|
||||
>>> def f(x, y):
|
||||
>>> return clip_test(x, y)
|
||||
>>>
|
||||
>>> def fd(x, y):
|
||||
>>> return grad_all(clip_test)(x, y)
|
||||
>>>
|
||||
>>> print("forward: ", f(1.1, 0.1))
|
||||
>>> print("clip_gradient:", fd(1.1, 0.1))
|
||||
... def clip_test(x, y):
|
||||
... x = clip(x)
|
||||
... y = clip(y)
|
||||
... c = x * y
|
||||
... return c
|
||||
...
|
||||
... @ms_function
|
||||
... def f(x, y):
|
||||
... return clip_test(x, y)
|
||||
...
|
||||
... def fd(x, y):
|
||||
... return grad_all(clip_test)(x, y)
|
||||
...
|
||||
... print("forward: ", f(1.1, 0.1))
|
||||
... print("clip_gradient:", fd(1.1, 0.1))
|
||||
...
|
||||
"""
|
||||
|
||||
@prim_attr_register

@ -266,21 +271,21 @@ class HookBackward(PrimitiveWithInfer):

Examples:
>>> def hook_fn(grad_out):
>>> print(grad_out)
>>>
... print(grad_out)
...
>>> grad_all = GradOperation(get_all=True)
>>> hook = P.HookBackward(hook_fn)
>>>
>>> def hook_test(x, y):
>>> z = x * y
>>> z = hook(z)
>>> z = z * y
>>> return z
>>>
... z = x * y
... z = hook(z)
... z = z * y
... return z
...
>>> def backward(x, y):
>>> return grad_all(hook_test)(x, y)
>>>
>>> backward(1, 2)
... return grad_all(hook_test)(x, y)
...
>>> output = backward(1, 2)
>>> print(output)
"""

def __init__(self, hook_fn, cell_id=""):
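
Note (derived by hand, not recorded in this diff): hook_test computes f(x, y) = (x * y) * y, so at (1, 2) the hook receives the gradient flowing into the intermediate z = x * y, namely df/dz = y = 2, and backward returns (df/dx, df/dy) = (y**2, 2*x*y) = (4, 4), up to Tensor wrapping and dtype.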

@ -316,13 +321,14 @@ class Print(PrimitiveWithInfer):

Examples:
>>> class PrintDemo(nn.Cell):
>>> def __init__(self):
>>> super(PrintDemo, self).__init__()
>>> self.print = P.Print()
>>>
>>> def construct(self, x, y):
>>> self.print('Print Tensor x and Tensor y:', x, y)
>>> return x
... def __init__(self):
... super(PrintDemo, self).__init__()
... self.print = P.Print()
...
... def construct(self, x, y):
... self.print('Print Tensor x and Tensor y:', x, y)
... return x
...
"""

@prim_attr_register

@ -356,15 +362,16 @@ class Assert(PrimitiveWithInfer):

Examples:
>>> class AssertDemo(nn.Cell):
>>> def __init__(self):
>>> super(AssertDemo, self).__init__()
>>> self.assert1 = P.Assert(summarize=10)
>>> self.add = P.TensorAdd()
>>>
>>> def construct(self, x, y):
>>> data = self.add(x, y)
>>> self.assert1(True, [data])
>>> return data
... def __init__(self):
... super(AssertDemo, self).__init__()
... self.assert1 = P.Assert(summarize=10)
... self.add = P.TensorAdd()
...
... def construct(self, x, y):
... data = self.add(x, y)
... self.assert1(True, [data])
... return data
...
"""

@prim_attr_register

@ -55,14 +55,14 @@ class CropAndResize(PrimitiveWithInfer):

Examples:
>>> class CropAndResizeNet(nn.Cell):
>>> def __init__(self, crop_size):
>>> super(CropAndResizeNet, self).__init__()
>>> self.crop_and_resize = P.CropAndResize()
>>> self.crop_size = crop_size
>>>
>>> def construct(self, x, boxes, box_index):
>>> return self.crop_and_resize(x, boxes, box_index, self.crop_size)
>>>
... def __init__(self, crop_size):
... super(CropAndResizeNet, self).__init__()
... self.crop_and_resize = P.CropAndResize()
... self.crop_size = crop_size
...
... def construct(self, x, boxes, box_index):
... return self.crop_and_resize(x, boxes, box_index, self.crop_size)
...
>>> BATCH_SIZE = 1
>>> NUM_BOXES = 5
>>> IMAGE_HEIGHT = 256

@ -74,7 +74,7 @@ class CropAndResize(PrimitiveWithInfer):
>>> crop_size = (24, 24)
>>> crop_and_resize = CropAndResizeNet(crop_size=crop_size)
>>> output = crop_and_resize(Tensor(image), Tensor(boxes), Tensor(box_index))
>>> output.shape
>>> print(output.shape)
(5, 24, 24, 3)
"""

@ -35,6 +35,7 @@ class ScalarCast(PrimitiveWithInfer):
Examples:
>>> scalar_cast = P.ScalarCast()
>>> output = scalar_cast(255.0, mindspore.int32)
>>> print(output)
255
"""

@ -139,8 +139,9 @@ class TensorAdd(_MathBinaryOp):
>>> add = P.TensorAdd()
>>> input_x = Tensor(np.array([1,2,3]).astype(np.float32))
>>> input_y = Tensor(np.array([4,5,6]).astype(np.float32))
>>> add(input_x, input_y)
[5,7,9]
>>> output = add(input_x, input_y)
>>> print(output)
[5. 7. 9.]
"""

def infer_value(self, x, y):
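
Note: TensorAdd, like the other _MathBinaryOp primitives touched below, also broadcasts its inputs. A minimal sketch of the broadcast case, assuming the same imports the examples rely on (numpy, Tensor, operations as P); the shapes are chosen for illustration and the expected values follow from NumPy-style broadcasting, not from any output recorded in this diff:

import numpy as np
from mindspore import Tensor
from mindspore.ops import operations as P

add = P.TensorAdd()
# (2, 1) + (2,) broadcasts to (2, 2)
input_x = Tensor(np.array([[1.0], [2.0]]).astype(np.float32))
input_y = Tensor(np.array([10.0, 20.0]).astype(np.float32))
output = add(input_x, input_y)
print(output)
# expected by broadcasting: [[11. 21.]
#                            [12. 22.]]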

@ -170,16 +171,16 @@ class AssignAdd(PrimitiveWithInfer):
It must have the same shape as `variable` if it is a Tensor.

Examples:
>>> class Net(Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self.AssignAdd = P.AssignAdd()
>>> self.variable = mindspore.Parameter(initializer(1, [1], mindspore.int64), name="global_step")
>>>
>>> def construct(self, x):
>>> self.AssignAdd(self.variable, x)
>>> return self.variable
>>>
>>> class Net(nn.Cell):
... def __init__(self):
... super(Net, self).__init__()
... self.AssignAdd = P.AssignAdd()
... self.variable = mindspore.Parameter(initializer(1, [1], mindspore.int64), name="global_step")
...
... def construct(self, x):
... self.AssignAdd(self.variable, x)
... return self.variable
...
>>> net = Net()
>>> value = Tensor(np.ones([1]).astype(np.int64)*100)
>>> output = net(value)

@ -222,16 +223,16 @@ class AssignSub(PrimitiveWithInfer):
It must have the same shape as `variable` if it is a Tensor.

Examples:
>>> class Net(Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self.AssignSub = P.AssignSub()
>>> self.variable = mindspore.Parameter(initializer(1, [1], mindspore.int32), name="global_step")
>>>
>>> def construct(self, x):
>>> self.AssignSub(self.variable, x)
>>> return self.variable
>>>
>>> class Net(nn.Cell):
... def __init__(self):
... super(Net, self).__init__()
... self.AssignSub = P.AssignSub()
... self.variable = mindspore.Parameter(initializer(1, [1], mindspore.int32), name="global_step")
...
... def construct(self, x):
... self.AssignSub(self.variable, x)
... return self.variable
...
>>> net = Net()
>>> value = Tensor(np.ones([1]).astype(np.int32)*100)
>>> output = net(value)

@ -422,6 +423,7 @@ class ReduceAll(_Reduce):
>>> input_x = Tensor(np.array([[True, False], [True, True]]))
>>> op = P.ReduceAll(keep_dims=True)
>>> output = op(input_x, 1)
>>> print(output)
[[False]
[ True]]
"""

@ -461,7 +463,7 @@ class ReduceAny(_Reduce):
>>> op = P.ReduceAny(keep_dims=True)
>>> output = op(input_x, 1)
>>> print(output)
[[True],
[[ True]
[ True]]
"""

@ -744,6 +746,7 @@ class BatchMatMul(MatMul):
>>> input_y = Tensor(np.ones(shape=[2, 4, 3, 4]), mindspore.float32)
>>> batmatmul = P.BatchMatMul()
>>> output = batmatmul(input_x, input_y)
>>> print(output)
[[[[3. 3. 3. 3.]]
[[3. 3. 3. 3.]]
[[3. 3. 3. 3.]]

@ -757,6 +760,7 @@ class BatchMatMul(MatMul):
>>> input_y = Tensor(np.ones(shape=[2, 4, 3, 4]), mindspore.float32)
>>> batmatmul = P.BatchMatMul(transpose_a=True)
>>> output = batmatmul(input_x, input_y)
>>> print(output)
[[[[3. 3. 3. 3.]]
[[3. 3. 3. 3.]]
[[3. 3. 3. 3.]]

@ -800,6 +804,7 @@ class CumSum(PrimitiveWithInfer):
>>> input = Tensor(np.array([[3, 4, 6, 10],[1, 6, 7, 9],[4, 3, 8, 7],[1, 3, 7, 9]]).astype(np.float32))
>>> cumsum = P.CumSum()
>>> output = cumsum(input, 1)
>>> print(output)
[[ 3. 7. 13. 23.]
[ 1. 7. 14. 23.]
[ 4. 7. 15. 22.]
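
Note: with axis 1 each row accumulates left to right (row 0: 3, 3+4=7, 7+6=13, 13+10=23); passing 0 instead would accumulate down the columns.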

@ -842,18 +847,19 @@ class AddN(PrimitiveWithInfer):

Examples:
>>> class NetAddN(nn.Cell):
>>> def __init__(self):
>>> super(NetAddN, self).__init__()
>>> self.addN = P.AddN()
>>>
>>> def construct(self, *z):
>>> return self.addN(z)
>>>
... def __init__(self):
... super(NetAddN, self).__init__()
... self.addN = P.AddN()
...
... def construct(self, *z):
... return self.addN(z)
...
>>> net = NetAddN()
>>> input_x = Tensor(np.array([1, 2, 3]), mindspore.float32)
>>> input_y = Tensor(np.array([4, 5, 6]), mindspore.float32)
>>> net(input_x, input_y, input_x, input_y)
[10.0, 14.0, 18.0]
>>> output = net(input_x, input_y, input_x, input_y)
>>> print(output)
[10. 14. 18.]
"""

@prim_attr_register

@ -924,18 +930,19 @@ class AccumulateNV2(PrimitiveWithInfer):

Examples:
>>> class NetAccumulateNV2(nn.Cell):
>>> def __init__(self):
>>> super(NetAccumulateNV2, self).__init__()
>>> self.accumulateNV2 = P.AccumulateNV2()
>>>
>>> def construct(self, *z):
>>> return self.accumulateNV2(z)
>>>
... def __init__(self):
... super(NetAccumulateNV2, self).__init__()
... self.accumulateNV2 = P.AccumulateNV2()
...
... def construct(self, *z):
... return self.accumulateNV2(z)
...
>>> net = NetAccumulateNV2()
>>> input_x = Tensor(np.array([1, 2, 3]), mindspore.float32)
>>> input_y = Tensor(np.array([4, 5, 6]), mindspore.float32)
>>> net(input_x, input_y, input_x, input_y)
Tensor([10., 14., 18.], shape=(3,), dtype=mindspore.float32)
>>> output = net(input_x, input_y, input_x, input_y)
>>> print(output)
[10. 14. 18.]
"""

@prim_attr_register

@ -983,8 +990,8 @@ class Neg(PrimitiveWithInfer):
Examples:
>>> neg = P.Neg()
>>> input_x = Tensor(np.array([1, 2, -1, 2, 0, -3.5]), mindspore.float32)
>>> result = neg(input_x)
>>> print(result)
>>> output = neg(input_x)
>>> print(output)
[-1. -2. 1. -2. 0. 3.5]
"""

@ -1030,7 +1037,8 @@ class InplaceAdd(PrimitiveWithInfer):
>>> input_x = Tensor(np.array([[1, 2], [3, 4], [5, 6]]), mindspore.float32)
>>> input_v = Tensor(np.array([[0.5, 1.0], [1.0, 1.5]]), mindspore.float32)
>>> inplaceAdd = P.InplaceAdd(indices)
>>> inplaceAdd(input_x, input_v)
>>> output = inplaceAdd(input_x, input_v)
>>> print(output)
[[1.5 3. ]
[4. 5.5]
[5. 6. ]]

@ -1088,7 +1096,8 @@ class InplaceSub(PrimitiveWithInfer):
>>> input_x = Tensor(np.array([[1, 2], [3, 4], [5, 6]]), mindspore.float32)
>>> input_v = Tensor(np.array([[0.5, 1.0], [1.0, 1.5]]), mindspore.float32)
>>> inplaceSub = P.InplaceSub(indices)
>>> inplaceSub(input_x, input_v)
>>> output = inplaceSub(input_x, input_v)
>>> print(output)
[[0.5 1. ]
[2. 2.5]
[5. 6. ]]

@ -1150,8 +1159,9 @@ class Sub(_MathBinaryOp):
>>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32)
>>> input_y = Tensor(np.array([4, 5, 6]), mindspore.int32)
>>> sub = P.Sub()
>>> sub(input_x, input_y)
[-3, -3, -3]
>>> output = sub(input_x, input_y)
>>> print(output)
[-3 -3 -3]
"""

def infer_value(self, x, y):

@ -1189,8 +1199,9 @@ class Mul(_MathBinaryOp):
>>> input_x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
>>> input_y = Tensor(np.array([4.0, 5.0, 6.0]), mindspore.float32)
>>> mul = P.Mul()
>>> mul(input_x, input_y)
[4, 10, 18]
>>> output = mul(input_x, input_y)
>>> print(output)
[ 4. 10. 18.]
"""

def infer_value(self, x, y):

@ -1228,8 +1239,9 @@ class SquaredDifference(_MathBinaryOp):
>>> input_x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
>>> input_y = Tensor(np.array([2.0, 4.0, 6.0]), mindspore.float32)
>>> squared_difference = P.SquaredDifference()
>>> squared_difference(input_x, input_y)
[1.0, 4.0, 9.0]
>>> output = squared_difference(input_x, input_y)
>>> print(output)
[1. 4. 9.]
"""

def infer_dtype(self, x_dtype, y_dtype):

@ -1250,8 +1262,9 @@ class Square(PrimitiveWithInfer):
Examples:
>>> input_x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
>>> square = P.Square()
>>> square(input_x)
[1.0, 4.0, 9.0]
>>> output = square(input_x)
>>> print(output)
[1. 4. 9.]
"""

@prim_attr_register

@ -1288,8 +1301,10 @@ class Rsqrt(PrimitiveWithInfer):
Examples:
>>> input_tensor = Tensor([[4, 4], [9, 9]], mindspore.float32)
>>> rsqrt = P.Rsqrt()
>>> rsqrt(input_tensor)
[[0.5, 0.5], [0.333333, 0.333333]]
>>> output = rsqrt(input_tensor)
>>> print(output)
[[0.5 0.5 ]
[0.333334 0.333334]]
"""

@prim_attr_register

@ -1326,8 +1341,9 @@ class Sqrt(PrimitiveWithCheck):
Examples:
>>> input_x = Tensor(np.array([1.0, 4.0, 9.0]), mindspore.float32)
>>> sqrt = P.Sqrt()
>>> sqrt(input_x)
[1.0, 2.0, 3.0]
>>> output = sqrt(input_x)
>>> print(output)
[1. 2. 3.]
"""

@prim_attr_register

@ -1360,8 +1376,9 @@ class Reciprocal(PrimitiveWithInfer):
Examples:
>>> input_x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
>>> reciprocal = P.Reciprocal()
>>> reciprocal(input_x)
[1.0, 0.5, 0.25]
>>> output = reciprocal(input_x)
>>> print(output)
[1. 0.5 0.25]
"""

@prim_attr_register

@ -1414,14 +1431,16 @@ class Pow(_MathBinaryOp):
>>> input_x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
>>> input_y = 3.0
>>> pow = P.Pow()
>>> pow(input_x, input_y)
[1.0, 8.0, 64.0]
>>> output = pow(input_x, input_y)
>>> print(output)
[ 1. 8. 64.]
>>>
>>> input_x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
>>> input_y = Tensor(np.array([2.0, 4.0, 3.0]), mindspore.float32)
>>> pow = P.Pow()
>>> pow(input_x, input_y)
[1.0, 16.0, 64.0]
>>> output = pow(input_x, input_y)
>>> print(output)
[ 1. 16. 64.]
"""

def infer_value(self, x, power):

@ -1447,8 +1466,9 @@ class Exp(PrimitiveWithInfer):
Examples:
>>> input_x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
>>> exp = P.Exp()
>>> exp(input_x)
[ 2.71828183, 7.3890561 , 54.59815003]
>>> output = exp(input_x)
>>> print(output)
[ 2.718282 7.389056 54.598152]
"""

@prim_attr_register

@ -1485,8 +1505,9 @@ class Expm1(PrimitiveWithInfer):
Examples:
>>> input_x = Tensor(np.array([0.0, 1.0, 2.0, 4.0]), mindspore.float32)
>>> expm1 = P.Expm1()
>>> expm1(input_x)
[ 0., 1.71828183, 6.3890561 , 53.59815003]
>>> output = expm1(input_x)
>>> print(output)
[ 0. 1.718282 6.389056 53.598152]
"""

@prim_attr_register

@ -1523,7 +1544,8 @@ class HistogramFixedWidth(PrimitiveWithInfer):
>>> x = Tensor([-1.0, 0.0, 1.5, 2.0, 5.0, 15], mindspore.float16)
>>> range = Tensor([0.0, 5.0], mindspore.float16)
>>> hist = P.HistogramFixedWidth(5)
>>> hist(x, range)
>>> output = hist(x, range)
>>> print(output)
[2 1 1 0 2]
"""

@ -1559,8 +1581,9 @@ class Log(PrimitiveWithInfer):
Examples:
>>> input_x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
>>> log = P.Log()
>>> log(input_x)
[0.0, 0.69314718, 1.38629436]
>>> output = log(input_x)
>>> print(output)
[0. 0.6931472 1.38629444]
"""

@prim_attr_register

@ -1596,8 +1619,9 @@ class Log1p(PrimitiveWithInfer):
Examples:
>>> input_x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
>>> log1p = P.Log1p()
>>> log1p(input_x)
[0.6931472, 1.0986123, 1.609438]
>>> output = log1p(input_x)
>>> print(output)
[0.6931472 1.0986123 1.609438 ]
"""

@prim_attr_register

@ -1626,8 +1650,9 @@ class Erf(PrimitiveWithInfer):
Examples:
>>> input_x = Tensor(np.array([-1, 0, 1, 2, 3]), mindspore.float32)
>>> erf = P.Erf()
>>> erf(input_x)
[-0.8427168, 0., 0.8427168, 0.99530876, 0.99997765]
>>> output = erf(input_x)
>>> print(output)
[-0.8427168 0. 0.8427168 0.99530876 0.99997765]
"""

@prim_attr_register

@ -1656,8 +1681,9 @@ class Erfc(PrimitiveWithInfer):
Examples:
>>> input_x = Tensor(np.array([-1, 0, 1, 2, 3]), mindspore.float32)
>>> erfc = P.Erfc()
>>> erfc(input_x)
[1.8427168, 1.0, 0.1572832, 0.00469124, 0.00002235]
>>> output = erfc(input_x)
>>> print(output)
[1.8427168e+00 1.0000000e+00 1.5728319e-01 4.6912432e-03 2.2351742e-05]
"""

@prim_attr_register
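
Note: erfc(x) = 1 - erf(x), which is consistent with the Erf example above: 1 - 0.8427168 = 0.1572832, matching the 1.5728319e-01 entry here.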

@ -1698,8 +1724,9 @@ class Minimum(_MathBinaryOp):
>>> input_x = Tensor(np.array([1.0, 5.0, 3.0]), mindspore.float32)
>>> input_y = Tensor(np.array([4.0, 2.0, 6.0]), mindspore.float32)
>>> minimum = P.Minimum()
>>> minimum(input_x, input_y)
[1.0, 2.0, 3.0]
>>> output = minimum(input_x, input_y)
>>> print(output)
[1. 2. 3.]
"""

def infer_value(self, x, y):

@ -1737,8 +1764,9 @@ class Maximum(_MathBinaryOp):
>>> input_x = Tensor(np.array([1.0, 5.0, 3.0]), mindspore.float32)
>>> input_y = Tensor(np.array([4.0, 2.0, 6.0]), mindspore.float32)
>>> maximum = P.Maximum()
>>> maximum(input_x, input_y)
[4.0, 5.0, 6.0]
>>> output = maximum(input_x, input_y)
>>> print(output)
[4. 5. 6.]
"""

def infer_value(self, x, y):

@ -1776,8 +1804,9 @@ class RealDiv(_MathBinaryOp):
>>> input_x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
>>> input_y = Tensor(np.array([4.0, 5.0, 6.0]), mindspore.float32)
>>> realdiv = P.RealDiv()
>>> realdiv(input_x, input_y)
[0.25, 0.4, 0.5]
>>> output = realdiv(input_x, input_y)
>>> print(output)
[0.25 0.4 0.5 ]
"""

def infer_value(self, x, y):

@ -1816,8 +1845,9 @@ class Div(_MathBinaryOp):
>>> input_x = Tensor(np.array([-4.0, 5.0, 6.0]), mindspore.float32)
>>> input_y = Tensor(np.array([3.0, 2.0, 3.0]), mindspore.float32)
>>> div = P.Div()
>>> div(input_x, input_y)
[-1.3, 2.5, 2.0]
>>> output = div(input_x, input_y)
>>> print(output)
[-1.3333334 2.5 2. ]
"""

def infer_value(self, x, y):

@ -1854,8 +1884,9 @@ class DivNoNan(_MathBinaryOp):
>>> input_x = Tensor(np.array([-1.0, 0., 1.0, 5.0, 6.0]), mindspore.float32)
>>> input_y = Tensor(np.array([0., 0., 0., 2.0, 3.0]), mindspore.float32)
>>> div_no_nan = P.DivNoNan()
>>> div_no_nan(input_x, input_y)
[0., 0., 0., 2.5, 2.0]
>>> output = div_no_nan(input_x, input_y)
>>> print(output)
[0. 0. 0. 2.5 2. ]
"""

@prim_attr_register

@ -1899,8 +1930,9 @@ class FloorDiv(_MathBinaryOp):
>>> input_x = Tensor(np.array([2, 4, -1]), mindspore.int32)
>>> input_y = Tensor(np.array([3, 3, 3]), mindspore.int32)
>>> floor_div = P.FloorDiv()
>>> floor_div(input_x, input_y)
[0, 1, -1]
>>> output = floor_div(input_x, input_y)
>>> print(output)
[ 0 1 -1]
"""

@ -1930,8 +1962,9 @@ class TruncateDiv(_MathBinaryOp):
>>> input_x = Tensor(np.array([2, 4, -1]), mindspore.int32)
>>> input_y = Tensor(np.array([3, 3, 3]), mindspore.int32)
>>> truncate_div = P.TruncateDiv()
>>> truncate_div(input_x, input_y)
[0, 1, 0]
>>> output = truncate_div(input_x, input_y)
>>> print(output)
[0 1 0]
"""

@ -1960,8 +1993,9 @@ class TruncateMod(_MathBinaryOp):
>>> input_x = Tensor(np.array([2, 4, -1]), mindspore.int32)
>>> input_y = Tensor(np.array([3, 3, 3]), mindspore.int32)
>>> truncate_mod = P.TruncateMod()
>>> truncate_mod(input_x, input_y)
[2, 1, -1]
>>> output = truncate_mod(input_x, input_y)
>>> print(output)
[ 2 1 -1]
"""

@ -1991,7 +2025,8 @@ class Mod(_MathBinaryOp):
>>> input_x = Tensor(np.array([-4.0, 5.0, 6.0]), mindspore.float32)
>>> input_y = Tensor(np.array([3.0, 2.0, 3.0]), mindspore.float32)
>>> mod = P.Mod()
>>> mod(input_x, input_y)
>>> output = mod(input_x, input_y)
>>> print(output)
[-1. 1. 0.]
"""

@ -2016,8 +2051,9 @@ class Floor(PrimitiveWithInfer):
Examples:
>>> input_x = Tensor(np.array([1.1, 2.5, -1.5]), mindspore.float32)
>>> floor = P.Floor()
>>> floor(input_x)
[1.0, 2.0, -2.0]
>>> output = floor(input_x)
>>> print(output)
[ 1. 2. -2.]
"""

@prim_attr_register

@ -2057,8 +2093,9 @@ class FloorMod(_MathBinaryOp):
>>> input_x = Tensor(np.array([2, 4, -1]), mindspore.int32)
>>> input_y = Tensor(np.array([3, 3, 3]), mindspore.int32)
>>> floor_mod = P.FloorMod()
>>> floor_mod(input_x, input_y)
[2, 1, 2]
>>> output = floor_mod(input_x, input_y)
>>> print(output)
[2 1 2]
"""

@ -2075,8 +2112,9 @@ class Ceil(PrimitiveWithInfer):
Examples:
>>> input_x = Tensor(np.array([1.1, 2.5, -1.5]), mindspore.float32)
>>> ceil_op = P.Ceil()
>>> ceil_op(input_x)
[2.0, 3.0, -1.0]
>>> output = ceil_op(input_x)
>>> print(output)
[ 2. 3. -1.]
"""

@prim_attr_register

@ -2116,8 +2154,9 @@ class Xdivy(_MathBinaryOp):
>>> input_x = Tensor(np.array([2, 4, -1]), mindspore.float32)
>>> input_y = Tensor(np.array([2, 2, 2]), mindspore.float32)
>>> xdivy = P.Xdivy()
>>> xdivy(input_x, input_y)
[1.0, 2.0, -0.5]
>>> output = xdivy(input_x, input_y)
>>> print(output)
[ 1. 2. -0.5]
"""

def infer_dtype(self, x_dtype, y_dtype):

@ -2151,8 +2190,9 @@ class Xlogy(_MathBinaryOp):
>>> input_x = Tensor(np.array([-5, 0, 4]), mindspore.float32)
>>> input_y = Tensor(np.array([2, 2, 2]), mindspore.float32)
>>> xlogy = P.Xlogy()
>>> xlogy(input_x, input_y)
[-3.465736, 0.0, 2.7725887]
>>> output = xlogy(input_x, input_y)
>>> print(output)
[-3.465736 0. 2.7725887]
"""

def infer_dtype(self, x_dtype, y_dtype):
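
Note: Xlogy computes x * log(y) elementwise, so the printed values are -5 * ln 2 = -3.465736..., 0 * ln 2 = 0, and 4 * ln 2 = 2.7725887....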

@ -2201,7 +2241,8 @@ class Cosh(PrimitiveWithInfer):
>>> cosh = P.Cosh()
>>> input_x = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), mindspore.float32)
>>> output = cosh(input_x)
[1.0289385 1.364684 1.048436 1.4228927]
>>> print(output)
[1.0289385 1.364684 1.048436 1.0040528]
"""

@prim_attr_register

@ -2230,7 +2271,8 @@ class Asinh(PrimitiveWithInfer):
>>> asinh = P.Asinh()
>>> input_x = Tensor(np.array([-5.0, 1.5, 3.0, 100.0]), mindspore.float32)
>>> output = asinh(input_x)
[-2.3212, 1.1976, 1.8184, 5.2983]
>>> print(output)
[-2.3124385 1.1947632 1.8184465 5.298342 ]
"""

@prim_attr_register

@ -2259,6 +2301,7 @@ class Sinh(PrimitiveWithInfer):
>>> sinh = P.Sinh()
>>> input_x = Tensor(np.array([0.62, 0.28, 0.43, 0.62]), mindspore.float32)
>>> output = sinh(input_x)
>>> print(output)
[0.6604918 0.28367308 0.44337422 0.6604918 ]
"""

@ -2316,8 +2359,9 @@ class Equal(_LogicBinaryOp):
>>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32)
>>> input_y = Tensor(np.array([1, 2, 4]), mindspore.int32)
>>> equal = P.Equal()
>>> equal(input_x, input_y)
[True, True, False]
>>> output = equal(input_x, input_y)
>>> print(output)
[ True True False]
"""

def infer_dtype(self, x_dtype, y_dtype):

@ -2356,7 +2400,8 @@ class ApproximateEqual(_LogicBinaryOp):
>>> x1 = Tensor(np.array([1, 2, 3]), mindspore.float32)
>>> x2 = Tensor(np.array([2, 4, 6]), mindspore.float32)
>>> approximate_equal = P.ApproximateEqual(2.)
>>> result = approximate_equal(x1, x2)
>>> output = approximate_equal(x1, x2)
>>> print(output)
[ True True False]
"""

@ -2393,7 +2438,8 @@ class EqualCount(PrimitiveWithInfer):
>>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32)
>>> input_y = Tensor(np.array([1, 2, 4]), mindspore.int32)
>>> equal_count = P.EqualCount()
>>> equal_count(input_x, input_y)
>>> output = equal_count(input_x, input_y)
>>> print(output)
[2]
"""

@ -2434,14 +2480,16 @@ class NotEqual(_LogicBinaryOp):

Examples:
>>> input_x = Tensor(np.array([1, 2, 3]), mindspore.float32)
>>> not_equal = P.NotEqual()
>>> not_equal(input_x, 2.0)
[True, False, True]
>>> output = not_equal(input_x, 2.0)
>>> print(output)
[ True False True]
>>>
>>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32)
>>> input_y = Tensor(np.array([1, 2, 4]), mindspore.int32)
>>> not_equal = P.NotEqual()
>>> not_equal(input_x, input_y)
[False, False, True]
>>> output = not_equal(input_x, input_y)
>>> print(output)
[False False True]
"""

def infer_dtype(self, x_dtype, y_dtype):

@ -2472,8 +2520,9 @@ class Greater(_LogicBinaryOp):
>>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32)
>>> input_y = Tensor(np.array([1, 1, 4]), mindspore.int32)
>>> greater = P.Greater()
>>> greater(input_x, input_y)
[False, True, False]
>>> output = greater(input_x, input_y)
>>> print(output)
[False True False]
"""

def infer_value(self, x, y):

@ -2509,8 +2558,9 @@ class GreaterEqual(_LogicBinaryOp):
>>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32)
>>> input_y = Tensor(np.array([1, 1, 4]), mindspore.int32)
>>> greater_equal = P.GreaterEqual()
>>> greater_equal(input_x, input_y)
[True, True, False]
>>> output = greater_equal(input_x, input_y)
>>> print(output)
[ True True False]
"""

def infer_value(self, x, y):

@ -2546,8 +2596,9 @@ class Less(_LogicBinaryOp):
>>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32)
>>> input_y = Tensor(np.array([1, 1, 4]), mindspore.int32)
>>> less = P.Less()
>>> less(input_x, input_y)
[False, False, True]
>>> output = less(input_x, input_y)
>>> print(output)
[False False True]
"""

def infer_value(self, x, y):

@ -2583,8 +2634,9 @@ class LessEqual(_LogicBinaryOp):
>>> input_x = Tensor(np.array([1, 2, 3]), mindspore.int32)
>>> input_y = Tensor(np.array([1, 1, 4]), mindspore.int32)
>>> less_equal = P.LessEqual()
>>> less_equal(input_x, input_y)
[True, False, True]
>>> output = less_equal(input_x, input_y)
>>> print(output)
[ True False True]
"""

def infer_value(self, x, y):

@ -2609,8 +2661,9 @@ class LogicalNot(PrimitiveWithInfer):
Examples:
>>> input_x = Tensor(np.array([True, False, True]), mindspore.bool_)
>>> logical_not = P.LogicalNot()
>>> logical_not(input_x)
[False, True, False]
>>> output = logical_not(input_x)
>>> print(output)
[False True False]
"""

@prim_attr_register

@ -2649,8 +2702,9 @@ class LogicalAnd(_LogicBinaryOp):
>>> input_x = Tensor(np.array([True, False, True]), mindspore.bool_)
>>> input_y = Tensor(np.array([True, True, False]), mindspore.bool_)
>>> logical_and = P.LogicalAnd()
>>> logical_and(input_x, input_y)
[True, False, False]
>>> output = logical_and(input_x, input_y)
>>> print(output)
[ True False False]
"""

def infer_dtype(self, x_dtype, y_dtype):

@ -2680,8 +2734,9 @@ class LogicalOr(_LogicBinaryOp):
>>> input_x = Tensor(np.array([True, False, True]), mindspore.bool_)
>>> input_y = Tensor(np.array([True, True, False]), mindspore.bool_)
>>> logical_or = P.LogicalOr()
>>> logical_or(input_x, input_y)
[True, True, True]
>>> output = logical_or(input_x, input_y)
>>> print(output)
[ True True True]
"""

def infer_dtype(self, x_dtype, y_dtype):

@ -2757,7 +2812,8 @@ class IsFinite(PrimitiveWithInfer):
Examples:
>>> is_finite = P.IsFinite()
>>> input_x = Tensor(np.array([np.log(-1), 1, np.log(0)]), mindspore.float32)
>>> result = is_finite(input_x)
>>> output = is_finite(input_x)
>>> print(output)
[False True False]
"""

@ -2820,8 +2876,9 @@ class NPUAllocFloatStatus(PrimitiveWithInfer):

Examples:
>>> alloc_status = P.NPUAllocFloatStatus()
>>> init = alloc_status()
Tensor([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], shape=(8,), dtype=mindspore.float32)
>>> output = alloc_status()
>>> print(output)
[0. 0. 0. 0. 0. 0. 0. 0.]
"""

@prim_attr_register

@ -2855,8 +2912,9 @@ class NPUGetFloatStatus(PrimitiveWithInfer):
>>> alloc_status = P.NPUAllocFloatStatus()
>>> get_status = P.NPUGetFloatStatus()
>>> init = alloc_status()
>>> flag = get_status(init)
Tensor([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], shape=(8,), dtype=mindspore.float32)
>>> output = get_status(init)
>>> print(output)
[0. 0. 0. 0. 0. 0. 0. 0.]
"""

@prim_attr_register

@ -2898,9 +2956,9 @@ class NPUClearFloatStatus(PrimitiveWithInfer):
>>> clear_status = P.NPUClearFloatStatus()
>>> init = alloc_status()
>>> flag = get_status(init)
>>> clear = clear_status(init)
>>> print(clear)
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
>>> output = clear_status(init)
>>> print(output)
[0. 0. 0. 0. 0. 0. 0. 0.]
"""

@prim_attr_register
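
Note: the three NPU float-status primitives are intended to be used together: allocate the flag tensor once, run the computation to be monitored, read the flags, then clear them. A hedged sketch of that ordering, pieced together from the three examples above (the comment marks where the real computation would go; this is not a full overflow-check implementation):

alloc_status = P.NPUAllocFloatStatus()
get_status = P.NPUGetFloatStatus()
clear_status = P.NPUClearFloatStatus()

init = alloc_status()        # fresh 8-element flag tensor, all zeros
# ... run the computation to be checked for overflow here ...
flag = get_status(init)      # records the float status into `init`
# a non-zero entry in `init` signals overflow; reset before the next step:
clear = clear_status(init)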

@ -2991,6 +3049,7 @@ class Sin(PrimitiveWithInfer):
>>> sin = P.Sin()
>>> input_x = Tensor(np.array([0.62, 0.28, 0.43, 0.62]), mindspore.float32)
>>> output = sin(input_x)
>>> print(output)
[0.5810352 0.27635565 0.41687083 0.5810352 ]
"""

@ -3020,7 +3079,8 @@ class Asin(PrimitiveWithInfer):
>>> asin = P.Asin()
>>> input_x = Tensor(np.array([0.74, 0.04, 0.30, 0.56]), mindspore.float32)
>>> output = asin(input_x)
[0.8331, 0.0400, 0.3047, 0.5944]
>>> print(output)
[0.8330927 0.04001068 0.30469266 0.59438497]
"""

@prim_attr_register

@ -3107,8 +3167,9 @@ class Abs(PrimitiveWithInfer):
Examples:
>>> input_x = Tensor(np.array([-1.0, 1.0, 0.0]), mindspore.float32)
>>> abs = P.Abs()
>>> abs(input_x)
[1.0, 1.0, 0.0]
>>> output = abs(input_x)
>>> print(output)
[1. 1. 0.]
"""

@prim_attr_register

@ -3152,7 +3213,7 @@ class Sign(PrimitiveWithInfer):
>>> sign = P.Sign()
>>> output = sign(input_x)
>>> print(output)
[[1.0, 0.0, -1.0]]
[[ 1. 0. -1.]]
"""

@prim_attr_register

@ -3180,8 +3241,9 @@ class Round(PrimitiveWithInfer):
Examples:
>>> input_x = Tensor(np.array([0.8, 1.5, 2.3, 2.5, -4.5]), mindspore.float32)
>>> round = P.Round()
>>> round(input_x)
[1.0, 2.0, 2.0, 2.0, -4.0]
>>> output = round(input_x)
>>> print(output)
[ 1. 2. 2. 2. -4.]
"""

@prim_attr_register

@ -3245,7 +3307,7 @@ class Atan(PrimitiveWithInfer):
>>> atan = P.Atan()
>>> output = atan(output_y)
>>> print(output)
[[1.047, 0.7850001]]
[1.047 0.7850001]
"""

@prim_attr_register

@ -3273,8 +3335,9 @@ class Atanh(PrimitiveWithInfer):
Examples:
>>> input_x = Tensor(np.array([1.047, 0.785]), mindspore.float32)
>>> atanh = P.Atanh()
>>> atanh(input_x)
[[1.8869909 1.058268]]
>>> output = atanh(input_x)
>>> print(output)
[1.8869909 1.058268 ]
"""

@prim_attr_register

@ -3309,11 +3372,12 @@ class Atan2(_MathBinaryOp):
Tensor, the shape is the same as the one after broadcasting, and the data type is the same as `input_x`.

Examples:
>>> input_x = Tensor(np.array([[0, 1]]), mindspore.float32)
>>> input_y = Tensor(np.array([[1, 1]]), mindspore.float32)
>>> input_x = Tensor(np.array([0, 1]), mindspore.float32)
>>> input_y = Tensor(np.array([1, 1]), mindspore.float32)
>>> atan2 = P.Atan2()
>>> atan2(input_x, input_y)
[[0. 0.7853982]]
>>> output = atan2(input_x, input_y)
>>> print(output)
[0. 0.7853982]
"""

@ -3336,8 +3400,10 @@ class SquareSumAll(PrimitiveWithInfer):
>>> input_x1 = Tensor(np.array([0, 0, 2, 0]), mindspore.float32)
>>> input_x2 = Tensor(np.array([0, 0, 2, 4]), mindspore.float32)
>>> square_sum_all = P.SquareSumAll()
>>> square_sum_all(input_x1, input_x2)
(4, 20)
>>> output = square_sum_all(input_x1, input_x2)
>>> print(output)
(Tensor(shape=[], dtype=Float32, value= 4),
Tensor(shape=[], dtype=Float32, value= 20))
"""

@prim_attr_register
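
Note: the two scalar results are the per-input sums of squares: 0^2 + 0^2 + 2^2 + 0^2 = 4 and 0^2 + 0^2 + 2^2 + 4^2 = 20.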

@ -3373,11 +3439,12 @@ class BitwiseAnd(_BitwiseBinaryOp):
Tensor, has the same type as the `input_x1`.

Examples:
>>> input_x1 = Tensor(np.array([0, 0, 1, -1, 1, 1, 1]), mstype.int16)
>>> input_x2 = Tensor(np.array([0, 1, 1, -1, -1, 2, 3]), mstype.int16)
>>> input_x1 = Tensor(np.array([0, 0, 1, -1, 1, 1, 1]), mindspore.int16)
>>> input_x2 = Tensor(np.array([0, 1, 1, -1, -1, 2, 3]), mindspore.int16)
>>> bitwise_and = P.BitwiseAnd()
>>> bitwise_and(input_x1, input_x2)
[0, 0, 1, -1, 1, 0, 1]
>>> output = bitwise_and(input_x1, input_x2)
>>> print(output)
[ 0 0 1 -1 1 0 1]
"""

@ -3399,11 +3466,12 @@ class BitwiseOr(_BitwiseBinaryOp):
Tensor, has the same type as the `input_x1`.

Examples:
>>> input_x1 = Tensor(np.array([0, 0, 1, -1, 1, 1, 1]), mstype.int16)
>>> input_x2 = Tensor(np.array([0, 1, 1, -1, -1, 2, 3]), mstype.int16)
>>> input_x1 = Tensor(np.array([0, 0, 1, -1, 1, 1, 1]), mindspore.int16)
>>> input_x2 = Tensor(np.array([0, 1, 1, -1, -1, 2, 3]), mindspore.int16)
>>> bitwise_or = P.BitwiseOr()
>>> bitwise_or(input_x1, input_x2)
[0, 1, 1, -1, -1, 3, 3]
>>> output = bitwise_or(input_x1, input_x2)
>>> print(output)
[ 0 1 1 -1 -1 3 3]
"""

@ -3425,11 +3493,12 @@ class BitwiseXor(_BitwiseBinaryOp):
Tensor, has the same type as the `input_x1`.

Examples:
>>> input_x1 = Tensor(np.array([0, 0, 1, -1, 1, 1, 1]), mstype.int16)
>>> input_x2 = Tensor(np.array([0, 1, 1, -1, -1, 2, 3]), mstype.int16)
>>> input_x1 = Tensor(np.array([0, 0, 1, -1, 1, 1, 1]), mindspore.int16)
>>> input_x2 = Tensor(np.array([0, 1, 1, -1, -1, 2, 3]), mindspore.int16)
>>> bitwise_xor = P.BitwiseXor()
>>> bitwise_xor(input_x1, input_x2)
[0, 1, 0, 0, -2, 3, 2]
>>> output = bitwise_xor(input_x1, input_x2)
>>> print(output)
[ 0 1 0 0 -2 3 2]
"""

@ -3449,7 +3518,7 @@ class BesselI0e(PrimitiveWithInfer):
>>> input_x = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), mindspore.float32)
>>> output = bessel_i0e(input_x)
>>> print(output)
[0.7979961, 0.5144438, 0.75117415, 0.9157829]
[0.7979961 0.5144438 0.75117415 0.9157829 ]
"""

@prim_attr_register

@ -3480,7 +3549,7 @@ class BesselI1e(PrimitiveWithInfer):
>>> input_x = Tensor(np.array([0.24, 0.83, 0.31, 0.09]), mindspore.float32)
>>> output = bessel_i1e(input_x)
>>> print(output)
[0.09507662, 0.19699717, 0.11505538, 0.04116856]
[0.09507662 0.19699717 0.11505538 0.04116856]
"""

@prim_attr_register

@ -3511,7 +3580,7 @@ class Inv(PrimitiveWithInfer):
>>> input_x = Tensor(np.array([0.25, 0.4, 0.31, 0.52]), mindspore.float32)
>>> output = inv(input_x)
>>> print(output)
[4., 2.5, 3.2258065, 1.923077]
[4. 2.5 3.2258065 1.923077 ]
"""

@prim_attr_register

@ -3542,7 +3611,7 @@ class Invert(PrimitiveWithInfer):
>>> input_x = Tensor(np.array([25, 4, 13, 9]), mindspore.int16)
>>> output = invert(input_x)
>>> print(output)
[-26, -5, -14, -10]
[-26 -5 -14 -10]
"""

@prim_attr_register
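
Note: for a signed integer type, bitwise NOT satisfies ~x = -x - 1, so 25 -> -26, 4 -> -5, 13 -> -14 and 9 -> -10, as printed.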

@ -3569,9 +3638,9 @@ class Eps(PrimitiveWithInfer):

Examples:
>>> input_x = Tensor([4, 1, 2, 3], mindspore.float32)
>>> out = P.Eps()(input_x)
>>> print(out)
[1.52587891e-05, 1.52587891e-05, 1.52587891e-05, 1.52587891e-05]
>>> output = P.Eps()(input_x)
>>> print(output)
[1.5258789e-05 1.5258789e-05 1.5258789e-05 1.5258789e-05]
"""

@prim_attr_register
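
Note: the value printed for every float32 element equals 2^-16 = 1/65536 = 1.52587890625e-05, shown here rounded to float32 display precision.
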
File diff suppressed because it is too large

@ -39,13 +39,14 @@ class Assign(PrimitiveWithCheck):

Examples:
>>> class Net(nn.Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self.y = mindspore.Parameter(Tensor([1.0], mindspore.float32), name="y")
>>>
>>> def construct(self, x):
>>> P.Assign()(self.y, x)
>>> return self.y
... def __init__(self):
... super(Net, self).__init__()
... self.y = mindspore.Parameter(Tensor([1.0], mindspore.float32), name="y")
...
... def construct(self, x):
... P.Assign()(self.y, x)
... return self.y
...
>>> x = Tensor([2.0], mindspore.float32)
>>> net = Net()
>>> output = net(x)

@ -78,13 +79,20 @@ class InplaceAssign(PrimitiveWithInfer):
Outputs:
Tensor, has the same type as original `variable`.
Examples:
>>> def construct(self, x):
>>> val = x - 1.0
>>> ret = x + 2.0
>>> return InplaceAssign()(x, val, ret)
>>> class Net(nn.Cell):
... def __init__(self):
... super(Net, self).__init__()
... self.inplace_assign = P.InplaceAssign()
...
... def construct(self, x):
... val = x - 1.0
... ret = x + 2.0
... return self.inplace_assign(x, val, ret)
...
>>> x = Tensor([2.0], mindspore.float32)
>>> net = Net()
>>> net(x)
>>> output = net(x)
>>> print(output)
"""
@prim_attr_register
def __init__(self):

@ -116,10 +124,10 @@ class BoundingBoxEncode(PrimitiveWithInfer):
>>> anchor_box = Tensor([[4,1,2,1],[2,2,2,3]],mindspore.float32)
>>> groundtruth_box = Tensor([[3,1,2,2],[1,2,1,4]],mindspore.float32)
>>> boundingbox_encode = P.BoundingBoxEncode(means=(0.0, 0.0, 0.0, 0.0), stds=(1.0, 1.0, 1.0, 1.0))
>>> boundingbox_encode(anchor_box, groundtruth_box)
>>> output = boundingbox_encode(anchor_box, groundtruth_box)
>>> print(output)
[[ 5.0000000e-01 5.0000000e-01 -6.5504000e+04 6.9335938e-01]
[-1.0000000e+00 2.5000000e-01 0.0000000e+00 4.0551758e-01]]

"""

@prim_attr_register

@ -170,7 +178,8 @@ class BoundingBoxDecode(PrimitiveWithInfer):
>>> deltas = Tensor([[3,1,2,2],[1,2,1,4]],mindspore.float32)
>>> boundingbox_decode = P.BoundingBoxDecode(means=(0.0, 0.0, 0.0, 0.0), stds=(1.0, 1.0, 1.0, 1.0),
... max_shape=(768, 1280), wh_ratio_clip=0.016)
>>> boundingbox_decode(anchor_box, deltas)
>>> output = boundingbox_decode(anchor_box, deltas)
>>> print(output)
[[ 4.1953125 0. 0. 5.1953125]
[ 2.140625 0. 3.859375 60.59375 ]]

@ -226,13 +235,13 @@ class CheckValid(PrimitiveWithInfer):
>>> from mindspore import Tensor
>>> from mindspore.ops import operations as P
>>> class Net(nn.Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self.check_valid = P.CheckValid()
>>> def construct(self, x, y):
>>> valid_result = self.check_valid(x, y)
>>> return valid_result
>>>
... def __init__(self):
... super(Net, self).__init__()
... self.check_valid = P.CheckValid()
... def construct(self, x, y):
... valid_result = self.check_valid(x, y)
... return valid_result
...
>>> bboxes = Tensor(np.linspace(0, 6, 12).reshape(3, 4), mindspore.float32)
>>> img_metas = Tensor(np.array([2, 1, 3]), mindspore.float32)
>>> net = Net()

@ -292,10 +301,12 @@ class IOU(PrimitiveWithInfer):
>>> iou = P.IOU()
>>> anchor_boxes = Tensor(np.random.randint(1.0, 5.0, [3, 4]), mindspore.float16)
>>> gt_boxes = Tensor(np.random.randint(1.0, 5.0, [3, 4]), mindspore.float16)
>>> iou(anchor_boxes, gt_boxes)
[[0.0, 65504, 65504],
[0.0, 0.0, 0.0],
[0.22253, 0.0, 0.0]]
>>> output = iou(anchor_boxes, gt_boxes)
>>> print(output)
[[65000. 65500. -0.]
[65000. 65500. -0.]
[ 0. 0. 0.]]

"""

@prim_attr_register

@ -336,19 +347,20 @@ class MakeRefKey(Primitive):
Examples:
>>> from mindspore.ops import functional as F
>>> class Net(nn.Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self.y = mindspore.Parameter(Tensor(np.ones([6, 8, 10]), mindspore.int32), name="y")
>>> self.make_ref_key = P.MakeRefKey("y")
>>>
>>> def construct(self, x):
>>> key = self.make_ref_key()
>>> ref = F.make_ref(key, x, self.y)
>>> return ref * x
>>>
... def __init__(self):
... super(Net, self).__init__()
... self.y = mindspore.Parameter(Tensor(np.ones([6, 8, 10]), mindspore.int32), name="y")
... self.make_ref_key = P.MakeRefKey("y")
...
... def construct(self, x):
... key = self.make_ref_key()
... ref = F.make_ref(key, x, self.y)
... return ref * x
...
>>> x = Tensor(np.ones([3, 4, 5]), mindspore.int32)
>>> net = Net()
>>> net(x)
>>> output = net(x)
>>> print(output)
"""

@prim_attr_register

@ -536,7 +548,9 @@ class PopulationCount(PrimitiveWithInfer):
Examples:
>>> population_count = P.PopulationCount()
>>> x_input = Tensor([0, 1, 3], mindspore.int16)
>>> population_count(x_input)
>>> output = population_count(x_input)
>>> print(output)
[0 1 2]
"""

@prim_attr_register
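
Note: PopulationCount counts the set bits of each element: 0 has none, 1 (0b1) has one, and 3 (0b11) has two, hence [0 1 2].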

@ -396,16 +396,27 @@ class RandomCategorical(PrimitiveWithInfer):

Examples:
>>> class Net(nn.Cell):
>>> def __init__(self, num_sample):
>>> super(Net, self).__init__()
>>> self.random_categorical = P.RandomCategorical(mindspore.int64)
>>> self.num_sample = num_sample
>>> def construct(self, logits, seed=0):
>>> return self.random_categorical(logits, self.num_sample, seed)
>>>
... def __init__(self, num_sample):
... super(Net, self).__init__()
... self.random_categorical = P.RandomCategorical(mindspore.int64)
... self.num_sample = num_sample
... def construct(self, logits, seed=0):
... return self.random_categorical(logits, self.num_sample, seed)
...
>>> x = np.random.random((10, 5)).astype(np.float32)
>>> net = Net(8)
>>> output = net(Tensor(x))
>>> print(output)
[[0 2 1 3 4 2 0 2]
[0 2 1 3 4 2 0 2]
[0 2 1 3 4 2 0 2]
[0 2 1 3 4 2 0 2]
[0 2 0 3 4 2 0 2]
[0 2 1 3 4 3 0 3]
[0 2 1 3 4 2 0 2]
[0 2 1 3 4 2 0 2]
[0 2 1 3 4 2 0 2]
[0 2 0 3 4 2 0 2]]
"""

@prim_attr_register