1. Update the documentation of the IndexAdd operator.

2. Update the inputs of Slice.
3. Update the example of Unstack.
This commit is contained in:
liuhe 2021-03-20 15:36:58 +08:00
parent fb69f3e509
commit 96bfe123a2
4 changed files with 60 additions and 25 deletions

View File

@ -2546,8 +2546,7 @@ class Unstack(PrimitiveWithInfer):
>>> input_x = Tensor(np.array([[1, 1, 1, 1], [2, 2, 2, 2]]))
>>> output = unstack(input_x)
>>> print(output)
(Tensor(shape=[4], dtype=Int32, value= [1, 1, 1, 1]),
Tensor(shape=[4], dtype=Int32, value= [2, 2, 2, 2]))
(Tensor(shape=[4], dtype=Int64, value= [1, 1, 1, 1]), Tensor(shape=[4], dtype=Int64, value= [2, 2, 2, 2]))
"""
@prim_attr_register
@ -2599,8 +2598,8 @@ class Slice(PrimitiveWithInfer):
Inputs:
- **input_x** (Tensor): The target tensor.
- **begin** (tuple, list): The beginning of the slice. Only constant value is allowed.
- **size** (tuple, list): The size of the slice. Only constant value is allowed.
- **begin** (Union[tuple, list]): The beginning of the slice. Only constant value is allowed.
- **size** (Union[tuple, list]): The size of the slice. Only constant value is allowed.
Outputs:
Tensor, the shape is the same as the input `size`, and the data type is the same as that of `input_x`.
@ -5230,6 +5229,9 @@ class Range(PrimitiveWithCheck):
Outputs:
A 1-D Tensor, with the same type as the inputs.
Supported Platforms:
``GPU``
Examples:
>>> start = Tensor(0, mstype.int32)
>>> limit = Tensor(10, mstype.int32)
@ -5237,9 +5239,6 @@ class Range(PrimitiveWithCheck):
>>> output = ops.Range()(start, limit, delta)
>>> print(output)
[0, 4, 8]
Supported Platforms:
``GPU`` ``CPU``
"""
@prim_attr_register

View File

@ -884,12 +884,20 @@ class BatchMatMul(MatMul):
>>> output = batmatmul(input_x, input_y)
>>> print(output)
[[[[3. 3. 3. 3.]]
[[3. 3. 3. 3.]]
[[3. 3. 3. 3.]]
[[3. 3. 3. 3.]]],
[[3. 3. 3. 3.]]]
[[[3. 3. 3. 3.]]
[[3. 3. 3. 3.]]
[[3. 3. 3. 3.]]
[[3. 3. 3. 3.]]]]
>>>
>>> input_x = Tensor(np.ones(shape=[2, 4, 3, 1]), mindspore.float32)
@ -898,12 +906,20 @@ class BatchMatMul(MatMul):
>>> output = batmatmul(input_x, input_y)
>>> print(output)
[[[[3. 3. 3. 3.]]
[[3. 3. 3. 3.]]
[[3. 3. 3. 3.]]
[[3. 3. 3. 3.]]],
[[3. 3. 3. 3.]]]
[[[3. 3. 3. 3.]]
[[3. 3. 3. 3.]]
[[3. 3. 3. 3.]]
[[3. 3. 3. 3.]]]]
"""
@ -4520,7 +4536,8 @@ class MatrixInverse(PrimitiveWithInfer):
class IndexAdd(PrimitiveWithInfer):
"""
Adds tensor y to specified axis and indices of tensor x.
Adds tensor y to specified axis and indices of tensor x. The axis should be in the range from 0 to len(x.shape) - 1,
and indices should be in the range from 0 to the size of x at the axis dimension.
Args:
axis (int): The dimension along which to index.
@ -4529,7 +4546,7 @@ class IndexAdd(PrimitiveWithInfer):
- **input_x** (Parameter) - The input tensor to add to, with data type float64, float32, float16, int32, int16,
int8, uint8.
- **indices** (Tensor) - The index of `input_x` on the `axis`th dimension to add to, with data type int32.
The `indices` must be 1D with the size same as the size of the `axis`th dimension of `input_y`. The values
The `indices` must be 1D with the same size as the `axis`th dimension of `input_y`. The values
of `indices` should be in the range of 0 to the size of the `axis`th dimension of `input_x`.
- **input_y** (Tensor) - The input tensor with the value to add. Must have same data type as `input_x`.
The shape must be the same as `input_x` except the `axis`th dimension.
@ -4537,19 +4554,32 @@ class IndexAdd(PrimitiveWithInfer):
Outputs:
Tensor, has the same shape and dtype as input_x.
Raises:
TypeError: If dtype of `input_x` is not one of: float64, float32, float16, int32, int16, int8, uint8.
TypeError: If `indices` or `input_y` is not a Tensor.
TypeError: If the shape of `input_y` is not the same as that of `input_x`.
Supported Platforms:
``GPU``
Examples:
>>> input_x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [6, 7, 8]]), mindspore.float32)
>>> class Net(nn.Cell):
... def __init__(self):
... super(Net, self).__init__()
... self.index_add = ops.IndexAdd(axis=1)
... self.input_x = Parameter(Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), mindspore.float32))
... self.indices = Tensor(np.array([0, 2]), mindspore.int32)
...
... def construct(self, input_y):
... return self.index_add(self.input_x, self.indices, input_y)
...
>>> input_y = Tensor(np.array([[0.5, 1.0], [1.0, 1.5], [2.0, 2.5]]), mindspore.float32)
>>> indices = Tensor(np.array([0, 2]), mindspore.int32)
>>> index_add = ops.IndexAdd(axis=1)
>>> output = index_add(input_x, indices, input_y)
>>> net = Net()
>>> output = net(input_y)
>>> print(output)
[[ 1.5 2. 4. ]
[ 5. 5. 7.5]
[ 8. 7. 10.5]]
[ 9. 8. 11.5]]
"""
__mindspore_signature__ = (
sig.make_sig('input_x', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),

View File

@ -978,9 +978,14 @@ class BNTrainingReduce(PrimitiveWithInfer):
>>> bn_training_reduce = ops.BNTrainingReduce()
>>> output = bn_training_reduce(input_x)
>>> print(output)
(Tensor(shape=[3], dtype=Float32, value=
[ 1.22880000e+04, 1.22880000e+04, 1.22880000e+04]), Tensor(shape=[3], dtype=Float32, value=
[ 1.22880000e+04, 1.22880000e+04, 1.22880000e+04]))
(Tensor(shape=[1, 2, 2, 2], dtype=Float32, value=
[[[[ 2.73200464e+00, 2.73200464e+00],
[ 2.73200464e+00, 2.73200464e+00]],
[[ 2.73200464e+00, 2.73200464e+00],
[ 2.73200464e+00, 2.73200464e+00]]]]), Tensor(shape=[2], dtype=Float32, value= [ 9.24999952e-01,
9.24999952e-01]), Tensor(shape=[2], dtype=Float32, value= [ 9.24999952e-01, 9.24999952e-01]),
Tensor(shape=[2], dtype=Float32, value= [ 2.50000000e-01, 2.50000000e-01]), Tensor(shape=[2], dtype=Float32,
value= [ 1.87500000e-01, 1.87500000e-01]))
"""
@prim_attr_register
@ -1036,7 +1041,7 @@ class BNTrainingUpdate(PrimitiveWithInfer):
TypeError: If dtype of `epsilon` or `factor` is not float.
TypeError: If `x`, `sum`, `square_sum`, `scale`, `offset`, `mean` or `variance` is not a Tensor.
TypeError: If dtype of `x`, `sum`, `square_sum`, `scale`, `offset`, `mean` or `variance` is neither float16 nor
float32.
float32.
Supported Platforms:
``Ascend``
@ -2227,9 +2232,9 @@ class NLLLoss(PrimitiveWithInfer):
>>> nll_loss = ops.NLLLoss(reduction="mean")
>>> loss, weight = nll_loss(input, target, weight)
>>> print(loss)
[-0.52507716]
-0.52507716
>>> print(weight)
[1.1503246 0.79172504]
1.1503246
"""
@prim_attr_register
@ -3006,7 +3011,7 @@ class LayerNorm(Primitive):
[2.]]
>>> print(variance)
[[0.6666667]
[0.6666667]])
[0.6666667]]
"""
@prim_attr_register

View File

@ -38,8 +38,9 @@ class Primitive(Primitive_):
>>> class Add(Primitive):
... @prim_attr_register
... def __init__(self, attr1, attr2):
... # check attr1 and attr2 or do some initializations
>>> # init a Primitive obj with attr1=1 and attr2=2
... '''init for add'''
... # check attr1 and attr2 or do some initializations
... # init a Primitive obj with attr1=1 and attr2=2
>>> add = Add(attr1=1, attr2=2)
"""
_repr_ignore_list = ['input_names', 'output_names']