forked from mindspore-Ecosystem/mindspore
!6118 fix bugs of op Concat, Norm, InTopK, L1Loss and InplaceAdd etc.
Merge pull request !6118 from lihongkang/v2_master
This commit is contained in: commit de9375ba1c
@@ -335,7 +335,7 @@ class Norm(Cell):
     Computes the norm of vectors, currently including Euclidean norm, i.e., :math:`L_2`-norm.

     Args:
-        axis (tuple): The axis over which to compute vector norms. Default: ().
+        axis (Union[tuple, int]): The axis over which to compute vector norms. Default: ().
         keep_dims (bool): If True, the axis indicated in `axis` are kept with size 1. Otherwise,
             the dimensions in `axis` are removed from the output shape. Default: False.
@@ -348,12 +348,14 @@ class Norm(Cell):

     Examples:
         >>> net = nn.Norm(axis=0)
-        >>> input = Tensor(np.random.randint(0, 10, [4, 16]), mindspore.float32)
+        >>> input = Tensor(np.random.randint(0, 10, [2, 4]), mindspore.float32)
         >>> net(input)
+        [2.236068 9.848858 4. 5.656854]
     """

     def __init__(self, axis=(), keep_dims=False):
         super(Norm, self).__init__()
+        validator.check_value_type("keep_dims", keep_dims, [bool], self.cls_name)
         self.axis = axis
         self.keep_dims = keep_dims
         self.reduce_sum = P.ReduceSum(True)
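The updated example prints the axis-0 Euclidean norm of a random [2, 4] tensor, so its output will vary from run to run. Below is a minimal NumPy sketch of the same reduction; the input values are assumed for illustration only (chosen so the result roughly matches the printed values above):

    import numpy as np

    # Assumed input; the docstring example draws np.random.randint(0, 10, [2, 4]).
    x = np.array([[1., 4., 0., 4.],
                  [2., 9., 4., 4.]], dtype=np.float32)

    # nn.Norm(axis=0) reduces over axis 0: sqrt(sum(x**2, axis=0)).
    l2 = np.sqrt(np.sum(x * x, axis=0))
    print(l2)  # approximately [2.236068 9.848858 4. 5.656854]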
@@ -884,8 +884,8 @@ class DepthwiseConv2d(Cell):
         Tensor of shape :math:`(N, C_{out}, H_{out}, W_{out})`.

     Examples:
-        >>> net = nn.DepthwiseConv2d(120, 240, 4, has_bias=False, weight_init='normal')
-        >>> input = Tensor(np.ones([1, 120, 1024, 640]), mindspore.float32)
+        >>> net = nn.DepthwiseConv2d(240, 240, 4, group=None, has_bias=False, weight_init='normal')
+        >>> input = Tensor(np.ones([1, 240, 1024, 640]), mindspore.float32)
         >>> net(input).shape
         (1, 240, 1024, 640)
     """
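The revised example keeps the in_channels argument equal to the input's channel dimension (240). The printed shape is consistent with stride 1 and "same"-style padding (assumed here to be the cell's default behavior), in which case the spatial size is ceil(size / stride). A small sketch of that shape arithmetic:

    import math

    # Assumption: stride 1 and "same" padding, so spatial dims are preserved.
    def same_out_size(size, stride=1):
        return math.ceil(size / stride)

    n, c_out = 1, 240
    h_out = same_out_size(1024)      # 1024
    w_out = same_out_size(640)       # 640
    print((n, c_out, h_out, w_out))  # (1, 240, 1024, 640)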
@@ -83,8 +83,10 @@ class L1Loss(_Loss):
             Default: "mean".

     Inputs:
-        - **input_data** (Tensor) - Tensor of shape :math:`(x_1, x_2, ..., x_R)`.
-        - **target_data** (Tensor) - Tensor of shape :math:`(y_1, y_2, ..., y_S)`.
+        - **input_data** (Tensor) - Tensor of shape :math:`(x_1, x_2, ..., x_R)`. The data type should be float16 or
+          float32.
+        - **target_data** (Tensor) - Tensor of shape :math:`(y_1, y_2, ..., y_S)`. The data type should be float16 or
+          float32.

     Outputs:
         Tensor, loss float tensor.
@@ -94,6 +96,7 @@ class L1Loss(_Loss):
         >>> input_data = Tensor(np.array([1, 2, 3]), mindspore.float32)
         >>> target_data = Tensor(np.array([1, 2, 2]), mindspore.float32)
         >>> loss(input_data, target_data)
+        0.33333334
     """
     def __init__(self, reduction='mean'):
         super(L1Loss, self).__init__(reduction)
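The newly shown result can be checked by hand: with reduction="mean", L1Loss averages the element-wise absolute differences. A NumPy sketch of that computation for the example values:

    import numpy as np

    input_data = np.array([1, 2, 3], dtype=np.float32)
    target_data = np.array([1, 2, 2], dtype=np.float32)

    # reduction="mean": average of |input - target| = (0 + 0 + 1) / 3.
    loss = np.mean(np.abs(input_data - target_data))
    print(loss)  # 0.33333334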
@@ -300,6 +300,7 @@ class IsSubClass(PrimitiveWithInfer):

     Examples:
         >>> result = P.IsSubClass()(mindspore.int32, mindspore.intc)
+        True
     """

     @prim_attr_register
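The example now shows the return value: P.IsSubClass answers whether one MindSpore type belongs to another type family (here, whether int32 is a kind of integer type, intc). The relationship is analogous to Python's built-in issubclass over NumPy's scalar-type hierarchy; the sketch below is only that analogy (NumPy names, not MindSpore ones):

    import numpy as np

    # Analogy only: np.int32 is a concrete integer type and np.integer the integer
    # family, much like mindspore.int32 versus mindspore.intc in the example above.
    print(issubclass(np.int32, np.integer))    # True
    print(issubclass(np.float32, np.integer))  # False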
@@ -1099,7 +1100,7 @@ class InvertPermutation(PrimitiveWithInfer):
         - **input_x** (Union(tuple[int], list[int]) - The input is constructed by multiple
           integers, i.e., :math:`(y_1, y_2, ..., y_S)` representing the indices.
           The values must include 0. There can be no duplicate values or negative values.
-          Only constant value is allowed.
+          Only constant value is allowed. The maximum value must be equal to the length of input_x.

     Outputs:
         tuple[int]. It has the same length as the input.
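The tightened wording (values start at 0, no duplicates or negatives, constant input) describes the index tuple of a permutation, and the op returns its inverse: output[input_x[i]] = i. A plain-Python sketch of that semantics, with an assumed example tuple and a helper name of my own choosing:

    def invert_permutation(perm):
        # perm is expected to contain each index of 0..len(perm)-1 exactly once.
        out = [0] * len(perm)
        for i, p in enumerate(perm):
            out[p] = i  # position p of the output records where p appeared in perm
        return tuple(out)

    print(invert_permutation((3, 4, 0, 1, 2)))  # (2, 3, 4, 0, 1)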
@@ -1599,7 +1600,7 @@ class Concat(PrimitiveWithInfer):

     Note:
         The input data is a tuple of tensors. These tensors have the same rank `R`. Set the given axis as `m`, and
-        :math:`0 \le m < N`. Set the number of input tensors as `N`. For the :math:`i`-th tensor :math:`t_i`, it has
+        :math:`0 \le m < R`. Set the number of input tensors as `N`. For the :math:`i`-th tensor :math:`t_i`, it has
         the shape of :math:`(x_1, x_2, ..., x_{mi}, ..., x_R)`. :math:`x_{mi}` is the :math:`m`-th dimension of the
         :math:`i`-th tensor. Then, the shape of the output tensor is
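The corrected bound :math:`0 \le m < R` matters because `m` indexes a dimension of each input tensor, not an input: along that axis the output size is the sum of the inputs' sizes, while every other dimension must match. A NumPy sketch of the resulting shape, with assumed example shapes:

    import numpy as np

    # Two rank-2 tensors concatenated along axis m = 0.
    t1 = np.zeros((2, 3), dtype=np.float32)
    t2 = np.zeros((4, 3), dtype=np.float32)

    out = np.concatenate((t1, t2), axis=0)
    print(out.shape)  # (6, 3): the axis-0 sizes add up, the other axes must match.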
@@ -3451,7 +3452,8 @@ class InplaceUpdate(PrimitiveWithInfer):
     Updates specified rows with values in `v`.

     Args:
-        indices (Union[int, tuple]): Indices into the left-most dimension of `x`.
+        indices (Union[int, tuple]): Indices into the left-most dimension of `x`, and determines which rows of x
+            to update with v. It is an int or tuple, whose value is in [0, the first dimension size of x).

     Inputs:
         - **x** (Tensor) - A tensor which to be inplace updated. It can be one of the following data types:
@@ -3463,22 +3465,14 @@ class InplaceUpdate(PrimitiveWithInfer):
         Tensor, with the same type and shape as the input `x`.

     Examples:
-        >>> x = Tensor(np.arange(24).reshape(3, 4, 2), mindspore.float32)
-        >>> v = Tensor(np.arange(-8, 8).reshape(2, 4, 2), mindspore.float32)
-        >>> inplace_update = P.InplaceUpdate((0, 2))
+        >>> indices = (0, 1)
+        >>> x = Tensor(np.array([[1, 2], [3, 4], [5, 6]]), mindspore.float32)
+        >>> v = Tensor(np.array([[0.5, 1.0], [1.0, 1.5]]), mindspore.float32)
+        >>> inplace_update = P.InplaceUpdate(indices)
         >>> result = inplace_update(x, v)
-        [[[-8. -7.]
-          [-6. -5.]
-          [-4. -3.]
-          [-2. -1.]]
-         [[ 8.  9.]
-          [10. 11.]
-          [12. 13.]
-          [14. 15.]]
-         [[ 0.  1.]
-          [ 2.  3.]
-          [ 4.  5.]
-          [ 6.  7.]]]
+        [[0.5, 1.0],
+         [1.0, 1.5],
+         [5.0, 6.0]]
     """

     @prim_attr_register
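The new example is small enough to verify mentally: rows 0 and 1 of `x` are overwritten by the rows of `v`, and row 2 is untouched. A NumPy sketch of the same update (an out-of-place copy is used here purely for illustration):

    import numpy as np

    indices = (0, 1)
    x = np.array([[1, 2], [3, 4], [5, 6]], dtype=np.float32)
    v = np.array([[0.5, 1.0], [1.0, 1.5]], dtype=np.float32)

    result = x.copy()
    result[list(indices)] = v  # rows 0 and 1 are replaced by v's rows
    print(result)              # [[0.5, 1.0], [1.0, 1.5], [5.0, 6.0]]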
@@ -965,7 +965,7 @@ class InplaceAdd(PrimitiveWithInfer):
           the first dimension, which must be the same as indices's size. It has the same data type with `input_x`.

     Outputs:
-        Tensor, has the same shape and dtype as input.
+        Tensor, has the same shape and dtype as input_x.

     Examples:
         >>> indices = (0, 1)
@@ -1011,7 +1011,7 @@ class InplaceAdd(PrimitiveWithInfer):

 class InplaceSub(PrimitiveWithInfer):
     """
-    Subtracts v into specified rows of x. Computes y = x; y[i, :] -= v; return y.
+    Subtracts v into specified rows of x. Computes y = x; y[i, :] -= v.

     Args:
         indices (Union[int, tuple]): Indices into the left-most dimension of x, and determines which rows of x
@@ -1023,7 +1023,7 @@ class InplaceSub(PrimitiveWithInfer):
           the first dimension, which must be the same as indices's size. It has the same data type with `input_x`.

     Outputs:
-        Tensor, has the same shape and dtype as input.
+        Tensor, has the same shape and dtype as input_x.

     Examples:
         >>> indices = (0, 1)
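InplaceAdd and InplaceSub follow the same row-selection rule as InplaceUpdate, but they accumulate into the selected rows of `input_x` instead of replacing them, which is why the output is described as having the shape and dtype of `input_x`. A NumPy sketch of both, with assumed example values:

    import numpy as np

    indices = (0, 1)
    input_x = np.array([[1, 2], [3, 4], [5, 6]], dtype=np.float32)
    input_v = np.array([[0.5, 1.0], [1.0, 1.5]], dtype=np.float32)

    added = input_x.copy()
    added[list(indices)] += input_v   # InplaceAdd: y[i, :] += v for i in indices
    subbed = input_x.copy()
    subbed[list(indices)] -= input_v  # InplaceSub: y[i, :] -= v for i in indices
    print(added[0], subbed[0])        # row 0 becomes [1.5, 3.0] and [0.5, 1.0]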
@@ -1496,7 +1496,7 @@ class Log(PrimitiveWithInfer):
     Returns the natural logarithm of a tensor element-wise.

     Inputs:
-        - **input_x** (Tensor) - The input tensor.
+        - **input_x** (Tensor) - The input tensor. With float16 or float32 data type. The value must be greater than 0.

     Outputs:
         Tensor, has the same shape as the `input_x`.
@@ -1533,7 +1533,7 @@ class Log1p(PrimitiveWithInfer):
     Returns the natural logarithm of one plus the input tensor element-wise.

     Inputs:
-        - **input_x** (Tensor) - The input tensor. With float16 or float32 data type.
+        - **input_x** (Tensor) - The input tensor. With float16 or float32 data type. The value must be greater than -1.

     Outputs:
         Tensor, has the same shape as the `input_x`.
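The added value constraints mirror the functions' domains: log(x) needs x > 0, while log1p(x) = log(1 + x) only needs x > -1 and is the numerically preferable form for inputs close to zero. A small NumPy sketch with assumed values:

    import numpy as np

    x = np.array([1e-7, 0.5, 2.0], dtype=np.float32)

    print(np.log(x))       # defined only for x > 0
    print(np.log1p(x))     # log(1 + x), defined for x > -1; more accurate near 0
    print(np.log1p(-0.5))  # fine, since -1 < -0.5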
@@ -5541,7 +5541,8 @@ class InTopK(PrimitiveWithInfer):

     Inputs:
         - **x1** (Tensor) - A 2D Tensor defines the predictions of a batch of samples with float16 or float32 data type.
-        - **x2** (Tensor) - A 1D Tensor defines the labels of a batch of samples with int32 data type.
+        - **x2** (Tensor) - A 1D Tensor defines the labels of a batch of samples with int32 data type. The size of x2
+          must be equal to x1's first dimension.

     Outputs:
         Tensor has 1 dimension of type bool and the same shape with `x2`. For labeling sample `i` in `x2`,
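The added sentence pins down the shape contract: `x2` holds one label per row of `x1`, so its length has to match `x1`'s first dimension, which is exactly the check added to infer_shape in the next hunk. For each sample `i`, the op reports whether label `x2[i]` is among the k largest entries of row `x1[i]`. A NumPy sketch with assumed values and k = 2:

    import numpy as np

    k = 2
    x1 = np.array([[1.0, 8.0, 5.0, 2.0],
                   [4.0, 3.0, 9.0, 7.0]], dtype=np.float32)  # predictions, shape (2, 4)
    x2 = np.array([1, 0], dtype=np.int32)                    # labels, one per row of x1

    assert x2.shape[0] == x1.shape[0]  # the constraint documented above

    # For each row, is the label's score among the top-k scores of that row?
    topk_idx = np.argsort(-x1, axis=1)[:, :k]
    result = np.array([x2[i] in topk_idx[i] for i in range(x1.shape[0])])
    print(result)  # [ True False]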
@@ -5568,8 +5569,8 @@ class InTopK(PrimitiveWithInfer):
         return mstype.tensor_type(mstype.bool_)

     def infer_shape(self, x1_shape, x2_shape):
-        validator.check("x1", len(x1_shape), "", 2, Rel.EQ, self.name)
-        validator.check("x2", len(x2_shape), "", 1, Rel.EQ, self.name)
+        validator.check("x1 shape", len(x1_shape), "", 2, Rel.EQ, self.name)
+        validator.check("x2 shape", len(x2_shape), "", 1, Rel.EQ, self.name)
+        validator.check("size of x2", x2_shape[0], "x1's first dimension", x1_shape[0], Rel.EQ, self.name)
         return x2_shape