delete pack description about num and add raise error

- SparseApplyAdagrad: example format
- PReLU and grad: state 1-d not supported
- atan2 bprop: dx, dy revert to original shapes
This commit is contained in:
parent d17d61265d
commit 4694c979a5
@@ -788,7 +788,7 @@ def get_bprop_atan2(self):

     def bprop(x, y, out, dout):
         tmp = dout / (square(x) + square(y))
-        dx = tmp * y
-        dy = tmp * (-x)
-        return (dx, dy)
+        bc_dx = tmp * y
+        bc_dy = tmp * (-x)
+        return binop_grad_common(x, y, bc_dx, bc_dy)

     return bprop
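The reverted bprop returns gradients reduced back to the original operand shapes: when x and y broadcast against each other, tmp * y takes the broadcast shape, and binop_grad_common sums it back down to each input's shape. A minimal NumPy sketch of that reduction (the reduce_to_shape helper here is illustrative, not MindSpore's implementation):

    import numpy as np

    def reduce_to_shape(grad, shape):
        # Sum out the extra leading axes added by broadcasting ...
        while grad.ndim > len(shape):
            grad = grad.sum(axis=0)
        # ... then sum over axes that were size 1 in the original shape.
        for axis, dim in enumerate(shape):
            if dim == 1:
                grad = grad.sum(axis=axis, keepdims=True)
        return grad

    x = np.random.rand(3, 1)    # broadcasts against y
    y = np.random.rand(3, 4)
    dout = np.ones((3, 4))
    tmp = dout / (x**2 + y**2)
    dx = reduce_to_shape(tmp * y, x.shape)     # d/dx atan2(x, y) = y / (x^2 + y^2)
    dy = reduce_to_shape(tmp * (-x), y.shape)  # d/dy atan2(x, y) = -x / (x^2 + y^2)
    assert dx.shape == x.shape and dy.shape == y.shape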
@@ -669,6 +669,9 @@ class PReLUGrad(PrimitiveWithInfer):
     r"""
     Gradients of PReLU operation.

+    Note:
+        1-dimensional input_x is not supported.
+
     Inputs:
         - **y_backprop** (Tensor) - Representing the backprop of the next layer.
         - **input_x** (Tensor) - Should be the input `input_x` of forward operator PRelu.
@@ -683,6 +686,8 @@ class PReLUGrad(PrimitiveWithInfer):
         pass

     def infer_shape(self, y_backprop_shape, A_shape, w_shape):
+        if len(A_shape) == 1:
+            raise ValueError(f'For \'{self.name}\' input_x rank 1 is not supported.')
         return y_backprop_shape, w_shape

     def infer_dtype(self, y_backprop_dtype, A_dtype, w_dtype):
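The new check rejects rank-1 input_x before any shape inference runs. For orientation, a hedged NumPy sketch of the gradients a PReLU backward pass computes, assuming (N, C, ...) layout (the function name and reduction here are illustrative, not the primitive's code):

    import numpy as np

    def prelu_grad(dout, x, w):
        # Broadcast one slope per channel, matching the forward pass.
        w_b = w.reshape((1, -1) + (1,) * (x.ndim - 2))
        # dx uses slope 1 where x > 0 and the learned slope elsewhere.
        dx = dout * np.where(x > 0, 1.0, w_b)
        # dw gathers the x <= 0 contributions, reduced over all non-channel axes.
        axes = tuple(i for i in range(x.ndim) if i != 1)
        dw = (dout * np.where(x > 0, 0.0, x)).sum(axis=axes)
        return dx, dw

    x = np.random.randn(2, 3, 4).astype(np.float32)
    dout = np.ones_like(x)
    w = np.array([0.1, 0.2, 0.3], np.float32)
    dx, dw = prelu_grad(dout, x, w)
    assert dx.shape == x.shape and dw.shape == w.shape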
@@ -1308,8 +1308,8 @@ class Concat(PrimitiveWithInfer):
 def _get_pack_shape(x_shape, x_type, axis, prim_name):
     """for pack output shape"""
     validator.check_value_type("shape", x_shape, [tuple, list], prim_name)
-    validator.check_integer("len of input_x shape", len(x_shape), 0, Rel.GT, prim_name)
-    validator.check_subclass("shape0", x_type[0], mstype.tensor, prim_name)
+    validator.check_integer("len of input_x", len(x_shape), 1, Rel.GT, prim_name)
+    validator.check_subclass("input_x[0]", x_type[0], mstype.tensor, prim_name)
     validator.check_integer("len of input_x0 shape", len(x_shape[0]), 0, Rel.GT, prim_name)
     rank_base = len(x_shape[0])
     N = len(x_shape)
@@ -1320,7 +1320,7 @@ def _get_pack_shape(x_shape, x_type, axis, prim_name):
     for i in range(1, N):
         v = x_shape[i]
         validator.check('len of x_shape[%d]' % i, len(v), 'len of rank_base', rank_base, Rel.EQ, prim_name)
-        validator.check('x_type[%d]' % i, x_type[i], 'base', x_type[0], Rel.EQ, prim_name)
+        validator.check('x_type[%d]' % i, x_type[i], 'base', x_type[0], Rel.EQ, prim_name, TypeError)
         for j in range(rank_base):
             if v[j] != x_shape[0][j]:
                 raise ValueError(f"For \'{prim_name}\' element {i} shape in input can not pack with first element")
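Taken together, these checks enforce a simple shape rule: at least two elements, all sharing one shape, with the output inserting N at the pack axis. A plain-Python sketch of that rule (assuming axis is already normalized into [0, rank_base]; the function name is illustrative):

    def pack_output_shape(x_shape, axis):
        rank_base = len(x_shape[0])
        n = len(x_shape)
        if any(s != x_shape[0] for s in x_shape[1:]):
            raise ValueError("element shapes cannot pack with first element")
        # Insert the element count at the pack axis.
        return x_shape[0][:axis] + [n] + x_shape[0][axis:]

    # Matches the 'Pack_3' test below: two (128, 128) inputs -> (2, 128, 128).
    assert pack_output_shape([[128, 128], [128, 128]], 0) == [2, 128, 128]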
@@ -1346,6 +1346,12 @@ class Pack(PrimitiveWithInfer):
     Outputs:
         Tensor. A packed Tensor with the same type as `input_x`.

+    Raises:
+        TypeError: If the data types of elements in input_x are not the same.
+        ValueError: If length of input_x is not greater than 1;
+            or if axis is out of the range [-(R+1), R+1);
+            or if the shapes of elements in input_x are not the same.
+
     Examples:
         >>> data1 = Tensor(np.array([0, 1]).astype(np.float32))
         >>> data2 = Tensor(np.array([2, 3]).astype(np.float32))
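The documented behavior parallels stack semantics in NumPy; a sketch of the docstring example and of the shape-mismatch case the new ValueError entry describes:

    import numpy as np

    # Packing two (2,)-shaped tensors along axis 0 yields shape (2, 2).
    data1 = np.array([0, 1], np.float32)
    data2 = np.array([2, 3], np.float32)
    assert np.stack([data1, data2], axis=0).shape == (2, 2)

    # Mismatched element shapes raise, mirroring the documented ValueError.
    try:
        np.stack([data1, np.zeros(3, np.float32)], axis=0)
    except ValueError as err:
        print(err)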
@@ -1386,8 +1392,6 @@ class Unpack(PrimitiveWithInfer):
     Args:
         axis (int): Dimension along which to pack. Default: 0.
             Negative values wrap around. The range is [-R, R).
-        num (int): The number of tensors to be unpacked to. Default : "None".
-            If `num` is not specified, it is inferred from the shape of `input_x`.

     Inputs:
         - **input_x** (Tensor) - The shape is :math:`(x_1, x_2, ..., x_R)`.
@@ -1397,8 +1401,7 @@ class Unpack(PrimitiveWithInfer):
         A tuple of Tensors, the shape of each objects is same.

     Raises:
-        ValueError: If axis is out of the range [-len(input_x.shape()), len(input_x.shape())),
-            or if len(input_x.shape[axis]) not equal to num.
+        ValueError: If axis is out of the range [-len(input_x.shape()), len(input_x.shape())).

     Examples:
         >>> unpack = P.Unpack()
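Dropping `num` from the Args and the Raises list reflects that the count is fully determined by the input: unpacking along `axis` always yields input_x.shape[axis] tensors. A NumPy sketch of that invariant:

    import numpy as np

    x = np.array([[1, 1, 1], [2, 2, 2]], np.float32)
    axis = 0
    # The number of outputs is always x.shape[axis]; no separate `num` needed.
    pieces = [np.squeeze(p, axis=axis)
              for p in np.split(x, x.shape[axis], axis=axis)]
    assert len(pieces) == x.shape[axis]
    assert pieces[0].shape == (3,)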
@@ -2087,6 +2087,9 @@ class PReLU(PrimitiveWithInfer):

     where :math:`x_i` is an element of an channel of the input.

+    Note:
+        1-dimensional input_x is not supported.
+
     Inputs:
         - **input_x** (Tensor) - Float tensor, representing the output of the preview layer.
         - **weight** (Tensor) - Float Tensor, w > 0, there is only two shapes are legitimate,
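The rank-1 restriction follows from the channel-wise definition: the weight carries one slope per channel, and a 1-D input has no channel axis. A minimal NumPy sketch of the forward rule PReLU(x_i) = max(0, x_i) + w_c * min(0, x_i), assuming (N, C, ...) layout:

    import numpy as np

    def prelu(x, w):
        # One learned slope per channel, broadcast over the channel axis.
        w = w.reshape((1, -1) + (1,) * (x.ndim - 2))
        return np.maximum(0, x) + w * np.minimum(0, x)

    x = np.random.randn(2, 3, 4, 4).astype(np.float32)
    w = np.array([0.1, 0.2, 0.3], np.float32)   # len == channel count
    assert prelu(x, w).shape == x.shape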
@@ -2106,14 +2109,13 @@ class PReLU(PrimitiveWithInfer):
         input_x_dim = len(input_x_shape)
         weight_dim = len(weight_shape)

+        if input_x_dim == 1:
+            raise ValueError(f'For \'{self.name}\' input_x rank 1 is not supported.')
+
         if weight_dim != 1:
             raise ValueError(f'For \'{self.name}\' weight_dim must be 1, while weight_dim is {weight_dim}.')

-        if input_x_dim == 1 and weight_shape[0] != 1:
-            raise ValueError(f'For \'{self.name}\' when input_x_dim is 1, weight_shape[0] must be 1, '
-                             f'while weight_shape[0] is {weight_shape[0]}.')
-
-        if input_x_dim != 1 and weight_shape[0] != input_x_shape[1] and weight_shape[0] != 1:
+        if weight_shape[0] != input_x_shape[1] and weight_shape[0] != 1:
             raise ValueError(f'For \'{self.name}\' channel of input_x and weight must be matched,'
                              f' while channel of input_x is {input_x_shape[1]},'
                              f' weight_shape[0] is {weight_shape[0]}.')
@@ -2556,12 +2558,12 @@ class SparseApplyAdagrad(PrimitiveWithInfer):
         Tensor, has the same shape and type as `var`.

     Examples:
-        var = Tensor(np.random.random((3, 3)), mindspore.float32)
-        accum = Tensor(np.random.random((3, 3)), mindspore.float32)
-        grad = Tensor(np.random.random((3, 3)), mindspore.float32)
-        indices = Tensor(np.ones((3,), np.int32))
-        sparse_apply_ada_grad = P.SparseApplyAdagrad(0.5)
-        sparse_apply_ada_grad(var, accum, grad, indices)
+        >>> var = Tensor(np.random.random((3, 3)), mindspore.float32)
+        >>> accum = Tensor(np.random.random((3, 3)), mindspore.float32)
+        >>> grad = Tensor(np.random.random((3, 3)), mindspore.float32)
+        >>> indices = Tensor(np.ones((3,), np.int32))
+        >>> sparse_apply_ada_grad = P.SparseApplyAdagrad(0.5)
+        >>> sparse_apply_ada_grad(var, accum, grad, indices)
     """

     @prim_attr_register
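For context on what the reformatted example computes: SparseApplyAdagrad applies the Adagrad rule only at the rows named by `indices`. A rough NumPy sketch under that assumption (the exact update MindSpore uses may differ in details such as epsilon handling):

    import numpy as np

    lr = 0.5
    var = np.random.random((3, 3)).astype(np.float32)
    accum = np.random.random((3, 3)).astype(np.float32)
    grad = np.random.random((3, 3)).astype(np.float32)
    indices = np.ones((3,), np.int32)

    # Row-sparse Adagrad: accumulate squared gradients, then scale the step.
    for i, row in enumerate(indices):
        accum[row] += grad[i] ** 2
        var[row] -= lr * grad[i] / np.sqrt(accum[row])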
@@ -1033,11 +1033,6 @@ test_case_array_ops = [
         'desc_bprop':[[3, 2, 3, 3]],
     }),
-    ('Pack_2', {
-        'block': NetForPackInput(P.Pack()),
-        'desc_inputs':[[2, 2]],
-        'desc_bprop':[[1, 2, 2]],
-    }),
     ('Pack_3', {
         'block': NetForPackInput(P.Pack()),
         'desc_inputs':[[128, 128], [128, 128]],
         'desc_bprop':[[2, 128, 128]],
@@ -1052,16 +1047,26 @@ test_case_array_ops = [
         'desc_inputs':[Tensor(np.array([[1, 1, 1]], np.float32))],
         'desc_bprop':[[1], [1], [1]],
     }),
-    ('Diag', {
+    ('Diag_1', {
         'block': P.Diag(),
         'desc_inputs': [[4]],
         'desc_bprop': [[4, 4]],
     }),
-    ('DiagPart', {
+    ('Diag_2', {
+        'block': P.Diag(),
+        'desc_inputs': [[4, 4]],
+        'desc_bprop': [[4, 4, 4, 4]],
+    }),
+    ('DiagPart_1', {
         'block': P.DiagPart(),
         'desc_inputs': [[4, 4]],
         'desc_bprop': [[4]],
     }),
+    ('DiagPart_2', {
+        'block': P.DiagPart(),
+        'desc_inputs': [[4, 4, 4, 4]],
+        'desc_bprop': [[4, 4]],
+    }),
     ('SpaceToBatch_1', {
         'block': P.SpaceToBatch(2, [[0, 0], [0, 0]]),
         'desc_inputs': [[1, 3, 2, 2]],
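The renamed and added Diag/DiagPart cases encode the operators' shape contract: Diag maps shape S to S + S (rank doubles), and DiagPart inverts it, hence (4,) -> (4, 4) and (4, 4) -> (4, 4, 4, 4) above. A NumPy check of the rank-1 pair:

    import numpy as np

    x = np.array([1.0, 2.0, 3.0, 4.0])
    d = np.diag(x)                         # Diag: (4,) -> (4, 4)
    assert d.shape == (4, 4)
    assert np.array_equal(np.diag(d), x)   # DiagPart: (4, 4) -> (4,)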
@@ -1200,6 +1205,15 @@ raise_set = [
         Tensor(np.ones((2, 2), np.int32)),
         Tensor(np.ones((2,), np.float32))),
      'desc_bprop': [[2, 3]]}),
+    ('Pack', {
+        'block': (NetForPackInput(P.Pack()), {'exception': ValueError}),
+        'desc_inputs':[[2, 2]],
+        'desc_bprop':[[1, 2, 2]]}),
+    ('PReLU', {
+        'block': (P.PReLU(), {'exception': ValueError}),
+        'desc_inputs':[[2], [1]],
+        'desc_bprop':[[1]]}),
+
 ]