forked from mindspore-Ecosystem/mindspore
!7168 fix bugs of op DynamicRNN, LayerNorm, ReduceAny and Neg
Merge pull request !7168 from lihongkang/v2_master
commit f427a47c81
@@ -490,7 +490,7 @@ class LayerNorm(Cell):
     Args:
         normalized_shape (Union(tuple[int], list[int])): The normalization is performed over axis
             `begin_norm_axis ... R - 1`.
-        begin_norm_axis (int): It first normalization dimension: normalization will be performed along dimensions
+        begin_norm_axis (int): The first normalization dimension: normalization will be performed along dimensions
             `begin_norm_axis: rank(inputs)`, the value should be in [-1, rank(input)). Default: -1.
         begin_params_axis (int): The first parameter (beta, gamma) dimension: scale and centering parameters
             will have dimensions `begin_params_axis: rank(inputs)` and will be broadcast with
@@ -514,7 +514,8 @@ class LayerNorm(Cell):
         >>> x = Tensor(np.ones([20, 5, 10, 10]), mindspore.float32)
         >>> shape1 = x.shape[1:]
         >>> m = nn.LayerNorm(shape1, begin_norm_axis=1, begin_params_axis=1)
-        >>> m(x)
+        >>> m(x).shape
+        (20, 5, 10, 10)
     """

     def __init__(self,
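For reference, the corrected doctest can be run as plain Python. A minimal sketch, assuming a working MindSpore install; it mirrors the docstring example above:

import numpy as np
import mindspore
import mindspore.nn as nn
from mindspore import Tensor

# Normalize over the last three axes of a (20, 5, 10, 10) input,
# as in the docstring: begin_norm_axis=1, begin_params_axis=1.
x = Tensor(np.ones([20, 5, 10, 10]), mindspore.float32)
shape1 = x.shape[1:]
m = nn.LayerNorm(shape1, begin_norm_axis=1, begin_params_axis=1)
print(m(x).shape)  # expected: (20, 5, 10, 10)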
@@ -29,6 +29,7 @@ neg_op_info = TBERegOp("Neg") \
     .dtype_format(DataType.I32_None, DataType.I32_None) \
     .dtype_format(DataType.F16_None, DataType.F16_None) \
     .dtype_format(DataType.F32_None, DataType.F32_None) \
+    .dtype_format(DataType.I8_None, DataType.I8_None) \
     .get_op_info()
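The added line registers an int8 dtype format for the TBE (Ascend) Neg kernel. A minimal sketch of what that enables, assuming an Ascend backend where this registration applies:

import numpy as np
from mindspore import Tensor
from mindspore.ops import operations as P

# With the I8 dtype_format registered, Neg should accept int8 tensors
# in addition to the previously registered int32/float16/float32 types.
neg = P.Neg()
x = Tensor(np.array([1, -2, 3], dtype=np.int8))
print(neg(x))  # expected: [-1  2 -3]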
@@ -449,6 +449,8 @@ class ReduceAny(_Reduce):
         >>> input_x = Tensor(np.array([[True, False], [True, True]]))
         >>> op = P.ReduceAny(keep_dims=True)
         >>> output = op(input_x, 1)
+        [[True],
+         [True]]
     """

     def __infer__(self, input_x, axis):
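The hunk adds the expected output to the ReduceAny docstring example: a logical OR reduction along axis 1 with the reduced dimension kept. A runnable sketch of the same call, assuming a MindSpore install:

import numpy as np
from mindspore import Tensor
from mindspore.ops import operations as P

# Reduce by logical OR along axis 1; keep_dims=True keeps the
# reduced axis with size 1, so the result shape is (2, 1).
input_x = Tensor(np.array([[True, False], [True, True]]))
op = P.ReduceAny(keep_dims=True)
output = op(input_x, 1)
print(output)  # expected: [[ True], [ True]]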
@@ -5551,10 +5551,10 @@ class DynamicRNN(PrimitiveWithInfer):
             The data type must be float16 or float32.
         - **b** (Tensor) - Bias. Tensor of shape (`4 x hidden_size`).
             The data type must be float16 or float32.
-        - **seq_length (Tensor) - The length of each batch. Tensor of shape (`batch_size`).
+        - **seq_length** (Tensor) - The length of each batch. Tensor of shape (`batch_size`).
             Only `None` is currently supported.
-        - **init_h (Tensor) - Hidden state of initial time. Tensor of shape (1, `batch_size`, `hidden_size`).
-        - **init_c (Tensor) - Cell state of initial time. Tensor of shape (1, `batch_size`, `hidden_size`).
+        - **init_h** (Tensor) - Hidden state of initial time. Tensor of shape (1, `batch_size`, `hidden_size`).
+        - **init_c** (Tensor) - Cell state of initial time. Tensor of shape (1, `batch_size`, `hidden_size`).

     Outputs:
         - **y** (Tensor) - A Tensor of shape (`num_step`, `batch_size`, `hidden_size`).
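A shape-only sketch of the documented inputs, assuming an Ascend backend (DynamicRNN is a TBE primitive) and the hypothetical sizes chosen below; shapes follow the corrected docstring:

import numpy as np
from mindspore import Tensor
from mindspore.ops import operations as P

# Documented shapes: x (num_step, batch_size, input_size),
# w (input_size + hidden_size, 4 x hidden_size), b (4 x hidden_size),
# seq_length None (only None is supported),
# init_h / init_c (1, batch_size, hidden_size).
num_step, batch_size, input_size, hidden_size = 2, 8, 16, 32
x = Tensor(np.random.rand(num_step, batch_size, input_size).astype(np.float16))
w = Tensor(np.random.rand(input_size + hidden_size, 4 * hidden_size).astype(np.float16))
b = Tensor(np.random.rand(4 * hidden_size).astype(np.float16))
init_h = Tensor(np.random.rand(1, batch_size, hidden_size).astype(np.float16))
init_c = Tensor(np.random.rand(1, batch_size, hidden_size).astype(np.float16))

dynamic_rnn = P.DynamicRNN()
y, *_ = dynamic_rnn(x, w, b, None, init_h, init_c)
print(y.shape)  # expected: (num_step, batch_size, hidden_size)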