!10619 add timedistributed

From: @d00562747
Reviewed-by: 
Signed-off-by:
commit f80b28313f
mindspore-ci-bot authored 2020-12-30 09:33:27 +08:00, committed by Gitee
6 changed files with 540 additions and 5 deletions

View File

@@ -53,7 +53,7 @@ bool AssignCPUKernel::Launch(const std::vector<AddressPtr> &inputs, const std::v
MS_LOG(EXCEPTION) << "Memcpy size must <= max_size, but got memcpy size is : " << total_size
<< ", max size is : " << max_size;
}
- int ret = memcpy_s(inputs[0]->addr, total_size, inputs[1]->addr, total_size);
+ int ret = memcpy_s(inputs[0]->addr, max_size, inputs[1]->addr, total_size);
if (ret != 0) {
MS_LOG(EXCEPTION) << "memcpy_s error, error no " << ret;
}

View File

@@ -19,7 +19,6 @@
namespace mindspore {
namespace kernel {
template <typename T>
void MaximumCPUKernel<T>::InitKernel(const CNodePtr &kernel_node) {
CheckParam(kernel_node);
@@ -216,6 +215,5 @@ void MaximumCPUKernel<T>::BroadcastArithTensors(const T *input_x, const T *input
output[i] = MaximumFunc(input_x[i], input_y[i]);
}
}
} // namespace kernel
} // namespace mindspore

View File

@@ -17,7 +17,8 @@ Layer.
The high-level components(Cells) used to construct the neural network.
"""
- from . import activation, normalization, container, conv, lstm, basic, embedding, pooling, image, quant, math, combined
+ from . import activation, normalization, container, conv, lstm, basic, embedding, pooling, image, quant, math, \
+     combined, timedistributed
from .activation import *
from .normalization import *
from .container import *
@@ -30,6 +31,7 @@ from .image import *
from .quant import *
from .math import *
from .combined import *
+ from .timedistributed import *
__all__ = []
__all__.extend(activation.__all__)
@@ -44,3 +46,4 @@ __all__.extend(image.__all__)
__all__.extend(quant.__all__)
__all__.extend(math.__all__)
__all__.extend(combined.__all__)
+ __all__.extend(timedistributed.__all__)
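
With the new module re-exported through the layer package's __all__, the wrapper becomes importable straight from mindspore.nn. A minimal usage sketch, mirroring the docstring example added in the next file (not part of this diff):

import numpy as np
import mindspore
import mindspore.nn as nn
from mindspore import Tensor

# Wrap a Dense layer so it is applied to every time slice of an (N, T, C) input.
net = nn.TimeDistributed(nn.Dense(3, 6), time_axis=1, reshape_with_axis=0)
output = net(Tensor(np.random.random([32, 10, 3]), mindspore.float32))
print(output.shape)  # (32, 10, 6)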

View File

@@ -0,0 +1,138 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Time Distributed."""
from mindspore.ops.primitive import constexpr, Primitive
from mindspore.ops import Reshape, Transpose, Pack, Unpack
from mindspore.common.dtype import tensor
from ..cell import Cell
__all__ = ['TimeDistributed']
@constexpr
def _check_reshape_pos(reshape_pos, inputs_shape, outputs_shape):
if reshape_pos >= len(outputs_shape) or inputs_shape[reshape_pos] != outputs_shape[reshape_pos]:
raise ValueError("The parameter reshape_with_axis is invalid in the input and output of TimeDistributed. "
"You may try pass parameters without reshape_with_axis.")
@constexpr
def _check_expand_dims_axis(time_axis, ndim):
if time_axis > ndim:
raise ValueError("The parameter time_axis is invalid in the input. "
"The value of time_axis should be in range of [{}, {}].".format(-ndim - 1, ndim))
@constexpr
def _generate_perm(axis_a, axis_b, length):
perm = tuple(range(length))
axis_a, axis_b = (axis_a, axis_b) if axis_a < axis_b else (axis_b, axis_a)
return perm[:axis_a] + perm[axis_a + 1: axis_b + 1] + (perm[axis_a],) + perm[axis_b + 1:]
@constexpr
def _check_data(flag):
if not flag:
raise TypeError("The inputs and outputs shuould be a Tensor.")
@constexpr
def _check_inputs_dim(shape):
if len(shape) < 3:
raise ValueError("The inputs should be at least 3D.")
class TimeDistributed(Cell):
r"""
The time distributed layer.
TimeDistributed is a wrapper that applies a layer to every temporal slice of an input.
The input should be at least 3D.
There are two cases in the implementation.
When reshape_with_axis is provided, the reshape method is chosen, which is more efficient;
otherwise, the inputs are divided along the time axis, which is more general.
For example, reshape_with_axis cannot be provided when dealing with batch normalization.
Args:
layer(Union[Cell, Primitive]): The Cell or Primitive which will be wrapped.
time_axis(int): The axis of the time step.
reshape_with_axis(int): The axis which time_axis will be reshaped with. Default: None.
Raises:
TypeError: If layer is not a Cell or Primitive.
Inputs:
- **input** (Tensor) - Tensor of shape :math:`(N, T, *)`.
Outputs:
Tensor of shape :math:`(N, T, *)`.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> input = Tensor(np.random.random([32, 10, 3]), mindspore.float32)
>>> dense = nn.Dense(3, 6)
>>> net = TimeDistributed(dense, time_axis=1, reshape_with_axis=0)
>>> output = net(input)
>>> print(output.shape)
(32, 10, 6)
"""
def __init__(self, layer, time_axis, reshape_with_axis=None):
if not isinstance(layer, (Cell, Primitive)):
raise TypeError("Please initialize TimeDistributed with mindspore.nn.Cell or "
"mindspore.ops.Primitive instance. You passed: {input}".format(input=layer))
super(TimeDistributed, self).__init__()
self.layer = layer
self.time_axis = time_axis
self.reshape_with_axis = reshape_with_axis
self.transpose = Transpose()
self.reshape = Reshape()
def construct(self, inputs):
_check_data(isinstance(inputs, tensor))
_check_inputs_dim(inputs.shape)
time_axis = self.time_axis % len(inputs.shape)
if self.reshape_with_axis is not None:
reshape_with_axis = self.reshape_with_axis % len(inputs.shape)
inputs_shape = inputs.shape
time_axis_new = len(inputs_shape) - 2 if reshape_with_axis == len(inputs_shape) - 1 \
else (reshape_with_axis + 1 if time_axis > reshape_with_axis else
reshape_with_axis - 1)
reshape_pos = time_axis_new if time_axis_new < reshape_with_axis else reshape_with_axis
perm = _generate_perm(time_axis_new, time_axis, len(inputs_shape))
inputs = self.transpose(inputs, perm)
inputs_shape_new = inputs.shape
inputs = self.reshape(inputs, inputs_shape_new[: reshape_pos] + (-1,) + inputs_shape_new[reshape_pos + 2:])
outputs = self.layer(inputs)
_check_data(isinstance(outputs, tensor))
_check_reshape_pos(reshape_pos, inputs.shape, outputs.shape)
outputs_shape_new = outputs.shape[:reshape_pos] + inputs_shape_new[reshape_pos: reshape_pos + 2]
if reshape_pos + 1 < len(outputs.shape):
outputs_shape_new += outputs.shape[reshape_pos + 1:]
return self.reshape(outputs, outputs_shape_new)
unpack = Unpack(time_axis)
inputs = unpack(inputs)
y = ()
for item in inputs:
outputs = self.layer(item)
_check_data(isinstance(outputs, tensor))
_check_expand_dims_axis(time_axis, outputs.ndim)
y += (outputs,)
y = Pack(time_axis)(y)
return y
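
To make the reshape branch of construct() above easier to follow, here is a minimal NumPy sketch of its shape bookkeeping for the common time_axis=1, reshape_with_axis=0 case: the batch and time axes are merged before the wrapped layer runs, then split apart again afterwards. The helper name merged_time_apply and the stand-in dense function are illustrative only, not part of this commit:

import numpy as np

def merged_time_apply(inputs, layer):
    # Merge (N, T) into one leading axis, apply the layer once, then restore
    # the time axis; this matches calling the layer on every time slice.
    n, t = inputs.shape[:2]
    merged = inputs.reshape((n * t,) + inputs.shape[2:])
    outputs = layer(merged)
    return outputs.reshape((n, t) + outputs.shape[1:])

x = np.random.rand(32, 10, 3).astype(np.float32)
w = np.random.rand(3, 6).astype(np.float32)
dense = lambda a: a @ w  # stand-in for nn.Dense(3, 6)
per_slice = np.stack([dense(x[:, i]) for i in range(10)], axis=1)
assert np.allclose(merged_time_apply(x, dense), per_slice)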

View File

@@ -0,0 +1,198 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore
import mindspore.context as context
import mindspore.nn as nn
import mindspore.ops as ops
from mindspore import Tensor
context.set_context(mode=context.GRAPH_MODE, device_target='CPU')
class TestTimeDistributed(nn.Cell):
def __init__(self, cell, time_axis, reshape_with_axis=None):
super(TestTimeDistributed, self).__init__()
self.time_distributed = nn.TimeDistributed(cell, time_axis, reshape_with_axis)
def construct(self, inputs):
return self.time_distributed(inputs)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_time_distributed_conv2d():
inputs = np.random.randint(0, 10, [32, 12, 10, 10])
conv2d = nn.Conv2d(12, 24, 4, has_bias=False, weight_init='normal')
output_expect = conv2d(Tensor(inputs, mindspore.float32)).asnumpy()
inputs = inputs.reshape([32, 1, 12, 10, 10]).repeat(6, axis=1)
time_distributed = TestTimeDistributed(conv2d, time_axis=1, reshape_with_axis=0)
output = time_distributed(Tensor(inputs, mindspore.float32)).asnumpy()
for i in range(output.shape[1]):
assert np.all(np.abs(output[:, i, :] - output_expect) < 1e-5)
print("Conv2D layer wrapped successful")
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_time_distributed_maxpool2d():
inputs = np.random.randint(0, 10, [32, 12, 10, 10])
pool = nn.MaxPool2d(kernel_size=3, stride=1)
output_expect = pool(Tensor(inputs, mindspore.float32)).asnumpy()
inputs = inputs.reshape([32, 1, 12, 10, 10]).repeat(6, axis=1)
time_distributed = TestTimeDistributed(pool, time_axis=1, reshape_with_axis=0)
output = time_distributed(Tensor(inputs, mindspore.float32)).asnumpy()
for i in range(output.shape[1]):
assert np.all(output[:, i, :] == output_expect)
print("MaxPooling2D layer wrapped successful")
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_time_distributed_dense():
inputs = np.random.randint(0, 10, [32, 10])
dense = nn.Dense(10, 6)
output_expect = dense(Tensor(inputs, mindspore.float32)).asnumpy()
inputs = inputs.reshape([32, 1, 10]).repeat(6, axis=1)
time_distributed = TestTimeDistributed(dense, time_axis=1, reshape_with_axis=0)
output = time_distributed(Tensor(inputs, mindspore.float32)).asnumpy()
for i in range(output.shape[1]):
assert np.all(output[:, i, :] == output_expect)
print("Dense layer wrapped successful")
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_time_distributed_dense_with_reshape_axis_not_first():
inputs = np.random.randint(0, 10, [32, 10])
dense = nn.Dense(10, 6)
output_expect = dense(Tensor(inputs, mindspore.float32)).asnumpy()
inputs = inputs.reshape([1, 32, 10]).repeat(6, axis=0)
time_distributed = TestTimeDistributed(dense, time_axis=0, reshape_with_axis=1)
output = time_distributed(Tensor(inputs, mindspore.float32)).asnumpy()
for i in range(output.shape[0]):
assert np.all(output[i, :] == output_expect)
print("Dense layer wrapped successful")
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_time_distributed_argmax():
inputs = np.random.randint(0, 10, [3, 4])
argmax = ops.Argmax(output_type=mindspore.int32, axis=1)
output_expect = argmax(Tensor(inputs, mindspore.float32)).asnumpy()
inputs = inputs.reshape([3, 1, 4]).repeat(6, axis=1)
time_distributed = TestTimeDistributed(argmax, time_axis=1, reshape_with_axis=0)
output = time_distributed(Tensor(inputs, mindspore.float32)).asnumpy()
for i in range(output.shape[1]):
assert np.all(output[:, i] == output_expect)
print("Argmax op wrapped successful")
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_time_distributed_flatten():
inputs = np.random.randint(0, 10, [3, 4, 5])
flatten = nn.Flatten()
output_expect = flatten(Tensor(inputs, mindspore.float32)).asnumpy()
inputs = inputs.reshape([3, 1, 4, 5]).repeat(6, axis=1)
time_distributed = TestTimeDistributed(flatten, time_axis=1, reshape_with_axis=0)
output = time_distributed(Tensor(inputs, mindspore.float32)).asnumpy()
for i in range(output.shape[1]):
assert np.all(output[:, i, :] == output_expect)
print("Flatten op wrapped successful")
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_time_distributed_conv2d_no_reshape_axis():
inputs = np.random.randint(0, 10, [32, 12, 10, 10])
conv2d = nn.Conv2d(12, 24, 4, has_bias=False, weight_init='normal')
output_expect = conv2d(Tensor(inputs, mindspore.float32)).asnumpy()
inputs = inputs.reshape([32, 1, 12, 10, 10]).repeat(6, axis=1)
time_distributed = TestTimeDistributed(conv2d, time_axis=1)
output = time_distributed(Tensor(inputs, mindspore.float32)).asnumpy()
for i in range(output.shape[1]):
assert np.all(output[:, i, :] == output_expect)
print("Conv2D layer with no reshape axis wrapped successful")
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_time_distributed_maxpool2d_no_reshape_axis():
inputs = np.random.randint(0, 10, [32, 12, 10, 10])
pool = nn.MaxPool2d(kernel_size=3, stride=1)
output_expect = pool(Tensor(inputs, mindspore.float32)).asnumpy()
inputs = inputs.reshape([32, 1, 12, 10, 10]).repeat(6, axis=1)
time_distributed = TestTimeDistributed(pool, time_axis=1)
output = time_distributed(Tensor(inputs, mindspore.float32)).asnumpy()
for i in range(output.shape[1]):
assert np.all(output[:, i, :] == output_expect)
print("MaxPooling2D layer with no reshape axis wrapped successful")
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_time_distributed_dense_no_reshape_axis():
inputs = np.random.randint(0, 10, [32, 10])
dense = nn.Dense(10, 6)
output_expect = dense(Tensor(inputs, mindspore.float32)).asnumpy()
inputs = inputs.reshape([32, 1, 10]).repeat(6, axis=1)
time_distributed = TestTimeDistributed(dense, time_axis=1)
output = time_distributed(Tensor(inputs, mindspore.float32)).asnumpy()
for i in range(output.shape[1]):
assert np.all(output[:, i, :] == output_expect)
print("Dense layer with no reshape axis wrapped successful")
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_time_distributed_argmax_no_reshape_axis():
inputs = np.random.randint(0, 10, [3, 4])
argmax = ops.Argmax(output_type=mindspore.int32, axis=1)
output_expect = argmax(Tensor(inputs, mindspore.float32)).asnumpy()
inputs = inputs.reshape([3, 1, 4]).repeat(6, axis=1)
time_distributed = TestTimeDistributed(argmax, time_axis=1)
output = time_distributed(Tensor(inputs, mindspore.float32)).asnumpy()
for i in range(output.shape[1]):
assert np.all(output[:, i] == output_expect)
print("Argmax op with no reshape axis wrapped successful")
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_time_distributed_flatten_no_reshape_axis():
inputs = np.random.randint(0, 10, [3, 4, 5])
flatten = nn.Flatten()
output_expect = flatten(Tensor(inputs, mindspore.float32)).asnumpy()
inputs = inputs.reshape([3, 1, 4, 5]).repeat(6, axis=1)
time_distributed = TestTimeDistributed(flatten, time_axis=1)
output = time_distributed(Tensor(inputs, mindspore.float32)).asnumpy()
for i in range(output.shape[1]):
assert np.all(output[:, i, :] == output_expect)
print("Flatten op with no reshape axis wrapped successful")

View File

@@ -0,0 +1,198 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore
import mindspore.context as context
import mindspore.nn as nn
import mindspore.ops as ops
from mindspore import Tensor
context.set_context(mode=context.GRAPH_MODE, device_target='GPU')
class TestTimeDistributed(nn.Cell):
def __init__(self, cell, time_axis, reshape_with_axis=None):
super(TestTimeDistributed, self).__init__()
self.time_distributed = nn.TimeDistributed(cell, time_axis, reshape_with_axis)
def construct(self, inputs):
return self.time_distributed(inputs)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_time_distributed_conv2d():
inputs = np.random.randint(0, 10, [32, 12, 10, 10])
conv2d = nn.Conv2d(12, 24, 4, has_bias=False, weight_init='normal')
output_expect = conv2d(Tensor(inputs, mindspore.float32)).asnumpy()
inputs = inputs.reshape([32, 1, 12, 10, 10]).repeat(6, axis=1)
time_distributed = TestTimeDistributed(conv2d, time_axis=1, reshape_with_axis=0)
output = time_distributed(Tensor(inputs, mindspore.float32)).asnumpy()
for i in range(output.shape[1]):
assert np.all(np.abs(output[:, i, :] - output_expect) < 1e-5)
print("Conv2D layer wrapped successful")
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_time_distributed_maxpool2d():
inputs = np.random.randint(0, 10, [32, 12, 10, 10])
pool = nn.MaxPool2d(kernel_size=3, stride=1)
output_expect = pool(Tensor(inputs, mindspore.float32)).asnumpy()
inputs = inputs.reshape([32, 1, 12, 10, 10]).repeat(6, axis=1)
time_distributed = TestTimeDistributed(pool, time_axis=1, reshape_with_axis=0)
output = time_distributed(Tensor(inputs, mindspore.float32)).asnumpy()
for i in range(output.shape[1]):
assert np.all(output[:, i, :] == output_expect)
print("MaxPooling2D layer wrapped successful")
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_time_distributed_dense():
inputs = np.random.randint(0, 10, [32, 10])
dense = nn.Dense(10, 6)
output_expect = dense(Tensor(inputs, mindspore.float32)).asnumpy()
inputs = inputs.reshape([32, 1, 10]).repeat(6, axis=1)
time_distributed = TestTimeDistributed(dense, time_axis=1, reshape_with_axis=0)
output = time_distributed(Tensor(inputs, mindspore.float32)).asnumpy()
for i in range(output.shape[1]):
assert np.all(output[:, i, :] == output_expect)
print("Dense layer wrapped successful")
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_time_distributed_dense_with_reshape_axis_not_first():
inputs = np.random.randint(0, 10, [32, 10])
dense = nn.Dense(10, 6)
output_expect = dense(Tensor(inputs, mindspore.float32)).asnumpy()
inputs = inputs.reshape([1, 32, 10]).repeat(6, axis=0)
time_distributed = TestTimeDistributed(dense, time_axis=0, reshape_with_axis=1)
output = time_distributed(Tensor(inputs, mindspore.float32)).asnumpy()
for i in range(output.shape[0]):
assert np.all(output[i, :] == output_expect)
print("Dense layer wrapped successful")
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_time_distributed_argmax():
inputs = np.random.randint(0, 10, [3, 4])
argmax = ops.Argmax(output_type=mindspore.int32, axis=1)
output_expect = argmax(Tensor(inputs, mindspore.float32)).asnumpy()
inputs = inputs.reshape([3, 1, 4]).repeat(6, axis=1)
time_distributed = TestTimeDistributed(argmax, time_axis=1, reshape_with_axis=0)
output = time_distributed(Tensor(inputs, mindspore.float32)).asnumpy()
for i in range(output.shape[1]):
assert np.all(output[:, i] == output_expect)
print("Argmax op wrapped successful")
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_time_distributed_flatten():
inputs = np.random.randint(0, 10, [3, 4, 5])
flatten = nn.Flatten()
output_expect = flatten(Tensor(inputs, mindspore.float32)).asnumpy()
inputs = inputs.reshape([3, 1, 4, 5]).repeat(6, axis=1)
time_distributed = TestTimeDistributed(flatten, time_axis=1, reshape_with_axis=0)
output = time_distributed(Tensor(inputs, mindspore.float32)).asnumpy()
for i in range(output.shape[1]):
assert np.all(output[:, i, :] == output_expect)
print("Flatten op wrapped successful")
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_time_distributed_conv2d_no_reshape_axis():
inputs = np.random.randint(0, 10, [32, 12, 10, 10])
conv2d = nn.Conv2d(12, 24, 4, has_bias=False, weight_init='normal')
output_expect = conv2d(Tensor(inputs, mindspore.float32)).asnumpy()
inputs = inputs.reshape([32, 1, 12, 10, 10]).repeat(6, axis=1)
time_distributed = TestTimeDistributed(conv2d, time_axis=1)
output = time_distributed(Tensor(inputs, mindspore.float32)).asnumpy()
for i in range(output.shape[1]):
assert np.all(output[:, i, :] == output_expect)
print("Conv2D layer with no reshape axis wrapped successful")
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_time_distributed_maxpool2d_no_reshape_axis():
inputs = np.random.randint(0, 10, [32, 12, 10, 10])
pool = nn.MaxPool2d(kernel_size=3, stride=1)
output_expect = pool(Tensor(inputs, mindspore.float32)).asnumpy()
inputs = inputs.reshape([32, 1, 12, 10, 10]).repeat(6, axis=1)
time_distributed = TestTimeDistributed(pool, time_axis=1)
output = time_distributed(Tensor(inputs, mindspore.float32)).asnumpy()
for i in range(output.shape[1]):
assert np.all(output[:, i, :] == output_expect)
print("MaxPooling2D layer with no reshape axis wrapped successful")
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_time_distributed_dense_no_reshape_axis():
inputs = np.random.randint(0, 10, [32, 10])
dense = nn.Dense(10, 6)
output_expect = dense(Tensor(inputs, mindspore.float32)).asnumpy()
inputs = inputs.reshape([32, 1, 10]).repeat(6, axis=1)
time_distributed = TestTimeDistributed(dense, time_axis=1)
output = time_distributed(Tensor(inputs, mindspore.float32)).asnumpy()
for i in range(output.shape[1]):
assert np.all(output[:, i, :] == output_expect)
print("Dense layer with no reshape axis wrapped successful")
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_time_distributed_argmax_no_reshape_axis():
inputs = np.random.randint(0, 10, [3, 4])
argmax = ops.Argmax(output_type=mindspore.int32, axis=1)
output_expect = argmax(Tensor(inputs, mindspore.float32)).asnumpy()
inputs = inputs.reshape([3, 1, 4]).repeat(6, axis=1)
time_distributed = TestTimeDistributed(argmax, time_axis=1)
output = time_distributed(Tensor(inputs, mindspore.float32)).asnumpy()
for i in range(output.shape[1]):
assert np.all(output[:, i] == output_expect)
print("Argmax op with no reshape axis wrapped successful")
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_time_distributed_flatten_no_reshape_axis():
inputs = np.random.randint(0, 10, [3, 4, 5])
flatten = nn.Flatten()
output_expect = flatten(Tensor(inputs, mindspore.float32)).asnumpy()
inputs = inputs.reshape([3, 1, 4, 5]).repeat(6, axis=1)
time_distributed = TestTimeDistributed(flatten, time_axis=1)
output = time_distributed(Tensor(inputs, mindspore.float32)).asnumpy()
for i in range(output.shape[1]):
assert np.all(output[:, i, :] == output_expect)
print("Flatten op with no reshape axis wrapped successful")