move vmap test cases to st/ops directory

looop5 2022-05-18 15:03:52 +08:00
parent efd0972b66
commit c05037c0e7
4 changed files with 125 additions and 196 deletions
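
For context: every test touched by this commit exercises vmap from mindspore.ops.functional, which maps a cell over a chosen batch axis of its inputs (in_axes) and places the batch dimension of the result at out_axes. As a rough NumPy reference for the single-mapped-input case (a sketch of the semantics only, not MindSpore's implementation; vmap_ref is a hypothetical name):

import numpy as np

def vmap_ref(fn, in_axis, out_axis):
    # Slice the input along in_axis, apply fn to each slice,
    # then stack the results with the batch dimension at out_axis.
    def batched(x):
        slices = [fn(np.take(x, i, axis=in_axis)) for i in range(x.shape[in_axis])]
        return np.moveaxis(np.stack(slices, axis=0), 0, out_axis)
    return batched

out = vmap_ref(lambda a: a * 2.0, 0, 1)(np.ones((3, 4, 5), np.float32))
assert out.shape == (4, 3, 5)  # batch axis of size 3 moved to position 1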

View File

@@ -21,6 +21,7 @@ import mindspore.context as context
import mindspore.nn as nn
import mindspore.ops as ops
from mindspore import Tensor, Parameter, ParameterTuple
from mindspore.ops.functional import vmap


class NetIndexAdd(nn.Cell):
@@ -324,3 +325,73 @@ def test_index_add_dynamic():
    net.set_inputs(Tensor(idx), y_dyn)
    output = net(Tensor(idx), Tensor(y))
    assert (output.asnumpy() == expect).all()


def vmap_case():
    class Net(nn.Cell):
        def __init__(self, axis):
            super(Net, self).__init__()
            self.index_add = ops.IndexAdd(axis)

        def construct(self, a, idx, b):
            return self.index_add(a, idx, b)

    class WrapNet(nn.Cell):
        def __init__(self, net, a, in_axes, out_axes):
            super(WrapNet, self).__init__()
            self.net = net
            self.a = a
            self.in_axes = in_axes
            self.out_axes = out_axes

        def construct(self, idx, b):
            return vmap(self.net, self.in_axes, self.out_axes)(self.a, idx, b)

    # batch dimensions of x and y are the same, batch dimension <= axis
    x = Parameter(Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32)))
    indices = Tensor(np.array([0, 2], dtype=np.int32))
    y = Tensor(np.array([[0.5, 1], [1, 1.5], [2, 2.5]], dtype=np.float32))
    output = WrapNet(Net(0), x, (0, None, 0), 0)(indices, y)
    expect = np.array([[1.5, 2, 4], [5, 5, 7.5], [9, 8, 11.5]], dtype=np.float32)
    assert np.allclose(output.asnumpy(), expect)

    # batch dimensions of x and y are different, batch dimension <= axis
    x = Parameter(Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32)))
    indices = Tensor(np.array([0, 2], dtype=np.int32))
    y = Tensor(np.array([[0.5, 1, 2], [1, 1.5, 2.5]], dtype=np.float32))
    output = WrapNet(Net(0), x, (0, None, 1), 0)(indices, y)
    expect = np.array([[1.5, 2, 4], [5, 5, 7.5], [9, 8, 11.5]], dtype=np.float32)
    assert np.allclose(output.asnumpy(), expect)

    # batch dimension of y is None
    x = Parameter(Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32)))
    indices = Tensor(np.array([0, 2], dtype=np.int32))
    y = Tensor(np.array([0.5, 1], dtype=np.float32))
    output = WrapNet(Net(0), x, (0, None, None), 0)(indices, y)
    expect = np.array([[1.5, 2, 4], [4.5, 5, 7], [7.5, 8, 10]], dtype=np.float32)
    assert np.allclose(output.asnumpy(), expect)

    # batch dimensions of x and y are the same, batch dimension > axis
    x = Parameter(Tensor(np.array([[[1, 1], [1, 1]],
                                   [[2, 2], [2, 2]],
                                   [[3, 3], [3, 3]]], dtype=np.float32)))
    indices = Tensor(np.array([0, 2], dtype=np.int32))
    y = Tensor(np.array([[[0, 0.5], [1, 1.5]], [[1.5, 2], [2.5, 3]]], dtype=np.float32))
    output = WrapNet(Net(0), x, (2, None, 2), 2)(indices, y)
    expect = np.array([[[1, 1.5], [2, 2.5]],
                       [[2, 2], [2, 2]],
                       [[4.5, 5], [5.5, 6]]], dtype=np.float32)
    assert np.allclose(output.asnumpy(), expect)


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_index_add_vmap_cpu():
    """
    Feature: test IndexAdd vmap on CPU.
    Description: inputs with batch.
    Expectation: the results match the expected values.
    """
    context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
    vmap_case()
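
The expect arrays in vmap_case above can be reproduced with a plain NumPy loop over the mapped axis. A minimal sketch for the first and last cases (index_add_np is a hypothetical NumPy stand-in for ops.IndexAdd, not a MindSpore API):

import numpy as np

def index_add_np(a, axis, idx, b):
    # Add b into a at positions idx along the given axis, duplicates accumulated.
    out = a.copy()
    np.add.at(out, tuple([slice(None)] * axis + [idx]), b)
    return out

idx = np.array([0, 2])

# Case 1: x and y are both mapped along axis 0, so each row x[i] (shape (3,))
# receives y[i] (shape (2,)) at positions idx.
x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], np.float32)
y = np.array([[0.5, 1], [1, 1.5], [2, 2.5]], np.float32)
ref = np.stack([index_add_np(x[i], 0, idx, y[i]) for i in range(3)])
assert np.allclose(ref, [[1.5, 2, 4], [5, 5, 7.5], [9, 8, 11.5]])

# Case 4: both are mapped along axis 2 (behind the op axis), and out_axes=2
# stacks the per-slice results back along axis 2.
x4 = np.array([[[1, 1], [1, 1]], [[2, 2], [2, 2]], [[3, 3], [3, 3]]], np.float32)
y4 = np.array([[[0, 0.5], [1, 1.5]], [[1.5, 2], [2.5, 3]]], np.float32)
ref4 = np.stack([index_add_np(x4[:, :, i], 0, idx, y4[:, :, i]) for i in range(2)], axis=2)
assert np.allclose(ref4, [[[1, 1.5], [2, 2.5]], [[2, 2], [2, 2]], [[4.5, 5], [5.5, 6]]])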

View File

@@ -22,6 +22,7 @@ from mindspore import Tensor
from mindspore.common.api import ms_function
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter
from mindspore.ops.functional import vmap


class SpaceToBatchNDNet(nn.Cell):
@@ -199,3 +200,56 @@ def test_space_to_batch_nd_dynamic():
    context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
    output = dyn_net(input_x, input_y)
    assert (output.asnumpy() == expect).all()


def vmap_case():
    class Net(nn.Cell):
        def __init__(self, block_size, paddings):
            super(Net, self).__init__()
            self.space_to_batch_nd = ops.SpaceToBatchND(block_size, paddings)

        def construct(self, a):
            return self.space_to_batch_nd(a)

    class WrapNet(nn.Cell):
        def __init__(self, net, in_axes, out_axes):
            super(WrapNet, self).__init__()
            self.net = net
            self.in_axes = in_axes
            self.out_axes = out_axes

        def construct(self, input_x):
            return vmap(self.net, self.in_axes, self.out_axes)(input_x)

    block_size = [2, 2]
    paddings = [[0, 0], [0, 0]]
    input_shape = (2, 3, 1, 4, 4)
    data_np = np.arange(np.prod(input_shape)).reshape(input_shape).astype(np.float32)
    net = Net(block_size, paddings)

    # test input axis and output axis are the same
    v_net_1 = WrapNet(Net(block_size, paddings), (0,), 0)
    output_v = v_net_1(Tensor(data_np)).asnumpy()
    for i in range(input_shape[0]):
        assert np.allclose(output_v[i, :, :, :, :], net(Tensor(data_np[i, :, :, :, :])).asnumpy())

    # test input axis and output axis are different
    v_net_2 = WrapNet(Net(block_size, paddings), (0,), 1)
    output_v = v_net_2(Tensor(data_np)).asnumpy()
    for i in range(input_shape[0]):
        assert np.allclose(output_v[:, i, :, :, :], net(Tensor(data_np[i, :, :, :, :])).asnumpy())


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_space_to_batch_nd_vmap_cpu():
    """
    Feature: test SpaceToBatchND vmap on CPU.
    Description: inputs with batch.
    Expectation: the results match the expected values.
    """
    context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
    vmap_case()
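
For reference on the shapes involved: SpaceToBatchND with block_size=[2, 2] and zero paddings maps each (3, 1, 4, 4) slice to (12, 1, 2, 2), multiplying the batch by 2*2 and halving both spatial dims, so the vmapped output is (2, 12, 1, 2, 2) for out_axes=0 and (12, 2, 1, 2, 2) for out_axes=1. A NumPy shape sketch under the usual TF-style layout (space_to_batch_ref is a hypothetical helper; the exact element ordering within the output batch may differ from MindSpore's):

import numpy as np

def space_to_batch_ref(x, block):
    # (N, C, H, W) -> (N * bh * bw, C, H // bh, W // bw), zero paddings assumed.
    n, c, h, w = x.shape
    bh, bw = block
    y = x.reshape(n, c, h // bh, bh, w // bw, bw)
    y = y.transpose(3, 5, 0, 1, 2, 4)  # move the block offsets to the front
    return y.reshape(n * bh * bw, c, h // bh, w // bw)

x = np.arange(48, dtype=np.float32).reshape(3, 1, 4, 4)
assert space_to_batch_ref(x, [2, 2]).shape == (12, 1, 2, 2)

# Batched over axis 0 of a (2, 3, 1, 4, 4) input, as in vmap_case above:
data = np.ones((2, 3, 1, 4, 4), np.float32)
batched = np.stack([space_to_batch_ref(b, [2, 2]) for b in data])
assert batched.shape == (2, 12, 1, 2, 2)                     # out_axes=0
assert np.moveaxis(batched, 0, 1).shape == (12, 2, 1, 2, 2)  # out_axes=1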

View File

@@ -1,120 +0,0 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest

import mindspore.context as context
import mindspore.nn as nn
import mindspore.ops as ops
from mindspore import Tensor, Parameter
from mindspore.ops.functional import vmap


def vmap_case():
    class Net(nn.Cell):
        def __init__(self, axis):
            super(Net, self).__init__()
            self.index_add = ops.IndexAdd(axis)

        def construct(self, a, idx, b):
            return self.index_add(a, idx, b)

    class WrapNet(nn.Cell):
        def __init__(self, net, a, in_axes, out_axes):
            super(WrapNet, self).__init__()
            self.net = net
            self.a = a
            self.in_axes = in_axes
            self.out_axes = out_axes

        def construct(self, idx, b):
            return vmap(self.net, self.in_axes, self.out_axes)(self.a, idx, b)

    # batch dimensions of x and y are the same, batch dimension <= axis
    x = Parameter(Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32)))
    indices = Tensor(np.array([0, 2], dtype=np.int32))
    y = Tensor(np.array([[0.5, 1], [1, 1.5], [2, 2.5]], dtype=np.float32))
    output = WrapNet(Net(0), x, (0, None, 0), 0)(indices, y)
    expect = np.array([[1.5, 2, 4], [5, 5, 7.5], [9, 8, 11.5]], dtype=np.float32)
    assert np.allclose(output.asnumpy(), expect)

    # batch dimensions of x and y are different, batch dimension <= axis
    x = Parameter(Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32)))
    indices = Tensor(np.array([0, 2], dtype=np.int32))
    y = Tensor(np.array([[0.5, 1, 2], [1, 1.5, 2.5]], dtype=np.float32))
    output = WrapNet(Net(0), x, (0, None, 1), 0)(indices, y)
    expect = np.array([[1.5, 2, 4], [5, 5, 7.5], [9, 8, 11.5]], dtype=np.float32)
    assert np.allclose(output.asnumpy(), expect)

    # batch dimension of y is None
    x = Parameter(Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32)))
    indices = Tensor(np.array([0, 2], dtype=np.int32))
    y = Tensor(np.array([0.5, 1], dtype=np.float32))
    output = WrapNet(Net(0), x, (0, None, None), 0)(indices, y)
    expect = np.array([[1.5, 2, 4], [4.5, 5, 7], [7.5, 8, 10]], dtype=np.float32)
    assert np.allclose(output.asnumpy(), expect)

    # batch dimensions of x and y are the same, batch dimension > axis
    x = Parameter(Tensor(np.array([[[1, 1], [1, 1]],
                                   [[2, 2], [2, 2]],
                                   [[3, 3], [3, 3]]], dtype=np.float32)))
    indices = Tensor(np.array([0, 2], dtype=np.int32))
    y = Tensor(np.array([[[0, 0.5], [1, 1.5]], [[1.5, 2], [2.5, 3]]], dtype=np.float32))
    output = WrapNet(Net(0), x, (2, None, 2), 2)(indices, y)
    expect = np.array([[[1, 1.5], [2, 2.5]],
                       [[2, 2], [2, 2]],
                       [[4.5, 5], [5.5, 6]]], dtype=np.float32)
    assert np.allclose(output.asnumpy(), expect)


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_index_add_vmap_cpu():
    """
    Feature: test IndexAdd vmap on CPU.
    Description: inputs with batch.
    Expectation: the results match the expected values.
    """
    context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
    vmap_case()


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_index_add_vmap_gpu():
    """
    Feature: test IndexAdd vmap on GPU.
    Description: inputs with batch.
    Expectation: the results match the expected values.
    """
    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    vmap_case()


@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_index_add_vmap_ascend():
    """
    Feature: test IndexAdd vmap on Ascend.
    Description: inputs with batch.
    Expectation: the results match the expected values.
    """
    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
    vmap_case()

View File

@@ -1,76 +0,0 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest

import mindspore.context as context
import mindspore.nn as nn
import mindspore.ops as ops
from mindspore import Tensor
from mindspore.ops.functional import vmap


def vmap_case():
    class Net(nn.Cell):
        def __init__(self, block_size, paddings):
            super(Net, self).__init__()
            self.space_to_batch_nd = ops.SpaceToBatchND(block_size, paddings)

        def construct(self, a):
            return self.space_to_batch_nd(a)

    class WrapNet(nn.Cell):
        def __init__(self, net, in_axes, out_axes):
            super(WrapNet, self).__init__()
            self.net = net
            self.in_axes = in_axes
            self.out_axes = out_axes

        def construct(self, input_x):
            return vmap(self.net, self.in_axes, self.out_axes)(input_x)

    block_size = [2, 2]
    paddings = [[0, 0], [0, 0]]
    input_shape = (2, 3, 1, 4, 4)
    data_np = np.arange(np.prod(input_shape)).reshape(input_shape).astype(np.float32)
    net = Net(block_size, paddings)

    # test input axis and output axis are the same
    v_net_1 = WrapNet(Net(block_size, paddings), (0,), 0)
    output_v = v_net_1(Tensor(data_np)).asnumpy()
    for i in range(input_shape[0]):
        assert np.allclose(output_v[i, :, :, :, :], net(Tensor(data_np[i, :, :, :, :])).asnumpy())

    # test input axis and output axis are different
    v_net_2 = WrapNet(Net(block_size, paddings), (0,), 1)
    output_v = v_net_2(Tensor(data_np)).asnumpy()
    for i in range(input_shape[0]):
        assert np.allclose(output_v[:, i, :, :, :], net(Tensor(data_np[i, :, :, :, :])).asnumpy())


@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_space_to_batch_nd_vmap_cpu():
    """
    Feature: test SpaceToBatchND vmap on CPU.
    Description: inputs with batch.
    Expectation: the results match the expected values.
    """
    context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
    vmap_case()