!35943 add HSwish & HSwishGrad test cases.

Merge pull request !35943 from liujunzhu/master
i-robot 2022-06-15 01:21:43 +00:00 committed by Gitee
commit e767848149
2 changed files with 125 additions and 0 deletions

File 1 of 2:
@@ -173,6 +173,15 @@ class SquareSumAllNet(nn.Cell):
        return self.square_sum_all(x, y)


class HSwishNet(nn.Cell):
    def __init__(self):
        super(HSwishNet, self).__init__()
        self.hswish = ops.HSwish()

    def construct(self, x):
        return self.hswish(x)


@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
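
For context, HSwish computes x * relu6(x + 3) / 6, where relu6(t) = min(max(t, 0), 6). A minimal NumPy reference (a sketch for cross-checking, not part of this change):

import numpy as np

def hswish_ref(x):
    # HSwish(x) = x * relu6(x + 3) / 6
    return x * np.clip(x + 3.0, 0.0, 6.0) / 6.0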
@@ -396,3 +405,21 @@ def test_dynamic_square_sum_all():
    out = dynamic_shape_sink_process(net, dataset)
    out_expect = fixed_shape_process(net, dataset)
    assert compare(out, out_expect)


@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.parametrize('dtype', [np.float32])
@pytest.mark.env_onecard
def test_dynamic_hswish(dtype):
    """
    Feature: HSwish and its backward with dynamic input shapes.
    Description: Run the network on inputs whose last dimension varies across steps.
    Expectation: Results are consistent with the fixed-shape run.
    """
    batch_size = 16
    dynamic_range = range(48, 50)
    input_shape = [(batch_size, 2, None), (batch_size, 2, None)]
    net = HSwishNet()
    common_func(dynamic_range, input_shape, dtype, net)
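
Note that common_func, like dynamic_shape_sink_process and fixed_shape_process above, is a helper defined earlier in this test file, outside the diff. With dynamic_range = range(48, 50) and None in the last dimension, the test effectively feeds tensors of shape (16, 2, 48) and (16, 2, 49) through one compiled graph. A rough standalone sketch of that pattern (hypothetical helper, not the real common_func; assumes Cell.set_inputs for declaring dynamic dimensions):

import numpy as np
import mindspore as ms
from mindspore import Tensor

def check_dynamic_matches_fixed(net, dyn_range, dtype=np.float32):
    # Declare the last dimension as dynamic for the compiled net.
    net.set_inputs(Tensor(shape=[16, 2, None], dtype=ms.float32))
    for d in dyn_range:
        x = np.random.randn(16, 2, d).astype(dtype)
        out_dyn = net(Tensor(x))          # dynamic-shape graph
        out_fix = HSwishNet()(Tensor(x))  # fresh net, fully static shapes
        assert np.allclose(out_dyn.asnumpy(), out_fix.asnumpy(), 1e-3, 1e-3)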

File 2 of 2:
@@ -13,6 +13,7 @@
# limitations under the License.
# ============================================================================
import time
import numpy as np
import pytest
@@ -22,6 +23,7 @@ from mindspore import Tensor
from mindspore.common.api import ms_function
from mindspore.ops import operations as P
from mindspore.ops.composite import GradOperation
from mindspore.ops import functional as F


class Grad(nn.Cell):
@@ -114,3 +116,99 @@ def test_hardswish_forward_and_backward():
    for mode in modes:
        for dtype in dtypes:
            generate_test_cases(dtype, mode)


def np_all_close_with_loss(out, expect):
    """Return True when out and expect agree within rtol=atol=5e-3 (NaNs compare equal)."""
    return np.allclose(out, expect, 0.005, 0.005, equal_nan=True)


@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('dtype', [np.float32, np.float16])
def test_hswish_vmap(dtype, shape=(100, 2)):
    """
    Feature: HSwish vmap
    Description: Test the correctness of the HSwish vmap feature.
    Expectation: Success.
    """
    def hswish_func(x):
        """Apply the HSwish primitive to one batch element."""
        return P.HSwish()(x)

    # Scale by +/-100 so inputs land in both saturation regions of HSwish.
    prop = 100 if np.random.random() > 0.5 else -100
    x_np = (np.random.randn(*shape) * prop).astype(dtype)
    x = Tensor(x_np)
    # No-op arithmetic so x is produced by an op rather than passed as a constant.
    x = F.sub(x, 0)

    start_time = time.perf_counter()
    output_vmap = F.vmap(hswish_func, in_axes=(0,))(x)
    vmap_time = time.perf_counter() - start_time

    start_time_manually = time.perf_counter()

    @ms_function
    def manually_batched(xs):
        """Loop over the batch dimension and stack the per-element results."""
        output = []
        for i in range(xs.shape[0]):
            output.append(hswish_func(xs[i]))
        return F.stack(output)

    output_manually = manually_batched(x)
    manually_time = time.perf_counter() - start_time_manually

    assert np_all_close_with_loss(output_vmap.asnumpy(), output_manually.asnumpy())
    # vmap is expected to outperform the manually batched loop.
    assert vmap_time < manually_time
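
F.vmap(hswish_func, in_axes=(0,)) maps the op over axis 0, so its output must agree with applying the op to each row and stacking, which is exactly what manually_batched computes. The same equivalence in plain NumPy terms (a sketch, independent of MindSpore):

import numpy as np

def hswish_ref(x):
    return x * np.clip(x + 3.0, 0.0, 6.0) / 6.0

batch = np.random.randn(100, 2).astype(np.float32)
vectorized = hswish_ref(batch)                         # one whole-batch call
looped = np.stack([hswish_ref(row) for row in batch])  # per-row loop
assert np.allclose(vectorized, looped, 5e-3, 5e-3)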


@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('dtype', [np.float32, np.float16])
def test_hswish_grad_vmap(dtype, shape=(100, 2)):
    """
    Feature: HSwishGrad vmap
    Description: Test the correctness of the HSwishGrad vmap feature.
    Expectation: Success.
    """
    net = Net()
    grad = Grad(net)

    def hswish_grad_func(dy, x):
        """Compute the HSwish input gradient for one batch element."""
        output = grad(dy, x)
        return output[0]

    # Random sign; inputs mostly fall in the non-saturated region (-3, 3).
    prop = 1 if np.random.random() > 0.5 else -1
    dy_np = (np.random.randn(*shape) * prop).astype(dtype)
    x_np = (np.random.randn(*shape) * prop).astype(dtype)
    dy = Tensor(dy_np)
    x = Tensor(x_np)
    # No-op arithmetic so dy and x are produced by ops rather than passed as constants.
    dy = F.sub(dy, 0)
    x = F.sub(x, 0)

    start_time = time.perf_counter()
    output_vmap = F.vmap(hswish_grad_func, in_axes=(0, 0))(dy, x)
    vmap_time = time.perf_counter() - start_time

    start_time_manually = time.perf_counter()

    @ms_function
    def manually_batched(dys, xs):
        """Loop over the batch dimension and stack the per-element gradients."""
        output = []
        for i in range(dys.shape[0]):
            output.append(hswish_grad_func(dys[i], xs[i]))
        return F.stack(output)

    output_manually = manually_batched(dy, x)
    manually_time = time.perf_counter() - start_time_manually

    assert np_all_close_with_loss(output_vmap.asnumpy(), output_manually.asnumpy())
    # vmap is expected to outperform the manually batched loop.
    assert vmap_time < manually_time
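
For reference, the derivative that HSwishGrad implements is piecewise: 0 for x <= -3, 1 for x >= 3, and (2x + 3)/6 in between (boundary conventions may differ by backend). A NumPy sketch (not part of this change) for cross-checking the gradient tests:

import numpy as np

def hswish_grad_ref(dy, x):
    # Derivative of x * relu6(x + 3) / 6, applied to the incoming gradient dy.
    dx = np.where(x <= -3.0, 0.0,
                  np.where(x >= 3.0, 1.0, (2.0 * x + 3.0) / 6.0))
    return dy * dx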