rectify nn api's ut and st

王南 2022-12-26 21:09:54 +08:00
parent cea9e119e1
commit e60a343b41
26 changed files with 504 additions and 1714 deletions

View File

@@ -33,6 +33,8 @@ class Net(nn.Cell):
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_cpu
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [ms.GRAPH_MODE, ms.PYNATIVE_MODE])
def test_avgpool3d_normal(mode):
@@ -51,4 +53,3 @@ def test_avgpool3d_normal(mode):
[[126.0, 127.0, 128.0], [131.0, 132.0, 133.0]]]]])
assert output.shape == (1, 2, 2, 2, 3)
assert np.allclose(output.asnumpy(), expect_output, rtol=1e-3)
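For reference, the expected values above can be reproduced outside MindSpore with a plain NumPy moving average. The snippet below is an illustrative cross-check, not part of the test file, assuming the kernel_size=3, stride=1, no-padding AvgPool3d configuration that Net appears to use (the values are consistent with it):
import numpy as np

x = np.arange(1 * 2 * 4 * 4 * 5, dtype=np.float32).reshape((1, 2, 4, 4, 5))
k = 3  # kernel_size=3, stride=1, no padding
out = np.zeros((1, 2, 2, 2, 3), dtype=np.float32)
for d in range(2):
    for h in range(2):
        for w in range(3):
            # average over the 3x3x3 window starting at (d, h, w)
            out[:, :, d, h, w] = x[:, :, d:d + k, h:h + k, w:w + k].mean(axis=(2, 3, 4))
print(out[0, 0, 0])  # [[26. 27. 28.] [31. 32. 33.]] -- matches expect_output above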

View File

@@ -20,26 +20,6 @@ import mindspore as ms
import mindspore.nn as nn
class Net(nn.Cell):
def __init__(self):
super(Net, self).__init__()
self.pool = nn.MaxPool3d(kernel_size=3, stride=1, padding=1)
def construct(self, x):
out = self.pool(x)
return out
class Net2(nn.Cell):
def __init__(self):
super(Net2, self).__init__()
self.pool = nn.MaxPool3d(kernel_size=3, stride=1, padding=1, return_indices=True)
def construct(self, x):
out = self.pool(x)
return out
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@@ -53,8 +33,8 @@ def test_maxpool3d_normal(mode):
ms.set_context(mode=mode)
np_array = np.arange(1 * 2 * 4 * 4 * 5).reshape((1, 2, 4, 4, 5))
net1 = Net()
net2 = Net2()
net1 = nn.MaxPool3d(kernel_size=3, stride=1, padding=1)
net2 = nn.MaxPool3d(kernel_size=3, stride=1, padding=1, return_indices=True)
x = ms.Tensor(np_array, ms.float32)
output1 = net1(x)
output2 = net2(x)
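A note on the inlined cells above: with return_indices=True, nn.MaxPool3d is expected to return a (values, indices) pair rather than a single Tensor, which is why the test keeps two separate pool objects. A minimal usage sketch under that assumption:
import numpy as np
import mindspore as ms
import mindspore.nn as nn

x = ms.Tensor(np.arange(1 * 2 * 4 * 4 * 5).reshape((1, 2, 4, 4, 5)), ms.float32)
pool = nn.MaxPool3d(kernel_size=3, stride=1, padding=1)
pool_with_indices = nn.MaxPool3d(kernel_size=3, stride=1, padding=1, return_indices=True)
out = pool(x)                            # single Tensor; padding=1 keeps the (1, 2, 4, 4, 5) shape
values, indices = pool_with_indices(x)   # pair of max values and their argmax indices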

View File

@@ -0,0 +1,186 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.nn as nn
import mindspore.context as context
from mindspore import Tensor
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
def test_reflection_pad1d_input3d(mode):
"""
Feature: ReflectionPad1d
Description: Test ReflectionPad1d with 3D input.
Expectation: success
"""
context.set_context(mode=mode)
x = Tensor(np.array([[[0, 1, 2, 3], [4, 5, 6, 7]]]).astype(np.float32))
padding = (3, 1)
net = nn.ReflectionPad1d(padding)
output = net(x)
expected_output = Tensor(np.array([[[3, 2, 1, 0, 1, 2, 3, 2],
[7, 6, 5, 4, 5, 6, 7, 6]]]).astype(np.float32))
assert np.array_equal(output.asnumpy(), expected_output)
padding = 2
expected_output = Tensor(np.array([[[2, 1, 0, 1, 2, 3, 2, 1],
[6, 5, 4, 5, 6, 7, 6, 5]]]).astype(np.float32))
net = nn.ReflectionPad1d(padding)
output = net(x)
assert np.array_equal(output.asnumpy(), expected_output)
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
def test_reflection_pad1d_input2d(mode):
"""
Feature: ReflectionPad1d
Description: Test ReflectionPad1d with 2D input.
Expectation: success
"""
context.set_context(mode=mode)
x = Tensor(np.array([[0, 1, 2, 3], [4, 5, 6, 7]]).astype(np.float32))
padding = (3, 1)
net = nn.ReflectionPad1d(padding)
output = net(x)
expected_output = Tensor(np.array([[3, 2, 1, 0, 1, 2, 3, 2],
[7, 6, 5, 4, 5, 6, 7, 6]]).astype(np.float32))
assert np.array_equal(output.asnumpy(), expected_output)
padding = 2
expected_output = Tensor(np.array([[2, 1, 0, 1, 2, 3, 2, 1],
[6, 5, 4, 5, 6, 7, 6, 5]]).astype(np.float32))
net = nn.ReflectionPad1d(padding)
output = net(x)
assert np.array_equal(output.asnumpy(), expected_output)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
def test_reflection_pad2d_input4d(mode):
r"""
Feature: ReflectionPad2d
Description: Test ReflectionPad2d with 4D input.
Expectation: success
"""
context.set_context(mode=mode)
x = Tensor(np.array([[[[0, 1, 2], [3, 4, 5], [6, 7, 8]]]]).astype(np.float32))
padding = (1, 1, 2, 0)
net = nn.ReflectionPad2d(padding)
output = net(x)
expected_output = Tensor(np.array([[[[7, 6, 7, 8, 7], [4, 3, 4, 5, 4], [1, 0, 1, 2, 1],
[4, 3, 4, 5, 4], [7, 6, 7, 8, 7]]]]).astype(np.float32))
assert np.array_equal(output.asnumpy(), expected_output)
padding = 2
output = nn.ReflectionPad2d(padding)(x)
expected_output = Tensor(np.array([[[[8, 7, 6, 7, 8, 7, 6], [5, 4, 3, 4, 5, 4, 3],
[2, 1, 0, 1, 2, 1, 0], [5, 4, 3, 4, 5, 4, 3],
[8, 7, 6, 7, 8, 7, 6], [5, 4, 3, 4, 5, 4, 3],
[2, 1, 0, 1, 2, 1, 0]]]]).astype(np.float32))
assert np.array_equal(output.asnumpy(), expected_output)
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
def test_reflection_pad2d_input3d(mode):
r"""
Feature: ReflectionPad2d
Description: Test ReflectionPad2d with 3D input.
Expectation: success
"""
context.set_context(mode=mode)
x = Tensor(np.array([[[0, 1, 2], [3, 4, 5], [6, 7, 8]]]).astype(np.float32))
padding = (1, 1, 2, 0)
net = nn.ReflectionPad2d(padding)
output = net(x)
expected_output = Tensor(np.array([[[7, 6, 7, 8, 7], [4, 3, 4, 5, 4], [1, 0, 1, 2, 1],
[4, 3, 4, 5, 4], [7, 6, 7, 8, 7]]]).astype(np.float32))
assert np.array_equal(output.asnumpy(), expected_output)
padding = 2
output = nn.ReflectionPad2d(padding)(x)
expected_output = Tensor(np.array([[[8, 7, 6, 7, 8, 7, 6], [5, 4, 3, 4, 5, 4, 3],
[2, 1, 0, 1, 2, 1, 0], [5, 4, 3, 4, 5, 4, 3],
[8, 7, 6, 7, 8, 7, 6], [5, 4, 3, 4, 5, 4, 3],
[2, 1, 0, 1, 2, 1, 0]]]).astype(np.float32))
assert np.array_equal(output.asnumpy(), expected_output)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
def test_reflection_pad_3d(mode):
"""
Feature: ReflectionPad3d
Description: Test ReflectionPad3d with tuple and int padding parameters.
Expectation: success
"""
context.set_context(mode=mode)
arr = np.arange(8).astype(np.float32).reshape((1, 2, 2, 2))
x = Tensor(arr)
padding = (1, 1, 1, 0, 0, 1)
net3d = nn.ReflectionPad3d(padding)
output = net3d(x)
expected_output = Tensor(np.array([[[[3, 2, 3, 2], [1, 0, 1, 0], [3, 2, 3, 2]],
[[7, 6, 7, 6], [5, 4, 5, 4], [7, 6, 7, 6]],
[[3, 2, 3, 2], [1, 0, 1, 0], [3, 2, 3, 2]]]]).astype(np.float32))
assert np.array_equal(output.asnumpy(), expected_output)
padding = 1
output = nn.ReflectionPad3d(padding)(x)
expected_output = Tensor(np.array([[[[7., 6., 7., 6.], [5., 4., 5., 4.],
[7., 6., 7., 6.], [5., 4., 5., 4.]],
[[3., 2., 3., 2.], [1., 0., 1., 0.],
[3., 2., 3., 2.], [1., 0., 1., 0.]],
[[7., 6., 7., 6.], [5., 4., 5., 4.],
[7., 6., 7., 6.], [5., 4., 5., 4.]],
[[3., 2., 3., 2.], [1., 0., 1., 0.],
[3., 2., 3., 2.], [1., 0., 1., 0.]]]]).astype(np.float32))
assert np.array_equal(output.asnumpy(), expected_output)
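The reflection-pad expectations above can be sanity-checked against NumPy, whose np.pad with mode='reflect' implements the same mirroring rule. An illustrative cross-check (not part of the test file) for the first ReflectionPad1d case:
import numpy as np

x = np.array([[[0, 1, 2, 3], [4, 5, 6, 7]]], dtype=np.float32)
# padding=(3, 1) pads only the last axis: three columns on the left, one on the right
ref = np.pad(x, ((0, 0), (0, 0), (3, 1)), mode='reflect')
print(ref)  # [[[3. 2. 1. 0. 1. 2. 3. 2.]
            #   [7. 6. 5. 4. 5. 6. 7. 6.]]] -- matches expected_output above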

View File

@@ -1,67 +0,0 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.nn as nn
import mindspore.context as context
from mindspore import Tensor
class Net(nn.Cell):
def __init__(self, padding):
super(Net, self).__init__()
self.pad = nn.ReflectionPad3d(padding)
def construct(self, x):
return self.pad(x)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
def test_reflection_pad_3d(mode):
"""
Feature: ReflectionPad3d
Description: Infer process of ReflectionPad3d with three type parameters.
Expectation: success
"""
context.set_context(mode=mode)
arr = np.arange(8).astype(np.float32).reshape((1, 2, 2, 2))
x = Tensor(arr)
padding = (1, 1, 1, 0, 0, 1)
net3d = Net(padding)
output = net3d(x)
expected_output = Tensor(np.array([[[[3, 2, 3, 2], [1, 0, 1, 0], [3, 2, 3, 2]],
[[7, 6, 7, 6], [5, 4, 5, 4], [7, 6, 7, 6]],
[[3, 2, 3, 2], [1, 0, 1, 0], [3, 2, 3, 2]]]]).astype(np.float32))
assert np.array_equal(output.asnumpy(), expected_output)
padding = 1
output = Net(padding)(x)
expected_output = Tensor(np.array([[[[7., 6., 7., 6.], [5., 4., 5., 4.],
[7., 6., 7., 6.], [5., 4., 5., 4.]],
[[3., 2., 3., 2.], [1., 0., 1., 0.],
[3., 2., 3., 2.], [1., 0., 1., 0.]],
[[7., 6., 7., 6.], [5., 4., 5., 4.],
[7., 6., 7., 6.], [5., 4., 5., 4.]],
[[3., 2., 3., 2.], [1., 0., 1., 0.],
[3., 2., 3., 2.], [1., 0., 1., 0.]]]]).astype(np.float32))
assert np.array_equal(output.asnumpy(), expected_output)

View File

@@ -1,127 +0,0 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.nn as nn
from mindspore import Tensor
class Net1d(nn.Cell):
def __init__(self, padding):
super(Net1d, self).__init__()
self.pad = nn.ReflectionPad1d(padding)
def construct(self, x):
return self.pad(x)
class Net2d(nn.Cell):
def __init__(self, padding):
super(Net2d, self).__init__()
self.pad = nn.ReflectionPad2d(padding)
def construct(self, x):
return self.pad(x)
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_reflection_pad_1d():
"""
Feature: ReflectionPad1d
Description: Infer process of ReflectionPad1d with 2 types of parameters.
Expectation: success
"""
# Test functionality with 3D tensor input
x = Tensor(np.array([[[0, 1, 2, 3], [4, 5, 6, 7]]]).astype(np.float32))
padding = (3, 1)
net = Net1d(padding)
output = net(x)
expected_output = Tensor(np.array([[[3, 2, 1, 0, 1, 2, 3, 2],
[7, 6, 5, 4, 5, 6, 7, 6]]]).astype(np.float32))
assert np.array_equal(output, expected_output)
padding = 2
expected_output = Tensor(np.array([[[2, 1, 0, 1, 2, 3, 2, 1],
[6, 5, 4, 5, 6, 7, 6, 5]]]).astype(np.float32))
net = Net1d(padding)
output = net(x)
assert np.array_equal(output, expected_output)
# Test functionality with 2D tensor as input
x = Tensor(np.array([[0, 1, 2, 3], [4, 5, 6, 7]]).astype(np.float16))
padding = (3, 1)
net = Net1d(padding)
output = net(x)
expected_output = Tensor(np.array([[3, 2, 1, 0, 1, 2, 3, 2],
[7, 6, 5, 4, 5, 6, 7, 6]]).astype(np.float16))
assert np.array_equal(output, expected_output)
padding = 2
expected_output = Tensor(np.array([[2, 1, 0, 1, 2, 3, 2, 1],
[6, 5, 4, 5, 6, 7, 6, 5]]).astype(np.float16))
net = Net1d(padding)
output = net(x)
assert np.array_equal(output, expected_output)
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_reflection_pad_2d():
r"""
Feature: ReflectionPad2d
Description: Infer process of ReflectionPad2d with three type parameters.
Expectation: success
"""
# Test functionality with 4D tensor as input
x = Tensor(np.array([[[[0, 1, 2], [3, 4, 5], [6, 7, 8]]]]).astype(np.int32))
padding = (1, 1, 2, 0)
net = Net2d(padding)
output = net(x)
expected_output = Tensor(np.array([[[[7, 6, 7, 8, 7], [4, 3, 4, 5, 4], [1, 0, 1, 2, 1],
[4, 3, 4, 5, 4], [7, 6, 7, 8, 7]]]]).astype(np.int32))
assert np.array_equal(output, expected_output)
padding = 2
net = Net2d(padding)
output = net(x)
expected_output = Tensor(np.array([[[[8, 7, 6, 7, 8, 7, 6], [5, 4, 3, 4, 5, 4, 3],
[2, 1, 0, 1, 2, 1, 0], [5, 4, 3, 4, 5, 4, 3],
[8, 7, 6, 7, 8, 7, 6], [5, 4, 3, 4, 5, 4, 3],
[2, 1, 0, 1, 2, 1, 0]]]]).astype(np.int32))
assert np.array_equal(output, expected_output)
# Test functionality with 3D tensor as input
x = Tensor(np.array([[[0, 1, 2], [3, 4, 5], [6, 7, 8]]]).astype(np.float32))
padding = (1, 1, 2, 0)
net = Net2d(padding)
output = net(x)
expected_output = Tensor(np.array([[[7, 6, 7, 8, 7], [4, 3, 4, 5, 4], [1, 0, 1, 2, 1],
[4, 3, 4, 5, 4], [7, 6, 7, 8, 7]]]).astype(np.float32))
assert np.array_equal(output, expected_output)
padding = 2
net = Net2d(padding)
output = net(x)
expected_output = Tensor(np.array([[[8, 7, 6, 7, 8, 7, 6], [5, 4, 3, 4, 5, 4, 3],
[2, 1, 0, 1, 2, 1, 0], [5, 4, 3, 4, 5, 4, 3],
[8, 7, 6, 7, 8, 7, 6], [5, 4, 3, 4, 5, 4, 3],
[2, 1, 0, 1, 2, 1, 0]]]).astype(np.float32))
assert np.array_equal(output, expected_output)

View File

@@ -1,53 +0,0 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore as ms
import mindspore.nn as nn
class Net(nn.Cell):
def __init__(self):
super(Net, self).__init__()
self.pool = nn.AvgPool3d(kernel_size=3, stride=1)
def construct(self, x):
out = self.pool(x)
return out
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_cpu
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [ms.GRAPH_MODE, ms.PYNATIVE_MODE])
def test_avgpool3d_normal(mode):
"""
Feature: AvgPool3d
Description: Verify the result of AvgPool3d
Expectation: success
"""
ms.set_context(mode=mode)
np_array = np.arange(1 * 2 * 4 * 4 * 5).reshape((1, 2, 4, 4, 5))
net = Net()
x = ms.Tensor(np_array, ms.float32)
output = net(x)
expect_output = np.array([[[[[26.0, 27.0, 28.0], [31.0, 32.0, 33.0]], [[46.0, 47.0, 48.0], [51.0, 52.0, 53.0]]],
[[[106.0, 107.0, 108.0], [111.0, 112.0, 113.0]],
[[126.0, 127.0, 128.0], [131.0, 132.0, 133.0]]]]])
assert output.shape == (1, 2, 2, 2, 3)
assert np.allclose(output.asnumpy(), expect_output)

View File

@@ -1,127 +0,0 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import mindspore.nn as nn
from mindspore import Tensor
import pytest
class Net1d(nn.Cell):
def __init__(self, padding):
super(Net1d, self).__init__()
self.pad = nn.ReflectionPad1d(padding)
def construct(self, x):
return self.pad(x)
class Net2d(nn.Cell):
def __init__(self, padding):
super(Net2d, self).__init__()
self.pad = nn.ReflectionPad2d(padding)
def construct(self, x):
return self.pad(x)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_cpu
@pytest.mark.env_onecard
def test_reflection_pad_1d():
"""
Feature: ReflectionPad1d
Description: Infer process of ReflectionPad1d with 2 types of parameters.
Expectation: success
"""
# Test functionality with 3D tensor input
x = Tensor(np.array([[[0, 1, 2, 3], [4, 5, 6, 7]]]).astype(np.float32))
padding = (3, 1)
net = Net1d(padding)
output = net(x)
expected_output = Tensor(np.array([[[3, 2, 1, 0, 1, 2, 3, 2],
[7, 6, 5, 4, 5, 6, 7, 6]]]).astype(np.float32))
assert np.array_equal(output, expected_output)
padding = 2
expected_output = Tensor(np.array([[[2, 1, 0, 1, 2, 3, 2, 1],
[6, 5, 4, 5, 6, 7, 6, 5]]]).astype(np.float32))
net = Net1d(padding)
output = net(x)
assert np.array_equal(output, expected_output)
# Test functionality with 2D tensor as input
x = Tensor(np.array([[0, 1, 2, 3], [4, 5, 6, 7]]).astype(np.float16))
padding = (3, 1)
net = Net1d(padding)
output = net(x)
expected_output = Tensor(np.array([[3, 2, 1, 0, 1, 2, 3, 2],
[7, 6, 5, 4, 5, 6, 7, 6]]).astype(np.float16))
assert np.array_equal(output, expected_output)
padding = 2
expected_output = Tensor(np.array([[2, 1, 0, 1, 2, 3, 2, 1],
[6, 5, 4, 5, 6, 7, 6, 5]]).astype(np.float16))
net = Net1d(padding)
output = net(x)
assert np.array_equal(output, expected_output)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_cpu
@pytest.mark.env_onecard
def test_reflection_pad_2d():
r"""
Feature: ReflectionPad2d
Description: Infer process of ReflectionPad2d with three type parameters.
Expectation: success
"""
# Test functionality with 4D tensor as input
x = Tensor(np.array([[[[0, 1, 2], [3, 4, 5], [6, 7, 8]]]]).astype(np.int32))
padding = (1, 1, 2, 0)
net = Net2d(padding)
output = net(x)
expected_output = Tensor(np.array([[[[7, 6, 7, 8, 7], [4, 3, 4, 5, 4], [1, 0, 1, 2, 1],
[4, 3, 4, 5, 4], [7, 6, 7, 8, 7]]]]).astype(np.int32))
assert np.array_equal(output, expected_output)
padding = 2
net = Net2d(padding)
output = net(x)
expected_output = Tensor(np.array([[[[8, 7, 6, 7, 8, 7, 6], [5, 4, 3, 4, 5, 4, 3],
[2, 1, 0, 1, 2, 1, 0], [5, 4, 3, 4, 5, 4, 3],
[8, 7, 6, 7, 8, 7, 6], [5, 4, 3, 4, 5, 4, 3],
[2, 1, 0, 1, 2, 1, 0]]]]).astype(np.int32))
assert np.array_equal(output, expected_output)
# Test functionality with 3D tensor as input
x = Tensor(np.array([[[0, 1, 2], [3, 4, 5], [6, 7, 8]]]).astype(np.float32))
padding = (1, 1, 2, 0)
net = Net2d(padding)
output = net(x)
expected_output = Tensor(np.array([[[7, 6, 7, 8, 7], [4, 3, 4, 5, 4], [1, 0, 1, 2, 1],
[4, 3, 4, 5, 4], [7, 6, 7, 8, 7]]]).astype(np.float32))
assert np.array_equal(output, expected_output)
padding = 2
net = Net2d(padding)
output = net(x)
expected_output = Tensor(np.array([[[8, 7, 6, 7, 8, 7, 6], [5, 4, 3, 4, 5, 4, 3],
[2, 1, 0, 1, 2, 1, 0], [5, 4, 3, 4, 5, 4, 3],
[8, 7, 6, 7, 8, 7, 6], [5, 4, 3, 4, 5, 4, 3],
[2, 1, 0, 1, 2, 1, 0]]]).astype(np.float32))
assert np.array_equal(output, expected_output)

View File

@@ -1,111 +0,0 @@
import numpy as np
import mindspore.nn as nn
from mindspore import Tensor
import pytest
class Net1d(nn.Cell):
def __init__(self, padding):
super(Net1d, self).__init__()
self.pad = nn.ReflectionPad1d(padding)
def construct(self, x):
return self.pad(x)
class Net2d(nn.Cell):
def __init__(self, padding):
super(Net2d, self).__init__()
self.pad = nn.ReflectionPad2d(padding)
def construct(self, x):
return self.pad(x)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_reflection_pad_1d():
"""
Feature: ReflectionPad1d
Description: Infer process of ReflectionPad1d with 2 types of parameters.
Expectation: success
"""
# Test functionality with 3D tensor input
x = Tensor(np.array([[[0, 1, 2, 3], [4, 5, 6, 7]]]).astype(np.float32))
padding = (3, 1)
net = Net1d(padding)
output = net(x)
expected_output = Tensor(np.array([[[3, 2, 1, 0, 1, 2, 3, 2],
[7, 6, 5, 4, 5, 6, 7, 6]]]).astype(np.float32))
assert np.array_equal(output, expected_output)
padding = 2
expected_output = Tensor(np.array([[[2, 1, 0, 1, 2, 3, 2, 1],
[6, 5, 4, 5, 6, 7, 6, 5]]]).astype(np.float32))
net = Net1d(padding)
output = net(x)
assert np.array_equal(output, expected_output)
# Test functionality with 2D tensor as input
x = Tensor(np.array([[0, 1, 2, 3], [4, 5, 6, 7]]).astype(np.float16))
padding = (3, 1)
net = Net1d(padding)
output = net(x)
expected_output = Tensor(np.array([[3, 2, 1, 0, 1, 2, 3, 2],
[7, 6, 5, 4, 5, 6, 7, 6]]).astype(np.float16))
assert np.array_equal(output, expected_output)
padding = 2
expected_output = Tensor(np.array([[2, 1, 0, 1, 2, 3, 2, 1],
[6, 5, 4, 5, 6, 7, 6, 5]]).astype(np.float16))
net = Net1d(padding)
output = net(x)
assert np.array_equal(output, expected_output)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_reflection_pad_2d():
r"""
Feature: ReflectionPad2d
Description: Infer process of ReflectionPad2d with three type parameters.
Expectation: success
"""
# Test functionality with 4D tensor as input
x = Tensor(np.array([[[[0, 1, 2], [3, 4, 5], [6, 7, 8]]]]).astype(np.int32))
padding = (1, 1, 2, 0)
net = Net2d(padding)
output = net(x)
expected_output = Tensor(np.array([[[[7, 6, 7, 8, 7], [4, 3, 4, 5, 4], [1, 0, 1, 2, 1],
[4, 3, 4, 5, 4], [7, 6, 7, 8, 7]]]]).astype(np.int32))
assert np.array_equal(output, expected_output)
padding = 2
net = Net2d(padding)
output = net(x)
expected_output = Tensor(np.array([[[[8, 7, 6, 7, 8, 7, 6], [5, 4, 3, 4, 5, 4, 3],
[2, 1, 0, 1, 2, 1, 0], [5, 4, 3, 4, 5, 4, 3],
[8, 7, 6, 7, 8, 7, 6], [5, 4, 3, 4, 5, 4, 3],
[2, 1, 0, 1, 2, 1, 0]]]]).astype(np.int32))
assert np.array_equal(output, expected_output)
# Test functionality with 3D tensor as input
x = Tensor(np.array([[[0, 1, 2], [3, 4, 5], [6, 7, 8]]]).astype(np.float32))
padding = (1, 1, 2, 0)
net = Net2d(padding)
output = net(x)
expected_output = Tensor(np.array([[[7, 6, 7, 8, 7], [4, 3, 4, 5, 4], [1, 0, 1, 2, 1],
[4, 3, 4, 5, 4], [7, 6, 7, 8, 7]]]).astype(np.float32))
assert np.array_equal(output, expected_output)
padding = 2
net = Net2d(padding)
output = net(x)
expected_output = Tensor(np.array([[[8, 7, 6, 7, 8, 7, 6], [5, 4, 3, 4, 5, 4, 3],
[2, 1, 0, 1, 2, 1, 0], [5, 4, 3, 4, 5, 4, 3],
[8, 7, 6, 7, 8, 7, 6], [5, 4, 3, 4, 5, 4, 3],
[2, 1, 0, 1, 2, 1, 0]]]).astype(np.float32))
assert np.array_equal(output, expected_output)

View File

@@ -146,12 +146,17 @@ class MyLamb(nn.Cell):
return self.lamb(self.param, self.m, self.v, lr, beta1, beta2, eps, weight_decay, global_step, self.gradient)
def test_gpu_net():
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
def test_gpu_net(mode):
"""
Feature: gpu testcase for Lamb
Description: fixed input when using gpu
Expectation: get the same result when using the new lamb kernel and the old kernel
"""
context.set_context(mode=mode)
my_lamb = MyLamb(param_val, m_val, v_val, grad_val)
my_lamb(beta1_val, beta2_val, eps_val, global_step_val, lr_val, weight_decay_val)
@@ -161,12 +166,18 @@ def test_gpu_net():
assert np.allclose(my_lamb.param.asnumpy(), lamb_gpu_origin.param.asnumpy())
def test_ascend_net():
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
def test_ascend_net(mode):
"""
Feature: ascend testcase for Lamb
Description: fixed input when using ascend
Expectation: get the same result when using the new lamb kernel and the old kernel
"""
context.set_context(mode=mode)
my_lamb = MyLamb(param_val, m_val, v_val, grad_val)
my_lamb(beta1_val, beta2_val, eps_val, global_step_val, lr_val, weight_decay_val)
@@ -174,57 +185,3 @@ def test_ascend_net():
lamb_ascend_origin(beta1_val, beta2_val, eps_val, global_step_val, lr_val, weight_decay_val, True)
assert np.allclose(my_lamb.param.asnumpy(), lamb_ascend_origin.param.asnumpy())
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_gpu_graph_net():
"""
Feature: graph kernel testcase for Lamb
Description: fixed input when using ascend in graph mode
Expectation: get the same result when use new lamb kernel and old kernel
"""
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
test_gpu_net()
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_gpu_pynative_net():
"""
Feature: pynative kernel testcase for Lamb
Description: fixed input when using ascend in pynative mode
Expectation: get the same result when use new lamb kernel and old kernel
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
test_gpu_net()
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_ascend_graph_net():
"""
Feature: graph kernel testcase for Lamb
Description: fixed input when using ascend in graph mode
Expectation: get the same result when use new lamb kernel and old kernel
"""
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
test_ascend_net()
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_ascend_pynative_net():
"""
Feature: pynative kernel testcase for Lamb
Description: fixed input when using ascend in pynative mode
Expectation: get the same result when use new lamb kernel and old kernel
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
test_ascend_net()

View File

@@ -94,7 +94,7 @@ class FakeNet(nn.Cell):
m.bias.set_data(Tensor(fc2_bias))
def build_network(opt_config, is_group=False, net=FakeNet(), loss_fn=nn.MSELoss(reduction='sum')):
def build_network(opt_config, net, is_group=False, loss_fn=nn.MSELoss(reduction='sum')):
"""
Construct training
"""

View File

@@ -14,10 +14,11 @@
# ============================================================================
import numpy as np
import pytest
import mindspore.context as context
from mindspore import nn, Tensor
from optimizer_utils import build_network, \
loss_default_adamax, loss_not_default_adamax, loss_group_adamax
from .optimizer_utils import build_network, loss_default_adamax, loss_not_default_adamax, loss_group_adamax
w1 = np.array([[0.03909272, 0.08893055, -0.259909, -0.459185,
@@ -51,78 +52,63 @@ class Net(nn.Cell):
return self.fc2(x)
def test_default_adamax_pynative():
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
def test_default_adamax(mode):
"""
Feature: Test adamax optimizer
Description: Test adamax in Pynative mode with default parameter
Description: Test adamax with default parameters
Expectation: Loss values and parameters conform to preset values.
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target='Ascend')
context.set_context(mode=mode)
config = {'name': 'adamax', 'lr': 0.001, "beta1": 0.9, "beta2": 0.999, "eps": 1e-07,
'weight_decay': 0.0}
loss = build_network(config, net=Net(), loss_fn=nn.MSELoss(reduction='mean'))
assert np.allclose(loss_default_adamax, loss, atol=1.e-5)
def test_default_adamax_graph():
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
def test_no_default_adamax(mode):
"""
Feature: Test adamax optimizer
Description: Test adamax in Graph mode with default parameter
Description: Test adamax with another set of parameters
Expectation: Loss values and parameters conform to preset values.
"""
context.set_context(mode=context.GRAPH_MODE, device_target='Ascend')
config = {'name': 'adamax', 'lr': 0.001, "beta1": 0.9, "beta2": 0.999, "eps": 1e-07,
'weight_decay': 0.0}
loss = build_network(config, net=Net(), loss_fn=nn.MSELoss(reduction='mean'))
assert np.allclose(loss_default_adamax, loss, atol=1.e-5)
def test_no_default_adamax_pynative():
"""
Feature: Test adamax optimizer
Description: Test adamax in Pynative mode with another set of parameter
Expectation: Loss values and parameters conform to preset values.
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target='Ascend')
context.set_context(mode=mode)
config = {'name': 'adamax', 'lr': 0.01, "beta1": 0.9, "beta2": 0.98, "eps": 1e-06,
'weight_decay': 0.0}
loss = build_network(config, net=Net(), loss_fn=nn.MSELoss(reduction='mean'))
assert np.allclose(loss_not_default_adamax, loss, atol=1.e-5)
def test_no_default_adamax_graph():
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
def test_default_adamax_group(mode):
"""
Feature: Test adamax optimizer
Description: Test adamax in Graph mode with another set of parameter
Description: Test adamax with parameter grouping
Expectation: Loss values and parameters conform to preset values.
"""
context.set_context(mode=context.GRAPH_MODE, device_target='Ascend')
config = {'name': 'adamax', 'lr': 0.01, "beta1": 0.9, "beta2": 0.98, "eps": 1e-06,
'weight_decay': 0.0}
loss = build_network(config, net=Net(), loss_fn=nn.MSELoss(reduction='mean'))
assert np.allclose(loss_not_default_adamax, loss, atol=1.e-5)
def test_default_adamax_group_pynative():
"""
Feature: Test adamax optimizer
Description: Test adamax in Pynative mode with parameter grouping
Expectation: Loss values and parameters conform to preset values.
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target='Ascend')
config = {'name': 'adamax', 'lr': 0.002, "beta1": 0.9, "beta2": 0.999, "eps": 1e-08,
'weight_decay': 0.0}
loss = build_network(config, is_group=True, net=Net(), loss_fn=nn.MSELoss(reduction='mean'))
assert np.allclose(loss_group_adamax, loss, atol=1.e-5)
def test_default_adamax_group_graph():
"""
Feature: Test adamax optimizer
Description: Test adamax in Graph mode with parameter grouping
Expectation: Loss values and parameters conform to preset values.
"""
context.set_context(mode=context.GRAPH_MODE, device_target='Ascend')
context.set_context(mode=mode)
config = {'name': 'adamax', 'lr': 0.002, "beta1": 0.9, "beta2": 0.999, "eps": 1e-08,
'weight_decay': 0.0}
loss = build_network(config, is_group=True, net=Net(), loss_fn=nn.MSELoss(reduction='mean'))
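For the grouped case, is_group=True presumably makes build_network hand the optimizer a list of parameter-group dicts instead of a flat parameter list; the helper's exact wiring lives in optimizer_utils, so the following is only a hedged sketch of the usual MindSpore grouping idiom (the layer names and the nn.AdaMax mapping for the 'adamax' config are assumptions):
import mindspore.nn as nn

net = Net()  # the two-layer Net defined in this test file
fc1_params = [p for p in net.trainable_params() if 'fc1' in p.name]      # assumes a layer named fc1
other_params = [p for p in net.trainable_params() if 'fc1' not in p.name]
group_params = [
    {'params': fc1_params, 'weight_decay': 0.01},  # per-group override
    {'params': other_params},                      # falls back to the optimizer defaults
]
optimizer = nn.AdaMax(group_params, learning_rate=0.002, beta1=0.9, beta2=0.999, eps=1e-08)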

View File

@@ -1,4 +1,4 @@
# Copyright 2021 Huawei Technologies Co., Ltd
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,21 +13,31 @@
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.context as context
from .optimizer_utils import build_network, loss_not_default_asgd, loss_default_asgd, loss_group_asgd
from .optimizer_utils import FakeNet, build_network, loss_not_default_asgd, loss_default_asgd, loss_group_asgd
def test_default_asgd_graph():
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
def test_default_asgd(mode):
"""
Feature: Test ASGD optimizer
Description: Test ASGD in Graph mode with default parameter
Description: Test ASGD with default parameters
Expectation: Loss values and parameters conform to preset values.
"""
from .optimizer_utils import default_fc1_weight_asgd, \
default_fc1_bias_asgd, default_fc2_weight_asgd, default_fc2_bias_asgd
context.set_context(mode=context.GRAPH_MODE, device_target='CPU')
context.set_context(mode=mode)
config = {'name': 'ASGD', 'lr': 0.01, 'lambd': 1e-4, 'alpha': 0.75, 't0': 1e6, 'weight_decay': 0.0}
loss, cells = build_network(config)
loss, cells = build_network(config, FakeNet())
assert np.allclose(cells.ax[0].asnumpy(), default_fc1_weight_asgd, atol=1.e-5)
assert np.allclose(cells.ax[1].asnumpy(), default_fc1_bias_asgd, atol=1.e-5)
assert np.allclose(cells.ax[2].asnumpy(), default_fc2_weight_asgd, atol=1.e-5)
@@ -35,17 +45,25 @@ def test_default_asgd_graph():
assert np.allclose(loss_default_asgd, loss, atol=1.e-5)
def test_no_default_asgd_graph():
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
def test_no_default_asgd(mode):
"""
Feature: Test ASGD optimizer
Description: Test ASGD in Graph mode with another set of parameter
Description: Test ASGD with another set of parameters
Expectation: Loss values and parameters conform to preset values.
"""
from .optimizer_utils import no_default_fc1_weight_asgd, \
no_default_fc1_bias_asgd, no_default_fc2_weight_asgd, no_default_fc2_bias_asgd
context.set_context(mode=context.GRAPH_MODE, device_target='CPU')
config = {'name': 'ASGD', 'lr': 0.001, 'lambd': 1e-3, 'alpha': 0.8, 't0': 50., 'weight_decay': 0.001}
loss, cells = build_network(config)
context.set_context(mode=mode)
loss, cells = build_network(config, FakeNet())
assert np.allclose(cells.ax[0].asnumpy(), no_default_fc1_weight_asgd, atol=1.e-5)
assert np.allclose(cells.ax[1].asnumpy(), no_default_fc1_bias_asgd, atol=1.e-5)
assert np.allclose(cells.ax[2].asnumpy(), no_default_fc2_weight_asgd, atol=1.e-5)
@@ -53,18 +71,25 @@ def test_no_default_asgd_graph():
assert np.allclose(loss_not_default_asgd, loss, atol=1.e-5, rtol=1e-3)
def test_default_asgd_group_graph():
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
def test_default_asgd_group(mode):
"""
Feature: Test ASGD optimizer
Description: Test ASGD in Graph mode with parameter grouping
Description: Test ASGD with parameter grouping
Expectation: Loss values and parameters conform to preset values.
"""
from .optimizer_utils import no_default_group_fc1_weight_asgd, no_default_group_fc1_bias_asgd, \
no_default_group_fc2_weight_asgd, no_default_group_fc2_bias_asgd
context.set_context(mode=context.GRAPH_MODE, device_target='CPU')
context.set_context(mode=mode)
config = {'name': 'ASGD', 'lr': 0.1, 'lambd': 1e-3, 'alpha': 0.8, 't0': 50., 'weight_decay': 0.001}
loss, cells = build_network(config, is_group=True)
loss, cells = build_network(config, FakeNet(), is_group=True)
assert np.allclose(cells.ax[0].asnumpy(), no_default_group_fc1_weight_asgd, atol=1.e-5)
assert np.allclose(cells.ax[1].asnumpy(), no_default_group_fc1_bias_asgd, atol=1.e-5)
assert np.allclose(cells.ax[2].asnumpy(), no_default_group_fc2_weight_asgd, atol=1.e-5)
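The ASGD config dicts above map directly onto optimizer keyword arguments; a hedged sketch of how build_network presumably consumes them (the real wiring is in optimizer_utils and may differ):
import mindspore.nn as nn
from .optimizer_utils import FakeNet  # same helper imported at the top of this file

net = FakeNet()
config = {'name': 'ASGD', 'lr': 0.01, 'lambd': 1e-4, 'alpha': 0.75, 't0': 1e6, 'weight_decay': 0.0}
optimizer = nn.ASGD(net.trainable_params(), learning_rate=config['lr'], lambd=config['lambd'],
                    alpha=config['alpha'], t0=config['t0'], weight_decay=config['weight_decay'])
# The assertions above read cells.ax -- the running parameter averages that ASGD maintains.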

View File

@@ -1,125 +0,0 @@
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import mindspore.context as context
from .optimizer_utils import build_network, loss_not_default_asgd, loss_default_asgd, loss_group_asgd
def test_default_asgd_pynative():
"""
Feature: Test ASGD optimizer
Description: Test ASGD in Pynative mode with default parameter
Expectation: Loss values and parameters conform to preset values.
"""
from .optimizer_utils import default_fc1_weight_asgd, \
default_fc1_bias_asgd, default_fc2_weight_asgd, default_fc2_bias_asgd
context.set_context(mode=context.PYNATIVE_MODE, device_target='Ascend')
config = {'name': 'ASGD', 'lr': 0.01, 'lambd': 1e-4, 'alpha': 0.75, 't0': 1e6, 'weight_decay': 0.0}
loss, cells = build_network(config)
assert np.allclose(cells.ax[0].asnumpy(), default_fc1_weight_asgd, atol=1.e-5)
assert np.allclose(cells.ax[1].asnumpy(), default_fc1_bias_asgd, atol=1.e-5)
assert np.allclose(cells.ax[2].asnumpy(), default_fc2_weight_asgd, atol=1.e-5)
assert np.allclose(cells.ax[3].asnumpy(), default_fc2_bias_asgd, atol=1.e-5)
assert np.allclose(loss_default_asgd, loss, atol=1.e-5)
def test_default_asgd_graph():
"""
Feature: Test ASGD optimizer
Description: Test ASGD in Graph mode with default parameter
Expectation: Loss values and parameters conform to preset values.
"""
from .optimizer_utils import default_fc1_weight_asgd, \
default_fc1_bias_asgd, default_fc2_weight_asgd, default_fc2_bias_asgd
context.set_context(mode=context.GRAPH_MODE, device_target='Ascend')
config = {'name': 'ASGD', 'lr': 0.01, 'lambd': 1e-4, 'alpha': 0.75, 't0': 1e6, 'weight_decay': 0.0}
loss, cells = build_network(config)
assert np.allclose(cells.ax[0].asnumpy(), default_fc1_weight_asgd, atol=1.e-5)
assert np.allclose(cells.ax[1].asnumpy(), default_fc1_bias_asgd, atol=1.e-5)
assert np.allclose(cells.ax[2].asnumpy(), default_fc2_weight_asgd, atol=1.e-5)
assert np.allclose(cells.ax[3].asnumpy(), default_fc2_bias_asgd, atol=1.e-5)
assert np.allclose(loss_default_asgd, loss, atol=1.e-5)
def test_no_default_asgd_pynative():
"""
Feature: Test ASGD optimizer
Description: Test ASGD in Pynative mode with another set of parameter
Expectation: Loss values and parameters conform to preset values.
"""
from .optimizer_utils import no_default_fc1_weight_asgd, \
no_default_fc1_bias_asgd, no_default_fc2_weight_asgd, no_default_fc2_bias_asgd
context.set_context(mode=context.PYNATIVE_MODE, device_target='Ascend')
config = {'name': 'ASGD', 'lr': 0.001, 'lambd': 1e-3, 'alpha': 0.8, 't0': 50., 'weight_decay': 0.001}
loss, cells = build_network(config)
assert np.allclose(cells.ax[0].asnumpy(), no_default_fc1_weight_asgd, atol=1.e-5)
assert np.allclose(cells.ax[1].asnumpy(), no_default_fc1_bias_asgd, atol=1.e-5)
assert np.allclose(cells.ax[2].asnumpy(), no_default_fc2_weight_asgd, atol=1.e-5)
assert np.allclose(cells.ax[3].asnumpy(), no_default_fc2_bias_asgd, atol=1.e-5)
assert np.allclose(loss_not_default_asgd, loss, atol=1.e-5, rtol=1e-3)
def test_no_default_asgd_graph():
"""
Feature: Test ASGD optimizer
Description: Test ASGD in Graph mode with another set of parameter
Expectation: Loss values and parameters conform to preset values.
"""
from .optimizer_utils import no_default_fc1_weight_asgd, \
no_default_fc1_bias_asgd, no_default_fc2_weight_asgd, no_default_fc2_bias_asgd
context.set_context(mode=context.GRAPH_MODE, device_target='Ascend')
config = {'name': 'ASGD', 'lr': 0.001, 'lambd': 1e-3, 'alpha': 0.8, 't0': 50., 'weight_decay': 0.001}
loss, cells = build_network(config)
assert np.allclose(cells.ax[0].asnumpy(), no_default_fc1_weight_asgd, atol=1.e-5)
assert np.allclose(cells.ax[1].asnumpy(), no_default_fc1_bias_asgd, atol=1.e-5)
assert np.allclose(cells.ax[2].asnumpy(), no_default_fc2_weight_asgd, atol=1.e-5)
assert np.allclose(cells.ax[3].asnumpy(), no_default_fc2_bias_asgd, atol=1.e-5)
assert np.allclose(loss_not_default_asgd, loss, atol=1.e-5, rtol=1e-3)
def test_default_asgd_group_pynative():
"""
Feature: Test ASGD optimizer
Description: Test ASGD in Pynative mode with parameter grouping
Expectation: Loss values and parameters conform to preset values.
"""
from .optimizer_utils import no_default_group_fc1_weight_asgd, no_default_group_fc1_bias_asgd, \
no_default_group_fc2_weight_asgd, no_default_group_fc2_bias_asgd
context.set_context(mode=context.PYNATIVE_MODE, device_target='Ascend')
config = {'name': 'ASGD', 'lr': 0.1, 'lambd': 1e-3, 'alpha': 0.8, 't0': 50., 'weight_decay': 0.001}
loss, cells = build_network(config, is_group=True)
assert np.allclose(cells.ax[0].asnumpy(), no_default_group_fc1_weight_asgd, atol=1.e-5)
assert np.allclose(cells.ax[1].asnumpy(), no_default_group_fc1_bias_asgd, atol=1.e-5)
assert np.allclose(cells.ax[2].asnumpy(), no_default_group_fc2_weight_asgd, atol=1.e-5)
assert np.allclose(cells.ax[3].asnumpy(), no_default_group_fc2_bias_asgd, atol=1.e-5)
assert np.allclose(loss_group_asgd, loss, atol=1.e-5, rtol=1e-3)
def test_default_asgd_group_graph():
"""
Feature: Test ASGD optimizer
Description: Test ASGD in Graph mode with parameter grouping
Expectation: Loss values and parameters conform to preset values.
"""
from .optimizer_utils import no_default_group_fc1_weight_asgd, no_default_group_fc1_bias_asgd, \
no_default_group_fc2_weight_asgd, no_default_group_fc2_bias_asgd
context.set_context(mode=context.GRAPH_MODE, device_target='Ascend')
config = {'name': 'ASGD', 'lr': 0.1, 'lambd': 1e-3, 'alpha': 0.8, 't0': 50., 'weight_decay': 0.001}
loss, cells = build_network(config, is_group=True)
assert np.allclose(cells.ax[0].asnumpy(), no_default_group_fc1_weight_asgd, atol=1.e-5)
assert np.allclose(cells.ax[1].asnumpy(), no_default_group_fc1_bias_asgd, atol=1.e-5)
assert np.allclose(cells.ax[2].asnumpy(), no_default_group_fc2_weight_asgd, atol=1.e-5)
assert np.allclose(cells.ax[3].asnumpy(), no_default_group_fc2_bias_asgd, atol=1.e-5)
assert np.allclose(loss_group_asgd, loss, atol=1.e-5, rtol=1e-3)

View File

@@ -1,125 +0,0 @@
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import mindspore.context as context
from .optimizer_utils import build_network, loss_not_default_asgd, loss_default_asgd, loss_group_asgd
def test_default_asgd_pynative():
"""
Feature: Test ASGD optimizer
Description: Test ASGD in Pynative mode with default parameter
Expectation: Loss values and parameters conform to preset values.
"""
from .optimizer_utils import default_fc1_weight_asgd, \
default_fc1_bias_asgd, default_fc2_weight_asgd, default_fc2_bias_asgd
context.set_context(mode=context.PYNATIVE_MODE, device_target='GPU')
config = {'name': 'ASGD', 'lr': 0.01, 'lambd': 1e-4, 'alpha': 0.75, 't0': 1e6, 'weight_decay': 0.0}
loss, cells = build_network(config)
assert np.allclose(cells.ax[0].asnumpy(), default_fc1_weight_asgd, atol=1.e-5)
assert np.allclose(cells.ax[1].asnumpy(), default_fc1_bias_asgd, atol=1.e-5)
assert np.allclose(cells.ax[2].asnumpy(), default_fc2_weight_asgd, atol=1.e-5)
assert np.allclose(cells.ax[3].asnumpy(), default_fc2_bias_asgd, atol=1.e-5)
assert np.allclose(loss_default_asgd, loss, atol=1.e-5)
def test_default_asgd_graph():
"""
Feature: Test ASGD optimizer
Description: Test ASGD in Graph mode with default parameter
Expectation: Loss values and parameters conform to preset values.
"""
from .optimizer_utils import default_fc1_weight_asgd, \
default_fc1_bias_asgd, default_fc2_weight_asgd, default_fc2_bias_asgd
context.set_context(mode=context.GRAPH_MODE, device_target='GPU')
config = {'name': 'ASGD', 'lr': 0.01, 'lambd': 1e-4, 'alpha': 0.75, 't0': 1e6, 'weight_decay': 0.0}
loss, cells = build_network(config)
assert np.allclose(cells.ax[0].asnumpy(), default_fc1_weight_asgd, atol=1.e-5)
assert np.allclose(cells.ax[1].asnumpy(), default_fc1_bias_asgd, atol=1.e-5)
assert np.allclose(cells.ax[2].asnumpy(), default_fc2_weight_asgd, atol=1.e-5)
assert np.allclose(cells.ax[3].asnumpy(), default_fc2_bias_asgd, atol=1.e-5)
assert np.allclose(loss_default_asgd, loss, atol=1.e-5)
def test_no_default_asgd_pynative():
"""
Feature: Test ASGD optimizer
Description: Test ASGD in Pynative mode with another set of parameter
Expectation: Loss values and parameters conform to preset values.
"""
from .optimizer_utils import no_default_fc1_weight_asgd, \
no_default_fc1_bias_asgd, no_default_fc2_weight_asgd, no_default_fc2_bias_asgd
context.set_context(mode=context.PYNATIVE_MODE, device_target='GPU')
config = {'name': 'ASGD', 'lr': 0.001, 'lambd': 1e-3, 'alpha': 0.8, 't0': 50., 'weight_decay': 0.001}
loss, cells = build_network(config)
assert np.allclose(cells.ax[0].asnumpy(), no_default_fc1_weight_asgd, atol=1.e-5)
assert np.allclose(cells.ax[1].asnumpy(), no_default_fc1_bias_asgd, atol=1.e-5)
assert np.allclose(cells.ax[2].asnumpy(), no_default_fc2_weight_asgd, atol=1.e-5)
assert np.allclose(cells.ax[3].asnumpy(), no_default_fc2_bias_asgd, atol=1.e-5)
assert np.allclose(loss_not_default_asgd, loss, atol=1.e-5, rtol=1e-3)
def test_no_default_asgd_graph():
"""
Feature: Test ASGD optimizer
Description: Test ASGD in Graph mode with another set of parameter
Expectation: Loss values and parameters conform to preset values.
"""
from .optimizer_utils import no_default_fc1_weight_asgd, \
no_default_fc1_bias_asgd, no_default_fc2_weight_asgd, no_default_fc2_bias_asgd
context.set_context(mode=context.GRAPH_MODE, device_target='GPU')
config = {'name': 'ASGD', 'lr': 0.001, 'lambd': 1e-3, 'alpha': 0.8, 't0': 50., 'weight_decay': 0.001}
loss, cells = build_network(config)
assert np.allclose(cells.ax[0].asnumpy(), no_default_fc1_weight_asgd, atol=1.e-5)
assert np.allclose(cells.ax[1].asnumpy(), no_default_fc1_bias_asgd, atol=1.e-5)
assert np.allclose(cells.ax[2].asnumpy(), no_default_fc2_weight_asgd, atol=1.e-5)
assert np.allclose(cells.ax[3].asnumpy(), no_default_fc2_bias_asgd, atol=1.e-5)
assert np.allclose(loss_not_default_asgd, loss, atol=1.e-5, rtol=1e-3)
def test_default_asgd_group_pynative():
"""
Feature: Test ASGD optimizer
Description: Test ASGD in Pynative mode with parameter grouping
Expectation: Loss values and parameters conform to preset values.
"""
from .optimizer_utils import no_default_group_fc1_weight_asgd, no_default_group_fc1_bias_asgd, \
no_default_group_fc2_weight_asgd, no_default_group_fc2_bias_asgd
context.set_context(mode=context.PYNATIVE_MODE, device_target='GPU')
config = {'name': 'ASGD', 'lr': 0.1, 'lambd': 1e-3, 'alpha': 0.8, 't0': 50., 'weight_decay': 0.001}
loss, cells = build_network(config, is_group=True)
assert np.allclose(cells.ax[0].asnumpy(), no_default_group_fc1_weight_asgd, atol=1.e-5)
assert np.allclose(cells.ax[1].asnumpy(), no_default_group_fc1_bias_asgd, atol=1.e-5)
assert np.allclose(cells.ax[2].asnumpy(), no_default_group_fc2_weight_asgd, atol=1.e-5)
assert np.allclose(cells.ax[3].asnumpy(), no_default_group_fc2_bias_asgd, atol=1.e-5)
assert np.allclose(loss_group_asgd, loss, atol=1.e-5, rtol=1e-3)
def test_default_asgd_group_graph():
"""
Feature: Test ASGD optimizer
Description: Test ASGD in Graph mode with parameter grouping
Expectation: Loss values and parameters conform to preset values.
"""
from .optimizer_utils import no_default_group_fc1_weight_asgd, no_default_group_fc1_bias_asgd, \
no_default_group_fc2_weight_asgd, no_default_group_fc2_bias_asgd
context.set_context(mode=context.GRAPH_MODE, device_target='GPU')
config = {'name': 'ASGD', 'lr': 0.1, 'lambd': 1e-3, 'alpha': 0.8, 't0': 50., 'weight_decay': 0.001}
loss, cells = build_network(config, is_group=True)
assert np.allclose(cells.ax[0].asnumpy(), no_default_group_fc1_weight_asgd, atol=1.e-5)
assert np.allclose(cells.ax[1].asnumpy(), no_default_group_fc1_bias_asgd, atol=1.e-5)
assert np.allclose(cells.ax[2].asnumpy(), no_default_group_fc2_weight_asgd, atol=1.e-5)
assert np.allclose(cells.ax[3].asnumpy(), no_default_group_fc2_bias_asgd, atol=1.e-5)
assert np.allclose(loss_group_asgd, loss, atol=1.e-5, rtol=1e-3)

View File

@@ -12,18 +12,28 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import pytest
import mindspore.context as context
import mindspore.nn as nn
from .weight_decay_utils import dynamic_weight_decay_cmp, WeightDecaySchdule, Net
def test_momentum_dynamic_weight_decay_pynative():
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
def test_momentum_dynamic_weight_decay(mode):
"""
Feature: Dynamic weight decay
Description: Test dynamic weight decay for Momentum
Expectation: The value of decay changes according to preset weight decay schedule
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
context.set_context(mode=mode)
net1, net2 = Net(), Net()
weight_decay_schedule = WeightDecaySchdule()
optimizer1 = nn.Momentum(net1.trainable_params(), momentum=0.001, learning_rate=0.001, weight_decay=0.001)
@@ -32,28 +42,21 @@ def test_momentum_dynamic_weight_decay_pynative():
dynamic_weight_decay_cmp(net1, net2, optimizer1, optimizer2)
def test_momentum_dynamic_weight_decay_graph():
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
def test_momentum_dynamic_weight_decay_group(mode):
"""
Feature: Dynamic weight decay
Description: Test dynamic weight decay for Momentum
Expectation: The value of decay changes according to preset weight decay schedule
"""
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
net1, net2 = Net(), Net()
weight_decay_schedule = WeightDecaySchdule()
optimizer1 = nn.Momentum(net1.trainable_params(), momentum=0.001, learning_rate=0.001, weight_decay=0.001)
optimizer2 = nn.Momentum(net2.trainable_params(), momentum=0.001, learning_rate=0.001,
weight_decay=weight_decay_schedule)
dynamic_weight_decay_cmp(net1, net2, optimizer1, optimizer2)
def test_momentum_dynamic_weight_decay_graph_group():
"""
Feature: Dynamic weight decay
Description: Test dynamic weight decay for Momentum
Expectation: The value of decay changes according to preset weight decay schedule
"""
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
context.set_context(mode=mode)
weight_decay_schedule = WeightDecaySchdule()
net1, net2 = Net(), Net()
@@ -74,13 +77,21 @@ def test_momentum_dynamic_weight_decay_graph_group():
dynamic_weight_decay_cmp(net1, net2, optimizer1, optimizer2)
def test_adamweightdecay_dynamic_weight_decay_pynative():
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
def test_adamweightdecay_dynamic_weight_decay(mode):
"""
Feature: Dynamic weight decay
Description: Test dynamic weight decay for AdamWeightDecay
Expectation: The value of decay changes according to preset weight decay schedule
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
context.set_context(mode=mode)
net1, net2 = Net(), Net()
weight_decay_schedule = WeightDecaySchdule()
optimizer1 = nn.AdamWeightDecay(net1.trainable_params(), learning_rate=0.001, weight_decay=0.001)
@@ -88,27 +99,21 @@ def test_adamweightdecay_dynamic_weight_decay_pynative():
dynamic_weight_decay_cmp(net1, net2, optimizer1, optimizer2)
def test_adamweightdecay_dynamic_weight_decay_graph():
"""
Feature: Dynamic weight decay
Description: Test dynamic weight decay for AdamWeightDecay
Expectation: The value of decay changes according to preset weight decay schedule
"""
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
net1, net2 = Net(), Net()
weight_decay_schedule = WeightDecaySchdule()
optimizer1 = nn.AdamWeightDecay(net1.trainable_params(), learning_rate=0.001, weight_decay=0.001)
optimizer2 = nn.AdamWeightDecay(net2.trainable_params(), learning_rate=0.001, weight_decay=weight_decay_schedule)
dynamic_weight_decay_cmp(net1, net2, optimizer1, optimizer2)
def test_adamweightdecay_dynamic_weight_decay_graph_group():
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
def test_adamweightdecay_dynamic_weight_decay_group(mode):
"""
Feature: Dynamic weight decay
Description: Test dynamic weight decay for AdamWeightDecay
Expectation: The value of decay changes according to preset weight decay schedule
"""
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
context.set_context(mode=mode)
weight_decay_schedule = WeightDecaySchdule()
net1, net2 = Net(), Net()
@@ -129,42 +134,19 @@ def test_adamweightdecay_dynamic_weight_decay_graph_group():
dynamic_weight_decay_cmp(net1, net2, optimizer1, optimizer2)
def test_lamb_dynamic_weight_decay_pynative():
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
def test_lamb_dynamic_weight_decay_graph_group(mode):
"""
Feature: Dynamic weight decay
Description: Test dynamic weight decay for Lamb
Description: Test dynamic weight decay for LAMB
Expectation: The value of decay changes according to preset weight decay schedule
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
net1, net2 = Net(), Net()
weight_decay_schedule = WeightDecaySchdule()
optimizer1 = nn.Lamb(net1.trainable_params(), learning_rate=0.001, weight_decay=0.001)
optimizer2 = nn.Lamb(net2.trainable_params(), learning_rate=0.001, weight_decay=weight_decay_schedule)
dynamic_weight_decay_cmp(net1, net2, optimizer1, optimizer2)
def test_lamb_dynamic_weight_decay_graph():
"""
Feature: Dynamic weight decay
Description: Test dynamic weight decay for Lamb
Expectation: The value of decay changes according to preset weight decay schedule
"""
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
net1, net2 = Net(), Net()
weight_decay_schedule = WeightDecaySchdule()
optimizer1 = nn.Lamb(net1.trainable_params(), learning_rate=0.001, weight_decay=0.001)
optimizer2 = nn.Lamb(net2.trainable_params(), learning_rate=0.001, weight_decay=weight_decay_schedule)
dynamic_weight_decay_cmp(net1, net2, optimizer1, optimizer2)
def test_lamb_dynamic_weight_decay_graph_group():
"""
Feature: Dynamic weight decay
Description: Test dynamic weight decay for Lamb
Expectation: The value of decay changes according to preset weight decay schedule
"""
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
context.set_context(mode=mode)
weight_decay_schedule = WeightDecaySchdule()
net1, net2 = Net(), Net()
@ -185,13 +167,18 @@ def test_lamb_dynamic_weight_decay_graph_group():
dynamic_weight_decay_cmp(net1, net2, optimizer1, optimizer2)
def test_lars_dynamic_weight_decay_pynative():
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
def test_lars_dynamic_weight_decay(mode):
"""
Feature: Dynamic weight decay
Description: Test dynamic weight decay for LARS
Expectation: The value of decay changes according to preset weight decay schedule
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
context.set_context(mode=mode)
net1, net2 = Net(), Net()
weight_decay_schedule = WeightDecaySchdule()
@ -202,30 +189,18 @@ def test_lars_dynamic_weight_decay_pynative():
dynamic_weight_decay_cmp(net1, net2, optimizer1, optimizer2)
def test_lars_dynamic_weight_decay_graph():
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
def test_lars_dynamic_weight_decay_group(mode):
"""
Feature: Dynamic weight decay
Description: Test dynamic weight decay for LARS
Expectation: The value of decay changes according to preset weight decay schedule
"""
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
net1, net2 = Net(), Net()
weight_decay_schedule = WeightDecaySchdule()
opt1 = nn.Momentum(net1.trainable_params(), momentum=0.001, learning_rate=0.001, weight_decay=0.001)
opt2 = nn.Momentum(net2.trainable_params(), momentum=0.001, learning_rate=0.001, weight_decay=weight_decay_schedule)
optimizer1 = nn.LARS(opt1, lars_filter=lambda x: 'LayerNorm' not in x.name)
optimizer2 = nn.LARS(opt2, lars_filter=lambda x: 'LayerNorm' not in x.name)
dynamic_weight_decay_cmp(net1, net2, optimizer1, optimizer2)
def test_lars_dynamic_weight_decay_graph_group():
"""
Feature: Dynamic weight decay
Description: Test dynamic weight decay for LARS
Expectation: The value of decay changes according to preset weight decay schedule
"""
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
context.set_context(mode=mode)
net1, net2 = Net(), Net()
weight_decay_schedule = WeightDecaySchdule()

View File

@ -1,129 +0,0 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import mindspore.context as context
import mindspore.nn as nn
from .weight_decay_utils import dynamic_weight_decay_cmp, WeightDecaySchdule, Net
def test_momentum_dynamic_weight_decay_pynative():
"""
Feature: Dynamic weight decay
Description: Test dynamic weight decay for Momentum
Expectation: The value of decay changes according to preset weight decay schedule
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target="CPU")
net1, net2 = Net(), Net()
weight_decay_schedule = WeightDecaySchdule()
optimizer1 = nn.Momentum(net1.trainable_params(), momentum=0.001, learning_rate=0.001, weight_decay=0.001)
optimizer2 = nn.Momentum(net2.trainable_params(), momentum=0.001, learning_rate=0.001,
weight_decay=weight_decay_schedule)
dynamic_weight_decay_cmp(net1, net2, optimizer1, optimizer2)
def test_momentum_dynamic_weight_decay_graph():
"""
Feature: Dynamic weight decay
Description: Test dynamic weight decay for Momentum
Expectation: The value of decay changes according to preset weight decay schedule
"""
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
net1, net2 = Net(), Net()
weight_decay_schedule = WeightDecaySchdule()
optimizer1 = nn.Momentum(net1.trainable_params(), momentum=0.001, learning_rate=0.001, weight_decay=0.001)
optimizer2 = nn.Momentum(net2.trainable_params(), momentum=0.001, learning_rate=0.001,
weight_decay=weight_decay_schedule)
dynamic_weight_decay_cmp(net1, net2, optimizer1, optimizer2)
def test_momentum_dynamic_weight_decay_graph_group():
"""
Feature: Dynamic weight decay
Description: Test dynamic weight decay for Momentum
Expectation: The value of decay changes according to preset weight decay schedule
"""
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
weight_decay_schedule = WeightDecaySchdule()
net1, net2 = Net(), Net()
net1_fc1_params = list(filter(lambda x: 'fc1' in x.name, net1.trainable_params()))
net1_fc2_params = list(filter(lambda x: 'fc1' not in x.name, net1.trainable_params()))
net2_fc1_params = list(filter(lambda x: 'fc1' in x.name, net2.trainable_params()))
net2_fc2_params = list(filter(lambda x: 'fc1' not in x.name, net2.trainable_params()))
params1 = [{'params': net1_fc1_params, 'weight_decay': 0.01, 'lr': 0.01},
{'params': net1_fc2_params, 'weight_decay': 0.001, 'lr': 0.001}]
params2 = [{'params': net2_fc1_params, 'weight_decay': 0.01, 'lr': 0.01},
{'params': net2_fc2_params, 'weight_decay': weight_decay_schedule, 'lr': 0.001}]
optimizer1 = nn.Momentum(params1, momentum=0.001, learning_rate=0.001, weight_decay=0.001)
optimizer2 = nn.Momentum(params2, momentum=0.001, learning_rate=0.001, weight_decay=0.001)
dynamic_weight_decay_cmp(net1, net2, optimizer1, optimizer2)
def test_adamweightdecay_dynamic_weight_decay_pynative():
"""
Feature: Dynamic weight decay
Description: Test dynamic weight decay for AdamWeightDecay
Expectation: The value of decay changes according to preset weight decay schedule
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target="CPU")
net1, net2 = Net(), Net()
weight_decay_schedule = WeightDecaySchdule()
optimizer1 = nn.AdamWeightDecay(net1.trainable_params(), learning_rate=0.001, weight_decay=0.001)
optimizer2 = nn.AdamWeightDecay(net2.trainable_params(), learning_rate=0.001, weight_decay=weight_decay_schedule)
dynamic_weight_decay_cmp(net1, net2, optimizer1, optimizer2)
def test_adamweightdecay_dynamic_weight_decay_graph():
"""
Feature: Dynamic weight decay
Description: Test dynamic weight decay for AdamWeightDecay
Expectation: The value of decay changes according to preset weight decay schedule
"""
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
net1, net2 = Net(), Net()
weight_decay_schedule = WeightDecaySchdule()
optimizer1 = nn.AdamWeightDecay(net1.trainable_params(), learning_rate=0.001, weight_decay=0.001)
optimizer2 = nn.AdamWeightDecay(net2.trainable_params(), learning_rate=0.001, weight_decay=weight_decay_schedule)
dynamic_weight_decay_cmp(net1, net2, optimizer1, optimizer2)
def test_adamweightdecay_dynamic_weight_decay_graph_group():
"""
Feature: Dynamic weight decay
Description: Test dynamic weight decay for AdamWeightDecay
Expectation: The value of decay changes according to preset weight decay schedule
"""
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
weight_decay_schedule = WeightDecaySchdule()
net1, net2 = Net(), Net()
net1_fc1_params = list(filter(lambda x: 'fc1' in x.name, net1.trainable_params()))
net1_fc2_params = list(filter(lambda x: 'fc1' not in x.name, net1.trainable_params()))
net2_fc1_params = list(filter(lambda x: 'fc1' in x.name, net2.trainable_params()))
net2_fc2_params = list(filter(lambda x: 'fc1' not in x.name, net2.trainable_params()))
params1 = [{'params': net1_fc1_params, 'weight_decay': 0.01, 'lr': 0.01},
{'params': net1_fc2_params, 'weight_decay': 0.001, 'lr': 0.001}]
params2 = [{'params': net2_fc1_params, 'weight_decay': 0.01, 'lr': 0.01},
{'params': net2_fc2_params, 'weight_decay': weight_decay_schedule, 'lr': 0.001}]
optimizer1 = nn.AdamWeightDecay(params1, learning_rate=0.001, weight_decay=0.001)
optimizer2 = nn.AdamWeightDecay(params2, learning_rate=0.001, weight_decay=0.001)
dynamic_weight_decay_cmp(net1, net2, optimizer1, optimizer2)

View File

@ -1,184 +0,0 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import mindspore.context as context
import mindspore.nn as nn
from .weight_decay_utils import dynamic_weight_decay_cmp, WeightDecaySchdule, Net
def test_momentum_dynamic_weight_decay_pynative():
"""
Feature: Dynamic weight decay
Description: Test dynamic weight decay for Momentum
Expectation: The value of decay changes according to preset weight decay schedule
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
net1, net2 = Net(), Net()
weight_decay_schedule = WeightDecaySchdule()
optimizer1 = nn.Momentum(net1.trainable_params(), momentum=0.001, learning_rate=0.001, weight_decay=0.001)
optimizer2 = nn.Momentum(net2.trainable_params(), momentum=0.001, learning_rate=0.001,
weight_decay=weight_decay_schedule)
dynamic_weight_decay_cmp(net1, net2, optimizer1, optimizer2)
def test_momentum_dynamic_weight_decay_graph():
"""
Feature: Dynamic weight decay
Description: Test dynamic weight decay for Momentum
Expectation: The value of decay changes according to preset weight decay schedule
"""
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
net1, net2 = Net(), Net()
weight_decay_schedule = WeightDecaySchdule()
optimizer1 = nn.Momentum(net1.trainable_params(), momentum=0.001, learning_rate=0.001, weight_decay=0.001)
optimizer2 = nn.Momentum(net2.trainable_params(), momentum=0.001, learning_rate=0.001,
weight_decay=weight_decay_schedule)
dynamic_weight_decay_cmp(net1, net2, optimizer1, optimizer2)
def test_momentum_dynamic_weight_decay_graph_group():
"""
Feature: Dynamic weight decay
Description: Test dynamic weight decay for Momentum
Expectation: The value of decay changes according to preset weight decay schedule
"""
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
weight_decay_schedule = WeightDecaySchdule()
net1, net2 = Net(), Net()
net1_fc1_params = list(filter(lambda x: 'fc1' in x.name, net1.trainable_params()))
net1_fc2_params = list(filter(lambda x: 'fc1' not in x.name, net1.trainable_params()))
net2_fc1_params = list(filter(lambda x: 'fc1' in x.name, net2.trainable_params()))
net2_fc2_params = list(filter(lambda x: 'fc1' not in x.name, net2.trainable_params()))
params1 = [{'params': net1_fc1_params, 'weight_decay': 0.01, 'lr': 0.01},
{'params': net1_fc2_params, 'weight_decay': 0.001, 'lr': 0.001}]
params2 = [{'params': net2_fc1_params, 'weight_decay': 0.01, 'lr': 0.01},
{'params': net2_fc2_params, 'weight_decay': weight_decay_schedule, 'lr': 0.001}]
optimizer1 = nn.Momentum(params1, momentum=0.001, learning_rate=0.001, weight_decay=0.001)
optimizer2 = nn.Momentum(params2, momentum=0.001, learning_rate=0.001, weight_decay=0.001)
dynamic_weight_decay_cmp(net1, net2, optimizer1, optimizer2)
def test_adamweightdecay_dynamic_weight_decay_pynative():
"""
Feature: Dynamic weight decay
Description: Test dynamic weight decay for AdamWeightDecay
Expectation: The value of decay changes according to preset weight decay schedule
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
net1, net2 = Net(), Net()
weight_decay_schedule = WeightDecaySchdule()
optimizer1 = nn.AdamWeightDecay(net1.trainable_params(), learning_rate=0.001, weight_decay=0.001)
optimizer2 = nn.AdamWeightDecay(net2.trainable_params(), learning_rate=0.001, weight_decay=weight_decay_schedule)
dynamic_weight_decay_cmp(net1, net2, optimizer1, optimizer2)
def test_adamweightdecay_dynamic_weight_decay_graph():
"""
Feature: Dynamic weight decay
Description: Test dynamic weight decay for AdamWeightDecay
Expectation: The value of decay changes according to preset weight decay schedule
"""
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
net1, net2 = Net(), Net()
weight_decay_schedule = WeightDecaySchdule()
optimizer1 = nn.AdamWeightDecay(net1.trainable_params(), learning_rate=0.001, weight_decay=0.001)
optimizer2 = nn.AdamWeightDecay(net2.trainable_params(), learning_rate=0.001, weight_decay=weight_decay_schedule)
dynamic_weight_decay_cmp(net1, net2, optimizer1, optimizer2)
def test_adamweightdecay_dynamic_weight_decay_graph_group():
"""
Feature: Dynamic weight decay
Description: Test dynamic weight decay for AdamWeightDecay
Expectation: The value of decay changes according to preset weight decay schedule
"""
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
weight_decay_schedule = WeightDecaySchdule()
net1, net2 = Net(), Net()
net1_fc1_params = list(filter(lambda x: 'fc1' in x.name, net1.trainable_params()))
net1_fc2_params = list(filter(lambda x: 'fc1' not in x.name, net1.trainable_params()))
net2_fc1_params = list(filter(lambda x: 'fc1' in x.name, net2.trainable_params()))
net2_fc2_params = list(filter(lambda x: 'fc1' not in x.name, net2.trainable_params()))
params1 = [{'params': net1_fc1_params, 'weight_decay': 0.01, 'lr': 0.01},
{'params': net1_fc2_params, 'weight_decay': 0.001, 'lr': 0.001}]
params2 = [{'params': net2_fc1_params, 'weight_decay': 0.01, 'lr': 0.01},
{'params': net2_fc2_params, 'weight_decay': weight_decay_schedule, 'lr': 0.001}]
optimizer1 = nn.AdamWeightDecay(params1, learning_rate=0.001, weight_decay=0.001)
optimizer2 = nn.AdamWeightDecay(params2, learning_rate=0.001, weight_decay=0.001)
dynamic_weight_decay_cmp(net1, net2, optimizer1, optimizer2)
def test_lamb_dynamic_weight_decay_pynative():
"""
Feature: Dynamic weight decay
Description: Test dynamic weight decay for Lamb
Expectation: The value of decay changes according to preset weight decay schedule
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
net1, net2 = Net(), Net()
weight_decay_schedule = WeightDecaySchdule()
optimizer1 = nn.Lamb(net1.trainable_params(), learning_rate=0.001, weight_decay=0.001)
optimizer2 = nn.Lamb(net2.trainable_params(), learning_rate=0.001, weight_decay=weight_decay_schedule)
dynamic_weight_decay_cmp(net1, net2, optimizer1, optimizer2)
def test_lamb_dynamic_weight_decay_graph():
"""
Feature: Dynamic weight decay
Description: Test dynamic weight decay for Lamb
Expectation: The value of decay changes according to preset weight decay schedule
"""
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
net1, net2 = Net(), Net()
weight_decay_schedule = WeightDecaySchdule()
optimizer1 = nn.Lamb(net1.trainable_params(), learning_rate=0.001, weight_decay=0.001)
optimizer2 = nn.Lamb(net2.trainable_params(), learning_rate=0.001, weight_decay=weight_decay_schedule)
dynamic_weight_decay_cmp(net1, net2, optimizer1, optimizer2)
def test_lamb_dynamic_weight_decay_graph_group():
"""
Feature: Dynamic weight decay
Description: Test dynamic weight decay for Lamb
Expectation: The value of decay changes according to preset weight decay schedule
"""
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
weight_decay_schedule = WeightDecaySchdule()
net1, net2 = Net(), Net()
net1_fc1_params = list(filter(lambda x: 'fc1' in x.name, net1.trainable_params()))
net1_fc2_params = list(filter(lambda x: 'fc1' not in x.name, net1.trainable_params()))
net2_fc1_params = list(filter(lambda x: 'fc1' in x.name, net2.trainable_params()))
net2_fc2_params = list(filter(lambda x: 'fc1' not in x.name, net2.trainable_params()))
params1 = [{'params': net1_fc1_params, 'weight_decay': 0.01, 'lr': 0.01},
{'params': net1_fc2_params, 'weight_decay': 0.001, 'lr': 0.001}]
params2 = [{'params': net2_fc1_params, 'weight_decay': 0.01, 'lr': 0.01},
{'params': net2_fc2_params, 'weight_decay': weight_decay_schedule, 'lr': 0.001}]
optimizer1 = nn.Lamb(params1, learning_rate=0.001, weight_decay=0.001)
optimizer2 = nn.Lamb(params2, learning_rate=0.001, weight_decay=0.001)
dynamic_weight_decay_cmp(net1, net2, optimizer1, optimizer2)

View File

@ -21,18 +21,18 @@ from mindspore import Tensor
from mindspore.nn import TrainOneStepCell, WithLossCell
from tests.st.networks.models.lenet import LeNet
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_lenet():
@pytest.mark.parametrize('mode', [context.GRAPH_MODE])
def test_lenet(mode):
'''
Feature: AdaFactor
Description: Test AdaFactor
Expectation: LeNet runs successfully
'''
context.set_context(mode=mode)
data = Tensor(np.ones([32, 3, 32, 32]).astype(np.float32) * 0.01)
label = Tensor(np.ones([32]).astype(np.int32))
net = LeNet()

View File

@ -23,18 +23,19 @@ from mindspore.common import set_seed
from tests.st.networks.models.lenet import LeNet
set_seed(1)
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_lenet_flatten_weight_with_adam():
@pytest.mark.parametrize('mode', [context.GRAPH_MODE])
def test_lenet_flatten_weight_with_adam(mode):
'''
Feature: Fused optimizer
Description: Test fused Adam with flattened weights
Expectation: LeNet runs successfully and loss < 2.2
'''
context.set_context(mode=mode)
data = Tensor(np.ones([32, 3, 32, 32]).astype(np.float32) * 0.01)
label = Tensor(np.ones([32]).astype(np.int32))
net = LeNet()
@ -55,12 +56,14 @@ def test_lenet_flatten_weight_with_adam():
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_lenet_flatten_weight_with_adam_weight_decay():
@pytest.mark.parametrize('mode', [context.GRAPH_MODE])
def test_lenet_flatten_weight_with_adam_weight_decay(mode):
'''
Feature: Fused optimizer
Description: Test fused AdamWeightDecay with flattened weights
Expectation: LeNet runs successfully and loss < 0.1
'''
context.set_context(mode=mode)
data = Tensor(np.ones([32, 3, 32, 32]).astype(np.float32) * 0.01)
label = Tensor(np.ones([32]).astype(np.int32))
net = LeNet()

View File

@ -0,0 +1,80 @@
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.context as context
from .optimizer_utils import FakeNet, build_network, loss_default_rprop, loss_group_rprop, loss_not_default_rprop
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
def test_default_rprop(mode):
"""
Feature: Test Rprop optimizer
Description: Test Rprop with default parameters
Expectation: Loss values and parameters conform to preset values.
"""
context.set_context(mode=mode)
config = {'name': 'Rprop', 'lr': 0.01, 'etas': (0.5, 1.2), 'step_sizes': (1e-6, 50.), 'weight_decay': 0.0}
loss = build_network(config, net=FakeNet())
assert np.allclose(loss_default_rprop, loss, atol=1.e-5)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
def test_no_default_rprop(mode):
"""
Feature: Test Rprop optimizer
Description: Test Rprop with a non-default set of parameters
Expectation: Loss values and parameters conform to preset values.
"""
context.set_context(mode=mode)
config = {'name': 'Rprop', 'lr': 0.001, 'etas': (0.6, 1.9), 'step_sizes': (1e-3, 20.), 'weight_decay': 0.0}
loss = build_network(config, net=FakeNet())
assert np.allclose(loss_not_default_rprop, loss, atol=1.e-5)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
def test_default_rprop_group(mode):
"""
Feature: Test Rprop optimizer
Description: Test Rprop with parameter grouping
Expectation: Loss values and parameters conform to preset values.
"""
context.set_context(mode=mode)
config = {'name': 'Rprop', 'lr': 0.001, 'etas': (0.6, 1.9), 'step_sizes': (1e-2, 10.), 'weight_decay': 0.0}
loss = build_network(config, net=FakeNet(), is_group=True)
assert np.allclose(loss_group_rprop, loss, atol=1.e-5)
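Note: build_network, FakeNet and the loss_* baselines referenced above come from optimizer_utils, which is not shown in this diff. A minimal, self-contained sketch of how such a helper could map the config dict onto nn.Rprop is given below; the stand-in network, input shape and step count are illustrative assumptions only.
import numpy as np
import mindspore as ms
import mindspore.nn as nn

def build_rprop_network_sketch(config, steps=3):
    # Stand-in for FakeNet: a tiny dense classifier (assumption, not the real test net).
    net = nn.Dense(4, 3)
    # Map the test's config dict onto the nn.Rprop constructor.
    optimizer = nn.Rprop(net.trainable_params(),
                         learning_rate=config['lr'],
                         etas=config['etas'],
                         step_sizes=config['step_sizes'],
                         weight_decay=config['weight_decay'])
    criterion = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
    train_net = nn.TrainOneStepCell(nn.WithLossCell(net, criterion), optimizer)
    data = ms.Tensor(np.ones([2, 4]).astype(np.float32))
    label = ms.Tensor(np.array([0, 1]).astype(np.int32))
    # Record a few loss values, as the shared helper presumably does.
    return [float(train_net(data, label).asnumpy()) for _ in range(steps)]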

View File

@ -1,90 +0,0 @@
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import mindspore.context as context
from .optimizer_utils import build_network, loss_default_rprop, loss_group_rprop, loss_not_default_rprop
def test_default_rprop_pynative():
"""
Feature: Test Rprop optimizer
Description: Test Rprop in Pynative mode with default parameter
Expectation: Loss values and parameters conform to preset values.
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target='Ascend')
config = {'name': 'Rprop', 'lr': 0.01, 'etas': (0.5, 1.2), 'step_sizes': (1e-6, 50.), 'weight_decay': 0.0}
loss = build_network(config)
assert np.allclose(loss_default_rprop, loss, atol=1.e-5)
def test_default_rprop_graph():
"""
Feature: Test Rprop optimizer
Description: Test Rprop in Graph mode with default parameter
Expectation: Loss values and parameters conform to preset values.
"""
context.set_context(mode=context.GRAPH_MODE, device_target='Ascend')
config = {'name': 'Rprop', 'lr': 0.01, 'etas': (0.5, 1.2), 'step_sizes': (1e-6, 50.), 'weight_decay': 0.0}
loss = build_network(config)
assert np.allclose(loss_default_rprop, loss, atol=1.e-5)
def test_no_default_rprop_pynative():
"""
Feature: Test Rprop optimizer
Description: Test Rprop in Pynative mode with another set of parameter
Expectation: Loss values and parameters conform to preset values.
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target='Ascend')
config = {'name': 'Rprop', 'lr': 0.001, 'etas': (0.6, 1.9), 'step_sizes': (1e-3, 20.), 'weight_decay': 0.0}
loss = build_network(config)
assert np.allclose(loss_not_default_rprop, loss, atol=1.e-5)
def test_no_default_rprop_graph():
"""
Feature: Test Rprop optimizer
Description: Test Rprop in Graph mode with another set of parameter
Expectation: Loss values and parameters conform to preset values.
"""
context.set_context(mode=context.GRAPH_MODE, device_target='Ascend')
config = {'name': 'Rprop', 'lr': 0.001, 'etas': (0.6, 1.9), 'step_sizes': (1e-3, 20.), 'weight_decay': 0.0}
loss = build_network(config)
assert np.allclose(loss_not_default_rprop, loss, atol=1.e-5)
def test_default_rprop_group_pynative():
"""
Feature: Test Rprop optimizer
Description: Test Rprop in Pynative mode with parameter grouping
Expectation: Loss values and parameters conform to preset values.
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target='Ascend')
config = {'name': 'Rprop', 'lr': 0.001, 'etas': (0.6, 1.9), 'step_sizes': (1e-2, 10.), 'weight_decay': 0.0}
loss = build_network(config, is_group=True)
assert np.allclose(loss_group_rprop, loss, atol=1.e-5)
def test_default_rprop_group_graph():
"""
Feature: Test Rprop optimizer
Description: Test Rprop in Graph mode with parameter grouping
Expectation: Loss values and parameters conform to preset values.
"""
context.set_context(mode=context.GRAPH_MODE, device_target='Ascend')
config = {'name': 'Rprop', 'lr': 0.001, 'etas': (0.6, 1.9), 'step_sizes': (1e-2, 10.), 'weight_decay': 0.0}
loss = build_network(config, is_group=True)
assert np.allclose(loss_group_rprop, loss, atol=1.e-5)

View File

@ -1,54 +0,0 @@
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import mindspore.context as context
from .optimizer_utils import build_network, loss_default_rprop, loss_group_rprop, loss_not_default_rprop
def test_default_rprop_graph():
"""
Feature: Test Rprop optimizer
Description: Test Rprop in Graph mode with default parameter
Expectation: Loss values and parameters conform to preset values.
"""
context.set_context(mode=context.GRAPH_MODE, device_target='CPU')
config = {'name': 'Rprop', 'lr': 0.01, 'etas': (0.5, 1.2), 'step_sizes': (1e-6, 50.), 'weight_decay': 0.0}
loss = build_network(config)
assert np.allclose(loss_default_rprop, loss, atol=1.e-5)
def test_no_default_rprop_graph():
"""
Feature: Test Rprop optimizer
Description: Test Rprop in Graph mode with another set of parameter
Expectation: Loss values and parameters conform to preset values.
"""
context.set_context(mode=context.GRAPH_MODE, device_target='CPU')
config = {'name': 'Rprop', 'lr': 0.001, 'etas': (0.6, 1.9), 'step_sizes': (1e-3, 20.), 'weight_decay': 0.0}
loss = build_network(config)
assert np.allclose(loss_not_default_rprop, loss, atol=1.e-5)
def test_default_rprop_group_graph():
"""
Feature: Test Rprop optimizer
Description: Test Rprop in Graph mode with parameter grouping
Expectation: Loss values and parameters conform to preset values.
"""
context.set_context(mode=context.GRAPH_MODE, device_target='CPU')
config = {'name': 'Rprop', 'lr': 0.001, 'etas': (0.6, 1.9), 'step_sizes': (1e-2, 10.), 'weight_decay': 0.0}
loss = build_network(config, is_group=True)
assert np.allclose(loss_group_rprop, loss, atol=1.e-5)

View File

@ -1,90 +0,0 @@
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import mindspore.context as context
from .optimizer_utils import build_network, loss_default_rprop, loss_group_rprop, loss_not_default_rprop
def test_default_rprop_pynative():
"""
Feature: Test Rprop optimizer
Description: Test Rprop in Pynative mode with default parameter
Expectation: Loss values and parameters conform to preset values.
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target='GPU')
config = {'name': 'Rprop', 'lr': 0.01, 'etas': (0.5, 1.2), 'step_sizes': (1e-6, 50.), 'weight_decay': 0.0}
loss = build_network(config)
assert np.allclose(loss_default_rprop, loss, atol=1.e-5)
def test_default_rprop_graph():
"""
Feature: Test Rprop optimizer
Description: Test Rprop in Graph mode with default parameter
Expectation: Loss values and parameters conform to preset values.
"""
context.set_context(mode=context.GRAPH_MODE, device_target='GPU')
config = {'name': 'Rprop', 'lr': 0.01, 'etas': (0.5, 1.2), 'step_sizes': (1e-6, 50.), 'weight_decay': 0.0}
loss = build_network(config)
assert np.allclose(loss_default_rprop, loss, atol=1.e-5)
def test_no_default_rprop_pynative():
"""
Feature: Test Rprop optimizer
Description: Test Rprop in Pynative mode with another set of parameter
Expectation: Loss values and parameters conform to preset values.
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target='GPU')
config = {'name': 'Rprop', 'lr': 0.001, 'etas': (0.6, 1.9), 'step_sizes': (1e-3, 20.), 'weight_decay': 0.0}
loss = build_network(config)
assert np.allclose(loss_not_default_rprop, loss, atol=1.e-5)
def test_no_default_rprop_graph():
"""
Feature: Test Rprop optimizer
Description: Test Rprop in Graph mode with another set of parameter
Expectation: Loss values and parameters conform to preset values.
"""
context.set_context(mode=context.GRAPH_MODE, device_target='GPU')
config = {'name': 'Rprop', 'lr': 0.001, 'etas': (0.6, 1.9), 'step_sizes': (1e-3, 20.), 'weight_decay': 0.0}
loss = build_network(config)
assert np.allclose(loss_not_default_rprop, loss, atol=1.e-5)
def test_default_rprop_group_pynative():
"""
Feature: Test Rprop optimizer
Description: Test Rprop in Pynative mode with parameter grouping
Expectation: Loss values and parameters conform to preset values.
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target='GPU')
config = {'name': 'Rprop', 'lr': 0.001, 'etas': (0.6, 1.9), 'step_sizes': (1e-2, 10.), 'weight_decay': 0.0}
loss = build_network(config, is_group=True)
assert np.allclose(loss_group_rprop, loss, atol=1.e-5)
def test_default_rprop_group_graph():
"""
Feature: Test Rprop optimizer
Description: Test Rprop in Graph mode with parameter grouping
Expectation: Loss values and parameters conform to preset values.
"""
context.set_context(mode=context.GRAPH_MODE, device_target='GPU')
config = {'name': 'Rprop', 'lr': 0.001, 'etas': (0.6, 1.9), 'step_sizes': (1e-2, 10.), 'weight_decay': 0.0}
loss = build_network(config, is_group=True)
assert np.allclose(loss_group_rprop, loss, atol=1.e-5)

View File

@ -72,10 +72,10 @@ def dynamic_weight_decay_cmp(net1, net2, optimizer1, optimizer2):
label = Tensor(np.array([0]).astype(np.int32))
loss1 = train_network1(data, label)
loss2 = train_network2(data, label)
assert abs(loss1.asnumpy() - loss2.asnumpy()) < 1.e-8
assert abs(loss1.asnumpy() - loss2.asnumpy()) < 1.e-3
data = Tensor(np.arange(0, 16).reshape(1, 1, 4, 4).astype(np.float32) * 0.01)
label = Tensor(np.array([0]).astype(np.int32))
loss1 = net_with_criterion1(data, label)
loss2 = net_with_criterion2(data, label)
assert abs(loss1.asnumpy() - loss2.asnumpy()) > 1.e-8
assert abs(loss1.asnumpy() - loss2.asnumpy()) < 1.e-3
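Note: WeightDecaySchdule itself is defined in weight_decay_utils and is not part of this hunk. A plausible minimal sketch, assuming it follows MindSpore's dynamic weight decay convention of a Cell whose construct receives the global step and returns the current decay value:
import mindspore.nn as nn
import mindspore.ops as ops

class ExponentialWeightDecaySketch(nn.Cell):
    # Illustrative only; the actual schedule used by these tests may differ.
    def __init__(self, weight_decay=0.001, decay_rate=0.5, decay_steps=10.0):
        super().__init__()
        self.weight_decay = weight_decay
        self.decay_rate = decay_rate
        self.decay_steps = decay_steps

    def construct(self, global_step):
        # The optimizer passes in the current global step at each training step.
        p = global_step / self.decay_steps
        return self.weight_decay * ops.pow(self.decay_rate, p)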

View File

@ -14,6 +14,7 @@
# ============================================================================
""" test nn pad """
import numpy as np
import pytest
import mindspore.nn as nn
from mindspore import Tensor
@ -319,3 +320,70 @@ def test_zero_pad_2d_train():
grad = Grad(ZeroPad2dNet(padding))
output = grad(Tensor(x), Tensor(grads))
print(output)
def test_invalid_padding_reflection_pad_1d():
"""
Feature: ReflectionPad1d
Description: Test five cases of invalid input.
Expectation: TypeError or ValueError is raised for each case
"""
# case 1: padding is not int or tuple
padding = '-1'
with pytest.raises(TypeError):
nn.ReflectionPad1d(padding)
# case 2: padding length is not divisible by 2
padding = (1, 2, 2)
with pytest.raises(ValueError):
nn.ReflectionPad1d(padding)
# case 3: padding element is not int
padding = ('2', 2)
with pytest.raises(TypeError):
nn.ReflectionPad1d(padding)
# case 4: negative padding
padding = (-1, 2)
with pytest.raises(ValueError):
nn.ReflectionPad1d(padding)
# case 5: padding dimension does not match tensor dimension
padding = (1, 1, 1, 1, 1, 1, 1, 1)
x = Tensor([[1, 2, 3], [1, 2, 3]])
with pytest.raises(ValueError):
nn.ReflectionPad1d(padding)(x)
def test_invalid_padding_reflection_pad_2d():
"""
Feature: ReflectionPad2d
Description: Test five cases of invalid input.
Expectation: TypeError or ValueError is raised for each case
"""
# case 1: padding is not int or tuple
padding = '-1'
with pytest.raises(TypeError):
nn.ReflectionPad2d(padding)
# case 2: padding length is not divisible by 2
padding = (1, 2, 2)
with pytest.raises(ValueError):
nn.ReflectionPad2d(padding)
# case 3: padding element is not int
padding = ('2', 2)
with pytest.raises(TypeError):
nn.ReflectionPad2d(padding)
# case 4: negative padding
padding = (-1, 2)
with pytest.raises(ValueError):
nn.ReflectionPad2d(padding)
# case 5: padding dimension does not match tensor dimension
padding = (1, 1, 1, 1, 1, 1, 1, 1)
x = Tensor([[1, 2, 3], [1, 2, 3]])
with pytest.raises(ValueError):
nn.ReflectionPad2d(padding)(x)
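Note: for contrast with the invalid cases above, a minimal valid usage sketch (illustrative values only): with a (left, right, top, bottom) padding of (1, 1, 2, 0), a 1 x 3 x 3 input becomes a 1 x 5 x 5 output.
import numpy as np
import mindspore.nn as nn
from mindspore import Tensor

x = Tensor(np.arange(9).reshape((1, 3, 3)).astype(np.float32))
pad = nn.ReflectionPad2d((1, 1, 2, 0))
out = pad(x)
# Width grows by 1 + 1, height grows by 2 + 0.
assert out.shape == (1, 5, 5)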

View File

@ -1,189 +0,0 @@
import numpy as np
import pytest
from mindspore import Tensor
from mindspore import context
from mindspore.nn import ReflectionPad1d
from mindspore.nn import ReflectionPad2d
from mindspore.nn import ReflectionPad3d
context.set_context(mode=context.PYNATIVE_MODE)
def test_reflection_pad_1d():
"""
Feature: ReflectionPad1d
Description: Infer process of ReflectionPad1d with 2 types of parameters.
Expectation: success
"""
# Test functionality with 3D tensor input
x = Tensor(np.array([[[0, 1, 2, 3], [4, 5, 6, 7]]]).astype(np.float32))
padding = (3, 1)
net = ReflectionPad1d(padding)
output = net(x)
expected_output = Tensor(np.array([[[3, 2, 1, 0, 1, 2, 3, 2],
[7, 6, 5, 4, 5, 6, 7, 6]]]).astype(np.float32))
print(output, expected_output)
padding = 2
expected_output = Tensor(np.array([[[2, 1, 0, 1, 2, 3, 2, 1],
[6, 5, 4, 5, 6, 7, 6, 5]]]).astype(np.float32))
net = ReflectionPad1d(padding)
output = net(x)
print(output, expected_output)
# Test functionality with 2D tensor as input
x = Tensor(np.array([[0, 1, 2, 3], [4, 5, 6, 7]]).astype(np.float32))
padding = (3, 1)
net = ReflectionPad1d(padding)
output = net(x)
expected_output = Tensor(np.array([[3, 2, 1, 0, 1, 2, 3, 2],
[7, 6, 5, 4, 5, 6, 7, 6]]).astype(np.float32))
print(output, expected_output)
padding = 2
expected_output = Tensor(np.array([[2, 1, 0, 1, 2, 3, 2, 1],
[6, 5, 4, 5, 6, 7, 6, 5]]).astype(np.float32))
net = ReflectionPad1d(padding)
output = net(x)
print(output, expected_output)
def test_reflection_pad_2d():
r"""
Feature: ReflectionPad2d
Description: Infer process of ReflectionPad2d with three type parameters.
Expectation: success
"""
# Test functionality with 4D tensor as input
x = Tensor(np.array([[[[0, 1, 2], [3, 4, 5], [6, 7, 8]]]]).astype(np.float32))
padding = (1, 1, 2, 0)
net = ReflectionPad2d(padding)
output = net(x)
expected_output = Tensor(np.array([[[[7, 6, 7, 8, 7], [4, 3, 4, 5, 4], [1, 0, 1, 2, 1],
[4, 3, 4, 5, 4], [7, 6, 7, 8, 7]]]]).astype(np.float32))
print(output, expected_output)
padding = 2
output = ReflectionPad2d(padding)(x)
expected_output = Tensor(np.array([[[[8, 7, 6, 7, 8, 7, 6], [5, 4, 3, 4, 5, 4, 3],
[2, 1, 0, 1, 2, 1, 0], [5, 4, 3, 4, 5, 4, 3],
[8, 7, 6, 7, 8, 7, 6], [5, 4, 3, 4, 5, 4, 3],
[2, 1, 0, 1, 2, 1, 0]]]]).astype(np.float32))
print(output, expected_output)
# Test functionality with 3D tensor as input
x = Tensor(np.array([[[0, 1, 2], [3, 4, 5], [6, 7, 8]]]).astype(np.float32))
padding = (1, 1, 2, 0)
net = ReflectionPad2d(padding)
output = net(x)
expected_output = Tensor(np.array([[[7, 6, 7, 8, 7], [4, 3, 4, 5, 4], [1, 0, 1, 2, 1],
[4, 3, 4, 5, 4], [7, 6, 7, 8, 7]]]).astype(np.float32))
print(output, expected_output)
padding = 2
output = ReflectionPad2d(padding)(x)
expected_output = Tensor(np.array([[[8, 7, 6, 7, 8, 7, 6], [5, 4, 3, 4, 5, 4, 3],
[2, 1, 0, 1, 2, 1, 0], [5, 4, 3, 4, 5, 4, 3],
[8, 7, 6, 7, 8, 7, 6], [5, 4, 3, 4, 5, 4, 3],
[2, 1, 0, 1, 2, 1, 0]]]).astype(np.float32))
print(output, expected_output)
def test_invalid_padding_reflection_pad_1d():
"""
Feature: ReflectionPad1d
Description: test 5 cases of invalid input.
Expectation: success
"""
# case 1: padding is not int or tuple
padding = '-1'
with pytest.raises(TypeError):
ReflectionPad1d(padding)
# case 2: padding length is not divisible by 2
padding = (1, 2, 2)
with pytest.raises(ValueError):
ReflectionPad1d(padding)
# case 3: padding element is not int
padding = ('2', 2)
with pytest.raises(TypeError):
ReflectionPad1d(padding)
# case 4: negative padding
padding = (-1, 2)
with pytest.raises(ValueError):
ReflectionPad1d(padding)
# case 5: padding dimension does not match tensor dimension
padding = (1, 1, 1, 1, 1, 1, 1, 1)
x = Tensor([[1, 2, 3], [1, 2, 3]])
with pytest.raises(ValueError):
ReflectionPad1d(padding)(x)
def test_reflection_pad_3d():
"""
Feature: ReflectionPad3d
Description: Infer process of ReflectionPad3d with three type parameters.
Expectation: success
"""
# Test functionality with 4D tensor as input
arr = np.arange(8).astype(np.float32).reshape((1, 2, 2, 2))
x = Tensor(arr)
padding = (1, 1, 1, 0, 0, 1)
net3d = ReflectionPad3d(padding)
output = net3d(x)
expected_output = Tensor(np.array([[[[3, 2, 3, 2], [1, 0, 1, 0], [3, 2, 3, 2]],
[[7, 6, 7, 6], [5, 4, 5, 4], [7, 6, 7, 6]],
[[3, 2, 3, 2], [1, 0, 1, 0], [3, 2, 3, 2]]]]).astype(np.float32))
print(output, expected_output)
padding = 1
output = ReflectionPad3d(padding)(x)
expected_output = Tensor(np.array([[[[7., 6., 7., 6.], [5., 4., 5., 4.],
[7., 6., 7., 6.], [5., 4., 5., 4.]],
[[3., 2., 3., 2.], [1., 0., 1., 0.],
[3., 2., 3., 2.], [1., 0., 1., 0.]],
[[7., 6., 7., 6.], [5., 4., 5., 4.],
[7., 6., 7., 6.], [5., 4., 5., 4.]],
[[3., 2., 3., 2.], [1., 0., 1., 0.],
[3., 2., 3., 2.], [1., 0., 1., 0.]]]]).astype(np.float32))
print(output, expected_output)
def test_invalid_padding_reflection_pad_2d():
"""
Feature: ReflectionPad2d
Description: test 5 cases of invalid input.
Expectation: success
"""
# case 1: padding is not int or tuple
padding = '-1'
with pytest.raises(TypeError):
ReflectionPad2d(padding)
# case 2: padding length is not divisible by 2
padding = (1, 2, 2)
with pytest.raises(ValueError):
ReflectionPad2d(padding)
# case 3: padding element is not int
padding = ('2', 2)
with pytest.raises(TypeError):
ReflectionPad2d(padding)
# case 4: negative padding
padding = (-1, 2)
with pytest.raises(ValueError):
ReflectionPad2d(padding)
# case 5: padding dimension does not match tensor dimension
padding = (1, 1, 1, 1, 1, 1, 1, 1)
x = Tensor([[1, 2, 3], [1, 2, 3]])
with pytest.raises(ValueError):
ReflectionPad2d(padding)(x)