From eb83ea96072109167c8f2efab219464cff722dc3 Mon Sep 17 00:00:00 2001
From: Yi Huaijie
Date: Thu, 10 Sep 2020 17:46:53 +0800
Subject: [PATCH] change internal API _get_strategy() to _get_shard_strategy()

---
 mindspore/common/api.py                                     | 2 +-
 tests/ut/python/parallel/test_alltoall.py                   | 2 +-
 tests/ut/python/parallel/test_auto_parallel_arithmetic.py   | 8 ++++----
 .../test_auto_parallel_assign_sub_with_ref_key.py           | 2 +-
 tests/ut/python/parallel/test_auto_parallel_cast.py         | 2 +-
 .../parallel/test_auto_parallel_double_subgraphs.py         | 4 ++--
 tests/ut/python/parallel/test_auto_parallel_matmul_prelu.py | 2 +-
 .../python/parallel/test_auto_parallel_parameter_cast.py    | 2 +-
 tests/ut/python/parallel/test_auto_parallel_resnet.py       | 8 ++++----
 tests/ut/python/parallel/test_auto_parallel_transpose.py    | 2 +-
 tests/ut/python/parallel/test_auto_parallel_two_bn.py       | 2 +-
 tests/ut/python/parallel/test_auto_parallel_two_matmul.py   | 2 +-
 tests/ut/python/parallel/test_one_dev.py                    | 2 +-
 13 files changed, 20 insertions(+), 20 deletions(-)

diff --git a/mindspore/common/api.py b/mindspore/common/api.py
index 0d5241f4acd..571a54abc75 100644
--- a/mindspore/common/api.py
+++ b/mindspore/common/api.py
@@ -448,7 +448,7 @@ class _Executor:
         new_param = {x.name: replace[x] for x in replace if id(x) != id(replace[x])}
         return self._executor.updata_param_node_default_input(phase, new_param)
 
-    def _get_strategy(self, obj):
+    def _get_shard_strategy(self, obj):
         real_phase = self.phase_prefix + obj.phase + '.' + str(obj.create_time)
         return self._executor.get_strategy(real_phase)
 
diff --git a/tests/ut/python/parallel/test_alltoall.py b/tests/ut/python/parallel/test_alltoall.py
index 203cb036fbc..0f3c134c8b1 100644
--- a/tests/ut/python/parallel/test_alltoall.py
+++ b/tests/ut/python/parallel/test_alltoall.py
@@ -86,7 +86,7 @@ def all_to_all_common(strategy1):
 
     model = Model(net, loss, opt)
     model.train(epoch_size, dataset, dataset_sink_mode=False)
-    strategys = _executor._get_strategy(model._train_network)
+    strategys = _executor._get_shard_strategy(model._train_network)
     return strategys
 
 
diff --git a/tests/ut/python/parallel/test_auto_parallel_arithmetic.py b/tests/ut/python/parallel/test_auto_parallel_arithmetic.py
index 2d25f18081f..e6ac49ceb00 100644
--- a/tests/ut/python/parallel/test_auto_parallel_arithmetic.py
+++ b/tests/ut/python/parallel/test_auto_parallel_arithmetic.py
@@ -76,7 +76,7 @@ def test_auto_parallel_arithmetic():
     y = Tensor(np.ones([32, 128]), dtype=ms.float32)
     b = Tensor(np.ones([64, 128]), dtype=ms.float32)
     compile_net(net, x, y, b, phase='train')
-    strategies = _executor._get_strategy(net)
+    strategies = _executor._get_shard_strategy(net)
     expected_strategies = {'Default/network-Net/FloorDiv-op0': [[2, 4], [2, 4]],
                            'Default/network-Net/MatMul-op1': [[2, 1], [1, 4]]}
     assert strategies == expected_strategies
@@ -103,7 +103,7 @@ def test_auto_parallel_arithmetic_broadcast_both():
     y = Tensor(np.ones([32, 1]), dtype=ms.float32)
     b = Tensor(np.ones([1, 64]), dtype=ms.float32)
     compile_net(net, x, y, b, phase='train')
-    strategies = _executor._get_strategy(net)
+    strategies = _executor._get_shard_strategy(net)
     expected_strategies = {'Default/network-Net/FloorDiv-op0': [[8, 1], [1, 1]],
                            'Default/network-Net/MatMul-op1': [[8, 1], [1, 1]]}
     assert strategies == expected_strategies
@@ -130,7 +130,7 @@ def test_auto_parallel_arithmetic_broadcast_right():
     y = Tensor(np.ones([32, 32]), dtype=ms.float32)
     b = Tensor(np.ones([32]), dtype=ms.float32)
     compile_net(net, x, y, b, phase='train')
-    strategies = _executor._get_strategy(net)
+    strategies = _executor._get_shard_strategy(net)
     expected_strategies = {'Default/network-Net/FloorDiv-op0': [[4, 2], [2]],
                            'Default/network-Net/MatMul-op1': [[4, 1], [1, 2]]}
     assert strategies == expected_strategies
@@ -157,7 +157,7 @@ def test_auto_parallel_arithmetic_broadcast_left():
     y = Tensor(np.ones([32, 32]), dtype=ms.float32)
     b = Tensor(np.ones([128, 64, 32]), dtype=ms.float32)
     compile_net(net, x, y, b, phase="train")
-    strategies = _executor._get_strategy(net)
+    strategies = _executor._get_shard_strategy(net)
     expected_strategies = {'Default/network-Net/FloorDiv-op0': [[4, 2], [1, 4, 2]],
                            'Default/network-Net/MatMul-op1': [[4, 1], [1, 2]]}
     assert strategies == expected_strategies
diff --git a/tests/ut/python/parallel/test_auto_parallel_assign_sub_with_ref_key.py b/tests/ut/python/parallel/test_auto_parallel_assign_sub_with_ref_key.py
index 3c3cd40abb8..530a122cfce 100644
--- a/tests/ut/python/parallel/test_auto_parallel_assign_sub_with_ref_key.py
+++ b/tests/ut/python/parallel/test_auto_parallel_assign_sub_with_ref_key.py
@@ -62,7 +62,7 @@ def test_auto_parallel_assign_sub_with_ref_key():
 
     reset_op_id()
     _executor.compile(net, x, phase="train")
-    strategies = _executor._get_strategy(net)
+    strategies = _executor._get_shard_strategy(net)
     for (k, v) in strategies.items():
         if re.search('PReLU-op', k) is not None:
             assert v == [[1, 1, 1, 8], [1]]
diff --git a/tests/ut/python/parallel/test_auto_parallel_cast.py b/tests/ut/python/parallel/test_auto_parallel_cast.py
index 0e498878ebd..7d1775385a8 100644
--- a/tests/ut/python/parallel/test_auto_parallel_cast.py
+++ b/tests/ut/python/parallel/test_auto_parallel_cast.py
@@ -82,7 +82,7 @@ def test_double_star_graph():
 
     reset_op_id()
     _executor.compile(net, x, y, z, w, phase='train')
-    strategies = _executor._get_strategy(net)
+    strategies = _executor._get_shard_strategy(net)
     expected_strategies = {'Default/network-Net/Cast-op0': [[8, 1]],
                            'Default/network-Net/Cast-op1': [[1, 8]],
                            'Default/network-Net/MatMul-op3': [[8, 1], [1, 1]],
diff --git a/tests/ut/python/parallel/test_auto_parallel_double_subgraphs.py b/tests/ut/python/parallel/test_auto_parallel_double_subgraphs.py
index 93e38263201..7f1eb141c0c 100644
--- a/tests/ut/python/parallel/test_auto_parallel_double_subgraphs.py
+++ b/tests/ut/python/parallel/test_auto_parallel_double_subgraphs.py
@@ -113,7 +113,7 @@ def test_double_subgraphs():
     x = Tensor(np.ones([8, 8, 8, 8]), dtype=ms.float32)
     reset_op_id()
     _executor.compile(net, x, phase='train')
-    strategies = _executor._get_strategy(net)
+    strategies = _executor._get_shard_strategy(net)
     expected_strategies = {'Default/network-NetWithLoss/ReduceMean-op0': [[8, 1, 1, 1]],
                            'Default/network-NetWithLoss/net-Net/ReLU-op1': [[8, 1, 1, 1]],
                            'Default/network-NetWithLoss/net-Net/Mul-op2': [[8, 1, 1, 1], [8, 1, 1, 1]],
@@ -159,7 +159,7 @@ def test_double_subgraphs_train():
     ds_train = DatasetLenet(Tensor(batch_ids), None)
     model = Model(net)
     model.train(1, ds_train, dataset_sink_mode=False)
-    strategies = _executor._get_strategy(net)
+    strategies = _executor._get_shard_strategy(net)
     expected_strategies = {'Default/network-NetWithLoss/ReduceMean-op3': [[1, 1, 1, 1]],
                            'Default/network-NetWithLoss/net-Net/ReLU-op4': [[1, 1, 1, 1]],
                            'Default/network-NetWithLoss/net-Net/Mul-op5': [[1, 1, 1, 1], [1, 1, 1, 1]],
diff --git a/tests/ut/python/parallel/test_auto_parallel_matmul_prelu.py b/tests/ut/python/parallel/test_auto_parallel_matmul_prelu.py
index bc086c5907f..f8c64a4baa4 100644
--- a/tests/ut/python/parallel/test_auto_parallel_matmul_prelu.py
+++ b/tests/ut/python/parallel/test_auto_parallel_matmul_prelu.py
@@ -75,7 +75,7 @@ def test_matmul_prelu():
 
     reset_op_id()
     _executor.compile(net, x, y, b, phase='train')
-    strategies = _executor._get_strategy(net)
+    strategies = _executor._get_shard_strategy(net)
     for (k, v) in strategies.items():
         if re.search('PReLU-op', k) is not None:
             assert v == [[16, 1, 1, 1], [1]]
diff --git a/tests/ut/python/parallel/test_auto_parallel_parameter_cast.py b/tests/ut/python/parallel/test_auto_parallel_parameter_cast.py
index 6d4452407c0..ab84db70d04 100644
--- a/tests/ut/python/parallel/test_auto_parallel_parameter_cast.py
+++ b/tests/ut/python/parallel/test_auto_parallel_parameter_cast.py
@@ -69,7 +69,7 @@ def test_common_parameter():
 
     reset_op_id()
     _executor.compile(net, x, y, phase='train')
-    strategies = _executor._get_strategy(net)
+    strategies = _executor._get_shard_strategy(net)
     for (k, v) in strategies.items():
         if re.search('MatMul-op', k) is not None:
             assert v == [[8, 1], [1, 1]]
diff --git a/tests/ut/python/parallel/test_auto_parallel_resnet.py b/tests/ut/python/parallel/test_auto_parallel_resnet.py
index 9d4d2f27554..99b93a570c5 100644
--- a/tests/ut/python/parallel/test_auto_parallel_resnet.py
+++ b/tests/ut/python/parallel/test_auto_parallel_resnet.py
@@ -295,7 +295,7 @@ def test_train_32k_8p(batch_size=32, num_classes=32768):
     opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), 0.01, 0.9)
     model = Model(net, loss_fn=loss, optimizer=opt)
     model.train(5, dataset, dataset_sink_mode=False)
-    strategies = _executor._get_strategy(model._train_network)
+    strategies = _executor._get_shard_strategy(model._train_network)
     for (k, v) in strategies.items():
         if re.search('Conv2D-op', k) is not None:
             assert v[0][0] == dev_num
@@ -671,7 +671,7 @@ def test_train_64k_8p(batch_size=32, num_classes=65536): # 1048576 #131072 #327
     opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), 0.01, 0.9)
     model = Model(net, loss_fn=loss, optimizer=opt)
     model.train(5, dataset, dataset_sink_mode=False)
-    strategies = _executor._get_strategy(model._train_network)
+    strategies = _executor._get_shard_strategy(model._train_network)
     for (k, v) in strategies.items():
         if re.search('Conv2D-op', k) is not None:
             assert v[0][0] == dev_num
@@ -698,7 +698,7 @@ def test_train_8k_8p_gpu(batch_size=32, num_classes=8192):
     opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), 0.01, 0.9)
     model = Model(net, loss_fn=loss, optimizer=opt)
     model.train(5, dataset, dataset_sink_mode=False)
-    strategies = _executor._get_strategy(model._train_network)
+    strategies = _executor._get_shard_strategy(model._train_network)
     for (k, v) in strategies.items():
         if re.search('Conv2D-op', k) is not None:
             assert v[0][0] == dev_num
@@ -724,7 +724,7 @@ def test_train_4k_8p_gpu(batch_size=32, num_classes=4096):
     opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), 0.01, 0.9)
     model = Model(net, loss_fn=loss, optimizer=opt)
     model.train(5, dataset, dataset_sink_mode=False)
-    strategies = _executor._get_strategy(model._train_network)
+    strategies = _executor._get_shard_strategy(model._train_network)
     for (k, v) in strategies.items():
         if re.search('Conv2D-op', k) is not None:
             assert v[0][0] == dev_num
diff --git a/tests/ut/python/parallel/test_auto_parallel_transpose.py b/tests/ut/python/parallel/test_auto_parallel_transpose.py
index b542004ea72..69ce7a13577 100644
--- a/tests/ut/python/parallel/test_auto_parallel_transpose.py
+++ b/tests/ut/python/parallel/test_auto_parallel_transpose.py
@@ -77,7 +77,7 @@ def test_two_matmul_transpose():
 
     reset_op_id()
     _executor.compile(net, x, y, b, phase='train')
-    strategies = _executor._get_strategy(net)
+    strategies = _executor._get_shard_strategy(net)
     expected_strategies = {'Default/network-Net/Transpose-op0': [[1, 16]],
                            'Default/network-Net/Transpose-op1': [[16, 1]],
                            'Default/network-Net/MatMul-op2': [[16, 1], [1, 1]],
diff --git a/tests/ut/python/parallel/test_auto_parallel_two_bn.py b/tests/ut/python/parallel/test_auto_parallel_two_bn.py
index d771fc794cb..7a9702444db 100644
--- a/tests/ut/python/parallel/test_auto_parallel_two_bn.py
+++ b/tests/ut/python/parallel/test_auto_parallel_two_bn.py
@@ -78,7 +78,7 @@ def test_two_bn():
 
     reset_op_id()
     _executor.compile(net, x, phase='train')
-    strategies = _executor._get_strategy(net)
+    strategies = _executor._get_shard_strategy(net)
     assert len(strategies) == 4
 
     for (k, v) in strategies.items():
diff --git a/tests/ut/python/parallel/test_auto_parallel_two_matmul.py b/tests/ut/python/parallel/test_auto_parallel_two_matmul.py
index f4863cce1c1..9ddeb3d8c33 100644
--- a/tests/ut/python/parallel/test_auto_parallel_two_matmul.py
+++ b/tests/ut/python/parallel/test_auto_parallel_two_matmul.py
@@ -135,7 +135,7 @@ def test_two_matmul():
 
     reset_op_id()
     _executor.compile(net, x, y, b, phase='train')
-    strategies = _executor._get_strategy(net)
+    strategies = _executor._get_shard_strategy(net)
     expected_strategies = {'Default/network-Net/MatMul-op0': [[16, 1], [1, 1]],
                            'Default/network-Net/MatMul-op1': [[16, 1], [1, 1]]}
     assert strategies == expected_strategies
diff --git a/tests/ut/python/parallel/test_one_dev.py b/tests/ut/python/parallel/test_one_dev.py
index 812be2950b1..764470531ad 100644
--- a/tests/ut/python/parallel/test_one_dev.py
+++ b/tests/ut/python/parallel/test_one_dev.py
@@ -87,7 +87,7 @@ def all_to_all_common():
 
     model = Model(net, loss, opt)
     model.train(epoch_size, dataset, dataset_sink_mode=False)
-    strategys = _executor._get_strategy(model._train_network)
+    strategys = _executor._get_shard_strategy(model._train_network)
     return strategys