forked from mindspore-Ecosystem/mindspore
!5998 change internal API _get_strategy() to _get_shard_strategy()
Merge pull request !5998 from yihuaijie/master
This commit is contained in: commit f1979f8fe5
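This change renames an internal API only; the return value and behavior are unchanged. A minimal migration sketch for callers, not part of this diff: it assumes the test-style import of `_executor` from `mindspore.common.api`, and `collect_shard_strategies` is a hypothetical helper name used only for illustration.

from mindspore.common.api import _executor

def collect_shard_strategies(net):
    """Return the parallel shard strategies chosen for a compiled network (sketch)."""
    # Old internal name, removed by this change:
    #     strategies = _executor._get_strategy(net)
    # New internal name introduced by this change:
    strategies = _executor._get_shard_strategy(net)
    # Same return shape as before: a dict mapping operator full names to strategies,
    # e.g. {'Default/network-Net/MatMul-op0': [[16, 1], [1, 1]]}
    return strategies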
@@ -448,7 +448,7 @@ class _Executor:
         new_param = {x.name: replace[x] for x in replace if id(x) != id(replace[x])}
         return self._executor.updata_param_node_default_input(phase, new_param)
 
-    def _get_strategy(self, obj):
+    def _get_shard_strategy(self, obj):
         real_phase = self.phase_prefix + obj.phase + '.' + str(obj.create_time)
         return self._executor.get_strategy(real_phase)

@@ -86,7 +86,7 @@ def all_to_all_common(strategy1):
     model = Model(net, loss, opt)
 
     model.train(epoch_size, dataset, dataset_sink_mode=False)
-    strategys = _executor._get_strategy(model._train_network)
+    strategys = _executor._get_shard_strategy(model._train_network)
     return strategys

@@ -76,7 +76,7 @@ def test_auto_parallel_arithmetic():
     y = Tensor(np.ones([32, 128]), dtype=ms.float32)
     b = Tensor(np.ones([64, 128]), dtype=ms.float32)
     compile_net(net, x, y, b, phase='train')
-    strategies = _executor._get_strategy(net)
+    strategies = _executor._get_shard_strategy(net)
     expected_strategies = {'Default/network-Net/FloorDiv-op0': [[2, 4], [2, 4]],
                            'Default/network-Net/MatMul-op1': [[2, 1], [1, 4]]}
     assert strategies == expected_strategies

@@ -103,7 +103,7 @@ def test_auto_parallel_arithmetic_broadcast_both():
     y = Tensor(np.ones([32, 1]), dtype=ms.float32)
     b = Tensor(np.ones([1, 64]), dtype=ms.float32)
     compile_net(net, x, y, b, phase='train')
-    strategies = _executor._get_strategy(net)
+    strategies = _executor._get_shard_strategy(net)
     expected_strategies = {'Default/network-Net/FloorDiv-op0': [[8, 1], [1, 1]],
                            'Default/network-Net/MatMul-op1': [[8, 1], [1, 1]]}
     assert strategies == expected_strategies

@@ -130,7 +130,7 @@ def test_auto_parallel_arithmetic_broadcast_right():
     y = Tensor(np.ones([32, 32]), dtype=ms.float32)
     b = Tensor(np.ones([32]), dtype=ms.float32)
     compile_net(net, x, y, b, phase='train')
-    strategies = _executor._get_strategy(net)
+    strategies = _executor._get_shard_strategy(net)
     expected_strategies = {'Default/network-Net/FloorDiv-op0': [[4, 2], [2]],
                            'Default/network-Net/MatMul-op1': [[4, 1], [1, 2]]}
     assert strategies == expected_strategies

@@ -157,7 +157,7 @@ def test_auto_parallel_arithmetic_broadcast_left():
     y = Tensor(np.ones([32, 32]), dtype=ms.float32)
     b = Tensor(np.ones([128, 64, 32]), dtype=ms.float32)
     compile_net(net, x, y, b, phase="train")
-    strategies = _executor._get_strategy(net)
+    strategies = _executor._get_shard_strategy(net)
     expected_strategies = {'Default/network-Net/FloorDiv-op0': [[4, 2], [1, 4, 2]],
                            'Default/network-Net/MatMul-op1': [[4, 1], [1, 2]]}
     assert strategies == expected_strategies

@@ -62,7 +62,7 @@ def test_auto_parallel_assign_sub_with_ref_key():
     reset_op_id()
 
     _executor.compile(net, x, phase="train")
-    strategies = _executor._get_strategy(net)
+    strategies = _executor._get_shard_strategy(net)
     for (k, v) in strategies.items():
         if re.search('PReLU-op', k) is not None:
             assert v == [[1, 1, 1, 8], [1]]

@@ -82,7 +82,7 @@ def test_double_star_graph():
     reset_op_id()
 
     _executor.compile(net, x, y, z, w, phase='train')
-    strategies = _executor._get_strategy(net)
+    strategies = _executor._get_shard_strategy(net)
     expected_strategies = {'Default/network-Net/Cast-op0': [[8, 1]],
                            'Default/network-Net/Cast-op1': [[1, 8]],
                            'Default/network-Net/MatMul-op3': [[8, 1], [1, 1]],

@@ -113,7 +113,7 @@ def test_double_subgraphs():
     x = Tensor(np.ones([8, 8, 8, 8]), dtype=ms.float32)
     reset_op_id()
     _executor.compile(net, x, phase='train')
-    strategies = _executor._get_strategy(net)
+    strategies = _executor._get_shard_strategy(net)
     expected_strategies = {'Default/network-NetWithLoss/ReduceMean-op0': [[8, 1, 1, 1]],
                            'Default/network-NetWithLoss/net-Net/ReLU-op1': [[8, 1, 1, 1]],
                            'Default/network-NetWithLoss/net-Net/Mul-op2': [[8, 1, 1, 1], [8, 1, 1, 1]],

@@ -159,7 +159,7 @@ def test_double_subgraphs_train():
     ds_train = DatasetLenet(Tensor(batch_ids), None)
     model = Model(net)
     model.train(1, ds_train, dataset_sink_mode=False)
-    strategies = _executor._get_strategy(net)
+    strategies = _executor._get_shard_strategy(net)
     expected_strategies = {'Default/network-NetWithLoss/ReduceMean-op3': [[1, 1, 1, 1]],
                            'Default/network-NetWithLoss/net-Net/ReLU-op4': [[1, 1, 1, 1]],
                            'Default/network-NetWithLoss/net-Net/Mul-op5': [[1, 1, 1, 1], [1, 1, 1, 1]],

@@ -75,7 +75,7 @@ def test_matmul_prelu():
     reset_op_id()
 
     _executor.compile(net, x, y, b, phase='train')
-    strategies = _executor._get_strategy(net)
+    strategies = _executor._get_shard_strategy(net)
     for (k, v) in strategies.items():
         if re.search('PReLU-op', k) is not None:
             assert v == [[16, 1, 1, 1], [1]]

@@ -69,7 +69,7 @@ def test_common_parameter():
     reset_op_id()
 
     _executor.compile(net, x, y, phase='train')
-    strategies = _executor._get_strategy(net)
+    strategies = _executor._get_shard_strategy(net)
    for (k, v) in strategies.items():
         if re.search('MatMul-op', k) is not None:
             assert v == [[8, 1], [1, 1]]

@@ -295,7 +295,7 @@ def test_train_32k_8p(batch_size=32, num_classes=32768):
     opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), 0.01, 0.9)
     model = Model(net, loss_fn=loss, optimizer=opt)
     model.train(5, dataset, dataset_sink_mode=False)
-    strategies = _executor._get_strategy(model._train_network)
+    strategies = _executor._get_shard_strategy(model._train_network)
     for (k, v) in strategies.items():
         if re.search('Conv2D-op', k) is not None:
             assert v[0][0] == dev_num

@@ -671,7 +671,7 @@ def test_train_64k_8p(batch_size=32, num_classes=65536): # 1048576 #131072 #327
     opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), 0.01, 0.9)
     model = Model(net, loss_fn=loss, optimizer=opt)
     model.train(5, dataset, dataset_sink_mode=False)
-    strategies = _executor._get_strategy(model._train_network)
+    strategies = _executor._get_shard_strategy(model._train_network)
     for (k, v) in strategies.items():
         if re.search('Conv2D-op', k) is not None:
             assert v[0][0] == dev_num

@@ -698,7 +698,7 @@ def test_train_8k_8p_gpu(batch_size=32, num_classes=8192):
     opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), 0.01, 0.9)
     model = Model(net, loss_fn=loss, optimizer=opt)
     model.train(5, dataset, dataset_sink_mode=False)
-    strategies = _executor._get_strategy(model._train_network)
+    strategies = _executor._get_shard_strategy(model._train_network)
     for (k, v) in strategies.items():
         if re.search('Conv2D-op', k) is not None:
             assert v[0][0] == dev_num

@@ -724,7 +724,7 @@ def test_train_4k_8p_gpu(batch_size=32, num_classes=4096):
     opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), 0.01, 0.9)
     model = Model(net, loss_fn=loss, optimizer=opt)
     model.train(5, dataset, dataset_sink_mode=False)
-    strategies = _executor._get_strategy(model._train_network)
+    strategies = _executor._get_shard_strategy(model._train_network)
     for (k, v) in strategies.items():
         if re.search('Conv2D-op', k) is not None:
             assert v[0][0] == dev_num

@@ -77,7 +77,7 @@ def test_two_matmul_transpose():
     reset_op_id()
 
     _executor.compile(net, x, y, b, phase='train')
-    strategies = _executor._get_strategy(net)
+    strategies = _executor._get_shard_strategy(net)
     expected_strategies = {'Default/network-Net/Transpose-op0': [[1, 16]],
                            'Default/network-Net/Transpose-op1': [[16, 1]],
                            'Default/network-Net/MatMul-op2': [[16, 1], [1, 1]],

@@ -78,7 +78,7 @@ def test_two_bn():
     reset_op_id()
 
     _executor.compile(net, x, phase='train')
-    strategies = _executor._get_strategy(net)
+    strategies = _executor._get_shard_strategy(net)
     assert len(strategies) == 4
 
     for (k, v) in strategies.items():

@@ -135,7 +135,7 @@ def test_two_matmul():
     reset_op_id()
 
     _executor.compile(net, x, y, b, phase='train')
-    strategies = _executor._get_strategy(net)
+    strategies = _executor._get_shard_strategy(net)
     expected_strategies = {'Default/network-Net/MatMul-op0': [[16, 1], [1, 1]],
                            'Default/network-Net/MatMul-op1': [[16, 1], [1, 1]]}
     assert strategies == expected_strategies

@@ -87,7 +87,7 @@ def all_to_all_common():
     model = Model(net, loss, opt)
 
     model.train(epoch_size, dataset, dataset_sink_mode=False)
-    strategys = _executor._get_strategy(model._train_network)
+    strategys = _executor._get_shard_strategy(model._train_network)
     return strategys