forked from mindspore-Ecosystem/mindspore
!5224 Add test case about loss scale in parallel mode
Merge pull request !5224 from yangzhenzhang/add-split-sens-and-loss-scale-test-case
This commit is contained in:
commit 66d6320b21
@@ -163,6 +163,20 @@ class Net(nn.Cell):
         out = self.mean(out, -1)
         return out
 
+
+class Net2(nn.Cell):
+    def __init__(self):
+        super(Net2, self).__init__()
+        self.matmul = P.MatMul()
+        self.relu = P.ReLU()
+        self.matmul_weight = Parameter(Tensor(np.ones([64, 64]), dtype=ms.float32), name="weight")
+
+    def construct(self, x, b):
+        out = self.matmul(x, self.matmul_weight)
+        out = self.relu(out)
+        return out
+
+
 def test_loss_scale():
     context.set_context(mode=context.GRAPH_MODE)
     context.set_auto_parallel_context(parallel_mode=ParallelMode.SEMI_AUTO_PARALLEL, device_num=8)
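
The hunk above adds a second network, Net2, used by the new loss-scale test. The import block of the test module is not part of this excerpt; as a point of reference, a minimal sketch of the imports such a MindSpore parallel UT typically relies on is shown below. The module paths are assumptions inferred from the identifiers the diff uses (np, ms, nn, P, Tensor, Parameter, context, ParallelMode, Momentum, Model, TrainOneStepWithLossScaleCell):

    # Assumed imports; not part of the diff, inferred from the names used in the hunks.
    import numpy as np
    import mindspore as ms
    import mindspore.nn as nn
    from mindspore import Tensor, Parameter, context
    from mindspore.context import ParallelMode
    from mindspore.nn import Momentum, TrainOneStepWithLossScaleCell
    from mindspore.ops import operations as P
    from mindspore.train.model import Model
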
@@ -174,3 +188,16 @@ def test_loss_scale():
     net = TrainOneStepWithLossScaleCell(net, opt, update_cell)
     model = Model(network=net)
     model.train(2, dataset, dataset_sink_mode=False)
+
+
+def test_loss_scale2():
+    context.set_context(mode=context.GRAPH_MODE, save_graphs=True)
+    context.set_auto_parallel_context(parallel_mode=ParallelMode.SEMI_AUTO_PARALLEL, device_num=8)
+    predict = Tensor(np.ones([64, 64]), dtype=ms.float32)
+    label = Tensor(np.ones([64,]), dtype=ms.int32)
+    dataset = DatasetLenet(predict, label)
+    net = Net2()
+    opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), 0.01, 0.9)
+    net = nn.TrainOneStepWithLossScaleCell(net, opt, update_cell)
+    model = Model(network=net)
+    model.train(2, dataset, dataset_sink_mode=False)
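
Both test_loss_scale and the new test_loss_scale2 wrap the network in TrainOneStepWithLossScaleCell together with an update_cell that is defined elsewhere in the test module and does not appear in this excerpt. As a rough sketch of how such an update cell is commonly obtained in MindSpore (the concrete values below are illustrative, not taken from the test):

    # Illustrative only: the real update_cell is created outside the lines shown in this diff.
    from mindspore.train.loss_scale_manager import DynamicLossScaleManager

    scale_manager = DynamicLossScaleManager(init_loss_scale=2 ** 12, scale_factor=2, scale_window=1000)
    update_cell = scale_manager.get_update_cell()  # an nn.Cell that adjusts the loss scale each step

TrainOneStepWithLossScaleCell scales the loss before backpropagation, unscales the gradients before the optimizer step, and skips the parameter update when overflow is detected, which is the behaviour these parallel-mode tests exercise.
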
@@ -109,11 +109,9 @@ def test_grad_sens_parameter_type():
             out = self.matmul2(out, b)
             return out
 
-    context.set_auto_parallel_context(device_num=8, global_rank=0)
-    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
-    strategy1 = ((4, 2), (2, 1))
-    strategy2 = ((2, 4), (4, 1))
+    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=64, global_rank=0)
+    strategy1 = ((8, 1), (1, 8))
+    strategy2 = ((8, 8), (8, 1))
     net = GradWrap(Net(strategy1, strategy2))
 
     x = Tensor(np.ones([128, 32]), dtype=ms.float32)
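
The strategies in the hunk above feed a GradWrap network whose definition lives earlier in the test file and is not shown here. In MindSpore parallel UTs of this vintage, GradWrap is typically a thin wrapper that applies GradOperation with sens_param=True so the test can pass an explicit sens tensor; a sketch under that assumption:

    # Sketch only; the actual GradWrap/Net definitions are outside this diff.
    import mindspore.nn as nn
    from mindspore.ops import composite as C

    grad_all_with_sens = C.GradOperation(get_all=True, sens_param=True)

    class GradWrap(nn.Cell):
        def __init__(self, network):
            super(GradWrap, self).__init__()
            self.network = network

        def construct(self, x, y, b, sens):
            # Differentiate the wrapped network w.r.t. all inputs, seeding the
            # backward pass with the user-supplied sensitivity tensor.
            return grad_all_with_sens(self.network)(x, y, b, sens)
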
@@ -121,9 +119,14 @@ def test_grad_sens_parameter_type():
     b = Tensor(np.ones([64, 64]), dtype=ms.float32)
 
     sens = Tensor(np.ones([128, 64]), dtype=ms.float32)
-    # net(x, y, b, sens)
     net.set_auto_parallel()
-    _executor.compile(net, x, y, b, sens)
+    _executor.compile(net, x, y, b, sens, phase='train', auto_parallel_mode=True)
+    x_layout = [[8, 8], [1, -1], [16, 32], [0], [1]]
+    y_layout = [[8, 8], [-1, 0], [32, 8], [0], [1]]
+    b_layout = [[8, 8], [0, -1], [8, 64], [0], [1]]
+    sens_layout = [[8, 8], [1, -1], [16, 64], [0], [1]]
+    expect_dict = {'x': x_layout, 'y': y_layout, 'b': b_layout, 'sens': sens_layout}
+    assert net.parameter_layout_dict == expect_dict
 
 
 def test_grad_sens_tensor_type():
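
Each expected layout above has the form [device_matrix, tensor_map, slice_shape, ...]: with device_num=64 the device matrix is [8, 8], and the tensor map records which device-matrix axis shards each tensor dimension, with -1 meaning the dimension is replicated. Assuming MindSpore's convention that tensor_map indexes device-matrix axes from the rightmost axis as 0, the asserted slice shapes follow directly from the strategies; a small illustrative check (not part of the test):

    # Illustrative helper, not part of the diff: derive the slice shape recorded in
    # parameter_layout_dict from the device matrix and tensor map.
    def slice_shape(full_shape, dev_matrix, tensor_map):
        shape = []
        for dim, axis in zip(full_shape, tensor_map):
            if axis == -1:
                shape.append(dim)  # dimension is not split
            else:
                shape.append(dim // dev_matrix[len(dev_matrix) - 1 - axis])
        return shape

    assert slice_shape([128, 32], [8, 8], [1, -1]) == [16, 32]   # x_layout
    assert slice_shape([32, 64], [8, 8], [-1, 0]) == [32, 8]     # y_layout
    assert slice_shape([64, 64], [8, 8], [0, -1]) == [8, 64]     # b_layout
    assert slice_shape([128, 64], [8, 8], [1, -1]) == [16, 64]   # sens_layout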