[test] Fix GRAPH_MODE not being restored after PYNATIVE_MODE is set

This commit is contained in:
He Wei 2021-04-29 09:28:24 +08:00
parent fd2a9e700d
commit 171cd83188
3 changed files with 28 additions and 11 deletions

View File

@ -550,9 +550,12 @@ def test_side_effect_grad_two_addn_switch():
inputs = Tensor([9.0], ms.float32)
out1 = net.grad_mindspore_impl(inputs, grad_ys)
net = SideEffectTwoAddnSwitchNet()
context.set_context(mode=context.PYNATIVE_MODE)
out2 = net.grad_mindspore_impl(inputs, grad_ys)
allclose_nparray(out1[0][0].asnumpy(), out2[0][0].asnumpy(), 0.001, 0.001)
try:
context.set_context(mode=context.PYNATIVE_MODE)
out2 = net.grad_mindspore_impl(inputs, grad_ys)
allclose_nparray(out1[0][0].asnumpy(), out2[0][0].asnumpy(), 0.001, 0.001)
finally:
context.set_context(mode=context.GRAPH_MODE)
class SideEffectGradIfNet(Cell):
@ -590,9 +593,12 @@ def test_side_effect_grad_if():
inputs = Tensor([9.0], ms.float32)
out1 = net.grad_mindspore_impl(inputs, grad_ys)
net = SideEffectGradIfNet()
context.set_context(mode=context.PYNATIVE_MODE)
out2 = net.grad_mindspore_impl(inputs, grad_ys)
allclose_nparray(out1.asnumpy(), out2.asnumpy(), 0.001, 0.001)
try:
context.set_context(mode=context.PYNATIVE_MODE)
out2 = net.grad_mindspore_impl(inputs, grad_ys)
allclose_nparray(out1.asnumpy(), out2.asnumpy(), 0.001, 0.001)
finally:
context.set_context(mode=context.GRAPH_MODE)
class OneInputBprop(Cell):
@ -683,8 +689,11 @@ def test_side_effect_grad_control_flow_assign_depend_while_net():
inputs2 = Tensor([6.0], ms.float32)
inputs3 = Tensor([3.0], ms.float32)
out1 = net.grad_mindspore_impl(inputs1, inputs2, inputs3, grad_ys)
context.set_context(mode=context.PYNATIVE_MODE)
net = SideEffectControlFlowAssignDependWhileNet()
out2 = net.grad_mindspore_impl(inputs1, inputs2, inputs3, grad_ys)
allclose_nparray(out1[0][0].asnumpy(), out2[0][0].asnumpy(), 0.001, 0.001)
allclose_nparray(out1[1][0].asnumpy(), out2[1][0].asnumpy(), 0.001, 0.001)
try:
context.set_context(mode=context.PYNATIVE_MODE)
net = SideEffectControlFlowAssignDependWhileNet()
out2 = net.grad_mindspore_impl(inputs1, inputs2, inputs3, grad_ys)
allclose_nparray(out1[0][0].asnumpy(), out2[0][0].asnumpy(), 0.001, 0.001)
allclose_nparray(out1[1][0].asnumpy(), out2[1][0].asnumpy(), 0.001, 0.001)
finally:
context.set_context(mode=context.GRAPH_MODE)

View File

@ -29,6 +29,7 @@ class Sampling(nn.Cell):
"""
Test class: sample of Normal distribution.
"""
def __init__(self, shape, seed=0):
super(Sampling, self).__init__()
self.n1 = msd.Normal(0, 1, seed=seed, dtype=dtype.float32)
@ -400,6 +401,8 @@ class RandomChoiceWithMaskNet(nn.Cell):
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_random_choice_with_mask():
mode = context.get_context('mode')
assert (mode == context.GRAPH_MODE), 'GRAPH_MODE required but got ' + str(mode)
net = RandomChoiceWithMaskNet()
x = Tensor(np.array([[1, 0, 1, 0], [0, 0, 0, 1], [1, 1, 1, 1], [0, 0, 0, 1]]).astype(np.bool))
index1, index2, index3 = net(x)

View File

@ -312,6 +312,11 @@ def train_process_thor(q, device_id, epoch_size, device_num, enable_hccl):
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_single
def test_resnet_and_resnet_thor_imagenet_4p():
# reset context
context.set_context(save_graphs=False, enable_graph_kernel=False, enable_sparse=False)
context.reset_auto_parallel_context()
context.reset_ps_context()
q = Queue()
q2 = Queue()