!9178 add some PyNative ST test cases on GPU

From: @lvchangquan
Reviewed-by: @kisnwang
Signed-off-by:
This commit is contained in:
mindspore-ci-bot 2020-11-30 14:39:37 +08:00 committed by Gitee
commit ff2c44c935
3 changed files with 241 additions and 90 deletions

View File

@ -23,9 +23,6 @@ from mindspore.common.tensor import Tensor
from mindspore.ops.composite import GradOperation
from mindspore.common import ParameterTuple
def setup_module():
context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
class MetaFactory:
def __init__(self):
self.device_target = context.get_context('device_target')
@ -228,11 +225,7 @@ def allclose_nparray(data_expected, data_me, rtol, atol, equal_nan=True):
else:
assert True
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_pynative_hook_diff_hook():
def pynative_hook_diff_hook():
input_np = np.ones([1, 1, 224, 224]).astype(np.float32)
ms_net = FinalNet()
ms_net.set_grad()
@ -244,11 +237,7 @@ def test_pynative_hook_diff_hook():
grad_net.set_train()
grad_net(input_ms, Tensor(1), out_ms)
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_pynative_hook_outermost_cell_not_change_grad():
def pynative_hook_outermost_cell_not_change_grad():
input_np = np.ones([2, 2]).astype(np.float32)
ms_net = MsOneInputNet()
@ -269,11 +258,7 @@ def test_pynative_hook_outermost_cell_not_change_grad():
allclose_nparray(torch_net_grad_output, ms_net.grad_input_list[0].asnumpy(), 0.001, 0.001)
allclose_nparray(torch_net_grad_input, ms_net.grad_output_list[0].asnumpy(), 0.001, 0.001)
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_pynative_hook_all_cell_record_grad():
def pynative_hook_all_cell_record_grad():
input_np = np.ones([2, 2]).astype(np.float32)
ms_net = MsOneInputNet()
@ -301,11 +286,7 @@ def test_pynative_hook_all_cell_record_grad():
allclose_nparray(torch_net_grad_input3, ms_net.grad_output_list[2].asnumpy(), 0.001, 0.001)
allclose_nparray(torch_net_grad_output2, ms_net.grad_input_list[2].asnumpy(), 0.001, 0.001)
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_pynative_hook_mul_change_input_grad():
def pynative_hook_mul_change_input_grad():
input_np = np.ones([2, 2]).astype(np.float32)
ms_net = MsOneInputNet()
@ -321,11 +302,7 @@ def test_pynative_hook_mul_change_input_grad():
input_torch_grad = np.array([[40, 40], [40, 40]])
allclose_nparray(input_torch_grad, input_ms_grad[0].asnumpy(), 0.001, 0.001)
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_pynative_hook_mul2_change_input_grad():
def pynative_hook_mul2_change_input_grad():
input1_np = np.array([2.0, 3.0, 4.0]).astype(np.float32)
input2_np = np.array([2.0, 3.0, 4.0]).astype(np.float32)
@ -345,11 +322,7 @@ def test_pynative_hook_mul2_change_input_grad():
allclose_nparray(input1_torch_grad, input_ms_grad[0].asnumpy(), 0.001, 0.001)
allclose_nparray(input2_torch_grad, input_ms_grad[1].asnumpy(), 0.001, 0.001)
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_pynative_hook_outermost_cell_change_grad():
def pynative_hook_outermost_cell_change_grad():
input_np = np.ones([2, 2]).astype(np.float32)
ms_net = MsNetWithCellinCell()
@ -367,11 +340,7 @@ def test_pynative_hook_outermost_cell_change_grad():
allclose_nparray(out_torch, out_ms.asnumpy(), 0.001, 0.001)
allclose_nparray(input_torch_grad, input_ms_grad[0].asnumpy(), 0.001, 0.001)
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_pynative_hook_outermost_cell_record_grad():
def pynative_hook_outermost_cell_record_grad():
input_np = np.ones([2, 2]).astype(np.float32)
ms_net = MsSingleOpNetWithBprop()
@ -393,11 +362,7 @@ def test_pynative_hook_outermost_cell_record_grad():
allclose_nparray(out_torch, out_ms.asnumpy(), 0.001, 0.001)
allclose_nparray(input_torch_grad, input_ms_grad[0].asnumpy(), 0.001, 0.001)
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_pynative_hook_bprop_outermost_cell_record_grad():
def pynative_hook_bprop_outermost_cell_record_grad():
input_np = np.ones([2, 2]).astype(np.float32)
ms_net = MsNetHasBpropInChild()
@ -424,11 +389,7 @@ def test_pynative_hook_bprop_outermost_cell_record_grad():
allclose_nparray(torch_net_grad_output, ms_net.grad_input_list[0].asnumpy(), 0.001, 0.001)
allclose_nparray(torch_net_grad_input, ms_net.grad_output_list[0].asnumpy(), 0.001, 0.001)
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_pynative_hook_child_cell_record_grad():
def pynative_hook_child_cell_record_grad():
input_np = np.ones([2, 2]).astype(np.float32)
ms_net = MsMultiOpNetWithBprop()
@ -444,3 +405,138 @@ def test_pynative_hook_child_cell_record_grad():
if ms_net.grad_output_list or ms_net.grad_input_list:
assert False
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_pynative_hook_diff_hook_ascend():
    """Run the diff-hook pynative hook case in PYNATIVE_MODE on Ascend."""
    context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
    pynative_hook_diff_hook()
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_pynative_hook_diff_hook_gpu():
    """Run the diff-hook pynative hook case in PYNATIVE_MODE on GPU."""
    context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
    pynative_hook_diff_hook()
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_pynative_hook_outermost_cell_not_change_grad_ascend():
    """Run the outermost-cell-not-change-grad hook case in PYNATIVE_MODE on Ascend."""
    context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
    pynative_hook_outermost_cell_not_change_grad()
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_pynative_hook_outermost_cell_not_change_grad_gpu():
    """Run the outermost-cell-not-change-grad hook case in PYNATIVE_MODE on GPU."""
    context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
    pynative_hook_outermost_cell_not_change_grad()
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_pynative_hook_all_cell_record_grad_ascend():
    """Run the all-cell-record-grad hook case in PYNATIVE_MODE on Ascend."""
    context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
    pynative_hook_all_cell_record_grad()
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_pynative_hook_all_cell_record_grad_gpu():
    """Run the all-cell-record-grad hook case in PYNATIVE_MODE on GPU."""
    context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
    pynative_hook_all_cell_record_grad()
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_pynative_hook_mul_change_input_grad_ascend():
    """Run the mul-change-input-grad hook case in PYNATIVE_MODE on Ascend."""
    context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
    pynative_hook_mul_change_input_grad()
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_pynative_hook_mul_change_input_grad_gpu():
    """Run the mul-change-input-grad hook case in PYNATIVE_MODE on GPU."""
    context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
    pynative_hook_mul_change_input_grad()
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_pynative_hook_mul2_change_input_grad_ascend():
    """Run the mul2-change-input-grad hook case in PYNATIVE_MODE on Ascend."""
    context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
    pynative_hook_mul2_change_input_grad()
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_pynative_hook_mul2_change_input_grad_gpu():
    """Run the mul2-change-input-grad hook case in PYNATIVE_MODE on GPU."""
    context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
    pynative_hook_mul2_change_input_grad()
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_pynative_hook_outermost_cell_change_grad_ascend():
    """Run the outermost-cell-change-grad hook case in PYNATIVE_MODE on Ascend."""
    context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
    pynative_hook_outermost_cell_change_grad()
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_pynative_hook_outermost_cell_change_grad_gpu():
    """Run the outermost-cell-change-grad hook case in PYNATIVE_MODE on GPU."""
    context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
    pynative_hook_outermost_cell_change_grad()
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_pynative_hook_outermost_cell_record_grad_ascend():
    """Run the outermost-cell-record-grad hook case in PYNATIVE_MODE on Ascend."""
    context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
    pynative_hook_outermost_cell_record_grad()
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_pynative_hook_outermost_cell_record_grad_gpu():
    """Run the outermost-cell-record-grad hook case in PYNATIVE_MODE on GPU."""
    context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
    pynative_hook_outermost_cell_record_grad()
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_pynative_hook_bprop_outermost_cell_record_grad_ascend():
    """Run the bprop-outermost-cell-record-grad hook case in PYNATIVE_MODE on Ascend."""
    context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
    pynative_hook_bprop_outermost_cell_record_grad()
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_pynative_hook_bprop_outermost_cell_record_grad_gpu():
    """Run the bprop-outermost-cell-record-grad hook case in PYNATIVE_MODE on GPU."""
    context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
    pynative_hook_bprop_outermost_cell_record_grad()
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_pynative_hook_child_cell_record_grad_ascend():
    """Run the child-cell-record-grad hook case in PYNATIVE_MODE on Ascend."""
    context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
    pynative_hook_child_cell_record_grad()
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_pynative_hook_child_cell_record_grad_gpu():
    """Run the child-cell-record-grad hook case in PYNATIVE_MODE on GPU."""
    context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
    pynative_hook_child_cell_record_grad()

View File

@ -126,19 +126,19 @@ class LayerNormFactory(OpsFactory):
return input_grad[0][0].asnumpy(), input_grad[1][1].asnumpy(), input_grad[1][0].asnumpy()
def forward_cmp(self):
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
context.set_context(mode=context.GRAPH_MODE, device_target=context.get_context('device_target'))
graph_out = self.forward_mindspore_impl()
context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
context.set_context(mode=context.PYNATIVE_MODE, device_target=context.get_context('device_target'))
pynative_out = self.forward_mindspore_impl()
allclose_nparray(graph_out[0], pynative_out[0], self.loss, self.loss)
def grad_cmp(self):
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
context.set_context(mode=context.GRAPH_MODE, device_target=context.get_context('device_target'))
graph_grad1, graph_grad2, graph_grad3 = self.grad_mindspore_impl()
context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
context.set_context(mode=context.PYNATIVE_MODE, device_target=context.get_context('device_target'))
pynative_grad1, pynative_grad2, pynative_grad3 = self.grad_mindspore_impl()
allclose_nparray(graph_grad1, pynative_grad1, self.loss, self.loss)
@ -197,30 +197,52 @@ class ArgMaxWithValueFactory(OpsFactory):
allclose_nparray(out_numpy[1], out_mindspore[1], self.loss, self.loss)
def grad_cmp(self):
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
context.set_context(mode=context.GRAPH_MODE, device_target=context.get_context('device_target'))
graph_grad = self.grad_mindspore_impl()
context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
context.set_context(mode=context.PYNATIVE_MODE, device_target=context.get_context('device_target'))
pynative_grad = self.grad_mindspore_impl()
allclose_nparray(graph_grad, pynative_grad, self.loss, self.loss)
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_layernorm_input():
def layernorm_input():
fact = LayerNormFactory(input_shape=(1, 128, 1024), norm_shape=(1024,), gamma_shape=(1024,), beta_shape=(1024,),
norm_axis=2, params_axis=2, dtype=np.float16)
fact.forward_cmp()
fact.loss = 5e-3
fact.grad_cmp()
def argmaxwithvalue_input():
    """Compare ArgMaxWithValue forward and gradient results via ArgMaxWithValueFactory."""
    fact = ArgMaxWithValueFactory(input_shape=[1024, 1024], axis=-1, keep_dims=False)
    fact.forward_cmp()
    fact.grad_cmp()
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_argmaxwithvalue_input():
fact = ArgMaxWithValueFactory(input_shape=[1024, 1024], axis=-1, keep_dims=False)
fact.forward_cmp()
fact.grad_cmp()
def test_layernorm_input_ascend():
    """Run the LayerNorm comparison case on Ascend."""
    context.set_context(device_target="Ascend")
    layernorm_input()
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_layernorm_input_gpu():
    """Run the LayerNorm comparison case on GPU."""
    context.set_context(device_target="GPU")
    layernorm_input()
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_argmaxwithvalue_input_ascend():
    """Run the ArgMaxWithValue comparison case on Ascend."""
    context.set_context(device_target="Ascend")
    argmaxwithvalue_input()
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_argmaxwithvalue_input_gpu():
    """Run the ArgMaxWithValue comparison case on GPU."""
    context.set_context(device_target="GPU")
    argmaxwithvalue_input()

View File

@ -85,7 +85,7 @@ def allclose_nparray(data_expected, data_me, rtol, atol, equal_nan=True):
else:
assert True
def mixed_precision_multiple_cells_01():
def mixed_precision_multiple_cells_temp_01():
np.random.seed(1)
x = np.random.randn(1, 3, 28, 28).astype(np.float32)
net = ReluTanhSoftmax()
@ -95,7 +95,7 @@ def mixed_precision_multiple_cells_01():
out_me_relu_01, out_me_tanh_01, out_me_softmax_01 = net(Tensor(x))
return out_me_relu_01, out_me_tanh_01, out_me_softmax_01
def mixed_precision_multiple_cells_02():
def mixed_precision_multiple_cells_temp_02():
np.random.seed(1)
x = np.random.randn(1, 3, 28, 28).astype(np.float32)
net = ReluTanhSoftmax()
@ -105,7 +105,7 @@ def mixed_precision_multiple_cells_02():
out_me_relu_02, out_me_tanh_02, out_me_softmax_02 = net(Tensor(x))
return out_me_relu_02, out_me_tanh_02, out_me_softmax_02
def mixed_precision_multiple_cells_03():
def mixed_precision_multiple_cells_temp_03():
np.random.seed(1)
x = np.random.randn(1, 3, 28, 28).astype(np.float32)
net = ReluTanhAdd()
@ -115,45 +115,78 @@ def mixed_precision_multiple_cells_03():
out_me = net(Tensor(x))
return out_me
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_mixed_precision_multiples_cell_01():
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
graph_relu_01, graph_tanh_01, graph_softmax_01 = mixed_precision_multiple_cells_01()
def mixed_precision_multiples_cell_01():
context.set_context(mode=context.GRAPH_MODE, device_target=context.get_context('device_target'))
graph_relu_01, graph_tanh_01, graph_softmax_01 = mixed_precision_multiple_cells_temp_01()
context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
pynative_relu_01, pynative_tanh_01, pynative_softmax_01 = mixed_precision_multiple_cells_01()
context.set_context(mode=context.PYNATIVE_MODE, device_target=context.get_context('device_target'))
pynative_relu_01, pynative_tanh_01, pynative_softmax_01 = mixed_precision_multiple_cells_temp_01()
allclose_nparray(graph_relu_01.asnumpy(), pynative_relu_01.asnumpy(), 0.001, 0.001)
allclose_nparray(graph_tanh_01.asnumpy(), pynative_tanh_01.asnumpy(), 0.001, 0.001)
allclose_nparray(graph_softmax_01.asnumpy(), pynative_softmax_01.asnumpy(), 0.001, 0.001)
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_mixed_precision_multiples_cell_02():
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
graph_relu_02, graph_tanh_02, graph_softmax_02 = mixed_precision_multiple_cells_02()
def mixed_precision_multiples_cell_02():
context.set_context(mode=context.GRAPH_MODE, device_target=context.get_context('device_target'))
graph_relu_02, graph_tanh_02, graph_softmax_02 = mixed_precision_multiple_cells_temp_02()
context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
pynative_relu_02, pynative_tanh_02, pynative_softmax_02 = mixed_precision_multiple_cells_02()
context.set_context(mode=context.PYNATIVE_MODE, device_target=context.get_context('device_target'))
pynative_relu_02, pynative_tanh_02, pynative_softmax_02 = mixed_precision_multiple_cells_temp_02()
allclose_nparray(graph_relu_02.asnumpy(), pynative_relu_02.asnumpy(), 0.001, 0.001)
allclose_nparray(graph_tanh_02.asnumpy(), pynative_tanh_02.asnumpy(), 0.001, 0.001)
allclose_nparray(graph_softmax_02.asnumpy(), pynative_softmax_02.asnumpy(), 0.001, 0.001)
def mixed_precision_multiples_cell_03():
    """Compare graph-mode vs PyNative-mode outputs of mixed_precision_multiple_cells_temp_03.

    Keeps the currently configured device target and only switches execution mode.
    """
    context.set_context(mode=context.GRAPH_MODE, device_target=context.get_context('device_target'))
    graph_output_03 = mixed_precision_multiple_cells_temp_03()
    context.set_context(mode=context.PYNATIVE_MODE, device_target=context.get_context('device_target'))
    pynative_output_03 = mixed_precision_multiple_cells_temp_03()
    allclose_nparray(graph_output_03.asnumpy(), pynative_output_03.asnumpy(), 0.001, 0.001)
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_mixed_precision_multiples_cell_03():
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
graph_output_03 = mixed_precision_multiple_cells_03()
def test_mixed_precision_multiples_cell_ascend_01():
context.set_context(device_target="Ascend")
mixed_precision_multiples_cell_01()
context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
pynative_output_03 = mixed_precision_multiple_cells_03()
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_mixed_precision_multiples_cell_gpu_01():
    """Run mixed-precision multiple-cells case 01 on GPU."""
    context.set_context(device_target="GPU")
    mixed_precision_multiples_cell_01()
allclose_nparray(graph_output_03.asnumpy(), pynative_output_03.asnumpy(), 0.001, 0.001)
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_mixed_precision_multiples_cell_ascend_02():
    """Run mixed-precision multiple-cells case 02 on Ascend."""
    context.set_context(device_target="Ascend")
    mixed_precision_multiples_cell_02()
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_mixed_precision_multiples_cell_gpu_02():
    """Run mixed-precision multiple-cells case 02 on GPU."""
    context.set_context(device_target="GPU")
    mixed_precision_multiples_cell_02()
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_mixed_precision_multiples_cell_ascend_03():
    """Run mixed-precision multiple-cells case 03 on Ascend."""
    context.set_context(device_target="Ascend")
    mixed_precision_multiples_cell_03()
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_mixed_precision_multiples_cell_gpu_03():
    """Run mixed-precision multiple-cells case 03 on GPU."""
    context.set_context(device_target="GPU")
    mixed_precision_multiples_cell_03()